hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c806655a98372e3737c71b32958435ab122164a | 2,069 | py | Python | combine_classifiers.py | kevincobain2000/nltk-trainer | 5c0b53ccecb7a5042d5af6c4325e134f7d83cb45 | [
"Apache-2.0"
] | 1 | 2021-10-08T11:40:09.000Z | 2021-10-08T11:40:09.000Z | combine_classifiers.py | kevincobain2000/nltk-trainer | 5c0b53ccecb7a5042d5af6c4325e134f7d83cb45 | [
"Apache-2.0"
] | null | null | null | combine_classifiers.py | kevincobain2000/nltk-trainer | 5c0b53ccecb7a5042d5af6c4325e134f7d83cb45 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import argparse, os.path
import nltk.data
from nltk_trainer import dump_object
from nltk_trainer.classification import multi

########################################
## command options & argument parsing ##
########################################

parser = argparse.ArgumentParser(description='Combine NLTK Classifiers')
parser.add_argument('classifiers', nargs='+',
    help='one or more pickled classifiers to load and combine')
# NOTE(review): a positional argument without nargs='?' is always required,
# so this default is never actually used -- confirm intended CLI before
# changing nargs (it would alter how args are split with 'classifiers').
parser.add_argument('filename', default='~/nltk_data/classifiers/combined.pickle',
    help='Filename to pickle combined classifier, defaults to %(default)s')
parser.add_argument('--trace', default=1, type=int,
    help='How much trace output you want, defaults to 1. 0 is no trace output.')
parser.add_argument('--hierarchy', nargs='+', default=[],
    help='''Mapping of labels to classifier pickle paths to specify a classification hierarchy, such as
    "-h neutral:classifiers/movie_reviews.pickle"
''')

args = parser.parse_args()

#####################
## AvgProb combine ##
#####################
# TODO: support MaxVote combining

classifiers = []

for name in args.classifiers:
    if args.trace:
        # Parenthesized single-argument print works in both Python 2 and 3.
        print('loading %s' % name)

    classifiers.append(nltk.data.load(name))

combined = multi.AvgProbClassifier(classifiers)

##########################
## Hierarchical combine ##
##########################

labels = combined.labels()
label_classifiers = {}

for h in args.hierarchy:
    label, path = h.split(':')

    if label not in labels:
        raise ValueError('%s is not in root labels: %s' % (label, labels))

    label_classifiers[label] = nltk.data.load(path)

    if args.trace:
        print('mapping %s to %s from %s' % (label, label_classifiers[label], path))

if label_classifiers:
    if args.trace:
        # BUG FIX: this trace message was previously built as a bare string
        # expression and never printed.
        print('combining %d label classifiers for root %s' % (len(label_classifiers), combined))

    combined = multi.HierarchicalClassifier(combined, label_classifiers)

##############################
## dump combined classifier ##
##############################

fname = os.path.expanduser(args.filename)
dump_object(combined, fname, trace=args.trace)
0f82fa97a8f8ff6afb5752fb69df5ce497e8db2c | 6,500 | py | Python | ymir/command/mir/commands/model_importing.py | phoenix-xhuang/ymir | 537d3ac389c4a365ce4daef431c95b42ddcd5b1b | [
"Apache-2.0"
] | 64 | 2021-11-15T03:48:00.000Z | 2022-03-25T07:08:46.000Z | ymir/command/mir/commands/model_importing.py | phoenix-xhuang/ymir | 537d3ac389c4a365ce4daef431c95b42ddcd5b1b | [
"Apache-2.0"
] | 35 | 2021-11-23T04:14:35.000Z | 2022-03-26T09:03:43.000Z | ymir/command/mir/commands/model_importing.py | phoenix-xhuang/ymir | 537d3ac389c4a365ce4daef431c95b42ddcd5b1b | [
"Apache-2.0"
] | 57 | 2021-11-11T10:15:40.000Z | 2022-03-29T07:27:54.000Z | import argparse
import logging
import os
import shutil
import yaml
from mir.commands import base
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import checker, mir_storage_ops, revs_parser
from mir.tools import settings as mir_settings, utils as mir_utils
from mir.tools.code import MirCode
from mir.tools.command_run_in_out import command_run_in_out
from mir.tools.errors import MirRuntimeError
class CmdModelImport(base.BaseCommand):
    """mir command: import an externally packaged model into the mir repo."""

    def run(self) -> int:
        """Framework entry point; forwards parsed CLI args to run_with_args."""
        logging.debug("command import-model: %s", self.args)
        # src_revs is fixed: imported models are always based on 'master'.
        return CmdModelImport.run_with_args(mir_root=self.args.mir_root,
                                            dst_rev=self.args.dst_rev,
                                            src_revs='master',
                                            work_dir=self.args.work_dir,
                                            package_path=self.args.package_path,
                                            model_location=self.args.model_location)

    @staticmethod
    @command_run_in_out
    def run_with_args(mir_root: str, dst_rev: str, src_revs: str, work_dir: str, package_path: str,
                      model_location: str) -> int:
        """Unpack, validate, re-pack and commit a model package.

        :param mir_root: path to the mir repository root
        :param dst_rev: destination rev@tid (tid required)
        :param src_revs: source rev (tid optional)
        :param work_dir: scratch directory used to extract the package
        :param package_path: path to the model package file to import
        :param model_location: storage location for the re-packed model
        :return: MirCode.RC_OK on success, an error code otherwise
        :raises MirRuntimeError: when the package is missing or was not
            produced by this tool
        """
        # check args
        if not model_location:
            logging.error('empty --model-location')
            return MirCode.RC_CMD_INVALID_ARGS

        src_typ_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=False)
        dst_typ_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True)

        check_code = checker.check(mir_root,
                                   [checker.Prerequisites.IS_INSIDE_MIR_REPO, checker.Prerequisites.HAVE_LABELS])
        if check_code != MirCode.RC_OK:
            return check_code

        if not package_path or not os.path.isfile(package_path):
            raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
                                  error_message=f"model package: {package_path} is not file")

        # unpack the package into <work_dir>/model; the package file name is
        # treated as the model hash by prepare_model
        extract_model_dir_path = os.path.join(work_dir, 'model')
        model_storage = mir_utils.prepare_model(model_location=os.path.dirname(package_path),
                                                model_hash=os.path.basename(package_path),
                                                dst_model_path=extract_model_dir_path)

        logging.info(f"importing model with storage: {model_storage}")

        # check the package really came from this producer
        _check_model(model_storage=model_storage, mir_root=mir_root)

        # update model_storage context and re-pack into model_location
        model_storage.task_context['src-revs'] = src_revs
        model_storage.task_context['dst_rev'] = dst_rev
        model_storage.task_context['type'] = mirpb.TaskType.TaskTypeImportModel
        model_hash = mir_utils.pack_and_copy_models(model_storage=model_storage,
                                                    model_dir_path=extract_model_dir_path,
                                                    model_location=model_location)

        # remove tmp files
        shutil.rmtree(extract_model_dir_path)

        # create task and commit
        task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportModel,
                                           task_id=dst_typ_rev_tid.tid,
                                           message=f"import model {package_path} as {model_hash}",
                                           model_hash=model_hash,
                                           model_mAP=float(model_storage.task_context.get('mAP', 0)),
                                           return_code=MirCode.RC_OK,
                                           return_msg='',
                                           src_revs=src_revs,
                                           dst_rev=dst_rev,
                                           serialized_executor_config=yaml.safe_dump(model_storage.executor_config),
                                           serialized_task_parameters=model_storage.task_context.get(
                                               mir_settings.TASK_CONTEXT_PARAMETERS_KEY, ''),
                                           executor=model_storage.task_context.get('executor', ''))
        mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root,
                                                      mir_branch=dst_typ_rev_tid.rev,
                                                      his_branch=src_typ_rev_tid.rev,
                                                      mir_datas={},
                                                      task=task)

        return MirCode.RC_OK
def _check_model(model_storage: mir_utils.ModelStorage, mir_root: str) -> int:
    """Validate that the model package was produced by this tool.

    Returns MirCode.RC_OK when the recorded producer matches; raises
    MirRuntimeError (RC_CMD_INVALID_FILE) otherwise.
    """
    producer = model_storage.task_context.get(mir_settings.PRODUCER_KEY, None)
    if producer == mir_settings.PRODUCER_NAME:
        return MirCode.RC_OK
    raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_FILE,
                          error_message=f"can not import model, invalid producer: {producer}")
def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None:
    """Register the 'models' sub-command and its arguments on *subparsers*."""
    model_parser = subparsers.add_parser(
        "models",
        parents=[parent_parser],
        description="use this command to import model package",
        help="import model")
    model_parser.add_argument(
        "--package-path",
        dest="package_path",
        type=str,
        required=True,
        help="path to model package file")
    model_parser.add_argument(
        "--dst-rev",
        dest="dst_rev",
        type=str,
        required=True,
        help="rev@tid: destination branch name and task id")
    model_parser.add_argument(
        "--model-location",
        dest="model_location",
        type=str,
        required=True,
        help="storage place (upload location) to store packed model")
    model_parser.add_argument('-w', dest='work_dir', type=str, required=True, help='working directory')
    # dispatch to the command class when this sub-command is selected
    model_parser.set_defaults(func=CmdModelImport)
f0bb775ef3d4325d9576af8c2db8ab59e676304f | 34,055 | py | Python | arcade/sprite_list.py | biggzlar/arcade | fc444db356452660ac6cb2ffe241f0b1a3d4bcf3 | [
"MIT"
] | 1 | 2020-01-18T04:48:38.000Z | 2020-01-18T04:48:38.000Z | arcade/sprite_list.py | biggzlar/arcade | fc444db356452660ac6cb2ffe241f0b1a3d4bcf3 | [
"MIT"
] | null | null | null | arcade/sprite_list.py | biggzlar/arcade | fc444db356452660ac6cb2ffe241f0b1a3d4bcf3 | [
"MIT"
] | null | null | null | """
This module provides functionality to manage Sprites in a list.
"""
from typing import Iterable
from typing import Any
from typing import TypeVar
from typing import Generic
from typing import List
from typing import Tuple
from typing import Optional
import pyglet.gl as gl
import math
import array
from PIL import Image
from arcade import Sprite
from arcade import get_distance_between_sprites
from arcade import are_polygons_intersecting
from arcade import is_point_in_polygon
from arcade import rotate_point
from arcade import get_projection
from arcade import shader
from arcade import Point
_VERTEX_SHADER = """
#version 330
uniform mat4 Projection;
// per vertex
in vec2 in_vert;
in vec2 in_texture;
// per instance
in vec2 in_pos;
in float in_angle;
in vec2 in_size;
in vec4 in_sub_tex_coords;
in vec4 in_color;
out vec2 v_texture;
out vec4 v_color;
void main() {
mat2 rotate = mat2(
cos(in_angle), sin(in_angle),
-sin(in_angle), cos(in_angle)
);
vec2 pos;
pos = in_pos + vec2(rotate * (in_vert * (in_size / 2)));
gl_Position = Projection * vec4(pos, 0.0, 1.0);
vec2 tex_offset = in_sub_tex_coords.xy;
vec2 tex_size = in_sub_tex_coords.zw;
v_texture = (in_texture * tex_size + tex_offset) * vec2(1, -1);
v_color = in_color;
}
"""
_FRAGMENT_SHADER = """
#version 330
uniform sampler2D Texture;
in vec2 v_texture;
in vec4 v_color;
out vec4 f_color;
void main() {
vec4 basecolor = texture(Texture, v_texture);
basecolor = basecolor * v_color;
if (basecolor.a == 0.0){
discard;
}
f_color = basecolor;
}
"""
def _create_rects(rect_list: Iterable[Sprite]) -> List[float]:
    """
    Build a flat list of corner coordinates for a set of sprites.

    Emits four (x, y) pairs per sprite -- bottom-left, bottom-right,
    top-right, top-left -- rotated about the sprite center when the sprite
    has a non-zero angle. Suitable for filling a quad vertex buffer.
    """
    coords: List[float] = []
    for sprite in rect_list:
        half_w = sprite.width / 2
        half_h = sprite.height / 2
        cx = sprite.center_x
        cy = sprite.center_y

        corners = [
            (cx - half_w, cy - half_h),
            (cx + half_w, cy - half_h),
            (cx + half_w, cy + half_h),
            (cx - half_w, cy + half_h),
        ]

        if sprite.angle:
            corners = [rotate_point(px, py, cx, cy, sprite.angle)
                       for px, py in corners]

        for px, py in corners:
            coords.append(px)
            coords.append(py)

    return coords
class _SpatialHash:
    """
    Structure for fast collision checking.

    Sprites are bucketed into a grid of square cells keyed by integer cell
    coordinates; a sprite overlapping several cells appears in each of them.

    See: https://www.gamedev.net/articles/programming/general-and-gameplay-programming/spatial-hashing-r2697/
    """

    def __init__(self, cell_size):
        # Width/height of one square cell, in world units.
        self.cell_size = cell_size
        # Mapping of (cell_x, cell_y) -> list of sprites overlapping the cell.
        self.contents = {}

    def _hash(self, point):
        # Convert a world-space point into integer cell coordinates.
        return int(point[0] / self.cell_size), int(point[1] / self.cell_size)

    def reset(self):
        # Drop every stored sprite.
        self.contents = {}

    def insert_object_for_box(self, new_object: Sprite):
        """
        Insert a sprite.
        """
        # Get the corners
        min_x = new_object.left
        max_x = new_object.right
        min_y = new_object.bottom
        max_y = new_object.top

        # print(f"New - Center: ({new_object.center_x}, {new_object.center_y}), Angle: {new_object.angle}, "
        #       f"Left: {new_object.left}, Right {new_object.right}")

        min_point = (min_x, min_y)
        max_point = (max_x, max_y)

        # print(f"Add 1: {min_point} {max_point}")

        # hash the minimum and maximum points
        min_point, max_point = self._hash(min_point), self._hash(max_point)

        # print(f"Add 2: {min_point} {max_point}")
        # print("Add: ", min_point, max_point)

        # iterate over the rectangular region of cells the box overlaps
        for i in range(min_point[0], max_point[0] + 1):
            for j in range(min_point[1], max_point[1] + 1):
                # append to each intersecting cell
                bucket = self.contents.setdefault((i, j), [])
                if new_object in bucket:
                    # print(f"Error, {new_object.guid} already in ({i}, {j}) bucket. ")
                    pass
                else:
                    bucket.append(new_object)
                    # print(f"Adding {new_object.guid} to ({i}, {j}) bucket. "
                    #       f"{new_object._position} {min_point} {max_point}")

    def remove_object(self, sprite_to_delete: Sprite):
        """
        Remove a Sprite.

        NOTE(review): buckets are derived from the sprite's *current* box; if
        the sprite moved since insertion the old buckets may be missed (hence
        the warning below) -- confirm callers remove before moving.

        :param Sprite sprite_to_delete: Pointer to sprite to be removed.
        """
        # Get the corners
        min_x = sprite_to_delete.left
        max_x = sprite_to_delete.right
        min_y = sprite_to_delete.bottom
        max_y = sprite_to_delete.top

        # print(f"Del - Center: ({sprite_to_delete.center_x}, {sprite_to_delete.center_y}), "
        #       f"Angle: {sprite_to_delete.angle}, Left: {sprite_to_delete.left}, Right {sprite_to_delete.right}")

        min_point = (min_x, min_y)
        max_point = (max_x, max_y)

        # print(f"Remove 1: {min_point} {max_point}")

        # hash the minimum and maximum points
        min_point, max_point = self._hash(min_point), self._hash(max_point)

        # print(f"Remove 2: {min_point} {max_point}")
        # print("Remove: ", min_point, max_point)

        # iterate over the rectangular region
        for i in range(min_point[0], max_point[0] + 1):
            for j in range(min_point[1], max_point[1] + 1):
                bucket = self.contents.setdefault((i, j), [])
                try:
                    bucket.remove(sprite_to_delete)
                    # print(f"Removing {sprite_to_delete.guid} from ({i}, {j}) bucket. {sprite_to_delete._position} "
                    #       f"{min_point} {max_point}")
                except ValueError:
                    print(f"Warning, tried to remove item {sprite_to_delete.guid} from spatial hash {i} {j} when "
                          f"it wasn't there. {min_point} {max_point}")

    def get_objects_for_box(self, check_object: Sprite) -> List[Sprite]:
        """
        Returns colliding Sprites.

        The result may contain duplicates (a sprite stored in several of the
        scanned cells is appended once per cell) and false positives (same
        cell, not actually overlapping).

        :param Sprite check_object: Sprite we are checking to see if there are
               other sprites in the same box(es)

        :return: List of close-by sprites
        :rtype: List
        """
        # Get the corners
        min_x = check_object.left
        max_x = check_object.right
        min_y = check_object.bottom
        max_y = check_object.top

        min_point = (min_x, min_y)
        max_point = (max_x, max_y)

        # hash the minimum and maximum points
        min_point, max_point = self._hash(min_point), self._hash(max_point)

        close_by_sprites: List[Sprite] = []
        # iterate over the rectangular region
        for i in range(min_point[0], max_point[0] + 1):
            for j in range(min_point[1], max_point[1] + 1):
                # print(f"Checking {i}, {j}")
                # append to each intersecting cell
                new_items = self.contents.setdefault((i, j), [])
                # for item in new_items:
                #     print(f"Found {item.guid} in {i}, {j}")
                close_by_sprites.extend(new_items)

        return close_by_sprites

    def get_objects_for_point(self, check_point: Point) -> List[Sprite]:
        """
        Returns Sprites at or close to a point.

        :param Point check_point: Point we are checking to see if there are
               other sprites in the same box(es)

        :return: List of close-by sprites
        :rtype: List
        """
        hash_point = self._hash(check_point)

        close_by_sprites: List[Sprite] = []
        new_items = self.contents.setdefault(hash_point, [])
        close_by_sprites.extend(new_items)

        return close_by_sprites
_SpriteType = TypeVar('_SpriteType', bound=Sprite)
class SpriteList(Generic[_SpriteType]):
    """A list of sprites that can be batch-drawn with one instanced OpenGL
    draw call, optionally backed by a spatial hash for fast collision
    queries."""

    # Source images for the composite texture atlas; preload_textures() sets
    # this to None to signal that images must be (re)loaded from names.
    array_of_images: Optional[List[Any]]
    # NOTE(review): read but never incremented anywhere in this file, so
    # texture_id is always 0 -- works because draw() binds to unit 0; confirm
    # before relying on multiple texture units.
    next_texture_id = 0

    def __init__(self, use_spatial_hash=False, spatial_hash_cell_size=128, is_static=False):
        """
        Initialize the sprite list

        :param bool use_spatial_hash: If set to True, this will make moving a sprite
               in the SpriteList slower, but it will speed up collision detection
               with items in the SpriteList. Great for doing collision detection
               with walls/platforms.
        :param int spatial_hash_cell_size:
        :param bool is_static: Speeds drawing if this list won't change.
        """
        # List of sprites in the sprite list
        self.sprite_list = []
        # sprite -> index into sprite_list (and the per-instance GPU buffers)
        self.sprite_idx = dict()

        # Used in drawing optimization via OpenGL
        self.program = None

        # Per-instance attribute buffers. Each attribute keeps: raw CPU-side
        # array data, the GPU buffer object, its layout description, and a
        # dirty flag checked by draw().
        self._sprite_pos_data = None
        self._sprite_pos_buf = None
        self._sprite_pos_desc = None
        self._sprite_pos_changed = False

        self._sprite_size_data = None
        self._sprite_size_buf = None
        self._sprite_size_desc = None
        self._sprite_size_changed = False

        self._sprite_angle_data = None
        self._sprite_angle_buf = None
        self._sprite_angle_desc = None
        self._sprite_angle_changed = False

        self._sprite_color_data = None
        self._sprite_color_buf = None
        self._sprite_color_desc = None
        self._sprite_color_changed = False

        self._sprite_sub_tex_data = None
        self._sprite_sub_tex_buf = None
        self._sprite_sub_tex_desc = None
        self._sprite_sub_tex_changed = False

        self.texture_id = None
        self._texture = None
        # Cached vertex array object; set to None to force a full rebuild of
        # all GPU state on the next draw().
        self._vao1 = None
        self.vbo_buf = None

        self.array_of_texture_names = []
        self.array_of_images = []

        # Used in collision detection optimization
        self.is_static = is_static
        self.use_spatial_hash = use_spatial_hash
        if use_spatial_hash:
            self.spatial_hash = _SpatialHash(cell_size=spatial_hash_cell_size)
        else:
            self.spatial_hash = None
    def append(self, item: _SpriteType):
        """
        Add a new sprite to the list.

        Invalidates the cached vertex array so all GPU buffers are rebuilt
        on the next draw().

        :param Sprite item: Sprite to add to the list.
        """
        idx = len(self.sprite_list)
        self.sprite_list.append(item)
        self.sprite_idx[item] = idx
        # Let the sprite notify this list when its attributes change.
        item.register_sprite_list(self)
        self._vao1 = None
        if self.use_spatial_hash:
            self.spatial_hash.insert_object_for_box(item)

    def _recalculate_spatial_hash(self, item: _SpriteType):
        """ Recalculate the spatial hash for a particular item. """
        if self.use_spatial_hash:
            self.spatial_hash.remove_object(item)
            self.spatial_hash.insert_object_for_box(item)

    def _recalculate_spatial_hashes(self):
        # Rebuild the entire spatial hash from current sprite positions.
        if self.use_spatial_hash:
            self.spatial_hash.reset()
            for sprite in self.sprite_list:
                self.spatial_hash.insert_object_for_box(sprite)
def remove(self, item: _SpriteType):
"""
Remove a specific sprite from the list.
:param Sprite item: Item to remove from the list
"""
self.sprite_list.remove(item)
# Rebuild index list
self.sprite_idx[item] = dict()
for idx, sprite in enumerate(self.sprite_list):
self.sprite_idx[sprite] = idx
self._vao1 = None
if self.use_spatial_hash:
self.spatial_hash.remove_object(item)
    def update(self):
        """
        Call the update() method on each sprite in the list.
        """
        for sprite in self.sprite_list:
            sprite.update()

    def on_update(self, delta_time: float = 1/60):
        """
        Update the sprite. Similar to update, but also takes a delta-time.
        """
        for sprite in self.sprite_list:
            sprite.on_update(delta_time)

    def update_animation(self, delta_time: float = 1/60):
        # Advance every sprite's animation by delta_time seconds.
        for sprite in self.sprite_list:
            sprite.update_animation(delta_time)

    def _get_center(self) -> Tuple[float, float]:
        """ Get the mean center coordinates of all sprites in the list. """
        # NOTE(review): raises ZeroDivisionError for an empty list -- confirm
        # callers (e.g. rescale) guard against that.
        x = sum((sprite.center_x for sprite in self.sprite_list)) / len(self.sprite_list)
        y = sum((sprite.center_y for sprite in self.sprite_list)) / len(self.sprite_list)
        return x, y

    center = property(_get_center)

    def rescale(self, factor: float) -> None:
        """ Rescale all sprites in the list relative to the spritelists center. """
        for sprite in self.sprite_list:
            sprite.rescale_relative_to_point(self.center, factor)

    def move(self, change_x: float, change_y: float):
        """
        Moves all Sprites in the list by the same amount.

        :param float change_x: Amount to change all x values by
        :param float change_y: Amount to change all y values by
        """
        for sprite in self.sprite_list:
            sprite.center_x += change_x
            sprite.center_y += change_y

    def preload_textures(self, texture_names: List):
        """
        Preload a set of textures that will be used for sprites in this
        sprite list.

        :param array texture_names: List of file names to load in as textures.
        """
        self.array_of_texture_names.extend(texture_names)
        # Setting this to None tells _calculate_sprite_buffer() that the
        # atlas images must be (re)built.
        self.array_of_images = None
    def _calculate_sprite_buffer(self):
        """(Re)build every per-instance GPU buffer, the shared texture atlas
        and the unit-quad geometry, then assemble them into ``self._vao1``.
        Called lazily from draw() whenever ``_vao1`` has been invalidated."""
        # 'static' hints the GL driver that the buffers rarely change.
        if self.is_static:
            usage = 'static'
        else:
            usage = 'stream'

        def calculate_pos_buffer():
            # Two floats (center x, center y) per sprite.
            self._sprite_pos_data = array.array('f')
            # print("A")
            for sprite in self.sprite_list:
                self._sprite_pos_data.append(sprite.center_x)
                self._sprite_pos_data.append(sprite.center_y)

            self._sprite_pos_buf = shader.buffer(
                self._sprite_pos_data.tobytes(),
                usage=usage
            )

            variables = ['in_pos']
            self._sprite_pos_desc = shader.BufferDescription(
                self._sprite_pos_buf,
                '2f',
                variables,
                instanced=True)
            self._sprite_pos_changed = False

        def calculate_size_buffer():
            # Two floats (width, height) per sprite.
            self._sprite_size_data = array.array('f')
            for sprite in self.sprite_list:
                self._sprite_size_data.append(sprite.width)
                self._sprite_size_data.append(sprite.height)

            self._sprite_size_buf = shader.buffer(
                self._sprite_size_data.tobytes(),
                usage=usage
            )

            variables = ['in_size']
            self._sprite_size_desc = shader.BufferDescription(
                self._sprite_size_buf,
                '2f',
                variables,
                instanced=True)
            self._sprite_size_changed = False

        def calculate_angle_buffer():
            # One float per sprite; the vertex shader expects radians.
            self._sprite_angle_data = array.array('f')
            for sprite in self.sprite_list:
                self._sprite_angle_data.append(math.radians(sprite.angle))

            self._sprite_angle_buf = shader.buffer(
                self._sprite_angle_data.tobytes(),
                usage=usage
            )

            variables = ['in_angle']
            self._sprite_angle_desc = shader.BufferDescription(
                self._sprite_angle_buf,
                '1f',
                variables,
                instanced=True)
            self._sprite_angle_changed = False

        def calculate_colors():
            # Four unsigned bytes (RGBA) per sprite, normalized in the shader.
            self._sprite_color_data = array.array('B')
            for sprite in self.sprite_list:
                self._sprite_color_data.append(sprite.color[0])
                self._sprite_color_data.append(sprite.color[1])
                self._sprite_color_data.append(sprite.color[2])
                self._sprite_color_data.append(sprite.alpha)

            self._sprite_color_buf = shader.buffer(
                self._sprite_color_data.tobytes(),
                usage=usage
            )

            variables = ['in_color']
            self._sprite_color_desc = shader.BufferDescription(
                self._sprite_color_buf,
                '4B',
                variables,
                normalized=['in_color'], instanced=True)
            self._sprite_color_changed = False

        def calculate_sub_tex_coords():
            # Build (or reuse) a horizontal texture atlas of every unique
            # sprite texture, then upload each sprite's atlas sub-rectangle
            # as (offset x, offset y, width, height), all normalized 0..1.
            new_array_of_texture_names = []
            new_array_of_images = []
            new_texture = False
            if self.array_of_images is None:
                new_texture = True

            # print()
            # print("New texture start: ", new_texture)

            for sprite in self.sprite_list:

                # noinspection PyProtectedMember
                if sprite.texture is None:
                    raise Exception("Error: Attempt to draw a sprite without a texture set.")

                name_of_texture_to_check = sprite.texture.name
                if name_of_texture_to_check not in self.array_of_texture_names:
                    new_texture = True
                    # print("New because of ", name_of_texture_to_check)

                if name_of_texture_to_check not in new_array_of_texture_names:
                    new_array_of_texture_names.append(name_of_texture_to_check)
                    image = sprite.texture.image
                    new_array_of_images.append(image)

            # print("New texture end: ", new_texture)
            # print(new_array_of_texture_names)
            # print(self.array_of_texture_names)
            # print()

            if new_texture:
                # Add back in any old textures. Chances are we'll need them.
                for index, old_texture_name in enumerate(self.array_of_texture_names):
                    if old_texture_name not in new_array_of_texture_names and self.array_of_images is not None:
                        new_array_of_texture_names.append(old_texture_name)
                        image = self.array_of_images[index]
                        new_array_of_images.append(image)

                self.array_of_texture_names = new_array_of_texture_names

                self.array_of_images = new_array_of_images
                # print(f"New Texture Atlas with names {self.array_of_texture_names}")

            # Get their sizes
            widths, heights = zip(*(i.size for i in self.array_of_images))

            # Figure out what size a composite would be
            total_width = sum(widths)
            max_height = max(heights)

            if new_texture:

                # TODO: This code isn't valid, but I think some releasing might be in order.
                # if self.texture is not None:
                #     shader.Texture.release(self.texture_id)

                # Make the composite image: all textures side by side,
                # top-aligned, on one row.
                new_image = Image.new('RGBA', (total_width, max_height))

                x_offset = 0
                for image in self.array_of_images:
                    new_image.paste(image, (x_offset, 0))
                    x_offset += image.size[0]

                # Create a texture out the composite image
                texture_bytes = new_image.tobytes()
                self._texture = shader.texture(
                    (new_image.width, new_image.height),
                    4,
                    texture_bytes
                )

                if self.texture_id is None:
                    self.texture_id = SpriteList.next_texture_id

            # Create a list with the coordinates of all the unique textures
            tex_coords = []
            start_x = 0.0
            for image in self.array_of_images:
                end_x = start_x + (image.width / total_width)
                normalized_width = image.width / total_width
                start_height = 1 - (image.height / max_height)
                normalized_height = image.height / max_height
                tex_coords.append([start_x, start_height, normalized_width, normalized_height])
                start_x = end_x

            # Go through each sprite and pull from the coordinate list, the proper
            # coordinates for that sprite's image.
            array_of_sub_tex_coords = array.array('f')
            for sprite in self.sprite_list:
                index = self.array_of_texture_names.index(sprite.texture.name)
                for coord in tex_coords[index]:
                    array_of_sub_tex_coords.append(coord)

            self._sprite_sub_tex_buf = shader.buffer(
                array_of_sub_tex_coords.tobytes(),
                usage=usage
            )

            self._sprite_sub_tex_desc = shader.BufferDescription(
                self._sprite_sub_tex_buf,
                '4f',
                ['in_sub_tex_coords'],
                instanced=True)
            self._sprite_sub_tex_changed = False

        if len(self.sprite_list) == 0:
            return

        calculate_pos_buffer()
        calculate_size_buffer()
        calculate_angle_buffer()
        calculate_sub_tex_coords()
        calculate_colors()

        # Shared unit-quad geometry (x, y, u, v) drawn as a triangle strip;
        # the vertex shader scales/rotates/translates it per instance.
        vertices = array.array('f', [
            #  x, y, u, v
            -1.0, -1.0, 0.0, 0.0,
            -1.0, 1.0, 0.0, 1.0,
            1.0, -1.0, 1.0, 0.0,
            1.0, 1.0, 1.0, 1.0,
        ]
        )
        self.vbo_buf = shader.buffer(vertices.tobytes())
        vbo_buf_desc = shader.BufferDescription(
            self.vbo_buf,
            '2f 2f',
            ('in_vert', 'in_texture')
        )

        # Can add buffer to index vertices
        vao_content = [vbo_buf_desc,
                       self._sprite_pos_desc,
                       self._sprite_size_desc,
                       self._sprite_angle_desc,
                       self._sprite_sub_tex_desc,
                       self._sprite_color_desc]
        self._vao1 = shader.vertex_array(self.program, vao_content)
    def dump(self):
        """Debug helper: hex-dump the raw position buffer, one record
        (one sprite's data) per printed line."""
        buffer = self._sprite_pos_data.tobytes()
        # record_size is a float; ``i % record_size == 0`` still fires at
        # whole-record boundaries.
        record_size = len(buffer) / len(self.sprite_list)
        for i, char in enumerate(buffer):
            if i % record_size == 0:
                print()
            print(f"{char:02x} ", end="")

    def _update_positions(self):
        """ Called by the Sprite class to update position, angle, size and color
        of all sprites in the list.
        Necessary for batch drawing of items. """
        # Nothing to sync until the buffers have been built by draw().
        if self._vao1 is None:
            return

        for i, sprite in enumerate(self.sprite_list):
            self._sprite_pos_data[i * 2] = sprite.position[0]
            self._sprite_pos_data[i * 2 + 1] = sprite.position[1]
            self._sprite_pos_changed = True

            self._sprite_angle_data[i] = math.radians(sprite.angle)
            self._sprite_angle_changed = True

            self._sprite_color_data[i * 4] = sprite.color[0]
            self._sprite_color_data[i * 4 + 1] = sprite.color[1]
            self._sprite_color_data[i * 4 + 2] = sprite.color[2]
            self._sprite_color_data[i * 4 + 3] = sprite.alpha
            self._sprite_color_changed = True

            self._sprite_size_data[i * 2] = sprite.width
            self._sprite_size_data[i * 2 + 1] = sprite.height
            self._sprite_size_changed = True

    def update_texture(self, _sprite):
        """ Make sure we update the texture for this sprite for the next batch
        drawing"""
        if self._vao1 is None:
            return

        # A texture change may alter the atlas layout, so rebuild everything.
        self._calculate_sprite_buffer()
    def update_position(self, sprite: Sprite):
        """
        Called by the Sprite class to update position, angle, size and color
        of the specified sprite.
        Necessary for batch drawing of items.

        :param Sprite sprite: Sprite to update.
        """
        # Buffers not built yet; append()/draw() will pick up current state.
        if self._vao1 is None:
            return

        i = self.sprite_idx[sprite]

        self._sprite_pos_data[i * 2] = sprite.position[0]
        self._sprite_pos_data[i * 2 + 1] = sprite.position[1]
        self._sprite_pos_changed = True

        self._sprite_angle_data[i] = math.radians(sprite.angle)
        self._sprite_angle_changed = True

        self._sprite_color_data[i * 4] = sprite.color[0]
        self._sprite_color_data[i * 4 + 1] = sprite.color[1]
        self._sprite_color_data[i * 4 + 2] = sprite.color[2]
        self._sprite_color_data[i * 4 + 3] = sprite.alpha
        self._sprite_color_changed = True

    def update_size(self, sprite: Sprite):
        """
        Called by the Sprite class to update the size/scale in this sprite.
        Necessary for batch drawing of items.

        :param Sprite sprite: Sprite to update.
        """
        if self._vao1 is None:
            return

        i = self.sprite_idx[sprite]
        self._sprite_size_data[i * 2] = sprite.width
        self._sprite_size_data[i * 2 + 1] = sprite.height
        self._sprite_size_changed = True

    def update_height(self, sprite: Sprite):
        """
        Called by the Sprite class to update the size/scale in this sprite.
        Necessary for batch drawing of items.

        :param Sprite sprite: Sprite to update.
        """
        if self._vao1 is None:
            return

        i = self.sprite_idx[sprite]
        # Height lives in the second slot of each two-float size record.
        self._sprite_size_data[i * 2 + 1] = sprite.height
        self._sprite_size_changed = True

    def update_width(self, sprite: Sprite):
        """
        Called by the Sprite class to update the size/scale in this sprite.
        Necessary for batch drawing of items.

        :param Sprite sprite: Sprite to update.
        """
        if self._vao1 is None:
            return

        i = self.sprite_idx[sprite]
        # Width lives in the first slot of each two-float size record.
        self._sprite_size_data[i * 2] = sprite.width
        self._sprite_size_changed = True

    def update_location(self, sprite: Sprite):
        """
        Called by the Sprite class to update the location in this sprite.
        Necessary for batch drawing of items.

        :param Sprite sprite: Sprite to update.
        """
        if self._vao1 is None:
            return

        i = self.sprite_idx[sprite]
        self._sprite_pos_data[i * 2] = sprite.position[0]
        self._sprite_pos_data[i * 2 + 1] = sprite.position[1]
        self._sprite_pos_changed = True

    def update_angle(self, sprite: Sprite):
        """
        Called by the Sprite class to update the angle in this sprite.
        Necessary for batch drawing of items.

        :param Sprite sprite: Sprite to update.
        """
        if self._vao1 is None:
            return

        i = self.sprite_idx[sprite]
        # Stored in radians; the vertex shader consumes radians directly.
        self._sprite_angle_data[i] = math.radians(sprite.angle)
        self._sprite_angle_changed = True
    def draw(self):
        """ Draw this list of sprites.

        Lazily builds the shader program and GPU buffers, flushes any dirty
        per-instance attribute data, then issues one instanced triangle-strip
        draw for all sprites. """
        if self.program is None:
            # Used in drawing optimization via OpenGL
            self.program = shader.program(
                vertex_shader=_VERTEX_SHADER,
                fragment_shader=_FRAGMENT_SHADER
            )

        if len(self.sprite_list) == 0:
            return

        if self._vao1 is None:
            self._calculate_sprite_buffer()

        # Bind the atlas texture to texture unit 0.
        self._texture.use(0)

        # Standard alpha blending.
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        # gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
        # gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)

        with self._vao1:
            self.program['Texture'] = self.texture_id
            self.program['Projection'] = get_projection().flatten()

            # Static lists are assumed never dirty; skip the flush entirely.
            if not self.is_static:
                if self._sprite_pos_changed:
                    # orphan() discards the old GL storage so the write
                    # doesn't stall on a buffer still in use by the GPU.
                    self._sprite_pos_buf.orphan()
                    self._sprite_pos_buf.write(self._sprite_pos_data.tobytes())
                    self._sprite_pos_changed = False

                if self._sprite_size_changed:
                    self._sprite_size_buf.orphan()
                    self._sprite_size_buf.write(self._sprite_size_data.tobytes())
                    self._sprite_size_changed = False

                if self._sprite_angle_changed:
                    self._sprite_angle_buf.orphan()
                    self._sprite_angle_buf.write(self._sprite_angle_data.tobytes())
                    self._sprite_angle_changed = False

                if self._sprite_color_changed:
                    self._sprite_color_buf.orphan()
                    self._sprite_color_buf.write(self._sprite_color_data.tobytes())
                    self._sprite_color_changed = False

                if self._sprite_sub_tex_changed:
                    # NOTE(review): _sprite_sub_tex_data is never populated in
                    # this file (only the buffer object is), so this branch
                    # would raise if the flag were ever set -- verify upstream.
                    self._sprite_sub_tex_buf.orphan()
                    self._sprite_sub_tex_buf.write(self._sprite_sub_tex_data.tobytes())
                    self._sprite_sub_tex_changed = False

            self._vao1.render(gl.GL_TRIANGLE_STRIP, instances=len(self.sprite_list))
    def __len__(self) -> int:
        """ Return the length of the sprite list. """
        return len(self.sprite_list)

    def __iter__(self) -> Iterable[_SpriteType]:
        """ Return an iterable object of sprites. """
        return iter(self.sprite_list)

    def __getitem__(self, i):
        # Plain list indexing; supports negative indices and slices as the
        # underlying list does.
        return self.sprite_list[i]
def pop(self) -> Sprite:
"""
Pop off the last sprite in the list.
"""
self.program = None
return self.sprite_list.pop()
def get_closest_sprite(sprite: Sprite, sprite_list: SpriteList) -> Optional[Tuple[Sprite, float]]:
    """
    Given a Sprite and SpriteList, returns the closest sprite, and its distance.

    :param Sprite sprite: Target sprite
    :param SpriteList sprite_list: List to search for closest sprite.

    :return: ``(closest_sprite, distance)`` tuple, or ``None`` if the list
        is empty.
    :rtype: Optional[Tuple[Sprite, float]]
    """
    if len(sprite_list) == 0:
        return None
    # Measure each candidate once, then select the index of the smallest
    # distance; ties resolve to the earliest sprite, as before.
    distances = [get_distance_between_sprites(sprite, other)
                 for other in sprite_list]
    best_index = min(range(len(distances)), key=distances.__getitem__)
    return sprite_list[best_index], distances[best_index]
def check_for_collision(sprite1: Sprite, sprite2: Sprite) -> bool:
    """
    Check for a collision between two sprites.

    Validates argument types first, then delegates to the internal
    collision routine.

    :param sprite1: First sprite
    :param sprite2: Second sprite

    :Returns: True or False depending if the sprites intersect.
    """
    # Guard clauses: fail fast with a descriptive message on bad arguments.
    if not isinstance(sprite1, Sprite):
        raise TypeError("Parameter 1 is not an instance of the Sprite class.")
    if isinstance(sprite2, SpriteList):
        raise TypeError("Parameter 2 is a instance of the SpriteList instead of a required Sprite. See if you meant to "
                        "call check_for_collision_with_list instead of check_for_collision.")
    if not isinstance(sprite2, Sprite):
        raise TypeError("Parameter 2 is not an instance of the Sprite class.")
    return _check_for_collision(sprite1, sprite2)
def _check_for_collision(sprite1: Sprite, sprite2: Sprite) -> bool:
    """
    Internal collision test: cheap bounding-radius rejection first, exact
    polygon intersection only when the radii overlap.

    :param Sprite sprite1: Sprite 1
    :param Sprite sprite2: Sprite 2

    :returns: Boolean
    """
    radius_sum = sprite1.collision_radius + sprite2.collision_radius
    radius_sum_sq = radius_sum * radius_sum
    # Reject on the x axis alone before even reading the y coordinates.
    dx = sprite1.position[0] - sprite2.position[0]
    dx_sq = dx * dx
    if dx_sq > radius_sum_sq:
        return False
    dy = sprite1.position[1] - sprite2.position[1]
    dy_sq = dy * dy
    if dy_sq > radius_sum_sq:
        return False
    # Full squared-distance check (no sqrt needed).
    if dx_sq + dy_sq > radius_sum_sq:
        return False
    # Radii overlap: do the precise (and expensive) hit-box test.
    return are_polygons_intersecting(sprite1.get_adjusted_hit_box(),
                                     sprite2.get_adjusted_hit_box())
def check_for_collision_with_list(sprite: Sprite,
                                  sprite_list: SpriteList) -> List[Sprite]:
    """
    Check for a collision between a sprite, and a list of sprites.

    :param Sprite sprite: Sprite to check
    :param SpriteList sprite_list: SpriteList to check against

    :returns: List of sprites colliding, or an empty list.
    """
    if not isinstance(sprite, Sprite):
        raise TypeError(f"Parameter 1 is not an instance of the Sprite class, it is an instance of {type(sprite)}.")
    if not isinstance(sprite_list, SpriteList):
        raise TypeError(f"Parameter 2 is a {type(sprite_list)} instead of expected SpriteList.")
    # With spatial hashing enabled, narrow the candidates to sprites whose
    # hash cells overlap the target's bounding box; otherwise scan them all.
    if sprite_list.use_spatial_hash:
        candidates = sprite_list.spatial_hash.get_objects_for_box(sprite)
    else:
        candidates = sprite_list
    hits = []
    for candidate in candidates:
        if candidate is sprite:
            continue  # a sprite never collides with itself
        if _check_for_collision(sprite, candidate):
            hits.append(candidate)
    return hits
def get_sprites_at_point(point: Point,
                         sprite_list: SpriteList) -> List[Sprite]:
    """
    Get a list of sprites at a particular point

    :param Point point: Point to check
    :param SpriteList sprite_list: SpriteList to check against

    :returns: List of sprites colliding, or an empty list.
    """
    if not isinstance(sprite_list, SpriteList):
        raise TypeError(f"Parameter 2 is a {type(sprite_list)} instead of expected SpriteList.")
    # Narrow the candidate set via the spatial hash when available.
    if sprite_list.use_spatial_hash:
        candidates = sprite_list.spatial_hash.get_objects_for_point(point)
    else:
        candidates = sprite_list
    px, py = point[0], point[1]
    return [candidate for candidate in candidates
            if is_point_in_polygon(px, py, candidate.get_adjusted_hit_box())]
| 34.089089 | 120 | 0.605197 |
497e59ee8be21d8f1b99304aa0afe9ce3050c92c | 8,450 | py | Python | yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | 1 | 2019-12-08T21:39:38.000Z | 2019-12-08T21:39:38.000Z | yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | null | null | null | yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright (c) 2017 Nokia and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
import pkg_resources
import logging
import json
import time
import yardstick.ssh as ssh
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
class PktgenDPDK(base.Scenario):
    """Execute pktgen-dpdk on one vm and execute testpmd on the other vm

    The "host" VM generates traffic with pktgen-dpdk over eth1; the
    "target" VM runs testpmd, whose RX counters are read back over SSH to
    compute packet loss against an optional SLA.
    """
    __scenario_type__ = "PktgenDPDK"
    # Helper shell scripts bundled with this package; copied to the VMs.
    PKTGEN_DPDK_SCRIPT = 'pktgen_dpdk_benchmark.bash'
    TESTPMD_SCRIPT = 'testpmd_rev.bash'
    def __init__(self, scenario_cfg, context_cfg):
        # scenario_cfg: options/runner/sla for this benchmark run.
        # context_cfg: SSH connection details for "host" and "target" VMs.
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        # Index 0 holds the management address from the context; index 1 is
        # filled in later by dpdk_setup() with the eth1 test-traffic address.
        self.source_ipaddr = [None] * 2
        self.source_ipaddr[0] = \
            self.context_cfg["host"].get("ipaddr", '127.0.0.1')
        self.target_ipaddr = [None] * 2
        self.target_ipaddr[0] = \
            self.context_cfg["target"].get("ipaddr", '127.0.0.1')
        self.target_macaddr = [None] * 2
        self.setup_done = False
        self.dpdk_setup_done = False
    def setup(self):
        """scenario setup: open SSH sessions and push the helper scripts."""
        self.pktgen_dpdk_script = pkg_resources.resource_filename(
            'yardstick.benchmark.scenarios.networking',
            PktgenDPDK.PKTGEN_DPDK_SCRIPT)
        self.testpmd_script = pkg_resources.resource_filename(
            'yardstick.benchmark.scenarios.networking',
            PktgenDPDK.TESTPMD_SCRIPT)
        host = self.context_cfg['host']
        host_user = host.get('user', 'ubuntu')
        host_ssh_port = host.get('ssh_port', ssh.DEFAULT_PORT)
        host_ip = host.get('ip', None)
        host_key_filename = host.get('key_filename', '~/.ssh/id_rsa')
        target = self.context_cfg['target']
        target_user = target.get('user', 'ubuntu')
        target_ssh_port = target.get('ssh_port', ssh.DEFAULT_PORT)
        target_ip = target.get('ip', None)
        target_key_filename = target.get('key_filename', '~/.ssh/id_rsa')
        LOG.info("user:%s, target:%s", target_user, target_ip)
        # "server" is the testpmd side (target VM).
        self.server = ssh.SSH(target_user, target_ip,
                              key_filename=target_key_filename,
                              port=target_ssh_port)
        self.server.wait(timeout=600)
        # copy script to host
        self.server._put_file_shell(self.testpmd_script, '~/testpmd_rev.sh')
        LOG.info("user:%s, host:%s", host_user, host_ip)
        # "client" is the pktgen side (host VM).
        self.client = ssh.SSH(host_user, host_ip,
                              key_filename=host_key_filename,
                              port=host_ssh_port)
        self.client.wait(timeout=600)
        # copy script to host
        self.client._put_file_shell(self.pktgen_dpdk_script,
                                    '~/pktgen_dpdk.sh')
        self.setup_done = True
    def dpdk_setup(self):
        """dpdk setup

        Bring up eth1 on both VMs (first run) and cache its IP/MAC in dot
        files so later runs — after DPDK has unbound the NIC from the
        kernel — can still retrieve the addresses.
        """
        # disable Address Space Layout Randomization (ASLR)
        cmd = "echo 0 | sudo tee /proc/sys/kernel/randomize_va_space"
        self.server.send_command(cmd)
        self.client.send_command(cmd)
        if not self._is_dpdk_setup("client"):
            # First run on the client: bring up eth1 and persist its address.
            cmd = "sudo ifup eth1"
            LOG.debug("Executing command: %s", cmd)
            self.client.send_command(cmd)
            time.sleep(1)
            self.source_ipaddr[1] = utils.get_port_ip(self.client, 'eth1')
            self.client.run("tee ~/.pktgen-dpdk.ipaddr.eth1 > /dev/null",
                            stdin=self.source_ipaddr[1])
        else:
            # eth1 already claimed by DPDK: read back the cached address.
            cmd = "cat ~/.pktgen-dpdk.ipaddr.eth1"
            status, stdout, stderr = self.client.execute(cmd)
            if status:
                raise RuntimeError(stderr)
            self.source_ipaddr[1] = stdout
        if not self._is_dpdk_setup("server"):
            # First run on the server: record eth1 IP/MAC, then start
            # testpmd detached inside a screen session.
            cmd = "sudo ifup eth1"
            LOG.debug("Executing command: %s", cmd)
            self.server.send_command(cmd)
            time.sleep(1)
            self.target_ipaddr[1] = utils.get_port_ip(self.server, 'eth1')
            self.target_macaddr[1] = utils.get_port_mac(self.server, 'eth1')
            self.server.run("tee ~/.testpmd.ipaddr.eth1 > /dev/null",
                            stdin=self.target_ipaddr[1])
            self.server.run("tee ~/.testpmd.macaddr.eth1 > /dev/null",
                            stdin=self.target_macaddr[1])
            cmd = "screen sudo -E bash ~/testpmd_rev.sh"
            LOG.debug("Executing command: %s", cmd)
            self.server.send_command(cmd)
            time.sleep(1)
        else:
            cmd = "cat ~/.testpmd.ipaddr.eth1"
            status, stdout, stderr = self.server.execute(cmd)
            if status:
                raise RuntimeError(stderr)
            self.target_ipaddr[1] = stdout
            cmd = "cat ~/.testpmd.macaddr.eth1"
            status, stdout, stderr = self.server.execute(cmd)
            if status:
                raise RuntimeError(stderr)
            self.target_macaddr[1] = stdout
        self.dpdk_setup_done = True
    def _is_dpdk_setup(self, host):
        """Is dpdk already setup in the host?

        Returns True when eth1 no longer appears in ``ip a`` output —
        presumably because DPDK has already unbound it from the kernel.
        (NOTE(review): the SSH exit status is ignored here; only stdout is
        inspected — confirm this is intentional.)
        """
        is_run = True
        cmd = "ip a | grep eth1 2>/dev/null"
        LOG.debug("Executing command: %s in %s", cmd, host)
        if "server" in host:
            status, stdout, stderr = self.server.execute(cmd)
            if stdout:
                is_run = False
        else:
            status, stdout, stderr = self.client.execute(cmd)
            if stdout:
                is_run = False
        return is_run
    def _dpdk_get_result(self):
        """Get packet statistics from server

        Reads the first RX-packets counter from dpdk-procinfo, then resets
        the stats (twice, with a pause) so the next run starts from zero.
        """
        cmd = "sudo /dpdk/destdir/bin/dpdk-procinfo -- --stats 2>/dev/null | \
              awk '$1 ~ /RX-packets/' | cut -d ':' -f2 | cut -d ' ' -f2 | \
              head -n 1"
        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.server.execute(cmd)
        if status:
            raise RuntimeError(stderr)
        received = int(stdout)
        cmd = "sudo /dpdk/destdir/bin/dpdk-procinfo -- --stats-reset" \
              " > /dev/null 2>&1"
        self.server.execute(cmd)
        time.sleep(1)
        self.server.execute(cmd)
        return received
    def run(self, result):
        """execute the benchmark

        Runs pktgen_dpdk.sh on the client, merges its JSON output into
        ``result``, adds the server-side RX count, and asserts the SLA
        (max lost parts-per-million) when one is configured.
        """
        if not self.setup_done:
            self.setup()
        if not self.dpdk_setup_done:
            self.dpdk_setup()
        options = self.scenario_cfg['options']
        packetsize = options.get("packetsize", 60)
        rate = options.get("rate", 100)
        self.number_of_ports = options.get("number_of_ports", 10)
        # if run by a duration runner
        duration_time = self.scenario_cfg["runner"].get("duration", None) \
            if "runner" in self.scenario_cfg else None
        # if run by an arithmetic runner
        arithmetic_time = options.get("duration", None)
        if duration_time:
            duration = duration_time
        elif arithmetic_time:
            duration = arithmetic_time
        else:
            duration = 20
        cmd = "sudo bash pktgen_dpdk.sh %s %s %s %s %s %s %s" \
            % (self.source_ipaddr[1],
               self.target_ipaddr[1], self.target_macaddr[1],
               self.number_of_ports, packetsize, duration, rate)
        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)
        result.update(json.loads(stdout))
        result['packets_received'] = self._dpdk_get_result()
        result['packetsize'] = packetsize
        if "sla" in self.scenario_cfg:
            sent = result['packets_sent']
            received = result['packets_received']
            ppm = 1000000 * (sent - received) / sent
            # Added by Jing
            # NOTE(review): adds bool (0/1) to round partial ppm up; with
            # Python 3 true division ppm is already fractional — confirm
            # the intended ceiling semantics.
            ppm += (sent - received) % sent > 0
            LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
            sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
            assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
                % (ppm, sla_max_ppm)
255253a037431d1da4aadca745059315227b9b24 | 6,480 | py | Python | lib/ansiblelint/skip_utils.py | ragne/ansible-lint | 70376e9da602aca9b6d3fa1d0ee62d314d6d8885 | [
"MIT"
] | null | null | null | lib/ansiblelint/skip_utils.py | ragne/ansible-lint | 70376e9da602aca9b6d3fa1d0ee62d314d6d8885 | [
"MIT"
] | null | null | null | lib/ansiblelint/skip_utils.py | ragne/ansible-lint | 70376e9da602aca9b6d3fa1d0ee62d314d6d8885 | [
"MIT"
] | null | null | null | # (c) 2019–2020, Ansible by Red Hat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Utils related to inline skipping of rules."""
from itertools import product
import logging
import sys
import ruamel.yaml
from typing import Any, Generator, List, Sequence
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
INLINE_SKIP_FLAG = '# noqa '
_logger = logging.getLogger(__name__)
FileType = Literal["playbook", "pre_tasks", "post_tasks"]
# playbook: Sequence currently expects only instances of one of the two
# classes below but we should consider avoiding this chimera.
# ruamel.yaml.comments.CommentedSeq
# ansible.parsing.yaml.objects.AnsibleSequence
def get_rule_skips_from_line(line: str) -> List:
    """Return list of rule ids skipped via comment on the line of yaml.

    Everything after the first ``# noqa `` marker is treated as a
    whitespace-separated list of rule ids; lines without the marker
    yield an empty list.
    """
    if INLINE_SKIP_FLAG not in line:
        return []
    return line.split(INLINE_SKIP_FLAG, 1)[1].split()
def append_skipped_rules(pyyaml_data: str, file_text: str, file_type: FileType):
    """Append 'skipped_rules' to individual tasks or single metadata block.

    Re-parses *file_text* with a second parser (ruamel.yaml) to recover
    '# noqa' comments that pyyaml discards, then copies the resulting
    skip lists back into *pyyaml_data*. Any internal RuntimeError is
    logged and the data is returned unchanged.

    :param pyyaml_data: file text parsed via ansible and pyyaml.
    :param file_text: raw file text.
    :param file_type: type of file: tasks, handlers or meta.
    :returns: pyyaml_data with 'skipped_rules' lists attached (or the
        original data on failure).
    """
    try:
        return _append_skipped_rules(pyyaml_data, file_text, file_type)
    except RuntimeError:
        # Notify user of skip error, do not stop, do not change exit code
        _logger.error('Error trying to append skipped rules', exc_info=True)
        return pyyaml_data
def _append_skipped_rules(pyyaml_data: Sequence, file_text: str, file_type: FileType):
    """Worker for append_skipped_rules(); raises RuntimeError on mismatch
    between the two parses or on an unknown file type."""
    # parse file text using 2nd parser library
    yaml = ruamel.yaml.YAML()
    ruamel_data = yaml.load(file_text)
    if file_type == 'meta':
        # Meta files carry a single metadata block: attach every skip to it.
        pyyaml_data[0]['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_data)
        return pyyaml_data
    # create list of blocks of tasks or nested tasks
    if file_type in ('tasks', 'handlers'):
        ruamel_task_blocks = ruamel_data
        pyyaml_task_blocks = pyyaml_data
    elif file_type in ('playbook', 'pre_tasks', 'post_tasks'):
        try:
            pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data)
            ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data)
        except (AttributeError, TypeError):
            # TODO(awcrosby): running ansible-lint on any .yml file will
            # assume it is a playbook, check needs to be added higher in the
            # call stack, and can remove this except
            return pyyaml_data
    else:
        raise RuntimeError('Unexpected file type: {}'.format(file_type))
    # get tasks from blocks of tasks
    pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks)
    ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks)
    # append skipped_rules for each task
    # Both parses walk the same document, so tasks are paired positionally;
    # the name comparison guards against the two walks diverging.
    for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks):
        if pyyaml_task.get('name') != ruamel_task.get('name'):
            raise RuntimeError('Error in matching skip comment to a task')
        pyyaml_task['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_task)
    return pyyaml_data
def _get_task_blocks_from_playbook(playbook: Sequence) -> List:
    """Return parts of playbook that contains tasks, and nested tasks.

    Walks each play in order and concatenates its task-bearing sections
    (tasks, pre_tasks, post_tasks, handlers), skipping absent keys.

    :param playbook: playbook yaml from yaml parser.
    :returns: list of task dictionaries.
    """
    PLAYBOOK_TASK_KEYWORDS = [
        'tasks',
        'pre_tasks',
        'post_tasks',
        'handlers',
    ]
    task_blocks: List = []
    for play in playbook:
        for keyword in PLAYBOOK_TASK_KEYWORDS:
            task_blocks.extend(play.get(keyword, []))
    return task_blocks
def _get_tasks_from_blocks(task_blocks: Sequence) -> Generator:
"""Get list of tasks from list made of tasks and nested tasks."""
NESTED_TASK_KEYS = [
'block',
'always',
'rescue',
]
def get_nested_tasks(task: Any):
return (
subtask
for k in NESTED_TASK_KEYS if k in task
for subtask in task[k]
)
for task in task_blocks:
for sub_task in get_nested_tasks(task):
yield sub_task
yield task
def _get_rule_skips_from_yaml(yaml_input: Sequence) -> Sequence:
    """Traverse yaml for comments with rule skips and return list of rules."""
    yaml_comment_obj_strs = []
    def traverse_yaml(obj: Any) -> None:
        # ruamel attaches node comments via the .ca accessor; stringify the
        # whole comment structure so noqa markers can be grepped out below.
        yaml_comment_obj_strs.append(str(obj.ca.items))
        if isinstance(obj, dict):
            for key, val in obj.items():
                if isinstance(val, (dict, list)):
                    traverse_yaml(val)
        elif isinstance(obj, list):
            for e in obj:
                if isinstance(e, (dict, list)):
                    traverse_yaml(e)
        else:
            return
    traverse_yaml(yaml_input)
    rule_id_list = []
    for comment_obj_str in yaml_comment_obj_strs:
        # The reprs embed literal backslash-n sequences, not real newlines,
        # hence the raw-string separator.
        for line in comment_obj_str.split(r'\n'):
            rule_id_list.extend(get_rule_skips_from_line(line))
    return rule_id_list
| 36.404494 | 86 | 0.697222 |
191fb6b3ed98935af0680952088c8b9e58b67482 | 8,380 | py | Python | pycfn_elasticsearch/vendored/botocore/docs/params.py | elelsee/pycfn-elasticsearch | 74f644f5e508736bab8d45a8ac56e8edeb4e1ca9 | [
"Apache-2.0"
] | 4 | 2015-10-20T15:18:40.000Z | 2018-01-17T10:03:19.000Z | pycfn_elasticsearch/vendored/botocore/docs/params.py | elelsee/pycfn-elasticsearch | 74f644f5e508736bab8d45a8ac56e8edeb4e1ca9 | [
"Apache-2.0"
] | null | null | null | pycfn_elasticsearch/vendored/botocore/docs/params.py | elelsee/pycfn-elasticsearch | 74f644f5e508736bab8d45a8ac56e8edeb4e1ca9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.shape import ShapeDocumenter
from botocore.docs.utils import py_type_name
class BaseParamsDocumenter(ShapeDocumenter):
    """Shared recursive traversal that renders a shape tree into doc sections.

    Subclasses override ``_add_member_documentation`` to produce the actual
    request- or response-flavored markup for each member.
    """
    def document_params(self, section, shape, include=None, exclude=None):
        """Fills out the documentation for a section given a model shape.
        :param section: The section to write the documentation to.
        :param shape: The shape of the operation.
        :type include: Dictionary where keys are parameter names and
            values are the shapes of the parameter names.
        :param include: The parameter shapes to include in the documentation.
        :type exclude: List of the names of the parameters to exclude.
        :param exclude: The names of the parameters to exclude from
            documentation.
        """
        # history tracks visited shapes so recursive models terminate.
        history = []
        self.traverse_and_document_shape(
            section=section, shape=shape, history=history,
            name=None, include=include, exclude=exclude)
    def document_recursive_shape(self, section, shape, **kwargs):
        # Shape refers back to an ancestor: document the member once and
        # stop descending to avoid infinite recursion.
        self._add_member_documentation(section, shape, **kwargs)
    def document_shape_default(self, section, shape, history, include=None,
                               exclude=None, **kwargs):
        # Scalar (non-collection) shapes only need their own description.
        self._add_member_documentation(section, shape, **kwargs)
    def document_shape_type_list(self, section, shape, history, include=None,
                                 exclude=None, **kwargs):
        # Document the list itself, then its element shape one level deeper.
        self._add_member_documentation(section, shape, **kwargs)
        param_shape = shape.member
        param_section = section.add_new_section(
            param_shape.name, context={'shape': shape.member.name})
        self._start_nested_param(param_section)
        self.traverse_and_document_shape(
            section=param_section, shape=param_shape,
            history=history, name=None)
        section = section.add_new_section('end-list')
        self._end_nested_param(section)
    def document_shape_type_map(self, section, shape, history, include=None,
                                exclude=None, **kwargs):
        # Document the map, then its key shape, then its value shape
        # (indented one extra level under the key).
        self._add_member_documentation(section, shape, **kwargs)
        key_section = section.add_new_section(
            'key', context={'shape': shape.key.name})
        self._start_nested_param(key_section)
        self._add_member_documentation(key_section, shape.key)
        param_section = section.add_new_section(
            shape.value.name, context={'shape': shape.value.name})
        param_section.style.indent()
        self._start_nested_param(param_section)
        self.traverse_and_document_shape(
            section=param_section, shape=shape.value,
            history=history, name=None)
        end_section = section.add_new_section('end-map')
        # Dedent twice: once for the value's extra indent, once for the map.
        self._end_nested_param(end_section)
        self._end_nested_param(end_section)
    def document_shape_type_structure(self, section, shape, history,
                                      include=None, exclude=None,
                                      name=None, **kwargs):
        # Document the structure, then each member (honoring include/exclude).
        members = self._add_members_to_shape(shape.members, include)
        self._add_member_documentation(section, shape, name=name)
        for param in members:
            if exclude and param in exclude:
                continue
            param_shape = members[param]
            param_section = section.add_new_section(
                param, context={'shape': param_shape.name})
            self._start_nested_param(param_section)
            self.traverse_and_document_shape(
                section=param_section, shape=param_shape,
                history=history, name=param)
        section = section.add_new_section('end-structure')
        self._end_nested_param(section)
    def _add_member_documentation(self, section, shape, **kwargs):
        # Hook: subclasses render the actual name/type/description markup.
        pass
    def _add_members_to_shape(self, members, include):
        # Merge explicitly-included parameter shapes into a copy of the
        # model's members (the copy avoids mutating the shared model).
        if include:
            members = members.copy()
            for param in include:
                members[param.name] = param
        return members
    def _start_nested_param(self, section):
        # Begin one level of nesting in the rendered output.
        section.style.indent()
        section.style.new_line()
    def _end_nested_param(self, section):
        # Close one level of nesting in the rendered output.
        section.style.dedent()
        section.style.new_line()
class ResponseParamsDocumenter(BaseParamsDocumenter):
    """Generates the description for the response parameters"""
    EVENT_NAME = 'response-params'

    def _add_member_documentation(self, section, shape, name=None, **kwargs):
        # Render one response member as: "- **name** (*type*) -- docs".
        type_label = py_type_name(shape.type_name)
        bullet_section = section.add_new_section('param-name')
        bullet_section.write('- ')
        if name is not None:
            bullet_section.style.bold('%s ' % name)
        section.add_new_section('param-type').style.italics(
            '(%s) -- ' % type_label)
        docs_section = section.add_new_section('param-documentation')
        if shape.documentation:
            docs_section.style.indent()
            docs_section.include_doc_string(shape.documentation)
        section.style.new_paragraph()
class RequestParamsDocumenter(BaseParamsDocumenter):
    """Generates the description for the request parameters"""
    EVENT_NAME = 'request-params'
    def document_shape_type_structure(self, section, shape, history,
                                      include=None, exclude=None, **kwargs):
        # len(history) > 1 means we are inside a nested structure: those get
        # a member line and an extra indent; the top-level request does not.
        if len(history) > 1:
            self._add_member_documentation(section, shape, **kwargs)
            section.style.indent()
        members = self._add_members_to_shape(shape.members, include)
        for i, param in enumerate(members):
            if exclude and param in exclude:
                continue
            param_shape = members[param]
            param_section = section.add_new_section(
                param, context={'shape': param_shape.name})
            param_section.style.new_line()
            # Mark required members so they can be flagged [REQUIRED].
            is_required = param in shape.required_members
            self.traverse_and_document_shape(
                section=param_section, shape=param_shape,
                history=history, name=param, is_required=is_required)
        section = section.add_new_section('end-structure')
        if len(history) > 1:
            section.style.dedent()
        section.style.new_line()
    def _add_member_documentation(self, section, shape, name=None,
                                  is_top_level_param=False, is_required=False,
                                  **kwargs):
        # Top-level params use Sphinx field markup (":type name:" /
        # ":param name:"); nested members use "- **name** (*type*) --".
        py_type = py_type_name(shape.type_name)
        if is_top_level_param:
            type_section = section.add_new_section('param-type')
            type_section.write(':type %s: %s' % (name, py_type))
            end_type_section = type_section.add_new_section('end-param-type')
            end_type_section.style.new_line()
            name_section = section.add_new_section('param-name')
            name_section.write(':param %s: ' % name)
        else:
            name_section = section.add_new_section('param-name')
            name_section.write('- ')
            if name is not None:
                name_section.style.bold('%s ' % name)
            type_section = section.add_new_section('param-type')
            type_section.style.italics('(%s) -- ' % py_type)
        if is_required:
            is_required_section = section.add_new_section('is-required')
            is_required_section.style.indent()
            is_required_section.style.bold('[REQUIRED] ')
        if shape.documentation:
            documentation_section = section.add_new_section(
                'param-documentation')
            documentation_section.style.indent()
            documentation_section.include_doc_string(shape.documentation)
        end_param_section = section.add_new_section('end-param')
        end_param_section.style.new_paragraph()
| 42.538071 | 78 | 0.648807 |
9feaa974de55f76b370a107bf278a7aebf515f62 | 6,711 | py | Python | jamtoolsbuild.py | smallhoe/JamTools | 4033ff3b5f200fff826986e3449c3d859c6f567a | [
"Apache-2.0"
] | null | null | null | jamtoolsbuild.py | smallhoe/JamTools | 4033ff3b5f200fff826986e3449c3d859c6f567a | [
"Apache-2.0"
] | null | null | null | jamtoolsbuild.py | smallhoe/JamTools | 4033ff3b5f200fff826986e3449c3d859c6f567a | [
"Apache-2.0"
] | null | null | null | import time
# Source modules that make up the application; each is copied (or its
# Cython-compiled artifact) into the fbs project tree before freezing.
jamfilelist = ["PyQt5CoreModels", "jamcontroller", "WEBFilesTransmitter", "clientFilesTransmitter",
               "jamscreenshot", "jampublic", "jamroll_screenshot"]
# Usage banner (Chinese): explains that test.py is the real entry point,
# how the import list is generated into main.py for PyInstaller/fbs, and
# what the WithCompile / Debug switches below control.
print("说明:test.py文件为主文件,main.py为存放引入库的文件(无需管),scr文件夹是fbs打包的项目目录.\n"
      "运行本文件打包时,会自动将test.py文件覆盖到PyQt5CoreModels.py(这是前期为了防反编译搞的hh)中,然后会自动解析所有jamfilelist中源码的引入库,"
      "并将所有需要的库格式化后写入main.py文件中,从而让pyinstall可以找到(否则可能有找不到库的错误)"
      "同时会自动配置scr项目目录,然后通过命令行运行打包程序实现自动打包,如需生成安装文件Windows下需要nsis环境,请自行探索..\n"
      "通过更改下面的WithCompile 和Debug变量可以调整是否编译和是否debug模式.\n"
      "需要编译时会将所有源码文件编译为c然后编译为pyd文件,可以实现源码保护,而且运行速度略有提升,需要自行配置好c和cython环境\n"
      "debug模式下运行打包文件将会有命令行窗口")
# NOTE: "voice_and_text" and "txpythonsdk" are the robot text-to-speech
# modules. Tencent's API became paid-only, so sound playback is disabled;
# to re-enable, import those packages in test.py (currently commented out)
# and update the API credentials inside voice_and_text / txpythonsdk.
if __name__ == '__main__':
    import os
    import shutil
    import subprocess, setuptools
    from jampublic import PLATFORM_SYS
    from test import VERSON
    WithCompile = False  # whether to Cython-compile sources to pyd/so before packaging
    Debug = False  # debug mode: the frozen app keeps a console window
    # Keep PyQt5CoreModels.py in sync with the real entry point test.py.
    # NOTE(review): sync is detected by comparing file sizes only — an
    # equal-size edit would be missed; confirm this shortcut is acceptable.
    print("copy test.py->PyQt5CoreModels.py")
    testsize = os.path.getsize("test.py")
    coresize = os.path.getsize("PyQt5CoreModels.py")
    if testsize != coresize:
        with open("test.py", "r", encoding="utf-8")as f:
            with open("PyQt5CoreModels.py", "w", encoding="utf-8")as mo:
                mo.write(f.read())
    if WithCompile:
        # Cythonize all modules in-place; artifact suffix depends on the
        # platform (CPython 3.7 tags are hard-coded here).
        Compiler = subprocess.Popen('python3 setjam.py build_ext --inplace', shell=True)
        Compiler.wait()
        if PLATFORM_SYS == "win32":
            ext = ".pyd"
            suffix = ".cp37-win_amd64"
        else:
            ext = ".so"
            suffix = ".cpython-37m-darwin"
    else:
        ext = ".py"
        suffix = ""
    # Rebuild the fbs source folder from scratch.
    if os.path.exists("src/main/python"):
        print("清空目标文件夹")
        shutil.rmtree("src/main/python")
        time.sleep(0.1)
    os.mkdir("src/main/python")
    # Copy each module (plain .py or compiled artifact) into the project;
    # compiled artifacts are renamed to drop the ABI suffix.
    for file in jamfilelist:
        if os.path.exists('{}{}{}'.format(file, suffix, ext)):
            if os.path.exists('src/main/python/{}{}'.format(file, ext)):
                os.remove('src/main/python/{}{}'.format(file, ext))
                print('removed src/main/python/{}{}'.format(file, ext))
            shutil.copy2('{}{}{}'.format(file, suffix, ext), 'src/main/python/{}{}'.format(file, ext))
            if WithCompile:
                os.remove("{}{}{}".format(file, suffix, ext))
            print('copy {}{}'.format(file, ext))
        else:
            raise OSError
    # Generate main.py: scan every module for import statements so
    # PyInstaller/fbs can discover all dependencies, then delegate to
    # PyQt5CoreModels.main().
    with open('main.py', "w", encoding="utf-8")as mainf:
        importfilelist = []
        for file in jamfilelist:
            print("explaining {}".format(file))
            with open("{}.py".format(file), "r", encoding="utf-8")as soursef:
                line = soursef.readline()
                while line:
                    if line[:6] == "import" or (line[:4] == "from" and "import" in line):
                        # Normalize PyQt5/pynput imports to whole-module
                        # form so the freezer bundles the full package.
                        if "PyQt5" in line or "pynput" in line:
                            if "from" in line:
                                line = "import " + line.split(" ")[1] + "\n"
                            elif " as " in line:
                                line = line.split(" as ")[0] + "\n"
                        if "jampublic" in line: line = "import jampublic\n"
                        while line[-2] == "\\":  # statement continues on the next line
                            if line not in importfilelist:
                                importfilelist.append(line)
                            line = soursef.readline()
                        if line not in importfilelist:
                            importfilelist.append(line)
                    line = soursef.readline()
        mainf.writelines(importfilelist)
        mainf.writelines(["from PyQt5CoreModels import main\n", "main()\n\n"])
    shutil.copy2('main.py', 'src/main/python/main.py')
    print('copy main.py')
    shutil.copy2('imagefiles/jamresourse.py', 'src/main/python/jamresourse.py')
    print('copy jamresourse.py')
    # Rebuild the resources folder and copy in data files (plus the audio
    # capture DLLs on Windows).
    if os.path.exists("src/main/resources/base"):
        shutil.rmtree("src/main/resources/base")
    os.makedirs("src/main/resources/base/bin")
    includedir = ["html", "bin/" + PLATFORM_SYS]
    includefiles = ["log.log"]
    if PLATFORM_SYS == "win32":
        includefiles.extend(["screen-capture-recorder-x64.dll", "audio_sniffer-x64.dll"])
    for d in includedir:
        td = "src/main/resources/base/" + d
        if os.path.exists(d):
            if os.path.exists(td):
                shutil.rmtree(td)
                print("移除", td)
            shutil.copytree(d, td)
            print("copy{}->{}".format(d, td))
        else:
            print("不存在:", d)
    for f in includefiles:
        td = "src/main/resources/base/" + f
        if os.path.exists(f):
            if os.path.exists(td):
                os.remove(td)
                print("移除", td)
            shutil.copy2(f, td)
            print("copy{}->{}".format(f, td))
        else:
            print("不存在", f)
    # if os.path.exists("src/main/resources/base/html"):
    #     shutil.rmtree("src/main/resources/base/html")
    #     print("移除html")
    # shutil.copytree("html", "src/main/resources/base/html")
    # print("copy html")
    # shutil.copy2('log.log', 'src/main/resources/base/log.log')
    # print('copy log.log')
    # On Windows, keep the NSIS installer script's PRODUCT_VERSION in sync
    # with the application version declared in test.py.
    if PLATFORM_SYS == "win32":
        with open("target/installer/Installer.nsi", "r", encoding="ansi")as nsisfile:
            ns = nsisfile.readlines()
            for i, line in enumerate(ns):
                if "!define PRODUCT_VERSION" in line:
                    print("找到版本号{}".format(line))
                    v = line.split('"')[-2]
                    print(v)
                    if v != VERSON:
                        print("版本号不同")
                        ns[i] = '!define PRODUCT_VERSION "{}"\n'.format(VERSON)
                    with open("target/installer/Installer.nsi", "w", encoding="ansi")as nsisfile:
                        nsisfile.writelines(ns)
                    break
    # Freeze the application with fbs (optionally in debug mode).
    print('start freeze')
    # fbs freeze --debug
    freezer = subprocess.Popen('fbs freeze {}'.format("--debug" if Debug else ""), shell=True, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    freezer.wait()
    # On non-Windows platforms, optionally build an installer image too.
    if PLATFORM_SYS != "win32":
        a = input("是否打包为安装文件,Y/N:(回车默认Y)")
        if "y" in a.lower() or len(a) == 0:
            print("开始打包镜像")
            freezer = subprocess.Popen('fbs installer', shell=True,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE)
            freezer.wait()
            print("打包完成")
            if PLATFORM_SYS == "linux":
                print("linux下自行运行sudo dpkg -i target/JamTools.deb安装")
    print('finished all')
77deefc7ce037705b2f887af17a9ab683b64b1c9 | 9,209 | py | Python | modules/userinput.py | rufotheone-fr/retool | be776847d02f711faf58d834dd52554959bef45b | [
"BSD-3-Clause"
] | 117 | 2020-05-02T07:44:41.000Z | 2022-03-31T07:37:07.000Z | modules/userinput.py | rufotheone-fr/retool | be776847d02f711faf58d834dd52554959bef45b | [
"BSD-3-Clause"
] | 150 | 2020-05-17T22:26:10.000Z | 2022-03-20T01:37:27.000Z | modules/userinput.py | rufotheone-fr/retool | be776847d02f711faf58d834dd52554959bef45b | [
"BSD-3-Clause"
] | 14 | 2020-07-08T04:54:32.000Z | 2022-02-20T09:15:56.000Z | import argparse
import os
import sys
from strictyaml import load, Map, MapPattern, Str, Seq, YAMLError
from modules.classes import SmartFormatter, UserInput
from modules.utils import Font, regex_test
def check_input():
    """Parse and validate retool's command-line arguments.

    Builds the argument parser (input path, output folder, mode flags and
    single-letter exclusion filters), validates the input/output paths, then
    condenses the chosen flags into a single display string.

    Returns:
        UserInput: object carrying the input path, output folder, one boolean
        per exclusion/mode flag, and the assembled options string.

    Exits the process when no arguments are given, or when the input path
    does not exist, or when --output points at a file.
    """
    parser = argparse.ArgumentParser(
        usage=f'%(prog)s <input dat/folder> <options>',
        allow_abbrev=False,
        formatter_class=SmartFormatter)
    # Argument groups only affect --help layout, not parsing.
    dev_options = parser.add_argument_group('dev options')
    modes = parser.add_argument_group('modes')
    exclusions = parser.add_argument_group('exclusions')
    parser.add_argument('Input',
                        metavar='<input dat>',
                        type=str,
                        help='R|the path to the dat file, or folder of dat files you want\nto process')
    parser.add_argument('--output',
                        metavar='<output folder>',
                        type=str,
                        help='R|set an output folder where the new 1G1R dat/s will be\ncreated')
    parser.add_argument('--emptytitles',
                        action='store_true',
                        help='R|include titles that don\'t have hashes, ROMs, or disks\nspecified')
    parser.add_argument('--nofilters',
                        action='store_true',
                        help='R|don\'t load custom global and system filters from the\nuser-filters folder')
    dev_options.add_argument('--errors',
                             action='store_true',
                             help='report clone list errors during processing')
    dev_options.add_argument('--list',
                             action='store_true',
                             help=f'R|also output a list of just the 1G1R title names (See\n{Font.bold}user-config.yaml{Font.end} to add a prefix and/or suffix to each line)')
    dev_options.add_argument('--log',
                             action='store_true',
                             help='R|also output lists of what titles have been kept,\nremoved, and set as clones')
    modes.add_argument('-l',
                       action='store_true',
                       help=f'filter by languages using a list (see {Font.bold}user-config.yaml{Font.end})')
    modes.add_argument('-x',
                       action='store_true',
                       help='output dat/s in legacy parent/clone format')
    modes.add_argument('-y',
                       action='store_true',
                       help='R|don\'t demote (Unl) titles if a production version is found in another region')
    modes.add_argument('-z',
                       action='store_true',
                       help='R|titles ripped from modern platform rereleases, such as those found\nin Virtual Console, replace standard editions (ripped titles might\nnot work in emulators)')
    # NOTE: action='extend' requires Python 3.8+.
    exclusions.add_argument('--exclude',
                            action='extend',
                            metavar='FILTERS',
                            help='R|use with the following single letter filters to exclude these\ntypes of titles:\n'
                                 '\na\tapplications'
                                 '\nA\taudio (might include game soundtracks)'
                                 '\nb\tbad dumps'
                                 '\nB\tBIOS and other chips'
                                 '\nc\tcoverdiscs (discs attached to the front of magazines)'
                                 '\nd\tdemos, kiosks, and samples'
                                 '\nD\tadd-ons (expansion packs and additional material)'
                                 '\ne\teducational titles'
                                 '\nm\tmanuals'
                                 '\nM\tmultimedia titles (might include games)'
                                 '\no\tbonus discs'
                                 '\np\tpirate titles'
                                 '\nP\tpreproduction titles (alphas, betas, prototypes)'
                                 '\nr\tpromotional titles'
                                 '\nu\tunlicensed titles'
                                 '\nv\tvideo\n\n',
                            nargs='+')
    # Hidden flags: -q suppresses the dev-environment override below,
    # --test is used by the test suite.  Both are kept out of --help.
    modes.add_argument('-q',
                       action='store_true',
                       help=argparse.SUPPRESS)
    modes.add_argument('--test',
                       action='store_true',
                       help=argparse.SUPPRESS)
    # No arguments at all: argparse would error anyway; bail out quietly.
    if len(sys.argv) == 1:
        sys.exit(1)
    args = parser.parse_args()
    if not os.path.isfile(args.Input) and not os.path.isdir(args.Input):
        print(f'Can\'t find the specified input dat or folder {Font.bold}"{args.Input}"{Font.end}.')
        sys.exit()
    if args.output is not None:
        if os.path.isfile(args.output):
            print(f'Can\'t output to {Font.bold}"{args.output}"{Font.end}, as it\'s a file, not a folder.')
            sys.exit()
        elif not os.path.exists(args.output):
            print(f'* Creating folder "{Font.bold}{args.output}{Font.end}"')
            os.makedirs(args.output)
    else:
        args.output = ''
    # Set errors and legacy to always be true if in dev environment
    # (presence of a '.dev' marker file); -q opts out of this override.
    if os.path.isfile('.dev') and args.q == False:
        setattr(args, 'x', True)
        setattr(args, 'errors', True)
    # Create user options string, e.g. ' (-abl)': every enabled flag name plus
    # every --exclude letter, flattened to characters and sorted case-insensitively.
    user_options = []
    hidden_options = ['Input', 'output', 'q', 'errors', 'log', 'nofilters', 'list', 'test', 'emptytitles']
    for arg in vars(args):
        if arg not in hidden_options and getattr(args, arg) == True:
            user_options.append(arg)
    if args.exclude != [] and args.exclude != None:
        for arg in args.exclude:
            user_options.append(arg)
    if user_options != []:
        user_options = f' (-{"".join(sorted([x for x in "".join(user_options)], key=str.casefold))})'
    else:
        user_options = ''
    # The single-letter membership tests below read the assembled options
    # string; checks are case-sensitive, so 'a' and 'A' are distinct filters.
    return UserInput(
        args.Input,
        args.output,
        True if 'a' in user_options else False,
        True if 'A' in user_options else False,
        True if 'b' in user_options else False,
        True if 'B' in user_options else False,
        True if 'c' in user_options else False,
        True if 'd' in user_options else False,
        True if 'D' in user_options else False,
        True if 'e' in user_options else False,
        True if 'm' in user_options else False,
        True if 'M' in user_options else False,
        True if 'o' in user_options else False,
        True if 'p' in user_options else False,
        True if 'P' in user_options else False,
        True if 'r' in user_options else False,
        True if 'u' in user_options else False,
        True if 'v' in user_options else False,
        args.z,
        args.y,
        args.l,
        args.x,
        user_options,
        args.errors,
        args.nofilters,
        args.log,
        args.list,
        args.emptytitles,
        args.test)
def import_user_config(region_data, user_input):
    """Load user-config.yaml and attach its settings to the UserInput object."""
    # Read and validate the config file against the expected key set.
    try:
        schema = Map({
            "language filter": Seq(Str())|Str(),
            "region order": Seq(Str())|Str(),
            "list prefix": Seq(Str())|Str(),
            "list suffix": Seq(Str())|Str(),
            "gui settings": Seq(Str()|MapPattern(Str(), Str()))|Str()})
        with open('user-config.yaml', encoding='utf-8') as user_config_import:
            user_config = load(str(user_config_import.read()), schema)
    except OSError as e:
        print(f'\n{Font.error_bold}* Error: {Font.end}{e}\n')
        raise
    except YAMLError as e:
        print(f'\n{Font.error_bold}* YAML error: {Font.end}{e}\n')
        raise
    # Collect the value for every region-data language whose key appears in
    # the config's language filter (one entry per matching filter line).
    user_input.user_languages = [
        value
        for key, value in region_data.languages_key.items()
        for chosen_language in user_config.data['language filter']
        if chosen_language == key]
    user_input.user_region_order = user_config.data['region order']
    user_input.user_config = user_config
    return user_input
def import_user_filters(filename, filter_type):
    """Import user filters for excluding/including specific strings.

    Args:
        filename: base name of the YAML file inside the user-filters folder;
            the file loaded is 'user-filters/<filename>.yaml'.
        filter_type: label forwarded to regex_test for reporting invalid
            regular expressions.

    Returns:
        The validated strictyaml document with its 'exclude' and 'include'
        lists run through regex_test.

    Raises:
        OSError: when the filter file can't be read.
        YAMLError: when the file doesn't match the expected schema.
    """
    try:
        schema = Map({"exclude": Seq(Str())|Str(), "include": Seq(Str())|Str()})
        # Bug fix: the path previously hard-coded '(unknown)' instead of
        # interpolating the filename argument, so the parameter was ignored
        # and a nonexistent file was always opened.
        with open(f'user-filters/{filename}.yaml', encoding='utf-8') as user_filter_import:
            user_filters = load(str(user_filter_import.read()), schema)
        # Check for valid regex
        user_filters.data['exclude'] = regex_test(user_filters.data['exclude'], filter_type)
        user_filters.data['include'] = regex_test(user_filters.data['include'], filter_type)
    except OSError as e:
        print(f'\n{Font.error_bold}* Error: {Font.end}{str(e)}\n')
        raise
    except YAMLError as e:
        print(f'\n{Font.error_bold}* YAML error: {Font.end}{str(e)}\n')
        raise
    return user_filters
| 41.111607 | 219 | 0.541753 |
1a719fa160b85a30de322ea98735f1a3f74e533d | 2,609 | py | Python | Numerical_Methods_Physics/Gauss_Elimination.py | Simba2805/Computational_Physics_Python | be687939c16a1d08066939830ac31ba666a3e1bb | [
"MIT"
] | null | null | null | Numerical_Methods_Physics/Gauss_Elimination.py | Simba2805/Computational_Physics_Python | be687939c16a1d08066939830ac31ba666a3e1bb | [
"MIT"
] | null | null | null | Numerical_Methods_Physics/Gauss_Elimination.py | Simba2805/Computational_Physics_Python | be687939c16a1d08066939830ac31ba666a3e1bb | [
"MIT"
] | null | null | null | # #from numpy import array, zeros
# Gauss elimination with partial pivoting for solving the linear system
# A x = b.  (The commented-out 3x3 / 5x5 example systems that previously
# cluttered this script have been removed; only the active system remains.)
import numpy as np

# Coefficient matrix and constant vector of the system being solved.
a = np.array([[20, 15, 10],
              [-3, -2.249, 7],
              [5, 1, 3]], float)
b = np.array([45, 1.751, 9], float)

# Length of the constant vector / number of unknowns.
n = len(b)
# Solution vector, filled in by back substitution below.
x = np.zeros(n, float)

# -------- Partial pivoting + forward elimination --------
for k in range(n - 1):
    # If the pivot is (near) zero, swap this row with a lower row that has a
    # larger entry in column k, so the elimination step below is well defined.
    if abs(a[k, k]) < 1.0e-10:
        for i in range(k + 1, n):
            if abs(a[i, k]) > abs(a[k, k]):
                # numpy fancy-index row/element swaps.
                a[[i, k]] = a[[k, i]]
                b[[k, i]] = b[[i, k]]
                break
    # Eliminate column k from every row below the pivot.  Each row i is
    # rescaled by factor = a[k,k]/a[i,k] and subtracted from the pivot row,
    # which zeroes a[i,k] exactly (a row-normalising variant of the usual
    # elimination update; b is updated consistently).
    for i in range(k + 1, n):
        if a[i, k] == 0:
            continue
        factor = a[k, k] / a[i, k]
        for j in range(k, n):
            a[i, j] = a[k, j] - a[i, j] * factor
        b[i] = b[k] - b[i] * factor

print("the upper triangular matrix ")
U = a
print(np.round(U, 1))

# -------- Back substitution --------
x[n - 1] = b[n - 1] / a[n - 1, n - 1]
for i in range(n - 2, -1, -1):
    sum_ax = 0
    for j in range(i + 1, n):
        sum_ax += a[i, j] * x[j]
    x[i] = (b[i] - sum_ax) / a[i, i]

print("The solution vector is: ")
print(np.round(x, 1))
| 25.578431 | 158 | 0.48716 |
63e4fabe50d8c3257ee6b6e16a6cb561eb8b4586 | 1,333 | py | Python | xlsxwriter/test/comparison/test_chart_blank03.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-07-25T06:08:09.000Z | 2019-11-01T02:33:56.000Z | xlsxwriter/test/comparison/test_chart_blank03.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2019-07-14T00:29:05.000Z | 2019-11-26T06:16:46.000Z | xlsxwriter/test/comparison/test_chart_blank03.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('chart_blank03.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'line'})
        chart.axis_ids = [44253568, 44269952]

        # Three data columns written to A, B and C.
        columns = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        for cell, column in zip(('A1', 'B1', 'C1'), columns):
            worksheet.write_column(cell, column)

        # One series per column.
        for values_ref in ('=Sheet1!$A$1:$A$5',
                           '=Sheet1!$B$1:$B$5',
                           '=Sheet1!$C$1:$C$5'):
            chart.add_series({'values': values_ref})

        chart.show_blanks_as('span')
        worksheet.insert_chart('E9', chart)

        workbook.close()
        self.assertExcelEqual()
| 24.685185 | 79 | 0.56039 |
7f48ff2758ca1da69efc97e333be34fa3b8a2d3b | 16,438 | py | Python | Notebook/KL Divergence Loop.py | yxie367/Mushrooms | 9b0fc5dfb6ab20c7c0c4146bff0d04de561302c5 | [
"MIT"
] | null | null | null | Notebook/KL Divergence Loop.py | yxie367/Mushrooms | 9b0fc5dfb6ab20c7c0c4146bff0d04de561302c5 | [
"MIT"
] | null | null | null | Notebook/KL Divergence Loop.py | yxie367/Mushrooms | 9b0fc5dfb6ab20c7c0c4146bff0d04de561302c5 | [
"MIT"
] | null | null | null | # %% [Algorithm 1c Loop]
# # MUSHROOMS
# %% [markdown]
# ## Binary Classification
# %% [markdown]
# ### Imports
# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %% [markdown]
# ### Load Data
# Load the UCI mushrooms dataset from a local checkout (hard-coded path).
dataset = pd.read_csv(r"C:\Users\yxie367\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
#dataset = pd.read_csv(r"C:\Users\xieya\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
# %% [markdown]
# ### View Data and Informations
# %%
dataset.head()
# %%
dataset.info()
# %%
edible, poisonous = dataset['class'].value_counts()
# print("Edible:\t  ", edible,"\nPoisonous:", poisonous)
# %%
# Categorical to numerical: relabel the target column, e -> 0, p -> 1.
labels = {'e': 0, 'p': 1}
dataset['class'].replace(labels, inplace=True)
edible, poisonous = dataset['class'].value_counts()
#print("0 - Edible:   ", edible,"\n1 - Poisonous:", poisonous)
# %% [markdown]
# # NN1 Stalk Root - Rooted (r)
# %% [markdown]
# ### Split Dataset
# %% [markdown]
# #### Get the Labels
# %%
# Features X (all 22 categorical columns) and binary target y.
X, y = dataset.drop('class', axis=1), dataset['class'].copy()
#print("X:",X.shape,"\ny:",y.shape)
# %% [markdown]
# #### Train Set and Test Set
# Accumulators for the multi-seed experiment loop below: total wrong
# predictions for NN1, NN2 and the confidence-combined picker, plus the
# list of random seeds (2, 6, 10, ..., 42) and human-readable run logs.
total_error_1 = 0
total_error_2 = 0
total_error_comb = 0
randnum = np.arange(2,44,4)
num_trials = len(randnum)
record = ""
wrong_record = ""
run = 1
# %% Data cleaning
from sklearn.model_selection import train_test_split
# Partition the rows on stalk-root == "r" (rooted) with vectorised boolean
# masks.  The previous version grew the frames one row at a time with
# DataFrame.append(), which is O(n^2) and was removed in pandas 2.0;
# boolean indexing selects exactly the same rows in one pass.
# NOTE(review): unlike the old loop, the y_* Series keep their original row
# index (the loop reset every label's index to 0); downstream access is
# positional so this is equivalent — verify if label indices are ever used.
rooted_mask = X["stalk-root"] == "r"
X_white = X[rooted_mask].copy()
y_white = y[rooted_mask].copy()
X_not_white = X[~rooted_mask].copy()
y_not_white = y[~rooted_mask].copy()
# %% Data cleaning pt2
# Same partition, this time on odor == "a" (almond).
almond_mask = X["odor"] == "a"
X_green = X[almond_mask].copy()
y_green = y[almond_mask].copy()
X_not_green = X[~almond_mask].copy()
y_not_green = y[~almond_mask].copy()
# %%
for j in randnum:
    # One full experiment per seed j: retrain both networks from scratch,
    # fit a per-network confidence regression, then score the combination.
    X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=j)
    X_train_not_green, X_test_not_green, y_train_not_green, y_test_not_green = train_test_split(X_not_green, y_not_green, test_size=1-(6905/(8124-len(X_green))), random_state=j)
    # NN1 trains only on non-rooted rows, NN2 only on non-almond rows
    # (the renames below just alias the split results).
    X_train_green = (X_train_not_green)
    y_train_green = (y_train_not_green)
    X_train_white = (X_train_not_white)
    y_train_white = (y_train_not_white)
    # %%
    from sklearn.utils import shuffle
    X_train_full1 = shuffle(X_train_white, random_state=j)
    # Shared test set for both networks: rows 4000:8000 of the shuffled data.
    X_test = shuffle(X, random_state=j).iloc[4000:8000]
    y_train_full1 = shuffle(y_train_white, random_state=j)
    y_test = shuffle(y, random_state=j).iloc[4000:8000]
    # %% [markdown]
    # #### Validation Set
    # %%
    # First 500 shuffled training rows become the validation set.
    X_valid1, X_train1 = X_train_full1[:500], X_train_full1[500:]
    y_valid1, y_train1 = y_train_full1[:500], y_train_full1[500:]
    # print("X_train:", X_train1.shape[0], "y_train", y_train1.shape[0])
    # print("X_valid: ", X_valid1.shape[0], "y_valid ", y_valid1.shape[0])
    # print("X_test:  ", X_test.shape[0], "y_test  ", X_test.shape[0])
    # %% [markdown]
    # ### Prepare the Data
    # %% [markdown]
    # #### Data Transformation
    # %%
    # Ordinal-encode every categorical column.  NOTE(review): the pipeline is
    # re-fit separately on train/valid/test, so category->integer codes are
    # only consistent if each split contains the same category sets — confirm.
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OrdinalEncoder
    from sklearn.compose import ColumnTransformer
    cat_attr_pipeline = Pipeline([
        ('encoder', OrdinalEncoder())
    ])
    cols = list(X)
    pipeline = ColumnTransformer([
        ('cat_attr_pipeline', cat_attr_pipeline, cols)
    ])
    X_train1 = pipeline.fit_transform(X_train1)
    X_valid1 = pipeline.fit_transform(X_valid1)
    X_test1 = pipeline.fit_transform(X_test)
    # %% [markdown]
    # ### Neural Network
    # %% [markdown]
    # #### Model
    # %%
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import InputLayer, Dense
    # %%
    # tf.random.set_seed(j)
    # NOTE(review): set_random_seed is the TF 1.x API (tf.random.set_seed in TF 2).
    tf.random.set_random_seed(j)
    # %%
    # Single hidden layer MLP for binary classification on the 22 features.
    model1 = Sequential([
        InputLayer(input_shape=(22,)), # input layer
        Dense(45, activation='relu'), # hidden layer
        Dense(1, activation='sigmoid') # output layer
    ])
    # %%
    #model1.summary()
    # %% [markdown]
    # #### Compile the Model
    # %%
    model1.compile(loss='binary_crossentropy',
                   optimizer='sgd',
                   metrics=['accuracy'])
    # %% [markdown]
    # #### Prepare Callbacks
    # %%
    from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
    checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
                                    save_best_only=True)
    early_stopping_cb = EarlyStopping(patience=3,
                                      restore_best_weights=True)
    # %% [markdown]
    # ### Training
    # %%
    train_model1 = model1.fit(X_train1, y_train1,
                              epochs=100,
                              validation_data=(X_valid1, y_valid1),
                              callbacks=[checkpoint_cb, early_stopping_cb])
    # %% [markdown]
    # ### Evaluate the Best Model on Test Set
    # %%
    results1 = model1.evaluate(X_test1, y_test)
    # print("test loss, test acc:", results1)
    # %% [markdown]
    # ### Make Some Predictions
    # %%
    X_new1 = X_test1[:5]
    y_prob1 = model1.predict(X_new1)
    # print(y_prob.round(3))
    # %%
    y_pred1 = (model1.predict(X_new1) > 0.5).astype("int32")
    # print(y_pred)
    # Hard 0/1 predictions for the whole test set (threshold 0.5).
    y_test_pred = (model1.predict(X_test1) > 0.5).astype("int32")
    # %% [markdown]
    # ## KL Divergence
    # %%
    # Assemble per-row frame: predicted probability, hard prediction, truth.
    # X_new = X_test[:5]
    X_df1 = pd.DataFrame(model1.predict(X_test1))
    y_test_pred1 = pd.DataFrame(y_test_pred).reset_index(drop=True)
    X_df1 = pd.concat([X_df1, y_test_pred1], axis=1)
    y_test1 = y_test.reset_index(drop=True)
    X_df1 = pd.concat([X_df1, y_test1], axis=1)
    X_df1.columns = ["X_pred","y_pred","y_actual"]
    #print(X_df1)
    # %%
    import math
    table1 = pd.DataFrame(columns=["KL_div","abs_distance","correctness"])
    for i in range(0,len(X_df1)):
        # KL divergence — the formula is the binary entropy of p
        # ("KL divergence" is the label used throughout this notebook).
        p = X_df1.loc[i,"X_pred"]
        try:
            kl = -(p*math.log(p) + (1-p)*math.log(1-p))
        except:
            # Bare except: log(0) domain errors are treated as zero entropy.
            kl = 0
        table1.loc[i,"KL_div"] = kl
        # absolute distance
        abs_dist = 2*abs(0.5-p)
        table1.loc[i,"abs_distance"] = abs_dist
        # correctness
        # NOTE(review): y_pred1 reuses (overwrites) the prediction-array name
        # bound above — harmless here, but confusing.
        y_pred1 = X_df1.loc[i,"y_pred"]
        y_act1 = X_df1.loc[i,"y_actual"]
        if y_pred1 == y_act1:
            table1.loc[i,"correctness"] = 1 # correct prediction
        else:
            table1.loc[i,"correctness"] = 0 # wrong prediction
        table1.loc[i,"y_pred"] = y_pred1
    #print(table1)
    # %%
    # Bucket rows by entropy and compute per-bucket accuracy.
    table1["count"] = 1
    correctness1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
    correctness1["percent"] = 100*(correctness1["correctness"]/correctness1["count"])
    #print(correctness1)
    # %%
    # NOTE(review): 'index' is first a list of bucket labels, then immediately
    # reused as the enumerate counter in the next loop.
    index = []
    for i in (correctness1.index):
        index.append(str(i))
    plt.bar(index,correctness1["percent"], width=0.7)
    for index,data in enumerate(correctness1["percent"]):
        plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
    plt.ylim(0,120)
    plt.xlabel("KL Divergence")
    plt.ylabel("% correct")
    # %% [markdown]
    # ### Confidence
    # %%
    kl1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.80, 0.1))).apply(sum)
    kl1["percent"] = (kl1["correctness"]/kl1["count"])
    kl1.dropna(inplace=True)
    # NOTE(review): the scatter/regression x-values assume all 7 buckets
    # survived dropna; if a bucket is empty the lengths won't match.
    plt.scatter(np.arange(0, 0.70, 0.1), kl1["percent"])
    plt.xlabel("KL Divergence")
    plt.ylabel("% correct")
    # %%
    # Linear Regression: accuracy as a linear function of entropy bucket.
    from sklearn.linear_model import LinearRegression
    x_reg1 = np.arange(0, 0.70, 0.1).reshape((-1, 1))
    y_reg1 = kl1["percent"]
    reg_model1 = LinearRegression().fit(x_reg1,y_reg1)
    # %%
    # print('intercept(alpha):', reg_model1.intercept_)
    # print('slope(theta):', reg_model1.coef_)
    # %% [markdown]
    # # NN2 Odor - Almond (a)
    # %% [markdown]
    # #### Train Set and Test Set
    # %%
    # Same pipeline as NN1, but trained on the non-almond subset.
    from sklearn.utils import shuffle
    X_train_full2 = shuffle(X_train_green, random_state=j)
    # X_test2 = shuffle(X_test_green, random_state=j)
    y_train_full2 = shuffle(y_train_green, random_state=j)
    # y_test2 = shuffle(y_test_green, random_state=j)
    # %% [markdown]
    # #### Validation Set
    # %%
    X_valid2, X_train2 = X_train_full2[:500], X_train_full2[500:]
    y_valid2, y_train2 = y_train_full2[:500], y_train_full2[500:]
    # print("X_train:", X_train2.shape[0], "y_train", y_train2.shape[0])
    # print("X_valid: ", X_valid2.shape[0], "y_valid ", y_valid2.shape[0])
    # print("X_test:  ", X_test.shape[0], "y_test  ", X_test.shape[0])
    # %% [markdown]
    # ### Prepare the Data
    # %% [markdown]
    # #### Data Transformation
    # %%
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OrdinalEncoder
    from sklearn.compose import ColumnTransformer
    cat_attr_pipeline = Pipeline([
        ('encoder', OrdinalEncoder())
    ])
    cols = list(X)
    pipeline = ColumnTransformer([
        ('cat_attr_pipeline', cat_attr_pipeline, cols)
    ])
    X_train2 = pipeline.fit_transform(X_train2)
    X_valid2 = pipeline.fit_transform(X_valid2)
    # NN2 is evaluated on the SAME test rows as NN1 (X_test / y_test).
    X_test2 = pipeline.fit_transform(X_test)
    y_test2 = y_test
    # %% [markdown]
    # ### Neural Network
    # %% [markdown]
    # #### Model
    # %%
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import InputLayer, Dense
    tf.random.set_random_seed(j)
    # %%
    # Same architecture as model1.
    model2 = Sequential([
        InputLayer(input_shape=(22,)), # input layer
        Dense(45, activation='relu'), # hidden layer
        Dense(1, activation='sigmoid') # output layer
    ])
    # %%
    #model2.summary()
    # %% [markdown]
    # #### Compile the Model
    # %%
    model2.compile(loss='binary_crossentropy',
                   optimizer='sgd',
                   metrics=['accuracy'])
    # %% [markdown]
    # #### Prepare Callbacks
    # %%
    from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
    checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
                                    save_best_only=True)
    early_stopping_cb = EarlyStopping(patience=3,
                                      restore_best_weights=True)
    # %% [markdown]
    # ### Training
    # %%
    train_model2 = model2.fit(X_train2, y_train2,
                              epochs=100,
                              validation_data=(X_valid2, y_valid2),
                              callbacks=[checkpoint_cb, early_stopping_cb])
    # %% [markdown]
    # ### Evaluate the Best Model on Test Set
    # %%
    results2 = model2.evaluate(X_test2, y_test2)
    # print("test loss, test acc:", results2)
    # %% [markdown]
    # ### Make Some Predictions
    # %%
    # y_pred2 = (model2.predict(X_new2) > 0.5).astype("int32")
    # print(y_pred2)
    y_test_pred2 = (model2.predict(X_test2) > 0.5).astype("int32")
    # %% [markdown]
    # ## KL Divergence
    # %%
    # X_new = X_test[:5]
    X_df2 = pd.DataFrame(model2.predict(X_test2))
    y_test_pred2 = pd.DataFrame(y_test_pred2).reset_index(drop=True)
    X_df2 = pd.concat([X_df2, y_test_pred2], axis=1)
    y_test2 = y_test2.reset_index(drop=True)
    X_df2 = pd.concat([X_df2, y_test2], axis=1)
    X_df2.columns = ["X_pred","y_pred","y_actual"]
    #print(X_df2)
    # %%
    import math
    table2 = pd.DataFrame(columns=["KL_div","abs_distance","y_pred","correctness"])
    for i in range(0,len(X_df2)):
        # KL divergence (binary entropy of p, as for NN1).
        p = X_df2.loc[i,"X_pred"]
        if p > 0:
            kl = -(p*math.log(p) + (1-p)*math.log(1-p))
        else:
            # NOTE(review): inconsistent with NN1, which maps the log(0)
            # case to kl = 0; here p <= 0 maps to kl = 1.  Also p == 1
            # would still raise here (log(1-p) with 1-p == 0) — confirm.
            kl = 1
        table2.loc[i,"KL_div"] = kl
        # absolute distance
        abs_dist = 2*abs(0.5-p)
        table2.loc[i,"abs_distance"] = abs_dist
        # correctness
        y_pred = X_df2.loc[i,"y_pred"]
        y_act = X_df2.loc[i,"y_actual"]
        if y_pred == y_act:
            table2.loc[i,"correctness"] = 1 # correct prediction
        else:
            table2.loc[i,"correctness"] = 0 # wrong prediction
        table2.loc[i,"y_pred"] = y_pred
    #print(table2)
    # %%
    table2["count"] = 1
    correctness2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
    correctness2["percent"] = 100*(correctness2["correctness"]/correctness2["count"])
    #print(correctness2)
    # %%
    index = []
    for i in (correctness2.index):
        index.append(str(i))
    plt.bar(index,correctness2["percent"], width=0.7)
    for index,data in enumerate(correctness2["percent"]):
        plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
    plt.ylim(0,120)
    plt.xlabel("KL Divergence")
    plt.ylabel("% correct")
    # %% [markdown]
    # ### Confidence
    # %%
    kl2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
    kl2["percent"] = (kl2["correctness"]/kl2["count"])
    kl2.dropna(inplace=True)
    plt.scatter(np.arange(0, 0.70, 0.1), kl2["percent"])
    # print(kl)
    # print(np.arange(0, 0.7, 0.05))
    # %%
    # Linear Regression: accuracy vs entropy bucket for NN2.
    from sklearn.linear_model import LinearRegression
    x_reg2 = np.arange(0, 0.7, 0.1).reshape((-1, 1))
    y_reg2 = kl2["percent"]
    reg_model2 = LinearRegression().fit(x_reg2,y_reg2)
    # %%
    # print('intercept(alpha):', reg_model2.intercept_)
    # print('slope(theta):', reg_model2.coef_)
    # %% [markdown]
    # ## Algorithm C: It = argmax(Ct,i)
    # %%
    # Correct answer
    ans = pd.DataFrame(X_df2["y_actual"])
    # NN1
    alpha1 = reg_model1.intercept_
    theta1 = reg_model1.coef_
    # NN2
    alpha2 = reg_model2.intercept_
    theta2 = reg_model2.coef_
    # %%
    # Creating NN tables: per-row confidence = 1 + slope * entropy
    # (the fitted intercepts alpha1/alpha2 are not used here — intentional?).
    nn1 = table1.drop(["abs_distance","correctness"], axis=1)
    nn1["conf"] = 1 + theta1 * nn1["KL_div"]
    nn2 = table2.drop(["abs_distance","correctness"], axis=1)
    nn2["conf"] = 1 + theta2 * nn2["KL_div"]
    # nn2
    # %%
    # Determing higher confidence NN and choosing that arm
    for i in range(0,len(nn1)):
        if nn1.loc[i,"conf"] > nn2.loc[i,"conf"]:
            ans.loc[i,"y_pred"] = nn1.loc[i,"y_pred"]
            ans.loc[i,"NN"] = 1
            ans.loc[i,"conf"] = nn1.loc[i,"conf"]
        else:
            ans.loc[i,"y_pred"] = nn2.loc[i,"y_pred"]
            ans.loc[i,"NN"] = 2
            ans.loc[i,"conf"] = nn2.loc[i,"conf"]
    # ans
    # %% [markdown]
    # #### Comparing performance
    # %%
    # NN1 performance: count of wrong predictions on the shared test set.
    cost1 = 0
    for i in range(0,len(nn1)):
        if nn1.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
            cost1 += 1
        else:
            pass
    # NN2 performance
    cost2 = 0
    for i in range(0,len(nn2)):
        if nn2.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
            cost2 += 1
        else:
            pass
    # Combined performance: errors of the confidence-chosen arm, logging
    # which network was picked for each wrong prediction.
    cost3 = 0
    for i in range(0,len(nn1)):
        nn = ans.loc[i,"NN"]
        nn_conf = ans.loc[i,"conf"]
        if ans.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
            cost3 += 1
            wrong_record = wrong_record + (f"Run:{run} - Wrong NN:{nn}, Conf:{nn_conf}") + "\n"
        else:
            pass
    # %%
    # Accumulate this run's error counts into the overall totals and log line.
    record = record+(f"Run:{run} - Error count for NN1:{cost1}, NN2:{cost2}, Combined:{cost3}") + "\n"
    total_error_1 += cost1
    total_error_2 += cost2
    total_error_comb += cost3
    print(f"Run {run} complete!")
    run+=1
# Final summary: per-run log plus mean error counts across all seeds.
print(record)
print(f"Average error count for NN1:{total_error_1/num_trials}, NN2:{total_error_2/num_trials}, Combined:{total_error_comb/num_trials}")
#%%
# print(wrong_record)
# %%
| 27.673401 | 177 | 0.592469 |
ab5a2d297c17fda8c540f8f0177b9c9013791e11 | 4,187 | py | Python | zhihu_user_info_spider/zhihu_user_info_spider/parser/Parser.py | takhello/spider_collection | 05d2a517f52527cebbbf82de3072673f06c90d81 | [
"MIT"
] | null | null | null | zhihu_user_info_spider/zhihu_user_info_spider/parser/Parser.py | takhello/spider_collection | 05d2a517f52527cebbbf82de3072673f06c90d81 | [
"MIT"
] | null | null | null | zhihu_user_info_spider/zhihu_user_info_spider/parser/Parser.py | takhello/spider_collection | 05d2a517f52527cebbbf82de3072673f06c90d81 | [
"MIT"
] | null | null | null | from zhihu_user_info_spider.entities.UserEntity import UserEntityList
import parsel
import re
from zhihu_user_info_spider.Exception import SpiderException
# Module-level accumulator; Parser.user_info_parser appends parsed rows here
# when called with is_add=True.
user_entity = UserEntityList()
class Parser(object):
    """Static parsers for Zhihu payloads.

    The ``is_add`` and ``database`` flags exist to fit the calling interface:
    ``is_add`` decides whether the parsed row is pushed into the shared
    ``user_entity`` buffer (default True); ``database`` marks payloads read
    back from the database (default False), whose field names differ slightly
    from the live API's.
    """

    @staticmethod
    def user_info_parser(json: dict, is_add=True, database=False):
        """Flatten a raw user payload into a plain dict.

        Args:
            json: decoded user payload (live-API shape, or the database shape
                when ``database`` is True).
            is_add: when True the row is appended to the module-level
                ``user_entity`` and nothing is returned; when False the dict
                is returned to the caller.
            database: selects the database field layout (``id2``, plain
                ``level``/``avatar_url``/``business``, location as a list of
                strings) instead of the live-API layout (``id``,
                ``level_info``/``avatar_url_template``, location as a list of
                dicts with a ``name`` key).

        Returns:
            The flattened dict when ``is_add`` is False, otherwise None.
        """
        single_user_info_dict = {}
        single_user_info_dict["id2"] = json["id2"] if database else json["id"]
        single_user_info_dict["url_token"] = json["url_token"]
        single_user_info_dict["name"] = json["name"]
        # Zhihu encodes gender as 1 = male, 0 = female; anything else unknown.
        if json["gender"] == 1:
            single_user_info_dict["gender"] = "男"
        elif json["gender"] == 0:
            single_user_info_dict["gender"] = "女"
        else:
            single_user_info_dict["gender"] = "未知"
        # Fields copied verbatim in both payload shapes.
        for field in ("type", "headline", "description", "following_count",
                      "follower_count", "answer_count", "zvideo_count",
                      "question_count", "articles_count", "columns_count",
                      "favorite_count", "following_question_count",
                      "following_topic_count", "following_columns_count",
                      "following_favlists_count"):
            single_user_info_dict[field] = json[field]
        location_list = json["location"]
        single_user_info_dict["location"] = []
        single_user_info_dict["voteup_count"] = json["voteup_count"]
        if database:
            single_user_info_dict["level"] = json["level"]
            single_user_info_dict["avatar_url"] = json["avatar_url"]
            single_user_info_dict["business"] = json["business"]
            if location_list is not None:
                single_user_info_dict["location"] = list(location_list)
        else:
            single_user_info_dict["level"] = json["level_info"]["level"]
            single_user_info_dict["avatar_url"] = json["avatar_url_template"]
            single_user_info_dict["business"] = json["business"]['name']
            if location_list is not None:
                single_user_info_dict["location"] = [item["name"] for item in location_list]
        if is_add:
            user_entity.add_user(single_user_info_dict)
            return None
        return single_user_info_dict

    @staticmethod
    def hot_question_list_parser(response_text: str):
        """Extract question ids from the Zhihu hot-list page HTML.

        Args:
            response_text: raw HTML of the hot-list page.

        Returns:
            List of question id strings (last path segment of each link).

        Raises:
            SpiderException: when the page is empty/None, or when it holds
                no question links (typically an expired cookie).
        """
        if response_text is None or response_text == "":
            print("请输入正确的hot文档")
            raise SpiderException("请输入正确的hot文档")
        url_list = parsel.Selector(response_text).xpath("//div[@class='HotItem-content']/a/@href").getall()
        # Keep only question links.  Bug fix: the previous in-place while/pop
        # loop both skipped the element after each removal (i was always
        # incremented) and shrank its own bound twice (len(url_list) already
        # reflects the pops, yet n was subtracted again), so trailing
        # non-question links could survive the filter.
        url_list = [url for url in url_list if "question" in url]
        if len(url_list) == 0:
            print("接收到的hot文档已失效,请及时更换cookie")
            raise SpiderException("接收到的hot文档已失效,请及时更换cookie")
        return [url.split("/")[-1] for url in url_list]
if __name__ == '__main__':
    # Running the module directly is a no-op; the parsers are meant to be imported.
    print()
| 43.614583 | 111 | 0.62097 |
4e2eb8172e6bd87037ea5907a8a4c4b6e711db84 | 8,091 | py | Python | legal-api/src/legal_api/core/meta/filing.py | vysakh-menon-aot/lear | 7bae45efa2f9f89a7e826567c85de55fde68e09e | [
"Apache-2.0"
] | null | null | null | legal-api/src/legal_api/core/meta/filing.py | vysakh-menon-aot/lear | 7bae45efa2f9f89a7e826567c85de55fde68e09e | [
"Apache-2.0"
] | null | null | null | legal-api/src/legal_api/core/meta/filing.py | vysakh-menon-aot/lear | 7bae45efa2f9f89a7e826567c85de55fde68e09e | [
"Apache-2.0"
] | null | null | null | # Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta Filing support for the core domain used by the application."""
import re
from contextlib import suppress
from enum import Enum, auto
from typing import Final, MutableMapping, Optional
from legal_api.models import Business
from legal_api.models import Filing as FilingStorage
from legal_api.utils.datetime import date
class AutoName(str, Enum):
    """Enum base whose auto() values are the camelCase form of the member name."""

    #pragma warning disable S5720; # noqa: E265
    # disable sonar cloud complaining about this signature
    def _generate_next_value_(name, start, count, last_values):  # pylint: disable=W0221,E0213 # noqa: N805
        """Derive the value from the member name: SNAKE_CASE -> snakeCase."""
        first, *rest = name.split('_')
        return first.lower() + ''.join(part.capitalize() for part in rest)
    #pragma warning enable S5720; # noqa: E265
class ReportTitles(str, Enum):
    """Display titles for the reports that can be generated for a filing."""
    ALTERATION_NOTICE = 'Alteration Notice'
    CERTIFICATE = 'Certificate'
    CERTIFICATE_OF_NAME_CHANGE = 'Certificate Of Name Change'
    CERTIFIED_MEMORANDUM = 'Certified Memorandum'
    CERTIFIED_RULES = 'Certified Rules'
    NOTICE_OF_ARTICLES = 'Notice of Articles'
class ReportNames(AutoName):
    """Internal report identifiers; values are the camelCase member names."""
    ALTERATION_NOTICE = auto()
    CERTIFICATE = auto()
    CERTIFICATE_OF_NAME_CHANGE = auto()
    CERTIFIED_MEMORANDUM = auto()
    CERTIFIED_RULES = auto()
    NOTICE_OF_ARTICLES = auto()
class FilingTitles(str, Enum):
    """Shared default display titles for filings."""

    INCORPORATION_APPLICATION_DEFAULT = 'Incorporation Application'
# Registry of filing metadata, keyed by filing type.
# Per-entry keys (not every entry has all of them):
#   name        - the filing type key, repeated for convenience
#   title       - internal/ledger title
#   displayName - user-facing name; either a plain string or a dict keyed by
#                 business legal type (e.g. 'BC', 'BEN', 'CP')
#   codes       - fee codes keyed by business legal type
#   code        - a single fee code applied regardless of legal type
#   free        - alternate no-charge fee codes keyed by legal type
#   additional  - extra report outputs produced for the listed legal types;
#                 'types' is a comma-separated string of legal types
FILINGS: Final = {
    'affidavit': {
        'name': 'affidavit',
        'title': 'Affidavit',
        'codes': {
            'CP': 'AFDVT'
        }
    },
    'alteration': {
        'name': 'alteration',
        'title': 'Notice of Alteration Filing',
        'displayName': 'Alteration',
        'codes': {
            'BC': 'ALTER',
            'BEN': 'ALTER'
        }
    },
    'annualReport': {
        'name': 'annualReport',
        'title': 'Annual Report Filing',
        'displayName': 'Annual Report',
        'codes': {
            'CP': 'OTANN',
            'BEN': 'BCANN'
        }
    },
    'changeOfAddress': {
        'name': 'changeOfAddress',
        'title': 'Change of Address Filing',
        'displayName': 'Address Change',
        'codes': {
            'CP': 'OTADD',
            'BEN': 'BCADD'
        }
    },
    'changeOfDirectors': {
        'name': 'changeOfDirectors',
        'title': 'Change of Directors Filing',
        'displayName': 'Director Change',
        'codes': {
            'CP': 'OTCDR',
            'BEN': 'BCCDR'
        },
        # no-charge variant of the same filing
        'free': {
            'codes': {
                'CP': 'OTFDR',
                'BEN': 'BCFDR'
            }
        },
        'additional': [
            {'types': 'BC,BEN', 'outputs': ['noticeOfArticles', ]},
        ]
    },
    'changeOfName': {
        'name': 'changeOfName',
        'title': 'Change of Name Filing',
        'displayName': 'Legal Name Change'
    },
    'conversion': {
        'name': 'conversion',
        'title': 'Conversion Ledger',
        'displayName': 'Conversion'
    },
    'correction': {
        'name': 'correction',
        'title': 'Correction',
        'displayName': 'Correction',
        'codes': {
            'BEN': 'CRCTN',
            'CP': 'CRCTN'
        }
    },
    'courtOrder': {
        'name': 'courtOrder',
        'title': 'Court Order',
        'displayName': 'Court Order',
        'code': 'NOFEE'},
    'dissolution': {
        'name': 'dissolution',
        'title': 'Voluntary dissolution',
        'displayName': 'Voluntary Dissolution',
        'codes': {
            'CP': 'DIS_VOL',
            'BC': 'DIS_VOL',
            'BEN': 'DIS_VOL',
            'ULC': 'DIS_VOL',
            'CC': 'DIS_VOL',
            'LLC': 'DIS_VOL'
        },
        'additional': [
            {'types': 'CP', 'outputs': ['certificateOfDissolution', 'specialResolution', 'affidavit']},
            {'types': 'BC,BEN,CC,ULC,LLC', 'outputs': ['certificateOfDissolution']},
        ]
    },
    'incorporationApplication': {
        'name': 'incorporationApplication',
        'title': FilingTitles.INCORPORATION_APPLICATION_DEFAULT,
        # displayName varies by legal type for this filing
        'displayName': {
            'BC': FilingTitles.INCORPORATION_APPLICATION_DEFAULT,
            'BEN': 'BC Benefit Company Incorporation Application',
            'CP': FilingTitles.INCORPORATION_APPLICATION_DEFAULT,
        },
        'codes': {
            'BEN': 'BCINC'
        },
        'additional': [
            {'types': 'CP', 'outputs': ['certificate']},
            {'types': 'BC,BEN', 'outputs': ['noticeOfArticles', 'certificate']},
        ]
    },
    'registrarsNotation': {
        'name': 'registrarsNotation',
        'title': 'Registrars Notation',
        'displayName': "Registrar's Notation",
        'code': 'NOFEE'},
    'registrarsOrder': {
        'name': 'registrarsOrder',
        'title': 'Registrars Order',
        'displayName': "Registrar's Order",
        'code': 'NOFEE'},
    'specialResolution': {
        'name': 'specialResolution',
        'title': 'Special Resolution',
        'displayName': 'Special Resolution',
        'codes': {
            'CP': 'SPRLN'}},
    'transition': {
        'name': 'transition',
        'title': 'Transition',
        'displayName': 'Transition Application',
        'codes': {
            'BC': 'TRANS',
            'BEN': 'TRANS'
        }
    },
}
class FilingMeta:  # pylint: disable=too-few-public-methods
    """Create all the information about a filing."""

    @staticmethod
    def display_name(business: Business, filing: FilingStorage, full_name: bool = True) -> Optional[str]:
        """Return the name of the filing to display on outputs.

        Falls back to title-casing the camelCase filing type when the type has
        no displayName entry in FILINGS. Annual reports get the effective year
        appended; corrections get the corrected filing's name appended.
        """
        # if there is no lookup
        if not (names := FILINGS.get(filing.filing_type, {}).get('displayName')):
            # e.g. 'changeOfAddress' -> 'Change Of Address'
            return ' '.join(word.capitalize()
                            for word in
                            re.sub(r'([A-Z])', r':\1', filing.filing_type).split(':'))

        if isinstance(names, MutableMapping):
            # displayName varies by the business legal type for this filing
            name = names.get(business.legal_type)
        else:
            name = names

        # BUG FIX: the original used `filing.filing_type in ('annualReport')` —
        # the parentheses do not create a tuple, so that was a *substring* test
        # against the string 'annualReport'. Use equality instead.
        if filing.filing_type == 'annualReport' and (year := FilingMeta.get_effective_display_year(filing.meta_data)):
            name = f'{name} ({year})'
        elif filing.filing_type == 'correction' and filing.meta_data:
            with suppress(Exception):
                name = f'{name} - {FilingMeta.display_name(business, filing.children[0], False)}'

        if full_name and filing.parent_filing_id and filing.status == FilingStorage.Status.CORRECTED:
            name = f'{name} - Corrected'
        return name

    @staticmethod
    def get_effective_display_year(filing_meta_data: dict) -> Optional[str]:
        """Render a year as a string, given all filing mechanisms.

        Returns None when the annual report date is missing or malformed.
        """
        with suppress(IndexError, KeyError, TypeError):
            report_date = filing_meta_data['annualReport']['annualReportDate']
            return str(date.fromisoformat(report_date).year)
        return None

    @staticmethod
    def get_all_outputs(business_type: str, filing_name: str) -> list:
        """Return the list of additional outputs for the business type.

        Returns an empty list when the filing is unknown or has no matching
        'additional' entry.
        """
        # BUG FIX: default to {} so an unknown filing_name no longer raises
        # AttributeError on None.
        filing = FILINGS.get(filing_name, {})
        for docs in filing.get('additional', []):
            # BUG FIX: split the comma-separated types so the check matches
            # whole legal-type tokens instead of substrings of 'BC,BEN,...'.
            if business_type in docs.get('types', '').split(','):
                return docs.get('outputs')
        return []
| 32.757085 | 120 | 0.5736 |
8902c847354456e87db15d82d6e24a5f7a8b5c32 | 285 | py | Python | Python/16 - 077 - contando vogais em tupla.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/16 - 077 - contando vogais em tupla.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/16 - 077 - contando vogais em tupla.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | # Aula 16 (Tuplas)
# Exercise 077: print every word of the tuple, followed by the vowels it contains.
palavras = ('Computador', 'Windows', 'Linux', 'Acer', 'Dell', 'Monitor', 'Smartphone')
print(palavras)
# Iterate the elements directly instead of the non-idiomatic range(len(...)) loop;
# `letra` also replaces the ambiguous single-letter name `l` (PEP 8 / E741).
for palavra in palavras:
    print(f'\n{palavra}', end=': ')
    for letra in palavra:
        if letra in 'aeiouAEIOU':  # vowel check, either case
            print(letra, end=' ')
| 23.75 | 86 | 0.564912 |
099f32e801648b1dd676ff3a1fcf57ecbee704e4 | 5,533 | py | Python | http/url.py | JNRowe-retired/http | 8961180cbff000965e723ce8f800198c45488334 | [
"MIT"
] | 4 | 2019-07-10T10:12:14.000Z | 2021-09-16T13:03:41.000Z | http/url.py | JNRowe-retired/http | 8961180cbff000965e723ce8f800198c45488334 | [
"MIT"
] | 5 | 2019-07-10T09:56:45.000Z | 2021-07-16T09:35:36.000Z | http/url.py | JNRowe-retired/http | 8961180cbff000965e723ce8f800198c45488334 | [
"MIT"
] | 1 | 2021-07-13T10:15:16.000Z | 2021-07-13T10:15:16.000Z | from urlparse import urlunparse as urlimplode, urlparse as urlexplode
from urlparse import parse_qsl as queryexplode, urljoin
from urllib import urlencode as queryimplode, quote
import re
"""
:copyright: (c) 2012 by Damien 'bl0b' Leroux
:license: MIT
"""
__author__ = "Damien 'bl0b' Leroux <damien.leroux@gmail.com>"
class Url(object):
    """Handles URLs.

    Construct and deconstruct an URL in a simple and easy manner.
    Path is a list of path elements.
    Query is a list of 2-uples (key, value).
    User can either configure netloc as a whole or username, password, host,
    and port independently.
    String representation of Url instance is the URL string itself.

    NOTE: this module targets Python 2 (urlparse/urllib imports, `unicode`,
    builtin `reduce`).
    """

    class Path(list):
        """A URL path held as a list of segments; str() joins and percent-quotes."""

        SEP = '/'  # necessary for splitting

        def __init__(self, path):
            parts = self._get_parts(path)
            list.__init__(self, parts)

        def append(self, path):
            """Append the segment(s) of `path`, skipping None and empty parts."""
            parts = self._get_parts(path)
            # TODO refactor this part
            for part in parts:
                # BUG FIX: was `part is not ''` — identity comparison against a
                # string literal is implementation-defined (SyntaxWarning on
                # modern CPython); use equality.
                if part is not None and part != '':
                    super(Url.Path, self).append(part)

        def _get_parts(self, path):
            """Normalize `path` (string or iterable of segments) to a segment list."""
            type_path = type(path)
            if type_path is str or type_path is unicode:
                # Keep the leading empty segment (absolute path marker) but drop
                # other empty segments produced by doubled separators.
                parts = [x for (i, x)
                         in enumerate(path.split(Url.Path.SEP))
                         if x or not i]
            else:
                parts = path
            return parts

        def __str__(self):
            return quote(Url.Path.SEP.join(self))

        @property
        def is_absolute(self):
            # absolute paths start with an empty segment ('' before the first '/')
            return len(self) > 0 and self[0] == ''

        @property
        def is_relative(self):
            return len(self) > 0 and self[0] != ''

        def canonify(self):
            """Resolve '.', '..' and empty segments in place; return self."""
            if len(self) > 0 and self[0] == '':
                tmp = self[1:]
                init = self[:1]
            else:
                tmp = self
                init = []

            def _canon(a, b):
                if b in ('.', ''):
                    return a
                if b == '..':
                    return a[:-1]
                return a + [b]
            # `reduce` is a builtin under Python 2
            canon = init + reduce(_canon, tmp, [])
            self.__init__(canon)
            return self

    # optional "user[:password]@" prefix, host, optional ":port" suffix
    netloc_re = re.compile('(([^:@]+)(:([^@]+))?@)?([^:]+)(:([0-9]+))?')

    def __init__(self, string_url=None, scheme='http', netloc='', path=None,
                 params='', query=None, fragment='', username=None,
                 password=None, host=None, port=None):
        """Construct an instance from an URL string

        :param scheme: Scheme for this request. Default is HTTP
        :param netloc: Default is ''
        :param path: Default is an empty list
        :param params: Default is an empty string
        :param query: Default is an empty list
        :param fragment: Default is an empty string
        :param username: Default is None
        :param password: Default is None
        :param host: Default is an empty string
        :param port: Default is None

        If netloc is not empty, it takes precedence over
        (username, password, host, port). Only host is mandatory if netloc is
        not provided."""
        # BUG FIX: `path=[]` / `query=[]` mutable defaults were shared across
        # instances — `self.query = query` stored the default list itself, so
        # mutating one Url's query leaked into every later default-constructed
        # Url. Use None sentinels and build a fresh list per call.
        if path is None:
            path = []
        if query is None:
            query = []
        if string_url is not None:
            p = urlexplode(string_url)
            scheme, netloc, path, params, query, fragment = p
            self.username = p.username
            self.password = p.password
            self.host = p.hostname
            self.port = p.port
            query = queryexplode(query)
        elif netloc:
            # the netloc property setter splits this into user/password/host/port
            self.netloc = netloc
        else:
            self.host = host
            self.port = port
            self.username = username
            self.password = password
        self.scheme = scheme
        self._path = Url.Path(path)
        self.query = query
        self.fragment = fragment
        self.params = params

    def __str__(self):
        "The URL in human-copypastable form."
        return urlimplode(self)

    def __iter__(self):
        "Behave as a tuple or more precisely as a urlparse.ParseResult"
        yield self.scheme
        yield self.netloc
        path = str(self.path)
        if path == '':
            path = '/'
        yield path
        yield self.params
        yield queryimplode(self.query)
        yield self.fragment

    def _netloc_get(self):
        "Reconstruct netloc. Not to be called directly."
        ret = []
        if self.username is not None:
            ret.append(self.username)
            if self.password is not None:
                ret.append(':' + self.password)
            ret.append('@')
        ret.append(self.host or '')
        if self.port is not None:
            ret.append(':' + str(self.port))
        return ''.join(ret)

    def _netloc_set(self, netloc):
        "Deconstruct netloc. Not to be called directly."
        optional0, self.username, \
            optional1, self.password, \
            self.host, \
            optional2, self.port = self.netloc_re.match(netloc).groups()

    def _path_set(self, p):
        self._path = Url.Path(p)

    def __add__(self, u):
        "Join two URLs."
        return Url(urljoin(str(self), str(u)))

    def __eq__(self, u):
        # NOTE(review): __ne__/__hash__ are not defined; under Python 2, `!=`
        # does not derive from __eq__ — confirm callers only use `==`.
        return str(self) == str(u)

    __repr__ = __str__
    path = property(lambda s: s._path, _path_set)
    netloc = property(_netloc_get, _netloc_set)
    is_absolute = property(lambda s: bool(s.host))
    is_relative = property(lambda s: not bool(s.host))
    is_secure = property(lambda s: s.scheme.lower() == 'https')
| 30.910615 | 77 | 0.548346 |
66d112866a6518d0a2493bbf4ea2e8f417a37620 | 769 | py | Python | flask_rebar/authenticators/base.py | sanketsaurav/flask-rebar | a58488e2ed8a9ab94c7c5bf9825ae28e547385bf | [
"MIT"
] | null | null | null | flask_rebar/authenticators/base.py | sanketsaurav/flask-rebar | a58488e2ed8a9ab94c7c5bf9825ae28e547385bf | [
"MIT"
] | null | null | null | flask_rebar/authenticators/base.py | sanketsaurav/flask-rebar | a58488e2ed8a9ab94c7c5bf9825ae28e547385bf | [
"MIT"
] | null | null | null | """
Base Authenticator
~~~~~~~~~~~~~~~~~~
Base class for authenticators.
:copyright: Copyright 2018 PlanGrid, Inc., see AUTHORS.
:license: MIT, see LICENSE for details.
"""
class Authenticator(object):
    """
    Abstract authenticator class. Custom authentication methods should
    extend this class.
    """

    def authenticate(self):
        """
        Implementations of :class:`Authenticator` should override this method.

        This will be called before a request handler is called, and should raise
        an :class:`flask_rebar.errors.HttpJsonError` if authentication fails.
        Otherwise the return value is ignored.

        :raises: :class:`flask_rebar.errors.Unauthorized`
        """
        # Subclasses must override; the base class deliberately fails loudly.
        raise NotImplementedError
| 25.633333 | 80 | 0.656697 |
a460a76413a4d661a5da8fb7491ba488cf36b8cb | 20,285 | py | Python | src/graph/trans_sys.py | MuvvalaKaran/Adam-Can-Play-Any-Strategy | ea8d3ae87bc73e1285833f599797c90321026f5e | [
"MIT"
] | null | null | null | src/graph/trans_sys.py | MuvvalaKaran/Adam-Can-Play-Any-Strategy | ea8d3ae87bc73e1285833f599797c90321026f5e | [
"MIT"
] | null | null | null | src/graph/trans_sys.py | MuvvalaKaran/Adam-Can-Play-Any-Strategy | ea8d3ae87bc73e1285833f599797c90321026f5e | [
"MIT"
] | null | null | null | import warnings
import queue
import math
import networkx as nx
from graphviz import Digraph
# local packages
from .two_player_graph import TwoPlayerGraph
from ..factory.builder import Builder
class FiniteTransSys(TwoPlayerGraph):
    """A finite transition system graph.

    Extends TwoPlayerGraph with helpers to (1) render the graph via graphviz,
    (2) expand a system-only (eve) graph into a two-player game where the
    human (adam) may intervene at most k times, and (3) build small
    hard-coded example systems.
    """

    def __init__(self, graph_name: str, config_yaml: str, save_flag: bool = False,
                 finite: bool = True):
        # Delegates straight to the parent constructor; no extra state here.
        TwoPlayerGraph.__init__(self, graph_name, config_yaml, save_flag, finite)

    def fancy_graph(self, color=("lightgrey", "red", "purple"), **kwargs) -> None:
        """
        Method to create a illustration of the graph

        :param color: (default fill, init fill, accepting fill) node colors
        :param kwargs: forwarded to save_dot_graph when the save flag is set
        :return: Diagram of the graph
        """
        dot: Digraph = Digraph(name="graph")
        nodes = self._graph_yaml["nodes"]
        for n in nodes:
            # default color for all the nodes is grey
            ap = n[1].get('ap')
            ap = "{" + str(ap) + "}"
            dot.node(str(n[0]), _attributes={"style": "filled",
                                             "fillcolor": color[0],
                                             "xlabel": ap,
                                             "shape": "rectangle"})
            if n[1].get('init'):
                # default color for init node is red
                dot.node(str(n[0]), _attributes={"style": "filled", "fillcolor": color[1], "xlabel": ap})
            if n[1].get('accepting'):
                dot.node(str(n[0]), _attributes={"style": "filled", "fillcolor": color[2], "xlabel": ap})
            # the player attribute controls the shape: eve -> default, adam -> rectangle
            if n[1].get('player') == 'eve':
                dot.node(str(n[0]), _attributes={"shape": "", "xlabel": ap})
            if n[1].get('player') == 'adam':
                dot.node(str(n[0]), _attributes={"shape": "rectangle", "xlabel": ap})
        # add all the edges
        edges = self._graph_yaml["edges"]
        # load the weights to illustrate on the graph
        for counter, edge in enumerate(edges):  # NOTE(review): `counter` is unused
            if edge[2].get('strategy') is True:
                # strategy edges are drawn in red without the weight label
                dot.edge(str(edge[0]), str(edge[1]), label=str(edge[2].get('actions')),
                         _attributes={'color': 'red'})
            else:
                dot.edge(str(edge[0]), str(edge[1]), label=str(edge[2].get('actions')) + ': ' + str(edge[2].get('weight')))
        # set graph attributes
        # dot.graph_attr['rankdir'] = 'LR'
        dot.node_attr['fixedsize'] = 'False'
        dot.edge_attr.update(arrowhead='vee', arrowsize='1', decorate='True')
        if self._save_flag:
            graph_name = str(self._graph.__getattribute__('name'))
            self.save_dot_graph(dot, graph_name, **kwargs)

    # a function to construct the two game automatically in code. K = # of times the human can intervene
    def automate_construction(self, k: int) -> 'FiniteTransSys':
        # NOTE(review): a non-int k only triggers a warning; execution continues
        # and range(k) below would then raise — confirm this is intended.
        if not isinstance(k, int):
            warnings.warn("Please Make sure the Quantity K which represents the number of times the human can "
                          "intervene is an integer")
        eve_node_lst = []
        adam_node_lst = []
        two_player_graph_ts = FiniteTransSys(self._graph_name, self._config_yaml, self._save_flag)
        two_player_graph_ts._graph = nx.MultiDiGraph(name=self._graph_name)
        # lets create k copies of the states
        for _n in self._graph.nodes():
            for i in range(k+1):
                _sys_node = (_n, i)
                eve_node_lst.append(_sys_node)
        two_player_graph_ts.add_states_from(eve_node_lst, player='eve')
        # for each edge create a human node and then alter the original edge to go through the human node
        for e in self._graph.edges():
            for i in range(k):
                # lets create a human edge with huv,k naming convention
                # NOTE(review): eve nodes use an int copy index while adam nodes
                # use the string f"{i}" — confirm the mixed key types are intended.
                _env_node = ((f"h{e[0][1:]}{e[1][1:]}"), f"{i}")
                adam_node_lst.append(_env_node)
        two_player_graph_ts.add_states_from(adam_node_lst, player='adam')
        # add init node
        init_node = self.get_initial_states()
        two_player_graph_ts.add_state_attribute((init_node[0][0], 0), "init", True)
        for e in self._graph.edges.data():
            # add edge between e[0] and the human node h{e[0][1:]}{e[1][1:]}, k
            for ik in range(k):
                # system move routed through the human node of the same copy
                two_player_graph_ts.add_edge((e[0], ik), ((f"h{e[0][1:]}{e[1][1:]}"), f"{ik}"),
                                             actions=e[2].get("actions"), weight=e[2].get("weight"))
                two_player_graph_ts.add_edge(((f"h{e[0][1:]}{e[1][1:]}"), f"{ik}"), (e[1], ik),
                                             actions=e[2].get("actions"), weight=e[2].get("weight"))
                # a human intervention ("m") diverts to any *other* state in the next copy
                _alt_nodes_set = set(self._graph.nodes()) - {e[1]}
                for _alt_node in _alt_nodes_set:
                    two_player_graph_ts.add_edge(((f"h{e[0][1:]}{e[1][1:]}"), f"{ik}"), (_alt_node, ik+1),
                                                 actions="m", weight=0)
        # manually add edges to states that belong to k index
        # (in the last copy the human has no interventions left, so edges are direct)
        for e in self._graph.edges.data():
            two_player_graph_ts.add_edge((e[0], k), (e[1], k),
                                         actions=e[2].get('actions'), weight=e[2].get("weight"))
        # add the original atomic proposition to the new states
        for _n in self._graph.nodes.data():
            if _n[1].get('ap'):
                for ik in range(k+1):
                    two_player_graph_ts.add_state_attribute((_n[0], ik), 'ap', _n[1].get('ap'))
        return two_player_graph_ts

    def _sanity_check(self, debug: bool = False):
        """
        A helper method that loops through every node and checks if it has an outgoing edge or not.
        If not then we add a "self" action loop: weight 0 for accepting states,
        otherwise the graph's max weight (from get_max_weight).
        :return:
        """
        max_weight = self.get_max_weight()
        accn_states = self.get_accepting_states()
        for _n in self._graph.nodes():
            if len(list(self._graph.successors(_n))) == 0:
                if debug:
                    print("====================================")
                    print(f"Adding a self loop to state {_n} in {self._graph.name}")
                    print("====================================")
                # if its an accepting state
                if _n in accn_states:
                    self._graph.add_edge(_n, _n, weight=0, actions="self")
                # if its a trap state
                else:
                    self._graph.add_edge(_n, _n, weight=max_weight, actions="self")

    def _sanity_check_finite(self, debug: bool = False):
        """
        A helper method that loops through every node and checks if it has an outgoing edge or not.
        If not then we add a "self" action loop: weight 0 for accepting states,
        otherwise -inf (finite-payoff variant).
        :return:
        """
        max_weight = -1 * math.inf
        accn_states = self.get_accepting_states()
        for _n in self._graph.nodes():
            if len(list(self._graph.successors(_n))) == 0:
                if debug:
                    print("====================================")
                    print(f"Adding a self loop to state {_n} in {self._graph.name}")
                    print("====================================")
                # if its an accepting state
                if _n in accn_states:
                    self._graph.add_edge(_n, _n, weight=0, actions="self")
                # if its a trap state
                else:
                    self._graph.add_edge(_n, _n, weight=max_weight, actions="self")

    @classmethod
    def from_raw_ts(cls, raw_ts: TwoPlayerGraph,
                    graph_name: str,
                    config_yaml: str,
                    save_flag: bool = False,
                    plot: bool = False,
                    human_intervention: int = 1,
                    plot_raw_ts: bool = False,
                    finite: bool = False,
                    debug: bool = False):
        """
        Return a concrete instance of a FiniteTransSys given a basic transition system with nodes
        that belong only to the system(eve)
        :param raw_ts: graph whose nodes all belong to the system player
        :param graph_name: name of the resulting two-player graph
        :param config_yaml: yaml config path for the resulting graph
        :param save_flag: if True, rendered graphs are saved to disk
        :param plot: plot the final two-player graph
        :param human_intervention: max number of human interventions (k)
        :param plot_raw_ts: plot the intermediate raw system before expansion
        :param finite: choose the finite-payoff sanity check (-inf trap loops)
        :param debug: print nodes/edges after construction
        :return: An instance of the FiniteTransSys that contains both the env(adam) and sys(eve) nodes
        """
        # NOTE(review): instantiates FiniteTransSys directly rather than cls,
        # so subclassing would not propagate — confirm intended.
        raw_trans_name = "raw" + graph_name
        trans_sys = FiniteTransSys(raw_trans_name, f"config/{raw_trans_name}",
                                   save_flag=save_flag, finite=finite)
        trans_sys._graph = raw_ts._graph
        if finite:
            trans_sys._sanity_check_finite(debug=debug)
        else:
            trans_sys._sanity_check(debug=debug)
        if plot_raw_ts:
            trans_sys.plot_graph()
        # rename back to the final graph name before the two-player expansion
        trans_sys._graph_name = graph_name
        trans_sys._config_yaml = config_yaml
        trans_sys = trans_sys.automate_construction(k=human_intervention)
        if plot:
            trans_sys.plot_graph()
        if debug:
            trans_sys.print_nodes()
            trans_sys.print_edges()
        return trans_sys

    @classmethod
    def get_three_state_ts(cls, graph_name: str,
                           config_yaml: str,
                           save_flag: bool = False,
                           debug: bool = False,
                           plot: bool = False,
                           human_intervention: int = 1,
                           plot_raw_ts: bool = False):
        """
        A methods that return a concrete instance of FiniteTransitionSystem with eve and adam nodes and edges.

        Builds a hard-coded 3-state example (s1/s2/s3 with aps b/a/c) and then
        expands it into a two-player game via automate_construction.
        :param graph_name: name of the resulting graph
        :param config_yaml: yaml config path
        :param save_flag: if True, rendered graphs are saved to disk
        :param debug: print nodes/edges after construction
        :param plot: plot the final two-player graph
        :param human_intervention: max number of human interventions (k)
        :param plot_raw_ts: plot the raw system before expansion
        :return:
        """
        raw_trans_name = "raw" + graph_name
        trans_sys = FiniteTransSys(raw_trans_name, f"config/{raw_trans_name}", save_flag=save_flag)
        # trans_sys.construct_graph()
        trans_sys._graph = nx.MultiDiGraph(name=graph_name)
        trans_sys.add_states_from(['s1', 's2', 's3'])
        trans_sys.add_state_attribute('s1', 'ap', 'b')
        trans_sys.add_state_attribute('s2', 'ap', 'a')
        trans_sys.add_state_attribute('s3', 'ap', 'c')
        trans_sys.add_edge('s1', 's2', actions='s12', weight=0)
        trans_sys.add_edge('s2', 's1', actions='s21', weight=2)
        trans_sys.add_edge('s2', 's3', actions='s23', weight=3)
        trans_sys.add_edge('s3', 's1', actions='s31', weight=5)
        trans_sys.add_edge('s1', 's3', actions='s13', weight=3)
        # trans_sys.add_edge('s1', 's2', actions='s12', weight=-1)
        # trans_sys.add_edge('s2', 's1', actions='s21', weight=-1)
        # trans_sys.add_edge('s2', 's3', actions='s23', weight=-1)
        # trans_sys.add_edge('s3', 's1', actions='s31', weight=-1)
        # trans_sys.add_edge('s1', 's3', actions='s13', weight=-1)
        trans_sys.add_initial_state('s2')
        if plot_raw_ts:
            trans_sys.plot_graph()
        trans_sys._graph_name = graph_name
        trans_sys._config_yaml = config_yaml
        trans_sys = trans_sys.automate_construction(k=human_intervention)
        if plot:
            trans_sys.plot_graph()
        if debug:
            trans_sys.print_nodes()
            trans_sys.print_edges()
        return trans_sys

    @classmethod
    def get_five_state_ts(cls, graph_name: str,
                          config_yaml: str,
                          save_flag: bool = False,
                          debug: bool = False,
                          plot: bool = False,
                          human_intervention: int = 1,
                          plot_raw_ts: bool = False):
        """
        A methods that return a concrete instance of FiniteTransitionSystem with eve and adam nodes and edges.

        Builds a hard-coded 5-state grid-like example (s1..s5, aps b/i/r/g/d,
        compass actions E/W/S/N with negative weights) and expands it into a
        two-player game via automate_construction.
        :param graph_name: name of the resulting graph
        :param config_yaml: yaml config path
        :param save_flag: if True, rendered graphs are saved to disk
        :param debug: print nodes/edges after construction
        :param plot: plot the final two-player graph
        :param human_intervention: max number of human interventions (k)
        :param plot_raw_ts: plot the raw system before expansion
        :return:
        """
        raw_trans_name = "raw" + graph_name
        trans_sys = FiniteTransSys(raw_trans_name, f"config/{raw_trans_name}", save_flag=save_flag)
        # trans_sys.construct_graph()
        trans_sys._graph = nx.MultiDiGraph(name=graph_name)
        trans_sys.add_states_from(['s1', 's2', 's3', 's4', 's5'])
        trans_sys.add_state_attribute('s1', 'ap', 'b')
        trans_sys.add_state_attribute('s2', 'ap', 'i')
        trans_sys.add_state_attribute('s3', 'ap', 'r')
        trans_sys.add_state_attribute('s4', 'ap', 'g')
        trans_sys.add_state_attribute('s5', 'ap', 'd')
        # E = 4 ; W = 2; S = 3 ; N = 9
        trans_sys.add_edge('s1', 's2', actions='E', weight=-4)
        trans_sys.add_edge('s2', 's1', actions='W', weight=-2)
        trans_sys.add_edge('s3', 's2', actions='N', weight=-9)
        trans_sys.add_edge('s2', 's3', actions='S', weight=-3)
        trans_sys.add_edge('s3', 's4', actions='S', weight=-3)
        trans_sys.add_edge('s4', 's3', actions='N', weight=-9)
        trans_sys.add_edge('s1', 's4', actions='W', weight=-2)
        trans_sys.add_edge('s4', 's1', actions='W', weight=-2)
        trans_sys.add_edge('s4', 's5', actions='E', weight=-4)
        trans_sys.add_edge('s5', 's4', actions='S', weight=-3)
        trans_sys.add_edge('s2', 's5', actions='E', weight=-4)
        trans_sys.add_edge('s5', 's2', actions='N', weight=-9)
        trans_sys.add_initial_state('s1')
        if plot_raw_ts:
            trans_sys.plot_graph()
        trans_sys._graph_name = graph_name
        trans_sys._config_yaml = config_yaml
        trans_sys = trans_sys.automate_construction(k=human_intervention)
        if plot:
            trans_sys.plot_graph()
        if debug:
            trans_sys.print_nodes()
            trans_sys.print_edges()
        return trans_sys
class TransitionSystemBuilder(Builder):
    """Builder that produces FiniteTransSys instances from a raw eve-only graph,
    a YAML config file, or one of the pre-built example systems."""

    def __init__(self):
        # call the parent class constructor
        Builder.__init__(self)
        # maps a built-in TS name -> factory classmethod; filled by _load_pre_built()
        self._pre_built = {}

    def __call__(self,
                 raw_trans_sys: FiniteTransSys,
                 graph_name: str,
                 config_yaml: str,
                 from_file: bool = False,
                 pre_built: bool = True,
                 built_in_ts_name: str = "",
                 save_flag: bool = False,
                 debug: bool = False,
                 plot: bool = False,
                 human_intervention: int = 1,
                 finite: bool = False,
                 plot_raw_ts: bool = False) -> 'FiniteTransSys':
        """
        A method to create an instance of a finite transition system consisting of two players - eve and system .

        Construction source precedence: pre_built > raw_trans_sys > from_file.
        :param raw_trans_sys: The original graph with only nodes that belong to eve.
        :param built_in_ts_name: key into the pre-built registry (required when pre_built)
        :param debug: print nodes/edges after construction
        :param plot: plot the resulting graph
        :param human_intervention: max number of human interventions (k)
        :param plot_raw_ts: plot the raw system before the two-player expansion
        :return:
        """
        print(f"No. of times the human can intervene is : {human_intervention}")
        # NOTE(review): TypeError is raised for a missing *value*; ValueError
        # would be the conventional choice, but callers may catch TypeError.
        if pre_built and built_in_ts_name == "":
            raise TypeError("Using the built in transition system. enter a valid transition system name.")
        self._instance = FiniteTransSys(graph_name, config_yaml, save_flag=save_flag, finite=finite)
        self._instance._graph = nx.MultiDiGraph(name=graph_name)
        # load dict with function calls
        self._load_pre_built()
        if pre_built:
            self._instance = self._from_built_in_ts(built_in_ts_name,
                                                    graph_name,
                                                    config_yaml,
                                                    save_flag,
                                                    debug,
                                                    plot,
                                                    human_intervention,
                                                    plot_raw_ts)
        elif raw_trans_sys:
            if not isinstance(raw_trans_sys, FiniteTransSys):
                raise TypeError(f"Please ensure that the raw transition system is of type {FiniteTransSys.__name__}. \n"
                                f"If you are trying to constructing a two player graph with sys(eve) and env(adam) nodes"
                                f" then use the builder for the {TwoPlayerGraph.__name__} class")
            self._instance = self._from_ts(raw_trans_sys,
                                           graph_name,
                                           config_yaml,
                                           save_flag, plot,
                                           human_intervention,
                                           plot_raw_ts,
                                           finite,
                                           debug)
        elif from_file:
            # hydrate the freshly created instance from its YAML description
            self._instance._graph_yaml = self._from_yaml(config_yaml)
            self._instance.build_graph_from_file()
            if plot:
                self._instance.plot_graph()
        return self._instance

    def _from_ts(self, raw_ts: FiniteTransSys,
                 graph_name: str,
                 config_yaml: str,
                 save_flag: bool = False,
                 plot: bool = False,
                 human_intervention: int = 1,
                 plot_raw_ts: bool = False,
                 finite: bool = False,
                 debug: bool = False):
        """
        Returns a Two Player transition system give a transition system with nodes that belong to eve only.

        Thin wrapper over FiniteTransSys.from_raw_ts.
        :param raw_ts: eve-only input graph
        :param graph_name: name of the resulting graph
        :param config_yaml: yaml config path
        :param save_flag: if True, rendered graphs are saved to disk
        :param plot: plot the final two-player graph
        :param human_intervention: max number of human interventions (k)
        :param plot_raw_ts: plot the raw system before expansion
        :param debug: print nodes/edges after construction
        :return: A concrete instance of the FiniteTransSys with both human and system nodes
        """
        return self._instance.from_raw_ts(raw_ts=raw_ts,
                                          graph_name=graph_name,
                                          config_yaml=config_yaml,
                                          save_flag=save_flag,
                                          plot=plot,
                                          human_intervention=human_intervention,
                                          plot_raw_ts=plot_raw_ts,
                                          finite=finite,
                                          debug=debug)

    def _load_pre_built(self):
        """
        A method to load the _pre_built dict with function calls to built in functions that create an
        concrete instance of FiniteTransitionSystem

        effect: Updates the built-in _pre_built dict with their respective keys and function calls as values
        """
        self._pre_built.update({"three_state_ts": self._instance.get_three_state_ts})
        self._pre_built.update({"five_state_ts": self._instance.get_five_state_ts})

    def _from_built_in_ts(self,
                          ts_name: str,
                          graph_name: str,
                          config_yaml: str,
                          save_flag: bool,
                          debug: bool,
                          plot: bool,
                          human_intervention: int,
                          plot_raw_TS: bool):
        """
        Return a pre-built Transition system based on the name of the Transition system which should be a valid key in
        the pre_built dict

        :param ts_name: The name of the system (key in self._pre_built)
        :return:
        """
        try:
            func = self._pre_built[ts_name]
            return func(graph_name,
                        config_yaml,
                        save_flag=save_flag,
                        debug=debug,
                        plot=plot,
                        human_intervention=human_intervention,
                        plot_raw_ts=plot_raw_TS)
        except KeyError:
            # unknown name: re-raise with the list of valid registry keys
            raise KeyError(f"Make sure you enter the correct name to access the pre built TS."
                           f" The built TS names are : {[i for i in self._pre_built.keys()]}")

    def _from_yaml(self, config_file_name: str) -> dict:
        # Load and return the raw YAML graph description.
        config_data = self.load_YAML_config_data(config_file_name)
        return config_data
f0323e013b2598192d2c60b9bb9b6d51436bfe26 | 3,485 | py | Python | phm_robot_task_completion/test/test_phm_robot_task_completion_node.py | inomuh/phm_tools | 849de95081c5a3e9709697dfa1a9c9bea6c0ef99 | [
"Apache-2.0"
] | 12 | 2019-09-20T16:45:07.000Z | 2022-03-16T00:21:59.000Z | phm_robot_task_completion/test/test_phm_robot_task_completion_node.py | inomuh/phm_tools | 849de95081c5a3e9709697dfa1a9c9bea6c0ef99 | [
"Apache-2.0"
] | null | null | null | phm_robot_task_completion/test/test_phm_robot_task_completion_node.py | inomuh/phm_tools | 849de95081c5a3e9709697dfa1a9c9bea6c0ef99 | [
"Apache-2.0"
] | 6 | 2019-12-05T12:17:16.000Z | 2022-02-17T07:07:18.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import math
import rosunit
from phm_robot_task_completion.phm_robot_task_completion_node import RobotTaskCompletionNode
from phm_msgs.msg import Potc
# Package and test-node identifiers reported to rosunit in the main guard below.
PKG = 'phm_robot_task_completion'
NAME = 'test_phm_robot_task_completion_node'
class TestRobotTaskCompletionNode(unittest.TestCase):
    """Unit tests for RobotTaskCompletionNode.set_potc_func.

    POTC = probability of task completion; the node combines "actual" and
    "predict" POTC dictionaries into a Potc message.
    """

    rtcn = RobotTaskCompletionNode()

    # Sample POTC inputs captured from a run. Plain dict literals — the
    # original `dict({...})` wrappers built each dict twice for no benefit.
    actual_potc_dict = {'Nominal': {'Distance': 0.019216928763702394, 'POTC': 0.9999999622234921, 'Time': 0.03545456144544813}, 'Sensor Based': {'Distance': 0.019216928763702394, 'POTC': 0.9999999622234921, 'Time': 0.03545456144544813}}
    predict_potc_dict = {'Nominal': {'Distance': 0.018785978314649166, 'POTC': 0.9999999655204828, 'Time': 0.01687624037496195}, 'Sensor Based': {'Distance': 0.018785978314649166, 'POTC': 0.9999999655204828, 'Time': 0.01687624037496195}}

    def test_1_set_reliability_func(self):
        """Valid POTC dicts populate all actual/predict message fields."""
        # (dead `phm_potc = Potc()` pre-assignment removed — it was
        # immediately overwritten by the call below)
        phm_potc = self.rtcn.set_potc_func(self.actual_potc_dict, self.predict_potc_dict, 5)

        ap_potc_nominal_value = 0.999999962223
        ap_potc_sensor_based_value = 0.999999962223
        ap_potc_time = 0.0354545614454
        ap_potc_distance = 0.0192169287637

        pp_potc_nominal_value = 0.99999996552
        pp_potc_sensor_based_value = 0.99999996552
        pp_potc_time = 0.016876240375
        pp_potc_distance = 0.0187859783146

        self.assertAlmostEqual(phm_potc.actual_potc.potc_nominal_value, ap_potc_nominal_value, 5)
        self.assertAlmostEqual(phm_potc.actual_potc.potc_sensor_based_value, ap_potc_sensor_based_value, 5)
        self.assertAlmostEqual(phm_potc.actual_potc.potc_time, ap_potc_time, 5)
        self.assertAlmostEqual(phm_potc.actual_potc.potc_distance, ap_potc_distance, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_nominal_value, pp_potc_nominal_value, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_sensor_based_value, pp_potc_sensor_based_value, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_time, pp_potc_time, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_distance, pp_potc_distance, 5)

    def test_2_set_reliability_func(self):
        """Non-dict ("" placeholder) input yields the neutral defaults (POTC 1.0, time/distance 0.0)."""
        empty_dict = ""  # intentionally not a dict: exercises the invalid-input path
        phm_potc = self.rtcn.set_potc_func(empty_dict, empty_dict, 5)

        ap_potc_nominal_value = 1.0
        ap_potc_sensor_based_value = 1.0
        ap_potc_time = 0.0
        ap_potc_distance = 0.0

        pp_potc_nominal_value = 1.0
        pp_potc_sensor_based_value = 1.0
        pp_potc_time = 0.0
        pp_potc_distance = 0.0

        self.assertAlmostEqual(phm_potc.actual_potc.potc_nominal_value, ap_potc_nominal_value, 5)
        self.assertAlmostEqual(phm_potc.actual_potc.potc_sensor_based_value, ap_potc_sensor_based_value, 5)
        self.assertAlmostEqual(phm_potc.actual_potc.potc_time, ap_potc_time, 5)
        self.assertAlmostEqual(phm_potc.actual_potc.potc_distance, ap_potc_distance, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_nominal_value, pp_potc_nominal_value, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_sensor_based_value, pp_potc_sensor_based_value, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_time, pp_potc_time, 5)
        self.assertAlmostEqual(phm_potc.predict_potc.potc_distance, pp_potc_distance, 5)
if __name__ == '__main__':
    # Run the suite through rosunit so results integrate with the ROS test tooling.
    rosunit.unitrun(PKG, NAME, TestRobotTaskCompletionNode, sysargs = "--cov", coverage_packages=[str(PKG)])
| 47.094595 | 243 | 0.755524 |
62b97dff5d8079e036aea8fa8b0be1872e349b28 | 2,065 | py | Python | cms/core/templatetags/frontend_tags.py | dragon-dxw/nhs-ei.website | 6b513040f2cbf5c4359dc0f9431712d74bc6aa02 | [
"MIT"
] | null | null | null | cms/core/templatetags/frontend_tags.py | dragon-dxw/nhs-ei.website | 6b513040f2cbf5c4359dc0f9431712d74bc6aa02 | [
"MIT"
] | 35 | 2021-06-25T10:22:48.000Z | 2022-03-30T11:26:22.000Z | cms/core/templatetags/frontend_tags.py | dxw/nhs-ei.website | 6b513040f2cbf5c4359dc0f9431712d74bc6aa02 | [
"MIT"
] | null | null | null | from django import template
from wagtail.core.models import Page, PageRevision
from cms.core.models import UpperFooterLinks, LowerFooterLinks
from django.contrib.contenttypes.models import ContentType
# Template-tag registry; Django discovers the tags in this module through it.
register = template.Library()
@register.inclusion_tag("tags/breadcrumb.html", takes_context=True)
def breadcrumb(context):
    """Collect the ancestor pages of the current page for the breadcrumb template.

    Returns an empty context when no wagtail Page is present in the template
    context.
    """
    page = context.get("page", None)
    if not isinstance(page, Page):
        return {}
    site = page.get_site()
    ancestors = []
    # Walk upwards via get_parent() until the site root (or tree root) is hit,
    # collecting parents in root-to-leaf order.
    while page.id != site.root_page_id and not page.is_root():
        page = page.get_parent()
        ancestors.insert(0, page)
    return {"breadcrumb_pages": ancestors}
@register.inclusion_tag("tags/footer_links.html", takes_context=True)
def footer_links(context, location):
    """Resolve the footer link set for `location` ('upper' or 'lower').

    Supplies the links queryset, the CSS list class and the visually-hidden
    heading; unknown locations yield None links and empty strings.
    """
    link_sources = {
        "upper": ("nhsie-footer-menu", "Secondary menu links", UpperFooterLinks),
        "lower": ("nhsuk-footer__list", "Support links", LowerFooterLinks),
    }
    links = None
    list_class = ""
    hidden_title = ""
    if location in link_sources:
        list_class, hidden_title, model = link_sources[location]
        links = model.objects.all()
    return {
        "footer_links": links,
        "hidden_title": hidden_title,
        "list_class": list_class,
    }
@register.inclusion_tag("tags/content_type_tag.html", takes_context=True)
def get_content_type_tag(context, page):
    """
    Resolve the human-readable label ("News", "Blog", ...) for *page*'s
    content type, rendered via tags/content_type_tag.html.

    Returns None (an empty template context) for unmapped content types,
    matching the original implicit fall-through.
    """
    # Re-fetch the base Page row so content_type reflects the stored model.
    result_page = Page.objects.get(id=page.id)
    CONTENT_TYPE_LABELS = {
        "post": "News",
        "blog": "Blog",
        "publication": "Publication",
    }
    # Single dict lookup instead of the original `in .keys()` test followed
    # by a second indexing lookup.
    label = CONTENT_TYPE_LABELS.get(result_page.content_type.model)
    if label is not None:
        return {"type": label}
| 30.820896 | 78 | 0.668281 |
ad52c9a9d1e53a09c907da1ec4b6a6f40c2682bd | 3,801 | py | Python | S4/S4 Library/simulation/cas/cas_enums.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/cas/cas_enums.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/cas/cas_enums.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | import enum
# NOTE: `enum.Int` is the game's bundled enum type -- the stdlib `enum`
# module has no `Int` member.
class CASPaintPose(enum.Int):
    # Integer pose codes used by Create-A-Sim painting.
    NONE = 0
    SIT = 1
    BACK = 2
    SIT_UP = 3
    STAND = 4
class CASMode(enum.Int):
    # Top-level Create-A-Sim editing modes.
    BODY = 0
    FACE = 1
    FACE_DETAIL = 2
    NOACTION = 3
class RandomizationMode(enum.Int):
    # Which randomization flow is being run (see CASRandomizeFlag for the
    # per-aspect selection bits).
    SELECTIVE_RANDOMIZATION = 1
    MENU_RANDOMIZATION = 2
    CAREER_OUTFIT_RANDOMIZATION = 3
    CLUB_OUTFIT_RANDOMIZATION_ALL = 4
    CLUB_OUTFIT_RANDOMIZATION_SINGLE = 5
    TEMPLATE_RANDOMIZATION = 6
class CASMenuState:
    # Namespace grouping the enums that describe the CAS menu UI state.
    # The nested enums occupy disjoint bit ranges (items < 2**10, sections
    # are multiples of 2**10, modes of 2**18, types of 2**22), suggesting
    # they are OR-ed together into a single packed state value -- confirm
    # against the client code before relying on this.
    class MenuType(enum.Int):
        NONE = 0
        MENU = 4194304
        SUBMENU = 8388608
        MENUITEM = 12582912
    class MenuMode(enum.Int):
        NONE = 0
        PROFILE = 262144
        CLOTHING = 524288
        OUTFITS = 786432
        ACCESSORIES = 1048576
        FEATURED = 1310720
    class MenuSection(enum.Int):
        NONE = 0
        HEAD = 1024
        BODY = 2048
        UPPERBODY = 3072
        LOWERBODY = 4096
        HAIR = 6144
        ACCESSORIES = 7168
        FACE = 8192
        GENERIC_PELTS = 9216
        GENERIC_BREEDS = 10240
    class MenuItem(enum.Int):
        # Leaf menu entries; value ranges group related items
        # (e.g. 100-106 are outfit categories, 200-202 are pet-specific).
        NONE = 0
        GENDER = 1
        AGE = 2
        VOICE = 3
        TRAITS = 4
        SKIN_DETAILS = 5
        TATTOOS = 6
        WHOLEHEADS = 7
        EYES = 8
        NOSE = 9
        CHEEK = 10
        MOUTH = 11
        JAW = 12
        CHIN = 13
        EARS = 14
        EYEBROWS = 15
        FOREHEAD = 16
        LOOKS = 17
        OWNED = 18
        BODY = 19
        SKINDETAIL_BROW = 20
        SKINDETAIL_CHEEKS = 21
        SKINDETAIL_EYEBAGS = 22
        SKINDETAIL_EYESOCKET = 23
        SKINDETAIL_MOUTH = 24
        TEETH = 25
        FACE_PRESETS = 26
        TAIL = 27
        HAIRSTYLE = 30
        FACIALHAIR = 31
        HATS = 32
        PIERCINGS = 33
        EARRINGS = 34
        GLASSES = 35
        MAKEUP = 36
        CONTACTS = 37
        MAKEUP_EYES = 40
        MAKEUP_CHEEKS = 41
        MAKEUP_LIPS = 42
        MAKEUP_FACEPAINT = 43
        MAKEUP_EYELINER = 44
        TOPS = 50
        BRACELETS = 51
        NECKLACES = 52
        RINGS = 53
        GLOVES = 54
        BOTTOMS = 70
        SHOES = 71
        STOCKINGS = 72
        SOCKS = 73
        TIGHTS = 74
        FULLBODY = 93
        OUTFIT_EVERYDAY = 100
        OUTFIT_FORMAL = 101
        OUTFIT_ATHLETIC = 102
        OUTFIT_MISC = 103
        OUTFIT_PARTY = 104
        OUTFIT_SLEEP = 105
        OUTFIT_WORK = 106
        PELT = 200
        BREED = 201
        FUR = 202
class CASRandomizeFlag(enum.Int):
    # Power-of-two bit flags selecting which aspects to randomize; some
    # bits (4, 1024, ...) are unused/reserved.
    PROFILE_GENDER = 1
    PROFILE_BODYSHAPE = 2
    PROFILE_FACE = 8
    PROFILE_SKINTONE = 16
    PROFILE_HAIR = 32
    PROFILE_FACIALHAIR = 64
    PROFILE_VOICE = 128
    PROFILE_CLOTHING = 256
    PROFILE_ASPIRATION = 512
    PROFILE_TRAITS = 2048
    OCCULT_SKINDETAIL = 4096
    OCCULT_TAIL = 8192
    PROFILE_BREEDSIZE = 16384
    CLOTHING_HAT = 32768
    CLOTHING_TOP = 65536
    CLOTHING_BOTTOM = 131072
    CLOTHING_SHOES = 262144
    CLOTHING_MAKEUP = 524288
    CLOTHING_HAIR = 1048576
    CLOTHING_FACIAL_HAIR = 2097152
    CLOTHING_ACCESSORIES = 4194304
    CLOTHING_FULLBODY = 8388608
    RANDOMIZE_BY_MENUSTATE = 2147483648
class SimRegion(enum.Int):
    # Face/body region identifiers.  Pet-oriented names (MUZZLE, FRONTFEET,
    # BACKFEET) are explicit aliases of the corresponding human members.
    EYES = 0
    NOSE = 1
    MOUTH = 2
    MUZZLE = MOUTH
    CHEEKS = 3
    CHIN = 4
    JAW = 5
    FOREHEAD = 6
    BROWS = 8
    EARS = 9
    HEAD = 10
    FULLFACE = 12
    CHEST = 14
    UPPERCHEST = 15
    NECK = 16
    SHOULDERS = 17
    UPPERARM = 18
    LOWERARM = 19
    HANDS = 20
    FRONTFEET = HANDS
    WAIST = 21
    HIPS = 22
    BELLY = 23
    BUTT = 24
    THIGHS = 25
    LOWERLEG = 26
    FEET = 27
    BACKFEET = FEET
    BODY = 28
    UPPERBODY = 29
    LOWERBODY = 30
    TAIL = 31
    FUR = 32
    FORELEGS = 33
    HINDLEGS = 34
    INVALID = 64
class CASBrandedLogoBackground(enum.Int):
    # Background variant for the branded CAS logo.
    LIGHT = 0
    DARK = 1
| 20.884615 | 41 | 0.556959 |
dad07fa823b276f46be491b369cb6881a892115d | 6,141 | py | Python | sdk/python/pulumi_azure_native/web/v20160801/list_web_app_backup_configuration.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20160801/list_web_app_backup_configuration.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20160801/list_web_app_backup_configuration.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListWebAppBackupConfigurationResult',
'AwaitableListWebAppBackupConfigurationResult',
'list_web_app_backup_configuration',
]
@pulumi.output_type
class ListWebAppBackupConfigurationResult:
    """
    Description of a backup which will be performed.
    """
    def __init__(__self__, backup_request_name=None, backup_schedule=None, databases=None, enabled=None, id=None, kind=None, name=None, storage_account_url=None, type=None):
        """Generated constructor: type-check each field, then store it via pulumi.set."""
        if backup_request_name and not isinstance(backup_request_name, str):
            raise TypeError("Expected argument 'backup_request_name' to be a str")
        pulumi.set(__self__, "backup_request_name", backup_request_name)
        if backup_schedule and not isinstance(backup_schedule, dict):
            raise TypeError("Expected argument 'backup_schedule' to be a dict")
        pulumi.set(__self__, "backup_schedule", backup_schedule)
        if databases and not isinstance(databases, list):
            raise TypeError("Expected argument 'databases' to be a list")
        pulumi.set(__self__, "databases", databases)
        if enabled and not isinstance(enabled, bool):
            raise TypeError("Expected argument 'enabled' to be a bool")
        pulumi.set(__self__, "enabled", enabled)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if storage_account_url and not isinstance(storage_account_url, str):
            raise TypeError("Expected argument 'storage_account_url' to be a str")
        pulumi.set(__self__, "storage_account_url", storage_account_url)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    # Generated read-only accessors: each property pulls the stored value
    # back out with pulumi.get.
    @property
    @pulumi.getter(name="backupRequestName")
    def backup_request_name(self) -> str:
        """
        Name of the backup.
        """
        return pulumi.get(self, "backup_request_name")
    @property
    @pulumi.getter(name="backupSchedule")
    def backup_schedule(self) -> Optional['outputs.BackupScheduleResponse']:
        """
        Schedule for the backup if it is executed periodically.
        """
        return pulumi.get(self, "backup_schedule")
    @property
    @pulumi.getter
    def databases(self) -> Optional[Sequence['outputs.DatabaseBackupSettingResponse']]:
        """
        Databases included in the backup.
        """
        return pulumi.get(self, "databases")
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        """
        True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="storageAccountUrl")
    def storage_account_url(self) -> str:
        """
        SAS URL to the container.
        """
        return pulumi.get(self, "storage_account_url")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableListWebAppBackupConfigurationResult(ListWebAppBackupConfigurationResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes __await__ a generator function; awaiting
        # this object therefore completes immediately, returning a plain
        # (non-awaitable) copy of the result.
        if False:
            yield self
        return ListWebAppBackupConfigurationResult(
            backup_request_name=self.backup_request_name,
            backup_schedule=self.backup_schedule,
            databases=self.databases,
            enabled=self.enabled,
            id=self.id,
            kind=self.kind,
            name=self.name,
            storage_account_url=self.storage_account_url,
            type=self.type)
def list_web_app_backup_configuration(name: Optional[str] = None,
                                      resource_group_name: Optional[str] = None,
                                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppBackupConfigurationResult:
    """
    Description of a backup which will be performed.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    # Invoke the provider function; `.value` unwraps the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20160801:listWebAppBackupConfiguration', __args__, opts=opts, typ=ListWebAppBackupConfigurationResult).value
    return AwaitableListWebAppBackupConfigurationResult(
        backup_request_name=__ret__.backup_request_name,
        backup_schedule=__ret__.backup_schedule,
        databases=__ret__.databases,
        enabled=__ret__.enabled,
        id=__ret__.id,
        kind=__ret__.kind,
        name=__ret__.name,
        storage_account_url=__ret__.storage_account_url,
        type=__ret__.type)
| 35.912281 | 173 | 0.651197 |
109d71b7aaabbe8679f1e6d92adf710150e262ea | 3,222 | py | Python | dm_control/suite/cheetah.py | rdaems/dm_control | c682e626fde95a98b53f67f07b0c1021e4200bb8 | [
"Apache-2.0"
] | 1 | 2022-03-22T11:53:38.000Z | 2022-03-22T11:53:38.000Z | dm_control/suite/cheetah.py | robot0102/dm_control | 3e1736d7e641ab751eb25bfa7622fea71c8da9c6 | [
"Apache-2.0"
] | null | null | null | dm_control/suite/cheetah.py | robot0102/dm_control | 3e1736d7e641ab751eb25bfa7622fea71c8da9c6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cheetah Domain."""
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
# How long the simulation will run, in seconds.
_DEFAULT_TIME_LIMIT = 10
# Running speed above which reward is 1.
_RUN_SPEED = 10
SUITE = containers.TaggedTasks()
def get_model_and_assets():
  """Returns a tuple containing the model XML string and a dict of assets."""
  model_xml = common.read_model('cheetah.xml')
  return model_xml, common.ASSETS
@SUITE.add('benchmarking')
def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
  """Returns the run task."""
  task = Cheetah(random=random)
  physics = Physics.from_xml_string(*get_model_and_assets())
  env_kwargs = environment_kwargs or {}
  return control.Environment(
      physics, task, time_limit=time_limit, **env_kwargs)
class Physics(mujoco.Physics):
  """Physics simulation with additional features for the Cheetah domain."""
  def speed(self):
    """Returns the horizontal speed of the Cheetah."""
    # First component of the torso subtree linear-velocity sensor reading.
    return self.named.data.sensordata['torso_subtreelinvel'][0]
class Cheetah(base.Task):
  """A `Task` to train a running Cheetah."""
  def initialize_episode(self, physics):
    """Sets the state of the environment at the start of each episode."""
    # The indexing below assumes that all joints have a single DOF.
    assert physics.model.nq == physics.model.njnt
    is_limited = physics.model.jnt_limited == 1
    lower, upper = physics.model.jnt_range[is_limited].T
    # Randomize only the range-limited joints, uniformly within their limits.
    physics.data.qpos[is_limited] = self.random.uniform(lower, upper)
    # Stabilize the model before the actual simulation.
    physics.step(nstep=200)
    physics.data.time = 0
    # NOTE(review): _timeout_progress is reset here but never read in this
    # class -- presumably consumed elsewhere; confirm before removing.
    self._timeout_progress = 0
    super().initialize_episode(physics)
  def get_observation(self, physics):
    """Returns an observation of the state, ignoring horizontal position."""
    obs = collections.OrderedDict()
    # Ignores horizontal position to maintain translational invariance.
    obs['position'] = physics.data.qpos[1:].copy()
    obs['velocity'] = physics.velocity()
    return obs
  def get_reward(self, physics):
    """Returns a reward to the agent."""
    # Linear ramp: 0 at rest, rising to 1 at _RUN_SPEED and beyond.
    return rewards.tolerance(physics.speed(),
                             bounds=(_RUN_SPEED, float('inf')),
                             margin=_RUN_SPEED,
                             value_at_margin=0,
                             sigmoid='linear')
| 34.645161 | 78 | 0.693979 |
fc65d14db388fed57bdd7bd0abcf09a0ea124cad | 8,974 | py | Python | algos.py | sameerkumar94/Movie-Recommendation-System | d22e0e8e19a9df7e9fd560bfa67255324fd25d06 | [
"BSD-4-Clause-UC"
] | null | null | null | algos.py | sameerkumar94/Movie-Recommendation-System | d22e0e8e19a9df7e9fd560bfa67255324fd25d06 | [
"BSD-4-Clause-UC"
] | null | null | null | algos.py | sameerkumar94/Movie-Recommendation-System | d22e0e8e19a9df7e9fd560bfa67255324fd25d06 | [
"BSD-4-Clause-UC"
] | null | null | null | import operator
from data import load, load_movielens
import numpy as np
# load the biparte graph: np matrix
user_movie_matrix = load()
def greedy(matrix=None):
    """
    Greedy recommendation pass: for every user (row of the user/movie
    rating matrix) pick the movie (column) carrying that user's highest
    rating.

    matrix -- optional 2-D ratings array; defaults to the module-level
              ``user_movie_matrix`` loaded from disk.

    Returns the list of picked movie (column) indices, one per user.

    The original indexed ``user_movie_matrix`` with a list of rating
    *values* (``user_movie_matrix[user_movie_matrix[row] == maxrating]``),
    which is not a valid lookup and crashed at runtime.
    """
    if matrix is None:
        matrix = user_movie_matrix
    picks = []
    for user_index, row in enumerate(matrix):
        # argmax returns the first column holding the row maximum.
        best_movie = int(np.argmax(row))
        picks.append(best_movie)
        print("user %d -> movie %d (rating %s)" % (user_index, best_movie, row[best_movie]))
    return picks
def bfs_paths(graph, start, goal):
    """
    Breadth-first generator over the bipartite user/movie matrix.

    ``start`` and ``goal`` are movie (column) indices; users are the rows
    connecting them.  Yields a candidate path (list of indices) whenever a
    user who rated the current movie also rated ``goal``.
    """
    # NOTE(review): `flag` is never reassigned, so every dequeued vertex is
    # expanded as a movie column -- including entries queued below, whose
    # indices come from a different axis; confirm intended semantics.
    flag = "product"
    queue = [(start, [start])]
    nonzero_indices = []
    while queue:
        (vertex, path) = queue.pop(0)
        if flag == "product":
            # the children nodes are the vertical of biparte matrix(nonzero)
            column = graph[:, [vertex]]
            nonzero_indices = column.nonzero()
            nonzero_indices = nonzero_indices[0]
            for child_index in nonzero_indices:
                row = graph[child_index]
                nonzero_row_indices = row.nonzero()[0]
                for row_index in nonzero_row_indices:
                    if goal == row_index:
                        yield path + [child_index]
                    else:
                        # NOTE(review): there is no visited set, so nodes are
                        # re-enqueued and the queue can grow without bound on
                        # dense data.
                        queue.append((row_index, path + [row_index]))
def graph_search(biparte_matrix):
    """
    Enumerate BFS path generators from the target user's top-rated movies
    to every movie the target user has not rated yet.

    The first row of ``biparte_matrix`` is the target user; the remaining
    rows are the nearest neighbours.  Returns a dict mapping each unseen
    movie (column) index to a list of ``bfs_paths`` generators, one per
    watched-movie root.
    """
    # the user who is being recommended
    target_vector = biparte_matrix[0]
    # Roots: up to the 10 highest-rated movies the target user has watched
    # (guard against matrices with fewer than 10 columns).
    top_k = min(10, len(target_vector))
    bfs_all_roots = np.argpartition(target_vector, -top_k)[-top_k:]
    bfs_roots = [root for root in bfs_all_roots if target_vector[root] != 0]
    data = {}
    # Movies are columns, so iterate shape[1]; the original used shape[0]
    # (the neighbour-row count) and silently skipped most movies.
    for item_index in range(biparte_matrix.shape[1]):
        if target_vector[item_index] == 0:
            # Fresh list per movie -- the original reused one shared list,
            # so every dict value aliased the same ever-growing object.
            data[item_index] = [bfs_paths(biparte_matrix, root, item_index)
                                for root in bfs_roots]
    return data
def user_base_collabertive_filtering():
    """
    User-based collaborative filtering for the first two users: pick
    neighbours by Euclidean distance, graph-search the joint user/movie
    matrix, and weight each candidate movie by 0.5 ** path_depth.
    """
    # find euclidian distance of first two users w.r.t all users
    # note: distance betwee two same vecotrs is zero
    for user in range(2):
        distances = []
        for anotheruser in range(user_movie_matrix.shape[0]):
            distance = np.linalg.norm(user_movie_matrix[user] - user_movie_matrix[anotheruser] )
            distances.append(distance)
        # get the similar users
        # indices of the similar users: closest 20
        # non zero too
        # NOTE(review): argpartition(..., -20)[-20:] selects the 20 LARGEST
        # distances, i.e. the LEAST similar users; "closest 20" would be
        # np.argpartition(distances, 20)[:20].  Confirm intent before use.
        closest_all_indices=np.argpartition(distances, -20)[-20:]
        closest_indices = []
        for index in closest_all_indices:
            if distances[index] != 0:
                closest_indices.append(index)
        # consider the first five closest neighbors for the recommendation
        closest_indices.insert(0, user)
        biparte_matrix = user_movie_matrix[closest_indices[0:4]]
        # now execute the graph search for recommendation
        paths = graph_search(biparte_matrix)
        # compute the weights of paths
        data= {}
        for item in paths.keys():
            print item
            weight = 0
            allpaths = paths[item]
            for path in allpaths:
                # NOTE(review): graph_search stores *generators*, so
                # len(path) raises TypeError here; the 0.5**depth weighting
                # appears to intend the length of each yielded path.
                depth = len(path)
                weight = weight + (0.5)**depth
            data[item] = weight
        # find the which movie has great weight:
        # NOTE(review): fav_movie is recomputed per user and never returned
        # or printed -- the whole computation is currently discarded.
        fav_movie = max(data.iteritems(), key=operator.itemgetter(1))[0]
def get_movie_avg_rating(id, ratings):
    """
    Return the sum of all ratings recorded for movie ``id``.

    NOTE: despite the name, this has always returned the *total* rating,
    not the average; callers rank movies by it, so the sum is preserved.

    id      -- movie identifier matched against the ``movieId`` column.
    ratings -- pandas DataFrame with ``movieId`` and ``rating`` columns.
    """
    # Vectorised boolean selection instead of the original per-row
    # iterrows() loop (same result, one pass in C).
    return ratings.loc[ratings["movieId"] == id, "rating"].sum()
def get_user_movie_rating(id, ratings, target_user):
    """
    Return the sum of ratings ``target_user`` gave to movie ``id``.

    The original filtered on the hard-coded ``userId == 1`` and ignored the
    ``target_user`` argument entirely; the parameter is now honoured
    (existing callers all pass target_user=1, so their results are
    unchanged).
    """
    mask = (ratings["userId"] == target_user) & (ratings["movieId"] == id)
    return ratings.loc[mask, "rating"].sum()
def content_based_filtering():
    """
    Recommend a movie for user 1 based on the genres of movies they rated.

    Steps:
      1. collect the movies the target user rated
      2. average the user's ratings per genre
      3. pick the genre with the best average
      4. recommend the highest-scoring movie from that genre

    Returns the recommended movie id (0 when nothing matched).

    Fixes over the original: the genre counter was seeded with the rating
    instead of 1 (skewing every first-seen genre's average); the best-movie
    threshold reused a stale loop variable; and the function returned the
    undefined name ``fav_mov_id`` (NameError).
    """
    movies, ratings = load_movielens()
    # considering customer 1
    target_user = 1
    # Movies the target user has rated.
    movie_ids = ratings.loc[ratings["userId"] == target_user, "movieId"].tolist()
    # Accumulate total rating and occurrence count per genre.
    genre_total = {}
    genre_count = {}
    for movie_id in movie_ids:
        user_rating = get_user_movie_rating(movie_id, ratings, target_user=target_user)
        df = movies[movies.movieId == movie_id]
        for index, row in df.iterrows():
            for genre in row["genres"].lower().split('|'):
                genre_total[genre] = genre_total.get(genre, 0) + user_rating
                genre_count[genre] = genre_count.get(genre, 0) + 1
    # Convert totals to per-genre averages in place.
    for genre in genre_total:
        genre_total[genre] = genre_total[genre] / float(genre_count[genre])
    fav_genre = max(genre_total.items(), key=operator.itemgetter(1))[0]
    # get the best movie from that genre (highest total rating):
    fav_movie_id = 0
    best_rating = 0
    for index, row in movies.iterrows():
        if fav_genre in row["genres"].lower().split('|'):
            movie_rating = get_movie_avg_rating(row["movieId"], ratings)
            if movie_rating > best_rating:
                fav_movie_id = row["movieId"]
                best_rating = movie_rating
    print("content based recommended movie is:")
    print(movies[movies.movieId == fav_movie_id])
    return fav_movie_id
def evaluation():
    """
    Run both recommenders, then score the neighbourhood built for the
    first users by summing the Euclidean distances between each neighbour
    row and the target user's rating vector (lower = tighter fit).

    Fixes over the original: ``np.linalg.norm(row, target)`` passed the
    target vector as the ``ord`` argument and raised; neighbour selection
    now takes the 20 *smallest* distances rather than the 20 largest.
    """
    # Return values are currently unused; kept for parity with callers
    # that may start consuming them.
    fav_mov_id_con = content_based_filtering()
    fav_mov_id_col = user_base_collabertive_filtering()
    biparte_matrix = user_movie_matrix[0:1]
    for user in range(2):
        distances = []
        for anotheruser in range(user_movie_matrix.shape[0]):
            distances.append(np.linalg.norm(user_movie_matrix[user] - user_movie_matrix[anotheruser]))
        # 20 smallest distances = most similar users; dropping zero
        # distances excludes the user itself.
        closest_all_indices = np.argpartition(distances, 20)[:20]
        closest_indices = [index for index in closest_all_indices
                           if distances[index] != 0]
        closest_indices.insert(0, user)
        biparte_matrix = user_movie_matrix[closest_indices[0:4]]
    # Score the last neighbourhood against the target user's vector.
    target_col_vector = user_movie_matrix[0]
    target_con_vector = user_movie_matrix[0]
    score_col = 0
    score_con = 0
    for row in biparte_matrix:
        score_col = score_col + np.linalg.norm(row - target_col_vector)
        score_con = score_con + np.linalg.norm(row - target_con_vector)
    print("The collaberative filtering sum of distances is: %s" % score_col)
    print("The contend based filtering sum of distances is: %s" % score_con)
evaluation()  # NOTE(review): runs the whole pipeline at import time; gate behind __main__ if undesired.
| 26.62908 | 151 | 0.589592 |
a1fd1ce42fb1d4d000579449981172c470f7b3b5 | 4,348 | py | Python | tests/components/fritz/__init__.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | tests/components/fritz/__init__.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/fritz/__init__.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z | """Tests for the AVM Fritz!Box integration."""
from unittest import mock
from homeassistant.components.fritz.const import DOMAIN
from homeassistant.const import (
CONF_DEVICES,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
# Minimal config payload (one fake FRITZ!Box device) used to set up the
# integration in tests.
MOCK_CONFIG = {
    DOMAIN: {
        CONF_DEVICES: [
            {
                CONF_HOST: "fake_host",
                CONF_PORT: "1234",
                CONF_PASSWORD: "fake_pass",
                CONF_USERNAME: "fake_user",
            }
        ]
    }
}
class FritzConnectionMock:  # pylint: disable=too-few-public-methods
    """FritzConnection mocking."""
    # Canned call_action() responses keyed by (service, action).
    FRITZBOX_DATA = {
        ("WANIPConn:1", "GetStatusInfo"): {
            "NewConnectionStatus": "Connected",
            "NewUptime": 35307,
        },
        ("WANIPConnection:1", "GetStatusInfo"): {},
        ("WANCommonIFC:1", "GetCommonLinkProperties"): {
            "NewLayer1DownstreamMaxBitRate": 10087000,
            "NewLayer1UpstreamMaxBitRate": 2105000,
            "NewPhysicalLinkStatus": "Up",
        },
        ("WANCommonIFC:1", "GetAddonInfos"): {
            "NewByteSendRate": 3438,
            "NewByteReceiveRate": 67649,
            "NewTotalBytesSent": 1712232562,
            "NewTotalBytesReceived": 5221019883,
        },
        ("LANEthernetInterfaceConfig:1", "GetStatistics"): {
            "NewBytesSent": 23004321,
            "NewBytesReceived": 12045,
        },
        ("DeviceInfo:1", "GetInfo"): {
            "NewSerialNumber": "abcdefgh",
            "NewName": "TheName",
            "NewModelName": "FRITZ!Box 7490",
        },
    }
    # List-valued responses for indexed calls: the first keyword argument
    # of call_action() selects the list entry (see _side_effect_call_action).
    FRITZBOX_DATA_INDEXED = {
        ("X_AVM-DE_Homeauto:1", "GetGenericDeviceInfos"): [
            {
                "NewSwitchIsValid": "VALID",
                "NewMultimeterIsValid": "VALID",
                "NewTemperatureIsValid": "VALID",
                "NewDeviceId": 16,
                "NewAIN": "08761 0114116",
                "NewDeviceName": "FRITZ!DECT 200 #1",
                "NewTemperatureOffset": "0",
                "NewSwitchLock": "0",
                "NewProductName": "FRITZ!DECT 200",
                "NewPresent": "CONNECTED",
                "NewMultimeterPower": 1673,
                "NewHkrComfortTemperature": "0",
                "NewSwitchMode": "AUTO",
                "NewManufacturer": "AVM",
                "NewMultimeterIsEnabled": "ENABLED",
                "NewHkrIsTemperature": "0",
                "NewFunctionBitMask": 2944,
                "NewTemperatureIsEnabled": "ENABLED",
                "NewSwitchState": "ON",
                "NewSwitchIsEnabled": "ENABLED",
                "NewFirmwareVersion": "03.87",
                "NewHkrSetVentilStatus": "CLOSED",
                "NewMultimeterEnergy": 5182,
                "NewHkrComfortVentilStatus": "CLOSED",
                "NewHkrReduceTemperature": "0",
                "NewHkrReduceVentilStatus": "CLOSED",
                "NewHkrIsEnabled": "DISABLED",
                "NewHkrSetTemperature": "0",
                "NewTemperatureCelsius": "225",
                "NewHkrIsValid": "INVALID",
            },
            {},
        ],
        # NOTE(review): key is "Hosts1", not "Hosts:1" -- confirm the
        # missing colon is intentional.
        ("Hosts1", "GetGenericHostEntry"): [
            {
                "NewSerialNumber": 1234,
                "NewName": "TheName",
                "NewModelName": "FRITZ!Box 7490",
            },
            {},
        ],
    }
    MODELNAME = "FRITZ!Box 7490"
    def __init__(self):
        """Init mocking class."""
        type(self).modelname = mock.PropertyMock(return_value=self.MODELNAME)
        self.call_action = mock.Mock(side_effect=self._side_effect_call_action)
        type(self).action_names = mock.PropertyMock(
            side_effect=self._side_effect_action_names
        )
        # Advertise every mocked service once, as a name -> None mapping.
        services = {
            srv: None
            for srv, _ in list(self.FRITZBOX_DATA) + list(self.FRITZBOX_DATA_INDEXED)
        }
        type(self).services = mock.PropertyMock(side_effect=[services])
    def _side_effect_call_action(self, service, action, **kwargs):
        # Indexed lookups use the first keyword argument's value as the
        # list index; plain lookups hit FRITZBOX_DATA directly.
        if kwargs:
            index = next(iter(kwargs.values()))
            return self.FRITZBOX_DATA_INDEXED[(service, action)][index]
        return self.FRITZBOX_DATA[(service, action)]
    def _side_effect_action_names(self):
        return list(self.FRITZBOX_DATA) + list(self.FRITZBOX_DATA_INDEXED)
| 33.96875 | 85 | 0.538638 |
967dff7666d3d1093c53af046a2bfaacf64e93d6 | 3,562 | py | Python | bsd2/vagrant-ansible/ansible/plugins/inventory/openshift.py | dlab-berkeley/collaboratool-archive | fa474e05737f78e628d6b9398c58cf7c966a7bba | [
"Apache-2.0"
] | 1 | 2016-01-20T14:36:02.000Z | 2016-01-20T14:36:02.000Z | bsd2/vagrant-ansible/ansible/plugins/inventory/openshift.py | dlab-berkeley/collaboratool-archive | fa474e05737f78e628d6b9398c58cf7c966a7bba | [
"Apache-2.0"
] | null | null | null | bsd2/vagrant-ansible/ansible/plugins/inventory/openshift.py | dlab-berkeley/collaboratool-archive | fa474e05737f78e628d6b9398c58cf7c966a7bba | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of Openshift gears using the REST interface
- this permit to reuse playbook to setup a Openshift gear
version_added: None
author: Michael Scherer
'''
import urllib2
try:
import json
except ImportError:
import simplejson as json
import os
import os.path
import sys
import ConfigParser
import StringIO
configparser = None
def get_from_rhc_config(variable):
    """
    Look up *variable* in the user's rhc config (~/.openshift/express.conf),
    caching the parsed file in the module-level ``configparser``.

    Returns the value, or None when the option (or the file) is absent.
    """
    global configparser
    CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
    if os.path.exists(CONF_FILE):
        if not configparser:
            # The file has no section header; prepend a fake [root] section
            # so ConfigParser (Python 2 flavour) accepts it.
            ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
            configparser = ConfigParser.SafeConfigParser()
            configparser.readfp(StringIO.StringIO(ini_str))
        try:
            return configparser.get('root', variable)
        except ConfigParser.NoOptionError:
            return None
def get_config(env_var, config_var):
    """
    Resolve a setting from the environment variable *env_var*, falling
    back to *config_var* in the user's rhc config; exit with an
    Ansible-style failure message when neither yields a value.
    """
    result = os.getenv(env_var)
    if not result:
        result = get_from_rhc_config(config_var)
    if not result:
        # Parenthesised single-argument print behaves identically on
        # Python 2 and 3; the original statement form was Python-2-only.
        print("failed=True msg='missing %s'" % env_var)
        sys.exit(1)
    return result
def get_json_from_api(url):
    """GET *url* (broker API v1.5, JSON) and return the decoded 'data' payload."""
    req = urllib2.Request(url, None, {'Accept': 'application/json; version=1.5'})
    response = urllib2.urlopen(req)
    return json.loads(response.read())['data']
def passwd_setup(top_level_url, username, password):
    """Install a global urllib2 opener performing HTTP basic auth against
    *top_level_url*, so later urlopen() calls authenticate automatically."""
    # create a password manager
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, top_level_url, username, password)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)
# Resolve credentials and broker host from env vars, falling back to the
# user's rhc config file.
username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
passwd_setup(broker_url, username, password)
# First domain's applications become the inventory groups.
response = get_json_from_api(broker_url + '/domains')
response = get_json_from_api("%s/domains/%s/applications" %
                             (broker_url, response[0]['id']))
result = {}
for app in response:
    # ssh://520311404832ce3e570000ff@blog-johndoe.example.org
    # [6:] strips the leading 'ssh://'; user and host split at '@'.
    (user, host) = app['ssh_url'][6:].split('@')
    app_name = host.split('-')[0]
    result[app_name] = {}
    result[app_name]['hosts'] = []
    result[app_name]['hosts'].append(host)
    result[app_name]['vars'] = {}
    result[app_name]['vars']['ansible_ssh_user'] = user
# Ansible dynamic-inventory protocol: --list dumps groups, --host dumps
# per-host vars (none here).
if len(sys.argv) == 2 and sys.argv[1] == '--list':
    print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    print json.dumps({})
else:
    print "Need a argument, either --list or --host <host>"
| 30.444444 | 95 | 0.703257 |
b20ff94265333dd074308d3f32c149d264b0d70e | 180 | py | Python | part22/blog/urls.py | yllew36/WellyGI | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | [
"Apache-2.0"
] | 1 | 2019-11-15T08:02:45.000Z | 2019-11-15T08:02:45.000Z | part22/blog/urls.py | yllew36/WellyGI | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | [
"Apache-2.0"
] | null | null | null | part22/blog/urls.py | yllew36/WellyGI | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the blog app: map each URL prefix to its view function.
# (Repairs the truncated/fused list terminator from the damaged source.)
urlpatterns = [
    path('', views.index),
    path('jurnal/', views.jurnal),
    path('berita/', views.berita),
    path('gosip/', views.gosip),
]
a14c67b49bc95f32df18d27afbc27236948820fc | 5,687 | py | Python | seisflows/tools/legacy/segy/headers.py | zhengjing8628/Seisflows_SRVM | af2734404757d907b88e5b58e8175bcf2b1967e6 | [
"BSD-2-Clause"
] | 2 | 2021-05-12T03:28:31.000Z | 2021-12-08T14:43:20.000Z | seisflows/tools/legacy/segy/headers.py | zhengjing8628/Seisflows_SRVM | af2734404757d907b88e5b58e8175bcf2b1967e6 | [
"BSD-2-Clause"
] | null | null | null | seisflows/tools/legacy/segy/headers.py | zhengjing8628/Seisflows_SRVM | af2734404757d907b88e5b58e8175bcf2b1967e6 | [
"BSD-2-Clause"
] | 1 | 2020-04-16T08:38:49.000Z | 2020-04-16T08:38:49.000Z |
SEGY_TAPE_LABEL = [
['uchar', 4, 0, 'StorageUnitSeqNumber'],
['uchar', 5, 4, 'SEGYRevision'],
['uchar', 6, 9, 'StorageUnitStructure'],
['uchar', 4, 15, 'BindingEdition'],
['uchar', 10, 19, 'MaxBlockSize'],
['uchar', 10, 29, 'ProducerOrganizationCode'],
['uchar', 11, 39, 'CreationDate'],
['uchar', 12, 50, 'SerialNumber'],
['uchar', 6, 62, 'Reserved'],
['uchar', 60, 68, 'StorageSetIdentifier']]
SEGY_BINARY_HEADER = [
['int32', 1, 0, 'JobID'],
['int32', 1, 4, 'LineNumber'],
['int32', 1, 8, 'ReelNumber'],
['int16', 1, 12, 'DataTracePerEnsemble'],
['int16', 1, 14, 'AuxillaryTracePerEnsemble'],
['int16', 1, 16, 'SamplingTime'],
['int16', 1, 18, 'OriginalSamplingTime'],
['int16', 1, 20, 'NumberSamples'],
['int16', 1, 22, 'OriginalNumberSamples'],
['int16', 1, 24, 'TraceMachineFormatCode'],
['int16', 1, 26, 'EnsembleFold'],
['int16', 1, 28, 'TraceSorting'],
['int16', 1, 30, 'VerticalSumCode'],
['int16', 1, 32, 'SweepFreqencyStart'],
['int16', 1, 34, 'SweepFrequencyEnd'],
['int16', 1, 36, 'SweepLength'],
['int16', 1, 38, 'SweepType'],
['int16', 1, 40, 'SweepChannel'],
['int16', 1, 42, 'SweepTaperLengthStart'],
['int16', 1, 44, 'SweepTaperLengthEnd'],
['int16', 1, 46, 'TaperType'],
['int16', 1, 48, 'CorrelatedTraces'],
['int16', 1, 50, 'BinaryGain'],
['int16', 1, 52, 'AmplitudeRecovery'],
['int16', 1, 54, 'MeasurementSystem'],
['int16', 1, 56, 'ImpulsePolarity'],
['int16', 1, 58, 'VibratoryPolarity'],
['int16', 120, 60, 'Unassigned1'],
['int16', 1, 300, 'RevisionNumber'],
['int16', 1, 302, 'FixedLengthTraceFlag'],
['int16', 1, 304, 'NumberExtendedTextualHeaders'],
['int16', 47, 306, 'Unassigned2']]
SEGY_TRACE_HEADER = [
['int32', 1, 0, 'TraceSequenceLine'],
['int32', 1, 4, 'TraceSequenceFile'],
['int32', 1, 8, 'OriginalFieldRecord'],
['int32', 1, 12, 'TraceNumberInField'],
['int32', 1, 16, 'EnergySourceNumber'],
['int32', 1, 20, 'EnsembleNumber'],
['int32', 1, 24, 'EnsembleTraceNumber'],
['int16', 1, 28, 'TraceIdentificationCode'],
['int16', 1, 30, 'NumberVertSummedTraces'],
['int16', 1, 32, 'NumberHorizSummedTraces'],
['int16', 1, 34, 'DataUse'],
['int32', 1, 36, 'ReceiverGroupOffset'],
['int32', 1, 40, 'ReceiverGroupElevation'],
['int32', 1, 44, 'SurfaceSourceElevation'],
['int32', 1, 48, 'SurfaceSourceDepth'],
['int32', 1, 52, 'ReceiverDatumElevation'],
['int32', 1, 56, 'SourceDatumElevation'],
['int32', 1, 60, 'SourceWaterDepth'],
['int32', 1, 64, 'GroupWaterDepth'],
['int16', 1, 68, 'ElevationOrDepthScalar'],
['int16', 1, 70, 'CoordinateScalar'],
['int32', 1, 72, 'SourceX'],
['int32', 1, 76, 'SourceY'],
['int32', 1, 80, 'GroupX'],
['int32', 1, 84, 'GroupY'],
['int16', 1, 88, 'CoordinateUnits'],
['int16', 1, 90, 'WeatheringVelocity'],
['int16', 1, 92, 'SubweatheringVelocity'],
['int16', 1, 94, 'SourceUpholeTime_ms'],
['int16', 1, 96, 'GroupUpholeTime_ms'],
['int16', 1, 98, 'SourceStaticCorrection_ms'],
['int16', 1, 100, 'GroupStaticCorrection_ms'],
['int16', 1, 102, 'TotalStaticApplied_ms'],
['int16', 1, 104, 'ALagTime_ms'],
['int16', 1, 106, 'BLagTime_ms'],
['int16', 1, 108, 'RecordingDelay_ms'],
['int16', 1, 110, 'MuteTimeStart_ms'],
['int16', 1, 112, 'MuteTimeEnd_ms'],
['int16', 1, 114, 'NumberSamples'],
['int16', 1, 116, 'SampleInterval_ms'],
['int16', 1, 118, 'GainType'],
['int16', 1, 120, 'GainConstant_dB'],
['int16', 1, 122, 'InitialGain_dB'],
['int16', 1, 124, 'Correlated'],
['int16', 1, 126, 'SweepFrequencyStart_Hz'],
['int16', 1, 128, 'SweepFrequencyEnd_Hz'],
['int16', 1, 130, 'SweepLength_ms'],
['int16', 1, 132, 'SweepType'],
['int16', 1, 134, 'SweepTaperLengthStart_ms'],
['int16', 1, 136, 'SweepTaperLengthEnd_ms'],
['int16', 1, 138, 'TaperType'],
['int16', 1, 140, 'AliasFilter_Hz'],
['int16', 1, 142, 'AliasSlope_dBperOctave'],
['int16', 1, 144, 'NotchFilterFrequency_Hz'],
['int16', 1, 146, 'NotchFilterSlope_dBperOctave'],
['int16', 1, 148, 'LowCutFrequency_Hz'],
['int16', 1, 150, 'HighCutFrequency_Hz'],
['int16', 1, 152, 'LowCutSlope_dBperOctave'],
['int16', 1, 154, 'HighCutSlope_dBperOctave'],
['int16', 1, 156, 'Year'],
['int16', 1, 158, 'Day'],
['int16', 1, 160, 'Hour'],
['int16', 1, 162, 'Minute'],
['int16', 1, 164, 'Second'],
['int16', 1, 166, 'TimeBasisCode'],
['int16', 1, 168, 'WeightingFactor'],
['int16', 1, 170, 'GeophoneGroupNumberRoll1'],
['int16', 1, 172, 'GeophoneGroupNumberFirstTraceOriginalRecord'],
['int16', 1, 174, 'GeophoneGroupNumberLastTraceOriginalRecord'],
['int16', 1, 176, 'GapSize'],
['int16', 1, 178, 'OverTravel'],
['int32', 1, 180, 'EnsembleX'],
['int32', 1, 184, 'EnsembleY'],
['int32', 1, 188, 'InLineNumber3D'],
['int32', 1, 192, 'CrossLineNumber3D'],
['int32', 1, 196, 'ShotPointNumber'],
['int16', 1, 200, 'ShotPointScalar'],
['int16', 1, 202, 'TraceMeasurementUnit'],
['int32', 1, 204, 'TransductionConstantMantissa'],
['int16', 1, 208, 'TransductionConstantExponent'],
['int16', 1, 210, 'TranductionUnits'],
['int16', 1, 212, 'TraceIdentifier'],
['int16', 1, 214, 'TimeScalar'],
['int16', 1, 216, 'SourceTypeOrientation'],
['bit48', 1, 218, 'SourceEnergyDirection'],
['int32', 1, 224, 'SourceMeasurementMantissa'],
['int16', 1, 228, 'SourceMeasurementExponent'],
['int16', 1, 230, 'SourceMeasurementUnit'],
['int16', 4, 232, 'Unassigned']]
| 41.210145 | 69 | 0.582205 |
2c8d3a3f1b4209a6973fa9b52c88d4eb6c3ebc49 | 2,664 | py | Python | sss/kcenter_sss.py | gongzhimin/ActiveThief-attack-MLaaS | 542ac43a94ec8b96b4dffc101f561e85ce564865 | [
"MIT"
] | 2 | 2022-03-07T13:45:43.000Z | 2022-03-07T13:45:52.000Z | sss/kcenter_sss.py | gongzhimin/ActiveThief-attack-MLaaS | 542ac43a94ec8b96b4dffc101f561e85ce564865 | [
"MIT"
] | null | null | null | sss/kcenter_sss.py | gongzhimin/ActiveThief-attack-MLaaS | 542ac43a94ec8b96b4dffc101f561e85ce564865 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2019 Soham Pal, Yash Gupta, Aditya Shukla, Aditya Kanade,
Shirish Shevade, Vinod Ganapathy. Indian Institute of Science.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from base_sss import SubsetSelectionStrategy
import numpy as np
import math
from cfg import config, cfg
from utils.kcenter import KCenter
import tensorflow as tf
class KCenterGreedyApproach(SubsetSelectionStrategy):
    """Greedy k-center subset selection.

    Repeatedly picks the candidate point that is farthest from the
    current cluster set (max-min distance), growing the cluster one
    point per iteration. Distances are computed in batches through a
    TensorFlow 1.x session using the project's KCenter graph.
    """

    def __init__(self, size, Y_vec, init_cluster):
        # init_cluster: initial cluster centers (array-like); Y_vec and
        # size are handled by the base strategy.
        self.init_cluster = init_cluster
        super(KCenterGreedyApproach, self).__init__(size, Y_vec)

    def get_subset(self):
        """Return the indices of the `size` points chosen greedily."""
        X = self.Y_vec           # candidate pool
        Y = self.init_cluster    # growing set of selected centers
        # Large distance-matrix batches: 100x the configured batch size.
        batch_size = 100*cfg.batch_size
        n_batches = int(math.ceil(len(X)/float(batch_size)))
        m = KCenter()
        points = []
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            for _ in range(self.size):
                p = []  # per-batch argmax of min-distance
                q = []  # per-batch max of min-distance
                for i in range(n_batches):
                    start = i*batch_size
                    end = i*batch_size + batch_size
                    X_b = X[start:end]
                    # D_min_argmax/D_min_max: index and value of the batch
                    # point farthest from its nearest center in Y.
                    D_argmax_val, D_min_max_val = sess.run( [m.D_min_argmax, m.D_min_max], feed_dict={ m.A: X_b, m.B:Y } )
                    p.append(D_argmax_val)
                    q.append(D_min_max_val)
                # Pick the global farthest point across batches and add it
                # to the cluster for the next iteration.
                b_indx = np.argmax(q)
                indx = b_indx*batch_size + p[b_indx]
                Y = np.vstack([Y, X[indx]])
                points.append(indx)
        return points
d6e49c38e5c0a167b6621c6f6cc72cb78c2206eb | 10,986 | py | Python | ion/agents/platform/rsn/simulator/oms_simulator.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 3 | 2016-09-20T09:50:06.000Z | 2018-08-10T01:41:38.000Z | ion/agents/platform/rsn/simulator/oms_simulator.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | null | null | null | ion/agents/platform/rsn/simulator/oms_simulator.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 2 | 2016-03-16T22:25:49.000Z | 2016-11-26T14:54:21.000Z | #!/usr/bin/env python
"""
@package ion.agents.platform.rsn.simulator.oms_simulator
@file ion/agents/platform/rsn/simulator/oms_simulator.py
@author Carlos Rueda
@brief OMS simulator
"""
__author__ = 'Carlos Rueda'
import time
import ntplib
from ion.agents.platform.responses import NormalResponse, InvalidResponse
from ion.agents.platform.rsn.oms_client import CIOMSClient
from ion.agents.platform.rsn.simulator.oms_events import EventNotifier
from ion.agents.platform.rsn.simulator.oms_events import EventGenerator
from ion.agents.platform.rsn.simulator.oms_values import generate_values
from ion.agents.platform.util.network_util import NetworkUtil
from ion.agents.platform.rsn.simulator.logger import Logger
log = Logger.get_logger()
class CIOMSSimulator(CIOMSClient):
    """
    Implementation of CIOMSClient for testing purposes.
    It adds some methods intended to be used by tests (they are prefixed with
    "x_" and are "public" to make them visible through the xml/rpc mechanism).
    """

    # _raise_exception: see x_disable() and x_enable()
    _raise_exception = False

    @classmethod
    def x_disable(cls):
        """
        Makes any subsequent call to any public API operation to raise an
        exception. This allows to test for the "lost connection" case.
        """
        cls._raise_exception = True

    @classmethod
    def x_enable(cls):
        """
        Cancels the effect of x_disable() (so the simulator continues to
        operate normally).
        """
        cls._raise_exception = False

    def __init__(self, yaml_filename='ion/agents/platform/rsn/simulator/network.yml',
                 events_filename='ion/agents/platform/rsn/simulator/events.yml'):
        # Fix: the original used the Python-2-only builtin file() and never
        # closed the handle; a context-managed open() addresses both.
        with open(yaml_filename) as yaml_file:
            self._ndef = NetworkUtil.deserialize_network_definition(yaml_file)
        self._pnodes = self._ndef.pnodes

        # note that all ports are implicitly init'ed with state='OFF'
        self._portState = {}

        # registered event listeners: {url: reg_time, ...},
        # where reg_time is the NTP time of (latest) registration.
        # NOTE: for simplicity, we don't keep info about unregistered listeners
        self._reg_event_listeners = {}

        self._event_notifier = EventNotifier()
        # EventGenerator only kept while there are listeners registered
        self._event_generator = None
        self._events_filename = events_filename

    def _start_event_generator_if_listeners(self):
        # Lazily create/start the generator once at least one listener exists.
        if not self._event_generator and len(self._reg_event_listeners):
            self._event_generator = EventGenerator(self._event_notifier, self._events_filename)
            self._event_generator.start()
            log.debug("event generator started (%s listeners registered)",
                      len(self._reg_event_listeners))

    def _stop_event_generator_if_no_listeners(self):
        # Tear the generator down when the last listener unregisters.
        if self._event_generator and not len(self._reg_event_listeners):
            log.debug("event generator stopping (no listeners registered)")
            self._event_generator.stop()
            self._event_generator = None

    def _deactivate_simulator(self):
        """
        Special method only intended to be called for when the simulator is run
        in "embedded" form. See test_oms_simulator for the particular case.
        """
        log.info("_deactivate_simulator called. event_generator=%s; %s listeners registered",
                 self._event_generator, len(self._reg_event_listeners))
        if self._event_generator:
            self._event_generator.stop()
            self._event_generator = None

    def _enter(self):
        """
        Called when entering any of the CI-OMS interface methods.
        """
        self._dispatch_synthetic_exception()

    def _dispatch_synthetic_exception(self):
        """
        Called by all CI-OMS interface methods to dispatch the
        simulation of connection lost (see x_disable()).
        """
        if self._raise_exception:
            msg = "(LC) synthetic exception from CIOMSSimulator"
            log.debug(msg)
            raise Exception(msg)

    def ping(self):
        """Basic liveness check."""
        self._enter()
        return "pong"

    def get_platform_metadata(self, platform_id):
        """Return {platform_id: metadata-dict} or an invalid-id response."""
        self._enter()
        if platform_id not in self._pnodes:
            return {platform_id: InvalidResponse.PLATFORM_ID}

        pnode = self._pnodes[platform_id]

        # TODO capture/include appropriate elements
        md = {}
        if pnode.name:
            md['name'] = pnode.name
        if pnode.parent:
            md['parent_platform_id'] = pnode.parent.platform_id

        return {platform_id: md}

    def get_platform_attribute_values(self, platform_id, req_attrs):
        """Return generated values for each (attrName, from_time) requested."""
        self._enter()
        if platform_id not in self._pnodes:
            return {platform_id: InvalidResponse.PLATFORM_ID}

        # complete time window until current time:
        to_time = ntplib.system_to_ntp_time(time.time())
        attrs = self._pnodes[platform_id].attrs
        vals = {}
        for attrName, from_time in req_attrs:
            if attrName in attrs:
                attr = attrs[attrName]
                values = generate_values(platform_id, attr.attr_id, from_time, to_time)
                vals[attrName] = values
                # Note: values == [] if there are no values.
            else:
                vals[attrName] = InvalidResponse.ATTRIBUTE_ID

        return {platform_id: vals}

    def _set_port_state(self, platform_id, port_id, state):
        # Port states are keyed by "platform_id port_id".
        pp_id = '%s %s' % (platform_id, port_id)
        self._portState[pp_id] = state

    def _get_port_state(self, platform_id, port_id):
        # Unknown ports default to 'OFF' (matches implicit init).
        pp_id = '%s %s' % (platform_id, port_id)
        return self._portState.get(pp_id, 'OFF')

    def get_platform_ports(self, platform_id):
        """Return {platform_id: {port_id: {'state': ...}, ...}}."""
        self._enter()
        if platform_id not in self._pnodes:
            return {platform_id: InvalidResponse.PLATFORM_ID}

        ports = {}
        for port_id in self._pnodes[platform_id].ports:
            state = self._get_port_state(platform_id, port_id)
            ports[port_id] = {'state': state}

        return {platform_id: ports}

    def turn_on_platform_port(self, platform_id, port_id, src):
        """Turn a port ON; idempotent (reports 'already on' if applicable)."""
        self._enter()
        if platform_id not in self._pnodes:
            return {platform_id: InvalidResponse.PLATFORM_ID}
        if port_id not in self._pnodes[platform_id].ports:
            return {platform_id: {port_id: InvalidResponse.PORT_ID}}

        state = self._get_port_state(platform_id, port_id)
        if state == "ON":
            result = NormalResponse.PORT_ALREADY_ON
            log.debug("port %s in platform %s already turned on." % (port_id, platform_id))
        else:
            self._set_port_state(platform_id, port_id, 'ON')
            result = NormalResponse.PORT_TURNED_ON
            log.info("port %s in platform %s turned on." % (port_id, platform_id))

        return {platform_id: {port_id: result}}

    def turn_off_platform_port(self, platform_id, port_id, src):
        """Turn a port OFF; idempotent (reports 'already off' if applicable)."""
        self._enter()
        if platform_id not in self._pnodes:
            return {platform_id: InvalidResponse.PLATFORM_ID}
        if port_id not in self._pnodes[platform_id].ports:
            return {platform_id: {port_id: InvalidResponse.PORT_ID}}

        state = self._get_port_state(platform_id, port_id)
        if state == "OFF":
            result = NormalResponse.PORT_ALREADY_OFF
            log.debug("port %s in platform %s already turned off." % (port_id, platform_id))
        else:
            self._set_port_state(platform_id, port_id, 'OFF')
            result = NormalResponse.PORT_TURNED_OFF
            log.info("port %s in platform %s turned off." % (port_id, platform_id))

        return {platform_id: {port_id: result}}

    def set_over_current(self, platform_id, port_id, ma, us, src):
        """Accept an over-current setting (no-op in the simulator)."""
        self._enter()
        if platform_id not in self._pnodes:
            return {platform_id: InvalidResponse.PLATFORM_ID}
        if port_id not in self._pnodes[platform_id].ports:
            return {platform_id: {port_id: InvalidResponse.PORT_ID}}

        # OK, but we don't do anything else here, just accept.
        result = NormalResponse.PORT_SET_OVER_CURRENT
        return {platform_id: {port_id: result}}

    def _validate_event_listener_url(self, url):
        """
        Does a basic, static validation of the url.
        """
        # TODO implement it; for now always returning True
        return True

    def register_event_listener(self, url):
        """Register *url* as an event listener; returns {url: reg_time}."""
        self._enter()
        # NOTE: event_types was previously a parameter to this operation. To
        # minimize changes in the code, I introduced an 'ALL' event type to
        # be used here explicitly.
        event_type = 'ALL'
        log.debug("register_event_listener called: url=%r", url)

        if not self._validate_event_listener_url(url):
            return {url: InvalidResponse.EVENT_LISTENER_URL}

        if url not in self._reg_event_listeners:
            # create entry for this new url
            reg_time = self._event_notifier.add_listener(url, event_type)
            self._reg_event_listeners[url] = reg_time
            log.info("registered url=%r", url)
        else:
            # already registered: re-registration returns the original time.
            reg_time = self._reg_event_listeners[url]

        self._start_event_generator_if_listeners()

        return {url: reg_time}

    def unregister_event_listener(self, url):
        """Unregister *url*; returns {url: unreg_time} ({url: 0} if unknown)."""
        self._enter()
        # NOTE: see register_event_listener about the fixed 'ALL' event type.
        event_type = 'ALL'
        log.debug("unregister_event_listener called: url=%r", url)

        if url not in self._reg_event_listeners:
            return {url: 0}

        #
        # registered, so remove it
        #
        unreg_time = self._event_notifier.remove_listener(url, event_type)
        del self._reg_event_listeners[url]
        log.info("unregistered url=%r", url)

        self._stop_event_generator_if_no_listeners()

        return {url: unreg_time}

    def get_registered_event_listeners(self):
        """Return the {url: reg_time} map of currently-registered listeners."""
        self._enter()
        return self._reg_event_listeners

    def generate_test_event(self, event):
        """Immediately notify listeners of a copy of *event* (marked as test).

        Returns True if listeners were notified, False if none registered.
        """
        self._enter()
        if self._event_generator:  # there are listeners registered.
            # copy event and include the additional fields:
            event_instance = event.copy()
            event_instance['test_event'] = True
            timestamp = ntplib.system_to_ntp_time(time.time())
            if 'timestamp' not in event_instance:
                event_instance['timestamp'] = timestamp
            if 'first_time_timestamp' not in event_instance:
                event_instance['first_time_timestamp'] = timestamp

            # simply notify listeners right away
            self._event_notifier.notify(event_instance)
            return True
        else:  # there are *no* listeners registered.
            return False
83b6fcb0b35894378534e93b9f7aa30984f97f0f | 803 | py | Python | Code/Python/PenTesting/labDecrypt.py | dks1018/CoffeeShopCoding | 13ac1700673c86c601eb2758570920620a956e4c | [
"ADSL"
] | null | null | null | Code/Python/PenTesting/labDecrypt.py | dks1018/CoffeeShopCoding | 13ac1700673c86c601eb2758570920620a956e4c | [
"ADSL"
] | null | null | null | Code/Python/PenTesting/labDecrypt.py | dks1018/CoffeeShopCoding | 13ac1700673c86c601eb2758570920620a956e4c | [
"ADSL"
] | null | null | null | import sys
import r2pipe
r2 = r2pipe.open("./lab4")
r2.cmd("e dbg.profile=lab4.rr2")
r2.cmd("doo") # reopen in debug mode
r2.cmd("aaaa")
r2.cmd("db (frist break number)") # sets the break point
r2.cmd("dc") # Continue
def step():
r2.cmd("ds") # single step
r2.cmd("sr rip") # seek to the current RIP the value at RIP
while true:
disass = []
while true:
step()
current_instruction = r2.cmdj("pdj 1")[0]
disass.append(current_instruction['opcode'])
if current_instruction['type'] == 'cjmp':
break
while True:
step()
if "loop" in r2.cmd("pdj 1")[0]['opcode']
break
target = hex(r2.cmdj("pdj 2")[1]['jump'])
r2.cmd("dc")
| 25.903226 | 64 | 0.519303 |
0511744ab000708e51fff53432bf1678f7635551 | 114 | py | Python | pash/__init__.py | iansmcf/pash | 273786a0e830bd582294419c5a93211552e692ba | [
"BSD-3-Clause"
] | null | null | null | pash/__init__.py | iansmcf/pash | 273786a0e830bd582294419c5a93211552e692ba | [
"BSD-3-Clause"
] | null | null | null | pash/__init__.py | iansmcf/pash | 273786a0e830bd582294419c5a93211552e692ba | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Package metadata consumed by setup tooling and introspection.
__author__ = 'Ian McFarlane'
__email__ = 'iansmcfarlane@gmail.com'
__version__ = '1.0.0'
| 19 | 37 | 0.666667 |
64f62485e123e929c9ad50d75ae376d20ab59142 | 115 | py | Python | src/errors.py | AndrewLaganaro/specter-diy | 5d986c281d0a8e494f47ad4265a54dd9e6912d8e | [
"MIT"
] | null | null | null | src/errors.py | AndrewLaganaro/specter-diy | 5d986c281d0a8e494f47ad4265a54dd9e6912d8e | [
"MIT"
] | null | null | null | src/errors.py | AndrewLaganaro/specter-diy | 5d986c281d0a8e494f47ad4265a54dd9e6912d8e | [
"MIT"
] | 1 | 2021-01-14T09:45:18.000Z | 2021-01-14T09:45:18.000Z | class BaseError(Exception):
"""
All generic custom errors inherit from this one
"""
NAME = "Error"
| 19.166667 | 51 | 0.626087 |
56fd3b779406d90528d8d8469fe38821af1b5d2e | 3,361 | py | Python | test/likelihoods/test_gaussian_likelihood.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | 2 | 2021-10-30T03:50:28.000Z | 2022-02-22T22:01:14.000Z | test/likelihoods/test_gaussian_likelihood.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | null | null | null | test/likelihoods/test_gaussian_likelihood.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | 3 | 2020-09-18T18:58:12.000Z | 2021-05-27T15:39:00.000Z | #!/usr/bin/env python3
import unittest
import torch
from gpytorch.distributions import MultivariateNormal
from gpytorch.lazy import DiagLazyTensor
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood, GaussianLikelihood
from gpytorch.likelihoods.noise_models import FixedGaussianNoise
from gpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase
class TestGaussianLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
    """Non-batched GaussianLikelihood checks (inherited from the base case)."""

    # Fixed RNG seed so the base-case fixtures are deterministic.
    seed = 0

    def create_likelihood(self):
        # Factory used by BaseLikelihoodTestCase to build the object under test.
        return GaussianLikelihood()
class TestGaussianLikelihoodBatch(TestGaussianLikelihood):
    """Same checks as the parent, against a likelihood with batch shape (3,)."""

    seed = 0

    def create_likelihood(self):
        batch = torch.Size((3,))
        return GaussianLikelihood(batch_shape=batch)

    def test_nonbatch(self):
        """Not applicable: the likelihood under test is inherently batched."""
class TestGaussianLikelihoodMultiBatch(TestGaussianLikelihood):
    """Same checks as the parent, with a two-level batch shape (2, 3)."""

    seed = 0

    def create_likelihood(self):
        batch = torch.Size((2, 3))
        return GaussianLikelihood(batch_shape=batch)

    def test_nonbatch(self):
        """Not applicable: the likelihood under test is inherently batched."""

    def test_batch(self):
        """Not applicable: single-level batch test does not fit a multi-batch shape."""
class TestFixedNoiseGaussianLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
    """Tests for FixedNoiseGaussianLikelihood with a fixed per-point noise vector."""

    def create_likelihood(self):
        # Offset by 0.1 so every noise entry is strictly positive.
        noise = 0.1 + torch.rand(5)
        return FixedNoiseGaussianLikelihood(noise=noise)

    def test_fixed_noise_gaussian_likelihood(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            noise = 0.1 + torch.rand(4, device=device, dtype=dtype)
            lkhd = FixedNoiseGaussianLikelihood(noise=noise)
            # test basics
            self.assertIsInstance(lkhd.noise_covar, FixedGaussianNoise)
            self.assertTrue(torch.equal(noise, lkhd.noise))
            new_noise = 0.1 + torch.rand(4, device=device, dtype=dtype)
            lkhd.noise = new_noise
            self.assertTrue(torch.equal(lkhd.noise, new_noise))
            # test __call__
            mean = torch.zeros(4, device=device, dtype=dtype)
            covar = DiagLazyTensor(torch.ones(4, device=device, dtype=dtype))
            mvn = MultivariateNormal(mean, covar)
            out = lkhd(mvn)
            # marginal variance = prior variance (1) plus the fixed noise
            self.assertTrue(torch.allclose(out.variance, 1 + new_noise))
            # things should break if dimensions mismatch
            mean = torch.zeros(5, device=device, dtype=dtype)
            covar = DiagLazyTensor(torch.ones(5, device=device, dtype=dtype))
            mvn = MultivariateNormal(mean, covar)
            with self.assertWarns(UserWarning):
                lkhd(mvn)
            # test __call__ w/ observation noise
            obs_noise = 0.1 + torch.rand(5, device=device, dtype=dtype)
            out = lkhd(mvn, noise=obs_noise)
            self.assertTrue(torch.allclose(out.variance, 1 + obs_noise))
class TestFixedNoiseGaussianLikelihoodBatch(BaseLikelihoodTestCase, unittest.TestCase):
    """Fixed-noise likelihood checks with a batched (3, 5) noise tensor."""

    def create_likelihood(self):
        observed_noise = torch.rand(3, 5) + 0.1
        return FixedNoiseGaussianLikelihood(noise=observed_noise)

    def test_nonbatch(self):
        """Not applicable: the likelihood under test is inherently batched."""
class TestFixedNoiseGaussianLikelihoodMultiBatch(BaseLikelihoodTestCase, unittest.TestCase):
    """Fixed-noise likelihood checks with a multi-batch (2, 3, 5) noise tensor."""

    def create_likelihood(self):
        observed_noise = torch.rand(2, 3, 5) + 0.1
        return FixedNoiseGaussianLikelihood(noise=observed_noise)

    def test_nonbatch(self):
        """Not applicable: the likelihood under test is inherently batched."""

    def test_batch(self):
        """Not applicable: single-level batch test does not fit a multi-batch shape."""
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| 33.277228 | 92 | 0.687891 |
1ce3a2c7ca9c5e30def2053ba1d36a05bdc96a3b | 2,007 | py | Python | dbt_gen/warehouse.py | norton120/dbt_gen | 712fc8698a77c3372f5a403a5ae50711d0cb3c7d | [
"MIT"
] | null | null | null | dbt_gen/warehouse.py | norton120/dbt_gen | 712fc8698a77c3372f5a403a5ae50711d0cb3c7d | [
"MIT"
] | null | null | null | dbt_gen/warehouse.py | norton120/dbt_gen | 712fc8698a77c3372f5a403a5ae50711d0cb3c7d | [
"MIT"
] | null | null | null | from global_logger import GLOBAL_LOGGER
import sys
class Warehouse:
    """Minimal wrapper around a data-warehouse connection.

    Only the 'snowflake' warehouse type is currently supported; any other
    type leaves the instance without a connection.
    """

    logger = GLOBAL_LOGGER
    # Placeholder platform identifier kept for interface compatibility.
    platform = ''

    def __init__(self, creds):
        """Open a connection described by *creds*.

        creds: dict with a 'type' key; for snowflake also 'user',
        'password' and 'account'.

        Raises on connection failure. (The original ``return False``
        inside ``__init__`` was a bug: returning non-None from
        ``__init__`` raises TypeError at runtime, so we log and
        re-raise instead.)
        """
        self.creds = creds
        if self.creds['type'] == 'snowflake':
            import snowflake.connector as db
            try:
                self.con = db.connect(user=self.creds['user'],
                                      password=self.creds['password'],
                                      account=self.creds['account'])
            except Exception:
                self.logger.error('Failed to connect to snowflake: {}'.format(sys.exc_info()[0]))
                raise

    def get_lake_tables(self, database, schemas):
        """Return [(SCHEMA, TABLE), ...] for all tables in *schemas* of *database*.

        Returns False if the metadata query fails.
        """
        if self.creds['type'] == 'snowflake':
            cursor = self.con.cursor()
            tables = []
            try:
                for schema in schemas:
                    # NOTE(review): database/schema are interpolated into the
                    # SQL text; assumed to come from trusted config, not user
                    # input — confirm before exposing externally.
                    cursor.execute("SELECT table_name from {}.information_schema.tables WHERE table_schema = '{}'".format(database, schema.upper()))
                    for name in cursor:
                        tables.append((schema.upper(), name[0]))
            except Exception:
                self.logger.error('Failed to gather tables from data lake: {}'.format(sys.exc_info()[0]))
                return False
            return tables

    def get_table_columns(self, database, schema, table):
        """Return the column names of *database*.*schema*.*table*.

        Returns False if the metadata query fails.
        """
        if self.creds['type'] == 'snowflake':
            cursor = self.con.cursor()
            columns = []
            try:
                cursor.execute("SELECT column_name FROM {}.information_schema.columns WHERE table_schema = '{}' AND table_name = '{}'".format(database, schema.upper(), table.upper()))
                for column in cursor:
                    columns.append(column[0])
            except Exception:
                self.logger.error('Failed to gather columns from data lake: {}'.format(sys.exc_info()[0]))
                return False
            return columns
| 37.166667 | 181 | 0.530144 |
4bef842a096dd00f9e85c086169ec61928944093 | 25,038 | py | Python | discord/webhook.py | data-navigator/discord.py | c30a366106f9d6114dcf38a31aed58dec6747054 | [
"MIT"
] | 1 | 2021-05-19T19:53:43.000Z | 2021-05-19T19:53:43.000Z | discord/webhook.py | data-navigator/discord.py | c30a366106f9d6114dcf38a31aed58dec6747054 | [
"MIT"
] | null | null | null | discord/webhook.py | data-navigator/discord.py | c30a366106f9d6114dcf38a31aed58dec6747054 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import json
import time
import re
import aiohttp
from . import utils
from .errors import InvalidArgument, HTTPException, Forbidden, NotFound
from .user import BaseUser, User
# Public names exported by ``from discord.webhook import *``.
__all__ = ['WebhookAdapter', 'AsyncWebhookAdapter', 'RequestsWebhookAdapter', 'Webhook']
class WebhookAdapter:
    """Base class for all webhook adapters.

    Attributes
    ------------
    webhook: :class:`Webhook`
        The webhook that owns this adapter.
    """

    # Root of the Discord REST API (v7) that all webhook URLs build on.
    BASE = 'https://discordapp.com/api/v7'

    def _prepare(self, webhook):
        # Bind this adapter to a specific webhook and precompute its
        # /webhooks/{id}/{token} endpoint URL.
        self._webhook_id = webhook.id
        self._webhook_token = webhook.token
        self._request_url = '{0.BASE}/webhooks/{1}/{2}'.format(self, webhook.id, webhook.token)
        self.webhook = webhook

    def request(self, verb, url, payload=None, multipart=None):
        """Actually does the request.

        Subclasses must implement this.

        Parameters
        -----------
        verb: :class:`str`
            The HTTP verb to use for the request.
        url: :class:`str`
            The URL to send the request to. This will have
            the query parameters already added to it, if any.
        multipart: Optional[:class:`dict`]
            A dict containing multipart form data to send with
            the request. If a filename is being uploaded, then it will
            be under a ``file`` key which will have a 3-element :class:`tuple`
            denoting ``(filename, file, content_type)``.
        payload: Optional[:class:`dict`]
            The JSON to send with the request, if any.
        """
        raise NotImplementedError()

    def delete_webhook(self):
        # DELETE the webhook itself on Discord's side.
        return self.request('DELETE', self._request_url)

    def edit_webhook(self, **payload):
        # PATCH the webhook's settings (name, avatar, ...) on Discord's side.
        return self.request('PATCH', self._request_url, payload=payload)

    def handle_execution_response(self, data, *, wait):
        """Transforms the webhook execution response into something
        more meaningful.

        This is mainly used to convert the data into a :class:`Message`
        if necessary.

        Subclasses must implement this.

        Parameters
        ------------
        data
            The data that was returned from the request.
        wait: :class:`bool`
            Whether the webhook execution was asked to wait or not.
        """
        raise NotImplementedError()

    async def _wrap_coroutine_and_cleanup(self, coro, cleanup):
        # Await the request coroutine and guarantee *cleanup* (file closing)
        # runs afterwards, success or failure.
        try:
            return await coro
        finally:
            cleanup()

    def execute_webhook(self, *, payload, wait=False, file=None, files=None):
        # Build either a multipart body (file/files attached) or a plain
        # JSON payload, then dispatch through self.request().
        cleanup = None
        if file is not None:
            # Single attachment: goes under the 'file' multipart key.
            multipart = {
                'file': (file.filename, file.fp, 'application/octet-stream'),
                'payload_json': utils.to_json(payload)
            }
            data = None
            cleanup = file.close
            files_to_pass = [file]
        elif files is not None:
            # Multiple attachments: keys 'file1', 'file2', ...
            multipart = {
                'payload_json': utils.to_json(payload)
            }
            for i, file in enumerate(files, start=1):
                multipart['file%i' % i] = (file.filename, file.fp, 'application/octet-stream')
            data = None
            def _anon():
                for f in files:
                    f.close()
            cleanup = _anon
            files_to_pass = files
        else:
            # No attachments: send the payload as JSON.
            data = payload
            multipart = None
            files_to_pass = None
        url = '%s?wait=%d' % (self._request_url, wait)
        try:
            maybe_coro = self.request('POST', url, multipart=multipart, payload=data, files=files_to_pass)
        finally:
            # Sync adapters return real data: close files now. Async adapters
            # return a coroutine: defer cleanup until it is awaited.
            # NOTE(review): if self.request() itself raises synchronously,
            # `maybe_coro` is unbound here and this raises NameError — confirm
            # whether that path can occur in practice.
            if cleanup is not None:
                if not asyncio.iscoroutine(maybe_coro):
                    cleanup()
                else:
                    maybe_coro = self._wrap_coroutine_and_cleanup(maybe_coro, cleanup)
        return self.handle_execution_response(maybe_coro, wait=wait)
class AsyncWebhookAdapter(WebhookAdapter):
    """A webhook adapter suited for use with aiohttp.

    .. note::

        You are responsible for cleaning up the client session.

    Parameters
    -----------
    session: aiohttp.ClientSession
        The session to use to send requests.
    """

    def __init__(self, session):
        self.session = session
        self.loop = asyncio.get_event_loop()

    async def request(self, verb, url, payload=None, multipart=None, *, files=None):
        # Perform the HTTP request with up to 5 attempts, handling Discord
        # rate limits (429 / X-Ratelimit-Remaining) and transient 5xx errors.
        headers = {}
        data = None
        files = files or []
        if payload:
            headers['Content-Type'] = 'application/json'
            data = utils.to_json(payload)

        if multipart:
            data = aiohttp.FormData()
            for key, value in multipart.items():
                if key.startswith('file'):
                    # value is (filename, fp, content_type) — see base class.
                    data.add_field(key, value[1], filename=value[0], content_type=value[2])
                else:
                    data.add_field(key, value)

        for tries in range(5):
            # Rewind attached file objects before each retry attempt.
            for file in files:
                file.reset(seek=tries)

            async with self.session.request(verb, url, headers=headers, data=data) as r:
                # NOTE(review): `data` (the request body) is overwritten by the
                # response text here, so a retried attempt would re-send the
                # wrong body — confirm whether retries with a body occur.
                data = await r.text(encoding='utf-8')
                if r.headers['Content-Type'] == 'application/json':
                    data = json.loads(data)

                # check if we have rate limit header information
                remaining = r.headers.get('X-Ratelimit-Remaining')
                if remaining == '0' and r.status != 429:
                    # Pre-emptively sleep out the rate-limit window.
                    delta = utils._parse_ratelimit_header(r)
                    await asyncio.sleep(delta, loop=self.loop)

                if 300 > r.status >= 200:
                    return data

                # we are being rate limited
                if r.status == 429:
                    retry_after = data['retry_after'] / 1000.0
                    await asyncio.sleep(retry_after, loop=self.loop)
                    continue

                if r.status in (500, 502):
                    # Transient server error: back off linearly and retry.
                    await asyncio.sleep(1 + tries * 2, loop=self.loop)
                    continue

                if r.status == 403:
                    raise Forbidden(r, data)
                elif r.status == 404:
                    raise NotFound(r, data)
                else:
                    raise HTTPException(r, data)

    async def handle_execution_response(self, response, *, wait):
        data = await response
        if not wait:
            return data

        # transform into Message object (local import avoids a circular import)
        from .message import Message
        return Message(data=data, state=self.webhook._state, channel=self.webhook.channel)
class RequestsWebhookAdapter(WebhookAdapter):
    """A webhook adapter suited for use with ``requests``.
    Only versions of requests higher than 2.13.0 are supported.
    Parameters
    -----------
    session: Optional[`requests.Session <http://docs.python-requests.org/en/latest/api/#requests.Session>`_]
        The requests session to use for sending requests. If not given then
        each request will create a new session. Note if a session is given,
        the webhook adapter **will not** clean it up for you. You must close
        the session yourself.
    sleep: :class:`bool`
        Whether to sleep the thread when encountering a 429 or pre-emptive
        rate limit or a 5xx status code. Defaults to ``True``. If set to
        ``False`` then this will raise an :exc:`HTTPException` instead.
    """

    def __init__(self, session=None, *, sleep=True):
        import requests
        # Fall back to the module itself, which exposes the same request() API.
        self.session = session or requests
        self.sleep = sleep

    def request(self, verb, url, payload=None, multipart=None, *, files=None):
        """Perform *verb* on *url*, retrying up to 5 times on rate limits and 5xx.

        Returns the decoded response body (dict for JSON responses, str otherwise).

        Bug fix: the original reused one ``data`` variable for both the request
        body and the response text, so any retry (429/5xx) re-sent the previous
        *response text* instead of the payload. The outgoing body is now kept
        in a separate variable for the whole retry loop.
        """
        headers = {}
        to_send = None
        files = files or []
        if payload:
            headers['Content-Type'] = 'application/json'
            to_send = utils.to_json(payload)
        if multipart is not None:
            # payload_json travels as a form field; the rest go via files=.
            to_send = {'payload_json': multipart.pop('payload_json')}

        for tries in range(5):
            # Rewind attached files so a retry uploads them from the start.
            for file in files:
                file.reset(seek=tries)

            r = self.session.request(verb, url, headers=headers, data=to_send, files=multipart)
            r.encoding = 'utf-8'
            response = r.text
            # compatibility with aiohttp
            r.status = r.status_code

            if r.headers['Content-Type'] == 'application/json':
                response = json.loads(response)

            # check if we have rate limit header information
            remaining = r.headers.get('X-Ratelimit-Remaining')
            if remaining == '0' and r.status != 429 and self.sleep:
                # Pre-emptively wait out the bucket before it 429s us.
                delta = utils._parse_ratelimit_header(r)
                time.sleep(delta)

            if 300 > r.status >= 200:
                return response

            # we are being rate limited
            if r.status == 429:
                if self.sleep:
                    retry_after = response['retry_after'] / 1000.0
                    time.sleep(retry_after)
                    continue
                else:
                    raise HTTPException(r, response)

            # Transient server errors: back off linearly and retry.
            if self.sleep and r.status in (500, 502):
                time.sleep(1 + tries * 2)
                continue

            if r.status == 403:
                raise Forbidden(r, response)
            elif r.status == 404:
                raise NotFound(r, response)
            else:
                raise HTTPException(r, response)

    def handle_execution_response(self, response, *, wait):
        """Return the raw payload, or wrap it in a Message when *wait* is set."""
        if not wait:
            return response
        # transform into Message object
        from .message import Message
        return Message(data=response, state=self.webhook._state, channel=self.webhook.channel)
class _FriendlyHttpAttributeErrorHelper:
    """Placeholder ``http`` object whose every attribute access fails loudly."""

    __slots__ = ()

    def __getattr__(self, name):
        raise AttributeError('PartialWebhookState does not support http methods.')
class _PartialWebhookState:
    """Minimal stand-in for a ConnectionState used by standalone webhooks.

    Partial webhooks (``Webhook.partial`` / ``Webhook.from_url``) have no
    gateway connection, so this supplies just enough state for the data
    classes to function and raises for everything else.
    """

    __slots__ = ('loop',)

    def __init__(self, adapter):
        # Fetch the loop from the adapter if it's there
        try:
            self.loop = adapter.loop
        except AttributeError:
            self.loop = None

    def _get_guild(self, guild_id):
        # Partial webhooks carry no guild cache.
        return None

    def store_user(self, data):
        # No user cache either -- build a throwaway BaseUser each time.
        return BaseUser(state=self, data=data)

    @property
    def is_bot(self):
        return True

    @property
    def http(self):
        # Some data classes assign state.http and that should be kosher
        # however, using it should result in a late-binding error.
        return _FriendlyHttpAttributeErrorHelper()

    def __getattr__(self, attr):
        # Bug fix: the original used '{0:!r}', an invalid format spec that made
        # this raise ValueError ("Invalid format specifier") instead of the
        # intended AttributeError. '{0!r}' applies the repr conversion.
        raise AttributeError('PartialWebhookState does not support {0!r}.'.format(attr))
class Webhook:
    """Represents a Discord webhook.
    Webhooks are a form to send messages to channels in Discord without a
    bot user or authentication.
    There are two main ways to use Webhooks. The first is through the ones
    received by the library such as :meth:`.Guild.webhooks` and
    :meth:`.TextChannel.webhooks`. The ones received by the library will
    automatically have an adapter bound using the library's HTTP session.
    Those webhooks will have :meth:`~.Webhook.send`, :meth:`~.Webhook.delete` and
    :meth:`~.Webhook.edit` as coroutines.
    The second form involves creating a webhook object manually without having
    it bound to a websocket connection using the :meth:`~.Webhook.from_url` or
    :meth:`~.Webhook.partial` classmethods. This form allows finer grained control
    over how requests are done, allowing you to mix async and sync code using either
    ``aiohttp`` or ``requests``.
    For example, creating a webhook from a URL and using ``aiohttp``:
    .. code-block:: python3
        from discord import Webhook, AsyncWebhookAdapter
        import aiohttp
        async def foo():
            async with aiohttp.ClientSession() as session:
                webhook = Webhook.from_url('url-here', adapter=AsyncWebhookAdapter(session))
                await webhook.send('Hello World', username='Foo')
    Or creating a webhook from an ID and token and using ``requests``:
    .. code-block:: python3
        import requests
        from discord import Webhook, RequestsWebhookAdapter
        webhook = Webhook.partial(123456, 'abcdefg', adapter=RequestsWebhookAdapter())
        webhook.send('Hello World', username='Foo')
    Attributes
    ------------
    id: :class:`int`
        The webhook's ID
    token: :class:`str`
        The authentication token of the webhook.
    guild_id: Optional[:class:`int`]
        The guild ID this webhook is for.
    channel_id: Optional[:class:`int`]
        The channel ID this webhook is for.
    user: Optional[:class:`abc.User`]
        The user this webhook was created by. If the webhook was
        received without authentication then this will be ``None``.
    name: Optional[:class:`str`]
        The default name of the webhook.
    avatar: Optional[:class:`str`]
        The default avatar of the webhook.
    """
    # __slots__ keeps instances small; _state/_adapter are internal plumbing.
    __slots__ = ('id', 'guild_id', 'channel_id', 'user', 'name', 'avatar',
                 'token', '_state', '_adapter')
    def __init__(self, data, *, adapter, state=None):
        self.id = int(data['id'])
        self.channel_id = utils._get_as_snowflake(data, 'channel_id')
        self.guild_id = utils._get_as_snowflake(data, 'guild_id')
        self.name = data.get('name')
        self.avatar = data.get('avatar')
        self.token = data['token']
        # Without a bound ConnectionState, fall back to a minimal partial state.
        self._state = state or _PartialWebhookState(adapter)
        self._adapter = adapter
        self._adapter._prepare(self)
        # 'user' is only present when the webhook was fetched with authentication.
        user = data.get('user')
        if user is None:
            self.user = None
        elif state is None:
            self.user = BaseUser(state=None, data=user)
        else:
            self.user = User(state=state, data=user)
    def __repr__(self):
        return '<Webhook id=%r>' % self.id
    @property
    def url(self):
        """Returns the webhook's url."""
        return 'https://discordapp.com/api/webhooks/{}/{}'.format(self.id, self.token)
    @classmethod
    def partial(cls, id, token, *, adapter):
        """Creates a partial :class:`Webhook`.
        A partial webhook is just a webhook object with an ID and a token.
        Parameters
        -----------
        id: :class:`int`
            The ID of the webhook.
        token: :class:`str`
            The authentication token of the webhook.
        adapter: :class:`WebhookAdapter`
            The webhook adapter to use when sending requests. This is
            typically :class:`AsyncWebhookAdapter` for ``aiohttp`` or
            :class:`RequestsWebhookAdapter` for ``requests``.
        """
        if not isinstance(adapter, WebhookAdapter):
            raise TypeError('adapter must be a subclass of WebhookAdapter')
        data = {
            'id': id,
            'token': token
        }
        return cls(data, adapter=adapter)
    @classmethod
    def from_url(cls, url, *, adapter):
        """Creates a partial :class:`Webhook` from a webhook URL.
        Parameters
        ------------
        url: :class:`str`
            The URL of the webhook.
        adapter: :class:`WebhookAdapter`
            The webhook adapter to use when sending requests. This is
            typically :class:`AsyncWebhookAdapter` for ``aiohttp`` or
            :class:`RequestsWebhookAdapter` for ``requests``.
        Raises
        -------
        InvalidArgument
            The URL is invalid.
        """
        # NOTE(review): the dots in 'discordapp.com' are unescaped, so they
        # match any character. Harmless for id/token extraction, but worth
        # tightening to r'discordapp\.com' at some point.
        m = re.search(r'discordapp.com/api/webhooks/(?P<id>[0-9]{17,21})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})', url)
        if m is None:
            raise InvalidArgument('Invalid webhook URL given.')
        return cls(m.groupdict(), adapter=adapter)
    @classmethod
    def from_state(cls, data, state):
        # Internal constructor used by the library: binds an async adapter to
        # the client's existing aiohttp session.
        return cls(data, adapter=AsyncWebhookAdapter(session=state.http._session), state=state)
    @property
    def guild(self):
        """Optional[:class:`Guild`]: The guild this webhook belongs to.
        If this is a partial webhook, then this will always return ``None``.
        """
        return self._state._get_guild(self.guild_id)
    @property
    def channel(self):
        """Optional[:class:`TextChannel`]: The text channel this webhook belongs to.
        If this is a partial webhook, then this will always return ``None``.
        """
        guild = self.guild
        return guild and guild.get_channel(self.channel_id)
    @property
    def created_at(self):
        """Returns the webhook's creation time in UTC."""
        return utils.snowflake_time(self.id)
    @property
    def avatar_url(self):
        """Returns a friendly URL version of the avatar the webhook has.
        If the webhook does not have a traditional avatar, their default
        avatar URL is returned instead.
        This is equivalent to calling :meth:`avatar_url_as` with the
        default parameters.
        """
        return self.avatar_url_as()
    def avatar_url_as(self, *, format=None, size=1024):
        """Returns a friendly URL version of the avatar the webhook has.
        If the webhook does not have a traditional avatar, their default
        avatar URL is returned instead.
        The format must be one of 'jpeg', 'jpg', or 'png'.
        The size must be a power of 2 between 16 and 1024.
        Parameters
        -----------
        format: Optional[:class:`str`]
            The format to attempt to convert the avatar to.
            If the format is ``None``, then it is equivalent to png.
        size: :class:`int`
            The size of the image to display.
        Raises
        ------
        InvalidArgument
            Bad image format passed to ``format`` or invalid ``size``.
        Returns
        --------
        :class:`str`
            The resulting CDN URL.
        """
        if self.avatar is None:
            # Default is always blurple apparently
            return 'https://cdn.discordapp.com/embed/avatars/0.png'
        if not utils.valid_icon_size(size):
            raise InvalidArgument("size must be a power of 2 between 16 and 1024")
        format = format or 'png'
        if format not in ('png', 'jpg', 'jpeg'):
            raise InvalidArgument("format must be one of 'png', 'jpg', or 'jpeg'.")
        return 'https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(self, format, size)
    def delete(self):
        """|maybecoro|
        Deletes this Webhook.
        If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
        not a coroutine.
        Raises
        -------
        HTTPException
            Deleting the webhook failed.
        NotFound
            This webhook does not exist.
        Forbidden
            You do not have permissions to delete this webhook.
        """
        return self._adapter.delete_webhook()
    def edit(self, **kwargs):
        """|maybecoro|
        Edits this Webhook.
        If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
        not a coroutine.
        Parameters
        -------------
        name: Optional[:class:`str`]
            The webhook's new default name.
        avatar: Optional[:class:`bytes`]
            A :term:`py:bytes-like object` representing the webhook's new default avatar.
        Raises
        -------
        HTTPException
            Editing the webhook failed.
        NotFound
            This webhook does not exist.
        Forbidden
            You do not have permissions to edit this webhook.
        """
        # Only fields explicitly passed are sent; an explicit None clears the field.
        payload = {}
        try:
            name = kwargs['name']
        except KeyError:
            pass
        else:
            if name is not None:
                payload['name'] = str(name)
            else:
                payload['name'] = None
        try:
            avatar = kwargs['avatar']
        except KeyError:
            pass
        else:
            if avatar is not None:
                payload['avatar'] = utils._bytes_to_base64_data(avatar)
            else:
                payload['avatar'] = None
        return self._adapter.edit_webhook(**payload)
    def send(self, content=None, *, wait=False, username=None, avatar_url=None, tts=False,
             file=None, files=None, embed=None, embeds=None):
        """|maybecoro|
        Sends a message using the webhook.
        If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
        not a coroutine.
        The content must be a type that can convert to a string through ``str(content)``.
        To upload a single file, the ``file`` parameter should be used with a
        single :class:`File` object.
        If the ``embed`` parameter is provided, it must be of type :class:`Embed` and
        it must be a rich embed type. You cannot mix the ``embed`` parameter with the
        ``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send.
        Parameters
        ------------
        content: :class:`str`
            The content of the message to send.
        wait: :class:`bool`
            Whether the server should wait before sending a response. This essentially
            means that the return type of this function changes from ``None`` to
            a :class:`Message` if set to ``True``.
        username: :class:`str`
            The username to send with this message. If no username is provided
            then the default username for the webhook is used.
        avatar_url: :class:`str`
            The avatar URL to send with this message. If no avatar URL is provided
            then the default avatar for the webhook is used.
        tts: :class:`bool`
            Indicates if the message should be sent using text-to-speech.
        file: :class:`File`
            The file to upload. This cannot be mixed with ``files`` parameter.
        files: List[:class:`File`]
            A list of files to send with the content. This cannot be mixed with the
            ``file`` parameter.
        embed: :class:`Embed`
            The rich embed for the content to send. This cannot be mixed with
            ``embeds`` parameter.
        embeds: List[:class:`Embed`]
            A list of embeds to send with the content. Maximum of 10. This cannot
            be mixed with the ``embed`` parameter.
        Raises
        --------
        HTTPException
            Sending the message failed.
        NotFound
            This webhook was not found.
        Forbidden
            The authorization token for the webhook is incorrect.
        InvalidArgument
            You specified both ``embed`` and ``embeds`` or the length of
            ``embeds`` was invalid.
        Returns
        ---------
        Optional[:class:`Message`]
            The message that was sent.
        """
        # Build the JSON payload; embed/embeds and file/files are mutually exclusive.
        payload = {}
        if files is not None and file is not None:
            raise InvalidArgument('Cannot mix file and files keyword arguments.')
        if embeds is not None and embed is not None:
            raise InvalidArgument('Cannot mix embed and embeds keyword arguments.')
        if embeds is not None:
            if len(embeds) > 10:
                raise InvalidArgument('embeds has a maximum of 10 elements.')
            payload['embeds'] = [e.to_dict() for e in embeds]
        if embed is not None:
            payload['embeds'] = [embed.to_dict()]
        if content is not None:
            payload['content'] = str(content)
        payload['tts'] = tts
        if avatar_url:
            payload['avatar_url'] = avatar_url
        if username:
            payload['username'] = username
        return self._adapter.execute_webhook(wait=wait, file=file, files=files, payload=payload)
    def execute(self, *args, **kwargs):
        """An alias for :meth:`~.Webhook.send`."""
        return self.send(*args, **kwargs)
| 34.440165 | 115 | 0.59238 |
ebb0520f046c9ce2bd91d15ca07eec82e751e45d | 3,860 | py | Python | bin/main.py | commandarmy/discord-event-manager | 709dd307b046158ddf9e49a559852d486168a94f | [
"Apache-2.0"
] | 4 | 2019-11-20T22:52:00.000Z | 2021-04-29T17:26:09.000Z | bin/main.py | commandarmy/discord-event-manager | 709dd307b046158ddf9e49a559852d486168a94f | [
"Apache-2.0"
] | 14 | 2020-02-10T17:16:22.000Z | 2022-03-02T12:55:33.000Z | bin/main.py | commandarmy/discord-event-manager | 709dd307b046158ddf9e49a559852d486168a94f | [
"Apache-2.0"
#!/usr/bin/env python3
"""Flask web runtime script."""
from __future__ import annotations
__LICENSE__ = """
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import argparse
import logging
import os
import threading
from bot.bot import make_bot_instance
from config.discord import bot_token
from config.flask import port, debug, database_file
from api.app import app, db
from api.build import build_angular
# Command-line interface for the combined Flask + Discord bot runner.
parser = argparse.ArgumentParser(
    description='Flask application serving the event manager UI.')
parser.add_argument(
    '-p', '--port', dest='port', type=int, default=port,
    help='web application serving port')
parser.add_argument(
    '--no_build', dest='no_build', action='store_true',
    help='do not build the Angular application')
parser.add_argument(
    '--recreate_database', dest='recreate_database', action='store_true',
    help='if present, removes the previous database file')
# Default log level for the whole process.
logging.root.setLevel(logging.INFO)
class DiscordBotRuntimeThread(threading.Thread):
    """Creates a thread environment to run the discord bot.

    Creates a wrapper allowing to wait for the bot initialization as well
    as cleanly shut it down. The bot runs on its own asyncio event loop,
    owned by this thread.
    """

    def __init__(self, token: str):
        """:param token: Discord bot token used to authenticate on start."""
        super().__init__()
        self._token = token
        # Set once the bot reports readiness (or fails trying).
        self._ready = threading.Event()
        # Dedicated loop for this thread; the main thread keeps its own.
        self._loop = asyncio.new_event_loop()

    def run(self):
        """Runs the thread."""
        asyncio.set_event_loop(self._loop)
        bot = make_bot_instance(loop=self._loop)

        async def runner():
            """Main bot runtime loop."""
            try:
                await bot.start(self._token)
            finally:
                # Always close the connection, even if start() raised.
                await bot.close()

        async def readiness_notifier():
            """Notifies when the bot is ready the threading.Event."""
            try:
                await bot.wait_until_ready()
            finally:
                # Set unconditionally so wait_readiness() can never deadlock.
                self._ready.set()

        self._loop.create_task(runner(),
                               name='Bot runtime')
        self._loop.create_task(readiness_notifier(),
                               name='Readyness notifier')
        # Blocks this thread until clean_stop() stops the loop.
        self._loop.run_forever()

    def wait_readiness(self):
        """Blocking call waiting for the bot to be fully operational."""
        self._ready.wait()

    def clean_stop(self):
        """Clean stop of the bot, closing all connections."""
        # Bug fix: loop.stop() is not thread-safe and clean_stop() is called
        # from the main thread; schedule it on the loop's own thread instead.
        self._loop.call_soon_threadsafe(self._loop.stop)
        self.join()
def main():
    """Builds the frontend, prepares the database, then runs the Discord bot
    alongside the Flask application.

    The bot runs on its own thread/event loop; Flask blocks the main thread
    until interrupted, after which the bot is shut down cleanly.
    """
    args = parser.parse_args()
    if not args.no_build:
        logging.info('Building Angular...')
        build_angular(debug)
    # Optionally wipe, then (re)create the SQLite database file.
    if os.path.exists(database_file) and args.recreate_database:
        logging.info('Clearing previous database')
        os.remove(database_file)
    if not os.path.exists(database_file):
        logging.info('Creating database')
        with app.app_context():
            db.create_all()
    # Start the bot and wait until it is fully connected before serving HTTP.
    runner = DiscordBotRuntimeThread(bot_token)
    logging.info('Starting the bot')
    runner.start()
    runner.wait_readiness()
    logging.info('Bot started, starting Flask application')
    # Bind only to localhost in debug mode; expose on all interfaces otherwise.
    # (Replaces the legacy ``debug and a or b`` idiom with the explicit
    # conditional expression.)
    host = '127.0.0.1' if debug else '0.0.0.0'
    try:
        app.run(host=host, port=args.port, debug=debug,
                use_reloader=False)
    finally:
        # Ensure the bot thread and its event loop are torn down even if
        # Flask exits via an exception (e.g. KeyboardInterrupt).
        runner.clean_stop()
main()
| 29.692308 | 73 | 0.659326 |
7557082b13b4073e905c8ee0cd9dd0fbcb2af29a | 7,520 | py | Python | heat/engine/api.py | citrix-openstack-build/heat | fa31873529481472e037e3ce157b87f8057fe622 | [
"Apache-2.0"
] | null | null | null | heat/engine/api.py | citrix-openstack-build/heat | fa31873529481472e037e3ce157b87f8057fe622 | [
"Apache-2.0"
] | null | null | null | heat/engine/api.py | citrix-openstack-build/heat | fa31873529481472e037e3ce157b87f8057fe622 | [
"Apache-2.0"
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.rpc import api
from heat.openstack.common import timeutils
from heat.engine import template
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
def extract_args(params):
    '''
    Extract the engine arguments passed as API parameters and return them as
    a dictionary, filtering unknown entries and applying type conversion
    where appropriate.
    '''
    extracted = {}
    try:
        timeout = int(params.get(api.PARAM_TIMEOUT, 0))
    except (ValueError, TypeError):
        logger.exception('create timeout conversion')
    else:
        # Only a strictly positive timeout is meaningful.
        if timeout > 0:
            extracted[api.PARAM_TIMEOUT] = timeout
    if api.PARAM_DISABLE_ROLLBACK in params:
        raw = params.get(api.PARAM_DISABLE_ROLLBACK)
        normalized = str(raw).lower()
        if normalized == 'true':
            extracted[api.PARAM_DISABLE_ROLLBACK] = True
        elif normalized == 'false':
            extracted[api.PARAM_DISABLE_ROLLBACK] = False
        else:
            raise ValueError("Unexpected value for parameter %s : %s" %
                             (api.PARAM_DISABLE_ROLLBACK, raw))
    return extracted
def format_stack_outputs(stack, outputs):
    '''
    Return a representation of the given output template for the given stack
    that matches the API output expectations.
    '''
    formatted = []
    for key in outputs:
        description = outputs[key].get('Description', 'No description given')
        formatted.append({api.OUTPUT_DESCRIPTION: description,
                          api.OUTPUT_KEY: key,
                          api.OUTPUT_VALUE: stack.output(key)})
    return formatted
def format_stack(stack):
    '''
    Return a representation of the given stack that matches the API output
    expectations.

    :param stack: a parser.Stack object
    :returns: dict keyed by the rpc api.STACK_* constants
    '''
    info = {
        api.STACK_NAME: stack.name,
        api.STACK_ID: dict(stack.identifier()),
        api.STACK_CREATION_TIME: timeutils.isotime(stack.created_time),
        api.STACK_UPDATED_TIME: timeutils.isotime(stack.updated_time),
        api.STACK_NOTIFICATION_TOPICS: [], # TODO Not implemented yet
        api.STACK_PARAMETERS: stack.parameters.map(str),
        api.STACK_DESCRIPTION: stack.t[template.DESCRIPTION],
        api.STACK_TMPL_DESCRIPTION: stack.t[template.DESCRIPTION],
        api.STACK_ACTION: stack.action or '',
        api.STACK_STATUS: stack.status or '',
        api.STACK_STATUS_DATA: stack.status_reason,
        api.STACK_CAPABILITIES: [], # TODO Not implemented yet
        api.STACK_DISABLE_ROLLBACK: stack.disable_rollback,
        api.STACK_TIMEOUT: stack.timeout_mins,
    }
    # only show the outputs on a completely created or updated stack
    if (stack.action != stack.DELETE and stack.status == stack.COMPLETE):
        info[api.STACK_OUTPUTS] = format_stack_outputs(stack, stack.outputs)
    return info
def format_stack_resource(resource, detail=True):
    '''
    Return a representation of the given resource that matches the API output
    expectations.

    :param resource: the engine resource to serialize
    :param detail: when True, also include the parsed description
    :returns: dict keyed by the rpc api.RES_* constants
    '''
    # Prefer the last update time; fall back to creation time for
    # resources that were never updated.
    last_updated_time = resource.updated_time or resource.created_time
    res = {
        api.RES_UPDATED_TIME: timeutils.isotime(last_updated_time),
        api.RES_NAME: resource.name,
        api.RES_PHYSICAL_ID: resource.resource_id or '',
        api.RES_METADATA: resource.metadata,
        api.RES_ACTION: resource.action,
        api.RES_STATUS: resource.status,
        api.RES_STATUS_DATA: resource.status_reason,
        api.RES_TYPE: resource.t['Type'],
        api.RES_ID: dict(resource.identifier()),
        api.RES_STACK_ID: dict(resource.stack.identifier()),
        api.RES_STACK_NAME: resource.stack.name,
        api.RES_REQUIRED_BY: resource.required_by(),
    }
    if detail:
        res[api.RES_DESCRIPTION] = resource.parsed_template('Description', '')
        # NOTE(review): RES_METADATA is already set in the literal above;
        # this second read of resource.metadata looks redundant unless the
        # attribute is a property with side effects -- confirm before removing.
        res[api.RES_METADATA] = resource.metadata
    return res
def format_event(event):
    '''
    Return a representation of the given event that matches the API output
    expectations, keyed by the rpc api.EVENT_* constants.
    '''
    stack_identifier = event.stack.identifier()
    result = {
        api.EVENT_ID: dict(event.identifier()),
        api.EVENT_STACK_ID: dict(stack_identifier),
        api.EVENT_STACK_NAME: stack_identifier.stack_name,
        api.EVENT_TIMESTAMP: timeutils.isotime(event.timestamp),
        api.EVENT_RES_NAME: event.resource_name,
        api.EVENT_RES_PHYSICAL_ID: event.physical_resource_id,
        api.EVENT_RES_ACTION: event.action,
        api.EVENT_RES_STATUS: event.status,
        api.EVENT_RES_STATUS_DATA: event.reason,
        api.EVENT_RES_TYPE: event.resource_type,
        api.EVENT_RES_PROPERTIES: event.resource_properties,
    }
    return result
def format_watch(watch):
    '''
    Return a representation of the given watch rule that matches the API
    output expectations, keyed by the rpc api.WATCH_* constants. Most
    values are pulled from the stored rule dict.
    '''
    result = {
        api.WATCH_ACTIONS_ENABLED: watch.rule.get(api.RULE_ACTIONS_ENABLED),
        api.WATCH_ALARM_ACTIONS: watch.rule.get(api.RULE_ALARM_ACTIONS),
        api.WATCH_TOPIC: watch.rule.get(api.RULE_TOPIC),
        api.WATCH_UPDATED_TIME: timeutils.isotime(watch.updated_at),
        api.WATCH_DESCRIPTION: watch.rule.get(api.RULE_DESCRIPTION),
        api.WATCH_NAME: watch.name,
        api.WATCH_COMPARISON: watch.rule.get(api.RULE_COMPARISON),
        api.WATCH_DIMENSIONS: watch.rule.get(api.RULE_DIMENSIONS) or [],
        api.WATCH_PERIODS: watch.rule.get(api.RULE_PERIODS),
        api.WATCH_INSUFFICIENT_ACTIONS:
        watch.rule.get(api.RULE_INSUFFICIENT_ACTIONS),
        api.WATCH_METRIC_NAME: watch.rule.get(api.RULE_METRIC_NAME),
        api.WATCH_NAMESPACE: watch.rule.get(api.RULE_NAMESPACE),
        api.WATCH_OK_ACTIONS: watch.rule.get(api.RULE_OK_ACTIONS),
        api.WATCH_PERIOD: watch.rule.get(api.RULE_PERIOD),
        api.WATCH_STATE_REASON: watch.rule.get(api.RULE_STATE_REASON),
        api.WATCH_STATE_REASON_DATA:
        watch.rule.get(api.RULE_STATE_REASON_DATA),
        api.WATCH_STATE_UPDATED_TIME: timeutils.isotime(
            watch.rule.get(api.RULE_STATE_UPDATED_TIME)),
        api.WATCH_STATE_VALUE: watch.state,
        api.WATCH_STATISTIC: watch.rule.get(api.RULE_STATISTIC),
        api.WATCH_THRESHOLD: watch.rule.get(api.RULE_THRESHOLD),
        api.WATCH_UNIT: watch.rule.get(api.RULE_UNIT),
        api.WATCH_STACK_ID: watch.stack_id
    }
    return result
def format_watch_data(wd):
    # Demangle DB format data into something more easily used in the API.
    # The stored dict is expected to hold exactly two items: 'Namespace'
    # plus a single metric key.
    namespace = wd.data['Namespace']
    metrics = [item for item in wd.data.items() if item[0] != 'Namespace']
    if len(metrics) != 1:
        logger.error("Unexpected number of keys in watch_data.data!")
        return
    metric_name, metric_data = metrics[0]
    return {
        api.WATCH_DATA_ALARM: wd.watch_rule.name,
        api.WATCH_DATA_METRIC: metric_name,
        api.WATCH_DATA_TIME: timeutils.isotime(wd.created_at),
        api.WATCH_DATA_NAMESPACE: namespace,
        api.WATCH_DATA: metric_data
    }
| 38.172589 | 79 | 0.686436 |
1109779ffd9647869dd16611828cee03c3d71013 | 2,051 | py | Python | src/components/MaskOrientGenerator/faceSegmentation.py | jinwoo1225/HAiR | 944d0254329a31b0132d19f903d40d86c8c0071a | [
"Apache-2.0"
] | 2 | 2021-03-29T06:32:47.000Z | 2021-05-03T03:29:16.000Z | src/components/MaskOrientGenerator/faceSegmentation.py | jinwoo1225/HAiR | 944d0254329a31b0132d19f903d40d86c8c0071a | [
"Apache-2.0"
] | 28 | 2021-05-04T02:27:52.000Z | 2022-02-12T13:37:11.000Z | src/components/MaskOrientGenerator/faceSegmentation.py | jinwoo1225/HAiR | 944d0254329a31b0132d19f903d40d86c8c0071a | [
"Apache-2.0"
import cv2
import numpy as np
from PIL import Image
import torch
from torchvision import transforms
import os
from models.MobileNetV2_unet import MobileNetV2_unet
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
face_seg_model_path = BASE_DIR + '/../../../models/checkpoints/model.pt'
def write_to_txt(mask_arr: np.ndarray, mask_name: str) -> None:
    """Dump a 2-D mask to ``<mask_name>.txt``: one row per line, cell values
    concatenated with no separator."""
    rows = (''.join(str(cell) for cell in row) for row in mask_arr)
    with open(f'{mask_name}.txt', 'wt') as out:
        for line in rows:
            out.write(line)
            out.write('\n')
def img_to_ndarray(img_path: str) -> np.ndarray:
    """Load the image at *img_path* from disk and return it as an RGB ndarray
    (OpenCV reads BGR, so the channels are swapped before returning)."""
    bgr = cv2.imread(img_path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
class FaceSegmentation():
    """Hair segmentation backed by a pre-trained MobileNetV2 U-Net.

    mostly come from
    https://github.com/kampta/face-seg

    Note: the original block had dataset-extraction residue fused onto its
    final line, which has been removed here.
    """
    def __init__(self):
        # Load the checkpoint on CPU and switch the network to inference mode.
        model = MobileNetV2_unet(None).to(torch.device("cpu"))
        model_path = face_seg_model_path
        state_dict = torch.load(model_path, map_location="cpu")
        model.load_state_dict(state_dict)
        model.eval()
        print(f"Model '{model_path}' is loaded")
        self.model = model

    def image_to_mask(self, image: np.ndarray, mask_size: int, out_size: int) -> np.ndarray:
        """Segment *image* and return a uint8 hair mask resized to *out_size*.

        :param image: RGB image as an ndarray (assumed HxWx3 -- TODO confirm).
        :param mask_size: side length the image is resized to for inference.
        :param out_size: side length of the returned mask.
        """
        transform = transforms.Compose([
            transforms.Resize((mask_size, mask_size)),
            transforms.ToTensor(), ])
        pil_img = Image.fromarray(image)
        torch_img = transform(pil_img)
        torch_img = torch_img.unsqueeze(0)
        torch_img = torch_img.to(torch.device("cpu"))
        # feed-forwarding; no_grad avoids building an autograd graph during
        # pure inference, reducing memory use without changing the output.
        with torch.no_grad():
            logits = self.model(torch_img)
        mask = np.argmax(logits.data.cpu().numpy(), axis=1)
        # The model labels face pixels as 1 and hair pixels as 2; keep only
        # the hair class as 1 and zero everything else.
        mask = mask.squeeze()
        mask = np.where(mask == 1, 0, mask)
        mask = np.where(mask == 2, 1, mask)
        mask = np.array(mask, dtype='uint8')
        mask = cv2.resize(mask, dsize=(0, 0), fx=(out_size/mask_size),
                          fy=(out_size/mask_size), interpolation=cv2.INTER_LINEAR)
        return mask
ace67be360cfcd671da9b4328bc91830abfba6a9 | 22,576 | py | Python | kivymd/uix/card.py | shashi278/KivyMD | d920196358e17f6d4bd74ce6e19f6ecb462e4290 | [
"MIT"
] | null | null | null | kivymd/uix/card.py | shashi278/KivyMD | d920196358e17f6d4bd74ce6e19f6ecb462e4290 | [
"MIT"
] | null | null | null | kivymd/uix/card.py | shashi278/KivyMD | d920196358e17f6d4bd74ce6e19f6ecb462e4290 | [
"MIT"
] | null | null | null | """
Components/Card
===============
.. seealso::
`Material Design spec, Cards <https://material.io/components/cards>`_
.. rubric:: Cards contain content and actions about a single subject.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/cards.gif
:align: center
`KivyMD` provides the following card classes for use:
- MDCard_
- MDCardSwipe_
.. Note:: :class:`~MDCard` inherited from
:class:`~kivy.uix.boxlayout.BoxLayout`. You can use all parameters and
attributes of the :class:`~kivy.uix.boxlayout.BoxLayout` class in the
:class:`~MDCard` class.
.. MDCard:
MDCard
------
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
MDScreen:
MDCard:
size_hint: None, None
size: "280dp", "180dp"
pos_hint: {"center_x": .5, "center_y": .5}
'''
class TestCard(MDApp):
def build(self):
return Builder.load_string(KV)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card.png
:align: center
Add content to card:
--------------------
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
MDScreen:
MDCard:
orientation: "vertical"
padding: "8dp"
size_hint: None, None
size: "280dp", "180dp"
pos_hint: {"center_x": .5, "center_y": .5}
MDLabel:
text: "Title"
theme_text_color: "Secondary"
size_hint_y: None
height: self.texture_size[1]
MDSeparator:
height: "1dp"
MDLabel:
text: "Body"
'''
class TestCard(MDApp):
def build(self):
return Builder.load_string(KV)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card-content.png
:align: center
.. MDCardSwipe:
MDCardSwipe
-----------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/MDCardSwipe.gif
:align: center
To create a card with `swipe-to-delete` behavior, you must create a new class
that inherits from the :class:`~MDCardSwipe` class:
.. code-block:: kv
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
MDCardSwipeLayerBox:
MDCardSwipeFrontBox:
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
.. code-block:: python
class SwipeToDeleteItem(MDCardSwipe):
text = StringProperty()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/map-mdcard-swipr.png
:align: center
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivymd.app import MDApp
from kivymd.uix.card import MDCardSwipe
KV = '''
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
MDCardSwipeLayerBox:
# Content under the card.
MDCardSwipeFrontBox:
# Content of card.
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
MDScreen:
MDBoxLayout:
orientation: "vertical"
spacing: "10dp"
MDToolbar:
elevation: 10
title: "MDCardSwipe"
ScrollView:
scroll_timeout : 100
MDList:
id: md_list
padding: 0
'''
class SwipeToDeleteItem(MDCardSwipe):
'''Card with `swipe-to-delete` behavior.'''
text = StringProperty()
class TestCard(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.screen = Builder.load_string(KV)
def build(self):
return self.screen
def on_start(self):
'''Creates a list of cards.'''
for i in range(20):
self.screen.ids.md_list.add_widget(
SwipeToDeleteItem(text=f"One-line item {i}")
)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/list-mdcard-swipe.gif
:align: center
Binding a swipe to one of the sides of the screen
-------------------------------------------------
.. code-block:: kv
<SwipeToDeleteItem>:
# By default, the parameter is "left"
anchor: "right"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/mdcard-swipe-anchor-right.gif
:align: center
.. Note:: You cannot use the left and right swipe at the same time.
Swipe behavior
--------------
.. code-block:: kv
<SwipeToDeleteItem>:
# By default, the parameter is "hand"
type_swipe: "hand"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/hand-mdcard-swipe.gif
:align: center
.. code-block:: kv
<SwipeToDeleteItem>:
type_swipe: "auto"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/auto-mdcard-swipe.gif
:align: center
Removing an item using the ``type_swipe = "auto"`` parameter
------------------------------------------------------------
The map provides the :attr:`MDCardSwipe.on_swipe_complete` event.
You can use this event to remove items from a list:
.. code-block:: kv
<SwipeToDeleteItem>:
on_swipe_complete: app.on_swipe_complete(root)
.. code-block:: python
def on_swipe_complete(self, instance):
self.screen.ids.md_list.remove_widget(instance)
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivymd.app import MDApp
from kivymd.uix.card import MDCardSwipe
KV = '''
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
type_swipe: "auto"
on_swipe_complete: app.on_swipe_complete(root)
MDCardSwipeLayerBox:
MDCardSwipeFrontBox:
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
MDScreen:
MDBoxLayout:
orientation: "vertical"
spacing: "10dp"
MDToolbar:
elevation: 10
title: "MDCardSwipe"
ScrollView:
MDList:
id: md_list
padding: 0
'''
class SwipeToDeleteItem(MDCardSwipe):
text = StringProperty()
class TestCard(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.screen = Builder.load_string(KV)
def build(self):
return self.screen
def on_swipe_complete(self, instance):
self.screen.ids.md_list.remove_widget(instance)
def on_start(self):
for i in range(20):
self.screen.ids.md_list.add_widget(
SwipeToDeleteItem(text=f"One-line item {i}")
)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/autodelete-mdcard-swipe.gif
:align: center
Add content to the bottom layer of the card
-------------------------------------------
To add content to the bottom layer of the card,
use the :class:`~MDCardSwipeLayerBox` class.
.. code-block:: kv
<SwipeToDeleteItem>:
MDCardSwipeLayerBox:
padding: "8dp"
MDIconButton:
icon: "trash-can"
pos_hint: {"center_y": .5}
on_release: app.remove_item(root)
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivymd.app import MDApp
from kivymd.uix.card import MDCardSwipe
KV = '''
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
MDCardSwipeLayerBox:
padding: "8dp"
MDIconButton:
icon: "trash-can"
pos_hint: {"center_y": .5}
on_release: app.remove_item(root)
MDCardSwipeFrontBox:
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
MDScreen:
MDBoxLayout:
orientation: "vertical"
spacing: "10dp"
MDToolbar:
elevation: 10
title: "MDCardSwipe"
ScrollView:
MDList:
id: md_list
padding: 0
'''
class SwipeToDeleteItem(MDCardSwipe):
text = StringProperty()
class TestCard(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.screen = Builder.load_string(KV)
def build(self):
return self.screen
def remove_item(self, instance):
self.screen.ids.md_list.remove_widget(instance)
def on_start(self):
for i in range(20):
self.screen.ids.md_list.add_widget(
SwipeToDeleteItem(text=f"One-line item {i}")
)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/handdelete-mdcard-swipe.gif
:align: center
Focus behavior
--------------
.. code-block:: kv
MDCard:
focus_behavior: True
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card-focus.gif
:align: center
Ripple behavior
---------------
.. code-block:: kv
MDCard:
ripple_behavior: True
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card-behavior.gif
:align: center
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
<StarButton@MDIconButton>
icon: "star"
on_release: self.icon = "star-outline" if self.icon == "star" else "star"
MDScreen:
MDCard:
orientation: "vertical"
size_hint: .5, None
height: box_top.height + box_bottom.height
focus_behavior: True
ripple_behavior: True
pos_hint: {"center_x": .5, "center_y": .5}
MDBoxLayout:
id: box_top
spacing: "20dp"
adaptive_height: True
FitImage:
source: "/Users/macbookair/album.jpeg"
size_hint: .3, None
height: text_box.height
MDBoxLayout:
id: text_box
orientation: "vertical"
adaptive_height: True
spacing: "10dp"
padding: 0, "10dp", "10dp", "10dp"
MDLabel:
text: "Ride the Lightning"
theme_text_color: "Primary"
font_style: "H5"
bold: True
size_hint_y: None
height: self.texture_size[1]
MDLabel:
text: "July 27, 1984"
size_hint_y: None
height: self.texture_size[1]
theme_text_color: "Primary"
MDSeparator:
MDBoxLayout:
id: box_bottom
adaptive_height: True
padding: "10dp", 0, 0, 0
MDLabel:
text: "Rate this album"
size_hint_y: None
height: self.texture_size[1]
pos_hint: {"center_y": .5}
theme_text_color: "Primary"
StarButton:
StarButton:
StarButton:
StarButton:
StarButton:
'''
class Test(MDApp):
def build(self):
self.theme_cls.theme_style = "Dark"
return Builder.load_string(KV)
Test().run()
"""
# Public API of this module.
__all__ = (
    "MDCard",
    "MDCardSwipe",
    "MDCardSwipeFrontBox",
    "MDCardSwipeLayerBox",
    "MDSeparator",
)
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import (
BooleanProperty,
ColorProperty,
NumericProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.utils import get_color_from_hex
from kivymd.color_definitions import colors
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import (
BackgroundColorBehavior,
FocusBehavior,
RectangularRippleBehavior,
RoundedRectangularElevationBehavior,
)
from kivymd.uix.boxlayout import MDBoxLayout
Builder.load_string(
"""
<MDCardSwipeLayerBox>:
canvas.before:
Color:
rgba: app.theme_cls.divider_color
Rectangle:
size: self.size
pos: self.pos
<MDCard>
canvas.before:
Color:
rgba: self.md_bg_color
RoundedRectangle:
size: self.size
pos: self.pos
radius: root.radius
source: root.background
<MDSeparator>
md_bg_color: self.theme_cls.divider_color if not root.color else root.color
"""
)
class MDSeparator(ThemableBehavior, MDBoxLayout):
    """A 1dp divider line, drawn horizontally or vertically."""

    color = ColorProperty(None)
    """Separator color in ``rgba`` format.

    :attr:`color` is a :class:`~kivy.properties.ColorProperty`
    and defaults to `None`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Apply the sizing rules for the initial orientation right away;
        # subsequent orientation changes trigger this automatically.
        self.on_orientation()

    def on_orientation(self, *args):
        # A horizontal separator stretches across its parent and is 1dp tall;
        # a vertical one stretches vertically and is 1dp wide.
        horizontal = self.orientation == "horizontal"
        self.size_hint = (1, None) if horizontal else (None, 1)
        if horizontal:
            self.height = dp(1)
        else:
            self.width = dp(1)
class MDCard(
    ThemableBehavior,
    RoundedRectangularElevationBehavior,
    BackgroundColorBehavior,
    RectangularRippleBehavior,
    FocusBehavior,
    BoxLayout,
):
    """A Material Design card: a themed, elevated, rounded container."""

    focus_behavior = BooleanProperty(False)
    """
    Using focus when hovering over a card.

    :attr:`focus_behavior` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to `False`.
    """

    ripple_behavior = BooleanProperty(False)
    """
    Use ripple effect for card.

    :attr:`ripple_behavior` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to `False`.
    """

    elevation = NumericProperty(None, allownone=True)
    """
    Elevation value.

    :attr:`elevation` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `None`; when left unset it is replaced with an effective
    elevation of `6` on the next frame (see :meth:`_on_elevation`).
    """

    # Background colors that should track the active theme: the light and
    # dark "CardsDialogs" palette entries plus fully transparent white.
    _bg_color_map = (
        get_color_from_hex(colors["Light"]["CardsDialogs"]),
        get_color_from_hex(colors["Dark"]["CardsDialogs"]),
        [1.0, 1.0, 1.0, 0.0],
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Re-theme the card background whenever the app switches theme style.
        self.theme_cls.bind(theme_style=self.update_md_bg_color)
        # Defer one frame so KV-declared values are applied before defaults.
        Clock.schedule_once(lambda x: self._on_elevation(self.elevation))
        Clock.schedule_once(
            lambda x: self.on_ripple_behavior(0, self.ripple_behavior)
        )
        self.update_md_bg_color(self, self.theme_cls.theme_style)

    def update_md_bg_color(self, instance, value):
        # Only override the background if it is still one of the default
        # theme colors; a user-supplied color is left untouched.
        if self.md_bg_color in self._bg_color_map:
            self.md_bg_color = get_color_from_hex(colors[value]["CardsDialogs"])

    def on_ripple_behavior(self, instance, value):
        # RectangularRippleBehavior disables the ripple when
        # `_no_ripple_effect` is True, hence the inversion.
        self._no_ripple_effect = False if value else True

    def _on_elevation(self, value):
        # An unset (None) elevation falls back to the default of 6.
        if value is None:
            self.elevation = 6
        else:
            self.elevation = value
class MDCardSwipe(RelativeLayout):
    """
    Container that lets its :class:`MDCardSwipeFrontBox` child be dragged
    horizontally to reveal the :class:`MDCardSwipeLayerBox` underneath.

    :Events:
        :attr:`on_swipe_complete`
            Called when a swipe of card is completed.
    """

    open_progress = NumericProperty(0.0)
    """
    Percent of visible part of side panel. The percent is specified as a
    floating point number in the range 0-1. 0.0 if panel is closed and 1.0 if
    panel is opened.

    :attr:`open_progress` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `0.0`.
    """

    opening_transition = StringProperty("out_cubic")
    """
    The name of the animation transition type to use when animating to
    the :attr:`state` `'opened'`.

    :attr:`opening_transition` is a :class:`~kivy.properties.StringProperty`
    and defaults to `'out_cubic'`.
    """

    closing_transition = StringProperty("out_sine")
    """
    The name of the animation transition type to use when animating to
    the :attr:`state` 'closed'.

    :attr:`closing_transition` is a :class:`~kivy.properties.StringProperty`
    and defaults to `'out_sine'`.
    """

    anchor = OptionProperty("left", options=("left", "right"))
    """
    Anchoring screen edge for card. Available options are: `'left'`, `'right'`.

    :attr:`anchor` is a :class:`~kivy.properties.OptionProperty`
    and defaults to `left`.
    """

    swipe_distance = NumericProperty(50)
    """
    The distance of the swipe with which the movement of navigation drawer
    begins.

    :attr:`swipe_distance` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `50`.
    """

    opening_time = NumericProperty(0.2)
    """
    The time taken for the card to slide to the :attr:`state` `'open'`.

    :attr:`opening_time` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `0.2`.
    """

    state = OptionProperty("closed", options=("closed", "opened"))
    """
    Detailed state of the card. Bind to :attr:`state` to react to the card
    opening or closing. Available options are: `'closed'`, `'opened'`.

    :attr:`state` is a :class:`~kivy.properties.OptionProperty`
    and defaults to `'closed'`.
    """

    max_swipe_x = NumericProperty(0.3)
    """
    If, after the events of :attr:`~on_touch_up` card position exceeds this
    value - will automatically execute the method :attr:`~open_card`,
    and if not - will automatically be :attr:`~close_card` method.

    :attr:`max_swipe_x` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `0.3`.
    """

    max_opened_x = NumericProperty("100dp")
    """
    The value of the position the card shifts to when :attr:`~type_swipe`
    is set to `'hand'`.

    :attr:`max_opened_x` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `100dp`.
    """

    type_swipe = OptionProperty("hand", options=("auto", "hand"))
    """
    Type of card opening when swipe. Shift the card to the edge or to
    a set position :attr:`~max_opened_x`. Available options are:
    `'auto'`, `'hand'`.

    :attr:`type_swipe` is a :class:`~kivy.properties.OptionProperty`
    and defaults to `'hand'`.
    """

    # True while a drag gesture is being tracked.
    _opens_process = False
    # True when the current touch should close (not open) the card.
    _to_closed = True

    def __init__(self, **kw):
        # Register the custom event before super().__init__ so it can be
        # bound from KV/constructor kwargs.
        self.register_event_type("on_swipe_complete")
        super().__init__(**kw)

    def _on_swipe_complete(self, *args):
        # Animation on_complete callback: forward to the public event.
        self.dispatch("on_swipe_complete")

    def add_widget(self, widget, index=0, canvas=None):
        # Only the two swipe layers are accepted; any other widget is
        # silently ignored (returns None).
        if isinstance(widget, (MDCardSwipeFrontBox, MDCardSwipeLayerBox)):
            return super().add_widget(widget)

    def on_swipe_complete(self, *args):
        """Called when a swipe of card is completed."""

    def on_anchor(self, instance, value):
        # A right-anchored card starts fully "open" in progress terms,
        # mirroring the left-anchored geometry.
        if value == "right":
            self.open_progress = 1.0
        else:
            self.open_progress = 0.0

    def on_open_progress(self, instance, value):
        # Translate the front layer (children[0]) according to progress;
        # direction depends on the anchored edge.
        if self.anchor == "left":
            self.children[0].x = self.width * value
        else:
            self.children[0].x = self.width * value - self.width

    def on_touch_move(self, touch):
        if self.collide_point(touch.x, touch.y):
            # Start tracking once the touch begins near the anchored edge.
            expr = (
                touch.x < self.swipe_distance
                if self.anchor == "left"
                else touch.x > self.width - self.swipe_distance
            )
            if expr and not self._opens_process:
                self._opens_process = True
                self._to_closed = False
            if self._opens_process:
                # Follow the finger, clamped to [0, 2.5].
                self.open_progress = max(
                    min(self.open_progress + touch.dx / self.width, 2.5), 0
                )
        return super().on_touch_move(touch)

    def on_touch_up(self, touch):
        if self.collide_point(touch.x, touch.y):
            if not self._to_closed:
                # Gesture finished: snap the card fully open or closed.
                self._opens_process = False
                self.complete_swipe()
        return super().on_touch_up(touch)

    def on_touch_down(self, touch):
        if self.collide_point(touch.x, touch.y):
            # Tapping an open card closes it.
            if self.state == "opened":
                self._to_closed = True
                self.close_card()
        return super().on_touch_down(touch)

    def complete_swipe(self):
        # Decide open vs close based on how far the card was dragged
        # relative to max_swipe_x (direction depends on the anchor).
        expr = (
            self.open_progress <= self.max_swipe_x
            if self.anchor == "left"
            else self.open_progress >= self.max_swipe_x
        )
        if expr:
            self.close_card()
        else:
            self.open_card()

    def open_card(self):
        # 'hand' mode opens to a fixed offset; 'auto' mode slides the card
        # completely off to the edge.
        if self.type_swipe == "hand":
            swipe_x = (
                self.max_opened_x
                if self.anchor == "left"
                else -self.max_opened_x
            )
        else:
            swipe_x = self.width if self.anchor == "left" else 0
        anim = Animation(
            x=swipe_x, t=self.opening_transition, d=self.opening_time
        )
        anim.bind(on_complete=self._on_swipe_complete)
        anim.start(self.children[0])
        self.state = "opened"

    def close_card(self):
        anim = Animation(x=0, t=self.closing_transition, d=self.opening_time)
        anim.bind(on_complete=self._reset_open_progress)
        anim.start(self.children[0])
        self.state = "closed"

    def _reset_open_progress(self, *args):
        # Restore the baseline progress for the current anchor and notify
        # listeners that the (closing) swipe finished.
        self.open_progress = 0.0 if self.anchor == "left" else 1.0
        self._to_closed = False
        self.dispatch("on_swipe_complete")
class MDCardSwipeFrontBox(MDCard):
    """Top (visible) layer of :class:`MDCardSwipe`; the part the user drags."""

    pass
class MDCardSwipeLayerBox(BoxLayout):
    """Bottom layer of :class:`MDCardSwipe`, revealed when the card swipes."""

    pass
| 25.567384 | 111 | 0.583097 |
2870f01c76895160f294a089767958ff3b0792d3 | 1,208 | py | Python | src/roles/nfts.py | re-nft/animetas-discord-bot | 29f223729aa6fd01663c6a808a1b4e247c8e9df5 | [
"MIT"
] | null | null | null | src/roles/nfts.py | re-nft/animetas-discord-bot | 29f223729aa6fd01663c6a808a1b4e247c8e9df5 | [
"MIT"
] | 2 | 2021-09-15T15:35:11.000Z | 2021-09-22T15:11:36.000Z | src/roles/nfts.py | re-nft/animetas-discord-bot | 29f223729aa6fd01663c6a808a1b4e247c8e9df5 | [
"MIT"
] | null | null | null | import dotenv
import os
import requests
from web3 import Web3
from utils.utils import get_all_nft_addresses
from env import get_env_file
def verify_wallet_has_token(wallet_address: str, token_address: str) -> bool:
    """Return ``True`` if ``wallet_address`` holds a nonzero balance of the
    token at ``token_address``, queried through Infura's mainnet JSON-RPC
    endpoint via ``eth_call``."""
    dotenv.load_dotenv(get_env_file())
    project_id = os.environ.get("INFURA_PROJECT_ID", "")

    # ABI-encode a balanceOf(address) call: 4-byte function selector followed
    # by the wallet address ('0x' stripped) left-padded to 32 bytes.
    selector = Web3.sha3(text="balanceOf(address)").hex()[0:10]
    call_data = selector + "000000000000000000000000" + wallet_address[2:]

    payload = {
        "method": "eth_call",
        "id": 1,
        "jsonrpc": "2.0",
        "params": [{"to": token_address, "data": call_data}, "latest"],
    }
    response = requests.post("https://mainnet.infura.io/v3/" + project_id, json=payload)
    response.raise_for_status()

    # The RPC result is a hex-encoded 256-bit integer balance.
    balance = int(response.json()["result"], 16)
    return balance > 0
def verify_wallet_has_any_valid_token(address: str, guild_id: str) -> bool:
    """Return ``True`` if ``address`` holds at least one of the NFTs
    configured for the given guild.

    Short-circuits on the first token the wallet is found to hold.
    """
    return any(
        verify_wallet_has_token(address, token_address)
        for token_address in get_all_nft_addresses(guild_id)
    )
a7c841dc7ddafc2fc057ec77cc474213cd0e8a06 | 1,432 | py | Python | venv/lib/python3.6/site-packages/scripts/__init__.py | coolkapil/flask-Api | 53b42dcba298bd94c07dbe18f0f8cb7f5710d3bc | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/scripts/__init__.py | coolkapil/flask-Api | 53b42dcba298bd94c07dbe18f0f8cb7f5710d3bc | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/scripts/__init__.py | coolkapil/flask-Api | 53b42dcba298bd94c07dbe18f0f8cb7f5710d3bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 1999-2015, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
from stua.version import get_version, HG
# Package version metadata. `get_version` builds the full version string
# from the VERSION tuple plus the Mercurial checkout state of this file.
VERSION = (2, 0, 0, "final", 0)
__author__ = "Raffaele Salmaso"
__author_email__ = "raffaele@salmaso.org"
__version__ = get_version(VERSION, HG, __file__)
| 46.193548 | 82 | 0.773743 |
2375ee4f550616ff60d20b87b5773704d8fbbe1e | 4,320 | py | Python | tensorflow/contrib/framework/python/ops/accumulate_n_v2.py | harunpehlivan/tensorflow | 376e2cfdab31f4da251ea2e50992a9bf97fd171b | [
"Apache-2.0"
] | 24 | 2018-02-01T15:49:22.000Z | 2021-01-11T16:31:18.000Z | tensorflow/contrib/framework/python/ops/accumulate_n_v2.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | 3 | 2018-05-09T11:31:58.000Z | 2021-01-27T12:26:21.000Z | tensorflow/contrib/framework/python/ops/accumulate_n_v2.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | 13 | 2018-02-22T21:04:13.000Z | 2020-11-17T11:38:36.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops that will eventually be folded into tensorflow/python/ops/math_ops.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def accumulate_n_v2(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
  wait for all of its inputs to be ready before beginning to sum. This can
  save memory if inputs are ready at different times, since minimum temporary
  storage is proportional to the output size rather than the inputs size.

  Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.

  For example:

  ```python
  a = tf.constant([[1, 2], [3, 4]])
  b = tf.constant([[5, 0], [0, 6]])
  tf.accumulate_n_v2([a, b, a])  # [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n_v2([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
  # [[7, 4],
  #  [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  # Shared error for any malformed `inputs`. BUG FIX: the implicitly
  # concatenated string fragments were missing a separating space, producing
  # "...one Tensorwith the same...".
  _INPUTS_ERR_MSG = ValueError("inputs must be a list of at least one Tensor "
                               "with the same dtype and shape")
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise _INPUTS_ERR_MSG
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise _INPUTS_ERR_MSG
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise _INPUTS_ERR_MSG

  # Merge the optional user-provided shape with every input's static shape;
  # merge_with raises ValueError on incompatibility.
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      shape = shape.merge_with(input_tensor.get_shape())

  # tensor_dtype is for safety only; operator's output type computed in C++
  if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
    raise TypeError("tensor_dtype is {}, but input is of type {}"
                    .format(tensor_dtype, inputs[0].dtype))

  if len(inputs) == 1 and name is None:
    return inputs[0]
  elif len(inputs) == 1 and name is not None:
    # BUG FIX: `array_ops` was referenced without being imported, raising
    # NameError on this branch; the import is now at module level.
    return array_ops.identity(inputs[0], name=name)
  elif context.in_eager_mode():
    # TemporaryVariable not currently supported in eager mode; fall back
    # onto AddN for now.
    # TODO(frreiss) remove this once the lifetime of eager variables gets
    # addressed
    return math_ops.add_n(inputs, name=name)
  else:
    return gen_math_ops._accumulate_nv2(inputs, name=name, shape=shape)
# The following code should eventually be merged into
# tensorflow/python/ops/math_grad.py
@ops.RegisterGradient("AccumulateNV2")
def _AddNGrad(op, grad):
"""Same as gradient for AddN. Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
| 38.571429 | 80 | 0.686574 |
40f924e9361bb79346116b340d44e52b15b8743f | 923 | py | Python | __main__.py | christopher-wolff/News-Analysis | f249ed4654cc3fcebd69f99b4ba577f5afe42a91 | [
"MIT"
] | null | null | null | __main__.py | christopher-wolff/News-Analysis | f249ed4654cc3fcebd69f99b4ba577f5afe42a91 | [
"MIT"
] | null | null | null | __main__.py | christopher-wolff/News-Analysis | f249ed4654cc3fcebd69f99b4ba577f5afe42a91 | [
"MIT"
] | 1 | 2017-11-27T03:55:55.000Z | 2017-11-27T03:55:55.000Z | """Sample use of analyzer module."""
from analyzer import query
from analyzer import scrape_stories
from analyzer import label_articles
from analyzer import train_model
from analyzer import analyze
from analyzer import visualize
if __name__ == '__main__':
    # End-to-end pipeline: query -> scrape -> label -> train -> analyze ->
    # visualize, with a banner printed before each stage.
    print('Requesting articles from NYT API')
    print('================================')
    query(num_queries=10)
    print()
    print('Scraping full article texts from NYT website')
    print('============================================')
    scrape_stories()
    print()
    print('Labeling articles')
    print('=================')
    # rand_labels=True assigns random labels; presumably a placeholder for
    # real annotation — confirm before using results downstream.
    label_articles(reset=True, rand_labels=True)
    print()
    print('Training classifiers')
    print('====================')
    train_model()
    print()
    print('Analyzing data')
    print('==============')
    analyze()
    print()
    print('Visualize results')
    print('=================')
    visualize()
7d8da92be4161ef272c88f041da7f55d9020d7cc | 1,176 | py | Python | train/run_training.py | aws-samples/amazon-mlops-example-tensorflow | 5e35727f486426029c7e3c8ccd64d5292f2a6bd8 | [
"MIT-0"
] | 2 | 2021-08-30T15:53:39.000Z | 2021-08-31T09:38:02.000Z | train/run_training.py | aws-samples/amazon-mlops-example-tensorflow | 5e35727f486426029c7e3c8ccd64d5292f2a6bd8 | [
"MIT-0"
] | null | null | null | train/run_training.py | aws-samples/amazon-mlops-example-tensorflow | 5e35727f486426029c7e3c8ccd64d5292f2a6bd8 | [
"MIT-0"
] | null | null | null |
from sagemaker.tensorflow import TensorFlow
import os
import numpy as np
# set environment: deployment configuration comes from environment
# variables (presumably injected by a CI/CD pipeline — confirm).
src_bucket = os.getenv("BUCKET_NAME")
print(src_bucket)
sm_role = os.getenv("SAGEMAKER_IAM_ROLE")
artifact_path = f's3://{src_bucket}/toxic_comments'
instance = os.getenv("INSTANCE")

# create an estimator object: runs train/train.py in SageMaker script mode
# on a single instance; code and model artifacts go to artifact_path.
estimator = TensorFlow(
    entry_point="train/train.py",
    instance_count=1,
    dependencies=['train/requirements.txt','./inference/etl.py','train/CustomModel.py'],
    output_path = artifact_path,
    model_dir = False,
    code_location = artifact_path,
    #instance_type = "local",
    base_job_name = "comment-classification",
    instance_type = instance,
    framework_version="2.2",
    py_version="py37",
    role = sm_role,
    script_mode =True
)

# train the model (blocks until the SageMaker training job finishes)
estimator.fit(artifact_path)

# get the training job name (prefixed with datetime)
training_job_name = estimator.latest_training_job.name

# write dynamic variables(model artifact location) to a .env file for later use in deploy stages
with open("dynamic_vars/.env", "w") as f:
    f.write("%s=%s\n%s=%s\n" %("job", training_job_name,"tokpath",f"{artifact_path}/tokenizer.pkl"))
    f.close()
9634a24f33400627a5a6d8417dc923da1d5ab07f | 2,299 | py | Python | docker/models/resource.py | glensc/python-docker-py | c66c7f8b0a8ca216e21c9fe1903eb79f4406a93e | [
"Apache-2.0"
] | 2 | 2016-06-28T03:59:36.000Z | 2017-03-16T22:31:29.000Z | docker/models/resource.py | glensc/python-docker-py | c66c7f8b0a8ca216e21c9fe1903eb79f4406a93e | [
"Apache-2.0"
] | null | null | null | docker/models/resource.py | glensc/python-docker-py | c66c7f8b0a8ca216e21c9fe1903eb79f4406a93e | [
"Apache-2.0"
] | null | null | null |
class Model(object):
    """
    A base class for representing a single object on the server.
    """

    # Name of the key in ``attrs`` that holds the object's ID.
    id_attribute = 'Id'

    def __init__(self, attrs=None, client=None, collection=None):
        #: A client pointing at the server that this object is on.
        self.client = client

        #: The collection that this model is part of.
        self.collection = collection

        #: The raw representation of this object from the API
        self.attrs = {} if attrs is None else attrs

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.short_id)

    def __eq__(self, other):
        # Two models are equal when they are the same type and share an ID.
        return isinstance(other, self.__class__) and self.id == other.id

    @property
    def id(self):
        """
        The ID of the object.
        """
        return self.attrs.get(self.id_attribute)

    @property
    def short_id(self):
        """
        The ID of the object, truncated to 10 characters.
        """
        return self.id[:10]

    def reload(self):
        """
        Load this object from the server again and update ``attrs`` with the
        new data.
        """
        refreshed = self.collection.get(self.id)
        self.attrs = refreshed.attrs
class Collection(object):
    """
    A base class for representing all objects of a particular type on the
    server.
    """

    #: The type of object this collection represents, set by subclasses
    model = None

    def __init__(self, client=None):
        #: The client pointing at the server that this collection of objects
        #: is on.
        self.client = client

    def list(self):
        raise NotImplementedError

    def get(self, key):
        raise NotImplementedError

    def create(self, attrs=None):
        raise NotImplementedError

    def prepare_model(self, attrs):
        """
        Create a model from a set of attributes.
        """
        if isinstance(attrs, Model):
            # Already a model instance: adopt it into this collection.
            attrs.client = self.client
            attrs.collection = self
            return attrs
        if isinstance(attrs, dict):
            # Raw API attributes: wrap them in this collection's model type.
            return self.model(attrs=attrs, client=self.client, collection=self)
        raise Exception("Can't create %s from %s" %
                        (self.model.__name__, attrs))
4160e732809ff1cbeaf04aff75e35300853a91bb | 1,664 | py | Python | udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module03-deep learning/lesson02-miniflow/exercise08-stochastic gradient descent/nn1.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module03-deep learning/lesson02-miniflow/exercise08-stochastic gradient descent/nn1.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module03-deep learning/lesson02-miniflow/exercise08-stochastic gradient descent/nn1.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | """
# !/usr/bin/env python
# -*- coding: utf-8 -*-
@Time : 2022/3/26 17:16
@File : nn1.py
"""
"""
Have fun with the number of epochs!
Be warned that if you increase them too much,
the VM will time out :)
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.utils import shuffle, resample
from miniflow1 import *
# Load data: Boston housing features (X_) and target prices (y_).
data = load_boston()
X_ = data['data']
y_ = data['target']

# Normalize data to zero mean and unit variance per feature.
X_ = (X_ - np.mean(X_, axis=0)) / np.std(X_, axis=0)

n_features = X_.shape[1]
n_hidden = 10
# Random weight initialization; biases start at zero.
W1_ = np.random.randn(n_features, n_hidden)
b1_ = np.zeros(n_hidden)
W2_ = np.random.randn(n_hidden, 1)
b2_ = np.zeros(1)

# Neural network: Linear -> Sigmoid -> Linear, trained with MSE loss.
X, y = Input(), Input()
W1, b1 = Input(), Input()
W2, b2 = Input(), Input()

l1 = Linear(X, W1, b1)
s1 = Sigmoid(l1)
l2 = Linear(s1, W2, b2)
cost = MSE(y, l2)

# Initial values bound to each Input node.
feed_dict = {
    X: X_,
    y: y_,
    W1: W1_,
    b1: b1_,
    W2: W2_,
    b2: b2_
}

epochs = 1000
# Total number of examples
m = X_.shape[0]
batch_size = 11
steps_per_epoch = m // batch_size

# Order the graph nodes so forward/backward passes visit dependencies first.
graph = topological_sort(feed_dict)
trainables = [W1, b1, W2, b2]

print("Total number of examples = {}".format(m))

# Step 4: SGD training loop — sample, forward/backward, update, repeat.
for i in range(epochs):
    loss = 0
    for j in range(steps_per_epoch):
        # Step 1
        # Randomly sample a batch of examples
        X_batch, y_batch = resample(X_, y_, n_samples=batch_size)

        # Reset value of X and y Inputs
        X.value = X_batch
        y.value = y_batch

        # Step 2
        forward_and_backward(graph)

        # Step 3
        sgd_update(trainables)

        # The last node in topological order is the cost node.
        loss += graph[-1].value

    print("Epoch: {}, Loss: {:.3f}".format(i+1, loss/steps_per_epoch))
3bd7f225c64ff2bc210c8ef4433fc43831df7e6a | 2,193 | py | Python | meraki/aio/api/malware_settings.py | NoFliesOnYou/dashboard-api-python | 3185d0e8a9a38eba9127ac640dcbb02444e7adf2 | [
"MIT"
] | null | null | null | meraki/aio/api/malware_settings.py | NoFliesOnYou/dashboard-api-python | 3185d0e8a9a38eba9127ac640dcbb02444e7adf2 | [
"MIT"
] | 3 | 2020-11-08T08:50:59.000Z | 2021-12-13T20:47:15.000Z | flask/meraki/aio/api/malware_settings.py | cyberdevnet/mer-hacker | a7dddd03c5b02a2f8c84d711b69868d2b94f1f99 | [
"MIT"
] | null | null | null | class AsyncMalwareSettings:
def __init__(self, session):
super().__init__()
self._session = session
async def getNetworkSecurityMalwareSettings(self, networkId: str):
"""
**Returns all supported malware settings for an MX network**
https://developer.cisco.com/meraki/api/#!get-network-security-malware-settings
- networkId (string)
"""
metadata = {
'tags': ['Malware settings'],
'operation': 'getNetworkSecurityMalwareSettings',
}
resource = f'/networks/{networkId}/security/malwareSettings'
return await self._session.get(metadata, resource)
async def updateNetworkSecurityMalwareSettings(self, networkId: str, mode: str, **kwargs):
"""
**Set the supported malware settings for an MX network**
https://developer.cisco.com/meraki/api/#!update-network-security-malware-settings
- networkId (string)
- mode (string): Set mode to 'enabled' to enable malware prevention, otherwise 'disabled'
- allowedUrls (array): The urls that should be permitted by the malware detection engine. If omitted, the current config will remain unchanged. This is available only if your network supports AMP whitelisting
- allowedFiles (array): The sha256 digests of files that should be permitted by the malware detection engine. If omitted, the current config will remain unchanged. This is available only if your network supports AMP whitelisting
"""
kwargs.update(locals())
if 'mode' in kwargs:
options = ['enabled', 'disabled']
assert kwargs['mode'] in options, f'''"mode" cannot be "{kwargs['mode']}", & must be set to one of: {options}'''
metadata = {
'tags': ['Malware settings'],
'operation': 'updateNetworkSecurityMalwareSettings',
}
resource = f'/networks/{networkId}/security/malwareSettings'
body_params = ['mode', 'allowedUrls', 'allowedFiles']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return await self._session.put(metadata, resource, payload)
| 43.86 | 236 | 0.648883 |
f4a2fcfe754676982ae5eee1f6659028c2c7f23d | 12,043 | py | Python | src/snps/io/writer.py | arvkevi/snps | b3ebe4c5985c881314495d8040c28faafd9bb6d9 | [
"BSD-3-Clause"
] | null | null | null | src/snps/io/writer.py | arvkevi/snps | b3ebe4c5985c881314495d8040c28faafd9bb6d9 | [
"BSD-3-Clause"
] | null | null | null | src/snps/io/writer.py | arvkevi/snps | b3ebe4c5985c881314495d8040c28faafd9bb6d9 | [
"BSD-3-Clause"
] | null | null | null | """ Class for writing SNPs.
"""
"""
BSD 3-Clause License
Copyright (c) 2019, Andrew Riha
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import datetime
import logging
import numpy as np
import pandas as pd
import snps
from snps.utils import save_df_as_csv, clean_str
logger = logging.getLogger(__name__)
class Writer:
    """ Class for writing SNPs to files. """
    def __init__(self, snps=None, filename="", vcf=False, atomic=True, **kwargs):
        """ Initialize a `Writer`.
        Parameters
        ----------
        snps : SNPs
            SNPs to save to file or write to buffer
        filename : str or buffer
            filename for file to save or buffer to write to
        vcf : bool
            flag to save file as VCF
        atomic : bool
            atomically write output to a file on local filesystem
        **kwargs
            additional parameters to `pandas.DataFrame.to_csv`
        """
        self._snps = snps
        self._filename = filename
        self._vcf = vcf
        self._atomic = atomic
        self._kwargs = kwargs
    def write(self):
        # Dispatch on output format. `_write_vcf` already returns a tuple
        # (path, discrepant_vcf_position); wrap the CSV path in a 1-tuple so
        # callers always receive a tuple either way.
        if self._vcf:
            return self._write_vcf()
        else:
            return (self._write_csv(),)
    @classmethod
    def write_file(cls, snps=None, filename="", vcf=False, atomic=True, **kwargs):
        """ Save SNPs to file.
        Convenience wrapper: constructs a `Writer` and immediately writes.
        Parameters
        ----------
        snps : SNPs
            SNPs to save to file or write to buffer
        filename : str or buffer
            filename for file to save or buffer to write to
        vcf : bool
            flag to save file as VCF
        atomic : bool
            atomically write output to a file on local filesystem
        **kwargs
            additional parameters to `pandas.DataFrame.to_csv`
        Returns
        -------
        str
            path to file in output directory if SNPs were saved, else empty str
        discrepant_vcf_position : pd.DataFrame
            SNPs with discrepant positions discovered while saving VCF
        """
        w = cls(snps=snps, filename=filename, vcf=vcf, atomic=atomic, **kwargs)
        return w.write()
    def _write_csv(self):
        """ Write SNPs to a CSV file.
        Returns
        -------
        str
            path to file in output directory if SNPs were saved, else empty str
        """
        filename = self._filename
        if not filename:
            # Default extension is .txt unless the caller requested
            # comma-separated output via the to_csv `sep` kwarg.
            ext = ".txt"
            if "sep" in self._kwargs and self._kwargs["sep"] == ",":
                ext = ".csv"
            filename = f"{clean_str(self._snps.source)}_{self._snps.assembly}{ext}"
        # Metadata emitted as comment lines at the top of the file.
        comment = (
            f"# Source(s): {self._snps.source}\n"
            f"# Build: {self._snps.build}\n"
            f"# Build Detected: { self._snps.build_detected}\n"
            f"# Phased: {self._snps.phased}\n"
            f"# SNPs: {self._snps.count}\n"
            f"# Chromosomes: {self._snps.chromosomes_summary}\n"
        )
        # Normalize `header`: header=True (or unspecified) means the standard
        # three column labels; header=False suppresses the header row entirely.
        if "header" in self._kwargs:
            if isinstance(self._kwargs["header"], bool):
                if self._kwargs["header"]:
                    self._kwargs["header"] = ["chromosome", "position", "genotype"]
        else:
            self._kwargs["header"] = ["chromosome", "position", "genotype"]
        return save_df_as_csv(
            self._snps._snps,
            self._snps._output_dir,
            filename,
            comment=comment,
            atomic=self._atomic,
            **self._kwargs,
        )
    def _write_vcf(self):
        """ Write SNPs to a VCF file.
        References
        ----------
        1. The Variant Call Format (VCF) Version 4.2 Specification, 8 Mar 2019,
           https://samtools.github.io/hts-specs/VCFv4.2.pdf
        Returns
        -------
        str
            path to file in output directory if SNPs were saved, else empty str
        discrepant_vcf_position : pd.DataFrame
            SNPs with discrepant positions discovered while saving VCF
        """
        filename = self._filename
        if not filename:
            filename = f"{clean_str(self._snps.source)}_{self._snps.assembly}{'.vcf'}"
        comment = (
            f"##fileformat=VCFv4.2\n"
            f'##fileDate={datetime.datetime.utcnow().strftime("%Y%m%d")}\n'
            f'##source="{self._snps.source}; snps v{snps.__version__}; https://pypi.org/project/snps/"\n'
        )
        # Only chromosomes for which reference sequence data is available.
        reference_sequence_chroms = (
            "1",
            "2",
            "3",
            "4",
            "5",
            "6",
            "7",
            "8",
            "9",
            "10",
            "11",
            "12",
            "13",
            "14",
            "15",
            "16",
            "17",
            "18",
            "19",
            "20",
            "21",
            "22",
            "X",
            "Y",
            "MT",
        )
        df = self._snps.snps
        p = self._snps._parallelizer
        tasks = []
        # skip insertions and deletions
        df = df.drop(
            df.loc[
                df["genotype"].notnull()
                & (
                    (df["genotype"].str[0] == "I")
                    | (df["genotype"].str[0] == "D")
                    | (df["genotype"].str[1] == "I")
                    | (df["genotype"].str[1] == "D")
                )
            ].index
        )
        chroms_to_drop = []
        for chrom in df["chrom"].unique():
            if chrom not in reference_sequence_chroms:
                chroms_to_drop.append(chrom)
                continue
            # One task per chromosome so rows can be converted in parallel.
            tasks.append(
                {
                    "resources": self._snps._resources,
                    "assembly": self._snps.assembly,
                    "chrom": chrom,
                    "snps": pd.DataFrame(df.loc[(df["chrom"] == chrom)]),
                }
            )
        # drop chromosomes without reference sequence data (e.g., unassigned PAR)
        for chrom in chroms_to_drop:
            df = df.drop(df.loc[df["chrom"] == chrom].index)
        # create the VCF representation for SNPs
        results = p(self._create_vcf_representation, tasks)
        contigs = []
        vcf = [pd.DataFrame()]
        discrepant_vcf_position = [pd.DataFrame()]
        for result in list(results):
            contigs.append(result["contig"])
            vcf.append(result["vcf"])
            discrepant_vcf_position.append(result["discrepant_vcf_position"])
        vcf = pd.concat(vcf)
        discrepant_vcf_position = pd.concat(discrepant_vcf_position)
        # Assemble the full VCF header: meta lines, contigs, FORMAT, column row.
        comment += "".join(contigs)
        comment += '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
        comment += "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMPLE\n"
        return (
            save_df_as_csv(
                vcf,
                self._snps._output_dir,
                filename,
                comment=comment,
                prepend_info=False,
                header=False,
                index=False,
                na_rep=".",
                sep="\t",
            ),
            discrepant_vcf_position,
        )
    def _create_vcf_representation(self, task):
        # Build the VCF rows for one chromosome (invoked per task, possibly
        # in a worker process via the parallelizer).
        resources = task["resources"]
        assembly = task["assembly"]
        chrom = task["chrom"]
        snps = task["snps"]
        if len(snps.loc[snps["genotype"].notnull()]) == 0:
            # No genotyped SNPs on this chromosome; emit no contig or rows.
            return {
                "contig": "",
                "vcf": pd.DataFrame(),
                "discrepant_vcf_position": pd.DataFrame(),
            }
        seqs = resources.get_reference_sequences(assembly, [chrom])
        seq = seqs[chrom]
        contig = f'##contig=<ID={seq.ID},URL={seq.url},length={seq.length},assembly={seq.build},md5={seq.md5},species="{seq.species}">\n'
        snps = snps.reset_index()
        df = pd.DataFrame(
            columns=[
                "CHROM",
                "POS",
                "ID",
                "REF",
                "ALT",
                "QUAL",
                "FILTER",
                "INFO",
                "FORMAT",
                "SAMPLE",
            ]
        )
        df = df.astype(
            {
                "CHROM": object,
                "POS": np.uint32,
                "ID": object,
                "REF": object,
                "ALT": object,
                "QUAL": np.float32,
                "FILTER": object,
                "INFO": object,
                "FORMAT": object,
                "SAMPLE": object,
            }
        )
        df["CHROM"] = snps["chrom"]
        df["POS"] = snps["pos"]
        df["ID"] = snps["rsid"]
        # drop SNPs with discrepant positions (outside reference sequence)
        discrepant_vcf_position = snps.loc[
            (snps.pos - seq.start < 0) | (snps.pos - seq.start > seq.length - 1)
        ]
        df.drop(discrepant_vcf_position.index, inplace=True)
        # Look up the reference base for each position from the raw sequence.
        # https://stackoverflow.com/a/24838429
        df["REF"] = list(map(chr, seq.sequence[df.POS - seq.start]))
        df["FORMAT"] = "GT"
        # Release the (large) reference sequence as soon as it is no longer needed.
        seq.clear()
        df["genotype"] = snps["genotype"]
        temp = df.loc[df["genotype"].notnull()]
        # https://stackoverflow.com/a/19976286
        df.loc[df["genotype"].notnull(), "ALT"] = np.vectorize(self._compute_alt)(
            temp["REF"], temp["genotype"]
        )
        temp = df.loc[df["genotype"].notnull()]
        df.loc[df["genotype"].notnull(), "SAMPLE"] = np.vectorize(
            self._compute_genotype
        )(temp["REF"], temp["ALT"], temp["genotype"])
        # Rows without a genotype get the VCF missing-genotype value.
        df.loc[df["SAMPLE"].isnull(), "SAMPLE"] = "./."
        del df["genotype"]
        return {
            "contig": contig,
            "vcf": df,
            "discrepant_vcf_position": discrepant_vcf_position,
        }
    def _compute_alt(self, ref, genotype):
        # Derive the ALT column value from the genotype's distinct alleles.
        genotype_alleles = list(set(genotype))
        if ref in genotype_alleles:
            if len(genotype_alleles) == 1:
                # Homozygous reference: no alternate allele to report.
                return "N"
            else:
                genotype_alleles.remove(ref)
                return genotype_alleles.pop(0)
        else:
            # Reference allele not observed; report all alleles as alternates.
            genotype_alleles.sort()
            return ",".join(genotype_alleles)
    def _compute_genotype(self, ref, alt, genotype):
        # Translate allele letters into VCF genotype indices (0 = REF,
        # 1.. = ALT order), joined by '|' when phased, '/' otherwise.
        alleles = [ref]
        if self._snps.phased:
            separator = "|"
        else:
            separator = "/"
        if pd.notna(alt):
            alleles.extend(alt.split(","))
        if len(genotype) == 2:
            return (
                f"{alleles.index(genotype[0])}{separator}{alleles.index(genotype[1])}"
            )
        else:
            return f"{alleles.index(genotype[0])}"
| 30.643766 | 137 | 0.530184 |
f879a29b16cc6a23ce93d78d951ea32b8cda73a4 | 2,088 | py | Python | examples/exp_configs/non_rl/ring_with_control.py | georgegunter/flow | 15848ec9bafd250364a51fa162786037645b19bf | [
"MIT"
] | null | null | null | examples/exp_configs/non_rl/ring_with_control.py | georgegunter/flow | 15848ec9bafd250364a51fa162786037645b19bf | [
"MIT"
] | null | null | null | examples/exp_configs/non_rl/ring_with_control.py | georgegunter/flow | 15848ec9bafd250364a51fa162786037645b19bf | [
"MIT"
] | null | null | null | """Used as an example of ring experiment.
This example consists of 22 IDM cars on a ring creating shockwaves.
"""
from flow.controllers import FollowerStopper, IDMController, ContinuousRouter
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import VehicleParams
from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(IDMController, {'noise':.1}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=21)
vehicles.add(
color='red',
veh_id="AV",
acceleration_controller=(FollowerStopper, {'v_des':10.0}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=1)
flow_params = dict(
# name of the experiment
exp_tag='ring',
# name of the flow environment the experiment is running on
env_name=AccelEnv,
# name of the network class the experiment is running on
network=RingNetwork,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
render=True,
sim_step=0.1,
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=3000,
warmup_steps=750,
additional_params=ADDITIONAL_ENV_PARAMS,
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
additional_params={
"length": 260,
"lanes": 1,
"speed_limit": 30,
"resolution": 40,
}, ),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(
bunching=20,
),
)
| 28.216216 | 77 | 0.694923 |
b50019d44e11f4296027cb01523b4b68844c9c25 | 7,141 | py | Python | api/tests/test_views.py | msb/sms-webapp | b663e850f2cad0c10ecc1d52a530c0de5da9cb6b | [
"MIT"
] | null | null | null | api/tests/test_views.py | msb/sms-webapp | b663e850f2cad0c10ecc1d52a530c0de5da9cb6b | [
"MIT"
] | null | null | null | api/tests/test_views.py | msb/sms-webapp | b663e850f2cad0c10ecc1d52a530c0de5da9cb6b | [
"MIT"
] | null | null | null | from unittest import mock
from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework.test import APIRequestFactory, force_authenticate
from smsjwplatform.models import CachedResource
from .. import views
class ViewTestCase(TestCase):
    # Shared scaffolding for the API view tests: a request factory, a test
    # user, and mocked jwplatform/person-lookup dependencies. Subclasses rely
    # on the attribute names set here (factory, user, client,
    # get_person_for_user), so they must not be renamed.
    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = get_user_model().objects.create_user(username='test0001')
        self.patch_get_jwplatform_client()
        self.patch_get_person_for_user()
        # `self.client` is the mocked jwplatform client returned by the
        # patched factory; tests configure its return values directly.
        self.client = self.get_jwplatform_client()
    def patch_get_jwplatform_client(self):
        # Replace the jwplatform client factory with a mock for the duration
        # of the test; the patcher is stopped automatically on cleanup.
        self.get_jwplatform_client_patcher = mock.patch(
            'smsjwplatform.jwplatform.get_jwplatform_client')
        self.get_jwplatform_client = self.get_jwplatform_client_patcher.start()
        self.addCleanup(self.get_jwplatform_client_patcher.stop)
    def patch_get_person_for_user(self):
        # Stub the ACL person lookup with a fixed institution/group payload.
        self.get_person_for_user_patcher = mock.patch('smsjwplatform.acl.get_person_for_user')
        self.get_person_for_user = self.get_person_for_user_patcher.start()
        self.get_person_for_user.return_value = {
            'institutions': [{'instid': 'UIS'}],
            'groups': [{'groupid': '12345', 'name': 'uis-members'}]
        }
        self.addCleanup(self.get_person_for_user_patcher.stop)
class ProfileViewTestCase(ViewTestCase):
    # Tests for the profile endpoint: anonymous vs authenticated payloads.
    def setUp(self):
        super().setUp()
        self.view = views.ProfileView().as_view()
    def test_anonymous(self):
        """An anonymous user should have is_anonymous set to True."""
        response = self.view(self.factory.get('/'))
        self.assertTrue(response.data['is_anonymous'])
    def test_authenticated(self):
        """An authenticated user should have is_anonymous set to False and username set."""
        request = self.factory.get('/')
        force_authenticate(request, user=self.user)
        response = self.view(request)
        self.assertFalse(response.data['is_anonymous'])
        self.assertEqual(response.data['username'], self.user.username)
    def test_urls(self):
        """The profile should include a login URL."""
        response = self.view(self.factory.get('/'))
        self.assertIn('login', response.data['urls'])
class CollectionListViewTestCase(ViewTestCase):
    # Tests for the collection list endpoint backed by the mocked
    # jwplatform channels API.
    def setUp(self):
        super().setUp()
        self.view = views.CollectionListView().as_view()
        # Configure the mocked channels.list call to return the fixture.
        self.client.channels.list.return_value = {
            'status': 'ok',
            'channels': CHANNELS_FIXTURE,
            'limit': 10,
            'offset': 0,
            'total': 30,
        }
    def test_basic_list(self):
        """A user should get all SMS channels back."""
        response_data = self.view(self.factory.get('/')).data
        self.assertIn('results', response_data)
        # We have some results
        self.assertNotEqual(len(response_data['results']), 0)
        # How many results do we expect: only channels carrying an
        # sms_collection_id custom prop should be visible.
        visible_channels = [
            c for c in CHANNELS_FIXTURE
            if c.get('custom', {}).get('sms_collection_id') is not None
        ]
        # How many do we get
        self.assertEqual(len(response_data['results']), len(visible_channels))
    def test_jwplatform_error(self):
        """A JWPlatform error should be reported as a bad gateway error."""
        self.client.channels.list.return_value = {'status': 'error'}
        response = self.view(self.factory.get('/'))
        self.assertEqual(response.status_code, 502)
    def test_search(self):
        """A search option should be passed through to the API call."""
        self.view(self.factory.get('/?search=foo'))
        call_args = self.client.channels.list.call_args
        self.assertIsNotNone(call_args)
        self.assertIn('search', call_args[1])
        self.assertEqual(call_args[1]['search'], 'foo')
class MediaListViewTestCase(ViewTestCase):
    # Tests for the media list endpoint backed by CachedResource rows.
    def setUp(self):
        super().setUp()
        self.view = views.MediaListView().as_view()
        # Populate the cached-resource table from the fixture.
        for video in VIDEOS_FIXTURE:
            CachedResource.objects.create(type=CachedResource.VIDEO, key=video['key'], data=video)
    def test_basic_list(self):
        """A user should get all SMS media back."""
        response_data = self.view(self.factory.get('/')).data
        self.assertIn('results', response_data)
        # We have some results
        self.assertNotEqual(len(response_data['results']), 0)
        # How many results do we expect: anonymous users only see media with
        # an sms_media_id and a WORLD-visible ACL.
        visible_videos = [
            v for v in VIDEOS_FIXTURE if (
                v.get('custom', {}).get('sms_media_id') is not None
                and 'WORLD' in v.get('custom', {}).get('sms_acl', '')
            )
        ]
        # How many do we get
        self.assertEqual(len(response_data['results']), len(visible_videos))
    def test_auth_list(self):
        """An authenticated user should get more SMS media back."""
        unauth_response_data = self.view(self.factory.get('/')).data
        request = self.factory.get('/')
        force_authenticate(request, user=self.user)
        auth_response_data = self.view(request).data
        # Authorised users have more results
        self.assertGreater(
            len(auth_response_data['results']), len(unauth_response_data['results']))
# Mocked jwplatform channel payloads: the first two carry an
# sms_collection_id custom prop and should be listed; the third has no
# custom prop and should be filtered out.
CHANNELS_FIXTURE = [
    {
        'key': 'mock1',
        'title': 'Mock 1',
        'description': 'Description for mock 1',
        'custom': {
            'sms_collection_id': 'collection:1234:',
        },
    },
    {
        'key': 'mock2',
        'title': 'Mock 2',
        'description': 'Description for mock 2',
        'custom': {
            'sms_collection_id': 'collection:1235:',
        },
    },
    {
        'key': 'mock3',
        'title': 'Mock 3',
        'description': 'Not a SMS collection',
    },
]
# Mocked jwplatform video payloads covering the visibility cases exercised
# by MediaListViewTestCase: two WORLD-visible videos, one with a malformed
# ACL, one with no sms_media_id at all, and one restricted to CAM users.
VIDEOS_FIXTURE = [
    {
        'key': 'mock1',
        'title': 'Mock 1',
        'description': 'Description for mock 1',
        'date': 1234567,
        'duration': 54,
        'custom': {
            'sms_media_id': 'media:1234:',
            'sms_acl': 'acl:WORLD:',
        },
    },
    {
        'key': 'mock2',
        'title': 'Mock 2',
        'description': 'Description for mock 2',
        'date': 1234567,
        'duration': 54,
        'custom': {
            'sms_media_id': 'media:1235:',
            'sms_acl': 'acl:WORLD:',
        },
    },
    # See uisautomation/sms2jwplayer#30. There is a video with an odd ACL.
    {
        'key': 'oddacl',
        'title': 'Mock 2',
        'description': 'Description for mock 2',
        'date': 1234567,
        'duration': 54,
        'custom': {
            'sms_media_id': 'media:1235:',
            'sms_acl': "acl:['']:",
        },
    },
    {
        'key': 'mock3',
        'title': 'Mock 3',
        'description': 'Not a SMS collection',
        'date': 1234567,
        'duration': 54,
        'custom': {},
    },
    {
        'key': 'mock4',
        'title': 'Mock 4',
        'description': 'Description for mock 4',
        'date': 1234567,
        'duration': 54,
        'custom': {
            'sms_media_id': 'media:1435:',
            'sms_acl': 'acl:CAM:',
        },
    },
]
| 32.022422 | 98 | 0.589413 |
cf3d06ac9451b947d76323f281d664eb112b9df9 | 2,906 | py | Python | tutorial-contents/402_RNN_classification.py | deep1leaning/tensorflow | 7bc80fa9e1d29f670bf0e7b1201093dbea5fd7c1 | [
"MIT"
] | 4,612 | 2017-05-11T06:53:00.000Z | 2022-03-30T13:07:33.000Z | tutorial-contents/402_RNN_classification.py | vvejmin/Tensorflow-Tutorial | 7bc80fa9e1d29f670bf0e7b1201093dbea5fd7c1 | [
"MIT"
] | 25 | 2017-05-24T12:53:00.000Z | 2020-02-29T10:02:26.000Z | tutorial-contents/402_RNN_classification.py | vvejmin/Tensorflow-Tutorial | 7bc80fa9e1d29f670bf0e7b1201093dbea5fd7c1 | [
"MIT"
] | 2,213 | 2017-05-11T13:20:33.000Z | 2022-03-31T02:50:51.000Z | """
Know more, visit my Python tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
Dependencies:
tensorflow: 1.1.0
matplotlib
numpy
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
# Fix seeds so the tutorial run is reproducible.
tf.set_random_seed(1)
np.random.seed(1)
# Hyper Parameters
BATCH_SIZE = 64
TIME_STEP = 28          # rnn time step / image height
INPUT_SIZE = 28         # rnn input size / image width
LR = 0.01               # learning rate
# data
mnist = input_data.read_data_sets('./mnist', one_hot=True)   # images have been normalized to range (0,1)
test_x = mnist.test.images[:2000]
test_y = mnist.test.labels[:2000]
# plot one example
print(mnist.train.images.shape)     # (55000, 28 * 28)
print(mnist.train.labels.shape)     # (55000, 10)
plt.imshow(mnist.train.images[0].reshape((28, 28)), cmap='gray')
plt.title('%i' % np.argmax(mnist.train.labels[0]))
plt.show()
# tensorflow placeholders (TF1 graph-mode API)
tf_x = tf.placeholder(tf.float32, [None, TIME_STEP * INPUT_SIZE])       # shape(batch, 784)
# Each image row is fed to the RNN as one time step.
image = tf.reshape(tf_x, [-1, TIME_STEP, INPUT_SIZE])                   # (batch, height, width)
tf_y = tf.placeholder(tf.int32, [None, 10])                             # input y
# RNN
rnn_cell = tf.nn.rnn_cell.LSTMCell(num_units=64)
outputs, (h_c, h_n) = tf.nn.dynamic_rnn(
    rnn_cell,                   # cell you have chosen
    image,                      # input
    initial_state=None,         # the initial hidden state
    dtype=tf.float32,           # must given if set initial_state = None
    time_major=False,           # False: (batch, time step, input); True: (time step, batch, input)
)
output = tf.layers.dense(outputs[:, -1, :], 10)              # output based on the last output step
loss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=output)           # compute cost
train_op = tf.train.AdamOptimizer(LR).minimize(loss)
accuracy = tf.metrics.accuracy(          # return (acc, update_op), and create 2 local variables
    labels=tf.argmax(tf_y, axis=1), predictions=tf.argmax(output, axis=1),)[1]
sess = tf.Session()
# tf.metrics.accuracy uses local variables, so both initializers are needed.
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())     # the local var is for accuracy_op
sess.run(init_op)     # initialize var in graph
for step in range(1200):    # training
    b_x, b_y = mnist.train.next_batch(BATCH_SIZE)
    _, loss_ = sess.run([train_op, loss], {tf_x: b_x, tf_y: b_y})
    if step % 50 == 0:      # testing
        accuracy_ = sess.run(accuracy, {tf_x: test_x, tf_y: test_y})
        print('train loss: %.4f' % loss_, '| test accuracy: %.2f' % accuracy_)
# print 10 predictions from test data
test_output = sess.run(output, {tf_x: test_x[:10]})
pred_y = np.argmax(test_output, 1)
print(pred_y, 'prediction number')
print(np.argmax(test_y[:10], 1), 'real number')
251063a41b4c25b7adf4bde8c75ae8c1d10c99ef | 2,722 | py | Python | test.py | Lamply/SSD | f62fec376a6ce67b28944eaa915ae223bb934d5b | [
"MIT"
] | null | null | null | test.py | Lamply/SSD | f62fec376a6ce67b28944eaa915ae223bb934d5b | [
"MIT"
] | null | null | null | test.py | Lamply/SSD | f62fec376a6ce67b28944eaa915ae223bb934d5b | [
"MIT"
] | null | null | null | import argparse
import logging
import os
import torch
import torch.utils.data
from ssd.config import cfg
from ssd.engine.inference import do_evaluation
from ssd.modeling.detector import build_detection_model
from ssd.utils import dist_util
from ssd.utils.checkpoint import CheckPointer
from ssd.utils.dist_util import synchronize
from ssd.utils.logger import setup_logger
import multiprocessing
def evaluation(cfg, ckpt, distributed):
    """Build the detection model, restore weights, and run evaluation.

    Args:
        cfg: frozen config node; provides OUTPUT_DIR and MODEL.DEVICE.
        ckpt: path to a specific checkpoint, or None to load the latest one.
        distributed: whether evaluation runs across multiple processes.
    """
    inference_logger = logging.getLogger("SSD.inference")
    model = build_detection_model(cfg)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=inference_logger)
    model.to(torch.device(cfg.MODEL.DEVICE))
    # With no explicit checkpoint, fall back to the most recent one on disk.
    checkpointer.load(ckpt, use_latest=ckpt is None)
    do_evaluation(cfg, model, distributed)
def main():
    """Parse CLI arguments, set up (optionally distributed) state, merge the
    configuration, and run evaluation."""
    parser = argparse.ArgumentParser(description='SSD Evaluation on VOC and COCO dataset.')
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--ckpt",
        help="The path to the checkpoint for test, default is the latest checkpoint.",
        default=None,
        type=str,
    )
    # NOTE(review): --output_dir is parsed but never used in this function;
    # results go to cfg.OUTPUT_DIR instead. Confirm whether it can be removed.
    parser.add_argument("--output_dir", default="eval_results", type=str, help="The directory to store evaluation results.")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # WORLD_SIZE, if set (e.g. by a distributed launcher), gives the process count.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1
    if torch.cuda.is_available():
        # This flag allows you to enable the inbuilt cudnn auto-tuner to
        # find the best algorithm to use for your hardware.
        torch.backends.cudnn.benchmark = True
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()
    # Merge file-based and command-line overrides into the config, then freeze.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))
    evaluation(cfg, ckpt=args.ckpt, distributed=distributed)
if __name__ == '__main__':
    # NOTE(review): forces the 'spawn' start method (second positional arg is
    # force=True) — presumably for CUDA-safe worker processes; confirm.
    multiprocessing.set_start_method('spawn',True)
    main()
| 30.931818 | 124 | 0.693975 |
f927693039c01082a12cdd3db0003d67da2da044 | 18,368 | py | Python | mesonbuild/scripts/depfixer.py | TRUEPIC/meson | f8d35aa42073e2bd03020b0b5a119ee96fa073c2 | [
"Apache-2.0"
] | 44 | 2022-03-16T08:32:31.000Z | 2022-03-31T16:02:35.000Z | mesonbuild/scripts/depfixer.py | TRUEPIC/meson | f8d35aa42073e2bd03020b0b5a119ee96fa073c2 | [
"Apache-2.0"
] | 1 | 2022-03-29T02:30:28.000Z | 2022-03-30T03:40:46.000Z | mesonbuild/scripts/depfixer.py | TRUEPIC/meson | f8d35aa42073e2bd03020b0b5a119ee96fa073c2 | [
"Apache-2.0"
] | 18 | 2022-03-19T04:41:04.000Z | 2022-03-31T03:32:12.000Z | # Copyright 2013-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, struct
import shutil, subprocess
from ..mesonlib import OrderedSet
# ELF section header type for string tables (sh_type value).
SHT_STRTAB = 3
# ELF dynamic-section entry tags (d_tag values) manipulated below.
DT_NEEDED = 1
DT_RPATH = 15
DT_RUNPATH = 29
DT_STRTAB = 5
DT_SONAME = 14
# MIPS-specific tag whose value is relative to its own entry's offset; it
# must be adjusted whenever entries are reordered (see remove_rpath_entry).
DT_MIPS_RLD_MAP_REL = 1879048245
# Global cache for tools
INSTALL_NAME_TOOL = False
class DataSizes:
    """struct format strings and byte sizes for ELF primitive types.

    The endianness prefix ('<' or '>') is chosen from ``is_le``; the
    pointer-width dependent types (Addr, Off, XWord, Sxword) depend on
    ``ptrsize``. The 64-bit-only XWord/Sxword attributes are set only for
    ``ptrsize == 64``, matching how 32-bit callers use them.
    """
    def __init__(self, ptrsize, is_le):
        prefix = '<' if is_le else '>'
        # Fixed-width types, identical for 32- and 64-bit ELF.
        self.Half = prefix + 'h'
        self.HalfSize = 2
        self.Word = prefix + 'I'
        self.WordSize = 4
        self.Sword = prefix + 'i'
        self.SwordSize = 4
        # Pointer-width dependent types.
        if ptrsize == 64:
            self.Addr = prefix + 'Q'
            self.AddrSize = 8
            self.Off = prefix + 'Q'
            self.OffSize = 8
            self.XWord = prefix + 'Q'
            self.XWordSize = 8
            self.Sxword = prefix + 'q'
            self.SxwordSize = 8
        else:
            self.Addr = prefix + 'I'
            self.AddrSize = 4
            self.Off = prefix + 'I'
            self.OffSize = 4
class DynamicEntry(DataSizes):
    """One entry of the ELF .dynamic section: a tag (d_tag) and a value."""
    def __init__(self, ifile, ptrsize, is_le):
        super().__init__(ptrsize, is_le)
        self.ptrsize = ptrsize
        # Select the on-disk encodings once, then read tag followed by value.
        if ptrsize == 64:
            tag_fmt, tag_size = self.Sxword, self.SxwordSize
            val_fmt, val_size = self.XWord, self.XWordSize
        else:
            tag_fmt, tag_size = self.Sword, self.SwordSize
            val_fmt, val_size = self.Word, self.WordSize
        self.d_tag = struct.unpack(tag_fmt, ifile.read(tag_size))[0]
        self.val = struct.unpack(val_fmt, ifile.read(val_size))[0]
    def write(self, ofile):
        """Serialize the entry back in the same encoding it was read with."""
        if self.ptrsize == 64:
            ofile.write(struct.pack(self.Sxword, self.d_tag))
            ofile.write(struct.pack(self.XWord, self.val))
        else:
            ofile.write(struct.pack(self.Sword, self.d_tag))
            ofile.write(struct.pack(self.Word, self.val))
class SectionHeader(DataSizes):
    """An ELF section header, unpacked field-by-field in file order."""
    def __init__(self, ifile, ptrsize, is_le):
        super().__init__(ptrsize, is_le)
        is_64 = ptrsize == 64
        def unpack(fmt, size):
            # Read one field from the current position.
            return struct.unpack(fmt, ifile.read(size))[0]
        def unpack_word():
            return unpack(self.Word, self.WordSize)
        def unpack_xword():
            # Xword on 64-bit ELF; plain Word on 32-bit.
            if is_64:
                return unpack(self.XWord, self.XWordSize)
            return unpack(self.Word, self.WordSize)
        # Field order matches the on-disk Elf{32,64}_Shdr layout.
        # Elf64_Word
        self.sh_name = unpack_word()
        # Elf64_Word
        self.sh_type = unpack_word()
        # Elf64_Xword
        self.sh_flags = unpack_xword()
        # Elf64_Addr
        self.sh_addr = unpack(self.Addr, self.AddrSize)
        # Elf64_Off
        self.sh_offset = unpack(self.Off, self.OffSize)
        # Elf64_Xword
        self.sh_size = unpack_xword()
        # Elf64_Word
        self.sh_link = unpack_word()
        # Elf64_Word
        self.sh_info = unpack_word()
        # Elf64_Xword
        self.sh_addralign = unpack_xword()
        # Elf64_Xword
        self.sh_entsize = unpack_xword()
class Elf(DataSizes):
def __init__(self, bfile, verbose=True):
self.bfile = bfile
self.verbose = verbose
self.bf = open(bfile, 'r+b')
try:
(self.ptrsize, self.is_le) = self.detect_elf_type()
super().__init__(self.ptrsize, self.is_le)
self.parse_header()
self.parse_sections()
self.parse_dynamic()
except (struct.error, RuntimeError):
self.bf.close()
raise
def __enter__(self):
return self
def __del__(self):
if self.bf:
self.bf.close()
def __exit__(self, exc_type, exc_value, traceback):
self.bf.close()
self.bf = None
def detect_elf_type(self):
data = self.bf.read(6)
if data[1:4] != b'ELF':
# This script gets called to non-elf targets too
# so just ignore them.
if self.verbose:
print('File "%s" is not an ELF file.' % self.bfile)
sys.exit(0)
if data[4] == 1:
ptrsize = 32
elif data[4] == 2:
ptrsize = 64
else:
sys.exit('File "%s" has unknown ELF class.' % self.bfile)
if data[5] == 1:
is_le = True
elif data[5] == 2:
is_le = False
else:
sys.exit('File "%s" has unknown ELF endianness.' % self.bfile)
return ptrsize, is_le
def parse_header(self):
self.bf.seek(0)
self.e_ident = struct.unpack('16s', self.bf.read(16))[0]
self.e_type = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_machine = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_version = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
self.e_entry = struct.unpack(self.Addr, self.bf.read(self.AddrSize))[0]
self.e_phoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
self.e_shoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
self.e_flags = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
self.e_ehsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_phentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_phnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_shentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_shnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_shstrndx = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
def parse_sections(self):
self.bf.seek(self.e_shoff)
self.sections = []
for _ in range(self.e_shnum):
self.sections.append(SectionHeader(self.bf, self.ptrsize, self.is_le))
def read_str(self):
arr = []
x = self.bf.read(1)
while x != b'\0':
arr.append(x)
x = self.bf.read(1)
if x == b'':
raise RuntimeError('Tried to read past the end of the file')
return b''.join(arr)
def find_section(self, target_name):
section_names = self.sections[self.e_shstrndx]
for i in self.sections:
self.bf.seek(section_names.sh_offset + i.sh_name)
name = self.read_str()
if name == target_name:
return i
def parse_dynamic(self):
sec = self.find_section(b'.dynamic')
self.dynamic = []
if sec is None:
return
self.bf.seek(sec.sh_offset)
while True:
e = DynamicEntry(self.bf, self.ptrsize, self.is_le)
self.dynamic.append(e)
if e.d_tag == 0:
break
def print_section_names(self):
section_names = self.sections[self.e_shstrndx]
for i in self.sections:
self.bf.seek(section_names.sh_offset + i.sh_name)
name = self.read_str()
print(name.decode())
def print_soname(self):
soname = None
strtab = None
for i in self.dynamic:
if i.d_tag == DT_SONAME:
soname = i
if i.d_tag == DT_STRTAB:
strtab = i
if soname is None or strtab is None:
print("This file does not have a soname")
return
self.bf.seek(strtab.val + soname.val)
print(self.read_str())
def get_entry_offset(self, entrynum):
sec = self.find_section(b'.dynstr')
for i in self.dynamic:
if i.d_tag == entrynum:
return sec.sh_offset + i.val
return None
def print_rpath(self):
offset = self.get_entry_offset(DT_RPATH)
if offset is None:
print("This file does not have an rpath.")
else:
self.bf.seek(offset)
print(self.read_str())
def print_runpath(self):
offset = self.get_entry_offset(DT_RUNPATH)
if offset is None:
print("This file does not have a runpath.")
else:
self.bf.seek(offset)
print(self.read_str())
def print_deps(self):
sec = self.find_section(b'.dynstr')
deps = []
for i in self.dynamic:
if i.d_tag == DT_NEEDED:
deps.append(i)
for i in deps:
offset = sec.sh_offset + i.val
self.bf.seek(offset)
name = self.read_str()
print(name)
def fix_deps(self, prefix):
sec = self.find_section(b'.dynstr')
deps = []
for i in self.dynamic:
if i.d_tag == DT_NEEDED:
deps.append(i)
for i in deps:
offset = sec.sh_offset + i.val
self.bf.seek(offset)
name = self.read_str()
if name.startswith(prefix):
basename = name.split(b'/')[-1]
padding = b'\0' * (len(name) - len(basename))
newname = basename + padding
assert(len(newname) == len(name))
self.bf.seek(offset)
self.bf.write(newname)
def fix_rpath(self, rpath_dirs_to_remove, new_rpath):
# The path to search for can be either rpath or runpath.
# Fix both of them to be sure.
self.fix_rpathtype_entry(rpath_dirs_to_remove, new_rpath, DT_RPATH)
self.fix_rpathtype_entry(rpath_dirs_to_remove, new_rpath, DT_RUNPATH)
def fix_rpathtype_entry(self, rpath_dirs_to_remove, new_rpath, entrynum):
if isinstance(new_rpath, str):
new_rpath = new_rpath.encode('utf8')
rp_off = self.get_entry_offset(entrynum)
if rp_off is None:
if self.verbose:
print('File does not have rpath. It should be a fully static executable.')
return
self.bf.seek(rp_off)
old_rpath = self.read_str()
# Some rpath entries may come from multiple sources.
# Only add each one once.
new_rpaths = OrderedSet()
if new_rpath:
new_rpaths.add(new_rpath)
if old_rpath:
# Filter out build-only rpath entries
# added by get_link_dep_subdirs() or
# specified by user with build_rpath.
for rpath_dir in old_rpath.split(b':'):
if not (rpath_dir in rpath_dirs_to_remove or
rpath_dir == (b'X' * len(rpath_dir))):
if rpath_dir:
new_rpaths.add(rpath_dir)
# Prepend user-specified new entries while preserving the ones that came from pkgconfig etc.
new_rpath = b':'.join(new_rpaths)
if len(old_rpath) < len(new_rpath):
msg = "New rpath must not be longer than the old one.\n Old: {}\n New: {}".format(old_rpath, new_rpath)
sys.exit(msg)
# The linker does read-only string deduplication. If there is a
# string that shares a suffix with the rpath, they might get
# dedupped. This means changing the rpath string might break something
# completely unrelated. This has already happened once with X.org.
# Thus we want to keep this change as small as possible to minimize
# the chance of obliterating other strings. It might still happen
# but our behavior is identical to what chrpath does and it has
# been in use for ages so based on that this should be rare.
if not new_rpath:
self.remove_rpath_entry(entrynum)
else:
self.bf.seek(rp_off)
self.bf.write(new_rpath)
self.bf.write(b'\0')
    def remove_rpath_entry(self, entrynum):
        """Remove the dynamic entry of type ``entrynum`` and rewrite the
        .dynamic section in place."""
        sec = self.find_section(b'.dynamic')
        if sec is None:
            return None
        for (i, entry) in enumerate(self.dynamic):
            if entry.d_tag == entrynum:
                rpentry = self.dynamic[i]
                rpentry.d_tag = 0  # cleared tag; entry is moved to the end
                # Shift the remaining entries up so the list keeps its length.
                self.dynamic = self.dynamic[:i] + self.dynamic[i + 1:] + [rpentry]
                break
        # NOTE(review): if ``entrynum`` is not present, ``i`` is left at the
        # last index from the loop above and the adjustment below may still
        # run — presumably callers only invoke this when the entry exists.
        # DT_MIPS_RLD_MAP_REL is relative to the offset of the tag. Adjust it consequently.
        for entry in self.dynamic[i:]:
            if entry.d_tag == DT_MIPS_RLD_MAP_REL:
                entry.val += 2 * (self.ptrsize // 8)
                break
        self.bf.seek(sec.sh_offset)
        for entry in self.dynamic:
            entry.write(self.bf)
        return None
def fix_elf(fname, rpath_dirs_to_remove, new_rpath, verbose=True):
    """Print or rewrite the rpath/runpath of an ELF file.

    When ``new_rpath`` is None the current values are only printed;
    otherwise both DT_RPATH and DT_RUNPATH are rewritten.
    """
    with Elf(fname, verbose) as e:
        if new_rpath is None:
            e.print_rpath()
            e.print_runpath()
        else:
            e.fix_rpath(rpath_dirs_to_remove, new_rpath)
def get_darwin_rpaths_to_remove(fname):
    """Return the LC_RPATH paths of a Mach-O file as listed by ``otool -l``."""
    listing = subprocess.check_output(['otool', '-l', fname],
                                      universal_newlines=True,
                                      stderr=subprocess.DEVNULL)
    rpaths = []
    load_command = 'FOOBAR'  # sentinel: not inside any load command yet
    for raw_line in listing.split('\n'):
        stripped = raw_line.strip()
        # Lines without a space carry no key/value pair we care about.
        if ' ' not in stripped:
            continue
        key, _, value = stripped.partition(' ')
        if key == 'cmd':
            load_command = value
        if key == 'path' and load_command == 'LC_RPATH':
            # The line looks like 'path <rpath> (offset N)'; keep the path only.
            rpaths.append(value.split('(', 1)[0].strip())
    return rpaths
def fix_darwin(fname, new_rpath, final_path, install_name_mappings):
    """Adjust rpaths, install name and dependency names of a Mach-O file
    via install_name_tool.

    Any install_name_tool failure is surfaced as SystemExit so the caller's
    SystemExit handling applies.
    """
    try:
        rpaths = get_darwin_rpaths_to_remove(fname)
    except subprocess.CalledProcessError:
        # Otool failed, which happens when invoked on a
        # non-executable target. Just return.
        return
    try:
        args = []
        if rpaths:
            # TODO: fix this properly, not totally clear how
            #
            # removing rpaths from binaries on macOS has tons of
            # weird edge cases. For instance, if the user provided
            # a '-Wl,-rpath' argument in LDFLAGS that happens to
            # coincide with an rpath generated from a dependency,
            # this would cause installation failures, as meson would
            # generate install_name_tool calls with two identical
            # '-delete_rpath' arguments, which install_name_tool
            # fails on. Because meson itself ensures that it never
            # adds duplicate rpaths, duplicate rpaths necessarily
            # come from user variables. The idea of using OrderedSet
            # is to remove *at most one* duplicate RPATH entry. This
            # is not optimal, as it only respects the user's choice
            # partially: if they provided a non-duplicate '-Wl,-rpath'
            # argument, it gets removed, if they provided a duplicate
            # one, it remains in the final binary. A potentially optimal
            # solution would split all user '-Wl,-rpath' arguments from
            # LDFLAGS, and later add them back with '-add_rpath'.
            for rp in OrderedSet(rpaths):
                args += ['-delete_rpath', rp]
            subprocess.check_call(['install_name_tool', fname] + args,
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
        args = []
        if new_rpath:
            args += ['-add_rpath', new_rpath]
        # Rewrite -install_name @rpath/libfoo.dylib to /path/to/libfoo.dylib
        if fname.endswith('dylib'):
            args += ['-id', final_path]
        if install_name_mappings:
            for old, new in install_name_mappings.items():
                args += ['-change', old, new]
        if args:
            subprocess.check_call(['install_name_tool', fname] + args,
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
    except Exception as err:
        # Convert any failure into a fatal install error.
        raise SystemExit(err)
def fix_jar(fname):
    """Strip the Class-Path attribute from a jar's manifest, in place."""
    # Extract only the manifest, filter it, then update it back into the jar.
    subprocess.check_call(['jar', 'xfv', fname, 'META-INF/MANIFEST.MF'])
    with open('META-INF/MANIFEST.MF', 'r+') as f:
        lines = f.readlines()
        f.seek(0)
        for line in lines:
            if not line.startswith('Class-Path:'):
                f.write(line)
        # NOTE(review): manifest values longer than 72 bytes wrap onto
        # continuation lines that start with a space; such continuations of
        # a removed Class-Path line are NOT filtered here — confirm values
        # never wrap in practice.
        f.truncate()
    subprocess.check_call(['jar', 'ufm', fname, 'META-INF/MANIFEST.MF'])
def fix_rpath(fname, rpath_dirs_to_remove, new_rpath, final_path, install_name_mappings, verbose=True):
    """Fix the runtime search path of an installed file, whatever its format.

    Jars and ELF files are handled directly; when the ELF fixer exits
    cleanly (code 0) the macOS install_name_tool path is tried instead.
    """
    global INSTALL_NAME_TOOL
    # Static libraries, import libraries, debug information, headers, etc
    # never have rpaths
    # DLLs and EXE currently do not need runtime path fixing
    if fname.endswith(('.a', '.lib', '.pdb', '.h', '.hpp', '.dll', '.exe')):
        return
    try:
        if fname.endswith('.jar'):
            fix_jar(fname)
            return
        fix_elf(fname, rpath_dirs_to_remove, new_rpath, verbose)
        return
    except SystemExit as e:
        # A SystemExit with code 0 presumably signals "not an ELF file";
        # fall through to the Mach-O handling below. TODO confirm.
        if isinstance(e.code, int) and e.code == 0:
            pass
        else:
            raise
    # We don't look for this on import because it will do a useless PATH lookup
    # on non-mac platforms. That can be expensive on some Windows machines
    # (upto 30ms), which is significant with --only-changed. For details, see:
    # https://github.com/mesonbuild/meson/pull/6612#discussion_r378581401
    if INSTALL_NAME_TOOL is False:
        INSTALL_NAME_TOOL = shutil.which('install_name_tool')
    if INSTALL_NAME_TOOL:
        fix_darwin(fname, new_rpath, final_path, install_name_mappings)
| 38.346555 | 115 | 0.585747 |
c7a3937ef27a6fcfa13ff17b7aeaae98d3088d56 | 1,473 | py | Python | torch_geometric/utils/undirected.py | DL-85/pytorch_geometric | eb12a94a667e881c4a6bff26b0453428bcb72393 | [
"MIT"
] | 13 | 2019-11-07T02:57:41.000Z | 2021-12-28T08:19:56.000Z | torch_geometric/utils/undirected.py | LiJFrank/pytorch_geometric | 70e575d69c77f6c234373257dcf02014a290bed8 | [
"MIT"
] | 3 | 2019-10-30T20:20:27.000Z | 2022-03-12T22:56:11.000Z | torch_geometric/utils/undirected.py | LiJFrank/pytorch_geometric | 70e575d69c77f6c234373257dcf02014a290bed8 | [
"MIT"
] | 3 | 2020-10-19T02:53:20.000Z | 2022-01-31T04:31:02.000Z | import torch
from torch_sparse import coalesce
from .num_nodes import maybe_num_nodes
def is_undirected(edge_index, num_nodes=None):
    r"""Returns :obj:`True` if the graph given by :attr:`edge_index`
    contains the reverse of every edge, i.e. is undirected.

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)

    :rtype: bool
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)
    coalesced, _ = coalesce(edge_index, None, num_nodes, num_nodes)
    symmetrized = to_undirected(coalesced, num_nodes=num_nodes)
    # Symmetrizing an already-undirected graph adds no new edges, so in that
    # case (and only then) the deduplicated edge counts agree.
    return coalesced.size(1) == symmetrized.size(1)
def to_undirected(edge_index, num_nodes=None):
    r"""Converts the graph given by :attr:`edge_index` to an undirected
    graph, so that :math:`(j,i) \in \mathcal{E}` for every edge
    :math:`(i,j) \in \mathcal{E}`.

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)

    :rtype: :class:`LongTensor`
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)
    row, col = edge_index
    # Append the mirrored copy (j, i) of every edge (i, j) ...
    new_row = torch.cat([row, col], dim=0)
    new_col = torch.cat([col, row], dim=0)
    edge_index = torch.stack([new_row, new_col], dim=0)
    # ... then coalesce to drop duplicates of edges that were already
    # present in both directions.
    edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)
    return edge_index
| 33.477273 | 78 | 0.666667 |
759c32e67359b387ae44cccbd4a165d4e1acd25f | 655 | py | Python | 1049.py | ErFer7/URI-Python | 94c36985852204e34806650e4ffec48d4d9e9ab1 | [
"MIT"
] | 1 | 2022-02-06T19:36:33.000Z | 2022-02-06T19:36:33.000Z | 1049.py | ErFer7/URI-Python | 94c36985852204e34806650e4ffec48d4d9e9ab1 | [
"MIT"
] | null | null | null | 1049.py | ErFer7/URI-Python | 94c36985852204e34806650e4ffec48d4d9e9ab1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# URI problem 1049 ("Animal"): read three characteristics, one per line,
# and print the matching animal's name.
group = input()
subgroup = input()
diet = input()
if group == "vertebrado":
    if subgroup == "ave":
        name = "aguia" if diet == "carnivoro" else "pomba"
    else:
        name = "homem" if diet == "onivoro" else "vaca"
else:
    if subgroup == "inseto":
        name = "pulga" if diet == "hematofago" else "lagarta"
    else:
        name = "sanguessuga" if diet == "hematofago" else "minhoca"
print(name)
d8cf091b7aa8f90c63cf927c6d1febbe5299de04 | 4,038 | py | Python | tests/test_imp_export.py | d3d9/DINO2 | 524a6e4639c268ed1f8cb0ebba8436ad6ffaac23 | [
"0BSD"
] | 1 | 2020-12-11T16:43:50.000Z | 2020-12-11T16:43:50.000Z | tests/test_imp_export.py | d3d9/DINO2 | 524a6e4639c268ed1f8cb0ebba8436ad6ffaac23 | [
"0BSD"
] | 2 | 2021-06-20T11:10:57.000Z | 2021-07-26T20:50:44.000Z | tests/test_imp_export.py | d3d9/DINO2 | 524a6e4639c268ed1f8cb0ebba8436ad6ffaac23 | [
"0BSD"
] | null | null | null | import pytest
from datetime import date, timedelta
from enum import Enum
from filecmp import cmp, dircmp
import os
from shutil import rmtree
from sqlalchemy.exc import IntegrityError
from DINO2 import Database
from DINO2.model import Version
from DINO2.model.location import Stop
from DINO2.model.schedule import Trip
from DINO2.tools.imp import main
from DINO2.tools.export import csv, wikitable
from DINO2.types import DinoDate, DinoTimeDelta, TypeEnum, IntEnum
_test_dburl = "sqlite:///./tests/data/_test.db"
@pytest.fixture(scope='session')
def db_obj():
    """Session-scoped Database handle; deletes the SQLite file on teardown."""
    # _test_dburl starts with 'sqlite:///' (10 chars); the rest is the path.
    path = _test_dburl[10:]
    yield Database(_test_dburl)
    os.remove(path)
def test_import_clear():
    """Run the importer in 'c' (clear) mode to start from an empty database."""
    argv = ["<python>", _test_dburl, None, "c"]
    main(argv)
def test_import_success():
    """Import the bundled 2020-05-15 version-9 dataset in 'a' (add) mode."""
    argv = ["<python>", _test_dburl, "./tests/data/2020-05-15-version-9", "a"]
    main(argv)
def test_import_integrityerror():
    """Importing the same dataset a second time must raise IntegrityError."""
    with pytest.raises(IntegrityError):
        test_import_success()
def test_dataset_counts(db_obj):
    """Row counts of the imported version must match the known fixture."""
    session = db_obj.Session()
    assert session.query(Version).count() == 1
    version = session.query(Version).one()
    assert len(version.daytypes) == 7
    assert len(version.day_type_calendar) == 207
    assert len(version.stops) == 528
    assert len(version.courses) == 265
    assert len(version.trips) == 4938
    session.close()
def test_wikitable(db_obj):
    """The wikitable export must match the reference file byte for byte."""
    session = db_obj.Session()
    fn = "./tests/data/_test_wiki-9.txt"
    if os.path.exists(fn):
        raise Exception(f"{fn} already exists, won't delete")
    wikitable.wikitable(session, fn)
    equal = cmp(fn, "./tests/data/wiki-9.txt")
    # Clean up before asserting so a failure does not leave stray files.
    os.remove(fn)
    session.close()
    assert equal
def test_stops(db_obj):
    """The stop CSV export must match the reference file byte for byte."""
    session = db_obj.Session()
    fn = "./tests/data/_test_stops-9.csv"
    if os.path.exists(fn):
        raise Exception(f"{fn} already exists, won't delete")
    csv.stops(session, fn)
    equal = cmp(fn, "./tests/data/stops-9.csv")
    # Clean up before asserting so a failure does not leave stray files.
    os.remove(fn)
    session.close()
    assert equal
def test_courses(db_obj):
    """The per-course CSV export must match the reference directory."""
    session = db_obj.Session()
    fp = "./tests/data/_test_courses-9/"
    if os.path.exists(fp):
        raise Exception(f"{fp} already exists, won't delete")
    csv.courses(session, fp)
    dc = dircmp(fp, "./tests/data/courses-9/")
    # No differing and no uncomparable files means the exports agree.
    equal = not dc.diff_files and not dc.funny_files
    rmtree(fp)
    session.close()
    assert equal
def test_trips(db_obj):
    """Trip export for line 50514 on 2020-06-14 must match the reference CSV."""
    session = db_obj.Session()
    fn = "./tests/data/_test_trips-9-50514-2020-06-14.csv"
    if os.path.exists(fn):
        raise Exception(f"{fn} already exists, won't delete")
    csv.trips(session, fn, date(2020, 6, 14), line_ids={50514})
    equal = cmp(fn, "./tests/data/trips-9-50514-2020-06-14.csv")
    os.remove(fn)
    session.close()
    assert equal
def test_line_stats(db_obj):
    """The line statistics export must match the reference CSV."""
    session = db_obj.Session()
    fn = "./tests/data/_test_line_stats-9.csv"
    if os.path.exists(fn):
        raise Exception(f"{fn} already exists, won't delete")
    csv.line_stats(session, fn)
    equal = cmp(fn, "./tests/data/line_stats-9.csv")
    os.remove(fn)
    session.close()
    assert equal
def test_departure_stats(db_obj):
    """Departure statistics for 2020-06-15 must match the reference CSV."""
    session = db_obj.Session()
    tq = Trip.query_for_date(session, date(2020,6,15))
    # Known number of trips running on that date in the fixture dataset.
    assert tq.count() == 2038
    fn = "./tests/data/_test_departure_stats-9-2020-06-15.csv"
    if os.path.exists(fn):
        raise Exception(f"{fn} already exists, won't delete")
    csv.departure_stats(tq, fn)
    equal = cmp(fn, "./tests/data/departure_stats-9-2020-06-15.csv")
    os.remove(fn)
    session.close()
    assert equal
def test_departures(db_obj):
    """Departure listing for stop 2216 over 8 days must match the reference."""
    session = db_obj.Session()
    fw = session.query(Stop).filter_by(version_id=9, id=2216).one()
    assert fw.name == "Hagen Feuerwache"
    fn = "./tests/data/_test_departures-9-2216-2020-06-14+8.csv"
    if os.path.exists(fn):
        raise Exception(f"{fn} already exists, won't delete")
    csv.departures(fw, date(2020, 6, 14), fn, days=8)
    equal = cmp(fn, "./tests/data/departures-9-2216-2020-06-14+8.csv")
    os.remove(fn)
    session.close()
    assert equal
| 31.061538 | 78 | 0.674839 |
e917e065e6bf08cbb18cf2aff5008487a6d0c4a0 | 3,094 | py | Python | _draft/coordtransform/coordtransform.py | OmarG247/autopew | df15e9529aa82ba6b7580573d51980a178f13a9a | [
"BSD-3-Clause"
] | 3 | 2021-01-11T20:59:55.000Z | 2022-02-16T19:47:46.000Z | _draft/coordtransform/coordtransform.py | OmarG247/autopew | df15e9529aa82ba6b7580573d51980a178f13a9a | [
"BSD-3-Clause"
] | 23 | 2019-10-10T04:27:49.000Z | 2021-11-11T08:01:47.000Z | _draft/coordtransform/coordtransform.py | OmarG247/autopew | df15e9529aa82ba6b7580573d51980a178f13a9a | [
"BSD-3-Clause"
] | 3 | 2019-10-15T16:02:54.000Z | 2020-05-23T04:11:16.000Z | import logging
import itertools
from autopew.transform import (
affine_from_AB,
transform_from_affine,
inverse_affine_transform,
)
class CoordinateTransform(object):
    """A named transform between two coordinate reference systems.

    Every instance registers itself in the class-wide ``library`` so that
    transforms between all known systems can later be derived/chained.
    """

    library = []
    # The draft referenced an undefined module-level ``logger``; bind one
    # here so the log calls below actually work.
    _logger = logging.getLogger(__name__)

    def __init__(self, source, dest, *args, **kwargs):
        self.forward = None
        self.reverse = None
        self.source = source
        self.dest = dest
        # todo: methods for dealing with maximum dimensionality of the transform
        # if you create a 3D-3D transform you can keep all dims, but dims will be lost
        # for 3D-2D, and any subsequent transforms.
        self._register()
        if (self.dest, self.source) not in self._links:
            self._invert  # property access creates and registers the inverse
        self._iter_library()

    @property
    def _links(self):
        """Set of (source, dest) pairs currently present in the library."""
        return set(
            zip([i.source for i in self.library], [i.dest for i in self.library])
        )

    @property
    def _domains(self):
        """All coordinate reference systems referenced by any transform."""
        return set([i.dest for i in self.library] + [i.source for i in self.library])

    def _register(self):
        """Register this Coordinate Transform in the Transform Library (once)."""
        if self not in self.library:
            self.library.append(self)
        else:
            self._logger.warning("Transform Already Exists in Library")

    def _iter_library(self):
        """Report the (source, dest) pairs still missing from the library.

        Intended to calibrate all relevant transforms between available
        source and destination coordinate systems; currently only prints
        which ones would need to be added.
        """
        self._logger.debug("Iterating over transform library.")
        crs = self._domains
        present = set([(c.source, c.dest) for c in self.library])
        possible = itertools.product(crs, repeat=2)
        for a, b in possible:
            if (a != b) and ((a, b) not in present):
                print("Need to add ({}, {})".format(a, b))

    @property
    def _invert(self):
        """Create, register and return the inverse transform."""
        self._logger.debug("Creating inverse for {}".format(str(self)))
        self.inverse = CoordinateTransform(self.dest, self.source)
        self.inverse.inverse = self
        self.inverse.forward, self.inverse.reverse = self.reverse, self.forward
        return self.inverse

    def calibrate(self, sourcepoints, destpoints):
        """Fit the affine transform mapping ``sourcepoints`` onto
        ``destpoints`` and update this transform and its inverse.
        """
        import numpy as np  # only needed here; not imported at module level

        self._logger.debug("Calibrating {}".format(str(self)))
        # BUGFIX: the draft referenced undefined names ``pixelpoints``,
        # ``transformpoints`` and ``affine_transform``; use the actual
        # parameters and the helpers imported at the top of the file.
        self.affine = affine_from_AB(sourcepoints, destpoints)
        self.forward = transform_from_affine(self.affine)
        self.reverse = inverse_affine_transform(self.affine)
        self.inverse.forward, self.inverse.reverse, self.inverse.affine = (
            self.reverse,
            self.forward,
            np.linalg.inv(self.affine),
        )

    def __eq__(self, other):
        # Two transforms are equal when they link the same pair of systems.
        if other.__class__ == self.__class__:
            return (self.source == other.source) and (self.dest == other.dest)
        else:
            return False

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.source, self.dest)

    def __str__(self):
        return "{} from {} to {}".format(
            self.__class__.__name__, self.source, self.dest
        )
| 32.568421 | 86 | 0.610213 |
7b866f3929a0f4b6b8337a6b663137ec4f97a9ff | 567 | py | Python | main/dash/template.py | yopito/cports | ef5f808a46724ee3cd215eb95192904ebb7ddcd1 | [
"BSD-2-Clause"
] | null | null | null | main/dash/template.py | yopito/cports | ef5f808a46724ee3cd215eb95192904ebb7ddcd1 | [
"BSD-2-Clause"
] | null | null | null | main/dash/template.py | yopito/cports | ef5f808a46724ee3cd215eb95192904ebb7ddcd1 | [
"BSD-2-Clause"
] | null | null | null | pkgname = "dash"
# cports build-template fields for dash (pkgname is set on the line above).
pkgver = "0.5.11.3"
pkgrel = 0
build_style = "gnu_configure"
pkgdesc = "POSIX-compliant Unix shell, much smaller than GNU bash"
maintainer = "q66 <daniel@octaforge.org>"
license = "BSD-3-Clause"
url = "http://gondor.apana.org.au/~herbert/dash"
sources = [f"http://gondor.apana.org.au/~herbert/dash/files/{pkgname}-{pkgver}.tar.gz"]
sha256 = ["62b9f1676ba6a7e8eaec541a39ea037b325253240d1f378c72360baa1cbcbc2a"]
# NOTE(review): presumably 'bootstrap' marks the package as part of the
# bootstrap set and '!check' disables its test suite — see cports docs.
options = ["bootstrap", "!check"]
def post_install(self):
    # Ship the license file and provide /usr/bin/sh as a link to dash.
    self.install_license("COPYING")
    self.install_link("dash", "usr/bin/sh")
| 33.352941 | 87 | 0.728395 |
fb29f38dbefa0699c27258a1192e294fc02f2b93 | 11,352 | py | Python | examples/full_analys.py | seeker8529/face_recognition | 7a85e47fdb7d9d2f0167a7e75c69e14aadfcb64a | [
"MIT"
] | null | null | null | examples/full_analys.py | seeker8529/face_recognition | 7a85e47fdb7d9d2f0167a7e75c69e14aadfcb64a | [
"MIT"
] | null | null | null | examples/full_analys.py | seeker8529/face_recognition | 7a85e47fdb7d9d2f0167a7e75c69e14aadfcb64a | [
"MIT"
] | null | null | null |
from PIL import Image, ImageDraw, ImageFont
from PyQt5.QtWidgets import *
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QLabel
from PyQt5.QtGui import QIcon, QPixmap
import face_recognition
# Path of the face image to analyse (hard-coded; adjust per machine).
nameImage = "C:\\Users\\Nika Kim\\Desktop\\faces\\K.jpg"
# Load the image file into a numpy array.
image = face_recognition.load_image_file(nameImage)
# Find all facial features in all the faces in the image.
face_landmarks_list = face_recognition.face_landmarks(image)
# Shared PIL image that the analysis draws its measurement lines onto.
pil_image = Image.fromarray(image)
def find_red_pixel(image_name):
    """Return the set of (x, y) coordinates of all pure-red pixels.

    A pixel counts as "pure red" when its RGB value is exactly
    (255, 0, 0) — the marker colour the user paints on the forehead.
    (The original ``r_min``/``g_min``/``b_min`` names suggested a
    threshold, but the comparison was always exact equality.)

    Args:
        image_name: path of the image file to scan.

    Returns:
        Set of (x, y) tuples; empty if no pure-red pixel exists.
    """
    target = (255, 0, 0)
    rgb = Image.open(image_name).convert('RGB')
    width = rgb.size[0]
    # getdata() yields all pixels row by row in a single pass, which is
    # much faster than calling getpixel() once per coordinate.
    return {(i % width, i // width)
            for i, px in enumerate(rgb.getdata())
            if px == target}
def analys(imageForAnalys):
    """Measure facial proportions on the marked image and print emotion scores.

    Relies on module-level state: ``face_landmarks`` (set by the loop at the
    bottom of the file), ``nameImage`` and ``pil_image``, onto which the
    measured segments are drawn before the image is shown.

    Args:
        imageForAnalys: path of the saved image containing the user-painted
            pure-red marker at the top of the forehead.

    Raises:
        ValueError: if no pure-red marker pixel is found.
    """
    # --- locate the red forehead marker -----------------------------------
    red_pixels = find_red_pixel(imageForAnalys)
    if len(red_pixels) == 0:
        # The draft only printed a message here and then crashed with a
        # KeyError on the pop() below; fail explicitly instead.
        raise ValueError("ERROR: PUT RED POINT IN THE FOREHEAD")
    redPoint = red_pixels.pop()
    # --- main pixel measurements -------------------------------------------
    betweenEyes = face_landmarks['right_eye'][0][0] - face_landmarks['left_eye'][3][0]
    leftEyeLength = face_landmarks['left_eye'][3][0] - face_landmarks['left_eye'][0][0]
    rightEyeLength = face_landmarks['right_eye'][3][0] - face_landmarks['right_eye'][0][0]
    eyeLength = 1.5 * (rightEyeLength + leftEyeLength) / 2
    noseLength = face_landmarks['nose'][4][1] - face_landmarks['right_eye'][0][1]
    # The two measurements below are computed but currently unused; kept
    # for reference.
    faceLengthWithoutForehead = face_landmarks['border'][1][1] - face_landmarks['border'][0][1]
    forehead = None  # assigned below after meanEyebrowBegining
    fromChinToLips = face_landmarks['border'][1][1] - face_landmarks['lips_middle'][0][1]
    meanEyebrowBegining = (face_landmarks['left_eyebrow'][4][1] + face_landmarks['right_eyebrow'][0][1])/2
    forehead = meanEyebrowBegining - redPoint[1]
    # BUGFIX: the draft popped a *second* pixel from ``red_pixels`` here,
    # measuring against a different marker pixel (and raising KeyError when
    # the marker was a single pixel); reuse the marker popped above.
    allFace = face_landmarks['chin'][8][1] - redPoint[1]
    cheekbones = face_landmarks['cheekbones'][1][0] - face_landmarks['cheekbones'][0][0]
    yPupil = (face_landmarks['right_eye'][5][1] + face_landmarks['right_eye'][1][1])/2
    # --- proportion indices -------------------------------------------------
    pui = (betweenEyes + eyeLength)/cheekbones
    puti = (yPupil - redPoint[1])/allFace
    nsi = noseLength/allFace
    stoi = fromChinToLips/allFace
    # --- draw the measured segments on the face -----------------------------
    d = ImageDraw.Draw(pil_image, 'RGBA')
    im = Image.open(nameImage)
    (width, height) = im.size
    # Scale the line width and marker radius with the image resolution.
    if (width < 600 or height < 600):
        lineWidth = 2
        ellipsRad = 1
    elif (width < 1000 or height < 1000):
        lineWidth = 5
        ellipsRad = 5
    else:
        lineWidth = 8
        ellipsRad = 10
    # Eyes
    d.line([(face_landmarks['left_eye'][3][0]-leftEyeLength/2, face_landmarks['left_eye'][3][1]),
            (face_landmarks['right_eye'][0][0]+rightEyeLength/2, face_landmarks['right_eye'][0][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['left_eye'][3][0]-leftEyeLength/2 - ellipsRad, face_landmarks['left_eye'][3][1] - ellipsRad,
               face_landmarks['left_eye'][3][0]-leftEyeLength/2 + ellipsRad, face_landmarks['left_eye'][3][1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['right_eye'][0][0]+rightEyeLength/2 - ellipsRad, face_landmarks['right_eye'][0][1] - ellipsRad,
               face_landmarks['right_eye'][0][0]+rightEyeLength/2 + ellipsRad, face_landmarks['right_eye'][0][1] + ellipsRad),
              fill="red", outline="red")
    # Forehead
    d.line([(face_landmarks['nose'][0][0], redPoint[1]), (face_landmarks['nose'][0][0], face_landmarks['nose'][0][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, redPoint[1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, redPoint[1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['nose'][0][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['nose'][0][1] + ellipsRad),
              fill="red", outline="red")
    # Nose
    d.line([(face_landmarks['nose'][0][0], face_landmarks['nose'][0][1]), (face_landmarks['nose'][0][0], face_landmarks['nose_tip'][3][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['nose_tip'][3][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['nose_tip'][3][1] + ellipsRad), fill="red", outline="red")
    # Cheekbones
    d.line([(face_landmarks['cheekbones'][0][0], face_landmarks['cheekbones'][0][1]),
            (face_landmarks['cheekbones'][1][0], face_landmarks['cheekbones'][0][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['cheekbones'][0][0] - ellipsRad, face_landmarks['cheekbones'][0][1] - ellipsRad,
               face_landmarks['cheekbones'][0][0] + ellipsRad, face_landmarks['cheekbones'][0][1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['cheekbones'][1][0] - ellipsRad, face_landmarks['cheekbones'][0][1] - ellipsRad,
               face_landmarks['cheekbones'][1][0] + ellipsRad, face_landmarks['cheekbones'][0][1] + ellipsRad),
              fill="red", outline="red")
    # Lips
    d.line([(face_landmarks['nose'][0][0], face_landmarks['lips_middle'][0][1]),
            (face_landmarks['nose'][0][0], face_landmarks['border'][1][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['lips_middle'][0][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['lips_middle'][0][1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['border'][1][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['border'][1][1] + ellipsRad),
              fill="red", outline="red")
    # All length
    d.line([(face_landmarks['nose'][0][0] + lineWidth*3, redPoint[1]), (face_landmarks['nose'][0][0] + lineWidth*3, face_landmarks['border'][1][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] + lineWidth*3 - ellipsRad, redPoint[1] - ellipsRad,
               face_landmarks['nose'][0][0] + lineWidth*3 + ellipsRad, redPoint[1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['nose'][0][0] + lineWidth*3 - ellipsRad, face_landmarks['border'][1][1] - ellipsRad,
               face_landmarks['nose'][0][0] + lineWidth*3 + ellipsRad, face_landmarks['border'][1][1] + ellipsRad),
              fill="red", outline="red")
    print("----------------------------------------------------------------------------\n")
    # --- emotion scores from the proportion indices --------------------------
    interested = 0.357*nsi + 0.544/puti
    happy = 0.4*pui + 0.285*stoi + 0.409*puti
    impressed = 0.386*nsi + 0.432/puti
    sad = 0.356/pui + 0.423/stoi
    despised = 0.287/nsi
    scared = 0.499*nsi + 0.317/stoi + 0.495/puti
    guilty = 0.435*nsi +0.472/stoi
    print("interested: ", interested)
    print("happy: ", happy)
    print("impressed: ", impressed)
    print("sad: ", sad)
    print("despised: ", despised)
    print("scared: ", scared)
    print("guilty: ", guilty)
    # Reference scores for the "ideal" proportions (0.33 / 0.5 / 0.25).
    interested0 = 0.357 * 0.33 + 0.544 / 0.33
    happy0 = 0.4 * 0.5 + 0.285 * 0.25 + 0.409 * 0.33
    impressed0 = 0.386 * 0.33 + 0.432 / 0.33
    sad0 = 0.356 / 0.5 + 0.423 / 0.25
    despised0 = 0.287 / 0.33
    scared0 = 0.499 * 0.33 + 0.317 / 0.25 + 0.495 / 0.33
    guilty0 = 0.435 * 0.33 + 0.472 / 0.25
    # Percentage deviation of each score from its reference value.
    devInterested = 100 * (1 - interested/interested0)
    devHappy = 100 * (1 - happy/happy0)
    devImpressed = 100 * (1 - impressed/impressed0)
    devSad = 100 * (1 - sad/sad0)
    devDespised = 100 * (1 - despised/despised0)
    devScared = 100 * (1 - scared/scared0)
    devGuilty = 100 * (1 - guilty/guilty0)
    print("")
    print("devInterested: ", devInterested, "%")
    print("devHappy: ", devHappy, "%")
    print("devImpressed: ", devImpressed, "%")
    print("devSad: ", devSad, "%")
    print("devDespised: ", devDespised, "%")
    print("devScared: ", devScared, "%")
    print("devGuilty: ", devGuilty, "%")
    pil_image.show()
class Widget(QWidget):
    """Main window: shows the image, lets the user paint a red marker with
    the mouse, and runs ``analys()`` on the saved result."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Analyser')
        self.point = None  # last pressed position, drawn onto the pixmap
        hbox = QVBoxLayout(self)
        pixmap = QPixmap(nameImage)
        self.lbl = QLabel(self)
        self.lbl.setPixmap(pixmap)
        hbox.addWidget(self.lbl)
        self.setLayout(hbox)
        btn1 = QPushButton("Готово", self)
        hbox.addWidget(btn1)
        btn2 = QPushButton("Очистить", self)
        hbox.addWidget(btn2)
        btn1.clicked.connect(self.button1Clicked)
        btn2.clicked.connect(self.button2Clicked)

    # BUGFIX: the draft defined ``paintEvent`` twice; the first definition
    # (and its helper ``drawText``) referenced an undefined ``self.text``
    # and was shadowed by the second definition, so the dead pair has been
    # removed.

    def button1Clicked(self):
        # "Done": save the annotated pixmap and run the analysis on it.
        newName = "C:\\Users\\Nika Kim\\Desktop\\faces\\K.png"
        self.lbl.pixmap().save(newName, 'png')
        analys(newName)
        self.close()

    def button2Clicked(self):
        # "Clear": reset the label to the pristine image, discarding marks.
        pixmap = QPixmap(nameImage)
        self.lbl.setPixmap(pixmap)

    def mousePressEvent(self, event):
        self.point = event.pos()
        # Trigger a repaint of the widget so the point gets drawn.
        self.update()

    def mouseReleaseEvent(self, event):
        self.point = None

    def paintEvent(self, event):
        super().paintEvent(event)
        # Nothing to draw if no point is being pressed.
        if not self.point:
            return
        painter = QPainter(self.lbl.pixmap())
        painter.setPen(QPen(Qt.red, 10.0))
        painter.drawPoint(self.point)
# Launch one annotation window per detected face. ``analys`` reads the loop
# variable ``face_landmarks`` as a global. NOTE(review):
# ``sys.exit(app.exec_())`` terminates the interpreter after the first
# window closes, so only the first face is ever processed — confirm intent.
for face_landmarks in face_landmarks_list:
    app = QApplication(sys.argv)
    ex = Widget()
    ex.show()
    sys.exit(app.exec_())
| 34.929231 | 149 | 0.603594 |
740383d36ed66846594b82c547bdd20e1660c254 | 1,061 | py | Python | tools/c7n_azure/c7n_azure/resources/key_vault.py | kaskrish/cloud-custodian | 97fd1048d866657c0e85816eaeff55018c336fe8 | [
"Apache-2.0"
] | null | null | null | tools/c7n_azure/c7n_azure/resources/key_vault.py | kaskrish/cloud-custodian | 97fd1048d866657c0e85816eaeff55018c336fe8 | [
"Apache-2.0"
] | null | null | null | tools/c7n_azure/c7n_azure/resources/key_vault.py | kaskrish/cloud-custodian | 97fd1048d866657c0e85816eaeff55018c336fe8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n_azure.query import QueryResourceManager
from c7n_azure.provider import resources
@resources.register('keyvault')
class KeyVault(QueryResourceManager):
    """Cloud Custodian resource manager for Azure Key Vault vaults."""

    class resource_type(object):
        # Azure SDK module/client used to enumerate the vaults.
        service = 'azure.mgmt.keyvault'
        client = 'KeyVaultManagementClient'
        # Call ``vaults.list`` on the client to enumerate resources.
        enum_spec = ('vaults', 'list')
        id = 'id'
        name = 'name'
        default_report_fields = (
            'name',
            'location',
            'resourceGroup'
        )
| 32.151515 | 74 | 0.700283 |
ffe0ff6106224a92fcfa0192480399537971ae59 | 13,984 | py | Python | maltpynt/tests/test_unit.py | matteobachetti/MaLTPyNT | 6c93d2e23041b6c932810b5a8d727ee1b6dabfed | [
"BSD-3-Clause"
] | 8 | 2015-02-23T13:43:21.000Z | 2021-07-17T11:35:24.000Z | maltpynt/tests/test_unit.py | matteobachetti/MaLTPyNT | 6c93d2e23041b6c932810b5a8d727ee1b6dabfed | [
"BSD-3-Clause"
] | 1 | 2017-09-14T07:55:07.000Z | 2017-09-14T07:55:07.000Z | maltpynt/tests/test_unit.py | matteobachetti/MaLTPyNT | 6c93d2e23041b6c932810b5a8d727ee1b6dabfed | [
"BSD-3-Clause"
] | 4 | 2016-03-02T20:36:07.000Z | 2018-02-26T13:23:53.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""First set of tests."""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import maltpynt as mp
import numpy as np
import logging
import os
import unittest
import pytest
MP_FILE_EXTENSION = mp.io.MP_FILE_EXTENSION
logging.basicConfig(filename='MP.log', level=logging.DEBUG, filemode='w')
curdir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(curdir, 'data')
def _ratio(a, b):
return np.abs(a - b) / np.abs(a + b)
class TestPDS(unittest.TestCase):
    """Test PDS statistics.

    A long Poissonian light curve pair is synthesized once; the tests then
    check statistical properties of the resulting power density spectra
    (PDS) and cospectrum.
    """

    @classmethod
    def setUpClass(cls):
        """Produce common products for all subsequent tests."""
        print("Setting up.")
        import numpy.random as ra
        cls.length = 512000
        cls.tstart = 0
        cls.tstop = cls.tstart + cls.length
        cls.ctrate = 100
        cls.bintime = 1
        # Fixed seed keeps the synthetic event lists reproducible.
        ra.seed(seed=1234)
        cls.nphot = ra.poisson(cls.length * cls.ctrate)
        events = ra.uniform(cls.tstart, cls.tstop, cls.nphot)
        time, cls.lc1 = \
            mp.lcurve.lcurve(events,
                             cls.bintime,
                             start_time=cls.tstart,
                             stop_time=cls.tstop)
        events = ra.uniform(cls.tstart, cls.tstop, cls.nphot)
        time, cls.lc2 = \
            mp.lcurve.lcurve(events,
                             cls.bintime,
                             start_time=cls.tstart,
                             stop_time=cls.tstop)
        cls.time = time
        data = mp.fspec.welch_pds(cls.time, cls.lc1, cls.bintime, 1024)
        cls.freq1, cls.pds1, cls.pdse1 = data.f, data.pds, data.epds
        data = mp.fspec.welch_pds(cls.time, cls.lc2, cls.bintime, 1024)
        cls.freq2, cls.pds2, cls.pdse2 = data.f, data.pds, data.epds
        data = mp.fspec.welch_cpds(cls.time, cls.lc1, cls.lc2,
                                   cls.bintime, 1024)
        cls.cpds, cls.ec = data.cpds, data.ecpds
        # Calculate the variance discarding the freq=0 Hz element
        cls.varp1 = np.var(cls.pds1[1:])
        cls.varp2 = np.var(cls.pds2[1:])
        cls.varcr = np.var(cls.cpds.real[1:])

    def test_pdsstat1(self):
        """Test that the Leahy PDS goes to 2."""
        from scipy.optimize import curve_fit

        def baseline_fun(x, a):
            # Constant model for the white-noise level.
            return a
        freq, pds, epds = \
            mp.rebin.const_rebin(self.freq1[1:], self.pds1[1:], 16,
                                 self.pdse1[1:])
        # NOTE(review): curve_fit's ``sigma`` is documented as the standard
        # deviation of the data; ``1 / epds**2`` looks like an
        # inverse-variance weight instead — confirm intent.
        p, pcov = curve_fit(baseline_fun, freq, pds,
                            p0=[2], sigma=1 / epds**2)
        perr = np.sqrt(np.diag(pcov))
        assert np.abs(p - 2) < perr * 3, \
            ('PDS white level did not converge to 2')

    def test_pdsstat2(self):
        """Test the statistical properties of the PDS.

        The sample variance should match the mean squared error bar to 10%.
        """
        r = _ratio(self.varp1, np.mean(self.pdse1[1:] ** 2))
        assert r < 0.1, \
            "{0} {1} {2}".format(self.varp1, np.mean(self.pdse1[1:] ** 2), r)

    def test_pdsstat3(self):
        """Test the statistical properties of the PDS (second light curve)."""
        r = _ratio(self.varp2, np.mean(self.pdse2[1:] ** 2))
        assert r < 0.1, \
            "{0} {1} {2}".format(self.varp2, np.mean(self.pdse2[1:] ** 2), r)

    def test_pdsstat4(self):
        """Test the statistical properties of the cospectrum."""
        r = _ratio(self.varcr, np.mean(self.ec[1:] ** 2))
        assert r < 0.1, \
            "{0} {1} {2}".format(self.varcr, np.mean(self.ec[1:] ** 2), r)

    def test_pdsstat5(self):
        """Test the statistical properties of the cospectrum.

        In particular, the standard deviation of the cospectrum is a factor
        ~sqrt(2) smaller than the standard deviation of the PDS.
        """
        geom_mean = np.sqrt(self.varp1 * self.varp2)
        r = _ratio(2 * self.varcr, geom_mean)
        assert r < 0.1, \
            "{0} {1} {2}".format(2 * self.varcr, geom_mean, r)
class TestAll(unittest.TestCase):
"""Real unit tests."""
    def test_crossgti1(self):
        """Test the basic working of the intersection of GTIs."""
        gti1 = np.array([[1, 4]])
        gti2 = np.array([[2, 5]])
        # The overlap of [1, 4] and [2, 5] is [2, 4].
        newgti = mp.base.cross_gtis([gti1, gti2])
        assert np.all(newgti == [[2, 4]]), 'GTIs do not coincide!'
    def test_crossgti2(self):
        """A more complicated example of intersection of GTIs."""
        gti1 = np.array([[1, 2], [4, 5], [7, 10], [11, 11.2], [12.2, 13.2]])
        gti2 = np.array([[2, 5], [6, 9], [11.4, 14]])
        newgti = mp.base.cross_gtis([gti1, gti2])
        assert np.all(newgti == [[4.0, 5.0], [7.0, 9.0], [12.2, 13.2]]), \
            'GTIs do not coincide!'
def test_bti(self):
"""Test the inversion of GTIs."""
gti = np.array([[1, 2], [4, 5], [7, 10], [11, 11.2], [12.2, 13.2]])
bti = mp.base.get_btis(gti)
assert np.all(bti == [[2, 4], [5, 7], [10, 11], [11.2, 12.2]]), \
'BTI is wrong!, %s' % repr(bti)
def test_common_name(self):
"""Test the common_name function."""
a = 'A_3-50_A.nc'
b = 'B_3-50_B.nc'
assert mp.base.common_name(a, b) == '3-50'
def test_geom_bin(self):
"""Test if geom_bin fails under some conditions."""
freq = np.arange(0, 100, 0.1)
pds = np.random.normal(2, 0.1, len(freq))
_ = mp.rebin.geom_bin(freq, pds, 1.3, pds_err=pds)
_ = mp.rebin.geom_bin(freq, pds, 1.3)
del _
def test_exposure_calculation1(self):
"""Test if the exposure calculator works correctly."""
times = np.array([1., 2., 3.])
events = np.array([2.])
priors = np.array([2.])
dt = np.array([1., 1., 1.])
expo = mp.exposure.get_livetime_per_bin(times, events, priors, dt=dt,
gti=None)
np.testing.assert_almost_equal(expo, np.array([1, 0.5, 0.]))
def test_exposure_calculation2(self):
"""Test if the exposure calculator works correctly."""
times = np.array([1., 2.])
events = np.array([2.1])
priors = np.array([0.3])
dt = np.array([1., 1.])
expo = mp.exposure.get_livetime_per_bin(times, events, priors, dt=dt,
gti=None)
np.testing.assert_almost_equal(expo, np.array([0, 0.3]))
def test_exposure_calculation3(self):
"""Test if the exposure calculator works correctly."""
times = np.array([1., 2., 3.])
events = np.array([2.1])
priors = np.array([0.7])
dt = np.array([1., 1., 1.])
expo = mp.exposure.get_livetime_per_bin(times, events, priors, dt=dt,
gti=None)
np.testing.assert_almost_equal(expo, np.array([0.1, 0.6, 0.]))
def test_exposure_calculation4(self):
"""Test if the exposure calculator works correctly."""
times = np.array([1., 1.5, 2., 2.5, 3.])
events = np.array([2.6])
priors = np.array([1.5])
dt = np.array([0.5, 0.5, 0.5, 0.5, 0.5])
expected_expo = np.array([0.15, 0.5, 0.5, 0.35, 0])
expo = mp.exposure.get_livetime_per_bin(times, events, priors, dt=dt,
gti=None)
np.testing.assert_almost_equal(expo, expected_expo)
def test_exposure_calculation5(self):
"""Test if the exposure calculator works correctly."""
times = np.array([1., 2., 3.])
events = np.array([1.1, 1.2, 1.4, 1.5, 1.8, 4])
# dead time = 0.05
priors = np.array([0.55, 0.05, 0.15, 0.05, 0.25, 2.15])
dt = np.array([1, 1, 1])
expected_expo = np.array([0.8, 0.9, 1])
expo = mp.exposure.get_livetime_per_bin(times, events, priors, dt=dt,
gti=None)
np.testing.assert_almost_equal(expo, expected_expo)
def test_high_precision_keyword(self):
"""Test high precision FITS keyword read."""
from maltpynt.io import high_precision_keyword_read
hdr = {"MJDTESTI": 100, "MJDTESTF": np.longdouble(0.5),
"CIAO": np.longdouble(0.)}
assert \
high_precision_keyword_read(hdr,
"MJDTEST") == np.longdouble(100.5), \
"Keyword MJDTEST read incorrectly"
assert \
high_precision_keyword_read(hdr, "CIAO") == np.longdouble(0.), \
"Keyword CIAO read incorrectly"
def test_decide_spectrum_intervals(self):
"""Test the division of start and end times to calculate spectra."""
start_times = \
mp.fspec.decide_spectrum_intervals([[0, 400], [1022, 1200]], 128)
assert np.all(start_times == np.array([0, 128, 256, 1022]))
def test_decide_spectrum_lc_intervals_invalid(self):
with pytest.raises(ValueError):
a, b = mp.fspec.decide_spectrum_lc_intervals([[0, 400]],
128, [500, 501])
with pytest.raises(ValueError):
a, b = mp.fspec.decide_spectrum_lc_intervals([[1000, 1400]],
128, [500, 501])
with pytest.raises(ValueError):
a, b = mp.fspec.decide_spectrum_lc_intervals(
np.array([[0, 5]]), 5, np.array([0, 1, 2, 3]))
def test_decide_spectrum_lc_intervals_corner_case(self):
a, b = mp.fspec.decide_spectrum_lc_intervals(
np.array([[0, 400]]), 100, np.array([200, 250, 300]))
assert np.allclose(a, [0])
assert np.allclose(b, [2])
a, b = mp.fspec.decide_spectrum_lc_intervals(
np.array([[0, 5]]), 5, np.array([0, 1, 2, 3, 4]))
assert np.allclose(a, [0])
assert np.allclose(b, [5])
def test_filter_for_deadtime_nonpar(self):
"""Test dead time filter, non-paralyzable case."""
events = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
filt_events = mp.fake.filter_for_deadtime(events, 0.11)
expected = np.array([1, 2, 2.2, 3, 3.2])
assert np.all(filt_events == expected), \
"Wrong: {} vs {}".format(filt_events, expected)
def test_filter_for_deadtime_nonpar_bkg(self):
"""Test dead time filter, non-paralyzable case, with background."""
events = np.array([1.1, 2, 2.2, 3, 3.2])
bkg_events = np.array([1, 3.1])
filt_events, info = \
mp.fake.filter_for_deadtime(events, 0.11, bkg_ev_list=bkg_events,
return_all=True)
expected_ev = np.array([2, 2.2, 3, 3.2])
expected_bk = np.array([1])
assert np.all(filt_events == expected_ev), \
"Wrong: {} vs {}".format(filt_events, expected_ev)
assert np.all(info.bkg == expected_bk), \
"Wrong: {} vs {}".format(info.bkg, expected_bk)
def test_filter_for_deadtime_par(self):
"""Test dead time filter, paralyzable case."""
events = np.array([1, 1.1, 2, 2.2, 3, 3.1, 3.2])
assert np.all(mp.fake.filter_for_deadtime(
events, 0.11, paralyzable=True) == np.array([1, 2, 2.2, 3]))
def test_filter_for_deadtime_par_bkg(self):
"""Test dead time filter, paralyzable case, with background."""
events = np.array([1.1, 2, 2.2, 3, 3.2])
bkg_events = np.array([1, 3.1])
filt_events, info = \
mp.fake.filter_for_deadtime(events, 0.11, bkg_ev_list=bkg_events,
paralyzable=True, return_all=True)
expected_ev = np.array([2, 2.2, 3])
expected_bk = np.array([1])
assert np.all(filt_events == expected_ev), \
"Wrong: {} vs {}".format(filt_events, expected_ev)
assert np.all(info.bkg == expected_bk), \
"Wrong: {} vs {}".format(info.bkg, expected_bk)
def test_event_simulation(self):
"""Test simulation of events."""
times = np.array([0.5, 1.5])
lc = np.array([1000, 2000])
events = mp.fake.fake_events_from_lc(times, lc)
newtime, newlc = mp.lcurve.lcurve(events, 1., start_time=0,
stop_time=2)
assert np.all(np.abs(newlc - lc) < 3 * np.sqrt(lc))
np.testing.assert_almost_equal(newtime, times)
def test_deadtime_mask_par(self):
"""Test dead time filter, paralyzable case, with background."""
events = np.array([1.1, 2, 2.2, 3, 3.2])
bkg_events = np.array([1, 3.1])
filt_events, info = \
mp.fake.filter_for_deadtime(events, 0.11, bkg_ev_list=bkg_events,
paralyzable=True, return_all=True)
assert np.all(filt_events == events[info.mask])
def test_deadtime_conversion(self):
"""Test the functions for count rate conversion."""
original_rate = np.arange(1, 1000, 10)
deadtime = 2.5e-3
rdet = mp.base.r_det(deadtime, original_rate)
rin = mp.base.r_in(deadtime, rdet)
np.testing.assert_almost_equal(rin, original_rate)
def test_gti_filtering_by_length(self):
gti = [[0, 10], [0, 100], [0, 9]]
newgti = mp.create_gti.filter_gti_by_length(gti, 10)
assert np.all(newgti == [[0, 10], [0, 100]])
def test_high_precision_split1(self):
C_I, C_F, C_l, k = \
mp.io._split_high_precision_number("C", np.double(0.01), 8)
assert C_I == 1
np.testing.assert_almost_equal(C_F, 0, 6)
assert C_l == -2
assert k == "double"
def test_high_precision_split2(self):
C_I, C_F, C_l, k = \
mp.io._split_high_precision_number("C", np.double(1.01), 8)
assert C_I == 1
np.testing.assert_almost_equal(C_F, np.double(0.01), 6)
assert C_l == 0
assert k == "double"
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 39.502825 | 77 | 0.552918 |
9fe85cee41c0ee3793f909bcd6e28df714a7da01 | 581 | py | Python | users/urls.py | aashaka/twitter-analyser | 313ea4e342d100dac78b0fd07822cac7457450f9 | [
"MIT"
] | null | null | null | users/urls.py | aashaka/twitter-analyser | 313ea4e342d100dac78b0fd07822cac7457450f9 | [
"MIT"
] | null | null | null | users/urls.py | aashaka/twitter-analyser | 313ea4e342d100dac78b0fd07822cac7457450f9 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
# URL routes for the users app; every view referenced here lives in
# the sibling views module imported above.
urlpatterns = [
    url(r'^$', views.index, name='user_home'),  # app landing page
    url(r'^dashboard/$', views.dashboard, name='dashboard'),
    url(r'complete/?$', views.complete, name='complete'),
    url(r'delete/?$', views.delete_account, name='user.delete'),  # account deletion
    url(r'access_switch/?$', views.access_switch, name='user.access'),
    url(r'regenerate/?$', views.regenerate_graphs, name='regenerate'),
    url(r'public_data/?$', views.public_data, name='public_data'),
    url(r'^upload_simple/?$', views.upload_old, name='upload_old'),
]
| 38.733333 | 70 | 0.672978 |
74f8363382761dfce4653a928cb24fb5aa518e46 | 2,371 | py | Python | Lab 06 Functions/labs06.05-studentreadmodules.py | Silvio622/pands-problems-2020 | 7a9c93b513c881eaf80f20ed4e6e7d2969883596 | [
"MIT"
] | null | null | null | Lab 06 Functions/labs06.05-studentreadmodules.py | Silvio622/pands-problems-2020 | 7a9c93b513c881eaf80f20ed4e6e7d2969883596 | [
"MIT"
] | null | null | null | Lab 06 Functions/labs06.05-studentreadmodules.py | Silvio622/pands-problems-2020 | 7a9c93b513c881eaf80f20ed4e6e7d2969883596 | [
"MIT"
] | null | null | null | # Silvio Dunst
# We can now write the function doAdd(). So we need to think what we want this to do.
# a. Read in the students name (that is straightforward)
# b. Read in the module names and grades (this is a bit more complicated so lets put this in separate function and think about it by itself,
# c. Test this function, it creates a student dict, we can print that out.
# d. We should add the student dict to an array (we will use a global array for the moment)
# e. Test this.
students =[] # module-level registry of student records; each entry is a dict with "name" and "modules"
def readModules():
    """Interactively read module names and grades from the user.

    Prompts repeatedly until a blank module name is entered.

    Returns:
        list: one dict per module with keys "name" (str) and "grade" (int).

    Raises:
        ValueError: if a grade is not a valid integer (error handling is
        deliberately omitted in this exercise).
    """
    modules = []
    # Fix: the prompt previously misspelled "first" as "firts".
    moduleName = input("\tEnter the first Module name (blank to quit): ").strip()
    while moduleName != "":  # a blank name ends the input loop
        module = {}
        module["name"] = moduleName
        # No error handling: a non-integer grade raises ValueError.
        module["grade"] = int(input("\t\tEnter grade:"))
        modules.append(module)
        # Read the next module name.
        moduleName = input("\tEnter next module name (blank to quit):").strip()
    return modules
def doAdd():
    """Prompt for one student's name and modules and record the student.

    Appends a dict with keys "name" and "modules" to the module-level
    ``students`` list.
    """
    record = {
        "name": input("enter name :"),
        "modules": readModules(),
    }
    students.append(record)
# Manual test drive: collect two students interactively, then show the result.
doAdd() # prompt for the first student's name and modules
doAdd() # prompt for the second student's name and modules
print(students) # display the accumulated student records
| 64.081081 | 148 | 0.716997 |
ab29303b3a7f4127593de9e41fb7b910ba1ea98f | 5,213 | py | Python | service/generated_flatbuffers/tflite/Pool2DOptions.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 213 | 2021-06-11T01:15:16.000Z | 2022-02-25T16:18:57.000Z | service/generated_flatbuffers/tflite/Pool2DOptions.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 32 | 2021-06-17T17:58:54.000Z | 2022-02-02T05:58:10.000Z | service/generated_flatbuffers/tflite/Pool2DOptions.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 28 | 2021-06-17T17:34:21.000Z | 2022-03-24T14:05:20.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Pool2DOptions(object):
    """Generated FlatBuffers read accessor for the TFLite Pool2DOptions table.

    Auto-generated by the FlatBuffers compiler (see header) — do not edit
    the logic by hand. Each accessor looks up a vtable offset and returns
    the stored value, or the schema default 0 when the field is absent.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsPool2DOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Pool2DOptions()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
    # Pool2DOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # Pool2DOptions
    def Padding(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0
    # Pool2DOptions
    def StrideW(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
    # Pool2DOptions
    def StrideH(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
    # Pool2DOptions
    def FilterWidth(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
    # Pool2DOptions
    def FilterHeight(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
    # Pool2DOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0
# Generated builder helpers for serializing a Pool2DOptions table:
# call Start, then the Add* setters for each present field, then End.
def Pool2DOptionsStart(builder): builder.StartObject(6)
def Pool2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
def Pool2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
def Pool2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
def Pool2DOptionsAddFilterWidth(builder, filterWidth): builder.PrependInt32Slot(3, filterWidth, 0)
def Pool2DOptionsAddFilterHeight(builder, filterHeight): builder.PrependInt32Slot(4, filterHeight, 0)
def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(5, fusedActivationFunction, 0)
def Pool2DOptionsEnd(builder): return builder.EndObject()
class Pool2DOptionsT(object):
    """Generated mutable object API counterpart of Pool2DOptions.

    Holds the table fields as plain attributes; can be unpacked from a
    Pool2DOptions accessor and packed back into a builder.
    """
    # Pool2DOptionsT
    def __init__(self):
        self.padding = 0 # type: int
        self.strideW = 0 # type: int
        self.strideH = 0 # type: int
        self.filterWidth = 0 # type: int
        self.filterHeight = 0 # type: int
        self.fusedActivationFunction = 0 # type: int
    @classmethod
    def InitFromBuf(cls, buf, pos):
        pool2DOptions = Pool2DOptions()
        pool2DOptions.Init(buf, pos)
        return cls.InitFromObj(pool2DOptions)
    @classmethod
    def InitFromObj(cls, pool2DOptions):
        x = Pool2DOptionsT()
        x._UnPack(pool2DOptions)
        return x
    # Pool2DOptionsT
    def _UnPack(self, pool2DOptions):
        # Copy every field from the read accessor into plain attributes.
        if pool2DOptions is None:
            return
        self.padding = pool2DOptions.Padding()
        self.strideW = pool2DOptions.StrideW()
        self.strideH = pool2DOptions.StrideH()
        self.filterWidth = pool2DOptions.FilterWidth()
        self.filterHeight = pool2DOptions.FilterHeight()
        self.fusedActivationFunction = pool2DOptions.FusedActivationFunction()
    # Pool2DOptionsT
    def Pack(self, builder):
        # Serialize the attributes back through the generated builder helpers.
        Pool2DOptionsStart(builder)
        Pool2DOptionsAddPadding(builder, self.padding)
        Pool2DOptionsAddStrideW(builder, self.strideW)
        Pool2DOptionsAddStrideH(builder, self.strideH)
        Pool2DOptionsAddFilterWidth(builder, self.filterWidth)
        Pool2DOptionsAddFilterHeight(builder, self.filterHeight)
        Pool2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        pool2DOptions = Pool2DOptionsEnd(builder)
        return pool2DOptions
| 37.775362 | 133 | 0.703817 |
408e60644b562baf75388d834a6b1df3a0da1296 | 570 | py | Python | Chapter03/killing_processes.py | awangga/Python-Parallel-Programming-Cookbook-Second-Edition | ce6055e0187a334a2210249202f2efd307dd8a61 | [
"MIT"
] | null | null | null | Chapter03/killing_processes.py | awangga/Python-Parallel-Programming-Cookbook-Second-Edition | ce6055e0187a334a2210249202f2efd307dd8a61 | [
"MIT"
] | 72 | 2021-03-15T17:48:02.000Z | 2021-04-23T03:36:02.000Z | Chapter03/killing_processes.py | awangga/Python-Parallel-Programming-Cookbook-Second-Edition | ce6055e0187a334a2210249202f2efd307dd8a61 | [
"MIT"
] | 100 | 2020-05-06T06:58:10.000Z | 2022-03-30T15:52:11.000Z | import multiprocessing
import time
def foo():
    """Announce start, print ten numbered lines one second apart, announce end."""
    print('Starting function')
    step = 0
    while step < 10:
        print('-->%d\n' % step)
        time.sleep(1)
        step += 1
    print('Finished function')
if __name__ == '__main__':
    # Run foo() in a child process, let it work for 5 seconds, then kill it.
    p = multiprocessing.Process(target=foo)
    print ('Process before execution:', p, p.is_alive())
    p.start()
    print ('Process running:', p, p.is_alive())
    time.sleep(5)
    # terminate() sends SIGTERM, so foo() will not reach its final print.
    p.terminate()
    print ('Process terminated:', p, p.is_alive())
    # join() reaps the child; afterwards exitcode reflects the termination.
    p.join()
    print ('Process joined:', p, p.is_alive())
    print ('Process exit code:', p.exitcode)
1e366a776d0083b8a12f220492a25faa6171bfe8 | 748 | py | Python | backend/config.py | MargoWM/codeforpoznan.pl_v3 | 4eee987d2f6c1d81878f14ea1f6878f4d868f0cd | [
"MIT"
] | null | null | null | backend/config.py | MargoWM/codeforpoznan.pl_v3 | 4eee987d2f6c1d81878f14ea1f6878f4d868f0cd | [
"MIT"
] | 1 | 2020-07-08T09:24:29.000Z | 2020-07-08T09:24:29.000Z | backend/config.py | MargoWM/codeforpoznan.pl_v3 | 4eee987d2f6c1d81878f14ea1f6878f4d868f0cd | [
"MIT"
] | null | null | null | import os
from os import urandom
class Config(object):
    """Base Flask configuration shared by all environments."""

    # Both flags default to off; environment subclasses override as needed.
    DEBUG = False
    TESTING = False
class ProductionConfig(Config):
    """Production configuration: debug and testing explicitly disabled."""
    DEBUG = False
    TESTING = False
class DevelopmentConfig(Config):
    """Development configuration: debug/testing enabled, DB from env vars."""

    DEBUG = True
    TESTING = True

    # Database configuration.
    # Fix: this section was previously "documented" by a bare string literal
    # ('"""db config"""') placed mid-class, which is a no-op expression, not
    # a docstring — replaced with a real comment.
    DB_USER = os.environ.get("DB_USER")
    DB_PASSWORD = os.environ.get("DB_PASSWORD")
    DB_HOSTNAME = os.environ.get("DB_HOST")
    DB_PORT = 5432  # default PostgreSQL port (not configurable via env here)
    DB_NAME = os.environ.get("DB_NAME")
    # NOTE(review): if any of the env vars above are unset, the literal text
    # "None" ends up in the URI — confirm deployment always sets them.
    SQLALCHEMY_DATABASE_URI = (
        f"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOSTNAME}:{DB_PORT}/{DB_NAME}"
    )
    SQLALCHEMY_ENGINE_OPTIONS = {"pool_recycle": 299}
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SECRET_KEY = os.environ.get("SECRET_KEY")
    JWT_BLACKLIST_ENABLED = True
60f99826c96d96ce2ea25473b12a147e6f494606 | 13,747 | py | Python | integration_tests/test_upgrade.py | stanleeeqwe/chain-main | 1257c1c7eade0aeb6be9e936c436c2d37ad04623 | [
"Apache-2.0"
] | 1 | 2021-09-22T11:11:44.000Z | 2021-09-22T11:11:44.000Z | integration_tests/test_upgrade.py | stanleeeqwe/chain-main | 1257c1c7eade0aeb6be9e936c436c2d37ad04623 | [
"Apache-2.0"
] | null | null | null | integration_tests/test_upgrade.py | stanleeeqwe/chain-main | 1257c1c7eade0aeb6be9e936c436c2d37ad04623 | [
"Apache-2.0"
] | null | null | null | import configparser
import json
import re
import subprocess
import time
from datetime import datetime, timedelta
from pathlib import Path
import pytest
from dateutil.parser import isoparse
from pystarport.cluster import SUPERVISOR_CONFIG_FILE
from pystarport.ports import rpc_port
from .utils import (
cluster_fixture,
parse_events,
wait_for_block,
wait_for_block_time,
wait_for_new_blocks,
wait_for_port,
)
# Mark every test in this module with the "upgrade" pytest marker.
pytestmark = pytest.mark.upgrade
def edit_chain_program(chain_id, ini_path, callback):
    """Rewrite the supervisor config sections of a chain's node processes.

    For every ``[program:<chain_id>-node<i>]`` section in the ini file at
    *ini_path*, call ``callback(i, old_section)`` and merge the returned
    mapping into that section, then write the file back in place.

    :param chain_id: chain identifier used in the section names
    :param ini_path: ``pathlib.Path`` to the supervisor ini file
    :param callback: callable ``(node_index_str, old_section) -> dict``
    """
    ini = configparser.RawConfigParser()
    # Fix: read through a context manager so the read handle is closed
    # (the old code leaked the handle returned by ini_path.open()).
    with ini_path.open() as fp:
        ini.read_file(fp)
    # Fix: escape chain_id so regex metacharacters in it cannot break the match.
    reg = re.compile(fr"^program:{re.escape(chain_id)}-node(\d+)")
    for section in ini.sections():
        m = reg.match(section)
        if m:
            i = m.group(1)
            old = ini[section]
            ini[section].update(callback(i, old))
    with ini_path.open("w") as fp:
        ini.write(fp)
def init_cosmovisor(data):
    """Create the cosmovisor directory layout inside the devnet data dir.

    Builds the upgrade binaries with ``nix-build`` and links the genesis
    binary directory into place.
    """
    cosmovisor_dir = data / "cosmovisor"
    cosmovisor_dir.mkdir()
    nix_expr = Path(__file__).parent / "upgrade-test.nix"
    build_cmd = ["nix-build", nix_expr, "-o", cosmovisor_dir / "upgrades"]
    subprocess.run(build_cmd, check=True)
    (cosmovisor_dir / "genesis").symlink_to("./upgrades/genesis")
def post_init(chain_id, data):
    """Switch every node's supervisor entry to launch through cosmovisor."""
    def configure_node(index, _old_section):
        # Each node home gets a symlink to the shared cosmovisor tree.
        node_home = data / f"node{index}"
        (node_home / "cosmovisor").symlink_to("../../cosmovisor")
        env = f"DAEMON_NAME=chain-maind,DAEMON_HOME={node_home.absolute()}"
        return {
            "command": f"cosmovisor start --home %(here)s/node{index}",
            "environment": env,
        }
    edit_chain_program(chain_id, data / SUPERVISOR_CONFIG_FILE, configure_node)
def migrate_genesis_time(cluster, i=0):
    """Overwrite the genesis_time in node *i*'s genesis.json.

    Reads the node's ``config/genesis.json``, replaces its ``genesis_time``
    with the cluster config's ``genesis-time`` value, and writes it back.

    :param cluster: object exposing ``home(i)`` (a Path) and ``config`` (a mapping)
    :param i: node index (default 0)
    """
    genesis_path = cluster.home(i) / "config/genesis.json"
    # Fix: the old code leaked the file handle from json.load(open(...));
    # pathlib's read_text/write_text always close the file.
    genesis = json.loads(genesis_path.read_text())
    genesis["genesis_time"] = cluster.config.get("genesis-time")
    genesis_path.write_text(json.dumps(genesis))
# use function scope to re-initialize for each test case
@pytest.fixture(scope="function")
def cosmovisor_cluster(worker_index, pytestconfig, tmp_path_factory):
    """Override the cluster fixture: a fresh cosmovisor-managed devnet per test."""
    data = tmp_path_factory.mktemp("data")
    # Build the cosmovisor genesis/upgrades tree before the cluster starts.
    init_cosmovisor(data)
    yield from cluster_fixture(
        Path(__file__).parent / "configs/default.yaml",
        worker_index,
        data,
        quiet=pytestconfig.getoption("supervisord-quiet"),
        post_init=post_init,
        enable_cov=False,
        cmd=(data / "cosmovisor/genesis/bin/chain-maind"),
    )
@pytest.mark.skip(
    reason="CI fail: https://github.com/crypto-org-chain/chain-main/issues/560"
)
def test_cosmovisor(cosmovisor_cluster):
    """
    - propose an upgrade and pass it
    - wait for it to happen
    - it should work transparently
    """
    cluster = cosmovisor_cluster
    height = cluster.block_height()
    target_height = height + 15
    print("upgrade height", target_height)
    plan_name = "v2.0.0"
    # Submit the software-upgrade governance proposal from the community account.
    rsp = cluster.gov_propose(
        "community",
        "software-upgrade",
        {
            "name": plan_name,
            "title": "upgrade test",
            "description": "ditto",
            "upgrade-height": target_height,
            "deposit": "0.1cro",
        },
    )
    assert rsp["code"] == 0, rsp
    # get proposal_id
    ev = parse_events(rsp["logs"])["submit_proposal"]
    assert ev["proposal_type"] == "SoftwareUpgrade", rsp
    proposal_id = ev["proposal_id"]
    # Vote yes from both validators so the proposal can pass.
    rsp = cluster.gov_vote("validator", proposal_id, "yes")
    assert rsp["code"] == 0, rsp["raw_log"]
    rsp = cluster.gov_vote("validator", proposal_id, "yes", i=1)
    assert rsp["code"] == 0, rsp["raw_log"]
    proposal = cluster.query_proposal(proposal_id)
    wait_for_block_time(cluster, isoparse(proposal["voting_end_time"]))
    proposal = cluster.query_proposal(proposal_id)
    assert proposal["status"] == "PROPOSAL_STATUS_PASSED", proposal
    # block should just pass the target height
    wait_for_block(cluster, target_height + 2, 480)
def propose_and_pass(cluster, kind, proposal):
    """Submit a governance proposal, vote yes twice, and wait until it passes.

    :param cluster: the running devnet cluster
    :param kind: proposal kind, e.g. "software-upgrade" or
        "cancel-software-upgrade"
    :param proposal: dict of proposal fields passed to ``gov_propose``
    :return: the final (passed) proposal record
    """
    rsp = cluster.gov_propose(
        "community",
        kind,
        proposal,
    )
    assert rsp["code"] == 0, rsp["raw_log"]
    # get proposal_id
    ev = parse_events(rsp["logs"])["submit_proposal"]
    # The event's proposal_type is the CamelCased kind, e.g. "SoftwareUpgrade".
    assert ev["proposal_type"] == kind.title().replace("-", ""), rsp
    proposal_id = ev["proposal_id"]
    proposal = cluster.query_proposal(proposal_id)
    assert proposal["status"] == "PROPOSAL_STATUS_VOTING_PERIOD", proposal
    # Vote yes from both validators.
    rsp = cluster.gov_vote("validator", proposal_id, "yes")
    assert rsp["code"] == 0, rsp["raw_log"]
    rsp = cluster.gov_vote("validator", proposal_id, "yes", i=1)
    assert rsp["code"] == 0, rsp["raw_log"]
    proposal = cluster.query_proposal(proposal_id)
    # Wait one second past the voting end so tallying has happened.
    wait_for_block_time(
        cluster, isoparse(proposal["voting_end_time"]) + timedelta(seconds=1)
    )
    proposal = cluster.query_proposal(proposal_id)
    assert proposal["status"] == "PROPOSAL_STATUS_PASSED", proposal
    return proposal
def test_manual_upgrade(cosmovisor_cluster):
    """
    - do the upgrade test by replacing binary manually
    - check the panic do happens
    """
    cluster = cosmovisor_cluster
    # use the normal binary first
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command": f"%(here)s/node{i}/cosmovisor/genesis/bin/chain-maind start "
            f"--home %(here)s/node{i}"
        },
    )
    cluster.reload_supervisor()
    time.sleep(5) # FIXME the port seems still exists for a while after process stopped
    wait_for_port(rpc_port(cluster.config["validators"][0]["base_port"]))
    # wait for a new block to make sure chain started up
    wait_for_new_blocks(cluster, 1)
    target_height = cluster.block_height() + 15
    print("upgrade height", target_height)
    plan_name = "v2.0.0"
    propose_and_pass(
        cluster,
        "software-upgrade",
        {
            "name": plan_name,
            "title": "upgrade test",
            "description": "ditto",
            "upgrade-height": target_height,
            "deposit": "0.1cro",
        },
    )
    # wait for upgrade plan activated
    wait_for_block(cluster, target_height, 600)
    # wait a little bit
    time.sleep(0.5)
    # check nodes are all stopped (the old binary halts at the upgrade height)
    assert (
        cluster.supervisor.getProcessInfo(f"{cluster.chain_id}-node0")["state"]
        != "RUNNING"
    )
    assert (
        cluster.supervisor.getProcessInfo(f"{cluster.chain_id}-node1")["state"]
        != "RUNNING"
    )
    # check upgrade-info.json file is written
    assert (
        json.load((cluster.home(0) / "data/upgrade-info.json").open())
        == json.load((cluster.home(1) / "data/upgrade-info.json").open())
        == {
            "name": plan_name,
            "height": target_height,
        }
    )
    # use the upgrade-test binary
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command": (
                f"%(here)s/node{i}/cosmovisor/upgrades/v2.0.0/bin/chain-maind "
                f"start --home %(here)s/node{i}"
            )
        },
    )
    cluster.reload_supervisor()
    # wait for it to generate new blocks
    wait_for_block(cluster, target_height + 2, 600)
def test_manual_upgrade_all(cosmovisor_cluster):
    """Run the manual v2.0.0 upgrade, then upgrade to v3.0.0 and verify a
    delegation that failed before the upgrade (the "vesting bug") succeeds
    afterwards.
    """
    test_manual_upgrade(cosmovisor_cluster)
    cluster = cosmovisor_cluster
    community_addr = cluster.address("community")
    reserve_addr = cluster.address("reserve")
    # for the fee payment
    cluster.transfer(community_addr, reserve_addr, "10000basecro")
    signer1_address = cluster.address("reserve", i=0)
    validators = cluster.validators()
    validator1_operator_address = validators[0]["operator_address"]
    validator2_operator_address = validators[1]["operator_address"]
    staking_validator1 = cluster.validator(validator1_operator_address, i=0)
    assert validator1_operator_address == staking_validator1["operator_address"]
    staking_validator2 = cluster.validator(validator2_operator_address, i=1)
    assert validator2_operator_address == staking_validator2["operator_address"]
    old_bonded = cluster.staking_pool()
    rsp = cluster.delegate_amount(
        validator1_operator_address,
        "2009999498basecro",
        signer1_address,
        0,
        "0.025basecro",
    )
    assert rsp["code"] == 0, rsp["raw_log"]
    assert cluster.staking_pool() == old_bonded + 2009999498
    rsp = cluster.delegate_amount(
        validator2_operator_address, "1basecro", signer1_address, 0, "0.025basecro"
    )
    # vesting bug: before v3.0.0 this delegation is expected to fail
    assert rsp["code"] != 0, rsp["raw_log"]
    assert cluster.staking_pool() == old_bonded + 2009999498
    target_height = cluster.block_height() + 15
    print("upgrade height", target_height)
    plan_name = "v3.0.0"
    propose_and_pass(
        cluster,
        "software-upgrade",
        {
            "name": plan_name,
            "title": "upgrade test",
            "description": "ditto",
            "upgrade-height": target_height,
            "deposit": "0.1cro",
        },
    )
    # wait for upgrade plan activated
    wait_for_block(cluster, target_height, 600)
    # wait a little bit
    time.sleep(0.5)
    # check nodes are all stopped
    assert (
        cluster.supervisor.getProcessInfo(f"{cluster.chain_id}-node0")["state"]
        != "RUNNING"
    )
    assert (
        cluster.supervisor.getProcessInfo(f"{cluster.chain_id}-node1")["state"]
        != "RUNNING"
    )
    # check upgrade-info.json file is written
    assert (
        json.load((cluster.home(0) / "data/upgrade-info.json").open())
        == json.load((cluster.home(1) / "data/upgrade-info.json").open())
        == {
            "name": plan_name,
            "height": target_height,
        }
    )
    # use the upgrade-test binary
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command": (
                f"%(here)s/node{i}/cosmovisor/upgrades/v3.0.0/bin/chain-maind "
                f"start --home %(here)s/node{i}"
            )
        },
    )
    cluster.reload_supervisor()
    # wait for it to generate new blocks
    wait_for_block(cluster, target_height + 2, 600)
    rsp = cluster.delegate_amount(
        validator2_operator_address, "1basecro", signer1_address, 0, "0.025basecro"
    )
    # vesting bug fixed: the same delegation must now succeed
    assert rsp["code"] == 0, rsp["raw_log"]
    assert cluster.staking_pool() == old_bonded + 2009999499
def test_cancel_upgrade(cluster):
    """
    use default cluster
    - propose upgrade and pass it
    - cancel the upgrade before execution
    """
    plan_name = "upgrade-test"
    time.sleep(5) # FIXME the port seems still exists for a while after process stopped
    wait_for_port(rpc_port(cluster.config["validators"][0]["base_port"]))
    upgrade_height = cluster.block_height() + 30
    print("propose upgrade plan")
    print("upgrade height", upgrade_height)
    propose_and_pass(
        cluster,
        "software-upgrade",
        {
            "name": plan_name,
            "title": "upgrade test",
            "description": "ditto",
            "upgrade-height": upgrade_height,
            "deposit": "0.1cro",
        },
    )
    print("cancel upgrade plan")
    # Pass a cancel proposal before the upgrade height is reached.
    propose_and_pass(
        cluster,
        "cancel-software-upgrade",
        {
            "title": "there is bug, cancel upgrade",
            "description": "there is bug, cancel upgrade",
            "deposit": "0.1cro",
        },
    )
    # wait for blocks after upgrade, should success since upgrade is canceled
    wait_for_block(cluster, upgrade_height + 2)
def test_manual_export(cosmovisor_cluster):
    """
    - do chain state export, override the genesis time in the genesis file,
      and reset the data set
    - see https://github.com/crypto-org-chain/chain-main/issues/289
    """
    cluster = cosmovisor_cluster
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command": f"%(here)s/node{i}/cosmovisor/genesis/bin/chain-maind start "
            f"--home %(here)s/node{i}"
        },
    )
    cluster.reload_supervisor()
    wait_for_port(rpc_port(cluster.config["validators"][0]["base_port"]))
    # wait for a new block to make sure chain started up
    wait_for_new_blocks(cluster, 1)
    cluster.supervisor.stopAllProcesses()
    # check the state of all nodes should be stopped
    for info in cluster.supervisor.getAllProcessInfo():
        assert info["statename"] == "STOPPED"
    # export the state
    cluster.cmd = (
        cluster.data_root
        / cluster.chain_id
        / "node0/cosmovisor/genesis/bin/chain-maind"
    )
    cluster.cosmos_cli(0).export()
    # update the genesis time = current time + 5 secs
    newtime = datetime.utcnow() + timedelta(seconds=5)
    cluster.config["genesis-time"] = newtime.replace(tzinfo=None).isoformat("T") + "Z"
    # Rewrite each node's genesis time, validate, and wipe its data.
    for i in range(cluster.nodes_len()):
        migrate_genesis_time(cluster, i)
        cluster.validate_genesis(i)
        cluster.cosmos_cli(i).unsaferesetall()
    cluster.supervisor.startAllProcesses()
    # The chain must produce a block from the new genesis time.
    wait_for_new_blocks(cluster, 1)
    cluster.supervisor.stopAllProcesses()
    # check the state of all nodes should be stopped
    for info in cluster.supervisor.getAllProcessInfo():
        assert info["statename"] == "STOPPED"
| 30.892135 | 88 | 0.635702 |
aaf2d4070af487b06f4f2d8f7feeca7bca3a4435 | 14,169 | py | Python | neo/Settings.py | WhisperQFun/neo-python | 1790581bfb9c91e92814fe6624997f90c08f989f | [
"MIT"
] | null | null | null | neo/Settings.py | WhisperQFun/neo-python | 1790581bfb9c91e92814fe6624997f90c08f989f | [
"MIT"
] | null | null | null | neo/Settings.py | WhisperQFun/neo-python | 1790581bfb9c91e92814fe6624997f90c08f989f | [
"MIT"
] | null | null | null | """
These are the core network and system settings. For user-preferences, take a
look at `UserPreferences.py`.
The settings are dynamically configurable, for instance to set them depending
on CLI arguments. By default these are the testnet settings, but you can
reconfigure them by calling the `setup(..)` methods.
"""
import json
import logging
import os
import sys
import logzero
import pip
from neocore.Cryptography import Helper
from neorpc.Client import RPCClient, NEORPCException
from neorpc.Settings import settings as rpc_settings
from neo import __version__
dir_current = os.path.dirname(os.path.abspath(__file__))
# ROOT_INSTALL_PATH is the root path of neo-python, whether installed as package or from git.
ROOT_INSTALL_PATH = os.path.abspath(os.path.join(dir_current, ".."))
# This detects if we are running from an 'editable' version (like ``python neo/bin/prompt.py``)
# or from a packaged install version from pip
IS_PACKAGE_INSTALL = 'site-packages/neo' in dir_current
# The filenames for various files. Might be improved by using system
# user directories: https://github.com/ActiveState/appdirs
FILENAME_PREFERENCES = os.path.join(ROOT_INSTALL_PATH, 'neo/data/preferences.json')
# The protocol json files are always in the project root
FILENAME_SETTINGS_MAINNET = os.path.join(ROOT_INSTALL_PATH, 'neo/data/protocol.mainnet.json')
FILENAME_SETTINGS_TESTNET = os.path.join(ROOT_INSTALL_PATH, 'neo/data/protocol.testnet.json')
FILENAME_SETTINGS_PRIVNET = os.path.join(ROOT_INSTALL_PATH, 'neo/data/protocol.privnet.json')
FILENAME_SETTINGS_COZNET = os.path.join(ROOT_INSTALL_PATH, 'neo/data/protocol.coz.json')
FILENAME_SETTINGS_UNITTEST_NET = os.path.join(ROOT_INSTALL_PATH, 'neo/data/protocol.unittest-net.json')
class PrivnetConnectionError(Exception):
    """Raised when the private-net container cannot be reached, or its
    nonce does not match the locally stored chain database."""
class SystemCheckError(Exception):
    """Raised when a startup system check (Python version, required
    dependency versions) fails."""
def check_depdendencies():
    """
    Makes sure that all required dependencies are installed in the exact version
    (as specified in requirements.txt).

    Raises:
        SystemCheckError: if a requirement from requirements.txt is not
            installed in the pinned version.
    """
    # NOTE(review): pip.get_installed_distributions was removed in pip 10+;
    # confirm the pinned pip version still exposes it.
    # Build a set of "name==version" strings for O(1) membership tests.
    installed_packages = pip.get_installed_distributions(local_only=False)
    installed = set("%s==%s" % (pkg.key, pkg.version)
                    for pkg in installed_packages)

    # Now check if each package specified in requirements.txt is actually installed
    deps_filename = os.path.join(ROOT_INSTALL_PATH, "requirements.txt")
    with open(deps_filename, "r") as f:
        for line in f:
            # Skip blank lines and comments; the previous whitespace-split
            # treated comment tokens as dependency names.
            dep = line.strip()
            if not dep or dep.startswith("#"):
                continue
            if dep.lower() not in installed:
                raise SystemCheckError("Required dependency %s is not installed. Please run 'pip install -e .'" % dep)
class SettingsHolder:
    """
    This class holds all the settings. Needs to be setup with one of the
    `setup` methods before using it.
    """

    # Protocol configuration, loaded from one of the protocol.*.json files
    MAGIC = None
    ADDRESS_VERSION = None
    STANDBY_VALIDATORS = None
    SEED_LIST = None
    RPC_LIST = None

    # System fees per transaction type (from the 'SystemFee' section)
    ENROLLMENT_TX_FEE = None
    ISSUE_TX_FEE = None
    PUBLISH_TX_FEE = None
    REGISTER_TX_FEE = None

    # Filesystem layout; LEVELDB_PATH etc. are relative to DATA_DIR_PATH
    DATA_DIR_PATH = None
    LEVELDB_PATH = None
    NOTIFICATION_DB_PATH = None

    # Network/service configuration (from 'ApplicationConfiguration')
    RPC_PORT = None
    NODE_PORT = None
    WS_PORT = None
    URI_PREFIX = None
    BOOTSTRAP_FILE = None
    NOTIF_BOOTSTRAP_FILE = None

    ALL_FEES = None
    USE_DEBUG_STORAGE = False
    DEBUG_STORAGE_PATH = 'Chains/debugstorage'

    ACCEPT_INCOMING_PEERS = False
    CONNECTED_PEER_MAX = 5

    SERVICE_ENABLED = True

    COMPILER_NEP_8 = True

    VERSION_NAME = "/NEO-PYTHON:%s/" % __version__

    # Logging settings
    log_level = None
    log_smart_contract_events = False
    log_vm_instructions = False

    # Emit Notify events when smart contract execution failed. Use for debugging purposes only.
    emit_notify_events_on_sc_execution_error = False

    @property
    def chain_leveldb_path(self):
        """Absolute path of the blockchain LevelDB directory."""
        self.check_chain_dir_exists(warn_migration=True)
        return os.path.abspath(os.path.join(self.DATA_DIR_PATH, self.LEVELDB_PATH))

    @property
    def notification_leveldb_path(self):
        """Absolute path of the notification-data LevelDB directory."""
        self.check_chain_dir_exists()
        return os.path.abspath(os.path.join(self.DATA_DIR_PATH, self.NOTIFICATION_DB_PATH))

    @property
    def debug_storage_leveldb_path(self):
        """Absolute path of the debug-storage LevelDB directory."""
        self.check_chain_dir_exists()
        return os.path.abspath(os.path.join(self.DATA_DIR_PATH, self.DEBUG_STORAGE_PATH))

    # Helpers
    @property
    def is_mainnet(self):
        """ Returns True if settings point to MainNet """
        # Identified by the well-known MainNet port + magic number pair
        return self.NODE_PORT == 10333 and self.MAGIC == 7630401

    @property
    def is_testnet(self):
        """ Returns True if settings point to TestNet """
        return self.NODE_PORT == 20333 and self.MAGIC == 1953787457

    @property
    def is_coznet(self):
        """ Returns True if settings point to CoZnet """
        return self.NODE_PORT == 20333 and self.MAGIC == 1010102

    @property
    def net_name(self):
        """Human-readable name of the currently configured network."""
        if self.MAGIC is None:
            return 'None'
        if self.is_mainnet:
            return 'MainNet'
        if self.is_testnet:
            return 'TestNet'
        if self.is_coznet:
            return 'CozNet'
        # Anything with an unknown port/magic combination is a private net
        return 'PrivateNet'

    # Setup methods
    def setup(self, config_file):
        """ Setup settings from a JSON config file """
        if not self.DATA_DIR_PATH:
            # Setup default data dir
            self.set_data_dir(None)

        with open(config_file) as data_file:
            data = json.load(data_file)

        config = data['ProtocolConfiguration']
        self.MAGIC = config['Magic']
        self.ADDRESS_VERSION = config['AddressVersion']
        self.STANDBY_VALIDATORS = config['StandbyValidators']
        self.SEED_LIST = config['SeedList']
        self.RPC_LIST = config['RPCList']

        fees = config['SystemFee']
        self.ALL_FEES = fees
        self.ENROLLMENT_TX_FEE = fees['EnrollmentTransaction']
        self.ISSUE_TX_FEE = fees['IssueTransaction']
        self.PUBLISH_TX_FEE = fees['PublishTransaction']
        self.REGISTER_TX_FEE = fees['RegisterTransaction']

        config = data['ApplicationConfiguration']
        self.LEVELDB_PATH = config['DataDirectoryPath']
        self.RPC_PORT = int(config['RPCPort'])
        self.NODE_PORT = int(config['NodePort'])
        self.WS_PORT = config['WsPort']
        self.URI_PREFIX = config['UriPrefix']
        self.ACCEPT_INCOMING_PEERS = config.get('AcceptIncomingPeers', False)
        self.BOOTSTRAP_FILE = config['BootstrapFile']
        self.NOTIF_BOOTSTRAP_FILE = config['NotificationBootstrapFile']
        # Propagate the address version to the crypto helper globally
        Helper.ADDRESS_VERSION = self.ADDRESS_VERSION
        self.USE_DEBUG_STORAGE = config.get('DebugStorage', True)
        self.DEBUG_STORAGE_PATH = config.get('DebugStoragePath', 'Chains/debugstorage')
        self.NOTIFICATION_DB_PATH = config.get('NotificationDataPath', 'Chains/notification_data')
        self.SERVICE_ENABLED = config.get('ServiceEnabled', True)
        self.COMPILER_NEP_8 = config.get('CompilerNep8', False)

    def setup_mainnet(self):
        """ Load settings from the mainnet JSON config file """
        self.setup(FILENAME_SETTINGS_MAINNET)

    def setup_testnet(self):
        """ Load settings from the testnet JSON config file """
        self.setup(FILENAME_SETTINGS_TESTNET)

    def setup_privnet(self, host=None):
        """
        Load settings from the privnet JSON config file

        Args:
            host (string, optional): if supplied, uses this IP or domain as neo nodes. The host must
            use these standard ports: P2P 20333, RPC 30333.
        """
        self.setup(FILENAME_SETTINGS_PRIVNET)
        if isinstance(host, str):
            if ":" in host:
                raise Exception("No protocol prefix or port allowed in host, use just the IP or domain.")
            print("Using custom privatenet host:", host)
            self.SEED_LIST = ["%s:20333" % host]
            self.RPC_LIST = ["http://%s:30333" % host]
            print("- P2P:", ", ".join(self.SEED_LIST))
            print("- RPC:", ", ".join(self.RPC_LIST))
        self.check_privatenet()

    def setup_unittest_net(self, host=None):
        """ Load settings from privnet JSON config file. """
        self.setup(FILENAME_SETTINGS_UNITTEST_NET)

    def setup_coznet(self):
        """ Load settings from the coznet JSON config file """
        self.setup(FILENAME_SETTINGS_COZNET)

    def set_data_dir(self, path):
        """Set the base data directory.

        Args:
            path (str or None): None selects ~/.neopython, '.' selects the
                current working directory, anything else is used verbatim.
                The directory is created if it does not exist.
        """
        if not path:
            path_user_home = os.path.expanduser('~')
            self.DATA_DIR_PATH = os.path.join(path_user_home, ".neopython")  # Works for both Windows and *nix
        elif path == '.':
            self.DATA_DIR_PATH = os.getcwd()
        else:
            self.DATA_DIR_PATH = path

        if not os.path.exists(self.DATA_DIR_PATH):
            os.makedirs(self.DATA_DIR_PATH)

    def set_max_peers(self, num_peers):
        """Set the maximum number of connected peers (must be int-like)."""
        try:
            self.CONNECTED_PEER_MAX = int(num_peers)
        except Exception as e:
            # Invalid input is logged and ignored; the previous value stays
            logzero.logger.error("Please supply an integer number for max peers")

    def set_log_smart_contract_events(self, is_enabled=True):
        """Enable/disable logging of smart contract runtime events."""
        self.log_smart_contract_events = is_enabled

    def set_log_vm_instruction(self, is_enabled=True):
        """Enable/disable logging of individual VM instructions."""
        self.log_vm_instructions = is_enabled

    def set_emit_notify_events_on_sc_execution_error(self, is_enabled=False):
        """Enable/disable Notify events on failed smart contract execution."""
        self.emit_notify_events_on_sc_execution_error = is_enabled

    def set_logfile(self, fn, max_bytes=0, backup_count=0):
        """
        Setup logging to a (rotating) logfile.

        Args:
            fn (str): Logfile. If fn is None, disable file logging
            max_bytes (int): Maximum number of bytes per logfile. If used together with backup_count,
                             logfile will be rotated when it reaches this amount of bytes.
            backup_count (int): Number of rotated logfiles to keep
        """
        logzero.logfile(fn, maxBytes=max_bytes, backupCount=backup_count)

    def set_loglevel(self, level):
        """
        Set the minimum loglevel for the default logger

        Args:
            level (int): eg. logging.DEBUG or logging.ERROR. See also https://docs.python.org/2/library/logging.html#logging-levels
        """
        self.log_level = level
        logzero.loglevel(level)

    def check_chain_dir_exists(self, warn_migration=False):
        """
        Checks to make sure there is a directory called ``Chains`` at the root of DATA_DIR_PATH
        and creates it if it doesn't exist yet
        """
        chain_path = os.path.join(self.DATA_DIR_PATH, 'Chains')
        if not os.path.exists(chain_path):
            try:
                os.makedirs(chain_path)
                logzero.logger.info("Created 'Chains' directory at %s " % chain_path)
            except Exception as e:
                logzero.logger.error("Could not create 'Chains' directory at %s %s" % (chain_path, e))
                # Creation failed, so a migration warning would be misleading
                warn_migration = False

        # Add a warning for migration purposes if we created a chain dir
        if warn_migration and ROOT_INSTALL_PATH != self.DATA_DIR_PATH:
            if os.path.exists(os.path.join(ROOT_INSTALL_PATH, 'Chains')):
                logzero.logger.warning("[MIGRATION] You are now using the blockchain data at %s, but it appears you have existing data at %s/Chains" % (chain_path, ROOT_INSTALL_PATH))
                logzero.logger.warning("[MIGRATION] If you would like to use your existing data, please move any data at %s/Chains to %s " % (ROOT_INSTALL_PATH, chain_path))
                logzero.logger.warning("[MIGRATION] Or you can continue using your existing data by starting your script with the `--datadir=.` flag")

    def check_privatenet(self):
        """
        Check if privatenet is running, and if container is same as the current Chains/privnet database.

        Raises:
            PrivnetConnectionError: if the private net couldn't be reached or the nonce does not match
        """
        rpc_settings.setup(self.RPC_LIST)
        client = RPCClient()

        try:
            version = client.get_version()
        except NEORPCException:
            raise PrivnetConnectionError("Error: private network container doesn't seem to be running, or RPC is not enabled.")

        print("Privatenet useragent '%s', nonce: %s" % (version["useragent"], version["nonce"]))

        # Now check if nonce is the same as in the chain path
        nonce_container = str(version["nonce"])
        neopy_chain_meta_filename = os.path.join(self.chain_leveldb_path, ".privnet-nonce")
        if os.path.isfile(neopy_chain_meta_filename):
            # NOTE(review): this file handle is never explicitly closed;
            # consider a `with open(...)` block here.
            nonce_chain = open(neopy_chain_meta_filename, "r").read()
            if nonce_chain != nonce_container:
                raise PrivnetConnectionError(
                    "Chain database in Chains/privnet is for a different private network than the current container. "
                    "Consider deleting the Chain directory with 'rm -rf %s*'." % self.chain_leveldb_path
                )
        else:
            # When the Chains/privnet folder is removed, we need to create the directory
            if not os.path.isdir(self.chain_leveldb_path):
                os.mkdir(self.chain_leveldb_path)

            # Write the nonce to the meta file
            with open(neopy_chain_meta_filename, "w") as f:
                f.write(nonce_container)
# Settings instance used by external modules
settings = SettingsHolder()

# Load testnet settings as default. This is useful to provide default data/db directories
# to any code using "from neo.Settings import settings"
settings.setup_testnet()

# By default, set loglevel to INFO. DEBUG just print a lot of internal debug statements
settings.set_loglevel(logging.INFO)

# System check: Are dependencies must be installed in the correct version
# Can be bypassed with `SKIP_DEPS_CHECK=1 python prompt.py`
# this causes so many headaches when developing between boa and neo and core... :(
# if not os.getenv("SKIP_DEPS_CHECK") and not IS_PACKAGE_INSTALL:
#     check_depdendencies()

# System check: Python 3.6+ (bypass with SKIP_PY_CHECK=1)
if not os.getenv("SKIP_PY_CHECK"):
    if sys.version_info < (3, 6):
        raise SystemCheckError("Needs Python 3.6+. Currently used: %s" % sys.version)
| 38.819178 | 183 | 0.675559 |
438e2285f9c44956d1692579f0c17fb0bcfdb838 | 32,347 | py | Python | tools/dockerize/webportal/usr/lib/python2.7/site-packages/daoliproxy/api/openstack/xmlutil.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | tools/dockerize/webportal/usr/lib/python2.7/site-packages/daoliproxy/api/openstack/xmlutil.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | tools/dockerize/webportal/usr/lib/python2.7/site-packages/daoliproxy/api/openstack/xmlutil.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from lxml import etree
import six
from daoliproxy import exception
from daoliproxy.i18n import _
from daoliproxy import utils
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
def validate_schema(xml, schema_name, version='v1.1'):
    """Validate an XML string or parsed tree against a RelaxNG schema.

    :param xml: an XML string, or an already-parsed lxml element.
    :param schema_name: base name of the ``.rng`` schema file.
    :param version: API version subdirectory for the schema; ignored for
                    the version-independent 'atom' schemas.
    :raises lxml.etree.DocumentInvalid: if validation fails.
    """
    if isinstance(xml, str):
        xml = etree.fromstring(xml)
    # NOTE(review): path still points at the original 'nova' schema tree even
    # though this package is daoliproxy -- confirm the schemas live there.
    base_path = 'nova/api/openstack/compute/schemas/'
    # The atom schemas are shared across all API versions
    if schema_name not in ('atom', 'atom-link'):
        base_path += '%s/' % version
    schema_path = os.path.join(utils.daoliproxydir(),
                               '%s%s.rng' % (base_path, schema_name))
    schema_doc = etree.parse(schema_path)
    relaxng = etree.RelaxNG(schema_doc)
    relaxng.assertValid(xml)
class Selector(object):
    """Selects datum to operate on from an object."""

    def __init__(self, *chain):
        """Initialize the selector.

        Each argument is a subsequent index into the object.
        """
        self.chain = chain

    def __repr__(self):
        """Return a representation of the selector."""
        return "Selector" + repr(self.chain)

    def __call__(self, obj, do_raise=False):
        """Select a datum to operate on.

        Walks the selector chain into the object and returns whatever
        it finds at the end.

        :param obj: The object from which to select the object.
        :param do_raise: If False (the default), return None if the
                         indexed datum does not exist.  Otherwise,
                         raise a KeyError.
        """
        current = obj
        for step in self.chain:
            # Callable steps transform the current value directly
            if callable(step):
                current = step(current)
                continue

            # An empty string short-circuits the whole lookup
            if current == '':
                return ''

            try:
                current = current[step]
            except (KeyError, IndexError):
                if do_raise:
                    # Normalize to KeyError, for consistency
                    raise KeyError(step)
                # Missing datum and no raising requested
                return None

        return current
def get_items(obj):
    """Get items in obj as a list of (key, value) tuples."""
    return [(key, value) for key, value in obj.items()]
def get_items_without_dict(obj):
    """Get items in obj but omit any items containing a dict.

    The previous implementation removed entries from the list while
    iterating over it, which silently skipped the element following
    each removed dict-valued item. Building a filtered list avoids
    that mutation-during-iteration bug.
    """
    return [(key, value) for key, value in obj.items()
            if not isinstance(value, dict)]
class EmptyStringSelector(Selector):
    """Returns the empty string if Selector would return None."""

    def __call__(self, obj, do_raise=False):
        """Returns empty string if the selected value does not exist."""
        # Force the parent to raise on a missing datum, then translate
        # that into the empty string.
        try:
            return Selector.__call__(self, obj, True)
        except KeyError:
            return ""
class ConstantSelector(object):
    """A selector that ignores its input and always yields one value."""

    def __init__(self, value):
        """Remember the constant to return.

        :param value: The value to return from every call.
        """
        self.value = value

    def __repr__(self):
        """Represent the selector by its constant."""
        return repr(self.value)

    def __call__(self, _obj, _do_raise=False):
        """Return the stored constant.

        Signature-compatible with Selector.__call__(); both arguments
        are ignored.
        """
        return self.value
class TemplateElement(object):
    """Represent an element in the template."""

    def __init__(self, tag, attrib=None, selector=None, subselector=None,
                 colon_ns=False, **extra):
        """Initialize an element.

        Initializes an element in the template. Keyword arguments
        specify attributes to be set on the element; values must be
        callables. See TemplateElement.set() for more information.

        :param tag: The name of the tag to create.
        :param attrib: An optional dictionary of element attributes.
        :param selector: An optional callable taking an object and
                         optional boolean do_raise indicator and
                         returning the object bound to the element.
        :param subselector: An optional callable taking an object and
                            optional boolean do_raise indicator and
                            returning the object bound to the element.
                            This is used to further refine the datum
                            object returned by selector in the event
                            that it is a list of objects.
        :colon_ns: An optional flag indicating whether to support k:v
                   type tagname, if True the k:v type tagname will
                   be supported by adding the k into the namespace.
        """
        # Convert selector into a Selector
        if selector is None:
            selector = Selector()
        elif not callable(selector):
            selector = Selector(selector)

        # Convert subselector into a Selector
        if subselector is not None and not callable(subselector):
            subselector = Selector(subselector)

        self.tag = tag
        self.selector = selector
        self.subselector = subselector
        self.attrib = {}
        self._text = None
        self._children = []
        self._childmap = {}
        self.colon_ns = colon_ns

        # Run the incoming attributes through set() so that they
        # become selectorized. Work on a copy so the caller's dict is
        # not mutated as a side effect (the old code updated `attrib`
        # in place).
        merged = dict(attrib) if attrib else {}
        merged.update(extra)
        for k, v in merged.items():
            self.set(k, v)

    def __repr__(self):
        """Return a representation of the template element."""
        return ('<%s.%s %r at %#x>' %
                (self.__class__.__module__, self.__class__.__name__,
                 self.tag, id(self)))

    def __len__(self):
        """Return the number of child elements."""
        return len(self._children)

    def __contains__(self, key):
        """Determine whether a child node named by key exists."""
        return key in self._childmap

    def __getitem__(self, idx):
        """Retrieve a child node by index or name."""
        if isinstance(idx, six.string_types):
            # Allow access by node name
            return self._childmap[idx]
        else:
            return self._children[idx]

    def append(self, elem):
        """Append a child to the element."""
        # Unwrap templates...
        elem = elem.unwrap()

        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)

        self._children.append(elem)
        self._childmap[elem.tag] = elem

    def extend(self, elems):
        """Append children to the element."""
        # Pre-evaluate the elements so nothing is added on failure
        elemmap = {}
        elemlist = []
        for elem in elems:
            # Unwrap templates...
            elem = elem.unwrap()

            # Avoid duplications
            if elem.tag in self._childmap or elem.tag in elemmap:
                raise KeyError(elem.tag)

            elemmap[elem.tag] = elem
            elemlist.append(elem)

        # Update the children
        self._children.extend(elemlist)
        self._childmap.update(elemmap)

    def insert(self, idx, elem):
        """Insert a child element at the given index."""
        # Unwrap templates...
        elem = elem.unwrap()

        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)

        self._children.insert(idx, elem)
        self._childmap[elem.tag] = elem

    def remove(self, elem):
        """Remove a child element."""
        # Unwrap templates...
        elem = elem.unwrap()

        # Check if element exists
        if elem.tag not in self._childmap or self._childmap[elem.tag] != elem:
            raise ValueError(_('element is not a child'))

        self._children.remove(elem)
        del self._childmap[elem.tag]

    def get(self, key):
        """Get an attribute.

        Returns a callable which performs datum selection.

        :param key: The name of the attribute to get.
        """
        return self.attrib[key]

    def set(self, key, value=None):
        """Set an attribute.

        :param key: The name of the attribute to set.
        :param value: A callable taking an object and optional boolean
                      do_raise indicator and returning the datum bound
                      to the attribute. If None, a Selector() will be
                      constructed from the key. If a string, a
                      Selector() will be constructed from the string.
        """
        # Convert value to a selector
        if value is None:
            value = Selector(key)
        elif not callable(value):
            value = Selector(value)

        self.attrib[key] = value

    def keys(self):
        """Return the attribute names."""
        return self.attrib.keys()

    def items(self):
        """Return the attribute names and values."""
        return self.attrib.items()

    def unwrap(self):
        """Unwraps a template to return a template element."""
        # We are a template element
        return self

    def wrap(self):
        """Wraps a template element to return a template."""
        # Wrap in a basic Template
        return Template(self)

    def apply(self, elem, obj):
        """Apply text and attributes to an etree.Element.

        Applies the text and attribute instructions in the template
        element to an etree.Element instance.

        :param elem: An etree.Element instance.
        :param obj: The base object associated with this template
                    element.
        """
        # Start with the text...
        # NOTE: six.text_type replaces the Python 2-only unicode()
        # builtin so this also works on Python 3.
        if self.text is not None:
            elem.text = six.text_type(self.text(obj))

        # Now set up all the attributes...
        for key, value in self.attrib.items():
            try:
                elem.set(key, six.text_type(value(obj, True)))
            except KeyError:
                # Attribute has no value, so don't include it
                pass

    def _render(self, parent, datum, patches, nsmap):
        """Internal rendering.

        Renders the template node into an etree.Element object.
        Returns the etree.Element object.

        :param parent: The parent etree.Element instance.
        :param datum: The datum associated with this template element.
        :param patches: A list of other template elements that must
                        also be applied.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance.
        """
        # Allocate a node
        if callable(self.tag):
            tagname = self.tag(datum)
        else:
            tagname = self.tag

        # Expand 'k:v' style tags into a namespaced tag if requested
        if self.colon_ns:
            if ':' in tagname:
                if nsmap is None:
                    nsmap = {}
                colon_key, colon_name = tagname.split(':')
                nsmap[colon_key] = colon_key
                tagname = '{%s}%s' % (colon_key, colon_name)

        elem = etree.Element(tagname, nsmap=nsmap)

        # If we have a parent, append the node to the parent
        if parent is not None:
            parent.append(elem)

        # If the datum is None, do nothing else
        if datum is None:
            return elem

        # Apply this template element to the element
        self.apply(elem, datum)

        # Additionally, apply the patches
        for patch in patches:
            patch.apply(elem, datum)

        # We have fully rendered the element; return it
        return elem

    def render(self, parent, obj, patches=None, nsmap=None):
        """Render an object.

        Renders an object against this template node. Returns a list
        of two-item tuples, where the first item is an etree.Element
        instance and the second item is the datum associated with that
        instance.

        :param parent: The parent for the etree.Element instances.
        :param obj: The object to render this template element
                    against.
        :param patches: A list of other template elements to apply
                        when rendering this template element.
        :param nsmap: An optional namespace dictionary to attach to
                      the etree.Element instances.
        """
        patches = patches or []

        # First, get the datum we're rendering
        data = None if obj is None else self.selector(obj)

        # Check if we should render at all
        if not self.will_render(data):
            return []
        elif data is None:
            return [(self._render(parent, None, patches, nsmap), None)]

        # Make the data into a list if it isn't already
        if not isinstance(data, list):
            data = [data]
        elif parent is None:
            raise ValueError(_('root element selecting a list'))

        # Render all the elements
        elems = []
        for datum in data:
            if self.subselector is not None:
                datum = self.subselector(datum)
            elems.append((self._render(parent, datum, patches, nsmap), datum))

        # Return all the elements rendered, as well as the
        # corresponding datum for the next step down the tree
        return elems

    def will_render(self, datum):
        """Hook method.

        An overridable hook method to determine whether this template
        element will be rendered at all. By default, returns False
        (inhibiting rendering) if the datum is None.

        :param datum: The datum associated with this template element.
        """
        # Don't render if datum is None
        return datum is not None

    def _text_get(self):
        """Template element text.

        Either None or a callable taking an object and optional
        boolean do_raise indicator and returning the datum bound to
        the text of the template element.
        """
        return self._text

    def _text_set(self, value):
        # Convert value to a selector
        if value is not None and not callable(value):
            value = Selector(value)

        self._text = value

    def _text_del(self):
        self._text = None

    text = property(_text_get, _text_set, _text_del)

    def tree(self):
        """Return string representation of the template tree.

        Returns a representation of the template rooted at this
        element as a string, suitable for inclusion in debug logs.
        """
        # Build the inner contents of the tag...
        contents = [self.tag, '!selector=%r' % self.selector]

        # Add the text...
        if self.text is not None:
            contents.append('!text=%r' % self.text)

        # Add all the other attributes
        for key, value in self.attrib.items():
            contents.append('%s=%r' % (key, value))

        # If there are no children, return it as a closed tag
        if len(self) == 0:
            return '<%s/>' % ' '.join([str(i) for i in contents])

        # OK, recurse to our children
        children = [c.tree() for c in self]

        # Return the result
        return ('<%s>%s</%s>' %
                (' '.join(contents), ''.join(children), self.tag))
def SubTemplateElement(parent, tag, attrib=None, selector=None,
                       subselector=None, colon_ns=False, **extra):
    """Create a template element as a child of another.

    Corresponds to the etree.SubElement interface. Parameters are as
    for TemplateElement, with the addition of the parent.
    """
    # Merge attributes into a copy; the previous code updated the
    # caller-supplied `attrib` dict in place, which leaked **extra
    # entries back to the caller.
    merged = dict(attrib) if attrib else {}
    merged.update(extra)

    # Get a TemplateElement
    elem = TemplateElement(tag, attrib=merged, selector=selector,
                           subselector=subselector, colon_ns=colon_ns)

    # Append the parent safely
    if parent is not None:
        parent.append(elem)

    return elem
class Template(object):
    """Represent a template."""

    def __init__(self, root, nsmap=None):
        """Initialize a template.

        :param root: The root element of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """
        self.root = root.unwrap() if root is not None else None
        self.nsmap = nsmap or {}
        # Defaults passed to etree.tostring() unless overridden
        self.serialize_options = dict(encoding='UTF-8', xml_declaration=True)

    def _serialize(self, parent, obj, siblings, nsmap=None):
        """Internal serialization.

        Recursive routine to build a tree of etree.Element instances
        from an object based on the template. Returns the first
        etree.Element instance rendered, or None.

        :param parent: The parent etree.Element instance. Can be
                       None.
        :param obj: The object to render.
        :param siblings: The TemplateElement instances against which
                         to render the object.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance
                      rendered.
        """
        # First step, render the element
        elems = siblings[0].render(parent, obj, siblings[1:], nsmap)

        # Now, recurse to all child elements
        seen = set()
        for idx, sibling in enumerate(siblings):
            for child in sibling:
                # Have we handled this child already?
                if child.tag in seen:
                    continue
                seen.add(child.tag)

                # Determine the child's siblings (same tag in any
                # later sibling template)
                nieces = [child]
                for sib in siblings[idx + 1:]:
                    if child.tag in sib:
                        nieces.append(sib[child.tag])

                # Now we recurse for every data element
                for elem, datum in elems:
                    self._serialize(elem, datum, nieces)

        # Return the first element; at the top level, this will be the
        # root element
        if elems:
            return elems[0][0]

    def serialize(self, obj, *args, **kwargs):
        """Serialize an object.

        Serializes an object against the template. Returns a string
        with the serialized XML. Positional and keyword arguments are
        passed to etree.tostring().

        :param obj: The object to serialize.
        """
        elem = self.make_tree(obj)
        if elem is None:
            return ''

        for k, v in self.serialize_options.items():
            kwargs.setdefault(k, v)

        # Serialize it into XML
        return etree.tostring(elem, *args, **kwargs)

    def make_tree(self, obj):
        """Create a tree.

        Serializes an object against the template. Returns an Element
        node with appropriate children.

        :param obj: The object to serialize.
        """
        # If the template is empty, return the empty string
        if self.root is None:
            return None

        # Get the siblings and nsmap of the root element
        siblings = self._siblings()
        nsmap = self._nsmap()

        # Form the element tree
        return self._serialize(None, obj, siblings, nsmap)

    def _siblings(self):
        """Hook method for computing root siblings.

        An overridable hook method to return the siblings of the root
        element. By default, this is the root element itself.
        """
        return [self.root]

    def _nsmap(self):
        """Hook method for computing the namespace dictionary.

        An overridable hook method to return the namespace dictionary.
        """
        return self.nsmap.copy()

    def unwrap(self):
        """Unwraps a template to return a template element."""
        # Return the root element
        return self.root

    def wrap(self):
        """Wraps a template element to return a template."""
        # We are a template
        return self

    def apply(self, master):
        """Hook method for determining slave applicability.

        An overridable hook method used to determine if this template
        is applicable as a slave to a given master template.

        :param master: The master template to test.
        """
        return True

    def tree(self):
        """Return string representation of the template tree.

        Returns a representation of the template as a string, suitable
        for inclusion in debug logs.
        """
        return "%r: %s" % (self, self.root.tree())
class MasterTemplate(Template):
    """Represent a master template.

    Master templates are versioned derivatives of templates that
    additionally allow slave templates to be attached. Slave
    templates allow modification of the serialized result without
    directly changing the master.
    """

    def __init__(self, root, version, nsmap=None):
        """Initialize a master template.

        :param root: The root element of the template.
        :param version: The version number of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """
        super(MasterTemplate, self).__init__(root, nsmap)
        self.version = version
        self.slaves = []

    def __repr__(self):
        """Return string representation of the template."""
        return ("<%s.%s object version %s at %#x>" %
                (self.__class__.__module__, self.__class__.__name__,
                 self.version, id(self)))

    def _siblings(self):
        """Hook method for computing root siblings.

        An overridable hook method to return the siblings of the root
        element. This is the root element plus the root elements of
        all the slave templates.
        """
        return [self.root] + [slave.root for slave in self.slaves]

    def _nsmap(self):
        """Hook method for computing the namespace dictionary.

        An overridable hook method to return the namespace dictionary.
        The namespace dictionary is computed by taking the master
        template's namespace dictionary and updating it from all the
        slave templates.
        """
        nsmap = self.nsmap.copy()
        for slave in self.slaves:
            nsmap.update(slave._nsmap())
        return nsmap

    def attach(self, *slaves):
        """Attach one or more slave templates.

        Attaches one or more slave templates to the master template.
        Slave templates must have a root element with the same tag as
        the master template. The slave template's apply() method will
        be called to determine if the slave should be applied to this
        master; if it returns False, that slave will be skipped.
        (This allows filtering of slaves based on the version of the
        master template.)
        """
        # Validate all slaves first; none are attached unless all match
        slave_list = []
        for slave in slaves:
            slave = slave.wrap()

            # Make sure we have a tree match
            if slave.root.tag != self.root.tag:
                msg = _("Template tree mismatch; adding slave %(slavetag)s to "
                        "master %(mastertag)s") % {'slavetag': slave.root.tag,
                                                   'mastertag': self.root.tag}
                raise ValueError(msg)

            # Make sure slave applies to this template
            if not slave.apply(self):
                continue

            slave_list.append(slave)

        # Add the slaves
        self.slaves.extend(slave_list)

    def copy(self):
        """Return a copy of this master template."""
        # Return a copy of the MasterTemplate; note the slave list is
        # a shallow copy -- the slave templates themselves are shared
        tmp = self.__class__(self.root, self.version, self.nsmap)
        tmp.slaves = self.slaves[:]
        return tmp
class SlaveTemplate(Template):
    """Represent a slave template.

    A slave template is a versioned derivative of a template. It
    declares the range of master template versions it may be attached
    to: a required minimum and an optional maximum.
    """

    def __init__(self, root, min_vers, max_vers=None, nsmap=None):
        """Initialize a slave template.

        :param root: The root element of the template.
        :param min_vers: Lowest master template version this slave
                         supports.
        :param max_vers: Optional highest supported master version
                         (None means unbounded).
        :param nsmap: An optional namespace dictionary for the root
                      element of the template.
        """
        super(SlaveTemplate, self).__init__(root, nsmap)
        self.min_vers = min_vers
        self.max_vers = max_vers

    def __repr__(self):
        """Return string representation of the template."""
        cls = type(self)
        return ("<%s.%s object versions %s-%s at %#x>" %
                (cls.__module__, cls.__name__, self.min_vers,
                 self.max_vers, id(self)))

    def apply(self, master):
        """Hook method for determining slave applicability.

        A slave applies to a master whose version lies within the
        [min_vers, max_vers] range.

        :param master: The master template to test.
        """
        applicable = master.version >= self.min_vers
        if applicable and self.max_vers is not None:
            applicable = master.version <= self.max_vers
        return applicable
class TemplateBuilder(object):
    """Template builder.

    This class exists to allow templates to be lazily built without
    having to build them each time they are needed. It must be
    subclassed, and the subclass must implement the construct()
    method, which must return a Template (or subclass) instance. The
    constructor will always return the template returned by
    construct(), or, if it has a copy() method, a copy of that
    template.
    """

    # Lazily-built, per-subclass cached template instance.
    _tmpl = None

    def __new__(cls, copy=True):
        """Construct and return a template.

        :param copy: If True (the default), a copy of the template
                     will be constructed and returned, if possible.
        """
        # Build the template the first time this subclass is used.
        if cls._tmpl is None:
            builder = super(TemplateBuilder, cls).__new__(cls)
            cls._tmpl = builder.construct()
        template = cls._tmpl
        # Hand back a copy when requested and the template supports it.
        if copy and hasattr(template, 'copy'):
            return template.copy()
        return template

    def construct(self):
        """Construct a template.

        Called to construct a template instance, which it must return.
        Only called once.
        """
        raise NotImplementedError(_("subclasses must implement construct()!"))
def make_links(parent, selector=None):
    """Attach an Atom <links> element to the parent."""
    link_elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
                                   selector=selector)
    # Declare the standard Atom link attributes.
    for attr_name in ('rel', 'type', 'href'):
        link_elem.set(attr_name)
    # Just for completeness...
    return link_elem
def make_flat_dict(name, selector=None, subselector=None,
                   ns=None, colon_ns=False, root=None,
                   ignore_sub_dicts=False):
    """Utility for simple XML templates that traditionally used
    XMLDictSerializer with no metadata. Returns a template element
    where the top-level element has the given tag name, and where
    sub-elements have tag names derived from the object's keys and
    text derived from the object's values.

    :param root: if None, this will create the root.
    :param ignore_sub_dicts: If True, ignores any dict objects inside the
                             object. If False, causes an error if there is a
                             dict object present.
    """
    # Default the selector to the element name.
    if selector is None:
        selector = name
    # Resolve the element and tag names, qualified by the namespace if any.
    if ns is None:
        elemname = name
        tagname = Selector(0)
    else:
        elemname = '{%s}%s' % (ns, name)

        def tagname(obj, do_raise=False):
            return '{%s}%s' % (ns, obj[0])

    if not root:
        # Build the root element
        root = TemplateElement(elemname, selector=selector,
                               subselector=subselector, colon_ns=colon_ns)
    # NOTE: the identity check mirrors the historical behavior — only the
    # literal False selects get_items.
    if ignore_sub_dicts is False:
        items_selector = get_items
    else:
        items_selector = get_items_without_dict
    # Build an element to represent all the keys and values
    elem = SubTemplateElement(root, tagname, selector=items_selector,
                              colon_ns=colon_ns)
    elem.text = 1
    # Return the template
    return root
class ProtectedExpatParser(expatreader.ExpatParser):
    """An expat parser which disables DTD's and entities by default."""

    def __init__(self, forbid_dtd=True, forbid_entities=True,
                 *args, **kwargs):
        # Python 2.x old style class
        expatreader.ExpatParser.__init__(self, *args, **kwargs)
        self.forbid_dtd = forbid_dtd
        self.forbid_entities = forbid_entities

    # Each handler below rejects a dangerous XML construct outright.
    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise ValueError("Inline DTD forbidden")

    def entity_decl(self, entityName, is_parameter_entity, value, base,
                    systemId, publicId, notationName):
        raise ValueError("<!ENTITY> entity declaration forbidden")

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise ValueError("<!ENTITY> unparsed entity forbidden")

    def external_entity_ref(self, context, base, systemId, publicId):
        raise ValueError("<!ENTITY> external entity forbidden")

    def notation_decl(self, name, base, sysid, pubid):
        raise ValueError("<!ENTITY> notation forbidden")

    def reset(self):
        expatreader.ExpatParser.reset(self)
        parser = self._parser
        if self.forbid_dtd:
            parser.StartDoctypeDeclHandler = self.start_doctype_decl
            parser.EndDoctypeDeclHandler = None
        if self.forbid_entities:
            parser.EntityDeclHandler = self.entity_decl
            parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
            parser.ExternalEntityRefHandler = self.external_entity_ref
            parser.NotationDeclHandler = self.notation_decl
            try:
                parser.SkippedEntityHandler = None
            except AttributeError:
                # some pyexpat versions do not support SkippedEntity
                pass
def safe_minidom_parse_string(xml_string):
    """Parse an XML string using minidom safely.

    Uses ProtectedExpatParser so DTDs and entity declarations are rejected.

    :param xml_string: the XML document to parse
    :returns: the parsed minidom Document
    :raises exception.MalformedRequestBody: if the input is not valid XML
        or uses a forbidden construct
    """
    try:
        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
    except (sax.SAXParseException, ValueError,
            expat.ExpatError, LookupError) as e:
        # NOTE(Vijaya Erukala): XML input such as
        # <?xml version="1.0" encoding="TF-8"?>
        # raises LookupError: unknown encoding: TF-8
        raise exception.MalformedRequestBody(reason=six.text_type(e))
| 32.444333 | 79 | 0.601478 |
2a8334be23dfe562b10bdde3242d864f2fa855c8 | 11,219 | py | Python | mayaSDK/maya/app/renderSetup/model/renderLayer.py | FXTD-ODYSSEY/vscode-mayapy | 7a21872f80b5b740fc653e79c3f9b5268e87b3c3 | [
"MIT"
] | 20 | 2019-09-20T00:30:22.000Z | 2021-12-26T06:56:16.000Z | mayaSDK/maya/app/renderSetup/model/renderLayer.py | minjiang999/vscode-mayapy | 7a21872f80b5b740fc653e79c3f9b5268e87b3c3 | [
"MIT"
] | 5 | 2019-12-29T15:19:03.000Z | 2022-03-29T16:54:19.000Z | mayaSDK/maya/app/renderSetup/model/renderLayer.py | minjiang999/vscode-mayapy | 7a21872f80b5b740fc653e79c3f9b5268e87b3c3 | [
"MIT"
] | 8 | 2019-09-23T05:46:44.000Z | 2022-01-11T14:42:14.000Z | """
This module provides the render layer class, as well as utility
functions to operate on render layers.
Note:
In order to better control the update of the render layer, two flags were added
to each render layer instance to control the update of 1) the list of nodes owned
by the legacy layer and 2) the rendering. The controls were introduced to avoid
performance penalty on any user requests.
The flag RenderLayer.needsMembershipUpdate is set to True when the list of nodes
part of the render layer changed meaning that the legacy layer must be updated.
The update is managed by an evalDeferred() so it will only be executed during
the next idle time. If an update is already planned,
the flag RenderLayer.isUpdatingMembership will be True. These flags only apply
to the visible render layer. No updates are performed on the not visible ones.
The flag RenderLayer.needsApplyUpdate is set to True when the rendering must be updated.
The default dirty mechanism of the scene is not enough as the render setup behavior implies
to sometime apply or unapply some overrides. The first 'not optimized' implementation
of the rendering refresh is to impose a switchToLayer()
(i.e. unapply and apply all overrides). This flag only applies to the visible render layer.
No updates are performed on the not visible ones.
"""
from functools import partial
from maya.app.renderSetup.model.observable import Observable
from maya.app.renderSetup.model.renderSetupPrivate import PostApplyCmd
class RenderLayerBase(object):
    """
    Abstract base class for RenderLayer and DefaultRenderLayer classes
    Defines functions for toggling visibility and renderability.
    Children must implement:
    - _getLegacyNodeName()
    - _updateLegacyRenderLayerVisibility()
    - apply()
    - unapply()
    """

    # NOTE(review): this file is an auto-generated API stub; every method
    # body is an empty placeholder (`pass`) kept only for IDE completion.

    def __init__(self):
        pass

    def isRenderable(self):
        pass

    def isVisible(self):
        pass

    def makeVisible(self):
        pass

    def setRenderable(self, value):
        pass

    __dict__ = None

    __weakref__ = None
class DefaultRenderLayer(RenderLayerBase, Observable):
    """
    Singleton class to access and modify default render layer properties
    This singleton instance is also observable: it will notify observers
    on visibility and renderability changes.
    Singleton instance belongs to renderSetup instance
    Access it using renderSetup.instance().getDefaultRenderLayer()
    """

    # NOTE(review): auto-generated API stub — bodies are placeholders.

    def __init__(self):
        pass

    def apply(self):
        pass

    def clearMemberNodesCache(self):
        pass

    def getChildren(self):
        pass

    def getMemberNodesCache(self):
        pass

    def hasLightsCollectionInstance(self):
        pass

    def name(self):
        pass

    def needsRefresh(self):
        pass

    def setMemberNodesCache(self, cache):
        pass

    def unapply(self):
        pass
from . import nodeList
from . import childNode
class RenderLayer(RenderLayerBase, nodeList.ListBase, childNode.ChildNode):
    """
    Render layer node.
    A render layer has an ordered list of collections. It can
    optionally have an ordered list of overrides.
    """

    # NOTE(review): auto-generated API stub for the Maya render setup
    # node type 'renderSetupLayer' — all bodies are placeholders.

    def __init__(self):
        pass

    def acceptImport(self):
        pass

    def addDefaultMembers(*args, **kwargs):
        pass

    def aovCollectionInstance(self):
        """
        Get the AOV collection instance for this render layer,
        creating it if it doesn't exists as long as renderer
        callbacks are registered for the current renderer.
        """
        pass

    def appendChild(*args, **kwargs):
        pass

    def appendCollection(*args, **kwargs):
        pass

    def apply(*args, **kwargs):
        pass

    def attachChild(self, pos, child):
        """
        Attach a collection at a specific position
        """
        pass

    def attachCollection(self, pos, child):
        """
        Attach a collection at a specific position
        """
        pass

    def attachOverride(self, overrideName):
        pass

    def clearMemberNodesCache(self):
        pass

    def copyForClipboard(self):
        """
        # Pasting a render layer that's visible will trigger a layer
        # switch, which can be expensive, and changes the user's currently
        # visible render layer. Prevent this on copy for clipboard.
        """
        pass

    def createAbsoluteOverride(*args, **kwargs):
        pass

    def createCollection(*args, **kwargs):
        pass

    def createConnectionOverride(*args, **kwargs):
        pass

    def createRelativeOverride(*args, **kwargs):
        pass

    def descendantAdded(*args, **kwargs):
        pass

    def detachChild(*args, **kwargs):
        pass

    def detachCollection(*args, **kwargs):
        pass

    def findCollection(self, predicate, creator='None'):
        """
        Find the collection of this layer satisfying the predicate function or creates it
        with the creator function if not found and a creator function is specified.
        Function signatures are:
        predicate(collection): returns boolean.
        creator(void) : returns the created node.
        """
        pass

    def findIn(self, nodeNames, includeSelf='True'):
        """
        Generator that returns all the collections in that layer that contain at least on of the
        object in nodeNames. Optionally also returns self (with includeSelf=True) if the object is in the layer.
        """
        pass

    def getChildren(self):
        """
        Get list of all existing Collections
        """
        pass

    def getCollectionByName(self, collectionName, nested='False'):
        """
        Look for an existing collection by name
        """
        pass

    def getCollections(self):
        """
        Get list of all existing Collections
        """
        pass

    def getCorrespondingCollection(self, nodeName, selectedCollectionName):
        """
        The behavior is to look for Render Settings attribute to add the override
        in the Render Settings collection if it exists, then to use the selected
        collection; otherwise, to create a new collection containing the override.
        """
        pass

    def getDefaultCollection(self):
        """
        Get the default collection where newly created nodes are placed
        """
        pass

    def getEnabledSelectedNodeNames(self):
        """
        Get the names of the layer's DAG node members.
        The layer's members are DAG nodes selected by the layer's
        collections, based on whether a collection is enabled or solo'ed.
        @rtype: set
        @return: set of node names. Empty if none found.
        """
        pass

    def getFirstCollectionIndex(self):
        pass

    def getMemberNodesCache(self):
        pass

    def getMembers(self):
        """
        Get the names of the layer's DAG node members.
        The layer's members are DAG nodes selected by the layer's
        collections, based on whether a collection is enabled or solo'ed.
        @rtype: set
        @return: set of node names. Empty if none found.
        """
        pass

    def getNumIsolatedChildren(self):
        pass

    def getOverrides(self):
        pass

    def getRenderSettingsChildCollectionByName(self, renderSettingsChildCollectionName, nested='False'):
        """
        Look for an existing render settings collection by name
        """
        pass

    def hasAOVCollectionInstance(self):
        """
        Returns True if this layer has the AOV collection instance created.
        """
        pass

    def hasCollection(self, collectionName):
        pass

    def hasDefaultCollection(self):
        """
        Get the default collection where newly created nodes are placed
        """
        pass

    def hasLightsCollectionInstance(self):
        """
        Returns True if this layer has the lights collection instance created.
        """
        pass

    def hasRenderSettingsCollectionInstance(self):
        """
        Returns True if this layer has the render settings collection instance created.
        """
        pass

    def isAbstractClass(self):
        pass

    def isAcceptableChild(self, modelOrData):
        """
        Check if the model could be a child of the render layer model
        """
        pass

    def lightsCollectionInstance(self):
        """
        Get the lights collection instance for this render layer,
        creating it if it doesn't exists.
        """
        pass

    def needsRefresh(self):
        """
        Following some changes the instance must be updated.
        """
        pass

    def postConstructor(self):
        pass

    def renderSettingsCollectionInstance(self):
        """
        Get the render settings collection instance for this render layer,
        creating it if it doesn't exists.
        """
        pass

    def setMemberNodesCache(self, cache):
        pass

    def setName(*args, **kwargs):
        pass

    def typeId(self):
        pass

    def typeName(self):
        pass

    def unapply(*args, **kwargs):
        pass

    def creator():
        pass

    def initializer():
        pass

    # Class-level placeholders for node attributes (populated by the real
    # plug-in at runtime; None/empty in this stub).
    collectionHighest = None

    collectionLowest = None

    collections = None

    kTypeId = None

    kTypeName = 'renderSetupLayer'

    legacyRenderLayer = None

    numIsolatedChildren = None
# Module-level stub (auto-generated; body is a placeholder).
def create(*args, **kwargs):
    pass
# Module-level stub (auto-generated; body is a placeholder).
def _syncLegacyRenderLayers(layerName):
    pass
# Module-level stub (auto-generated; body is a placeholder).
def delete(*args, **kwargs):
    pass
# Module-level stub (auto-generated; body is a placeholder).
def memberTraversal(node):
    """
    Traverse render setup node children to determine layer membership.
    During the collection traversal to determine membership, we consider
    the isolate select state of the layer and of collections, and prune
    those collections that are not included by isolate select.
    If the node has no children, an empty list is returned.
    """
    pass
# User-facing message constants. In the real module these hold localized
# strings; in this auto-generated stub they are empty placeholders.
kCollectionUnicity = []

kCreateAOVCollection = []

kCollectionAttached = []

kSetRenderability = []

kCreateAOVChildCollection = []

kCreateLightsChildCollection = []

kCreateLightsCollection = []

kInvalidCollectionName = []

kCollectionDetached = []

kUnknownCollection = []

kAttachCollection = []

kCreateRenderSettingsCollection = []
| 21.369524 | 112 | 0.597914 |
da8c2c2b5763922c66471e767dd717f375f5f9f1 | 10,531 | py | Python | data_steward/resources.py | ChaoPang/curation | a754bd51e0f63e306da5b685dac9b31a8154b579 | [
"MIT"
] | null | null | null | data_steward/resources.py | ChaoPang/curation | a754bd51e0f63e306da5b685dac9b31a8154b579 | [
"MIT"
] | null | null | null | data_steward/resources.py | ChaoPang/curation | a754bd51e0f63e306da5b685dac9b31a8154b579 | [
"MIT"
] | null | null | null | import csv
import hashlib
import inspect
import json
import logging
import os
from io import open
import cachetools
from common import ACHILLES_TABLES, ACHILLES_HEEL_TABLES, VOCABULARY_TABLES, PROCESSED_TXT, RESULTS_HTML
LOGGER = logging.getLogger(__name__)
# Absolute path of the directory containing this module; all resource
# paths below are derived from it.
base_path = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))

# tools/*
tools_path = os.path.join(base_path, 'tools')

# resources/*
DEID_PATH = os.path.join(base_path, 'deid')
resource_files_path = os.path.join(base_path, 'resource_files')
config_path = os.path.join(base_path, 'config')
fields_path = os.path.join(resource_files_path, 'fields')
cdm_csv_path = os.path.join(resource_files_path, 'cdm.csv')
hpo_site_mappings_path = os.path.join(config_path, 'hpo_site_mappings.csv')
achilles_index_path = os.path.join(resource_files_path, 'curation_report')
AOU_VOCAB_PATH = os.path.join(resource_files_path, 'aou_vocab')
AOU_VOCAB_CONCEPT_CSV_PATH = os.path.join(AOU_VOCAB_PATH, 'concept.csv')

# HTML report templates.
TEMPLATES_PATH = os.path.join(resource_files_path, 'templates')
HPO_REPORT_HTML = 'hpo_report.html'
html_boilerplate_path = os.path.join(TEMPLATES_PATH, HPO_REPORT_HTML)
CRON_TPL_YAML = 'cron.tpl.yaml'
achilles_images_path = os.path.join(achilles_index_path, 'images')
achilles_data_path = os.path.join(achilles_index_path, 'data')
DATASOURCES_JSON = os.path.join(achilles_data_path, 'datasources.json')

# Domain-mapping CSV files used by the domain alignment logic.
domain_mappings_path = os.path.join(resource_files_path, 'domain_mappings')
field_mappings_replaced_path = os.path.join(domain_mappings_path,
                                            'field_mappings_replaced.csv')
table_mappings_path = os.path.join(domain_mappings_path, 'table_mappings.csv')
field_mappings_path = os.path.join(domain_mappings_path, 'field_mappings.csv')
value_mappings_path = os.path.join(domain_mappings_path, 'value_mappings.csv')

# CDR cleaning rule resources.
CDR_CLEANER_PATH = os.path.join(resource_files_path, 'cdr_cleaner')
DC732_CONCEPT_LOOKUP_CSV_PATH = os.path.join(CDR_CLEANER_PATH,
                                             'dc732_concept_lookup.csv')
PPI_BRANCHING_PATH = os.path.join(CDR_CLEANER_PATH, 'ppi_branching')
BASICS_CSV_PATH = os.path.join(PPI_BRANCHING_PATH, 'basics.csv')
FAMILY_HISTORY_CSV_PATH = os.path.join(PPI_BRANCHING_PATH, 'family_history.csv')
HEALTHCARE_ACCESS_CSV_PATH = os.path.join(PPI_BRANCHING_PATH,
                                          'healthcare_access.csv')
LIFESTYLE_CSV_PATH = os.path.join(PPI_BRANCHING_PATH, 'lifestyle.csv')
OVERALL_HEALTH_CSV_PATH = os.path.join(PPI_BRANCHING_PATH, 'overall_health.csv')
PERSONAL_MEDICAL_HISTORY_CSV_PATH = os.path.join(
    PPI_BRANCHING_PATH, 'personal_medical_history.csv')
PPI_BRANCHING_RULE_PATHS = [
    BASICS_CSV_PATH, FAMILY_HISTORY_CSV_PATH, HEALTHCARE_ACCESS_CSV_PATH,
    LIFESTYLE_CSV_PATH, OVERALL_HEALTH_CSV_PATH,
    PERSONAL_MEDICAL_HISTORY_CSV_PATH
]
@functools.lru_cache(maxsize=None)
def csv_to_list(csv_path):
    """
    Return a list of `dict` from a CSV file.

    Results are memoized per path for the lifetime of the process, so the
    file is read at most once. Uses the stdlib ``functools.lru_cache``
    (unbounded, keyed on ``csv_path``) instead of the third-party
    ``cachetools.cached(cache={})`` it previously relied on — the behavior
    is the same for this single hashable argument.

    :param csv_path: absolute path to a well-formed CSV file
    :return: list of `dict`, one per data row
    """
    with open(csv_path, mode='r') as csv_file:
        list_of_dicts = _csv_file_to_list(csv_file)
    return list_of_dicts
def _csv_file_to_list(csv_file):
    """
    Return a list of `dict` from a file-like object with records in CSV format.

    The first row is treated as the header; each subsequent row is zipped
    with the header, so extra trailing cells are dropped.

    :param csv_file: file-like object containing records in CSV format
    :return: list of `dict`
    """
    reader = csv.reader(csv_file)
    header = next(reader)
    return [dict(zip(header, row)) for row in reader]
def table_mappings_csv():
    """Return the table-mapping CSV rows as a list of dicts (cached read)."""
    return csv_to_list(table_mappings_path)
def field_mappings_csv():
    """Return the field-mapping CSV rows as a list of dicts (cached read)."""
    return csv_to_list(field_mappings_path)
def value_mappings_csv():
    """Return the value-mapping CSV rows as a list of dicts (cached read)."""
    return csv_to_list(value_mappings_path)
def cdm_csv():
    """Return the CDM table description CSV rows as a list of dicts (cached read)."""
    return csv_to_list(cdm_csv_path)
def achilles_index_files(root_dir=None):
    """
    Return the paths of all files found (recursively) under a directory.

    :param root_dir: directory to walk; defaults to ``achilles_index_path``
        so existing callers are unaffected. Parameterizing also removes the
        local variable that previously shadowed this function's own name.
    :return: list of absolute/relative file paths in ``os.walk`` order
    """
    if root_dir is None:
        root_dir = achilles_index_path
    found_files = []
    for dir_path, _, file_names in os.walk(root_dir):
        found_files.extend(os.path.join(dir_path, name) for name in file_names)
    return found_files
def fields_for(table, sub_path=None):
    """
    Return the json schema for any table identified in the fields directory.

    Uses os.walk to traverse subdirectories.

    :param table: The table to get a schema for
    :param sub_path: A string identifying a sub-directory in resource_files/fields.
        If provided, this directory will be searched.
    :returns: a json object representing the fields for the named table
    :raises RuntimeError: if no schema file is found, or if more than one
        directory contains a schema file for the table
    """
    path = os.path.join(fields_path, sub_path if sub_path else '')
    target_filename = table + '.json'
    # default setting
    json_path = os.path.join(path, target_filename)
    unique_count = 0
    for dirpath, _, files in os.walk(path):
        # When sub_path is given, only consider that directory.
        if sub_path and os.path.basename(sub_path) != os.path.basename(dirpath):
            continue
        for filename in files:
            # Match the schema file by its exact name. The previous check
            # (filename[:-5] == table) matched ANY 5-character extension,
            # e.g. `<table>.yaml`, which could trigger a spurious
            # "multiple schemas" error or attempt to json.load a
            # non-JSON file.
            if filename == target_filename:
                json_path = os.path.join(dirpath, filename)
                unique_count = unique_count + 1
    if unique_count > 1:
        raise RuntimeError(
            f"Unable to read schema file because multiple schemas exist for:\t"
            f"{table} in path {path}")
    elif unique_count == 0:
        raise RuntimeError(
            f"Unable to find schema file for {table} in path {path}")
    with open(json_path, 'r') as fp:
        fields = json.load(fp)
    return fields
def is_internal_table(table_id):
    """
    Tell whether a table is internal to the pipeline.

    Internal tables (e.g. logging tables, mapping tables) are identified
    by a leading underscore in their name.

    :param table_id: identifies the table
    :return: True if specified table is an internal table, False otherwise
    """
    return table_id.startswith('_')
def is_extension_table(table_id):
    """
    Tell whether a table is an OMOP extension table.

    Extension tables carry additional detail about OMOP records that does
    not fit the OMOP common data model; they are identified by an `_ext`
    suffix.

    :param table_id: identifies the table
    :return: True if specified table is an extension table, False otherwise
    """
    return table_id.endswith('_ext')
def is_mapping_table(table_id):
    """
    Tell whether a table is a mapping table.

    Mapping tables are identified by a `_mapping_` name prefix.

    :param table_id: identifies the table
    :return: True if specified table is a mapping table, False otherwise
    """
    return table_id.startswith('_mapping_')
def is_pii_table(table_id):
    """
    Tell whether a table holds personally identifiable information.

    PII tables are identified by a `pii` or `participant` name prefix.

    :param table_id: identifies the table
    :return: True if specified table is a pii table, False otherwise
    """
    # str.startswith accepts a tuple of prefixes.
    return table_id.startswith(('pii', 'participant'))
def is_id_match(table_id):
    """
    Tell whether a table is an identity_match table.

    Identified by an `identity_` name prefix.

    :param table_id: identifies the table
    :return: True if specified table is an identity match table, False otherwise
    """
    return table_id.startswith('identity_')
def cdm_schemas(include_achilles=False, include_vocabulary=False):
    """
    Get a dictionary mapping table_name -> schema.

    Internal, mapping, PII, identity-match, extension and post-deid tables
    are always excluded; Achilles and vocabulary tables are excluded unless
    explicitly requested.

    :param include_achilles: include Achilles (+heel) tables when True
    :param include_vocabulary: include vocabulary tables when True
    :return: dict of table name to parsed JSON schema
    """
    result = dict()
    # TODO: update this code as part of DC-1015 and remove this comment
    for dir_path, _, files in os.walk(fields_path):
        for f in files:
            file_path = os.path.join(dir_path, f)
            with open(file_path, 'r') as fp:
                table_name = os.path.basename(f).split('.')[0]
                schema = json.load(fp)
                excluded = (
                    (table_name in VOCABULARY_TABLES and not include_vocabulary)
                    or (table_name in ACHILLES_TABLES + ACHILLES_HEEL_TABLES
                        and not include_achilles)
                    or is_internal_table(table_name)
                    or is_pii_table(table_name)
                    or is_id_match(table_name)
                    or is_extension_table(table_name)
                    or table_name == 'post_deid_person'
                )
                if not excluded:
                    result[table_name] = schema
    return result
def mapping_schemas():
    """
    Get a dictionary mapping table_name -> schema for mapping tables only.

    Only files directly under ``fields_path`` whose stem starts with
    `_mapping_` are opened and parsed.
    """
    result = dict()
    for file_name in os.listdir(fields_path):
        table_name = file_name.split('.')[0]
        if not is_mapping_table(table_name):
            continue
        # only open and load mapping tables, instead of all tables
        with open(os.path.join(fields_path, file_name), 'r') as fp:
            result[table_name] = json.load(fp)
    return result
def hash_dir(in_dir):
    """
    Generate a SHA-256 digest from the contents of a directory.

    Fixes two issues with the previous version: the docstring claimed MD5
    while the code uses sha256, and ``os.listdir`` returns entries in
    arbitrary, platform-dependent order, which made the digest
    non-deterministic for multi-file directories. Entries are now sorted,
    so the digest is stable (note: this changes the digest for
    multi-file directories relative to any previously stored values).

    :param in_dir: directory whose immediate files are hashed
    :return: hex digest string
    """
    hash_obj = hashlib.sha256()
    for file_name in sorted(os.listdir(in_dir)):
        file_path = os.path.join(in_dir, file_name)
        with open(file_path, 'rb') as fp:
            hash_obj.update(fp.read())
    return hash_obj.hexdigest()
# Derived module-level lookups, built once at import time from the schema
# and resource directories above.
CDM_TABLES = list(cdm_schemas().keys())
MAPPING_TABLES = list(mapping_schemas().keys())
ACHILLES_INDEX_FILES = achilles_index_files()
CDM_FILES = [table + '.csv' for table in CDM_TABLES]
# Achilles index file paths relative to resource_files_path.
ALL_ACHILLES_INDEX_FILES = [
    name.split(resource_files_path + os.sep)[1].strip()
    for name in ACHILLES_INDEX_FILES
]
IGNORE_LIST = [PROCESSED_TXT, RESULTS_HTML] + ALL_ACHILLES_INDEX_FILES
def get_domain_id_field(domain_table):
    """
    Build the primary-key field name for a cdm domain table.

    :param domain_table: the cdm domain table
    :return: the id field, e.g. 'person' -> 'person_id'
    """
    return f'{domain_table}_id'
def get_domain_concept_id(domain_table):
    """
    Build the domain_concept_id field name for a cdm domain table.

    :param domain_table: the cdm domain table
    :return: the domain_concept_id, e.g.
        'condition_occurrence' -> 'condition_concept_id'
    """
    # partition('_')[0] is the first underscore-delimited token.
    return domain_table.partition('_')[0] + '_concept_id'
def get_domain_source_concept_id(domain_table):
    """
    Build the domain_source_concept_id field name for a cdm domain table.

    :param domain_table: the cdm domain table
    :return: the domain_source_concept_id, e.g.
        'condition_occurrence' -> 'condition_source_concept_id'
    """
    return domain_table.partition('_')[0] + '_source_concept_id'
def get_domain(domain_table):
    """
    Get the domain name for the corresponding cdm domain table.

    :param domain_table: the cdm domain table
    :return: the capitalized first token of the table name, e.g.
        'condition_occurrence' -> 'Condition'
    """
    return domain_table.partition('_')[0].capitalize()
| 32.806854 | 104 | 0.691862 |
1b2ff41fe75280eba2b7f0974c8972fca03580af | 2,503 | py | Python | official/transformer/compute_bleu_test.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | official/transformer/compute_bleu_test.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | official/transformer/compute_bleu_test.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test functions in compute_blue.py."""
import tempfile
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.transformer import compute_bleu
class ComputeBleuTest(tf.test.TestCase):
    """Unit tests for the BLEU computation helpers in compute_bleu."""

    @staticmethod
    def _create_temp_file(text):
        """Write ``text`` to a fresh temporary file and return its path."""
        handle = tempfile.NamedTemporaryFile(delete=False)
        with tf.gfile.Open(handle.name, 'w') as writer:
            writer.write(text)
        return handle.name

    def _bleu_scores(self, ref_text, hyp_text):
        """Return (uncased, cased) BLEU for reference/hypothesis texts."""
        ref = self._create_temp_file(ref_text)
        hyp = self._create_temp_file(hyp_text)
        return (compute_bleu.bleu_wrapper(ref, hyp, False),
                compute_bleu.bleu_wrapper(ref, hyp, True))

    def test_bleu_same(self):
        uncased_score, cased_score = self._bleu_scores(
            "test 1 two 3\nmore tests!", "test 1 two 3\nmore tests!")
        self.assertEqual(100, uncased_score)
        self.assertEqual(100, cased_score)

    def test_bleu_same_different_case(self):
        uncased_score, cased_score = self._bleu_scores(
            "Test 1 two 3\nmore tests!", "test 1 two 3\nMore tests!")
        self.assertEqual(100, uncased_score)
        self.assertLess(cased_score, 100)

    def test_bleu_different(self):
        uncased_score, cased_score = self._bleu_scores(
            "Testing\nmore tests!", "Dog\nCat")
        self.assertLess(uncased_score, 100)
        self.assertLess(cased_score, 100)

    def test_bleu_tokenize(self):
        tokenized = compute_bleu.bleu_tokenize("Test0, 1 two, 3")
        self.assertEqual(["Test0", ",", "1", "two", ",", "3"], tokenized)
self.assertEqual(["Test0", ",", "1", "two", ",", "3"], tokenized)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    tf.test.main()
| 37.924242 | 80 | 0.675589 |
f891d5fc1992532399d2d4e8784bfd081a4d47a7 | 246 | py | Python | ch04/04_24.py | leeseedong/book-cryptocurrency | 58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c | [
"Apache-2.0"
] | 121 | 2019-03-23T13:53:06.000Z | 2022-03-28T15:15:03.000Z | ch04/04_24.py | leeseedong/book-cryptocurrency | 58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c | [
"Apache-2.0"
] | 3 | 2021-04-14T14:31:26.000Z | 2021-05-09T13:46:14.000Z | ch04/04_24.py | leeseedong/book-cryptocurrency | 58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c | [
"Apache-2.0"
] | 114 | 2019-03-21T13:43:03.000Z | 2022-03-31T18:42:11.000Z | from pandas import Series, DataFrame
data = {"open": [737, 750], "high": [755, 780], "low": [700, 710], "close": [750, 770]}
df = DataFrame(data)
s = Series([300, 400])
df["volume"] = s
upper = df["open"] * 1.3
df["upper"] = upper
print(df)
| 22.363636 | 88 | 0.585366 |
6a33dca9f675b2bf9c11127fe6c16036ad21a91f | 833 | py | Python | build-support/list_packaged_targets.py | hstenzel/yugabyte-db | b25c8f4d7a9e66d106c41c446b71af870aefa304 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | build-support/list_packaged_targets.py | hstenzel/yugabyte-db | b25c8f4d7a9e66d106c41c446b71af870aefa304 | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2022-02-16T01:17:34.000Z | 2022-02-16T01:17:34.000Z | build-support/list_packaged_targets.py | hstenzel/yugabyte-db | b25c8f4d7a9e66d106c41c446b71af870aefa304 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python2.7
# Copyright (c) YugaByte, Inc.
# Lists the targets that need to be built for a YB release.
import json
import os
import re
# Repository root: two directory levels up from this script.
YB_SRC_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Matches manifest 'bin' entries of the form "$BUILD_ROOT/bin/<target>";
# group 1 captures the target name.
LATEST_BINARY_RE = re.compile('^\$BUILD_ROOT/bin/([^/]+)$')
def main():
    """Print the names of binaries packaged from $BUILD_ROOT/bin.

    Reads yb_release_manifest.json at the repository root and emits one
    target name per line.

    Raises:
        RuntimeError: if the manifest contains no $BUILD_ROOT/bin entries.
    """
    with open(os.path.join(YB_SRC_ROOT, 'yb_release_manifest.json')) as manifest_input_file:
        release_manifest = json.loads(manifest_input_file.read())['all']
    found_matches = False
    for bin_path in release_manifest['bin']:
        match = LATEST_BINARY_RE.match(bin_path)
        if match:
            # Single-argument print() is valid in both Python 2 and 3,
            # unlike the previous py2-only `print x` statement.
            print(match.group(1))
            found_matches = True
    if not found_matches:
        raise RuntimeError("Could not find any targets to be packaged in the release archive")


if __name__ == '__main__':
    main()
| 26.03125 | 94 | 0.684274 |
13a1fa6421a6d14a60f1177a83c8679b4e78fd81 | 6,937 | py | Python | qiskit/providers/ibmq/runtime/utils.py | merav-aharoni/qiskit-ibmq-provider | 4f6b7f693d0b6f8afd63f389bb7576ac1be70b91 | [
"Apache-2.0"
] | 1 | 2021-10-19T10:38:25.000Z | 2021-10-19T10:38:25.000Z | qiskit/providers/ibmq/runtime/utils.py | merav-aharoni/qiskit-ibmq-provider | 4f6b7f693d0b6f8afd63f389bb7576ac1be70b91 | [
"Apache-2.0"
] | null | null | null | qiskit/providers/ibmq/runtime/utils.py | merav-aharoni/qiskit-ibmq-provider | 4f6b7f693d0b6f8afd63f389bb7576ac1be70b91 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=method-hidden
# pylint: disable=too-many-return-statements
"""Utility functions for the runtime service."""
import json
from typing import Any, Callable, Dict
import base64
import io
import zlib
import inspect
import importlib
import numpy as np
try:
import scipy.sparse
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
from qiskit.result import Result
from qiskit.circuit import QuantumCircuit, qpy_serialization
from qiskit.circuit import ParameterExpression, Instruction
from qiskit.circuit.library import BlueprintCircuit
def _serialize_and_encode(
        data: Any,
        serializer: Callable,
        compress: bool = True,
        **kwargs: Any
) -> str:
    """Serialize the input data and return the encoded string.

    Args:
        data: Data to be serialized.
        serializer: Function used to serialize data; called as
            ``serializer(buffer, data, **kwargs)``.
        compress: Whether to zlib-compress the serialized bytes.
        kwargs: Keyword arguments to pass to the serializer.

    Returns:
        Base64 (standard alphabet) string representation.
    """
    buff = io.BytesIO()
    serializer(buff, data, **kwargs)
    raw = buff.getvalue()
    buff.close()
    if compress:
        raw = zlib.compress(raw)
    return base64.standard_b64encode(raw).decode("utf-8")
def _decode_and_deserialize(data: str, deserializer: Callable, decompress: bool = True) -> Any:
"""Decode and deserialize input data.
Args:
data: Data to be deserialized.
deserializer: Function used to deserialize data.
decompress: Whether to decompress.
Returns:
Deserialized data.
"""
buff = io.BytesIO()
decoded = base64.standard_b64decode(data)
if decompress:
decoded = zlib.decompress(decoded)
buff.write(decoded)
buff.seek(0)
orig = deserializer(buff)
buff.close()
return orig
def deserialize_from_settings(mod_name: str, class_name: str, settings: Dict) -> Any:
    """Recreate an object from its constructor settings.

    Args:
        mod_name: Dotted path of the module that defines the class.
        class_name: Name of the class to instantiate.
        settings: Keyword arguments passed to the class constructor.

    Returns:
        A new instance of the named class.

    Raises:
        ValueError: If unable to find the class.
    """
    module = importlib.import_module(mod_name)
    # Build a name -> class mapping of everything class-like in the module.
    candidates = dict(inspect.getmembers(module, inspect.isclass))
    if class_name not in candidates:
        raise ValueError(f"Unable to find class {class_name} in module {mod_name}")
    return candidates[class_name](**settings)
class RuntimeEncoder(json.JSONEncoder):
    """JSON Encoder used by runtime service.

    Each supported type is wrapped as ``{'__type__': tag, '__value__': ...}``
    so that :class:`RuntimeDecoder` can reverse the transformation.  The order
    of the isinstance/hasattr checks below is significant: e.g. an object that
    both has ``to_json`` and is a ``QuantumCircuit`` is encoded via ``to_json``
    because that branch comes first.
    """
    def default(self, obj: Any) -> Any: # pylint: disable=arguments-differ
        if isinstance(obj, complex):
            return {'__type__': 'complex', '__value__': [obj.real, obj.imag]}
        if isinstance(obj, np.ndarray):
            # Pickle is disabled so only plain (non-object) arrays round-trip.
            value = _serialize_and_encode(obj, np.save, allow_pickle=False)
            return {'__type__': 'ndarray', '__value__': value}
        if isinstance(obj, set):
            return {'__type__': 'set', '__value__': list(obj)}
        if isinstance(obj, Result):
            return {'__type__': 'Result', '__value__': obj.to_dict()}
        if hasattr(obj, 'to_json'):
            return {'__type__': 'to_json', '__value__': obj.to_json()}
        if isinstance(obj, QuantumCircuit):
            # TODO Remove the decompose when terra 6713 is released.
            if isinstance(obj, BlueprintCircuit):
                obj = obj.decompose()
            # QPY binary serialization, then base64 via the shared helper.
            value = _serialize_and_encode(
                data=obj,
                serializer=lambda buff, data: qpy_serialization.dump(data, buff)
            )
            return {'__type__': 'QuantumCircuit', '__value__': value}
        if isinstance(obj, ParameterExpression):
            # Uses a private QPY writer; compression is skipped for these
            # small payloads.
            value = _serialize_and_encode(
                data=obj,
                serializer=qpy_serialization._write_parameter_expression,
                compress=False,
            )
            return {'__type__': 'ParameterExpression', '__value__': value}
        if isinstance(obj, Instruction):
            value = _serialize_and_encode(
                data=obj, serializer=qpy_serialization._write_instruction, compress=False)
            return {'__type__': 'Instruction', '__value__': value}
        if hasattr(obj, "settings"):
            # Generic fallback for objects reconstructible from constructor
            # settings (see deserialize_from_settings).
            return {'__type__': 'settings',
                    '__module__': obj.__class__.__module__,
                    '__class__': obj.__class__.__name__,
                    '__value__': obj.settings}
        if HAS_SCIPY and isinstance(obj, scipy.sparse.spmatrix):
            value = _serialize_and_encode(obj, scipy.sparse.save_npz, compress=False)
            return {'__type__': 'spmatrix', '__value__': value}
        # Anything else falls through to the base class, which raises TypeError.
        return super().default(obj)
class RuntimeDecoder(json.JSONDecoder):
    """JSON Decoder used by runtime service.

    Inverse of :class:`RuntimeEncoder`: dicts tagged with ``__type__`` are
    turned back into the corresponding Python/Qiskit objects; untagged dicts
    pass through unchanged.
    """
    def __init__(self, *args: Any, **kwargs: Any):
        super().__init__(object_hook=self.object_hook, *args, **kwargs)
    def object_hook(self, obj: Any) -> Any:
        """Called to decode object."""
        if '__type__' in obj:
            obj_type = obj['__type__']
            obj_val = obj['__value__']
            if obj_type == 'complex':
                return obj_val[0] + 1j * obj_val[1]
            if obj_type == 'ndarray':
                return _decode_and_deserialize(obj_val, np.load)
            if obj_type == 'set':
                return set(obj_val)
            if obj_type == 'QuantumCircuit':
                # qpy load returns a list of circuits; only one was dumped.
                return _decode_and_deserialize(obj_val, qpy_serialization.load)[0]
            if obj_type == 'ParameterExpression':
                return _decode_and_deserialize(
                    obj_val, qpy_serialization._read_parameter_expression, False)
            if obj_type == 'Instruction':
                return _decode_and_deserialize(
                    obj_val, qpy_serialization._read_instruction, False)
            if obj_type == 'settings':
                return deserialize_from_settings(
                    mod_name=obj['__module__'],
                    class_name=obj['__class__'],
                    settings=obj_val
                )
            if obj_type == 'Result':
                return Result.from_dict(obj_val)
            if obj_type == 'spmatrix':
                # NOTE(review): unlike the encoder, this branch is not guarded
                # by HAS_SCIPY -- decoding a 'spmatrix' payload without scipy
                # installed raises NameError; confirm whether intended.
                return _decode_and_deserialize(obj_val, scipy.sparse.load_npz, False)
            if obj_type == 'to_json':
                # Encoded via an object's to_json(); returned as the raw JSON
                # string since the original class is unknown here.
                return obj_val
        return obj
| 35.943005 | 95 | 0.628081 |
b71be3e29aadf251397166f598752ca0e6a053a6 | 35,001 | py | Python | pypy/module/micronumpy/loop.py | reingart/pypy | 9d6b4fb5eaabe6d2bbe63e063709d72d86f8b153 | [
"Apache-2.0",
"OpenSSL"
] | 4 | 2017-09-17T03:27:47.000Z | 2020-04-29T00:10:20.000Z | pypy/module/micronumpy/loop.py | reingart/pypy | 9d6b4fb5eaabe6d2bbe63e063709d72d86f8b153 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/micronumpy/loop.py | reingart/pypy | 9d6b4fb5eaabe6d2bbe63e063709d72d86f8b153 | [
"Apache-2.0",
"OpenSSL"
] | 5 | 2017-09-20T08:08:43.000Z | 2022-02-02T08:19:30.000Z | """ This file is the main run loop as well as evaluation loops for various
operations. This is the place to look for all the computations that iterate
over all the array elements.
"""
from pypy.interpreter.error import OperationError
from rpython.rlib import jit
from rpython.rlib.rstring import StringBuilder
from rpython.rtyper.lltypesystem import lltype, rffi
from pypy.module.micronumpy import support, constants as NPY
from pypy.module.micronumpy.base import W_NDimArray, convert_to_array
from pypy.module.micronumpy.iterators import PureShapeIter, AxisIter, \
AllButAxisIter
from pypy.interpreter.argument import Arguments
# JIT driver for the elementwise binary-op loop; greens identify the trace,
# reds are inferred automatically.  Its merge point must stay at the top of
# the loop body in call2 below.
call2_driver = jit.JitDriver(
    name='numpy_call2',
    greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'],
    reds='auto')
def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out):
    """Apply binary op `func` elementwise over `w_lhs`/`w_rhs` into `out`.

    Operands are converted to `calc_dtype` before the call and the result is
    converted to `res_dtype`.  Size-1 operands are read once as scalars rather
    than iterated.  If `out` is None a fresh array is allocated, whose ndarray
    subtype is chosen from the operands (see the priority notes below).
    """
    # handle array_priority
    # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does:
    # 1. if __array_priorities__ are equal and one is an ndarray and the
    # other is a subtype, flip the order
    # 2. elif rhs.__array_priority__ is higher, flip the order
    # Now return the subtype of the first one
    w_ndarray = space.gettypefor(W_NDimArray)
    lhs_type = space.type(w_lhs)
    rhs_type = space.type(w_rhs)
    lhs_for_subtype = w_lhs
    rhs_for_subtype = w_rhs
    #it may be something like a FlatIter, which is not an ndarray
    if not space.is_true(space.issubtype(lhs_type, w_ndarray)):
        lhs_type = space.type(w_lhs.base)
        lhs_for_subtype = w_lhs.base
    if not space.is_true(space.issubtype(rhs_type, w_ndarray)):
        rhs_type = space.type(w_rhs.base)
        rhs_for_subtype = w_rhs.base
    if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray):
        lhs_for_subtype = rhs_for_subtype
    # TODO handle __array_priorities__ and maybe flip the order
    if w_lhs.get_size() == 1:
        # scalar operand: convert once, no iterator needed
        w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype)
        left_iter = left_state = None
    else:
        w_left = None
        left_iter, left_state = w_lhs.create_iter(shape)
        left_iter.track_index = False
    if w_rhs.get_size() == 1:
        w_right = w_rhs.get_scalar_value().convert_to(space, calc_dtype)
        right_iter = right_state = None
    else:
        w_right = None
        right_iter, right_state = w_rhs.create_iter(shape)
        right_iter.track_index = False
    if out is None:
        out = W_NDimArray.from_shape(space, shape, res_dtype,
                                     w_instance=lhs_for_subtype)
    out_iter, out_state = out.create_iter(shape)
    shapelen = len(shape)
    while not out_iter.done(out_state):
        call2_driver.jit_merge_point(shapelen=shapelen, func=func,
                                     calc_dtype=calc_dtype, res_dtype=res_dtype)
        if left_iter:
            w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype)
            left_state = left_iter.next(left_state)
        if right_iter:
            w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype)
            right_state = right_iter.next(right_state)
        out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to(
            space, res_dtype))
        out_state = out_iter.next(out_state)
    return out
# JIT driver for the elementwise unary-op loop in call1.
call1_driver = jit.JitDriver(
    name='numpy_call1',
    greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'],
    reds='auto')
def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out):
    """Apply unary op `func` elementwise over `w_obj`, writing into `out`.

    Each element is converted to `calc_dtype` before the call; the result is
    converted to `res_dtype`.  Allocates `out` if None, preserving w_obj's
    ndarray subtype via w_instance.
    """
    obj_iter, obj_state = w_obj.create_iter(shape)
    obj_iter.track_index = False
    if out is None:
        out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj)
    out_iter, out_state = out.create_iter(shape)
    shapelen = len(shape)
    while not out_iter.done(out_state):
        call1_driver.jit_merge_point(shapelen=shapelen, func=func,
                                     calc_dtype=calc_dtype, res_dtype=res_dtype)
        elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype)
        out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype))
        out_state = out_iter.next(out_state)
        obj_state = obj_iter.next(obj_state)
    return out
call_many_to_one_driver = jit.JitDriver(
    name='numpy_call_many_to_one',
    greens=['shapelen', 'nin', 'func', 'res_dtype'],
    reds='auto')
def call_many_to_one(space, shape, func, res_dtype, in_args, out):
    """N-ary elementwise op: out[i] = func(in_args[0][i], ..., in_args[n-1][i]).

    `func` is an app-level callable (invoked through space.call_args), usually
    an external ufunc; results are coerced to `res_dtype`.
    """
    # out must have been built. func needs no calc_type, is usually an
    # external ufunc
    nin = len(in_args)
    in_iters = [None] * nin
    in_states = [None] * nin
    for i in range(nin):
        in_i = in_args[i]
        assert isinstance(in_i, W_NDimArray)
        in_iter, in_state = in_i.create_iter(shape)
        in_iters[i] = in_iter
        in_states[i] = in_state
    shapelen = len(shape)
    assert isinstance(out, W_NDimArray)
    out_iter, out_state = out.create_iter(shape)
    vals = [None] * nin
    while not out_iter.done(out_state):
        call_many_to_one_driver.jit_merge_point(shapelen=shapelen, func=func,
                                                res_dtype=res_dtype, nin=nin)
        for i in range(nin):
            vals[i] = in_iters[i].getitem(in_states[i])
        w_arglist = space.newlist(vals)
        w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist))
        out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val))
        for i in range(nin):
            in_states[i] = in_iters[i].next(in_states[i])
        out_state = out_iter.next(out_state)
    return out
call_many_to_many_driver = jit.JitDriver(
    name='numpy_call_many_to_many',
    greens=['shapelen', 'nin', 'nout', 'func', 'res_dtype'],
    reds='auto')
def call_many_to_many(space, shape, func, res_dtype, in_args, out_args):
    """N-ary elementwise op with multiple outputs.

    Like call_many_to_one, but `func` may return a tuple of values which are
    scattered across `out_args`; a single (non-tuple) return value is written
    into the first output only.  Returns an app-level tuple of the outputs.
    """
    # out must have been built. func needs no calc_type, is usually an
    # external ufunc
    nin = len(in_args)
    in_iters = [None] * nin
    in_states = [None] * nin
    nout = len(out_args)
    out_iters = [None] * nout
    out_states = [None] * nout
    for i in range(nin):
        in_i = in_args[i]
        assert isinstance(in_i, W_NDimArray)
        in_iter, in_state = in_i.create_iter(shape)
        in_iters[i] = in_iter
        in_states[i] = in_state
    for i in range(nout):
        out_i = out_args[i]
        assert isinstance(out_i, W_NDimArray)
        out_iter, out_state = out_i.create_iter(shape)
        out_iters[i] = out_iter
        out_states[i] = out_state
    shapelen = len(shape)
    vals = [None] * nin
    # loop termination is driven by the first output iterator
    while not out_iters[0].done(out_states[0]):
        call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func,
                                                 res_dtype=res_dtype, nin=nin, nout=nout)
        for i in range(nin):
            vals[i] = in_iters[i].getitem(in_states[i])
        w_arglist = space.newlist(vals)
        w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist))
        # w_outvals should be a tuple, but func can return a single value as well
        if space.isinstance_w(w_outvals, space.w_tuple):
            batch = space.listview(w_outvals)
            for i in range(len(batch)):
                out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i]))
                out_states[i] = out_iters[i].next(out_states[i])
        else:
            out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals))
            out_states[0] = out_iters[0].next(out_states[0])
        for i in range(nin):
            in_states[i] = in_iters[i].next(in_states[i])
    return space.newtuple([convert_to_array(space, o) for o in out_args])
setslice_driver = jit.JitDriver(name='numpy_setslice',
                                greens = ['shapelen', 'dtype'],
                                reds = 'auto')
def setslice(space, shape, target, source):
    """Copy `source` into `target` elementwise, coercing to target's dtype.

    The zero-dimensional (empty shape) case is handled inline as a single
    scalar assignment; everything else is delegated to _setslice.
    """
    if not shape:
        dtype = target.dtype
        val = source.getitem(source.start)
        if dtype.is_str_or_unicode():
            val = dtype.coerce(space, val)
        else:
            val = val.convert_to(space, dtype)
        target.setitem(target.start, val)
        return target
    return _setslice(space, shape, target, source)
def _setslice(space, shape, target, source):
    """Elementwise copy loop used by setslice for non-scalar shapes."""
    # note that unlike everything else, target and source here are
    # array implementations, not arrays
    target_iter, target_state = target.create_iter(shape)
    source_iter, source_state = source.create_iter(shape)
    source_iter.track_index = False
    dtype = target.dtype
    shapelen = len(shape)
    while not target_iter.done(target_state):
        setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype)
        val = source_iter.getitem(source_state)
        # string/unicode dtypes need coerce(); numeric ones use convert_to()
        if dtype.is_str_or_unicode():
            val = dtype.coerce(space, val)
        else:
            val = val.convert_to(space, dtype)
        target_iter.setitem(target_state, val)
        target_state = target_iter.next(target_state)
        source_state = source_iter.next(source_state)
    return target
reduce_driver = jit.JitDriver(name='numpy_reduce',
                              greens = ['shapelen', 'func', 'done_func',
                                        'calc_dtype'],
                              reds = 'auto')
def compute_reduce(space, obj, calc_dtype, func, done_func, identity):
    """Fold `func` over all elements of `obj`, returning the accumulated box.

    If `identity` is None the first element seeds the accumulator (so the
    loop starts from the second element).  If `done_func` is given, the
    reduction short-circuits and returns the current element as soon as
    done_func(dtype, elem) is true (used e.g. for any/all-style reductions).
    """
    obj_iter, obj_state = obj.create_iter()
    if identity is None:
        cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype)
        obj_state = obj_iter.next(obj_state)
    else:
        cur_value = identity.convert_to(space, calc_dtype)
    shapelen = len(obj.get_shape())
    while not obj_iter.done(obj_state):
        reduce_driver.jit_merge_point(shapelen=shapelen, func=func,
                                      done_func=done_func,
                                      calc_dtype=calc_dtype)
        rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype)
        if done_func is not None and done_func(calc_dtype, rval):
            return rval
        cur_value = func(calc_dtype, cur_value, rval)
        obj_state = obj_iter.next(obj_state)
    return cur_value
reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver',
                                  greens = ['shapelen', 'func', 'dtype'],
                                  reds = 'auto')
def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity):
    """Cumulative reduction (e.g. cumsum/cumprod): writes running totals to `out`.

    Returns None; the result lives entirely in `out`.
    """
    obj_iter, obj_state = obj.create_iter()
    out_iter, out_state = out.create_iter()
    out_iter.track_index = False
    if identity is None:
        # seed with the first element and emit it as the first partial result
        cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype)
        out_iter.setitem(out_state, cur_value)
        out_state = out_iter.next(out_state)
        obj_state = obj_iter.next(obj_state)
    else:
        cur_value = identity.convert_to(space, calc_dtype)
    shapelen = len(obj.get_shape())
    while not obj_iter.done(obj_state):
        reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func,
                                          dtype=calc_dtype)
        rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype)
        cur_value = func(calc_dtype, cur_value, rval)
        out_iter.setitem(out_state, cur_value)
        out_state = out_iter.next(out_state)
        obj_state = obj_iter.next(obj_state)
def fill(arr, box):
    """Set every element of `arr` to the boxed value `box` (no conversion)."""
    arr_iter, arr_state = arr.create_iter()
    while not arr_iter.done(arr_state):
        arr_iter.setitem(arr_state, box)
        arr_state = arr_iter.next(arr_state)
def assign(space, arr, seq):
    """Assign the items of the (host-level) sequence `seq` into `arr`,
    coercing each item to arr's dtype.  Assumes len(seq) matches arr's size.
    """
    arr_iter, arr_state = arr.create_iter()
    arr_dtype = arr.get_dtype()
    for item in seq:
        arr_iter.setitem(arr_state, arr_dtype.coerce(space, item))
        arr_state = arr_iter.next(arr_state)
where_driver = jit.JitDriver(name='numpy_where',
                             greens = ['shapelen', 'dtype', 'arr_dtype'],
                             reds = 'auto')
def where(space, out, shape, arr, x, y, dtype):
    """numpy.where loop: out[i] = x[i] if arr[i] else y[i].

    One non-scalar operand (preferring x, then y, falling back to the
    condition array) is picked to drive loop termination; only that iterator
    tracks its index.  Note: `iter` deliberately shadows the builtin here
    (RPython-era code); left unchanged.
    """
    out_iter, out_state = out.create_iter(shape)
    arr_iter, arr_state = arr.create_iter(shape)
    arr_dtype = arr.get_dtype()
    x_iter, x_state = x.create_iter(shape)
    y_iter, y_state = y.create_iter(shape)
    if x.is_scalar():
        if y.is_scalar():
            iter, state = arr_iter, arr_state
        else:
            iter, state = y_iter, y_state
    else:
        iter, state = x_iter, x_state
    out_iter.track_index = x_iter.track_index = False
    arr_iter.track_index = y_iter.track_index = False
    iter.track_index = True
    shapelen = len(shape)
    while not iter.done(state):
        where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype,
                                     arr_dtype=arr_dtype)
        w_cond = arr_iter.getitem(arr_state)
        if arr_dtype.itemtype.bool(w_cond):
            w_val = x_iter.getitem(x_state).convert_to(space, dtype)
        else:
            w_val = y_iter.getitem(y_state).convert_to(space, dtype)
        out_iter.setitem(out_state, w_val)
        out_state = out_iter.next(out_state)
        arr_state = arr_iter.next(arr_state)
        x_state = x_iter.next(x_state)
        y_state = y_iter.next(y_state)
        # re-sync `state` with whichever iterator was chosen above
        if x.is_scalar():
            if y.is_scalar():
                state = arr_state
            else:
                state = y_state
        else:
            state = x_state
    return out
axis_reduce_driver = jit.JitDriver(name='numpy_axis_reduce',
                                   greens=['shapelen', 'func', 'dtype'],
                                   reds='auto')
def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative,
                   temp):
    """Reduce `arr` along `axis` with `func`, writing into `out`.

    When `cumulative` is true, `temp` carries the running partial values so
    that `out` receives each intermediate result (cumsum-style); otherwise
    `temp_iter` aliases `out_iter` and the accumulator is read back from out.
    The start of each run along the axis is detected via out's index being 0
    on that axis, at which point the accumulator is (re)seeded.
    """
    out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative)
    out_state = out_iter.reset()
    if cumulative:
        temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False)
        temp_state = temp_iter.reset()
    else:
        temp_iter = out_iter # hack
        temp_state = out_state
    arr_iter, arr_state = arr.create_iter()
    arr_iter.track_index = False
    if identity is not None:
        identity = identity.convert_to(space, dtype)
    shapelen = len(shape)
    while not out_iter.done(out_state):
        axis_reduce_driver.jit_merge_point(shapelen=shapelen, func=func,
                                           dtype=dtype)
        w_val = arr_iter.getitem(arr_state).convert_to(space, dtype)
        arr_state = arr_iter.next(arr_state)
        out_indices = out_iter.indices(out_state)
        if out_indices[axis] == 0:
            # first element of this run along the reduced axis
            if identity is not None:
                w_val = func(dtype, identity, w_val)
        else:
            cur = temp_iter.getitem(temp_state)
            w_val = func(dtype, cur, w_val)
        out_iter.setitem(out_state, w_val)
        out_state = out_iter.next(out_state)
        if cumulative:
            temp_iter.setitem(temp_state, w_val)
            temp_state = temp_iter.next(temp_state)
        else:
            temp_state = out_state
    return out
def _new_argmin_argmax(op_name):
    """Factory building an argmin/argmax loop for itemtype op `op_name`.

    The returned function walks the flattened array; whenever applying the
    min/max itemtype op changes the current best, the element's flat index is
    recorded.  Returns the flat index of the first extremal element.
    """
    arg_driver = jit.JitDriver(name='numpy_' + op_name,
                               greens = ['shapelen', 'dtype'],
                               reds = 'auto')
    def argmin_argmax(arr):
        result = 0
        idx = 1
        dtype = arr.get_dtype()
        # `iter` shadows the builtin; kept as-is (RPython-era code).
        iter, state = arr.create_iter()
        cur_best = iter.getitem(state)
        state = iter.next(state)
        shapelen = len(arr.get_shape())
        while not iter.done(state):
            arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype)
            w_val = iter.getitem(state)
            new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val)
            if dtype.itemtype.ne(new_best, cur_best):
                result = idx
                cur_best = new_best
            state = iter.next(state)
            idx += 1
        return result
    return argmin_argmax
argmin = _new_argmin_argmax('min')
argmax = _new_argmin_argmax('max')
dot_driver = jit.JitDriver(name = 'numpy_dot',
                           greens = ['dtype'],
                           reds = 'auto')
def multidim_dot(space, left, right, result, dtype, right_critical_dim):
    ''' assumes left, right are concrete arrays
    given left.shape == [3, 5, 7],
    right.shape == [2, 7, 4]
    then
    result.shape == [3, 5, 2, 4]
    broadcast shape should be [3, 5, 2, 7, 4]
    result should skip dims 3 which is len(result_shape) - 1
    (note that if right is 1d, result should
    skip len(result_shape))
    left should skip 2, 4 which is a.ndims-1 + range(right.ndims)
    except where it==(right.ndims-2)
    right should skip 0, 1
    '''
    left_shape = left.get_shape()
    right_shape = right.get_shape()
    left_impl = left.implementation
    right_impl = right.implementation
    # the contracted dimensions must agree
    assert left_shape[-1] == right_shape[right_critical_dim]
    assert result.get_dtype() == dtype
    outi, outs = result.create_iter()
    outi.track_index = False
    # iterate over all rows of `left` and all columns of `right`
    lefti = AllButAxisIter(left_impl, len(left_shape) - 1)
    righti = AllButAxisIter(right_impl, right_critical_dim)
    lefts = lefti.reset()
    rights = righti.reset()
    n = left_impl.shape[-1]
    # raw strides used to walk the contracted dimension directly by offset
    s1 = left_impl.strides[-1]
    s2 = right_impl.strides[right_critical_dim]
    while not lefti.done(lefts):
        while not righti.done(rights):
            oval = outi.getitem(outs)
            i1 = lefts.offset
            i2 = rights.offset
            i = 0
            # inner product over the contracted dimension, accumulated into
            # the pre-initialized value already in `result`
            while i < n:
                i += 1
                dot_driver.jit_merge_point(dtype=dtype)
                lval = left_impl.getitem(i1).convert_to(space, dtype)
                rval = right_impl.getitem(i2).convert_to(space, dtype)
                oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval))
                i1 += s1
                i2 += s2
            outi.setitem(outs, oval)
            outs = outi.next(outs)
            rights = righti.next(rights)
        rights = righti.reset(rights)
        lefts = lefti.next(lefts)
    return result
count_all_true_driver = jit.JitDriver(name = 'numpy_count',
                                      greens = ['shapelen', 'dtype'],
                                      reds = 'auto')
def count_all_true_concrete(impl):
    """Count the truthy elements in a concrete array implementation."""
    s = 0
    iter, state = impl.create_iter()
    shapelen = len(impl.shape)
    dtype = impl.dtype
    while not iter.done(state):
        count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype)
        s += iter.getitem_bool(state)
        state = iter.next(state)
    return s
def count_all_true(arr):
    """Count truthy elements; a scalar array yields 0 or 1."""
    if arr.is_scalar():
        return arr.get_dtype().itemtype.bool(arr.get_scalar_value())
    else:
        return count_all_true_concrete(arr.implementation)
nonzero_driver = jit.JitDriver(name = 'numpy_nonzero',
                               greens = ['shapelen', 'dims', 'dtype'],
                               reds = 'auto')
def nonzero(res, arr, box):
    """Fill `res` with the multi-dim indices of arr's nonzero elements.

    For each truthy element, its index along each dimension is appended
    (boxed via `box`) to `res` in dimension order.
    """
    res_iter, res_state = res.create_iter()
    arr_iter, arr_state = arr.create_iter()
    shapelen = len(arr.shape)
    dtype = arr.dtype
    dims = range(shapelen)
    while not arr_iter.done(arr_state):
        nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype)
        if arr_iter.getitem_bool(arr_state):
            arr_indices = arr_iter.indices(arr_state)
            for d in dims:
                res_iter.setitem(res_state, box(arr_indices[d]))
                res_state = res_iter.next(res_state)
        arr_state = arr_iter.next(arr_state)
    return res
getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool',
                                      greens = ['shapelen', 'arr_dtype',
                                                'index_dtype'],
                                      reds = 'auto')
def getitem_filter(res, arr, index):
    """Boolean-mask selection: copy arr's elements where `index` is true into `res`."""
    res_iter, res_state = res.create_iter()
    shapelen = len(arr.get_shape())
    # a 1-d mask on a multi-dim array is broadcast from the trailing axes
    if shapelen > 1 and len(index.get_shape()) < 2:
        index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True)
    else:
        index_iter, index_state = index.create_iter()
    arr_iter, arr_state = arr.create_iter()
    arr_dtype = arr.get_dtype()
    index_dtype = index.get_dtype()
    # XXX length of shape of index as well?
    while not index_iter.done(index_state):
        getitem_filter_driver.jit_merge_point(shapelen=shapelen,
                                              index_dtype=index_dtype,
                                              arr_dtype=arr_dtype,
                                              )
        if index_iter.getitem_bool(index_state):
            res_iter.setitem(res_state, arr_iter.getitem(arr_state))
            res_state = res_iter.next(res_state)
        index_state = index_iter.next(index_state)
        arr_state = arr_iter.next(arr_state)
    return res
setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool',
                                      greens = ['shapelen', 'arr_dtype',
                                                'index_dtype'],
                                      reds = 'auto')
def setitem_filter(space, arr, index, value):
    """Boolean-mask assignment: write `value`'s elements into arr where `index` is true."""
    arr_iter, arr_state = arr.create_iter()
    shapelen = len(arr.get_shape())
    if shapelen > 1 and len(index.get_shape()) < 2:
        index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True)
    else:
        index_iter, index_state = index.create_iter()
    if value.get_size() == 1:
        # scalar value: broadcast it over the whole array shape
        value_iter, value_state = value.create_iter(arr.get_shape())
    else:
        value_iter, value_state = value.create_iter()
    index_dtype = index.get_dtype()
    arr_dtype = arr.get_dtype()
    while not index_iter.done(index_state):
        setitem_filter_driver.jit_merge_point(shapelen=shapelen,
                                              index_dtype=index_dtype,
                                              arr_dtype=arr_dtype,
                                              )
        if index_iter.getitem_bool(index_state):
            val = arr_dtype.coerce(space, value_iter.getitem(value_state))
            # value advances only on matched positions
            value_state = value_iter.next(value_state)
            arr_iter.setitem(arr_state, val)
        arr_state = arr_iter.next(arr_state)
        index_state = index_iter.next(index_state)
flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem',
                                        greens = ['dtype'],
                                        reds = 'auto')
def flatiter_getitem(res, base_iter, base_state, step):
    """Gather a strided slice (flat-index step `step`) of a flat iterator into `res`."""
    ri, rs = res.create_iter()
    dtype = res.get_dtype()
    while not ri.done(rs):
        flatiter_getitem_driver.jit_merge_point(dtype=dtype)
        ri.setitem(rs, base_iter.getitem(base_state))
        # jump directly by flat index rather than stepping one at a time
        base_state = base_iter.goto(base_state.index + step)
        rs = ri.next(rs)
    return res
flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem',
                                        greens = ['dtype'],
                                        reds = 'auto')
def flatiter_setitem(space, dtype, val, arr_iter, arr_state, step, length):
    """Scatter `val` into `length` strided positions of a flat iterator.

    `val` is cycled (reset when exhausted) so a shorter source repeats, as in
    numpy flat-slice assignment.
    """
    val_iter, val_state = val.create_iter()
    while length > 0:
        flatiter_setitem_driver.jit_merge_point(dtype=dtype)
        val = val_iter.getitem(val_state)
        if dtype.is_str_or_unicode():
            val = dtype.coerce(space, val)
        else:
            val = val.convert_to(space, dtype)
        arr_iter.setitem(arr_state, val)
        arr_state = arr_iter.goto(arr_state.index + step)
        val_state = val_iter.next(val_state)
        if val_iter.done(val_state):
            # wrap around: source repeats until `length` targets are written
            val_state = val_iter.reset(val_state)
        length -= 1
fromstring_driver = jit.JitDriver(name = 'numpy_fromstring',
                                  greens = ['itemsize', 'dtype'],
                                  reds = 'auto')
def fromstring_loop(space, a, dtype, itemsize, s):
    """Fill array `a` from the raw byte string `s`, `itemsize` bytes per element."""
    i = 0
    ai, state = a.create_iter()
    while not ai.done(state):
        fromstring_driver.jit_merge_point(dtype=dtype, itemsize=itemsize)
        sub = s[i*itemsize:i*itemsize + itemsize]
        if dtype.is_str_or_unicode():
            val = dtype.coerce(space, space.wrap(sub))
        else:
            # unpack the raw bytes according to the dtype's binary layout
            val = dtype.itemtype.runpack_str(space, sub)
        ai.setitem(state, val)
        state = ai.next(state)
        i += 1
def tostring(space, arr):
    """Serialize `arr` to a raw byte string (elements in iteration order).

    Each element is round-tripped through a 1-element scratch array so its
    in-memory byte representation can be read out directly from storage.
    """
    builder = StringBuilder()
    iter, state = arr.create_iter()
    w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C')
    itemsize = arr.get_dtype().elsize
    with w_res_str.implementation as storage:
        res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char),
                                   support.get_storage_as_int(storage))
        while not iter.done(state):
            w_res_str.implementation.setitem(0, iter.getitem(state))
            for i in range(itemsize):
                builder.append(res_str_casted[i])
            state = iter.next(state)
    return builder.build()
getitem_int_driver = jit.JitDriver(name = 'numpy_getitem_int',
                                   greens = ['shapelen', 'indexlen',
                                             'prefixlen', 'dtype'],
                                   reds = 'auto')
def getitem_array_int(space, arr, res, iter_shape, indexes_w, prefix_w):
    """Fancy (integer-array) indexing: gather arr[indexes] into `res`.

    `indexes_w` mixes integer-array indices (iterated via PureShapeIter) with
    plain wrapped indices; `prefix_w` holds leading index components that are
    fixed for the whole operation.
    """
    shapelen = len(iter_shape)
    prefixlen = len(prefix_w)
    indexlen = len(indexes_w)
    dtype = arr.get_dtype()
    iter = PureShapeIter(iter_shape, indexes_w)
    indexlen = len(indexes_w)
    while not iter.done():
        getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen,
                                           dtype=dtype, prefixlen=prefixlen)
        # prepare the index
        index_w = [None] * indexlen
        for i in range(indexlen):
            if iter.idx_w_i[i] is not None:
                # array index: read the current position from its iterator
                index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i])
            else:
                # plain index: constant for the whole loop
                index_w[i] = indexes_w[i]
        res.descr_setitem(space, space.newtuple(prefix_w[:prefixlen] +
                                                iter.get_index(space, shapelen)),
                          arr.descr_getitem(space, space.newtuple(index_w)))
        iter.next()
    return res
setitem_int_driver = jit.JitDriver(name = 'numpy_setitem_int',
                                   greens = ['shapelen', 'indexlen',
                                             'prefixlen', 'dtype'],
                                   reds = 'auto')
def setitem_array_int(space, arr, iter_shape, indexes_w, val_arr,
                      prefix_w):
    """Fancy (integer-array) indexing assignment: arr[indexes] = val_arr."""
    shapelen = len(iter_shape)
    indexlen = len(indexes_w)
    prefixlen = len(prefix_w)
    dtype = arr.get_dtype()
    iter = PureShapeIter(iter_shape, indexes_w)
    while not iter.done():
        setitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen,
                                           dtype=dtype, prefixlen=prefixlen)
        # prepare the index
        index_w = [None] * indexlen
        for i in range(indexlen):
            if iter.idx_w_i[i] is not None:
                index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i])
            else:
                index_w[i] = indexes_w[i]
        w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space,
                                                                    shapelen))
        if val_arr.is_scalar():
            w_value = val_arr.get_scalar_value()
        else:
            w_value = val_arr.descr_getitem(space, w_idx)
        arr.descr_setitem(space, space.newtuple(index_w), w_value)
        iter.next()
byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver',
                                greens = ['dtype'],
                                reds = 'auto')
def byteswap(from_, to):
    """Copy `from_` to `to` with each element's byte order swapped."""
    dtype = from_.dtype
    from_iter, from_state = from_.create_iter()
    to_iter, to_state = to.create_iter()
    while not from_iter.done(from_state):
        byteswap_driver.jit_merge_point(dtype=dtype)
        val = dtype.itemtype.byteswap(from_iter.getitem(from_state))
        to_iter.setitem(to_state, val)
        to_state = to_iter.next(to_state)
        from_state = from_iter.next(from_state)
choose_driver = jit.JitDriver(name='numpy_choose_driver',
                              greens = ['shapelen', 'mode', 'dtype'],
                              reds = 'auto')
def choose(space, arr, choices, shape, dtype, out, mode):
    """numpy.choose loop: out[i] = choices[arr[i]][i].

    Out-of-range selector values are handled per `mode`: RAISE raises
    ValueError, WRAP takes the index modulo the number of choices, CLIP
    clamps it to the valid range.
    """
    shapelen = len(shape)
    pairs = [a.create_iter(shape) for a in choices]
    iterators = [i[0] for i in pairs]
    states = [i[1] for i in pairs]
    arr_iter, arr_state = arr.create_iter(shape)
    out_iter, out_state = out.create_iter(shape)
    while not arr_iter.done(arr_state):
        choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype,
                                      mode=mode)
        index = support.index_w(space, arr_iter.getitem(arr_state))
        if index < 0 or index >= len(iterators):
            if mode == NPY.RAISE:
                raise OperationError(space.w_ValueError, space.wrap(
                    "invalid entry in choice array"))
            elif mode == NPY.WRAP:
                index = index % (len(iterators))
            else:
                assert mode == NPY.CLIP
                if index < 0:
                    index = 0
                else:
                    index = len(iterators) - 1
        val = iterators[index].getitem(states[index]).convert_to(space, dtype)
        out_iter.setitem(out_state, val)
        # all choice iterators advance in lockstep, not just the chosen one
        for i in range(len(iterators)):
            states[i] = iterators[i].next(states[i])
        out_state = out_iter.next(out_state)
        arr_state = arr_iter.next(arr_state)
clip_driver = jit.JitDriver(name='numpy_clip_driver',
                            greens = ['shapelen', 'dtype'],
                            reds = 'auto')
def clip(space, arr, shape, min, max, out):
    """numpy.clip loop: clamp each element of `arr` between `min` and `max`.

    Either bound may be None (but not both).  Note: the `min`/`max` parameter
    names shadow the builtins; kept for interface compatibility.
    """
    assert min or max
    arr_iter, arr_state = arr.create_iter(shape)
    if min is not None:
        min_iter, min_state = min.create_iter(shape)
    else:
        min_iter, min_state = None, None
    if max is not None:
        max_iter, max_state = max.create_iter(shape)
    else:
        max_iter, max_state = None, None
    out_iter, out_state = out.create_iter(shape)
    shapelen = len(shape)
    dtype = out.get_dtype()
    while not arr_iter.done(arr_state):
        clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype)
        w_v = arr_iter.getitem(arr_state).convert_to(space, dtype)
        arr_state = arr_iter.next(arr_state)
        if min_iter is not None:
            w_min = min_iter.getitem(min_state).convert_to(space, dtype)
            if dtype.itemtype.lt(w_v, w_min):
                w_v = w_min
            min_state = min_iter.next(min_state)
        if max_iter is not None:
            w_max = max_iter.getitem(max_state).convert_to(space, dtype)
            if dtype.itemtype.gt(w_v, w_max):
                w_v = w_max
            max_state = max_iter.next(max_state)
        out_iter.setitem(out_state, w_v)
        out_state = out_iter.next(out_state)
round_driver = jit.JitDriver(name='numpy_round_driver',
                             greens = ['shapelen', 'dtype'],
                             reds = 'auto')
def round(space, arr, dtype, shape, decimals, out):
    """Elementwise rounding to `decimals` places (shadows builtin round; kept)."""
    arr_iter, arr_state = arr.create_iter(shape)
    out_iter, out_state = out.create_iter(shape)
    shapelen = len(shape)
    while not arr_iter.done(arr_state):
        round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype)
        w_v = arr_iter.getitem(arr_state).convert_to(space, dtype)
        w_v = dtype.itemtype.round(w_v, decimals)
        out_iter.setitem(out_state, w_v)
        arr_state = arr_iter.next(arr_state)
        out_state = out_iter.next(out_state)
diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver',
                                       greens = ['axis1', 'axis2'],
                                       reds = 'auto')
def diagonal_simple(space, arr, out, offset, axis1, axis2, size):
    """Extract the (offset) diagonal of a 2-axis selection into `out`.

    Used for the simple case where the diagonal is a 1-d result of `size`
    elements: out[i] = arr[..., i (axis1), i+offset (axis2), ...].
    """
    out_iter, out_state = out.create_iter()
    i = 0
    index = [0] * 2
    while i < size:
        diagonal_simple_driver.jit_merge_point(axis1=axis1, axis2=axis2)
        index[axis1] = i
        index[axis2] = i + offset
        out_iter.setitem(out_state, arr.getitem_index(space, index))
        i += 1
        out_state = out_iter.next(out_state)
def diagonal_array(space, arr, out, offset, axis1, axis2, shape):
    """General diagonal extraction for >2-d arrays.

    Iterates the output shape; the last output index becomes the position
    along the diagonal and is spliced into the source index at axis1/axis2
    (with `offset` applied to the larger of the two axes).
    """
    out_iter, out_state = out.create_iter()
    iter = PureShapeIter(shape, [])
    shapelen_minus_1 = len(shape) - 1
    assert shapelen_minus_1 >= 0
    # a/b are the insertion points for the two diagonal indices after the
    # smaller axis position is removed
    if axis1 < axis2:
        a = axis1
        b = axis2 - 1
    else:
        a = axis2
        b = axis1 - 1
    assert a >= 0
    assert b >= 0
    while not iter.done():
        last_index = iter.indexes[-1]
        if axis1 < axis2:
            indexes = (iter.indexes[:a] + [last_index] +
                       iter.indexes[a:b] + [last_index + offset] +
                       iter.indexes[b:shapelen_minus_1])
        else:
            indexes = (iter.indexes[:a] + [last_index + offset] +
                       iter.indexes[a:b] + [last_index] +
                       iter.indexes[b:shapelen_minus_1])
        out_iter.setitem(out_state, arr.getitem_index(space, indexes))
        iter.next()
        out_state = out_iter.next(out_state)
def _new_binsearch(side, op_name):
    """Factory for numpy.searchsorted loops ('left' uses lt, 'right' uses le).

    The returned function binary-searches the sorted 1-d array `arr` for each
    key in `key`, writing insertion indices into `ret`.  When consecutive keys
    are non-decreasing, the previous search window is reused as an
    optimization (min_idx kept, max_idx grown by one) instead of restarting
    from the full range.
    """
    binsearch_driver = jit.JitDriver(name='numpy_binsearch_' + side,
                                     greens=['dtype'],
                                     reds='auto')
    def binsearch(space, arr, key, ret):
        assert len(arr.get_shape()) == 1
        dtype = key.get_dtype()
        op = getattr(dtype.itemtype, op_name)
        key_iter, key_state = key.create_iter()
        ret_iter, ret_state = ret.create_iter()
        ret_iter.track_index = False
        size = arr.get_size()
        min_idx = 0
        max_idx = size
        last_key_val = key_iter.getitem(key_state)
        while not key_iter.done(key_state):
            key_val = key_iter.getitem(key_state)
            if dtype.itemtype.lt(last_key_val, key_val):
                # keys increasing: keep min_idx, widen upper bound to full size
                max_idx = size
            else:
                # keys decreased: restart the window from scratch
                min_idx = 0
                max_idx = max_idx + 1 if max_idx < size else size
            last_key_val = key_val
            while min_idx < max_idx:
                binsearch_driver.jit_merge_point(dtype=dtype)
                mid_idx = min_idx + ((max_idx - min_idx) >> 1)
                mid_val = arr.getitem(space, [mid_idx]).convert_to(space, dtype)
                if op(mid_val, key_val):
                    min_idx = mid_idx + 1
                else:
                    max_idx = mid_idx
            ret_iter.setitem(ret_state, ret.get_dtype().box(min_idx))
            ret_state = ret_iter.next(ret_state)
            key_state = key_iter.next(key_state)
    return binsearch
binsearch_left = _new_binsearch('left', 'lt')
binsearch_right = _new_binsearch('right', 'le')
| 40.793706 | 93 | 0.60264 |
4cd595d8bc663e599f28ad64abd48dcd6b450dc5 | 1,783 | py | Python | froide/account/oauth_urls.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | froide/account/oauth_urls.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | froide/account/oauth_urls.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | from django.urls import path
from django.http import HttpResponseRedirect
from oauth2_provider.views import (AuthorizationView, TokenView,
ApplicationList, ApplicationRegistration, ApplicationDetail,
ApplicationDelete, ApplicationUpdate)
class CustomAuthorizationView(AuthorizationView):
    """OAuth2 authorization view that silently approves trusted applications.

    If the requesting application is allowed to auto-approve the requested
    scopes, the consent form is skipped entirely and the user is redirected
    straight back with an authorization grant; otherwise the normal consent
    template is rendered.
    """

    def render_to_response(self, context, **kwargs):
        app = context.get('application')
        requested_scopes = context.get('scopes')
        if app is not None and app.can_auto_approve(requested_scopes):
            # Issue the grant immediately — no consent form needed.
            redirect_uri, _headers, _body, _status = self.create_authorization_response(
                request=self.request,
                scopes=" ".join(requested_scopes),
                credentials=context,
                allow=True,
            )
            return HttpResponseRedirect(redirect_uri)
        # Keep the oauthlib request object available under another key and
        # expose the Django request under 'request' for the template.
        context['oauth_request'] = context.get('request')
        context['request'] = self.request
        return super().render_to_response(context, **kwargs)
# OAuth2 provider endpoints: user-facing authorization plus token exchange.
urlpatterns = [
    path('authorize/', CustomAuthorizationView.as_view(), name="authorize"),
    path('token/', TokenView.as_view(), name="token"),
]


class CustomApplicationUpdate(ApplicationUpdate):
    # Restrict the editable fields of an OAuth application to this whitelist.
    fields = ['name', 'redirect_uris', 'description', 'homepage', 'image_url']


# Application management views
app_name = 'account'

urlpatterns += [
    path('applications/', ApplicationList.as_view(), name="list"),
    path('applications/register/', ApplicationRegistration.as_view(), name="register"),
    path('applications/<int:pk>/', ApplicationDetail.as_view(), name="detail"),
    path('applications/<int:pk>/delete/', ApplicationDelete.as_view(), name="delete"),
    path('applications/<int:pk>/update/', CustomApplicationUpdate.as_view(), name="update"),
]
| 41.465116 | 92 | 0.685362 |
525214804e4de2a979b6d5bab9bc4c11a5566d55 | 3,458 | py | Python | test/test_basic.py | kaelzhang/stock-pandas | 6b31a3bc24b37a113d610ab2ab3eaf2f6412f5f2 | [
"MIT"
] | 100 | 2020-03-13T15:23:04.000Z | 2022-03-23T12:35:06.000Z | test/test_basic.py | kaelzhang/stock-pandas | 6b31a3bc24b37a113d610ab2ab3eaf2f6412f5f2 | [
"MIT"
] | 25 | 2020-03-13T16:09:21.000Z | 2021-08-13T17:17:02.000Z | test/test_basic.py | kaelzhang/stock-pandas | 6b31a3bc24b37a113d610ab2ab3eaf2f6412f5f2 | [
"MIT"
] | 12 | 2020-04-03T07:19:05.000Z | 2022-02-09T07:24:45.000Z | import numpy as np
import itertools
import pytest
import pandas as pd
from stock_pandas import (
StockDataFrame,
directive_stringify
)
from .common import (
simple_list,
create_stock,
get_stock_update,
get_tencent
)
@pytest.fixture
def stock():
    # Fresh small StockDataFrame for each test (see common.create_stock).
    return create_stock()


def test_directive_stringify(stock: StockDataFrame):
    # Both the bound method and the module-level function should expand a
    # directive name into its canonical form with default arguments.
    assert stock.directive_stringify('boll') == 'boll:20,close'
    assert directive_stringify('boll') == 'boll:20,close'


def test_get_column(stock):
    # Rename the OHLC columns so the lowercase names no longer exist.
    stock = stock.rename(columns={
        'open': 'Open',
        'close': 'Close',
        'high': 'High',
        'low': 'Low'
    })
    # Without an alias, both direct access and directive execution fail.
    with pytest.raises(
        KeyError,
        match='column "close" not found'
    ):
        stock.get_column('close')
    with pytest.raises(
        KeyError,
        match='column "close" not found'
    ):
        stock['ma:20']
    stock.alias('close', 'Close')
    # get_column should apply alias
    stock.get_column('close')


def test_astype(stock):
    # astype must preserve the StockDataFrame subclass, not degrade to a
    # plain pandas DataFrame.
    stock = stock.astype({
        'open': 'float',
        'close': 'float'
    })
    assert isinstance(stock, StockDataFrame)
    open0 = stock.iloc[0]['open']
    assert isinstance(open0, float)


def test_indexing_by_callable(stock):
    # Callable indexers are delegated to pandas and return a Series.
    assert isinstance(stock[lambda df: 'open'], pd.Series)


def test_ma(stock):
    stock.alias('Open', 'open')
    # 2-period moving average: first value has no window, hence NaN.
    ma = stock['ma:2']
    stock = StockDataFrame(stock)
    list_ma0 = [3.5, 4.5, 5.5, 6.5, 7.5]
    assert np.isnan(ma[0])
    assert list(ma[1:]) == list_ma0
    new = get_stock_update()
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — confirm
    # the pinned pandas version still supports this call.
    stock = stock.append(new, ignore_index=True)
    assert isinstance(stock, StockDataFrame)
    ma2 = stock.exec('ma:2')
    assert np.isnan(ma2[0])
    # Appending one row extends the series by one new average value.
    assert list(ma2[1:]) == [*list_ma0, 8.5]
    # The alias registered before the copy/append must survive.
    assert stock['Open'][0] == 2
# Directive templates whose period argument is filled in by the test below.
COMMANDS = [
    'ma:{}',
    'macd.signal',
    'rsi:{}'
]


def test_period_larger_than_size(stock):
    # A window longer than the frame can never be filled, so every output
    # value must be NaN for every indicator.
    period = len(stock) + 1
    for command in COMMANDS:
        directive_str = command.format(period)
        result = stock.exec(directive_str)
        assert np.all(
            np.isnan(result)
        ), directive_str


def test_aliases(stock):
    stock.alias('Open', 'open')
    assert list(stock['Open']) == simple_list
    # Aliases survive DataFrame operations that return a new frame.
    dropped = stock.drop(columns=['close'])
    assert list(dropped['Open']) == simple_list
    # Aliasing to a missing column, or shadowing an existing one, is an error.
    with pytest.raises(ValueError, match='not exists'):
        stock.alias('some_column', 'not-exists')
    with pytest.raises(ValueError, match='already exists'):
        stock.alias('open', 'close')


def test_invalid_indexing(stock):
    # A list of non-column labels is rejected with a KeyError.
    with pytest.raises(KeyError, match='None'):
        stock[[1]]


def test_multi_index():
    # StockDataFrame does not support MultiIndex columns and must say so.
    tuples = list(itertools.product(
        ['foo', 'bar'],
        ['one', 'two']
    ))
    columns = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
    with pytest.raises(ValueError, match='MultiIndex'):
        StockDataFrame(
            np.random.randn(3, 4),
            index=['A', 'B', 'C'],
            columns=columns
        )


def test_date_col_pollution_issue_21():
    # Regression test for issue #21: without copy=True the constructor
    # consumes the date column from the caller's frame...
    csv = get_tencent(stock=False)
    StockDataFrame(csv, date_col='time_key')
    with pytest.raises(KeyError, match='time_key'):
        csv['time_key']
    # ...while copy=True must leave the original frame untouched.
    csv = get_tencent(stock=False)
    StockDataFrame(csv, date_col='time_key', copy=True)
    try:
        csv['time_key']
    except Exception as e:
        raise RuntimeError(f'date_col should not change the original dataframe, error: {e}')
| 20.706587 | 92 | 0.617409 |
d8f7bbee34e26feec651d58bad020608cdebd449 | 998 | py | Python | tests/test_io.py | thejonaslab/tinygraph | f1638168ed084dbb0515cafbf69282b38c4b5810 | [
"BSD-3-Clause"
] | null | null | null | tests/test_io.py | thejonaslab/tinygraph | f1638168ed084dbb0515cafbf69282b38c4b5810 | [
"BSD-3-Clause"
] | 16 | 2021-01-13T15:34:26.000Z | 2021-05-14T12:27:56.000Z | tests/test_io.py | thejonaslab/tinygraph | f1638168ed084dbb0515cafbf69282b38c4b5810 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import networkx
import tinygraph as tg
import pytest
import graph_test_suite
import io
# Full collection of test graphs, keyed by suite name.
suite = graph_test_suite.get_full_suite()


@pytest.mark.parametrize("test_name", [k for k in suite.keys()])
def test_binary(test_name):
    """
    Test the conversion to and from binary.
    """
    # Round-trip every graph in the suite through an in-memory buffer and
    # require structural equality afterwards.
    for g in suite[test_name]:
        outbuf = io.BytesIO()
        tg.io.to_binary(g, outbuf)
        s = outbuf.getvalue()
        inbuf = io.BytesIO(s)
        new_g = tg.io.from_binary(inbuf)
        assert tg.util.graph_equality(g, new_g)


def test_binary_graph_props():
    # Graph-level properties of mixed types (str, int, float, list and a
    # nested dict) must survive a binary round trip.
    g1 = tg.TinyGraph(10)
    g1.props['foo'] = 'hello'
    g1.props['bar'] = 100
    g1.props['baz'] = 100.0
    g1.props['quxx'] = [1, 2, 3]
    g1.props['quxxx'] = {'a' : 1, 'b' : 'foo', 'c': [4, 5,6]}
    outbuf = io.BytesIO()
    tg.io.to_binary(g1, outbuf)
    s = outbuf.getvalue()
    inbuf = io.BytesIO(s)
    new_g = tg.io.from_binary(inbuf)
    assert tg.util.graph_equality(g1, new_g)
| 22.177778 | 64 | 0.612224 |
9c259817150b96ec872c274799eb730edcc09e04 | 16,640 | py | Python | brozzler/ydl.py | galgeek/brozzler | 040a942ef261a81cf1f13365a1d916a3f748fa98 | [
"Apache-2.0"
] | null | null | null | brozzler/ydl.py | galgeek/brozzler | 040a942ef261a81cf1f13365a1d916a3f748fa98 | [
"Apache-2.0"
] | null | null | null | brozzler/ydl.py | galgeek/brozzler | 040a942ef261a81cf1f13365a1d916a3f748fa98 | [
"Apache-2.0"
] | 1 | 2021-06-11T11:25:49.000Z | 2021-06-11T11:25:49.000Z | '''
brozzler/ydl.py - youtube-dl support for brozzler
Copyright (C) 2020 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import youtube_dl
import brozzler
import urllib.request
import tempfile
import urlcanon
import os
import json
import doublethink
import datetime
import threading
# Per-thread storage for the fragment-download callback installed below.
thread_local = threading.local()

_orig__finish_frag_download = youtube_dl.downloader.fragment.FragmentFD._finish_frag_download
def _finish_frag_download(ffd_self, ctx):
    '''
    We monkey-patch this youtube-dl internal method `_finish_frag_download()`
    because it gets called after downloading the last segment of a segmented
    video, which is a good time to upload the stitched-up video that youtube-dl
    creates for us to warcprox. We have it call a thread-local callback
    since different threads may be youtube-dl'ing at the same time.
    '''
    result = _orig__finish_frag_download(ffd_self, ctx)
    if hasattr(thread_local, 'finish_frag_download_callback'):
        thread_local.finish_frag_download_callback(ffd_self, ctx)
    return result
youtube_dl.downloader.fragment.FragmentFD._finish_frag_download = _finish_frag_download

# Second monkey-patch: cap the amount of page content the generic extractor
# will sniff, so huge pages don't stall or blow up extraction.
_orig_webpage_read_content = youtube_dl.extractor.generic.GenericIE._webpage_read_content
def _webpage_read_content(self, *args, **kwargs):
    content = _orig_webpage_read_content(self, *args, **kwargs)
    # 20 MB of characters is far beyond any page worth sniffing for media.
    if len(content) > 20000000:
        logging.warning(
                'bypassing youtube-dl extraction because content is '
                'too large (%s characters)', len(content))
        return ''
    return content
youtube_dl.extractor.generic.GenericIE._webpage_read_content = _webpage_read_content
class ExtraHeaderAdder(urllib.request.BaseHandler):
    """urllib handler that stamps a fixed set of extra headers onto every
    outgoing http/https request, without overriding headers the request
    already carries."""

    def __init__(self, extra_headers):
        self.extra_headers = extra_headers
        # OpenerDirector dispatches by "<scheme>_request" attribute name;
        # route both schemes to the same implementation.
        self.http_request = self._http_request
        self.https_request = self._http_request

    def _http_request(self, req):
        for name, value in self.extra_headers.items():
            # Request.headers stores keys via str.capitalize(), so probe
            # with the same normalization to detect an existing header.
            already_present = name.capitalize() in req.headers
            if not already_present:
                req.add_header(name, value)
        return req
class YoutubeDLSpy(urllib.request.BaseHandler):
    """urllib handler that passively records basic facts (url, method,
    status code, headers) about every http/https response it sees.

    Records accumulate in `self.fetches` until `reset()` is called.
    """

    logger = logging.getLogger(__module__ + "." + __qualname__)

    def __init__(self):
        self.reset()

    def _http_response(self, request, response):
        self.fetches.append({
            'url': request.full_url,
            'method': request.get_method(),
            'response_code': response.code,
            'response_headers': response.headers,
        })
        # Pass the response through unchanged — this handler only observes.
        return response

    # OpenerDirector looks up handlers by these names; both schemes share
    # the same recording implementation.
    http_response = https_response = _http_response

    def reset(self):
        """Forget everything recorded so far."""
        self.fetches = []
def final_bounces(fetches, url):
    """
    Follow the redirect chain starting at `url` through `fetches` and return
    the fetch records whose url is the chain's final destination. More than
    one record can match, e.g. when the same final url was hit with both a
    HEAD and a GET request.
    """
    # Index every redirecting fetch by its url.
    # XXX check http status 301,302,303,307? check for "uri" header
    # as well as "location"? see urllib.request.HTTPRedirectHandler
    redirects = {
        fetch['url']: fetch
        for fetch in fetches
        if 'location' in fetch['response_headers']
    }

    final_url = url
    while final_url in redirects:
        # pop() removes visited hops, so a redirect loop cannot spin forever.
        hop = redirects.pop(final_url)
        final_url = urllib.parse.urljoin(
            hop['url'], hop['response_headers']['location'])

    return [fetch for fetch in fetches if fetch['url'] == final_url]
def _build_youtube_dl(worker, destdir, site):
    '''
    Builds a `youtube_dl.YoutubeDL` for brozzling `site` with `worker`.
    The `YoutubeDL` instance does a few special brozzler-specific things:
    - keeps track of urls fetched using a `YoutubeDLSpy`
    - periodically updates `site.last_claimed` in rethinkdb
    - if brozzling through warcprox and downloading segmented videos (e.g.
      HLS), pushes the stitched-up video created by youtube-dl to warcprox
      using a WARCPROX_WRITE_RECORD request
    - some logging
    Args:
        worker (brozzler.BrozzlerWorker): the calling brozzler worker
        destdir (str): where to save downloaded videos
        site (brozzler.Site): the site we are brozzling
    Returns:
        a `youtube_dl.YoutubeDL` instance
    '''
    # Subclass capturing `worker` and `site` from the enclosing scope.
    class _YoutubeDL(youtube_dl.YoutubeDL):
        logger = logging.getLogger(__module__ + "." + __qualname__)

        def urlopen(self, req):
            # `req` may be a Request object or a bare url string.
            try:
                url = req.full_url
            except AttributeError:
                url = req
            self.logger.debug('fetching %r', url)
            return super().urlopen(req)

        def add_default_extra_info(self, ie_result, ie, url):
            # hook in some logging
            super().add_default_extra_info(ie_result, ie, url)
            if ie_result.get('_type') == 'playlist':
                self.logger.info(
                        'extractor %r found playlist in %s', ie.IE_NAME, url)
                if ie.IE_NAME in {'youtube:playlist', 'youtube:tab', 'soundcloud:user', 'instagram:user'}:
                    # At this point ie_result['entries'] is an iterator that
                    # will fetch more metadata from youtube to list all the
                    # videos. We unroll that iterator here partly because
                    # otherwise `process_ie_result()` will clobber it, and we
                    # use it later to extract the watch pages as outlinks.
                    try:
                        ie_result['entries_no_dl'] = list(ie_result['entries'])
                    except Exception as e:
                        self.logger.warning(
                                "failed to unroll ie_result['entries']? for %s, %s; exception %s",
                                ie.IE_NAME, url, e)
                        ie_result['entries_no_dl'] =[]
                    ie_result['entries'] = []
                    self.logger.info(
                            'not downloading %s media files from this '
                            'playlist because we expect to capture them from '
                            'individual watch/track/detail pages',
                            len(ie_result['entries_no_dl']))
            else:
                self.logger.info(
                        'extractor %r found a download in %s', ie.IE_NAME, url)

        def _push_stitched_up_vid_to_warcprox(self, site, info_dict, ctx):
            # Upload the file youtube-dl stitched together from segments as a
            # WARC resource record.
            # XXX Don't know how to get the right content-type. Youtube-dl
            # doesn't supply it. Sometimes (with --hls-prefer-native)
            # youtube-dl produces a stitched-up video that /usr/bin/file fails
            # to identify (says "application/octet-stream"). `ffprobe` doesn't
            # give us a mimetype.
            if info_dict.get('ext') == 'mp4':
                mimetype = 'video/mp4'
            else:
                try:
                    import magic
                    mimetype = magic.from_file(ctx['filename'], mime=True)
                except ImportError as e:
                    # python-magic is optional; fall back to guessing from
                    # the container extension.
                    mimetype = 'video/%s' % info_dict['ext']
                    self.logger.warning(
                            'guessing mimetype %s because %r', mimetype, e)
            url = 'youtube-dl:%05d:%s' % (
                    info_dict.get('playlist_index') or 1,
                    info_dict['webpage_url'])
            size = os.path.getsize(ctx['filename'])
            self.logger.info(
                    'pushing %r video stitched-up as %s (%s bytes) to '
                    'warcprox at %s with url %s', info_dict['format'],
                    mimetype, size, worker._proxy_for(site), url)
            with open(ctx['filename'], 'rb') as f:
                # include content-length header to avoid chunked
                # transfer, which warcprox currently rejects
                extra_headers = dict(site.extra_headers())
                extra_headers['content-length'] = size
                request, response = worker._warcprox_write_record(
                        warcprox_address=worker._proxy_for(site), url=url,
                        warc_type='resource', content_type=mimetype, payload=f,
                        extra_headers=extra_headers)
                # consulted by _remember_videos()
                self.stitch_ups.append({
                    'url': url,
                    'response_code': response.code,
                    'content-type': mimetype,
                    'content-length': size,
                })

        def process_info(self, info_dict):
            '''
            See comment above on `_finish_frag_download()`
            '''
            def ffd_callback(ffd_self, ctx):
                if worker._using_warcprox(site):
                    self._push_stitched_up_vid_to_warcprox(site, info_dict, ctx)
            try:
                # Install the callback only for the duration of this download.
                thread_local.finish_frag_download_callback = ffd_callback
                return super().process_info(info_dict)
            finally:
                delattr(thread_local, 'finish_frag_download_callback')

    def maybe_heartbeat_site_last_claimed(*args, **kwargs):
        # in case youtube-dl takes a long time, heartbeat site.last_claimed
        # to prevent another brozzler-worker from claiming the site
        try:
            if site.rr and doublethink.utcnow() - site.last_claimed > datetime.timedelta(minutes=worker.SITE_SESSION_MINUTES):
                worker.logger.debug(
                        'heartbeating site.last_claimed to prevent another '
                        'brozzler-worker claiming this site id=%r', site.id)
                site.last_claimed = doublethink.utcnow()
                site.save()
        except:
            # best-effort: a failed heartbeat must never abort the download
            worker.logger.debug(
                    'problem heartbeating site.last_claimed site id=%r',
                    site.id, exc_info=True)

    ydl_opts = {
        "outtmpl": "{}/ydl%(autonumber)s.out".format(destdir),
        "retries": 1,
        "nocheckcertificate": True,
        "hls_prefer_native": True,
        "noplaylist": True,
        "noprogress": True,
        "nopart": True,
        "no_color": True,
        "progress_hooks": [maybe_heartbeat_site_last_claimed],
        # https://github.com/rg3/youtube-dl/blob/master/README.md#format-selection
        # "best: Select the best quality format represented by a single
        # file with video and audio."
        "format": "best/bestvideo+bestaudio",
        # --cache-dir local or...
        "cache_dir": False,
        ### we do our own logging
        # "logger": logging.getLogger("youtube_dl"),
        "verbose": False,
        "quiet": True,
    }
    if worker._proxy_for(site):
        ydl_opts["proxy"] = "http://{}".format(worker._proxy_for(site))
    ydl = _YoutubeDL(ydl_opts)
    if site.extra_headers():
        ydl._opener.add_handler(ExtraHeaderAdder(site.extra_headers()))
    # Attach the fetch recorder and the stitch-up ledger to the instance so
    # callers (do_youtube_dl) can read them after extraction.
    ydl.fetch_spy = YoutubeDLSpy()
    ydl.stitch_ups = []
    ydl._opener.add_handler(ydl.fetch_spy)
    return ydl
def _remember_videos(page, fetches, stitch_ups=None):
    '''
    Saves info about videos captured by youtube-dl in `page.videos`.

    Args:
        page (brozzler.Page): the page being brozzled; `page.videos` is
            created if missing and appended to in place
        fetches (list): fetch info dicts recorded by `YoutubeDLSpy`, or None
        stitch_ups (list): info dicts about stitched-up videos pushed to
            warcprox, or None
    '''
    # was: `if not 'videos' in page` (non-idiomatic, flagged by E713)
    if 'videos' not in page:
        page.videos = []
    for fetch in fetches or []:
        content_type = fetch['response_headers'].get_content_type()
        # Only remember successful (200/206) GETs of actual video payloads.
        if (content_type.startswith('video/')
                # skip manifests of DASH segmented video -
                # see https://github.com/internetarchive/brozzler/pull/70
                and content_type != 'video/vnd.mpeg.dash.mpd'
                and fetch['method'] == 'GET'
                and fetch['response_code'] in (200, 206)):
            video = {
                'blame': 'youtube-dl',
                'url': fetch['url'],
                'response_code': fetch['response_code'],
                'content-type': content_type,
            }
            if 'content-length' in fetch['response_headers']:
                video['content-length'] = int(
                        fetch['response_headers']['content-length'])
            if 'content-range' in fetch['response_headers']:
                video['content-range'] = fetch[
                        'response_headers']['content-range']
            logging.debug('embedded video %s', video)
            page.videos.append(video)
    for stitch_up in stitch_ups or []:
        # Stitch-up records were built by us, so their fields are trusted.
        if stitch_up['content-type'].startswith('video/'):
            video = {
                'blame': 'youtube-dl',
                'url': stitch_up['url'],
                'response_code': stitch_up['response_code'],
                'content-type': stitch_up['content-type'],
                'content-length': stitch_up['content-length'],
            }
            logging.debug('embedded video %s', video)
            page.videos.append(video)
def _try_youtube_dl(worker, ydl, site, page):
    '''
    Runs `ydl.extract_info()` on `page.url` and translates youtube-dl
    failures into brozzler exceptions.

    Returns:
        the youtube-dl `ie_result` dict, or None when no youtube-dl
        extractor supports the url

    Raises:
        brozzler.ReachedLimit: when the response was HTTP 420
        brozzler.ProxyError: on apparent proxy connection trouble
        brozzler.ShutdownRequested: passed through untouched
    '''
    try:
        logging.info("trying youtube-dl on %s", page)
        with brozzler.thread_accept_exceptions():
            # we do whatwg canonicalization here to avoid "<urlopen error
            # no host given>" resulting in ProxyError
            # needs automated test
            ie_result = ydl.extract_info(str(urlcanon.whatwg(page.url)))
        _remember_videos(page, ydl.fetch_spy.fetches, ydl.stitch_ups)
        if worker._using_warcprox(site):
            # Archive the extraction metadata itself as a WARC metadata record.
            info_json = json.dumps(ie_result, sort_keys=True, indent=4)
            logging.info(
                    "sending WARCPROX_WRITE_RECORD request to warcprox "
                    "with youtube-dl json for %s", page)
            worker._warcprox_write_record(
                    warcprox_address=worker._proxy_for(site),
                    url="youtube-dl:%s" % str(urlcanon.semantic(page.url)),
                    warc_type="metadata",
                    content_type="application/vnd.youtube-dl_formats+json;charset=utf-8",
                    payload=info_json.encode("utf-8"),
                    extra_headers=site.extra_headers())
        return ie_result
    except brozzler.ShutdownRequested as e:
        raise
    except Exception as e:
        # youtube-dl wraps underlying errors; `exc_info` carries the original
        # exception (type, value, traceback) when present.
        if hasattr(e, "exc_info") and e.exc_info[0] == youtube_dl.utils.UnsupportedError:
            # url simply isn't something youtube-dl knows how to handle
            return None
        elif (hasattr(e, "exc_info")
                and e.exc_info[0] == urllib.error.HTTPError
                and hasattr(e.exc_info[1], "code")
                and e.exc_info[1].code == 420):
            # 420 is warcprox's "limit reached" signal
            raise brozzler.ReachedLimit(e.exc_info[1])
        elif (hasattr(e, 'exc_info')
                and e.exc_info[0] == urllib.error.URLError
                and worker._proxy_for(site)):
            # connection problem when using a proxy == proxy error (XXX?)
            raise brozzler.ProxyError(
                    'youtube-dl hit apparent proxy error from '
                    '%s' % page.url) from e
        else:
            raise
def do_youtube_dl(worker, site, page):
    '''
    Runs youtube-dl configured for `worker` and `site` to download videos from
    `page`.
    Args:
        worker (brozzler.BrozzlerWorker): the calling brozzler worker
        site (brozzler.Site): the site we are brozzling
        page (brozzler.Page): the page we are brozzling
    Returns:
        tuple with two entries:
            `list` of `dict`: with info about urls fetched:
                [{
                    'url': ...,
                    'method': ...,
                    'response_code': ...,
                    'response_headers': ...,
                }, ...]
            `list` of `str`: outlink urls
    '''
    # Downloads go to a throwaway directory that is removed when done —
    # the archived copies live in warcprox, not on local disk.
    with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:
        ydl = _build_youtube_dl(worker, tempdir, site)
        ie_result = _try_youtube_dl(worker, ydl, site, page)
        outlinks = set()
        if ie_result and ie_result.get('extractor') == 'youtube:playlist':
            # youtube watch pages as outlinks
            outlinks = {'https://www.youtube.com/watch?v=%s' % e['id']
                        for e in ie_result.get('entries_no_dl', [])}
        # any outlinks for other cases?
        return ydl.fetch_spy.fetches, outlinks
| 41.809045 | 126 | 0.592728 |
32172d19a040ba5a0f1c8a9c0669bfd71b60c0e9 | 413 | py | Python | lwlPackage/sms/SMSmain.py | 2892211452/myPackage | acadc6a2dcc36900e6e742745af95c1a60a37448 | [
"MIT"
] | null | null | null | lwlPackage/sms/SMSmain.py | 2892211452/myPackage | acadc6a2dcc36900e6e742745af95c1a60a37448 | [
"MIT"
] | null | null | null | lwlPackage/sms/SMSmain.py | 2892211452/myPackage | acadc6a2dcc36900e6e742745af95c1a60a37448 | [
"MIT"
] | null | null | null | from twilio.rest import Client
account_sid = 'AC988415bd476b4abc248b4afaa8bc6717'
auth_token = '3b62cb9653d077b61f0d1f50bc06e718'
client = Client(account_sid, auth_token)
message = client.messages.create(
from_='+13343423628',
body='asdf',
to='+8617742566640'
)
print(message.sid) | 29.5 | 53 | 0.549637 |
9aef12f9cff394567093bf98b6ee5ff06d1a18a0 | 5,509 | py | Python | tools/stats/grouping.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | null | null | null | tools/stats/grouping.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 7 | 2016-12-07T22:19:37.000Z | 2019-01-30T15:04:26.000Z | tools/stats/grouping.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
# Guruprasad Ananda
# Refactored 2011 to use numpy instead of rpy, Kanwei Li
"""
This tool provides the SQL "group by" functionality.
"""
from __future__ import print_function
import random
import subprocess
import sys
import tempfile
from itertools import groupby
import numpy
def stop_err(msg):
    """Write *msg* to stderr and terminate the script.

    NOTE(review): exits with status 0 (``sys.exit()`` without an argument);
    Galaxy historically detects tool failure via stderr content — confirm
    before changing this to a nonzero exit code.
    """
    print(msg, end="", file=sys.stderr)
    sys.exit()
def mode(data):
    """Return the most frequent value(s) of *data* as a comma-joined string.

    Ties are all reported, in first-seen order (dicts preserve insertion
    order, so this matches the previous behavior exactly).

    Args:
        data: iterable of hashable values

    Returns:
        str: comma-separated string forms of the modal value(s); '' for
        empty input (previously ``max()`` raised ValueError on empty data)
    """
    counts = {}
    for x in data:
        counts[x] = counts.get(x, 0) + 1
    if not counts:
        # Aggregating zero rows should yield an empty result, not a crash.
        return ''
    maxcount = max(counts.values())
    return ','.join(str(x) for x in counts if counts[x] == maxcount)
def main():
    """Group a tab-separated file by one column and aggregate the others.

    Expected sys.argv layout (1-based):
        1: output file path
        2: input file path
        3: 1-based index of the group-by column
        4: 1 to ignore case when grouping, 0 otherwise
        5: comma-separated ASCII codes of line-start characters to filter
           out, or the literal string "None"
        6+: one "<op> <col> <yes|no>" triple per aggregation, where <op> is
            an aggregate name (mode/length/random/cat/cat_uniq/unique or a
            numpy function name), <col> is a 1-based column index, and
            yes/no controls rounding of numeric results
    """
    inputfile = sys.argv[2]
    ignorecase = int(sys.argv[4])
    ops = []
    cols = []
    round_val = []
    # Optionally strip lines whose first character is in a caller-supplied
    # blacklist (e.g. comment markers), writing a cleaned copy first.
    if sys.argv[5] != "None":
        asciitodelete = sys.argv[5]
        if asciitodelete:
            oldfile = open(inputfile, 'r')
            newinputfile = "input_cleaned.tsv"
            newfile = open(newinputfile, 'w')
            asciitodelete = asciitodelete.split(',')
            for i in range(len(asciitodelete)):
                asciitodelete[i] = chr(int(asciitodelete[i]))
            for line in oldfile:
                if line[0] not in asciitodelete:
                    newfile.write(line)
            oldfile.close()
            newfile.close()
            inputfile = newinputfile
    for var in sys.argv[6:]:
        op, col, do_round = var.split()
        ops.append(op)
        cols.append(col)
        round_val.append(do_round)
    """
    At this point, ops, cols and rounds will look something like this:
    ops: ['mean', 'min', 'c']
    cols: ['1', '3', '4']
    round_val: ['no', 'yes' 'no']
    """
    try:
        group_col = int(sys.argv[3]) - 1
    except:
        stop_err( "Group column not specified." )
    # Sort the input by the group column so itertools.groupby below sees
    # each group as one contiguous run of lines.
    tmpfile = tempfile.NamedTemporaryFile()
    try:
        """
        The -k option for the Posix sort command is as follows:
        -k, --key=POS1[,POS2]
        start a key at POS1, end it at POS2 (origin 1)
        In other words, column positions start at 1 rather than 0, so
        we need to add 1 to group_col.
        if POS2 is not specified, the newer versions of sort will consider the entire line for sorting. To prevent this, we set POS2=POS1.
        """
        case = ''
        if ignorecase == 1:
            case = '-f'
        command_line = "sort -t '	' %s -k%s,%s -o %s %s" % (case, group_col + 1, group_col + 1, tmpfile.name, inputfile)
    except Exception as exc:
        stop_err( 'Initialization error -> %s' % str(exc) )
    try:
        subprocess.check_output(command_line, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        stop_err( "Sorting input dataset resulted in error: %s: %s" % ( e.returncode, e.output ))
    fout = open(sys.argv[1], "w")
    def is_new_item(line):
        # groupby key function: the (optionally lowercased) group column value.
        try:
            item = line.strip().split("\t")[group_col]
        except IndexError:
            stop_err( "The following line didn't have %s columns: %s" % (group_col + 1, line) )
        if ignorecase == 1:
            return item.lower()
        return item
    for key, line_list in groupby(tmpfile, key=is_new_item):
        # One value accumulator per requested aggregation.
        op_vals = [ [] for _ in ops ]
        out_str = key
        for line in line_list:
            fields = line.strip().split("\t")
            for i, col in enumerate(cols):
                col = int(col) - 1  # cXX from galaxy is 1-based
                try:
                    val = fields[col].strip()
                    op_vals[i].append(val)
                except IndexError:
                    sys.stderr.write( 'Could not access the value for column %s on line: "%s". Make sure file is tab-delimited.\n' % (col + 1, line) )
                    sys.exit( 1 )
        # Generate string for each op for this group
        for i, op in enumerate( ops ):
            data = op_vals[i]
            rval = ""
            if op == "mode":
                rval = mode( data )
            elif op == "length":
                rval = len( data )
            elif op == "random":
                rval = random.choice(data)
            elif op in ['cat', 'cat_uniq']:
                if op == 'cat_uniq':
                    data = numpy.unique(data)
                rval = ','.join(data)
            elif op == "unique":
                rval = len( numpy.unique(data) )
            else:
                # some kind of numpy fn
                try:
                    data = [float(_) for _ in data]
                except ValueError:
                    sys.stderr.write( "Operation %s expected number values but got %s instead.\n" % (op, data) )
                    sys.exit( 1 )
                rval = getattr(numpy, op)( data )
                if round_val[i] == 'yes':
                    rval = int(round(rval))
                else:
                    rval = '%g' % rval
            out_str += "\t%s" % rval
        fout.write(out_str + "\n")
    # Generate a useful info message.
    msg = "--Group by c%d: " % (group_col + 1)
    for i, op in enumerate(ops):
        # Map internal op names to user-facing labels for the summary line.
        if op == 'cat':
            op = 'concat'
        elif op == 'cat_uniq':
            op = 'concat_distinct'
        elif op == 'length':
            op = 'count'
        elif op == 'unique':
            op = 'count_distinct'
        elif op == 'random':
            op = 'randomly_pick'
        msg += op + "[c" + cols[i] + "] "
    print(msg)
    fout.close()
    tmpfile.close()


if __name__ == "__main__":
    main()
| 30.776536 | 150 | 0.516609 |
1847e9c1eed0ea3d0bc425bd43e7174644f1ecb8 | 837 | py | Python | src/sentry/api/bases/integration.py | mlapkin/sentry | 83a852b7a2ff5154a9e65721e031582c4c3ca9c3 | [
"BSD-3-Clause"
] | 1 | 2018-12-04T12:57:00.000Z | 2018-12-04T12:57:00.000Z | src/sentry/api/bases/integration.py | mlapkin/sentry | 83a852b7a2ff5154a9e65721e031582c4c3ca9c3 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/bases/integration.py | mlapkin/sentry | 83a852b7a2ff5154a9e65721e031582c4c3ca9c3 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import six
import sys
import traceback
from rest_framework.response import Response
from sentry.utils.sdk import capture_exception
from .organization import OrganizationEndpoint, OrganizationPermission
class IntegrationEndpoint(OrganizationEndpoint):
    """Organization-scoped endpoint base for integration views.

    Converts upstream integration outages (exceptions carrying a 503 code)
    into a structured JSON error response that includes a Sentry event id.
    """

    permission_classes = (OrganizationPermission, )

    def handle_exception(self, request, exc):
        """Handle `exc` raised while processing `request`.

        Exceptions with ``code == 503`` (provider unavailable) are logged,
        reported, and returned as a 503 response containing the error id so
        clients can correlate. Everything else falls through to the default
        handling.
        """
        # Not every exception type has a `code` attribute; the previous
        # direct `exc.code` access raised AttributeError for those and
        # masked the real error. getattr() lets them fall through cleanly.
        if getattr(exc, 'code', None) == 503:
            sys.stderr.write(traceback.format_exc())
            event_id = capture_exception()
            context = {
                'detail': six.text_type(exc),
                'errorId': event_id,
            }
            response = Response(context, status=503)
            # Mark the response so DRF treats/renders it as an exception.
            response.exception = True
            return response
        return super(IntegrationEndpoint, self).handle_exception(request, exc)
| 29.892857 | 78 | 0.681004 |
a6b6b9953130146b91be50a618ecc012f1279846 | 2,426 | py | Python | airbyte-integrations/connectors/source-zendesk-chat/source_zendesk_chat/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 2 | 2022-03-02T13:46:05.000Z | 2022-03-05T12:31:28.000Z | airbyte-integrations/connectors/source-zendesk-chat/source_zendesk_chat/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 29 | 2021-10-07T17:20:29.000Z | 2021-12-27T13:07:09.000Z | airbyte-integrations/connectors/source-zendesk-chat/source_zendesk_chat/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 1 | 2022-03-11T06:21:24.000Z | 2022-03-11T06:21:24.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Any, Dict, List, Mapping, Tuple
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
from .streams import Accounts, Agents, AgentTimelines, Bans, Chats, Departments, Goals, Roles, RoutingSettings, Shortcuts, Skills, Triggers
class ZendeskAuthentication:
    """Builds the request authenticator from either config layout.

    Legacy configs keep ``access_token`` at the top level; newer OAuth2.0
    configs nest it under ``credentials.access_token``.
    """

    def __init__(self, config: Dict):
        self.config = config

    def get_auth(self) -> TokenAuthenticator:
        """Return a TokenAuthenticator for the configured access token."""
        # Prefer the legacy top-level token; fall back to the nested OAuth2.0
        # location when it is absent or empty.
        token = self.config.get("access_token") or self.config["credentials"]["access_token"]
        return TokenAuthenticator(token=token)
class SourceZendeskChat(AbstractSource):
    """Airbyte source connector for the Zendesk Chat API."""

    def check_connection(self, logger, config) -> Tuple[bool, any]:
        """Validate credentials by pulling one record from routing settings."""
        auth = ZendeskAuthentication(config).get_auth()
        try:
            probe = RoutingSettings(authenticator=auth)
            next(probe.read_records(sync_mode=SyncMode.full_refresh))
        except Exception as error:
            return False, f"Unable to connect to Zendesk Chat API with the provided credentials - {error}"
        return True, None

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Instantiate every supported stream with a shared authenticator."""
        auth = ZendeskAuthentication(config).get_auth()
        start_date = config["start_date"]
        return [
            Accounts(authenticator=auth),
            AgentTimelines(authenticator=auth, start_date=start_date),
            Agents(authenticator=auth),
            Bans(authenticator=auth),
            Chats(authenticator=auth, start_date=start_date),
            Departments(authenticator=auth),
            Goals(authenticator=auth),
            Roles(authenticator=auth),
            RoutingSettings(authenticator=auth),
            Shortcuts(authenticator=auth),
            Skills(authenticator=auth),
            Triggers(authenticator=auth),
        ]
| 40.433333 | 139 | 0.703215 |
fe2162f15f555adb9eabaf6aadeae0ecb5930f22 | 1,644 | py | Python | zerver/webhooks/papertrail/tests.py | zhoufeng1989/zulip | 74143a0801f76051ae30e8037b62205ffc4ccc61 | [
"Apache-2.0"
] | 4 | 2019-06-04T09:06:53.000Z | 2019-06-04T09:07:47.000Z | zerver/webhooks/papertrail/tests.py | 991rajat/zulip | 648a60baf63f9afade83148bd9ae1fc480510178 | [
"Apache-2.0"
] | 4 | 2020-06-06T00:51:42.000Z | 2022-02-10T21:38:40.000Z | zerver/webhooks/papertrail/tests.py | 991rajat/zulip | 648a60baf63f9afade83148bd9ae1fc480510178 | [
"Apache-2.0"
] | 1 | 2020-02-06T13:56:40.000Z | 2020-02-06T13:56:40.000Z |
from zerver.lib.test_classes import WebhookTestCase
class PapertrailHookTests(WebhookTestCase):
    # Zulip stream the webhook posts to, and the endpoint URL under test.
    STREAM_NAME = 'papertrail'
    URL_TEMPLATE = "/api/v1/external/papertrail?&api_key={api_key}&stream={stream}"
    FIXTURE_DIR_NAME = 'papertrail'

    def test_short_message(self) -> None:
        # A search with few matches renders every matched event verbatim.
        expected_topic = u"logs"
        expected_message = """
[Search for "Important stuff"](https://papertrailapp.com/searches/42) found **2** matches:
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body
```
May 18 20:30:02 - server1 - cron OR server1:
``` quote
A short event
```
""".strip()
        self.send_and_test_stream_message('short_post', expected_topic, expected_message,
                                          content_type="application/x-www-form-urlencoded")

    def test_long_message(self) -> None:
        # A search with many matches is truncated to the first few events
        # plus a "See more" link back to the Papertrail search.
        expected_topic = u"logs"
        expected_message = """
[Search for "Important stuff"](https://papertrailapp.com/searches/42) found **5** matches:
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 1
```
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 2
```
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 3
```
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 4
```
[See more](https://papertrailapp.com/searches/42)
""".strip()
        self.send_and_test_stream_message('long_post', expected_topic, expected_message,
                                          content_type="application/x-www-form-urlencoded")

    def get_body(self, fixture_name: str) -> str:
        # Load the named JSON fixture from the papertrail fixture directory.
        return self.webhook_fixture_data("papertrail", fixture_name, file_type="json")
| 29.357143 | 91 | 0.663625 |
b2337206b44c69cf6140e9c2bb25ad0ac12706fa | 983 | py | Python | tests/conftest.py | digirati-co-uk/drf-iiif-store | e7053ea197f6324a7efcb23aff67246634c84841 | [
"MIT"
] | null | null | null | tests/conftest.py | digirati-co-uk/drf-iiif-store | e7053ea197f6324a7efcb23aff67246634c84841 | [
"MIT"
] | null | null | null | tests/conftest.py | digirati-co-uk/drf-iiif-store | e7053ea197f6324a7efcb23aff67246634c84841 | [
"MIT"
] | null | null | null | import json
import pytest
import pathlib
from .utils import is_responsive_404
@pytest.fixture
def tests_dir():
return pathlib.Path(__file__).resolve().parent
@pytest.fixture(scope="session")
def docker_compose_file(pytestconfig):
    """Compose file used to start the containers under test."""
    suite_dir = pathlib.Path(__file__).resolve().parent
    return suite_dir / "docker-compose.test.yml"
@pytest.fixture(scope="session")
def http_service(docker_ip, docker_services):
    """
    Ensure that Django service is up and responsive.
    """
    # `port_for` maps the container-internal port to the bound host port.
    host_port = docker_services.port_for("test_container", 8000)
    base_url = "http://{}:{}".format(docker_ip, host_port)
    probe_url = f"{base_url}/missing"
    # The service is considered "up" once an unknown path returns a 404.
    docker_services.wait_until_responsive(
        timeout=300.0, pause=0.1, check=lambda: is_responsive_404(probe_url)
    )
    return base_url
@pytest.fixture
def test_iiif3_manifest(tests_dir):
    """Return the IIIF v3 'forager' manifest fixture parsed from JSON.

    Opens the file via a context manager so the handle is closed promptly;
    the original left the open file object to the garbage collector.
    """
    manifest_path = tests_dir / "fixtures/iiif3_forager.json"
    with manifest_path.open(encoding="utf-8") as manifest_file:
        return json.load(manifest_file)
| 25.205128 | 79 | 0.717192 |
7eedcf81040e18d16658a3677c2a45dab83a4d0c | 2,577 | py | Python | grr/proto/makefile.py | Dazbeni/grr | 5b49a83eba2f84e346a2b50d154264c190a24f08 | [
"Apache-2.0"
] | 1 | 2020-06-25T14:25:51.000Z | 2020-06-25T14:25:51.000Z | grr/proto/makefile.py | Dazbeni/grr | 5b49a83eba2f84e346a2b50d154264c190a24f08 | [
"Apache-2.0"
] | 44 | 2021-05-14T22:49:24.000Z | 2022-03-13T21:54:02.000Z | grr/proto/makefile.py | Dazbeni/grr | 5b49a83eba2f84e346a2b50d154264c190a24f08 | [
"Apache-2.0"
] | 1 | 2020-06-25T14:25:54.000Z | 2020-06-25T14:25:54.000Z | #!/usr/bin/env python
"""A script to prepare the source tree for building."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# This script must have no special requirements because it wont be able to
# import any GRR stuff until the protos are built.
import argparse
import os
import subprocess
import sys
# Command-line interface: a single flag that selects clean-up instead of build.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--clean",
    action="store_true",
    default=False,
    help="Clean compiled protos.")

args = parser.parse_args()

# Directory containing this script; all .proto files live beneath it.
ROOT = os.path.dirname(os.path.abspath(__file__))
def Clean():
  """Clean out compiled protos."""
  # Remove every generated _pb2 module (source and byte-compiled) under the
  # proto tree so the next build starts from scratch.
  for directory, _, names in os.walk(ROOT):
    for name in names:
      path = os.path.join(directory, name)
      if path.endswith(("_pb2.py", "_pb2.pyc")):
        os.unlink(path)
def MakeProto():
  """Make sure our protos have been compiled to python libraries."""
  # Start running from one directory above the grr directory which is found by
  # this scripts's location as __file__.
  cwd = os.path.dirname(os.path.abspath(__file__))

  # Find all the .proto files.
  protos_to_compile = []
  for (root, dirs, files) in os.walk(cwd):
    # Make sure an accidental .eggs cache is ignored.
    if ".eggs" in dirs:
      dirs.remove(".eggs")

    for filename in files:
      full_filename = os.path.join(root, filename)
      if full_filename.endswith(".proto"):
        proto_stat = os.stat(full_filename)

        pb2_path = full_filename.rsplit(".", 1)[0] + "_pb2.py"
        try:
          pb2_stat = os.stat(pb2_path)
          # Skip protos whose generated module is already newer than the
          # source; only stale or missing _pb2 files are recompiled.
          if pb2_stat.st_mtime >= proto_stat.st_mtime:
            continue

        except (OSError, IOError):
          # No generated module yet - this proto must be compiled.
          pass

        protos_to_compile.append(full_filename)

  if protos_to_compile:
    for proto in protos_to_compile:
      # Run protoc through the current interpreter so the build uses the
      # grpc_tools package installed in this environment.
      command = [
          sys.executable,
          "-m",
          "grpc_tools.protoc",
          # Write the python files next to the .proto files.
          "--python_out",
          ROOT,
          "--proto_path=%s" % ROOT,
          proto
      ]
      print(
          "Compiling %s with (cwd: %s): %s" % (proto, ROOT, " ".join(command)))

      # The protoc compiler is too dumb to deal with full paths - it expects a
      # relative path from the current working directory.
      subprocess.check_call(command, cwd=ROOT)
if __name__ == "__main__":
  # --clean deletes the previously generated _pb2 files instead of building.
  if args.clean:
    Clean()

  MakeProto()
| 28.01087 | 79 | 0.655413 |
b4e16af6f1b0364ee7af511ef0a18fad1a73eaac | 4,917 | py | Python | examples/TkPlot2.0.py | AndreIglesias/tkplot | 1c7ec2d9b72909a1785529d4b5cda574bb88ab6e | [
"MIT"
] | null | null | null | examples/TkPlot2.0.py | AndreIglesias/tkplot | 1c7ec2d9b72909a1785529d4b5cda574bb88ab6e | [
"MIT"
] | null | null | null | examples/TkPlot2.0.py | AndreIglesias/tkplot | 1c7ec2d9b72909a1785529d4b5cda574bb88ab6e | [
"MIT"
] | null | null | null | # ============================================================================================
# AUTHORS : QEDD & Ldar01
# PROGRAM : TkPlot.py
# DESCRIPTION : Library based on Tkinter.
# DATE : 12/7/17
# ============================================================================================
from math import *

# Import Tkinter under its Python 3 name. On Python 2 the module is called
# "Tkinter"; the original fallback did a bare `import Tkinter`, which neither
# bound the name `tkinter` (used as `tkinter.Frame` below) nor star-imported
# the widget names (Tk, Frame, Canvas, ...) used in __main__ — so the Python 2
# path could never work. Alias and star-import to fix both, and catch only
# ImportError instead of everything.
try:
    import tkinter
    from tkinter import *
except ImportError:
    import Tkinter as tkinter
    from Tkinter import *
class CPlot(tkinter.Frame):
    """
    A Tkinter Frame that plots y = f(x) curves on an embedded Canvas.

    The function string ``fx`` uses "^" for powers and is normalised to
    Python syntax, then evaluated with eval() at each sample point.
    """

    def __init__(self, window = None, fg = 'black', fx = "x^2", scrollregion = None, yaxe = None, xaxe = None,
                 bg = None, wfg = 1, width = None, height = None, interval = (-50, 50), precision = 0.1, **kwargs):
        """
        :param fx: expression in ``x`` to plot (e.g. "x^2"; "^" means power)
        :param fg: curve colour; ``wfg`` is the curve line width
        :param interval: (min, max) x range, sampled with step ``precision``
        :param yaxe, xaxe: pixel positions of the y/x axes; default to the
            widget centre (requires numeric width/height in that case)
        """
        super(CPlot, self).__init__(master = window, bg = bg, width = width, height = height, **kwargs)
        self.window = window
        self.fx = fx
        self.fg = fg
        # Translate "^" to "**" and insert "*" for the implicit
        # multiplication ")(", so eval() can handle the expression.
        self.fx = (self.fx.replace("^", "**")).replace(")(", ")*(")
        self.interval = interval
        self.precision = precision
        self.scrollregion = scrollregion
        self.yaxe = yaxe
        self.xaxe = xaxe
        self.wfg = wfg
        # Default axes to the widget centre when not given explicitly.
        if yaxe == None: self.yaxe = (width)/2
        if xaxe == None: self.xaxe = (height)/2
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.canvas = tkinter.Canvas(self, bd=0, highlightthickness=0
            , scrollregion = self.scrollregion, bg = bg, height = height, width = width)
        self.canvas.pack()
        self.canvas.bind('<Motion>', self.motion)
        # Canvas item ids of the crosshair lines that follow the mouse.
        self.xmouse = None
        self.ymouse = None
        # Live coordinate read-out placed in the bottom-right corner.
        self.coordinates = tkinter.Label(self.canvas, text = '(x = None, y = None)', font = ('Arial', 12), bg = bg)
        self.coordinates.place(x = int(self.canvas['width'])-155, y = int(self.canvas['height'])-30)
        '''
        self.vScroll = tkinter.Scrollbar(self.window, orient = 'vertical', command = self.canvas.yview)
        self.vScroll.place(x = 100, y = 100)#grid(row = 0, column = 1, sticky = 'ns')
        self.canvas.configure(yscrollcommand = self.vScroll.set)
        '''
        self.Pgrid((self.yaxe, self.xaxe))
        x, y = self.plot(self.interval, self.precision, fx = self.fx, color = self.fg, wfg = self.wfg)

    def motion(self, event = None):
        """Redraw the mouse crosshair and refresh the coordinate read-out."""
        # Delete the previous crosshair lines; ignore errors when they
        # have not been created yet (first motion event).
        try:
            self.canvas.delete(self.xmouse)
        except: pass
        try:
            self.canvas.delete(self.ymouse)
        except: pass
        self.xmouse = self.canvas.create_line(0, event.y, self.canvas['width'], event.y, fill = 'gray')
        self.ymouse = self.canvas.create_line(event.x, 0, event.x, self.canvas['height'], fill = 'gray')
        # Convert widget pixels to plot coordinates (plot y axis points up).
        self.coordinates.config(text = '(x = {0}, y = {1})'.format(event.x - self.yaxe, -1*event.y + self.xaxe))

    def Pgrid(self, center):
        """Draw the light-gray background grid plus axis tick dots."""
        dist = 10
        for i in range(100):
            # Alternate between horizontal and vertical grid lines.
            if i%2 == 1:
                self.canvas.create_line(0, i*dist, int(self.canvas['width']), i*dist, fill = 'gray85')
                self.canvas.create_oval(center[0] - 2, i*dist - 2, center[0] + 2, i*dist + 2, fill = 'black')
            else:
                self.canvas.create_line(i*dist, 0, i*dist, int(self.canvas['height']), fill = 'gray85')
                self.canvas.create_oval(i*dist - 2, center[1] - 2, i*dist + 2, center[1] + 2, fill = 'black')

    def plot(self, interval = (-50,50), precision = 0.1, fx = "x**2", color = "green", wfg = 2):
        """
        Sample ``fx`` over ``interval`` and draw the curve. Returns the
        (x, y) sample lists scaled to canvas units (plot units * 50).
        """
        x, y = [], []
        fx = (fx.replace("^", "**")).replace(")(", ")*(")
        iterator = interval[0] - precision
        while iterator <= interval[1]:
            iterator += precision
            try:
                # SECURITY: eval() of a user-supplied expression — acceptable
                # for a local demo, unsafe on untrusted input.
                # NOTE(review): the textual replace also rewrites the "x" in
                # names such as "exp"; only bare-x expressions are safe here.
                function = fx.replace("x", "({0})".format(iterator))
                evaluation = eval(function)
            except:
                # Skip points where the function is undefined (e.g. 1/0).
                continue
            x.append(iterator*50)
            y.append(evaluation*50)
            #if iterator > int(self.canvas['width']) and evaluation > int(self.canvas['height']): break
        self.draw(x, y, (self.yaxe, self.xaxe), color, wfg)
        return x, y

    def draw(self, x, y, center, color, wfg):
        """Draw both axes and the polyline built from the sample lists."""
        self.canvas.create_line(center[0], int(self.canvas['height']), center[0], 0, width = 2)
        self.canvas.create_line(0, center[1], int(self.canvas['width']), center[1], width = 2)
        # Flatten the samples into the [x0, y0, x1, y1, ...] form that
        # Canvas.create_line expects, shifted to the axis origin.
        coords = []
        for i in range(len(x)):
            coords += [x[i] + center[0], -1*y[i] + center[1]]
        try:
            self.canvas.create_line(coords, fill = color, width = wfg)
        except: pass
        return 0
if __name__ == '__main__':
    #window = tkinter.Tk()
    #window.grid_rowconfigure(0, weight=1)
    #window.grid_columnconfigure(0, weight=1)
    # Demo: a CPlot widget inside a frame, plus scrollbars wired to a
    # separate (currently decorative) canvas.
    root=Tk()
    frame=Frame(root,width=300,height=300)
    frame.grid(row=0,column=0)
    canvas=Canvas(frame,bg='#FFFFFF',width=300,height=300,scrollregion=(0,0,500,500))
    c = CPlot(frame, width = 800, height = 500, bg = 'white', fx = 'x^3', fg = 'green', wfg = 2)
    hbar=Scrollbar(frame,orient=HORIZONTAL)
    hbar.pack(side=BOTTOM,fill=X)
    hbar.config(command=canvas.xview)
    vbar=Scrollbar(frame,orient=VERTICAL)
    vbar.pack(side=RIGHT,fill=Y)
    vbar.config(command=canvas.yview)
    canvas.config(width=300,height=300)
    canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
    canvas.pack(side=LEFT,expand=True,fill=BOTH)
    #c.grid(row = 0, column = 0)
    c.pack()
    #print(c["width"])
    # Overlay several more curves on the same plot.
    c.plot(color = 'red', fx = 'x^2 * sin(x)')
    c.plot(color = 'blue', fx = 'cos(x+3)*sin(x*2)')
    c.plot(color = 'black', fx = 'x')
    tkinter.mainloop()
5882d3cc18159d150a57d2cb45cc48114cb65458 | 425 | py | Python | pyyasm/exception.py | srounet/PyYasm | 55883a7d8201240d36c30aee65fbf11a07f93346 | [
"BSD-2-Clause"
] | 3 | 2017-07-06T01:03:46.000Z | 2020-03-04T12:46:13.000Z | pyyasm/exception.py | srounet/PyYasm | 55883a7d8201240d36c30aee65fbf11a07f93346 | [
"BSD-2-Clause"
] | null | null | null | pyyasm/exception.py | srounet/PyYasm | 55883a7d8201240d36c30aee65fbf11a07f93346 | [
"BSD-2-Clause"
] | null | null | null | """pyyasm Exceptions"""
class PyYasmError(Exception):
    """Root of the pyyasm exception hierarchy."""
class PyYasmArchitectureError(PyYasmError):
    """Raised when the client architecture is not supported."""
class PyYasmVersionError(PyYasmError):
    """Raised when the yasm version cannot be determined."""
class PyYasmTypeError(PyYasmError):
    """Raised when the mnemonics type is wrong."""
c4ce5062b8787c333c63d419e828dcd83219901e | 2,182 | py | Python | play_game4.py | redfire/tzaar-python-ai | 87766f57ea406c030a45122e8c1790c3741fcfa4 | [
"MIT"
] | 2 | 2016-09-12T15:31:50.000Z | 2017-02-12T23:44:55.000Z | play_game4.py | jeroenmaas/tzaar-python-ai | 87766f57ea406c030a45122e8c1790c3741fcfa4 | [
"MIT"
] | null | null | null | play_game4.py | jeroenmaas/tzaar-python-ai | 87766f57ea406c030a45122e8c1790c3741fcfa4 | [
"MIT"
] | null | null | null | from shared.queueUtils import *
from shared.board import *
import shared.simpleAI3
import json
def jdefault(o):
    """json.dumps fallback: serialise sets as lists and objects via __dict__."""
    return list(o) if isinstance(o, set) else o.__dict__
def callback(ch, method, properties, body):
    """
    RabbitMQ consumer callback: plays one AI move for this player and
    forwards the resulting game state to the opponent's queue, or to the
    results queue when the game has ended. The message is acked only after
    the follow-up publish succeeds.
    """
    dataContainer = json.loads(body.decode('utf-8'))
    game_id = dataContainer['game_id']
    board = dataContainer['board']
    turn = dataContainer['turn']
    debug_moves = dataContainer['debug_moves']
    # Rehydrate the plain-dict board cells into BoardItem objects.
    for x in range(0, board_size):
        for y in range(0, board_size):
            item = board[x][y]
            board[x][y] = BoardItem(item['type'], item['sub_type'], item['weight'])
    turn_info = TurnInformation(turn)
    import time
    start_time = time.time()
    output = shared.simpleAI3.playMove(board, turn_info)
    print("--- %s seconds ---" % (time.time() - start_time))
    # Assemble the state message handed to the next consumer.
    new_state = {}
    new_state['game_id'] = game_id
    new_state['board'] = output['board']
    new_state['turn'] = turn_info.turn_number + turn_info.turns
    new_state['debug_moves'] = debug_moves + output['moves']
    new_state['original_board'] = dataContainer['original_board']
    if output['result'] == BoardResult.none:
        # Game still running: hand the position over to player 2.
        channel.basic_publish(exchange='',
                              routing_key='tzaar_player_2_queue',
                              body=json.dumps(new_state, default=jdefault))
        ch.basic_ack(delivery_tag = method.delivery_tag)
    else:
        # Game over: record the winner and publish to the results queue.
        if output['result'] == BoardResult.has_lost:
            new_state['winner'] = turn_info.opponent
            print("LOSE")
        else:
            new_state['winner'] = turn_info.player
            print("WIN")
        channel.basic_publish(exchange='',
                              routing_key='tzaar_results',
                              body=json.dumps(new_state, default=jdefault))
        ch.basic_ack(delivery_tag = method.delivery_tag)
# Consume player 1's queue; acks are sent manually after each move has been
# processed (no_ack=False), with at most 10 unacked messages in flight.
connection = getQueueConnection()
channel = connection.channel()
channel.basic_qos(prefetch_count=10)
channel.basic_consume(callback,
                      queue='tzaar_player_1_queue',
                      no_ack=False)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
fba201775176ae6722f97c614e0d1fc1fdf60896 | 6,311 | py | Python | bin/dbunloader.py | tofu-rocketry/apel | 4f8dd77636b62415cfdc49f3ac66005d7e4e9762 | [
"Apache-2.0"
] | 9 | 2017-05-03T17:59:33.000Z | 2021-11-14T17:11:03.000Z | bin/dbunloader.py | apel/apel | ef23b80716da51d44a18a82ebf594faaa2e50cf0 | [
"Apache-2.0"
] | 161 | 2015-01-13T15:55:37.000Z | 2022-03-08T15:56:51.000Z | bin/dbunloader.py | tofu-rocketry/apel | 4f8dd77636b62415cfdc49f3ac66005d7e4e9762 | [
"Apache-2.0"
] | 28 | 2015-01-28T13:55:16.000Z | 2021-08-09T15:06:20.000Z | #!/usr/bin/env python
# Copyright (C) 2012 STFC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
@author: Konrad Jopek, Will Rogers
'''
import logging.config
import sys
import os
from apel.common import set_up_logging
from apel.db import ApelDb, ApelDbException
from apel.db.unloader import DbUnloader
from apel import __version__
from optparse import OptionParser
import ConfigParser
# Clamp range and fallback for the 'records_per_message' config setting.
RECORDS_PER_MESSAGE_MIN = 1
RECORDS_PER_MESSAGE_DEFAULT = 1000
RECORDS_PER_MESSAGE_MAX = 5000
if __name__ == '__main__':
    # Command-line options: paths to the DB, unloader and logging configs.
    opt_parser = OptionParser()
    opt_parser.add_option('-d', '--db', help='location of configuration file for database',
                          default='/etc/apel/db.cfg')
    opt_parser.add_option('-c', '--config', help='Location of configuration file for dbunloader',
                          default='/etc/apel/unloader.cfg')
    opt_parser.add_option('-l', '--log_config', help='Location of logging configuration file for dbloader',
                          default='/etc/apel/logging.cfg')
    (options, args) = opt_parser.parse_args()

    # Set default for 'interval' as it is a new option so may not be in config.
    cp = ConfigParser.ConfigParser({'interval': 'latest'})
    cp.read([options.config])

    # set up logging: prefer a dedicated logging config file, otherwise fall
    # back to the [logging] section of the unloader config.
    try:
        if os.path.exists(options.log_config):
            logging.config.fileConfig(options.log_config)
        else:
            set_up_logging(cp.get('logging', 'logfile'),
                           cp.get('logging', 'level'),
                           cp.getboolean('logging', 'console'))
        log = logging.getLogger('dbunloader')
    except (ConfigParser.Error, ValueError, IOError), err:
        print 'Error configuring logging: %s' % str(err)
        print 'The system will exit.'
        sys.exit(1)

    db = None
    dbcp = ConfigParser.ConfigParser()
    dbcp.read([options.db])

    # Connect to the APEL database; any failure here is fatal.
    try:
        db = ApelDb(dbcp.get('db', 'backend'),
                    dbcp.get('db', 'hostname'),
                    dbcp.getint('db', 'port'),
                    dbcp.get('db', 'username'),
                    dbcp.get('db', 'password'),
                    dbcp.get('db', 'name'))
    except ApelDbException, e:
        log.fatal('Error: %s', e)
        sys.exit(1)
    except Exception, e:
        log.fatal('Cannot get configuration: %s', e)
        sys.exit(1)

    log.info('=====================')
    log.info('Starting APEL dbunloader %s.%s.%s', *__version__)

    unload_dir = cp.get('unloader', 'dir_location')
    table_name = cp.get('unloader', 'table_name')

    # Optional boolean settings default to False when absent.
    try:
        send_ur = cp.getboolean('unloader', 'send_ur')
    except ConfigParser.NoOptionError:
        send_ur = False

    try:
        local_jobs = cp.getboolean('unloader', 'local_jobs')
    except ConfigParser.NoOptionError:
        local_jobs = False

    try:
        withhold_dns = cp.getboolean('unloader', 'withhold_dns')
    except ConfigParser.NoOptionError:
        withhold_dns = False

    include_vos = None
    exclude_vos = None
    try:
        include = cp.get('unloader', 'include_vos')
        include_vos = [ vo.strip() for vo in include.split(',') ]
    except ConfigParser.NoOptionError:
        # Only exclude VOs if we haven't specified the ones to include.
        try:
            exclude = cp.get('unloader', 'exclude_vos')
            exclude_vos = [ vo.strip() for vo in exclude.split(',') ]
        except ConfigParser.NoOptionError:
            pass

    interval = cp.get('unloader', 'interval')

    unloader = DbUnloader(db, unload_dir, include_vos, exclude_vos, local_jobs, withhold_dns)

    # Clamp records_per_message into [MIN, MAX]; fall back to the default on
    # a missing or non-integer value.
    try:
        records_per_message = int(cp.get('unloader', 'records_per_message'))
        if records_per_message < RECORDS_PER_MESSAGE_MIN:
            unloader.records_per_message = RECORDS_PER_MESSAGE_MIN
            log.warning(
                'records_per_message too small, increasing from %d to %d',
                records_per_message,
                RECORDS_PER_MESSAGE_MIN,
            )
        elif records_per_message > RECORDS_PER_MESSAGE_MAX:
            unloader.records_per_message = RECORDS_PER_MESSAGE_MAX
            log.warning(
                'records_per_message too large, decreasing from %d to %d',
                records_per_message,
                RECORDS_PER_MESSAGE_MAX,
            )
        else:
            unloader.records_per_message = records_per_message
    except ConfigParser.NoOptionError:
        log.info(
            'records_per_message not specified, defaulting to %d.',
            RECORDS_PER_MESSAGE_DEFAULT,
        )
        unloader.records_per_message = RECORDS_PER_MESSAGE_DEFAULT
    except ValueError:
        log.error(
            'Invalid records_per_message value, must be a postive integer. Defaulting to %d.',
            RECORDS_PER_MESSAGE_DEFAULT,
        )
        unloader.records_per_message = RECORDS_PER_MESSAGE_DEFAULT

    try:
        if interval == 'latest':
            msgs, recs = unloader.unload_latest(table_name, send_ur)
        elif interval == 'gap':
            start = cp.get('unloader', 'gap_start')
            end = cp.get('unloader', 'gap_end')
            msgs, recs = unloader.unload_gap(table_name, start, end, send_ur)
        elif interval == 'all':
            msgs, recs = unloader.unload_all(table_name, send_ur)
        else:
            log.warning('Unrecognised interval: %s', interval)
            log.warning('Will not start unloader.')
        # NOTE(review): when the interval is unrecognised, msgs/recs are
        # never assigned and the next line raises an uncaught NameError —
        # confirm whether this path can occur in practice.
        log.info('%d records in %d messages unloaded from %s', recs, msgs, table_name)
    except KeyError:
        log.error('Invalid table name: %s, omitting', table_name)
    except ApelDbException, e:
        log.error('Unloading failed: %s', e)

    log.info('Unloading complete.')
    log.info('=====================')
| 36.270115 | 107 | 0.618444 |
6ddc0b4c40f8d85ba140288d40986d6b18e984db | 1,033 | py | Python | getSenderNumberMgtURL.py | linkhub-sdk/popbill.message.example.py | e8dc7d46ff3ce21da2c6fc4fda2624f40c0a8438 | [
"MIT"
] | null | null | null | getSenderNumberMgtURL.py | linkhub-sdk/popbill.message.example.py | e8dc7d46ff3ce21da2c6fc4fda2624f40c0a8438 | [
"MIT"
] | null | null | null | getSenderNumberMgtURL.py | linkhub-sdk/popbill.message.example.py | e8dc7d46ff3ce21da2c6fc4fda2624f40c0a8438 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# code for console Encoding difference. Dont' mind on it
import sys
import imp
imp.reload(sys)
try:
    # Python 2 only: force UTF-8 as the default string encoding.
    sys.setdefaultencoding('UTF8')
except Exception as E:
    pass

import testValue
from popbill import MessageService, PopbillException

# Message service instance configured from the shared test settings module.
messageService = MessageService(testValue.LinkID, testValue.SecretKey)
messageService.IsTest = testValue.IsTest
messageService.IPRestrictOnOff = testValue.IPRestrictOnOff
messageService.UseStaticIP = testValue.UseStaticIP
messageService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Returns the URL of the sender-number management popup.
- For security reasons the returned URL is valid for 30 seconds.
- https://docs.popbill.com/message/python/api#GetSenderNumberMgtURL
'''

try:
    print("=" * 15 + " 발신번호 관리 팝업 URL 확인 " + "=" * 15)

    # Popbill member business registration number
    CorpNum = testValue.testCorpNum

    # Popbill member user ID
    UserID = testValue.testUserID

    url = messageService.getSenderNumberMgtURL(CorpNum, UserID)
    print("URL: %s" % url)

except PopbillException as PE:
    print("Exception Occur : [%d] %s" % (PE.code, PE.message))
| 24.595238 | 70 | 0.734753 |
48ccaf1198610562527bad698173d50b53e048b4 | 149 | py | Python | exercise/test_if_not.py | KeoghRee/python_learn | 30769c799adf04fec0c972c271fa74c2dbaaf719 | [
"MIT"
] | null | null | null | exercise/test_if_not.py | KeoghRee/python_learn | 30769c799adf04fec0c972c271fa74c2dbaaf719 | [
"MIT"
] | null | null | null | exercise/test_if_not.py | KeoghRee/python_learn | 30769c799adf04fec0c972c271fa74c2dbaaf719 | [
"MIT"
] | null | null | null | a = input('输入')
# Convert the earlier input to an int, defaulting to 0 for empty input.
aa = int(a) if bool(a) else 0
# `not aa > 1` is equivalent to `aa <= 1`.
if not aa > 1:
    print('this way is ok')
else:
    print('dame')
b = input()
print(str(b))
d722414f2aad60c90423ad4acb539058940e1b36 | 2,208 | py | Python | day3/api/collateral/api_modify_example1.py | austind/pyplus-ons | f0fcd6b2a980f75968ab54cd2ae39b42c1f68302 | [
"Apache-2.0"
] | null | null | null | day3/api/collateral/api_modify_example1.py | austind/pyplus-ons | f0fcd6b2a980f75968ab54cd2ae39b42c1f68302 | [
"Apache-2.0"
] | null | null | null | day3/api/collateral/api_modify_example1.py | austind/pyplus-ons | f0fcd6b2a980f75968ab54cd2ae39b42c1f68302 | [
"Apache-2.0"
] | 5 | 2019-11-19T18:41:41.000Z | 2020-06-18T14:58:09.000Z | import requests
import os
import json
from pprint import pprint
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
if __name__ == "__main__":
    """
    --------------------------------------------------------------------------------
    HTTP PUT
    URL: https://netbox.lasthop.io/api/dcim/devices/8/
    HTTP Headers: {'Content-Type': 'application/json; version=2.4;', 'authorization':
    'Token 1231231412341234123412341234123412343212'}
    PUT DATA: {'id': 8, 'name': 'arista6', 'display_name': 'arista6', 'device_type': 2,
    'device_role': 3,
    'tenant': None, 'platform': 4, 'serial': '', 'asset_tag': None, 'site': 1, 'rack': 2,
    'position': None,
    'face': None, 'parent_device': None, 'status': 1, 'primary_ip': None, 'primary_ip4':
    None,
    'primary_ip6': None, 'cluster': None, 'virtual_chassis': None, 'vc_position': None,
    'vc_priority':
    None, 'comments': '', 'tags': [], 'custom_fields': {}, 'created': '2018-11-01',
    'last_updated':
    '2018-11-01T12:29:41.716417-07:00', 'local_context_data': None}
    --------------------------------------------------------------------------------
    """
    # API token is read from the environment rather than hard-coded.
    token = os.environ["NETBOX_TOKEN"]

    # Arista6
    url = "https://netbox.lasthop.io/api/dcim/devices/8/"

    # Fetch the current device object. NOTE: verify=False disables TLS
    # certificate checking — acceptable for this lab host only.
    http_headers = {
        "accept": "application/json; version=2.4;",
        "authorization": "Token {}".format(token),
    }
    response = requests.get(url, headers=http_headers, verify=False)
    arista6 = response.json()

    http_headers = {
        "Content-Type": "application/json; version=2.4;",
        "authorization": "Token {}".format(token),
    }

    # Reformat to get the proper structure for the existing object:
    # nested relation objects must be collapsed to their integer ids
    # before the payload can be PUT back.
    for field in ["device_role", "device_type", "platform", "site", "rack"]:
        arista6[field] = arista6[field]["id"]

    arista6["status"] = 1
    arista6["rack"] = 2

    response = requests.put(url, headers=http_headers, data=json.dumps(arista6), verify=False)
    response = response.json()
    print()
    pprint(response)
    # print(response._content.decode())
    print()
3c862c0387cc93013cfdf1dd1bf875ce27508597 | 690 | py | Python | controller/login.py | aamir4813/chatbox | c98f12ce58ea497c43f888f19b571bce0eca0278 | [
"MIT"
] | null | null | null | controller/login.py | aamir4813/chatbox | c98f12ce58ea497c43f888f19b571bce0eca0278 | [
"MIT"
] | null | null | null | controller/login.py | aamir4813/chatbox | c98f12ce58ea497c43f888f19b571bce0eca0278 | [
"MIT"
] | null | null | null | from application import app , db
from flask import render_template , flash , redirect , url_for , request
from app_data.forms_wtf import *
from flask_login import current_user , login_user
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Render the login page and log the user in on successful form submit."""
    # Already-authenticated users go straight to the chat page.
    if current_user.is_authenticated :
        return redirect(url_for('chat'))
    login_form = LoginForm()
    # Allow login if validation success
    if login_form.validate_on_submit():
        user_object = User.query.filter_by(username=login_form.username.data).first()
        # NOTE(review): no explicit password check is visible here —
        # presumably LoginForm's validators verify the credentials and the
        # user's existence; confirm, otherwise any known username logs in
        # and `user_object` may be None.
        # NOTE(review): db.session.remove() before login_user() may detach
        # user_object from the session — verify this is intentional.
        db.session.remove()
        login_user(user_object)
        return redirect(url_for('chat'))
    return render_template("login.html", form=login_form)
| 28.75 | 85 | 0.710145 |
4473064c7a059074969c1b25c94d6cbdeb3b2819 | 581 | py | Python | dbank.py | adesolagbenga0052/web-app | c6d6ca3f998897986ac25a1e93477af0a8bfacf6 | [
"Apache-2.0"
] | null | null | null | dbank.py | adesolagbenga0052/web-app | c6d6ca3f998897986ac25a1e93477af0a8bfacf6 | [
"Apache-2.0"
] | null | null | null | dbank.py | adesolagbenga0052/web-app | c6d6ca3f998897986ac25a1e93477af0a8bfacf6 | [
"Apache-2.0"
] | null | null | null | def TEST_DATA_BANK():
jsn = []
with open('filtered.csv', "r") as csv:
index = 0
head = ''
geo_point = ""
for line in csv:
dic = {}
line = line.replace("\n", "").split(',')
if index == 0:
head = line
index += 1
else:
line[19] +=','+ line[20]
del line[20]
for val, title in zip(line, head):
dic[title] = val
jsn.append(dic)
return jsn
| 29.05 | 53 | 0.349398 |
d35c6e9f922723a0b47237ba91776bfbc5d906f5 | 194 | py | Python | competitive-programming/atcoder/abc228/a.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | competitive-programming/atcoder/abc228/a.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | competitive-programming/atcoder/abc228/a.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | def main():
s, t, x = map(int, input().split())
if t > s:
print('Yes') if s <= x < t else print('No')
else:
print('Yes') if x >= s or x < t else print('No')
main()
| 19.4 | 56 | 0.458763 |
2e9cff624cd54cb73f6bf6be184ef77b435d796a | 21,481 | py | Python | salt/auth/__init__.py | Bacon-Unlimited/salt | 9b1b791d212a6810c430dd15c63fbce3a4f7e1d6 | [
"Apache-2.0"
] | null | null | null | salt/auth/__init__.py | Bacon-Unlimited/salt | 9b1b791d212a6810c430dd15c63fbce3a4f7e1d6 | [
"Apache-2.0"
] | null | null | null | salt/auth/__init__.py | Bacon-Unlimited/salt | 9b1b791d212a6810c430dd15c63fbce3a4f7e1d6 | [
"Apache-2.0"
] | null | null | null | """
Salt's pluggable authentication system
This system allows for authentication to be managed in a module pluggable way
so that any external authentication system can be used inside of Salt
"""
# 1. Create auth loader instance
# 2. Accept arguments as a dict
# 3. Verify with function introspection
# 4. Execute auth function
# 5. Cache auth token with relative data opts['token_dir']
# 6. Interface to verify tokens
import getpass
import logging
import random
import time
from collections.abc import Iterable, Mapping
import salt.channel.client
import salt.config
import salt.exceptions
import salt.loader
import salt.payload
import salt.utils.args
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.minions
import salt.utils.user
import salt.utils.versions
import salt.utils.zeromq
log = logging.getLogger(__name__)
# Request-plumbing keys that may appear in an auth load alongside the real
# credentials; they are passed through to eauth modules as extra keyword
# arguments (see expected_extra_kws in the format_call() calls below).
AUTH_INTERNAL_KEYWORDS = frozenset(
    [
        "client",
        "cmd",
        "eauth",
        "fun",
        "gather_job_timeout",
        "kwarg",
        "match",
        "metadata",
        "print_event",
        "raw",
        "yield_pub_data",
    ]
)
class LoadAuth:
    """
    Wrap the authentication system to handle peripheral components:
    loading eauth and token modules, creating/validating/removing tokens,
    and answering group and ACL queries for external auth requests.
    """
    def __init__(self, opts, ckminions=None):
        """
        :param opts: master configuration dictionary
        :param ckminions: optional CkMinions instance; built from ``opts``
            when not supplied
        """
        self.opts = opts
        # Longest failed-auth duration observed so far; time_auth pads all
        # failures to roughly this long to blunt timing attacks.
        self.max_fail = 1.0
        self.auth = salt.loader.auth(opts)
        self.tokens = salt.loader.eauth_tokens(opts)
        self.ckminions = ckminions or salt.utils.minions.CkMinions(opts)
def load_name(self, load):
"""
Return the primary name associate with the load, if an empty string
is returned then the load does not match the function
"""
if "eauth" not in load:
return ""
fstr = "{}.auth".format(load["eauth"])
if fstr not in self.auth:
return ""
try:
pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])["args"][0]
return load[pname_arg]
except IndexError:
return ""
    def __auth_call(self, load):
        """
        Run the eauth module's auth() function for this load and return its
        result, or False when the load/module is invalid or auth() raises.

        Do not call this directly! Use the time_auth method to overcome timing
        attacks
        """
        if "eauth" not in load:
            return False
        fstr = "{}.auth".format(load["eauth"])
        if fstr not in self.auth:
            return False

        # When making auth calls, only username, password, auth, and token
        # are valid, so we strip anything else out.
        _valid = ["username", "password", "eauth", "token"]
        _load = {key: value for (key, value) in load.items() if key in _valid}

        # Map the stripped load onto the auth() function's signature.
        fcall = salt.utils.args.format_call(
            self.auth[fstr], _load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS
        )
        try:
            if "kwargs" in fcall:
                return self.auth[fstr](*fcall["args"], **fcall["kwargs"])
            else:
                return self.auth[fstr](*fcall["args"])
        except Exception as e:  # pylint: disable=broad-except
            # A broken eauth module must read as an auth failure, not crash.
            log.debug("Authentication module threw %s", e)
            return False
    def time_auth(self, load):
        """
        Make sure that all failures happen in the same amount of time

        Successful authentications return immediately; failures sleep until
        roughly max_fail (+/- 25% jitter) has elapsed so an attacker cannot
        infer anything from response timing.
        """
        start = time.time()
        ret = self.__auth_call(load)
        if ret:
            return ret
        # Track the slowest failure seen so far and pad to that duration.
        f_time = time.time() - start
        if f_time > self.max_fail:
            self.max_fail = f_time
        deviation = self.max_fail / 4
        # SystemRandom: jitter must not be predictable from a seeded PRNG.
        r_time = random.SystemRandom().uniform(
            self.max_fail - deviation, self.max_fail + deviation
        )
        while start + r_time > time.time():
            time.sleep(0.001)
        return False
    def __get_acl(self, load):
        """
        Returns ACL for a specific user.
        Returns None if eauth doesn't provide any for the user. I. e. None means: use acl declared
        in master config.
        """
        if "eauth" not in load:
            return None
        # eauth_acl_module lets a different module supply the ACL than the
        # one used for authentication; default to the load's eauth module.
        mod = self.opts["eauth_acl_module"]
        if not mod:
            mod = load["eauth"]
        fstr = "{}.acl".format(mod)
        if fstr not in self.auth:
            return None
        fcall = salt.utils.args.format_call(
            self.auth[fstr], load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS
        )
        try:
            return self.auth[fstr](*fcall["args"], **fcall["kwargs"])
        except Exception as e:  # pylint: disable=broad-except
            # A failing acl() hook falls back to the master-config ACL.
            log.debug("Authentication module threw %s", e)
            return None
def __process_acl(self, load, auth_list):
"""
Allows eauth module to modify the access list right before it'll be applied to the request.
For example ldap auth module expands entries
"""
if "eauth" not in load:
return auth_list
fstr = "{}.process_acl".format(load["eauth"])
if fstr not in self.auth:
return auth_list
try:
return self.auth[fstr](auth_list, self.opts)
except Exception as e: # pylint: disable=broad-except
log.debug("Authentication module threw %s", e)
return auth_list
def get_groups(self, load):
"""
Read in a load and return the groups a user is a member of
by asking the appropriate provider
"""
if "eauth" not in load:
return False
fstr = "{}.groups".format(load["eauth"])
if fstr not in self.auth:
return False
fcall = salt.utils.args.format_call(
self.auth[fstr], load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS
)
try:
return self.auth[fstr](*fcall["args"], **fcall["kwargs"])
except IndexError:
return False
except Exception: # pylint: disable=broad-except
return None
def _allow_custom_expire(self, load):
"""
Return bool if requesting user is allowed to set custom expire
"""
expire_override = self.opts.get("token_expire_user_override", False)
if expire_override is True:
return True
if isinstance(expire_override, Mapping):
expire_whitelist = expire_override.get(load["eauth"], [])
if isinstance(expire_whitelist, Iterable):
if load.get("username") in expire_whitelist:
return True
return False
    def mk_token(self, load):
        """
        Run time_auth and create a token. Returns the stored token data on
        success or an empty dict when authentication fails.
        """
        if not self.authenticate_eauth(load):
            return {}

        # Honour a caller-supplied token_expire only for whitelisted users;
        # the key is popped either way so it never leaks into the token.
        if self._allow_custom_expire(load):
            token_expire = load.pop("token_expire", self.opts["token_expire"])
        else:
            _ = load.pop("token_expire", None)
            token_expire = self.opts["token_expire"]

        tdata = {
            "start": time.time(),
            "expire": time.time() + token_expire,
            "name": self.load_name(load),
            "eauth": load["eauth"],
        }

        # Optionally freeze the user's ACL into the token itself instead of
        # re-resolving it on every request.
        if self.opts["keep_acl_in_token"]:
            acl_ret = self.__get_acl(load)
            tdata["auth_list"] = acl_ret

        groups = self.get_groups(load)
        if groups:
            tdata["groups"] = groups

        # Persist via the configured token backend (e.g. localfs, rediscluster).
        return self.tokens["{}.mk_token".format(self.opts["eauth_tokens"])](
            self.opts, tdata
        )
    def get_tok(self, tok):
        """
        Return the token data associated with the given token id, or an
        empty dict when the token is missing, unreadable or expired.
        Broken and expired tokens are removed from the backend as a side
        effect.
        """
        tdata = {}
        try:
            tdata = self.tokens["{}.get_token".format(self.opts["eauth_tokens"])](
                self.opts, tok
            )
        except salt.exceptions.SaltDeserializationError:
            # Corrupt/empty token file: schedule it for removal.
            log.warning("Failed to load token %r - removing broken/empty file.", tok)
            rm_tok = True
        else:
            if not tdata:
                return {}
            rm_tok = False

        if tdata.get("expire", 0) < time.time():
            # If expire isn't present in the token it's invalid and needs
            # to be removed. Also, if it's present and has expired - in
            # other words, the expiration is before right now, it should
            # be removed.
            rm_tok = True

        if rm_tok:
            self.rm_token(tok)
            return {}

        return tdata
def list_tokens(self):
"""
List all tokens in eauth_tokn storage.
"""
return self.tokens["{}.list_tokens".format(self.opts["eauth_tokens"])](
self.opts
)
def rm_token(self, tok):
"""
Remove the given token from token storage.
"""
self.tokens["{}.rm_token".format(self.opts["eauth_tokens"])](self.opts, tok)
def authenticate_token(self, load):
    """
    Authenticate a user by the token specified in load.
    Return the token object or False if auth failed.
    """
    token = self.get_tok(load["token"])
    # Accept only a non-empty token whose eauth backend is still enabled.
    if token and token["eauth"] in self.opts["external_auth"]:
        return token
    log.warning('Authentication failure of type "token" occurred.')
    return False
def authenticate_eauth(self, load):
    """
    Authenticate a user by the external auth module specified in load.
    Return True on success or False on failure.
    """
    failure = 'Authentication failure of type "eauth" occurred.'
    if "eauth" not in load:
        log.warning(failure)
        return False
    eauth = load["eauth"]
    if eauth not in self.opts["external_auth"]:
        log.warning('The eauth system "%s" is not enabled', eauth)
        log.warning(failure)
        return False
    # Perform the actual credential check in the eauth backend; bail on
    # any failure.
    if self.time_auth(load):
        return True
    log.warning(failure)
    return False
def authenticate_key(self, load, key):
    """
    Authenticate a user by the key passed in load.
    Return the effective user id (name) if it's different from the specified one (for sudo).
    If the effective user id is the same as the passed one, return True on success or False on
    failure.
    """
    error_msg = 'Authentication failure of type "user" occurred.'
    # Pop the key so it never leaks into downstream payload handling.
    auth_key = load.pop("key", None)
    if auth_key is None:
        log.warning(error_msg)
        return False
    if "user" in load:
        auth_user = AuthUser(load["user"])
        if auth_user.is_sudo():
            # If someone sudos check to make sure there is no ACL's around their username
            if auth_key != key[self.opts.get("user", "root")]:
                log.warning(error_msg)
                return False
            # Return the underlying username so sudo ACLs apply to it.
            return auth_user.sudo_name()
        elif (
            load["user"] == self.opts.get("user", "root") or load["user"] == "root"
        ):
            # Master user (or literal root) must present the master's own key.
            if auth_key != key[self.opts.get("user", "root")]:
                log.warning(
                    "Master runs as %r, but user in payload is %r",
                    self.opts.get("user", "root"),
                    load["user"],
                )
                log.warning(error_msg)
                return False
        elif auth_user.is_running_user():
            # User matches the process owner: check that user's own key.
            if auth_key != key.get(load["user"]):
                log.warning(error_msg)
                return False
        elif auth_key == key.get("root"):
            # A valid root key is accepted on behalf of any user.
            pass
        else:
            if load["user"] in key:
                # User is authorised, check key and check perms
                if auth_key != key[load["user"]]:
                    log.warning(error_msg)
                    return False
                # Named user authenticated with their own key; report it so
                # the caller can apply that user's publisher ACL.
                return load["user"]
            else:
                log.warning(error_msg)
                return False
    else:
        # No explicit user: require the key of the user running this process.
        if auth_key != key[salt.utils.user.get_user()]:
            log.warning(error_msg)
            return False
    return True
def get_auth_list(self, load, token=None):
    """
    Retrieve access list for the user specified in load.
    The list is built by eauth module or from master eauth configuration.
    Return None if current configuration doesn't provide any ACL for the user. Return an empty
    list if the user has no rights to execute anything on this master and returns non-empty list
    if user is allowed to execute particular functions.

    :param load: Auth payload carrying user/eauth data.
    :param token: Optional previously-issued token dict; when given, user
        name/groups (and possibly a cached ACL) come from it instead of load.
    """
    # Get auth list from token
    if token and self.opts["keep_acl_in_token"] and "auth_list" in token:
        return token["auth_list"]
    # Get acl from eauth module.
    auth_list = self.__get_acl(load)
    if auth_list is not None:
        return auth_list
    # Fall back to the ACL declared in the master's external_auth config.
    eauth = token["eauth"] if token else load["eauth"]
    if eauth not in self.opts["external_auth"]:
        # No matching module is allowed in config
        log.debug('The eauth system "%s" is not enabled', eauth)
        log.warning("Authorization failure occurred.")
        return None
    if token:
        name = token["name"]
        groups = token.get("groups")
    else:
        name = self.load_name(load)  # The username we are attempting to auth with
        groups = self.get_groups(load)  # The groups this user belongs to
    eauth_config = self.opts["external_auth"][eauth]
    if not eauth_config:
        log.debug('eauth "%s" configuration is empty', eauth)
    if not groups:
        groups = []
    # We now have an authenticated session and it is time to determine
    # what the user has access to.
    auth_list = self.ckminions.fill_auth_list(eauth_config, name, groups)
    auth_list = self.__process_acl(load, auth_list)
    log.trace("Compiled auth_list: %s", auth_list)
    return auth_list
def check_authentication(self, load, auth_type, key=None, show_username=False):
    """
    .. versionadded:: 2018.3.0

    Go through various checks to see if the token/eauth/user can be authenticated.

    Returns a dictionary containing the following keys:

    - auth_list
    - username
    - error

    If an error is encountered, return immediately with the relevant error dictionary
    as authentication has failed. Otherwise, return the username and valid auth_list.

    :param auth_type: One of "token", "eauth", or "user"; selects which
        authentication path is taken.
    :param key: Master key dict, required for the "user" path.
    :param show_username: Include the username in "user" error messages.
    """
    auth_list = []
    username = load.get("username", "UNKNOWN")
    ret = {"auth_list": auth_list, "username": username, "error": {}}
    # Authenticate
    if auth_type == "token":
        token = self.authenticate_token(load)
        if not token:
            ret["error"] = {
                "name": "TokenAuthenticationError",
                "message": 'Authentication failure of type "token" occurred.',
            }
            return ret
        # Update username for token
        username = token["name"]
        ret["username"] = username
        auth_list = self.get_auth_list(load, token=token)
    elif auth_type == "eauth":
        if not self.authenticate_eauth(load):
            ret["error"] = {
                "name": "EauthAuthenticationError",
                "message": 'Authentication failure of type "eauth" occurred for user {}.'.format(
                    username
                ),
            }
            return ret
        auth_list = self.get_auth_list(load)
    elif auth_type == "user":
        # auth_ret is True, False, or an effective username (sudo case).
        auth_ret = self.authenticate_key(load, key)
        msg = 'Authentication failure of type "user" occurred'
        if not auth_ret:  # auth_ret can be a boolean or the effective user id
            if show_username:
                msg = "{} for user {}.".format(msg, username)
            ret["error"] = {"name": "UserAuthenticationError", "message": msg}
            return ret
        # Verify that the caller has root on master
        if auth_ret is not True:
            # Sudoers get full access unless both sudo_acl and
            # publisher_acl are configured.
            if AuthUser(load["user"]).is_sudo():
                if not self.opts["sudo_acl"] or not self.opts["publisher_acl"]:
                    auth_ret = True
        if auth_ret is not True:
            # Named user: restrict them to their publisher_acl entries.
            # Avoid a circular import
            import salt.utils.master

            auth_list = salt.utils.master.get_values_of_matching_keys(
                self.opts["publisher_acl"], auth_ret
            )
            if not auth_list:
                ret["error"] = {"name": "UserAuthenticationError", "message": msg}
                return ret
    else:
        ret["error"] = {
            "name": "SaltInvocationError",
            "message": "Authentication type not supported.",
        }
        return ret
    # Authentication checks passed
    ret["auth_list"] = auth_list
    return ret
class Resolver:
    """
    The class used to resolve options for the command line and for generic
    interactive interfaces
    """

    def __init__(self, opts):
        self.opts = opts
        self.auth = salt.loader.auth(opts)

    def _send_token_request(self, load):
        """Send *load* to the master over a clear (unauthenticated) channel.

        The payload itself carries the credentials, so no key exchange is
        required beforehand.
        """
        master_uri = "tcp://{}:{}".format(
            salt.utils.zeromq.ip_bracket(self.opts["interface"]),
            str(self.opts["ret_port"]),
        )
        with salt.channel.client.ReqChannel.factory(
            self.opts, crypt="clear", master_uri=master_uri
        ) as channel:
            return channel.send(load)

    def cli(self, eauth):
        """
        Execute the CLI options to fill in the extra data needed for the
        defined eauth system

        :param eauth: Name of the external auth backend (e.g. "pam").
        :return: Dict of collected credential args/kwargs; empty on error.
        """
        ret = {}
        if not eauth:
            print("External authentication system has not been specified")
            return ret
        fstr = "{}.auth".format(eauth)
        if fstr not in self.auth:
            print(
                'The specified external authentication system "{}" is not available'.format(
                    eauth
                )
            )
            print(
                "Available eauth types: {}".format(
                    ", ".join(sorted(k[:-5] for k in self.auth if k.endswith(".auth")))
                )
            )
            return ret
        # Inspect the backend's auth() signature to learn what to prompt for.
        args = salt.utils.args.arg_lookup(self.auth[fstr])
        for arg in args["args"]:
            if arg in self.opts:
                ret[arg] = self.opts[arg]
            elif arg.startswith("pass"):
                # Anything password-like is read without echo.
                ret[arg] = getpass.getpass("{}: ".format(arg))
            else:
                ret[arg] = input("{}: ".format(arg))
        for kwarg, default in list(args["kwargs"].items()):
            if kwarg in self.opts:
                # BUG FIX: previously assigned to the literal key "kwarg",
                # silently dropping the named keyword argument's value.
                ret[kwarg] = self.opts[kwarg]
            else:
                ret[kwarg] = input("{} [{}]: ".format(kwarg, default))
        # Use current user if empty
        if "username" in ret and not ret["username"]:
            ret["username"] = salt.utils.user.get_user()
        return ret

    def token_cli(self, eauth, load):
        """
        Create the token from the CLI and request the correct data to
        authenticate via the passed authentication mechanism
        """
        load["cmd"] = "mk_token"
        load["eauth"] = eauth
        tdata = self._send_token_request(load)
        if "token" not in tdata:
            return tdata
        try:
            # Cache the token locally with owner-only permissions (0600).
            with salt.utils.files.set_umask(0o177):
                with salt.utils.files.fopen(self.opts["token_file"], "w+") as fp_:
                    fp_.write(tdata["token"])
        except OSError:
            # Best-effort cache write; the token is still returned to the
            # caller even if it couldn't be persisted.
            pass
        return tdata

    def mk_token(self, load):
        """
        Request a token from the master
        """
        load["cmd"] = "mk_token"
        tdata = self._send_token_request(load)
        return tdata

    def get_token(self, token):
        """
        Request a token from the master

        :param token: Token id to look up.
        """
        load = {}
        load["token"] = token
        load["cmd"] = "get_token"
        tdata = self._send_token_request(load)
        return tdata
class AuthUser:
    """
    Represents a user requesting authentication to the salt master
    """

    def __init__(self, user):
        """
        Instantiate an AuthUser object.

        Takes a user to represent, as a string.
        """
        self.user = user

    def is_sudo(self):
        """
        Determines if the user is running with sudo

        Returns True if the user is running with sudo and False if the
        user is not running with sudo
        """
        prefix = "sudo_"
        return self.user[: len(prefix)] == prefix

    def is_running_user(self):
        """
        Determines if the user is the same user as the one running
        this process

        Returns True if the user is the same user as the one running
        this process and False if not.
        """
        return salt.utils.user.get_user() == self.user

    def sudo_name(self):
        """
        Returns the username of the sudoer, i.e. self.user without the
        'sudo_' prefix.
        """
        head, sep, tail = self.user.partition("_")
        return tail if sep else head
| 33.616588 | 101 | 0.55463 |
dc5924ce92f848e9651c7321f68c76bd31b273f0 | 736 | py | Python | rest/api/urls.py | forafekt/curly-spoon | 4826a8da22bbaee33561052169212f5afec60a5a | [
"MIT"
] | 1 | 2021-10-31T08:46:06.000Z | 2021-10-31T08:46:06.000Z | rest/api/urls.py | forafekt/curly-spoon | 4826a8da22bbaee33561052169212f5afec60a5a | [
"MIT"
] | 1 | 2022-03-02T09:47:10.000Z | 2022-03-02T09:47:10.000Z | rest/api/urls.py | forafekt/curly-spoon | 4826a8da22bbaee33561052169212f5afec60a5a | [
"MIT"
] | null | null | null | from django.urls import path, include, re_path
from rest_framework.authtoken.views import obtain_auth_token
from rest.api import views
app_name = 'api'
""" REST URLS """
urlpatterns = [
path('', views.APIBaseView.as_view(), name='api'),
path('', include('rest_framework.urls', namespace='rest_framework')),
path('allauth/', include('allauth.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('api-token-auth/', obtain_auth_token, name='api_token_auth'),
re_path('rest-auth/', include('rest.users.urls')),
re_path('marketing/', include('rest.subscribe.urls'), name='subscribe'),
]
""" APP URLS """
urlpatterns += [
]
| 30.666667 | 76 | 0.686141 |
504782e83dfa6046cc81e58414258257ecf13bdb | 2,579 | py | Python | raylab/agents/svg/policy.py | angelolovatto/raylab | ebaea8df1a391fb844e75df62ccf1e2e07311d88 | [
"MIT"
] | 29 | 2020-05-05T13:25:33.000Z | 2022-01-03T14:12:29.000Z | raylab/agents/svg/policy.py | angelolovatto/raylab | ebaea8df1a391fb844e75df62ccf1e2e07311d88 | [
"MIT"
] | 215 | 2019-11-26T12:59:39.000Z | 2022-02-01T12:38:31.000Z | raylab/agents/svg/policy.py | angelolovatto/raylab | ebaea8df1a391fb844e75df62ccf1e2e07311d88 | [
"MIT"
] | 7 | 2020-06-12T01:42:02.000Z | 2021-05-27T03:40:42.000Z | """Base Policy with common methods for all SVG variations."""
import torch
from nnrl.nn.utils import update_polyak
from ray.rllib import SampleBatch
from raylab.options import configure, option
from raylab.policy import EnvFnMixin, TorchPolicy
from raylab.policy.action_dist import WrapStochasticPolicy
from raylab.policy.losses import ISFittedVIteration, MaximumLikelihood
@configure
@option(
    "polyak",
    0.995,
    help="Interpolation factor in polyak averaging for target networks.",
)
@option("max_is_ratio", 5.0, help="Clip importance sampling weights by this value")
@option(
    "vf_loss_coeff",
    1.0,
    help="Weight of the fitted V loss in the joint model-value loss",
)
class SVGTorchPolicy(EnvFnMixin, TorchPolicy):
    """Stochastic Value Gradients policy using PyTorch."""

    # pylint:disable=abstract-method
    dist_class = WrapStochasticPolicy

    def __init__(self, observation_space, action_space, config):
        super().__init__(observation_space, action_space, config)
        # Model is trained by maximum likelihood; critic is fit with
        # importance-sampled value iteration against the target network.
        self.loss_model = MaximumLikelihood(self.module.model)
        self.loss_critic = ISFittedVIteration(
            self.module.critic, self.module.target_critic
        )
        self.loss_critic.gamma = self.config["gamma"]

    @torch.no_grad()
    def add_truncated_importance_sampling_ratios(self, batch_tensors):
        """Compute and add truncated importance sampling ratios to tensor batch.

        Returns the (mutated) batch and an info dict with the mean of the
        *untruncated* ratios for diagnostics.
        """
        is_ratios = self.importance_sampling_ratios(batch_tensors)
        # Truncation bounds the variance of the importance-sampling estimator.
        _is_ratios = torch.clamp(is_ratios, max=self.config["max_is_ratio"])
        batch_tensors[ISFittedVIteration.IS_RATIOS] = _is_ratios
        return batch_tensors, {"is_ratio_mean": is_ratios.mean().item()}

    def importance_sampling_ratios(self, batch_tensors):
        """Compute unrestricted importance sampling ratios.

        ratio = pi_current(a|s) / pi_behavior(a|s), computed via the
        difference of log-probabilities for numerical stability.
        """
        curr_logp = self.module.actor.log_prob(
            batch_tensors[SampleBatch.CUR_OBS], batch_tensors[SampleBatch.ACTIONS]
        )
        is_ratio = torch.exp(curr_logp - batch_tensors[SampleBatch.ACTION_LOGP])
        return is_ratio

    def compute_joint_model_value_loss(self, batch_tensors):
        """Compute model MLE loss and fitted value function loss.

        Returns a single joint scalar loss (vf_loss_coeff balances the two
        terms) plus the merged info dicts from both losses.
        """
        mle_loss, mle_info = self.loss_model(batch_tensors)
        isfv_loss, isfv_info = self.loss_critic(batch_tensors)
        loss = mle_loss + self.config["vf_loss_coeff"] * isfv_loss
        return loss, {**mle_info, **isfv_info}

    def _update_polyak(self):
        # Move the target critic toward the online critic by the configured
        # polyak interpolation factor.
        update_polyak(
            self.module.critic, self.module.target_critic, self.config["polyak"]
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.