hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acecb1d2272db73a21d659a806271a11d9b35c63 | 302 | py | Python | api/python/basic/api.py | sk-cm/todo-mariadb | a6e285a03b30d82b946c0964ff36891ea7938468 | [
"MIT"
] | null | null | null | api/python/basic/api.py | sk-cm/todo-mariadb | a6e285a03b30d82b946c0964ff36891ea7938468 | [
"MIT"
] | null | null | null | api/python/basic/api.py | sk-cm/todo-mariadb | a6e285a03b30d82b946c0964ff36891ea7938468 | [
"MIT"
] | null | null | null | import flask
from tasks import tasks
from flask_cors import CORS
# Minimal Flask API: mounts the 'tasks' blueprint, enables permissive
# CORS for every route, and exposes a version endpoint.
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.register_blueprint(tasks)
# Allow cross-origin requests from any origin on all resources.
cors = CORS(app, resources={r"*": {"origins": "*"}})
@app.route("/api/version")
def version():
    """Return the API version string."""
    return "1.0"
# Development server; not suitable for production deployment.
app.run(host="0.0.0.0", port=8080)
| 17.764706 | 52 | 0.682119 |
acecb1ef4e4e0108a911998deda1b3047796697b | 4,821 | py | Python | code/04_persons_depth.py | MxFxM/TGMB | 1367703287b4748aaf725445f19690ef7e3679ab | [
"MIT"
] | null | null | null | code/04_persons_depth.py | MxFxM/TGMB | 1367703287b4748aaf725445f19690ef7e3679ab | [
"MIT"
] | null | null | null | code/04_persons_depth.py | MxFxM/TGMB | 1367703287b4748aaf725445f19690ef7e3679ab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
import credentials
import mariadb
# Connect to the MariaDB instance that backs the Grafana dashboard;
# abort the program if the database is unreachable.
try:
    conn = mariadb.connect(
        user = credentials.mdb_user,
        password = credentials.mdb_password,
        host = "192.168.178.61",
        port = 3306,
        database = "grafana"
    )
except mariadb.Error as e:
    print(f"Error with MariaDB: {e}")
    sys.exit(1)
# Cursor used later in the main loop to insert detection results.
cursor = conn.cursor()
# Get argument first
# Path to the MobileNet-SSD blob; may be overridden by the first CLI argument.
mobilenet_path = str((Path(__file__).parent / Path('models/mobilenet.blob')).resolve().absolute())
if len(sys.argv) > 1:
    mobilenet_path = sys.argv[1]
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setInterleaved(False)
cam_rgb.setFps(1)
# Define a neural network that will make predictions based on the source frames
detectionNetwork = pipeline.createMobileNetDetectionNetwork()
detectionNetwork.setConfidenceThreshold(0.8) # increased threshold, because there is no threshold later on
detectionNetwork.setBlobPath(mobilenet_path)
detectionNetwork.setNumInferenceThreads(2)
detectionNetwork.input.setBlocking(False)
cam_rgb.preview.link(detectionNetwork.input)
# Create outputs
#xout_rgb = pipeline.createXLinkOut()
#xout_rgb.setStreamName("rgb")
#detectionNetwork.passthrough.link(xout_rgb.input)
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("rgb_nn")
detectionNetwork.out.link(xout_nn.input)
# Define 2 more sources
# Stereo pair of mono cameras used for disparity-based depth estimation.
cam_right = pipeline.createMonoCamera()
cam_right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
cam_right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) # reduced resolution
cam_left = pipeline.createMonoCamera()
cam_left.setBoardSocket(dai.CameraBoardSocket.LEFT)
cam_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
# depth by disparity
depth = pipeline.createStereoDepth()
depth.setConfidenceThreshold(200)
depth.setOutputRectified(True) # mirror image
depth.setRectifyEdgeFillColor(0) # black on the edges
cam_left.out.link(depth.left)
cam_right.out.link(depth.right)
# depth output
xout_depth = pipeline.createXLinkOut()
xout_depth.setStreamName("depth")
depth.disparity.link(xout_depth.input)
# Pipeline defined, now the device is connected to
with dai.Device(pipeline) as device:
    # Start pipeline
    device.startPipeline()
    # Output queues will be used to get the rgb frames, depth info and nn data from the outputs defined above
    #q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    q_depth = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
    q_nn = device.getOutputQueue(name="rgb_nn", maxSize=4, blocking=False)
    frame_depth = None
    bboxes = []
    # Main loop: for every synced NN/depth pair, count detected persons,
    # estimate each person's distance from the depth crop, and log the
    # result to the MariaDB 'peopledetector' table.
    while True:
        # sync inputs (since tryGet() is not used)
        in_nn = q_nn.get()
        in_depth = q_depth.get()
        if in_nn is not None:
            bboxes = in_nn.detections
        if in_depth is not None:
            # Reinterpret raw disparity bytes as an 8-bit image, then colorize.
            frame_depth = in_depth.getData().reshape((in_depth.getHeight(), in_depth.getWidth())).astype(np.uint8)
            frame_depth = np.ascontiguousarray(frame_depth)
            frame_depth = cv2.applyColorMap(frame_depth, cv2.COLORMAP_JET)
        color = (255, 255, 255)
        if frame_depth is not None and in_nn is not None: # should be the case since .get() is used to sync inputs
            person_count = 0
            distances = []
            height = frame_depth.shape[0]
            width = frame_depth.shape[1]
            for bbox in bboxes:
                # Label 15 is 'person' in the MobileNet-SSD VOC class list.
                if bbox.label == 15:
                    person_count = person_count + 1
                    # Scale normalized bbox coordinates to pixel positions.
                    x1 = int(bbox.xmin * width)
                    x2 = int(bbox.xmax * width)
                    y1 = int(bbox.ymin * height)
                    y2 = int(bbox.ymax * height)
                    crop_frame = frame_depth[y1:y2, x1:x2]
                    #cv2.imshow("depth_crop", crop_frame)
                    distances.append(cv2.mean(crop_frame)[1])
                    #print(f"Person {person_count} at {distances[-1]}") # BGR, green channel is depth (more or less)
            #print(f"{person_count} persons")
            closest = 0
            if person_count != 0:
                # Larger mean disparity value corresponds to a nearer person.
                closest = np.max(np.array(distances))
                #print(f"The closest person is at {closest}")
            try:
                cursor.execute("INSERT INTO peopledetector (time, count, closest) VALUES (?, ?, ?)",
                    (time.time(), person_count, closest))
                conn.commit()
            except Exception as e:
                print(f"Commit error with MariaDB: {e}")
            #print()
            if cv2.waitKey(1) == ord('q'):
                break
| 34.934783 | 116 | 0.665007 |
acecb2258efa3397602f513770e2f8bec6d1d336 | 24,164 | py | Python | tools/generate-wire.py | GlenCooper/lightning | 602b81fef2f47834aa815d723d0f210af80062ce | [
"MIT"
] | null | null | null | tools/generate-wire.py | GlenCooper/lightning | 602b81fef2f47834aa815d723d0f210af80062ce | [
"MIT"
] | null | null | null | tools/generate-wire.py | GlenCooper/lightning | 602b81fef2f47834aa815d723d0f210af80062ce | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# Script to parse spec output CSVs and produce C files.
# Released by lisa neigut under CC0:
# https://creativecommons.org/publicdomain/zero/1.0/
#
# Reads from stdin, outputs C header or body file.
#
# Standard message types:
# msgtype,<msgname>,<value>[,<option>]
# msgdata,<msgname>,<fieldname>,<typename>,[<count>][,<option>]
#
# TLV types:
# tlvtype,<tlvstreamname>,<tlvname>,<value>[,<option>]
# tlvdata,<tlvstreamname>,<tlvname>,<fieldname>,<typename>,[<count>][,<option>]
#
# Subtypes:
# subtype,<subtypename>
# subtypedata,<subtypename>,<fieldname>,<typename>,[<count>]
from argparse import ArgumentParser, REMAINDER
from collections import OrderedDict
import copy
import fileinput
from mako.template import Template
import os
import re
import sys
# Generator to give us one line at a time.
def next_line(args, lines):
    """Yield (1-based line number, stripped line) pairs.

    When no explicit *lines* iterable is supplied, fall back to reading
    from the files named in *args* (or stdin) via fileinput.
    """
    source = fileinput.input(args) if lines is None else lines
    lineno = 0
    for raw in source:
        lineno += 1
        yield lineno, raw.strip()
# Class definitions, to keep things classy
class Field(object):
    """One data field of a message/subtype/TLV record.

    Tracks the field's element type, its count (fixed array size,
    length-field reference, or implicit 'rest of TLV' length), whether
    it is optional, and any extension names it belongs to.
    """
    def __init__(self, name, type_obj, extensions=[],
                 field_comments=[], optional=False):
        self.name = name
        self.type_obj = type_obj
        # count semantics: 1 = scalar, >1 = fixed array,
        # False = variable length (see len_field / implicit_len).
        self.count = 1
        self.len_field_of = None
        self.len_field = None
        self.implicit_len = False
        self.extension_names = extensions
        self.is_optional = optional
        self.field_comments = field_comments
    def __deepcopy__(self, memo):
        # Custom deepcopy: temporarily drop this hook so copy.deepcopy
        # does a normal copy, then share (not copy) the type object.
        deepcopy_method = self.__deepcopy__
        self.__deepcopy__ = None
        field = copy.deepcopy(self, memo)
        self.__deepcopy__ = deepcopy_method
        field.type_obj = self.type_obj
        return field
    def add_count(self, count):
        # Fixed-size array (or scalar when count == 1).
        self.count = int(count)
    def add_len_field(self, len_field):
        # Variable length: size given by another field of the same set.
        self.count = False
        # we cache our len-field's name
        self.len_field = len_field.name
        # the len-field caches our name
        len_field.len_field_of = self.name
    def add_implicit_len(self):
        # Variable length: field extends to the end of the TLV record.
        self.count = False
        self.implicit_len = True
    def is_array(self):
        return self.count > 1
    def is_varlen(self):
        return not self.count
    def is_implicit_len(self):
        return self.implicit_len
    def is_extension(self):
        return bool(self.extension_names)
    def size(self, implicit_expression=None):
        """Return the C expression/count describing this field's size."""
        if self.count:
            return self.count
        if self.len_field:
            return self.len_field
        assert self.is_implicit_len()
        assert implicit_expression
        return implicit_expression
    def needs_context(self):
        """ A field needs a context if it's varsized """
        return self.is_varlen() or self.type_obj.needs_context()
    def arg_desc_to(self):
        """C parameter snippet for the 'towire' (serialize) direction."""
        if self.len_field_of:
            return ''
        type_name = self.type_obj.type_name()
        if self.is_array():
            return ', const {} {}[{}]'.format(type_name, self.name, self.count)
        if self.type_obj.is_assignable() and not self.is_varlen():
            name = self.name
            if self.is_optional:
                name = '*' + name
            return ', {} {}'.format(type_name, name)
        if self.is_varlen() and self.type_obj.is_varsize():
            return ', const {} **{}'.format(type_name, self.name)
        return ', const {} *{}'.format(type_name, self.name)
    def arg_desc_from(self):
        """C parameter snippet for the 'fromwire' (deserialize) direction."""
        type_name = self.type_obj.type_name()
        if self.type_obj.is_const_ptr_ptr_type():
            return ', const {} **{}'.format(type_name, self.name)
        if self.len_field_of:
            return ''
        if self.is_array():
            return ', {} {}[{}]'.format(type_name, self.name, self.count)
        # Each of varlen/optional/varsize adds one level of indirection.
        ptrs = '*'
        if self.is_varlen() or self.is_optional or self.type_obj.is_varsize():
            ptrs += '*'
        if self.is_varlen() and self.type_obj.is_varsize():
            ptrs += '*'
        return ', {} {}{}'.format(type_name, ptrs, self.name)
class FieldSet(object):
    """Ordered collection of Fields; base class for Type and Message."""
    def __init__(self):
        # Insertion order matters: wire layout follows definition order.
        self.fields = OrderedDict()
        self.len_fields = {}
    def add_data_field(self, field_name, type_obj, count=1,
                       extensions=[], comments=[], optional=False,
                       implicit_len_ok=False):
        """Append a field; *count* may be an int, the name of a previously
        added length field, or '...' (rest-of-TLV, if implicit_len_ok)."""
        field = Field(field_name, type_obj, extensions=extensions,
                      field_comments=comments, optional=optional)
        if bool(count):
            try:
                field.add_count(int(count))
            except ValueError:
                # Not a number: must be a length-field name or '...'.
                if count in self.fields:
                    len_field = self.find_data_field(count)
                    field.add_len_field(len_field)
                    self.len_fields[len_field.name] = len_field
                else:
                    # '...' means "rest of TLV"
                    assert implicit_len_ok
                    assert count == '...'
                    field.add_implicit_len()
        # You can't have any fields after an implicit-length field.
        if len(self.fields) != 0:
            assert not self.fields[next(reversed(self.fields))].is_implicit_len()
        self.fields[field_name] = field
    def find_data_field(self, field_name):
        return self.fields[field_name]
    def get_len_fields(self):
        return list(self.len_fields.values())
    def has_len_fields(self):
        return bool(self.len_fields)
    def needs_context(self):
        # A tal context is required if any field allocates or is optional.
        return any([field.needs_context() or field.is_optional for field in self.fields.values()])
class Type(FieldSet):
    """A wire type: either a primitive/typedef, an externally defined
    struct, a subtype (with its own fields), or a TLV container."""
    # Types that can be assigned directly in C (no memcpy/helper needed).
    assignables = [
        'u8',
        'u16',
        'u32',
        'u64',
        'tu16',
        'tu32',
        'tu64',
        'bool',
        'amount_sat',
        'amount_msat',
        'int',
        'bigsize',
        'varint'
    ]
    # Names emitted verbatim (no 'struct '/'enum ' prefix).
    typedefs = [
        'u8',
        'u16',
        'u32',
        'u64',
        'bool',
        'secp256k1_ecdsa_signature',
        'secp256k1_ecdsa_recoverable_signature',
        'wirestring',
        'double',
        'int',
        'bigsize',
        'varint',
    ]
    # Truncated integers: the leading 't' is dropped for the C type.
    truncated_typedefs = [
        'tu16',
        'tu32',
        'tu64',
    ]
    # Externally defined variable size types (require a context)
    varsize_types = [
        'peer_features',
        'gossip_getnodes_entry',
        'gossip_getchannels_entry',
        'failed_htlc',
        'utxo',
        'bitcoin_tx',
        'wirestring',
        'per_peer_state',
        'bitcoin_tx_output',
        'exclude_entry',
        'fee_states',
        'onionreply',
    ]
    # Some BOLT types are re-typed based on their field name
    # ('fieldname partial', 'original type', 'outer type'): ('true type', 'collapse array?')
    name_field_map = {
        ('txid', 'sha256'): ('bitcoin_txid', False),
        ('amt', 'u64'): ('amount_msat', False),
        ('msat', 'u64'): ('amount_msat', False),
        ('satoshis', 'u64'): ('amount_sat', False),
        ('node_id', 'pubkey', 'channel_announcement'): ('node_id', False),
        ('node_id', 'pubkey', 'node_announcement'): ('node_id', False),
        ('temporary_channel_id', 'u8'): ('channel_id', True),
        ('secret', 'u8'): ('secret', True),
        ('preimage', 'u8'): ('preimage', True),
    }
    # For BOLT specified types, a few type names need to be simply 'remapped'
    # 'original type': 'true type'
    name_remap = {
        'byte': 'u8',
        'signature': 'secp256k1_ecdsa_signature',
        'chain_hash': 'bitcoin_blkid',
        'point': 'pubkey',
        # FIXME: omits 'pad'
    }
    # Types that are const pointer-to-pointers, such as chainparams, i.e.,
    # they set a reference to some const entry.
    const_ptr_ptr_types = [
        'chainparams'
    ]
    @staticmethod
    def true_type(type_name, field_name=None, outer_name=None):
        """ Returns 'true' type of a given type and a flag if
        we've remapped a variable size/array type to a single struct
        (an example of this is 'temporary_channel_id' which is specified
        as a 32*byte, but we re-map it to a channel_id
        """
        if type_name in Type.name_remap:
            type_name = Type.name_remap[type_name]
        if field_name:
            for t, true_type in Type.name_field_map.items():
                if t[0] in field_name and t[1] == type_name:
                    # 3-tuples additionally constrain the enclosing message.
                    if len(t) == 2 or outer_name == t[2]:
                        return true_type
        return (type_name, False)
    def __init__(self, name):
        FieldSet.__init__(self)
        self.name, self.is_enum = self.parse_name(name)
        # Types this type references, keyed by name (for ordering).
        self.depends_on = {}
        self.type_comments = []
        self.tlv = False
    def parse_name(self, name):
        # 'enum foo' declares an enum type named 'foo'.
        if name.startswith('enum '):
            return name[5:], True
        return name, False
    def add_data_field(self, field_name, type_obj, count=1,
                       extensions=[], comments=[], optional=False):
        FieldSet.add_data_field(self, field_name, type_obj, count,
                                extensions=extensions,
                                comments=comments, optional=optional)
        if type_obj.name not in self.depends_on:
            self.depends_on[type_obj.name] = type_obj
    def type_name(self):
        """Return the C type name, with 'struct '/'enum ' prefix as needed."""
        if self.name in self.typedefs:
            return self.name
        if self.name in self.truncated_typedefs:
            return self.name[1:]
        if self.is_enum:
            prefix = 'enum '
        else:
            prefix = 'struct '
        return prefix + self.struct_name()
    # We only accelerate the u8 case: it's common and trivial.
    def has_array_helper(self):
        return self.name in ['u8']
    def struct_name(self):
        if self.is_tlv():
            return self.tlv.struct_name()
        return self.name
    def subtype_deps(self):
        return [dep for dep in self.depends_on.values() if dep.is_subtype()]
    def is_subtype(self):
        # Only subtypes carry their own field definitions.
        return bool(self.fields)
    def is_const_ptr_ptr_type(self):
        return self.name in self.const_ptr_ptr_types
    def is_truncated(self):
        return self.name in self.truncated_typedefs
    def needs_context(self):
        return self.is_varsize()
    def is_assignable(self):
        """ Generally typedef's and enums """
        return self.name in self.assignables or self.is_enum
    def is_varsize(self):
        """ A type is variably sized if it's marked as such (in varsize_types)
        or it contains a field of variable length """
        return self.name in self.varsize_types or self.has_len_fields()
    def add_comments(self, comments):
        self.type_comments = comments
    def mark_tlv(self, tlv):
        # Link this type to the Tlv object of the same name.
        self.tlv = tlv
    def is_tlv(self):
        return bool(self.tlv)
class Message(FieldSet):
    """A wire message: a named field set plus its numeric message type."""
    def __init__(self, name, number, option=[], enum_prefix='wire',
                 struct_prefix=None, comments=[]):
        FieldSet.__init__(self)
        self.name = name
        self.number = number
        self.enum_prefix = enum_prefix
        # Only the first option (feature dependency), if any, is recorded.
        self.option = option[0] if len(option) else None
        self.struct_prefix = struct_prefix
        # enumname overrides self.name for enum generation (used by
        # extension messages, which keep the original enum constant).
        self.enumname = None
        self.msg_comments = comments
        # Token of a '#if' guard wrapping this message, if any.
        self.if_token = None
    def has_option(self):
        return self.option is not None
    def enum_name(self):
        """Upper-cased C enum constant, e.g. '<PREFIX>_<NAME>'."""
        name = self.enumname if self.enumname else self.name
        return "{}_{}".format(self.enum_prefix, name).upper()
    def struct_name(self):
        if self.struct_prefix:
            return self.struct_prefix + "_" + self.name
        return self.name
    def add_if(self, if_token):
        self.if_token = if_token
class Tlv(object):
    """A TLV stream: a named collection of TLV record types (messages)."""
    def __init__(self, name):
        self.name = name
        self.messages = {}
    def add_message(self, tokens, comments=[]):
        """ tokens -> (name, value[, option]) """
        self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
                                           enum_prefix=self.name,
                                           struct_prefix=self.struct_name(),
                                           comments=comments)
    def type_name(self):
        return 'struct ' + self.struct_name()
    def struct_name(self):
        return "tlv_{}".format(self.name)
    def find_message(self, name):
        return self.messages[name]
    def ordered_msgs(self):
        # TLV records must be serialized in ascending type-number order.
        return sorted(self.messages.values(), key=lambda item: int(item.number))
class Master(object):
    """Coordinator that collects all parsed entities (types, TLVs,
    messages) and renders the final output through a Mako template.

    NOTE(review): these containers are class attributes, i.e. shared by
    all instances; the script only ever creates one Master, so this is
    harmless in practice.
    """
    types = {}
    tlvs = {}
    messages = {}
    extension_msgs = {}
    inclusions = []
    top_comments = []
    def add_comments(self, comments):
        self.top_comments += comments
    def add_include(self, inclusion):
        self.inclusions.append(inclusion)
    def add_tlv(self, tlv_name):
        """Register (or fetch) a TLV stream and its companion type."""
        if tlv_name not in self.tlvs:
            self.tlvs[tlv_name] = Tlv(tlv_name)
        if tlv_name not in self.types:
            self.types[tlv_name] = Type(tlv_name)
        return self.tlvs[tlv_name]
    def add_message(self, tokens, comments=[]):
        """ tokens -> (name, value[, option])"""
        self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
                                           comments=comments)
    def add_extension_msg(self, name, msg):
        self.extension_msgs[name] = msg
    def add_type(self, type_name, field_name=None, outer_name=None):
        """Register (or fetch) a type; returns (type, collapsed?, optional?)."""
        optional = False
        # A leading '?' marks the field as optional.
        if type_name.startswith('?'):
            type_name = type_name[1:]
            optional = True
        # Check for special type name re-mapping
        type_name, collapse_original = Type.true_type(type_name, field_name,
                                                      outer_name)
        if type_name not in self.types:
            self.types[type_name] = Type(type_name)
        return self.types[type_name], collapse_original, optional
    def find_type(self, type_name):
        return self.types[type_name]
    def find_message(self, msg_name):
        if msg_name in self.messages:
            return self.messages[msg_name]
        if msg_name in self.extension_msgs:
            return self.extension_msgs[msg_name]
        return None
    def find_tlv(self, tlv_name):
        return self.tlvs[tlv_name]
    def get_ordered_subtypes(self):
        """ We want to order subtypes such that the 'no dependency'
        types are printed first """
        subtypes = [s for s in self.types.values() if s.is_subtype()]
        # Start with subtypes without subtype dependencies
        sorted_types = [s for s in subtypes if not len(s.subtype_deps())]
        unsorted = [s for s in subtypes if len(s.subtype_deps())]
        # Repeatedly pull in every subtype whose dependencies are sorted.
        while len(unsorted):
            names = [s.name for s in sorted_types]
            for s in list(unsorted):
                if all([dependency.name in names for dependency in s.subtype_deps()]):
                    sorted_types.append(s)
                    unsorted.remove(s)
        return sorted_types
    def tlv_messages(self):
        return [m for tlv in self.tlvs.values() for m in tlv.messages.values()]
    def find_template(self, options):
        # Templates live in gen/ next to this script; 'print_' prefix
        # selects the wire-printing variant.
        dirpath = os.path.dirname(os.path.abspath(__file__))
        filename = dirpath + '/gen/{}{}_template'.format(
            'print_' if options.print_wire else '', options.page)
        return Template(filename=filename)
    def post_process(self):
        """ method to handle any 'post processing' that needs to be done.
        for now, we just need match up types to TLVs """
        for tlv_name, tlv in self.tlvs.items():
            if tlv_name in self.types:
                self.types[tlv_name].mark_tlv(tlv)
    def write(self, options, output):
        """Render everything collected so far into *output*."""
        template = self.find_template(options)
        enum_sets = []
        enum_sets.append({
            'name': options.enum_name,
            'set': self.messages.values(),
        })
        stuff = {}
        stuff['top_comments'] = self.top_comments
        stuff['options'] = options
        # Header guard identifier derived from the header file name.
        stuff['idem'] = re.sub(r'[^A-Z]+', '_', options.header_filename.upper())
        stuff['header_filename'] = options.header_filename
        stuff['includes'] = self.inclusions
        stuff['enum_sets'] = enum_sets
        subtypes = self.get_ordered_subtypes()
        stuff['structs'] = subtypes + self.tlv_messages()
        stuff['tlvs'] = self.tlvs
        # We leave out extension messages in the printing pages. Any extension
        # fields will get printed under the 'original' message, if present
        if options.print_wire:
            stuff['messages'] = list(self.messages.values())
        else:
            stuff['messages'] = list(self.messages.values()) + list(self.extension_msgs.values())
        stuff['subtypes'] = subtypes
        print(template.render(**stuff), file=output)
def main(options, args=None, output=sys.stdout, lines=None):
    """Parse the CSV wire-definition input and render the requested page.

    options: parsed command-line arguments (see __main__ block).
    args:    file names to read (or None/stdin), passed to fileinput.
    output:  stream the rendered template is printed to.
    lines:   optional explicit iterable of input lines (for testing).
    """
    genline = next_line(args, lines)
    # Comments ('#...') accumulate here and attach to the next entity.
    comment_set = []
    token_name = None
    # Create a new 'master' that serves as the coordinator for the file generation
    master = Master()
    try:
        while True:
            ln, line = next(genline)
            tokens = line.split(',')
            token_type = tokens[0]
            # Blank line: flush pending comments to the file top.
            if not bool(line):
                master.add_comments(comment_set)
                comment_set = []
                token_name = None
                continue
            if len(tokens) > 2:
                token_name = tokens[1]
            if token_type == 'subtype':
                subtype, _, _ = master.add_type(tokens[1])
                subtype.add_comments(list(comment_set))
                comment_set = []
            elif token_type == 'subtypedata':
                subtype = master.find_type(tokens[1])
                if not subtype:
                    raise ValueError('Unknown subtype {} for data.\nat {}:{}'
                                     .format(tokens[1], ln, line))
                type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
                if optional:
                    raise ValueError('Subtypes cannot have optional fields {}.{}\n at {}:{}'
                                     .format(subtype.name, tokens[2], ln, line))
                # Collapsed types (e.g. 32*byte -> channel_id) become scalars.
                if collapse:
                    count = 1
                else:
                    count = tokens[4]
                subtype.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
                                       optional=optional)
                comment_set = []
            elif token_type == 'tlvtype':
                tlv = master.add_tlv(tokens[1])
                tlv.add_message(tokens[2:], comments=list(comment_set))
                comment_set = []
            elif token_type == 'tlvdata':
                type_obj, collapse, optional = master.add_type(tokens[4], tokens[3], tokens[1])
                if optional:
                    raise ValueError('TLV messages cannot have optional fields {}.{}\n at {}:{}'
                                     .format(tokens[2], tokens[3], ln, line))
                tlv = master.find_tlv(tokens[1])
                if not tlv:
                    raise ValueError('tlvdata for unknown tlv {}.\nat {}:{}'
                                     .format(tokens[1], ln, line))
                msg = tlv.find_message(tokens[2])
                if not msg:
                    raise ValueError('tlvdata for unknown tlv-message {}.\nat {}:{}'
                                     .format(tokens[2], ln, line))
                if collapse:
                    count = 1
                else:
                    count = tokens[5]
                msg.add_data_field(tokens[3], type_obj, count, comments=list(comment_set),
                                   optional=optional, implicit_len_ok=True)
                comment_set = []
            elif token_type == 'msgtype':
                master.add_message(tokens[1:], comments=list(comment_set))
                comment_set = []
            elif token_type == 'msgdata':
                msg = master.find_message(tokens[1])
                if not msg:
                    raise ValueError('Unknown message type {}. {}:{}'.format(tokens[1], ln, line))
                type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
                if collapse:
                    count = 1
                elif len(tokens) < 5:
                    raise ValueError('problem with parsing {}:{}'.format(ln, line))
                else:
                    count = tokens[4]
                # if this is an 'extension' field*, we want to add a new 'message' type
                # in the future, extensions will be handled as TLV's
                #
                # *(in the spec they're called 'optional', but that term is overloaded
                #   in that internal wire messages have 'optional' fields that are treated
                #   differently. for the sake of clarity here, for bolt-wire messages,
                #   we'll refer to 'optional' message fields as 'extensions')
                #
                if tokens[5:] == []:
                    msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
                                       optional=optional)
                else: # is one or more extension fields
                    if optional:
                        raise ValueError("Extension fields cannot be optional. {}:{}"
                                         .format(ln, line))
                    orig_msg = msg
                    for extension in tokens[5:]:
                        extension_name = "{}_{}".format(tokens[1], extension)
                        msg = master.find_message(extension_name)
                        # Lazily clone the base message for each new extension.
                        if not msg:
                            msg = copy.deepcopy(orig_msg)
                            msg.enumname = msg.name
                            msg.name = extension_name
                            master.add_extension_msg(msg.name, msg)
                        msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set), optional=optional)
                    # If this is a print_wire page, add the extension fields to the
                    # original message, so we can print them if present.
                    if options.print_wire:
                        orig_msg.add_data_field(tokens[2], type_obj, count=count,
                                                extensions=tokens[5:],
                                                comments=list(comment_set),
                                                optional=optional)
                comment_set = []
            elif token_type.startswith('#include'):
                master.add_include(token_type)
            elif token_type.startswith('#if'):
                msg = master.find_message(token_name)
                if (msg):
                    if_token = token_type[token_type.index(' ') + 1:]
                    msg.add_if(if_token)
            elif token_type.startswith('#'):
                comment_set.append(token_type[1:])
            else:
                raise ValueError("Unknown token type {} on line {}:{}".format(token_type, ln, line))
    except StopIteration:
        pass
    master.post_process()
    master.write(options, output)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and generate the page.
    parser = ArgumentParser()
    parser.add_argument("-s", "--expose-subtypes", help="print subtypes in header",
                        action="store_true", default=False)
    parser.add_argument("-P", "--print_wire", help="generate wire printing source files",
                        action="store_true", default=False)
    parser.add_argument("--page", choices=['header', 'impl'], help="page to print")
    parser.add_argument('--expose-tlv-type', action='append', default=[])
    parser.add_argument('header_filename', help='The filename of the header')
    parser.add_argument('enum_name', help='The name of the enum to produce')
    parser.add_argument("files", help='Files to read in (or stdin)', nargs=REMAINDER)
    parsed_args = parser.parse_args()
    main(parsed_args, parsed_args.files)
| 35.483113 | 117 | 0.561124 |
acecb248f7d1271b7a25109026f00564ccfc8904 | 252 | py | Python | students/K33421/practical_works/Dzhapua_Esnat/warriors_project/warriors_app/admin.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | students/K33421/practical_works/Dzhapua_Esnat/warriors_project/warriors_app/admin.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | students/K33421/practical_works/Dzhapua_Esnat/warriors_project/warriors_app/admin.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Warrior, Profession, Skill, SkillOfWarrior
# Register your models here.
# Expose the warrior app's models in the Django admin interface.
admin.site.register(Warrior)
admin.site.register(Profession)
admin.site.register(Skill)
admin.site.register(SkillOfWarrior)
| 22.909091 | 62 | 0.81746 |
acecb2835060153a72588eac408a79415e9a8328 | 1,328 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/9_features/numtrees_45/rule_29.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/9_features/numtrees_45/rule_29.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/9_features/numtrees_45/rule_29.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Education, obj[4]: Occupation, obj[5]: Bar, obj[6]: Restaurant20to50, obj[7]: Direction_same, obj[8]: Distance
# {"feature": "Distance", "instances": 23, "metric_value": 0.9656, "depth": 1}
if obj[8]<=2:
# {"feature": "Time", "instances": 19, "metric_value": 0.998, "depth": 2}
if obj[1]>0:
# {"feature": "Occupation", "instances": 13, "metric_value": 0.8905, "depth": 3}
if obj[4]<=5:
# {"feature": "Passanger", "instances": 8, "metric_value": 0.5436, "depth": 4}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
# {"feature": "Coupon", "instances": 3, "metric_value": 0.9183, "depth": 5}
if obj[2]<=0:
return 'False'
elif obj[2]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>5:
# {"feature": "Education", "instances": 5, "metric_value": 0.971, "depth": 4}
if obj[3]<=2:
return 'True'
elif obj[3]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Occupation", "instances": 6, "metric_value": 0.65, "depth": 3}
if obj[4]<=10:
return 'True'
elif obj[4]>10:
return 'False'
else: return 'False'
else: return 'True'
elif obj[8]>2:
return 'False'
else: return 'False'
| 34.947368 | 191 | 0.579066 |
acecb28efb94b8eb73221630842d6ec00dcccb6e | 568 | py | Python | read.py | KrShivanshu/264136_PythonMiniProject | ce9a3843c868a43a714a21b916e283d89e1e7f04 | [
"CC0-1.0"
] | null | null | null | read.py | KrShivanshu/264136_PythonMiniProject | ce9a3843c868a43a714a21b916e283d89e1e7f04 | [
"CC0-1.0"
] | null | null | null | read.py | KrShivanshu/264136_PythonMiniProject | ce9a3843c868a43a714a21b916e283d89e1e7f04 | [
"CC0-1.0"
] | null | null | null | def readList():
file = open("ProductList.txt", "r")
lines = file.readlines()
L = []
for line in lines:
L.append(line.replace("\n", "").split(","))
file.close()
print("Following products are avilable in our Store")
print("--------------------------------------------")
print("PRODUCT\t\tPRICE\t\tQUANTITY")
print("--------------------------------------------")
for i in range(len(L)):
print(L[i][0], "\t\t", L[i][1], "\t\t", L[i][2])
print("--------------------------------------------")
return L
| 35.5 | 58 | 0.399648 |
acecb3bc75987a0d9f81644cdb665d7db82886c4 | 1,022 | py | Python | manage.py | caizhimin/demo | 9b13afee128353f9cb1e7cefe5a9f476ba2f0aa5 | [
"MIT"
] | null | null | null | manage.py | caizhimin/demo | 9b13afee128353f9cb1e7cefe5a9f476ba2f0aa5 | [
"MIT"
] | null | null | null | manage.py | caizhimin/demo | 9b13afee128353f9cb1e7cefe5a9f476ba2f0aa5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Django's command-line entry point for this project.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # This allows easy placement of apps within the interior
    # demo directory.
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(current_path, 'demo'))
    execute_from_command_line(sys.argv)
acecb3ee4b0cfdd13eb68e0c99149e6fc2122ca5 | 105,143 | py | Python | ffpreview.py | irrwahn/ffpreview | 9e44ab7a2454e5ef1448b1c2052d8ef0da13bb35 | [
"BSD-3-Clause"
] | 1 | 2021-05-27T01:53:56.000Z | 2021-05-27T01:53:56.000Z | ffpreview.py | irrwahn/ffpreview | 9e44ab7a2454e5ef1448b1c2052d8ef0da13bb35 | [
"BSD-3-Clause"
] | 1 | 2021-05-22T23:28:22.000Z | 2021-05-22T23:43:47.000Z | ffpreview.py | irrwahn/ffpreview | 9e44ab7a2454e5ef1448b1c2052d8ef0da13bb35 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T11:42:50.000Z | 2021-05-26T11:42:50.000Z | #!/usr/bin/env python3
"""
ffpreview.py
Copyright (c) 2021 Urban Wallasch <irrwahn35@freenet.de>
FFpreview is distributed under the Modified ("3-clause") BSD License.
See `LICENSE` file for more information.
"""
# Program identity and default file names used throughout the application.
_FFPREVIEW_VERSION = '0.4+'
_FFPREVIEW_NAME = 'FFpreview'
_FFPREVIEW_IDX = 'ffpreview.idx'    # per-video thumbnail index file name
_FFPREVIEW_CFG = 'ffpreview.conf'   # configuration file name
_FF_DEBUG = False                   # extra debug behavior when True
_FFPREVIEW_HELP = """
<style>
td {padding: 0.5em 0em 0em 0.5em;}
td.m {font-family: mono;}
</style>
<h3>Keyboard Shortcuts</h3>
<table><tbody><tr>
<td class="m" width="30%">
Up, Down, PgUp, PgDown, Home, End, TAB, Shift+TAB</td>
<td>Navigate thumbnails</td>
</tr><tr>
<td class="m">Double-click, Return, Space</td>
<td>Open video at selected position in paused state</td>
</tr><tr>
<td class="m">Shift+dbl-click, Shift+Return</td>
<td>Play video starting at selected position</td>
</tr><tr>
<td class="m">Mouse-2, Menu, Alt+Return</td>
<td>Open the context menu</td>
</tr><tr>
<td class="m">ESC</td>
<td>Exit full screen view; quit application</td>
</tr><tr>
<td class="m">Ctrl+Q, Ctrl-W</td>
<td>Quit application</td>
</tr><tr>
<td class="m">Alt+Return, F</td>
<td>Toggle full screen view</td>
</tr><tr>
<td class="m">Ctrl+G</td>
<td>Adjust window geometry for optimal fit</td>
</tr><tr>
<td class="m">Ctrl+O</td>
<td>Show open file dialog</td>
</tr><tr>
<td class="m">Ctrl+M</td>
<td>Open thumbnail manager</td>
</tr><tr>
<td class="m">Ctrl+B</td>
<td>Open batch processing dialog</td>
</tr>
</tbody></table>
"""
import sys
_PYTHON_VERSION = float("%d.%d" % (sys.version_info.major, sys.version_info.minor))
if _PYTHON_VERSION < 3.6:
raise Exception ('Need Python version 3.6 or later, got version ' + str(sys.version))
import platform
import io
import os
import signal
import time
import re
import tempfile
import argparse
import json
from configparser import RawConfigParser as ConfigParser
from subprocess import PIPE, Popen, DEVNULL
import shlex
import base64
from copy import deepcopy
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from inspect import currentframe
############################################################
# utility functions
def eprint(lvl, *args, vo=0, **kwargs):
    """Print a diagnostic message to stderr, prefixed with the caller's
    source line number, if the verbosity level permits.

    lvl: message level; printed only when lvl <= effective verbosity.
    vo:  verbosity override, used while the global cfg does not exist yet.
    """
    verbosity = cfg['verbosity'] if 'cfg' in globals() else vo
    if lvl > verbosity:
        return
    caller = currentframe().f_back
    print('LINE %d: ' % caller.f_lineno, file=sys.stderr, end='')
    print(*args, file=sys.stderr, **kwargs)
def hms2s(ts):
    """Convert a '[[HH:]MM:]SS[.fff]' time string to seconds (float).

    With more than three colon-separated fields, only the last three
    are significant (earlier ones are shifted out).
    """
    h = m = s = 0.0
    for part in ts.split(':'):
        h, m, s = m, s, float(part)
    return h * 3600 + m * 60 + s
def s2hms(ts, frac=True, zerohours=False):
    """Format a number of seconds as '[HH:]MM:SS[.fff]'.

    frac:      append milliseconds as '.fff' when True.
    zerohours: always emit the hours field, even when it is zero.
    """
    whole, fraction = divmod(float(ts), 1.0)
    minutes, seconds = divmod(whole, 60)
    hours, minutes = divmod(minutes, 60)
    out = '%02d:' % hours if (hours >= 1 or zerohours) else ''
    out += '%02d:%02d' % (minutes, seconds)
    if frac:
        # '%.3f' yields '0.xxx'; strip the leading zero to get '.xxx'.
        out += ('%.3f' % fraction).lstrip('0')
    return out
def str2bool(s):
    """Lenient conversion of *s* to bool.

    Booleans pass through unchanged; non-empty strings are true when
    they spell a truthy word ('true', '1', 'on', 'y', 'yes', any case);
    everything else (None, numbers, empty strings) is False.
    """
    # isinstance instead of type(s) == type(True): bool cannot be
    # subclassed, so the check is equivalent and idiomatic.
    if isinstance(s, bool):
        return s
    if isinstance(s, str) and s:
        return s.lower() in ('true', '1', 'on', 'y', 'yes')
    return False
def str2int(s):
    """Best-effort conversion of *s* to int.

    Ints pass through (bools deliberately do not — they are handled by
    str2bool); a string yields its leading integer part; anything else,
    including strings with no leading integer, yields 0.
    """
    # type(s) is int (not isinstance) so that bools fall through,
    # matching the original exact-type semantics.
    if type(s) is int:
        return s
    if isinstance(s, str) and s:
        m = re.match(r'^\s*([+-]?\d+)', s)
        # BUG fix: previously a non-matching string (e.g. 'abc') raised
        # AttributeError on None.groups(); treat it as 0 instead.
        if m:
            return int(m.group(1))
    return 0
def str2float(s):
    """Best-effort conversion of *s* to float.

    Floats pass through; a string yields its leading decimal number;
    anything else, including strings with no leading number, yields 0.0.
    """
    if type(s) is float:
        return s
    if isinstance(s, str) and s:
        m = re.match(r'^\s*([+-]?([0-9]+([.][0-9]*)?|[.][0-9]+))', s)
        # BUG fix: previously a non-matching string raised AttributeError
        # on None.groups(); treat it as 0.0 instead.
        if m:
            return float(m.group(1))
    return 0.0
def sfrac2float(s):
    """Convert a fraction string like '30000/1001' (or a plain number)
    to float; a zero, empty or missing denominator counts as 1."""
    num, _, den = s.partition('/')
    denom = str2float(den) if den else 1
    return str2float(num) / (denom or 1)
def hr_size(sz, prec=1):
    """Format a byte count as a human-readable string ('1.5 KiB', ...).

    prec is the number of decimals for scaled values; plain byte counts
    are always printed without decimals.
    """
    units = ['', 'KiB', 'MiB', 'GiB', 'TiB']
    i = 0
    # BUG fix: clamp at the largest unit; previously sizes >= 1024 TiB
    # walked the index past the end of the unit list (IndexError).
    while sz >= 1024 and i < len(units) - 1:
        sz /= 1024
        i += 1
    prec = prec if i else 0
    return '%.*f %s' % (prec, sz, units[i])
def ppdict(dic, excl=()):
    """Pretty-print a dict as 'key: value' lines, skipping None values
    and keys listed in *excl*; returns the stripped result string.

    The default for excl is an immutable tuple — the original mutable
    default list ([]) was an anti-pattern, though never mutated here.
    """
    lines = ['%s: %s' % (k, v) for k, v in dic.items()
             if v is not None and k not in excl]
    # strip() reproduces the original StringIO-based behavior exactly
    # (leading/trailing whitespace of the whole text is removed).
    return '\n'.join(lines).strip()
def proc_running():
    """Return the module-global subprocess handle, or None when no
    subprocess has ever been started."""
    return globals().get('proc')
def kill_proc(p=None):
    """Terminate subprocess *p* (or the module-global `proc` when p is
    None), escalating to kill() if it does not exit within 3 seconds.
    Always leaves the global `proc` set to None and returns None."""
    # The file imports only names from subprocess, never the module, so
    # the original `except subprocess.TimeoutExpired` raised NameError
    # at the worst possible moment; import the exception locally.
    from subprocess import TimeoutExpired
    global proc
    if p is not None or 'proc' not in globals():
        # explicit argument (or no global yet): operate on p
        proc = p
    if proc is not None:
        eprint(1, 'killing subprocess: %s' % proc.args)
        proc.terminate()
        try:
            proc.wait(timeout=3)
        except TimeoutExpired:
            proc.kill()
    proc = None
    return None
def die(rc):
    """Clean up (kill any running subprocess, stop the debug thread if
    one was started) and exit the process with return code rc."""
    global _ffdbg_run
    kill_proc()
    if '_ffdbg_thread' in globals():
        _ffdbg_run = False
        eprint(0, 'waiting for debug thread to finish')
        globals()['_ffdbg_thread'].join()
    sys.exit(rc)
def sig_handler(signum, frame):
    """Signal handler: report the received signal and shut down cleanly."""
    msg = 'ffpreview caught signal %d, exiting.' % signum
    eprint(0, msg)
    die(signum)
############################################################
# configuration
class ffConfig:
    """ Configuration class with only class attributes, not instantiated."""
    # Effective configuration dict; populated lazily by init() on first use.
    cfg = None
    # Built-in defaults; overridden by the config file, then the command line.
    cfg_dflt = {
        'conffile': _FFPREVIEW_CFG,
        'vid': [''],
        'outdir': '',
        'grid_columns': 5,
        'grid_rows': 4,
        'thumb_width': 192,
        'ffprobe': 'ffprobe',
        'ffmpeg': 'ffmpeg',
        # %t and %f are placeholders for start time and file name.
        'player': 'mpv --no-ordered-chapters --start=%t %f',
        'plpaused': 'mpv --no-ordered-chapters --start=%t --pause %f',
        'force': 'False',
        'reuse': 'False',
        'method': 'iframe',
        'frame_skip': 200,
        'time_skip': 60,
        'scene_thresh': '0.2',
        'customvf': 'scdet=s=1:t=12',
        'start': 0,
        'end': 0,
        'addss': -1,
        'verbosity': 0,
        'batch': 0,
        'manage': 0,
        'platform': platform.system(),
        'env': os.environ.copy(),
        'vformats': '*.3g2 *.3gp *.asf *.avi *.divx *.evo *.f4v *.flv '
                    '*.m2p *.m2ts *.mkv *.mk3d *.mov *.mp4 *.mpeg *.mpg '
                    '*.ogg *.ogv *.ogv *.qt *.rmvb *.vob *.webm *.wmv'
    }

    def __new__(cls):
        """Initialize the configuration on first use and return the class
        itself -- no instance is ever created."""
        if cls.cfg is None:
            cls.init()
        return cls

    @classmethod
    def init(cls):
        """Assemble the effective configuration: built-in defaults, then
        config file values, then command line options (highest wins)."""
        # initialize default values
        if cls.cfg_dflt['platform'] == 'Windows':
            # allow bundled ffmpeg/ffprobe next to the script to be found
            cls.cfg_dflt['env']['PATH'] = sys.path[0] + os.pathsep + cls.cfg_dflt['env']['PATH']
        cfg = cls.get_defaults()
        # parse command line arguments
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawTextHelpFormatter,
            description='Generate interactive video thumbnail preview.',
            epilog=' The -C, -i, -N, -n and -s options are mutually exclusive. If more\n'
                   ' than one is supplied: -C beats -i beats -N beats -n beats -s.\n\n'
                   ' The -r option causes ffpreview to ignore any of the -w, -C, -i\n'
                   ' -N, -n and -s options, provided that filename, duration, start\n'
                   ' and end times match, and the index file appears to be healthy.\n'
                   '\nwindow controls:\n'
                   ' ESC leave full screen view, quit application\n'
                   ' Ctrl+Q, Ctrl-W quit application\n'
                   ' Alt+Return, F toggle full screen view\n'
                   ' Ctrl+G adjust window geometry for optimal fit\n'
                   ' Ctrl+O show open file dialog\n'
                   ' Ctrl+M open thumbnail manager\n'
                   ' Ctrl+B open batch processing dialog\n'
                   ' Ctrl+Alt+P open preferences dialog\n'
                   ' Alt+H open about dialog\n'
                   ' Double-click,\n'
                   ' Return, Space open video at selected position in paused state\n'
                   ' Shift+dbl-click,\n'
                   ' Shift+Return play video starting at selected position\n'
                   ' Mouse-2, Menu,\n'
                   ' Ctrl+Return open the context menu\n'
                   ' Up, Down,\n'
                   ' PgUp, PgDown,\n'
                   ' Home, End,\n'
                   ' TAB, Shift+TAB move highlighted selection marker\n'
        )
        parser.add_argument('filename', nargs='*', default=[os.getcwd()], help='input video file')
        parser.add_argument('-b', '--batch', action='count', help='batch mode, do not draw window')
        parser.add_argument('-m', '--manage', action='count', help='start with thumbnail manager')
        parser.add_argument('-c', '--config', metavar='F', help='read configuration from file F')
        parser.add_argument('-g', '--grid', metavar='G', help='set grid geometry in COLS[xROWS] format')
        parser.add_argument('-w', '--width', type=int, metavar='N', help='thumbnail image width in pixel')
        parser.add_argument('-o', '--outdir', metavar='P', help='set thumbnail parent directory to P')
        parser.add_argument('-f', '--force', action='count', help='force thumbnail and index rebuild')
        parser.add_argument('-r', '--reuse', action='count', help='reuse filter settings from index file')
        parser.add_argument('-i', '--iframe', action='count', help='select only I-frames (default)')
        parser.add_argument('-n', '--nskip', type=int, metavar='N', help='select only every Nth frame')
        parser.add_argument('-N', '--nsecs', type=float, metavar='F', help='select one frame every F seconds')
        parser.add_argument('-s', '--scene', type=float, metavar='F', help='select by scene change threshold; 0 < F < 1')
        parser.add_argument('-C', '--customvf', metavar='S', help='select frames using custom filter string S')
        parser.add_argument('-S', '--start', metavar='T', help='start video analysis at time T')
        parser.add_argument('-E', '--end', metavar='T', help='end video analysis at time T')
        parser.add_argument('-a', '--addss', nargs='?', type=int, const=0, metavar='N', help='add subtitles from stream N')
        parser.add_argument('-v', '--verbose', action='count', help='be more verbose; repeat to increase')
        parser.add_argument('--version', action='count', help='print version info and exit')
        args = parser.parse_args()
        # if requested print only version and exit
        if args.version:
            print('ffpreview version %s running on python %.1f.x (%s)'
                % (_FFPREVIEW_VERSION, _PYTHON_VERSION, cfg['platform']))
            die(0)
        # parse config file
        vo = args.verbose if args.verbose else 0
        if args.config:
            # explicit -c file wins over the default search path
            cfg['conffile'] = args.config
            cls.load_cfgfile(cfg, cfg['conffile'], vo)
        else:
            # search script directory, then platform config directories
            cdirs = [ os.path.dirname(os.path.realpath(__file__)) ]
            if os.environ.get('APPDATA'):
                cdirs.append(os.environ.get('APPDATA'))
            if os.environ.get('XDG_CONFIG_HOME'):
                cdirs.append(os.environ.get('XDG_CONFIG_HOME'))
            if os.environ.get('HOME'):
                cdirs.append(os.path.join(os.environ.get('HOME'), '.config'))
            for d in cdirs:
                cf = os.path.join(d, _FFPREVIEW_CFG)
                if not os.path.exists(cf):
                    eprint(2, 'no such file:', cf, vo=vo)
                    continue
                # first successfully parsed file wins
                if cls.load_cfgfile(cfg, cf, vo):
                    cfg['conffile'] = cf
                    break
        # evaluate remaining command line args
        cfg['vid'] = args.filename
        if args.outdir:
            cfg['outdir'] = args.outdir
        if args.start:
            cfg['start'] = hms2s(args.start)
        if args.end:
            cfg['end'] = hms2s(args.end)
        if args.addss is not None:
            cfg['addss'] = args.addss
        if args.grid:
            grid = re.split(r'[xX,;:]', args.grid)
            cfg['grid_columns'] = int(grid[0])
            if len(grid) > 1:
                cfg['grid_rows'] = int(grid[1])
        if args.width:
            cfg['thumb_width'] = args.width
        if args.force:
            cfg['force'] = True
        if args.reuse:
            cfg['reuse'] = True
        # frame selection method; checked in ascending precedence so the
        # last matching option overwrites the earlier ones (-C beats -i
        # beats -N beats -n beats -s, as documented in the epilog)
        if args.scene:
            cfg['method'] = 'scene'
            cfg['scene_thresh'] = args.scene
        if args.nskip:
            cfg['method'] = 'skip'
            cfg['frame_skip'] = args.nskip
        if args.nsecs:
            cfg['method'] = 'time'
            cfg['time_skip'] = args.nsecs
        if args.iframe:
            cfg['method'] = 'iframe'
        if args.customvf:
            cfg['method'] = 'customvf'
            cfg['customvf'] = args.customvf
        if args.verbose:
            cfg['verbosity'] = args.verbose
        if args.batch:
            cfg['batch'] = args.batch
        if args.manage:
            cfg['manage'] = args.manage
        # commit to successfully prepared config
        cls.fixup_cfg(cfg)
        return cls.set(cfg)

    @classmethod
    def load_cfgfile(cls, cfg, fname, vo=1):
        """Merge the [Default] section of config file *fname* into *cfg*;
        return True on success, False when the file cannot be parsed."""
        fconf = ConfigParser(allow_no_value=True, defaults=cfg)
        try:
            cf = fconf.read(fname)
            for option in fconf.options('Default'):
                cfg[option] = fconf.get('Default', option)
        except Exception as e:
            eprint(1, str(e), '(config file', fname, 'corrupt?)', vo=vo)
            return False
        eprint(1, 'read config from', fname, vo=vo)
        return cls.fixup_cfg(cfg)

    @classmethod
    def fixup_cfg(cls, cfg):
        """Normalize option types (config files deliver strings) and
        prepare the thumbnail output directory; always returns True."""
        # prepare output directory
        if not cfg['outdir']:
            cfg['outdir'] = tempfile.gettempdir()
        cfg['outdir'] = make_outdir(cfg['outdir'])
        eprint(1, 'outdir =', cfg['outdir'])
        # fix up types of non-string options
        cfg['force'] = str2bool(cfg['force'])
        cfg['reuse'] = str2bool(cfg['reuse'])
        cfg['grid_rows'] = str2int(cfg['grid_rows'])
        cfg['grid_columns'] = str2int(cfg['grid_columns'])
        cfg['thumb_width'] = str2int(cfg['thumb_width'])
        cfg['frame_skip'] = str2int(cfg['frame_skip'])
        cfg['time_skip'] = str2float(cfg['time_skip'])
        cfg['scene_thresh'] = str2float(cfg['scene_thresh'])
        cfg['start'] = str2float(cfg['start'])
        cfg['end'] = str2float(cfg['end'])
        cfg['addss'] = str2int(cfg['addss'])
        return True

    @classmethod
    def get(cls):
        """Return the current configuration dict (may be None)."""
        return cls.cfg

    @classmethod
    def set(cls, newcfg=None):
        """Replace the configuration with a deep copy of *newcfg*."""
        if cls.cfg:
            cls.cfg.clear()
        cls.update(newcfg)
        return cls.cfg

    @classmethod
    def update(cls, updcfg=None):
        """Merge a deep copy of *updcfg* into the configuration."""
        if cls.cfg is None:
            cls.cfg = {}
        if updcfg:
            cls.cfg.update(deepcopy(updcfg))
        return cls.cfg

    @classmethod
    def get_defaults(cls):
        """Return a deep copy of the built-in default configuration."""
        return deepcopy(cls.cfg_dflt)
############################################################
# Qt classes
class ffIcon:
    """ Icon resource storage with only class attributes, not instantiated."""
    # One-shot guard: __new__ decodes the images exactly once.
    initialized = False
    # The *_png attributes below are base64-encoded PNG images; __new__
    # turns them into class-level QPixmap/QIcon attributes.
    apply_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABDlBMVEX///8ATwAATAAASQAATQAOaAsBWwEATgARaw4AWwAATgASaxAEUwQATAAVaxITZBIATAAshCMEUwQAVQAXaRUJXwcATQAOaAwDUgMXZhUQXA0ASwACUAIYXRQATgACTgIXVhECTgIaVBIATQAC
TQIcUBQCSAIcUBEATAAATQAATgB2tWOay3u26qF5uGGTxnCZ0pCZ0I+QwW+m0HdYoEKRxWmJxnuIwnqQvWuayGhztGSTyGpZn0GOxGB/wGh7u2aWw2xKjTCKwVtksVCPyVxbnD+KwVd3wFV2vFmdyW1OizGDwkpQrCqCxkVkujJdsi2JvUtOgi1/yDVHug5XwhiOx0RU
gy2R3j6Y1UdNfSlq55gUAAAAK3RSTlMAHXIOIe3YDeu8bPWRG+nWa/6QGOf1MtuV5vYzjfc0mvmd+TWg+qP6NkYkIiPNwAAAAIJJREFUGNNjYCAFMDIxo/BZWLXZ2JHlOXR09ThBLC5uHiDJy6dvYGjED2QJCBqbCDEIi5iamVuIigEFxC2trG0kJG3t7B2kpEE6ZByd
nF1c3dw9PGXlIKbJe3n7+Pr5ByjIwcxXDAwKDglVUkbYqBIWHqGqjOwmtUh1DVRXa2oR70MAwogP6KXmWqMAAAAASUVORK5CYII=
"""
    broken_png = """iVBORw0KGgoAAAANSUhEUgAAAIAAAABJCAAAAADQsnFrAAAERUlEQVRo3u2ZTUhUURTHz4uglVvbzMqF7VoIroRw40JaFAQyiW1dBBYTSC1qEc1GzMpw0cJNuJCMCKIEm8hMU9Eio8xhRKMPMoxRRHPQGee0ePPuu/e+cz/ezBMXdTbz3p1zzv93zzt35r77HISDtUMH
rP8f4OABDu9H0sIkwAlbZ4zQCvP9XOZhqxgnkmX49ckFcnIWoVEAOOrymoPLasK3txzHcRyzo4VLqB7IDJChUsahFX9w1ZjTDJAbbjewu2f9y0LYsjs6Wz5AbjZpWbwMGd/hOu2GBijMX7VsBVNqOy92NGUp2zmxrcm3PlwPADBtTeB9vWxWvr6ozTQmtEoREbfcw1NW
ABrhWqm9ApZQXZ/SonlpAdAbzHAvrVVd939pSewGRMSUe7xnBnjtR1bfz2qVc16t9QAwZdcG8iX4ofGdFZeHAQCKiLhnJAj0AOW00qO6yjoAQESccQ8bjQA7BACtGwDYUjnUIyJ+cY/fmQDYNJq0k6IAFpUeb7hUeRPAJzGvnGsk+/OpAmC0dN6Mu+l4sA0MjUhcSURE
7OLzbDCnagKgWzgvVgUgtQT+8DdlCTY5/4UgQLN7OsZ8anyfOq5JWgwA6hIIAePeaJsUxzvVsdgJzmPcABBTlEC6ufSGX4kD8vyE6NLxbT2AGMNdAzpzVhwIpu60agMKQC5BjgQQB1qp8t5gbVBUEgCVu13qgg7O5woNgAprAKhBRBxSEZAAwYXAXGqABFD+0CGehGeI
iLUKAmEg5yXvlkoAAJ8RV58DxaQrACIijvHTKegANCUIGh90Hi2sFLemAzjnZd8IB2Cjjx/pRUVSWpSgxwvZAUhYAbDp6QBYj08jotQForEbtrT909U1NzSmAbAvQdGLSEHaqCxl7+WG5IdT9ne3BAAA6hKw584MHANbW3M/Lq5QvRSyBCygLczz7d2AauUA4fZYHhnD
73gKdXoCFpAMBeDlq1LzG0twaY66dLaWL2XxuIOJBg0E5SrLM1xXAfiK7kqTFkLWXklhC8JMiE2qlodssgAgbfTsRbCz6SVEAHqXjCkGASLYU3vRJGTTTuiBLFqh/tZZx3GapEHiIo1IX3pnxyu58ikCaAkVPyPM4wx/fjNCadcKKoAZqQRdAAAzZWhvxkFnqALwS3CZ
na9ZqxonXjJ344cGyAS6IFLpHm6fTfXI6NnvSGsOAClZyQAQYurGiccmKSU6WZpFbaOFmSeeUJVSuW9gWwKjdO17bbwqv3/T9EEPoBVvMLcQmBOXCZD8YxTXAmyqAB6LY4R045yVtAHAz+zfxn9PBKEk8T6rpg0H4Lr0CTokQF9IbQOAtr0kr3jOrKSw8t6ctnLHA/GK
3r5qgneOKL86zdWiEnXQv7hUv/X7dbRCWd90t2Qqtng+On39PSHxQnigiDgY6St3fY/GfMdkyH/mypeha/0AAKP7oo2IUb2+368e+CcA/gJT9EOt+V1/bgAAAABJRU5ErkJggg==
"""
    error_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAACSVBMVEUAAACEAACOAACTAACUAACQAACLAAB9AACLAACUAgKZCQmNAACGAAB+AACOAACSAACIAABMAACNAACSAACDAACFAACLAAAAAAAAAACPAACVAAB5AABgAACZAACKAAB6AACQAABzAACVAAAcAACo
AACLAAAAAACgAACrAAAgAAByAAC1AACbAAAAAAAAAACZAACwAAAQAAAAAACaAADAAACxAAAyAAAAAABlAACrAAC6AAC8AACzAACIAAAAAAAAAAAqAAA3AAACAAAAAACwNTXFYGDHZWW3RESqHR3biYnnp6fop6fimpq3NTWrERHafXreiYLhi4PhioThh4TfhITbgYHc
gYG4KSmeAADKSELRf2725eLUi3fbfF/ceGDMbWLy29vUiIjNVFSnAwOvAwLPWDrMkn/////58/HEeVi+ZD/u29bctbK+ODe5EBC5CQDRRAfGVRHEknn38e7q2tTXuLCxNhXFIAy7AgCgAAC9EADSRQDaXwDBXAC2jX/TvrmlQAXSSgDLLwC/DQCnAACmAAC+DQDQQQDW
XACxWBLj08307u2tYjLRWQDRQgDCFgCsAAC3BADHMQC1SRLo1s3hx63NpHf28O27aDLMSgC9EQCtAACxAACzFQDMi3jmxq3JbgXUfADaqnPnyri5NwCzAwCuAgCsGwHQgma/TQXOYQDUbADOaQDal1zCVBCxEAC0AAC6AACrAgCqEwCzLAC7PgDASgDBSwC4NgCsCwC2
AACyAACnAgClCQClCwCnBgCvAAC/AAC7AAB8252hAAAARHRSTlMADlF5fV8dC4n0/aseFs7sNwTC6htlowEDzvkUFv5RMW0taBD0Pway5QtC+3UCCIi6DQ6I+rAYCkew7PTBXgweMjYgD4hci68AAAECSURBVBjTY2AAAUYmZhZWNgYYYOfgdHF1c+fi5oHwefk8PL28
vb19fPkFQHxBIT//gMCg4JDQsHBhEaCAaERkVHRMbFx8QmJSspg4g4RkSmpaekZmVnZ6Tm6elDSDjGx+QWFRenFJemlZeUWlHIN8ZVV1TW1denp9Q2NTc4sCg2JrW3tHZ1d6endPb1//BCUGZZWJkyZPSZ86LX36jJmzZqsyqKnPmTsvff6ChYvSFy9ZqqHJIK6lvWz5
ipWrVq9Zu279Bh1dBgY9/Y2bNm/Zum37jp27DAxBTjUyNtm9Z+++/QcOmpqJgz1jbmFpdWijtY2tkTjUu7p29g6OTs4SIDYAHdlQzb5sNMYAAAAASUVORK5CYII=
"""
    delete_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABF1BMVEX///+tAADJYF2vBATSeHWoAAC/NCrKVkyqAACoAADITEPFRz+iAADDSkCcAACOAACyQzqcAACzKx+hPjR7PDK+Sj+YAACvJxuzMSaRAACrJBemHhCKAAClHhKHAACkHxOkHxWAAACkIRpbIRtZ
IRykIx6HDg5/HhZqIx9uAABqAABqIyCWKyuAAACdHxyAIyGBIyGCJBttAADDSknviYfwjYrfaGHmbW3mdnXlc3HpjYnTV1TTVlbXY2HUYl7WYF3LRD/JPj7LTkrCTUjMRkbTZWDFOTLCMzPDOjq/OTPBEQq9AAC+AADQEgzQAADGEgzHEgzfEw/jAADbExDbFBHfFBLl
Lyr1AADvFBLvFRPlMCzlMi3lMi5AN76JAAAANHRSTlMAMupJ5DL0+0hJ+/XQ+0pP/Ej7/P37SPr6SPr6SPpI+vpJ+v7++kvd/VBS/dcy8vz83lHmy4y2TQAAAJ5JREFUGNNjYCAGMDIxgyhmFkYIn5XNhJ2DgYGTy5SbFSzAY2ZuwcvHL2BpbsUDFhAUsraxFRaxs7EX
FYToERN3cHRydnGVEIOZKinl5u7hKS0Jt0ZG1svbx0tWBsaXk/f18w/w81WQg/AVlQKDgpVVQoJCVdXAAuph4REamlrakeFROmABXb1ofU0GBi2DGD1diB5dQyMQpWWsS5RPAZg2FJZPz1t8AAAAAElFTkSuQmCC
"""
    close_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABv1BMVEX////////Pe3u4PDymAgKyJCS1MTGrERG4PT2/WVmoDw+mDw+mEQ+jCgnJe3ueDw+TCgiaBASmCQmtDg65FBOqPDyRDAqUCwuqCwu3GRieIyOUBASmCwu3GhmSCwudCgq0FhWLCQegAACOCwug
CwuxFBKKCQedAACmAACSIyOxFhSKBgKZAACjCwuXPDyHBgKUAACgCwu5GxS9e3uCDg6IAgKUCAiiDQ67HxWhWVmCBQKdWVl4Dg6xe3uKPDxrAAB3DQ15EBBsAQG7SEfMc3PYiorZi4vNdna5QkLJb23QeXewNzelHByuJSO8SUbLcG/CY2G/XVq/V1S5SkbAYWG5U1Cr
KyXCYF7BU06+U1OxSES0TUuqKyavPjelMSvMYVnFRka4PzqGBwWcJiKsOzaqQzqcHhfOUkrMNzfEOjScFRCTHhaMEgmaEgu+HxTLFRXGHRSlEw2QDQSREQWcEgi1FAjHAADLDQSuFgyaEQWXEQWWBQKuGwq8AADIDgWrBgKcEAWfFwiqAAC/FQeACAO/DgarFwiqGgi+
CAO+EwaxFQq8GQ/OHA3QCgO2GQieFQe4GwjMGwnSHArAHAqjFgdfTWjIAAAAQ3RSTlMAAirVzfr70dWT+Pj+/ir47XQcNfDV7hcX3e12Ft75Gd79Z/ku3v1bF+3y/Vtw1f1bF/Aq+HQedvGT/pP4KtXN+fnNf0ybtwAAANpJREFUGNNjYIAARkYGJMDEzMLKxs7BBONz
cjm7uLq5e3BzQvk8nl7ePr5+/gG8YBE+/sAgAUEhYZHgkFB+PqCAaFi4mDiDhGREZFR0jChQQCo2TppBRjY+ITEpOUUKaJ9capq8jEJ6RqaiUla2HCMDo3JOropqXn6BmrpGYZEy0DmaxSVapWXl2joMuhWVmkAz9Kqqa2rr9A0YDI3qG/SAAsYmjU3NLaZm5hatbSbG
IIdYWrV3dHZ19/T2WVlCnGpt0z9h4qTJU2ysYZ6xtbN3cHSys0X2MNz7AP4nLgM0DCzVAAAAAElFTkSuQmCC
"""
    ffpreview_png = """iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAAAAABWESUoAAACVUlEQVQ4y9WQT0iTcRzGn9/vfX33m3NbIm6619ycCwwM7CARS/CyPIhI9AfTi1RCB09C0Ck6RB4iCqSLYYGVGIIHbaF2MSsJpET6g6HMNd272HRjG9vrXt+9bwcVhWCee47P5/M9fB9SjsIhkZSlAE5Z
KDULU/0xhuEnOZZ5/Jqy8L1ZxpbuLjNhqj9mYXQJocX2QXwx1s3Afya7hIFOf0S+f7NXCS22DwJUgWIXk9iudMjIipWKnqiqyKklzoSm2MUkdL4atoWUF85HJRdR90q6Q5qertoE+8Alg20h5QUhCdkYj9XykHIuogdK7FBXHFbIa26GeKxWZWQsZwBX7SYAgkEVAMC7
XAD0wLoKReCuaBzyb381UO3ltEgBAMq4dIqoQ/MOgjxHErIR0EbLWj7+vM7tfZ8fOtk0s9lBgW22e0NbRvGmbZ+Da/Nj9Pwe2q1Mn/Sw6WBAU1h/Z8Rh4d9Y6BHCDo4Q8H8KtKCQ8RIxc9BmRHIue1jQpq+idSK/z/OTreiY1gAAZCxnQP5z5TVeG/nezAMA1Nn6Tqo+
k85yUAQypgjgj7sJgN/B3X2LXE4A+lpIhSKQhGyMRz08wrkaoq+aK7Cz4jiGbMDDEI96VMZ1FWf6tqT6lQffrOL7iYnT1uc/hn30dnKqOdm3JdXxNIRoY/c8Qhc6lrHc1RrSP9zwxOTN3nEl2tg9D50KECIbVjBJMqJ4QxJI6fofA58KllIhsmEF4R5qZem5Hqvtq3SZ
VU2W+bgTL3wNRe6RW6IlPddj4omUNhcYOm0m5SgqIOzgL/oO5qijSLZZAAAAAElFTkSuQmCC
"""
    info_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABwlBMVEX///////////////+7zd69z9////////////////9skrZKfKxcjLp5pMhvm8Rdjbp/o8T///8uYpYxa6Nai71IfKwTSoMmYZoANG4ERoUHUJMLSYEAOHMAQYEAOXMAQ4UALWkAOXMAQ4QALWoA
KnEAOHEAQH8AKGsAN3EAO3gANXAANnAAN3EAOHQAOHUANnAANW8ANXAAAAAANXAAOHQCOXUANXAAM3AHPHQHQHcFRHkJQ3kDOnMBLWgALWiErM2Vtda3zeW+0ui90ei3zeahvtwmYp6evNyMsNd9pdKHrNWKrtekwN5Jg7h6os1xnMxwm8x8pND5+/2AptFCd65lk8di
kcZynMz8/f5kk8dDfLVDerVViL9olMVnk8RLgbpEfLZIf7n///9HgLtIf7tPhLo2cLE6dbI6eLg5e7o7fb85e7wqbrIbYKQFT5sASJIBTZgGVaQKYK8OZLTu7u4AX7YAXbQAWKwAUqIAS5YARIoAUJ4AV6oAXrYAYLgAY70AZL8ASpMAUaEAW7EAa8kAV6sAPn4AWq4A
ZsIAa8oActQAdtwAb88APn8AOqQAX7MAWtYAX90AZbwAPagBOXMAOXMa0d8aAAAAP3RSTlMAAQYLISINCAIDOMXx/vXPPwR8+PuKc34n9fYpq6zf3xH9/RES/f0T6OjExTn7+zqZmQGf/f2gQtv5+t9DHh7QNZoPAAAA5klEQVQY02NgAAJGJmYWVjZ2DgYo4OTi5uG1
5+MXEITwOYSEHRydnF1cRUQhImJu7h6eXl6e3j6+4oxAvoSkn39AYFBQYIB/sJQ0UEAmJDQsLDwiIjwsLDJKFiggFx0DBLFxIDJenoFBQTEhMTExKSkpOSUxMVVJmUFFNS09PSMzKSkrOyc3T02dgUEjv6CwqLikpLSsvKJSE2iGVlV1TW1dSUl9Q21NozZQQEe3qbm+
BAha6lv19EEOM2hr7+js6u7qqes1BLvUyNikr3/CxEmTTc2MoL4zt7CcMtXK2oYBCdjaQRkADNM7nD2IGIMAAAAASUVORK5CYII=
"""
    ok_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABDlBMVEX///8JIzUJHToAEjcIHz4WM1oOJ00AFDsaNVwMJUsJHzsdOV8OIUMJHDkdNVoeN1kKHTkvTXYOIkILIDUdNVYVMFcGHDcZMVUOIj8aMk0aNFYFHjcLHz8cNVMFGzcKHj0aMUYJGjwLIDsXMEAI
GzsMHjoYMT4HGjoAAAALHzcbND8HHzoKHTpph6h9mrWryt5ig6VwjKiUsshvi6h0kalEYINifpp/m7J+m7Jng59depZnhJ1lgZs7VG9VcYdphZ5ifZgsSGlRa4dbd5RgfZo1TFpNanpmgZ9kgJxje5MtRl1PaolUcJNadpdbdptWcJREX3kqRFNSb5ZffKhigKxQbYkz
TFd0lL1oiKY2Ul62MpKDAAAALXRSTlMAHXIOIezYDeq8bPWRG+jUa/6QGOfzLtqW5/Uzjvc4oPk8qPtBsPxGAbb8S2r7wD5BAAAAhElEQVQY02NgIAUwMjGj8FlYddnYkeU59PQNOEEsLm4eIMnLZ2hkZMwPZAkImpgKMQiLmJlbWIqKAQXEraxtbCUk7eytHaSkQTpk
HJ2cXVzd3D08ZeUgpsl7efv4+vkHKCjCzFcKDAoOCVVWQdioGhYeoaaO5AQNzUgtbRRHaugoMhANAOfbEF197TngAAAAAElFTkSuQmCC
"""
    open_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAB41BMVEUAAABUf7UAAAAAAAAAAAAAAAAJCQlUf7UqKinExMRYh8KTsctKdaZFX4AAGDgAEzMAFDMAEzIAEzIAEzEAEzEAEjAAEjEAGz0oTHb////o5+dKc6dRcpv29vb4+Pj5+Pj19PTr6+v///87UWpR
frhUg7xUgbs9ZJJKapD5+fnv7u6goKDk4d5OYXd4otd8qeFJdq0vUno+Xof49/f39/fy8vKamprx7+/h3ds3T21olM0wUnsrS3H19fWhoKD//PpGXXkqS3MlRmohQ236+fn7+/q4uLi0s7OtrKyysLD08vAOKkoiQWQRNF77+vr6+frf4eXP09jO0tbMz9XJzdHGyM7B
xMrg4uUAFDEYOVsAHkXo6OrO0dXQ0ta1u8OHor6nzeuozeqpzeuozeuqzuu84PxPdp+84PunzOqmy+mmzOqozuuqzuqFq9qCpteAptd/pteHreFIXXhQeamDqdx8o9Z9o9Z+o9aBptiAp9iCp9iBp9iHruFJX3tReqqKruCDqduGqduHqduFqduEqduEqdqJseRKX3w5
Z5tAdrQ/dLBAdLBDerknRGZYf62Hrd+FqdqGqdqDqdqKsONQZIGBqN19o9h+pNh8o9h4o9l8o9l+pNl+o9l5o9l6o9h+o9iBqeD0enAlAAAAGHRSTlMAAANJSz0IyZb+1bq5r0F6e3x9fn+Bf0Lax4JAAAABAElEQVQY02NgYGJmAQFWNgYIYJeQBAMpDjCXkUFaRlJW
Tl5BkVNJWUVVjYuBQV1DUk5OU1ZLW1JHV0/fgIHB0EjSWFPTxNRM0tzC0sqagcHGECQgZ2sHNMfewZGBwclZ0kTOxdVF0c3dw9PLm4HBx1dSzs8/IDAoOCQ0LDyCgSEyKjomNi4+ITEpOTk5JZWbIS09IzMrOyc3L78gLz+/sIihuKS0rKy8vKKysrKqurqmlqGuvqGh
saGpuQVINra2tTN0dHYBQTcQgkBPL0Nff+uEiRNaQcSkCa2TpzDwTJ02fcbMWbNnz5k7b/r8BbwMfPwCgkLCIqKiYsJCggL84gBhOUmZU0MiDgAAAABJRU5ErkJggg==
"""
    question_png = """iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAMAAADXqc3KAAABlVBMVEUAAAAyaKQ2Z6UzZqVIdK1FcqxIda1EcqxEcqs1ZqQAAAAuXJVFcaxEcawsWI8AAAAAAAAAAAAAAAApU4ZEcasnT4AAAAAAAAAAAAAAAAAAAAAAAAAmTXwkSXYAAAAAAAAAAAAmTHslSXcAAAAA
AAAAAAAmT300ZaEmTnsAAAAAAAAAAAAAAAAAAADa5PDV4e7i6/S3zeS3zOTf6PPj6/S4zuVnlMdlk8bf6PK5z+ZqmMlolshkksW70Odtm8xsmctql8lolchjkcVwnc5vnM1tmstrmMpmlMZkkcVhj8O3zOPe5/K6z+f3+fz5+/3x9frP3e2HqtJfjcK2y+O4zuZrmcpt
msxum8zG1+ukwN6uxuL////d5/JfjcFci8C2yuLe5/G4zeVplsiLrdRgjsNejMFcir9ZiL7Q3Otpl8mRs9f4+v3z9vqApc9di8Bbib9Zh72xx+DS3uxnlcdmlMfs8viDqNBgjsJcisBaib6wx+Dd5vG4zeRlksZjkcRikMTd5/FejcFbir9fjsJaiL62y+LQ3eveT0o1
AAAALnRSTlMATOFV9/f39/fhBl74+GINBQ4WafhuHgQIERkicXcqByVydiwBE2TlZgkCChIQvDkr4AAAAUpJREFUKM9goAAwMjExYhNnZtHTZ2XGIs5mYGhkjCnDzGZiamZuZIEuAxS3tLI2s0GXAYrb2tk7OJo7ocqAxJ1dXN2s3T08vbwRMkBxHxdnF18//4BAz6Bg
b3aoDCOLQUhoWHh4RGRUdIxnbFx8AgfEP0x6idYOofahVknJ0SYpqWnp8RmcEAn9RDPrzMxM66zsnNygvPyCwiKIBCOrcaK7WXFxSWl0WXlqRWVVNdQoBmZW4xob81qbuvqU2Lz8wgYOhLNYLWrq6+ujoxvzmgoTOJA9wmrhVd4cmwoSZ0f2Ohc3j4VXXl5afmECLx+/
AEJcUEiYxzu4sqWqQURUTEhQHCYhISklLcOb0FpYLSIrJy0lKQ+TEBAUElNQ5G0rElFSVhAT4leBmyXOryolpqauriEtJqWqqYJkO4MWl7agpI4uv6Y4AwEAACzzRLzzceJwAAAAAElFTkSuQmCC
"""
    refresh_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAACNFBMVEUhxhIcMVMcMlUhO2MvVI42YaMoSHkdM1YcMVMcMVMcMVMcMVMcMlQrTIEqS34cMlUcMVMnRnYhO2MeNls6aK88a7UrTYIcMlUzW5ocMVMnRXUcMlQwVpEfNlwhOmIdNFgdM1YfN1wkQGwhOmIh
OmIcMVMfN10kP2skP2sdNFcgOmEgOWAiPGYdM1ccMVMcMVMcMlUdM1YdM1YcMlQfNlwhOmIcMVMcMlUtUIctUYgoR3cjPmgcMlQdM1YzW5kiPWccMlQcM1UfNlwrTIE4ZKk2X6EzWpklQnAcMVMgOF83YaQ3YqVDc8BAcb89bLc6Zq00XZ0lQnAcMVMnRXREdME+brkj
PmkcMVMcMVMcMVMsTYNNe8QtUIYcMlQcMVMkP2sxV5M1Xp8hO2McMVMcMVMcMVMcMVMcMVMdM1YcMlUcMVMcMVMcMVNahch8ntOctt6fuN+Jp9hficqAodWIp9d1mdEjPWhyltBvlM9wlc9TgMY5ZqxxltBzl9AjPmhNe8RYg8dTf8YtUYgcMlUgOF8dM1cpSXtKecNg
icpfiMojP2pAcb8qSn0uUYlRfsUkQG0vVI04Y6cwVpEcMVM0XJxId8JXg8dMesMtUIcwVZAmQ3EbMFEmRHI8arQqS34mQ3IeNVkfN14hO2MgOWAaLk4nRnYsToQmRHMvU4wrTYIzW5o2YaM6Z68+bro2X6E6Zq1Hd8JSfsVPfcQ8a7VDdMBkjMtuk89sks5hispJeMJ/
oNRii8v+uJI/AAAAbXRSTlMAI37A8fPJhSQcIAJz9/d6RfLqwfr6+3T6J/eE/v74tLH4/sX1Mfb+9bBFtvq5CBcYFRINxPciuSwZDQV6/v70qar0vWxMMhop9/7++tyvhmcgdPrZrtxAAbj4/O6ybfT9/vR6Bh9/uu3uwIUdBOvzJgAAAQtJREFUGNNjYAACRiZmFlY2
dg4GBk4uIJebhzc3L7+gsIiPX0BQCMgXFikuAYNiUbHSMgYGcYnyisqq6urcioqaito6Bkmp+obGpubmlta29o7Ozi4GaZnu7h5ZOXkFRaXevqr6fgblCRMnTVbhYFBVmzJ12vRqdQaNGTNnzdZkYNCaM7d6yrz52gw6CxYuWqwLtF1P38DQyJiBwWTJ4qXLTCUZGMzM
LSytrBkYbGyXr5hiZ+/g6OTs4urmzsDg4bly1eo1a728fXz9/AMCgXqDgtetX7thY8O0TSGhYeFAgYjIqM1bqrZu274jOiY2AuTXiLj4hJ0Vu3YnJiWnMEBAalp6RmZWdg6IDQC1PFKUTLcgtQAAAABJRU5ErkJggg==
"""
    remove_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAllBMVEX////////////HZGS/QUHsQUH2YmKhBATqBASbAADlAACWAACeAADaAADiAACwAAC3AAC+AADEAADLAADSAADYAADfAADMZ2fPWlrXWlrcWlrhWlrkWlriZGTRV1fHRUXBUlLITU3SUVHZUFDe
TU3hSUnfSUnQKCi+HBy+IiKlAACrAACyAAC5AAC/AADGAADNAADTAABMyJi7AAAAD3RSTlMAAQSE+vuI5Ozk7GT5+mqpk8vSAAAATUlEQVQY02NgoA1gZIKxmBjBFDMLv4CgkLCIqBgrG1iAXVxCUkpaRkZWTp4DLMCpoKikrKKqpq6hyQUW4ObR0tbR1dM3MOTlo5Ez
MQAAgFYE6RdXIhUAAAAASUVORK5CYII=
"""
    revert_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAB11BMVEX///8AAAAAAAAAAAB5URuIaDV5VhoqHQqXcyu8mUCDYSaVcCeTbiaSbCWOZh4AAAB9VxiNZBaQahqLhHcAAAAAAABmZjMMCASLh3sAAACGhHsAAAAAAAD29vTo2bmEgnkAAADq6uro6enq6+vs
7e3t7u7u7+/v8PDw8fHx8vLz9PTo6ery8vL09PTr7Ozx8fHz8/P09fX19fX29vbOxbemkGuHaDN6Vh6LbDm5p4rh29H29/f39/f4+fmMbz2xjzvhyFzx32n673Ty4Gjhx1m5l0Gfhlz39vX6+vr7+/vavFn35mr35mf35mndwFiObDHx7+vz2mjw12nz3Gfx02PswVHn
sjbrv0by1GL02mXkxFqObj7v0mPu1GPu1WPkvTvWnRC2fg6jbA60dArZjgvpuUjwzV+8lTvCspjqzFzr0F3iwTnWrQ6NZxm4порb0sWtmXiQai/OhQrpuEnswVbpvlV8WBzlwknnzVLlzUHfyB6qhBPSx7X8/Pzz8Oyqg0XhoC3puFDotkqXby+UcR+Vcx6UcheVeT/y
7+v9/f3+/v69qo3MoFXFspDSyLXx8O708/HGtp7CsZX29vX29fL5+fn49/TTwqbOx7z49/P6+vn7+vnu6N7UEScRAAAAIXRSTlMAEDk6E9J0UPz+/fz8/Pw16P7+0wUBBT/cCtoSOP7+zAeKvFaxAAAA+UlEQVQY02NgYGBkYmKGASZGBgYGJkUlZRVVNXUNDU1NLSag
ALM2VEBTR0eXGSSgp6eipq6vY6BraGTMzMDCwKxqYmpmbmFpZW1jY8vMwMrGbmfv4Ojk7OLq5u7hyczAwcnl5e0DAr5+/gFAAe7AoODgkNCw8IjIyKhooABPTGxcfEJiUnJKalpaegYzA29mVnZObl5+QWFRcUlpGRMDX3lFZVV1jWdtXX1DY1MzP4OAoFBLa1t7R2dX
d09vn7AIg6iYeP8ED8/azq6JkzolJBlAfpk8ZSpQYNr0GVLSID6DjOzMWbNqZ8+RkwfLMzAoyID9z8QvAuIBALefO7A/pgxdAAAAAElFTkSuQmCC
"""
    save_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAA9lBMVEX///9NmQRPmQZOmgZOmAROmQZOmgZMmAVOmgZOmwZOmgZOmgZOmgZOmgVOmQZPnAdQmwhOmwau339yxSFOmgbQ866K4jSu4nu66Y11uDZ6uzy76o6c2mCX2lef51nH8Z/Q862d51ac5lN4zSdt
wB6M4Tif51iJ4DNtvh2/wbuChYZATD9FfRNaqRB70ClaqA8xPzEuNDa6vbbT1ND////29/X19vT7+/r+/v75+vj09fPv8O3p6+fj5uK1uLGkpqHv8O/k5eLh4d/t7ezNzsri4+Dl5uSdoJqGiYPr7OrQ0c65ureQko2IioWNj4tvcW3x8vFYWlaeE5PLAAAAEnRSTlMA
PNPFPt3IL9fL4czKvs7w7S42D9ScAAAAjUlEQVQY02NgAANGJmYWBmTAKiTMhiIgIiomQpEAOwcnSICLG2YRj7iEpKiYlLQML1SAT1ZOXkFMUUmZGSrAwq+iqiamriEgCOFramnr6Orp6esYGBqBBYxNTM3MLSytrG1s7cAC9g6OTiYmJs4urm7uYAEPT1cvbx9fPxdX
Tw+wgH8AHPiDBQKRAAMDAFjyF6ty/R1iAAAAAElFTkSuQmCC
"""
    warning_png = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABQVBMVEX////6sgX8sQb8uQX7uUD5uAX7sw36rgv9uQL6tgX6tQ35sgn7twT5tAT3tBH3sA33sgH2rwH/mQD0qgjyqQb/zADzowHzpAH/ogDwmg7wmQroogDwkgDwkQDpdwDqfATpegLrggblZgDmbgbo
eQD7vlL/16v+06X8vlL/2q7/1qX8u0r/0o3LbzjLbjb+y3j7wUj/0oC8RxL/zGf7vTj+0mr/0mXDSRP/zkv+y0X6wTP/1VH/1lLMViLOWyn/1UD/1Dn5wSv+yQX/zgD/0QDddRXccg//1gD/1wD+1Af3uhD/2AD/3AD/4AD/4gD/5AD/5QD/4wD3wBD+3AX/6AD/7QDm
fxD/8wD/8QD+5gn2vBf/6gD/7wD/9ADqfxHqgBH/9wD/8gD2vhf92CT/5hf/5gD/6QD/7AD/5wD/4xf92yY5YL/DAAAAJXRSTlMAY1VX/lzn6Wtv7O17fvHyiYsF9vYFmZcL+voLqKbq6pb49/aTMf8OLAAAAJ5JREFUGNNjYMABGJlQ+cwsqqwoAmxq6uzIfA4NTS1t
TiQBLh1dPX1uBJ/HwNDIyNiEFy7AZ2pmbm5hyQ/jC1hZ29ja2Ts4CkL4QsJOzi6ubu4eniKiYAExL28fXz//gIDAIHEQX0Iy2D8kNCwsPCI0UkoaKCAT5R8dExsXn5AYGpIkCxSQS05JTUsPTY/OCMzMkgcKKCgqwYGyCqa3AZWSG22RwdIDAAAAAElFTkSuQmCC
"""

    def __new__(cls):
        """Decode the embedded images into class-level QPixmap/QIcon
        attributes; a no-op on every call after the first."""
        if cls.initialized:
            return
        cls.initialized = True
        # NOTE: commented icons are currently unused
        cls.apply_pxm = sQPixmap(imgdata=ffIcon.apply_png)
        cls.apply = QIcon(ffIcon.apply_pxm)
        cls.broken_pxm = sQPixmap(imgdata=ffIcon.broken_png)
        cls.broken = QIcon(ffIcon.broken_pxm)
        cls.close_pxm = sQPixmap(imgdata=ffIcon.close_png)
        cls.close = QIcon(ffIcon.close_pxm)
        cls.delete_pxm = sQPixmap(imgdata=ffIcon.delete_png)
        cls.delete = QIcon(ffIcon.delete_pxm)
        cls.error_pxm = sQPixmap(imgdata=ffIcon.error_png)
        cls.error = QIcon(ffIcon.error_pxm)
        cls.ffpreview_pxm = sQPixmap(imgdata=ffIcon.ffpreview_png)
        cls.ffpreview = QIcon(ffIcon.ffpreview_pxm)
        #cls.info_pxm = sQPixmap(imgdata=ffIcon.info_png)
        #cls.info = QIcon(ffIcon.info_pxm)
        cls.ok_pxm = sQPixmap(imgdata=ffIcon.ok_png)
        cls.ok = QIcon(ffIcon.ok_pxm)
        cls.open_pxm = sQPixmap(imgdata=ffIcon.open_png)
        cls.open = QIcon(ffIcon.open_pxm)
        #cls.question_pxm = sQPixmap(imgdata=ffIcon.question_png)
        #cls.question = QIcon(ffIcon.question_pxm)
        cls.refresh_pxm = sQPixmap(imgdata=ffIcon.refresh_png)
        cls.refresh = QIcon(ffIcon.refresh_pxm)
        cls.remove_pxm = sQPixmap(imgdata=ffIcon.remove_png)
        cls.remove = QIcon(ffIcon.remove_pxm)
        cls.revert_pxm = sQPixmap(imgdata=ffIcon.revert_png)
        cls.revert = QIcon(ffIcon.revert_pxm)
        # NOTE(review): attribute is named save_xpm (not save_pxm like the
        # others); kept as-is in case external code references it.
        cls.save_xpm = sQPixmap(imgdata=ffIcon.save_png)
        cls.save = QIcon(ffIcon.save_xpm)
        #cls.warning_pxm = sQPixmap(imgdata=ffIcon.warning_png)
        #cls.warning = QIcon(ffIcon.warning_pxm)
class sQPixmap(QPixmap):
    """QPixmap convenience subclass that can be constructed directly
    from base64-encoded image data via the imgdata keyword."""
    def __init__(self, *args, imgdata=None, **kwargs):
        super().__init__(*args, **kwargs)
        if imgdata is None:
            return
        # Decode the embedded base64 PNG and load it into this pixmap.
        super().loadFromData(base64.b64decode(imgdata))
class sQIcon(QIcon):
    """QIcon convenience subclass that can be constructed directly from
    base64-encoded image data via the imgdata keyword."""
    def __init__(self, *args, imgdata=None, **kwargs):
        super().__init__(*args, **kwargs)
        if imgdata is None:
            return
        # Route the encoded image through sQPixmap and register it.
        super().addPixmap(sQPixmap(imgdata=imgdata))
class tLabel(QWidget):
    """Thumbnail cell widget: an optional pixmap above an optional text
    caption, forwarding mouse interaction to a receptor slot through
    the `notify` signal as event dictionaries."""
    __slots__ = ['info']
    # Event payloads are dicts with a 'type' key; delivered to `receptor`.
    notify = pyqtSignal(dict)
    def __init__(self, *args, pixmap=None, text=None, info=None, receptor=None, **kwargs):
        super().__init__(*args, **kwargs)
        layout = QVBoxLayout(self)
        layout.setSpacing(0)
        layout.setContentsMargins(0,0,0,0)
        if pixmap is not None:
            pl = QLabel()
            pl.setPixmap(pixmap)
            pl.setAlignment(Qt.AlignCenter)
            pl.setStyleSheet('QLabel {padding: 2px;}')
            layout.addWidget(pl)
        if text is not None:
            tl = QLabel()
            tl.setText(text)
            tl.setAlignment(Qt.AlignCenter)
            layout.addWidget(tl)
        self.info = info
        # NOTE(review): receptor is connected unconditionally; callers are
        # assumed to always pass a callable slot.
        self.notify.connect(receptor)
        # Freeze the widget at its natural size so the flow layout can
        # treat all thumbnail cells as uniformly sized.
        self.adjustSize()
        self.setMaximumSize(self.width(), self.height())
    def mouseReleaseEvent(self, event):
        self.notify.emit({'type': 'set_cursorw', 'id': self})
    def mouseDoubleClickEvent(self, event):
        # info[2] is forwarded as 'ts' (presumably the frame timestamp);
        # Shift requests immediate playback instead of a paused start.
        self.notify.emit({'type': 'play_video', 'ts': self.info[2],
            'pause': not (QApplication.keyboardModifiers() & Qt.ShiftModifier)})
    def contextMenuEvent(self, event):
        self.notify.emit({'type': 'context_menu', 'id': self, 'pos': self.mapToGlobal(event.pos())})
class tFlowLayout(QLayout):
    """ Based on Qt flowlayout example, heavily optimized for speed
        in this specific use case, stripped down to bare minimum. """
    def __init__(self, parent=None, size=1):
        super().__init__(parent)
        # Pre-allocated item slots, filled sequentially by addItem().
        self._items = [None] * size
        self._icnt = 0
        # Layout stays inert until enableLayout() is called, so that bulk
        # insertion does not trigger a relayout for every single item.
        self._layout_enabled = False
    def enableLayout(self):
        """Allow geometry calculations from now on."""
        self._layout_enabled = True
    def addItem(self, item):
        # NOTE(review): assumes at most `size` items are ever added;
        # overflow would raise IndexError -- confirm with callers.
        self._items[self._icnt] = item
        self._icnt += 1
    def itemAt(self, index):
        # Implicitly returns None for out-of-range indices, as Qt expects.
        if 0 <= index < self._icnt:
            return self._items[index]
    def hasHeightForWidth(self):
        return self._layout_enabled
    def heightForWidth(self, width):
        if self._layout_enabled:
            return self.doLayout(QRect(0, 0, width, 0), True)
        return -1
    def setGeometry(self, rect):
        if self._layout_enabled:
            self.doLayout(rect, False)
    def sizeHint(self):
        return QSize()
    def doLayout(self, rect, testonly):
        """Arrange items in rows, wrapping at the right edge; return the
        total height used.  When testonly is True, only measure."""
        if not self._icnt:
            return 0
        x = rect.x()
        y = rect.y()
        right = rect.right() + 1
        # All items are assumed to share the first item's size hint.
        iszhint = self._items[0].sizeHint()
        iwidth = iszhint.width()
        iheight = iszhint.height()
        # Distribute leftover horizontal space evenly as inter-item gaps.
        ngaps = int(right / iwidth)
        gap = 0 if ngaps < 1 else int((right % iwidth) / ngaps)
        for i in range(self._icnt):
            nextX = x + iwidth
            if nextX > right:
                # wrap to the start of the next row
                x = rect.x()
                y = y + iheight
                nextX = x + iwidth + gap
            else:
                nextX += gap
            if not testonly:
                self._items[i].setGeometry(QRect(QPoint(x, y), iszhint))
            x = nextX
        return y + iheight - rect.y()
class tScrollArea(QScrollArea):
    """Scroll area hosting the thumbnail grid; debounces resize events
    with a timer and reports them to the parent via `notify`."""
    notify = pyqtSignal(dict)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Debounce interval for resize handling, in milliseconds.
        self.delayTimeout = 50
        self._resizeTimer = QTimer(self)
        self._resizeTimer.timeout.connect(self._delayedUpdate)
    def resizeEvent(self, event):
        # Restart the timer on every resize; only the last event within
        # delayTimeout ms actually gets processed.
        self._resizeTimer.start(self.delayTimeout)
        self.rsz_event = event
    def _delayedUpdate(self):
        self._resizeTimer.stop()
        # ask parent to call our own do_update()
        self.notify.emit({'type': 'scroll_do_update'})
    def do_update(self, tlwidth, tlheight):
        """Recompute the visible grid rows/columns from the viewport size
        and a thumbnail cell of tlwidth x tlheight pixels.  Side effect:
        updates the global cfg's 'grid_rows'/'grid_columns'."""
        super().resizeEvent(self.rsz_event)
        if tlwidth < 1 or tlheight < 1:
            return
        rows = int(self.viewport().height() / tlheight + 0.5)
        # NOTE(review): 5.9287 looks like an empirically tuned divisor for
        # the scroll step -- confirm before changing.
        self.verticalScrollBar().setSingleStep(int(tlheight / 5.9287))
        cfg['grid_rows'] = rows
        cols = int((self.viewport().width()) / tlwidth)
        if cols < 1:
            cols = 1
        if cols != cfg['grid_columns']:
            cfg['grid_columns'] = cols
    def clear_grid(self):
        # Detach and dispose of the current thumbnail pane, if any.
        if self.widget():
            self.takeWidget().deleteLater()
    def fill_grid(self, tlabels, progress_cb=None):
        """Populate the scroll area with thumbnail labels.  If given,
        progress_cb(done, total) is invoked every 100 items."""
        self.setUpdatesEnabled(False)
        l = len(tlabels)
        thumb_pane = QWidget()
        self.setWidget(thumb_pane)
        layout = tFlowLayout(thumb_pane, l)
        x = 0; y = 0; cnt = 0
        for tl in tlabels:
            layout.addWidget(tl)
            if progress_cb and cnt % 100 == 0:
                progress_cb(cnt, l)
            x += 1
            if x >= cfg['grid_columns']:
                x = 0; y += 1
            cnt += 1
        # Shrink the configured grid when there are fewer thumbnails
        # than grid cells.
        if y < cfg['grid_rows']:
            cfg['grid_rows'] = y + 1
        if y == 0 and x < cfg['grid_columns']:
            cfg['grid_columns'] = x
        layout.enableLayout()
        self.setUpdatesEnabled(True)
class tmQTreeWidget(QTreeWidget):
    """Tree widget used by the thumbnail manager; offers a context menu
    with load and selection helpers."""
    def __init__(self, *args, load_action=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Offered as "Load Thumbnails" when exactly one row is selected.
        self.load_action = load_action
    def contextMenuEvent(self, event):
        """Pop up the selection/load context menu at the event position."""
        menu = QMenu()
        if self.load_action and len(self.selectedItems()) == 1:
            menu.addAction('Load Thumbnails', self.load_action)
            menu.addSeparator()
        menu.addAction('Select All', self.select_all)
        menu.addAction('Select None', self.select_none)
        menu.addAction('Invert Selection', self.invert_selection)
        menu.exec_(self.mapToGlobal(event.pos()))
    def select_all(self, sel=True):
        """Set the selection state of every top-level item to `sel`."""
        for idx in range(self.topLevelItemCount()):
            self.topLevelItem(idx).setSelected(sel)
    def select_none(self):
        """Deselect every top-level item."""
        self.select_all(False)
    def invert_selection(self):
        """Invert the current selection."""
        previously = list(self.selectedItems())
        self.select_all(True)
        for item in previously:
            item.setSelected(False)
class tmDialog(QDialog):
    """Thumbnail manager dialog.

    Lists the thumbnail directories found below *odir*, lets the user load
    one set into the main view, refresh the listing, filter it by name, and
    remove (broken) thumbnail directories.
    """
    # class-level defaults; instances assign their own values in __init__
    ilist = []      # scan result from get_indexfiles()
    outdir = ''     # thumbnail storage root
    loadfile = ''   # video file chosen via accept()
    def __init__(self, *args, odir='', **kwargs):
        """Build widgets, wire signals, then scan *odir* and show the list."""
        super().__init__(*args, **kwargs)
        self.outdir = odir
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowTitle("Thumbnail Manager")
        self.resize(800, 700)
        self.dlg_layout = QVBoxLayout(self)
        # header: location label (left) + total size display (right)
        self.hdr_layout = QHBoxLayout()
        self.loc_label = QLabel(text='Index of ' + self.outdir + '/')
        self.tot_label = QLabel(text='--')
        self.tot_label.setAlignment(Qt.AlignRight)
        self.tot_label.setToolTip('Approximate size of displayed items')
        self.hdr_layout.addWidget(self.loc_label)
        self.hdr_layout.addWidget(self.tot_label)
        # main listing: one row per thumbnail directory
        self.tree_widget = tmQTreeWidget(load_action=self.accept)
        self.tree_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.tree_widget.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.tree_widget.setRootIsDecorated(False)
        self.tree_widget.setColumnCount(4)
        self.tree_widget.setHeaderLabels(['Name', 'Count', 'Size', 'Date Modified'])
        self.tree_widget.itemDoubleClicked.connect(self.accept)
        self.tree_widget.itemSelectionChanged.connect(self.sel_changed)
        self.tree_widget.setAlternatingRowColors(True)
        # filter row: checkbox toggles the text filter on/off
        self.filter_layout = QHBoxLayout()
        self.filter_check = QCheckBox('Filter:')
        self.filter_check.setTristate(False)
        self.filter_check.setCheckState(Qt.Checked)
        self.filter_check.setToolTip('Activate or deactivate filter')
        self.filter_check.stateChanged.connect(self.redraw_list)
        self.filter_edit = QLineEdit()
        self.filter_edit.setToolTip('Filter list by text contained in name')
        self.filter_edit.textChanged.connect(self.redraw_list)
        self.filter_layout.addWidget(self.filter_check, 1)
        self.filter_layout.addWidget(self.filter_edit, 200)
        # button row
        self.btn_layout = QHBoxLayout()
        self.load_button = QPushButton("Load Thumbnails")
        self.load_button.setIcon(ffIcon.open)
        self.load_button.setToolTip('Load selected video thumbnail preview')
        self.load_button.clicked.connect(self.accept)
        self.load_button.setEnabled(False)
        self.load_button.setDefault(True)
        self.refresh_button = QPushButton("Refresh")
        self.refresh_button.setIcon(ffIcon.refresh)
        self.refresh_button.setToolTip('Rescan the thumbnail library and update list')
        self.refresh_button.clicked.connect(self.refresh_list)
        self.invert_button = QPushButton("Invert Selection")
        self.invert_button.setIcon(ffIcon.revert)
        self.invert_button.setToolTip('Invert the current selection')
        self.invert_button.clicked.connect(self.tree_widget.invert_selection)
        self.selbroken_button = QPushButton("Select Broken")
        self.selbroken_button.setIcon(ffIcon.remove)
        self.selbroken_button.setToolTip('Select orphaned or otherwise corrupted thumbnail directories')
        self.selbroken_button.clicked.connect(self.select_broken)
        self.remove_button = QPushButton("Remove Selected")
        self.remove_button.setIcon(ffIcon.delete)
        self.remove_button.setToolTip('Remove selected preview thumbnail directories')
        self.remove_button.clicked.connect(self.remove)
        self.remove_button.setEnabled(False)
        self.close_button = QPushButton("Close")
        self.close_button.setIcon(ffIcon.close)
        self.close_button.setToolTip('Close thumbnail manager')
        self.close_button.clicked.connect(self.reject)
        self.btn_layout.addWidget(self.refresh_button)
        self.btn_layout.addWidget(self.invert_button)
        self.btn_layout.addWidget(self.selbroken_button)
        self.btn_layout.addWidget(self.remove_button)
        self.btn_layout.addStretch()
        self.btn_layout.addWidget(QLabel(' '))
        self.btn_layout.addWidget(self.load_button)
        self.btn_layout.addWidget(QLabel(' '))
        self.btn_layout.addWidget(self.close_button)
        self.dlg_layout.addLayout(self.hdr_layout)
        self.dlg_layout.addWidget(self.tree_widget)
        self.dlg_layout.addLayout(self.filter_layout)
        self.dlg_layout.addLayout(self.btn_layout)
        QShortcut('Del', self).activated.connect(self.remove)
        QShortcut('F5', self).activated.connect(self.refresh_list)
        # show first, then scan, so progress updates are visible
        self.open()
        self.refresh_list()
        # size column 0 to content, capped at 5/8 of the dialog width
        hint = self.tree_widget.sizeHintForColumn(0)
        mwid = int(self.width() / 8 * 5)
        self.tree_widget.setColumnWidth(0, min(mwid, hint))
        for col in range(1, self.tree_widget.columnCount()):
            self.tree_widget.resizeColumnToContents(col)
    def accept(self):
        """Store the first selected item's video file as loadfile, then close."""
        for item in self.tree_widget.selectedItems():
            if item.vfile:
                self.loadfile = item.vfile
                break
        super().accept()
    def refresh_list(self):
        """Rescan the thumbnail directory and redraw the listing."""
        def show_progress(n, tot):
            # reuse the size label to display scan progress
            self.tot_label.setText('Scanning %d/%d' % (n, tot))
            QApplication.processEvents()
        self.ilist = get_indexfiles(self.outdir, show_progress)
        self.redraw_list()
        self.filter_edit.setFocus()
    def redraw_list(self):
        """Rebuild the tree from self.ilist, honoring the name filter and
        preserving the previous selection where possible."""
        selected = [item.text(0) for item in self.tree_widget.selectedItems()]
        self.tree_widget.setUpdatesEnabled(False)
        self.tree_widget.clear()
        ncols = self.tree_widget.columnCount()
        total_size = 0
        cnt_broken = 0
        flt = self.filter_edit.text().strip().lower() if self.filter_check.isChecked() else None
        for entry in self.ilist:
            if flt and not flt in entry['tdir'].lower():
                continue
            total_size += entry['size']
            item = QTreeWidgetItem([entry['tdir'], str(entry['idx']['count']), hr_size(entry['size']),
                    time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(entry['idx']['date']))])
            item.setToolTip(0, ppdict(entry['idx'], ['th']))
            item.setTextAlignment(1, Qt.AlignRight|Qt.AlignVCenter)
            item.setTextAlignment(2, Qt.AlignRight|Qt.AlignVCenter)
            # entries without index data or source video are marked as broken
            if not entry['idx'] or not entry['vfile']:
                cnt_broken += 1
                font = item.font(0)
                font.setItalic(True)
                for col in range(ncols):
                    item.setForeground(col, QColor('red'))
                    item.setBackground(col, QColor('lightyellow'))
                    item.setFont(col, font)
                item.setIcon(0, ffIcon.error)
            else:
                item.setIcon(0, ffIcon.ok)
            # vfile is attached to the item for accept()/sel_changed()
            item.vfile = entry['vfile']
            self.tree_widget.addTopLevelItem(item)
            if entry['tdir'] in selected:
                item.setSelected(True)
                selected.remove(entry['tdir'])
        self.tot_label.setText('~ ' + hr_size(total_size, 0))
        self.selbroken_button.setEnabled(cnt_broken > 0)
        self.tree_widget.setUpdatesEnabled(True)
    def select_broken(self):
        """Select exactly the items without an associated video file."""
        for i in range(self.tree_widget.topLevelItemCount()):
            item = self.tree_widget.topLevelItem(i)
            item.setSelected(not item.vfile)
    def sel_changed(self):
        """Enable Remove for any selection; Load only for a single valid item."""
        sel = self.tree_widget.selectedItems()
        nsel = len(sel)
        self.remove_button.setEnabled(nsel > 0)
        self.load_button.setEnabled(True if nsel==1 and sel[0].vfile else False)
    def remove(self):
        """After confirmation, delete the selected thumbnail directories and refresh."""
        dirs = [sel.text(0) for sel in self.tree_widget.selectedItems()]
        l = len(dirs)
        if l < 1:
            return
        mbox = QMessageBox(self)
        mbox.setWindowTitle('Remove Thumbnails')
        mbox.setIcon(QMessageBox.Warning)
        mbox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
        mbox.setDefaultButton(QMessageBox.Cancel)
        mbox.setText('Confirm removal of %d thumbnail folder%s.' % (l, 's' if l>1 else ''))
        if QMessageBox.Ok == mbox.exec_():
            for d in dirs:
                rm = os.path.join(self.outdir, d)
                # empty the directory first, then remove it
                clear_thumbdir(rm)
                eprint(1, "rmdir: ", rm)
                try:
                    os.rmdir(rm)
                except Exception as e:
                    eprint(0, str(e))
                    mbox = QMessageBox(self)
                    mbox.setWindowTitle('Directory Removal Failed')
                    mbox.setIcon(QMessageBox.Critical)
                    mbox.setStandardButtons(QMessageBox.Ok)
                    mbox.setText(re.sub(r'^\[.*\]\s*', '', str(e)).replace(':', ':\n\n', 1))
                    mbox.exec_()
            self.refresh_list()
    def get_loadfile(self):
        """Return the video file path chosen via accept(), or '' if none."""
        return self.loadfile
class aboutDialog(QDialog):
    """Fixed-size 'Help & About' dialog: app icon/version header, HTML help
    text, and About-Qt / Ok buttons."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowTitle('Help & About')
        self.setFixedSize(600, 600)
        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        # header: application icon + name/version/license tag line
        self.icon_label = QLabel()
        self.icon_label.setPixmap(ffIcon.ffpreview_pxm)
        self.tag_label = QLabel('%s %s\n'
                    'Copyright (c) 2021, Urban Wallasch\n'
                    'BSD 3-Clause License'
                    % (_FFPREVIEW_NAME, _FFPREVIEW_VERSION))
        self.tag_label.setAlignment(Qt.AlignCenter)
        self.hdr_layout = QHBoxLayout()
        self.hdr_layout.addWidget(self.icon_label, 1)
        self.hdr_layout.addWidget(self.tag_label, 100)
        # read-only help text rendered from the embedded HTML
        self.help_pane = QTextEdit()
        self.help_pane.setReadOnly(True)
        self.help_pane.setStyleSheet('QTextEdit {border: none;}')
        self.help_pane.setHtml(_FFPREVIEW_HELP)
        self.qt_button = QPushButton('About Qt')
        self.qt_button.clicked.connect(lambda: QMessageBox.aboutQt(self))
        self.ok_button = QPushButton('Ok')
        self.ok_button.setIcon(ffIcon.ok)
        self.ok_button.clicked.connect(self.accept)
        self.btn_layout = QHBoxLayout()
        self.btn_layout.addWidget(self.qt_button)
        self.btn_layout.addStretch()
        self.btn_layout.addWidget(self.ok_button)
        self.dlg_layout = QVBoxLayout(self)
        self.dlg_layout.addLayout(self.hdr_layout)
        self.dlg_layout.addWidget(self.help_pane)
        self.dlg_layout.addLayout(self.btn_layout)
class cfgDialog(QDialog):
    """Preferences dialog.

    Presents all configurable options (from the `opt` table) in a table of
    editor widgets, and can apply them to the live configuration, load them
    from, or save them to a config file.
    """
    # NOTE(review): these class attributes look vestigial (copied from
    # tmDialog) — nothing in this class reads them; kept for compatibility.
    ilist = []
    outdir = ''
    loadfile = ''
    # option table: [config key, (widget kind, arg1, arg2), tooltip]
    # widget kinds: sfile (path + Browse; arg1 = dirs only), edit (arg1 = max
    # length), spin/dblspin (arg1..arg2 = range), check, time, mcombo
    opt = [ ['outdir', ('sfile', True, 0), 'Thumbnail storage directory'],
            ['ffprobe', ('sfile', False, 0), 'Command to start ffprobe'],
            ['ffmpeg', ('sfile', False, 0), 'Command to start ffmpeg'],
            ['player', ('sfile', False, 0), 'Command to open video player'],
            ['plpaused', ('sfile', False, 0), 'Command to open player in paused mode'],
            ['grid_columns', ('spin', 1, 999), 'Number of columns in thumbnail view'],
            ['grid_rows', ('spin', 1, 999), 'Number of rows in thumbnail view'],
            ['force', ('check', 0, 0), 'Forcibly rebuild preview when opening a file (reset after each view load)'],
            ['reuse', ('check', 0, 0), 'If possible, reuse existing thumbnail parameters when viewing'],
            ['thumb_width', ('spin', 1, 9999), 'Width in pixel for thumbnail creation'],
            ['start', ('time', 0, 0), 'Start time for thumbnail creation'],
            ['end', ('time', 0, 0), 'End time for thumbnail creation'],
            ['method', ('mcombo', 0, 0), 'Select video filter method for thumbnail creation'],
            ['frame_skip', ('spin', 1, 99999), 'Number of frames to skip for method \'skip\''],
            ['time_skip', ('spin', 1, 9999), 'Number of seconds to skip for method \'time\''],
            ['scene_thresh', ('dblspin', 0.0, 1.0), 'Scene detection threshold for method \'scene\''],
            ['customvf', ('edit', 199, 0), 'Filter expression for method \'customvf\''],
            ['addss', ('spin', -1, 99), 'Add subtitles from stream'],
          ]
    def __init__(self, *args, **kwargs):
        """Build the option table, the button row, and populate from cfg."""
        super().__init__(*args, **kwargs)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowTitle('Preferences')
        self.table_widget = QTableWidget()
        self.table_widget.setSelectionMode(QAbstractItemView.NoSelection)
        self.table_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.table_widget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.table_widget.verticalHeader().setSectionResizeMode(QHeaderView.Fixed)
        self.table_widget.horizontalHeader().setVisible(False)
        self.table_widget.setShowGrid(False)
        self.table_widget.setStyleSheet('QTableView::item {border-bottom: 1px solid lightgrey;}')
        self.table_widget.setRowCount(len(self.opt))
        self.table_widget.setColumnCount(1)
        self.resize(self.table_widget.width() + 150, self.table_widget.height()+120)
        self.btn_layout = QHBoxLayout()
        self.reset_button = QPushButton('Reset')
        self.reset_button.setIcon(ffIcon.revert)
        self.reset_button.setToolTip('Revert to previous settings')
        self.reset_button.clicked.connect(self.reset)
        self.load_button = QPushButton('Load')
        self.load_button.setIcon(ffIcon.open)
        self.load_button.setToolTip('Load settings from file')
        self.load_button.clicked.connect(self.load)
        self.apply_button = QPushButton('Apply')
        self.apply_button.setIcon(ffIcon.apply)
        self.apply_button.setToolTip('Apply current changes')
        self.apply_button.clicked.connect(self.apply)
        self.save_button = QPushButton('Save')
        self.save_button.setIcon(ffIcon.save)
        self.save_button.setToolTip('Apply current changes and save to file')
        self.save_button.clicked.connect(self.save)
        self.close_button = QPushButton('Cancel')
        self.close_button.setIcon(ffIcon.close)
        self.close_button.setToolTip('Close dialog without applying changes')
        self.close_button.clicked.connect(self.reject)
        self.ok_button = QPushButton('Ok')
        self.ok_button.setIcon(ffIcon.ok)
        self.ok_button.setToolTip('Apply current changes and close dialog')
        self.ok_button.clicked.connect(self.accept)
        self.ok_button.setDefault(True)
        self.btn_layout.addWidget(self.reset_button)
        self.btn_layout.addWidget(self.load_button)
        self.btn_layout.addStretch()
        self.btn_layout.addWidget(self.apply_button)
        self.btn_layout.addWidget(self.save_button)
        self.btn_layout.addWidget(self.close_button)
        self.btn_layout.addWidget(self.ok_button)
        self.dlg_layout = QVBoxLayout(self)
        self.dlg_layout.addWidget(self.table_widget)
        self.dlg_layout.addLayout(self.btn_layout)
        self.refresh()
    def accept(self):
        """Ok: apply changes, then close the dialog with Accepted."""
        self.apply()
        super().accept()
    def reset(self):
        """Discard edits by re-initializing the configuration and redrawing."""
        ffConfig.init()
        self.refresh()
    def changed(self, _=True):
        """Any editor changed: enable the Reset button."""
        self.reset_button.setEnabled(True)
    def load(self):
        """Load settings from a user-chosen config file into the editors."""
        fn, _ = QFileDialog.getOpenFileName(self, 'Load Preferences', self.cfg['conffile'],
                    'Config Files (*.conf);;All Files (*)',
                    options=QFileDialog.DontUseNativeDialog)
        if not fn:
            return
        if not ffConfig.load_cfgfile(self.cfg, fn, self.cfg['verbosity']):
            mbox = QMessageBox(self)
            mbox.setWindowTitle('Load Preferences Failed')
            mbox.setIcon(QMessageBox.Critical)
            mbox.setStandardButtons(QMessageBox.Ok)
            mbox.setText('%s:\nFile inaccessible or corrupt.' % fn)
            mbox.exec_()
        self.refresh_view()
        self.changed()
    def save(self):
        """Apply current edits, then write them to a user-chosen config file.

        Existing file content is preserved: known keys are replaced in place
        under the [Default] section, unknown lines are kept verbatim.
        """
        fn, _ = QFileDialog.getSaveFileName(self, 'Save Preferences', self.cfg['conffile'],
                    'Config Files (*.conf);;All Files (*)',
                    options=QFileDialog.DontUseNativeDialog)
        if not fn:
            return
        # log the file we are actually writing (fn), not the previous conffile
        eprint(1, 'saving config to:', fn)
        self.apply()
        try:
            with open(fn) as file:
                lines = [line.rstrip() for line in file]
        except Exception as e:
            eprint(1, str(e))
            lines = []
        if '[Default]' not in lines:
            lines = ['[Default]']
        for o in self.opt:
            found = False
            repl = '%s=%s' % (o[0], str(self.cfg[o[0]]))
            for i in range(len(lines)):
                # replace an existing "key=value" line in place
                if re.match(r'^\s*%s\s*=' % o[0], lines[i]):
                    lines[i] = repl
                    found = True
                    break
            if not found:
                lines.append(repl)
        lines.append('')
        cont = '\n'.join(lines)
        try:
            with open(fn, 'wt') as file:
                file.write(cont)
            self.cfg['conffile'] = fn
        except Exception as e:
            eprint(0, str(e))
            mbox = QMessageBox(self)
            mbox.setWindowTitle('Save Preferences Failed')
            mbox.setIcon(QMessageBox.Critical)
            mbox.setStandardButtons(QMessageBox.Ok)
            mbox.setText(str(e))
            mbox.exec_()
        if self.cfg['verbosity'] > 2:
            eprint(3, cont)
    def apply(self):
        """Read every editor widget back into self.cfg and publish via ffConfig."""
        for i in range(len(self.opt)):
            o = self.opt[i]
            w = self.table_widget.cellWidget(i, 0)
            if o[1][0] == 'sfile':
                # composite widget: children()[1] is the QLineEdit
                self.cfg[o[0]] = w.children()[1].text()
            elif o[1][0] == 'edit':
                self.cfg[o[0]] = w.text()
            elif o[1][0] == 'spin' or o[1][0] == 'dblspin':
                self.cfg[o[0]] = w.value()
            elif o[1][0] == 'check':
                self.cfg[o[0]] = w.isChecked()
            elif o[1][0] == 'time':
                t = w.children()[1].time()
                self.cfg[o[0]] = t.hour()*3600 + t.minute()*60 + t.second() + t.msec()/1000
            elif o[1][0] == 'mcombo':
                self.cfg[o[0]] = w.currentText()
            eprint(3, 'apply:', o[0], '=', self.cfg[o[0]])
        self.cfg['outdir'] = make_outdir(self.cfg['outdir'])
        ffConfig.update(self.cfg)
        self.refresh()
    def _fs_browse(self, path, dironly=False):
        """Build a composite line-edit + Browse button widget for a path option."""
        def _filedlg():
            if dironly:
                fn = QFileDialog.getExistingDirectory(self, 'Open Directory',
                        path, QFileDialog.ShowDirsOnly | QFileDialog.DontUseNativeDialog)
            else:
                fn, _ = QFileDialog.getOpenFileName(self, 'Open File', path,
                        options=QFileDialog.DontUseNativeDialog)
            if fn:
                edit.setText(fn)
        widget = QWidget()
        edit = QLineEdit(path)
        edit.textChanged.connect(self.changed)
        browse = QPushButton('Browse...')
        browse.clicked.connect(_filedlg)
        layout = QHBoxLayout()
        layout.addWidget(edit)
        layout.addWidget(browse)
        layout.setContentsMargins(0, 0, 0, 0)
        widget.setLayout(layout)
        return widget
    def _time_edit(self, h=0, m=0, s=0, ms=0):
        """Build a composite time editor with a 'reset to zero' button."""
        widget = QWidget()
        edit = QTimeEdit(QTime(h, m, s, ms))
        edit.timeChanged.connect(self.changed)
        edit.setDisplayFormat('hh:mm:ss.zzz')
        zero = QPushButton('→ 00:00')
        zero.clicked.connect(lambda: edit.setTime(QTime(0, 0, 0, 0)))
        layout = QHBoxLayout()
        layout.addWidget(edit, 10)
        layout.addWidget(zero, 1)
        layout.setContentsMargins(0, 0, 0, 0)
        widget.setLayout(layout)
        return widget
    def refresh(self):
        """Fetch a fresh copy of the configuration and redraw the editors."""
        self.cfg = ffConfig.get()
        self.refresh_view()
    def refresh_view(self):
        """(Re)create one editor widget per option row from self.cfg."""
        self.table_widget.setUpdatesEnabled(False)
        for i in range(len(self.opt)):
            o = self.opt[i]
            eprint(3, 'refresh:', o[0], '=', self.cfg[o[0]])
            self.table_widget.setVerticalHeaderItem(i, QTableWidgetItem(o[0]))
            self.table_widget.verticalHeaderItem(i).setToolTip(o[2])
            if o[1][0] == 'sfile':
                w = self._fs_browse(self.cfg[o[0]], dironly=o[1][1])
                w.setToolTip(o[2])
            elif o[1][0] == 'edit':
                w = QLineEdit(self.cfg[o[0]])
                w.setMaxLength(o[1][1])
                w.setToolTip(o[2])
                w.textChanged.connect(self.changed)
            elif o[1][0] == 'spin':
                w = QSpinBox()
                w.setRange(o[1][1], o[1][2])
                w.setValue(int(self.cfg[o[0]]))
                w.setToolTip(o[2])
                w.valueChanged.connect(self.changed)
            elif o[1][0] == 'dblspin':
                w = QDoubleSpinBox()
                w.setRange(o[1][1], o[1][2])
                w.setSingleStep(0.05)
                w.setDecimals(2)
                w.setValue(self.cfg[o[0]])
                w.setToolTip(o[2])
                w.valueChanged.connect(self.changed)
            elif o[1][0] == 'check':
                w = QCheckBox(' ')
                w.setTristate(False)
                w.setCheckState(2 if self.cfg[o[0]] else 0)
                w.setToolTip(o[2])
                w.stateChanged.connect(self.changed)
            elif o[1][0] == 'time':
                # Split float seconds into h:m:s.ms. Truncate with int()
                # instead of round(): round(1.6) == 2.0 previously yielded a
                # negative millisecond part (ms == -400), an invalid QTime.
                rs = self.cfg[o[0]]
                s = int(rs)
                ms = int(round((rs - s) * 1000))
                h, s = divmod(s, 3600)
                m, s = divmod(s, 60)
                w = self._time_edit(h, m, s, ms)
                w.setToolTip(o[2])
            elif o[1][0] == 'mcombo':
                w = QComboBox()
                w.addItems(['iframe', 'scene', 'skip', 'time', 'customvf'])
                w.setCurrentIndex(w.findText(self.cfg[o[0]]))
                w.setToolTip(o[2])
                w.currentIndexChanged.connect(self.changed)
            self.table_widget.setCellWidget(i, 0, w)
        self.table_widget.setUpdatesEnabled(True)
        self.reset_button.setEnabled(False)
class batchDialog(QDialog):
    """Modal batch-processing dialog: generates thumbnails for a list of
    video files, showing a running HTML log and per-file progress."""
    def __init__(self, *args, fnames=[], **kwargs):
        """Build the UI, run the batch immediately, then turn Abort into Ok."""
        super().__init__(*args, **kwargs)
        self._abort = False
        self._done = False
        self.fnames = fnames
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowTitle('Batch Processing')
        self.resize(800, 700)
        self.logview = QTextEdit()
        self.logview.setReadOnly(True)
        self.logview.setStyleSheet('QTextEdit {border: none;}')
        self.statbar = QHBoxLayout()
        self.proglabel = QLabel('')
        self.progbar = QProgressBar()
        self.progbar.resize(100, 20)
        self.progbar.hide()
        self.statbar.addWidget(self.proglabel)
        self.statbar.addWidget(self.progbar)
        self.abort_button = QPushButton('Abort')
        self.abort_button.setIcon(ffIcon.close)
        self.abort_button.clicked.connect(self.abort)
        self.layout = QVBoxLayout(self)
        self.layout.addWidget(self.logview)
        self.layout.addLayout(self.statbar)
        self.layout.addWidget(self.abort_button)
        self.show()
        # processing happens synchronously inside __init__
        self.run_batch()
        # batch finished (or aborted): repurpose the Abort button as Ok
        self.proglabel.setText('')
        self.progbar.hide()
        self.abort_button.setText('Ok')
        self.abort_button.setIcon(ffIcon.ok)
        self.abort_button.clicked.disconnect()
        self.abort_button.clicked.connect(self.accept)
    def log_append(self, *args):
        """Append *args* to the HTML log view and scroll to the bottom."""
        sio = io.StringIO()
        print(self.logview.toHtml(), *args, file=sio, end='')
        self.logview.setHtml(sio.getvalue())
        sio.close()
        sb = self.logview.verticalScrollBar()
        sb.setValue(sb.maximum())
    def reject(self):
        """Escape/close: accept when done, otherwise ask to abort."""
        if self._done:
            self.accept()
        else:
            self.abort()
    def abort(self):
        """Confirm with the user, then kill the worker process and set the abort flag."""
        mbox = QMessageBox(self)
        mbox.setWindowTitle('Abort Operation')
        mbox.setIcon(QMessageBox.Warning)
        mbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        mbox.setDefaultButton(QMessageBox.No)
        mbox.setText('Aborting now will likely leave you with a broken or '
                    'incomplete set of thumbnails.\n\nAbort anyway?')
        if QMessageBox.Yes == mbox.exec_():
            kill_proc()
            self._abort = True
    def prog_cb(self, n, tot):
        """Progress callback: show 'n / tot' and percentage, or hide the
        bar entirely when called with (0, 0) to reset between files."""
        if not n and not tot:
            # reset request: previously the bar was hidden and then
            # immediately re-shown as '0 / 0' because this branch fell
            # through; the else keeps it hidden as intended
            self.proglabel.setText('')
            self.progbar.hide()
        else:
            self.proglabel.setText('%d / %d' % (n, tot))
            self.progbar.setValue(int(n * 100 / max(0.01, tot)))
            self.progbar.show()
        QApplication.processEvents()
    def run_batch(self):
        """Process self.fnames one by one, logging the outcome per file."""
        cnt = 0
        nfiles = len(self.fnames)
        for fname in self.fnames:
            self.prog_cb(0, 0)
            if self._abort:
                break
            fname = os.path.abspath(fname)
            vfile = os.path.basename(fname)
            thdir = os.path.join(cfg['outdir'], vfile)
            cnt += 1
            self.log_append('%d/%d:'%(cnt,nfiles), vfile)
            QApplication.processEvents()
            if not os.path.exists(fname) or not os.access(fname, os.R_OK):
                self.log_append(' <span style="color:red;">no permission</span>\n')
                continue
            if os.path.isdir(fname):
                self.log_append(' <span style="color:red;">is a directory</span>\n')
                continue
            thinfo, ok = get_thinfo(fname, thdir)
            if thinfo is None:
                self.log_append(' <span style="color:red;">failed</span>\n')
                continue
            if ok:
                # existing thumbnails are up to date for this file
                self.log_append(' <span style="color:blue;">nothing to do</span>\n')
                continue
            clear_thumbdir(thdir)
            thinfo, ok = make_thumbs(fname, thinfo, thdir, self.prog_cb)
            if ok:
                self.log_append(' <span style="color:green;">ok</span>\n')
            else:
                if self._abort:
                    # don't leave a half-built thumbnail directory behind
                    clear_thumbdir(thdir)
                self.log_append(' <span style="color:red;">failed</span>\n')
        if self._abort:
            self.log_append('<p style="color:red;">aborted</p>\n')
        else:
            self.log_append('<p>done</p>\n')
        self._done = True
class sMainWindow(QMainWindow):
    """ Application main window class singleton. """
    _instance = None        # singleton instance, managed by __new__
    tlwidth = 100           # current thumbnail label width (px)
    tlheight = 100          # current thumbnail label height (px)
    tlabels = []            # thumbnail label widgets of the loaded view
    thinfo = None           # thumbnail metadata of the loaded video
    fname = None            # absolute path of the loaded video file
    vfile = None            # basename of the loaded video file
    vpath = None            # directory of the loaded video file
    thdir = None            # thumbnail directory for the loaded video
    cur = 0                 # index of the cursor (highlighted) thumbnail
    view_locked = 0         # >0 while the view is locked (nested lock count)
    _dbg_num_tlabels = 0    # debug counters, filled by '_dbg_count' events
    _dbg_num_qobjects = 0
    def __new__(cls, *args, title='', **kwargs):
        """Singleton: create the instance on first call, return it thereafter."""
        if cls._instance is None:
            cls._instance = super().__new__(cls, *args, **kwargs)
        return cls._instance
    def __init__(self, *args, title='', **kwargs):
        """Initialize the icon store and build the main window UI."""
        super().__init__(*args, **kwargs)
        ffIcon()
        self.init_window(title)
def closeEvent(self, event):
if type(event) == QCloseEvent:
event.accept()
die(0)
    # calculate optimal window geometry in ten easy steps
    def optimize_geometry(self):
        """Resize/move the window so it shows exactly cfg['grid_columns'] x
        cfg['grid_rows'] thumbnails, clamped to the available screen estate.
        No-op while full-screen or maximized."""
        if self.windowState() & (Qt.WindowFullScreen | Qt.WindowMaximized):
            return
        # get current window geometry (excluding WM decorations)
        wg = self.geometry()
        wx = max(wg.x(), 0)
        wy = max(wg.y(), 0)
        ww = wg.width()
        wh = wg.height()
        # get frame geometry (including WM decorations)
        fg = self.frameGeometry()
        fx = fg.x()
        fy = fg.y()
        fw = fg.width()
        fh = fg.height()
        eprint(4, 'w', wx, wy, ww, wh, 'f', fx, fy, fw, fh)
        # calculate overhead WRT to thumbnail viewport; temporarily force the
        # vertical scrollbar on so its width is included in the overhead
        scpol = self.scroll.verticalScrollBarPolicy()
        self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        ow = ww - self.scroll.viewport().width()
        oh = wh - self.scroll.viewport().height()
        self.scroll.setVerticalScrollBarPolicy(scpol)
        # grid granularity (i.e. thumbnail label dimension)
        gw = self.tlwidth
        gh = self.tlheight
        # set minimum window size (i.e. flip-book sized)
        minw = gw + ow
        minh = gh + oh
        self.setMinimumSize(minw, minh)
        eprint(4, 'o', ow, oh, 'g', gw, gh, 'c,r', cfg['grid_columns'], cfg['grid_rows'])
        # get current available(!) screen geometry of the screen we are on
        screens = QGuiApplication.screens()
        for sc in reversed(screens):
            scg = sc.availableGeometry()
            sx = scg.x()
            sy = scg.y()
            sw = scg.width()
            sh = scg.height()
            if wx >= sx and wy >= sy and wx < sx+sw and wy < sy+sh:
                break
        eprint(4, 's', sx, sy, sw, sh)
        # tentative (wanted) window geometry
        tx = max(wx, sx)
        ty = max(wy, sy)
        tw = gw * cfg['grid_columns'] + ow
        th = gh * cfg['grid_rows'] + oh
        # available remaining screen estate (right and below)
        aw = sw - (tx - sx)
        ah = sh - (ty - sy)
        eprint(4, 't', tx, ty, tw, th, 'a', aw, ah)
        # try to fit the window on screen, move or resize if necessary
        if tw > aw - (fw - ww):
            frame_left = (fx + fw) - (wx +ww)
            tx = tx - (tw - aw) - frame_left
            tx = max(tx, sx)
            aw = sw - (tx - sx)
            tw = max(minw, min(tw, aw))
        if th > ah - (fh - wh):
            frame_bottom = (fy + fh) - (wy + wh)
            ty = ty - (th - ah) - frame_bottom
            ty = max(ty, sy)
            ah = sh - (ty - sy)
            th = max(minh, min(th, ah))
        # round down window dimensions to thumb grid
        tw = int((tw - ow) / gw) * gw + ow
        th = int((th - oh) / gh) * gh + oh
        eprint(4, 't', tx, ty, tw, th)
        # set final size
        self.setGeometry(tx, ty, tw, th)
def lock_view(self, lock=True):
if lock:
self.view_locked += 1
self.scroll.setEnabled(False)
self.set_cursor(disable=True)
else:
if self.view_locked > 0:
self.view_locked -= 1
if self.view_locked == 0:
self.scroll.setEnabled(True)
self.set_cursor(disable=False)
    def rebuild_view(self):
        """Re-fill the thumbnail grid from self.tlabels, locked during the
        operation, then restore the cursor highlight."""
        self.lock_view(True)
        self.scroll.fill_grid(self.tlabels, self.show_progress)
        self.lock_view(False)
        self.set_cursor()
    def clear_view(self):
        """Empty the thumbnail grid and reset cursor and label list."""
        self.lock_view(True)
        self.scroll.clear_grid()
        self.cur = 0
        self.tlabels.clear()
        self.lock_view(False)
def set_cursor(self, idx=None, disable=False):
l = len(self.tlabels)
if l < 1:
self.cur = 0
return
try:
self.tlabels[self.cur].setStyleSheet('')
if disable:
return
bg_hl = self.palette().highlight().color().name()
fg_hl = self.palette().highlightedText().color().name()
style_hl = 'QLabel {background-color: %s; color: %s;}' % (bg_hl, fg_hl)
self.cur = min(max(0, self.cur if idx is None else idx), l - 1)
self.tlabels[self.cur].setStyleSheet(style_hl)
self.statdsp[3].setText('%d / %d' % (self.tlabels[self.cur].info[0], l))
self.scroll.ensureWidgetVisible(self.tlabels[self.cur], 0, 0)
except:
pass
def set_cursorw(self, label):
try:
self.set_cursor(idx=self.tlabels.index(label))
except:
pass
    def move_cursor(self, amnt):
        """Move the cursor highlight by *amnt* positions (may be negative)."""
        self.set_cursor(self.cur + amnt)
    def toggle_fullscreen(self):
        """Toggle full-screen mode; the status bar labels are hidden while
        full-screen and restored on return to normal."""
        if self.windowState() & Qt.WindowFullScreen:
            self.showNormal()
            for w in self.statdsp:
                w.show()
        else:
            self.showFullScreen()
            for w in self.statdsp:
                w.hide()
    def esc_action(self):
        """Escape key: abort a running build, else leave full-screen,
        else quit the application."""
        if self.view_locked:
            if proc_running():
                self.abort_build()
        elif self.windowState() & Qt.WindowFullScreen:
            self.toggle_fullscreen()
        else:
            self.closeEvent(None)
def contextMenuEvent(self, event):
tlabel = None
if event:
# genuine click on canvas
pos = event.pos()
elif len(self.tlabels) > 0:
# kbd shortcut, show context menu for active label
tlabel = self.tlabels[self.cur]
pos = tlabel.pos()
pos.setX(pos.x() + int(self.tlwidth / 2))
pos.setY(pos.y() + int(self.tlheight / 2))
else:
# kbd shortcut, have no active label
pos = QPoint(self.width()/2, self.height()/2)
self.show_contextmenu(tlabel, self.mapToGlobal(pos))
    def show_contextmenu(self, tlabel, pos):
        """Build and execute the main context menu at global position *pos*.

        tlabel: thumbnail label under the cursor, or None when the menu was
        opened over empty canvas; determines the per-thumbnail entries.
        Most entries are omitted while the view is locked.
        """
        menu = QMenu()
        if not self.view_locked:
            if tlabel:
                self.set_cursorw(tlabel)
                menu.addAction('Play From Here', lambda: self._play_video(ts=tlabel.info[2]))
            if self.fname:
                menu.addAction('Play From Start', lambda: self._play_video(ts='0'))
            menu.addSeparator()
            menu.addAction('Open Video File...', lambda: self.load_view(self.vpath))
            if self.fname:
                menu.addAction('Reload', lambda: self.load_view(self.fname))
                menu.addAction('Force Rebuild', self.force_rebuild)
            menu.addSeparator()
            # clipboard submenu: timestamps, filenames, thumbnail image
            if tlabel or self.fname:
                copymenu = menu.addMenu('Copy')
                if tlabel:
                    copymenu.addAction('Timestamp [H:M:S.ms]', lambda: self.clipboard.setText(s2hms(tlabel.info[2], zerohours=True)))
                    copymenu.addAction('Timestamp [S.ms]', lambda: self.clipboard.setText(tlabel.info[2]))
                if self.fname:
                    copymenu.addAction('Original Filename', lambda: self.clipboard.setText(self.fname))
                if tlabel:
                    copymenu.addAction('Thumb Filename', lambda: self.clipboard.setText(os.path.join(self.thdir, tlabel.info[1])))
                    copymenu.addAction('Thumbnail Image', lambda: self.clipboard.setPixmap(tlabel.layout().itemAt(0).widget().pixmap()))
                menu.addSeparator()
            if not (self.windowState() & (Qt.WindowFullScreen | Qt.WindowMaximized)):
                menu.addAction('Window Best Fit', self.optimize_geometry)
            menu.addAction('Thumbnail Manager', lambda: self.manage_thumbs(cfg['outdir']))
            menu.addAction('Batch Processing', self.batch_dlg)
            menu.addAction('Preferences', lambda: self.config_dlg())
        else:
            # view locked: only offer to abort a running build
            if proc_running():
                menu.addAction('Abort Operation', self.abort_build)
        menu.addSeparator()
        menu.addAction('Help && About', self.about_dlg)
        menu.addSeparator()
        menu.addAction('Quit', lambda: self.closeEvent(None))
        menu.exec_(pos)
    def manage_thumbs(self, outdir):
        """Open the thumbnail manager dialog; load the chosen thumbnail set
        on accept. The view stays locked while the dialog is open.

        NOTE(review): the *outdir* parameter is ignored — the dialog always
        uses cfg['outdir']; confirm whether that is intended.
        """
        if self.view_locked:
            return
        self.lock_view(True)
        dlg = tmDialog(self, odir=cfg['outdir'])
        res = dlg.exec_()
        if res == QDialog.Accepted:
            lfile = dlg.get_loadfile()
            if lfile:
                self.load_view(lfile)
        self.lock_view(False)
    def config_dlg(self):
        """Open the preferences dialog; reload the current view on accept
        so changed settings take effect."""
        if self.view_locked:
            return
        self.lock_view(True)
        dlg = cfgDialog(self)
        res = dlg.exec_()
        if res == QDialog.Accepted:
            self.load_view(self.fname)
        self.lock_view(False)
def about_dlg(self):
dlg = aboutDialog(self)
res = dlg.exec_()
    def _play_video(self, ts=None, paused=False):
        """Launch the external player for the loaded file.

        ts: start timestamp; None plays from the active thumbnail's position.
        paused: open the player in paused mode.
        """
        if self.view_locked:
            return
        if ts is None:
            if len(self.tlabels) < 1:
                return
            ts = self.tlabels[self.cur].info[2]
        play_video(self.fname, ts, paused)
    # handle various notifications emitted by downstream widgets
    @pyqtSlot(dict)
    def notify_receive(self, event):
        """Dispatch an event dict (key 'type' selects the action) emitted by
        child widgets via their notify signals."""
        eprint(4, 'got event: ', event)
        if event['type'] == 'set_cursorw':
            self.set_cursorw(event['id'])
        elif event['type'] == 'context_menu':
            self.show_contextmenu(event['id'], event['pos'])
        elif event['type'] == 'scroll_do_update':
            if not self.view_locked:
                self.scroll.do_update(self.tlwidth, self.tlheight)
        elif event['type'] == 'play_video':
            self._play_video(ts=event['ts'], paused=event['pause'])
        elif event['type'] == '_dbg_count':
            # debug: count live thumbnail labels / QObjects under this window
            self._dbg_num_tlabels = len(self.findChildren(tLabel))
            self._dbg_num_qobjects = len(self.findChildren(QObject))
        else:
            eprint(0, 'event not handled: ', event)
    def init_window(self, title):
        """Build the main window: status bar, scrollable thumbnail area,
        layout, and all keyboard shortcuts."""
        self.setWindowTitle(title)
        self.setWindowIcon(ffIcon.ffpreview)
        self.resize(500, 300)
        self.clipboard = QApplication.clipboard()
        # set up status bar: four text labels plus a progress bar
        statbar = QHBoxLayout()
        self.statdsp = []
        for i in range(4):
            s = QLabel('')
            s.resize(100, 20)
            s.setStyleSheet('QLabel {margin: 0px 2px 0px 2px;}');
            self.statdsp.append(s)
            statbar.addWidget(s)
        self.progbar = QProgressBar()
        self.progbar.resize(100, 20)
        self.progbar.hide()
        statbar.addWidget(self.progbar)
        # set up thumbnail view area
        thumb_frame = QWidget()
        thumb_layout = tFlowLayout(thumb_frame)
        self.scroll = tScrollArea()
        self.scroll.notify.connect(self.notify_receive)
        self.scroll.setWidget(thumb_frame)
        self.scroll.setWidgetResizable(True)
        self.scroll.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scroll.setStyleSheet('QFrame {border: none;}')
        # set up main window layout
        main_frame = QWidget()
        main_layout = QVBoxLayout(main_frame)
        main_layout.setContentsMargins(0, 2, 0, 0)
        main_layout.addWidget(self.scroll)
        main_layout.addLayout(statbar)
        self.setCentralWidget(main_frame)
        # register keyboard shortcuts
        QShortcut('Esc', self).activated.connect(self.esc_action)
        QShortcut('Ctrl+Q', self).activated.connect(lambda: self.closeEvent(None))
        QShortcut('Ctrl+W', self).activated.connect(lambda: self.closeEvent(None))
        QShortcut('F', self).activated.connect(self.toggle_fullscreen)
        QShortcut('Alt+Return', self).activated.connect(self.toggle_fullscreen)
        QShortcut('Ctrl+G', self).activated.connect(self.optimize_geometry)
        QShortcut('Ctrl+O', self).activated.connect(lambda: self.load_view(self.vpath))
        QShortcut('Ctrl+M', self).activated.connect(lambda: self.manage_thumbs(cfg['outdir']))
        QShortcut('Tab', self).activated.connect(lambda: self.move_cursor(1))
        QShortcut('Shift+Tab', self).activated.connect(lambda: self.move_cursor(-1))
        QShortcut('Right', self).activated.connect(lambda: self.move_cursor(1))
        QShortcut('Left', self).activated.connect(lambda: self.move_cursor(-1))
        QShortcut('Up', self).activated.connect(lambda: self.move_cursor(-cfg['grid_columns']))
        QShortcut('Down', self).activated.connect(lambda: self.move_cursor(cfg['grid_columns']))
        QShortcut('PgUp', self).activated.connect(lambda: self.move_cursor(-cfg['grid_rows'] * cfg['grid_columns']))
        QShortcut('PgDown', self).activated.connect(lambda: self.move_cursor(cfg['grid_rows'] * cfg['grid_columns']))
        QShortcut('Home', self).activated.connect(lambda: self.set_cursor(0))
        QShortcut('End', self).activated.connect(lambda: self.set_cursor(len(self.tlabels)-1))
        QShortcut('Space', self).activated.connect(lambda: self._play_video(paused=True))
        QShortcut('Return', self).activated.connect(lambda: self._play_video(paused=True))
        QShortcut('Shift+Return', self).activated.connect(lambda: self._play_video())
        QShortcut('Ctrl+Return', self).activated.connect(lambda: self.contextMenuEvent(None))
        QShortcut('Ctrl+Alt+P', self).activated.connect(self.config_dlg)
        QShortcut('Alt+H', self).activated.connect(self.about_dlg)
        QShortcut('Ctrl+B', self).activated.connect(self.batch_dlg)
    def show_progress(self, n, tot):
        """Update the status bar progress display to n of tot and pump the
        event loop so the UI stays responsive during long operations."""
        self.statdsp[1].setText('%d / %d' % (n, tot))
        # max(0.01, tot) guards against division by zero when tot == 0
        self.progbar.setValue(int(n * 100 / max(0.01, tot)))
        QApplication.processEvents()
    # generate clickable thumbnail labels
    def make_tlabels(self, tlabels):
        """Fill *tlabels* (cleared first) with clickable thumbnail labels
        built from the index file in self.thdir.

        Missing or unreadable images fall back to a 'broken' placeholder;
        if nothing can be loaded at all, a single dummy label is appended.
        """
        dummy_thumb = ffIcon.broken_pxm.scaledToWidth(cfg['thumb_width'])
        tlabels.clear()
        try:
            with open(os.path.join(self.thdir, _FFPREVIEW_IDX), 'r') as idxfile:
                idx = json.load(idxfile)
                if cfg['verbosity'] > 3:
                    eprint(4, 'idx =', json.dumps(idx, indent=2))
                self.show_progress(0, idx['count'])
                # th entries: [number, image filename, timestamp]
                for th in idx['th']:
                    if th[0] % 100 == 0:
                        self.show_progress(th[0], idx['count'])
                    thumb = QPixmap(os.path.join(self.thdir, th[1]))
                    if thumb.isNull():
                        thumb = dummy_thumb
                    tlabel = tLabel(pixmap=thumb, text=s2hms(th[2]),
                                    info=th, receptor=self.notify_receive)
                    tlabels.append(tlabel)
        except Exception as e:
            # best-effort: a missing/corrupt index is logged, not fatal
            eprint(0, str(e))
        if len(tlabels) == 0:
            # no thumbnails available, make a dummy
            tlabels.append(tLabel(pixmap=dummy_thumb, text=s2hms(str(cfg['start'])),
                            info=[0, 'broken', str(cfg['start'])],
                            receptor=self.notify_receive))
def abort_build(self):
    """Ask for confirmation before aborting a running build; kill the worker on Yes."""
    dlg = QMessageBox(self)
    dlg.setWindowTitle('Abort Operation')
    dlg.setText('Aborting now will likely leave you with a broken or '
                'incomplete set of thumbnails.\n\nAbort anyway?')
    dlg.setIcon(QMessageBox.Warning)
    dlg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
    dlg.setDefaultButton(QMessageBox.No)
    answer = dlg.exec_()
    if answer == QMessageBox.Yes:
        kill_proc()
    return True
def force_rebuild(self):
    """Force a thumbnail rebuild, asking for confirmation on videos over 5 minutes."""
    confirmed = True
    if self.thinfo['duration'] > 300:
        # long video: rebuilding is expensive, so double-check with the user
        dlg = QMessageBox(self)
        dlg.setWindowTitle('Rebuild Thumbnails')
        dlg.setText('Rebuilding thumbnails may take a while.\n\nAre you sure?')
        dlg.setIcon(QMessageBox.Warning)
        dlg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        dlg.setDefaultButton(QMessageBox.No)
        confirmed = (dlg.exec_() == QMessageBox.Yes)
    if confirmed:
        cfg['force'] = True
        self.load_view(self.fname)
def batch_dlg(self):
    """Let the user pick multiple video files and run the batch-processing dialog.

    The view is locked while the dialog is up; cfg['force'] is reset
    afterwards so the next single-file load does not rebuild by accident.
    """
    if self.view_locked:
        return
    self.lock_view(True)
    fdir = os.path.dirname(self.fname) if self.fname else os.getcwd()
    fnames, _ = QFileDialog.getOpenFileNames(self, 'Select Files for Batch Processing',
                fdir, 'Video Files ('+ cfg['vformats'] +');;All Files (*)',
                options=QFileDialog.Options()|QFileDialog.DontUseNativeDialog)
    if not fnames:
        # selection cancelled or empty
        self.lock_view(False)
        return
    dlg = batchDialog(self, fnames=fnames)
    # modal; result deliberately ignored (was bound to an unused local before)
    dlg.exec_()
    cfg['force'] = False
    self.lock_view(False)
def load_view(self, fname):
    """Open a video, (re)generate thumbnails when the cached index is
    stale, and rebuild the thumbnail view.

    *fname* may be a file, a directory (a file dialog opens there) or a
    stale path; it is sanitized first.  The view is locked for the whole
    operation to block re-entrant UI actions.
    """
    self.lock_view(True)
    # sanitize file name
    if not fname:
        fname = os.getcwd()
    if not os.path.exists(fname) or not os.access(fname, os.R_OK):
        # fall back to the containing directory, or the cwd as a last resort
        fname = os.path.dirname(fname)
        if not os.path.isdir(fname):
            fname = os.getcwd()
    if os.path.isdir(fname):
        # got a directory: let the user pick an actual file from it
        fname, _ = QFileDialog.getOpenFileName(self, 'Open File', fname,
                    'Video Files ('+ cfg['vformats'] +');;All Files (*)',
                    options=QFileDialog.Options()|QFileDialog.DontUseNativeDialog)
    if not fname or not os.path.exists(fname) or not os.access(fname, os.R_OK):
        # dialog cancelled or file unreadable: restore the UI and bail out
        self.lock_view(False)
        return
    self.fname = os.path.abspath(fname)
    self.vfile = os.path.basename(self.fname)
    self.vpath = os.path.dirname(self.fname)
    self.thdir = os.path.abspath(os.path.join(cfg['outdir'], self.vfile))
    self.setWindowTitle(_FFPREVIEW_NAME + ' - ' + self.vfile)
    eprint(1, "open file:", self.fname)
    # clear previous view
    for sd in self.statdsp:
        sd.setText('')
        sd.setToolTip('')
    self.statdsp[0].setText('Clearing view')
    QApplication.processEvents()
    self.clear_view()
    # analyze video
    self.statdsp[0].setText('Analyzing')
    QApplication.processEvents()
    if self.thinfo:
        self.thinfo.clear()
    self.thinfo, ok = get_thinfo(self.fname, self.thdir)
    if self.thinfo is None:
        # probing failed entirely; give up on this file
        self.statdsp[0].setText('Unrecognized file format')
        self.lock_view(False)
        return
    if not ok:
        # (re)generate thumbnails and index file
        self.statdsp[0].setText('Processing')
        clear_thumbdir(self.thdir)
        self.progbar.show()
        self.thinfo, ok = make_thumbs(fname, self.thinfo, self.thdir, self.show_progress)
    # load thumbnails and make labels
    self.statdsp[0].setText('Loading')
    self.progbar.show()
    self.make_tlabels(self.tlabels)
    self.tlwidth = self.tlabels[0].width()
    self.tlheight = self.tlabels[0].height()
    # build thumbnail view
    tooltip = ppdict(self.thinfo, ['th'])
    for sd in self.statdsp:
        # NOTE(review): statdsp[2] (not sd) is cleared on every pass --
        # looks like this was meant to be sd.setText(''); confirm intent.
        self.statdsp[2].setText('')
        sd.setToolTip(tooltip)
    self.statdsp[0].setText('Building view')
    QApplication.processEvents()
    self.rebuild_view()
    self.set_cursor(0)
    self.progbar.hide()
    QApplication.processEvents()
    # final window touch-up
    self.statdsp[0].setText(s2hms(self.thinfo['duration']))
    self.statdsp[1].setText(str(self.thinfo['method']))
    self.optimize_geometry()
    QApplication.processEvents()
    # reset force flag to avoid accidental rebuild for every file
    cfg['force'] = False
    self.lock_view(False)
############################################################
# Helper functions
def proc_cmd(cmd):
    """Run *cmd* (an argv list) to completion and capture its output.

    The child is tracked in the module-global `proc` so a running worker
    can be aborted elsewhere (kill_proc).  Returns (stdout, stderr,
    returncode); when another child is already running, returns
    ('', '', None) without starting anything.
    """
    if proc_running():
        return '', '', None
    global proc
    retval = -1
    stdout = stderr = ''
    try:
        eprint(1, 'run:', cmd)
        proc = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, env=cfg['env'])
        stdout, stderr = proc.communicate()
        stdout = stdout.decode()
        stderr = stderr.decode()
        retval = proc.wait()
        # detach: the child is finished, nothing left to kill
        proc = None
        if retval != 0:
            eprint(0, cmd, '\n returned %d' % retval)
            eprint(2, stderr)
    except Exception as e:
        eprint(0, cmd, '\n failed:', str(e))
        proc = kill_proc(proc)
    return stdout, stderr, retval
# get video meta information
def get_meta(vidfile):
    """Probe *vidfile* for frame count, duration, fps and subtitle count.

    Tries progressively more expensive methods: ffprobe stream/format
    JSON, ffprobe packet counting, and finally a full ffmpeg stream copy.
    Returns (meta_dict, ok_flag); fields that could not be determined
    keep their -1 sentinel value.
    """
    meta = { 'frames': -1, 'duration':-1, 'fps':-1.0, 'nsubs': -1 }
    if proc_running():
        return meta, False
    # count subtitle streams
    cmd = [cfg['ffprobe'], '-v', 'error', '-select_streams', 's',
           '-show_entries', 'stream=index', '-of', 'csv=p=0', vidfile]
    out, err, rc = proc_cmd(cmd)
    if rc == 0:
        meta['nsubs'] = len(out.splitlines())
        eprint(1, 'number of subtitle streams:', meta['nsubs'])
    else: # ffprobe failed, try using ffmpeg
        # ffmpeg -i exits non-zero without an output file but still lists
        # the input streams on stderr, which is all we need here
        cmd = [cfg['ffmpeg'], '-i', vidfile]
        out, err, rc = proc_cmd(cmd)
        nsubs = 0
        for line in io.StringIO(err).readlines():
            if re.match(r'\s*Stream #.*: Subtitle:', line):
                nsubs += 1
        if nsubs > 0:
            meta['nsubs'] = nsubs
            eprint(1, 'number of subtitle streams:', meta['nsubs'])
    # get frames / duration / fps
    # try ffprobe fast method
    cmd = [cfg['ffprobe'], '-v', 'error', '-select_streams', 'v:0',
           '-show_streams', '-show_format', '-of', 'json', vidfile]
    out, err, rc = proc_cmd(cmd)
    if rc == 0:
        info = json.loads(out)
        strinf = info['streams'][0]
        fmtinf = info['format']
        d = f = None
        fps = -1
        # duration may be reported on the stream or on the container
        if 'duration' in strinf:
            d = float(strinf['duration'])
        elif 'duration' in fmtinf:
            d = float(fmtinf['duration'])
        if d is not None:
            d = max(d, 0.000001)
            # derive the missing one of frames/fps from the other
            if 'nb_frames' in strinf:
                f = int(strinf['nb_frames'])
                fps = f / d
            elif 'avg_frame_rate' in strinf:
                fps = sfrac2float(strinf['avg_frame_rate'])
                f = int(fps * d)
        if f is not None:
            meta['duration'] = d
            meta['frames'] = f
            meta['fps'] = fps
            eprint(3, 'meta =', meta)
            return meta, True
    # no dice, try ffprobe slow method
    cmd = [cfg['ffprobe'], '-v', 'error', '-select_streams', 'v:0', '-of', 'json', '-count_packets',
           '-show_entries', 'format=duration:stream=nb_read_packets', vidfile]
    out, err, rc = proc_cmd(cmd)
    if rc == 0:
        info = json.loads(out)
        meta['frames'] = int(info['streams'][0]['nb_read_packets'])
        d = float(info['format']['duration'])
        meta['duration'] = max(d, 0.0001)
        meta['fps'] = round(meta['frames'] / meta['duration'], 2)
        eprint(3, 'meta =', meta)
        return meta, True
    # ffprobe didn't cut it, try ffmpeg instead
    cmd = [cfg['ffmpeg'], '-nostats', '-i', vidfile, '-c:v', 'copy',
           '-f', 'rawvideo', '-y', os.devnull]
    out, err, rc = proc_cmd(cmd)
    if rc == 0:
        # parse the final progress line ffmpeg prints on stderr
        for line in io.StringIO(err).readlines():
            m = re.match(r'^frame=\s*(\d+).*time=\s*(\d+:\d+:\d+(\.\d+)?)', line)
            if m:
                meta['frames'] = int(m.group(1))
                d = hms2s(m.group(2))
                meta['duration'] = max(d, 0.0001)
                meta['fps'] = round(meta['frames'] / meta['duration'], 2)
                eprint(3, 'meta =', meta)
                return meta, True
    # not our lucky day, eh?!
    return meta, False
# extract subtitles
def extract_subs(vidfile, thinfo):
    """Copy all subtitle streams of *vidfile* into a temporary .mkv file.

    Honors the cfg start/end clipping so subtitle timing lines up with
    the thumbnail run.  Returns the temp file path, or None on failure;
    the caller owns the file and must delete it.
    """
    try:
        fd, subs_file = tempfile.mkstemp(suffix='.mkv', prefix='ffpreview_subs_')
        os.close(fd)
        eprint(2, 'created subtitle dump file:', subs_file)
        cmd = [ cfg['ffmpeg'] ]
        if cfg['start']:
            cmd.extend( ['-ss', str(cfg['start'])] )
        if cfg['end']:
            cmd.extend( ['-to', str(cfg['end'])] )
        # -map 0 keeps every stream, then -vn/-an drop video and audio
        cmd.extend( ['-i', vidfile, '-map', '0', '-c', 'copy', '-vn', '-an', '-y', subs_file] )
        out, err, rc = proc_cmd(cmd)
        if rc == 0:
            eprint(1, 'copied subtitles to:', subs_file)
            return subs_file
    except Exception as e:
        eprint(0, str(e))
    return None
# extract thumbnails from video and collect timestamps
def make_thumbs(vidfile, thinfo, thdir, prog_cb=None):
    """Run ffmpeg to dump thumbnail PNGs into *thdir* and fill thinfo['th'].

    Frame selection follows cfg['method'] (scene / skip / time / customvf,
    default: I-frames).  Timestamps are scraped live from the showinfo
    filter output on ffmpeg's stderr; prog_cb(time, duration) is invoked
    every 10 thumbnails.  On completion the JSON index file is written.
    Returns (thinfo, success_flag).
    """
    # prepare command line
    pictemplate = '%08d.png'
    cmd = [cfg['ffmpeg'], '-loglevel', 'info', '-hide_banner', '-y']
    if cfg['start']:
        cmd.extend( ['-ss', str(cfg['start'])] )
    if cfg['end']:
        cmd.extend( ['-to', str(cfg['end'])] )
    cmd.extend( ['-i', vidfile] )
    # prepare filters
    if cfg['method'] == 'scene':
        flt = 'select=gt(scene\,' + str(cfg['scene_thresh']) + ')'
    elif cfg['method'] == 'skip':
        flt = 'select=not(mod(n\,' + str(cfg['frame_skip']) + '))'
    elif cfg['method'] == 'time':
        # convert the seconds interval into an equivalent frame interval
        fs = int(float(cfg['time_skip']) * float(thinfo['fps']))
        flt = 'select=not(mod(n\,' + str(fs) + '))'
    elif cfg['method'] == 'customvf':
        flt = cfg['customvf']
    else: # iframe
        flt = 'select=eq(pict_type\,I)'
    # showinfo provides the pts_time scraped below; scale to the thumb width
    flt += ',showinfo,scale=' + str(cfg['thumb_width']) + ':-1'
    # dump subtitles and add to filter
    subs_file = None
    if thinfo['addss'] >= 0:
        subs_file = extract_subs(vidfile, thinfo)
        if subs_file:
            # escape windows path; unix temp file name should be fine?
            sf = subs_file.replace('\\', r'\\\\').replace(':', r'\\:')
            flt += ',subtitles=' + sf + ':si=' + str(thinfo['addss'])
    # finalize command line
    cmd.extend( ['-vf', flt, '-vsync', 'vfr', os.path.join(thdir, pictemplate)] )
    # generate thumbnail images from video
    rc = False
    if proc_running():
        return thinfo, rc
    global proc
    ebuf = ''
    cnt = 0
    eprint(1, 'run:', cmd)
    try:
        proc = Popen(cmd, shell=False, stderr=PIPE, env=cfg['env'])
        while proc.poll() is None:
            line = proc.stderr.readline()
            if line:
                line = line.decode()
                ebuf += line
                # showinfo lines carry the source timestamp of each frame
                x = re.search(r'pts_time:\d*\.?\d*', line)
                if x is not None:
                    cnt += 1
                    t = x.group().split(':')[1]
                    if cfg['start']:
                        # pts is relative to the seek point; rebase to file time
                        t = str(float(t) + cfg['start'])
                    thinfo['th'].append([ cnt, pictemplate % cnt, t ])
                    if prog_cb and cnt % 10 == 0:
                        prog_cb(float(t), thinfo['duration'])
        retval = proc.wait()
        proc = None
        if retval != 0:
            eprint(0, cmd, '\n returned %d' % retval)
            eprint(2, ebuf)
        thinfo['count'] = cnt
        with open(os.path.join(thdir, _FFPREVIEW_IDX), 'w') as idxfile:
            thinfo['date'] = int(time.time())
            json.dump(thinfo, idxfile, indent=2)
        rc = (retval == 0)
    except Exception as e:
        eprint(0, cmd, '\n failed:', str(e))
        proc = kill_proc(proc)
    if subs_file:
        # the temporary subtitle dump is no longer needed
        try:
            os.remove(subs_file)
        except Exception as e:
            eprint(0, str(e))
    return thinfo, rc
# open video in player
def play_video(filename, start='0', paused=False):
    """Launch the configured external player on *filename* at time *start*.

    %t and %f placeholders in the configured command are substituted with
    the start time and the file name.  On Windows the player is spawned
    directly via Popen; on Unix a classic double fork fully detaches the
    player so no zombie processes accumulate and the GUI can exit
    independently of it.
    """
    if not filename:
        return
    # keep this for Windows, for the time being
    if cfg['platform'] == 'Windows':
        # prepare argument vector
        cmd = cfg['plpaused'] if paused and cfg['plpaused'] else cfg['player']
        cmd = cmd.replace('%t', start).replace('%f', '"'+filename+'"')
        eprint(1, 'cmd =', cmd)
        Popen(cmd, shell=False, stdout=DEVNULL, stderr=DEVNULL,
              env=cfg['env'], start_new_session=True)
        return
    # Linux; Darwin?
    # double fork to avoid accumulating zombie processes
    try:
        pid = os.fork()
        if pid > 0:
            eprint(3, '1st fork ok')
            # reap the intermediate child right away
            status = os.waitid(os.P_PID, pid, os.WEXITED)
            if status.si_status:
                eprint(0, 'child exit error:', status)
            else:
                eprint(3, 'child exit ok')
            return # parent: back to business
    except Exception as e:
        eprint(0, '1st fork failed:', str(e))
        return
    # child
    # become session leader and fork a second time
    os.setsid()
    try:
        pid = os.fork()
        if pid > 0:
            eprint(3, '2nd fork ok')
            os._exit(0) # child done
    except Exception as e:
        eprint(0, '2nd fork failed:', str(e))
        os._exit(1)
    # grandchild
    # restore default signal handlers
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGHUP, signal.SIG_DFL)
    signal.signal(signal.SIGQUIT, signal.SIG_DFL)
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    # prepare argument vector
    cmd = cfg['plpaused'] if paused and cfg['plpaused'] else cfg['player']
    args = shlex.split(cmd)
    for i in range(len(args)):
        args[i] = args[i].replace('%t', start).replace('%f', filename)
    eprint(1, 'run:', args)
    # close all fds and redirect stdin, stdout and stderr to /dev/null
    sys.stdout.flush()
    sys.stderr.flush()
    for fd in range(1024): # more than enough for us
        try:
            os.close(fd)
        except:
            pass
    # fd 0 becomes /dev/null (lowest free descriptor), then dup onto 1 and 2
    os.open(os.devnull, os.O_RDWR)
    os.dup2(0, 1)
    os.dup2(0, 2)
    # execute command
    os.execvpe(args[0], args, cfg['env'])
    os._exit(255)
# check validity of existing index file
def chk_idxfile(thinfo, thdir):
    """Validate a previously written index file against *thinfo*.

    Returns the parsed index dict when it matches the current video and
    generation parameters (so its thumbnails can be reused), else False.
    With cfg['reuse'] set, only the identity fields (name, duration,
    start, end, count) are compared and generation parameters are
    ignored.
    """
    idxpath = os.path.join(thdir, _FFPREVIEW_IDX)
    try:
        with open(idxpath, 'r') as idxfile:
            idx = json.load(idxfile)
        # identity checks: must refer to the same video and clip range
        if idx['name'] != thinfo['name']:
            return False
        if int(idx['duration']) != int(thinfo['duration']):
            return False
        if idx['start'] != thinfo['start']:
            return False
        if idx['end'] != thinfo['end']:
            return False
        if idx['count'] != len(idx['th']):
            return False
        if not cfg['reuse']:
            # parameter checks: thumbnails must have been generated the same way
            if idx['width'] != thinfo['width']:
                return False
            if idx['nsubs'] != thinfo['nsubs']:
                return False
            if idx['addss'] != thinfo['addss']:
                return False
            if idx['method'] != thinfo['method']:
                return False
            # each method has exactly one tuning parameter to compare;
            # this replaces the previous duplicated if/elif chain
            param = {'skip': 'frame_skip', 'time': 'time_skip',
                     'scene': 'scene_thresh', 'customvf': 'customvf'}.get(idx['method'])
            if param is not None and (param not in idx or idx[param] != thinfo[param]):
                return False
        return idx
    except Exception as e:
        # missing/corrupt index file simply means "not reusable"
        eprint(1, idxpath, str(e))
    return False
# initialize thumbnail info structure
def get_thinfo(vfile, thdir):
    """Build the thumbnail-info dict for *vfile*.

    Probes the video via get_meta(); on probe failure returns
    (None, False).  If a matching index file already exists (and
    cfg['force'] is unset), returns (cached_index, True); otherwise
    (fresh_info, False) so the caller knows thumbnails still have to be
    generated.
    """
    info = {
        'name': os.path.basename(vfile),
        'path': os.path.dirname(vfile),
        'frames': -1,
        'duration': -1,
        'fps': -1,
        'nsubs': -1,
        'start': cfg['start'],
        'end': cfg['end'],
        'count': 0,
        'width': cfg['thumb_width'],
        'method': cfg['method'],
    }
    # include method specific parameters (only); the default iframe
    # method has no tuning parameter
    param_key = {
        'scene': 'scene_thresh',
        'skip': 'frame_skip',
        'time': 'time_skip',
        'customvf': 'customvf',
    }.get(cfg['method'])
    if param_key is not None:
        info[param_key] = cfg[param_key]
    # set these here for neater ordering
    info['addss'] = cfg['addss']
    info['ffpreview'] = _FFPREVIEW_VERSION
    info['date'] = 0
    info['th'] = []
    # get video file meta info (frames, duration, fps)
    meta, ok = get_meta(vfile)
    if not ok:
        return None, False
    info.update(meta)
    # the requested subtitle stream must actually exist
    if info['addss'] >= info['nsubs']:
        info['addss'] = -1
    if not cfg['force']:
        cached = chk_idxfile(info, thdir)
        if cached:
            return cached, True
    return info, False
# create output directory
def make_outdir(outdir):
    """Ensure the 'ffpreview_thumbs' output directory exists.

    Appends the suffix when *outdir* does not already end with it.
    Returns the resulting path, or False when the directory cannot be
    created.
    """
    suffix = 'ffpreview_thumbs'
    if os.path.basename(outdir) != suffix:
        outdir = os.path.join(outdir, suffix)
    try:
        os.makedirs(outdir, exist_ok=True)
    except Exception as e:
        eprint(0, str(e))
        return False
    eprint(1, 'outdir', outdir, 'ok')
    return outdir
# clear out thumbnail directory
def clear_thumbdir(thdir):
    """Remove the index file and all numbered thumbnail PNGs from *thdir*.

    Refuses to touch directories outside cfg['outdir'].  Returns True on
    success and False when the directory is rejected or cannot be
    created (previously success fell through returning None, which is
    falsy and inconsistent with the explicit False error returns).
    Failures to delete individual files are logged but not fatal.
    """
    if os.path.dirname(thdir) != cfg['outdir']:
        # safety net: never delete outside the configured output tree
        eprint(0, 'clearing of directory %s denied' % thdir)
        return False
    # prepare thumbnail directory
    eprint(2, 'clearing out %s' % thdir)
    try:
        os.makedirs(thdir, exist_ok=True)
    except Exception as e:
        eprint(0, str(e))
        return False
    f = os.path.join(thdir, _FFPREVIEW_IDX)
    if os.path.exists(f):
        try:
            os.unlink(f)
        except Exception as e:
            eprint(0, str(e))
    for f in os.listdir(thdir):
        # only touch files that match the generated thumbnail name pattern
        if re.match(r'^\d{8}\.png$', f):
            try:
                os.unlink(os.path.join(thdir, f))
            except Exception as e:
                eprint(0, str(e))
    return True
# process a single file in console-only mode
def batch_process(fname):
    """Generate (or validate) thumbnails for *fname* without the GUI.

    Progress is printed to stderr through a local callback that rewrites
    the same console line with '\\r'.  Returns True on success, False on
    unreadable input or a failed thumbnail run.
    """
    def cons_progress(n, tot):
        # in-place console progress line: "  n /  tot  xx %"
        print('\r%4d / %4d' % (int(n), int(tot)), end='', file=sys.stderr)
        if tot > 0:
            print(' %3d %%' % int(n * 100 / tot), end='', file=sys.stderr)
    # sanitize file name
    if not os.path.exists(fname) or not os.access(fname, os.R_OK):
        eprint(0, '%s: no permission' % fname)
        return False
    if os.path.isdir(fname):
        eprint(0, '%s is a directory!' % fname)
        return False
    fname = os.path.abspath(fname)
    vfile = os.path.basename(fname)
    thdir = os.path.join(cfg['outdir'], vfile)
    # analyze video
    print('Analyzing %s ...\r' % vfile, end='', file=sys.stderr)
    thinfo, ok = get_thinfo(fname, thdir)
    if thinfo is None:
        # probing failed entirely
        print('\nFailed.', file=sys.stderr)
        return False
    # prepare info and thumbnail files
    if not ok:
        # (re)generate thumbnails and index file
        print('Processing', file=sys.stderr)
        clear_thumbdir(thdir)
        thinfo, ok = make_thumbs(fname, thinfo, thdir, cons_progress)
        print('\r \r', end='', file=sys.stderr)
    else:
        # cached thumbnails are still valid; nothing to do
        print('', file=sys.stderr)
    if ok:
        print('Ok. ', file=sys.stderr)
    else:
        print('Failed. ', file=sys.stderr)
    return ok
# get list of all index files for thumbnail manager
def get_indexfiles(path, prog_cb=None):
    """Scan *path* for per-video thumbnail directories and summarize them.

    Each entry records the directory name, its parsed index file (with
    the bulky 'th' list dropped), the original video path when it still
    exists, and the total size of the PNG thumbnails.  prog_cb(done,
    total) is invoked every 20 directories.  Returns the entries sorted
    by directory name.
    """
    flist = []
    dlist = os.listdir(path)
    dlen = len(dlist)
    dcnt = 0
    for sd in dlist:
        if prog_cb and not dcnt % 20:
            prog_cb(dcnt, dlen)
        dcnt += 1
        d = os.path.join(path, sd)
        if not os.path.isdir(d):
            continue
        entry = { 'tdir': sd, 'idx': None, 'vfile': '', 'size': 0 }
        fidx = os.path.join(d, _FFPREVIEW_IDX)
        if os.path.isfile(fidx):
            with open(fidx, 'r') as idxfile:
                try:
                    idx = json.load(idxfile)
                except Exception as e:
                    eprint(1, fidx, str(e))
                    idx = {}
                else:
                    # drop the thumbnail list; the manager only needs the summary
                    idx['th'] = None
            entry['idx'] = idx.copy()
            if 'name' in idx and 'path' in idx:
                # check whether the source video still exists
                opath = os.path.join(idx['path'], idx['name'])
                if os.path.isfile(opath):
                    entry['vfile'] = opath
        sz = cnt = 0
        for f in os.listdir(d):
            if re.match(r'^\d{8}\.png$', f):
                cnt += 1
                try:
                    sz += os.path.getsize(os.path.join(d, f))
                except:
                    pass
        entry['size'] = sz
        if not entry['idx']:
            # no (readable) index file: synthesize a minimal summary
            entry['idx'] = { 'count': cnt, 'date': int(os.path.getmtime(d)) }
        flist.append(entry)
    flist = sorted(flist, key=lambda k: k['tdir'])
    if cfg['verbosity'] > 3:
        eprint(4, json.dumps(flist, indent=2))
    return flist
############################################################
# main function
def main():
    """Program entry point.

    Parses the configuration, installs signal handlers, then runs either
    console batch mode or the Qt GUI.  When the FFDEBUG environment
    variable is set, a background thread periodically dumps memory and
    garbage-collector statistics to stderr.
    """
    # initialization
    global proc, cfg
    proc = None
    cfg = ffConfig().get()
    if cfg['verbosity'] > 2:
        eprint(3, 'cfg = ' + json.dumps(cfg, indent=2))
    signal.signal(signal.SIGINT, sig_handler)
    signal.signal(signal.SIGTERM, sig_handler)
    if cfg['platform'] != 'Windows':
        # these signals are unavailable on Windows
        signal.signal(signal.SIGHUP, sig_handler)
        signal.signal(signal.SIGQUIT, sig_handler)
        signal.signal(signal.SIGPIPE, signal.SIG_IGN)
    global _FF_DEBUG
    if os.environ.get('FFDEBUG'):
        _FF_DEBUG = True
    # run in console batch mode, if requested
    if cfg['batch']:
        errcnt = 0
        for fn in cfg['vid']:
            if not batch_process(fn):
                errcnt += 1
        die(errcnt)
    # set up window
    if not _FF_DEBUG:
        os.environ['QT_LOGGING_RULES'] = 'qt5ct.debug=false'
    app = QApplication(sys.argv)
    app.setApplicationName(_FFPREVIEW_NAME)
    root = sMainWindow(title=_FFPREVIEW_NAME + ' ' + _FFPREVIEW_VERSION)
    # start console debugging thread, if _FF_DEBUG is set
    if _FF_DEBUG:
        import threading, resource, gc
        global _ffdbg_thread, _ffdbg_run
        gc.set_debug(gc.DEBUG_SAVEALL)
        # Qt signal proxy so the monitor thread can poke the GUI thread safely
        class _dbgProxy(QObject):
            notify = pyqtSignal(dict)
            def __init__(self, *args, receptor=None, **kwargs):
                super().__init__(*args, **kwargs)
                self.notify.connect(receptor)
            def ping(self):
                self.notify.emit({'type': '_dbg_count'})
        def _ffdbg_update(*args):
            # loop forever (until _ffdbg_run is cleared) dumping statistics
            tstart = time.time()
            dbg_proxy = _dbgProxy(receptor=root.notify_receive)
            def p(*args):
                print(*args, file=sys.stderr)
            while _ffdbg_run:
                gc.collect()
                time.sleep(0.5)
                dbg_proxy.ping()
                time.sleep(0.5)
                p('----- %.3f -----' % (time.time()-tstart))
                p('max rss:', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss, 'KiB')
                p('tLabel :', args[0]._dbg_num_tlabels)
                p('QObject:', args[0]._dbg_num_qobjects)
                p('gc cnt :', gc.get_count())
                p('gc gen0:', gc.get_stats()[0])
                p('gc gen1:', gc.get_stats()[1])
                p('gc gen2:', gc.get_stats()[2])
        _ffdbg_thread = threading.Thread(target=_ffdbg_update, args=(root,))
        _ffdbg_run = True
        _ffdbg_thread.start()
    # start in selected mode of operation, run main loop
    root.show()
    if cfg['manage']:
        root.manage_thumbs(cfg['outdir'])
    else:
        root.load_view(cfg['vid'][0])
    die(app.exec_())
# run application
if __name__== "__main__":
    # standard entry-point guard: run only when executed as a script
    main()
# EOF
| 43.645911 | 223 | 0.618358 |
acecb4798d23dd3dedc451070faa4782d7933bbf | 11,832 | py | Python | stock_trading/views.py | SaurabhPanja/herodha | bb78877cd7d29b80f446245b1b5feb6e16984489 | [
"MIT"
] | null | null | null | stock_trading/views.py | SaurabhPanja/herodha | bb78877cd7d29b80f446245b1b5feb6e16984489 | [
"MIT"
] | null | null | null | stock_trading/views.py | SaurabhPanja/herodha | bb78877cd7d29b80f446245b1b5feb6e16984489 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView
from django.contrib.auth.decorators import login_required
from .models import BuyTransaction, SellTransaction, Bookmark, User, Transaction
from django.contrib import messages
from datetime import datetime
from django.views.decorators.csrf import csrf_exempt
import os
import json
import random
from elasticsearch import Elasticsearch
ELASTICSEARCH_URL = os.getenv('ELASTICSEARCH_URL')
es = Elasticsearch(ELASTICSEARCH_URL)
from nsetools import Nse
nse = Nse()
from .forms import CustomUserCreationForm
from .checksum import *
from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.views.decorators.cache import cache_page
# CACHE_TTL = getattr(settings, 'CACHE_TTL', DEFAULT_TIMEOUT)
#Market Action
# nifty_50 = nse.get_index_quote('nifty 50')
# nifty_bank = nse.get_index_quote('nifty bank')
# nifty_pharma = nse.get_index_quote('nifty pharma')
# nifty_it = nse.get_index_quote('nifty it')
# nifty_auto = nse.get_index_quote('nifty auto')
#Top Gainers
# top_gainers = nse.get_top_gainers()
#Top Losers
# top_losers = nse.get_top_losers()
# Create your views here.
# from .forms import RegistrationForm
# from django.contrib.auth.models import User
# def registration(request):
# # if this is a POST request we need to process the form data
# if request.method == 'POST':
# # create a form instance and populate it with data from the request:
# form = RegistrationForm(request.POST)
# print('hello')
# # check whether it's valid:
# if form.is_valid():
# user = User.objects.create_user(form.cleaned_data['username'], form.cleaned_data['email'], form.cleaned_data['password1'])
# print(user, 'hi')
# # user = User()
# # user.username = form.cleaned_data['username']
# # user.email = form.cleaned_data['email']
# # user.password = form.cleaned_data['password1']
# # user.save()
# # redirect to a new URL:
# return HttpResponseRedirect('/login/')
# # if a GET (or any other method) we'll create a blank form
# else:
# form = RegistrationForm()
# return render(request, 'registration_form.html', {'form': form})
class SignUpView(CreateView):
    """Generic-view based user registration.

    Renders the custom sign-up form and redirects to the login page on
    successful account creation.
    """
    form_class = CustomUserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'registration.html'
def index(request):
    """Render the landing page with key NSE index quotes and top movers."""
    context = {}
    # fetch the headline index quotes (same indices, same order as before)
    for label in ('nifty 50', 'nifty bank', 'nifty pharma', 'nifty it', 'nifty auto'):
        context[label.replace(' ', '_')] = nse.get_index_quote(label)
    # market movers
    context['top_gainers'] = nse.get_top_gainers()
    context['top_losers'] = nse.get_top_losers()
    return render(request, 'index.html', context)
def search_stocks(request):
    """Resolve a free-text stock query via Elasticsearch and redirect to its quote page.

    Falls back to the home page when the query is missing or yields no
    hits.  The previous bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to Exception.
    """
    try:
        stock = request.POST['stock']
        result = es.search(index='stocks', body={'query': {'multi_match': \
            {'query': stock, 'fields': ['*']}}})
        print(result)  # leftover debug output
        result = result['hits']['hits'][0]['_source']['symbol']
        return redirect("/get_quote/{}".format(result))
    except Exception:
        # missing POST key, ES error or empty hit list all land here
        return redirect("/")
def get_quote(request, company_code):
    """Show the quote page for *company_code*, flagging any existing bookmark."""
    stock = nse.get_quote(company_code)
    try:
        bookmark = Bookmark.objects.get(user_id=request.user.id,
                                        company_code=company_code.upper())
    except Bookmark.DoesNotExist:
        bookmark = None
    # True exactly when a bookmark row exists for this user/company pair
    status = bookmark is not None
    return render(request, 'get_quote.html',
                  {'stock_data': stock, 'bookmark_status': status})
@login_required(login_url='/accounts/login/')
def buy(request, company_code):
    """Buy shares: debit the user's funds and create or top up their
    BuyTransaction holding for this company.

    Expects POST fields: name, symbol, lastPrice, qty, total.
    """
    current_user = request.user
    buyer_id = current_user.id
    # existing holding for this company, if any (looked up by the URL code)
    try:
        bt = BuyTransaction.objects.get(user_id=buyer_id, company_code=company_code)
    except BuyTransaction.DoesNotExist:
        bt = None
    company_name = request.POST.get("name")
    # NOTE(review): company_code is re-assigned from POST 'symbol' here,
    # while the holding above was looked up with the URL parameter --
    # presumably the two always match; verify against the form/template.
    company_code = request.POST.get('symbol')
    last_price = float(request.POST.get('lastPrice'))
    quantity = int(request.POST.get('qty'))
    total = float(request.POST.get('total'))
    available_funds = current_user.funds
    print(buyer_id)  # leftover debug output
    if available_funds < total:
        messages.error(request, 'Insufficient funds.')
        return redirect('/get_quote/{}'.format(company_code))
    else:
        # debit the purchase amount first
        available_funds -= total
        current_user.funds = available_funds
        current_user.save()
        if bt is None:
            # first purchase of this stock: create the holding row
            # NOTE(review): datetime.now is passed uncalled; confirm the
            # date field handles a callable as intended
            transaction = BuyTransaction(user_id=current_user, company_name=company_name, company_code=company_code, qty=quantity, last_price=last_price, Total=total, avg_price=last_price, date=datetime.now)
            transaction.save()
            messages.info(request, 'Your transaction was successful.')
        else:
            # top-up: merge into the existing holding using a weighted average price
            old_quantity = bt.qty
            new_quantity = old_quantity + quantity
            new_total = new_quantity*last_price
            avg_price = bt.avg_price
            new_avg_price = ((old_quantity*avg_price)+(quantity*last_price))/(old_quantity+quantity)
            bt.qty = new_quantity
            bt.last_price = last_price
            bt.Total = new_total
            bt.avg_price = new_avg_price
            bt.date = datetime.now
            bt.save()
            messages.info(request, 'Your transaction was successful.')
    return redirect('/get_quote/{}'.format(company_code))
@login_required(login_url='/accounts/login')
def sell(request, company_code):
    """Sell shares of a held stock: validate the quantity, shrink the
    holding, credit the proceeds exactly once and log a SellTransaction.

    Expects POST fields: name, symbol, total, qty.
    """
    current_user = request.user
    try:
        bt = BuyTransaction.objects.get(user_id=current_user.id, company_code=company_code)
    except BuyTransaction.DoesNotExist:
        # robustness: selling a stock that is not held previously raised
        # an unhandled DoesNotExist (HTTP 500)
        messages.error(request, "Not enough stock quantity to sell.")
        return redirect('/get_quote/{}'.format(company_code))
    company_name = request.POST.get("name")
    company_code = request.POST.get('symbol')
    sell_value = float(request.POST.get('total'))
    quantity = int(request.POST.get('qty'))
    last_price = sell_value/quantity
    old_quantity = bt.qty
    if quantity > old_quantity:
        messages.error(request, "Not enough stock quantity to sell.")
        return redirect('/get_quote/{}'.format(company_code))
    # shrink the holding by the sold amount
    avg_price = bt.avg_price
    new_quantity = old_quantity - quantity
    new_total = bt.Total - sell_value
    profit = (last_price - avg_price)*quantity
    bt.qty = new_quantity
    bt.last_price = last_price
    bt.Total = new_total
    bt.save()
    # credit the proceeds exactly once (a second, unsaved increment after
    # save() was removed -- it was a no-op in the DB but misleading)
    current_user.funds += sell_value
    current_user.save()
    sell_transaction = SellTransaction(company_name=company_name, company_code=company_code, qty=quantity, buying_price=avg_price, selling_price=last_price, profit=profit, total_selling=sell_value, user_id=current_user)
    sell_transaction.save()
    messages.info(request, 'Your transaction was successful.')
    return redirect('/current_holdings')
def get_current_price(request, company_code):
    """Return the live NSE quote for *company_code* as JSON (AJAX polling endpoint)."""
    quote = nse.get_quote(company_code)
    return JsonResponse({'stock_data': quote})
@csrf_exempt
def add_company(request):
    """Bookmark a company for the logged-in user; request body is a JSON-encoded symbol."""
    company_code = json.load(request)
    print(company_code)
    Bookmark(user_id=request.user, company_code=company_code).save()
    return HttpResponse(status=201)
@csrf_exempt
def remove_company(request):
    """Delete the logged-in user's bookmark for the JSON-encoded symbol in the body."""
    company_code = json.load(request)
    entry = Bookmark.objects.get(user_id=request.user, company_code=company_code)
    entry.delete()
    return HttpResponse(status=200)
@login_required(login_url='/accounts/login')
def dashboard(request):
    """Render the user's dashboard landing page."""
    return render(request, 'dashboard.html')
@login_required(login_url='/accounts/login')
def current_holdings(request):
    """List the user's open positions alongside their bookmarks."""
    uid = request.user.id
    holdings = list(BuyTransaction.objects.filter(user_id=uid))
    bookmarks = Bookmark.objects.filter(user_id=uid).values()
    return render(request, 'current_holdings.html',
                  {'current_shares': holdings, 'bookmarks': bookmarks})
@login_required(login_url='/accounts/login')
def past_holdings(request):
    """List the user's completed (sold) trades alongside their bookmarks."""
    uid = request.user.id
    sold = list(SellTransaction.objects.filter(user_id=uid))
    bookmarks = Bookmark.objects.filter(user_id=uid).values()
    return render(request, 'past_holdings.html',
                  {'past_shares': sold, 'bookmarks': bookmarks})
@login_required(login_url='/accounts/login')
def profile(request):
    """Show the user's profile page together with their bookmark list."""
    user = request.user
    bookmarks = Bookmark.objects.filter(user_id=user.id).values()
    return render(request, 'profile.html', {'user': user, 'bookmarks': bookmarks})
# NOTE(review): flagged "#not needed" upstream -- presumably superseded by
# the pages that pass bookmarks in their own context; verify before removing.
@login_required(login_url='/accounts/login')
def get_bookmarks(request):
    """Render a standalone page listing the current user's bookmarks."""
    current_user = request.user
    bookmarks = Bookmark.objects.filter(user_id=current_user.id).values()
    return render(request, 'bookmarks.html', {'bookmarks': bookmarks})
@login_required(login_url='/accounts/login')
def addfunds(request):
    """Top up the wallet via the Paytm staging gateway.

    GET renders the amount form; POST builds the Paytm payment
    parameters, signs them with the merchant key and posts them via
    paytm.html.  The order id is 12 random digits with the user id
    appended, so the payments() callback can recover the user.
    """
    if request.method == "POST":
        amount = request.POST.get('amount')
        user_id = request.user.id
        # 12 random digits; the user id is appended below
        order_id = random.randint(100000000000,999999999999)
        MERCHANT_KEY = os.getenv('MERCHANT_KEY')
        paytmParams = {
            'MID': os.getenv('MID'),
            'ORDER_ID': str(order_id)+str(user_id),
            'TXN_AMOUNT': str(amount),
            'CUST_ID': str(user_id),
            'INDUSTRY_TYPE_ID': 'Retail',
            'WEBSITE': 'WEBSTAGING',
            'CHANNEL_ID': 'WEB',
            'CALLBACK_URL':'http://'+request.get_host()+'/payments/',
        }
        print('http://'+request.get_host()+'/payments/')  # leftover debug output
        checksum = generate_checksum(paytmParams, MERCHANT_KEY)
        return render(request, 'paytm.html', {'paytmParams':paytmParams, \
            'checksum':checksum})
    else:
        return render(request, 'addfunds.html')
# paytm will send post request here
@csrf_exempt
def payments(request):
    """Paytm callback endpoint: verify the checksum and record the transaction.

    Credits the user's funds only when the checksum verifies and the
    gateway response code is '01' (success); every verified callback is
    logged as a Transaction row either way.
    """
    MERCHANT_KEY = os.getenv('MERCHANT_KEY')
    form = request.POST
    response_dict = {}
    # fix: checksum was previously unbound (NameError) when the gateway
    # omitted the CHECKSUMHASH field
    checksum = ''
    for i in form.keys():
        response_dict[i] = form[i]
        if i == 'CHECKSUMHASH':
            checksum = form[i]
    verify = verify_checksum(response_dict, MERCHANT_KEY, checksum)
    order_id = response_dict.get('ORDERID')
    # the user id was appended after the 12-digit random order number
    user_id = int(order_id[12:])
    amount = float(response_dict.get('TXNAMOUNT'))
    user = User.objects.get(id=user_id)
    if verify:
        if response_dict['RESPCODE'] == '01':
            # payment succeeded: credit the wallet and log a successful order
            user.funds = user.funds + amount
            user.save()
            order = Transaction(user_id=user, order_id=order_id, amount=amount, status=True, date=datetime.now)
            order.save()
        else:
            # payment failed at the gateway; record it without crediting funds
            order = Transaction(user_id=user, order_id=order_id, amount=amount, status=False, date=datetime.now)
            order.save()
    return render(request, 'paymentstatus.html', {'response': response_dict})
| 35.319403 | 220 | 0.679598 |
acecb5264297ef96e72041c5f72855f3a3efdbe3 | 11,041 | py | Python | model/sheet_treatment.py | marcos-de-sousa/BioTaxGeo | 9549ed20c1429eee9668a4fb845845f83da530e4 | [
"MIT"
] | 1 | 2020-11-11T22:39:16.000Z | 2020-11-11T22:39:16.000Z | model/sheet_treatment.py | marcos-de-sousa/BioTaxGeo | 9549ed20c1429eee9668a4fb845845f83da530e4 | [
"MIT"
] | null | null | null | model/sheet_treatment.py | marcos-de-sousa/BioTaxGeo | 9549ed20c1429eee9668a4fb845845f83da530e4 | [
"MIT"
] | null | null | null | import os
import xlrd
from xlutils.copy import copy
from model.coordinate import Coordinate
from model.data_treatment import Data_Treatment
from model.locality import Locality
import xlwt
import pandas as pd
class Sheet:
def __init__(self):
    """Create an empty Sheet holder; call set_Path_configure_all() to load a file."""
    # These attributes describe the workbook file itself and are filled in
    # once a path is configured (translated from the original Portuguese
    # comment: they concern only the file, so they are set up automatically
    # when the object is created).
    self.path = None            # absolute workbook path under ./files
    self.file = None            # xlrd workbook opened for reading
    self.write_file = None      # xlutils/xlwt copy opened for writing
    self.sheet_list = None      # names of all worksheets in the workbook
    self.sheet = None           # currently selected read sheet
    self.formated_sheet = None  # writable counterpart of the selected sheet
    self.coordinate = None      # Coordinate helper bound to the sheet
    self.locality = None        # Locality helper bound to the sheet
    self.data_treatment = None  # Data_Treatment helper bound to the sheet
    # These are assigned during processing.
    self.columns_total = 0
    self.row_total = 0
    self.cell_value = str       # NOTE(review): stores the str *type*, not a value -- confirm intent
    self.values_column = []
    self.values_row = []
    self.index_sheet = 0
    self.isCSV = False
def set_Path_configure_all(self, path):
self.path = str(os.getcwd()) + "/files/" + path if path != None else None # O comando os.getcwd pega o diretório atual de onde o arquivo python está.
try:
self.file = xlrd.open_workbook(self.path,
formatting_info=True) # Abre o arquivo com o nome enviado no parâmetro diretorio
except:
self.file = xlrd.open_workbook(self.path) # Abre o arquivo com o nome enviado no parâmetro diretorio
self.write_file = copy(self.file)
xlwt.add_palette_colour("custom_colour", 0x21)
self.sheet_list = self.file.sheet_names() # Pega o nome das páginas do arquivo
self.sheet = self.file.sheet_by_index(0) # Pega a página inicial (começa por 0)
self.formated_sheet = self.write_file.get_sheet(0)
# Aqui já vão ser atribuídas no decorrer do processamento.
self.columns_total = self.sheet.ncols
self.row_total = self.sheet.nrows
self.coordinate = Coordinate(self.sheet)
self.data_treatment = Data_Treatment(self.sheet)
self.locality = Locality(self.sheet)
def set_Path(self, path):
self.path = str(os.getcwd()) + "/files/" + path if path != None else None
def get_Path(self):
return self.path
def set_File(self, file):
self.file = file
def create_WriteFile(self):
self.write_file = xlwt.Workbook()
def create_SheetWriteFile(self, name):
self.formated_sheet = self.write_file.add_sheet(name, cell_overwrite_ok=True)
def set_HeaderWriteFile(self, values):
index = 0
for name in values:
self.formated_sheet.write(0, index, name)
index += 1
def set_isCSV(self, value):
self.isCSV = value
def get_Sheet(self):
return self.sheet_list[self.index_sheet]
def get_Sheet_List(self):
self.sheet_list = self.file.sheet_names()
return self.sheet_list
def get_Sheet_Header(self):
return self.sheet.row_values(0)
def set_Columns_Total(self, value):
self.columns_total = value
def get_Columns_Total(self):
return self.columns_total
def get_Row_Total(self):
return self.row_total
def Value_in_Cell(self, row, columns):
if (row > self.get_Row_Total()):
return "Linha excede valor total de linhas do arquivo."
if (columns > self.get_Columns_Total()):
return "Coluna excede valor total de colunas do arquivo."
if (row <= self.get_Row_Total() and columns <= self.get_Columns_Total()):
self.cell_value = self.sheet.cell(row, columns).value
return self.cell_value
return "Empty"
def Value_in_Column(self, column):
self.Reset_Values()
try:
if (type(column) == str):
index_column = self.sheet.row_values(0).index(column)
self.values_column = self.sheet.col_values(index_column, 1)
if (self.values_column == []):
return "Not found."
else:
return self.values_column
elif (type(column) == int):
self.values_column = self.sheet.col_values(column, 1)
return self.values_column
except:
return "Not found."
def Value_in_Row(self, row):
if (row <= self.get_Row_Total() and row > 0):
self.Resetar_Values()
self.values_row = self.sheet.row_values((row - 1))
return self.values_row
else:
return "Linha excede limite de linhas do documento."
def Reset_Values(self):
self.cell_value = str
self.values_column = []
self.values_row = []
def set_Check_Columns(self, titles):
self.data_treatment.Reset_Values()
for column in titles:
if titles[column] != None:
values_column = self.Value_in_Column(titles[column])
self.data_treatment.set_Original_Titles(titles[column])
self.data_treatment.set_Check_Columns(column, values_column)
def get_Columns_Checked(self):
return self.data_treatment.get_Validate_Columns()
def Change_Data_Spreadsheet(self, data_to_change):
column_change_taxon = self.get_Columns_Total()
rows_changed = []
self.formated_sheet.write(0, column_change_taxon, "is_taxonomy_changed")
for values in data_to_change:
key1 = values
for data in data_to_change[values]:
level = data_to_change[values][data]["level"][0]
column_index = self.sheet.row_values(0).index(
self.data_treatment.get_Verified_Hierarchy()[key1][data]["title"])
column_index_level = self.sheet.row_values(0).index(
self.data_treatment.get_Verified_Hierarchy()[key1][level]["title"])
for row in range(0, self.get_Row_Total()):
value1 = self.data_treatment.get_Verified_Hierarchy()[key1][data]["type"]
value2 = self.Value_in_Cell(row, column_index)
value1_level = data_to_change[values][data]["level"][1]
value2_level = self.Value_in_Cell(row, column_index_level)
if ((value1 == value2) and (value1_level == value2_level)):
style = xlwt.easyxf(
'pattern: pattern solid, fore_colour green; font: colour white; borders: left 1, right 1, top 1, bottom 1; font: bold 1;')
self.formated_sheet.write(row, column_index, data_to_change[key1][data]["suggestion"])
if row not in rows_changed:
rows_changed.append(row)
for row in range(1, self.get_Row_Total()):
if row in rows_changed:
self.formated_sheet.write(row, column_change_taxon, "TRUE")
else:
self.formated_sheet.write(row, column_change_taxon, "FALSE")
def Change_Data_Spreadsheet2(self, data_to_change):
column_change_taxon = self.get_Columns_Total()
rows_changed = []
self.formated_sheet.write(0, column_change_taxon, "is_occurrence_changed")
for row in data_to_change:
for column in data_to_change[row]:
column_index = self.sheet.row_values(0).index(column)
change_row = int(row) - 1
style = xlwt.easyxf(
'pattern: pattern solid, fore_colour green; font: colour white; borders: left 1, right 1, top 1, bottom 1; font: bold 1;')
if row not in rows_changed:
rows_changed.append(change_row)
self.formated_sheet.write(change_row, column_index, data_to_change[row][column])
for row in range(1, self.get_Row_Total()):
if row in rows_changed:
self.formated_sheet.write(row, column_change_taxon, "TRUE")
else:
self.formated_sheet.write(row, column_change_taxon, "FALSE")
def Change_Column(self, column, value, wrong_cell=None, index=None, name="None"):
row = 1
column_index = 0
rows_changed = []
if name != "None":
column_change_taxon = self.get_Columns_Total()
self.formated_sheet.write(0, column_change_taxon, "is_"+name+"_changed")
if column:
column_index = self.sheet.row_values(0).index(column)
if index:
column_index = index
for data in value:
if index == None:
if (data == self.Value_in_Cell(row, column_index)):
style = xlwt.easyxf('pattern: pattern solid, fore_colour red; font: colour white; borders: left 1, right 1, top 1, bottom 1; font: bold 1;')
elif wrong_cell != None:
if row in wrong_cell:
style = xlwt.easyxf('pattern: pattern solid, fore_colour red; font: colour white; borders: left 1, right 1, top 1, bottom 1; font: bold 1;')
else:
if row not in rows_changed:
rows_changed.append(row)
style = xlwt.easyxf('pattern: pattern solid, fore_colour green; font: colour white; borders: left 1, right 1, top 1, bottom 1; font: bold 1;')
else:
if row not in rows_changed:
rows_changed.append(row)
style = xlwt.easyxf('pattern: pattern solid, fore_colour green; font: colour white; borders: left 1, right 1, top 1, bottom 1; font: bold 1;')
else:
style = xlwt.easyxf('pattern: pattern solid, fore_colour white; font: colour black; borders: left 1, right 1, top 1, bottom 1; font: bold 1;')
self.formated_sheet.write(row, column_index, data)
row += 1
if name != "None":
for row in range(1, self.get_Row_Total()):
if row in rows_changed:
self.formated_sheet.write(row, column_change_taxon, "TRUE")
else:
self.formated_sheet.write(row, column_change_taxon, "FALSE")
def Save_Formatted_Spreadsheet(self, type):
if(type == ".csv"):
self.write_file.save("files/Planilha_Formatada.xls")
data_xls = pd.read_excel("files/Planilha_Formatada.xls")
data_xls.to_csv("files/Planilha_Formatada.csv", encoding="utf-8", index=False),
self.set_Path_configure_all("Planilha_Formatada.xls")
self.write_file.save("files/Planilha_Formatada{}".format(type))
self.set_Path_configure_all("Planilha_Formatada{}".format(type))
def Save_Write_Spreadsheet(self, type, name):
if(type == ".csv"):
self.write_file.save("files/"+name+".xls")
data_xls = pd.read_excel("files/"+name+".xls")
return data_xls.to_csv("files/"+name+".csv", encoding="utf-8", index=False)
return self.write_file.save("files/"+name+"{}".format(type))
| 45.065306 | 166 | 0.605742 |
acecb5482a2e998f840f2b6a39c5db0d4b386578 | 1,443 | py | Python | VSCode_work/chapter16/chapter16_16_2_1.py | yangyahu-1994/Python-Crash-Course | 6f8ef7fe8466d88931a0d3cc423ba5d966663b9d | [
"MIT"
] | 12 | 2020-10-22T14:03:27.000Z | 2022-03-28T08:14:22.000Z | VSCode_work/chapter16/chapter16_16_2_1.py | syncccc/Python-Crash-Course | 51fe429dd606583a790f3c1603bb3439382c09e0 | [
"MIT"
] | null | null | null | VSCode_work/chapter16/chapter16_16_2_1.py | syncccc/Python-Crash-Course | 51fe429dd606583a790f3c1603bb3439382c09e0 | [
"MIT"
] | 9 | 2020-12-22T10:22:12.000Z | 2022-03-28T08:14:53.000Z | # 导入必要的模块
# Import the required modules.
import csv
from datetime import datetime
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
# Read the dates, daily highs and daily lows from the CSV file.
filename = '/home/yyh/Documents/VSCode_work/chapter16/death_valley_2014.csv'
with open(filename) as f:
    reader = csv.reader(f)
    header_row = next(reader)  # consume the header row (kept only for inspection)
    dates, highs, lows = [], [], []
    for row in reader:
        try:
            # Column 0 is the date, column 1 the daily high, column 3 the low.
            current_date = datetime.strptime(row[0], "%Y-%m-%d")
            high = int(row[1])
            low = int(row[3])
        except ValueError:
            # NOTE(review): if the very first data row is malformed,
            # ``current_date`` is still unbound here and this print raises
            # NameError; for later rows it reports the *previous* row's date.
            print(current_date, 'missing data')
        else:
            # Only keep rows where all three fields parsed cleanly.
            dates.append(current_date)
            highs.append(high)
            lows.append(low)
# Plot data.
fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plot(dates, highs, c='red', alpha=0.5)
plt.plot(dates, lows, c='blue', alpha=0.5)
# Shade the band between the daily highs and lows.
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
# Format plot.
title = "Daily high and low temperatures - 2014\nDeath Valley, CA"
plt.title(title, fontsize=20)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', top=True, right=True, which='both', labelsize=16)
plt.ylim(10, 120)
plt.xlim(dates[0], dates[-1])  # this works too (instead of autoscaling the x axis)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))  # date format: %B is the full month name, %b its abbreviation
# plt.show()
plt.savefig("/home/yyh/Documents/VSCode_work/chapter16/chapter16_16_2_1", bbox_inches='tight')
acecb5da668860a98d6941b1346ed4eea833d9d7 | 485 | py | Python | commonblocks/simple_rich_text.py | springload/wagtailblocks | 2226597db2fad52e661eed14e5fc8de0a1a8b347 | [
"MIT"
] | 25 | 2015-11-27T09:22:50.000Z | 2018-01-28T23:42:51.000Z | commonblocks/simple_rich_text.py | springload/wagtailcommonblocks | 2226597db2fad52e661eed14e5fc8de0a1a8b347 | [
"MIT"
] | 7 | 2018-03-21T04:46:51.000Z | 2021-04-14T05:15:33.000Z | commonblocks/simple_rich_text.py | springload/wagtailblocks | 2226597db2fad52e661eed14e5fc8de0a1a8b347 | [
"MIT"
] | 6 | 2016-11-12T20:11:34.000Z | 2018-03-12T15:49:07.000Z | from django.utils.safestring import mark_safe
from django.utils.encoding import python_2_unicode_compatible
try:
from wagtail.core.rich_text import RichText, expand_db_html
except:
from wagtail.wagtailcore.rich_text import RichText, expand_db_html
@python_2_unicode_compatible
class SimpleRichText(RichText):
"""
A custom simple RichText to avoid the <div class='richtext'></div>
"""
def __str__(self):
return mark_safe(expand_db_html(self.source))
| 26.944444 | 70 | 0.769072 |
acecb6130e27a08240f487d243951f2c01689ce8 | 4,192 | py | Python | main.py | migueltsantana/datoosh | b65894b7bdf08a3bd256e9f39710bd3f2496a840 | [
"MIT"
] | null | null | null | main.py | migueltsantana/datoosh | b65894b7bdf08a3bd256e9f39710bd3f2496a840 | [
"MIT"
] | null | null | null | main.py | migueltsantana/datoosh | b65894b7bdf08a3bd256e9f39710bd3f2496a840 | [
"MIT"
] | null | null | null | import argparse
import multiprocessing as mp
import sys
import threading
import progressbar
from itertools import zip_longest
import yaml
import databases
q = mp.Queue()
mutex = threading.Lock()
def grouper(n, iterable, fillvalue=None):
"""
Collects the file data into fixed length chunks
:param n: the number of chunks to generate
:param iterable: the iterable object
:param fillvalue: the fillvalue to be used in zip_longest
:return: file contents in chunks
"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def process_chunk(chunk, delimiter, q, table_name):
"""
Processes each file chunk and adds the query to the queue.
:param chunk: the file segment to process
:param delimiter: the delimiter to use
:param q: the multiprocessing.queue object
:param table_name: the name of the table defined in the YAML file
"""
for line in chunk:
preprocess = "', '".join(line.strip().split(delimiter))
q.put(f"INSERT INTO {table_name} VALUES ('{preprocess}')", False)
def blocks(files, size=65536):
"""
Auxiliary method to read the file efficiently.
:param files: the file object
:param size: the maximum number of bytes to read
:return: fixed amount of bytes of the file content
"""
while True:
b = files.read(size)
if not b: break
yield b
# Command-line arguments
parser = argparse.ArgumentParser(description='Process big CSV files into SQLite databases.')
parser.add_argument("-f", "--file", help="The CSV file to process", type=str, required=True)
parser.add_argument("-w", "--max-worker-threads", help="The maximum number of concurrent processes to read "
"and process the CSV file", type=int, required=True)
parser.add_argument("-s", "--settings", help="The settings file", type=str, required=True)
parser.add_argument("-d", "--delimiter", help="The delimiter of the CSV file", type=str, default=",")
args = parser.parse_args()
# GLOBAL VARIABLES
NUM_LINES = 0
MAX_LINES_PER_THREAD = 0
MAX_WORKER_THREADS = args.max_worker_threads
FILE = args.file
SETTINGS_FILE = args.settings
DELIMITER = args.delimiter
# Reading the number of lines of the file
try:
with open(FILE, "r", encoding="utf-8", errors='ignore') as f:
NUM_LINES = sum(bl.count("\n") for bl in blocks(f))
except FileNotFoundError:
print(f"There is no such file with the name '{FILE}'. Please make sure there are no typos or this is the correct "
f"path...")
sys.exit(0)
except:
print("Unexpected error:", sys.exc_info()[0])
raise
# Reading the YAML file
try:
with open(SETTINGS_FILE) as file:
settings = yaml.full_load(file)
except FileNotFoundError:
print(f"There is no such file with the name '{SETTINGS_FILE}'. Please make sure there are no typos or this is the "
f"correct path...")
sys.exit(0)
except:
print("Unexpected error:", sys.exc_info()[0])
raise
# Creating the database table
getattr(databases, settings["database"]["type"]).create_table(settings)
MAX_LINES_PER_THREAD = NUM_LINES // MAX_WORKER_THREADS
# Shared value to calculate how many lines have been processed
num = mp.Value('i', 0)
# Initiate the processes that chunk the file into pieces
pool_processes = []
with open(FILE) as f:
next(f)
for i, g in enumerate(grouper(MAX_LINES_PER_THREAD, f, fillvalue=""), 1):
pool_processes.append(mp.Process(target=process_chunk, args=(g, DELIMITER, q, settings["table-name"])).start())
# Initiate the processes that process the queue and the queries
sql_pool_processes = []
for track in range(MAX_WORKER_THREADS):
sql_pool_processes.append(
mp.Process(target=getattr(databases, settings["database"]["type"]).process_sql,
args=(q, mutex, settings, num)).start())
# Progress bar to display the current status
bar = progressbar.ProgressBar(max_value=NUM_LINES)
def print_status():
"""
Simple timed function to update the progressbar.ProgressBar instance.
"""
threading.Timer(5.0, print_status).start()
bar.update(num.value)
print_status()
| 32 | 119 | 0.692987 |
acecb645445eeba39abeceb5f3ef03faeca9601f | 7,268 | py | Python | mac/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1beta2_controller_revision.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | mac/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1beta2_controller_revision.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | mac/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1beta2_controller_revision.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2ControllerRevision(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {
'api_version': 'str',
'data': 'RuntimeRawExtension',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'revision': 'int'
}
attribute_map = {
'api_version': 'apiVersion',
'data': 'data',
'kind': 'kind',
'metadata': 'metadata',
'revision': 'revision'
}
def __init__(self,
api_version=None,
data=None,
kind=None,
metadata=None,
revision=None):
"""
V1beta2ControllerRevision - a model defined in Swagger
"""
self._api_version = None
self._data = None
self._kind = None
self._metadata = None
self._revision = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if data is not None:
self.data = data
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.revision = revision
@property
def api_version(self):
"""
Gets the api_version of this V1beta2ControllerRevision.
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta2ControllerRevision.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta2ControllerRevision.
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta2ControllerRevision.
:type: str
"""
self._api_version = api_version
@property
def data(self):
"""
Gets the data of this V1beta2ControllerRevision.
Data is the serialized representation of the state.
:return: The data of this V1beta2ControllerRevision.
:rtype: RuntimeRawExtension
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this V1beta2ControllerRevision.
Data is the serialized representation of the state.
:param data: The data of this V1beta2ControllerRevision.
:type: RuntimeRawExtension
"""
self._data = data
@property
def kind(self):
"""
Gets the kind of this V1beta2ControllerRevision.
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta2ControllerRevision.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta2ControllerRevision.
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta2ControllerRevision.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta2ControllerRevision.
Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:return: The metadata of this V1beta2ControllerRevision.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta2ControllerRevision.
Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param metadata: The metadata of this V1beta2ControllerRevision.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def revision(self):
"""
Gets the revision of this V1beta2ControllerRevision.
Revision indicates the revision of the state represented by Data.
:return: The revision of this V1beta2ControllerRevision.
:rtype: int
"""
return self._revision
@revision.setter
def revision(self, revision):
"""
Sets the revision of this V1beta2ControllerRevision.
Revision indicates the revision of the state represented by Data.
:param revision: The revision of this V1beta2ControllerRevision.
:type: int
"""
if revision is None:
raise ValueError('Invalid value for `revision`, must not be `None`')
self._revision = revision
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2ControllerRevision):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.727273 | 100 | 0.643093 |
acecb6d7bbfaa69e6e4f0de7cb1d159fdd5206d2 | 278 | py | Python | cookieDjango/users/apps.py | aperson11/dunnotbh | be5d34abf28b1270384f0bb8afbfdc27cddf9e91 | [
"MIT"
] | null | null | null | cookieDjango/users/apps.py | aperson11/dunnotbh | be5d34abf28b1270384f0bb8afbfdc27cddf9e91 | [
"MIT"
] | null | null | null | cookieDjango/users/apps.py | aperson11/dunnotbh | be5d34abf28b1270384f0bb8afbfdc27cddf9e91 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'cookieDjango.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
| 19.857143 | 37 | 0.600719 |
acecb80ff0f703a82e73a35d1cf9f5c6b655dc0d | 338 | py | Python | models/section.py | Medisur/journalmanagement | bc356e8d3354529a14a5e04bec3d80c03ed1c0ec | [
"MIT"
] | 1 | 2019-04-16T08:53:16.000Z | 2019-04-16T08:53:16.000Z | models/section.py | Medisur/journalmanagement | bc356e8d3354529a14a5e04bec3d80c03ed1c0ec | [
"MIT"
] | null | null | null | models/section.py | Medisur/journalmanagement | bc356e8d3354529a14a5e04bec3d80c03ed1c0ec | [
"MIT"
] | null | null | null |
def _on_section_define(table):
pass
db.define_table('section',
Field('title', 'string',
requires=IS_NOT_EMPTY(), label=T('Section Title')),
Field('journal', 'reference journal'),
on_define=_on_section_define,
format='%(title)s'
) | 28.166667 | 73 | 0.508876 |
acecb84dd17a59c97af964ca4ef1385c591e96de | 567 | py | Python | rest/sip-in/create-acl-address/create-acl-address.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 234 | 2016-01-27T03:04:38.000Z | 2022-02-25T20:13:43.000Z | rest/sip-in/create-acl-address/create-acl-address.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 351 | 2016-04-06T16:55:33.000Z | 2022-03-10T18:42:36.000Z | rest/sip-in/create-acl-address/create-acl-address.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 494 | 2016-03-30T15:28:20.000Z | 2022-03-28T19:39:36.000Z | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
# Add one IP address entry to the SIP IP access control list identified by
# the hard-coded "AL..." SID (a friendly name plus the address itself).
ip_address = client.sip \
    .ip_access_control_lists("AL32a3c49700934481addd5ce1659f04d2") \
    .ip_addresses \
    .create("My office IP Address", "55.102.123.124")
# Print the SID Twilio assigned to the newly created address entry.
print(ip_address.sid)
| 33.352941 | 72 | 0.77425 |
acecb855a65e1d7a35eeb3a902f20e1b78d75c38 | 2,267 | py | Python | vr/server/tests/test_deploy.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | null | null | null | vr/server/tests/test_deploy.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | 3 | 2016-12-15T21:55:02.000Z | 2019-02-13T11:43:29.000Z | vr/server/tests/test_deploy.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | 2 | 2017-01-16T09:31:03.000Z | 2022-03-26T09:21:36.000Z | import io
import pytest
import yaml
from django.utils import timezone
from vr.server.models import App, Build, Release
from vr.server.tasks import build_proc_info
from vr.common.utils import randchars, tmpdir
# If there's a release with current config, and a good build, it should be
# returned by Swarm.get_current_release()
@pytest.mark.usefixtures('postgresql')
class TestDeploy(object):
    """Exercise ``build_proc_info`` against a saved App/Build/Release chain."""
    def setup(self):
        """Create and persist an App, a successful Build, and a Release."""
        self.app = App(name=randchars(), repo_url=randchars(), repo_type='hg')
        self.app.save()
        self.version = 'v1'
        self.build = Build(app=self.app, start_time=timezone.now(),
                           end_time=timezone.now(), tag=self.version,
                           status='success', buildpack_url=randchars(),
                           buildpack_version=randchars(), hash=randchars())
        self.build.file = '%s/build.tar.gz' % randchars()
        self.build.save()
        self.env = {'a': 1}
        # tricky_value presumably guards against quoting bugs in the YAML
        # round trip (apostrophes and a leading '@').
        self.config = {'b': 2, 'tricky_value': "@We're testing, aren't we?"}
        self.volumes = [['/blah', '/blerg']]
        self.release = Release(build=self.build, env_yaml=self.env,
                               config_yaml=self.config, volumes=self.volumes)
        self.release.save()
    def test_build_proc_info(self, gridfs):
        """The generated proc info must carry the release's volume list."""
        info = build_proc_info(self.release, 'test', 'somehost', 'web', 8000)
        assert info['volumes'] == self.volumes
    def test_build_proc_yaml_file(self, gridfs):
        # Test that the proc.yaml file that gets deployed has the correct
        # information.
        config_name = 'test'
        hostname = 'somehost'
        proc = 'web'
        port = 8000
        with tmpdir():
            # Generate the proc.yaml file the same way that
            # server.vr.server.tasks.deploy() does; then yaml.load() it
            # and compare with the local info.
            with io.open('proc.yaml', 'w+') as f:
                info = build_proc_info(self.release, config_name, hostname,
                                       proc, port)
                yaml.safe_dump(
                    info, stream=f, default_flow_style=False, encoding=None)
                f.seek(0)
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated/unsafe in newer PyYAML; yaml.safe_load would
                # suffice here since the data was dumped with safe_dump.
                written_info = yaml.load(f.read())
                assert info == written_info
| 35.421875 | 78 | 0.589766 |
acecb88f76930b5d7df201b6547803c82d8672da | 345 | py | Python | DS_Store_Cleaner/DS_Store_Cleaner.py | VXenomac/DS_Store_Cleaner | 59612fbb6f33acf78c425e9e383af72ad4745734 | [
"MIT"
] | 14 | 2019-11-13T14:58:52.000Z | 2021-04-24T15:33:34.000Z | DS_Store_Cleaner/DS_Store_Cleaner.py | VXenomac/DS_Store_Cleaner | 59612fbb6f33acf78c425e9e383af72ad4745734 | [
"MIT"
] | null | null | null | DS_Store_Cleaner/DS_Store_Cleaner.py | VXenomac/DS_Store_Cleaner | 59612fbb6f33acf78c425e9e383af72ad4745734 | [
"MIT"
] | 1 | 2020-06-08T08:56:22.000Z | 2020-06-08T08:56:22.000Z | import os
from pathlib import Path
def remove_file(path):
    """Recursively delete every ``.DS_Store`` file found under *path*.

    Prints one line per deleted file and a final count of deletions.

    :param path: root of the directory tree to purge of ``.DS_Store`` files
    """
    root = Path(path)
    # Materialise the matches first so the final count is available.
    targets = list(root.rglob('.DS_Store'))
    for target in targets:
        print('正在删除 %s…' % target)
        os.remove(target)
    print('一共删除 %d 个 .DS_Store 文件…' % len(targets))
| 21.5625 | 49 | 0.602899 |
acecb927787cad674d89e80bc591097d5e83761d | 8,907 | py | Python | salt/states/user.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | 1 | 2020-09-06T16:03:14.000Z | 2020-09-06T16:03:14.000Z | salt/states/user.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | salt/states/user.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | '''
User Management
===============
The user module is used to create and manage user settings, users can be set
as either absent or present
.. code-block:: yaml
fred:
user:
- present
- fullname: Fred Jones
- shell: /bin/zsh
- home: /home/fred
- uid: 4000
- gid: 4000
- groups:
- wheel
- storage
- games
'''
def _changes(
        name,
        uid=None,
        gid=None,
        groups=None,
        home=True,
        password=None,
        enforce_password=True,
        shell=None,
        fullname=None,
        roomnumber=None,
        workphone=None,
        homephone=None,
        other=None,
        unique=True,
        ):
    '''
    Return a dict of the changes required for a user if the user is present,
    otherwise return False.
    '''
    change = {}
    found = False
    # Shadow data is unavailable on FreeBSD; ``lshad`` stays undefined there,
    # and the only use below is guarded by the same grains check.
    if __grains__['os'] != 'FreeBSD':
        lshad = __salt__['shadow.info'](name)
    for lusr in __salt__['user.getent']():
        # Scan over the users
        if lusr['name'] == name:
            found = True
            if uid:
                if lusr['uid'] != uid:
                    change['uid'] = uid
            if gid:
                if lusr['gid'] != gid:
                    change['gid'] = gid
            if groups:
                # NOTE(review): compares the system group list against
                # sorted(groups) -- assumes user.getent returns groups sorted.
                if lusr['groups'] != sorted(groups):
                    change['groups'] = groups
            if home:
                # NOTE(review): with the default ``home=True`` this compares a
                # real home path against the boolean True and so always flags
                # a change -- confirm callers always pass a path here.
                if lusr['home'] != home:
                    change['home'] = home
            if shell:
                if lusr['shell'] != shell:
                    change['shell'] = shell
            if password:
                if __grains__['os'] != 'FreeBSD':
                    # '!' means the password is locked/unset; otherwise only
                    # report a difference when enforce_password is on.
                    if lshad['pwd'] == '!' or \
                            lshad['pwd'] != '!' and enforce_password:
                        if lshad['pwd'] != password:
                            change['passwd'] = password
            if fullname:
                if lusr['fullname'] != fullname:
                    change['fullname'] = fullname
            if roomnumber:
                if lusr['roomnumber'] != roomnumber:
                    change['roomnumber'] = roomnumber
            if workphone:
                if lusr['workphone'] != workphone:
                    change['workphone'] = workphone
            if homephone:
                if lusr['homephone'] != homephone:
                    change['homephone'] = homephone
            if other:
                if lusr['other'] != other:
                    change['other'] = other
    # ``unique`` is accepted only for signature parity with present();
    # it is not consulted when computing changes.
    if not found:
        return False
    return change
def present(
        name,
        uid=None,
        gid=None,
        groups=None,
        home=True,
        password=None,
        enforce_password=True,
        shell=None,
        fullname=None,
        roomnumber=None,
        workphone=None,
        homephone=None,
        other=None,
        unique=True,
        ):
    '''
    Ensure that the named user is present with the specified properties

    name
        The name of the user to manage

    uid
        The user id to assign, if left empty then the next available user id
        will be assigned

    gid
        The default group id

    groups
        A list of groups to assign the user to, pass a list object

    home
        The location of the home directory to manage

    password
        A password hash to set for the user

    enforce_password
        Set to False to keep the password from being changed if it has already
        been set and the password hash differs from what is specified in the
        "password" field. This option will be ignored if "password" is not
        specified.

    shell
        The login shell, defaults to the system default shell

    User comment field (GECOS) support (currently Linux-only):

    The below values should be specified as strings to avoid ambiguities when
    the values are loaded. (Especially the phone and room number fields which
    are likely to contain numeric data)

    fullname
        The user's full name.

    roomnumber
        The user's room number

    workphone
        The user's work phone number

    homephone
        The user's home phone number

    other
        The user's "other" GECOS field

    unique
        Require a unique UID, True by default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'User {0} is present and up to date'.format(name)}

    # _changes() returns a dict of pending changes when the user exists
    # (empty dict means up to date) and False when the user does not exist.
    changes = _changes(
            name,
            uid,
            gid,
            groups,
            home,
            password,
            enforce_password,
            shell,
            fullname,
            roomnumber,
            workphone,
            homephone,
            other,
            unique)

    if changes:
        if __opts__['test']:
            # Dry-run mode: report what would change without touching anything.
            ret['result'] = None
            ret['comment'] = ('The following user attributes are set to be '
                              'changed:\n')
            for key, val in changes.items():
                ret['comment'] += '{0}: {1}\n'.format(key, val)
            return ret
        # The user is present
        if __grains__['os'] != 'FreeBSD':
            lshad = __salt__['shadow.info'](name)
        pre = __salt__['user.info'](name)
        for key, val in changes.items():
            if key == 'passwd':
                # Passwords go through the shadow module, not user.ch*.
                __salt__['shadow.set_password'](name, password)
                continue
            # Dispatch to user.chuid / user.chgid / user.chhome / ... by key.
            __salt__['user.ch{0}'.format(key)](name, val)
        post = __salt__['user.info'](name)
        spost = {}
        if __grains__['os'] != 'FreeBSD':
            if lshad['pwd'] != password:
                spost = __salt__['shadow.info'](name)
        # See if anything changed
        for key in post:
            if post[key] != pre[key]:
                ret['changes'][key] = post[key]
        if __grains__['os'] != 'FreeBSD':
            for key in spost:
                if lshad[key] != spost[key]:
                    ret['changes'][key] = spost[key]
        if ret['changes']:
            ret['comment'] = 'Updated user {0}'.format(name)
        return ret

    # ``changes`` may also be an empty dict (user exists, nothing to do),
    # which falls through to the default "up to date" return at the bottom.
    if changes is False:
        # The user is not present, make it!
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'User {0} set to be added'.format(name)
            return ret
        if __salt__['user.add'](name,
                                uid=uid,
                                gid=gid,
                                groups=groups,
                                home=home,
                                shell=shell,
                                fullname=fullname,
                                roomnumber=roomnumber,
                                workphone=workphone,
                                homephone=homephone,
                                other=other,
                                unique=unique):
            ret['comment'] = 'New user {0} created'.format(name)
            ret['changes'] = __salt__['user.info'](name)
            if password:
                __salt__['shadow.set_password'](name, password)
                spost = __salt__['shadow.info'](name)
                if spost['pwd'] != password:
                    # The hash did not stick; surface the failure.
                    ret['comment'] = ('User {0} created but failed to set'
                                      ' password to {1}').format(name, password)
                    ret['result'] = False
                ret['changes']['password'] = password
        else:
            ret['comment'] = 'Failed to create new user {0}'.format(name)
            ret['result'] = False

    return ret
def absent(name, purge=False, force=False):
'''
Ensure that the named user is absent
name
The name of the user to remove
purge
Set purge to delete all of the user's file as well as the user
force
If the user is logged in the absent state will fail, set the force
option to True to remove the user even if they are logged in
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
for lusr in __salt__['user.getent']():
# Scan over the users
if lusr['name'] == name:
# The user is present, make it not present
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} set for removal'.format(name)
return ret
ret['result'] = __salt__['user.delete'](name, purge, force)
if ret['result']:
ret['changes'] = {name: 'removed'}
ret['comment'] = 'Removed user {0}'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to remove user {0}'.format(name)
return ret
ret['comment'] = 'User {0} is not present'.format(name)
return ret
| 30.091216 | 78 | 0.485461 |
acecba4bb05c58ee7764d0406002afc952c98b4e | 4,662 | py | Python | tests/async_scheduler.py | aekasitt/schedule | d37842a340aac64cfe5b52802271e8de76f21f9a | [
"MIT"
] | null | null | null | tests/async_scheduler.py | aekasitt/schedule | d37842a340aac64cfe5b52802271e8de76f21f9a | [
"MIT"
] | null | null | null | tests/async_scheduler.py | aekasitt/schedule | d37842a340aac64cfe5b52802271e8de76f21f9a | [
"MIT"
] | null | null | null | """Unit tests for async_scheduler.py"""
import datetime
import mock
import pytest
import sys
import unittest
from typing import Callable
from .schedule import mock_job, mock_datetime
if sys.version_info >= (3, 6, 0):
import schedule
import asyncio
else:
raise unittest.SkipTest("AsyncMock is supported since version 3.6")
async_scheduler = schedule.AsyncScheduler()
@pytest.fixture
def set_up():
async_scheduler.clear()
@pytest.fixture
def async_mock_job(name="async_job") -> Callable:
job = mock.AsyncMock()
job.__name__ = name
return job
async def stop_job():
return schedule.CancelJob
async def increment(array, index):
array[index] += 1
@pytest.mark.asyncio
@pytest.mark.skip(reason="slow demo test")
async def test_async_sample(set_up):
duration = 10 # seconds
test_array = [0] * duration
for index, value in enumerate(test_array):
async_scheduler.every(index + 1).seconds.do(increment, test_array, index)
start = datetime.datetime.now()
current = start
while (current - start).total_seconds() < duration:
await async_scheduler.run_pending()
await asyncio.sleep(1)
current = datetime.datetime.now()
for index, value in enumerate(test_array):
position = index + 1
expected = duration / position
expected = int(expected) if expected != int(expected) else expected - 1
error_msg = "unexpected value for {}th".format(position)
assert value == expected, error_msg
@pytest.mark.asyncio
async def test_async_run_pending(set_up, async_mock_job):
with mock_datetime(2010, 1, 6, 12, 15):
async_scheduler.every().minute.do(async_mock_job)
async_scheduler.every().hour.do(async_mock_job)
async_scheduler.every().day.do(async_mock_job)
async_scheduler.every().sunday.do(async_mock_job)
await async_scheduler.run_pending()
assert async_mock_job.call_count == 0
with mock_datetime(2010, 1, 6, 12, 16):
await async_scheduler.run_pending()
assert async_mock_job.call_count == 1
with mock_datetime(2010, 1, 6, 13, 16):
async_mock_job.reset_mock()
await async_scheduler.run_pending()
assert async_mock_job.call_count == 2
with mock_datetime(2010, 1, 7, 13, 16):
async_mock_job.reset_mock()
await async_scheduler.run_pending()
assert async_mock_job.call_count == 3
with mock_datetime(2010, 1, 10, 13, 16):
async_mock_job.reset_mock()
await async_scheduler.run_pending()
assert async_mock_job.call_count == 4
@pytest.mark.asyncio
async def test_async_run_all(set_up, async_mock_job):
async_scheduler.every().minute.do(async_mock_job)
async_scheduler.every().hour.do(async_mock_job)
async_scheduler.every().day.at("11:00").do(async_mock_job)
await async_scheduler.run_all()
assert async_mock_job.call_count == 3
@pytest.mark.asyncio
async def test_async_job_func_args_are_passed_on(set_up, async_mock_job):
async_scheduler.every().second.do(async_mock_job, 1, 2, "three", foo=23, bar={})
await async_scheduler.run_all()
async_mock_job.assert_called_once_with(1, 2, "three", foo=23, bar={})
@pytest.mark.asyncio
async def test_cancel_async_job(set_up, async_mock_job):
async_scheduler.every().second.do(stop_job)
mj = async_scheduler.every().second.do(async_mock_job)
assert len(async_scheduler.jobs) == 2
await async_scheduler.run_all()
assert len(async_scheduler.jobs) == 1
assert async_scheduler.jobs[0] == mj
async_scheduler.cancel_job("Not a job")
assert len(async_scheduler.jobs) == 1
async_scheduler.cancel_job(mj)
assert len(async_scheduler.jobs) == 0
@pytest.mark.asyncio
async def test_cancel_async_jobs(set_up):
async_scheduler.every().second.do(stop_job)
async_scheduler.every().second.do(stop_job)
async_scheduler.every().second.do(stop_job)
assert len(async_scheduler.jobs) == 3
await async_scheduler.run_all()
assert len(async_scheduler.jobs) == 0
@pytest.mark.asyncio
async def test_mixed_sync_async_tasks(set_up, mock_job, async_mock_job):
async_func = async_mock_job
sync_func = mock_job
async_scheduler.every().second.do(async_func)
async_scheduler.every().second.do(sync_func)
assert async_func.call_count == 0
assert sync_func.call_count == 0
await async_scheduler.run_all()
assert async_func.call_count == 1
assert sync_func.call_count == 1
| 30.470588 | 85 | 0.691549 |
acecbab9c737a9af5c87653afa69cf8a95153cc6 | 604 | py | Python | models/Conformer_encoder/__init__.py | llxcn/conformer_Informer | 4d39683f9e42d88474e47d02ecb9a872ecf2fa42 | [
"Apache-2.0"
] | null | null | null | models/Conformer_encoder/__init__.py | llxcn/conformer_Informer | 4d39683f9e42d88474e47d02ecb9a872ecf2fa42 | [
"Apache-2.0"
] | null | null | null | models/Conformer_encoder/__init__.py | llxcn/conformer_Informer | 4d39683f9e42d88474e47d02ecb9a872ecf2fa42 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 37.75 | 74 | 0.756623 |
acecbc1fe88e990eabda47d057a3783e4866a8e5 | 71,300 | py | Python | API/error_handling.py | marbotte/colSpList | 413cf556105b160d8c9bd67e26fa6b8b9f8becad | [
"MIT"
] | null | null | null | API/error_handling.py | marbotte/colSpList | 413cf556105b160d8c9bd67e26fa6b8b9f8becad | [
"MIT"
] | null | null | null | API/error_handling.py | marbotte/colSpList | 413cf556105b160d8c9bd67e26fa6b8b9f8becad | [
"MIT"
] | null | null | null | """
In this file we redirect the errors before passing them to the endpoints
We manage here 2 types of errors:
1. if the error is due to user input, the functions should returns a result consisting of a dictionary with a key 'error' and the explicative message for the user
2. if the error is due to a problem in the api code or database, the function raises a "Abort500Error" in order to indicate to the endpoint that it should use the abort method with the "500" html error code and the message from the initial error code
"""
from errors_def import MissingArgError, DatabaseUncompatibilityValueError, DatabaseUncompatibilityError, AlreadyExistsDbError, DeleteMissingElementDbError, ModifyMissingStatusDbError, TaxonNotFoundDbError, GrantExistingRightError, RevokeUnexistingRightError, UncompatibilityGbifKeyCanonicalname, DbIntegrityError, UncompatibleStatusError, UnauthorizedValueError,UncompatibilityCdTaxInputTaxError, ModifyMissingRefDbError, UserNotFoundError, Abort500Error
from taxo import manageInputTax, get_gbif_parsed_from_sci_name,childrenList,deleteTaxo, checkCdTax,modifyTaxo
from flask import abort, g
from getStatus import testEndemStatus, testExotStatus, testThreatStatus, getListTax,getListExot, getListEndem, getListThreat, getTax, getListReferences
from manageStatus import manageSource,deleteRef,mergeRefs, modifyRef, deleteExot, deleteEndem,deleteThreat,modifyEndem,modifyThreat,modifyExot,manageInputEndem,manageInputThreat,manageInputExot
from security import new_user, delete_user, valid_password, user_exists, get_user, generate_auth_token, verify_auth_token, grant_user, revoke_user, grant_edit, revoke_edit, grant_admin, revoke_admin,change_password, get_user_list
from admin import delReference_no_status,delTaxo_no_status,delStatus_no_reference,delSyno_no_tax
import psycopg2
from psycopg2 import sql
import psycopg2.extras
from io import BytesIO
from flask import send_file
def testEndemGet_err_hand(connection, **testEndemArgs):
"""
Description
-----------
Look for a species. If the found species has an endemism status, returns its status and associated references
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
testEndemArgs: dict
Dictionary with the following elements:
gbifkey: Int [optional]
Identificator of a taxon in the GBIF Backbone database (=specieskey, key, acceptedkey etc.)
scientificname: Str [optional]
Complete name of a taxon, with authorship
canonicalname: Str [optional]
Name of the taxon without authorship. Formally correspond to canonicalNameWithMarker in GBIF DarwinCore format
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
cd_tax_acc: Int
Identifier of the accepted taxon
alreadyInDb: Bool
Whether the taxon was already in the database when the endpoint was accessed
foundGbif: Bool
Whether the taxon was found in GBIF
matchedname: Str
Name of the taxon matched with the provided one, in the API database, or from the GBIF API
acceptedname: Str
Name of the accepted taxon
gbifkey: Int
Identifier of a taxon in the GBIF backbone
syno: Bool
Whether a taxon is a synonym
insertedTax: List(Int)
List of inserted taxa
hasEndemStatus: Bool
Whether the taxon has an endemism status in the database
cd_nivel: Int
Endemism level (from 0: unsuficient information to 4: endemic)
endemism: Str
Endemism level (Spanish)
endemism_en: Str
Endemism level (English)
comments: Str
Comments on the taxon status
references: List(Str)
List of blibliographic references
links: List(Str)
List of internet links (URLs) for resources (usually datasets or pdf) associated with a bibliographic reference
"""
try:
res = manageInputTax(connection=connection, insert=False, **testEndemArgs)
if res.get('alreadyInDb'):
res.update(testEndemStatus(connection,res.get('cd_tax_acc')))
else:
res.update({'hasEndemStatus':False,'cd_status':None,'comments':None,'references':list(),'links':list()})
except (MissingArgError, UncompatibilityGbifKeyCanonicalname) as e:
return {'error':str(e)}
except UnauthorizedValueError as e:
if e.var=='gbifMatchMode':
raise Abort500Error(str(e)) from e
else:
return {'error':str(e)}
except DbIntegrityError as e:
raise Abort500Error(str(e)) from e
else:
return res
def testExotGet_err_hand(connection, **testExotArgs):
"""
Description
-----------
Look for a species. If the found species has an exotic status, returns its status and associated references
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
testExotArgs: dict
Dictionary with the following elements:
gbifkey: Int [optional]
Identificator of a taxon in the GBIF Backbone database (=specieskey, key, acceptedkey etc.)
scientificname: Str [optional]
Complete name of a taxon, with authorship
canonicalname: Str [optional]
Name of the taxon without authorship. Formally correspond to canonicalNameWithMarker in GBIF DarwinCore format
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
cd_tax_acc: Int
Identifier of the accepted taxon
alreadyInDb: Bool
Whether the taxon was already in the database when the endpoint was accessed
foundGbif: Bool
Whether the taxon was found in GBIF
matchedname: Str
Name of the taxon matched with the provided one, in the API database, or from the GBIF API
acceptedname: Str
Name of the accepted taxon
gbifkey: Int
Identifier of a taxon in the GBIF backbone
syno: Bool
Whether a taxon is a synonym
insertedTax: List(Int)
List of inserted taxa
hasExotStatus: Bool
Whether the taxon has an alien/invasive status in the database
is_alien: Bool
Whether a taxon is alien for Colombia (part of the exotic status)
is_invasive: Bool
Whether a taxo is invasive in Colombia (part of the exotic status)
comments: Str
Comments on the taxon status
references: List(Str)
List of blibliographic references
links: List(Str)
List of internet links (URLs) for resources (usually datasets or pdf) associated with a bibliographic reference
"""
try:
res = manageInputTax(connection=connection, insert=False, **testExotArgs)
if res.get('alreadyInDb'):
res.update(testExotStatus(connection,res.get('cd_tax_acc')))
else:
res.update({'hasExotStatus':False,'is_alien':None,'is_invasive':None,'comments':None,'references':list(),'links':list()})
except (MissingArgError, UncompatibilityGbifKeyCanonicalname) as e:
return {'error':str(e)}
except UnauthorizedValueError as e:
if e.var=='gbifMatchMode':
raise Abort500Error(str(e)) from e
else:
return {'error':str(e)}
except DbIntegrityError as e:
raise Abort500Error(str(e)) from e
else:
return res
def testThreatGet_err_hand(connection, **testThreatArgs):
"""
Description
-----------
Look for a species. If the found species has an threat status, returns its status and associated references
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
testThreatArgs: dict
Dictionary with the following elements:
gbifkey: Int [optional]
Identificator of a taxon in the GBIF Backbone database (=specieskey, key, acceptedkey etc.)
scientificname: Str [optional]
Complete name of a taxon, with authorship
canonicalname: Str [optional]
Name of the taxon without authorship. Formally correspond to canonicalNameWithMarker in GBIF DarwinCore format
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
alreadyInDb: Bool
Whether the taxon was already in the database when the endpoint was accessed
foundGbif: Bool
Whether the taxon was found in GBIF
matchedname: Str
Name of the taxon matched with the provided one, in the API database, or from the GBIF API
acceptedname: Str
Name of the accepted taxon
gbifkey: Int
Identifier of a taxon in the GBIF backbone
syno: Bool
Whether a taxon is a synonym
insertedTax: List(Int)
List of inserted taxa
hasThreatStatus: Bool
Whether the taxon has a threat status in the database
cd_status: Str
Status of the species (IUCN threat statusl)
comments: Str
Comments on the taxon status
references: List(Str)
List of blibliographic references
links: List(Str)
List of internet links (URLs) for resources (usually datasets or pdf) associated with a bibliographic reference
"""
try:
res = manageInputTax(connection=connection, insert=False, **testThreatArgs)
if res.get('alreadyInDb'):
res.update(testThreatStatus(connection,res.get('cd_tax_acc')))
else:
res.update({'hasThreatStatus':False,'cd_status':None,'comments':None,'references':list(),'links':list()})
except (MissingArgError, UncompatibilityGbifKeyCanonicalname) as e:
return {'error':str(e)}
except UnauthorizedValueError as e:
if e.var=='gbifMatchMode':
raise Abort500Error(str(e)) from e
else:
return {'error':str(e)}
except DbIntegrityError as e:
raise Abort500Error(str(e)) from e
else:
return res
def ListEndem_err_hand(connection, **listEndemArgs):
"""
Description
-----------
Returns of endemic taxa and associated reference. Without argument, returns the complete list. With the “childrenOf” argument, returns only the taxa from a taxonomic clade. Export format may be JSON or CSV
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
listEndemArgs: dict
Dictionary with the following elements:
childrenOf: Str [optional]
canonicalname or scientificname of the parent taxon for which we want to get the list of children taxa (and their statuses)
format: Str [optional]
JSON or CSV format in GET methods
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
scientificname: Str
Name of a taxon, with associated authorship
parentname: Str
Name of the direct parent taxon
tax_rank: Str
Taxonomic level (from FORM to DOMAIN)
gbifkey: Int
Identifier of a taxon in the GBIF backbone
synonyms: List(Str)
List of synonyms associated with a taxon
endemism: Str
Endemism level (Spanish)
endemism_en: Str
Endemism level (English)
comments: Str
Comments on the taxon status
references: List(Str)
List of blibliographic references
links: List(Str)
List of internet links (URLs) for resources (usually datasets or pdf) associated with a bibliographic reference
"""
try:
if listEndemArgs.get('childrenOf'):
parsed = get_gbif_parsed_from_sci_name(listEndemArgs.get('childrenOf'))
if parsed.get('scientificName') and parsed.get('canonicalNameComplete') and parsed.get('scientificName')==parsed.get('canonicalNameComplete'):
parent=manageInputTax(connection=connection, insert=False,canonicalname=listEndemArgs.get('childrenOf'))
else:
parent=manageInputTax(connection=connection, insert=False,scientificname=listEndemArgs.get('childrenOf'))
if parent.get('alreadyInDb'):
cursor=connection.cursor()
listChildren=childrenList(cursor,parent.get('cd_tax_acc'))
cursor.close()
res=getListEndem(connection=connection, listChildren=listChildren, formatExport=listEndemArgs.get('format'))
else:
raise TaxonNotFoundDbError(tax=listEndemArgs.get('childrenOf'), message='\'childrenOf\' taxon not recognized')
#raise Exception("childrenOfNotFound")
else:
res = getListEndem(connection=connection, listChildren=[], formatExport=listEndemArgs.get('format'))
except (TaxonNotFoundDbError, MissingArgError) as e:
return {'error':str(e)}
except (DbIntegrityError, UnauthorizedValueError) as e:
raise Abort500Error(str(e)) from e
else:
if listEndemArgs.get('format')=="CSV":
response_stream = BytesIO(res.to_csv().encode())
return send_file(response_stream, mimetype = "text/csv", attachment_filename = "endemic.csv")
else:
return res
def ListExot_err_hand(connection, **listExotArgs):
"""
Description
-----------
Returns of exotic taxa and associated reference. Without argument, returns the complete list. With the “childrenOf” argument, returns only the taxa from a taxonomic clade. Export format may be JSON or CSV
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
listExotArgs: dict
Dictionary with the following elements:
childrenOf: Str [optional]
canonicalname or scientificname of the parent taxon for which we want to get the list of children taxa (and their statuses)
format: Str [optional]
JSON or CSV format in GET methods
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
scientificname: Str
Name of a taxon, with associated authorship
parentname: Str
Name of the direct parent taxon
tax_rank: Str
Taxonomic level (from FORM to DOMAIN)
gbifkey: Int
Identifier of a taxon in the GBIF backbone
synonyms: List(Str)
List of synonyms associated with a taxon
is_alien: Bool
Whether a taxon is alien for Colombia (part of the exotic status)
is_invasive: Bool
Whether a taxo is invasive in Colombia (part of the exotic status)
comments: Str
Comments on the taxon status
references: List(Str)
List of blibliographic references
links: List(Str)
List of internet links (URLs) for resources (usually datasets or pdf) associated with a bibliographic reference
"""
try:
if listExotArgs.get('childrenOf'):
parsed = get_gbif_parsed_from_sci_name(listExotArgs.get('childrenOf'))
if parsed.get('scientificName') and parsed.get('canonicalNameComplete') and parsed.get('scientificName')==parsed.get('canonicalNameComplete'):
parent=manageInputTax(connection=connection, insert=False,canonicalname=listExotArgs.get('childrenOf'))
else:
parent=manageInputTax(connection=connection, insert=False,scientificname=listExotArgs.get('childrenOf'))
if parent.get('alreadyInDb'):
cursor=connection.cursor()
listChildren=childrenList(cursor,parent.get('cd_tax_acc'))
cursor.close()
res=getListExot(connection=connection, listChildren=listChildren, formatExport=listExotArgs.get('format'))
else:
raise TaxonNotFoundDbError(tax=listExotArgs.get('childrenOf'), message='\'childrenOf\' taxon not recognized')
#raise Exception("childrenOfNotFound")
else:
res = getListExot(connection=connection, listChildren=[], formatExport=listExotArgs.get('format'))
except (TaxonNotFoundDbError, MissingArgError) as e:
return {'error':str(e)}
except (DbIntegrityError, UnauthorizedValueError) as e:
raise Abort500Error(str(e)) from e
else:
if listExotArgs.get('format')=="CSV":
response_stream = BytesIO(res.to_csv().encode())
return send_file(response_stream, mimetype = "text/csv", attachment_filename = "exot.csv")
else:
return res
def ListThreat_err_hand(connection, **listThreatArgs):
"""
Description
-----------
Returns of threatened taxa and associated reference. Without argument, returns the complete list. With the “childrenOf” argument, returns only the taxa from a taxonomic clade. Export format may be JSON or CSV
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
listThreatArgs: dict
Dictionary with the following elements:
childrenOf: Str [optional]
canonicalname or scientificname of the parent taxon for which we want to get the list of children taxa (and their statuses)
format: Str [optional]
JSON or CSV format in GET methods
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
scientificname: Str
Name of a taxon, with associated authorship
parentname: Str
Name of the direct parent taxon
tax_rank: Str
Taxonomic level (from FORM to DOMAIN)
gbifkey: Int
Identifier of a taxon in the GBIF backbone
synonyms: List(Str)
List of synonyms associated with a taxon
cd_status: Str
Status of the species (IUCN threat statusl)
comments: Str
Comments on the taxon status
references: List(Str)
List of blibliographic references
links: List(Str)
List of internet links (URLs) for resources (usually datasets or pdf) associated with a bibliographic reference
"""
try:
if listThreatArgs.get('childrenOf'):
parsed = get_gbif_parsed_from_sci_name(listThreatArgs.get('childrenOf'))
if parsed.get('scientificName') and parsed.get('canonicalNameComplete') and parsed.get('scientificName')==parsed.get('canonicalNameComplete'):
parent=manageInputTax(connection=connection, insert=False,canonicalname=listThreatArgs.get('childrenOf'))
else:
parent=manageInputTax(connection=connection, insert=False,scientificname=listThreatArgs.get('childrenOf'))
if parent.get('alreadyInDb'):
cursor=connection.cursor()
listChildren=childrenList(cursor,parent.get('cd_tax_acc'))
cursor.close()
res=getListThreat(connection=connection, listChildren=listChildren, formatExport=listThreatArgs.get('format'))
else:
raise TaxonNotFoundDbError(tax=listThreatArgs.get('childrenOf'), message='\'childrenOf\' taxon not recognized')
#raise Exception("childrenOfNotFound")
else:
res = getListThreat(connection=connection, listChildren=[], formatExport=listThreatArgs.get('format'))
except (TaxonNotFoundDbError, MissingArgError) as e:
return {'error':str(e)}
except (DbIntegrityError, UnauthorizedValueError) as e:
raise Abort500Error(str(e)) from e
else:
if listThreatArgs.get('format')=="CSV":
response_stream = BytesIO(res.to_csv().encode())
return send_file(response_stream, mimetype = "text/csv", attachment_filename = "threat.csv")
else:
return res
def GetTaxon_err_hand(connection, **taxInput):
"""
Description
-----------
Returns the information about one taxon.
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
taxInput: dict
Dictionary with the following elements:
cd_tax: Int [optional]
Identificator of a taxon in the database
gbifkey: Int [optional]
Identificator of a taxon in the GBIF Backbone database (=specieskey, key, acceptedkey etc.)
scientificname: Str [optional]
Complete name of a taxon, with authorship
canonicalname: Str [optional]
Name of the taxon without authorship. Formally correspond to canonicalNameWithMarker in GBIF DarwinCore format
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
scientificname: Str
Name of a taxon, with associated authorship
canonicalname: Str
Name of the taxon, without authorship (corresponds to canonicalNameWithMarkers in the GBIF DarwinCore format)
authorship: Str
Authorship associated with the taxon name
tax_rank: Str
Taxonomic level (from FORM to DOMAIN)
cd_parent: Int
Identitificator of the parent taxon
parentname: Str
Name of the direct parent taxon
cd_accepted: Int
Identifier of the accepted taxon
acceptedname: Str
Name of the accepted taxon
status: Str
Taxonomic status of a taxon
gbifkey: Int
Identifier of a taxon in the GBIF backbone
hasEndemStatus: Bool
Whether the taxon has an endemism status in the database
hasExotStatus: Bool
Whether the taxon has an alien/invasive status in the database
hasThreatStatus: Bool
Whether the taxon has a threat status in the database
"""
try:
if not taxInput.get('cd_tax'):
taxInput.update(manageInputTax(connection=connection,insert=False,**taxInput))
taxOutput = getTax(connection,taxInput.get('cd_tax'))
except (MissingArgError, UncompatibilityGbifKeyCanonicalname) as e:
return {'error':str(e)}
except (DbIntegrityError, UnauthorizedValueError) as e:
raise Abort500Error(str(e)) from e
else:
return taxOutput
def ListTax_err_hand(connection, **listTaxArgs):
"""
Description
-----------
Returns a list of tax integrated in the API database. Without argument, returns the complete list. With the “childrenOf” argument, returns only the taxa from a taxonomic clade. Export format may be JSON or CSV
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
listTaxArgs: dict
Dictionary with the following elements:
childrenOf: Str [optional]
canonicalname or scientificname of the parent taxon for which we want to get the list of children taxa (and their statuses)
format: Str [optional]
JSON or CSV format in GET methods
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
scientificname: Str
Name of a taxon, with associated authorship
canonicalname: Str
Name of the taxon, without authorship (corresponds to canonicalNameWithMarkers in the GBIF DarwinCore format)
authorship: Str
Authorship associated with the taxon name
tax_rank: Str
Taxonomic level (from FORM to DOMAIN)
cd_parent: Int
Identitificator of the parent taxon
parentname: Str
Name of the direct parent taxon
cd_accepted: Int
Identifier of the accepted taxon
acceptedname: Str
Name of the accepted taxon
status: Str
Taxonomic status of a taxon
gbifkey: Int
Identifier of a taxon in the GBIF backbone
hasEndemStatus: Bool
Whether the taxon has an endemism status in the database
hasExotStatus: Bool
Whether the taxon has an alien/invasive status in the database
hasThreatStatus: Bool
Whether the taxon has a threat status in the database
"""
try:
if listTaxArgs.get('childrenOf'):
parsed = get_gbif_parsed_from_sci_name(listTaxArgs.get('childrenOf'))
if parsed.get('scientificName') and parsed.get('canonicalNameComplete') and parsed.get('scientificName')==parsed.get('canonicalNameComplete'):
parent=manageInputTax(connection=connection, insert=False,canonicalname=listTaxArgs.get('childrenOf'))
else:
parent=manageInputTax(connection=connection, insert=False,scientificname=listTaxArgs.get('childrenOf'))
if parent.get('alreadyInDb'):
cursor=connection.cursor()
listChildren=childrenList(cursor,parent.get('cd_tax_acc'))
cursor.close()
res=getListTax(connection=connection, listChildren=listChildren, formatExport=listTaxArgs.get('format'))
else:
raise TaxonNotFoundDbError(tax=listTaxArgs.get('childrenOf'), message='\'childrenOf\' taxon not recognized')
#raise Exception("childrenOfNotFound")
else:
res = getListTax(connection=connection, listChildren=[], formatExport=listTaxArgs.get('format'))
except (TaxonNotFoundDbError, MissingArgError) as e:
return {'error':str(e)}
except (DbIntegrityError, UnauthorizedValueError) as e:
raise Abort500Error(str(e)) from e
else:
if listTaxArgs.get('format')=="CSV":
response_stream = BytesIO(res.to_csv().encode())
return send_file(response_stream, mimetype = "text/csv", attachment_filename = "taxonomy.csv")
else:
return res
def ListRef_err_hand(connection, **listRefArgs):
"""
Description
-----------
Returns a list of bibliographic references from the database, with the number of taxa with statuses (endemism, alien and threatened). Format may be JSON or CSV
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
listRefArgs: dict
Dictionary with the following elements:
onlyExot: Bool [optional]
Whether to returns only the exotic-related references
onlyEndem: Bool [optional]
Whether to returns only the endemism-related references
onlyThreat: Bool [optional]
Whether to returns only the threat-related references
format: Str [optional]
JSON or CSV format in GET methods
Returns
-----------
cd_ref: Int
Identifier of the bibliographic reference
ref_citation: Str
Bibliographic reference descriptor
link: Str
Internet link for resources (usually datasets or pdf) associated with a bibliographic reference
nb_exot: Int
Number of exotic taxa associated with a bibliographic reference
nb_endem: Int
Number of endemic taxa associated with a bibliographic reference
nb_threat: Int
Number of threatened taxa associated with a bibliographic reference
"""
try:
listRef = getListReferences(connection=connection, formatExport= listRefArgs.get('format'), onlyEndem= listRefArgs.get('onlyEndem'), onlyExot= listRefArgs.get('onlyExot'), onlyThreat= listRefArgs.get('onlyThreat'))
except (MissingArgError) as e:
return {'error':str(e)}
except (DbIntegrityError, UnauthorizedValueError) as e:
raise Abort500Error(str(e)) from e
else:
if listRefArgs.get('format')=="CSV":
response_stream = BytesIO(listRef.to_csv().encode())
return send_file(response_stream, mimetype = "text/csv", attachment_filename = "references.csv")
else:
return listRef
def cleanDbDel_err_hand(connection,**cdbArgs):
"""
Description
-----------
Delete status without references, references without status nor taxa, synonyms without accepted tax, and/or taxa without statuses ( and which are not synonyms or parents of taxa with statuses)
Parameters:
----------
connection: psycopg2 Connection
connection to the postgres database.
cdbArgs: dict
Dictionary with the following elements:
status_no_ref: Bool [optional]
Whether to delete statuses without associated bibliographic references
ref_no_status: Bool [optional]
Whether to delete references which are not associated with any status or taxon
syno_no_tax: Bool [optional]
Whether to delete synonym without accepted names
tax_no_status: Bool [optional]
Whether to delete taxa which have no status in the database (and are neither synonym or parents of a taxon with status)
Returns
-----------
cd_tax: Int
Identifier of a taxon in the API database
cd_ref: Int
Identifier of the bibliographic reference
cd_st: List(Int)
List of status Identifiers (since a taxon can only have only a status in each category, corresponds to the cd_tax)
"""
try:
cd_taxs=[]
cd_refs=[]
cd_status=[]
if cdbArgs.get('status_no_ref'):
cd_status+=delStatus_no_reference(connection)
if cdbArgs.get('ref_no_status'):
cd_refs+=delReference_no_status(connection)
if cdbArgs.get('syno_no_tax'):
cd_taxs+=delSyno_no_tax(connection)
if cdbArgs.get('tax_no_status'):
cd_taxs+=delTaxo_no_status(connection)
except (MissingArgError,DeleteMissingElementDbError) as e:
return {'error':str(e)}
else:
return {'cd_taxs': cd_taxs, 'cd_refs':cd_refs,'cd_status':cd_status}
def userPost_err_hand(connection, **userArgs):
    """
    Description
    -----------
    Creates a user without editing/admin rights
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    userArgs: dict
        Dictionary with the following elements:
        username: Str [required]
            Name of a user
        password: Str [required]
            Password of a user for its creation
    Returns
    -----------
    uid: Int
        Identifier of a user
    """
    try:
        created_uid, created_name = new_user(connection, **userArgs)
    except AlreadyExistsDbError as e:
        # Duplicate username: report the problem instead of failing hard
        return {'error': str(e)}
    return {'uid': created_uid, 'username': created_name}
def userPut_err_hand(connection, **userArgs):
    """
    Description
    -----------
    Change password for the autenticated user
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    userArgs: dict
        Dictionary with the following elements:
        newPassword: Str [required]
            New password of the user
    Returns
    -----------
    uid: Int
        Identifier of a user
    """
    # Bug fix: `cur` was previously created inside the try block AFTER
    # g.get('user')/user.update; when one of those raised, the finally
    # clause hit an unbound `cur` and the UnboundLocalError masked the
    # real exception. Initialize to None and guard the close.
    cur = None
    try:
        user = g.get('user')
        user.update(**userArgs)
        cur = connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        uid = change_password(cur, **user)
    except (UserNotFoundError, MissingArgError) as e:
        return {'error': str(e)}
    else:
        # Commit only after the password change succeeded
        connection.commit()
        return {'uid': uid}
    finally:
        if cur is not None:
            cur.close()
def adminUserDel_err_hand(connection, **userArgs):
    """
    Description
    -----------
    Delete a user
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    userArgs: dict
        Dictionary with the following elements:
        uid: Int [optional]
            Identificator of a user in the API database
        username: Str [optional]
            Name of a user
    Returns
    -----------
    uid: Int
        Identifier of a user
    username: Str
        User name
    """
    try:
        removed_uid, removed_name = delete_user(connection, **userArgs)
    except (UserNotFoundError, MissingArgError, DeleteMissingElementDbError) as e:
        return {'error': str(e)}
    # The deletion succeeded: make it permanent before answering
    connection.commit()
    return {'uid': removed_uid, 'username': removed_name}
def adminUserPut_err_hand(connection, **modifyArgs):
    """
    Description
    -----------
    Change permission and/or password of a user.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    modifyArgs: dict
        Dictionary with the following elements:
        uid: Int [optional]
            Identificator of a user in the API database
        username: Str [optional]
            Name of a user
        grant_user: Bool [optional]
            Whether to grant or not the user basic permissions to the user
        revoke_user: Bool [optional]
            Whether to revoke basic rights of a user
        grant_edit: Bool [optional]
            Whether to grant or not the editing permission to the user
        revoke_edit: Bool [optional]
            Whether to revoke edition rights of a user
        grant_admin: Bool [optional]
            Whether to grant or not the administrative permission to the user
        revoke_admin: Bool [optional]
            Whether to revoke administrative rights of a user
        newPassword: Str [optional]
            New password of the user
    Returns
    -----------
    uid: Int
        Identifier of a user
    username: Str
        User name
    """
    res = {'grant_edit': None, 'grant_user': None, 'grant_admin': None,
           'revoke_edit': None, 'revoke_admin': None, 'revoke_user': None,
           'newPassword': None}
    # Requested operation -> handler, applied in this fixed order
    # (grants first, then revocations, then password change).
    operations = (
        ('grant_user', grant_user),
        ('grant_edit', grant_edit),
        ('grant_admin', grant_admin),
        ('revoke_user', revoke_user),
        ('revoke_edit', revoke_edit),
        ('revoke_admin', revoke_admin),
        ('newPassword', change_password),
    )
    # Bug fix: `cur` was previously only assigned inside the try block, so
    # a failure in connection.cursor() made the finally clause raise
    # UnboundLocalError, masking the original exception.
    cur = None
    try:
        cur = connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        for arg_name, handler in operations:
            if modifyArgs.get(arg_name):
                res[arg_name] = handler(cur, **modifyArgs)
    except (UserNotFoundError, GrantExistingRightError, RevokeUnexistingRightError, MissingArgError) as e:
        return {'error': str(e)}
    else:
        connection.commit()
        return res
    finally:
        if cur is not None:
            cur.close()
def adminUserGet_err_hand(connection):
    """
    Description
    -----------
    Returns the list of users and their permissions. Format may be JSON or CSV
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    Returns
    -----------
    uid: Int
        Identifier of a user
    username: Str
        User name
    roles: List(Str)
        List of roles (permissions, rights) for a user
    """
    # Bug fix: guard the cursor so that a failure in connection.cursor()
    # does not trigger an UnboundLocalError in the finally clause.
    cur = None
    try:
        cur = connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        return get_user_list(cur)
    finally:
        if cur is not None:
            cur.close()
def manageTaxPost_err_hand(connection, **inputTax):
    """
    Description
    -----------
    Insert a taxon in the datababase (with its accepted taxon, if synonym, and parent taxa)
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    inputTax: dict
        Dictionary with the following elements:
        gbifkey: Int [optional]
            Identificator of a taxon in the GBIF Backbone database (=specieskey, key, acceptedkey etc.)
        scientificname: Str [optional]
            Complete name of a taxon, with authorship
        canonicalname: Str [optional]
            Name of the taxon without authorship
        authorship: Str [optional]
            Authorship of a taxon
        syno: Bool [optional]
            Whether the taxon is a synonym
        parentgbifkey: Int [optional]
            gbifkey of the parent taxon
        parentcanonicalname: Str [optional]
            canonicalname of the parent taxon
        parentscientificname: Str [optional]
            scientificname of the parent taxon
        synogbifkey: Int [optional]
            Accepted taxon gbifkey (when the sent taxon is a synonym)
        synocanonicalname: Str [optional]
            Accepted taxon canonicalname (when the sent taxon is a synonym)
        synoscientificname: Str [optional]
            Accepted taxon scientificname (when the sent taxon is a synonym)
        rank: Str [optional]
            Taxonomic rank (level) of the provided taxon
        min_gbif_conf: Int [optional]
            Minimum confidence in the GBIF matching (default 90, maximum 100)
        no_gbif: Bool [optional]
            Whether to skip matching the provided taxon through the GBIF API (parent taxa are still searched through GBIF if absent from the database)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_tax_acc: Int
        Identifier of the accepted taxon
    alreadyInDb: Bool
        Whether the taxon was already in the database when the endpoint was accessed
    foundGbif: Bool
        Whether the taxon was found in GBIF
    matchedname: Str
        Name of the taxon matched with the provided one
    acceptedname: Str
        Name of the accepted taxon
    gbifkey: Int
        Identifier of a taxon in the GBIF backbone
    syno: Bool
        Whether a taxon is a synonym
    insertedTax: List(Int)
        List of inserted taxa
    """
    try:
        outcome = manageInputTax(connection=connection, insert=True, **inputTax)
    except (MissingArgError, UncompatibilityGbifKeyCanonicalname) as e:
        # Client-side problem: report it as a normal error payload
        return {'error': str(e)}
    except (UnauthorizedValueError, DbIntegrityError) as e:
        # Server-side inconsistency: escalate as an internal error
        raise Abort500Error(str(e)) from e
    return outcome
def manageTaxDel_err_hand(connection, **delTaxArgs):
    """
    Description
    -----------
    Delete a taxon and its statuses in the database. Note that if the taxon has synonyms and/or children taxa, it might cause problems in the app. Use carefully.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    delTaxArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        gbifkey: Int [optional]
            Identificator of a taxon in the GBIF Backbone database (=specieskey, key, acceptedkey etc.)
        scientificname: Str [optional]
            Complete name of a taxon, with authorship
        canonicalname: Str [optional]
            Name of the taxon without authorship
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_children: List(Int)
        List of Identifiers of children taxa
    cd_synos: List(Int)
        List of synonym Identifiers
    """
    name_keys = ('canonicalname', 'scientificname', 'gbifkey')
    try:
        cd_tax = delTaxArgs.get('cd_tax')
        # When a name or gbifkey is also supplied, make sure it actually
        # designates the same taxon as cd_tax before deleting anything.
        if any(delTaxArgs.get(k) for k in name_keys):
            if not checkCdTax(connection=connection, cd_tax=cd_tax, **delTaxArgs):
                provided = {k: v for (k, v) in delTaxArgs.items() if k in name_keys}
                raise UncompatibilityCdTaxInputTaxError(cd_tax=cd_tax, inputTax=provided)
        outcome = deleteTaxo(connection, cd_tax)
    except (MissingArgError, UncompatibilityGbifKeyCanonicalname, UncompatibilityCdTaxInputTaxError) as e:
        return {'error': str(e)}
    except (UnauthorizedValueError, DbIntegrityError) as e:
        raise Abort500Error(str(e)) from e
    return outcome
def manageTaxPut_err_hand(connection, **putTaxArgs):
    """
    Description
    -----------
    Modify a taxon in the database, if the arguments gbifkey, parentgbifkey, or synogbifkey are provided, information is extracted from GBIF. Otherwise the information concerning the taxon is extracted from provided arguments
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    putTaxArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        gbifkey: Int [optional]
            Identificator of a taxon in the GBIF Backbone database (=specieskey, key, acceptedkey etc.)
        scientificname: Str [optional]
            Complete name of a taxon, with authorship
        canonicalname: Str [optional]
            Name of the taxon without authorship
        authorship: Str [optional]
            Authorship of a taxon
        syno: Bool [optional]
            Whether the taxon is a synonym
        parentgbifkey: Int [optional]
            gbifkey of the parent taxon
        parentcanonicalname: Str [optional]
            canonicalname of the parent taxon
        parentscientificname: Str [optional]
            scientificname of the parent taxon
        synogbifkey: Int [optional]
            Accepted taxon gbifkey (when the sent taxon is a synonym)
        synocanonicalname: Str [optional]
            Accepted taxon canonicalname (when the sent taxon is a synonym)
        synoscientificname: Str [optional]
            Accepted taxon scientificname (when the sent taxon is a synonym)
        status: Str [optional]
            Taxonomic status (ACCEPTED, DOUBTFUL or SYNONYM) Note: doubtful synonym are "SYNONYM"
        reference: Str [optional]
            Bibliographic references for the taxonomic status of a taxon
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference (same length as ref_citation if provided)
        cd_ref: Int [optional]
            Identificator of a bibliographic reference
        rank: Str [optional]
            Taxonomic rank (level) of the provided taxon
        no_gbif: Bool [optional]
            Whether to skip matching the provided taxon through the GBIF API (parent taxa are still searched through GBIF if absent from the database)
        min_gbif_conf: Int [optional]
            Minimum confidence in the GBIF matching (default 90, maximum 100)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    insertedTax: List(Int)
        List of inserted taxa
    """
    try:
        outcome = modifyTaxo(connection=connection, **putTaxArgs)
    except (MissingArgError, UncompatibilityGbifKeyCanonicalname, AlreadyExistsDbError) as e:
        # Client-side problem: report it as a normal error payload
        return {'error': str(e)}
    except (UnauthorizedValueError, DbIntegrityError) as e:
        # Server-side inconsistency: escalate as an internal error
        raise Abort500Error(str(e)) from e
    return outcome
def manageEndemPost_err_hand(connection, **inputEndem):
    """
    Description
    -----------
    Add references to an endemic status, or insert references and status if the taxon has no status yet. If the taxon is not yet in the database, insert the taxon as well (by the same process as in the /manageTaxo endpoint). The optional parameter "priority" control the behavior of the function when the endemic status already exists in the database: if "high", replace the preexisting status, if low, only add new references. If not provided, or null, and the status from the database is different from the provided status, returns an error and no modification is applied in the database.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    inputEndem: dict
        Dictionary with the following elements:
        endemstatus: Str [required]
            Endemic status to insert or edit in the database
        ref_citation: List(Str) [required]
            Bibliographic references justifying the status of a taxon
        gbifkey: Int [optional]
            Identificator of a taxon in the GBIF Backbone database
        scientificname: Str [optional]
            Complete name of a taxon, with authorship
        canonicalname: Str [optional]
            Name of the taxon without authorship
        authorship: Str [optional]
            Authorship of a taxon
        syno: Bool [optional]
            Whether the taxon is a synonym
        parentgbifkey: Int [optional]
            gbifkey of the parent taxon
        parentcanonicalname: Str [optional]
            canonicalname of the parent taxon
        parentscientificname: Str [optional]
            scientificname of the parent taxon
        synogbifkey: Int [optional]
            Accepted taxon gbifkey (when the sent taxon is a synonym)
        synocanonicalname: Str [optional]
            Accepted taxon canonicalname (when the sent taxon is a synonym)
        synoscientificname: Str [optional]
            Accepted taxon scientificname (when the sent taxon is a synonym)
        rank: Str [optional]
            Taxonomic rank (level) of the provided taxon
        min_gbif_conf: Int [optional]
            Minimum confidence in the GBIF matching (default 90, maximum 100)
        no_gbif: Bool [optional]
            Whether to skip matching the provided taxon through the GBIF API
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference (same length as ref_citation if provided)
        comments: Str [optional]
            Comments and supplementary information about a taxon status
        replace_comment: Bool [optional]
            Whether to delete the preexisting comment in a taxon status before inserting the provided comments
        priority: Str [optional]
            "high" if the provided status must replace the preexisting status, "low" if the preexisting status should not be modified (only new references are added)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    alreadyInDb: Bool
        Whether the taxon was already in the database when the endpoint was accessed
    foundGbif: Bool
        Whether the taxon was found in GBIF
    matchedname: Str
        Name of the taxon matched with the provided one
    acceptedname: Str
        Name of the accepted taxon
    gbifkey: Int
        Identifier of a taxon in the GBIF backbone
    syno: Bool
        Whether a taxon is a synonym
    insertedTax: List(Int)
        List of inserted taxa
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    status_replaced: Bool
        Whether the status has been replaced in the application of the POST method
    status_created: Bool
        Whether the status has been created in the POST method
    """
    try:
        # First make sure the taxon exists (inserting it if needed), then
        # attach the endemic status and references to the accepted taxon.
        outcome = manageInputTax(connection=connection, insert=True, **inputEndem)
        outcome.update(manageInputEndem(outcome.get('cd_tax_acc'), connection=connection, **inputEndem))
    except UnauthorizedValueError as e:
        # Bad status/priority values come from the client; anything else
        # on this exception type is an internal inconsistency.
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError, ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    return outcome
def manageEndemDel_err_hand(connection, **delEndemArgs):
    """
    Description
    -----------
    Delete a link between a taxon and its endemic status, or the status of a taxon.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    delEndemArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        cd_ref: Int [optional]
            Identificator of a bibliographic reference
        delete_status: Bool [optional]
            Whether to suppress the whole status of a taxon (if negative or null, only the association between a reference and a status is deleted)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    """
    try:
        # cd_tax is passed positionally below, so strip it from the kwargs
        target_tax = delEndemArgs.pop('cd_tax')
        outcome = deleteEndem(cd_tax=target_tax, connection=connection, **delEndemArgs)
    except UnauthorizedValueError as e:
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError, ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    return outcome
def manageEndemPut_err_hand(connection, **putEndemArgs):
    """
    Description
    -----------
    Modify the parameters of an endemic status of a species, and insert the references associated with the new endemic status
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    putEndemArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        endemstatus: Str [required]
            Endemic status to insert or edit in the database
        ref_citation: List(Str) [required]
            Bibliographic references justifying the status of a taxon
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference (same length as ref_citation if provided)
        comments: Str [optional]
            Comments and supplementary information about a taxon status
        replace_comment: Bool [optional]
            Whether to delete the preexisting comment in a taxon status before inserting the provided comments
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    """
    try:
        # cd_tax is passed positionally below, so strip it from the kwargs
        target_tax = putEndemArgs.pop('cd_tax')
        outcome = modifyEndem(cd_tax=target_tax, connection=connection, **putEndemArgs)
    except UnauthorizedValueError as e:
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError, ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    return outcome
def manageExotPost_err_hand(connection, **inputExot):
    """
    Description
    -----------
    Add references to an exotic (alien/invasive) status, or insert references and status if the taxon has no status yet. If the taxon is not yet in the database, insert the taxon as well (by the same process as in the /manageTaxo endpoint). The optional parameter "priority" control the behavior of the function when the exotic status already exists in the database: if "high", replace the preexisting status, if low, only add new references. If not provided, or null, and the status from the database is different from the provided status, returns an error and no modification is applied in the database.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    inputExot: dict
        Dictionary with the following elements:
        is_alien: Bool [required]
            Part of the exotic status of a taxon: is it considered alien in Colombia?
        is_invasive: Bool [required]
            Part of the exotic status of a taxon: is it considered invasive in Colombia?
        ref_citation: List(Str) [required]
            Bibliographic references justifying the status of a taxon
        gbifkey: Int [optional]
            Identificator of a taxon in the GBIF Backbone database
        scientificname: Str [optional]
            Complete name of a taxon, with authorship
        canonicalname: Str [optional]
            Name of the taxon without authorship
        authorship: Str [optional]
            Authorship of a taxon
        syno: Bool [optional]
            Whether the taxon is a synonym
        parentgbifkey: Int [optional]
            gbifkey of the parent taxon
        parentcanonicalname: Str [optional]
            canonicalname of the parent taxon
        parentscientificname: Str [optional]
            scientificname of the parent taxon
        synogbifkey: Int [optional]
            Accepted taxon gbifkey (when the sent taxon is a synonym)
        synocanonicalname: Str [optional]
            Accepted taxon canonicalname (when the sent taxon is a synonym)
        synoscientificname: Str [optional]
            Accepted taxon scientificname (when the sent taxon is a synonym)
        rank: Str [optional]
            Taxonomic rank (level) of the provided taxon
        min_gbif_conf: Int [optional]
            Minimum confidence in the GBIF matching (default 90, maximum 100)
        no_gbif: Bool [optional]
            Whether to skip matching the provided taxon through the GBIF API
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference (same length as ref_citation if provided)
        comments: Str [optional]
            Comments and supplementary information about a taxon status
        replace_comment: Bool [optional]
            Whether to delete the preexisting comment in a taxon status before inserting the provided comments
        priority: Str [optional]
            "high" if the provided status must replace the preexisting status, "low" if the preexisting status should not be modified (only new references are added)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    alreadyInDb: Bool
        Whether the taxon was already in the database when the endpoint was accessed
    foundGbif: Bool
        Whether the taxon was found in GBIF
    matchedname: Str
        Name of the taxon matched with the provided one
    acceptedname: Str
        Name of the accepted taxon
    gbifkey: Int
        Identifier of a taxon in the GBIF backbone
    syno: Bool
        Whether a taxon is a synonym
    insertedTax: List(Int)
        List of inserted taxa
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    status_replaced: Bool
        Whether the status has been replaced in the application of the POST method
    status_created: Bool
        Whether the status has been created in the POST method
    """
    try:
        # First make sure the taxon exists (inserting it if needed), then
        # attach the exotic status and references to the accepted taxon.
        outcome = manageInputTax(connection=connection, insert=True, **inputExot)
        outcome.update(manageInputExot(outcome.get('cd_tax_acc'), connection=connection, **inputExot))
    except UnauthorizedValueError as e:
        # Bad status/priority values come from the client; anything else
        # on this exception type is an internal inconsistency.
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError, ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    return outcome
def manageExotDel_err_hand(connection, **delExotArgs):
    """
    Description
    -----------
    Delete a link between a taxon and its exotic status, or the status of a taxon.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    delExotArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        cd_ref: Int [optional]
            Identificator of a bibliographic reference
        delete_status: Bool [optional]
            Whether to suppress the whole status of a taxon in the delete methods (if negative or null, only the association between a reference and a status is deleted)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    """
    try:
        cd_tax = delExotArgs['cd_tax']
        delExotArgs = {k: v for (k, v) in delExotArgs.items() if k != 'cd_tax'}
        res = deleteExot(cd_tax=cd_tax, connection=connection, **delExotArgs)
    # Bug fix: there were two consecutive `except UnauthorizedValueError`
    # handlers; the first one (missing the `else: raise`) silently swallowed
    # non-status UnauthorizedValueError and made the function return None,
    # and the second was unreachable dead code. Keep a single handler that
    # matches the sibling manage*Del_err_hand functions.
    except UnauthorizedValueError as e:
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        else:
            raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError, ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    else:
        return res
def manageExotPut_err_hand(connection, **putExotArgs):
    """
    Description
    -----------
    Modify the parameters of an exotic status of a species, and insert the references associated with the new exotic status
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    putExotArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        is_alien: Bool [required]
            Part of the exotic status of a taxon: is it considered alien in Colombia?
        is_invasive: Bool [required]
            Part of the exotic status of a taxon: is it considered invasive in Colombia?
        ref_citation: List(Str) [required]
            Bibliographic references justifying the status of a taxon
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference which justify the status of a taxon (if provided, it needs to have the same length as ref_citation)
        comments: Str [optional]
            Comments and supplementary information about a taxon status
        replace_comment: Bool [optional]
            Whether to delete the preexisting comment in a taxon status, before inserting the provided comments, when the status already exists.
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    """
    try:
        cd_tax = putExotArgs['cd_tax']
        putExotArgs = {k: v for (k, v) in putExotArgs.items() if k != 'cd_tax'}
        res = modifyExot(cd_tax=cd_tax, connection=connection, **putExotArgs)
    # Bug fix: there were two consecutive `except UnauthorizedValueError`
    # handlers; the first one (missing the `else: raise`) silently swallowed
    # non-status UnauthorizedValueError and made the function return None,
    # and the second was unreachable dead code. Keep a single handler that
    # matches the sibling manage*Put_err_hand functions.
    except UnauthorizedValueError as e:
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        else:
            raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError, ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    else:
        return res
def manageThreatPost_err_hand(connection, **inputThreat):
    """
    Description
    -----------
    Add references to a threat status, or insert references and status if the taxon has no status yet. If the taxon is not yet in the database, insert the taxon as well (by the same process as in the /manageTaxo endpoint). The optional parameter "priority" control the behavior of the function when the threat status already exists in the database: if "high", replace the preexisting status, if low, only add new references. If not provided, or null, and the status from the database is different from the provided status, returns an error and no modification is applied in the database.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    inputThreat: dict
        Dictionary with the following elements:
        threatstatus: Str [required]
            IUCN threat status
        ref_citation: List(Str) [required]
            Bibliographic references justifying the status of a taxon
        gbifkey: Int [optional]
            Identificator of a taxon in the GBIF Backbone database
        scientificname: Str [optional]
            Complete name of a taxon, with authorship
        canonicalname: Str [optional]
            Name of the taxon without authorship
        authorship: Str [optional]
            Authorship of a taxon
        syno: Bool [optional]
            Whether the taxon is a synonym
        parentgbifkey: Int [optional]
            gbifkey of the parent taxon
        parentcanonicalname: Str [optional]
            canonicalname of the parent taxon
        parentscientificname: Str [optional]
            scientificname of the parent taxon
        synogbifkey: Int [optional]
            Accepted taxon gbifkey (when the sent taxon is a synonym)
        synocanonicalname: Str [optional]
            Accepted taxon canonicalname (when the sent taxon is a synonym)
        synoscientificname: Str [optional]
            Accepted taxon scientificname (when the sent taxon is a synonym)
        rank: Str [optional]
            Taxonomic rank (level) of the provided taxon
        min_gbif_conf: Int [optional]
            Minimum confidence in the GBIF matching (default 90, maximum 100)
        no_gbif: Bool [optional]
            Whether to skip matching the provided taxon through the GBIF API
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference (same length as ref_citation if provided)
        comments: Str [optional]
            Comments and supplementary information about a taxon status
        replace_comment: Bool [optional]
            Whether to delete the preexisting comment in a taxon status before inserting the provided comments
        priority: Str [optional]
            "high" if the provided status must replace the preexisting status, "low" if the preexisting status should not be modified (only new references are added)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    alreadyInDb: Bool
        Whether the taxon was already in the database when the endpoint was accessed
    foundGbif: Bool
        Whether the taxon was found in GBIF
    matchedname: Str
        Name of the taxon matched with the provided one
    acceptedname: Str
        Name of the accepted taxon
    gbifkey: Int
        Identifier of a taxon in the GBIF backbone
    syno: Bool
        Whether a taxon is a synonym
    insertedTax: List(Int)
        List of inserted taxa
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    status_replaced: Bool
        Whether the status has been replaced in the application of the POST method
    status_created: Bool
        Whether the status has been created in the POST method
    """
    try:
        # First make sure the taxon exists (inserting it if needed), then
        # attach the threat status and references to the accepted taxon.
        outcome = manageInputTax(connection=connection, insert=True, **inputThreat)
        outcome.update(manageInputThreat(outcome.get('cd_tax_acc'), connection=connection, **inputThreat))
    except UnauthorizedValueError as e:
        # Bad status/priority values come from the client; anything else
        # on this exception type is an internal inconsistency.
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError, ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    return outcome
def manageThreatDel_err_hand(connection,**delThreatArgs):
    """
    Description
    -----------
    Delete a link between a taxon and its threat status, or the status of a taxon.
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    delThreatArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        cd_ref: Int [optional]
            Identificator of a bibliographic reference
        delete_status: Bool [optional]
            Whether to suppress the whole status of a taxon in the delete methods (if negative or null, only the association between a reference and a status is deleted)
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    """
    try:
        # delThreatArgs is a fresh dict built from **kwargs, so pop() safely
        # extracts cd_tax and removes it from the arguments forwarded below
        # (replaces the previous rebuild-the-dict comprehension).
        cd_tax = delThreatArgs.pop('cd_tax')
        res = deleteThreat(cd_tax=cd_tax, connection=connection, **delThreatArgs)
    except UnauthorizedValueError as e:
        # Bad values on these user-facing fields are client errors: report them.
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        # Anything else is a server-side problem.
        raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError,
            ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    else:
        return res
def manageThreatPut_err_hand(connection, **putThreatArgs):
    """
    Description
    -----------
    Modify the parameters of the threat status of a species, and insert the references associated with the new threat status
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    putThreatArgs: dict
        Dictionary with the following elements:
        cd_tax: Int [required]
            Identificator of a taxon in the database
        threatstatus: Str [required]
            IUCN threat status
        ref_citation: List(Str) [required]
            Bibliographic references justifying the status of a taxon
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference which justify the status of a taxon (if provided, it needs to have the same length as ref_citation)
        comments: Str [optional]
            Comments and supplementary information about a taxon status
        replace_comment: Bool [optional]
            Whether to delete the preexisting comment in a taxon status, before inserting the provided comments, when the status already exists.
    Returns
    -----------
    cd_tax: Int
        Identifier of a taxon in the API database
    cd_refs: List(Int)
        List of Identifiers of bibliographic references
    """
    try:
        # putThreatArgs is a fresh dict built from **kwargs, so pop() safely
        # extracts cd_tax and drops it from the forwarded keyword arguments.
        cd_tax = putThreatArgs.pop('cd_tax')
        res = modifyThreat(cd_tax=cd_tax, connection=connection, **putThreatArgs)
    except UnauthorizedValueError as e:
        # Bad values on these user-facing fields are client errors: report them.
        if e.var in ('threatstatus', 'priority', 'endemstatus'):
            return {'error': str(e)}
        raise Abort500Error(str(e)) from e
    except (UncompatibilityGbifKeyCanonicalname, UncompatibleStatusError,
            ModifyMissingStatusDbError, MissingArgError) as e:
        return {'error': str(e)}
    except DbIntegrityError as e:
        raise Abort500Error(str(e)) from e
    else:
        return res
def manageRefDel_err_hand(connection, **delRefArgs):
    """
    Description
    -----------
    Delete a reference, or join them. If the mergeInto parameter is provided, all references to cd_ref are replaced into references to mergeInto. Otherwise, references to cd_ref are deleted
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    delRefArgs: dict
        Dictionary with the following elements:
        cd_ref: Int [required]
            Identificator of a bibliographic reference
        mergeInto: Int [optional]
            Identificator of the bibliographic reference which will be kept in the database in the case of merging references
    Returns
    -----------
    cd_ref_modif: Int
        Identifier of the modified bibliographic reference
    cd_ref_del: Int
        Identifier of the deleted bibliographic reference
    """
    cd_ref = delRefArgs.get('cd_ref')
    merge_target = delRefArgs.get('mergeInto')
    try:
        result = {}
        if merge_target:
            # Re-point every usage of cd_ref to the surviving reference first.
            result.update(mergeRefs(connection=connection,
                                    into_ref=merge_target,
                                    from_ref=cd_ref))
        result.update(deleteRef(connection, cd_ref))
    except (UnauthorizedValueError, DeleteMissingElementDbError) as e:
        return {'error': str(e)}
    else:
        connection.commit()
        return result
def manageRefPut_err_hand(connection, **putRefArgs):
    """
    Description
    -----------
    Modify the references
    Parameters:
    ----------
    connection: psycopg2 Connection
        connection to the postgres database.
    putRefArgs: dict
        Dictionary with the following elements:
        cd_ref: Int [required]
            Identificator of a bibliographic reference
        reference: Str [optional]
            Bibliographic references for the taxonomic status of a taxon
        link: List(Str) [optional]
            Link, or link list for the resources associated with a bibliographic reference which justify the status of a taxon (if provided, it needs to have the same length as ref_citation)
    Returns
    -----------
    cd_ref_modif: Int
        Identifier of the modified bibliographic reference
    cd_ref_del: Int
        Identifier of the deleted bibliographic reference
    """
    try:
        modified = modifyRef(connection, **putRefArgs)
    except ModifyMissingRefDbError as e:
        return {'error': str(e)}
    # Commit only on the success path (the except branch returned above).
    connection.commit()
    return modified
| 42.390012 | 602 | 0.688233 |
acecbc23313186fa02160a131cf741f3d512034c | 40,325 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/wi/wisite.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 1 | 2015-04-05T21:21:26.000Z | 2015-04-05T21:21:26.000Z | nssrc/com/citrix/netscaler/nitro/resource/config/wi/wisite.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 1 | 2017-01-20T22:56:58.000Z | 2017-01-20T22:56:58.000Z | nssrc/com/citrix/netscaler/nitro/resource/config/wi/wisite.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 6 | 2015-04-21T13:14:08.000Z | 2020-12-03T07:27:52.000Z | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class wisite(base_resource) :
""" Configuration for WI site resource. """
	def __init__(self) :
		# Initialize every wisite configuration field to its empty default.
		# All fields are sent to the NITRO API as-is; empty strings mean
		# "not set". ___count is the record count returned by count queries.
		# -- Site location and gateway/STA endpoints --
		self._sitepath = ""
		self._agurl = ""
		self._staurl = ""
		self._secondstaurl = ""
		self._sessionreliability = ""
		self._usetwotickets = ""
		# -- Authentication configuration --
		self._authenticationpoint = ""
		self._agauthenticationmethod = ""
		self._wiauthenticationmethods = []
		# -- Locale, timeout and access method --
		self._defaultcustomtextlocale = ""
		self._websessiontimeout = 0
		self._defaultaccessmethod = ""
		# -- Login-page text customization --
		self._logintitle = ""
		self._appwelcomemessage = ""
		self._welcomemessage = ""
		self._footertext = ""
		self._loginsysmessage = ""
		self._preloginbutton = ""
		self._preloginmessage = ""
		self._prelogintitle = ""
		self._domainselection = ""
		# -- Site type and user-interface behavior --
		self._sitetype = ""
		self._userinterfacebranding = ""
		self._publishedresourcetype = ""
		self._kioskmode = ""
		self._showsearch = ""
		self._showrefresh = ""
		self._wiuserinterfacemodes = ""
		self._userinterfacelayouts = ""
		# -- Domain restriction --
		self._restrictdomains = ""
		self._logindomains = ""
		self._hidedomainfield = ""
		self.___count = 0
@property
def sitepath(self) :
ur"""Path to the Web Interface site being created on the NetScaler appliance.<br/>Minimum length = 1<br/>Maximum length = 250.
"""
try :
return self._sitepath
except Exception as e:
raise e
@sitepath.setter
def sitepath(self, sitepath) :
ur"""Path to the Web Interface site being created on the NetScaler appliance.<br/>Minimum length = 1<br/>Maximum length = 250
"""
try :
self._sitepath = sitepath
except Exception as e:
raise e
@property
def agurl(self) :
ur"""URL of the Access Gateway.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._agurl
except Exception as e:
raise e
@agurl.setter
def agurl(self, agurl) :
ur"""URL of the Access Gateway.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._agurl = agurl
except Exception as e:
raise e
@property
def staurl(self) :
ur"""URL of the Secure Ticket Authority (STA) server.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._staurl
except Exception as e:
raise e
@staurl.setter
def staurl(self, staurl) :
ur"""URL of the Secure Ticket Authority (STA) server.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._staurl = staurl
except Exception as e:
raise e
@property
def secondstaurl(self) :
ur"""URL of the second Secure Ticket Authority (STA) server.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._secondstaurl
except Exception as e:
raise e
@secondstaurl.setter
def secondstaurl(self, secondstaurl) :
ur"""URL of the second Secure Ticket Authority (STA) server.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._secondstaurl = secondstaurl
except Exception as e:
raise e
@property
def sessionreliability(self) :
ur"""Enable session reliability through Access Gateway.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._sessionreliability
except Exception as e:
raise e
@sessionreliability.setter
def sessionreliability(self, sessionreliability) :
ur"""Enable session reliability through Access Gateway.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._sessionreliability = sessionreliability
except Exception as e:
raise e
@property
def usetwotickets(self) :
ur"""Request tickets issued by two separate Secure Ticket Authorities (STA) when a resource is accessed.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._usetwotickets
except Exception as e:
raise e
@usetwotickets.setter
def usetwotickets(self, usetwotickets) :
ur"""Request tickets issued by two separate Secure Ticket Authorities (STA) when a resource is accessed.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._usetwotickets = usetwotickets
except Exception as e:
raise e
@property
def authenticationpoint(self) :
ur"""Authentication point for the Web Interface site.<br/>Possible values = WebInterface, AccessGateway.
"""
try :
return self._authenticationpoint
except Exception as e:
raise e
@authenticationpoint.setter
def authenticationpoint(self, authenticationpoint) :
ur"""Authentication point for the Web Interface site.<br/>Possible values = WebInterface, AccessGateway
"""
try :
self._authenticationpoint = authenticationpoint
except Exception as e:
raise e
@property
def agauthenticationmethod(self) :
ur"""Method for authenticating a Web Interface site if you have specified Web Interface as the authentication point.
Available settings function as follows:
* Explicit - Users must provide a user name and password to log on to the Web Interface.
* Anonymous - Users can log on to the Web Interface without providing a user name and password. They have access to resources published for anonymous users.<br/>Possible values = Explicit, SmartCard.
"""
try :
return self._agauthenticationmethod
except Exception as e:
raise e
@agauthenticationmethod.setter
def agauthenticationmethod(self, agauthenticationmethod) :
ur"""Method for authenticating a Web Interface site if you have specified Web Interface as the authentication point.
Available settings function as follows:
* Explicit - Users must provide a user name and password to log on to the Web Interface.
* Anonymous - Users can log on to the Web Interface without providing a user name and password. They have access to resources published for anonymous users.<br/>Possible values = Explicit, SmartCard
"""
try :
self._agauthenticationmethod = agauthenticationmethod
except Exception as e:
raise e
@property
def wiauthenticationmethods(self) :
ur"""The method of authentication to be used at Web Interface.<br/>Default value: Explicit<br/>Possible values = Explicit, Anonymous.
"""
try :
return self._wiauthenticationmethods
except Exception as e:
raise e
@wiauthenticationmethods.setter
def wiauthenticationmethods(self, wiauthenticationmethods) :
ur"""The method of authentication to be used at Web Interface.<br/>Default value: Explicit<br/>Possible values = Explicit, Anonymous
"""
try :
self._wiauthenticationmethods = wiauthenticationmethods
except Exception as e:
raise e
@property
def defaultcustomtextlocale(self) :
ur"""Default language for the Web Interface site.<br/>Default value: English<br/>Possible values = German, English, Spanish, French, Japanese, Korean, Russian, Chinese_simplified, Chinese_traditional.
"""
try :
return self._defaultcustomtextlocale
except Exception as e:
raise e
@defaultcustomtextlocale.setter
def defaultcustomtextlocale(self, defaultcustomtextlocale) :
ur"""Default language for the Web Interface site.<br/>Default value: English<br/>Possible values = German, English, Spanish, French, Japanese, Korean, Russian, Chinese_simplified, Chinese_traditional
"""
try :
self._defaultcustomtextlocale = defaultcustomtextlocale
except Exception as e:
raise e
@property
def websessiontimeout(self) :
ur"""Time-out, in minutes, for idle Web Interface browser sessions. If a client's session is idle for a time that exceeds the time-out value, the NetScaler appliance terminates the connection.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 1440.
"""
try :
return self._websessiontimeout
except Exception as e:
raise e
@websessiontimeout.setter
def websessiontimeout(self, websessiontimeout) :
ur"""Time-out, in minutes, for idle Web Interface browser sessions. If a client's session is idle for a time that exceeds the time-out value, the NetScaler appliance terminates the connection.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 1440
"""
try :
self._websessiontimeout = websessiontimeout
except Exception as e:
raise e
@property
def defaultaccessmethod(self) :
ur"""Default access method for clients accessing the Web Interface site.
Note: Before you configure an access method based on the client IP address, you must enable USIP mode on the Web Interface service to make the client's IP address available with the Web Interface.
Depending on whether the Web Interface site is configured to use an HTTP or HTTPS virtual server or to use access gateway, you can send clients or access gateway the IP address, or the alternate address, of a XenApp or XenDesktop server. Or, you can send the IP address translated from a mapping entry, which defines mapping of an internal address and port to an external address and port.
Note: In the NetScaler command line, mapping entries can be created by using the bind wi site command.<br/>Possible values = Direct, Alternate, Translated, GatewayDirect, GatewayAlternate, GatewayTranslated.
"""
try :
return self._defaultaccessmethod
except Exception as e:
raise e
@defaultaccessmethod.setter
def defaultaccessmethod(self, defaultaccessmethod) :
ur"""Default access method for clients accessing the Web Interface site.
Note: Before you configure an access method based on the client IP address, you must enable USIP mode on the Web Interface service to make the client's IP address available with the Web Interface.
Depending on whether the Web Interface site is configured to use an HTTP or HTTPS virtual server or to use access gateway, you can send clients or access gateway the IP address, or the alternate address, of a XenApp or XenDesktop server. Or, you can send the IP address translated from a mapping entry, which defines mapping of an internal address and port to an external address and port.
Note: In the NetScaler command line, mapping entries can be created by using the bind wi site command.<br/>Possible values = Direct, Alternate, Translated, GatewayDirect, GatewayAlternate, GatewayTranslated
"""
try :
self._defaultaccessmethod = defaultaccessmethod
except Exception as e:
raise e
@property
def logintitle(self) :
ur"""A custom login page title for the Web Interface site.<br/>Default value: "Welcome to Web Interface on NetScaler"<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._logintitle
except Exception as e:
raise e
@logintitle.setter
def logintitle(self, logintitle) :
ur"""A custom login page title for the Web Interface site.<br/>Default value: "Welcome to Web Interface on NetScaler"<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._logintitle = logintitle
except Exception as e:
raise e
@property
def appwelcomemessage(self) :
ur"""Specifies localized text to appear at the top of the main content area of the Applications screen. LanguageCode is en, de, es, fr, ja, or any other supported language identifier.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._appwelcomemessage
except Exception as e:
raise e
@appwelcomemessage.setter
def appwelcomemessage(self, appwelcomemessage) :
ur"""Specifies localized text to appear at the top of the main content area of the Applications screen. LanguageCode is en, de, es, fr, ja, or any other supported language identifier.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._appwelcomemessage = appwelcomemessage
except Exception as e:
raise e
@property
def welcomemessage(self) :
ur"""Localized welcome message that appears on the welcome area of the login screen.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._welcomemessage
except Exception as e:
raise e
@welcomemessage.setter
def welcomemessage(self, welcomemessage) :
ur"""Localized welcome message that appears on the welcome area of the login screen.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._welcomemessage = welcomemessage
except Exception as e:
raise e
@property
def footertext(self) :
ur"""Localized text that appears in the footer area of all pages.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._footertext
except Exception as e:
raise e
@footertext.setter
def footertext(self, footertext) :
ur"""Localized text that appears in the footer area of all pages.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._footertext = footertext
except Exception as e:
raise e
@property
def loginsysmessage(self) :
ur"""Localized text that appears at the bottom of the main content area of the login screen.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._loginsysmessage
except Exception as e:
raise e
@loginsysmessage.setter
def loginsysmessage(self, loginsysmessage) :
ur"""Localized text that appears at the bottom of the main content area of the login screen.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._loginsysmessage = loginsysmessage
except Exception as e:
raise e
@property
def preloginbutton(self) :
ur"""Localized text that appears as the name of the pre-login message confirmation button.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._preloginbutton
except Exception as e:
raise e
@preloginbutton.setter
def preloginbutton(self, preloginbutton) :
ur"""Localized text that appears as the name of the pre-login message confirmation button.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._preloginbutton = preloginbutton
except Exception as e:
raise e
@property
def preloginmessage(self) :
ur"""Localized text that appears on the pre-login message page.<br/>Minimum length = 1<br/>Maximum length = 2048.
"""
try :
return self._preloginmessage
except Exception as e:
raise e
@preloginmessage.setter
def preloginmessage(self, preloginmessage) :
ur"""Localized text that appears on the pre-login message page.<br/>Minimum length = 1<br/>Maximum length = 2048
"""
try :
self._preloginmessage = preloginmessage
except Exception as e:
raise e
@property
def prelogintitle(self) :
ur"""Localized text that appears as the title of the pre-login message page.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._prelogintitle
except Exception as e:
raise e
@prelogintitle.setter
def prelogintitle(self, prelogintitle) :
ur"""Localized text that appears as the title of the pre-login message page.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._prelogintitle = prelogintitle
except Exception as e:
raise e
@property
def domainselection(self) :
ur"""Domain names listed on the login screen for explicit authentication.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._domainselection
except Exception as e:
raise e
@domainselection.setter
def domainselection(self, domainselection) :
ur"""Domain names listed on the login screen for explicit authentication.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._domainselection = domainselection
except Exception as e:
raise e
@property
def sitetype(self) :
ur"""Type of access to the Web Interface site. Available settings function as follows:
* XenApp/XenDesktop web site - Configures the Web Interface site for access by a web browser.
* XenApp/XenDesktop services site - Configures the Web Interface site for access by the XenApp plug-in.<br/>Default value: XenAppWeb<br/>Possible values = XenAppWeb, XenAppServices.
"""
try :
return self._sitetype
except Exception as e:
raise e
@sitetype.setter
def sitetype(self, sitetype) :
ur"""Type of access to the Web Interface site. Available settings function as follows:
* XenApp/XenDesktop web site - Configures the Web Interface site for access by a web browser.
* XenApp/XenDesktop services site - Configures the Web Interface site for access by the XenApp plug-in.<br/>Default value: XenAppWeb<br/>Possible values = XenAppWeb, XenAppServices
"""
try :
self._sitetype = sitetype
except Exception as e:
raise e
@property
def userinterfacebranding(self) :
ur"""Specifies whether the site is focused towards users accessing applications or desktops. Setting the parameter to Desktops changes the functionality of the site to improve the experience for XenDesktop users. Citrix recommends using this setting for any deployment that includes XenDesktop.<br/>Default value: Applications<br/>Possible values = Desktops, Applications.
"""
try :
return self._userinterfacebranding
except Exception as e:
raise e
@userinterfacebranding.setter
def userinterfacebranding(self, userinterfacebranding) :
ur"""Specifies whether the site is focused towards users accessing applications or desktops. Setting the parameter to Desktops changes the functionality of the site to improve the experience for XenDesktop users. Citrix recommends using this setting for any deployment that includes XenDesktop.<br/>Default value: Applications<br/>Possible values = Desktops, Applications
"""
try :
self._userinterfacebranding = userinterfacebranding
except Exception as e:
raise e
@property
def publishedresourcetype(self) :
ur"""Method for accessing the published XenApp and XenDesktop resources.
Available settings function as follows:
* Online - Allows applications to be launched on the XenApp and XenDesktop servers.
* Offline - Allows streaming of applications to the client.
* DualMode - Allows both online and offline modes.<br/>Default value: Online<br/>Possible values = Online, Offline, DualMode.
"""
try :
return self._publishedresourcetype
except Exception as e:
raise e
@publishedresourcetype.setter
def publishedresourcetype(self, publishedresourcetype) :
ur"""Method for accessing the published XenApp and XenDesktop resources.
Available settings function as follows:
* Online - Allows applications to be launched on the XenApp and XenDesktop servers.
* Offline - Allows streaming of applications to the client.
* DualMode - Allows both online and offline modes.<br/>Default value: Online<br/>Possible values = Online, Offline, DualMode
"""
try :
self._publishedresourcetype = publishedresourcetype
except Exception as e:
raise e
@property
def kioskmode(self) :
ur"""User settings do not persist from one session to another.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._kioskmode
except Exception as e:
raise e
@kioskmode.setter
def kioskmode(self, kioskmode) :
ur"""User settings do not persist from one session to another.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._kioskmode = kioskmode
except Exception as e:
raise e
@property
def showsearch(self) :
ur"""Enables search option on XenApp websites.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._showsearch
except Exception as e:
raise e
@showsearch.setter
def showsearch(self, showsearch) :
ur"""Enables search option on XenApp websites.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._showsearch = showsearch
except Exception as e:
raise e
@property
def showrefresh(self) :
ur"""Provides the Refresh button on the applications screen.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._showrefresh
except Exception as e:
raise e
@showrefresh.setter
def showrefresh(self, showrefresh) :
ur"""Provides the Refresh button on the applications screen.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._showrefresh = showrefresh
except Exception as e:
raise e
@property
def wiuserinterfacemodes(self) :
ur"""Appearance of the login screen.
* Simple - Only the login fields for the selected authentication method are displayed.
* Advanced - Displays the navigation bar, which provides access to the pre-login messages and preferences screens.<br/>Default value: SIMPLE<br/>Possible values = SIMPLE, ADVANCED.
"""
try :
return self._wiuserinterfacemodes
except Exception as e:
raise e
@wiuserinterfacemodes.setter
def wiuserinterfacemodes(self, wiuserinterfacemodes) :
ur"""Appearance of the login screen.
* Simple - Only the login fields for the selected authentication method are displayed.
* Advanced - Displays the navigation bar, which provides access to the pre-login messages and preferences screens.<br/>Default value: SIMPLE<br/>Possible values = SIMPLE, ADVANCED
"""
try :
self._wiuserinterfacemodes = wiuserinterfacemodes
except Exception as e:
raise e
@property
def userinterfacelayouts(self) :
ur"""Specifies whether or not to use the compact user interface.<br/>Default value: AUTO<br/>Possible values = AUTO, NORMAL, COMPACT.
"""
try :
return self._userinterfacelayouts
except Exception as e:
raise e
@userinterfacelayouts.setter
def userinterfacelayouts(self, userinterfacelayouts) :
ur"""Specifies whether or not to use the compact user interface.<br/>Default value: AUTO<br/>Possible values = AUTO, NORMAL, COMPACT
"""
try :
self._userinterfacelayouts = userinterfacelayouts
except Exception as e:
raise e
@property
def restrictdomains(self) :
ur"""The RestrictDomains setting is used to enable/disable domain restrictions. If domain restriction is enabled, the LoginDomains list is used for validating the login domain. It is applied to all the authentication methods except Anonymous for XenApp Web and XenApp Services sites.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._restrictdomains
except Exception as e:
raise e
@restrictdomains.setter
def restrictdomains(self, restrictdomains) :
ur"""The RestrictDomains setting is used to enable/disable domain restrictions. If domain restriction is enabled, the LoginDomains list is used for validating the login domain. It is applied to all the authentication methods except Anonymous for XenApp Web and XenApp Services sites.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._restrictdomains = restrictdomains
except Exception as e:
raise e
@property
def logindomains(self) :
ur"""[List of NetBIOS domain names], Domain names to use for access restriction.
Only takes effect when used in conjunction with the RestrictDomains setting.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._logindomains
except Exception as e:
raise e
@logindomains.setter
def logindomains(self, logindomains) :
ur"""[List of NetBIOS domain names], Domain names to use for access restriction.
Only takes effect when used in conjunction with the RestrictDomains setting.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._logindomains = logindomains
except Exception as e:
raise e
@property
def hidedomainfield(self) :
ur"""The HideDomainField setting is used to control whether the domain field is displayed on the logon screen.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._hidedomainfield
except Exception as e:
raise e
@hidedomainfield.setter
def hidedomainfield(self, hidedomainfield) :
ur"""The HideDomainField setting is used to control whether the domain field is displayed on the logon screen.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._hidedomainfield = hidedomainfield
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(wisite_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.wisite
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.sitepath is not None :
return str(self.sitepath)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add wisite.
"""
try :
if type(resource) is not list :
addresource = wisite()
addresource.sitepath = resource.sitepath
addresource.agurl = resource.agurl
addresource.staurl = resource.staurl
addresource.secondstaurl = resource.secondstaurl
addresource.sessionreliability = resource.sessionreliability
addresource.usetwotickets = resource.usetwotickets
addresource.authenticationpoint = resource.authenticationpoint
addresource.agauthenticationmethod = resource.agauthenticationmethod
addresource.wiauthenticationmethods = resource.wiauthenticationmethods
addresource.defaultcustomtextlocale = resource.defaultcustomtextlocale
addresource.websessiontimeout = resource.websessiontimeout
addresource.defaultaccessmethod = resource.defaultaccessmethod
addresource.logintitle = resource.logintitle
addresource.appwelcomemessage = resource.appwelcomemessage
addresource.welcomemessage = resource.welcomemessage
addresource.footertext = resource.footertext
addresource.loginsysmessage = resource.loginsysmessage
addresource.preloginbutton = resource.preloginbutton
addresource.preloginmessage = resource.preloginmessage
addresource.prelogintitle = resource.prelogintitle
addresource.domainselection = resource.domainselection
addresource.sitetype = resource.sitetype
addresource.userinterfacebranding = resource.userinterfacebranding
addresource.publishedresourcetype = resource.publishedresourcetype
addresource.kioskmode = resource.kioskmode
addresource.showsearch = resource.showsearch
addresource.showrefresh = resource.showrefresh
addresource.wiuserinterfacemodes = resource.wiuserinterfacemodes
addresource.userinterfacelayouts = resource.userinterfacelayouts
addresource.restrictdomains = resource.restrictdomains
addresource.logindomains = resource.logindomains
addresource.hidedomainfield = resource.hidedomainfield
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ wisite() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].sitepath = resource[i].sitepath
addresources[i].agurl = resource[i].agurl
addresources[i].staurl = resource[i].staurl
addresources[i].secondstaurl = resource[i].secondstaurl
addresources[i].sessionreliability = resource[i].sessionreliability
addresources[i].usetwotickets = resource[i].usetwotickets
addresources[i].authenticationpoint = resource[i].authenticationpoint
addresources[i].agauthenticationmethod = resource[i].agauthenticationmethod
addresources[i].wiauthenticationmethods = resource[i].wiauthenticationmethods
addresources[i].defaultcustomtextlocale = resource[i].defaultcustomtextlocale
addresources[i].websessiontimeout = resource[i].websessiontimeout
addresources[i].defaultaccessmethod = resource[i].defaultaccessmethod
addresources[i].logintitle = resource[i].logintitle
addresources[i].appwelcomemessage = resource[i].appwelcomemessage
addresources[i].welcomemessage = resource[i].welcomemessage
addresources[i].footertext = resource[i].footertext
addresources[i].loginsysmessage = resource[i].loginsysmessage
addresources[i].preloginbutton = resource[i].preloginbutton
addresources[i].preloginmessage = resource[i].preloginmessage
addresources[i].prelogintitle = resource[i].prelogintitle
addresources[i].domainselection = resource[i].domainselection
addresources[i].sitetype = resource[i].sitetype
addresources[i].userinterfacebranding = resource[i].userinterfacebranding
addresources[i].publishedresourcetype = resource[i].publishedresourcetype
addresources[i].kioskmode = resource[i].kioskmode
addresources[i].showsearch = resource[i].showsearch
addresources[i].showrefresh = resource[i].showrefresh
addresources[i].wiuserinterfacemodes = resource[i].wiuserinterfacemodes
addresources[i].userinterfacelayouts = resource[i].userinterfacelayouts
addresources[i].restrictdomains = resource[i].restrictdomains
addresources[i].logindomains = resource[i].logindomains
addresources[i].hidedomainfield = resource[i].hidedomainfield
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete wisite.
"""
try :
if type(resource) is not list :
deleteresource = wisite()
if type(resource) != type(deleteresource):
deleteresource.sitepath = resource
else :
deleteresource.sitepath = resource.sitepath
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ wisite() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].sitepath = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ wisite() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].sitepath = resource[i].sitepath
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update wisite.
"""
try :
if type(resource) is not list :
updateresource = wisite()
updateresource.sitepath = resource.sitepath
updateresource.agurl = resource.agurl
updateresource.staurl = resource.staurl
updateresource.sessionreliability = resource.sessionreliability
updateresource.usetwotickets = resource.usetwotickets
updateresource.secondstaurl = resource.secondstaurl
updateresource.wiauthenticationmethods = resource.wiauthenticationmethods
updateresource.defaultaccessmethod = resource.defaultaccessmethod
updateresource.defaultcustomtextlocale = resource.defaultcustomtextlocale
updateresource.websessiontimeout = resource.websessiontimeout
updateresource.logintitle = resource.logintitle
updateresource.appwelcomemessage = resource.appwelcomemessage
updateresource.welcomemessage = resource.welcomemessage
updateresource.footertext = resource.footertext
updateresource.loginsysmessage = resource.loginsysmessage
updateresource.preloginbutton = resource.preloginbutton
updateresource.preloginmessage = resource.preloginmessage
updateresource.prelogintitle = resource.prelogintitle
updateresource.domainselection = resource.domainselection
updateresource.userinterfacebranding = resource.userinterfacebranding
updateresource.authenticationpoint = resource.authenticationpoint
updateresource.agauthenticationmethod = resource.agauthenticationmethod
updateresource.publishedresourcetype = resource.publishedresourcetype
updateresource.kioskmode = resource.kioskmode
updateresource.showsearch = resource.showsearch
updateresource.showrefresh = resource.showrefresh
updateresource.wiuserinterfacemodes = resource.wiuserinterfacemodes
updateresource.userinterfacelayouts = resource.userinterfacelayouts
updateresource.restrictdomains = resource.restrictdomains
updateresource.logindomains = resource.logindomains
updateresource.hidedomainfield = resource.hidedomainfield
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ wisite() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].sitepath = resource[i].sitepath
updateresources[i].agurl = resource[i].agurl
updateresources[i].staurl = resource[i].staurl
updateresources[i].sessionreliability = resource[i].sessionreliability
updateresources[i].usetwotickets = resource[i].usetwotickets
updateresources[i].secondstaurl = resource[i].secondstaurl
updateresources[i].wiauthenticationmethods = resource[i].wiauthenticationmethods
updateresources[i].defaultaccessmethod = resource[i].defaultaccessmethod
updateresources[i].defaultcustomtextlocale = resource[i].defaultcustomtextlocale
updateresources[i].websessiontimeout = resource[i].websessiontimeout
updateresources[i].logintitle = resource[i].logintitle
updateresources[i].appwelcomemessage = resource[i].appwelcomemessage
updateresources[i].welcomemessage = resource[i].welcomemessage
updateresources[i].footertext = resource[i].footertext
updateresources[i].loginsysmessage = resource[i].loginsysmessage
updateresources[i].preloginbutton = resource[i].preloginbutton
updateresources[i].preloginmessage = resource[i].preloginmessage
updateresources[i].prelogintitle = resource[i].prelogintitle
updateresources[i].domainselection = resource[i].domainselection
updateresources[i].userinterfacebranding = resource[i].userinterfacebranding
updateresources[i].authenticationpoint = resource[i].authenticationpoint
updateresources[i].agauthenticationmethod = resource[i].agauthenticationmethod
updateresources[i].publishedresourcetype = resource[i].publishedresourcetype
updateresources[i].kioskmode = resource[i].kioskmode
updateresources[i].showsearch = resource[i].showsearch
updateresources[i].showrefresh = resource[i].showrefresh
updateresources[i].wiuserinterfacemodes = resource[i].wiuserinterfacemodes
updateresources[i].userinterfacelayouts = resource[i].userinterfacelayouts
updateresources[i].restrictdomains = resource[i].restrictdomains
updateresources[i].logindomains = resource[i].logindomains
updateresources[i].hidedomainfield = resource[i].hidedomainfield
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of wisite resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = wisite()
if type(resource) != type(unsetresource):
unsetresource.sitepath = resource
else :
unsetresource.sitepath = resource.sitepath
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ wisite() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].sitepath = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ wisite() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].sitepath = resource[i].sitepath
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the wisite resources that are configured on netscaler.
"""
try :
if not name :
obj = wisite()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = wisite()
obj.sitepath = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [wisite() for _ in range(len(name))]
obj = [wisite() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = wisite()
obj[i].sitepath = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of wisite resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = wisite()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the wisite resources configured on NetScaler.
"""
try :
obj = wisite()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of wisite resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = wisite()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Sessionreliability:
ON = "ON"
OFF = "OFF"
class Showsearch:
ON = "ON"
OFF = "OFF"
class Showrefresh:
ON = "ON"
OFF = "OFF"
class Userinterfacelayouts:
AUTO = "AUTO"
NORMAL = "NORMAL"
COMPACT = "COMPACT"
class Wiauthenticationmethods:
Explicit = "Explicit"
Anonymous = "Anonymous"
class Defaultaccessmethod:
Direct = "Direct"
Alternate = "Alternate"
Translated = "Translated"
GatewayDirect = "GatewayDirect"
GatewayAlternate = "GatewayAlternate"
GatewayTranslated = "GatewayTranslated"
class Wiuserinterfacemodes:
SIMPLE = "SIMPLE"
ADVANCED = "ADVANCED"
class Publishedresourcetype:
Online = "Online"
Offline = "Offline"
DualMode = "DualMode"
class Usetwotickets:
ON = "ON"
OFF = "OFF"
class Agauthenticationmethod:
Explicit = "Explicit"
SmartCard = "SmartCard"
class Hidedomainfield:
ON = "ON"
OFF = "OFF"
class Authenticationpoint:
WebInterface = "WebInterface"
AccessGateway = "AccessGateway"
class Restrictdomains:
ON = "ON"
OFF = "OFF"
class Sitetype:
XenAppWeb = "XenAppWeb"
XenAppServices = "XenAppServices"
class Userinterfacebranding:
Desktops = "Desktops"
Applications = "Applications"
class Defaultcustomtextlocale:
German = "German"
English = "English"
Spanish = "Spanish"
French = "French"
Japanese = "Japanese"
Korean = "Korean"
Russian = "Russian"
Chinese_simplified = "Chinese_simplified"
Chinese_traditional = "Chinese_traditional"
class Kioskmode:
ON = "ON"
OFF = "OFF"
class wisite_response(base_response) :
def __init__(self, length=1) :
self.wisite = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.wisite = [wisite() for _ in range(length)]
| 37.407236 | 391 | 0.739392 |
acecbc92a67df85a997ef1e3448c593df96d7895 | 61,915 | py | Python | reo/process_results.py | akuam1/REopt_Lite_API | fb5a88ee52351b725fda5c15712b617f6e97ddca | [
"BSD-3-Clause"
] | null | null | null | reo/process_results.py | akuam1/REopt_Lite_API | fb5a88ee52351b725fda5c15712b617f6e97ddca | [
"BSD-3-Clause"
] | null | null | null | reo/process_results.py | akuam1/REopt_Lite_API | fb5a88ee52351b725fda5c15712b617f6e97ddca | [
"BSD-3-Clause"
] | null | null | null | # *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
import sys
import traceback
import copy
import numpy as np
from reo.nested_outputs import nested_output_definitions
import logging
from celery import shared_task, Task
from reo.exceptions import REoptError, UnexpectedError
from reo.models import ModelManager, PVModel, FinancialModel, WindModel, AbsorptionChillerModel
from reo.src.profiler import Profiler
from reo.src.emissions_calculator import EmissionsCalculator
from reo.utilities import annuity, TONHOUR_TO_KWHT, MMBTU_TO_KWH, GAL_DIESEL_TO_KWH
from reo.nested_inputs import macrs_five_year, macrs_seven_year
from reo.src.proforma_metrics import calculate_proforma_metrics
from reo.src.storage import HotTES, ColdTES
log = logging.getLogger(__name__)
class ProcessResultsTask(Task):
"""
Used to define custom Error handling for celery task
"""
name = 'process_results'
max_retries = 0
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
log a bunch of stuff for debugging
save message: error and outputs: Scenario: status
need to stop rest of chain!?
:param exc: The exception raised by the task.
:param task_id: Unique id of the failed task. (not the run_uuid)
:param args: Original arguments for the task that failed.
:param kwargs: Original keyword arguments for the task that failed.
:param einfo: ExceptionInfo instance, containing the traceback.
:return: None, The return value of this handler is ignored.
"""
if isinstance(exc, REoptError):
exc.save_to_db()
self.data["messages"]["error"] = exc.message
self.data["outputs"]["Scenario"]["status"] = "An error occurred. See messages for more."
ModelManager.update_scenario_and_messages(self.data, run_uuid=self.run_uuid)
self.request.chain = None # stop the chain?
self.request.callback = None
self.request.chord = None # this seems to stop the infinite chord_unlock call
@shared_task(bind=True, base=ProcessResultsTask, ignore_result=True)
def process_results(self, dfm_list, data, meta, saveToDB=True):
"""
Processes the two outputs from reopt.jl bau and with-Tech scenarios
:param self: celery.Task
:param dfm_list: list of serialized dat_file_managers (passed from group of REopt runs)
:param data: nested dict mirroring API response format
:param meta: ={'run_uuid': run_uuid, 'api_version': api_version} from api.py
:param saveToDB: boolean for saving postgres models
:return: None
"""
profiler = Profiler()
class Results:
bau_attributes = [
"lcc",
"fuel_used_kwh",
"GridToLoad",
"year_one_energy_cost",
"year_one_demand_cost",
"year_one_fixed_cost",
"year_one_min_charge_adder",
"year_one_coincident_peak_cost",
"year_one_bill",
"year_one_utility_kwh",
"year_one_export_benefit",
"GridToLoad",
"total_energy_cost",
"total_demand_cost",
"total_fixed_cost",
"total_min_charge_adder",
"total_coincident_peak_cost",
"total_export_benefit",
"net_capital_costs_plus_om",
"gen_net_fixed_om_costs",
"gen_net_variable_om_costs",
"gen_total_fuel_cost",
"gen_year_one_fuel_cost",
"gen_year_one_variable_om_costs",
"year_one_boiler_fuel_cost",
"total_boiler_fuel_cost",
"julia_input_construction_seconds",
"julia_reopt_preamble_seconds",
"julia_reopt_variables_seconds",
"julia_reopt_constriants_seconds",
"julia_reopt_optimize_seconds",
"julia_reopt_postprocess_seconds",
"pyjulia_start_seconds",
"pyjulia_pkg_seconds",
"pyjulia_activate_seconds",
"pyjulia_include_model_seconds",
"pyjulia_make_model_seconds",
"pyjulia_include_reopt_seconds",
"pyjulia_run_reopt_seconds"
]
def __init__(self, results_dict, results_dict_bau, dm, inputs):
"""
Convenience (and legacy) class for handling REopt results
:param results_dict: flat dict of results from reopt.jl
:param results_dict_bau: flat dict of results from reopt.jl for bau case
:param dm: instance of DataManager class
:param inputs: dict, data['inputs']['Scenario']['Site']
"""
self.dm = dm
self.inputs = inputs
# remove invalid sizes due to optimization error margins
for r in [results_dict, results_dict_bau]:
for key, value in r.items():
if key.endswith('kw') or key.endswith('kwh'):
if value < 0:
r[key] = 0
# add bau outputs to results_dict
for k in Results.bau_attributes:
if results_dict_bau.get(k) is None:
results_dict[k + '_bau'] = 0
else:
results_dict[k + '_bau'] = results_dict_bau[k]
for i in range(len(self.inputs["PV"])):
# b/c of PV & PVNM techs in REopt, if both are zero then no value is written to REopt_results.json
i += 1
if results_dict.get('PV{}_kw'.format(i)) is None:
results_dict['PV{}_kw'.format(i)] = 0
pv_bau_keys = ["PV{}_net_fixed_om_costs".format(i),
"average_yearly_PV{}_energy_produced".format(i),
"year_one_PV{}_energy_produced".format(i),
"average_yearly_energy_produced_PV{}".format(i),
]
for k in pv_bau_keys:
if results_dict_bau.get(k) is None:
results_dict[k + '_bau'] = 0
else:
results_dict[k + '_bau'] = results_dict_bau[k]
# if wind is zero then no value is written to REopt results.json
if results_dict.get("wind_kw") is None:
results_dict['wind_kw'] = 0
# if generator is zero then no value is written to REopt results.json
if results_dict.get("generator_kw") is None:
results_dict['generator_kw'] = 0
# if CHP is zero then no value is written to REopt results.json
if results_dict.get("chp_kw") is None:
results_dict['chp_kw'] = 0
if results_dict.get("absorpchl_kw") is None:
results_dict['absorpchl_kw'] = 0
if results_dict.get("hot_tes_size_kwh") is None:
results_dict['hot_tes_size_kwh'] = 0
if results_dict.get("cold_tes_size_kwht") is None:
results_dict['cold_tes_size_kwht'] = 0
results_dict['npv'] = results_dict['lcc_bau'] - results_dict['lcc']
self.results_dict = results_dict
self.nested_outputs = self.setup_nested()
@property
def replacement_costs_future_and_present(self):
future_cost_inverter = self.inputs["Storage"]["replace_cost_us_dollars_per_kw"] * \
self.nested_outputs["Scenario"]["Site"]["Storage"]["size_kw"]
future_cost_storage = self.inputs["Storage"]["replace_cost_us_dollars_per_kwh"] * \
self.nested_outputs["Scenario"]["Site"]["Storage"]["size_kwh"]
future_cost = future_cost_inverter + future_cost_storage
tax_rate = self.inputs["Financial"]["owner_tax_pct"]
discount_rate = self.inputs["Financial"]["owner_discount_pct"]
present_cost = 0
present_cost += future_cost_inverter * (1 - tax_rate) / ((1 + discount_rate) **
self.inputs["Storage"]["inverter_replacement_year"])
present_cost += future_cost_storage * (1 - tax_rate) / ((1 + discount_rate) **
self.inputs["Storage"]["battery_replacement_year"])
return round(future_cost, 2), round(present_cost, 2)
@property
def upfront_capex(self):
upfront_capex = 0
upfront_capex += max(self.inputs["Generator"]["installed_cost_us_dollars_per_kw"]
* (self.nested_outputs["Scenario"]["Site"]["Generator"]["size_kw"]
- self.inputs["Generator"]["existing_kw"]), 0)
for pv in self.inputs["PV"]:
upfront_capex += max(pv["installed_cost_us_dollars_per_kw"]
* (self.nested_outputs["Scenario"]["Site"]["PV"][pv["pv_number"]-1]["size_kw"]
- pv["existing_kw"]), 0)
for tech in ["Storage", "Wind"]:
upfront_capex += (self.inputs[tech].get("installed_cost_us_dollars_per_kw") or 0) * \
(self.nested_outputs["Scenario"]["Site"][tech].get("size_kw") or 0)
# CHP.installed_cost_us_dollars_per_kw is now a list with potentially > 1 elements
for tech in ["CHP"]:
cost_list = self.inputs[tech].get("installed_cost_us_dollars_per_kw") or []
size_list = self.inputs[tech].get("tech_size_for_cost_curve") or []
chp_size = self.nested_outputs["Scenario"]["Site"][tech].get("size_kw")
if len(cost_list) > 1:
if chp_size <= size_list[0]:
upfront_capex += chp_size * cost_list[0] # Currently not handling non-zero cost ($) for 0 kW size input
elif chp_size > size_list[-1]:
upfront_capex += chp_size * cost_list[-1]
else:
for s in range(1, len(size_list)):
if (chp_size > size_list[s-1]) and (chp_size <= size_list[s]):
slope = (cost_list[s] * size_list[s] - cost_list[s-1] * size_list[s-1]) / \
(size_list[s] - size_list[s-1])
upfront_capex += cost_list[s-1] * size_list[s-1] + (chp_size - size_list[s-1]) * slope
elif len(cost_list) == 1:
upfront_capex += (cost_list[0] or 0) * (chp_size or 0)
# storage capacity
upfront_capex += (self.inputs["Storage"].get("installed_cost_us_dollars_per_kwh") or 0) * \
(self.nested_outputs["Scenario"]["Site"]["Storage"].get("size_kwh") or 0)
if self.nested_outputs["Scenario"]["Site"]["AbsorptionChiller"].get("size_ton"):
# Need to update two cost input attributes which are calculated in techs.py and updated in scenario.py
absorp_chl = AbsorptionChillerModel.objects.filter(run_uuid=data['outputs']['Scenario']['run_uuid'])[0]
self.inputs["AbsorptionChiller"].update(
{"installed_cost_us_dollars_per_ton": absorp_chl.installed_cost_us_dollars_per_ton,
"om_cost_us_dollars_per_ton": absorp_chl.om_cost_us_dollars_per_ton})
upfront_capex += (self.inputs["AbsorptionChiller"].get("installed_cost_us_dollars_per_ton") or 0) * \
(self.nested_outputs["Scenario"]["Site"]["AbsorptionChiller"].get("size_ton") or 0)
upfront_capex += (self.inputs["HotTES"].get("installed_cost_us_dollars_per_gal") or 0) * \
(self.nested_outputs["Scenario"]["Site"]["HotTES"].get("size_gal") or 0)
upfront_capex += (self.inputs["ColdTES"].get("installed_cost_us_dollars_per_gal") or 0) * \
(self.nested_outputs["Scenario"]["Site"]["ColdTES"].get("size_gal") or 0)
return round(upfront_capex, 2)
@property
def third_party_factor(self):
yrs = self.inputs["Financial"]["analysis_years"]
pwf_offtaker = annuity(yrs, 0, self.inputs["Financial"]["offtaker_discount_pct"])
pwf_owner = annuity(yrs, 0, self.inputs["Financial"]["owner_discount_pct"])
return (pwf_offtaker * (1 - self.inputs["Financial"]["offtaker_tax_pct"])) \
/ (pwf_owner * (1 - self.inputs["Financial"]["owner_tax_pct"]))
@property
def upfront_capex_after_incentives(self):
"""
The net_capital_costs output is the upfront capex after incentives, except it includes the battery
replacement cost in present value. So we calculate the upfront_capex_after_incentives as net_capital_costs
minus the battery replacement cost in present value.
Note that the owner_discount_pct and owner_tax_pct are set to the offtaker_discount_pct and offtaker_tax_pct
respectively when third_party_ownership is False.
"""
upfront_capex_after_incentives = self.nested_outputs["Scenario"]["Site"]["Financial"]["net_capital_costs"] \
/ self.third_party_factor
pwf_inverter = 1 / ((1 + self.inputs["Financial"]["owner_discount_pct"])
** self.inputs["Storage"]["inverter_replacement_year"])
pwf_storage = 1 / ((1 + self.inputs["Financial"]["owner_discount_pct"])
** self.inputs["Storage"]["battery_replacement_year"])
inverter_future_cost = self.inputs["Storage"]["replace_cost_us_dollars_per_kw"] * \
self.nested_outputs["Scenario"]["Site"]["Storage"]["size_kw"]
storage_future_cost = self.inputs["Storage"]["replace_cost_us_dollars_per_kwh"] * \
self.nested_outputs["Scenario"]["Site"]["Storage"]["size_kwh"]
# NOTE these upfront costs include the tax benefit available to commercial entities
upfront_capex_after_incentives -= inverter_future_cost * pwf_inverter * \
(1 - self.inputs["Financial"]["owner_tax_pct"])
upfront_capex_after_incentives -= storage_future_cost * pwf_storage * \
(1 - self.inputs["Financial"]["owner_tax_pct"])
return round(upfront_capex_after_incentives, 2)
def calculate_lcoe(self, tech_results_dict, tech_inputs_dict, financials):
"""
The LCOE is calculated as annualized costs (capital and O+M translated to current value) divided by annualized energy
output
:param tech_results_dict: dict of model results (i.e. outputs from PVModel )
:param tech_inputs_dict: dict of model results (i.e. inputs to PVModel )
:param financials: financial model storing input financial parameters
:return: float, LCOE in US dollars per kWh
"""
years = financials.analysis_years # length of financial life
if financials.third_party_ownership:
discount_pct = financials.owner_discount_pct
federal_tax_pct = financials.owner_tax_pct
else:
discount_pct = financials.offtaker_discount_pct
federal_tax_pct = financials.offtaker_tax_pct
new_kw = (tech_results_dict.get('size_kw') or 0) - (tech_inputs_dict.get('existing_kw') or 0) # new capacity
if new_kw == 0:
return None
capital_costs = new_kw * tech_inputs_dict['installed_cost_us_dollars_per_kw'] # pre-incentive capital costs
annual_om = new_kw * tech_inputs_dict['om_cost_us_dollars_per_kw'] # NPV of O&M charges escalated over financial life
om_series = [annual_om * (1+financials.om_cost_escalation_pct)**yr for yr in range(1, years+1)]
npv_om = sum([om * (1.0/(1.0+discount_pct))**yr for yr, om in enumerate(om_series,1)])
#Incentives as calculated in the spreadsheet, note utility incentives are applied before state incentives
utility_ibi = min(capital_costs * tech_inputs_dict['utility_ibi_pct'], tech_inputs_dict['utility_ibi_max_us_dollars'])
utility_cbi = min(new_kw * tech_inputs_dict['utility_rebate_us_dollars_per_kw'], tech_inputs_dict['utility_rebate_max_us_dollars'])
state_ibi = min((capital_costs - utility_ibi - utility_cbi) * tech_inputs_dict['state_ibi_pct'], tech_inputs_dict['state_ibi_max_us_dollars'])
state_cbi = min(new_kw * tech_inputs_dict['state_rebate_us_dollars_per_kw'], tech_inputs_dict['state_rebate_max_us_dollars'])
federal_cbi = new_kw * tech_inputs_dict['federal_rebate_us_dollars_per_kw']
ibi = utility_ibi + state_ibi #total investment-based incentives
cbi = utility_cbi + federal_cbi + state_cbi #total capacity-based incentives
#calculate energy in the BAU case, used twice later on
if 'year_one_energy_produced_bau_kwh' in tech_results_dict.keys():
existing_energy_bau = tech_results_dict['year_one_energy_produced_bau_kwh'] or 0
else:
existing_energy_bau = 0
#calculate the value of the production-based incentive stream
npv_pbi = 0
if tech_inputs_dict['pbi_max_us_dollars'] > 0:
for yr in range(years):
if yr < tech_inputs_dict['pbi_years']:
degredation_pct = (1- (tech_inputs_dict.get('degradation_pct') or 0))**yr
base_pbi = min(tech_inputs_dict['pbi_us_dollars_per_kwh'] * \
((tech_results_dict['year_one_energy_produced_kwh'] or 0) - existing_energy_bau) * \
degredation_pct, tech_inputs_dict['pbi_max_us_dollars'] * degredation_pct )
base_pbi = base_pbi * (1.0/(1.0+discount_pct))**(yr+1)
npv_pbi += base_pbi
npv_federal_itc = 0
depreciation_schedule = np.array([0.0 for _ in range(years)])
if tech_inputs_dict['macrs_option_years'] in [5,7]:
if tech_inputs_dict['macrs_option_years'] == 5:
schedule = macrs_five_year
if tech_inputs_dict['macrs_option_years'] == 7:
schedule = macrs_seven_year
federal_itc_basis = capital_costs - state_ibi - utility_ibi - state_cbi - utility_cbi - federal_cbi
federal_itc_amount = tech_inputs_dict['federal_itc_pct'] * federal_itc_basis
npv_federal_itc = federal_itc_amount * (1.0/(1.0+discount_pct))
macrs_bonus_basis = federal_itc_basis - (federal_itc_basis * tech_inputs_dict['federal_itc_pct'] * tech_inputs_dict['macrs_itc_reduction'])
macrs_basis = macrs_bonus_basis * (1 - tech_inputs_dict['macrs_bonus_pct'])
for i,r in enumerate(schedule):
if i < len(depreciation_schedule):
depreciation_schedule[i] = macrs_basis * r
depreciation_schedule[0] += (tech_inputs_dict['macrs_bonus_pct'] * macrs_bonus_basis)
tax_deductions = (np.array(om_series) + np.array(depreciation_schedule)) * federal_tax_pct
npv_tax_deductions = sum([i* (1.0/(1.0+discount_pct))**yr for yr,i in enumerate(tax_deductions,1)])
#we only care about the energy produced by new capacity in LCOE calcs
annual_energy = (tech_results_dict['year_one_energy_produced_kwh'] or 0) - existing_energy_bau
npv_annual_energy = sum([annual_energy * ((1.0/(1.0+discount_pct))**yr) * \
(1- (tech_inputs_dict.get('degradation_pct') or 0))**(yr-1) for yr, i in enumerate(tax_deductions,1)])
#LCOE is calculated as annualized costs divided by annualized energy
lcoe = (capital_costs + npv_om - npv_pbi - cbi - ibi - npv_federal_itc - npv_tax_deductions ) / \
(npv_annual_energy)
return round(lcoe,4)
def get_output(self):
self.get_nested()
output_dict = self.nested_outputs
return output_dict
@staticmethod
def setup_nested():
"""
Set up up empty nested dict for outputs.
:return: nested dict for outputs with values set to None. Results are filled in using "get_nested" method
"""
nested_outputs = dict()
nested_outputs["Scenario"] = dict()
nested_outputs["Scenario"]["Profile"] = dict()
nested_outputs["Scenario"]["Site"] = dict()
# Loop through all sub-site dicts and init
for name, d in nested_output_definitions["outputs"]["Scenario"]["Site"].items():
nested_outputs["Scenario"]["Site"][name] = dict()
for k in d.keys():
nested_outputs["Scenario"]["Site"][name].setdefault(k, None)
return nested_outputs
def get_nested(self):
"""
Translates the "flat" results_dict (which is just the JSON output from REopt mosel code)
into the nested output dict.
:return: None (modifies self.nested_outputs)
"""
# TODO: move the filling in of outputs to reopt.jl
self.nested_outputs["Scenario"]["status"] = self.results_dict["status"]
self.nested_outputs["Scenario"]["lower_bound"] = self.results_dict.get("lower_bound")
self.nested_outputs["Scenario"]["optimality_gap"] = self.results_dict.get("optimality_gap")
financials = FinancialModel.objects.filter(run_uuid=meta['run_uuid']).first() #getting financial inputs for wind and pv lcoe calculations
for name, d in nested_output_definitions["outputs"]["Scenario"]["Site"].items():
if name == "LoadProfile":
self.nested_outputs["Scenario"]["Site"][name]["year_one_electric_load_series_kw"] = self.dm["LoadProfile"].get("year_one_electric_load_series_kw")
self.nested_outputs["Scenario"]["Site"][name]["critical_load_series_kw"] = self.dm["LoadProfile"].get("critical_load_series_kw")
self.nested_outputs["Scenario"]["Site"][name]["annual_calculated_kwh"] = self.dm["LoadProfile"].get("annual_kwh")
self.nested_outputs["Scenario"]["Site"][name]["resilience_check_flag"] = self.dm["LoadProfile"].get("resilience_check_flag")
self.nested_outputs["Scenario"]["Site"][name]["sustain_hours"] = int(self.dm["LoadProfile"].get("bau_sustained_time_steps") / (len(self.dm["LoadProfile"].get("year_one_electric_load_series_kw"))/8760))
self.nested_outputs["Scenario"]["Site"][name]["bau_sustained_time_steps"] = self.dm["LoadProfile"].get("bau_sustained_time_steps")
self.nested_outputs["Scenario"]["Site"][name]['loads_kw'] = self.dm["LoadProfile"].get("year_one_electric_load_series_kw")
elif name == "LoadProfileBoilerFuel":
self.nested_outputs["Scenario"]["Site"][name]["annual_calculated_boiler_fuel_load_mmbtu_bau"] = \
self.dm["LoadProfile"].get("annual_heating_mmbtu")
self.nested_outputs["Scenario"]["Site"][name]["year_one_boiler_fuel_load_series_mmbtu_per_hr"] = \
self.dm["LoadProfile"].get("year_one_boiler_fuel_load_series_mmbtu_per_hr")
self.nested_outputs["Scenario"]["Site"][name]["year_one_boiler_thermal_load_series_mmbtu_per_hr"] = \
[x * self.dm.get("boiler_efficiency", 0) \
for x in self.dm["LoadProfile"].get("year_one_boiler_fuel_load_series_mmbtu_per_hr")]
elif name == "LoadProfileChillerThermal":
self.nested_outputs["Scenario"]["Site"][name]["annual_calculated_kwh_bau"] = \
self.dm["LoadProfile"].get("annual_cooling_kwh")
self.nested_outputs["Scenario"]["Site"][name]["year_one_chiller_electric_load_series_kw"] = \
self.dm["LoadProfile"].get("year_one_chiller_electric_load_series_kw")
self.nested_outputs["Scenario"]["Site"][name]["year_one_chiller_thermal_load_series_ton"] = \
[x * self.dm.get("elecchl_cop", 0) / TONHOUR_TO_KWHT \
for x in self.dm["LoadProfile"].get("year_one_chiller_electric_load_series_kw")]
elif name == "Financial":
self.nested_outputs["Scenario"]["Site"][name]["lcc_us_dollars"] = self.results_dict.get("lcc")
self.nested_outputs["Scenario"]["Site"][name]["lcc_bau_us_dollars"] = self.results_dict.get(
"lcc_bau")
self.nested_outputs["Scenario"]["Site"][name]["npv_us_dollars"] = self.results_dict.get("npv")
self.nested_outputs["Scenario"]["Site"][name][
"net_capital_costs_plus_om_us_dollars"] = self.results_dict.get("net_capital_costs_plus_om")
self.nested_outputs["Scenario"]["Site"][name]["net_om_us_dollars_bau"] = self.results_dict.get(
"net_capital_costs_plus_om_bau")
self.nested_outputs["Scenario"]["Site"][name]["net_capital_costs"] = self.results_dict.get(
"net_capital_costs")
self.nested_outputs["Scenario"]["Site"][name]["microgrid_upgrade_cost_us_dollars"] = \
self.results_dict.get("net_capital_costs") * financials.microgrid_upgrade_cost_pct
self.nested_outputs["Scenario"]["Site"][name]["total_om_costs_us_dollars"] = self.results_dict.get(
"total_om_costs_after_tax")
self.nested_outputs["Scenario"]["Site"][name]["year_one_om_costs_us_dollars"] = self.results_dict.get(
"year_one_om_costs_after_tax")
self.nested_outputs["Scenario"]["Site"][name]["year_one_om_costs_before_tax_us_dollars"] = \
self.results_dict.get("year_one_om_costs_before_tax")
elif name == "PV":
pv_models = list(PVModel.objects.filter(run_uuid=meta['run_uuid']).order_by('pv_number'))
template_pv = copy.deepcopy(self.nested_outputs['Scenario']["Site"][name])
self.nested_outputs['Scenario']["Site"][name] = []
for i, pv_model in enumerate(pv_models):
i += 1
pv = copy.deepcopy(template_pv)
pv["pv_number"] = i
pv["size_kw"] = self.results_dict.get("PV{}_kw".format(i)) or 0
pv["average_yearly_energy_produced_kwh"] = self.results_dict.get("average_yearly_energy_produced_PV{}".format(i))
pv["average_yearly_energy_produced_bau_kwh"] = self.results_dict.get("average_yearly_energy_produced_PV{}_bau".format(i))
pv["average_yearly_energy_exported_kwh"] = self.results_dict.get("average_annual_energy_exported_PV{}".format(i))
pv["year_one_energy_produced_kwh"] = self.results_dict.get("year_one_energy_produced_PV{}".format(i))
pv["year_one_energy_produced_bau_kwh"] = self.results_dict.get("year_one_PV{}_energy_produced_bau".format(i))
pv["year_one_to_battery_series_kw"] = self.results_dict.get("PV{}toBatt".format(i))
pv["year_one_to_load_series_kw"] = self.results_dict.get("PV{}toLoad".format(i))
pv["year_one_to_grid_series_kw"] = self.results_dict.get("PV{}toGrid".format(i))
pv['year_one_curtailed_production_series_kw'] = self.results_dict.get("PV{}toCurtail".format(i))
pv["year_one_power_production_series_kw"] = pv.get("year_one_to_grid_series_kw")
if not pv.get("year_one_to_battery_series_kw") is None:
if pv["year_one_power_production_series_kw"] is None:
pv["year_one_power_production_series_kw"] = pv.get("year_one_to_battery_series_kw")
else:
pv["year_one_power_production_series_kw"] = \
list(np.array(pv["year_one_power_production_series_kw"]) +
np.array(pv.get("year_one_to_battery_series_kw")))
if not pv.get("year_one_to_load_series_kw") is None:
if pv["year_one_power_production_series_kw"] is None:
pv["year_one_power_production_series_kw"] = pv.get("year_one_to_load_series_kw")
else:
pv["year_one_power_production_series_kw"] = \
list(np.array(pv["year_one_power_production_series_kw"]) +
np.array(pv.get("year_one_to_load_series_kw")))
if pv["year_one_power_production_series_kw"] is None:
pv["year_one_power_production_series_kw"] = []
pv["existing_pv_om_cost_us_dollars"] = self.results_dict.get("PV{}_net_fixed_om_costs_bau".format(i))
pv["station_latitude"] = pv_model.station_latitude
pv["station_longitude"] = pv_model.station_longitude
pv["station_distance_km"] = pv_model.station_distance_km
pv['lcoe_us_dollars_per_kwh'] = self.calculate_lcoe(pv, pv_model.__dict__, financials)
self.nested_outputs['Scenario']["Site"][name].append(pv)
elif name == "Wind":
self.nested_outputs["Scenario"]["Site"][name]["size_kw"] = self.results_dict.get("wind_kw", 0)
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_produced_kwh"] = self.results_dict.get("average_wind_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_exported_kwh"] = self.results_dict.get(
"average_annual_energy_exported_wind")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_produced_kwh"] = self.results_dict.get("year_one_wind_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get("WINDtoBatt")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get("WINDtoLoad")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get("WINDtoGrid")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_curtailed_production_series_kw"] = self.results_dict.get("WINDtoCurtail")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_power_production_series_kw"] = self.compute_total_power(name)
if self.nested_outputs["Scenario"]["Site"][name]["size_kw"] > 0: #setting up
wind_model = WindModel.objects.get(run_uuid=meta['run_uuid'])
self.nested_outputs["Scenario"]["Site"][name]['lcoe_us_dollars_per_kwh'] = \
self.calculate_lcoe(self.nested_outputs["Scenario"]["Site"][name], wind_model.__dict__, financials)
data['inputs']['Scenario']["Site"]["Wind"]["installed_cost_us_dollars_per_kw"] = \
wind_model.installed_cost_us_dollars_per_kw
data['inputs']['Scenario']["Site"]["Wind"]["federal_itc_pct"] = wind_model.federal_itc_pct
else:
self.nested_outputs["Scenario"]["Site"][name]['lcoe_us_dollars_per_kwh'] = None
elif name == "Storage":
self.nested_outputs["Scenario"]["Site"][name]["size_kw"] = self.results_dict.get("batt_kw", 0)
self.nested_outputs["Scenario"]["Site"][name]["size_kwh"] = self.results_dict.get("batt_kwh", 0)
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get("ElecFromBatt")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get("ElecFromBattExport")
self.nested_outputs["Scenario"]["Site"][name]["year_one_soc_series_pct"] = \
self.results_dict.get("year_one_soc_series_pct")
elif name == "ElectricTariff":
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_cost_us_dollars"] = self.results_dict.get("year_one_energy_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_demand_cost_us_dollars"] = self.results_dict.get("year_one_demand_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fixed_cost_us_dollars"] = self.results_dict.get("year_one_fixed_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_min_charge_adder_us_dollars"] = self.results_dict.get("year_one_min_charge_adder")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_coincident_peak_cost_us_dollars"] = self.results_dict.get("year_one_coincident_peak_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_cost_bau_us_dollars"] = self.results_dict.get("year_one_energy_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_demand_cost_bau_us_dollars"] = self.results_dict.get("year_one_demand_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fixed_cost_bau_us_dollars"] = self.results_dict.get("year_one_fixed_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_min_charge_adder_bau_us_dollars"] = self.results_dict.get(
"year_one_min_charge_adder_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_coincident_peak_cost_bau_us_dollars"] = self.results_dict.get("year_one_coincident_peak_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_energy_cost_us_dollars"] = self.results_dict.get("total_energy_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_demand_cost_us_dollars"] = self.results_dict.get("total_demand_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_fixed_cost_us_dollars"] = self.results_dict.get("total_fixed_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_min_charge_adder_us_dollars"] = self.results_dict.get("total_min_charge_adder")
self.nested_outputs["Scenario"]["Site"][name][
"total_coincident_peak_cost_us_dollars"] = self.results_dict.get("total_coincident_peak_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_energy_cost_bau_us_dollars"] = self.results_dict.get("total_energy_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_demand_cost_bau_us_dollars"] = self.results_dict.get("total_demand_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_fixed_cost_bau_us_dollars"] = self.results_dict.get("total_fixed_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_min_charge_adder_bau_us_dollars"] = self.results_dict.get("total_min_charge_adder_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_coincident_peak_cost_bau_us_dollars"] = self.results_dict.get("total_coincident_peak_cost_bau")
self.nested_outputs["Scenario"]["Site"][name]["year_one_bill_us_dollars"] = self.results_dict.get(
"year_one_bill")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_bill_bau_us_dollars"] = self.results_dict.get("year_one_bill_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_export_benefit_us_dollars"] = self.results_dict.get("year_one_export_benefit")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_export_benefit_bau_us_dollars"] = self.results_dict.get("year_one_export_benefit_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_export_benefit_us_dollars"] = self.results_dict.get("total_export_benefit")
self.nested_outputs["Scenario"]["Site"][name][
"total_export_benefit_bau_us_dollars"] = self.results_dict.get("total_export_benefit_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_cost_series_us_dollars_per_kwh"] = \
self.dm.get('year_one_energy_cost_series_us_dollars_per_kwh')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_demand_cost_series_us_dollars_per_kw"] = \
self.dm.get('year_one_demand_cost_series_us_dollars_per_kw')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get('GridToLoad')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_bau_kw"] = self.results_dict.get('GridToLoad_bau')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get('GridToBatt')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_supplied_kwh"] = self.results_dict.get("year_one_utility_kwh")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_supplied_kwh_bau"] = self.results_dict.get("year_one_utility_kwh_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_chp_standby_cost_us_dollars"] = self.results_dict.get("year_one_chp_standby_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_chp_standby_cost_us_dollars"] = self.results_dict.get("total_chp_standby_cost")
elif name == "FuelTariff":
self.nested_outputs["Scenario"]["Site"][name][
"total_boiler_fuel_cost_us_dollars"] = self.results_dict.get("total_boiler_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_boiler_fuel_cost_bau_us_dollars"] = self.results_dict.get("total_boiler_fuel_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_cost_us_dollars"] = self.results_dict.get("year_one_boiler_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_cost_bau_us_dollars"] = self.results_dict.get("year_one_boiler_fuel_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_chp_fuel_cost_us_dollars"] = self.results_dict.get("total_chp_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_chp_fuel_cost_us_dollars"] = self.results_dict.get("year_one_chp_fuel_cost")
elif name == "Generator":
self.nested_outputs["Scenario"]["Site"][name]["size_kw"] = self.results_dict.get("generator_kw", 0)
self.nested_outputs["Scenario"]["Site"][name]["fuel_used_gal"] = self.results_dict.get(
"fuel_used_kwh") / GAL_DIESEL_TO_KWH
self.nested_outputs["Scenario"]["Site"][name]["fuel_used_gal_bau"] = self.results_dict.get(
"fuel_used_kwh_bau") / GAL_DIESEL_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get('GENERATORtoLoad')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get('GENERATORtoBatt')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get('GENERATORtoGrid')
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_produced_kwh"] = self.results_dict.get(
"average_yearly_gen_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_exported_kwh"] = self.results_dict.get(
"average_annual_energy_exported_gen")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_produced_kwh"] = self.results_dict.get(
"year_one_gen_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_power_production_series_kw"] = self.compute_total_power(name)
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_total_fixed_om_cost_us_dollars"] = self.results_dict.get(
"gen_net_fixed_om_costs_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_fixed_om_cost_us_dollars"] = self.results_dict.get("gen_net_fixed_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fixed_om_cost_us_dollars"] = self.results_dict.get("gen_year_one_fixed_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_total_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_net_variable_om_costs_bau")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_year_one_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_variable_om_costs_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_net_variable_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_variable_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"total_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_total_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_total_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_total_fuel_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_year_one_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_fuel_cost_bau")
elif name == "CHP":
self.nested_outputs["Scenario"]["Site"][name][
"size_kw"] = self.results_dict.get("chp_kw")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fuel_used_mmbtu"] = self.results_dict.get("year_one_chp_fuel_used") / MMBTU_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_energy_produced_kwh"] = self.results_dict.get("year_one_chp_electric_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_energy_produced_mmbtu"] = self.results_dict.get("year_one_chp_thermal_energy_produced") / MMBTU_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_production_series_kw"] = self.results_dict.get("chp_electric_production_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get("chp_to_battery_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get("chp_electric_to_load_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get("chp_to_grid_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_load_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("chp_thermal_to_load_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_tes_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("chp_thermal_to_tes_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_waste_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("chp_thermal_to_waste_series")]
elif name == "Boiler":
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_consumption_series_mmbtu_per_hr"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("fuel_to_boiler_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_thermal_production_series_mmbtu_per_hr"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("boiler_thermal_production_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_load_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("boiler_thermal_to_load_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_tes_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("boiler_thermal_to_tes_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_consumption_mmbtu"] = self.results_dict.get("year_one_fuel_to_boiler_kwh") / MMBTU_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_thermal_production_mmbtu"] = self.results_dict.get("year_one_boiler_thermal_production_kwh") / MMBTU_TO_KWH
elif name == "ElectricChiller":
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_chiller_thermal_to_load_series_ton"] = [x / TONHOUR_TO_KWHT for x in self.results_dict.get("electric_chiller_to_load_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_chiller_thermal_to_tes_series_ton"] = [x / TONHOUR_TO_KWHT for x in self.results_dict.get("electric_chiller_to_tes_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_chiller_electric_consumption_series_kw"] = self.results_dict.get("electric_chiller_consumption_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_chiller_electric_consumption_kwh"] = self.results_dict.get("year_one_electric_chiller_electric_kwh")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_chiller_thermal_production_tonhr"] = self.results_dict.get("year_one_electric_chiller_thermal_kwh") / TONHOUR_TO_KWHT
elif name == "AbsorptionChiller":
self.nested_outputs["Scenario"]["Site"][name][
"size_ton"] = self.results_dict.get("absorpchl_kw") / TONHOUR_TO_KWHT
self.nested_outputs["Scenario"]["Site"][name][
"year_one_absorp_chl_thermal_to_load_series_ton"] = [x / TONHOUR_TO_KWHT for x in self.results_dict.get("absorption_chiller_to_load_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_absorp_chl_thermal_to_tes_series_ton"] = [x / TONHOUR_TO_KWHT for x in self.results_dict.get("absorption_chiller_to_tes_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_absorp_chl_thermal_consumption_series_mmbtu_per_hr"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("absorption_chiller_consumption_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_absorp_chl_thermal_consumption_mmbtu"] = self.results_dict.get("year_one_absorp_chiller_thermal_consumption_kwh") / MMBTU_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_absorp_chl_thermal_production_tonhr"] = self.results_dict.get("year_one_absorp_chiller_thermal_prod_kwh") / TONHOUR_TO_KWHT
self.nested_outputs["Scenario"]["Site"][name][
"year_one_absorp_chl_electric_consumption_series_kw"] = self.results_dict.get("absorption_chiller_electric_consumption_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_absorp_chl_electric_consumption_kwh"] = self.results_dict.get("year_one_absorp_chiller_electric_consumption_kwh")
elif name == "HotTES":
self.nested_outputs["Scenario"]["Site"][name][
"size_gal"] = self.results_dict.get("hot_tes_size_kwh",0) / HotTES.gal_to_kwh_conversion
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_from_hot_tes_series_mmbtu_per_hr"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("hot_tes_thermal_production_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_hot_tes_soc_series_pct"] = self.results_dict.get("hot_tes_pct_soc_series")
elif name == "ColdTES":
self.nested_outputs["Scenario"]["Site"][name][
"size_gal"] = self.results_dict.get("cold_tes_size_kwht",0) / ColdTES.gal_to_kwh_conversion
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_from_cold_tes_series_ton"] = [x / TONHOUR_TO_KWHT for x in self.results_dict.get("cold_tes_thermal_production_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_cold_tes_soc_series_pct"] = self.results_dict.get("cold_tes_pct_soc_series")
# outputs that depend on multiple object results:
future_replacement_cost, present_replacement_cost = self.replacement_costs_future_and_present
self.nested_outputs["Scenario"]["Site"]["Financial"]["initial_capital_costs"] = self.upfront_capex
self.nested_outputs["Scenario"]["Site"]["Financial"]["replacement_costs"] = future_replacement_cost
self.nested_outputs["Scenario"]["Site"]["Financial"]["om_and_replacement_present_cost_after_tax_us_dollars"] = \
present_replacement_cost + self.results_dict.get("total_om_costs_after_tax", 0)
self.nested_outputs["Scenario"]["Site"]["Financial"]["initial_capital_costs_after_incentives"] = \
self.upfront_capex_after_incentives
if self.third_party_factor != 1:
self.nested_outputs["Scenario"]["Site"]["Financial"][
"developer_om_and_replacement_present_cost_after_tax_us_dollars"] = \
self.nested_outputs["Scenario"]["Site"]["Financial"][
"om_and_replacement_present_cost_after_tax_us_dollars"] / self.third_party_factor
if self.nested_outputs["Scenario"]["Site"]["LoadProfile"]["annual_calculated_kwh"] > 0:
self.nested_outputs["Scenario"]["Site"]["renewable_electricity_energy_pct"] = \
self.nested_outputs["Scenario"]["Site"]["Wind"].get("average_yearly_energy_produced_kwh") or 0
for pv in self.nested_outputs["Scenario"]["Site"]["PV"]:
self.nested_outputs["Scenario"]["Site"]["renewable_electricity_energy_pct"] += \
pv.get("average_yearly_energy_produced_kwh") or 0
self.nested_outputs["Scenario"]["Site"]["renewable_electricity_energy_pct"] = round(
self.nested_outputs["Scenario"]["Site"]["renewable_electricity_energy_pct"] /
self.nested_outputs["Scenario"]["Site"]["LoadProfile"]["annual_calculated_kwh"], 4)
else:
#If this is not set to None it will contain a dictionary of data parameters
self.nested_outputs["Scenario"]["Site"]["renewable_electricity_energy_pct"] = None
time_outputs = [k for k in self.bau_attributes if (k.startswith("julia") or k.startswith("pyjulia"))]
for k in time_outputs:
self.nested_outputs["Scenario"]["Profile"][k] = self.results_dict.get(k)
self.nested_outputs["Scenario"]["Profile"][k + "_bau"] = self.results_dict.get(k + "_bau")
def compute_total_power(self, tech):
    """Sum the per-timestep dispatch series for `tech` into one power series.

    Adds element-wise whichever of the to-load / to-battery / to-grid series
    are present (not None) under self.nested_outputs for the given tech and
    returns the summed series; returns an empty list when none are available.
    Series are assumed to share the same length — TODO confirm upstream.
    """
    series = self.nested_outputs["Scenario"]["Site"][tech]
    keys = (
        "year_one_to_load_series_kw",
        "year_one_to_battery_series_kw",
        "year_one_to_grid_series_kw",
    )
    available = [series[key] for key in keys if series.get(key) is not None]
    return [sum(values) for values in zip(*available)]
self.data = data
self.run_uuid = data['outputs']['Scenario']['run_uuid']
self.user_uuid = data['outputs']['Scenario'].get('user_uuid')
try:
results_object = Results(results_dict=dfm_list[0]['results'], results_dict_bau=dfm_list[1]['results_bau'],
dm=dfm_list[0], inputs=data['inputs']['Scenario']['Site'])
results = results_object.get_output()
data['outputs'].update(results)
data['outputs']['Scenario'].update(meta) # run_uuid and api_version
#simple payback needs all data to be computed so running that calculation here
simple_payback, irr, net_present_cost, annualized_payment_to_third_party_us_dollars, \
offtaker_annual_free_cashflow_series_us_dollars, offtaker_annual_free_cashflow_series_bau_us_dollars, \
offtaker_discounted_annual_free_cashflow_series_us_dollars, offtaker_discounted_annual_free_cashflow_series_bau_us_dollars,\
developer_annual_free_cashflow_series_us_dollars = \
calculate_proforma_metrics(data)
data['outputs']['Scenario']['Site']['Financial']['simple_payback_years'] = simple_payback
data['outputs']['Scenario']['Site']['Financial']['irr_pct'] = irr if not np.isnan(irr or np.nan) else None
data['outputs']['Scenario']['Site']['Financial']['net_present_cost_us_dollars'] = net_present_cost
data['outputs']['Scenario']['Site']['Financial']['annualized_payment_to_third_party_us_dollars'] = \
annualized_payment_to_third_party_us_dollars
data['outputs']['Scenario']['Site']['Financial']['developer_annual_free_cashflow_series_us_dollars'] = \
developer_annual_free_cashflow_series_us_dollars
data['outputs']['Scenario']['Site']['Financial']['offtaker_annual_free_cashflow_series_us_dollars'] = \
offtaker_annual_free_cashflow_series_us_dollars
data['outputs']['Scenario']['Site']['Financial']['offtaker_annual_free_cashflow_series_bau_us_dollars'] = \
offtaker_annual_free_cashflow_series_bau_us_dollars
data['outputs']['Scenario']['Site']['Financial']['offtaker_discounted_annual_free_cashflow_series_us_dollars'] = \
offtaker_discounted_annual_free_cashflow_series_us_dollars
data['outputs']['Scenario']['Site']['Financial']['offtaker_discounted_annual_free_cashflow_series_bau_us_dollars'] = \
offtaker_discounted_annual_free_cashflow_series_bau_us_dollars
data = EmissionsCalculator.add_to_data(data)
pv_watts_station_check = data['outputs']['Scenario']['Site']['PV'][0].get('station_distance_km') or 0
if pv_watts_station_check > 322:
pv_warning = ("The best available solar resource data is {} miles from the site's coordinates."
" Beyond 200 miles, we display this warning. Consider choosing an alternative location closer"
" to the continental US with similar solar irradiance and weather patterns and rerunning the analysis."
" For more information, see https://maps.nrel.gov/nsrdb-viewer/ and the documenation at https://nsrdb.nrel.gov/"
).format(round(pv_watts_station_check*0.621,0))
if data.get('messages') is None:
data['messages'] = {"PVWatts Warning": pv_warning}
else:
data['messages']["PVWatts Warning"] = pv_warning
# Calculate avoided outage costs moved to resilience stats
#calc_avoided_outage_costs(data, present_worth_factor=dfm_list[0]['pwf_e'], run_uuid=self.run_uuid)
data = EmissionsCalculator.add_to_data(data)
if len(data['outputs']['Scenario']['Site']['PV']) == 1:
data['outputs']['Scenario']['Site']['PV'] = data['outputs']['Scenario']['Site']['PV'][0]
profiler.profileEnd()
data['outputs']["Scenario"]["Profile"]["parse_run_outputs_seconds"] = profiler.getDuration()
if saveToDB:
ModelManager.update(data, run_uuid=self.run_uuid)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.info("Results.py raising the error: {}, detail: {}".format(exc_type, exc_value))
raise UnexpectedError(exc_type, exc_value.args[0], traceback.format_tb(exc_traceback), task=self.name, run_uuid=self.run_uuid,
user_uuid=self.user_uuid)
| 69.645669 | 221 | 0.61326 |
acecbd438dc3dab0c06c93d3ac05d307f1b73e82 | 6,459 | py | Python | sampling/margin.py | ShivanganaRawat/ALPO_Segmentation | 14b75d1dce39dd1d308128d978a6c5ed0dc39442 | [
"MIT"
] | 2 | 2021-08-10T14:18:26.000Z | 2022-01-15T04:58:35.000Z | sampling/margin.py | ShivanganaRawat/ALPO_Segmentation | 14b75d1dce39dd1d308128d978a6c5ed0dc39442 | [
"MIT"
] | null | null | null | sampling/margin.py | ShivanganaRawat/ALPO_Segmentation | 14b75d1dce39dd1d308128d978a6c5ed0dc39442 | [
"MIT"
] | 1 | 2022-02-15T09:44:47.000Z | 2022-02-15T09:44:47.000Z | import os
import sys
import json
import csv
import random
import argparse
import torch
import dataloaders
import models
import inspect
import math
from datetime import datetime
from utils import losses
from utils import Logger
from utils.torchsummary import summary
from trainer import Trainer
from torchvision import transforms
from tqdm import tqdm
import torch.nn.functional as F
import numpy as np
import wandb
from wandb import AlertLevel
class Margin_Sampling():
    """Active-learning driver that grows the labeled pool via margin sampling.

    Each episode trains a segmentation model on the current labeled pool, then
    scores every unlabeled image by its mean per-pixel margin (top-1 minus
    top-2 class probability). The images with the smallest margins — the most
    ambiguous predictions — are promoted into the labeled pool for the next
    episode.
    """

    def __init__(self):
        pass

    def get_instance(self, module, name, config, *args):
        """Instantiate config[name]['type'] from `module`, passing *args plus
        config[name]['args'] as keyword arguments."""
        # GET THE CORRESPONDING CLASS / FCT
        return getattr(module, config[name]['type'])(*args, **config[name]['args'])

    def create_episodedir(self, cfg, episode):
        """Create (or reuse) the directory for `episode` and rewire cfg paths.

        Mutates `cfg` in place (and returns it): records the episode number
        and directory, and points the trainer save/log dirs and the labeled /
        unlabeled split files at the new episode directory.
        """
        episode_dir = os.path.join(cfg['exp_dir'], "episode" + str(episode))
        if not os.path.exists(episode_dir):
            os.mkdir(episode_dir)
        else:
            # Reuse is permitted but may clobber results from a previous run.
            print("=============================")
            print("Episode directory already exists: {}. Reusing it may lead to loss of old data in the directory.".format(episode_dir))
            print("=============================")
        cfg['episode'] = episode
        cfg['episode_dir'] = episode_dir
        cfg['trainer']['save_dir'] = os.path.join(episode_dir, cfg['trainer']['original_save_dir'])
        cfg['trainer']['log_dir'] = os.path.join(episode_dir, cfg['trainer']['original_log_dir'])
        cfg['labeled_loader']['args']['load_from'] = os.path.join(episode_dir, "labeled.txt")
        cfg['unlabeled_loader']['args']['load_from'] = os.path.join(episode_dir, "unlabeled.txt")
        return cfg

    def train_model(self, args, config):
        """Train a model on the current labeled pool.

        Builds the labeled/val/test loaders, model, and loss from `config`,
        runs the Trainer, then records the checkpoint directory in `config`
        (also dumped to updated_config.json alongside the checkpoints).
        Returns the updated config.
        """
        train_logger = Logger()

        # DATA LOADERS
        labeled_loader = self.get_instance(dataloaders, 'labeled_loader', config)
        val_loader = self.get_instance(dataloaders, 'val_loader', config)
        test_loader = self.get_instance(dataloaders, 'test_loader', config)

        # MODEL
        model = self.get_instance(models, 'arch', config, labeled_loader.dataset.num_classes)

        # LOSS
        loss = getattr(losses, config['loss'])(ignore_index=config['ignore_index'])

        # TRAINING
        trainer = Trainer(
            model=model,
            loss=loss,
            resume=args.resume,
            config=config,
            train_loader=labeled_loader,
            val_loader=val_loader,
            test_loader=test_loader,
            train_logger=train_logger)
        trainer.train()

        config['checkpoint_dir'] = trainer._get_checkpoint_dir()
        config_save_path = os.path.join(config['checkpoint_dir'], 'updated_config.json')
        with open(config_save_path, 'w') as handle:
            json.dump(config, handle, indent=4, sort_keys=True)
        return config

    def margin_score(self, prob_map):
        """Return the mean margin of a class-probability map.

        prob_map: array of shape (num_classes, H, W) — assumed to hold softmax
        probabilities (TODO confirm against caller). The per-pixel margin is
        the largest minus the second-largest class probability; the mean over
        all pixels is returned. Small values mean the model is uncertain.
        """
        sorted_probs = np.sort(prob_map, axis=0)
        # BUGFIX: np.sort is ascending, so the previous indices [1] and [0]
        # selected the two *smallest* probabilities — equivalent to top-1
        # minus top-2 only when num_classes == 2. Indices [-1] and [-2] are
        # correct for any class count (and identical for the binary case).
        highest_score = sorted_probs[-1, :, :]
        second_highest_score = sorted_probs[-2, :, :]
        margin = highest_score - second_highest_score
        margin = margin.mean()
        return margin

    def update_pools(self, args, config, episode):
        """Score the unlabeled pool and promote the most ambiguous images.

        Loads the best checkpoint, computes a margin score for every unlabeled
        image, moves the `args.batch_size` lowest-margin images into the
        labeled pool, and writes the labeled.txt / unlabeled.txt splits for
        episode + 1. Returns the config updated for the next episode.
        """
        # BUGFIX: OrderedDict is used by the CPU-inference branch below but
        # was never imported anywhere in this module (latent NameError).
        from collections import OrderedDict

        unlabeled_loader = self.get_instance(dataloaders, 'unlabeled_loader', config)
        unlabeled_file = os.path.join(config["episode_dir"], "unlabeled.txt")
        unlabeled_reader = csv.reader(open(unlabeled_file, 'rt'))
        unlabeled_image_set = [r[0] for r in unlabeled_reader]

        # Model
        model = self.get_instance(models, 'arch', config, unlabeled_loader.dataset.num_classes)
        availble_gpus = list(range(torch.cuda.device_count()))
        device = torch.device('cuda:0' if len(availble_gpus) > 0 else 'cpu')

        # Load checkpoint
        checkpoint = torch.load(os.path.join(config['exp_dir'], "best_model.pth"), map_location=device)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint.keys():
            checkpoint = checkpoint['state_dict']
        # If during training, we used data parallel
        if 'module' in list(checkpoint.keys())[0] and not isinstance(model, torch.nn.DataParallel):
            # for gpu inference, use data parallel
            if "cuda" in device.type:
                model = torch.nn.DataParallel(model)
            else:
                # for cpu inference, strip the 'module.' prefix from every key
                new_state_dict = OrderedDict()
                for k, v in checkpoint.items():
                    name = k[7:]
                    new_state_dict[name] = v
                checkpoint = new_state_dict
        # load
        model.load_state_dict(checkpoint)
        model.to(device)
        model.eval()

        information_content = []
        tbar = tqdm(unlabeled_loader, ncols=130)
        with torch.no_grad():
            for img_idx, (data, target) in enumerate(tbar):
                data, target = data.to(device), target.to(device)
                output, _ = model(data)
                output = output.squeeze(0).cpu().numpy()
                # Softmax over the class dimension (dim 0 after squeezing the
                # batch dim). Stating dim explicitly matches the implicit
                # choice for 3-D input while avoiding the deprecation warning.
                output = F.softmax(torch.from_numpy(output), dim=0)
                uncertainty_score = self.margin_score(output.numpy())
                information_content.append([unlabeled_image_set[img_idx], uncertainty_score])

        # Smallest margin first: those are the most uncertain predictions.
        information_content = sorted(information_content, key=lambda x: x[1], reverse=False)
        information_content = information_content[:args.batch_size]
        new_batch = [x[0] for x in information_content]

        labeled = os.path.join(config['episode_dir'], "labeled.txt")
        labeled_reader = csv.reader(open(labeled, 'rt'))
        labeled_image_set = [r[0] for r in labeled_reader]
        new_labeled = labeled_image_set + new_batch
        new_labeled.sort()
        new_unlabeled = list(set(unlabeled_image_set) - set(new_batch))
        new_unlabeled.sort()

        config = self.create_episodedir(config, episode + 1)
        with open(os.path.join(config['episode_dir'], "labeled.txt"), 'w') as f:
            writer = csv.writer(f)
            for image in new_labeled:
                writer.writerow([image])
        with open(os.path.join(config['episode_dir'], "unlabeled.txt"), 'w') as f:
            writer = csv.writer(f)
            for image in new_unlabeled:
                writer.writerow([image])
        return config
| 34.913514 | 136 | 0.618362 |
acecbe737e6d80e281a2d7d64d960654ea715e70 | 3,961 | py | Python | pytorch-frontend/tools/amd_build/build_amd.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 40 | 2021-06-01T07:37:59.000Z | 2022-03-25T01:42:09.000Z | pytorch-frontend/tools/amd_build/build_amd.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 14 | 2021-06-01T11:52:46.000Z | 2022-03-25T02:13:08.000Z | pytorch-frontend/tools/amd_build/build_amd.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 7 | 2021-07-20T19:34:26.000Z | 2022-03-13T21:07:36.000Z | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import os
import argparse
import sys
sys.path.append(os.path.realpath(os.path.join(
__file__,
os.path.pardir,
os.path.pardir,
os.path.pardir,
'torch',
'utils')))
from hipify import hipify_python
parser = argparse.ArgumentParser(description='Top-level script for HIPifying, filling in most common parameters')
parser.add_argument(
'--out-of-place-only',
action='store_true',
help="Whether to only run hipify out-of-place on source files")
parser.add_argument(
'--project-directory',
type=str,
default='',
help="The root of the project.",
required=False)
parser.add_argument(
'--output-directory',
type=str,
default='',
help="The directory to store the hipified project",
required=False)
parser.add_argument(
'--extra-include-dir',
type=str,
default=[],
nargs='+',
help="The list of extra directories in caffe2 to hipify",
required=False)
args = parser.parse_args()
amd_build_dir = os.path.dirname(os.path.realpath(__file__))
proj_dir = os.path.join(os.path.dirname(os.path.dirname(amd_build_dir)))
if args.project_directory:
proj_dir = args.project_directory
out_dir = proj_dir
if args.output_directory:
out_dir = args.output_directory
includes = [
"caffe2/operators/*",
"caffe2/sgd/*",
"caffe2/image/*",
"caffe2/transforms/*",
"caffe2/video/*",
"caffe2/distributed/*",
"caffe2/queue/*",
"caffe2/contrib/aten/*",
"binaries/*",
"caffe2/**/*_test*",
"caffe2/core/*",
"caffe2/db/*",
"caffe2/utils/*",
"caffe2/contrib/gloo/*",
"caffe2/contrib/nccl/*",
"c10/cuda/*",
"c10/cuda/test/CMakeLists.txt",
"modules/*",
# PyTorch paths
# Keep this synchronized with is_pytorch_file in hipify_python.py
"aten/src/ATen/cuda/*",
"aten/src/ATen/native/cuda/*",
"aten/src/ATen/native/cudnn/*",
"aten/src/ATen/native/sparse/cuda/*",
"aten/src/ATen/native/quantized/cuda/*",
"aten/src/THC/*",
"aten/src/THCUNN/*",
"aten/src/ATen/test/*",
# CMakeLists.txt isn't processed by default, but there are a few
# we do want to handle, so explicitly specify them
"aten/src/THC/CMakeLists.txt",
"aten/src/THCUNN/CMakeLists.txt",
"torch/*",
"tools/autograd/templates/python_variable_methods.cpp",
]
# Extend the hipify include list with any user-supplied extra directories
# (given via --extra-include-dir, relative to the project root).
for new_dir in args.extra_include_dir:
    abs_new_dir = os.path.join(proj_dir, new_dir)
    if os.path.exists(abs_new_dir):
        # Glob everything under the directory, recursively.
        new_dir = os.path.join(new_dir, '**/*')
        includes.append(new_dir)
    # NOTE(review): nonexistent directories are silently skipped — confirm a
    # warning would not be preferable here.

# Paths that must never be hipified, even if matched by `includes`.
ignores = [
    "caffe2/operators/depthwise_3x3_conv_op_cudnn.cu",
    "caffe2/operators/pool_op_cudnn.cu",
    '*/hip/*',
    # These files are compatible with both cuda and hip
    "aten/src/ATen/core/*",
    # generated files we shouldn't frob
    "torch/lib/tmp_install/*",
    "torch/include/*",
]
# Check if the compiler is hip-clang.
def is_hip_clang() -> bool:
    """Return True if the installed HIP toolchain uses hip-clang.

    Reads ``$HIP_PATH/lib/.hipInfo`` (HIP_PATH defaults to /opt/rocm/hip) and
    looks for the ``HIP_COMPILER=clang`` marker. Returns False when the file
    is missing or unreadable (e.g. no ROCm installation at all).
    """
    try:
        hip_path = os.getenv('HIP_PATH', '/opt/rocm/hip')
        # Use a context manager so the file handle is closed even on error;
        # the original left the handle open.
        with open(os.path.join(hip_path, 'lib', '.hipInfo')) as hip_info:
            return 'HIP_COMPILER=clang' in hip_info.read()
    except IOError:
        return False
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
    # hip-clang links against amdhip64 instead of the legacy hip_hcc library;
    # patch the vendored gloo CMake file in place if it still references it.
    gloo_cmake_file = "third_party/gloo/cmake/Hip.cmake"
    with open(gloo_cmake_file, "r") as sources:
        lines = sources.readlines()
    newlines = [line.replace(' hip_hcc ', ' amdhip64 ') for line in lines]
    if lines == newlines:
        # Nothing to replace — already patched (or fixed upstream).
        print("%s skipped" % gloo_cmake_file)
    else:
        with open(gloo_cmake_file, "w") as sources:
            sources.writelines(newlines)
        print("%s updated" % gloo_cmake_file)
    # (Removed dead variable `do_write` — it was assigned but never read.)

# Run the actual CUDA -> HIP source translation over the configured paths.
hipify_python.hipify(
    project_directory=proj_dir,
    output_directory=out_dir,
    includes=includes,
    ignores=ignores,
    out_of_place_only=args.out_of_place_only,
    hip_clang_launch=is_hip_clang())
| 28.092199 | 113 | 0.663216 |
acecbfbe0d71b7f8ebaa0cc1108189d754fb77bd | 10,748 | py | Python | locust/main.py | n89nanda/locust | 40c755824e12640b3866e4c7fbace8d3c12ea873 | [
"MIT"
] | null | null | null | locust/main.py | n89nanda/locust | 40c755824e12640b3866e4c7fbace8d3c12ea873 | [
"MIT"
] | null | null | null | locust/main.py | n89nanda/locust | 40c755824e12640b3866e4c7fbace8d3c12ea873 | [
"MIT"
] | 1 | 2020-06-30T19:34:41.000Z | 2020-06-30T19:34:41.000Z | import inspect
import logging
import os
import importlib
import signal
import socket
import sys
import time
import gevent
import locust
from .event import Events
from .argument_parser import parse_locustfile_option, parse_options
from .core import HttpLocust, Locust
from .env import Environment
from .inspectlocust import get_task_ratio_dict, print_task_ratio
from .log import console_logger, setup_logging
from .runners import LocalLocustRunner, MasterLocustRunner, SlaveLocustRunner
from .stats import (print_error_report, print_percentile_stats, print_stats,
stats_printer, stats_writer, write_stat_csvs)
from .util.timespan import parse_timespan
from .web import WebUI
_internals = [Locust, HttpLocust]
version = locust.__version__
def is_locust(tup):
    """
    Takes (name, object) tuple, returns True if it's a public Locust subclass.
    """
    name, item = tup
    # Guard clauses mirror the original short-circuit order exactly, so that
    # non-class values never reach issubclass().
    if not inspect.isclass(item):
        return False
    if not issubclass(item, Locust):
        return False
    # Must define a non-empty task_set to be runnable.
    if not hasattr(item, "task_set"):
        return False
    if not getattr(item, "task_set"):
        return False
    # Names with a leading underscore are considered private.
    return not name.startswith('_')
def load_locustfile(path):
    """
    Import given locustfile path and return (docstring, callables).

    Specifically, the locustfile's ``__doc__`` attribute (a string) and a
    dictionary of ``{'name': callable}`` containing all callables which pass
    the "is a Locust" test.
    """
    def __import_locustfile__(filename, path):
        """
        Loads the locust file as a module, similar to performing `import`
        """
        # NOTE(review): uses the enclosing `locustfile` variable rather than the
        # `filename` parameter — works only because both hold the same value at
        # the single call site below; confirm before reusing this helper.
        # NOTE(review): SourceFileLoader.load_module() is deprecated in favor of
        # exec_module(); kept as-is since load_module also registers the module.
        source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)
        return source.load_module()
    # Start with making sure the current working dir is in the sys.path
    sys.path.insert(0, os.getcwd())
    # Get directory and locustfile name
    directory, locustfile = os.path.split(path)
    # If the directory isn't in the PYTHONPATH, add it so our import will work
    added_to_path = False
    index = None
    if directory not in sys.path:
        sys.path.insert(0, directory)
        added_to_path = True
    # If the directory IS in the PYTHONPATH, move it to the front temporarily,
    # otherwise other locustfiles -- like Locusts's own -- may scoop the intended
    # one.
    else:
        i = sys.path.index(directory)
        if i != 0:
            # Store index for later restoration
            index = i
            # Add to front, then remove from original position
            sys.path.insert(0, directory)
            del sys.path[i + 1]
    # Perform the import
    imported = __import_locustfile__(locustfile, path)
    # Remove directory from path if we added it ourselves (just to be neat)
    if added_to_path:
        del sys.path[0]
    # Put back in original index if we moved it
    if index is not None:
        sys.path.insert(index + 1, directory)
        del sys.path[0]
    # Return our two-tuple: the module docstring and all public Locust classes
    # found at module level.
    locusts = dict(filter(is_locust, vars(imported).items()))
    return imported.__doc__, locusts
def create_environment(options, events=None):
    """
    Create an Environment instance from options
    """
    # Collect the constructor arguments first, then build the Environment in
    # a single call; behavior is identical to passing the kwargs inline.
    env_kwargs = {
        "events": events,
        "host": options.host,
        "options": options,
        "reset_stats": options.reset_stats,
        "step_load": options.step_load,
        "stop_timeout": options.stop_timeout,
    }
    return Environment(**env_kwargs)
def main():
    """Locust command-line entry point.

    Parses the command line, imports the user's locustfile, builds the
    Environment and the appropriate runner (local / master / slave),
    optionally starts the web UI, then blocks until the run finishes or is
    interrupted, finally printing/writing stats and exiting.
    """
    # find specified locustfile and make sure it exists, using a very simplified
    # command line parser that is only used to parse the -f option
    locustfile = parse_locustfile_option()

    # import the locustfile
    docstring, locusts = load_locustfile(locustfile)

    # parse all command line options
    options = parse_options()

    # setup logging
    if not options.skip_log_setup:
        setup_logging(options.loglevel, options.logfile)

    logger = logging.getLogger(__name__)

    # --list: print the available Locust classes and exit.
    if options.list_commands:
        console_logger.info("Available Locusts:")
        for name in locusts:
            console_logger.info(" " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if options.locust_classes:
        missing = set(options.locust_classes) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(options.locust_classes) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        # list() call is needed to consume the dict_view object in Python 3
        locust_classes = list(locusts.values())

    # create locust Environment
    environment = create_environment(options, events=locust.events)

    # --show-task-ratio: print per-class and total task weights, then exit.
    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info( "-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)

    # --show-task-ratio-json: same data, machine-readable, then exit.
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes),
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    # Validate --step-time: only meaningful with --step-load, on the master.
    if options.step_time:
        if not options.step_load:
            logger.error("The --step-time argument can only be used together with --step-load")
            sys.exit(1)
        if options.slave:
            logger.error("--step-time should be specified on the master node, and not on slave nodes")
            sys.exit(1)
        try:
            options.step_time = parse_timespan(options.step_time)
        except ValueError:
            logger.error("Valid --step-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.")
            sys.exit(1)

    # Build the runner matching the requested mode.
    if options.master:
        runner = MasterLocustRunner(
            environment,
            locust_classes,
            master_bind_host=options.master_bind_host,
            master_bind_port=options.master_bind_port,
        )
    elif options.slave:
        try:
            runner = SlaveLocustRunner(
                environment,
                locust_classes,
                master_host=options.master_host,
                master_port=options.master_port,
            )
        except socket.error as e:
            logger.error("Failed to connect to the Locust master: %s", e)
            sys.exit(-1)
    else:
        runner = LocalLocustRunner(environment, locust_classes)

    # main_greenlet is pointing to runners.greenlet by default, it will point the web greenlet later if in web mode
    main_greenlet = runner.greenlet

    # Validate --run-time: headless (--no-web) master/local only.
    if options.run_time:
        if not options.no_web:
            logger.error("The --run-time argument can only be used together with --no-web")
            sys.exit(1)
        if options.slave:
            logger.error("--run-time should be specified on the master node, and not on slave nodes")
            sys.exit(1)
        try:
            options.run_time = parse_timespan(options.run_time)
        except ValueError:
            logger.error("Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.")
            sys.exit(1)

    def spawn_run_time_limit_greenlet():
        # Schedule a greenlet that quits the runner once --run-time elapses.
        logger.info("Run time limit set to %s seconds" % options.run_time)
        def timelimit_stop():
            logger.info("Time limit reached. Stopping Locust.")
            runner.quit()
        gevent.spawn_later(options.run_time, timelimit_stop)

    # start Web UI
    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor at http://%s:%s" % (options.web_host or "*", options.web_port))
        web_ui = WebUI(environment=environment, runner=runner)
        main_greenlet = gevent.spawn(web_ui.start, host=options.web_host, port=options.web_port)
    else:
        web_ui = None

    # Fire locust init event which can be used by end-users' code to run setup code that
    # need access to the Environment, Runner or WebUI
    environment.events.init.fire(environment=environment, runner=runner, web_ui=web_ui)

    if options.no_web:
        # headless mode
        if options.master:
            # wait for slave nodes to connect before starting the test
            while len(runner.clients.ready) < options.expect_slaves:
                # NOTE(review): uses the root logger (logging.info) instead of
                # the module `logger` used everywhere else — confirm intended.
                logging.info("Waiting for slaves to be ready, %s of %s connected",
                             len(runner.clients.ready), options.expect_slaves)
                time.sleep(1)
        if not options.slave:
            # start the test
            if options.step_time:
                runner.start_stepload(options.num_clients, options.hatch_rate, options.step_clients, options.step_time)
            else:
                runner.start(options.num_clients, options.hatch_rate)

    if options.run_time:
        spawn_run_time_limit_greenlet()

    stats_printer_greenlet = None
    if not options.only_summary and (options.print_stats or (options.no_web and not options.slave)):
        # spawn stats printing greenlet
        stats_printer_greenlet = gevent.spawn(stats_printer(runner.stats))

    if options.csvfilebase:
        # Periodically persist stats to CSV files.
        gevent.spawn(stats_writer, runner.stats, options.csvfilebase, options.stats_history_enabled)

    def shutdown(code=0):
        """
        Shut down locust by firing quitting event, printing/writing stats and exiting
        """
        logger.info("Shutting down (exit code %s), bye." % code)
        if stats_printer_greenlet is not None:
            stats_printer_greenlet.kill(block=False)
        logger.info("Cleaning up runner...")
        if runner is not None:
            runner.quit()
        logger.info("Running teardowns...")
        environment.events.quitting.fire(reverse=True)
        print_stats(runner.stats, current=False)
        print_percentile_stats(runner.stats)
        if options.csvfilebase:
            write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
        print_error_report(runner.stats)
        sys.exit(code)

    # install SIGTERM handler
    def sig_term_handler():
        logger.info("Got SIGTERM signal")
        shutdown(0)
    gevent.signal(signal.SIGTERM, sig_term_handler)

    # Block on the main greenlet (runner or web UI) until the run completes,
    # then shut down; Ctrl-C exits cleanly with code 0.
    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
        code = 0
        if len(runner.errors) or len(runner.exceptions):
            code = options.exit_code_on_error
        shutdown(code=code)
    except KeyboardInterrupt as e:
        shutdown(0)
| 35.946488 | 119 | 0.644492 |
acecc0264b35a21542684999d9f882c7b21d6009 | 11,207 | py | Python | packages/python/plotly/plotly/graph_objs/choropleth/hoverlabel/_font.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/choropleth/hoverlabel/_font.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/choropleth/hoverlabel/_font.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font styling for choropleth hover labels.

    Auto-generated plotly graph object: each property delegates to the
    validated item-access machinery of _BaseTraceHierarchyType.
    """

    # class properties
    # --------------------
    _parent_path_str = "choropleth.hoverlabel"
    _path_str = "choropleth.hoverlabel.font"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `family`.

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `size`.

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.choropleth.hoverlabel.Font`
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal construction shortcut used by the plotly machinery:
        # when a parent is supplied, skip all validation/population.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choropleth.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.hoverlabel.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 33.858006 | 82 | 0.559293 |
acecc3a70ea18796a1b15dc1dbb7b99fbe554a89 | 1,144 | py | Python | setup_cpp_ext.py | forkbabu/Chunkmogrify | 16d2ce67159dd0c891cda74cc8c88853cb5615f4 | [
"MIT"
] | 13 | 2022-01-10T23:35:13.000Z | 2022-03-19T15:49:02.000Z | setup_cpp_ext.py | bycloudai/Chunkmogrify | 10fb9ab3eef0fb50cec0e474ab48333032ee3c3b | [
"MIT"
] | null | null | null | setup_cpp_ext.py | bycloudai/Chunkmogrify | 10fb9ab3eef0fb50cec0e474ab48333032ee3c3b | [
"MIT"
] | null | null | null | #
# Author: David Futschik
# Provided as part of the Chunkmogrify project, 2021.
#
import platform
import setuptools
import numpy as np
from setuptools import sandbox
# Compiler flags differ between MSVC and GCC/Clang toolchains.
platform_specific_flags = []
if platform.system() == "Windows":
    # /permissive- enforces standards conformance; /Ox enables full optimization.
    platform_specific_flags += ["/permissive-", "/Ox"]
else:
    platform_specific_flags += ["-O3"]

# The two native extension modules; both compile against the NumPy C headers.
ext_modules = [
    setuptools.Extension('_C_canvas',
                         sources=['extensions/canvas_to_masks.cpp'],
                         include_dirs=[np.get_include()],
                         extra_compile_args=platform_specific_flags,
                         language='c++'),
    setuptools.Extension('_C_heatmap',
                         sources=['extensions/heatmap.cpp'],
                         include_dirs=[np.get_include()],
                         extra_compile_args=platform_specific_flags,
                         language='c++')
]
def checked_build(force=False):
    """Build the C++ extensions in place unless they are already importable.

    With force=True the extensions are rebuilt even when importable.
    """
    def do_build():
        # Runs `python setup_cpp_ext.py build_ext --inplace` in a sandbox.
        # NOTE(review): hardcoded script name assumes the current working
        # directory contains setup_cpp_ext.py — confirm against callers.
        sandbox.run_setup('setup_cpp_ext.py', ['build_ext', '--inplace'])
    try:
        # These imports act purely as availability probes for the built modules.
        import _C_canvas
        import _C_heatmap
        if force: do_build()
    except ImportError:
        do_build()
if __name__ == "__main__":
setuptools.setup(
ext_modules=ext_modules
) | 26.604651 | 73 | 0.643357 |
acecc3aaaff92a72fe954fb18883ae750ad14974 | 2,526 | py | Python | note/meiduo34/mall/apps/orders/models.py | gaosong666/taobao | cec3be71376fb94dc38553360253b70e88855594 | [
"MIT"
] | null | null | null | note/meiduo34/mall/apps/orders/models.py | gaosong666/taobao | cec3be71376fb94dc38553360253b70e88855594 | [
"MIT"
] | null | null | null | note/meiduo34/mall/apps/orders/models.py | gaosong666/taobao | cec3be71376fb94dc38553360253b70e88855594 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
from django.db import models
from utils.models import BaseModel
from users.models import User, Address
from goods.models import SKU
# Create your models here.
class OrderInfo(BaseModel):
    """Order header: one row per customer order."""

    # Programmatic names for the payment-method choice values below.
    PAY_METHODS_ENUM = {
        "CASH": 1,
        "ALIPAY": 2
    }

    PAY_METHOD_CHOICES = (
        (1, "货到付款"),
        (2, "支付宝"),
    )

    # Programmatic names for the order-status choice values below.
    # NOTE(review): status 6 ("已取消") has no ENUM entry — confirm intentional.
    ORDER_STATUS_ENUM = {
        "UNPAID": 1,
        "UNSEND": 2,
        "UNRECEIVED": 3,
        "UNCOMMENT": 4,
        "FINISHED": 5
    }

    ORDER_STATUS_CHOICES = (
        (1, "待支付"),
        (2, "待发货"),
        (3, "待收货"),
        (4, "待评价"),
        (5, "已完成"),
        (6, "已取消"),
    )

    order_id = models.CharField(max_length=64, primary_key=True, verbose_name="订单号")
    user = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name="下单用户")
    address = models.ForeignKey(Address, on_delete=models.PROTECT, verbose_name="收获地址")
    total_count = models.IntegerField(default=1, verbose_name="商品总数")
    total_amount = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="商品总金额")
    freight = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="运费")
    pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=1, verbose_name="支付方式")
    status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name="订单状态")

    class Meta:
        db_table = "tb_order_info"
        verbose_name = '订单基本信息'
        verbose_name_plural = verbose_name

    def __str__(self):
        # Readable representation for the Django admin and shell (Django
        # best practice: models should define __str__).
        return self.order_id
class OrderGoods(BaseModel):
    """Order line item: one SKU (with quantity, price and review data) within an order."""

    # Satisfaction score choices; labels are "<n*20>分" (points).
    SCORE_CHOICES = (
        (0, '0分'),
        (1, '20分'),
        (2, '40分'),
        (3, '60分'),
        (4, '80分'),
        (5, '100分'),
    )

    order = models.ForeignKey(OrderInfo, related_name='skus', on_delete=models.CASCADE, verbose_name="订单")
    sku = models.ForeignKey(SKU, on_delete=models.PROTECT, verbose_name="订单商品")
    count = models.IntegerField(default=1, verbose_name="数量")
    price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="单价")
    comment = models.TextField(default="", verbose_name="评价信息")
    score = models.SmallIntegerField(choices=SCORE_CHOICES, default=5, verbose_name='满意度评分')
    is_anonymous = models.BooleanField(default=False, verbose_name='是否匿名评价')
    is_commented = models.BooleanField(default=False, verbose_name='是否评价了')

    class Meta:
        db_table = "tb_order_goods"
        verbose_name = '订单商品'
        verbose_name_plural = verbose_name

    def __str__(self):
        # Use the FK raw-id attributes (<field>_id) so no extra DB query is
        # triggered just to render the string (Django best practice).
        return "order %s sku %s x%s" % (self.order_id, self.sku_id, self.count)
acecc451aad65244b9776005ef2f6b3cee6bc50b | 2,112 | py | Python | tools/leetcode.037.Sudoku Solver/leetcode.037.Sudoku Solver.submission3.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | 4 | 2015-10-10T00:30:55.000Z | 2020-07-27T19:45:54.000Z | tools/leetcode.037.Sudoku Solver/leetcode.037.Sudoku Solver.submission3.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | tools/leetcode.037.Sudoku Solver/leetcode.037.Sudoku Solver.submission3.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | class Solution:
# @param {character[][]} board
# @return {void} Do not return anything, modify board in-place instead.
def solveSudoku(self, board):
hset = [{'1','2','3','4','5','6','7','8','9'} for _ in range(9)]
vset = [{'1','2','3','4','5','6','7','8','9'} for _ in range(9)]
boxset = [{'1','2','3','4','5','6','7','8','9'}for _ in range(9)]
temp = self.helper(board,hset,vset,boxset)
board[:] = temp[:]
def helper(self,board,hset,vset,boxset):
    """Constraint-propagation + backtracking Sudoku search.

    hset/vset/boxset hold the digits still available in each row, column
    and 3x3 box. Returns the solved board, or [] if this branch is
    unsolvable. NOTE: the candidate sets are shared (aliased) with the
    entries in `working`, so in-place set updates propagate to all cells.
    """
    # Collect every empty cell together with its three aliased candidate sets,
    # and consume already-filled digits from the row/column/box sets.
    working = []
    for i in range(9):
        for j in range(9):
            if board[i][j] == '.':
                working.append([(i,j),hset[i],vset[j],boxset[(i//3) * 3 + j // 3]])
            else:
                hset[i] -= {board[i][j]}
                vset[j] -= {board[i][j]}
                boxset[(i//3) * 3 + j // 3] -= {board[i][j]}
    while working:
        # Most-constrained-cell heuristic: handle the cell with the fewest
        # remaining candidates first.
        working.sort(key=lambda x: len(x[1]&x[2]&x[3]))
        cur = working.pop(0)
        candi = (cur[1]&cur[2]&cur[3])
        i = cur[0][0]
        j = cur[0][1]
        # No candidate left: this branch is a dead end.
        if len(candi) == 0: return []
        if len(candi) == 1:
            # Forced move: place the digit and consume it from all three sets.
            num = candi.pop()
            hset[i] -= {num}
            vset[j] -= {num}
            boxset[(i//3) * 3 + j // 3] -= {num}
            board[i][j] = num
        else:
            # Branch: try each candidate on deep copies of board and sets.
            l = len(candi)
            for k in range(l):
                num = candi.pop()
                board[i][j] = num
                hset[i] -= {num}
                vset[j] -= {num}
                boxset[(i//3) * 3 + j // 3] -= {num}
                temp = self.helper([a[:] for a in board],[set(a) for a in hset],[set(a) for a in vset],[set(a) for a in boxset])
                if temp:
                    board = temp
                    return board
                # Backtrack: restore the cell and re-add the digit to the
                # (shared) candidate sets before trying the next digit.
                board[i][j] = '.'
                hset[i].add(num)
                vset[j].add(num)
                boxset[(i//3) * 3 + j // 3].add(num)
            # All candidates failed for this cell.
            return []
    return board
| 2,112 | 2,112 | 0.369792 |
acecc54f6b470d357c9de4934a7f963c13dcfd6e | 7,023 | py | Python | lncrna/cox/LIHC/patient_info.py | OmnesRes/onco_lnc | e8d20e43026ffe4651bd25783db36cabc2c1519f | [
"MIT"
] | 33 | 2016-06-03T17:19:58.000Z | 2021-07-08T03:09:40.000Z | lncrna/cox/LIHC/patient_info.py | OmnesRes/onco_lnc | e8d20e43026ffe4651bd25783db36cabc2c1519f | [
"MIT"
] | 3 | 2016-07-13T23:12:18.000Z | 2016-09-15T19:35:22.000Z | lncrna/cox/LIHC/patient_info.py | OmnesRes/onco_lnc | e8d20e43026ffe4651bd25783db36cabc2c1519f | [
"MIT"
] | 19 | 2016-04-13T15:12:29.000Z | 2021-07-08T03:11:19.000Z | ## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# NOTE(review): file handles opened throughout this script are never closed —
# acceptable for a one-shot analysis script, but `with open(...)` would be safer.
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_lihc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
# Skip the two secondary header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
# NOTE(review): only the LAST appended entry is compared, so this assumes
# repeated listings of a patient are contiguous in the file — confirm.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_patient_lihc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    # NOTE(review): bare except silently drops patients with missing/unknown
    # grade, sex or age (intended best-effort filter), but it would also hide
    # unrelated bugs — narrowing to (KeyError, ValueError) would be safer.
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        # Keep whichever record has the longer (more recent) follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the GBM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','lncrna','LIHC.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
##create a dictionary mapping patient to all of their lncrna expression data
# NOTE(review): values are set to '' — only the patient barcodes (first 12
# chars) are actually used here, as membership keys; confirm intended.
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=''
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
| 31.213333 | 132 | 0.705681 |
acecc5bbc351b5f3695f699ddecbdb49997d6f67 | 467 | py | Python | test/categories/test_language.py | bethwilliamson/qual-id | 7bbaf89d28df7740cb5de7dd0893c8bf4a36fa60 | [
"MIT"
] | null | null | null | test/categories/test_language.py | bethwilliamson/qual-id | 7bbaf89d28df7740cb5de7dd0893c8bf4a36fa60 | [
"MIT"
] | null | null | null | test/categories/test_language.py | bethwilliamson/qual-id | 7bbaf89d28df7740cb5de7dd0893c8bf4a36fa60 | [
"MIT"
] | null | null | null | import unittest
from qual_id.categories.language import Language
from test.utils.category_helper import CategoryHelper
class TestLanguage(unittest.TestCase):
    """Validate that the Language category exposes a well-formed value list."""

    def setUp(self):
        # Fresh category instance for every test.
        self.language = Language()

    def test__get_values__is_valid(self):
        # The helper returns "" for a valid category; anything else is the
        # human-readable reason the values are invalid.
        problem = CategoryHelper.get_values_error_message(self.language)
        self.assertTrue(problem == "", problem)


if __name__ == "__main__":  # pragma: no cover
    unittest.main()
| 27.470588 | 78 | 0.747323 |
acecc649ada79cf0ef7c9ecf2b3f43bd442a11bf | 1,431 | py | Python | var/spack/repos/builtin/packages/freetype/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/freetype/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/freetype/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Freetype(AutotoolsPackage):
    """FreeType is a freely available software library to render fonts.
    It is written in C, designed to be small, efficient, highly customizable,
    and portable while capable of producing high-quality output (glyph images)
    of most vector and bitmap font formats."""

    homepage = "https://www.freetype.org/index.html"
    url = "http://download.savannah.gnu.org/releases/freetype/freetype-2.7.1.tar.gz"

    version('2.9.1', 'ec391504e55498adceb30baceebd147a6e963f636eb617424bcfc47a169898ce')
    version('2.7.1', '78701bee8d249578d83bb9a2f3aa3616')
    version('2.7', '337139e5c7c5bd645fe130608e0fa8b5')
    version('2.5.3', 'cafe9f210e45360279c730d27bf071e9')

    depends_on('libpng')
    depends_on('bzip2')
    depends_on('pkgconfig', type='build')

    def configure_args(self):
        """Arguments passed to ./configure."""
        # Build without HarfBuzz support.
        args = ['--with-harfbuzz=no']
        if self.spec.satisfies('@2.9.1:'):
            # Ask newer releases to also install the freetype-config script.
            args.append('--enable-freetype-config')
        return args

    def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
        """Expose the freetype2 include directory to dependents."""
        # Headers live under <prefix>/include/freetype2, which compilers do
        # not search by default.
        spack_env.prepend_path('CPATH', self.prefix.include.freetype2)
        run_env.prepend_path('CPATH', self.prefix.include.freetype2)
| 39.75 | 89 | 0.716981 |
acecc76453637fcc2fa93acbbb979d3c8fca00b3 | 1,664 | py | Python | deeplens/media/segmentation.py | primkey7607/deeplens-cv | ffd2548cca6fe36330cf851b21ae3412d567040f | [
"MIT"
] | 11 | 2019-10-07T22:06:30.000Z | 2020-08-26T22:10:53.000Z | deeplens/media/segmentation.py | primkey7607/deeplens-cv | ffd2548cca6fe36330cf851b21ae3412d567040f | [
"MIT"
] | 16 | 2019-11-02T00:32:00.000Z | 2022-02-10T00:23:32.000Z | deeplens/media/segmentation.py | primkey7607/deeplens-cv | ffd2548cca6fe36330cf851b21ae3412d567040f | [
"MIT"
] | 9 | 2019-10-07T13:33:13.000Z | 2020-09-27T09:50:58.000Z | import cv2
from deeplens.struct import *
from deeplens.dataflow.xform import *
from deeplens.dataflow.map import *
import numpy as np
DEFAULT_FRAME_RATE = 30.0
def avg_change(input, blur):
    """Mean inter-frame absolute difference over the first 100 frames.

    The stream is converted to grayscale and Gaussian-blurred (kernel size
    ``blur``) before differencing so that sensor noise is suppressed.
    """
    stream = VideoStream(input, limit=100)
    previous = None
    deltas = []
    for frame in stream[Grayscale()][Blur(blur)]:
        current = frame['data']
        if previous is not None:
            deltas.append(np.mean(cv2.absdiff(current, previous)))
        previous = current
    return np.mean(deltas)
#takes in a file and breaks it up into discrete shots (returns a list of new files)
def shot_segmentation(input, blur=11, threshold=50, frame_rate=DEFAULT_FRAME_RATE, skip=1, encoding='XVID'):
    """Break a video file into discrete shots, one .avi file per shot.

    A shot boundary is declared when the mean absolute difference between
    consecutive (grayscale, blurred) frames exceeds ``threshold``; the check
    is only performed every ``skip`` frames.  The boundary frame itself is
    dropped (it belongs to neither shot).

    Returns a list of ``((start_frame, end_frame), file_name)`` tuples.

    Fix over the previous version: the final (still-open) segment is now
    released and included in the returned list; before, its VideoWriter was
    leaked and the last shot silently went missing from the result.
    """
    counter = 0          # frames written into the current segment file
    seq = 0              # segment index, used in the output file name
    output_files = []
    out = None           # current cv2.VideoWriter (None until first frame)
    file_name = None
    last_frame = None    # frame number of the most recently written frame
    vstream = VideoStream(input)
    prev = None
    for frame in vstream:
        if counter == 0:
            # Start a new segment file.
            fourcc = cv2.VideoWriter_fourcc(*encoding)
            file_name = input + '.' + str(seq) + '.avi'
            out = cv2.VideoWriter(file_name,
                                  fourcc,
                                  frame_rate,
                                  (vstream.width, vstream.height),
                                  True)
        orig = frame['data'].copy()
        img = cv2.cvtColor(frame['data'], cv2.COLOR_BGR2GRAY)
        img = cv2.GaussianBlur(img, (blur, blur), 0)
        if prev is not None and counter % skip == 0:
            frameDelta = cv2.absdiff(img, prev)
            if np.mean(frameDelta) > threshold:
                # Shot boundary: close the current segment and record it.
                output_files.append(((frame['frame'] - counter, frame['frame']), file_name))
                counter = 0
                seq += 1
                out.release()
                prev = img
                continue
        out.write(orig)
        prev = img
        last_frame = frame['frame']
        counter += 1
    # Flush the trailing segment (frames written since the last boundary).
    if out is not None and counter > 0:
        out.release()
        output_files.append(((last_frame + 1 - counter, last_frame + 1), file_name))
    return output_files
| 21.333333 | 108 | 0.673077 |
acecc7afdc6d36340ca909915f788eb69374e163 | 662 | py | Python | Manannikov_K_DZ_2/test_job_4.py | manannikovkonstantin/1824_GB_Python_1 | 0f9372951072caef1370710917ef6ce5d276b4da | [
"MIT"
] | null | null | null | Manannikov_K_DZ_2/test_job_4.py | manannikovkonstantin/1824_GB_Python_1 | 0f9372951072caef1370710917ef6ce5d276b4da | [
"MIT"
] | null | null | null | Manannikov_K_DZ_2/test_job_4.py | manannikovkonstantin/1824_GB_Python_1 | 0f9372951072caef1370710917ef6ce5d276b4da | [
"MIT"
] | null | null | null | """Дан список, содержащий искажённые данные с должностями и именами сотрудников:
['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА', 'токарь высшего разряда нИКОЛАй', 'директор аэлита']
Известно, что имя сотрудника всегда в конце строки. Сформировать из этих имён и вывести на экран фразы вида:
'Привет, Игорь!' Подумать, как получить имена сотрудников из элементов списка, как привести их к корректному виду.
Можно ли при этом не создавать новый список?"""
# Greet each employee: the name is always the last word of the record, and is
# normalised to Title case before printing.
for record in ['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА', 'токарь высшего разряда нИКОЛАй', 'директор аэлита']:
    name = record.split(" ")[-1].lower().title()
    print(f"'Привет, {name}!'")
| 55.166667 | 120 | 0.747734 |
acecc87d735b13f3335387af0ec93fa29754ffcb | 3,698 | py | Python | addons/gitlab/utils.py | laurenrevere/osf.io | f08daebf6ff280375ef966fdce4d3b6602c2236e | [
"Apache-2.0"
] | null | null | null | addons/gitlab/utils.py | laurenrevere/osf.io | f08daebf6ff280375ef966fdce4d3b6602c2236e | [
"Apache-2.0"
] | 18 | 2020-03-24T15:26:02.000Z | 2022-03-08T21:30:39.000Z | addons/gitlab/utils.py | kounoAkihiro/SV-RDM-OSF | 76fb0c739f4cdabf03b5bfd2bc63d83b1c2d4796 | [
"Apache-2.0"
] | 1 | 2021-10-04T21:16:56.000Z | 2021-10-04T21:16:56.000Z | import hmac
import uuid
import urllib
import hashlib
import httplib as http
from framework.exceptions import HTTPError
from addons.base.exceptions import HookError
from addons.gitlab.api import GitLabClient
MESSAGE_BASE = 'via the Open Science Framework'
MESSAGES = {
'add': 'Added {0}'.format(MESSAGE_BASE),
'move': 'Moved {0}'.format(MESSAGE_BASE),
'copy': 'Copied {0}'.format(MESSAGE_BASE),
'update': 'Updated {0}'.format(MESSAGE_BASE),
'delete': 'Deleted {0}'.format(MESSAGE_BASE),
}
def make_hook_secret():
    """Return a random 32-character hex token used to sign webhook payloads."""
    # uuid4().hex is exactly str(uuid4()) with the dashes removed.
    return uuid.uuid4().hex
HOOK_SIGNATURE_KEY = 'X-Hub-Signature'
def verify_hook_signature(node_settings, data, headers):
    """Verify hook signature.

    :param GitLabNodeSettings node_settings:
    :param dict data: JSON response body
    :param dict headers: Request headers
    :raises: HookError if signature is missing or invalid
    """
    if node_settings.hook_secret is None:
        raise HookError('No secret key')
    digest = hmac.new(
        str(node_settings.hook_secret),
        data,
        digestmod=hashlib.sha1
    ).hexdigest()
    signature = headers.get(HOOK_SIGNATURE_KEY, '').replace('sha1=', '')
    # Constant-time comparison: a plain != leaks timing information that an
    # attacker can use to forge signatures byte by byte.
    if not hmac.compare_digest(digest, signature):
        raise HookError('Invalid signature')
def get_path(kwargs, required=True):
    """Extract and URL-decode the 'path' value from view kwargs.

    Returns None when the path is absent and not required; raises
    HTTPError(400) when it is required but missing or empty.
    """
    path = kwargs.get('path')
    if path:
        return urllib.unquote_plus(path)
    if required:
        raise HTTPError(http.BAD_REQUEST)
def get_refs(addon, branch=None, sha=None, connection=None):
    """Resolve the branch name, its head SHA and the full branch list.

    :param str branch: Branch name. If None, the repo's default branch is used.
    :param str sha: The SHA.
    :param GitLab connection: GitLab API object; built from the addon's
        external account when not supplied.
    """
    client = connection or GitLabClient(external_account=addon.external_account)
    # A SHA without a branch is ambiguous.
    if sha and not branch:
        raise HTTPError(http.BAD_REQUEST)
    # Fall back to the repository's default branch.
    if not branch:
        repo = client.repo(addon.repo_id)
        if repo is None:
            return None, None, None
        branch = repo['default_branch']
    branches = client.branches(addon.repo_id)
    # NOTE(review): this overwrites any caller-supplied sha with the current
    # branch head — confirm that is the intended behaviour.
    for candidate in branches:
        if candidate['name'] == branch:
            sha = candidate['commit']['id']
            break
    return branch, sha, branches
def check_permissions(node_settings, auth, connection, branch, sha=None, repo=None):
    """Return True when *auth* may edit this GitLab-linked node.

    Requires all of: authorised user settings, at least Developer access on
    the GitLab project or group, an editable non-registration node, and (when
    a sha is given) that the sha is the current head of *branch*.
    """
    user_settings = node_settings.user_settings
    has_access = False
    has_auth = bool(user_settings and user_settings.has_auth)
    if has_auth:
        repo = repo or connection.repo(node_settings.repo_id)
        # Guard before touching repo: previously repo['permissions'] was read
        # first, so a missing repo crashed with a TypeError instead of simply
        # denying access.
        if repo is not None:
            project_permissions = repo['permissions'].get('project_access') or {}
            group_permissions = repo['permissions'].get('group_access') or {}
            # See https://docs.gitlab.com/ee/api/members.html (30 == Developer)
            has_access = (
                project_permissions.get('access_level', 0) >= 30 or
                group_permissions.get('access_level', 0) >= 30
            )
    if sha:
        current_branch = connection.branches(node_settings.repo_id, branch)
        # Editing is only allowed at the branch head.
        is_head = sha == current_branch['commit']['id']
    else:
        is_head = True
    can_edit = (
        node_settings.owner.can_edit(auth) and
        not node_settings.owner.is_registration and
        has_access and
        is_head
    )
    return can_edit
| 30.561983 | 84 | 0.664143 |
acecc92286013713e9aa7fb9a58e14bbe9130517 | 13,218 | py | Python | utility.py | syaoming/molle | 51d036555e06d634dd53f56d479ab41561e36618 | [
"MIT"
] | null | null | null | utility.py | syaoming/molle | 51d036555e06d634dd53f56d479ab41561e36618 | [
"MIT"
] | null | null | null | utility.py | syaoming/molle | 51d036555e06d634dd53f56d479ab41561e36618 | [
"MIT"
] | null | null | null | from z3 import *
from operator import or_
from pprint import pprint
def _sorted_inters(inter_list, sp):
''' Sorts the inter_list = [('from', 'to', 'positive'), ...] into a dict,
where keyvalue is a couple of number tuples, wich integer codes as keys.
e.g.
{1: ( (2, 3), (5, 9) )} : species 1 is activated by 2 and 3, and repressed
by 5 and 9.
'''
d = dict([(c, ([], [])) for c in range(len(sp))]) # initialization
for i in inter_list:
f, t = i[:2]
if 'negative' in i: idx = 1
elif 'positive' in i: idx = 0
else:
print 'no +/- assigend to interactions %d'%(inter_list)
raise(Error)
tcode, fcode = sp.index(t), sp.index(f)
d.setdefault(tcode, ([], []))[idx].append(fcode)
return d
def readModel(f, opmt = True):
    ''' Take a file Object as input, return a tuple of 6 objects:
    species: a tuple of gene name.
    logics : a dict. { gene_name: list_of_allowed_logic_numbers }
    kofe   : a dict. { "FE": list_of_FEable_gene, "KO": list_of_KOable_gene }
    defI   : a dict of defined interations. Processed by _sorted_inters()
    optI   : a dict of optional interactions.

    Fixed: gene/mark extraction previously used filter() on a string, which
    returns an iterator on Python 3 and corrupted the dictionary keys; the
    ''.join form below behaves identically on Python 2 as well.
    '''
    species = []
    logics = {}
    kofe = {'KO': [], 'FE': []}
    def_inters_list = []
    opt_inters_list = []
    # First line: comma-separated components, e.g. "A(0 1), B+, C-".
    for token in f.readline().strip().split(','):
        # Gene name sits before any '(' and may carry +/- KO/FE marks.
        if '(' in token:
            gene_ = token[:token.index('(')].strip()
        else:
            gene_ = token.strip()
        gene = ''.join(ch for ch in gene_ if ch not in '+-')
        mark = ''.join(ch for ch in gene_ if ch in '+-')
        # '+' marks a force-expressible gene, '-' a knock-outable one.
        if '+' in mark:
            kofe['FE'].append(gene)
        if '-' in mark:
            kofe['KO'].append(gene)
        # Allowed logic numbers in parentheses; default is all 18.
        if '(' in token:
            left, right = token.index('('), token.index(')')
            rules = tuple(int(n) for n in token[left + 1:right].split())
        else:
            rules = tuple(range(18))
        logics[gene] = rules
        species.append(gene)
    # Remaining lines: "from to positive|negative [optional]".
    for line in f.readlines():
        fields = line.strip().split()
        if not fields:
            continue  # skip empty line
        if 'optional' in fields:
            opt_inters_list.append(tuple(fields[:3]))
        else:
            def_inters_list.append(tuple(fields[:3]))
    defI = _sorted_inters(def_inters_list, species)
    optI = _sorted_inters(opt_inters_list, species)
    return (species, logics, kofe, defI, optI)
# kept from old version
def _addExp(d, name, time_point, state_names_list):
d.setdefault(name, []).append( (int(time_point), state_names_list) )
# kept from old version
def _addState(d, state_name, gene, value):
d.setdefault(state_name, []).append( (gene, int(value)) )
# kept from old version
def readExp(f):
    '''
    Take the file for experiment constrains, return two dicts:
    exps: the Experimental constrains for every experiment
    states: records the mapping of shortcut name to node states
    '''
    exps = dict()
    states = dict()
    # Non-empty while we are inside a "let <shortcut> { ... }" block.
    shortcut = ''
    for l in f.readlines():
        l = l.strip();
        if(not l): continue; # skip empty line
        # Strip trailing comments (everything after '"') and ';' terminators.
        # The bare `None` in the except arms is a deliberate no-op: absence of
        # the character simply leaves the line unchanged.
        try: l = l[:l.index('"')] # remove commment
        except ValueError: None
        try: l = l[:l.index(';')] # remove ;
        except ValueError: None
        if(shortcut): # inside the braket { }
            if(l[0] == '{'): continue # skip left bracket
            elif(l[0] == '}'): shortcut = '' # exit the braket;
            else:
                # "name = value ..." line: one node state for the open shortcut.
                (left, right) = l.split('=');
                name = left.strip();
                value = right.split()[0];
                _addState(states, shortcut, name, value); # record configuration
        l = l.split();
        if(l[0] == "//"): continue # comment line
        elif(l[0] == "under"): _addExp(exps, l[1], l[3], l[4:]) # recordexp
        elif(l[0] == "let"):
            shortcut = l[1]; # ready to enter the braket
            # Drop an optional ':' suffix from the shortcut name.
            try: shortcut = shortcut[:shortcut.index(':')]
            except ValueError: None
    return (exps, states);
def compati(l, actn, repn):
    """Prune the candidate logic numbers to speed up solving.

    With no activators and no repressors only the sentinel (-1,) remains;
    repressor-only nodes keep logics > 15, activator-only nodes logics < 2.
    Lists already narrowed below 16 entries are returned untouched.
    (The original author was unsure about validity when actn == 0.)
    """
    if len(l) < 16:
        return l
    no_act = (actn == 0)
    no_rep = (repn == 0)
    if no_act and no_rep:
        return (-1, )
    if no_act:
        # only repressors
        return filter(lambda x: x > 15, l) or (-1, )
    if no_rep:
        # only activator
        return filter(lambda x: x < 2, l) or (-1, )
    return l
zero = BitVecVal(0, 1)
def Any(bvs):
    """OR together all 1-bit vectors in bvs; yields `zero` for an empty list."""
    acc = zero
    for bv in bvs:
        acc = or_(acc, bv)
    return acc
def _concat(bvs):
if len(bvs) == 1: return bvs[0]
else: return Concat(bvs)
def _create_bit_rule(num, act_list, rep_list, A, R):
    ''' Create the update rule that return bit-vector of length 1.

    num is the logic-function number (0..17, or -1 for "always false");
    act_list / rep_list are the 1-bit activator / repressor inputs, and
    A / R are the bit-vectors selecting which of them are enabled.
    The commented-out returns below are earlier, stricter formulations kept
    for reference.
    '''
    if num == -1: return BoolVal(False) # special case
    # initialization
    if act_list: act = _concat(act_list)
    else: act = A = zero
    if rep_list: rep = _concat(rep_list)
    else: rep = R = zero
    # creating result
    if num == 0:
        return And(R == 0, A != 0, A & act == A)
    elif num == 1:
        return And(R == 0, A & act != 0)
        #return And(R == 0, A != 0, A & act != 0)
    elif num == 2:
        return Or( And(R == 0, A != 0, A & act == A),
                   And(R != 0, rep & R == 0, A & act != 0) )
        #return Or( And(R == 0, A != 0, A & act == A),
        #           And(R != 0, A != 0, rep & R == 0, A & act != 0) )
    elif num == 3:
        return And(A & act != 0, rep & R == 0)
    elif num == 4:
        return And( A != 0, A & act == A,
                    Or(R == 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, A & act == A),
        #           And(A != 0, A & act == A, rep & R != R) )
        #return Or( And(R == 0, A != 0, A & act == A),
        #           And(R != 0, A != 0, A & act == A, rep & R != R) )
    elif num == 5:
        return Or( And(R == 0, act & A != 0),
                   And(A != 0, act & A == A, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A == A, rep & R != R) )
    elif num == 6:
        return Or( And(R == 0, A != 0, act & A == A),
                   And(act & A != 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, act & A != 0, rep & R != R) )
    elif num == 7:
        return Or( And(R == 0, act & A != 0),
                   And(act & A != 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A != 0, rep & R != R) )
    elif num == 8:
        return And(A != 0, act & A == A)
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, act & A == A) )
    elif num == 9:
        return Or( And(R == 0, act & A != 0),
                   And(R != 0, A != 0, act & A == A) )
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A == A) )
    elif num == 10:
        return Or( And(A != 0, act & A == A),
                   And(R != 0, act & A != 0, rep & R == 0) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, Or(act & A == A,
        #                                  And(act & A != 0, rep & R == 0))) )
    elif num == 11:
        return Or( And(R == 0, A != 0, act & A != 0),
                   And(R != 0, A != 0, Or(act & A == A,
                                          And(act & A != 0, rep & R == 0))) )
    elif num == 12:
        return Or( And(A != 0, act & A == A),
                   And(act & A != 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, Or(act & A == A,
        #                                  And(act & A != 0, rep & R != R))) )
    elif num == 13:
        return Or( And(R == 0, A != 0, act & A != 0),
                   And(R != 0, A != 0, Or(act & A == A,
                                          And(act & A != 0, rep & R != R))) )
    elif num == 14:
        return Or( And(R == 0, A != 0, act & A == A),
                   And(R != 0, act & A != 0) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, act & A != 0) )
    elif num == 15:
        return act & A != 0
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A != 0) )
    elif num == 16:
        return And(A == 0, rep & R != 0, rep & R != R)
        #return And(A == 0, R != 0, rep & R != 0, rep & R != R)
    elif num == 17:
        return And(A == 0, R != 0, rep & R == 0)
    else:
        print "Strange Num"
        raise ValueError
def _with_kofe(kofe_idx, ko, fe, expr):
    """Wrap *expr* with the knock-out / force-expression overrides.

    kofe_idx = (koc, fec): 1-based bit positions of this gene in the ko/fe
    bit-vectors, 0 when the gene has no such capability.  FE forces the
    result true, KO forces it false, otherwise expr decides.
    """
    koc, fec = kofe_idx
    # `is not None` (rather than truthiness) because z3 expressions do not
    # support bool(); the 1-based index avoids the 0 == False ambiguity.
    ko_bit = (Extract(koc - 1, koc - 1, ko) == 1) if koc else None
    fe_bit = (Extract(fec - 1, fec - 1, fe) == 1) if fec else None
    if ko_bit is not None:
        guarded = And(Not(ko_bit), expr)
        return Or(fe_bit, guarded) if fe_bit is not None else guarded
    if fe_bit is not None:
        return Or(fe_bit, expr)
    return expr
def makeFunction(acts, reps, kofe_index, logic, A, R):
    ''' Makes a function that takes q, ko, fe and returns the corresponding
    z3 expression.  A is the activator-selecting bit-vector, R the one for
    repressors; acts/reps give the bit positions of the inputs inside q.
    '''
    def update(q, ko, fe):
        act_bits = [Extract(i, i, q) for i in acts]
        rep_bits = [Extract(i, i, q) for i in reps]
        rule = _create_bit_rule(logic, act_bits, rep_bits, A, R)
        return simplify(_with_kofe(kofe_index, ko, fe, rule))
    return update
def isExpOf2(bvv):
    """True iff the bit-vector value is a power of two (exactly one set bit).

    str.count replaces the old len(filter(...)) form, which only worked on
    Python 2 (filter returns a list there) and was needlessly indirect.
    """
    return bin(bvv.as_long()).count('1') == 1
### Output Utilities ###
#########################
boolf = BoolVal(False)
def conv_time(secs, th=300):
    """Format a duration: plain seconds up to *th*, minutes beyond it."""
    if secs <= th:
        return '%.1f sec' % secs
    return '%.1f min' % (secs / 60)
def _Or(l):
if(not l): return boolf
if(len(l) == 1): return l[0]
else: return Or(l);
def _And(l):
if(not l): return boolf
if(len(l) == 1): return l[0]
else: return And(l);
def _create_sym_rule(num, act, rep):
    """Symbolic (boolean, human-readable) version of logic rule *num* over the
    activator names in *act* and repressor names in *rep*; used for printing
    the solved model.  Mirrors _create_bit_rule — keep the two in sync."""
    # Out-of-range rule numbers produce a sentinel Bool so the caller can see
    # the bad arguments in the printout instead of crashing.
    if num < -1 or num > 17:
        return Bool('Strang, num=%d, act=%s, rep=%s'%(num,str(act), str(rep)))
    if num == -1: return boolf
    if act:
        actt = [Bool(node) for node in act]
    if rep:
        rept = [Bool(node) for node in rep]
    if act:
        if not rep:
            # Activators only: even rules need all of them, odd rules any.
            if num%2 == 0: return _And(actt)
            else: return _Or(actt)
        elif num == 0: return boolf
        elif num == 1: return boolf
        elif(num < 4): return And(_Or(actt), Not(_Or(rept)))
        elif(num < 6): return And(_And(actt), Not(_And(rept)));
        elif(num < 8): return And(_Or(actt), Not(_And(rept)))
        elif(num < 10): return _And(actt)
        elif(num < 12): return Or(_And(actt), And(_Or(actt), Not(_Or(rept))))
        elif(num < 14): return Or(_And(actt), And(_Or(actt), Not(_And(rept))))
        elif(num < 16): return _Or(actt)
        else: return boolf
    if rep:
        # Repressor-only rules (16 and 17).
        if num == 16: return And(_Or(rept), Not(_And(rept)))
        elif num==17: return Not(_Or(rept));
        else: return boolf
    else: return boolf # no act no rep
def checkBit(i, bv):
    """True iff bit *i* of bit-vector *bv* is set.

    simplify is necessary: Extract alone yields an unevaluated expression
    without an as_long() value.
    """
    bit = Extract(i, i, bv)
    return simplify(bit).as_long() == 1
def bv2logic(lbvv, llist):
    """Map a one-hot bit-vector value (a power of two) to the logic number it
    selects from llist."""
    assert isExpOf2(lbvv)
    # bit_length() - 1 is the index of the single set bit.
    lcode = lbvv.as_long().bit_length() - 1
    return llist[lcode]
def bv2inters(ibvv, ilist, species):
    """Decode a selection bit-vector into the species names it picks.

    ilist holds candidate species codes; bit (size-1 - i) of ibvv selects
    candidate ilist[i].
    """
    if is_true(simplify(ibvv == 0)):
        return []
    assert is_false(simplify(ibvv == 0))
    top = ibvv.size() - 1
    return [species[code] for i, code in enumerate(ilist) if checkBit(top - i, ibvv)]
def getDetail(m, A_, R_, L_, species, inters, logics):
    """Extract the chosen activators (A), repressors (R) and logic numbers (L)
    for every species from a solved z3 model *m*.  A_/R_/L_ map species name
    to the corresponding solver variable; `m[var] or zero` falls back to an
    all-zero selection when the model leaves the variable unconstrained."""
    A = {}; R = {}; L = {}
    for c, s in enumerate(species):
        L[s] = bv2logic(m[L_[s]], logics[s])
        if A_[s]: A[s] = bv2inters(m[A_[s]] or zero, inters[c][0], species)
        else: A[s] = []
        if R_[s]: R[s] = bv2inters(m[R_[s]] or zero, inters[c][1], species)
        else: R[s] = []
    return (A, R, L)
def printModel(species, A, R, L, config = True, model = True):
    ''' Print the solved model nicely.

    config: print one line per species with its logic number, activators
            ("<-") and repressors ("|-").
    model:  print the symbolic update rule for every species.
    (Python 2 print statements — this module is Python 2 code.)
    '''
    # printing the model
    if config:
        print ">>\tConfigurations: "
        for s in species:
            print ">>\t\t%s:%d%s%s" \
                  %(s, L[s],
                    A[s] and '\t<- ' + ','.join(A[s]) or '',
                    R[s] and '\t|- ' + ','.join(R[s]) or '')
    if model:
        print ">>\tModel: "
        for s in species: print ">>\t\t%s' = %s" \
            %(s,simplify( _create_sym_rule(L[s], A[s], R[s]) ))
from smtplib import SMTP, SMTPAuthenticationError
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def mailMe(addr, pw, content, title = 'Computation Finished'):
    """Send *content* as a plain-text e-mail from *addr* to itself via the
    hard-coded QQ SMTP server, logging in with password *pw*.

    NOTE(review): on authentication failure the password is printed to
    stdout — that is a security risk and should be removed or masked.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = title
    msg['From'] = msg['To'] = addr
    msg.attach(MIMEText(content, 'plain'))
    server = SMTP('smtp.qq.com')
    try:
        server.login(addr, pw)
        server.sendmail(addr, addr, msg.as_string())
        server.quit()
    except SMTPAuthenticationError:
        print ">> SMTP: login fail with %s:%s"%(addr, pw)
| 35.821138 | 79 | 0.494175 |
acecc952886ec086f73919f9e37e1d892cedfd6a | 128 | py | Python | Lesson_5/packet/math_op.py | mirdinemris/Python_lesson_2 | bf2fce1dbd6ae635d6aa631703b9930b164972b0 | [
"MIT"
] | null | null | null | Lesson_5/packet/math_op.py | mirdinemris/Python_lesson_2 | bf2fce1dbd6ae635d6aa631703b9930b164972b0 | [
"MIT"
] | 1 | 2020-04-14T14:13:57.000Z | 2020-04-14T14:13:57.000Z | Lesson_5/packet/math_op.py | mirdinemris/Python_lesson_2 | bf2fce1dbd6ae635d6aa631703b9930b164972b0 | [
"MIT"
] | null | null | null | def my_add(x,y):
return x+y
def my_sub(x, y):
    """Return x minus y."""
    return x - y


def My_mult(x, y):
    """Return the product of x and y."""
    return x * y
def my_div(x,y):
return x/y | 16 | 17 | 0.601563 |
acecca3d5dbb4a98767c98e5b9f3db7006c29edb | 14,087 | py | Python | test/functional/interface_aRIA_cli.py | vatsal2312/aRIA_AriaCurrency | 446ae1e9a6134d9f3159d9c09c645a86fdc86e54 | [
"MIT"
] | 2 | 2022-01-16T18:19:55.000Z | 2022-02-16T13:19:53.000Z | test/functional/interface_aRIA_cli.py | oneitguy/aRIA | bd8c86cf98fd715868808767f08f146d5e867e44 | [
"MIT"
] | 3 | 2022-01-18T18:15:25.000Z | 2022-03-02T01:16:13.000Z | test/functional/interface_aRIA_cli.py | oneitguy/aRIA | bd8c86cf98fd715868808767f08f146d5e867e44 | [
"MIT"
] | 2 | 2022-01-16T18:20:26.000Z | 2022-03-04T21:52:22.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The aRIA Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test aRIA-cli"""
from decimal import Decimal
from test_framework.test_framework import aRIATestFramework
from test_framework.util import (
assert_equal,
assert_raises_process_error,
assert_raises_rpc_error,
get_auth_cookie,
)
# The block reward of coinbaseoutput.nValue (50) BTC/block matures after
# COINBASE_MATURITY (100) blocks. Therefore, after mining 101 blocks we expect
# node 0 to have a balance of (BLOCKS - COINBASE_MATURITY) * 50 BTC/block.
BLOCKS = 101
BALANCE = (BLOCKS - 100) * 50
JSON_PARSING_ERROR = 'error: Error parsing JSON: foo'
BLOCKS_VALUE_OF_ZERO = 'error: the first argument (number of blocks to generate, default: 1) must be an integer value greater than zero'
TOO_MANY_ARGS = 'error: too many arguments (maximum 2 for nblocks and maxtries)'
WALLET_NOT_LOADED = 'Requested wallet does not exist or is not loaded'
WALLET_NOT_SPECIFIED = 'Wallet file not specified'
class TestaRIACli(aRIATestFramework):
    """Functional tests for the aRIA-cli binary: auth options, -getinfo,
    -generate (with and without -rpcwallet), -version and -rpcwait.

    Fix: the '-getinfo with -rpcwallet=unloaded wallet' check previously
    assigned its result to `cli_get_info` while asserting on the stale
    `cli_get_info_keys` from an earlier check, making the assertions vacuous.
    """

    def set_test_params(self):
        """Single node on a clean chain."""
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        """Skip when the CLI binary is not built."""
        self.skip_if_no_cli()

    def run_test(self):
        """Main test logic"""
        self.nodes[0].generate(BLOCKS)

        self.log.info("Compare responses from getblockchaininfo RPC and `aRIA-cli getblockchaininfo`")
        cli_response = self.nodes[0].cli.getblockchaininfo()
        rpc_response = self.nodes[0].getblockchaininfo()
        assert_equal(cli_response, rpc_response)

        user, password = get_auth_cookie(self.nodes[0].datadir, self.chain)

        self.log.info("Test -stdinrpcpass option")
        assert_equal(BLOCKS, self.nodes[0].cli('-rpcuser={}'.format(user), '-stdinrpcpass', input=password).getblockcount())
        assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli('-rpcuser={}'.format(user), '-stdinrpcpass', input='foo').echo)

        self.log.info("Test -stdin and -stdinrpcpass")
        assert_equal(['foo', 'bar'], self.nodes[0].cli('-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input=password + '\nfoo\nbar').echo())
        assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli('-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input='foo').echo)

        self.log.info("Test connecting to a non-existing server")
        assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)

        self.log.info("Test connecting with non-existing RPC cookie file")
        assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)

        self.log.info("Test -getinfo with arguments fails")
        assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)

        self.log.info("Test -getinfo returns expected network and blockchain info")
        if self.is_wallet_compiled():
            self.nodes[0].encryptwallet(password)
        cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
        network_info = self.nodes[0].getnetworkinfo()
        blockchain_info = self.nodes[0].getblockchaininfo()
        assert_equal(cli_get_info['version'], network_info['version'])
        assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
        assert_equal(cli_get_info['headers'], blockchain_info['headers'])
        assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
        assert_equal(
            cli_get_info['connections'],
            {
                'in': network_info['connections_in'],
                'out': network_info['connections_out'],
                'total': network_info['connections']
            }
        )
        assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
        assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
        assert_equal(cli_get_info['chain'], blockchain_info['chain'])

        if self.is_wallet_compiled():
            self.log.info("Test -getinfo and aRIA-cli getwalletinfo return expected wallet info")
            assert_equal(cli_get_info['balance'], BALANCE)
            assert 'balances' not in cli_get_info.keys()
            wallet_info = self.nodes[0].getwalletinfo()
            assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
            assert_equal(cli_get_info['unlocked_until'], wallet_info['unlocked_until'])
            assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
            assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
            assert_equal(self.nodes[0].cli.getwalletinfo(), wallet_info)

            # Setup to test -getinfo, -generate, and -rpcwallet= with multiple wallets.
            wallets = [self.default_wallet_name, 'Encrypted', 'secret']
            amounts = [BALANCE + Decimal('9.999928'), Decimal(9), Decimal(31)]
            self.nodes[0].createwallet(wallet_name=wallets[1])
            self.nodes[0].createwallet(wallet_name=wallets[2])
            w1 = self.nodes[0].get_wallet_rpc(wallets[0])
            w2 = self.nodes[0].get_wallet_rpc(wallets[1])
            w3 = self.nodes[0].get_wallet_rpc(wallets[2])
            rpcwallet2 = '-rpcwallet={}'.format(wallets[1])
            rpcwallet3 = '-rpcwallet={}'.format(wallets[2])
            w1.walletpassphrase(password, self.rpc_timeout)
            w2.encryptwallet(password)
            w1.sendtoaddress(w2.getnewaddress(), amounts[1])
            w1.sendtoaddress(w3.getnewaddress(), amounts[2])

            # Mine a block to confirm; adds a block reward (50 BTC) to the default wallet.
            self.nodes[0].generate(1)

            self.log.info("Test -getinfo with multiple wallets and -rpcwallet returns specified wallet balance")
            for i in range(len(wallets)):
                cli_get_info = self.nodes[0].cli('-getinfo', '-rpcwallet={}'.format(wallets[i])).send_cli()
                assert 'balances' not in cli_get_info.keys()
                assert_equal(cli_get_info['balance'], amounts[i])

            self.log.info("Test -getinfo with multiple wallets and -rpcwallet=non-existing-wallet returns no balances")
            cli_get_info_keys = self.nodes[0].cli('-getinfo', '-rpcwallet=does-not-exist').send_cli().keys()
            assert 'balance' not in cli_get_info_keys
            assert 'balances' not in cli_get_info_keys

            self.log.info("Test -getinfo with multiple wallets returns all loaded wallet names and balances")
            assert_equal(set(self.nodes[0].listwallets()), set(wallets))
            cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
            assert 'balance' not in cli_get_info.keys()
            assert_equal(cli_get_info['balances'], {k: v for k, v in zip(wallets, amounts)})

            # Unload the default wallet and re-verify.
            self.nodes[0].unloadwallet(wallets[0])
            assert wallets[0] not in self.nodes[0].listwallets()
            cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
            assert 'balance' not in cli_get_info.keys()
            assert_equal(cli_get_info['balances'], {k: v for k, v in zip(wallets[1:], amounts[1:])})

            self.log.info("Test -getinfo after unloading all wallets except a non-default one returns its balance")
            self.nodes[0].unloadwallet(wallets[2])
            assert_equal(self.nodes[0].listwallets(), [wallets[1]])
            cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
            assert 'balances' not in cli_get_info.keys()
            assert_equal(cli_get_info['balance'], amounts[1])

            self.log.info("Test -getinfo with -rpcwallet=remaining-non-default-wallet returns only its balance")
            cli_get_info = self.nodes[0].cli('-getinfo', rpcwallet2).send_cli()
            assert 'balances' not in cli_get_info.keys()
            assert_equal(cli_get_info['balance'], amounts[1])

            self.log.info("Test -getinfo with -rpcwallet=unloaded wallet returns no balances")
            # Fix: re-query and reassign the keys; previously the result was
            # bound to `cli_get_info` while the asserts checked the stale
            # `cli_get_info_keys` from the non-existing-wallet case above.
            cli_get_info_keys = self.nodes[0].cli('-getinfo', rpcwallet3).send_cli().keys()
            assert 'balance' not in cli_get_info_keys
            assert 'balances' not in cli_get_info_keys

            # Test aRIA-cli -generate.
            n1 = 3
            n2 = 4
            w2.walletpassphrase(password, self.rpc_timeout)
            blocks = self.nodes[0].getblockcount()

            self.log.info('Test -generate with no args')
            generate = self.nodes[0].cli('-generate').send_cli()
            assert_equal(set(generate.keys()), {'address', 'blocks'})
            assert_equal(len(generate["blocks"]), 1)
            assert_equal(self.nodes[0].getblockcount(), blocks + 1)

            self.log.info('Test -generate with bad args')
            assert_raises_process_error(1, JSON_PARSING_ERROR, self.nodes[0].cli('-generate', 'foo').echo)
            assert_raises_process_error(1, BLOCKS_VALUE_OF_ZERO, self.nodes[0].cli('-generate', 0).echo)
            assert_raises_process_error(1, TOO_MANY_ARGS, self.nodes[0].cli('-generate', 1, 2, 3).echo)

            self.log.info('Test -generate with nblocks')
            generate = self.nodes[0].cli('-generate', n1).send_cli()
            assert_equal(set(generate.keys()), {'address', 'blocks'})
            assert_equal(len(generate["blocks"]), n1)
            assert_equal(self.nodes[0].getblockcount(), blocks + 1 + n1)

            self.log.info('Test -generate with nblocks and maxtries')
            generate = self.nodes[0].cli('-generate', n2, 1000000).send_cli()
            assert_equal(set(generate.keys()), {'address', 'blocks'})
            assert_equal(len(generate["blocks"]), n2)
            assert_equal(self.nodes[0].getblockcount(), blocks + 1 + n1 + n2)

            self.log.info('Test -generate -rpcwallet in single-wallet mode')
            generate = self.nodes[0].cli(rpcwallet2, '-generate').send_cli()
            assert_equal(set(generate.keys()), {'address', 'blocks'})
            assert_equal(len(generate["blocks"]), 1)
            assert_equal(self.nodes[0].getblockcount(), blocks + 2 + n1 + n2)

            self.log.info('Test -generate -rpcwallet=unloaded wallet raises RPC error')
            assert_raises_rpc_error(-18, WALLET_NOT_LOADED, self.nodes[0].cli(rpcwallet3, '-generate').echo)
            assert_raises_rpc_error(-18, WALLET_NOT_LOADED, self.nodes[0].cli(rpcwallet3, '-generate', 'foo').echo)
            assert_raises_rpc_error(-18, WALLET_NOT_LOADED, self.nodes[0].cli(rpcwallet3, '-generate', 0).echo)
            assert_raises_rpc_error(-18, WALLET_NOT_LOADED, self.nodes[0].cli(rpcwallet3, '-generate', 1, 2, 3).echo)

            # Test aRIA-cli -generate with -rpcwallet in multiwallet mode.
            self.nodes[0].loadwallet(wallets[2])
            n3 = 4
            n4 = 10
            blocks = self.nodes[0].getblockcount()

            self.log.info('Test -generate -rpcwallet with no args')
            generate = self.nodes[0].cli(rpcwallet2, '-generate').send_cli()
            assert_equal(set(generate.keys()), {'address', 'blocks'})
            assert_equal(len(generate["blocks"]), 1)
            assert_equal(self.nodes[0].getblockcount(), blocks + 1)

            self.log.info('Test -generate -rpcwallet with bad args')
            assert_raises_process_error(1, JSON_PARSING_ERROR, self.nodes[0].cli(rpcwallet2, '-generate', 'foo').echo)
            assert_raises_process_error(1, BLOCKS_VALUE_OF_ZERO, self.nodes[0].cli(rpcwallet2, '-generate', 0).echo)
            assert_raises_process_error(1, TOO_MANY_ARGS, self.nodes[0].cli(rpcwallet2, '-generate', 1, 2, 3).echo)

            self.log.info('Test -generate -rpcwallet with nblocks')
            generate = self.nodes[0].cli(rpcwallet2, '-generate', n3).send_cli()
            assert_equal(set(generate.keys()), {'address', 'blocks'})
            assert_equal(len(generate["blocks"]), n3)
            assert_equal(self.nodes[0].getblockcount(), blocks + 1 + n3)

            self.log.info('Test -generate -rpcwallet with nblocks and maxtries')
            generate = self.nodes[0].cli(rpcwallet2, '-generate', n4, 1000000).send_cli()
            assert_equal(set(generate.keys()), {'address', 'blocks'})
            assert_equal(len(generate["blocks"]), n4)
            assert_equal(self.nodes[0].getblockcount(), blocks + 1 + n3 + n4)

            self.log.info('Test -generate without -rpcwallet in multiwallet mode raises RPC error')
            assert_raises_rpc_error(-19, WALLET_NOT_SPECIFIED, self.nodes[0].cli('-generate').echo)
            assert_raises_rpc_error(-19, WALLET_NOT_SPECIFIED, self.nodes[0].cli('-generate', 'foo').echo)
            assert_raises_rpc_error(-19, WALLET_NOT_SPECIFIED, self.nodes[0].cli('-generate', 0).echo)
            assert_raises_rpc_error(-19, WALLET_NOT_SPECIFIED, self.nodes[0].cli('-generate', 1, 2, 3).echo)
        else:
            self.log.info("*** Wallet not compiled; cli getwalletinfo and -getinfo wallet tests skipped")
            self.nodes[0].generate(25)  # maintain block parity with the wallet_compiled conditional branch

        self.log.info("Test -version with node stopped")
        self.stop_node(0)
        cli_response = self.nodes[0].cli('-version').send_cli()
        assert "{} RPC client version".format(self.config['environment']['PACKAGE_NAME']) in cli_response

        self.log.info("Test -rpcwait option successfully waits for RPC connection")
        self.nodes[0].start()  # start node without RPC connection
        self.nodes[0].wait_for_cookie_credentials()  # ensure cookie file is available to avoid race condition
        blocks = self.nodes[0].cli('-rpcwait').send_cli('getblockcount')
        self.nodes[0].wait_for_rpc_connection()
        assert_equal(blocks, BLOCKS + 25)
if __name__ == '__main__':
TestaRIACli().main()
| 56.348 | 166 | 0.651807 |
aceccbb7153da8ab599ed70c6628a76e9a4d6d91 | 773 | py | Python | count-neighbours.py | denisbalyko/checkio-solution | fc512b17578df31d8f6f21cd347424d9353b9c77 | [
"MIT"
] | 13 | 2015-07-09T09:56:56.000Z | 2021-12-24T07:14:15.000Z | count-neighbours.py | denisbalyko/checkio-solution | fc512b17578df31d8f6f21cd347424d9353b9c77 | [
"MIT"
] | null | null | null | count-neighbours.py | denisbalyko/checkio-solution | fc512b17578df31d8f6f21cd347424d9353b9c77 | [
"MIT"
] | 8 | 2016-12-07T15:00:14.000Z | 2020-04-14T23:54:09.000Z | # -*- coding: utf-8 -*-
def count_neighbours(grid, row, col):
    """Sum the values of the up-to-eight cells adjacent to (row, col).

    Neighbours falling outside the grid are ignored, so corner and edge
    cells simply contribute fewer terms.
    """
    rows = len(grid)
    cols = len(grid[0])
    total = 0
    for d_col in (-1, 0, 1):
        for d_row in (-1, 0, 1):
            if d_col == 0 and d_row == 0:
                # The cell itself is not a neighbour.
                continue
            c = col + d_col
            r = row + d_row
            if 0 <= c < cols and 0 <= r < rows:
                total += grid[r][c]
    return total
| 35.136364 | 76 | 0.419146 |
acecccccdf689763a1b358f37baff053a916245b | 15,235 | py | Python | keras/applications/imagenet_utils.py | tsheaff/keras | ee227dda766d769b7499a5549e8ed77b5e88105b | [
"Apache-2.0"
] | 37,222 | 2017-12-13T00:52:55.000Z | 2022-03-31T22:34:35.000Z | keras/applications/imagenet_utils.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 7,624 | 2017-12-13T01:03:40.000Z | 2022-03-31T23:57:24.000Z | keras/applications/imagenet_utils.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 14,914 | 2017-12-13T02:30:46.000Z | 2022-03-30T14:49:16.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for ImageNet data preprocessing & prediction decoding."""
import json
import warnings
import numpy as np
from keras import activations
from keras import backend
from keras.utils import data_utils
from tensorflow.python.util.tf_export import keras_export
# Cache for the ImageNet class-index mapping; populated lazily by
# `decode_predictions` on first use.
CLASS_INDEX = None
# Remote location of the ImageNet class-index JSON file.
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
                    'data/imagenet_class_index.json')
PREPROCESS_INPUT_DOC = """
Preprocesses a tensor or Numpy array encoding a batch of images.
Usage example with `applications.MobileNet`:
```python
i = tf.keras.layers.Input([None, None, 3], dtype = tf.uint8)
x = tf.cast(i, tf.float32)
x = tf.keras.applications.mobilenet.preprocess_input(x)
core = tf.keras.applications.MobileNet()
x = core(x)
model = tf.keras.Model(inputs=[i], outputs=[x])
image = tf.image.decode_png(tf.io.read_file('file.png'))
result = model(image)
```
Args:
x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color
channels, with values in the range [0, 255].
The preprocessed data are written over the input data
if the data types are compatible. To avoid this
behaviour, `numpy.copy(x)` can be used.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").{mode}
Returns:
Preprocessed `numpy.array` or a `tf.Tensor` with type `float32`.
{ret}
Raises:
{error}
"""
PREPROCESS_INPUT_MODE_DOC = """
mode: One of "caffe", "tf" or "torch". Defaults to "caffe".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
"""
PREPROCESS_INPUT_DEFAULT_ERROR_DOC = """
ValueError: In case of unknown `mode` or `data_format` argument."""
PREPROCESS_INPUT_ERROR_DOC = """
ValueError: In case of unknown `data_format` argument."""
PREPROCESS_INPUT_RET_DOC_TF = """
The inputs pixel values are scaled between -1 and 1, sample-wise."""
PREPROCESS_INPUT_RET_DOC_TORCH = """
The input pixels values are scaled between 0 and 1 and each channel is
normalized with respect to the ImageNet dataset."""
PREPROCESS_INPUT_RET_DOC_CAFFE = """
The images are converted from RGB to BGR, then each color channel is
zero-centered with respect to the ImageNet dataset, without scaling."""
@keras_export('keras.applications.imagenet_utils.preprocess_input')
def preprocess_input(x, data_format=None, mode='caffe'):
  """Preprocesses a tensor or Numpy array encoding a batch of images.

  Args:
    x: Input Numpy array or symbolic tensor, 3D or 4D.
    data_format: Optional image layout, 'channels_first' or 'channels_last';
      defaults to the backend's global image data format.
    mode: One of "caffe", "tf" or "torch"; selects the preprocessing scheme.

  Returns:
    The preprocessed array/tensor (same kind as `x`).

  Raises:
    ValueError: In case of unknown `mode` or `data_format` argument.
  """
  if mode not in ('caffe', 'tf', 'torch'):
    raise ValueError('Expected mode to be one of `caffe`, `tf` or `torch`. '
                     f'Received: mode={mode}')

  if data_format is None:
    data_format = backend.image_data_format()
  elif data_format not in ('channels_first', 'channels_last'):
    raise ValueError('Expected data_format to be one of `channels_first` or '
                     f'`channels_last`. Received: data_format={data_format}')

  # Numpy inputs are processed eagerly, anything else symbolically.
  preprocessor = (_preprocess_numpy_input if isinstance(x, np.ndarray)
                  else _preprocess_symbolic_input)
  return preprocessor(x, data_format=data_format, mode=mode)
# Assemble the public docstring from the shared templates defined above so
# all per-application `preprocess_input` variants stay consistent.
preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
    mode=PREPROCESS_INPUT_MODE_DOC,
    ret='',
    error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC)
@keras_export('keras.applications.imagenet_utils.decode_predictions')
def decode_predictions(preds, top=5):
  """Decodes the prediction of an ImageNet model.

  Args:
    preds: Numpy array encoding a batch of predictions.
    top: Integer, how many top-guesses to return. Defaults to 5.

  Returns:
    A list of lists of top class prediction tuples
    `(class_name, class_description, score)`.
    One list of tuples per sample in batch input.

  Raises:
    ValueError: In case of invalid shape of the `pred` array
      (must be 2D).
  """
  global CLASS_INDEX

  if len(preds.shape) != 2 or preds.shape[1] != 1000:
    raise ValueError('`decode_predictions` expects '
                     'a batch of predictions '
                     '(i.e. a 2D array of shape (samples, 1000)). '
                     'Found array with shape: ' + str(preds.shape))

  # Download and cache the class-index mapping on first use.
  if CLASS_INDEX is None:
    fpath = data_utils.get_file(
        'imagenet_class_index.json',
        CLASS_INDEX_PATH,
        cache_subdir='models',
        file_hash='c2c37ea517e94d9795004a39431a14cb')
    with open(fpath) as f:
      CLASS_INDEX = json.load(f)

  results = []
  for sample in preds:
    best = sample.argsort()[-top:][::-1]
    decoded = [tuple(CLASS_INDEX[str(idx)]) + (sample[idx],) for idx in best]
    decoded.sort(key=lambda entry: entry[2], reverse=True)
    results.append(decoded)
  return results
def _preprocess_numpy_input(x, data_format, mode):
"""Preprocesses a Numpy array encoding a batch of images.
Args:
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed Numpy array.
"""
if not issubclass(x.dtype.type, np.floating):
x = x.astype(backend.floatx(), copy=False)
if mode == 'tf':
x /= 127.5
x -= 1.
return x
elif mode == 'torch':
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
def _preprocess_symbolic_input(x, data_format, mode):
  """Preprocesses a symbolic tensor encoding a batch of images.

  Args:
    x: Input tensor, 3D or 4D, with 3 color channels.
    data_format: Either 'channels_first' or 'channels_last'.
    mode: One of "caffe", "tf" or "torch" — same semantics as
      `_preprocess_numpy_input`.

  Returns:
    Preprocessed tensor.
  """
  if mode == 'tf':
    # Map [0, 255] to [-1, 1]; no channel-wise statistics needed.
    x /= 127.5
    x -= 1.
    return x

  if mode == 'torch':
    x /= 255.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
  else:
    # 'caffe' expects BGR channel order.
    if data_format == 'channels_first':
      x = x[::-1, ...] if backend.ndim(x) == 3 else x[:, ::-1, ...]
    else:
      x = x[..., ::-1]
    mean = [103.939, 116.779, 123.68]
    std = None

  # Zero-center by adding the negated per-channel mean.
  neg_mean = backend.constant(-np.array(mean))
  if backend.dtype(x) != backend.dtype(neg_mean):
    x = backend.bias_add(
        x, backend.cast(neg_mean, backend.dtype(x)), data_format=data_format)
  else:
    x = backend.bias_add(x, neg_mean, data_format)

  if std is not None:
    scale = backend.constant(np.array(std), dtype=backend.dtype(x))
    if data_format == 'channels_first':
      # Broadcast the per-channel scale over the spatial dimensions.
      scale = backend.reshape(scale, (-1, 1, 1))
    x /= scale
  return x
def obtain_input_shape(input_shape,
                       default_size,
                       min_size,
                       data_format,
                       require_flatten,
                       weights=None):
  """Internal utility to compute/validate a model's input shape.

  Args:
    input_shape: Either None (will return the default network input shape),
      or a user-provided shape to be validated.
    default_size: Default input width/height for the model.
    min_size: Minimum input width/height accepted by the model.
    data_format: Image data format to use.
    require_flatten: Whether the model is expected to
      be linked to a classifier via a Flatten layer.
    weights: One of `None` (random initialization)
      or 'imagenet' (pre-training on ImageNet).
      If weights='imagenet' input channels must be equal to 3.

  Returns:
    An integer shape tuple (may include None entries).

  Raises:
    ValueError: In case of invalid argument values.
  """
  channels_first = data_format == 'channels_first'

  # Work out the default shape. A caller-supplied channel count is only
  # honored when ImageNet weights are not requested.
  if weights != 'imagenet' and input_shape and len(input_shape) == 3:
    channels = input_shape[0] if channels_first else input_shape[-1]
    if channels not in {1, 3}:
      warnings.warn(
          'This model usually expects 1 or 3 input channels. '
          'However, it was passed an input_shape with ' +
          str(channels) + ' input channels.',
          stacklevel=2)
    if channels_first:
      default_shape = (channels, default_size, default_size)
    else:
      default_shape = (default_size, default_size, channels)
  else:
    if channels_first:
      default_shape = (3, default_size, default_size)
    else:
      default_shape = (default_size, default_size, 3)

  # With ImageNet weights and a classifier on top, only the default shape
  # is acceptable.
  if weights == 'imagenet' and require_flatten:
    if input_shape is not None and input_shape != default_shape:
      raise ValueError('When setting `include_top=True` '
                       'and loading `imagenet` weights, '
                       f'`input_shape` should be {default_shape}. '
                       f'Received: input_shape={input_shape}')
    return default_shape

  if input_shape:
    if len(input_shape) != 3:
      raise ValueError('`input_shape` must be a tuple of three integers.')
    if channels_first:
      channel_count = input_shape[0]
      spatial = (input_shape[1], input_shape[2])
    else:
      channel_count = input_shape[-1]
      spatial = (input_shape[0], input_shape[1])
    if channel_count != 3 and weights == 'imagenet':
      raise ValueError('The input must have 3 channels; Received '
                       f'`input_shape={input_shape}`')
    if any(dim is not None and dim < min_size for dim in spatial):
      raise ValueError(f'Input size must be at least {min_size}'
                       f'x{min_size}; Received: '
                       f'input_shape={input_shape}')
  else:
    if require_flatten:
      input_shape = default_shape
    else:
      input_shape = (3, None, None) if channels_first else (None, None, 3)

  if require_flatten and None in input_shape:
    raise ValueError('If `include_top` is True, '
                     'you should specify a static `input_shape`. '
                     f'Received: input_shape={input_shape}')
  return input_shape
def correct_pad(inputs, kernel_size):
  """Returns a tuple for zero-padding for 2D convolution with downsampling.

  Args:
    inputs: Input tensor.
    kernel_size: An integer or tuple/list of 2 integers.

  Returns:
    A tuple of two (pad_before, pad_after) pairs, one per spatial dimension.
  """
  # Locate the two spatial dimensions in the static input shape.
  spatial_axis = 2 if backend.image_data_format() == 'channels_first' else 1
  input_size = backend.int_shape(inputs)[spatial_axis:(spatial_axis + 2)]

  if isinstance(kernel_size, int):
    kernel_size = (kernel_size, kernel_size)

  if input_size[0] is None:
    adjust = (1, 1)
  else:
    # On even-sized inputs trim the leading pad by one.
    adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)

  half_h = kernel_size[0] // 2
  half_w = kernel_size[1] // 2
  return ((half_h - adjust[0], half_h),
          (half_w - adjust[1], half_w))
def validate_activation(classifier_activation, weights):
  """Validates that the classifier_activation is compatible with the weights.

  Args:
    classifier_activation: str or callable activation function.
    weights: The pretrained weights to load.

  Raises:
    ValueError: if an activation other than `None` or `softmax` are used with
      pretrained weights.
  """
  if weights is None:
    # Randomly-initialized models may use any classifier activation.
    return

  classifier_activation = activations.get(classifier_activation)
  allowed = (activations.get('softmax'), activations.get(None))
  if classifier_activation not in allowed:
    raise ValueError('Only `None` and `softmax` activations are allowed '
                     'for the `classifier_activation` argument when using '
                     'pretrained weights, with `include_top=True`; Received: '
                     f'classifier_activation={classifier_activation}')
aceccde44e365c2828363039906740c4cc10409f | 3,861 | py | Python | DeepHyperion-MNIST/properties.py | testingautomated-usi/DeepHyperion | 698e27cdea7581055de0d5f02d0585053452ac8f | [
"MIT"
] | 5 | 2021-04-28T09:35:44.000Z | 2021-09-10T18:18:08.000Z | DeepHyperion-MNIST/properties.py | testingautomated-usi/DeepHyperion | 698e27cdea7581055de0d5f02d0585053452ac8f | [
"MIT"
] | null | null | null | DeepHyperion-MNIST/properties.py | testingautomated-usi/DeepHyperion | 698e27cdea7581055de0d5f02d0585053452ac8f | [
"MIT"
] | 2 | 2021-04-26T12:46:44.000Z | 2021-09-16T08:27:53.000Z | # Make sure that any of this properties can be overridden using env.properties
import os
from os.path import join
import json
import uuid
class MissingEnvironmentVariable(Exception):
    """Raised when a required environment variable is not set."""
    pass
# --- GA setup (all values overridable via DH_* environment variables) ---
POPSIZE = int(os.getenv('DH_POPSIZE', '800'))  # population size
NGEN = int(os.getenv('DH_NGEN', '500000'))  # maximum number of generations
RUNTIME = int(os.getenv('DH_RUNTIME', '36'))
INTERVAL = int(os.getenv('DH_INTERVAL', '900'))
# Mutation hyperparameters
# range (extent) of the mutation
MUTLOWERBOUND = float(os.getenv('DH_MUTLOWERBOUND', '0.01'))
MUTUPPERBOUND = float(os.getenv('DH_MUTUPPERBOUND', '0.6'))
SELECTIONOP = str(os.getenv('DH_SELECTIONOP', 'random'))  # random or ranked or dynamic_ranked
SELECTIONPROB = float(os.getenv('DH_SELECTIONPROB', '0.0'))
RANK_BIAS = float(os.getenv('DH_RANK_BIAS', '1.5'))  # value between 1 and 2
RANK_BASE = str(os.getenv('DH_RANK_BASE', 'contribution_score'))  # perf or density or contribution_score
# Dataset: the expected (target) class label
EXPECTED_LABEL = int(os.getenv('DH_EXPECTED_LABEL', '5'))
# ------- NOT TUNING ----------
# mutation operator probability
MUTOPPROB = float(os.getenv('DH_MUTOPPROB', '0.5'))
MUTOFPROB = float(os.getenv('DH_MUTOFPROB', '0.5'))
IMG_SIZE = int(os.getenv('DH_IMG_SIZE', '28'))  # input image side length
num_classes = int(os.getenv('DH_NUM_CLASSES', '10'))
INITIALPOP = os.getenv('DH_INITIALPOP', 'seeded')
MODEL = os.getenv('DH_MODEL', 'models/model_mnist.h5')
ORIGINAL_SEEDS = os.getenv('DH_ORIGINAL_SEEDS', 'bootstraps_five')
BITMAP_THRESHOLD = float(os.getenv('DH_BITMAP_THRESHOLD', '0.5'))
DISTANCE_SEED = float(os.getenv('DH_DISTANCE_SEED', '5.0'))
DISTANCE = float(os.getenv('DH_DISTANCE', '2.0'))
# NOTE: this default is replaced by the mandatory FEATURES env var below.
FEATURES = os.getenv('FEATURES', ["Bitmaps", "Moves"])
NUM_CELLS = int(os.getenv("NUM_CELLS", '25'))
# FEATURES = os.getenv('FEATURES', ["Orientation","Bitmaps"])
# FEATURES = os.getenv('FEATURES', ["Orientation","Moves"])
TSHD_TYPE = os.getenv('TSHD_TYPE', '1')  # 1: threshold on vectorized-rasterized seed, use DISTANCE = 2
# Run-identification values read from the environment. NAME and THE_HASH are
# optional; RUN_ID and FEATURES are mandatory and abort the run when missing.
# TODO: there may be a better way to handle this.
try:
    NAME = str(os.environ['NAME'])
except Exception:
    NAME = None
try:
    THE_HASH = str(os.environ['THE_HASH'])
except Exception:
    # No hash supplied: generate a random one so runs stay distinguishable.
    THE_HASH = str(uuid.uuid4().hex)
    print("Generate random Hash", str(THE_HASH))
try:
    RUN = int(os.environ['RUN_ID'])
except KeyError:
    raise MissingEnvironmentVariable("RUN_ID does not exist. Please specify a value for this ENV variable")
except Exception:
    raise MissingEnvironmentVariable("Some other error?")
try:
    # FEATURES is a comma-separated list, e.g. "Bitmaps,Moves".
    FEATURES = str(os.environ['FEATURES'])
    FEATURES = FEATURES.split(',')
except KeyError:
    raise MissingEnvironmentVariable("FEATURES does not exist. Please specify a value for this ENV variable")
except Exception:
    raise MissingEnvironmentVariable("Some other error?")
def to_json(folder):
    """Write the current run configuration to ``config.json`` inside *folder*.

    Args:
        folder: Destination directory (must already exist).
    """
    # Resolve the threshold value matching the configured threshold type.
    if TSHD_TYPE == '1':
        tshd_val = str(DISTANCE)
    elif TSHD_TYPE == '2':
        tshd_val = str(DISTANCE_SEED)
    else:
        # '0' disables the threshold. This branch also covers unexpected
        # TSHD_TYPE values, which previously left `tshd_val` unbound and
        # crashed with NameError below.
        tshd_val = None
    config = {
        'name': str(NAME),
        'hash': str(THE_HASH),
        'popsize': str(POPSIZE),
        'initial pop': str(INITIALPOP),
        'label': str(EXPECTED_LABEL),
        'mut low': str(MUTLOWERBOUND),
        'mut up': str(MUTUPPERBOUND),
        'model': str(MODEL),
        'runtime': str(RUNTIME),
        'run': str(RUN),
        'features': str(FEATURES),
        'tshd_type': str(TSHD_TYPE),
        'tshd_value': str(tshd_val),
        'ranked prob': str(SELECTIONPROB),
        'rank bias': str(RANK_BIAS),
        'rank base': str(RANK_BASE),
        'selection': str(SELECTIONOP),
    }
    filedest = join(folder, "config.json")
    with open(filedest, 'w') as f:
        json.dump(config, f, sort_keys=True, indent=4)
| 33 | 114 | 0.643098 |
acecce21b5c6f929f802872f1a27003d1411a15b | 141 | py | Python | an_example_pypi_project/setup.py | ssalonen/python-packaging | 8012dd702b304c385ae57644c100fd5e506a790f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | an_example_pypi_project/setup.py | ssalonen/python-packaging | 8012dd702b304c385ae57644c100fd5e506a790f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | an_example_pypi_project/setup.py | ssalonen/python-packaging | 8012dd702b304c385ae57644c100fd5e506a790f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from setuptools import setup
setup(
name = "an_example_pypi_project",
version = "0.0.4",
packages=['an_example_pypi_project'],
) | 20.142857 | 41 | 0.695035 |
aceccec7acc71922867b939e71a2cf983ffb6e17 | 3,094 | py | Python | 1_step_traffic_junction/plot.py | FaithMai/LICA | e98da2f66e2af1e35418fe419267656303d848aa | [
"MIT"
] | 46 | 2020-10-20T12:50:03.000Z | 2022-03-02T15:51:22.000Z | 1_step_traffic_junction/plot.py | FaithMai/LICA | e98da2f66e2af1e35418fe419267656303d848aa | [
"MIT"
] | 6 | 2020-10-24T06:42:23.000Z | 2021-11-01T11:22:42.000Z | 1_step_traffic_junction/plot.py | FaithMai/LICA | e98da2f66e2af1e35418fe419267656303d848aa | [
"MIT"
] | 12 | 2020-10-27T12:00:12.000Z | 2021-12-14T02:15:35.000Z | import json
import os
import re
import sys
import random
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from coma import parallel_run as run_coma
from mix import parallel_run as run_mix
# Global matplotlib styling: 'science' style (presumably from the
# scienceplots package — confirm it is installed) plus a dotted gray grid.
plt.style.use(['science'])
plt.rc('grid', linestyle=':', color='gray')
plt.grid(which='major')
def show(i, ax, means, stds, legend, x=None, scale='linear', alpha=1):
    """Plot one mean curve with a +/- std band on *ax*, in percent.

    Args:
        i: Subplot index; only subplot 0 gets the shared y-axis label.
        ax: Target matplotlib Axes.
        means, stds: Per-step fractions in [0, 1]; scaled to percent here.
        legend: Label for the plotted curve.
        x: Optional x values; defaults to 1..len(means).
        scale: Unused (kept for interface compatibility).
        alpha: Line transparency.
    """
    mean_pct = np.array(means) * 100
    std_pct = np.array(stds) * 100
    if x is None:
        x = list(range(1, len(mean_pct) + 1))
    ax.plot(x, mean_pct, label=legend, alpha=alpha)
    ax.fill_between(x, mean_pct - std_pct, mean_pct + std_pct,
                    alpha=0.1, edgecolor='face')
    ax.set_xlim([1, 60])
    ax.set_ylim([0, 100])
    if i == 0:
        # Leftmost subplot carries the y-axis label ('\%' is LaTeX-escaped).
        ax.set_ylabel('Move Probability (\%)')
    else:
        ax.set_yticklabels([])
    ax.legend(loc='best', prop={'size': 8})
    ax.set_xlabel('Steps', fontsize=13)
    ax.yaxis.set_ticks([0, 25, 50, 75, 100])
if __name__ == '__main__':
    # Usage: plot.py REPEAT [K [LRFAC]] — runs both algorithms and plots
    # per-agent move probabilities for the two successful outcomes.
    repeat = int(sys.argv[1])
    # default values
    k = int(sys.argv[2]) if len(sys.argv) >= 3 else 5
    lrfac = int(sys.argv[3]) if len(sys.argv) >= 4 else 2
    print(f'repeat={repeat}, k={k}, lrfac={lrfac}')
    mix_outs = run_mix(repeat, k, lrfac)
    coma_outs = run_coma(repeat, k, lrfac)
    # Each outcome key is a pair of agent actions; runs are grouped by it.
    outcomes = ((1,0), (0,1), (0,0), (1,1))
    print('Outcomes:', outcomes)
    print('Mix outcome counts:', [len(mix_outs[o]) for o in outcomes])
    print('Coma outcome counts:', [len(coma_outs[o]) for o in outcomes])
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 2.5), constrained_layout=True)
    success_outcomes = ((1,0), (0,1))
    for i, (ax, outcome) in enumerate(zip((ax1, ax2), success_outcomes)):
        print('Success Outcome', outcome)
        mix_runs = mix_outs[outcome]
        coma_runs = coma_outs[outcome]
        # Aggregate per-step (agent1, agent2) values across all runs.
        a1_mix_means = []
        a2_mix_means = []
        a1_mix_stds = []
        a2_mix_stds = []
        for step_vals in zip(*mix_runs):
            a1_stepvals, a2_stepvals = zip(*step_vals)
            a1_mix_means.append(np.mean(a1_stepvals))
            a1_mix_stds.append(np.std(a1_stepvals))
            a2_mix_means.append(np.mean(a2_stepvals))
            a2_mix_stds.append(np.std(a2_stepvals))
        a1_coma_means = []
        a2_coma_means = []
        a1_coma_stds = []
        a2_coma_stds = []
        for step_vals in zip(*coma_runs):
            a1_stepvals, a2_stepvals = zip(*step_vals)
            a1_coma_means.append(np.mean(a1_stepvals))
            a1_coma_stds.append(np.std(a1_stepvals))
            a2_coma_means.append(np.mean(a2_stepvals))
            a2_coma_stds.append(np.std(a2_stepvals))
        show(i, ax, a1_mix_means, a1_mix_stds, 'LICA 1')
        show(i, ax, a2_mix_means, a2_mix_stds, 'LICA 2')
        show(i, ax, a1_coma_means, a1_coma_stds, 'COMA 1')
        show(i, ax, a2_coma_means, a2_coma_stds, 'COMA 2')
        ax.grid()
    # Random suffix avoids clobbering output from earlier invocations.
    rand = random.randint(0,1000)
    filename = f'outcome-subplot-{repeat}-{k}-{lrfac}-{rand:03}.pdf'
    plt.savefig(filename)
    # NOTE(review): the message does not mention `filename`; this looks like
    # lost templating — confirm the intended text against upstream.
    print(f'Runs with {repeat} repeats saved to (unknown)')
| 28.915888 | 83 | 0.615385 |
acecd00e84fc527dbee55afe080cd4cca00db751 | 1,236 | py | Python | days/01-03-datetimes/code/schedule.py | rwalden1993/100daysofcode-Python | e80e16e2a74ecfe4b7fa68c4665dd8c13961a619 | [
"MIT"
] | null | null | null | days/01-03-datetimes/code/schedule.py | rwalden1993/100daysofcode-Python | e80e16e2a74ecfe4b7fa68c4665dd8c13961a619 | [
"MIT"
] | null | null | null | days/01-03-datetimes/code/schedule.py | rwalden1993/100daysofcode-Python | e80e16e2a74ecfe4b7fa68c4665dd8c13961a619 | [
"MIT"
] | null | null | null | from datetime import date
# Global state shared by the command handlers below.
schedule = {}  # maps event name -> datetime.date
quit_program = False  # main-loop exit flag, set by the 'quit' command
def handle_user_input(user_input):
    """Dispatch a command string to the matching handler.

    Recognized commands: 'add', 'view', 'delete', 'quit'. Unknown commands
    now print a short hint instead of being silently ignored.
    """
    global quit_program
    if user_input == 'add':
        add_event()
    elif user_input == 'view':
        print_schedule()
    elif user_input == 'delete':
        delete_entry()
    elif user_input == 'quit':
        quit_program = True
    else:
        print(f'Unknown command: {user_input!r}')
def add_event():
    """Prompt for a date and event name and store them in the schedule.

    An invalid date string is reported and aborts the command instead of
    crashing the program with an uncaught ValueError.
    """
    global schedule
    date_string = input("Enter date (yyyy-mm-dd):")
    try:
        event_date = date.fromisoformat(date_string)
    except ValueError:
        print(f'Invalid date {date_string!r}; expected yyyy-mm-dd.')
        return
    event = input("Enter event: ")
    schedule[event] = event_date
    print(f'{event} added to schedule')
def print_schedule():
    """Print every scheduled event together with its date, one per line."""
    global schedule
    for event, event_date in schedule.items():
        print(event, event_date)
def delete_entry():
    """Prompt for an event name and remove it from the schedule.

    A name that is not in the schedule is reported instead of crashing
    the program with an uncaught KeyError.
    """
    global schedule
    event = input('Event to delete: ')
    if schedule.pop(event, None) is None:
        print(f'No event named {event!r} in schedule')
if __name__ == '__main__':
    # begin program: print the available commands once, then loop reading
    # commands until handle_user_input() sets quit_program.
    print("Hello! Welcome to your schedule!")
    print("Options:")
    print("\t \'add\' : Add new event to your schedule")
    print("\t \'view\' : View your schedule")
    print("\t \'delete\' : Delete an event from your schedule")
    print("\t \'quit\' : Quit the program")
    print('')
    while not quit_program:
        user_input = input('Command: ')
        handle_user_input(user_input)
| 24.72 | 63 | 0.627832 |
acecd07c6ffb8ef0a1a3315a1827b3efbabab6b5 | 1,960 | py | Python | src/order/migrations/0001_initial.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | null | null | null | src/order/migrations/0001_initial.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | 9 | 2021-03-19T01:50:04.000Z | 2022-03-12T00:23:18.000Z | src/order/migrations/0001_initial.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-03 05:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating Order and OrderItem.

    Do not edit the operations by hand; schema changes belong in a new
    migration generated with `makemigrations`.
    """
    initial = True
    dependencies = [
        ('accounts', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('product', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_paid', models.BooleanField(default=False)),
                ('shipping_cost', models.IntegerField(blank=True, default=0, null=True)),
                ('transaction_id', models.CharField(blank=True, max_length=256)),
                ('billing_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='billing_address', to='accounts.Address')),
                ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('shipping_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='shipping_address', to='accounts.Address')),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(blank=True, default=0, null=True)),
                # NOTE(review): field name 'Product' is capitalized; kept as
                # generated since renaming would require another migration.
                ('Product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.Product')),
                ('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='order.Order')),
            ],
        ),
    ]
| 47.804878 | 181 | 0.641837 |
acecd0eccd3b7451d33e16f106a0693db17e845b | 7,542 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/net_tools/netcup_dns.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/modules/net_tools/netcup_dns.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/modules/net_tools/netcup_dns.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018 Nicolai Buchwitz <nb@tipi-net.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: netcup_dns
notes: []
version_added: 2.7.0
short_description: manage Netcup DNS records
description:
- "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)"
options:
api_key:
description:
- API key for authentification, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
required: True
api_password:
description:
- API password for authentification, must be obtained via the netcup CCP (https://ccp.netcup.net)
required: True
customer_id:
description:
- Netcup customer id
required: True
domain:
description:
- Domainname the records should be added / removed
required: True
record:
description:
- Record to add or delete, supports wildcard (*). Default is C(@) (e.g. the zone name)
default: "@"
aliases: [ name ]
type:
description:
- Record type
choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
required: True
value:
description:
- Record value
required: true
solo:
type: bool
default: False
description:
- Whether the record should be the only one for that record type and record name. Only use with C(state=present)
- This will delete all other records with the same record name and type.
priority:
description:
- Record priority. Required for C(type=MX)
required: False
state:
description:
- Whether the record should exist or not
required: False
default: present
choices: [ 'present', 'absent' ]
requirements:
- "nc-dnsapi >= 0.1.3"
author: "Nicolai Buchwitz (@nbuchwitz)"
'''
EXAMPLES = '''
- name: Create a record of type A
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
name: "mail"
type: "A"
value: "127.0.0.1"
- name: Delete that record
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
name: "mail"
type: "A"
value: "127.0.0.1"
state: absent
- name: Create a wildcard record
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
name: "*"
type: "A"
value: "127.0.1.1"
- name: Set the MX record for example.com
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
type: "MX"
value: "mail.example.com"
- name: Set a record and ensure that this is the only one
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
name: "demo"
domain: "example.com"
type: "AAAA"
value: "::1"
solo: true
'''
RETURN = '''
records:
description: list containing all records
returned: success
type: complex
contains:
name:
description: the record name
returned: success
type: str
sample: fancy-hostname
type:
description: the record type
returned: succcess
type: str
sample: A
value:
description: the record destination
returned: success
type: str
sample: 127.0.0.1
priority:
description: the record priority (only relevant if type=MX)
returned: success
type: int
sample: 0
id:
description: internal id of the record
returned: success
type: int
sample: 12345
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# Optional third-party dependency: remember the import failure so main()
# can report it via Ansible's missing_required_lib helper.
NCDNSAPI_IMP_ERR = None
try:
    import nc_dnsapi
    from nc_dnsapi import DNSRecord
    HAS_NCDNSAPI = True
except ImportError:
    NCDNSAPI_IMP_ERR = traceback.format_exc()
    HAS_NCDNSAPI = False
def main():
    """Entry point of the Ansible module: ensure the requested DNS record state.

    Reads the module arguments, talks to the Netcup DNS API via nc_dnsapi and
    reports the resulting record set back to Ansible.
    """
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            api_password=dict(required=True, no_log=True),
            customer_id=dict(required=True, type='int'),
            domain=dict(required=True),
            record=dict(required=False, default='@', aliases=['name']),
            type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
            value=dict(required=True),
            priority=dict(required=False, type='int'),
            solo=dict(required=False, type='bool', default=False),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
        ),
        supports_check_mode=True
    )

    if not HAS_NCDNSAPI:
        module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)

    api_key = module.params.get('api_key')
    api_password = module.params.get('api_password')
    customer_id = module.params.get('customer_id')
    domain = module.params.get('domain')
    record_type = module.params.get('type')
    record = module.params.get('record')
    value = module.params.get('value')
    priority = module.params.get('priority')
    solo = module.params.get('solo')
    state = module.params.get('state')

    if record_type == 'MX' and not priority:
        module.fail_json(msg="record type MX required the 'priority' argument")

    has_changed = False
    all_records = []
    try:
        with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
            all_records = api.dns_records(domain)
            record = DNSRecord(record, record_type, value, priority=priority)

            # Look for an existing record matching the requested one.
            record_exists = False
            for r in all_records:
                if r == record:
                    record_exists = True
                    record = r
                    break

            if state == 'present':
                if solo:
                    # Delete records with the same name/type but a different
                    # destination so the requested record ends up alone.
                    obsolete_records = [r for r in all_records if
                                        r.hostname == record.hostname
                                        and r.type == record.type
                                        and not r.destination == record.destination]
                    if obsolete_records:
                        if not module.check_mode:
                            all_records = api.delete_dns_records(domain, obsolete_records)
                        has_changed = True

                if not record_exists:
                    if not module.check_mode:
                        all_records = api.add_dns_record(domain, record)
                    has_changed = True
            elif state == 'absent' and record_exists:
                if not module.check_mode:
                    all_records = api.delete_dns_record(domain, record)
                has_changed = True
    except Exception as ex:
        # BUGFIX: `ex.message` does not exist on Python 3 exceptions and
        # raised AttributeError instead of reporting the real API error.
        module.fail_json(msg=str(ex))

    module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
def record_data(r):
    """Map a DNS record object onto the plain dict shape used in module output."""
    fields = (
        ("name", r.hostname),
        ("type", r.type),
        ("value", r.destination),
        ("priority", r.priority),
        ("id", r.id),
    )
    return dict(fields)
# Standard Ansible module entry point: only run main() when executed directly.
if __name__ == '__main__':
    main()
| 28.247191 | 120 | 0.59109 |
acecd14735bacf079d558b8a9792c104d339ab45 | 9,058 | py | Python | GrungyPolygon/COLLADA ImportPlus.py | grungypolygon/posertools-daesupplement | a9b080b5a242e29af87a07ed66acc4d954b3917c | [
"Apache-2.0"
] | 1 | 2017-03-12T23:10:27.000Z | 2017-03-12T23:10:27.000Z | GrungyPolygon/COLLADA ImportPlus.py | grungypolygon/posertools-daesupplement | a9b080b5a242e29af87a07ed66acc4d954b3917c | [
"Apache-2.0"
] | null | null | null | GrungyPolygon/COLLADA ImportPlus.py | grungypolygon/posertools-daesupplement | a9b080b5a242e29af87a07ed66acc4d954b3917c | [
"Apache-2.0"
] | null | null | null | import os.path
import numpy
import poser as P
import wx
import collada
AXIS_NORM = numpy.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
])
AXIS_Z_UP = numpy.matrix([
[1, 0, 0, 0],
[0, 0, -1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
])
# METER_TO_POSER_UNIT = (1 / 2.62128)
COLLADA_NAMESPACE = '''http://www.collada.org/2005/11/COLLADASchema'''
class OptionsDialog(wx.Dialog):
    """Modal dialog collecting COLLADA import adjustment options.

    The chosen settings are exposed via the ``adjust_axis``, ``adjust_scale``
    and ``adjust_hierarchy`` properties; each is None when the matching
    checkbox is unticked. ``canceled`` reports whether the user cancelled.
    """

    def __init__(self, parent):
        wx.Dialog.__init__(self, parent, -1, title='COLLADA import adjustments', size=(500, -1),
                           style=wx.DEFAULT_DIALOG_STYLE)

        # Defaults mirror the initial state of the widgets created below.
        self._adjust_axis = 'Z_UP'
        self._adjust_scale = '2.62128'
        self._adjust_hierarchy = True
        # BUGFIX: must be initialised *before* ShowModal() (which blocks until
        # the dialog closes); the original assigned it afterwards, so the
        # `canceled` property could never be True.
        self._canceled = False

        self.axis_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.cb_adjust_axis = wx.CheckBox(self, -1, 'Adjust axis')
        self.cb_adjust_axis.SetValue(True)
        self.cb_adjust_axis.Bind(wx.EVT_CHECKBOX, self.toggle_adjust_axis)
        self.db_adjust_axis = wx.Choice(self, -1, choices=['Z_UP', 'Y_UP'])
        self.db_adjust_axis.SetSelection(0)
        self.db_adjust_axis.Bind(wx.EVT_CHOICE, self.choose_adjust_axis)
        self.axis_sizer.Add(self.cb_adjust_axis, 1, wx.EXPAND | wx.RIGHT, 10)
        self.axis_sizer.Add(self.db_adjust_axis, 1, wx.EXPAND)

        self.scale_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.cb_adjust_scale = wx.CheckBox(self, -1, 'Adjust scale')
        self.cb_adjust_scale.SetValue(True)
        self.cb_adjust_scale.Bind(wx.EVT_CHECKBOX, self.toggle_adjust_scale)
        self.tc_adjust_scale = wx.TextCtrl(self, -1, '2.62128')
        # BUGFIX: EVT_TEXT must be bound on the text control; the original
        # bound it on the checkbox, so edits to the scale were never captured.
        self.tc_adjust_scale.Bind(wx.EVT_TEXT, self.update_adjust_scale)
        self.scale_sizer.Add(self.cb_adjust_scale, 1, wx.EXPAND | wx.RIGHT, 10)
        self.scale_sizer.Add(self.tc_adjust_scale, 0, wx.EXPAND)

        self.cb_adjust_hierarchy = wx.CheckBox(self, -1, 'Adjust hierarchy')
        self.cb_adjust_hierarchy.SetValue(True)
        self.cb_adjust_hierarchy.Bind(wx.EVT_CHECKBOX, self.toggle_adjust_hierarchy)

        # Not implemented yet; shown disabled as a placeholder.
        self.cb_import_animations = wx.CheckBox(self, -1, 'Import animation')
        self.cb_import_animations.Disable()

        self.buttons_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.btn_cancel = wx.Button(self, -1, 'Cancel')
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.OnCancel)
        self.btn_import = wx.Button(self, -1, 'Import')
        self.btn_import.Bind(wx.EVT_BUTTON, self.OnImport)
        self.buttons_sizer.Add(self.btn_cancel, 1, wx.EXPAND | wx.ALIGN_RIGHT | wx.RIGHT, 20)
        self.buttons_sizer.Add(self.btn_import, 1)

        # Use some sizers to see layout options
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.axis_sizer, 1, wx.EXPAND | wx.ALL, 10)
        self.sizer.Add(self.scale_sizer, 1, wx.EXPAND | wx.ALL, 10)
        self.sizer.Add(self.cb_adjust_hierarchy, 1, wx.EXPAND | wx.ALL, 10)
        self.sizer.Add(self.cb_import_animations, 1, wx.EXPAND | wx.ALL, 10)
        self.sizer.Add(self.buttons_sizer, 1, wx.EXPAND | wx.ALL, 10)

        # Layout sizers
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)
        self.sizer.Fit(self)
        self.ShowModal()

    def choose_adjust_axis(self, e):
        self._adjust_axis = self.db_adjust_axis.GetStringSelection()

    def toggle_adjust_axis(self, e):
        if self.cb_adjust_axis.GetValue():
            self._adjust_axis = self.db_adjust_axis.GetStringSelection()
        else:
            self._adjust_axis = None

    def update_adjust_scale(self, e):
        # BUGFIX: read the scale text control; the original read the axis
        # wx.Choice (db_adjust_axis), which has no GetValue() and is the
        # wrong widget anyway.
        self._adjust_scale = self.tc_adjust_scale.GetValue()

    def toggle_adjust_scale(self, e):
        if self.cb_adjust_scale.GetValue():
            self._adjust_scale = self.tc_adjust_scale.GetValue()
        else:
            self._adjust_scale = None

    def toggle_adjust_hierarchy(self, e):
        # BUGFIX: hierarchy adjustment is a boolean flag; the original stored
        # the scale text-control value here.
        if self.cb_adjust_hierarchy.GetValue():
            self._adjust_hierarchy = True
        else:
            self._adjust_hierarchy = None

    def OnCancel(self, e):
        self._canceled = True
        self.EndModal(wx.ID_CANCEL)

    def OnImport(self, e):
        self.EndModal(wx.ID_OK)

    @property
    def canceled(self):
        return self._canceled

    @property
    def adjust_axis(self):
        return self._adjust_axis

    @property
    def adjust_scale(self):
        return self._adjust_scale

    @property
    def adjust_hierarchy(self):
        return self._adjust_hierarchy
def get_vert_pos(vertex, extend=False):
    """Return [x, y, z] of a Poser vertex; append a trailing 1 when `extend`
    is True (homogeneous coordinates for matrix maths)."""
    pos = [vertex.X(), vertex.Y(), vertex.Z()]
    return pos + [1] if extend else pos
def set_vert_pos(vertex, pos):
    """Write pos[0:3] into a Poser vertex as x, y, z."""
    x, y, z = pos[0], pos[1], pos[2]
    vertex.SetX(x)
    vertex.SetY(y)
    vertex.SetZ(z)
def get_actor_pos(actor, extend=False):
    """Return the actor's [x, y, z] translation parameters; append a trailing
    1 when `extend` is True (homogeneous coordinates)."""
    codes = (P.kParmCodeXTRAN, P.kParmCodeYTRAN, P.kParmCodeZTRAN)
    pos = [actor.ParameterByCode(code).Value() for code in codes]
    if extend:
        pos.append(1)
    return pos
def set_actor_pos(actor, pos):
    """Write pos[0:3] into the actor's x/y/z translation parameters."""
    codes = (P.kParmCodeXTRAN, P.kParmCodeYTRAN, P.kParmCodeZTRAN)
    for idx, code in enumerate(codes):
        actor.ParameterByCode(code).SetValue(pos[idx])
def import_dae(file_path, dae_object, options):
    """Run Poser's native COLLADA importer on `file_path`.

    `dae_object` and `options` are currently unused but kept in the signature
    for interface stability with the calling script below.
    """
    # Removed the dead local `poser_dae_import_opts = {}` — it was never used.
    P.Scene().ImExporter().Import('DAE', None, file_path)
def parse_dae_file(file_path):
    """Parse a COLLADA file and return the objects of its 'Scene'."""
    collada_root = collada.parse_collada_file(file_path)
    return collada.get_scene_objects(collada_root, 'Scene')
def options_dialog(file_path):
    """Show the import-options dialog parented to Poser's main window.

    `file_path` is unused; kept for interface stability with the caller.
    """
    main_window = P.WxAuiManager().GetManagedWindow()
    return OptionsDialog(main_window)
def check_dae_file(file_path):
    """Sanity-check that `file_path` is an existing COLLADA document.

    Returns 0 when the COLLADA namespace string is found anywhere in the
    file, otherwise shows the user an explanatory message box and returns 1.
    """
    file_name = os.path.basename(file_path)
    if os.path.isfile(file_path):
        with open(file_path, 'r') as dae_file:
            # Stream the file line by line instead of loading it all with
            # readlines(); COLLADA files can be large.
            for line in dae_file:
                if COLLADA_NAMESPACE in line:
                    return 0
            else:
                P.DialogSimple.MessageBox('''"{}" doesn't look like a COLLADA file.'''.format(file_name))
    else:
        P.DialogSimple.MessageBox('''"{}" is not a file.'''.format(file_name))
    return 1
def matrix_and_scale(options):
    """Translate the dialog options into (axis-conversion matrix, scale).

    Returns AXIS_NORM (identity) when no axis adjustment was requested or
    the requested axis is unknown, and a scale factor of 1.0 when no scale
    adjustment was requested.
    """
    matrix = AXIS_NORM
    if options.adjust_axis:
        matrix = {'Z_UP': AXIS_Z_UP}.get(options.adjust_axis, AXIS_NORM)
    scale = (1.0 / float(options.adjust_scale)) if options.adjust_scale else 1.0
    return matrix, scale
def adjust_axis(actor, options):
    """Re-orient and re-scale an actor's translation per the import options."""
    conv_matrix, conv_scale = matrix_and_scale(options)
    old_pos = get_actor_pos(actor, True)
    converted = numpy.multiply((old_pos * conv_matrix), conv_scale).A[0]
    set_actor_pos(actor, converted)
def adjust_vertices(actor, options):
    """Apply axis conversion and scaling to every vertex of the actor's mesh."""
    conv_matrix, conv_scale = matrix_and_scale(options)
    for vert in actor.Geometry().Vertices():
        old_pos = get_vert_pos(vert, True)
        converted = numpy.multiply((old_pos * conv_matrix), conv_scale).A[0]
        set_vert_pos(vert, converted)
def adjust_hierarchy(actor, obj):
    """Re-parent `actor` to match the COLLADA scene hierarchy of `obj`."""
    if obj.parent is not None:
        try:
            parent_actor = P.Scene().Actor(obj.parent.name)
            actor.SetParent(parent_actor)
        except P.error as err:
            # BUGFIX: the original referenced an undefined variable `name`,
            # which raised a NameError here and masked the Poser error.
            print("Poser err on object '{}': {}".format(actor.Name(), err))
def create_grouping(name):
    """Create a Poser grouping actor and rename it to `name`.

    CreateGrouping() neither accepts a name nor returns the new actor, so the
    freshly created grouping is located by diffing the scene's actor names
    before and after the call.
    """
    names_before = set(x.Name() for x in P.Scene().Actors())
    P.Scene().CreateGrouping()
    names_after = set(x.Name() for x in P.Scene().Actors())
    generated_grouping_name = (names_after - names_before).pop()
    grouping = P.Scene().Actor(generated_grouping_name)
    grouping.SetName(name)
    return grouping
# --- Script entry: show options, pick a DAE file, import and post-process. ---
options = options_dialog(None)
if not options.GetReturnCode() == wx.ID_CANCEL:
    open_dialog = P.DialogFileChooser(
        P.kDialogFileChooserOpen,
        None,
        'Select Collada DAE file to import',
        P.ContentRootLocation()
    )
    # Show() returns 1 when the user confirmed a file selection.
    result = open_dialog.Show()
    if result == 1:
        file_path = open_dialog.Path()
        check = check_dae_file(file_path)
        if check == 0:
            dae_objects = parse_dae_file(file_path)
            import_dae(file_path, dae_objects, options)
            if options.adjust_hierarchy:
                # COLLADA nodes without a mesh become named grouping actors.
                for name, obj in dae_objects.items():
                    if obj.mesh is None:
                        create_grouping(name)
            # Post-process every imported object: axis/scale fixes, then
            # hierarchy re-parenting.
            for name, obj in dae_objects.items():
                try:
                    actor = P.Scene().Actor(name)
                    if options.adjust_axis or options.adjust_scale:
                        adjust_axis(actor, options)
                        adjust_vertices(actor, options)
                    if options.adjust_hierarchy:
                        adjust_hierarchy(actor, obj)
                except P.error as err:
                    print("Poser err on object '{}': {}".format(name, err))
| 32.120567 | 105 | 0.636564 |
acecd21a43ea37d057f08a4462060a6b3f44ac2a | 216 | py | Python | restfulapicrud/bookapi/viewsets.py | GianCarlosB/django-restful-api-crawler-test | 825ad952d81c10782b443568fff1451c53f3ec0c | [
"Apache-2.0"
] | null | null | null | restfulapicrud/bookapi/viewsets.py | GianCarlosB/django-restful-api-crawler-test | 825ad952d81c10782b443568fff1451c53f3ec0c | [
"Apache-2.0"
] | 5 | 2021-03-19T02:36:13.000Z | 2021-09-22T18:56:52.000Z | restfulapicrud/bookapi/viewsets.py | GianCarlosB/django-restful-api-crawler-test | 825ad952d81c10782b443568fff1451c53f3ec0c | [
"Apache-2.0"
] | null | null | null | from rest_framework import viewsets
from . import models
from . import serializers
class BookViewset(viewsets.ModelViewSet):
    """CRUD API endpoint for Book objects (list/create/retrieve/update/destroy)."""
    queryset = models.Book.objects.all()
    serializer_class = serializers.BookSerializer
acecd23d62bdfe737ea9cdfc5dd8c9463bc86f8d | 8,442 | py | Python | sktime/transformers/series_as_features/rocket.py | ronsg83/sktime | b22d05566553cfefa4d6f9deea854524fc3bc467 | [
"BSD-3-Clause"
] | 1 | 2020-10-05T19:49:24.000Z | 2020-10-05T19:49:24.000Z | sktime/transformers/series_as_features/rocket.py | ronsg83/sktime | b22d05566553cfefa4d6f9deea854524fc3bc467 | [
"BSD-3-Clause"
] | null | null | null | sktime/transformers/series_as_features/rocket.py | ronsg83/sktime | b22d05566553cfefa4d6f9deea854524fc3bc467 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sktime.transformers.series_as_features.base import BaseSeriesAsFeaturesTransformer
from sktime.utils.check_imports import _check_soft_deps
from sktime.utils.data_container import nested_to_3d_numpy
from sktime.utils.validation.series_as_features import check_X
_check_soft_deps("numba")
from numba import njit, prange # noqa: E402
__author__ = "Angus Dempster"
__all__ = ["Rocket"]
class Rocket(BaseSeriesAsFeaturesTransformer):
    """ROCKET
    RandOm Convolutional KErnel Transform
    @article{dempster_etal_2019,
    author = {Dempster, Angus and Petitjean, Francois and Webb,
    Geoffrey I},
    title = {ROCKET: Exceptionally fast and accurate time series
    classification using random convolutional kernels},
    year = {2019},
    journal = {arXiv:1910.13051}
    }
    Parameters
    ----------
    num_kernels : int, number of random convolutional kernels (default 10,000)
    normalise : boolean, whether or not to normalise the input time
    series per instance (default True)
    random_state : int (ignored unless int due to compatability with Numba),
    random seed (optional, default None)
    """
    def __init__(self, num_kernels=10_000, normalise=True, random_state=None):
        self.num_kernels = num_kernels
        self.normalise = normalise
        # Numba requires an int seed, so any non-int random_state is
        # silently treated as "no seed" (see class docstring).
        self.random_state = random_state if isinstance(random_state, int) else None
        super(Rocket, self).__init__()
    def fit(self, X, y=None):
        """Infers time series length and number of channels / dimensions (
        for multivariate time series) from input pandas DataFrame,
        and generates random kernels.
        Parameters
        ----------
        X : pandas DataFrame, input time series (sktime format)
        y : array_like, target values (optional, ignored as irrelevant)
        Returns
        -------
        self
        """
        X = check_X(X)
        _, self.n_columns = X.shape
        # Longest series length across all cells; bounds kernel dilation.
        n_timepoints = X.applymap(lambda series: series.size).max().max()
        self.kernels = _generate_kernels(
            n_timepoints, self.num_kernels, self.n_columns, self.random_state
        )
        self._is_fitted = True
        return self
    def transform(self, X, y=None):
        """Transforms input time series using random convolutional kernels.
        Parameters
        ----------
        X : pandas DataFrame, input time series (sktime format)
        y : array_like, target values (optional, ignored as irrelevant)
        Returns
        -------
        pandas DataFrame, transformed features (2 features per kernel)
        """
        self.check_is_fitted()
        X = check_X(X)
        _X = nested_to_3d_numpy(X)
        if self.normalise:
            # Per-instance z-normalisation; 1e-8 guards zero-variance series.
            _X = (_X - _X.mean(axis=-1, keepdims=True)) / (
                _X.std(axis=-1, keepdims=True) + 1e-8
            )
        return pd.DataFrame(_apply_kernels(_X, self.kernels))
@njit(
    "Tuple((float64[:],int32[:],float64[:],int32[:],int32[:],int32[:],"
    "int32[:]))(int64,int64,int64,optional(int64))"
)
def _generate_kernels(n_timepoints, num_kernels, n_columns, seed):
    # Randomly generate `num_kernels` convolutional kernels, returned as a
    # tuple of flat arrays (weights, lengths, biases, dilations, paddings,
    # num_channel_indices, channel_indices). Weights and channel indices for
    # all kernels are packed into single 1-D arrays and sliced by offsets.
    if seed is not None:
        np.random.seed(seed)
    candidate_lengths = np.array((7, 9, 11), dtype=np.int32)
    lengths = np.random.choice(candidate_lengths, num_kernels)
    # Each kernel convolves a random subset of channels; subset size is
    # sampled log-uniformly in [1, min(n_columns, length)].
    num_channel_indices = np.zeros(num_kernels, dtype=np.int32)
    for i in range(num_kernels):
        limit = min(n_columns, lengths[i])
        num_channel_indices[i] = 2 ** np.random.uniform(0, np.log2(limit + 1))
    channel_indices = np.zeros(num_channel_indices.sum(), dtype=np.int32)
    # Total weight count = sum over kernels of (length * channels used).
    weights = np.zeros(
        np.int32(
            np.dot(lengths.astype(np.float64), num_channel_indices.astype(np.float64))
        ),
        dtype=np.float64,
    )
    biases = np.zeros(num_kernels, dtype=np.float64)
    dilations = np.zeros(num_kernels, dtype=np.int32)
    paddings = np.zeros(num_kernels, dtype=np.int32)
    a1 = 0  # for weights
    a2 = 0  # for channel_indices
    for i in range(num_kernels):
        _length = lengths[i]
        _num_channel_indices = num_channel_indices[i]
        _weights = np.random.normal(0, 1, _num_channel_indices * _length)
        b1 = a1 + (_num_channel_indices * _length)
        b2 = a2 + _num_channel_indices
        a3 = 0  # for weights (per channel)
        # Mean-centre the weights of each channel independently.
        for _j in range(_num_channel_indices):
            b3 = a3 + _length
            _weights[a3:b3] = _weights[a3:b3] - _weights[a3:b3].mean()
            a3 = b3
        weights[a1:b1] = _weights
        channel_indices[a2:b2] = np.random.choice(
            np.arange(0, n_columns), _num_channel_indices, replace=False
        )
        biases[i] = np.random.uniform(-1, 1)
        # Dilation sampled log-uniformly so the dilated kernel still fits
        # inside the series.
        dilation = 2 ** np.random.uniform(
            0, np.log2((n_timepoints - 1) / (_length - 1))
        )
        dilation = np.int32(dilation)
        dilations[i] = dilation
        # 50% of kernels use "same"-style padding, the rest none.
        padding = ((_length - 1) * dilation) // 2 if np.random.randint(2) == 1 else 0
        paddings[i] = padding
        a1 = b1
        a2 = b2
    return (
        weights,
        lengths,
        biases,
        dilations,
        paddings,
        num_channel_indices,
        channel_indices,
    )
@njit(fastmath=True)
def _apply_kernel_univariate(X, weights, length, bias, dilation, padding):
    # Convolve one kernel over a single 1-D series and return the two ROCKET
    # features: (proportion of positive values, max convolution output).
    n_timepoints = len(X)
    output_length = (n_timepoints + (2 * padding)) - ((length - 1) * dilation)
    _ppv = 0
    _max = np.NINF
    end = (n_timepoints + padding) - ((length - 1) * dilation)
    for i in range(-padding, end):
        _sum = bias
        index = i
        for j in range(length):
            # Out-of-range taps (inside the padding) contribute zero.
            if index > -1 and index < n_timepoints:
                _sum = _sum + weights[j] * X[index]
            index = index + dilation
        if _sum > _max:
            _max = _sum
        if _sum > 0:
            _ppv += 1
    return _ppv / output_length, _max
@njit(fastmath=True)
def _apply_kernel_multivariate(
    X, weights, length, bias, dilation, padding, num_channel_indices, channel_indices
):
    # Convolve one kernel over the selected channels of a 2-D (channels x
    # time) series; returns (proportion of positive values, max output).
    n_columns, n_timepoints = X.shape
    output_length = (n_timepoints + (2 * padding)) - ((length - 1) * dilation)
    _ppv = 0
    _max = np.NINF
    end = (n_timepoints + padding) - ((length - 1) * dilation)
    for i in range(-padding, end):
        _sum = bias
        index = i
        for j in range(length):
            # Out-of-range taps (inside the padding) contribute zero.
            if index > -1 and index < n_timepoints:
                # Sum the tap over every channel this kernel attends to.
                for k in range(num_channel_indices):
                    _sum = _sum + weights[k, j] * X[channel_indices[k], index]
            index = index + dilation
        if _sum > _max:
            _max = _sum
        if _sum > 0:
            _ppv += 1
    return _ppv / output_length, _max
@njit(
    "float64[:,:](float64[:,:,:],Tuple((float64[::1],int32[:],float64[:],"
    "int32[:],int32[:],int32[:],int32[:])))",
    parallel=True,
    fastmath=True,
)
def _apply_kernels(X, kernels):
    # Apply every kernel to every instance of X (instances x channels x time),
    # producing a (n_instances, 2 * num_kernels) feature matrix. Instances
    # are processed in parallel via prange.
    (
        weights,
        lengths,
        biases,
        dilations,
        paddings,
        num_channel_indices,
        channel_indices,
    ) = kernels
    n_instances, n_columns, _ = X.shape
    num_kernels = len(lengths)
    _X = np.zeros(
        (n_instances, num_kernels * 2), dtype=np.float64
    )  # 2 features per kernel
    for i in prange(n_instances):
        # Offsets into the packed weight/channel arrays for kernel j.
        a1 = 0  # for weights
        a2 = 0  # for channel_indices
        a3 = 0  # for features
        for j in range(num_kernels):
            b1 = a1 + num_channel_indices[j] * lengths[j]
            b2 = a2 + num_channel_indices[j]
            b3 = a3 + 2
            if num_channel_indices[j] == 1:
                # Single-channel kernel: use the cheaper univariate path.
                _X[i, a3:b3] = _apply_kernel_univariate(
                    X[i, channel_indices[a2]],
                    weights[a1:b1],
                    lengths[j],
                    biases[j],
                    dilations[j],
                    paddings[j],
                )
            else:
                _weights = weights[a1:b1].reshape((num_channel_indices[j], lengths[j]))
                _X[i, a3:b3] = _apply_kernel_multivariate(
                    X[i],
                    _weights,
                    lengths[j],
                    biases[j],
                    dilations[j],
                    paddings[j],
                    num_channel_indices[j],
                    channel_indices[a2:b2],
                )
            a1 = b1
            a2 = b2
            a3 = b3
    return _X
| 27.588235 | 87 | 0.577233 |
acecd2417e119f7b4676a8dd4d0da408b5a5578f | 4,784 | py | Python | 03_Linear_Regression/08_Implementing_Logistic_Regression/08_logistic_regression.py | varunjha089/tensorflow_cookbook | c1fa5051c860ecb6de875db975465ced06f43ba6 | [
"MIT"
] | 4 | 2018-03-04T15:39:10.000Z | 2020-11-11T00:46:34.000Z | 03_Linear_Regression/08_Implementing_Logistic_Regression/08_logistic_regression.py | varunjha089/tensorflow_cookbook | c1fa5051c860ecb6de875db975465ced06f43ba6 | [
"MIT"
] | 2 | 2018-03-07T14:31:22.000Z | 2018-03-07T15:04:17.000Z | 03_Linear_Regression/08_Implementing_Logistic_Regression/08_logistic_regression.py | varunjha089/tensorflow_cookbook | c1fa5051c860ecb6de875db975465ced06f43ba6 | [
"MIT"
] | 3 | 2018-02-12T09:47:32.000Z | 2019-07-16T15:39:02.000Z | # Logistic Regression
#----------------------------------
#
# This function shows how to use TensorFlow to
# solve logistic regression.
# y = sigmoid(Ax + b)
#
# We will use the low birth weight data, specifically:
# y = 0 or 1 = low birth weight
# x = demographic and medical history data
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import requests
from tensorflow.python.framework import ops
import os.path
import csv
ops.reset_default_graph()
# Create graph
sess = tf.Session()
###
# Obtain and prepare data for modeling
###
# name of data file
birth_weight_file = 'birth_weight.csv'
# download data and create data file if file does not exist in current directory
if not os.path.exists(birth_weight_file):
    birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
    birth_file = requests.get(birthdata_url)
    birth_data = birth_file.text.split('\r\n')
    birth_header = birth_data[0].split('\t')
    # Tab-separated values; skip empty cells and empty trailing rows.
    birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1]
    with open(birth_weight_file, "w") as f:
        writer = csv.writer(f)
        writer.writerow(birth_header)
        writer.writerows(birth_data)
        # NOTE(review): f.close() is redundant inside the `with` block.
        f.close()
# read birth weight data into memory
birth_data = []
with open(birth_weight_file, newline='') as csvfile:
    csv_reader = csv.reader(csvfile)
    birth_header = next(csv_reader)
    for row in csv_reader:
        birth_data.append(row)
birth_data = [[float(x) for x in row] for row in birth_data]
# Pull out target variable
y_vals = np.array([x[0] for x in birth_data])
# Pull out predictor variables (not id, not target, and not birthweight)
x_vals = np.array([x[1:8] for x in birth_data])
# set for reproducible results
seed = 99
np.random.seed(seed)
tf.set_random_seed(seed)
# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
def normalize_cols(m):
    """Min-max scale each column of `m` into [0, 1].

    Columns with zero range divide by zero (yielding NaN/inf); the caller
    cleans those up with np.nan_to_num.
    """
    col_min = m.min(axis=0)
    col_range = m.max(axis=0) - col_min
    return (m - col_min) / col_range
# Scale features to [0, 1]; nan_to_num zeroes out constant columns.
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))
###
# Define Tensorflow computational graph¶
###
# Declare batch size
batch_size = 25
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 7], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[7,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations: logits = x @ A + b (sigmoid applied in the loss)
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
###
# Train model
###
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Actual Prediction: threshold the sigmoid probability at 0.5
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Training loop
loss_vec = []
train_acc = []
test_acc = []
for i in range(1500):
    # Sample a random mini-batch (with replacement).
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)
    # Track full train/test accuracy each step for the plots below.
    temp_acc_train = sess.run(accuracy, feed_dict={x_data: x_vals_train, y_target: np.transpose([y_vals_train])})
    train_acc.append(temp_acc_train)
    temp_acc_test = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})
    test_acc.append(temp_acc_test)
    if (i+1)%300==0:
        print('Loss = ' + str(temp_loss))
###
# Display model performance
###
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Cross Entropy Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Cross Entropy Loss')
plt.show()
# Plot train and test accuracy
plt.plot(train_acc, 'k-', label='Train Set Accuracy')
plt.plot(test_acc, 'r--', label='Test Set Accuracy')
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
acecd29e81897d0941a32923fa29ff7b7c5c9088 | 981 | py | Python | pywikibot/compat/catlib.py | jkjkjkjkjk/pywikibot-core | f3748c95ea694083ae00534973d0d1dd018a5b43 | [
"MIT"
] | 2 | 2017-06-19T16:48:34.000Z | 2017-07-07T14:15:28.000Z | pywikibot/compat/catlib.py | jkjkjkjkjk/pywikibot-core | f3748c95ea694083ae00534973d0d1dd018a5b43 | [
"MIT"
] | 11 | 2018-12-07T18:20:05.000Z | 2022-03-11T23:12:42.000Z | pywikibot/compat/catlib.py | jkjkjkjkjk/pywikibot-core | f3748c95ea694083ae00534973d0d1dd018a5b43 | [
"MIT"
] | 3 | 2018-12-09T10:18:35.000Z | 2020-09-12T13:50:14.000Z | # -*- coding: utf-8 -*-
"""
WARNING: THIS MODULE EXISTS SOLELY TO PROVIDE BACKWARDS-COMPATIBILITY.
Do not use in new scripts; use the source to find the appropriate
function/method instead.
"""
#
# (C) Pywikibot team, 2008
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: acecd29e81897d0941a32923fa29ff7b7c5c9088 $'
from pywikibot import Category
from pywikibot.tools import ModuleDeprecationWrapper
def change_category(article, oldCat, newCat, comment=None, sortKey=None,
                    inPlace=True):
    """Change the category of the article.

    Backwards-compatibility shim: simply forwards to the page object's own
    change_category() method.
    """
    return article.change_category(
        oldCat, newCat, comment=comment, sortKey=sortKey, inPlace=inPlace)
__all__ = ('Category', 'change_category',)
# Emit DeprecationWarnings that point users of this legacy module at the
# modern replacements in pywikibot proper.
wrapper = ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('Category', replacement_name='pywikibot.Category')
wrapper._add_deprecated_attr('change_category', replacement_name='Page.change_category')
acecd2d2b8cc07b6bcdb35af37f52e021ff23dd1 | 3,313 | py | Python | appengine/findit/findit_v2/handlers/async_action.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/findit_v2/handlers/async_action.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/findit_v2/handlers/async_action.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
from gae_libs.handlers.base_handler import BaseHandler, Permission
from google.appengine.ext import ndb
from findit_v2.model.culprit_action import CulpritAction
from findit_v2.services.projects import GetProjectAPI
class AsyncAction(BaseHandler):
  """Performs a culprit action asynchronously.

  Triggered by a push task whose payload is a json dict with keys
  'project', 'culprit_key' and 'action', plus action-specific arguments.
  """
  PERMISSION_LEVEL = Permission.APP_SELF

  def HandlePost(self):
    # Args are a json-encoded dict sent as the push task's payload.
    # BUGFIX: initialize `message` up front so the finally block cannot hit
    # an UnboundLocalError when json.loads (or anything before assignment)
    # raises something other than RuntimeError.
    message = None
    try:
      message = self._RunAction(json.loads(self.request.body))
    except RuntimeError as rte:
      message = rte.message
      raise
    finally:
      if message:
        logging.error(message)

  def _RunAction(self, task_args):
    """Dispatches to the requested action; returns an error message or None."""
    try:
      project_api = GetProjectAPI(task_args['project'])
      culprit = ndb.Key(urlsafe=task_args['culprit_key']).get()
      if task_args['action'] == 'NotifyCulprit':
        return self._Notify(project_api, task_args, culprit)
      elif task_args['action'] == 'RequestReview':
        return self._RequestReview(project_api, task_args, culprit)
      elif task_args['action'] == 'CommitRevert':
        return self._CommitRevert(project_api, task_args, culprit)
      return 'Unknown action %s' % task_args['action']
    except KeyError as ke:
      return 'Push task is missing required argument: %s' % ke.message

  def _Notify(self, project_api, task_args, culprit):
    """Posts a notification comment on the culprit CL."""
    success = project_api.gerrit_actions.NotifyCulprit(
        culprit,
        task_args['message'],
        silent_notification=task_args['silent_notification'])
    if not success:
      raise RuntimeError('Notification failed')

  def _RequestReview(self, project_api, task_args, culprit):
    """Creates a revert of the culprit and asks humans to review it."""
    revert = project_api.gerrit_actions.CreateRevert(
        culprit, task_args['revert_description'])
    if not revert:
      raise RuntimeError('Revert creation failed')
    logging.info('Requesting revert %s to be reviewed', revert['id'])
    success = project_api.gerrit_actions.RequestReview(
        revert, task_args['request_review_message'])
    if not success:
      raise RuntimeError('Requesting revert review failed')
    self._Save(culprit, revert, False)

  def _CommitRevert(self, project_api, task_args, culprit):
    """Creates a revert of the culprit and submits it directly."""
    revert = project_api.gerrit_actions.CreateRevert(
        culprit, task_args['revert_description'])
    if not revert:
      raise RuntimeError('Revert creation failed')
    logging.info('Submitting revert %s', revert['id'])
    success = project_api.gerrit_actions.CommitRevert(
        revert, task_args['request_confirmation_message'])
    if not success:
      raise RuntimeError('Submitting revert failed')
    self._Save(culprit, revert, True)
    return None

  @ndb.transactional
  def _Save(self, culprit, revert, committed):
    """Records the revert on the culprit's action entity (idempotently)."""
    action = CulpritAction.CreateKey(culprit).get()
    if not action.revert_change:
      action.revert_change = revert['id']
      action.revert_committed = committed
      action.put()
    else:
      # BUGFIX: fixed the 'culrpit' typo and the missing space between the
      # two concatenated string fragments in the original log message.
      logging.warning(
          'Possible duplicate revert for culprit %s. '
          'We created %s, but datastore says %s already exists.', culprit,
          revert['id'], action.revert_change)
| 37.224719 | 74 | 0.711138 |
acecd4854ece07ed6dc1f5dcc1823fb46d54e700 | 1,051 | py | Python | common/check_file_type.py | tetrascience/ts-lib-task-script-utils | 23213c526e7723b1365576cede45b9e9e8e6fd73 | [
"Apache-2.0"
] | null | null | null | common/check_file_type.py | tetrascience/ts-lib-task-script-utils | 23213c526e7723b1365576cede45b9e9e8e6fd73 | [
"Apache-2.0"
] | 1 | 2021-01-21T23:04:13.000Z | 2021-01-21T23:04:13.000Z | common/check_file_type.py | tetrascience/ts-lib-task-script-utils | 23213c526e7723b1365576cede45b9e9e8e6fd73 | [
"Apache-2.0"
] | null | null | null | import typing as t
import sys
def check_file_type(filename: str, expected_type: t.Union[str, list]) -> None:
    """Checks if a given file has the type or types that are passed in.

    Args:
        filename (str): the filename in question
        expected_type (t.Union[str, list]): either a string with the expected
            filetype or a list of strings with expected file types

    Exits the process with an explanatory message when the extension does not
    match; raises ValueError when `expected_type` is neither str nor list.
    """
    # Extension = text after the last '.', lower-cased (the whole name when
    # the filename contains no dot — same behaviour as the original).
    extension = filename.rstrip().split('.')[-1].lower()
    # BUGFIX/idiom: use isinstance() instead of `type(x) == str`, and test the
    # condition explicitly instead of `assert`, which is stripped under -O.
    if isinstance(expected_type, str):
        matched = extension == expected_type.lower()
    elif isinstance(expected_type, list):
        matched = extension in [file_type.lower() for file_type in expected_type]
    else:
        raise ValueError(f'expected string or list but received {expected_type}')
    if not matched:
        sys.exit(f"The pipeline is expecting the file type to be {expected_type}, but the provided file has a file type of {extension}.")
| 42.04 | 137 | 0.669838 |
acecd549527d3d605982841b1ef628e8446daab9 | 1,440 | py | Python | experiments/cnn_comp/cnn_analyze.py | slyubomirsky/relay-bench-1 | abe5a262ee7ded76748130d0fcfbc80e570311c1 | [
"Apache-2.0"
] | null | null | null | experiments/cnn_comp/cnn_analyze.py | slyubomirsky/relay-bench-1 | abe5a262ee7ded76748130d0fcfbc80e570311c1 | [
"Apache-2.0"
] | null | null | null | experiments/cnn_comp/cnn_analyze.py | slyubomirsky/relay-bench-1 | abe5a262ee7ded76748130d0fcfbc80e570311c1 | [
"Apache-2.0"
] | null | null | null | from validate_config import validate
from exp_templates import analysis_template
def generate_listing_settings(config):
    """Build {display name: (framework key, extra query fields)} for every
    framework in the config, plus a 'TF XLA' variant when requested."""
    display_names = {
        'relay': 'Relay',
        'tf': 'TensorFlow',
        'pt': 'Pytorch',
        'mxnet': 'MxNet',
        'nnvm': 'NNVM'
    }
    # Framework-specific fields; opt levels come straight from the config.
    extra_fields = {
        'relay': {'opt_level': config['relay_opt']},
        'nnvm': {'opt_level': config['nnvm_opt']},
        'tf': {'enable_xla': False},
        'mxnet': {},
        'pt': {}
    }
    frameworks = config['frameworks']
    listing_settings = {}
    for fw in frameworks:
        listing_settings[display_names[fw]] = (fw, dict(extra_fields[fw]))
    if 'tf' in frameworks and config['use_xla']:
        listing_settings['TF XLA'] = ('tf', {'enable_xla': True})
    return listing_settings
def generate_data_query(config, dev, network, settings):
    """Assemble the [framework, experiment, n_reps, fields, values] query."""
    framework, special_fields = settings
    # Only single-batch-size configs are supported here: take the first one.
    batch_size = list(config['batch_sizes'])[0]
    field_values = {
        'network': network,
        'device': dev,
        'batch_size': batch_size,
    }
    field_values.update(special_fields)
    fields = ['network', 'device', 'batch_size'] + list(special_fields.keys())
    return [framework, 'cnn_comp', config['n_inputs'], fields, field_values]
# Script entry point: run the shared analysis pipeline for the CNN comparison.
if __name__ == '__main__':
    analysis_template(validate, generate_listing_settings,
                      generate_data_query, use_networks=True)
| 26.181818 | 88 | 0.599306 |
acecd5edfff289eb7c42c69e2a03d03d6c037458 | 1,481 | py | Python | app/models.py | NIelsen-Mudaki/Minute-Pitch | 9a1ee25f72ed42b5a196208c5ffc9a850ee17add | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | app/models.py | NIelsen-Mudaki/Minute-Pitch | 9a1ee25f72ed42b5a196208c5ffc9a850ee17add | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | app/models.py | NIelsen-Mudaki/Minute-Pitch | 9a1ee25f72ed42b5a196208c5ffc9a850ee17add | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | from datetime import datetime
from flask_login import UserMixin, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
class User(UserMixin, db.Model):
    """Application user account with hashed-password authentication."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, nullable=False, unique=True)
    username = db.Column(db.String(255), nullable=False, unique=True)
    email = db.Column(db.String(255))
    password = db.Column(db.String(255))  # stores the werkzeug password hash

    def save(self):
        """Persist this user to the database."""
        # BUGFIX: the original called db.session.add() with no argument,
        # which raises a TypeError; the instance itself must be added.
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """Remove this user from the database."""
        db.session.delete(self)
        db.session.commit()

    def set_password(self, password):
        """Hash the plaintext password and store only the hash."""
        self.password = generate_password_hash(password)

    def check_password(self, password):
        """Return True if the plaintext password matches the stored hash."""
        return check_password_hash(self.password, password)

    def __repr__(self):
        return f'User: {self.username}'
@login_manager.user_loader
def user_loader(user_id):
    # Flask-Login callback: reload the user object from the session's user id.
    return User.query.get(user_id)
class Post(db.Model):
    """A pitch/post authored by a user."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255))
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))  # author
    post = db.Column(db.String(3000))  # body text
    def save(self):
        # Persist this post to the database.
        db.session.add(self)
        db.session.commit()
    def delete(self):
        # Remove this post from the database.
        db.session.delete(self)
        db.session.commit()
    def __repr__(self):
        return f"Post Title: {self.title}"
acecd6aec8729ae864f22877b899fe6b6f671d32 | 16,454 | py | Python | experimental/language_structure/psl/psl_model_multiwoz_test.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | experimental/language_structure/psl/psl_model_multiwoz_test.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | experimental/language_structure/psl/psl_model_multiwoz_test.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for MultiWoz rules."""
import tensorflow as tf
import constrained_evaluation as eval_model # local file import from experimental.language_structure.psl
import data # local file import from experimental.language_structure.psl
import psl_model_multiwoz as model # local file import from experimental.language_structure.psl
import psl_model_multiwoz_test_util as test_util # local file import from experimental.language_structure.psl
class PslRulesTest(tf.test.TestCase):
  """Tests for the MultiWoZ PSL constraint rules.

  The heavily duplicated data-pipeline and model-construction boilerplate is
  factored into private helpers (`_prepare_split`, `_constraints`,
  `_train_and_predict`, `_rule_loss`); every public test method — and
  therefore the test interface — is unchanged.
  """

  def setUp(self):
    super(PslRulesTest, self).setUp()
    self.config = test_util.TEST_MULTIWOZ_CONFIG
    self.data = test_util.DATA
    tf.random.set_seed(self.config['default_seed'])

    # Both splits go through the identical featurize/pad/encode pipeline.
    self.train_ds, _ = self._prepare_split(
        self.data['train_data'], self.data['train_labels'],
        self.config['shuffle_train'])
    self.test_ds, self.test_labels = self._prepare_split(
        self.data['test_data'], self.data['test_labels'],
        self.config['shuffle_test'])

  def _prepare_split(self, raw_dialogs, raw_labels, shuffle):
    """Featurizes, pads and batches one data split.

    Args:
      raw_dialogs: raw dialog data for the split.
      raw_labels: string labels for the split.
      shuffle: whether the resulting dataset should be shuffled.

    Returns:
      A (tf.data.Dataset, padded_one_hot_labels) tuple.
    """
    dialogs = data.add_features(
        raw_dialogs,
        vocab_mapping=self.data['vocab_mapping'],
        accept_words=self.config['accept_words'],
        cancel_words=self.config['cancel_words'],
        end_words=self.config['end_words'],
        greet_words=self.config['greet_words'],
        info_question_words=self.config['info_question_words'],
        insist_words=self.config['insist_words'],
        slot_question_words=self.config['slot_question_words'],
        includes_word=self.config['includes_word'],
        excludes_word=self.config['excludes_word'],
        accept_index=self.config['accept_index'],
        cancel_index=self.config['cancel_index'],
        end_index=self.config['end_index'],
        greet_index=self.config['greet_index'],
        info_question_index=self.config['info_question_index'],
        insist_index=self.config['insist_index'],
        slot_question_index=self.config['slot_question_index'],
        utterance_mask=self.config['utterance_mask'],
        pad_utterance_mask=self.config['pad_utterance_mask'],
        last_utterance_mask=self.config['last_utterance_mask'],
        mask_index=self.config['mask_index'])
    padded_dialogs = data.pad_dialogs(dialogs, self.config['max_dialog_size'],
                                      self.config['max_utterance_size'])
    one_hot_labels = data.one_hot_string_encoding(raw_labels,
                                                  self.config['class_map'])
    padded_labels = data.pad_one_hot_labels(one_hot_labels,
                                            self.config['max_dialog_size'],
                                            self.config['class_map'])
    dataset = data.list_to_dataset(padded_dialogs[0], padded_labels[0],
                                   shuffle, self.config['batch_size'])
    return dataset, padded_labels

  def _constraints(self, rule_name, weight=1.0):
    """Builds a PSL model enforcing a single named rule."""
    return model.PSLModelMultiWoZ((weight,), (rule_name,), config=self.config)

  def _train_and_predict(self, psl_constraints):
    """Trains the constrained model and returns test-set class predictions."""
    constrained_model = test_util.build_constrained_model(
        [self.config['max_dialog_size'], self.config['max_utterance_size']])
    constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
    logits = eval_model.evaluate_constrained_model(constrained_model,
                                                   self.test_ds,
                                                   psl_constraints)
    return tf.math.argmax(logits[0], axis=-1)

  def _rule_loss(self, rule_name):
    """Evaluates one rule's loss on the canned test logits/features."""
    rule = getattr(self._constraints(rule_name), rule_name)
    if rule_name == 'rule_1':
      # rule_1 is the only rule that does not consume dialog features.
      return rule(logits=tf.constant(test_util.LOGITS))
    return rule(logits=tf.constant(test_util.LOGITS), data=test_util.FEATURES)

  def check_greet(self, predictions, mask, class_map):
    """True iff no non-first, non-padded utterance is predicted 'greet'."""
    for dialog_pred, dialog_mask in zip(predictions, mask):
      first = True
      for utterance_pred, utterance_mask in zip(dialog_pred, dialog_mask):
        if first or utterance_mask == 0:
          first = False
          continue
        if utterance_pred == class_map['greet']:
          return False
    return True

  def test_psl_rule_1_run_model(self):
    predictions = self._train_and_predict(self._constraints('rule_1'))
    self.assertTrue(
        self.check_greet(predictions, self.test_labels[1],
                         self.config['class_map']))

  def test_psl_rule_1(self):
    self.assertEqual(self._rule_loss('rule_1'), 1.4)

  def test_psl_rule_2_run_model(self):
    # rule_2 uses a larger weight so the constraint dominates training.
    predictions = self._train_and_predict(self._constraints('rule_2', 10.0))
    self.assertEqual(predictions[2][0], self.config['class_map']['greet'])
    self.assertEqual(predictions[3][0], self.config['class_map']['greet'])

  def test_psl_rule_2(self):
    self.assertEqual(self._rule_loss('rule_2'), 0.6)

  def test_psl_rule_3_run_model(self):
    predictions = self._train_and_predict(self._constraints('rule_3'))
    self.assertEqual(predictions[0][0],
                     self.config['class_map']['init_request'])
    self.assertEqual(predictions[1][0],
                     self.config['class_map']['init_request'])

  def test_psl_rule_3(self):
    self.assertEqual(self._rule_loss('rule_3'), 0.8)

  def test_psl_rule_4_run_model(self):
    predictions = self._train_and_predict(self._constraints('rule_4'))
    self.assertEqual(predictions[1][1],
                     self.config['class_map']['second_request'])
    self.assertEqual(predictions[2][1],
                     self.config['class_map']['second_request'])

  def test_psl_rule_4(self):
    self.assertNear(self._rule_loss('rule_4'), 1.8, err=1e-6)

  def test_psl_rule_5_run_model(self):
    predictions = self._train_and_predict(self._constraints('rule_5'))
    self.assertNotEqual(predictions[1][1],
                        self.config['class_map']['init_request'])
    self.assertNotEqual(predictions[2][1],
                        self.config['class_map']['init_request'])

  def test_psl_rule_5(self):
    self.assertNear(self._rule_loss('rule_5'), 1.4, err=1e-6)

  def test_psl_rule_6_run_model(self):
    predictions = self._train_and_predict(self._constraints('rule_6'))
    self.assertNotEqual(predictions[1][0], self.config['class_map']['greet'])
    self.assertNotEqual(predictions[2][0], self.config['class_map']['greet'])

  def test_psl_rule_6(self):
    self.assertNear(self._rule_loss('rule_6'), 1.4, err=1e-6)

  def test_psl_rule_7_run_model(self):
    predictions = self._train_and_predict(self._constraints('rule_7'))
    self.assertEqual(predictions[1][2], self.config['class_map']['end'])
    self.assertEqual(predictions[2][3], self.config['class_map']['end'])

  def test_psl_rule_7(self):
    self.assertNear(self._rule_loss('rule_7'), 1.1, err=1e-6)

  def test_psl_rule_8(self):
    self.assertNear(self._rule_loss('rule_8'), 0.9, err=1e-6)

  def test_psl_rule_9(self):
    self.assertNear(self._rule_loss('rule_9'), 0.8, err=1e-6)

  def test_psl_rule_10(self):
    self.assertNear(self._rule_loss('rule_10'), 0.3, err=1e-6)

  def test_psl_rule_11(self):
    self.assertNear(self._rule_loss('rule_11'), 0.7, err=1e-6)

  def test_psl_rule_12(self):
    self.assertNear(self._rule_loss('rule_12'), 0.1, err=1e-6)

  def test_compute_loss_per_rule(self):
    psl_constraints = model.PSLModelMultiWoZ((1.0, 2.0),
                                             ('rule_11', 'rule_12'),
                                             config=self.config)
    loss_per_rule = psl_constraints.compute_loss_per_rule(
        logits=tf.constant(test_util.LOGITS), data=test_util.FEATURES)
    self.assertArrayNear(loss_per_rule, [0.7, 0.2], err=1e-6)

  def test_compute_loss(self):
    psl_constraints = model.PSLModelMultiWoZ((1.0, 2.0),
                                             ('rule_11', 'rule_12'),
                                             config=self.config)
    loss = psl_constraints.compute_loss(
        logits=tf.constant(test_util.LOGITS), data=test_util.FEATURES)
    self.assertNear(loss, 0.9, err=1e-6)
if __name__ == '__main__':
  # Discover and run all test cases via the TensorFlow test runner.
  tf.test.main()
| 41.550505 | 110 | 0.656679 |
acecd6d10dddecb3b67753a7eac25d5610f9c944 | 3,395 | py | Python | compiler/repo_processor.py | ejr004/simple_grid_yaml_compiler | 38dfebd9225454001cf4b2716f4be21c9d363fcb | [
"Apache-2.0"
] | null | null | null | compiler/repo_processor.py | ejr004/simple_grid_yaml_compiler | 38dfebd9225454001cf4b2716f4be21c9d363fcb | [
"Apache-2.0"
] | null | null | null | compiler/repo_processor.py | ejr004/simple_grid_yaml_compiler | 38dfebd9225454001cf4b2716f4be21c9d363fcb | [
"Apache-2.0"
] | null | null | null | import re
import urllib2
from urlparse import urlparse, urljoin
def generate_default_file_name(repo_info):
    """Return the local cache path for a repo's downloaded defaults file."""
    return './.temp/{0}_defaults.yaml'.format(repo_info['repo_name'])
def generate_config_schema_file_name(repo_info):
    """Return the local cache path for a repo's downloaded config schema."""
    return './.temp/{0}_schema.yaml'.format(repo_info['repo_name'])
def generate_meta_info_file_name(repo_info):
    """Return the local cache path for a repo's downloaded meta-info file."""
    return './.temp/{0}_info.yaml'.format(repo_info['repo_name'])
def analyse_repo_url(repo_url):
    """Extract organisation and repository names from a repo URL.

    The last two path components of the URL are taken as the organisation
    and repository names respectively.
    """
    match = re.search('//.*/(.*)/(.*)', repo_url)
    # TODO fetch the branch from the URL instead of assuming master.
    return {
        'org_name': match.group(1),
        'repo_name': match.group(2),
        'branch_name': 'master',
    }
def get_meta_info(repo_url):
    """Download a repo's meta-info.yaml from raw.githubusercontent.com.

    The file is cached under ./.temp/. Returns the (closed) file object —
    kept for backward compatibility; callers typically only need f.name.
    Returns None (after printing the error) if anything fails.
    """
    try:
        base_url = urlparse("https://raw.githubusercontent.com/")
        repo_info = analyse_repo_url(repo_url)
        repo_info_list = [repo_info['org_name'], repo_info['repo_name'],
                          repo_info['branch_name'], 'meta-info.yaml']
        relative_url = urlparse("/".join(x.strip() for x in repo_info_list))
        meta_info_url = urljoin(base_url.geturl(), relative_url.geturl())
        response = urllib2.urlopen(meta_info_url)
        meta_info = response.read()
        fname = generate_meta_info_file_name(repo_info)
        # The context manager closes the file; the explicit close() the
        # original performed inside the with-block was redundant.
        with open(fname, 'w') as f:
            f.write(meta_info)
        return f
    except Exception as ex:
        # print() form for consistency with the sibling functions.
        # NOTE(review): ex.message is Python-2 only — confirm before a py3 port.
        print(ex.message)
def get_default_values(repo_url, default_file_name):
    """Download *default_file_name* from the repo's raw GitHub URL.

    The file is cached under ./.temp/ and scanned for runtime variables.
    Returns the local cache path, or None (after printing the error) on
    failure.
    """
    try:
        default_data_base_url = urlparse("https://raw.githubusercontent.com/")
        repo_info = analyse_repo_url(repo_url)
        repo_info_list = [repo_info['org_name'], repo_info['repo_name'],
                          repo_info['branch_name'], default_file_name]
        default_data_relative_url = urlparse(
            "/".join(x.strip() for x in repo_info_list))
        default_data_url = urljoin(default_data_base_url.geturl(),
                                   default_data_relative_url.geturl())
        response = urllib2.urlopen(default_data_url)
        default_data = response.read()
        fname = generate_default_file_name(repo_info)
        with open(fname, 'w') as f:
            f.write(default_data)
        # Process runtime variables (result currently unused, so the
        # original's dead local assignment has been dropped).
        get_runtime_variables(fname)
        return fname
    except Exception as ex:
        print(ex.message)
def get_config_schema(repo_url):
    """Download a repo's config-schema.yaml from raw.githubusercontent.com.

    The schema is cached under ./.temp/. Returns the (closed) file object —
    kept for backward compatibility with get_meta_info() — or None (after
    printing the error) on failure.
    """
    try:
        base_url = urlparse("https://raw.githubusercontent.com/")
        repo_info = analyse_repo_url(repo_url)
        repo_info_list = [repo_info['org_name'], repo_info['repo_name'],
                          repo_info['branch_name'], 'config-schema.yaml']
        relative_url = urlparse("/".join(x.strip() for x in repo_info_list))
        config_schema_url = urljoin(base_url.geturl(), relative_url.geturl())
        response = urllib2.urlopen(config_schema_url)
        config_schema = response.read()
        fname = generate_config_schema_file_name(repo_info)
        # with-block closes the file; the original's inner close() was redundant.
        with open(fname, 'w') as f:
            f.write(config_schema)
        return f
    except Exception as ex:
        print(ex.message)
def get_runtime_variables(fname):
    """Scan *fname* for lines containing a ``runtime-variable-mapping:`` marker.

    The original implementation leaked the file handle and discarded the
    ``re.search`` results, making it a no-op. This version closes the file
    via a context manager and returns the matching lines, e.g. the header of::

        runtime-variable-mapping:
        - &ce_host lightweight-component[name='cream'][type='compute-element'][]

    Returns an empty list when the marker is absent (the only current caller
    ignores the return value, so this stays backward compatible).
    """
    matches = []
    with open(fname, 'r') as f:
        for line in f:
            if re.search("runtime-variable-mapping:", line):
                matches.append(line)
    return matches
| 35.736842 | 120 | 0.657143 |
acecd79151046d660e7c74986be282f5a055d155 | 6,515 | py | Python | tensorflow_federated/python/learning/framework/encoding_utils_test.py | lc0/federated | 29f3997888c5757f6d167acc37ba9fa9cdfbd55b | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/framework/encoding_utils_test.py | lc0/federated | 29f3997888c5757f6d167acc37ba9fa9cdfbd55b | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/framework/encoding_utils_test.py | lc0/federated | 29f3997888c5757f6d167acc37ba9fa9cdfbd55b | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learning.framework.encoding_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.learning import model_examples
from tensorflow_federated.python.learning.framework import encoding_utils
from tensorflow_federated.python.learning.framework import optimizer_utils
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
class EncodingUtilsTest(test.TestCase, parameterized.TestCase):
  """Tests for utilities for building StatefulFns."""

  def test_broadcast_from_model_fn_encoder_fn(self):
    # Building a broadcast function from a model function should yield a
    # StatefulBroadcastFn wrapper.
    broadcast_fn = encoding_utils.build_encoded_broadcast_from_model(
        model_examples.TrainableLinearRegression, _test_encoder_fn())
    self.assertIsInstance(broadcast_fn, tff.utils.StatefulBroadcastFn)
class IterativeProcessTest(test.TestCase, parameterized.TestCase):
  """End-to-end tests using `tff.utils.IterativeProcess`."""

  def test_iterative_process_with_encoding(self):
    model_fn = model_examples.TrainableLinearRegression
    broadcast_fn = encoding_utils.build_encoded_broadcast_from_model(
        model_fn, _test_encoder_fn())
    process = optimizer_utils.build_model_delta_optimizer_process(
        model_fn=model_fn,
        model_to_client_delta_fn=DummyClientDeltaFn,
        server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
        stateful_model_broadcast_fn=broadcast_fn)

    # Three identical two-example clients.
    dataset = tf.data.Dataset.from_tensor_slices({
        'x': [[1., 2.], [3., 4.]],
        'y': [[5.], [6.]]
    }).batch(2)
    federated_data = [dataset] * 3

    # The broadcast state's iteration counter starts at 1 and advances to 2
    # after one round.
    state = process.initialize()
    self.assertEqual(state.model_broadcast_state.trainable.a[0], 1)
    state, _ = process.next(state, federated_data)
    self.assertEqual(state.model_broadcast_state.trainable.a[0], 2)
class DummyClientDeltaFn(optimizer_utils.ClientDeltaFn):
  """Minimal ClientDeltaFn for tests: trains locally, returns a -1s delta."""

  def __init__(self, model_fn):
    self._model = model_fn()

  @property
  def variables(self):
    # No extra variables beyond the model's own.
    return []

  @tf.function
  def __call__(self, dataset, initial_weights):
    # Iterate over the dataset to get new metric values. The reduce is used
    # only to drive train_on_batch once per batch inside the tf.function;
    # the float accumulator itself is a dummy.
    def reduce_fn(dummy, batch):
      self._model.train_on_batch(batch)
      return dummy

    dataset.reduce(tf.constant(0.0), reduce_fn)

    # Create some fake weight deltas to send back: all -1s, matching the
    # structure of the initial trainable weights.
    trainable_weights_delta = tf.nest.map_structure(lambda x: -tf.ones_like(x),
                                                    initial_weights.trainable)
    client_weight = tf.constant(1.0)
    return optimizer_utils.ClientOutput(
        trainable_weights_delta,
        weights_delta_weight=client_weight,
        model_output=self._model.report_local_outputs(),
        optimizer_output={'client_weight': client_weight})
# TODO(b/137613901): Remove this in next update of tfmot package, when
# te.testing is available.
@te.core.tf_style_adaptive_encoding_stage
class PlusOneOverNEncodingStage(te.core.AdaptiveEncodingStageInterface):
  """[Example] adaptive encoding stage, adding 1/N in N-th iteration.

  This is an example implementation of an `AdaptiveEncodingStageInterface` that
  modifies state, which controls the creation of params. This is also a simple
  example of how an `EncodingStageInterface` can be wrapped as an
  `AdaptiveEncodingStageInterface`, without modifying the wrapped encode and
  decode methods.
  """

  # Keys used in the encoded-tensors / params / state dictionaries.
  ENCODED_VALUES_KEY = 'pn_values'
  ADD_PARAM_KEY = 'pn_add'
  ITERATION_STATE_KEY = 'pn_iteration'

  @property
  def name(self):
    """Name of the encoding stage."""
    return 'plus_one_over_n'

  @property
  def compressible_tensors_keys(self):
    """Keys of encoded tensors eligible for further compression."""
    return [self.ENCODED_VALUES_KEY]

  @property
  def commutes_with_sum(self):
    """This stage does not commute with summation."""
    return False

  @property
  def decode_needs_input_shape(self):
    """Decoding does not require the original input shape."""
    return False

  @property
  def state_update_aggregation_modes(self):
    """No state-update tensors, hence no aggregation modes."""
    return {}

  def initial_state(self):
    """Starts the iteration counter N at 1."""
    return {self.ITERATION_STATE_KEY: tf.constant(1, dtype=tf.int32)}

  def update_state(self, state, state_update_tensors):
    """Increments the iteration counter N by 1 each round."""
    del state_update_tensors  # Unused.
    return {
        self.ITERATION_STATE_KEY:
            state[self.ITERATION_STATE_KEY] + tf.constant(1, dtype=tf.int32)
    }

  def get_params(self, state):
    """Derives the additive param 1/N from the iteration counter."""
    # NOTE(review): tf.to_float is deprecated (removed in TF 2.x);
    # tf.cast(x, tf.float32) is the replacement — confirm the TF version
    # pinned by this package before changing.
    params = {
        self.ADD_PARAM_KEY: 1 / tf.to_float(state[self.ITERATION_STATE_KEY])
    }
    return params, params

  def encode(self, x, encode_params):
    """Adds the 1/N param to the input; no state-update tensors."""
    return {self.ENCODED_VALUES_KEY: x + encode_params[self.ADD_PARAM_KEY]}, {}

  def decode(self,
             encoded_tensors,
             decode_params,
             num_summands=None,
             shape=None):
    """Inverts encode() by subtracting the same 1/N param."""
    del num_summands  # Unused.
    del shape  # Unused.
    decoded_x = (
        encoded_tensors[self.ENCODED_VALUES_KEY] -
        decode_params[self.ADD_PARAM_KEY])
    return decoded_x
def _test_encoder_fn():
  """Returns an example mapping of tensor to encoder, determined by shape."""
  identity_encoder = te.encoders.identity()
  test_encoder = te.core.EncoderComposer(PlusOneOverNEncodingStage()).make()

  def encoder_fn(tensor):
    # Multi-element tensors get the PlusOneOverN test encoder; scalars get
    # the identity encoder.
    spec = tf.TensorSpec(tensor.shape, tensor.dtype)
    if np.prod(tensor.shape) > 1:
      return te.core.SimpleEncoder(test_encoder, spec)
    return te.core.SimpleEncoder(identity_encoder, spec)

  return encoder_fn
if __name__ == '__main__':
  # Delegate to the project's test runner.
  test.main()
| 33.239796 | 84 | 0.720952 |
acecd7ba700e5c5944dc3cd3d40e814f4702475d | 1,314 | py | Python | src/Modules/Hvac/hvac.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | 1 | 2016-09-21T19:30:21.000Z | 2016-09-21T19:30:21.000Z | src/Modules/Hvac/hvac.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | null | null | null | src/Modules/Hvac/hvac.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | 1 | 2020-07-23T11:13:36.000Z | 2020-07-23T11:13:36.000Z | """
@name: PyHouse/src/Modules/Hvac/hvac.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2016 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 12, 2015
@Summary:
This is the controlling portion of a complete HVAC system.
PyHouse.House.Hvac.
Thermostats
"""
# Import system type stuff
# Import PyMh files
from Modules.Computer import logging_pyh as Logger
from Modules.Core.data_objects import ThermostatData
from Modules.Hvac.hvac_xml import XML as hvacXML
# Module logger; the padded name keeps log columns aligned across modules.
LOG = Logger.getLogger('PyHouse.Hvac       ')


class Utility(object):
    """Placeholder base class for HVAC helper methods (currently empty)."""
class API(Utility):
    """Public interface of the HVAC module."""

    m_pyhouse_obj = None

    def __init__(self, p_pyhouse_obj):
        self.m_pyhouse_obj = p_pyhouse_obj
        LOG.info("Initialized.")

    def LoadXml(self, p_pyhouse_obj):
        """Read the HVAC XML section and attach it to the house data."""
        l_hvac = hvacXML.read_hvac_xml(p_pyhouse_obj)
        p_pyhouse_obj.House.Hvac = l_hvac
        return l_hvac

    def Start(self):
        LOG.info("Started.")

    def Stop(self):
        LOG.info("Stopped.")

    def SaveXml(self, p_xml):
        """Append this module's XML subtree to p_xml and return it."""
        l_xml = hvacXML.write_hvac_xml(self.m_pyhouse_obj, p_xml)
        p_xml.append(l_xml)
        LOG.info("Saved Hvac XML.")
        return l_xml
# ## END DBK
| 22.655172 | 66 | 0.627854 |
acecd839244baa725fa61a8ec51810236996f179 | 1,035 | py | Python | setup.py | nullptrT/flask_table | d4577307bf3b790fb1d91238019577beb477ee4a | [
"BSD-3-Clause"
] | 215 | 2015-01-09T12:18:19.000Z | 2022-01-31T00:18:29.000Z | setup.py | nullptrT/flask_table | d4577307bf3b790fb1d91238019577beb477ee4a | [
"BSD-3-Clause"
] | 93 | 2015-02-03T22:39:02.000Z | 2022-01-26T04:12:16.000Z | setup.py | nullptrT/flask_table | d4577307bf3b790fb1d91238019577beb477ee4a | [
"BSD-3-Clause"
] | 48 | 2015-04-29T09:23:34.000Z | 2022-01-21T13:50:39.000Z | import os
from setuptools import setup
install_requires = [
'Flask',
'Flask-Babel',
]
if os.path.exists('README'):
with open('README') as f:
readme = f.read()
else:
readme = None
setup(
name='Flask-Table',
packages=['flask_table'],
version='0.5.0',
author='Andrew Plummer',
author_email='plummer574@gmail.com',
url='https://github.com/plumdog/flask_table',
description='HTML tables for use with the Flask micro-framework',
install_requires=install_requires,
test_suite='tests',
tests_require=['flask-testing'],
long_description=readme,
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
'Framework :: Flask',
])
| 26.538462 | 69 | 0.617391 |
acecd888e4cde2236f71dfacb4bf7b67cc8f7645 | 107 | py | Python | quantity/digger/datasource/impl/__init__.py | wyjcpu/quantity | a53126a430f12b5bac81a52b2fe749cc497faf36 | [
"MIT"
] | null | null | null | quantity/digger/datasource/impl/__init__.py | wyjcpu/quantity | a53126a430f12b5bac81a52b2fe749cc497faf36 | [
"MIT"
] | null | null | null | quantity/digger/datasource/impl/__init__.py | wyjcpu/quantity | a53126a430f12b5bac81a52b2fe749cc497faf36 | [
"MIT"
] | 1 | 2021-05-11T09:33:59.000Z | 2021-05-11T09:33:59.000Z | # -*- coding: utf-8 -*-
import csv_source
import tushare_source
import mongodb_source
import sqlite_source
| 17.833333 | 23 | 0.794393 |
acecd951e20b502c2e407a1b556d62b62cbf772d | 15,955 | py | Python | ckan/authz.py | Pardhu448/ckan | f4b820b6d6a5b51c7bd4b8906a9b1b57b61d82c1 | [
"Apache-2.0"
] | 1 | 2020-02-08T16:16:51.000Z | 2020-02-08T16:16:51.000Z | ckan/authz.py | Pardhu448/ckan | f4b820b6d6a5b51c7bd4b8906a9b1b57b61d82c1 | [
"Apache-2.0"
] | null | null | null | ckan/authz.py | Pardhu448/ckan | f4b820b6d6a5b51c7bd4b8906a9b1b57b61d82c1 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
import functools
import sys
from collections import defaultdict, OrderedDict
from logging import getLogger
import six
from ckan.common import config
from ckan.common import asbool
import ckan.plugins as p
import ckan.model as model
from ckan.common import _, c
import ckan.lib.maintain as maintain
log = getLogger(__name__)
class AuthFunctions:
    ''' This is a private cache used by get_auth_function() and should never be
    accessed directly we will create an instance of it and then remove it.'''
    _functions = {}

    def clear(self):
        ''' clear any stored auth functions. '''
        self._functions.clear()

    def keys(self):
        ''' Return a list of known auth functions.'''
        # Populate the cache lazily on first access.
        if not self._functions:
            self._build()
        return self._functions.keys()

    def get(self, function):
        ''' Return the requested auth function. '''
        if not self._functions:
            self._build()
        return self._functions.get(function)

    @staticmethod
    def _is_chained_auth_function(func):
        '''
        Helper function to check if a function is a chained auth function, i.e.
        it has been decorated with the chain auth function decorator.
        '''
        return getattr(func, 'chained_auth_function', False)

    def _build(self):
        ''' Gather the auth functions.
        First get the default ones in the ckan/logic/auth directory Rather than
        writing them out in full will use __import__ to load anything from
        ckan.auth that looks like it might be an authorisation function'''

        module_root = 'ckan.logic.auth'

        for auth_module_name in ['get', 'create', 'update', 'delete', 'patch']:
            module_path = '%s.%s' % (module_root, auth_module_name,)
            try:
                module = __import__(module_path)
            except ImportError:
                log.debug('No auth module for action "%s"' % auth_module_name)
                continue

            # __import__ returns the top-level package; walk down attribute
            # by attribute to reach the actual submodule.
            for part in module_path.split('.')[1:]:
                module = getattr(module, part)

            for key, v in module.__dict__.items():
                if not key.startswith('_'):
                    # Whitelist all auth functions defined in
                    # logic/auth/get.py as not requiring an authorized user,
                    # as well as ensuring that the rest do. In both cases, do
                    # nothing if a decorator has already been used to define
                    # the behaviour
                    if not hasattr(v, 'auth_allow_anonymous_access'):
                        if auth_module_name == 'get':
                            v.auth_allow_anonymous_access = True
                        else:
                            v.auth_allow_anonymous_access = False
                    self._functions[key] = v

        # Then overwrite them with any specific ones in the plugins:
        resolved_auth_function_plugins = {}
        fetched_auth_functions = {}
        chained_auth_functions = defaultdict(list)
        for plugin in p.PluginImplementations(p.IAuthFunctions):
            for name, auth_function in plugin.get_auth_functions().items():
                if self._is_chained_auth_function(auth_function):
                    # Chained functions wrap an existing auth function rather
                    # than replacing it; collect them for the pass below.
                    chained_auth_functions[name].append(auth_function)
                elif name in resolved_auth_function_plugins:
                    # Two plugins overriding the same auth function is a
                    # configuration error.
                    raise Exception(
                        'The auth function %r is already implemented in %r' % (
                            name,
                            resolved_auth_function_plugins[name]
                        )
                    )
                else:
                    resolved_auth_function_plugins[name] = plugin.name
                    fetched_auth_functions[name] = auth_function

        for name, func_list in six.iteritems(chained_auth_functions):
            if (name not in fetched_auth_functions and
                    name not in self._functions):
                raise Exception('The auth %r is not found for chained auth' % (
                    name))
            # create the chain of functions in the correct order
            for func in reversed(func_list):
                if name in fetched_auth_functions:
                    prev_func = fetched_auth_functions[name]
                else:
                    # fallback to chaining off the builtin auth function
                    prev_func = self._functions[name]
                fetched_auth_functions[name] = (
                    functools.partial(func, prev_func))

        # Use the updated ones in preference to the originals.
        self._functions.update(fetched_auth_functions)
# Singleton instance of the private cache; the class itself is deleted just
# below so the cache can only be reached through this instance.
_AuthFunctions = AuthFunctions()
#remove the class
del AuthFunctions
def clear_auth_functions_cache():
    '''Empty the private auth-function cache; it repopulates lazily.'''
    _AuthFunctions.clear()
def auth_functions_list():
    '''Returns a list of the names of the auth functions available. Currently
    this is to allow the Auth Audit to know if an auth function is available
    for a given action.'''
    # _AuthFunctions maps action name -> auth function, so its keys are
    # exactly the available auth function names.
    return _AuthFunctions.keys()
def is_sysadmin(username):
    '''Returns True if username is a sysadmin.

    :param username: name of the user to check (may be None/empty)
    :returns: True if the user exists and has the sysadmin flag set,
        otherwise False
    '''
    user = _get_user(username)
    # The previous ``return user and user.sysadmin`` leaked the falsy
    # operand (None or the user object) instead of the documented boolean;
    # bool() makes the return value match the docstring.
    return bool(user and user.sysadmin)
def _get_user(username):
    ''' Try to get the user from c, if possible, and fallback to using the DB

    :param username: the user name to look up (may be None/empty)
    :returns: the user model object, or None if ``username`` is falsy or
        (from the DB fallback) no such user exists
    '''
    if not username:
        return None
    # See if we can get the user without touching the DB
    try:
        if c.userobj and c.userobj.name == username:
            return c.userobj
    except AttributeError:
        # c.userobj not set
        pass
    except TypeError:
        # c is not available (e.g. outside a request)
        pass
    # Get user from the DB
    return model.User.get(username)
def get_group_or_org_admin_ids(group_id):
    '''Return the ids of all active admin users of the given group/org.

    :param group_id: id or name of the group/organization (may be None)
    :returns: list of user ids; empty list if ``group_id`` is falsy or the
        group does not exist
    '''
    if not group_id:
        return []
    group = model.Group.get(group_id)
    if not group:
        # Unknown group name/id: previously this crashed with
        # AttributeError on ``model.Group.get(group_id).id``.
        return []
    q = model.Session.query(model.Member) \
        .filter(model.Member.group_id == group.id) \
        .filter(model.Member.table_name == 'user') \
        .filter(model.Member.state == 'active') \
        .filter(model.Member.capacity == 'admin')
    return [a.table_id for a in q.all()]
def is_authorized_boolean(action, context, data_dict=None):
    '''Run the auth function for *action* and reduce the outcome to a bool.

    Convenience wrapper around :func:`is_authorized`: returns True when the
    action is allowed, False otherwise.
    '''
    result = is_authorized(action, context, data_dict=data_dict)
    return result.get('success', False)
def is_authorized(action, context, data_dict=None):
    '''Run the auth function registered for ``action``.

    :param action: name of the action, used to look up the auth function
    :param context: request context dict; ``ignore_auth``, ``user`` and
        ``auth_user_obj`` keys are consulted here
    :param data_dict: data passed through to the auth function
    :returns: dict with at least a ``success`` key (and ``msg`` on some
        failures)
    :raises ValueError: if no auth function is registered for ``action``
    '''
    # An explicit ignore_auth in the context bypasses all checks.
    if context.get('ignore_auth'):
        return {'success': True}
    auth_function = _AuthFunctions.get(action)
    if auth_function:
        username = context.get('user')
        user = _get_user(username)
        if user:
            # deleted users are always unauthorized
            if user.is_deleted():
                return {'success': False}
            # sysadmins can do anything unless the auth_sysadmins_check
            # decorator was used in which case they are treated like all other
            # users.
            elif user.sysadmin:
                if not getattr(auth_function, 'auth_sysadmins_check', False):
                    return {'success': True}
        # If the auth function is flagged as not allowing anonymous access,
        # and an existing user object is not provided in the context, deny
        # access straight away
        if not getattr(auth_function, 'auth_allow_anonymous_access', False) \
                and not context.get('auth_user_obj'):
            return {
                'success': False,
                # Chained auth functions are functools.partial objects, so
                # the readable name lives on .func rather than the partial.
                'msg': 'Action {0} requires an authenticated user'.format(
                    (auth_function if not isinstance(auth_function, functools.partial)
                     else auth_function.func).__name__)
            }
        return auth_function(context, data_dict)
    else:
        raise ValueError(_('Authorization function not found: %s' % action))
# these are the permissions that roles have
# NOTE: the 'admin' permission is special - code below (e.g.
# _has_user_permission_for_groups / get_roles_with_permission) treats a role
# holding 'admin' as implicitly holding every other permission.
ROLE_PERMISSIONS = OrderedDict([
    ('admin', ['admin']),
    ('editor', ['read', 'delete_dataset', 'create_dataset', 'update_dataset', 'manage_group']),
    ('member', ['read', 'manage_group']),
])
# Translatable display names for the roles in ROLE_PERMISSIONS. These are
# looked up dynamically (by name pattern '_trans_role_<role>') in trans_role(),
# so a new role needs a matching function here.
def _trans_role_admin():
    return _('Admin')
def _trans_role_editor():
    return _('Editor')
def _trans_role_member():
    return _('Member')
def trans_role(role):
    '''Return the translated display name for ``role`` (e.g. ``'admin'``).

    Dispatches to the matching module-level ``_trans_role_<role>`` helper.
    '''
    translator = getattr(sys.modules[__name__], '_trans_role_%s' % role)
    return translator()
def roles_list():
    '''Return the roles as ``{'text': ..., 'value': ...}`` dicts for forms.'''
    return [{'text': trans_role(role), 'value': role}
            for role in ROLE_PERMISSIONS]
def roles_trans():
    '''Return a dict mapping each role name to its translated display name.'''
    return {role: trans_role(role) for role in ROLE_PERMISSIONS}
def get_roles_with_permission(permission):
    '''Return the list of role names that grant the requested permission.

    A role holding the special 'admin' permission implicitly grants
    everything, so it always matches.
    '''
    return [role for role, permissions in ROLE_PERMISSIONS.items()
            if permission in permissions or 'admin' in permissions]
def has_user_permission_for_group_or_org(group_id, user_name, permission):
    ''' Check if the user has the given permissions for the group, allowing for
    sysadmin rights and permission cascading down a group hierarchy.

    :param group_id: id or name of the group/organization (may be None)
    :param user_name: name of the user being checked
    :param permission: permission string, e.g. ``'read'``
    :returns: True if the user has the permission, otherwise False
    '''
    if not group_id:
        return False
    group = model.Group.get(group_id)
    if not group:
        return False
    group_id = group.id
    # Sys admins can do anything
    if is_sysadmin(user_name):
        return True
    user_id = get_user_id_for_username(user_name, allow_none=True)
    if not user_id:
        return False
    if _has_user_permission_for_groups(user_id, permission, [group_id]):
        return True
    # Handle when permissions cascade. Check the user's roles on groups higher
    # in the group hierarchy for permission.
    cascading_capacities = check_config_permission(
        'roles_that_cascade_to_sub_groups')
    if cascading_capacities:
        # The parent hierarchy does not depend on the capacity, so fetch it
        # once instead of re-querying the DB inside the loop (it used to be
        # recomputed for every cascading role).
        parent_groups = group.get_parent_group_hierarchy(type=group.type)
        parent_group_ids = [group_.id for group_ in parent_groups]
        for capacity in cascading_capacities:
            if _has_user_permission_for_groups(user_id, permission,
                                               parent_group_ids,
                                               capacity=capacity):
                return True
    return False
def _has_user_permission_for_groups(user_id, permission, group_ids,
                                    capacity=None):
    ''' Check if the user has the given permissions for the particular
    group (ignoring permissions cascading in a group hierarchy).
    Can also be filtered by a particular capacity.

    :param user_id: id of the user (not the user name)
    :param permission: permission string to check for
    :param group_ids: list of group ids to consider
    :param capacity: if given, only memberships with this capacity count
    :returns: True if any of the user's roles in the groups grants the
        permission, otherwise False
    '''
    if not group_ids:
        return False
    # get any roles the user has for the group
    q = model.Session.query(model.Member) \
        .filter(model.Member.group_id.in_(group_ids)) \
        .filter(model.Member.table_name == 'user') \
        .filter(model.Member.state == 'active') \
        .filter(model.Member.table_id == user_id)
    if capacity:
        q = q.filter(model.Member.capacity == capacity)
    # see if any role has the required permission
    # admin permission allows anything for the group
    for row in q.all():
        perms = ROLE_PERMISSIONS.get(row.capacity, [])
        if 'admin' in perms or permission in perms:
            return True
    return False
def users_role_for_group_or_org(group_id, user_name):
    ''' Returns the user's role for the group. (Ignores privileges that cascade
    in a group hierarchy.)

    :param group_id: id or name of the group/organization (may be None)
    :param user_name: name of the user
    :returns: the capacity string (e.g. ``'admin'``) of the user's first
        active membership, or None if the group/user is unknown or the user
        has no role in the group
    '''
    if not group_id:
        return None
    group = model.Group.get(group_id)
    if not group:
        # Unknown group name/id: previously this crashed with
        # AttributeError on ``model.Group.get(group_id).id``.
        return None
    user_id = get_user_id_for_username(user_name, allow_none=True)
    if not user_id:
        return None
    # get any roles the user has for the group
    q = model.Session.query(model.Member) \
        .filter(model.Member.group_id == group.id) \
        .filter(model.Member.table_name == 'user') \
        .filter(model.Member.state == 'active') \
        .filter(model.Member.table_id == user_id)
    # return the first role we find
    row = q.first()
    return row.capacity if row else None
def has_user_permission_for_some_org(user_name, permission):
    ''' Check if the user has the given permission for any organization. '''
    user_id = get_user_id_for_username(user_name, allow_none=True)
    if not user_id:
        return False
    roles = get_roles_with_permission(permission)
    if not roles:
        return False
    # groups where the user holds one of the roles granting the permission
    memberships = model.Session.query(model.Member) \
        .filter(model.Member.table_name == 'user') \
        .filter(model.Member.state == 'active') \
        .filter(model.Member.capacity.in_(roles)) \
        .filter(model.Member.table_id == user_id)
    group_ids = [membership.group_id for membership in memberships.all()]
    # not being in any group means no permissions anywhere
    if not group_ids:
        return False
    # the permission only counts if at least one of those groups is an
    # active organization
    orgs = model.Session.query(model.Group) \
        .filter(model.Group.is_organization == True) \
        .filter(model.Group.state == 'active') \
        .filter(model.Group.id.in_(group_ids))
    return bool(orgs.count())
def get_user_id_for_username(user_name, allow_none=False):
    ''' Helper function to get user id

    :param user_name: name of the user to resolve
    :param allow_none: if True, return None instead of raising when the
        user cannot be found
    :returns: the user's id
    :raises Exception: if the user is not found and ``allow_none`` is False
    '''
    # first check if we have the user object already and get from there
    try:
        if c.userobj and c.userobj.name == user_name:
            return c.userobj.id
    except (TypeError, AttributeError):
        # c is not available (or c.userobj is not set)
        pass
    user = model.User.get(user_name)
    if user:
        return user.id
    if allow_none:
        return None
    raise Exception('Not logged in user')
# Default values for the configurable auth permissions, consumed by
# check_config_permission() below.
CONFIG_PERMISSIONS_DEFAULTS = {
    # permission and default
    # these are prefixed with ckan.auth. in config to override
    'anon_create_dataset': False,
    'create_dataset_if_not_in_organization': True,
    'create_unowned_dataset': True,
    'user_create_groups': True,
    'user_create_organizations': True,
    'user_delete_groups': True,
    'user_delete_organizations': True,
    'create_user_via_api': False,
    'create_user_via_web': True,
    # space-separated string; parsed into a list by check_config_permission()
    'roles_that_cascade_to_sub_groups': 'admin',
    'public_activity_stream_detail': False,
}
def check_config_permission(permission):
    '''Return the configured value for the given auth permission.

    ``permission`` is a string identifying the auth permission (e.g.
    ``anon_create_dataset``), optionally prefixed with ``ckan.auth.``.
    The possible permissions are the keys of CONFIG_PERMISSIONS_DEFAULTS;
    each can be overridden in the config file by prefixing it with
    ``ckan.auth.``.

    Returns the permission value, generally True or False, except for
    ``roles_that_cascade_to_sub_groups`` which is a list of strings.
    '''
    key = permission.replace('ckan.auth.', '')
    try:
        default_value = CONFIG_PERMISSIONS_DEFAULTS[key]
    except KeyError:
        # unknown permission name
        return False
    value = config.get('ckan.auth.' + key, default_value)
    if key == 'roles_that_cascade_to_sub_groups':
        # This permission is stored as a space separated list of strings
        return value.split() if value else []
    return asbool(value)
@maintain.deprecated('Use auth_is_loggedin_user instead')
def auth_is_registered_user():
    '''
    This function is deprecated, please use the auth_is_loggedin_user instead

    Kept only for backwards compatibility; simply delegates.
    '''
    return auth_is_loggedin_user()
def auth_is_loggedin_user():
    ''' Do we have a logged in user

    :returns: True if ``c.user`` is set and non-empty; False otherwise
        (including when the pylons/flask context ``c`` is unavailable,
        which raises TypeError on attribute access).
    '''
    try:
        context_user = c.user
    except TypeError:
        # c is not available outside a request
        context_user = None
    return bool(context_user)
def auth_is_anon_user(context):
    ''' Is this an anonymous user?
    eg Not logged in if a web request and no user defined in context
    if logic functions called directly
    See ckan/lib/base.py:232 for pylons context object logic

    :param context: context dict; only the ``'user'`` key is consulted
    :returns: True when no (truthy) user is present in the context
    '''
    return not bool(context.get('user'))
| 33.448637 | 95 | 0.641742 |
acecd9c0d591e258c89df3404ed1d1c49f89f044 | 5,483 | py | Python | scrapy/linkextractors/sgml.py | eliasdorneles/scrapy | 71bd79e70fb10ed4899b15ca3ffa9aaa16567727 | [
"BSD-3-Clause"
] | 1 | 2015-08-25T09:42:58.000Z | 2015-08-25T09:42:58.000Z | scrapy/linkextractors/sgml.py | eliasdorneles/scrapy | 71bd79e70fb10ed4899b15ca3ffa9aaa16567727 | [
"BSD-3-Clause"
] | null | null | null | scrapy/linkextractors/sgml.py | eliasdorneles/scrapy | 71bd79e70fb10ed4899b15ca3ffa9aaa16567727 | [
"BSD-3-Clause"
] | null | null | null | """
SGMLParser-based Link extractors
"""
from six.moves.urllib.parse import urljoin
import warnings
from sgmllib import SGMLParser
from w3lib.url import safe_url_string
from scrapy.selector import Selector
from scrapy.link import Link
from scrapy.linkextractors import FilteringLinkExtractor
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.python import unique as unique_list, to_unicode
from scrapy.utils.response import get_base_url
from scrapy.exceptions import ScrapyDeprecationWarning
class BaseSgmlLinkExtractor(SGMLParser):
    """Deprecated link extractor built on ``sgmllib.SGMLParser``.

    Collects ``Link`` objects from tags/attributes accepted by the ``tag``
    and ``attr`` predicates while the parser streams through the body.
    Python 2 only: ``sgmllib`` and the ``unicode`` builtin used below do
    not exist in Python 3.
    """
    def __init__(self, tag="a", attr="href", unique=False, process_value=None):
        warnings.warn(
            "BaseSgmlLinkExtractor is deprecated and will be removed in future releases. "
            "Please use scrapy.linkextractors.LinkExtractor",
            ScrapyDeprecationWarning, stacklevel=2,
        )
        SGMLParser.__init__(self)
        # tag/attr may be a fixed name or a predicate callable
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        # identity by default; lets callers rewrite/filter attribute values
        self.process_value = (lambda v: v) if process_value is None else process_value
        self.current_link = None
        self.unique = unique
    def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
        """ Do the real extraction work """
        self.reset()
        self.feed(response_text)
        self.close()
        ret = []
        if base_url is None:
            # honour a <base href=...> tag seen during parsing, if any
            base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in self.links:
            if isinstance(link.url, unicode):
                link.url = link.url.encode(response_encoding)
            try:
                link.url = urljoin(base_url, link.url)
            except ValueError:
                # skip links whose url cannot be joined with the base
                continue
            link.url = safe_url_string(link.url, response_encoding)
            link.text = to_unicode(link.text, response_encoding, errors='replace').strip()
            ret.append(link)
        return ret
    def _process_links(self, links):
        """ Normalize and filter extracted links
        The subclass should override it if necessary
        """
        links = unique_list(links, key=lambda link: link.url) if self.unique else links
        return links
    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        links = self._extract_links(response.body, response.url, response.encoding)
        links = self._process_links(links)
        return links
    def reset(self):
        # Clear parser state so the instance can be reused across responses.
        SGMLParser.reset(self)
        self.links = []
        self.base_url = None
        self.current_link = None
    def unknown_starttag(self, tag, attrs):
        # SGMLParser callback: record <base> and open a Link for matching tags.
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_value(value)
                    if url is not None:
                        link = Link(url=url, nofollow=True if dict(attrs).get('rel') == 'nofollow' else False)
                        self.links.append(link)
                        self.current_link = link
    def unknown_endtag(self, tag):
        # SGMLParser callback: stop accumulating text for the current link.
        if self.scan_tag(tag):
            self.current_link = None
    def handle_data(self, data):
        # SGMLParser callback: text inside a matched tag becomes the link text.
        if self.current_link:
            self.current_link.text = self.current_link.text + data
    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
class SgmlLinkExtractor(FilteringLinkExtractor):
    """Deprecated filtering link extractor that delegates parsing to
    ``BaseSgmlLinkExtractor`` and filtering (allow/deny patterns, domains,
    extensions, xpath/css restriction) to ``FilteringLinkExtractor``.
    """
    def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
                 tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True,
                 process_value=None, deny_extensions=None, restrict_css=()):
        warnings.warn(
            "SgmlLinkExtractor is deprecated and will be removed in future releases. "
            "Please use scrapy.linkextractors.LinkExtractor",
            ScrapyDeprecationWarning, stacklevel=2,
        )
        tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
        tag_func = lambda x: x in tags
        attr_func = lambda x: x in attrs
        # silence the deprecation warning the inner extractor would emit -
        # the user was already warned about this class
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', ScrapyDeprecationWarning)
            lx = BaseSgmlLinkExtractor(tag=tag_func, attr=attr_func,
                unique=unique, process_value=process_value)
        super(SgmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,
            allow_domains=allow_domains, deny_domains=deny_domains,
            restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,
            canonicalize=canonicalize, deny_extensions=deny_extensions)
        # FIXME: was added to fix a RegexLinkExtractor testcase
        self.base_url = None
    def extract_links(self, response):
        base_url = None
        if self.restrict_xpaths:
            base_url = get_base_url(response)
            # only parse the parts of the body selected by restrict_xpaths
            body = u''.join(f
                            for x in self.restrict_xpaths
                            for f in response.xpath(x).extract()
                            ).encode(response.encoding, errors='xmlcharrefreplace')
        else:
            body = response.body
        links = self._extract_links(body, response.url, response.encoding, base_url)
        links = self._process_links(links)
        return links
| 38.076389 | 110 | 0.634689 |
acecdb43f05639689fa03fac11631a56fc1b082a | 9,222 | py | Python | nipyapi/nifi/models/flow_dto.py | esecules/nipyapi | e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1 | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/flow_dto.py | esecules/nipyapi | e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1 | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/flow_dto.py | esecules/nipyapi | e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.11.1-SNAPSHOT
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlowDTO(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # python attribute name -> swagger type string; iterated by to_dict()
    swagger_types = {
        'process_groups': 'list[ProcessGroupEntity]',
        'remote_process_groups': 'list[RemoteProcessGroupEntity]',
        'processors': 'list[ProcessorEntity]',
        'input_ports': 'list[PortEntity]',
        'output_ports': 'list[PortEntity]',
        'connections': 'list[ConnectionEntity]',
        'labels': 'list[LabelEntity]',
        'funnels': 'list[FunnelEntity]'
    }
    # python attribute name -> JSON key used in the API definition
    attribute_map = {
        'process_groups': 'processGroups',
        'remote_process_groups': 'remoteProcessGroups',
        'processors': 'processors',
        'input_ports': 'inputPorts',
        'output_ports': 'outputPorts',
        'connections': 'connections',
        'labels': 'labels',
        'funnels': 'funnels'
    }
    def __init__(self, process_groups=None, remote_process_groups=None, processors=None, input_ports=None, output_ports=None, connections=None, labels=None, funnels=None):
        """
        FlowDTO - a model defined in Swagger
        """
        self._process_groups = None
        self._remote_process_groups = None
        self._processors = None
        self._input_ports = None
        self._output_ports = None
        self._connections = None
        self._labels = None
        self._funnels = None
        # only assign through the property setters when a value was given
        if process_groups is not None:
          self.process_groups = process_groups
        if remote_process_groups is not None:
          self.remote_process_groups = remote_process_groups
        if processors is not None:
          self.processors = processors
        if input_ports is not None:
          self.input_ports = input_ports
        if output_ports is not None:
          self.output_ports = output_ports
        if connections is not None:
          self.connections = connections
        if labels is not None:
          self.labels = labels
        if funnels is not None:
          self.funnels = funnels
    @property
    def process_groups(self):
        """
        Gets the process_groups of this FlowDTO.
        The process groups in this flow.
        :return: The process_groups of this FlowDTO.
        :rtype: list[ProcessGroupEntity]
        """
        return self._process_groups
    @process_groups.setter
    def process_groups(self, process_groups):
        """
        Sets the process_groups of this FlowDTO.
        The process groups in this flow.
        :param process_groups: The process_groups of this FlowDTO.
        :type: list[ProcessGroupEntity]
        """
        self._process_groups = process_groups
    @property
    def remote_process_groups(self):
        """
        Gets the remote_process_groups of this FlowDTO.
        The remote process groups in this flow.
        :return: The remote_process_groups of this FlowDTO.
        :rtype: list[RemoteProcessGroupEntity]
        """
        return self._remote_process_groups
    @remote_process_groups.setter
    def remote_process_groups(self, remote_process_groups):
        """
        Sets the remote_process_groups of this FlowDTO.
        The remote process groups in this flow.
        :param remote_process_groups: The remote_process_groups of this FlowDTO.
        :type: list[RemoteProcessGroupEntity]
        """
        self._remote_process_groups = remote_process_groups
    @property
    def processors(self):
        """
        Gets the processors of this FlowDTO.
        The processors in this flow.
        :return: The processors of this FlowDTO.
        :rtype: list[ProcessorEntity]
        """
        return self._processors
    @processors.setter
    def processors(self, processors):
        """
        Sets the processors of this FlowDTO.
        The processors in this flow.
        :param processors: The processors of this FlowDTO.
        :type: list[ProcessorEntity]
        """
        self._processors = processors
    @property
    def input_ports(self):
        """
        Gets the input_ports of this FlowDTO.
        The input ports in this flow.
        :return: The input_ports of this FlowDTO.
        :rtype: list[PortEntity]
        """
        return self._input_ports
    @input_ports.setter
    def input_ports(self, input_ports):
        """
        Sets the input_ports of this FlowDTO.
        The input ports in this flow.
        :param input_ports: The input_ports of this FlowDTO.
        :type: list[PortEntity]
        """
        self._input_ports = input_ports
    @property
    def output_ports(self):
        """
        Gets the output_ports of this FlowDTO.
        The output ports in this flow.
        :return: The output_ports of this FlowDTO.
        :rtype: list[PortEntity]
        """
        return self._output_ports
    @output_ports.setter
    def output_ports(self, output_ports):
        """
        Sets the output_ports of this FlowDTO.
        The output ports in this flow.
        :param output_ports: The output_ports of this FlowDTO.
        :type: list[PortEntity]
        """
        self._output_ports = output_ports
    @property
    def connections(self):
        """
        Gets the connections of this FlowDTO.
        The connections in this flow.
        :return: The connections of this FlowDTO.
        :rtype: list[ConnectionEntity]
        """
        return self._connections
    @connections.setter
    def connections(self, connections):
        """
        Sets the connections of this FlowDTO.
        The connections in this flow.
        :param connections: The connections of this FlowDTO.
        :type: list[ConnectionEntity]
        """
        self._connections = connections
    @property
    def labels(self):
        """
        Gets the labels of this FlowDTO.
        The labels in this flow.
        :return: The labels of this FlowDTO.
        :rtype: list[LabelEntity]
        """
        return self._labels
    @labels.setter
    def labels(self, labels):
        """
        Sets the labels of this FlowDTO.
        The labels in this flow.
        :param labels: The labels of this FlowDTO.
        :type: list[LabelEntity]
        """
        self._labels = labels
    @property
    def funnels(self):
        """
        Gets the funnels of this FlowDTO.
        The funnels in this flow.
        :return: The funnels of this FlowDTO.
        :rtype: list[FunnelEntity]
        """
        return self._funnels
    @funnels.setter
    def funnels(self, funnels):
        """
        Sets the funnels of this FlowDTO.
        The funnels in this flow.
        :param funnels: The funnels of this FlowDTO.
        :type: list[FunnelEntity]
        """
        self._funnels = funnels
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recursively serialize list elements that are models
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recursively serialize dict values that are models
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, FlowDTO):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 28.639752 | 479 | 0.589568 |
acecdd4363a72160a78c6c2a6d9d7ac31a179219 | 4,265 | py | Python | pokercoach.py | streamlit-badge-bot/poker-coach | 7ede95a74b1fe83fdb52571180a4613d16b7bd99 | [
"MIT"
] | 2 | 2020-06-23T18:06:18.000Z | 2021-12-18T15:08:19.000Z | pokercoach.py | streamlit-badge-bot/poker-coach | 7ede95a74b1fe83fdb52571180a4613d16b7bd99 | [
"MIT"
] | null | null | null | pokercoach.py | streamlit-badge-bot/poker-coach | 7ede95a74b1fe83fdb52571180a4613d16b7bd99 | [
"MIT"
] | 1 | 2020-11-28T20:00:03.000Z | 2020-11-28T20:00:03.000Z | """ Poker coach web user ui. """
import joblib
import streamlit as st
import numpy as np
import pandas as pd
import st_state_patch
import poker_coach
from poker_coach import handviz
# NOTE(review): ``exception`` is never read anywhere in this script - looks
# like dead code; confirm before removing.
exception = None
# Session State (st.State is provided by the st_state_patch import above)
s = st.State()
if not s:
    # first run for this session: seed the scenario RNG
    s.random_state = np.random.randint(0, 1e9)
# Sidebar
st.sidebar.markdown("# My Poker Coach\nPractice short-stacked no limit hold'em")
st.sidebar.subheader("Scenario")
scenario_options = ("Open Shove", "Call Shove")
scenario = st.sidebar.selectbox(label="Select scenario:", options=scenario_options)
n_players = st.sidebar.slider(
    label="Number of Players:", min_value=2, max_value=9, value=9
)
st.sidebar.subheader("Field")
field_mode = st.sidebar.slider(
    label="Action mode (%)", min_value=1, max_value=99, value=20, step=1
)
field_bandwidth = st.sidebar.slider(
    label="Action bandwidth (%)", min_value=1, max_value=99, value=50, step=1
)
# derive the villains' action range interval from mode +- half the bandwidth,
# clipped to valid percentages
field_min = float(np.clip(field_mode - (field_bandwidth / 2), 0, 100))
field_max = float(np.clip(field_mode + (field_bandwidth / 2), 0, 100))
st.sidebar.subheader("Evaluation")
eval_options = [
    "Monte Carlo",
    "Model",
]
eval_method = st.sidebar.selectbox(label="Evaluation method:", options=eval_options)
if "Monte Carlo" in eval_method:
    monte_carlo = st.sidebar.number_input(
        label="Number of runs:", min_value=100, value=10000, step=100
    )
else:
    # pre-trained equity model shipped with the app
    model = joblib.load("model.pkl")
# Main
if st.button("Next"):
    # new random seed -> new scenario on rerun
    s.random_state = np.random.randint(0, 1e9)
if "Open Shove" in scenario:
    scene = poker_coach.PushFoldScenario(
        n_seats=n_players,
        field=(field_min, field_mode, field_max),
        random_state=s.random_state,
    )
    fig = handviz.hand(
        n_seats=n_players,
        pot=scene.pot,
        hero_name=scene.hero_position,
        hero_hand=scene.hero_hand,
        hero_chips=scene.hero_chips,
        villains_names=scene.villains_after_position,
        villains_ranges=scene.villains_after_range,
        villains_chips=scene.villains_after_chips,
    )
    st.pyplot(fig, clear_figure=True)
    push = st.button("Push")
    fold = st.button("Fold")
    if push or fold:
        with st.spinner("Calculating..."):
            if "Monte Carlo" in eval_method:
                # simulate hero's hand vs each villain's range
                equities = scene.eval_ranges(
                    hero_hand=scene.hero_hand,
                    villains_range=scene.villains_after_range,
                    times=monte_carlo,
                )
            else:
                # predict equity from (hero range %, villain range %) pairs
                hero_descr = poker_coach.equity.hand_to_descr(scene.hero_hand)
                hero_rng = poker_coach.equity.descr_to_percentage(hero_descr)
                equities = np.array(
                    [
                        model.predict(np.array([[hero_rng, villain_rng]]))[0]
                        for villain_rng in scene.villains_after_range
                    ]
                )
            # stakes are capped by the smaller of hero's and villain's stacks
            win_value = scene.pot + np.minimum(
                scene.villains_after_chips, scene.hero_chips
            )
            lose_value = -1 * np.minimum(scene.villains_after_chips, scene.hero_chips)
            showdown_value = scene.expected_value(
                chances=equities, success=win_value, failure=lose_value,
            )
            # chance the villain folds times the pot won uncontested
            fold_equity = scene.pot * (1 - scene.villains_after_range / 100)
            expected_values = np.add(showdown_value, fold_equity)
            df = pd.DataFrame(
                {
                    "Equity (%)": equities,
                    "Showdown Value (BB)": showdown_value,
                    "Fold Equity (BB)": fold_equity,
                    "Expected Value (BB)": expected_values,
                },
                index=scene.villains_after_position,
            )
            # pushing is "correct" only if EV is positive vs every villain
            result = bool(min(expected_values) > 0)
            correct = (result and push) or (not result and fold)
            if correct:
                st.success(f"Correct")
            else:
                st.error(f"Wrong")
            st.table(data=df.style.format("{:.2f}"))
else:
    raise NotImplementedError("To be developed.")
st.sidebar.markdown("")
st.sidebar.markdown("")
st.sidebar.markdown("Powered by [bluff](https://github.com/matheusccouto/bluff)")
st.sidebar.markdown("Contribute on [github](https://github.com/matheusccouto/poker-coach)")
acecdd5560d0dac021e7927aca97585a8b403b6c | 1,381 | py | Python | python/ray/serve/examples/doc/e2e_summarizer_client.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 22 | 2018-05-08T05:52:34.000Z | 2020-04-01T10:09:55.000Z | python/ray/serve/examples/doc/e2e_summarizer_client.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | python/ray/serve/examples/doc/e2e_summarizer_client.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 10 | 2018-04-27T10:50:59.000Z | 2020-02-24T02:41:43.000Z | # __client_class_start__
# File name: summarizer_client.py
import requests
article_text = (
"HOUSTON -- Men have landed and walked on the moon. "
"Two Americans, astronauts of Apollo 11, steered their fragile "
"four-legged lunar module safely and smoothly to the historic landing "
"yesterday at 4:17:40 P.M., Eastern daylight time. Neil A. Armstrong, the "
"38-year-old commander, radioed to earth and the mission control room "
'here: "Houston, Tranquility Base here. The Eagle has landed." The '
"first men to reach the moon -- Armstrong and his co-pilot, Col. Edwin E. "
"Aldrin Jr. of the Air Force -- brought their ship to rest on a level, "
"rock-strewn plain near the southwestern shore of the arid Sea of "
"Tranquility. About six and a half hours later, Armstrong opened the "
"landing craft's hatch, stepped slowly down the ladder and declared as "
"he planted the first human footprint on the lunar crust: \"That's one "
'small step for man, one giant leap for mankind." His first step on the '
"moon came at 10:56:20 P.M., as a television camera outside the craft "
"transmitted his every move to an awed and excited audience of hundreds "
"of millions of people on earth."
)
response = requests.get("http://127.0.0.1:8000/Summarizer?txt=" + article_text).text
print(response)
# __client_class_end__
| 49.321429 | 84 | 0.716872 |
acecddef1998c8ad7a8012e5f71121c2427138de | 4,562 | py | Python | Lib/site-packages/numjy/interpolate/interpolate.py | Yaqiang/jythonlab | d031d85e5bd5f19943c6a410c56ceb734c533534 | [
"CNRI-Jython",
"Apache-2.0"
] | 2 | 2019-03-21T07:14:19.000Z | 2020-06-23T12:53:15.000Z | Lib/site-packages/numjy/interpolate/interpolate.py | Yaqiang/jythonlab | d031d85e5bd5f19943c6a410c56ceb734c533534 | [
"CNRI-Jython",
"Apache-2.0"
] | null | null | null | Lib/site-packages/numjy/interpolate/interpolate.py | Yaqiang/jythonlab | d031d85e5bd5f19943c6a410c56ceb734c533534 | [
"CNRI-Jython",
"Apache-2.0"
] | null | null | null | # coding=utf-8
from org.meteothink.math.interpolate import InterpUtil
from org.meteothink.math import ArrayMath, ArrayUtil
from org.meteothink.ndarray import Array
from numjy.core.multiarray import NDArray
# Public API of this module.
# NOTE(review): ``interp2d`` is defined below but not listed here, so it is
# excluded from ``from ... import *`` - confirm whether that is intentional.
__all__ = [
    'interp1d','RectBivariateSpline'
    ]
class interp1d(object):
    '''
    Interpolate a 1-D function.

    :param x: (*array_like*) A 1-D array of real values.
    :param y: (*array_like*) A 1-D array of real values. The length of y must be equal to the length of x.
    :param kind: (*string*) Specifies the kind of interpolation as a string ('linear',
        'cubic', 'akima', 'divided', 'loess', 'neville'). Default is 'linear'.
    '''
    def __init__(self, x, y, kind='linear'):
        # plain Python lists are wrapped into NDArray before use
        if isinstance(x, list):
            x = NDArray(ArrayUtil.array(x))
        if isinstance(y, list):
            y = NDArray(ArrayUtil.array(y))
        # the underlying Java interpolation function is built once here
        self._func = InterpUtil.getInterpFunc(x.asarray(), y.asarray(), kind)
    def __call__(self, x):
        '''
        Evaluate the interpolated values.

        :param x: (*array_like*) Points to evaluate the interpolant at.
        :returns: a float for a scalar query, otherwise an NDArray
        '''
        if isinstance(x, list):
            x = NDArray(ArrayUtil.array(x))
        r = InterpUtil.evaluate(self._func, x)
        if isinstance(r, float):
            return r
        else:
            return NDArray(r)
class interp2d(object):
    '''
    Interpolate over a 2-D grid.

    x, y and z are arrays of values used to approximate some function f: z = f(x, y).
    This class returns a function whose call method uses spline interpolation to find
    the value of new points.
    If x and y represent a regular grid, consider using RectBivariateSpline.

    :param x: (*array_like*) 1-D arrays of x coordinate in strictly ascending order.
    :param y: (*array_like*) 1-D arrays of y coordinate in strictly ascending order.
    :param z: (*array_like*) 2-D array of data with shape (x.size,y.size).
    :param kind: (*string*) Specifies the kind of interpolation as a string ('linear',
        'nearest'). Default is 'linear'.
    '''
    def __init__(self, x, y, z, kind='linear'):
        if isinstance(x, list):
            x = NDArray(ArrayUtil.array(x))
        if isinstance(y, list):
            y = NDArray(ArrayUtil.array(y))
        if isinstance(z, list):
            z = NDArray(ArrayUtil.array(z))
        # NOTE(review): ``kind`` is accepted but not passed to
        # getBiInterpFunc - confirm whether the Java side supports it.
        self._func = InterpUtil.getBiInterpFunc(x.asarray(), y.asarray(), z.asarray())
    def __call__(self, x, y):
        '''
        Evaluate the interpolated values.

        :param x: (*array_like*) X to evaluate the interpolant at.
        :param y: (*array_like*) Y to evaluate the interpolant at.
        :returns: a float for a scalar query, otherwise an NDArray
        '''
        if isinstance(x, list):
            x = NDArray(ArrayUtil.array(x))
        # Bug fix: this used to test ``isinstance(x, (NDArray, DimArray))``
        # but DimArray is not imported in this module, so every call raised
        # NameError before reaching the evaluation.
        if isinstance(x, NDArray):
            x = x.asarray()
        if isinstance(y, list):
            y = NDArray(ArrayUtil.array(y))
        # NOTE(review): unlike x, y is not unwrapped with .asarray() -
        # confirm that InterpUtil.evaluate accepts an NDArray here.
        r = InterpUtil.evaluate(self._func, x, y)
        if isinstance(r, float):
            return r
        else:
            return NDArray(r)
class RectBivariateSpline(object):
    '''
    Bivariate spline approximation over a rectangular mesh.
    Can be used for both smoothing and interpolating data.

    :param x: (*array_like*) 1-D arrays of x coordinate in strictly ascending order.
    :param y: (*array_like*) 1-D arrays of y coordinate in strictly ascending order.
    :param z: (*array_like*) 2-D array of data with shape (x.size,y.size).
    '''
    def __init__(self, x, y, z):
        if isinstance(x, list):
            x = NDArray(ArrayUtil.array(x))
        if isinstance(y, list):
            y = NDArray(ArrayUtil.array(y))
        if isinstance(z, list):
            z = NDArray(ArrayUtil.array(z))
        self._func = InterpUtil.getBiInterpFunc(x.asarray(), y.asarray(), z.asarray())
    def __call__(self, x, y):
        '''
        Evaluate the interpolated values.

        :param x: (*array_like*) X to evaluate the interpolant at.
        :param y: (*array_like*) Y to evaluate the interpolant at.
        :returns: a float for a scalar query, otherwise an NDArray
        '''
        if isinstance(x, list):
            x = NDArray(ArrayUtil.array(x))
        # Bug fix: this used to test ``isinstance(x, (NDArray, DimArray))``
        # but DimArray is not imported in this module, so every call raised
        # NameError before reaching the evaluation.
        if isinstance(x, NDArray):
            x = x.asarray()
        if isinstance(y, list):
            y = NDArray(ArrayUtil.array(y))
        # NOTE(review): unlike x, y is not unwrapped with .asarray() -
        # confirm that InterpUtil.evaluate accepts an NDArray here.
        r = InterpUtil.evaluate(self._func, x, y)
        if isinstance(r, float):
            return r
        else:
            return NDArray(r)
############################################################ | 36.496 | 106 | 0.583954 |
acecde3d77e85e35d4beb0b8aa65560fd3cf26df | 1,858 | py | Python | test/helper.py | DBastrak/rdflib | 56e8445d6465b9a42b68088f8f553171cfd7ee08 | [
"BSD-3-Clause"
] | 1 | 2020-09-01T16:54:38.000Z | 2020-09-01T16:54:38.000Z | test/helper.py | DBastrak/rdflib | 56e8445d6465b9a42b68088f8f553171cfd7ee08 | [
"BSD-3-Clause"
] | 14 | 2021-07-12T19:07:42.000Z | 2022-01-31T19:10:37.000Z | test/helper.py | DBastrak/rdflib | 56e8445d6465b9a42b68088f8f553171cfd7ee08 | [
"BSD-3-Clause"
] | null | null | null | import time
import urllib.error
import rdflib
import rdflib.query
# Retry tuning for query_with_retry (see its docstring for the worst-case
# total wait derived from these two values).
MAX_RETRY = 10  # total attempts before the last error is re-raised
BACKOFF_FACTOR = 1.5  # exponential multiplier applied between attempts
def query_with_retry(graph: rdflib.Graph, query: str, **kwargs) -> rdflib.query.Result:  # type: ignore[return]
    """Query *graph*, retrying network failures; returns a preloaded result.

    The tests run against outside network targets, which makes them flaky,
    so ``urllib.error.URLError`` is retried up to ``MAX_RETRY`` times with
    an exponential backoff (first retry after 100ms) so we neither slow the
    suite down nor hammer third-party servers.

    The maximum time the function waits is:

    >>> sum((BACKOFF_FACTOR ** backoff) / 10 for backoff in range(MAX_RETRY))
    11.3330078125
    """
    for attempt in range(MAX_RETRY):
        try:
            result = graph.query(query, **kwargs)
            # Touch .bindings so lazily-loaded results fail here, inside
            # the retry loop, rather than later at the call site.
            result.bindings
            return result
        except urllib.error.URLError as e:
            if attempt == MAX_RETRY - 1:
                raise e
            backoff_s = (BACKOFF_FACTOR ** attempt) / 10
            print(f"Network error {e} during query, waiting for {backoff_s:.2f}s and retrying")
            time.sleep(backoff_s)
| 34.407407 | 111 | 0.681378 |
acecdefd01ad9905e6e0e0ce53e7eb39fc23104c | 521 | py | Python | tiles.py | Bacorn42/image-to-cc-level | 5ee9fb4f5969b4a6a6fab89f37ec2e05cc71b281 | [
"MIT"
] | null | null | null | tiles.py | Bacorn42/image-to-cc-level | 5ee9fb4f5969b4a6a6fab89f37ec2e05cc71b281 | [
"MIT"
] | null | null | null | tiles.py | Bacorn42/image-to-cc-level | 5ee9fb4f5969b4a6a6fab89f37ec2e05cc71b281 | [
"MIT"
] | null | null | null | from enum import Enum
class Tile(Enum):
    """Map tile type codes used by the level converter.

    Each member's value is the tile's byte code in the level format.
    """
    FLOOR = 0x00
    WALL = 0x01
    WATER = 0x03
    FIRE = 0x04
    BLOCK = 0x0A
    DIRT = 0x0B
    ICE = 0x0C
    EXIT = 0x15
    BLUE_DOOR = 0x16
    RED_DOOR = 0x17
    GREEN_DOOR = 0x18
    YELLOW_DOOR = 0x19
    FAKE_WALL = 0x1E
    GRAVEL = 0x2D
    RECESSED_WALL = 0x2E
    PINK_BALL = 0x48
    TANK = 0x4C
    BLOB = 0x5C
    BLUE_KEY = 0x64
    RED_KEY = 0x65
    GREEN_KEY = 0x66
    YELLOW_KEY = 0x67

    def isTransparent(self):
        """Return True for tiles with codes at or above PINK_BALL (0x48).

        That range covers the monsters and keys -- presumably sprites that
        are drawn over a floor background rather than as solid terrain.
        """
        return self.value >= Tile.PINK_BALL.value
| 17.366667 | 33 | 0.59309 |
acece0aecf691ed6641dcfdef5ed455257c72464 | 4,585 | py | Python | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_sub_customer_res_fee_records_response.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_sub_customer_res_fee_records_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_sub_customer_res_fee_records_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListSubCustomerResFeeRecordsResponse(SdkResponse):
    """Response model for listing a sub-customer's resource fee records.

    The class-level dictionaries are consumed by the SDK runtime:
    ``openapi_types`` maps attribute names to declared types,
    ``attribute_map`` maps attribute names to their JSON keys, and
    ``sensitive_list`` names attributes masked in ``to_dict``.
    """

    sensitive_list = []

    openapi_types = {
        'fee_records': 'list[SubCustomerResFeeRecordV2]',
        'count': 'int',
        'currency': 'str'
    }

    attribute_map = {
        'fee_records': 'fee_records',
        'count': 'count',
        'currency': 'currency'
    }

    def __init__(self, fee_records=None, count=None, currency=None):
        """ListSubCustomerResFeeRecordsResponse - a model defined in huaweicloud sdk"""
        super().__init__()
        self._fee_records = None
        self._count = None
        self._currency = None
        self.discriminator = None
        # Route each supplied value through its property setter, mirroring
        # the generated per-attribute `if ... is not None` assignments.
        for attr, value in (('fee_records', fee_records),
                            ('count', count),
                            ('currency', currency)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def fee_records(self):
        """Fee (expenditure) records of the customer; see the API's Table 2.

        :rtype: list[SubCustomerResFeeRecordV2]
        """
        return self._fee_records

    @fee_records.setter
    def fee_records(self, fee_records):
        """Set the customer's fee records."""
        self._fee_records = fee_records

    @property
    def count(self):
        """Number of records in the result set; returned only on success.

        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Set the result-set count."""
        self._count = count

    @property
    def currency(self):
        """Currency code (e.g. CNY for Chinese Yuan).

        :rtype: str
        """
        return self._currency

    @currency.setter
    def currency(self, currency):
        """Set the currency code."""
        self._currency = currency

    def to_dict(self):
        """Return the model's properties as a plain dictionary."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            elif attr in self.sensitive_list:
                # Masking applies only to plain scalar attributes, matching
                # the generated code's branch order.
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Responses are equal when both type and attributes match."""
        return (isinstance(other, ListSubCustomerResFeeRecordsResponse)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 27.787879 | 89 | 0.590622 |
acece282e188f6a3dd54eda5a246458446eb7501 | 380 | py | Python | photoFaces.py | pheelyli/facenet-retinaface-pytorch | ea9fba016aa0db9f5e03ec6bb13c79c72cdba0d1 | [
"MIT"
] | null | null | null | photoFaces.py | pheelyli/facenet-retinaface-pytorch | ea9fba016aa0db9f5e03ec6bb13c79c72cdba0d1 | [
"MIT"
] | null | null | null | photoFaces.py | pheelyli/facenet-retinaface-pytorch | ea9fba016aa0db9f5e03ec6bb13c79c72cdba0d1 | [
"MIT"
] | null | null | null |
class Photos:
def __init__(self, dirname):
self.dirname = dirname
self.photos = []
class Photo:
def __init__(self, filename):
self.filename = filename
self.faces = []
class Face:
def __init__(self, personName,x,y,w,h):
self.personName = personName
self.x = x
self.y = y
self.w = w
self.h = h | 19 | 43 | 0.55 |
acece3258f8493602b88aa328771de3b94c0056d | 5,553 | py | Python | python/cirq/rpiDump.py | seunomonije/quantum | 5c82e466f39880bd38dcedb45e48866883da36e9 | [
"MIT"
] | null | null | null | python/cirq/rpiDump.py | seunomonije/quantum | 5c82e466f39880bd38dcedb45e48866883da36e9 | [
"MIT"
] | null | null | null | python/cirq/rpiDump.py | seunomonije/quantum | 5c82e466f39880bd38dcedb45e48866883da36e9 | [
"MIT"
] | null | null | null | import cirq
from cirq.circuits import InsertStrategy
import numpy as np
import matplotlib
import random
import socket
import select
import time
import pickle
# HELPER FUNCTIONS
def bitstring(bits):
return ''.join('1' if e else '0' for e in bits)
def entangle(current, target, circuit):
circuit.append([
cirq.H(current),
cirq.CNOT(current, target)
])
def superdenseCoding():
qreg = [cirq.LineQubit(x) for x in range(2)]
circuit = cirq.Circuit()
# operations dictionary for each message
message = {
"00" : [],
"01" : [cirq.X(qreg[0])],
"10" : [cirq.Z(qreg[0])],
"11" : [cirq.X(qreg[0]), cirq.Z(qreg[0])]
}
# Alice creates a bell pair
circuit.append(cirq.H(qreg[0]))
circuit.append(cirq.CNOT(qreg[0], qreg[1]))
# Choose a random message to send to bob
msgChosenByAlice = random.choice(list(message.keys()))
print("Alice's sent message =", msgChosenByAlice)
# Encode the message with the requested operation
circuit.append(message[msgChosenByAlice])
# Bob applies the CNOT then the Hadamard gate
circuit.append(cirq.CNOT(qreg[0], qreg[1]))
circuit.append(cirq.H(qreg[0]))
# Then Bob decides to measure, insert strategy inline just for circuit clarity
circuit.append([cirq.measure(qreg[0]), cirq.measure(qreg[1])], strategy = InsertStrategy.INLINE)
print("\nCircuit:")
print(circuit)
sim = cirq.Simulator()
res = sim.run(circuit, repetitions=1)
print("\nBob's received message=", bitstring(res.measurements.values()))
def entangle2Qubits(xVal, yVal):
######## ENTANGLEMENT ########
msg = cirq.LineQubit(0)
# Pauli gates, rotate the state around the associated axis with by one (val)-turn
ranq = cirq.X(msg)**xVal, cirq.Y(msg)**yVal
circuit = cirq.Circuit()
circuit.append(ranq)
sim = cirq.Simulator()
original_message = sim.simulate(circuit)
expected = cirq.bloch_vector_from_state_vector(original_message.final_state, 0)
print("expected x: ", expected[0],
"expected y: ", expected[1],
"expected z: ", expected[2],
"\n")
alice, bob = cirq.LineQubit.range(1, 3)
circuit.append([cirq.H(alice), cirq.CNOT(alice, bob)])
circuit.append([cirq.CNOT(msg, alice), cirq.H(msg)])
circuit.append(cirq.measure(msg, alice))
circuit.append([cirq.CNOT(alice, bob), cirq.CZ(msg, bob)])
print(circuit)
print("\n final results:")
final_results = sim.simulate(circuit)
teleported = cirq.bloch_vector_from_state_vector(final_results.final_state, 2)
print("x: ", teleported[0],
"y: ", teleported[1],
"z: ", teleported[2])
## can just return teleported because we expect teleported and expected to be the same
return teleported
def bellInequalityTest():
alice = cirq.GridQubit(0,0)
bob = cirq.GridQubit(1,0)
alice_referee = cirq.GridQubit(0,1)
bob_referee = cirq.GridQubit(1,1)
circuit = cirq.Circuit()
entangle(alice, bob, circuit)
# Add this pauli-x gate, not sure why book doesn't say
circuit.append(cirq.X(alice)**-0.25)
# Referee generates random bit, which Alice and bob will read
circuit.append([
cirq.H(alice_referee),
cirq.H(bob_referee)
], strategy=InsertStrategy.INLINE)
# Alice and Bob do sqrt(X) gate based on the refs values
circuit.append([
cirq.CNOT(alice_referee, alice)**0.5,
cirq.CNOT(bob_referee, bob)**0.5
])
# Measure the results
circuit.append([
cirq.measure(alice, key='a'),
cirq.measure(bob, key='b'),
cirq.measure(alice_referee, key='x'),
cirq.measure(bob_referee, key='y'),
], strategy = InsertStrategy.INLINE)
print("\nCircuit:")
print(circuit)
print()
repetitions = 1000
print('Simulation {} repetitions...'.format(repetitions))
result = cirq.Simulator().run(program=circuit, repetitions=repetitions)
# Result collection
a = np.array(result.measurements['a'][:, 0])
b = np.array(result.measurements['b'][:, 0])
x = np.array(result.measurements['x'][:, 0])
y = np.array(result.measurements['y'][:, 0])
# Get winning percentage
outcomes = a ^ b == x & y
win_percent = len([e for e in outcomes if e]) * 100 / repetitions
# Print data
print('\nResults')
print('a:', bitstring(a))
print('b:', bitstring(b))
print('x:', bitstring(x))
print('y:', bitstring(y))
print('(a XOR b) == (x AND y):\n', bitstring(outcomes))
print('Win rate: {}%'.format(win_percent))
def deutchAlgorithmOperations(qubit1, qubit2, oracle):
yield cirq.X(qubit1)
yield cirq.H(qubit1), cirq.H(qubit2)
yield oracle
yield cirq.H(qubit1)
yield cirq.measure(qubit1)
def deutschAlgorithm():
q0, q1 = cirq.LineQubit.range(2)
oracles = {
'0' : [],
'1' : [cirq.X(q1)],
'X' : [cirq.CNOT(q0, q1)],
'NOTX' : [cirq.CNOT(q0, q1), cirq.X(q1)]
}
# Display the circuits
for key, oracle in oracles.items():
print('Circuit for {}...'.format(key))
print(cirq.Circuit.from_ops(deutchAlgorithmOperations(q0, q1, oracle)), end="\n\n")
simulator = cirq.Simulator()
# Execute the circuit for each oracle
for key,oracle in oracles.items():
result = simulator.run(
cirq.Circuit.from_ops(deutchAlgorithmOperations(q0, q1, oracle)),
repetitions = 10
)
print('oracle: {:<4} results: {}'.format(key,result))
| 29.226316 | 100 | 0.63029 |
acece32a7f4d27fa1ffccbfc4da26b82ba51b21e | 390 | py | Python | src/onegov/town6/views/dashboard.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/town6/views/dashboard.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/town6/views/dashboard.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from onegov.core.security import Secret
from onegov.org.views.dashboard import dashboard
from onegov.town6 import TownApp
from onegov.org.models import Dashboard
from onegov.town6.layout import DashboardLayout
@TownApp.html(model=Dashboard, template='dashboard.pt', permission=Secret)
def town_dashboard(self, request):
return dashboard(self, request, DashboardLayout(self, request))
| 35.454545 | 74 | 0.820513 |
acece3339f466c00f13fb6fd8a9240de15825e37 | 8,327 | py | Python | storage.py | potatolondon/djangoappengine-1-4 | ae4993597f5afcfa0df42f0fa50913f4c85e2b74 | [
"BSD-3-Clause"
] | null | null | null | storage.py | potatolondon/djangoappengine-1-4 | ae4993597f5afcfa0df42f0fa50913f4c85e2b74 | [
"BSD-3-Clause"
] | null | null | null | storage.py | potatolondon/djangoappengine-1-4 | ae4993597f5afcfa0df42f0fa50913f4c85e2b74 | [
"BSD-3-Clause"
] | null | null | null | import mimetypes
import os
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.files.uploadedfile import UploadedFile
from django.core.files.uploadhandler import FileUploadHandler, \
StopFutureHandlers
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.http.multipartparser import ChunkIter, Parser, LazyStream, FILE
from django.utils.encoding import smart_str, force_unicode
from google.appengine.api import files
from google.appengine.api.images import get_serving_url, NotImageError
from google.appengine.ext.blobstore import BlobInfo, BlobKey, delete, \
create_upload_url, BLOB_KEY_HEADER, BLOB_RANGE_HEADER, BlobReader
def prepare_upload(request, url, **kwargs):
    """Return a Blobstore upload URL targeting *url*, plus empty extra context."""
    upload_url = create_upload_url(url)
    return upload_url, {}
def serve_file(request, file, save_as, content_type, **kwargs):
    """Build an HttpResponse instructing App Engine to serve *file* from
    the Blobstore (via the X-AppEngine-BlobKey header).

    *file* may be a BlobKey or any object exposing ``blobstore_info``
    (directly or through a ``.file`` attribute). Raises ValueError for
    anything else.
    """
    blobkey = None
    if isinstance(file, BlobKey):
        blobkey = file
    elif hasattr(file, 'file') and hasattr(file.file, 'blobstore_info'):
        blobkey = file.file.blobstore_info.key()
    elif hasattr(file, 'blobstore_info'):
        blobkey = file.blobstore_info.key()
    if blobkey is None:
        raise ValueError("The provided file can't be served via the "
                         "Google App Engine Blobstore.")
    response = HttpResponse(content_type=content_type)
    response[BLOB_KEY_HEADER] = str(blobkey)
    response['Accept-Ranges'] = 'bytes'
    # Forward any range request so the Blobstore serves the partial content.
    http_range = request.META.get('HTTP_RANGE')
    if http_range is not None:
        response[BLOB_RANGE_HEADER] = http_range
    if save_as:
        response['Content-Disposition'] = smart_str(
            u'attachment; filename="%s"' % save_as)
    info = BlobInfo.get(blobkey)
    if info.size is not None:
        response['Content-Length'] = info.size
    return response
class BlobstoreStorage(Storage):
    """Google App Engine Blobstore storage backend.

    Stored names have the form '<blob-key>/<original-name>', so the blob
    key can always be recovered from the name alone (see _get_key).
    """

    def _open(self, name, mode='rb'):
        # Reads go through BlobstoreFile, which wraps a BlobReader.
        return BlobstoreFile(name, mode, self)

    def _save(self, name, content):
        # Stores `content` and returns the storage name '<blob-key>/<name>'.
        # Accepts either an object already backed by a blob (its existing
        # BlobInfo/BlobKey is reused) or a plain File whose bytes are
        # written to a new blob via the App Engine Files API.
        # Normalize Windows-style separators; names use '/' throughout.
        name = name.replace('\\', '/')
        if hasattr(content, 'file') and \
           hasattr(content.file, 'blobstore_info'):
            # Already uploaded through the Blobstore upload handler.
            data = content.file.blobstore_info
        elif hasattr(content, 'blobstore_info'):
            data = content.blobstore_info
        elif isinstance(content, File):
            # Write the file's bytes into a brand-new blob.
            guessed_type = mimetypes.guess_type(name)[0]
            file_name = files.blobstore.create(mime_type=guessed_type or 'application/octet-stream',
                                               _blobinfo_uploaded_filename=name)
            with files.open(file_name, 'a') as f:
                for chunk in content.chunks():
                    f.write(chunk)
            # The blob key is only fetched after finalize().
            files.finalize(file_name)
            data = files.blobstore.get_blob_key(file_name)
        else:
            raise ValueError("The App Engine storage backend only supports "
                             "BlobstoreFile instances or File instances.")
        if isinstance(data, (BlobInfo, BlobKey)):
            # We change the file name to the BlobKey's str() value.
            if isinstance(data, BlobInfo):
                data = data.key()
            return '%s/%s' % (data, name.lstrip('/'))
        else:
            raise ValueError("The App Engine Blobstore only supports "
                             "BlobInfo values. Data can't be uploaded "
                             "directly. You have to use the file upload "
                             "handler.")

    def delete(self, name):
        # `delete` here is google.appengine.ext.blobstore.delete.
        delete(self._get_key(name))

    def exists(self, name):
        return self._get_blobinfo(name) is not None

    def size(self, name):
        # Size in bytes, as recorded on the BlobInfo entity.
        return self._get_blobinfo(name).size

    def url(self, name):
        try:
            #return a protocol-less URL, because django can't/won't pass
            #down an argument saying whether it should be secure or not
            url = get_serving_url(self._get_blobinfo(name))
            return re.sub("http://", "//", url)
        except NotImageError:
            # get_serving_url raises NotImageError for non-image blobs;
            # those simply get no URL.
            return None

    def created_time(self, name):
        return self._get_blobinfo(name).creation

    def get_valid_name(self, name):
        return force_unicode(name).strip().replace('\\', '/')

    def get_available_name(self, name):
        # No collision handling needed: names embed a unique blob key prefix.
        return name.replace('\\', '/')

    def _get_key(self, name):
        # Names are '<blob-key>/<original-name>'; the key is the first part.
        return BlobKey(name.split('/', 1)[0])

    def _get_blobinfo(self, name):
        return BlobInfo.get(self._get_key(name))
class BlobstoreFile(File):
    """File object backed by a Blobstore blob; reading only."""

    def __init__(self, name, mode, storage):
        self.name = name
        self._storage = storage
        self._mode = mode
        self.blobstore_info = storage._get_blobinfo(name)

    @property
    def size(self):
        """Size in bytes of the underlying blob."""
        return self.blobstore_info.size

    def write(self, content):
        # Writing is not supported on Blobstore-backed files.
        raise NotImplementedError()

    @property
    def file(self):
        """Lazily created BlobReader over the blob's contents."""
        try:
            return self._file
        except AttributeError:
            self._file = BlobReader(self.blobstore_info.key())
            return self._file
class BlobstoreFileUploadHandler(FileUploadHandler):
    """
    File upload handler for the Google App Engine Blobstore.

    The handler buffers the raw multipart body, scans each part for a
    'blob-key' (which the Blobstore upload flow is expected to substitute
    for the file's bytes) and, when one is found for the current field,
    short-circuits all remaining upload handlers.
    """

    def __init__(self, request=None):
        super(BlobstoreFileUploadHandler, self).__init__(request)
        # BlobKey of the current field's upload, if one was found.
        self.blobkey = None

    def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None):
        """
        We can kill a lot of this hackery in Django 1.7 when content_type_extra is actually passed in!

        Scans the body buffered by handle_raw_input for a blob-key that
        belongs to this specific field.
        """
        self.data.seek(0) #Rewind
        data = self.data.read()
        parts = data.split(self.boundary)
        for part in parts:
            match = re.search('blob-key="?(?P<blob_key>[a-zA-Z0-9_=-]+)', part)
            blob_key = match.groupdict().get('blob_key') if match else None
            if not blob_key:
                continue
            #OK, we have a blob key, but is it the one for the field?
            match = re.search('\sname="?(?P<field_name>[a-zA-Z0-9_]+)', part)
            name = match.groupdict().get('field_name') if match else None
            if name != field_name:
                #Nope, not for this field
                continue
            self.blobkey = blob_key
            break
        if self.blobkey:
            self.blobkey = BlobKey(self.blobkey)
            # Stop Django's later handlers from re-processing this file.
            raise StopFutureHandlers()
        else:
            # Not a Blobstore upload; fall back to the default behaviour.
            # NOTE(review): content_type_extra is dropped here -- presumably
            # the base class on this Django version doesn't accept it; confirm.
            return super(BlobstoreFileUploadHandler, self).new_file(field_name, file_name, content_type, content_length, charset)

    def handle_raw_input(self, input_data, META, content_length, boundary, encoding):
        """
        App Engine, for some reason, allows seeking back the wsgi.input. However, FakePayload during testing (correctly) does not
        because that's what the WSGI spec says. However, to make this work we need to abuse the seeking (at least till Django 1.7)
        """
        self.boundary = boundary
        self.data = StringIO(input_data.body) #Create a string IO object
        return None #Pass back to Django

    def receive_data_chunk(self, raw_data, start):
        """
        Add the data to the StringIO file.
        """
        # Returning the chunk passes it on to the next handler; returning
        # None (implicitly, once a blobkey was found) swallows it.
        if not self.blobkey:
            return raw_data

    def file_complete(self, file_size):
        """
        Return a file object if we're activated.
        """
        if not self.blobkey:
            return
        return BlobstoreUploadedFile(
            blobinfo=BlobInfo(self.blobkey),
            charset=self.charset)
class BlobstoreUploadedFile(UploadedFile):
    """An uploaded file whose payload lives in the Blobstore.

    Wraps a BlobReader so the contents are streamed from the Blobstore
    instead of being held in memory.
    """

    def __init__(self, blobinfo, charset):
        reader = BlobReader(blobinfo.key())
        super(BlobstoreUploadedFile, self).__init__(
            reader, blobinfo.filename, blobinfo.content_type,
            blobinfo.size, charset)
        self.blobstore_info = blobinfo

    def open(self, mode=None):
        # The underlying BlobReader is already usable; nothing to do.
        pass

    def chunks(self, chunk_size=1024 * 128):
        """Yield the blob's contents in chunk_size pieces from the start."""
        self.file.seek(0)
        piece = self.read(chunk_size)
        while piece:
            yield piece
            piece = self.read(chunk_size)

    def multiple_chunks(self, chunk_size=1024 * 128):
        # Always report chunked reading so callers iterate via chunks().
        return True
| 33.987755 | 134 | 0.627717 |
acece3582a22169d4e3b9288443fa667ae917bc2 | 25,441 | py | Python | qa/rpc-tests/fundrawtransaction-hd.py | momopay/momo | 2a2ace2f3d2a0b2fd3c9257cbb0225b078ea5c49 | [
"MIT"
] | 3 | 2018-08-26T14:30:53.000Z | 2020-02-13T16:05:05.000Z | qa/rpc-tests/fundrawtransaction-hd.py | momopay/momo | 2a2ace2f3d2a0b2fd3c9257cbb0225b078ea5c49 | [
"MIT"
] | 4 | 2018-03-21T23:19:15.000Z | 2021-07-21T18:11:46.000Z | qa/rpc-tests/fundrawtransaction-hd.py | momopay/momo | 2a2ace2f3d2a0b2fd3c9257cbb0225b078ea5c49 | [
"MIT"
] | 4 | 2018-03-17T20:50:00.000Z | 2018-08-03T16:06:53.000Z | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1'], ['-usehd=1'], ['-usehd=1'], ['-usehd=1']])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent momod feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enought inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 MOC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1'], ['-usehd=1'], ['-usehd=1'], ['-usehd=1']])
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent momod feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(2) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
| 40.770833 | 214 | 0.558233 |
acece51c1a08ef9e5ca6c8051091740531443c1e | 6,197 | py | Python | sdk/python/pulumi_aws/ecr/_inputs.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ecr/_inputs.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ecr/_inputs.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'ReplicationConfigurationReplicationConfigurationArgs',
'ReplicationConfigurationReplicationConfigurationRuleArgs',
'ReplicationConfigurationReplicationConfigurationRuleDestinationArgs',
'RepositoryEncryptionConfigurationArgs',
'RepositoryImageScanningConfigurationArgs',
]
@pulumi.input_type
class ReplicationConfigurationReplicationConfigurationArgs:
def __init__(__self__, *,
rule: pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs']):
"""
:param pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs'] rule: The replication rules for a replication configuration. See Rule.
"""
pulumi.set(__self__, "rule", rule)
@property
@pulumi.getter
def rule(self) -> pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs']:
"""
The replication rules for a replication configuration. See Rule.
"""
return pulumi.get(self, "rule")
@rule.setter
def rule(self, value: pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs']):
pulumi.set(self, "rule", value)
@pulumi.input_type
class ReplicationConfigurationReplicationConfigurationRuleArgs:
def __init__(__self__, *,
destinations: pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]]):
"""
:param pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]] destinations: the details of a replication destination. See Destination.
"""
pulumi.set(__self__, "destinations", destinations)
@property
@pulumi.getter
def destinations(self) -> pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]]:
"""
the details of a replication destination. See Destination.
"""
return pulumi.get(self, "destinations")
@destinations.setter
def destinations(self, value: pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]]):
pulumi.set(self, "destinations", value)
@pulumi.input_type
class ReplicationConfigurationReplicationConfigurationRuleDestinationArgs:
def __init__(__self__, *,
region: pulumi.Input[str],
registry_id: pulumi.Input[str]):
"""
:param pulumi.Input[str] region: A Region to replicate to.
:param pulumi.Input[str] registry_id: The account ID of the destination registry to replicate to.
"""
pulumi.set(__self__, "region", region)
pulumi.set(__self__, "registry_id", registry_id)
@property
@pulumi.getter
def region(self) -> pulumi.Input[str]:
"""
A Region to replicate to.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: pulumi.Input[str]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="registryId")
def registry_id(self) -> pulumi.Input[str]:
"""
The account ID of the destination registry to replicate to.
"""
return pulumi.get(self, "registry_id")
@registry_id.setter
def registry_id(self, value: pulumi.Input[str]):
pulumi.set(self, "registry_id", value)
@pulumi.input_type
class RepositoryEncryptionConfigurationArgs:
def __init__(__self__, *,
encryption_type: Optional[pulumi.Input[str]] = None,
kms_key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] encryption_type: The encryption type to use for the repository. Valid values are `AES256` or `KMS`. Defaults to `AES256`.
:param pulumi.Input[str] kms_key: The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR.
"""
if encryption_type is not None:
pulumi.set(__self__, "encryption_type", encryption_type)
if kms_key is not None:
pulumi.set(__self__, "kms_key", kms_key)
@property
@pulumi.getter(name="encryptionType")
def encryption_type(self) -> Optional[pulumi.Input[str]]:
"""
The encryption type to use for the repository. Valid values are `AES256` or `KMS`. Defaults to `AES256`.
"""
return pulumi.get(self, "encryption_type")
@encryption_type.setter
def encryption_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption_type", value)
@property
@pulumi.getter(name="kmsKey")
def kms_key(self) -> Optional[pulumi.Input[str]]:
"""
The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR.
"""
return pulumi.get(self, "kms_key")
@kms_key.setter
def kms_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key", value)
@pulumi.input_type
class RepositoryImageScanningConfigurationArgs:
def __init__(__self__, *,
scan_on_push: pulumi.Input[bool]):
"""
:param pulumi.Input[bool] scan_on_push: Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false).
"""
pulumi.set(__self__, "scan_on_push", scan_on_push)
@property
@pulumi.getter(name="scanOnPush")
def scan_on_push(self) -> pulumi.Input[bool]:
"""
Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false).
"""
return pulumi.get(self, "scan_on_push")
@scan_on_push.setter
def scan_on_push(self, value: pulumi.Input[bool]):
pulumi.set(self, "scan_on_push", value)
| 38.490683 | 195 | 0.685493 |
acece70b1805458c848e3084dcd274aca46fcad1 | 3,671 | py | Python | dpmModule/item/RootAbyss.py | Jeongwoo-KGI/maplestory_dpm_calc | c474419146e377a05a724e9975a047649b7effa7 | [
"MIT"
] | 2 | 2020-12-18T17:02:21.000Z | 2021-02-01T04:16:33.000Z | dpmModule/item/RootAbyss.py | Jeongwoo-KGI/maplestory_dpm_calc | c474419146e377a05a724e9975a047649b7effa7 | [
"MIT"
] | null | null | null | dpmModule/item/RootAbyss.py | Jeongwoo-KGI/maplestory_dpm_calc | c474419146e377a05a724e9975a047649b7effa7 | [
"MIT"
] | null | null | null | from . import ItemKernel as it
# Alias so the modifier type reads concisely below.
ExMDF = it.ExMDF

## Armors ##
# Level-150 Root Abyss armor pieces. No upgrades are baked in here;
# scrolls / starforce / potential are applied later by Factory.getArmorSetDict().
Top = it.Item(
    name="이글아이 아머",
    level=150,
    main_option=ExMDF(stat_main=30, stat_sub=30, att=2, armor_ignore=5),
)
Bottom = it.Item(
    name="트릭스터 팬츠",
    level=150,
    main_option=ExMDF(stat_main=30, stat_sub=30, att=2, armor_ignore=5),
)
Head = it.Item(
    name="하이네스 햇",
    level=150,
    main_option=ExMDF(stat_main=40, stat_sub=40, att=2, armor_ignore=10),
)

# Per-weapon-type value table: each row is [base value, [enhancement values...]].
# NOTE(review): the exact meaning of the rows is defined by WeaponFactoryClass --
# confirm against ItemKernel before relying on it.
_valueMap = [
    [86, [0, 11, 16, 21, 28, 36]],
    [125, [0, 15, 22, 31, 40, 52]],
    [128, [0, 16, 23, 31, 41, 53]],
    [153, [0, 19, 27, 38, 49, 63]],
    [160, [0, 20, 29, 39, 52, 66]],
    [164, [0, 20, 29, 40, 53, 68]],
    [171, [0, 21, 31, 42, 55, 71]],
    [175, [0, 21, 31, 43, 56, 72]],
    [201, [0, 25, 36, 49, 65, 83]],
    [204, [0, 25, 36, 50, 66, 84]],
    [81, [0, 13, 18, 25, 33, 42]],
    [169, [0, 9, 20, 32, 47, 64]],
]  # Need blade & Zero weapon

WeaponFactory = it.WeaponFactoryClass(
    150,
    _valueMap,
    modifier=it.ExMDF(stat_main=40, stat_sub=40, boss_pdamage=30, armor_ignore=10),
)
class Factory():
    """Builders for Root Abyss equipment: the armor set, weapons, and the
    cumulative set-effect option."""

    @staticmethod
    def getArmorSetDict(star, enhance, potential = None, additional_potential = None, bonus = None, hammer = True):
        """Build the Root Abyss armor pieces as {"head", "top", "bottom"}.

        :param star: starforce level applied to every piece.
        :param enhance: scroll tier; must be 100, 70, or 30 (percent).
        :param potential: potential modifier (fresh empty ExMDF if omitted).
        :param additional_potential: additional-potential modifier.
        :param bonus: bonus-stat modifier applied to every piece.
        :param hammer: True when the golden-hammer extra slot is assumed.
        :raises TypeError: if ``enhance`` is not one of 100/70/30.
        """
        # Create fresh ExMDF defaults per call: evaluating ``it.ExMDF()`` in
        # the signature (as the original did) shares one mutable instance
        # across every call -- the classic mutable-default pitfall.
        potential = potential if potential is not None else it.ExMDF()
        additional_potential = additional_potential if additional_potential is not None else it.ExMDF()
        bonus = bonus if bonus is not None else it.ExMDF()

        # Scroll slot counts for [head, top, bottom].
        upgrades = [12, 8, 8] if hammer else [11, 7, 7]

        # Resolves the old "Simplyfy this dirty codes" TODO: map the scroll
        # tier to the column it occupies in the [100%, 70%, 30%] elist.
        slot_by_enhance = {100: 0, 70: 1, 30: 2}
        if enhance not in slot_by_enhance:
            raise TypeError("enhance must be 100, 70, or 30.")
        slot = slot_by_enhance[enhance]
        scrolls = [[count if col == slot else 0 for col in range(3)] for count in upgrades]

        package = {"head" : Head.copy(), "top" : Top.copy(), "bottom" : Bottom.copy()}
        for idx, itemkey in enumerate(["head", "top", "bottom"]):
            item = package[itemkey]
            item.set_potential(potential)
            item.set_additional_potential(additional_potential)
            item.add_main_option(bonus)
            item.add_main_option(it.EnhancerFactory.get_armor_starforce_enhancement(150, star))
            item.add_main_option(it.EnhancerFactory.get_armor_scroll_enhancement(150, elist = scrolls[idx]))

        return package

    @staticmethod
    def getWeapon(_type, star, elist, potential = None, additional_potential = None, bonusAttIndex = 0, bonusElse = None):
        """Build a level-150 Root Abyss weapon via the shared WeaponFactory."""
        potential = potential if potential is not None else it.ExMDF()
        additional_potential = additional_potential if additional_potential is not None else it.ExMDF()
        bonusElse = bonusElse if bonusElse is not None else it.ExMDF()
        return WeaponFactory.getWeapon(_type, star = star, elist = elist, potential = potential, additional_potential = additional_potential, bonusAttIndex = bonusAttIndex, bonusElse = bonusElse)

    @staticmethod
    def getBlade(_type, star, elist, potential = None, additional_potential = None, bonusElse = None):
        """Build a Root Abyss blade via the shared WeaponFactory."""
        potential = potential if potential is not None else it.ExMDF()
        additional_potential = additional_potential if additional_potential is not None else it.ExMDF()
        bonusElse = bonusElse if bonusElse is not None else it.ExMDF()
        return WeaponFactory.getBlade(_type, star = star, elist = elist, potential = potential, additional_potential = additional_potential, bonusElse = bonusElse)

    @staticmethod
    def getZeroSubweapon(_type, potential = None, additional_potential = None, bonusElse = None):
        """Build a Zero subweapon via the shared WeaponFactory."""
        potential = potential if potential is not None else it.ExMDF()
        additional_potential = additional_potential if additional_potential is not None else it.ExMDF()
        bonusElse = bonusElse if bonusElse is not None else it.ExMDF()
        return WeaponFactory.getZeroSubweapon(_type, potential = potential, additional_potential = additional_potential, bonusElse = bonusElse)

    @staticmethod
    def getSetOption(rank):
        """Return the cumulative set option for wearing ``rank`` set pieces.

        ``li[0]`` is an empty placeholder; the result sums entries 0..rank-1.
        """
        li = [it.ExMDF(),
              it.ExMDF(stat_main=20, stat_sub=20),
              it.ExMDF(att=50),
              it.ExMDF(boss_pdamage=30)]
        # Accumulate into a fresh modifier instead of aliasing li[0] (as the
        # original did), so an in-place ``+=`` can never corrupt the table.
        retval = it.ExMDF()
        for i in range(rank):
            retval += li[i]
        return retval
acece7ac0f9b4eccfa9e4b7a722f3287ce7dce35 | 3,761 | py | Python | src/rest/user_token.py | RestBasePlatform/restbase | 796aa4555709056de06953697ce014c1ca4fd1d0 | [
"Apache-2.0"
] | 5 | 2021-01-31T15:28:09.000Z | 2021-08-24T12:06:55.000Z | src/rest/user_token.py | RestBasePlatform/restbase | 796aa4555709056de06953697ce014c1ca4fd1d0 | [
"Apache-2.0"
] | 4 | 2021-04-05T16:42:00.000Z | 2021-08-24T12:06:58.000Z | src/rest/user_token.py | RestBasePlatform/restbase | 796aa4555709056de06953697ce014c1ca4fd1d0 | [
"Apache-2.0"
] | null | null | null | from flask import make_response
from flask import request
from flask_restful import Resource
from .common_rest import RestCommon
from fields import ADMIN_TOKEN_FIELD_NAME
from fields import DESCRIPTION_FIELD_NAME
from fields import USER_TOKEN_FIELD_NAME
from fields import USER_TOKEN_NAME_FIELD_NAME
class UserToken(Resource):
    """REST resource that creates user tokens; callers must present a valid
    admin token in the request headers."""

    def __init__(self, rest_helper: RestCommon):
        super().__init__()
        self.rest_helper = rest_helper

    def put(self):
        """Create a new user token.

        Flow: validate the request shape (400 on failure), check admin rights
        (403), reject duplicate token values or names (409), then create the
        token (201). Unexpected errors are logged and reported as 400.
        """
        if not self.rest_helper.request_validator.validate_generate_user_token(request):
            self.rest_helper.logger.log_incorrect_request("/UserToken/Get/", request)
            return make_response(*self.rest_helper.get_bad_request_answer())

        token = request.headers.get(ADMIN_TOKEN_FIELD_NAME)

        if not self.rest_helper.token_worker.is_token_admin(token):
            self.rest_helper.logger.log_access_denied("/UserToken/Get/", request, token)
            return make_response("Access denied", 403)

        # NOTE(review): the "/UserToken/Get/" label is logged from a PUT
        # handler; kept as-is in case log consumers match on it.
        if (
            request.args.get(USER_TOKEN_FIELD_NAME)
            in self.rest_helper.local_worker.get_tokens_list()
        ) or (
            request.args.get(USER_TOKEN_NAME_FIELD_NAME)
            in self.rest_helper.local_worker.get_user_tokens_names_list()
        ):
            self.rest_helper.logger.log_status_execution(
                "/UserToken/Get/", token, "error", request, "Token already exists"
            )
            return make_response(
                {"status": "error", "message": "Token already exists"}, 409
            )

        try:
            new_token = self.rest_helper.token_worker.add_token(
                token=request.args.get(USER_TOKEN_FIELD_NAME),
                description=request.args.get(DESCRIPTION_FIELD_NAME),
                token_name=request.args.get(USER_TOKEN_NAME_FIELD_NAME),
            )
            # BUG FIX: the success log previously reused the "Token already
            # exists" message copy-pasted from the duplicate branch above.
            self.rest_helper.logger.log_status_execution(
                "/UserToken/Get/", token, "success", request, "Token created"
            )
            return make_response({"status": "success", "new_token": new_token}, 201)
        except Exception as e:
            self.rest_helper.logger.log_status_execution(
                "/UserToken/Get/", token, "error", request, str(e)
            )
            return make_response({"status": "Unexpected error"}, 400)
class ListUserToken(Resource):
    """REST resource that lists metadata for every user token; admin only."""

    def __init__(self, rest_helper: RestCommon):
        super().__init__()
        self.rest_helper = rest_helper

    def get(self):
        """Return all user tokens with a fixed set of attributes.

        Validates the admin header (400 on malformed request, 403 on a
        non-admin token), then serializes each stored token object.
        """
        if not self.rest_helper.request_validator.is_admin_header_valid(
            request.headers
        ):
            self.rest_helper.logger.log_incorrect_request(
                "/ListUserToken/Get/", request
            )
            return make_response(*self.rest_helper.get_bad_request_answer())

        token = request.headers.get(ADMIN_TOKEN_FIELD_NAME)

        if not self.rest_helper.token_worker.is_token_admin(token):
            self.rest_helper.logger.log_access_denied(
                "/ListUserToken/Get/", request, token
            )
            return make_response("Access denied", 403)

        tokens = self.rest_helper.local_worker.get_user_tokens_objects_list()
        return_attrs = [
            "token",
            "description",
            "granted_tables",
            "admin_access",
            "create_date",
        ]
        # BUG FIX: the original loop reused the name ``token`` for each stored
        # token object, clobbering the admin token that is logged below.
        response_list = [
            {attr: getattr(user_token, attr) for attr in return_attrs}
            for user_token in tokens
        ]

        self.rest_helper.logger.log_status_execution(
            "/ListUserToken/Get/",
            token,
            "success",
            request,
        )
        # NOTE(review): 201 on a GET is unconventional (200 expected), but it
        # is kept for backward compatibility with existing clients.
        return make_response({"status": "success", "tokens": response_list}, 201)
| 36.514563 | 88 | 0.63414 |
acece8211ee3f3ebba87eca1b93f5700a86cc6f1 | 564 | py | Python | SciComputing with Python/CMB/settings.py | evtodorov/aerospace | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | [
"MIT"
] | null | null | null | SciComputing with Python/CMB/settings.py | evtodorov/aerospace | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | [
"MIT"
] | null | null | null | SciComputing with Python/CMB/settings.py | evtodorov/aerospace | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created June 2014
@author: etodorov
"""
global resX, resY, partRows, partCols, cellRows, cellCols, CMB_image_name
resX = 1280 #integer, screen width
resY = 720 #integer, screen heigh
partRows = 36 #integer, particle resolution vertical
partCols = 64 #integer, particle resolution horizontal
cellRows = 5 #integer, cell resolution vertical
cellCols = 7 #integer, cell resolution horizontal
#white - hot; black - cold
CMB_image_name = "cmb_9yr_1024gray.png" #string, black and white png with cosmic microwave background data
| 35.25 | 107 | 0.746454 |
acece856f19a66c9f88e7fec1edd3699e9b9df7a | 46 | py | Python | fun_mooc/__init__.py | pdefromont/fun_mooc | 9c1ae95e6b0383aef8f319f86c33de252c1700a3 | [
"MIT"
] | null | null | null | fun_mooc/__init__.py | pdefromont/fun_mooc | 9c1ae95e6b0383aef8f319f86c33de252c1700a3 | [
"MIT"
] | null | null | null | fun_mooc/__init__.py | pdefromont/fun_mooc | 9c1ae95e6b0383aef8f319f86c33de252c1700a3 | [
"MIT"
] | null | null | null | # -*-coding:utf-8 -*-
from .mooc import MOOC
| 11.5 | 22 | 0.608696 |
acece85ee2d6b7b2b5df90ef0f61ad785118839c | 1,950 | py | Python | tests/test_Global.py | geieracmsu/URS | ff27f18c083c1be11de6a9d81c0c144b2023765d | [
"MIT"
] | null | null | null | tests/test_Global.py | geieracmsu/URS | ff27f18c083c1be11de6a9d81c0c144b2023765d | [
"MIT"
] | null | null | null | tests/test_Global.py | geieracmsu/URS | ff27f18c083c1be11de6a9d81c0c144b2023765d | [
"MIT"
] | null | null | null | import datetime as dt
from urs.utils import Global
### Function names are pretty self-explanatory, so I will not be adding comments
### above the functions.
### Includes a total of 9 tests.
class TestGlobalVariables():
    """Verify the constant values exported by Global.py."""

    def test_date_variable(self):
        # NOTE(review): compares against "now" at assertion time -- this can
        # flake if the test run crosses midnight.
        assert Global.date == dt.datetime.now().strftime("%m-%d-%Y")

    def test_export_options_list(self):
        assert Global.eo == ["csv", "json"]

    def test_options_list(self):
        assert Global.options == ["y", "n"]

    def test_scrape_types_list(self):
        assert Global.s_t == ["subreddit", "redditor", "comments"]

    def test_subreddit_categories_list(self):
        expected = ["Hot", "New", "Controversial", "Top", "Rising", "Search"]
        assert Global.categories == expected

    def test_subreddit_short_cat_list(self):
        names = ["Hot", "New", "Controversial", "Top", "Rising", "Search"]
        assert Global.short_cat == [name[0] for name in names]
class TestConvertTime():
    """
    Test convert_time() function on line 40 in Global.py.
    """

    def test_convert_time(self):
        # NOTE(review): the expected string depends on the timezone in which
        # Global.convert_time() formats the epoch -- confirm the test
        # environment runs in the zone this value was captured in.
        unix_time = 1592291124
        converted_time = "06-16-2020 07:05:24"

        assert Global.convert_time(unix_time) == converted_time
class TestMakeDictionary():
    """Test make_list_dict() and make_none_dict() from Global.py."""

    def test_make_list_dict(self):
        keys = [1, 2, 3, 4]
        # Each key should map to its own fresh empty list.
        expected = {key: [] for key in keys}
        assert Global.make_list_dict(keys) == expected

    def test_make_none_dict(self):
        keys = [1, 2, 3, 4]
        expected = {key: None for key in keys}
        assert Global.make_none_dict(keys) == expected
acece874ab88d7621755e5cc09ae21949d10f0fb | 194 | py | Python | python_utility_functions/__init__.py | daniel-men/python_utility_functions | b627162c884f72a33412cc32a311615287b6df53 | [
"MIT"
] | null | null | null | python_utility_functions/__init__.py | daniel-men/python_utility_functions | b627162c884f72a33412cc32a311615287b6df53 | [
"MIT"
] | null | null | null | python_utility_functions/__init__.py | daniel-men/python_utility_functions | b627162c884f72a33412cc32a311615287b6df53 | [
"MIT"
] | null | null | null | name = 'python_utility_functions'
from .image_utilities import *
from .visualization_utilities import *
from .python_utilities import *
from .path_utilities import *
from .io_utilities import * | 27.714286 | 38 | 0.814433 |
acece8888aa232dc246ca128fc774f0851045677 | 17,171 | py | Python | python/ambassador/diagnostics/envoy_stats.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | null | null | null | python/ambassador/diagnostics/envoy_stats.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | 190 | 2021-04-22T11:35:09.000Z | 2022-03-30T22:12:03.000Z | python/ambassador/diagnostics/envoy_stats.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | 1 | 2022-03-03T18:23:49.000Z | 2022-03-03T18:23:49.000Z | # Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from typing import Any, Callable, Dict, List, Optional, Union
import logging
import requests
import threading
import time
from dataclasses import dataclass
from dataclasses import field as dc_field
def percentage(x: float, y: float) -> int:
    """Return x as a whole percentage of y, rounded half-up; 0 when y is 0."""
    if y == 0:
        return 0
    ratio = (x * 100) / y
    return int(ratio + 0.5)
@dataclass(frozen=True)
class EnvoyStats:
    """Immutable snapshot of statistics scraped from Envoy's admin port,
    plus bookkeeping about when the snapshot was taken.

    All timestamps are Unix epoch seconds.
    """

    max_live_age: int = 120     # seconds of silence before Envoy counts as dead
    max_ready_age: int = 120    # seconds of silence before Envoy counts as not ready
    created: float = 0.0        # when this stats object was created
    last_update: Optional[float] = None   # when we last heard from Envoy
    last_attempt: Optional[float] = None  # when we last tried to poll Envoy
    update_errors: int = 0      # count of failed update attempts

    # Yes yes yes I know -- the contents of these dicts are not immutable.
    # That's OK for now, but realize that you mustn't go munging around altering
    # things in here once they're assigned!
    requests: Dict[str, Any] = dc_field(default_factory=dict)
    clusters: Dict[str, Any] = dc_field(default_factory=dict)
    envoy: Dict[str, Any] = dc_field(default_factory=dict)

    def is_alive(self) -> bool:
        """
        Make sure we've heard from Envoy within max_live_age seconds.

        If we haven't yet heard from Envoy at all (we've just booted),
        consider Envoy alive if we haven't yet been running for max_live_age
        seconds -- basically, Envoy gets a grace period to start running at
        boot time.
        """
        epoch = self.last_update

        if not epoch:
            epoch = self.created

        return (time.time() - epoch) <= self.max_live_age

    def is_ready(self) -> bool:
        """
        Make sure we've heard from Envoy within max_ready_age seconds.

        If we haven't yet heard from Envoy at all (we've just booted),
        then Envoy is not yet ready, and is_ready() returns False.
        """
        epoch = self.last_update

        if not epoch:
            return False

        return (time.time() - epoch) <= self.max_ready_age

    def time_since_boot(self) -> float:
        """ Return the number of seconds since Envoy booted. """
        return time.time() - self.created

    def time_since_update(self) -> Optional[float]:
        """
        Return the number of seconds since we last heard from Envoy, or None if
        we've never heard from Envoy.
        """
        if not self.last_update:
            return None
        else:
            return time.time() - self.last_update

    def cluster_stats(self, name: str) -> Dict[str, Any]:
        """
        Return diagnostic info for the named cluster, annotated with a
        human-readable summary ('health'), a metric value ('hmetric') and a
        display color ('hcolor').

        NOTE: the returned dict mixes strs, bools and ints, so the value type
        is Any (the previous Union[str, bool] annotation was inaccurate).
        """
        if not self.last_update:
            # No updates.
            return {
                'valid': False,
                'reason': "No stats updates have succeeded",
                'health': "no stats yet",
                'hmetric': 'startup',
                'hcolor': 'grey'
            }

        # OK, we should be OK.
        when = self.last_update
        cstat = self.clusters

        if name not in cstat:
            return {
                'valid': False,
                'reason': "Cluster %s is not defined" % name,
                'health': "undefined cluster",
                'hmetric': 'undefined cluster',
                'hcolor': 'orange',
            }

        # Shallow-copy the cluster entry so the annotations below never
        # mutate the shared snapshot ('dict(**d)' worked too, but requires
        # string keys; plain dict() is the direct spelling).
        cstat = dict(cstat[name])

        cstat.update({
            'valid': True,
            'reason': "Cluster %s updated at %d" % (name, when)
        })

        pct = cstat.get('healthy_percent', None)

        # Idiom fix: 'is not None' instead of '!= None'. Behavior is
        # unchanged -- a healthy_percent of 0 is still a real percentage.
        if pct is not None:
            color = 'green'

            if pct < 70:
                color = 'red'
            elif pct < 90:
                color = 'yellow'

            cstat.update({
                'health': "%d%% healthy" % pct,
                'hmetric': int(pct),
                'hcolor': color
            })
        else:
            cstat.update({
                'health': "no requests yet",
                'hmetric': 'waiting',
                'hcolor': 'grey'
            })

        return cstat
# Signature of a hook that queries/sets Envoy log levels: takes an optional
# level name to apply, returns the admin endpoint's response text (None on
# failure). Used as a debugging override in EnvoyStatsMgr.
LogLevelFetcher = Callable[[Optional[str]], Optional[str]]

# Signature of a hook that fetches the raw /stats text from Envoy's admin
# port, returning None on failure.
EnvoyStatsFetcher = Callable[[], Optional[str]]
class EnvoyStatsMgr:
    """
    Polls a local Envoy admin endpoint (127.0.0.1:8001) for log levels and
    statistics, and publishes the results as immutable EnvoyStats snapshots.

    Locking protocol:
      - update_lock serializes whole update cycles; a contender that cannot
        take it immediately bails out (see update()).
      - access_lock guards reads/writes of self.stats and self.loginfo and is
        held only long enough to swap in a new snapshot.
    """
    # fetch_log_levels and fetch_envoy_stats are debugging hooks
    def __init__(self, logger: logging.Logger, max_live_age: int=120, max_ready_age: int=120,
                 fetch_log_levels: Optional[LogLevelFetcher] = None,
                 fetch_envoy_stats: Optional[EnvoyStatsFetcher] = None) -> None:
        self.logger = logger
        self.loginfo: Dict[str, Union[str, List[str]]] = {}
        self.update_lock = threading.Lock()
        self.access_lock = threading.Lock()
        # Fall back to the real HTTP fetchers unless test hooks are supplied.
        self.fetch_log_levels = fetch_log_levels or self._fetch_log_levels
        self.fetch_envoy_stats = fetch_envoy_stats or self._fetch_envoy_stats
        self.stats = EnvoyStats(
            created=time.time(),
            max_live_age=max_live_age,
            max_ready_age=max_ready_age
        )
    def _fetch_log_levels(self, level: Optional[str]) -> Optional[str]:
        # Query (or, when level is set, change) Envoy's log levels via the
        # admin port. Returns the raw response text, or None on failure.
        try:
            url = "http://127.0.0.1:8001/logging"
            if level:
                url += "?level=%s" % level
            r = requests.post(url)
            # OMFG. Querying log levels returns with a 404 code.
            if (r.status_code != 200) and (r.status_code != 404):
                self.logger.warning("EnvoyStats.update_log_levels failed: %s" % r.text)
                return None
            return r.text
        except Exception as e:
            self.logger.warning("EnvoyStats.update_log_levels failed: %s" % e)
            return None
    def _fetch_envoy_stats(self) -> Optional[str]:
        # Fetch the raw stats dump from Envoy's admin port; None on failure.
        # NOTE(review): this catches only OSError while _fetch_log_levels
        # catches Exception -- confirm the asymmetry is intentional.
        try:
            r = requests.get("http://127.0.0.1:8001/stats")
            if r.status_code != 200:
                self.logger.warning("EnvoyStats.update failed: %s" % r.text)
                return None
            return r.text
        except OSError as e:
            self.logger.warning("EnvoyStats.update failed: %s" % e)
            return None
    def update_log_levels(self, last_attempt: float, level: Optional[str]=None) -> bool:
        """
        Heavy lifting around updating the Envoy log levels.
        You MUST hold the update lock when calling this method.
        You MUST NOT hold the access lock when calling this method.
        update_log_levels does all the work of talking to Envoy and computing
        new stats, then grabs the access_lock just long enough to update the data
        structures for others to look at.
        """
        # self.logger.info("updating levels")
        text = self.fetch_log_levels(level)
        if not text:
            # Ew.
            with self.access_lock:
                # EnvoyStats is immutable, so...
                new_stats = EnvoyStats(
                    max_live_age=self.stats.max_live_age,
                    max_ready_age=self.stats.max_ready_age,
                    created=self.stats.created,
                    last_update=self.stats.last_update,
                    last_attempt=last_attempt,  # THIS IS A CHANGE
                    update_errors=self.stats.update_errors + 1,  # THIS IS A CHANGE
                    requests=self.stats.requests,
                    clusters=self.stats.clusters,
                    envoy=self.stats.envoy
                )
                self.stats = new_stats
                return False
        levels: Dict[str, Dict[str, bool]] = {}
        # Parse indented "logtype: level" lines into levels[level][logtype].
        for line in text.split("\n"):
            if not line:
                continue
            if line.startswith(' '):
                ( logtype, level ) = line[2:].split(": ")
                x = levels.setdefault(level, {})
                x[logtype] = True
        # self.logger.info("levels: %s" % levels)
        loginfo: Dict[str, Union[str, List[str]]]
        if len(levels.keys()) == 1:
            # Every logtype shares one level; collapse to a single entry.
            loginfo = { 'all': list(levels.keys())[0] }
        else:
            loginfo = { x: list(levels[x].keys()) for x in levels.keys() }
        with self.access_lock:
            self.loginfo = loginfo
            # self.logger.info("loginfo: %s" % self.loginfo)
            return True
    def get_stats(self) -> EnvoyStats:
        """
        Get the current Envoy stats object, safely.
        You MUST NOT hold the access_lock when calling this method.
        """
        with self.access_lock:
            return self.stats
    def get_prometheus_stats(self) -> str:
        # Proxy Envoy's Prometheus-format stats; empty string on any failure.
        try:
            r = requests.get("http://127.0.0.1:8001/stats/prometheus")
        except OSError as e:
            self.logger.warning("EnvoyStats.get_prometheus_state failed: %s" % e)
            return ''
        if r.status_code != 200:
            self.logger.warning("EnvoyStats.get_prometheus_state failed: %s" % r.text)
            return ''
        return r.text
    def update_envoy_stats(self, last_attempt: float) -> None:
        """
        Heavy lifting around updating the Envoy stats.
        You MUST hold the update lock when calling this method.
        You MUST NOT hold the access lock when calling this method.
        update_envoy_stats does all the work of talking to Envoy and computing
        new stats, then grabs the access_lock just long enough to update the data
        structures for others to look at.
        """
        text = self.fetch_envoy_stats()
        if not text:
            # EnvoyStats is immutable, so...
            new_stats = EnvoyStats(
                max_live_age=self.stats.max_live_age,
                max_ready_age=self.stats.max_ready_age,
                created=self.stats.created,
                last_update=self.stats.last_update,
                last_attempt=last_attempt,  # THIS IS A CHANGE
                update_errors=self.stats.update_errors + 1,  # THIS IS A CHANGE
                requests=self.stats.requests,
                clusters=self.stats.clusters,
                envoy=self.stats.envoy
            )
            with self.access_lock:
                self.stats = new_stats
            return
        # Parse stats into a hierarchy.
        envoy_stats: Dict[str, Any] = {}  # Ew.
        for line in text.split("\n"):
            if not line:
                continue
            # self.logger.info('line: %s' % line)
            key, value = line.split(":")
            keypath = key.split('.')
            node = envoy_stats
            # Walk/create the dotted keypath; node ends at the parent dict.
            for key in keypath[:-1]:
                if key not in node:
                    node[key] = {}
                node = node[key]
            value = value.strip()
            # Skip histograms for the moment.
            # if value.startswith("P0("):
            #     continue
            #     # for field in value.split(' '):
            #     #     if field.startswith('P95('):
            #     #         value = field.split(',')
            try:
                node[keypath[-1]] = int(value)
            # NOTE(review): bare except drops every non-integer stat
            # (histograms, strings); consider narrowing to ValueError.
            except:
                continue
        # Now dig into clusters a bit more.
        requests_info = {}
        active_clusters = {}
        if ("http" in envoy_stats) and ("ingress_http" in envoy_stats["http"]):
            ingress_stats = envoy_stats["http"]["ingress_http"]
            requests_total = ingress_stats.get("downstream_rq_total", 0)
            requests_4xx = ingress_stats.get('downstream_rq_4xx', 0)
            requests_5xx = ingress_stats.get('downstream_rq_5xx', 0)
            requests_bad = requests_4xx + requests_5xx
            requests_ok = requests_total - requests_bad
            requests_info = {
                "total": requests_total,
                "4xx": requests_4xx,
                "5xx": requests_5xx,
                "bad": requests_bad,
                "ok": requests_ok,
            }
        if "cluster" in envoy_stats:
            for cluster_name in envoy_stats['cluster']:
                cluster = envoy_stats['cluster'][cluster_name]
                # # Toss any _%d -- that's madness with our Istio code at the moment.
                # cluster_name = re.sub('_\d+$', '', cluster_name)
                # mapping_name = active_cluster_map[cluster_name]
                # active_mappings[mapping_name] = {}
                # self.logger.info("cluster %s stats: %s" % (cluster_name, cluster))
                healthy_percent: Optional[int]
                healthy_members = cluster['membership_healthy']
                total_members = cluster['membership_total']
                healthy_percent = percentage(healthy_members, total_members)
                update_attempts = cluster['update_attempt']
                update_successes = cluster['update_success']
                update_percent = percentage(update_successes, update_attempts)
                # Weird.
                # upstream_ok = cluster.get('upstream_rq_2xx', 0)
                # upstream_total = cluster.get('upstream_rq_pending_total', 0)
                upstream_total = cluster.get('upstream_rq_completed', 0)
                upstream_4xx = cluster.get('upstream_rq_4xx', 0)
                upstream_5xx = cluster.get('upstream_rq_5xx', 0)
                upstream_bad = upstream_5xx  # used to include 4XX here, but that seems wrong.
                upstream_ok = upstream_total - upstream_bad
                # self.logger.info("%s total %s bad %s ok %s" % (cluster_name, upstream_total, upstream_bad, upstream_ok))
                # Request-based health overrides membership-based health once
                # the cluster has actually served traffic.
                if upstream_total > 0:
                    healthy_percent = percentage(upstream_ok, upstream_total)
                    # self.logger.debug("cluster %s is %d%% healthy" % (cluster_name, healthy_percent))
                else:
                    healthy_percent = None
                    # self.logger.debug("cluster %s has had no requests" % cluster_name)
                active_clusters[cluster_name] = {
                    'healthy_members': healthy_members,
                    'total_members': total_members,
                    'healthy_percent': healthy_percent,
                    'update_attempts': update_attempts,
                    'update_successes': update_successes,
                    'update_percent': update_percent,
                    'upstream_ok': upstream_ok,
                    'upstream_4xx': upstream_4xx,
                    'upstream_5xx': upstream_5xx,
                    'upstream_bad': upstream_bad
                }
        # OK, we're now officially finished with all the hard stuff.
        last_update = time.time()
        # Finally, set up the new EnvoyStats.
        new_stats = EnvoyStats(
            max_live_age=self.stats.max_live_age,
            max_ready_age=self.stats.max_ready_age,
            created=self.stats.created,
            last_update=last_update,  # THIS IS A CHANGE
            last_attempt=last_attempt,  # THIS IS A CHANGE
            update_errors=self.stats.update_errors,
            requests=requests_info,  # THIS IS A CHANGE
            clusters=active_clusters,  # THIS IS A CHANGE
            envoy=envoy_stats  # THIS IS A CHANGE
        )
        # Make sure we hold the access_lock while messing with self.stats!
        with self.access_lock:
            self.stats = new_stats
            # self.logger.info("stats updated")
    def update(self) -> None:
        """
        Update the Envoy stats object, including our take on Envoy's loglevel and
        lower-level statistics.
        You MUST NOT hold the update lock when calling this method.
        You MUST NOT hold the access lock when calling this method.
        The first thing that update does is to acquire the update_lock.
        If it cannot do so immediately, it assumes that another update is already
        running, and returns without doing anything further.
        update uses update_log_levels and update_envoy_stats to do all
        the heavy lifting around talking to Envoy, managing the access_lock, and
        actually writing new data into the Envoy stats object.
        """
        # self.logger.info("updating estats")
        # First up, try bailing early.
        if not self.update_lock.acquire(blocking=False):
            self.logger.warning("EStats update: skipping due to lock contention")
            return
        # If here, we have the lock. Make sure it gets released!
        try:
            # Remember when we started.
            last_attempt = time.time()
            self.update_log_levels(last_attempt)
            self.update_envoy_stats(last_attempt)
        except Exception as e:
            self.logger.exception("could not update Envoy stats: %s" % e)
        finally:
            self.update_lock.release()
| 34.971487 | 122 | 0.565197 |
acece8f918b717ad67ece3e4af5ae7987dad46e5 | 1,168 | py | Python | meiduo_mall/meiduo_mall/apps/payment/migrations/0001_initial.py | Elvira521feng/Django_demo | 957f58c38e6176910d0c554d6c450d445d62b453 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/payment/migrations/0001_initial.py | Elvira521feng/Django_demo | 957f58c38e6176910d0c554d6c450d445d62b453 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/payment/migrations/0001_initial.py | Elvira521feng/Django_demo | 957f58c38e6176910d0c554d6c450d445d62b453 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-09-29 03:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the payment app: creates the
    # tb_payment table linking an order to its external payment trade id.
    initial = True
    dependencies = [
        ('orders', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                # trade_id is nullable/blank -- presumably filled in only once
                # the external payment completes; verify against payment flow.
                ('trade_id', models.CharField(blank=True, max_length=100, null=True, unique=True, verbose_name='支付编号')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.OrderInfo', verbose_name='订单')),
            ],
            options={
                'db_table': 'tb_payment',
                'verbose_name': '支付信息',
                'verbose_name_plural': '支付信息',
            },
        ),
    ]
acece984915bef0badf44877b464f835c362882e | 10,310 | py | Python | pkg/suggestion/v1beta1/hyperopt/base_service.py | xuhdev/katib | fd1dd0d9e503044f5aa24f9b14340f786b4a94cf | [
"Apache-2.0"
] | null | null | null | pkg/suggestion/v1beta1/hyperopt/base_service.py | xuhdev/katib | fd1dd0d9e503044f5aa24f9b14340f786b4a94cf | [
"Apache-2.0"
] | null | null | null | pkg/suggestion/v1beta1/hyperopt/base_service.py | xuhdev/katib | fd1dd0d9e503044f5aa24f9b14340f786b4a94cf | [
"Apache-2.0"
] | 2 | 2020-03-03T06:15:14.000Z | 2020-03-31T05:39:05.000Z | import hyperopt
import numpy as np
import logging
from pkg.suggestion.v1beta1.internal.constant import INTEGER, DOUBLE, CATEGORICAL, DISCRETE, MAX_GOAL
from pkg.suggestion.v1beta1.internal.trial import Assignment
logger = logging.getLogger(__name__)
TPE_ALGORITHM_NAME = "tpe"
RANDOM_ALGORITHM_NAME = "random"
class BaseHyperoptService(object):
    """
    Wrapper around hyperopt's FMin machinery: it maps a Katib search space
    and completed Trials onto hyperopt's Domain/Trials structures and asks
    the chosen algorithm (TPE or random) for new parameter suggestions.
    """
    def __init__(self,
                 algorithm_name=TPE_ALGORITHM_NAME,
                 algorithm_conf=None,
                 search_space=None):
        self.algorithm_name = algorithm_name
        self.algorithm_conf = algorithm_conf or {}
        # pop common configurations
        random_state = self.algorithm_conf.pop('random_state', None)
        if self.algorithm_name == TPE_ALGORITHM_NAME:
            self.hyperopt_algorithm = hyperopt.tpe.suggest
        elif self.algorithm_name == RANDOM_ALGORITHM_NAME:
            self.hyperopt_algorithm = hyperopt.rand.suggest
        # elif algorithm_name == 'hyperopt-anneal':
        #     self.hyperopt_algorithm = hyperopt.anneal.suggest_batch
        # elif algorithm_name == 'hyperopt-mix':
        #     self.hyperopt_algorithm = hyperopt.mix.suggest
        self.search_space = search_space
        # New hyperopt variables
        self.hyperopt_rstate = np.random.RandomState(random_state)
        self.create_hyperopt_domain()
        self.create_fmin()
        # TPE runs its random warm-up exactly once (see getSuggestions).
        self.is_first_run = True
    def create_hyperopt_domain(self):
        """Translate the Katib search space into a hyperopt Domain."""
        # Construct search space, example: {"x": hyperopt.hp.uniform('x', -10, 10), "x2": hyperopt.hp.uniform('x2', -10, 10)}
        hyperopt_search_space = {}
        for param in self.search_space.params:
            if param.type == INTEGER:
                hyperopt_search_space[param.name] = hyperopt.hp.quniform(
                    param.name,
                    float(param.min),
                    float(param.max),
                    float(param.step))
            elif param.type == DOUBLE:
                hyperopt_search_space[param.name] = hyperopt.hp.uniform(
                    param.name,
                    float(param.min),
                    float(param.max))
            elif param.type == CATEGORICAL or param.type == DISCRETE:
                hyperopt_search_space[param.name] = hyperopt.hp.choice(
                    param.name, param.list)
        self.hyperopt_domain = hyperopt.Domain(
            None, hyperopt_search_space, pass_expr_memo_ctrl=None)
    def create_fmin(self):
        """Build an FMinIter we drive manually (max_evals=-1: never "done")."""
        self.fmin = hyperopt.FMinIter(
            self.hyperopt_algorithm,
            self.hyperopt_domain,
            trials=hyperopt.Trials(),
            max_evals=-1,
            rstate=self.hyperopt_rstate,
            verbose=False)
        self.fmin.catch_eval_exceptions = False
    def getSuggestions(self, trials, request_number):
        """
        Get the new suggested trials with the given algorithm.
        """
        recorded_trials_names = self.fmin.trials.specs
        hyperopt_trial_new_ids = []
        hyperopt_trial_specs = []
        hyperopt_trial_results = []
        hyperopt_trial_miscs = []
        # Update hyperopt FMin with new completed Trials
        for trial in trials:
            if {"trial-name": trial.name} not in recorded_trials_names:
                # Produce new id for the new Trial
                new_id = self.fmin.trials.new_trial_ids(1)
                hyperopt_trial_new_ids.append(new_id[0])
                hyperopt_trial_miscs_idxs = {}
                # Example: {'l1_normalization': [0.1], 'learning_rate': [0.1], 'hidden2': [1], 'optimizer': [1]}
                hyperopt_trial_miscs_vals = {}
                # Insert Trial assignment to the misc
                hyperopt_trial_misc = dict(
                    tid=new_id[0], cmd=self.hyperopt_domain.cmd, workdir=self.hyperopt_domain.workdir)
                for param in self.search_space.params:
                    parameter_value = None
                    for assignment in trial.assignments:
                        if assignment.name == param.name:
                            parameter_value = assignment.value
                            break
                    # Choice-type params are recorded by index into param.list.
                    if param.type == INTEGER:
                        hyperopt_trial_miscs_idxs[param.name] = new_id
                        hyperopt_trial_miscs_vals[param.name] = [int(parameter_value)]
                    elif param.type == DOUBLE:
                        hyperopt_trial_miscs_idxs[param.name] = new_id
                        hyperopt_trial_miscs_vals[param.name] = [float(parameter_value)]
                    elif param.type == DISCRETE or param.type == CATEGORICAL:
                        index_of_value_in_list = param.list.index(parameter_value)
                        hyperopt_trial_miscs_idxs[param.name] = new_id
                        hyperopt_trial_miscs_vals[param.name] = [index_of_value_in_list]
                hyperopt_trial_misc["idxs"] = hyperopt_trial_miscs_idxs
                hyperopt_trial_misc["vals"] = hyperopt_trial_miscs_vals
                hyperopt_trial_miscs.append(hyperopt_trial_misc)
                # Insert Trial name to the spec
                hyperopt_trial_spec = {
                    "trial-name": trial.name
                }
                hyperopt_trial_specs.append(hyperopt_trial_spec)
                # Insert Trial result to the result
                # TODO: Use negative objective value for loss or not
                # TODO: Do we need to analyse additional_metrics?
                objective_for_hyperopt = float(trial.target_metric.value)
                if self.search_space.goal == MAX_GOAL:
                    # Now hyperopt only supports fmin and we need to reverse objective value for maximization
                    objective_for_hyperopt = -1 * objective_for_hyperopt
                hyperopt_trial_result = {
                    "loss": objective_for_hyperopt,
                    "status": hyperopt.STATUS_OK
                }
                hyperopt_trial_results.append(hyperopt_trial_result)
        if len(trials) > 0:
            # Create new Trial doc
            hyperopt_trials = hyperopt.Trials().new_trial_docs(
                tids=hyperopt_trial_new_ids,
                specs=hyperopt_trial_specs,
                results=hyperopt_trial_results,
                miscs=hyperopt_trial_miscs)
            for i, _ in enumerate(hyperopt_trials):
                hyperopt_trials[i]["state"] = hyperopt.JOB_STATE_DONE
            # Insert new set of Trial to FMin object
            # Example: of inserting doc with tunning lr
            # [{
            #    'state':2,
            #    'tid':5,
            #    'spec':{
            #       'trial-name':'tpe-example-48xl8whg'
            #    },
            #    'result':{
            #       'loss':-0.1135,
            #       'status':'ok'
            #    },
            #    'misc':{
            #       'tid':5,
            #       'cmd':('domain_attachment','FMinIter_Domain'),
            #       'workdir':None,
            #       'idxs':{
            #          '--lr':[5]
            #       },
            #       'vals':{
            #          '--lr':[0.025351232898626827]
            #       }
            #    },
            #    'exp_key':None,
            #    'owner':None,
            #    'version':0,
            #    'book_time':None,
            #    'refresh_time':None
            # }]
            self.fmin.trials.insert_trial_docs(hyperopt_trials)
            self.fmin.trials.refresh()
        # Produce new request_number ids to make new Suggestion
        hyperopt_trial_new_ids = self.fmin.trials.new_trial_ids(request_number)
        random_state = self.fmin.rstate.randint(2**31 - 1)
        if self.algorithm_name == RANDOM_ALGORITHM_NAME:
            new_trials = self.hyperopt_algorithm(
                new_ids=hyperopt_trial_new_ids,
                domain=self.fmin.domain,
                trials=self.fmin.trials,
                seed=random_state)
        elif self.algorithm_name == TPE_ALGORITHM_NAME:
            # n_startup_jobs indicates for how many Trials we run random suggestion
            # This must be request_number value
            # After this tpe suggestion starts analyse Trial info.
            # On the first run we can run suggest just once with n_startup_jobs
            # Next suggest runs must be for each new Trial generation
            if self.is_first_run:
                new_trials = self.hyperopt_algorithm(
                    new_ids=hyperopt_trial_new_ids,
                    domain=self.fmin.domain,
                    trials=self.fmin.trials,
                    seed=random_state,
                    n_startup_jobs=request_number,
                    **self.algorithm_conf)
                self.is_first_run = False
            else:
                new_trials = []
                for i in range(request_number):
                    # hyperopt_algorithm always returns one new Trial
                    new_trials.append(self.hyperopt_algorithm(
                        new_ids=[hyperopt_trial_new_ids[i]],
                        domain=self.fmin.domain,
                        trials=self.fmin.trials,
                        seed=random_state,
                        n_startup_jobs=request_number,
                        **self.algorithm_conf)[0])
        # Construct return advisor Trials from new hyperopt Trials
        list_of_assignments = []
        for i in range(request_number):
            vals = new_trials[i]['misc']['vals']
            list_of_assignments.append(BaseHyperoptService.convert(self.search_space, vals))
        return list_of_assignments
    @staticmethod
    def convert(search_space, vals):
        """Map a hyperopt 'vals' dict back into a list of Assignments.

        Choice-type (categorical/discrete) values come back from hyperopt as
        indexes into param.list and are translated back to the actual value.
        """
        assignments = []
        for param in search_space.params:
            if param.type == INTEGER:
                assignments.append(Assignment(param.name, int(vals[param.name][0])))
            elif param.type == DOUBLE:
                assignments.append(Assignment(param.name, vals[param.name][0]))
            elif param.type == CATEGORICAL or param.type == DISCRETE:
                assignments.append(
                    Assignment(param.name, param.list[vals[param.name][0]]))
        return assignments
acece9e8dfcfc6028ef58040adc7c049a58e571e | 31,181 | py | Python | src/sagemaker/workflow/steps.py | minlu1021/sagemaker-python-sdk | 7268e82d45b522424891ddd8267ad00ef0cffd23 | [
"Apache-2.0"
] | 1 | 2021-04-09T16:54:06.000Z | 2021-04-09T16:54:06.000Z | src/sagemaker/workflow/steps.py | minlu1021/sagemaker-python-sdk | 7268e82d45b522424891ddd8267ad00ef0cffd23 | [
"Apache-2.0"
] | null | null | null | src/sagemaker/workflow/steps.py | minlu1021/sagemaker-python-sdk | 7268e82d45b522424891ddd8267ad00ef0cffd23 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""The step definitions for workflow."""
from __future__ import absolute_import
import abc
from enum import Enum
from typing import Dict, List, Union
import attr
from sagemaker.estimator import EstimatorBase, _TrainingJob
from sagemaker.inputs import (
CompilationInput,
CreateModelInput,
FileSystemInput,
TrainingInput,
TransformInput,
)
from sagemaker.model import Model
from sagemaker.processing import (
ProcessingInput,
ProcessingJob,
ProcessingOutput,
Processor,
)
from sagemaker.transformer import Transformer, _TransformJob
from sagemaker.tuner import HyperparameterTuner, _TuningJob
from sagemaker.workflow.entities import DefaultEnumMeta, Entity, RequestType
from sagemaker.workflow.functions import Join
from sagemaker.workflow.properties import Properties, PropertyFile
from sagemaker.workflow.retry import RetryPolicy
class StepTypeEnum(Enum, metaclass=DefaultEnumMeta):
    """Enum of step types."""
    # Each value is the literal "Type" string emitted into the pipeline
    # definition request (see Step.to_request).
    CONDITION = "Condition"
    CREATE_MODEL = "Model"
    PROCESSING = "Processing"
    REGISTER_MODEL = "RegisterModel"
    TRAINING = "Training"
    TRANSFORM = "Transform"
    CALLBACK = "Callback"
    TUNING = "Tuning"
    COMPILATION = "Compilation"
    LAMBDA = "Lambda"
@attr.s
class Step(Entity):
    """Pipeline step for workflow.
    Attributes:
        name (str): The name of the step.
        display_name (str): The display name of the step.
        description (str): The description of the step.
        step_type (StepTypeEnum): The type of the step.
        depends_on (List[str] or List[Step]): The list of step names or step
            instances the current step depends on
        retry_policies (List[RetryPolicy]): The custom retry policy configuration
    """
    name: str = attr.ib(factory=str)
    display_name: str = attr.ib(default=None)
    description: str = attr.ib(default=None)
    step_type: StepTypeEnum = attr.ib(factory=StepTypeEnum.factory)
    depends_on: Union[List[str], List["Step"]] = attr.ib(default=None)
    @property
    @abc.abstractmethod
    def arguments(self) -> RequestType:
        """The arguments to the particular step service call."""
    @property
    @abc.abstractmethod
    def properties(self):
        """The properties of the particular step."""
    def to_request(self) -> RequestType:
        """Gets the request structure for workflow service calls."""
        request_dict = {
            "Name": self.name,
            "Type": self.step_type.value,
            "Arguments": self.arguments,
        }
        # Optional fields are omitted entirely when unset.
        if self.depends_on:
            request_dict["DependsOn"] = self._resolve_depends_on(self.depends_on)
        if self.display_name:
            request_dict["DisplayName"] = self.display_name
        if self.description:
            request_dict["Description"] = self.description
        return request_dict
    def add_depends_on(self, step_names: Union[List[str], List["Step"]]):
        """Add step names or step instances to the current step depends on list"""
        if not step_names:
            return
        if not self.depends_on:
            self.depends_on = []
        self.depends_on.extend(step_names)
    @property
    def ref(self) -> Dict[str, str]:
        """Gets a reference dict for steps"""
        return {"Name": self.name}
    @staticmethod
    def _resolve_depends_on(depends_on_list: Union[List[str], List["Step"]]) -> List[str]:
        """Resolve the step depends on list"""
        # Normalize mixed Step-instance / string entries to plain step names.
        depends_on = []
        for step in depends_on_list:
            if isinstance(step, Step):
                depends_on.append(step.name)
            elif isinstance(step, str):
                depends_on.append(step)
            else:
                raise ValueError(f"Invalid input step name: {step}")
        return depends_on
@attr.s
class CacheConfig:
    """Configuration class to enable caching in pipeline workflow.
    If caching is enabled, the pipeline attempts to find a previous execution of a step
    that was called with the same arguments. Step caching only considers successful execution.
    If a successful previous execution is found, the pipeline propagates the values
    from previous execution rather than recomputing the step. When multiple successful executions
    exist within the timeout period, it uses the result for the most recent successful execution.
    Attributes:
        enable_caching (bool): To enable step caching. Defaults to `False`.
        expire_after (str): If step caching is enabled, a timeout also needs to be defined.
            It defines how old a previous execution can be to be considered for reuse.
            Value should be an ISO 8601 duration string. Defaults to `None`.
            Examples::
                'p30d' # 30 days
                'P4DT12H' # 4 days and 12 hours
                'T12H' # 12 hours
    """
    enable_caching: bool = attr.ib(default=False)
    # The validator enforces str-or-None for expire_after.
    expire_after = attr.ib(
        default=None, validator=attr.validators.optional(attr.validators.instance_of(str))
    )
    @property
    def config(self):
        """Configures caching in pipeline steps."""
        config = {"Enabled": self.enable_caching}
        if self.expire_after is not None:
            config["ExpireAfter"] = self.expire_after
        return {"CacheConfig": config}
class ConfigurableRetryStep(Step):
    """ConfigurableRetryStep step for workflow."""
    def __init__(
        self,
        name: str,
        step_type: StepTypeEnum,
        display_name: str = None,
        description: str = None,
        depends_on: Union[List[str], List[Step]] = None,
        retry_policies: List[RetryPolicy] = None,
    ):
        """Construct a step that supports custom retry policies.
        Args:
            name (str): The name of the step.
            step_type (StepTypeEnum): The type of the step.
            display_name (str): The display name of the step.
            description (str): The description of the step.
            depends_on (List[str] or List[Step]): Step names or step instances
                this step depends on.
            retry_policies (List[RetryPolicy]): The custom retry policy configuration.
        """
        super().__init__(
            name=name,
            display_name=display_name,
            step_type=step_type,
            description=description,
            depends_on=depends_on,
        )
        self.retry_policies = [] if not retry_policies else retry_policies
    def add_retry_policy(self, retry_policy: RetryPolicy):
        """Add a retry policy to the current step retry policies list."""
        if not retry_policy:
            return
        if not self.retry_policies:
            self.retry_policies = []
        self.retry_policies.append(retry_policy)
    def to_request(self) -> RequestType:
        """Gets the request structure for ConfigurableRetryStep"""
        step_dict = super().to_request()
        # RetryPolicies is only emitted when at least one policy is set.
        if self.retry_policies:
            step_dict["RetryPolicies"] = self._resolve_retry_policy(self.retry_policies)
        return step_dict
    @staticmethod
    def _resolve_retry_policy(retry_policy_list: List[RetryPolicy]) -> List[RequestType]:
        """Resolve the step retry policy list"""
        return [retry_policy.to_request() for retry_policy in retry_policy_list]
class TrainingStep(ConfigurableRetryStep):
    """Training step for workflow."""
    def __init__(
        self,
        name: str,
        estimator: EstimatorBase,
        display_name: str = None,
        description: str = None,
        inputs: Union[TrainingInput, dict, str, FileSystemInput] = None,
        cache_config: CacheConfig = None,
        depends_on: Union[List[str], List[Step]] = None,
        retry_policies: List[RetryPolicy] = None,
    ):
        """Construct a TrainingStep, given an `EstimatorBase` instance.
        In addition to the estimator instance, the other arguments are those that are supplied to
        the `fit` method of the `sagemaker.estimator.Estimator`.
        Args:
            name (str): The name of the training step.
            estimator (EstimatorBase): A `sagemaker.estimator.EstimatorBase` instance.
            display_name (str): The display name of the training step.
            description (str): The description of the training step.
            inputs (Union[str, dict, TrainingInput, FileSystemInput]): Information
                about the training data. This can be one of three types:
                * (str) the S3 location where training data is saved, or a file:// path in
                    local mode.
                * (dict[str, str] or dict[str, sagemaker.inputs.TrainingInput]) If using multiple
                    channels for training data, you can specify a dict mapping channel names to
                    strings or :func:`~sagemaker.inputs.TrainingInput` objects.
                * (sagemaker.inputs.TrainingInput) - channel configuration for S3 data sources
                    that can provide additional information as well as the path to the training
                    dataset.
                    See :func:`sagemaker.inputs.TrainingInput` for full details.
                * (sagemaker.inputs.FileSystemInput) - channel configuration for
                    a file system data source that can provide additional information as well as
                    the path to the training dataset.
            cache_config (CacheConfig):  A `sagemaker.workflow.steps.CacheConfig` instance.
            depends_on (List[str] or List[Step]): A list of step names or step instances
                this `sagemaker.workflow.steps.TrainingStep` depends on
            retry_policies (List[RetryPolicy]):  A list of retry policy
        """
        super(TrainingStep, self).__init__(
            name, StepTypeEnum.TRAINING, display_name, description, depends_on, retry_policies
        )
        self.estimator = estimator
        self.inputs = inputs
        self._properties = Properties(
            path=f"Steps.{name}", shape_name="DescribeTrainingJobResponse"
        )
        self.cache_config = cache_config
    @property
    def arguments(self) -> RequestType:
        """The arguments dict that is used to call `create_training_job`.
        NOTE: The CreateTrainingJob request is not quite the args list that workflow needs.
        The TrainingJobName and ExperimentConfig attributes cannot be included.
        """
        self.estimator._prepare_for_training()
        train_args = _TrainingJob._get_train_args(
            self.estimator, self.inputs, experiment_config=dict()
        )
        request_dict = self.estimator.sagemaker_session._get_train_request(**train_args)
        # Per the NOTE above, the job name must not appear in step arguments.
        request_dict.pop("TrainingJobName")
        return request_dict
    @property
    def properties(self):
        """A Properties object representing the DescribeTrainingJobResponse data model."""
        return self._properties
    def to_request(self) -> RequestType:
        """Updates the dictionary with cache configuration."""
        request_dict = super().to_request()
        if self.cache_config:
            request_dict.update(self.cache_config.config)
        return request_dict
class CreateModelStep(ConfigurableRetryStep):
    """CreateModel step for workflow."""
    def __init__(
        self,
        name: str,
        model: Model,
        inputs: CreateModelInput,
        depends_on: Union[List[str], List[Step]] = None,
        retry_policies: List[RetryPolicy] = None,
        display_name: str = None,
        description: str = None,
    ):
        """Construct a CreateModelStep, given an `sagemaker.model.Model` instance.
        In addition to the Model instance, the other arguments are those that are supplied to
        the `_create_sagemaker_model` method of the `sagemaker.model.Model._create_sagemaker_model`.
        Args:
            name (str): The name of the CreateModel step.
            model (Model): A `sagemaker.model.Model` instance.
            inputs (CreateModelInput): A `sagemaker.inputs.CreateModelInput` instance.
                Defaults to `None`.
            depends_on (List[str] or List[Step]): A list of step names or step instances
                this `sagemaker.workflow.steps.CreateModelStep` depends on
            retry_policies (List[RetryPolicy]): A list of retry policy
            display_name (str): The display name of the CreateModel step.
            description (str): The description of the CreateModel step.
        """
        super(CreateModelStep, self).__init__(
            name, StepTypeEnum.CREATE_MODEL, display_name, description, depends_on, retry_policies
        )
        self.model = model
        # Fall back to an empty CreateModelInput when none is given.
        self.inputs = inputs or CreateModelInput()
        self._properties = Properties(path=f"Steps.{name}", shape_name="DescribeModelOutput")
    @property
    def arguments(self) -> RequestType:
        """The arguments dict that is used to call `create_model`.
        NOTE: The CreateModelRequest is not quite the args list that workflow needs.
        ModelName cannot be included in the arguments.
        """
        request_dict = self.model.sagemaker_session._create_model_request(
            name="",
            role=self.model.role,
            container_defs=self.model.prepare_container_def(
                instance_type=self.inputs.instance_type,
                accelerator_type=self.inputs.accelerator_type,
            ),
            vpc_config=self.model.vpc_config,
            enable_network_isolation=self.model.enable_network_isolation(),
        )
        # Per the NOTE above, the model name must not appear in step arguments.
        request_dict.pop("ModelName")
        return request_dict
    @property
    def properties(self):
        """A Properties object representing the DescribeModelResponse data model."""
        return self._properties
class TransformStep(ConfigurableRetryStep):
    """Transform step for workflow."""
    def __init__(
        self,
        name: str,
        transformer: Transformer,
        inputs: TransformInput,
        display_name: str = None,
        description: str = None,
        cache_config: CacheConfig = None,
        depends_on: Union[List[str], List[Step]] = None,
        retry_policies: List[RetryPolicy] = None,
    ):
        """Constructs a TransformStep, given an `Transformer` instance.
        In addition to the transformer instance, the other arguments are those that are supplied to
        the `transform` method of the `sagemaker.transformer.Transformer`.
        Args:
            name (str): The name of the transform step.
            transformer (Transformer): A `sagemaker.transformer.Transformer` instance.
            inputs (TransformInput): A `sagemaker.inputs.TransformInput` instance.
            cache_config (CacheConfig): A `sagemaker.workflow.steps.CacheConfig` instance.
            display_name (str): The display name of the transform step.
            description (str): The description of the transform step.
            depends_on (List[str]): A list of step names this `sagemaker.workflow.steps.TransformStep`
                depends on
            retry_policies (List[RetryPolicy]): A list of retry policy
        """
        super(TransformStep, self).__init__(
            name, StepTypeEnum.TRANSFORM, display_name, description, depends_on, retry_policies
        )
        self.transformer = transformer
        self.inputs = inputs
        self.cache_config = cache_config
        self._properties = Properties(
            path=f"Steps.{name}", shape_name="DescribeTransformJobResponse"
        )
    @property
    def arguments(self) -> RequestType:
        """The arguments dict that is used to call `create_transform_job`.
        NOTE: The CreateTransformJob request is not quite the args list that workflow needs.
        TransformJobName and ExperimentConfig cannot be included in the arguments.
        """
        transform_args = _TransformJob._get_transform_args(
            transformer=self.transformer,
            data=self.inputs.data,
            data_type=self.inputs.data_type,
            content_type=self.inputs.content_type,
            compression_type=self.inputs.compression_type,
            split_type=self.inputs.split_type,
            input_filter=self.inputs.input_filter,
            output_filter=self.inputs.output_filter,
            join_source=self.inputs.join_source,
            model_client_config=self.inputs.model_client_config,
            experiment_config=dict(),
        )
        request_dict = self.transformer.sagemaker_session._get_transform_request(**transform_args)
        # Per the NOTE above, the job name must not appear in step arguments.
        request_dict.pop("TransformJobName")
        return request_dict
    @property
    def properties(self):
        """A Properties object representing the DescribeTransformJobResponse data model."""
        return self._properties
    def to_request(self) -> RequestType:
        """Updates the dictionary with cache configuration."""
        request_dict = super().to_request()
        if self.cache_config:
            request_dict.update(self.cache_config.config)
        return request_dict
class ProcessingStep(ConfigurableRetryStep):
    """Processing step for workflow.

    Wraps a `sagemaker.processing.Processor` so that a processing job can run
    as a pipeline step.
    """

    def __init__(
        self,
        name: str,
        processor: Processor,
        display_name: str = None,
        description: str = None,
        inputs: List[ProcessingInput] = None,
        outputs: List[ProcessingOutput] = None,
        job_arguments: List[str] = None,
        code: str = None,
        property_files: List[PropertyFile] = None,
        cache_config: CacheConfig = None,
        depends_on: Union[List[str], List[Step]] = None,
        retry_policies: List[RetryPolicy] = None,
    ):
        """Construct a ProcessingStep, given a `Processor` instance.

        In addition to the processor instance, the other arguments are those that are supplied to
        the `process` method of the `sagemaker.processing.Processor`.

        Args:
            name (str): The name of the processing step.
            processor (Processor): A `sagemaker.processing.Processor` instance.
            display_name (str): The display name of the processing step.
            description (str): The description of the processing step.
            inputs (List[ProcessingInput]): A list of `sagemaker.processing.ProcessorInput`
                instances. Defaults to `None`.
            outputs (List[ProcessingOutput]): A list of `sagemaker.processing.ProcessorOutput`
                instances. Defaults to `None`.
            job_arguments (List[str]): A list of strings to be passed into the processing job.
                Defaults to `None`.
            code (str): This can be an S3 URI or a local path to a file with the framework
                script to run. Defaults to `None`.
            property_files (List[PropertyFile]): A list of property files that workflow looks
                for and resolves from the configured processing output list.
            cache_config (CacheConfig): A `sagemaker.workflow.steps.CacheConfig` instance.
            depends_on (List[str] or List[Step]): A list of step names or step instances
                this `sagemaker.workflow.steps.ProcessingStep` depends on.
            retry_policies (List[RetryPolicy]): A list of retry policies.
        """
        super(ProcessingStep, self).__init__(
            name, StepTypeEnum.PROCESSING, display_name, description, depends_on, retry_policies
        )
        self.processor = processor
        self.inputs = inputs
        self.outputs = outputs
        self.job_arguments = job_arguments
        self.code = code
        self.property_files = property_files
        # TODO: Examine why run method in sagemaker.processing.Processor mutates the processor
        # instance by setting the instance's arguments attribute. Refactor Processor.run, if
        # possible. The assignment below mirrors that mutation so `arguments` builds correctly.
        self.processor.arguments = job_arguments
        self._properties = Properties(
            path=f"Steps.{name}", shape_name="DescribeProcessingJobResponse"
        )
        self.cache_config = cache_config

    @property
    def arguments(self) -> RequestType:
        """The arguments dict that is used to call `create_processing_job`.

        NOTE: The CreateProcessingJob request is not quite the args list that workflow needs.
        ProcessingJobName and ExperimentConfig cannot be included in the arguments.
        """
        normalized_inputs, normalized_outputs = self.processor._normalize_args(
            arguments=self.job_arguments,
            inputs=self.inputs,
            outputs=self.outputs,
            code=self.code,
        )
        process_args = ProcessingJob._get_process_args(
            self.processor, normalized_inputs, normalized_outputs, experiment_config=dict()
        )
        request_dict = self.processor.sagemaker_session._get_process_request(**process_args)
        # The job name is not allowed in step arguments (see NOTE above).
        request_dict.pop("ProcessingJobName")
        return request_dict

    @property
    def properties(self):
        """A Properties object representing the DescribeProcessingJobResponse data model."""
        return self._properties

    def to_request(self) -> RequestType:
        """Get the request structure for workflow service calls."""
        request_dict = super(ProcessingStep, self).to_request()
        if self.cache_config:
            request_dict.update(self.cache_config.config)
        if self.property_files:
            request_dict["PropertyFiles"] = [
                property_file.expr for property_file in self.property_files
            ]
        return request_dict
class TuningStep(ConfigurableRetryStep):
    """Tuning step for workflow.

    Wraps a `sagemaker.tuner.HyperparameterTuner` so that a hyperparameter
    tuning job can run as a pipeline step.
    """

    def __init__(
        self,
        name: str,
        tuner: HyperparameterTuner,
        display_name: str = None,
        description: str = None,
        inputs=None,
        job_arguments: List[str] = None,
        cache_config: CacheConfig = None,
        depends_on: Union[List[str], List[Step]] = None,
        retry_policies: List[RetryPolicy] = None,
    ):
        """Construct a TuningStep, given a `HyperparameterTuner` instance.

        In addition to the tuner instance, the other arguments are those that are supplied to
        the `fit` method of the `sagemaker.tuner.HyperparameterTuner`.

        Args:
            name (str): The name of the tuning step.
            tuner (HyperparameterTuner): A `sagemaker.tuner.HyperparameterTuner` instance.
            display_name (str): The display name of the tuning step.
            description (str): The description of the tuning step.
            inputs: Information about the training data. Please refer to the
                ``fit()`` method of the associated estimator, as this can take
                any of the following forms:

                * (str) - The S3 location where training data is saved.
                * (dict[str, str] or dict[str, sagemaker.inputs.TrainingInput]) -
                    If using multiple channels for training data, you can specify
                    a dict mapping channel names to strings or
                    :func:`~sagemaker.inputs.TrainingInput` objects.
                * (sagemaker.inputs.TrainingInput) - Channel configuration for S3 data sources
                    that can provide additional information about the training dataset.
                    See :func:`sagemaker.inputs.TrainingInput` for full details.
                * (sagemaker.session.FileSystemInput) - channel configuration for
                    a file system data source that can provide additional information as well as
                    the path to the training dataset.
                * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
                    Amazon :class:~`Record` objects serialized and stored in S3.
                    For use with an estimator for an Amazon algorithm.
                * (sagemaker.amazon.amazon_estimator.FileSystemRecordSet) -
                    Amazon SageMaker channel configuration for a file system data source for
                    Amazon algorithms.
                * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
                    :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects,
                    where each instance is a different channel of training data.
                * (list[sagemaker.amazon.amazon_estimator.FileSystemRecordSet]) - A list of
                    :class:~`sagemaker.amazon.amazon_estimator.FileSystemRecordSet` objects,
                    where each instance is a different channel of training data.
            job_arguments (List[str]): A list of strings to be passed into the job.
                Defaults to `None`.
            cache_config (CacheConfig): A `sagemaker.workflow.steps.CacheConfig` instance.
            depends_on (List[str] or List[Step]): A list of step names or step instances
                this `sagemaker.workflow.steps.TuningStep` depends on.
            retry_policies (List[RetryPolicy]): A list of retry policies.
        """
        super(TuningStep, self).__init__(
            name, StepTypeEnum.TUNING, display_name, description, depends_on, retry_policies
        )
        self.tuner = tuner
        self.inputs = inputs
        # NOTE(review): job_arguments is stored but never used when building
        # the request in `arguments` below — confirm whether it is still needed.
        self.job_arguments = job_arguments
        self._properties = Properties(
            path=f"Steps.{name}",
            shape_names=[
                "DescribeHyperParameterTuningJobResponse",
                "ListTrainingJobsForHyperParameterTuningJobResponse",
            ],
        )
        self.cache_config = cache_config

    @property
    def arguments(self) -> RequestType:
        """The arguments dict that is used to call `create_hyper_parameter_tuning_job`.

        NOTE: The CreateHyperParameterTuningJob request is not quite the
        args list that workflow needs.
        The HyperParameterTuningJobName attribute cannot be included.
        """
        # _prepare_for_training/_prepare_for_tuning mutate the tuner/estimator
        # objects in place before the request args are extracted.
        if self.tuner.estimator is not None:
            self.tuner.estimator._prepare_for_training()
        else:
            for _, estimator in self.tuner.estimator_dict.items():
                estimator._prepare_for_training()
        self.tuner._prepare_for_tuning()
        tuner_args = _TuningJob._get_tuner_args(self.tuner, self.inputs)
        request_dict = self.tuner.sagemaker_session._get_tuning_request(**tuner_args)
        # The job name is not allowed in step arguments (see NOTE above).
        request_dict.pop("HyperParameterTuningJobName")
        return request_dict

    @property
    def properties(self):
        """A Properties object representing

        `DescribeHyperParameterTuningJobResponse` and
        `ListTrainingJobsForHyperParameterTuningJobResponse` data model.
        """
        return self._properties

    def to_request(self) -> RequestType:
        """Updates the dictionary with cache configuration."""
        request_dict = super().to_request()
        if self.cache_config:
            request_dict.update(self.cache_config.config)
        return request_dict

    def get_top_model_s3_uri(self, top_k: int, s3_bucket: str, prefix: str = "") -> Join:
        """Get the model artifact s3 uri from the top performing training jobs.

        Args:
            top_k (int): the index of the top performing training job
                tuning step stores up to 50 top performing training jobs, hence
                a valid top_k value is from 0 to 49. The best training job
                model is at index 0
            s3_bucket (str): the s3 bucket to store the training job output artifact
            prefix (str): the s3 key prefix to store the training job output artifact
        """
        # "s3:/" joined with the bucket on "/" yields the "s3://bucket" scheme.
        values = ["s3:/", s3_bucket]
        if prefix != "" and prefix is not None:
            values.append(prefix)
        return Join(
            on="/",
            values=values
            + [
                self.properties.TrainingJobSummaries[top_k].TrainingJobName,
                "output/model.tar.gz",
            ],
        )
class CompilationStep(ConfigurableRetryStep):
    """Compilation step for workflow.

    Wraps an estimator/model pair so that a Neo compilation job can run as a
    pipeline step.
    """

    def __init__(
        self,
        name: str,
        estimator: EstimatorBase,
        model: Model,
        inputs: CompilationInput = None,
        job_arguments: List[str] = None,
        depends_on: Union[List[str], List[Step]] = None,
        retry_policies: List[RetryPolicy] = None,
        display_name: str = None,
        description: str = None,
        cache_config: CacheConfig = None,
    ):
        """Construct a CompilationStep.

        Given an `EstimatorBase` and a `sagemaker.model.Model` instance construct a CompilationStep.

        In addition to the estimator and Model instances, the other arguments are those that are
        supplied to the `compile_model` method of the `sagemaker.model.Model.compile_model`.

        Args:
            name (str): The name of the compilation step.
            estimator (EstimatorBase): A `sagemaker.estimator.EstimatorBase` instance.
            model (Model): A `sagemaker.model.Model` instance.
            inputs (CompilationInput): A `sagemaker.inputs.CompilationInput` instance.
                Defaults to `None`.
            job_arguments (List[str]): A list of strings to be passed into the job.
                Defaults to `None`.
            depends_on (List[str] or List[Step]): A list of step names or step instances
                this `sagemaker.workflow.steps.CompilationStep` depends on.
            retry_policies (List[RetryPolicy]): A list of retry policies.
            display_name (str): The display name of the compilation step.
            description (str): The description of the compilation step.
            cache_config (CacheConfig): A `sagemaker.workflow.steps.CacheConfig` instance.
        """
        super(CompilationStep, self).__init__(
            name, StepTypeEnum.COMPILATION, display_name, description, depends_on, retry_policies
        )
        self.estimator = estimator
        self.model = model
        self.inputs = inputs
        # NOTE(review): job_arguments is stored but not used when building the
        # request in `arguments` below — confirm whether it is still needed.
        self.job_arguments = job_arguments
        self._properties = Properties(
            path=f"Steps.{name}", shape_name="DescribeCompilationJobResponse"
        )
        self.cache_config = cache_config

    @property
    def arguments(self) -> RequestType:
        """The arguments dict that is used to call `create_compilation_job`.

        NOTE: The CreateCompilationJob request is not quite the args list that workflow needs.
        The CompilationJobName attribute cannot be included.
        """
        compilation_args = self.model._get_compilation_args(self.estimator, self.inputs)
        request_dict = self.model.sagemaker_session._get_compilation_request(**compilation_args)
        # The job name is not allowed in step arguments (see NOTE above).
        request_dict.pop("CompilationJobName")
        return request_dict

    @property
    def properties(self):
        """A Properties object representing the DescribeCompilationJobResponse data model."""
        return self._properties

    def to_request(self) -> RequestType:
        """Updates the dictionary with cache configuration."""
        request_dict = super().to_request()
        if self.cache_config:
            request_dict.update(self.cache_config.config)
        return request_dict
| 40.973719 | 102 | 0.651037 |
acecea8f5ddaf1c8d31352839b051dd71e223b8c | 15,859 | py | Python | functions/transfer.py | pollostrazon/GAPS-ASIC-Analysis | 2c38d3329574eacb86a6b989a724e7f281a63e30 | [
"MIT"
] | null | null | null | functions/transfer.py | pollostrazon/GAPS-ASIC-Analysis | 2c38d3329574eacb86a6b989a724e7f281a63e30 | [
"MIT"
] | null | null | null | functions/transfer.py | pollostrazon/GAPS-ASIC-Analysis | 2c38d3329574eacb86a6b989a724e7f281a63e30 | [
"MIT"
] | null | null | null | import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import os
"""=== TRANSFER FUNCTION ===
As the name states
### Measures
- Kink
- Gain in high and low frequency"""
def transfer(isProcessed, enablePlot, singleMeasures, peaking_time, input_path, out_path_current_CH, out_path_he, out_path_le, out_path_current_TAU, n_ch, n_tau, n_fthr):
    """Analyze ASIC transfer-function sweeps and extract low/high-energy gains.

    For every channel and peaking time the ADC readings of each injected DAC
    code are averaged, then:
      * low-energy gain  -> linear fit on DAC in [50, 100] and a degree-4
        polynomial fit on DAC in [10, 500] (gain = first-order coefficient);
      * high-energy gain -> linear fit on DAC in [20000, 60000].
    Fit results are written under ``<out_path_current_CH>/Processed`` and,
    when ``enablePlot`` is set, per-channel and per-tau SVG plots are saved.

    Args:
        isProcessed (bool): If True, reload previously saved fit results
            instead of reprocessing ``TransferFunction_fast_tauX.dat`` files.
        enablePlot (bool): If True, produce the SVG plots.
        singleMeasures (bool): If True, analyze only ``peaking_time``;
            otherwise analyze all ``n_tau`` peaking times.
        peaking_time (int): Peaking-time index used when ``singleMeasures``.
        input_path (str): Directory holding the raw ``.dat`` sweeps.
        out_path_current_CH, out_path_he, out_path_le, out_path_current_TAU (str):
            Output directories (per-channel, high-energy, low-energy, per-tau).
        n_ch (int): Number of channels.
        n_tau (int): Number of peaking times.
        n_fthr (int): NOTE(review): accepted but never used in this function.

    Returns:
        (m_high_gain_lin, m_high_gain_poly_wide_range, degree_fit)
    """
    list_tau = [peaking_time] if singleMeasures else np.arange(n_tau)
    # NOTE(review): this loop breaks immediately, i.e. fname is built from the
    # first element of list_tau only; that file defines the DAC sweep axis.
    for j in list_tau:
        fname = input_path + "TransferFunction_fast_tau{}.dat".format(j)
        break
    try:
        # Column 1 holds the injected DAC code.
        daca = np.loadtxt(fname, comments='#', usecols=(1), unpack=True)
    except OSError:
        print('No Pedestal file was found. Please check that pedestal file name meets "TransferFunction_fast_tauX.dat" pattern.')
        exit(1)
    sep = os.path.sep
    degree_fit = 4  # degree of the wide-range polynomial fit
    colours = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7']
    # Processed files
    processed_path = out_path_current_CH + 'Processed' + sep
    if (not(os.path.exists(processed_path))):
        os.makedirs(processed_path)
    processed_path_plot = processed_path + 'SupportData' + sep
    if (not(os.path.exists(processed_path_plot))):
        os.makedirs(processed_path_plot)
    file_m_y = processed_path_plot + 'y.dat'
    file_high_gain_lin = processed_path + 'high_gain_lin.dat'
    file_high_gain_poly_wide_range = processed_path + 'high_gain_poly_wide_range.dat'
    file_low_gain_lin = processed_path + 'low_gain_lin.dat'
    file_lin_intercept_high = processed_path_plot + 'lin_intercept_high.dat'
    file_lin_intercept_low = processed_path_plot + 'lin_intercept_low.dat'
    file_poly_coeff_wide = processed_path_plot + 'poly_coeff_wide.dat'
    # Result matrices: one row per peaking time, channels flattened into columns.
    dac = np.unique(daca)
    m_y = np.zeros((n_tau, n_ch * len(dac)), dtype='float')
    m_high_gain_lin = np.zeros((n_tau, n_ch), dtype='float')
    m_high_gain_poly_wide_range = np.zeros((n_tau, n_ch), dtype='float')
    m_low_gain_lin = np.zeros((n_tau, n_ch), dtype='float')
    m_lin_intercept_high = np.zeros((n_tau, n_ch), dtype='float')
    m_lin_intercept_low = np.zeros((n_tau, n_ch), dtype='float')
    m_poly_coeff_wide = np.zeros((n_tau, n_ch * (degree_fit + 1)), dtype='float')
    if (isProcessed):
        # Reload previously computed fit results.
        try:
            m_y = np.loadtxt(file_m_y, dtype='float', delimiter='\t')
            m_high_gain_lin = np.loadtxt(file_high_gain_lin, dtype='float', delimiter='\t')
            m_high_gain_poly_wide_range = np.loadtxt(file_high_gain_poly_wide_range, dtype='float', delimiter='\t')
            m_low_gain_lin = np.loadtxt(file_low_gain_lin, dtype='float', delimiter='\t')
            m_lin_intercept_high = np.loadtxt(file_lin_intercept_high, dtype='float', delimiter='\t')
            m_lin_intercept_low = np.loadtxt(file_lin_intercept_low, dtype='float', delimiter='\t')
            m_poly_coeff_wide = np.loadtxt(file_poly_coeff_wide, dtype='float', delimiter='\t')
        except OSError:
            print('There are no processed files or some are missing for this analysis')
            exit(1)
    else:
        """=== PROCESSING ==="""
        # Data write tau for each channel
        # NOTE(review): these accumulators are declared but never filled or saved.
        data_gain_low = np.zeros((n_tau, 1 + n_ch))
        data_gain_high_lin = np.zeros((n_tau, 1 + n_ch))
        data_gain_high_poly = np.zeros((n_tau, 1 + n_ch))
        # Pass 1: average the ADC readings per channel/tau/DAC point.
        for i in range(n_ch):
            print('CH #{}'.format(i))
            for j in list_tau:
                print(" tau: {}".format(j))
                try:
                    fname = input_path + "TransferFunction_fast_tau" + str(j) + ".dat"
                    daca, typea, ch, val = np.loadtxt(fname, comments='#', usecols=(1, 2, 3, 4), unpack=True)
                    print('File TransferFunction_fast_tau{}.dat found!'.format(j))
                except OSError:
                    # NOTE(review): on a missing file this falls through and
                    # reuses daca/typea/ch/val from the previous iteration.
                    print('File TransferFunction_fast_tau{}.dat not found! Ignoring...'.format(j))
                    pass
                # Mask events of type 1/11 so they are excluded from the mean.
                val[np.where((typea == 1) | (typea == 11))] = np.nan
                y = np.zeros(len(dac))
                for k in range(len(dac)):
                    idx = (ch == i).nonzero()[0]
                    jdx = (daca[idx] == dac[k]).nonzero()[0]
                    y[k] = np.nanmean(val[idx[jdx]])
                    m_y[j, i * len(dac) + k] = y[k]
        # Pass 2: fit the averaged transfer curves.
        for i in range(n_ch):
            print('#CH {}'.format(i))
            for j in list_tau:
                print('tau {}'.format(j))
                # Low energy
                # linear interpolation [50, 100]
                base_idx = i * len(dac)
                this_y = m_y[j, base_idx:base_idx + len(dac)]
                dac_le = dac[np.where((dac >= 50) & (dac <= 100))]
                y_le = this_y[np.where((dac >= 50) & (dac <= 100))]
                (high_gain, intercept, r_value, p_value, std_err) = stats.linregress(dac_le, y_le)
                m_lin_intercept_high[j, i] = intercept
                m_high_gain_lin[j, i] = high_gain
                print(' Low Enegry gain lin: {:.3f}'.format(high_gain))
                print(' r_value: {:.3f}'.format(r_value))
                # polynomial interpolation with weighted initial points and derivative extraction [10 - 500]
                dac_le = dac[np.where(dac <= 500)]
                y_le = this_y[np.where(dac <= 500)]
                try:
                    # First point skipped. NOTE(review): if the fit fails, popt
                    # keeps the value from the previous loop iteration (or is
                    # undefined on the very first one).
                    popt = np.polyfit(dac_le[1:], y_le[1:], deg=degree_fit)
                except RuntimeError:
                    print(' Not fitted for tau{}'.format(j))
                m_high_gain_poly_wide_range[j, i] = popt[degree_fit - 1]
                for d in range(degree_fit + 1):
                    m_poly_coeff_wide[j, i * (degree_fit + 1) + d] = popt[d]
                poly = np.poly1d(popt)
                print(' Low Enegry gain poly: {:.3f}'.format(popt[degree_fit - 1]))
                # Coefficient of determination (R^2) of the polynomial fit.
                ss_res = np.sum((y_le - poly(dac_le))**2)
                ss_tot = np.sum((y_le - np.mean(y_le))**2)
                r_value = 1 - (ss_res / ss_tot)
                print(' r_value: {:.3f}'.format(r_value))
                # High energy
                # linear interpolation [20000, 60000]
                dac_he = dac[np.where((dac >= 20000) & (dac <= 60000))]
                y_he = this_y[np.where((dac >= 20000) & (dac <= 60000))]
                if y_he.any():
                    (low_gain, intercept, r_value, p_value, std_err) = stats.linregress(dac_he, y_he)
                    m_low_gain_lin[j, i] = low_gain
                    m_lin_intercept_low[j, i] = intercept
                    print(' High energy gain: {:.3f}'.format(low_gain))
                    print(' r_value: {:.3f}'.format(r_value))
        # Save processed data
        header = 'Peak times (rows) and Channels, DAC (cols)\n'
        header = header + '\t'.join(['ch{}-dac{}'.format(i, daci) for i, daci in zip(range(n_ch), range(len(dac)))])
        np.savetxt(file_m_y, m_y, delimiter='\t', header=header)
        header_tau_ch = 'Peak times (rows) and Channels (cols)\n'
        header_tau_ch = header_tau_ch + '\t'.join(['ch{}'.format(i) for i in range(n_ch)])
        np.savetxt(file_high_gain_lin, m_high_gain_lin, delimiter='\t', header=header_tau_ch)
        np.savetxt(file_high_gain_poly_wide_range, m_high_gain_poly_wide_range, delimiter='\t', header=header_tau_ch)
        np.savetxt(file_low_gain_lin, m_low_gain_lin, delimiter='\t', header=header_tau_ch)
        np.savetxt(file_lin_intercept_high, m_lin_intercept_high, delimiter='\t', header=header_tau_ch)
        np.savetxt(file_lin_intercept_low, m_lin_intercept_low, delimiter='\t', header=header_tau_ch)
        header = 'Peak times (rows) and Channels, poly coefficient (cols)\n'
        header = header + '\t'.join(['ch{}-coef{}'.format(i, polyi) for i, polyi in zip(range(n_ch), range(degree_fit))])
        np.savetxt(file_poly_coeff_wide, m_poly_coeff_wide, delimiter='\t', header=header)
        # Chip-level summary statistics (mean/sigma of the gains over channels).
        print('Chip')
        header = 'DAC (rows) and Channels (cols)\n'
        # NOTE(review): zip with range(degree_fit) truncates this header to the
        # first degree_fit channels, and polyi is unused in the format string.
        header = header + 'dac\t' + '\t'.join(['ch{}'.format(i, polyi) for i, polyi in zip(range(n_ch), range(degree_fit))])
        for j in list_tau:
            # NOTE(review): y_he is whatever the last fit iteration left behind;
            # it only gates whether high-energy data exists at all.
            if (y_he.any()):
                gain_mu, gain_sigma = np.mean(m_low_gain_lin[j]), np.std(m_low_gain_lin[j])
                #print(' tau ','{}'.format(i), ' Low gain mean:','{:.3f}'.format(j, gain_mu),', Low gain sigma','{:.3f}'.format(gain_sigma))
                print(' tau {}, Low gain mean: {:.3f}, Low gain sigma: {:.3f}'.format(j, gain_mu, gain_sigma))
            gain_mu, gain_sigma = np.mean(m_high_gain_lin[j]), np.std(m_high_gain_lin[j])
            print(' tau {}, High gain lin mean: {:.3f}, High gain lin sigma: {:.3f}'.format(j, gain_mu, gain_sigma))
            gain_mu, gain_sigma = np.mean(m_high_gain_poly_wide_range[j]), np.std(m_high_gain_poly_wide_range[j])
            print(' tau {}, High gain poly mean: {:.3f}, High gain poly sigma: {:.3f}'.format(j, gain_mu, gain_sigma))
            # Save data for each tau, values of ADC for each channel
            file_tau_ch = out_path_current_TAU + 'Values_tf_allch_tau' + str(j) + '.dat'
            m_out = np.zeros((len(dac), n_ch + 1), dtype='float')
            m_out[:, 0] = dac.transpose()
            for i in range(n_ch):
                m_out[:, i + 1] = m_y[j, i * len(dac):i * len(dac) + len(dac)].transpose()
            np.savetxt(file_tau_ch, m_out, delimiter='\t', header=header_tau_ch)
    if (enablePlot):
        """=== PLOTTING ==="""
        print('Plotting')
        # Per-channel figures: full transfer function (ax), low-energy zoom
        # per tau (ax2, one figure each), high-energy fits for all taus (ax3).
        for i in range(n_ch):
            print(' #CH {}'.format(i))
            y = np.zeros((1, len(dac)), dtype='float')
            fig, ax = plt.subplots(figsize=(10, 6))
            fig3, ax3 = plt.subplots(figsize=(10, 6))
            out_path_single = out_path_he + 'Channel_' + str(i) + sep
            if (not(os.path.exists(out_path_single))):
                os.makedirs(out_path_single)
            for j in list_tau:
                print(' tau {}'.format(j))
                fig2, ax2 = plt.subplots(figsize=(12, 6))
                y = m_y[j][i * len(dac):i * len(dac) + len(dac)]
                popt = np.array(m_poly_coeff_wide[j][i * (degree_fit + 1): i * (degree_fit + 1) + degree_fit + 1])
                ax.set_ylim(0, 2250)
                ax.plot(dac, y, label='$\\tau_{}$'.format(j))
                #spl = interpolate.UnivariateSpline(thightdac_le, y_le, s=1000, k=1)
                xnew = np.linspace(0, 500, 1000, endpoint=True)
                ax2.set_ylim(100, 500)
                #ax2.plot(dac_le, y_le, 'o', xnew, spl(xnew), '--', label='$\\tau_{}, G_0 = {:.3f}$'.format(j, gain_high), c=colours[j])
                ax2.plot(dac[np.where(dac <= 500)], y[np.where(dac <= 500)], 'o', mfc='none', label='data', color='k')
                #p2, = ax2.plot(xnew, spl(xnew), '--', label='_', c=colours[j])
                ax2.plot(xnew, m_lin_intercept_high[j][i] + m_high_gain_lin[j][i] * xnew, '-', label='linear interpolation, $G_0$ = {:.3f}'.format(m_high_gain_lin[j][i]), color='b')
                #ax2.plot(xnew, cubic_function(xnew, popt[0], popt[1], popt[2], popt[3]), '--', label='cubic interpolation [50-200], $G_0$ = {:.3f}'.format(popt[2]), color='r')
                #ax2.plot(xnew, cubic_function(xnew, popt_2[0], popt_2[1], popt_2[2], popt_2[3]), '-.', label='cubic interpolation [50-500], $G_0$ = {:.3f}'.format(popt_2[2]), color='g')
                poly = np.poly1d(popt)
                ax2.plot(xnew, poly(xnew), '-.', label='power 4 interpolation [10-500], $G_0$ = {:.3f}'.format(popt[degree_fit - 1]), color='r')
                #ax2.plot(-10,-10, label='$\\tau_{}, G_0 = {:.3f}$'.format(j, gain_high), linestyle='--', marker='o', c=colours[j])
                plt.xlabel("Cal_Voltage [$DAC_{inj}$ code]")
                plt.ylabel("Channel_out [ADC code]")
                plt.title("Low energy gain for channel #{}, tau: {}".format(i, j))
                chartBox = ax2.get_position()
                ax2.set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.7, chartBox.height])
                ax2.legend(loc=7, bbox_to_anchor=(1.55, 0.5), borderaxespad=0, frameon=True, ncol=1)
                #plt.legend(loc = "lower right")
                plt.grid(True)
                # plt.show()
                plt.savefig(out_path_single + 'TransferFunctionLowEnergy_ch' + str(i) + '_tau' + str(j) + '.svg', format='svg', bbox_inches="tight")
                plt.close()
                # High energy
                dac_he = dac[np.where((dac >= 20000) & (dac <= 60000))]
                y_he = y[np.where((dac >= 20000) & (dac <= 60000))]
                if (y_he.any()):
                    xnew = np.linspace(dac_he[0], dac_he[-1], num=len(y_he), endpoint=True)
                    p1, = ax3.plot(dac_he, y_he, 'o', label='_', c=colours[j])
                    p2, = ax3.plot(xnew, m_lin_intercept_low[j][i] + m_low_gain_lin[j][i] * xnew, '--', label='_', c=colours[j])
                    ax3.plot(0, 0, label='$\\tau_{}, G_0 = {:.3f}$'.format(j, m_low_gain_lin[j][i]), linestyle='--', marker='o', c=colours[j])
                    # l = ax3.legend([(p1, p2)], ['$\\tau_{}, G_0 = {:.3f}$'.format(j, gain_low)], numpoints=1,
                    #                handler_map={tuple: legend_handler.HandlerTuple(ndivide=None)})
                    #plt.legend(['data', 'knots'], loc='best')
                    # plt.show()
            if (y_he.any()):
                plt.xlabel("Cal_Voltage [$DAC_{inj}$ code]")
                plt.ylabel("Channel_out [ADC code]")
                plt.title("High energy gain for channel #{}".format(i))
                chartBox = ax3.get_position()
                ax3.set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.9, chartBox.height])
                ax3.legend(loc=7, bbox_to_anchor=(1.23, 0.5), borderaxespad=0, frameon=True, ncol=1)
                plt.ylim(1000, 2000)
                plt.xlim(19000, 61000)
                #plt.legend(loc = "lower right")
                plt.grid(True)
                # plt.show()
                plt.savefig(out_path_le + 'TransferFunctionHighEnergy_ch' + str(i) + '.svg', format='svg', bbox_inches="tight")
                plt.close()
            plt.xlabel("Cal_Voltage [$DAC_{inj}$ code]")
            plt.ylabel("Channel_out [ADC code]")
            plt.title("Transfer function of channel #{}".format(i))
            chartBox = ax.get_position()
            ax.set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.95, chartBox.height])
            ax.legend(loc=7, bbox_to_anchor=(1.15, 0.5), borderaxespad=0, frameon=True, ncol=1)
            #plt.legend(loc = "lower right")
            plt.grid(True)
            # plt.show()
            plt.savefig(out_path_current_CH + 'TransferFunction_ch' + str(i) + '.svg', format='svg', bbox_inches="tight")
            plt.close()
        # Plot channel for each tau
        for j in list_tau:
            plt.figure(figsize=(12, 11))
            ax = plt.subplot(111)
            ax.set_ylim(0, 2250)
            for i in range(n_ch):
                ax.plot(dac, m_y[j][i * len(dac): i * len(dac) + len(dac)], label='CH #{}'.format(i))
            plt.xlabel("Cal_Voltage [$DAC_{inj}$ code]")
            plt.ylabel("Channel_out [ADC code]")
            plt.title("Transfer function of $\\tau_{}$".format(j))
            chartBox = ax.get_position()
            ax.set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.95, chartBox.height])
            ax.legend(loc=7, bbox_to_anchor=(1.15, 0.5), borderaxespad=0, frameon=True, ncol=1)
            #plt.legend(loc = "lower right")
            plt.grid(True)
            # plt.show()
            plt.savefig(out_path_current_TAU + 'TransferFunction_tau' + str(j) + '_allch.svg', format='svg', bbox_inches="tight")
            plt.close()
    return m_high_gain_lin, m_high_gain_poly_wide_range, degree_fit
| 53.397306 | 186 | 0.560628 |
aceceb7a4d46a0dc3ee21538d6f4d0ce865601b7 | 4,173 | py | Python | util/transformer.py | HiYKY/candock | fdbfced6f91f1d9a264bd6cdf9f957c03ec5d5d2 | [
"MIT"
] | null | null | null | util/transformer.py | HiYKY/candock | fdbfced6f91f1d9a264bd6cdf9f957c03ec5d5d2 | [
"MIT"
] | null | null | null | util/transformer.py | HiYKY/candock | fdbfced6f91f1d9a264bd6cdf9f957c03ec5d5d2 | [
"MIT"
] | 1 | 2022-01-21T03:25:24.000Z | 2022-01-21T03:25:24.000Z | import os
import random
import numpy as np
import torch
from . import dsp
from . import array_operation as arr
# import dsp
def shuffledata(data, target):
    """Shuffle `data` and `target` in place with the same permutation.

    The global RNG state is captured once and restored before each shuffle,
    so both arrays receive an identical permutation and sample/label pairs
    stay aligned. Operates in place; returns None.
    """
    rng_state = np.random.get_state()
    for arr in (data, target):
        np.random.set_state(rng_state)
        np.random.shuffle(arr)
def k_fold_generator(length, fold_num, fold_index='auto'):
    """Build train/eval index splits over ``range(length)``.

    With ``fold_num`` of 0 or 1 a single 80/20 split is produced (or a split
    at the first custom boundary). Otherwise each of the ``fold_num`` folds
    becomes the eval set once, with the complement as the train set.
    ``fold_index`` may be a list of interior boundaries, or 'auto' to divide
    the range evenly.

    Returns:
        (train_sequence, eval_sequence): two lists of int index arrays.
    """
    indices = np.arange(length, dtype='int')
    train_sequence = []
    eval_sequence = []
    if fold_num in (0, 1):
        interior = fold_index if fold_index != 'auto' else [int(length * 0.8)]
        boundaries = [0] + interior + [length]
        split = boundaries[1]
        train_sequence.append(indices[:split])
        eval_sequence.append(indices[split:])
    else:
        if fold_index != 'auto':
            boundaries = [0] + fold_index + [length]
        else:
            boundaries = [length // fold_num * k for k in range(fold_num)]
            boundaries.append(length)
        for lo, hi in zip(boundaries[:-1], boundaries[1:]):
            eval_sequence.append(indices[lo:hi])
            train_sequence.append(np.concatenate((indices[:lo], indices[hi:]), axis=0))
    if fold_num > 1:
        print('fold_index:', boundaries)
    return train_sequence, eval_sequence
def batch_generator(data, target, sequence, shuffle=True):
    """Gather a batch of samples and labels at the given indices.

    Args:
        data: array of samples, indexed along axis 0.
        target: array of labels aligned with `data` along axis 0.
        sequence: indices of the samples to select for this batch.
        shuffle: unused; kept for backward compatibility with existing callers.

    Returns:
        (batch_data, batch_target): freshly allocated arrays equal to
        ``data[sequence]`` and ``target[sequence]``.
    """
    batch_indices = np.asarray(sequence)
    # NumPy fancy indexing copies the selected rows in a single vectorized
    # call, and works for data of any rank (the original per-element loop
    # assumed 3-D data via data.shape[1]/data.shape[2]).
    return data[batch_indices], target[batch_indices]
def ToTensor(data, target=None, gpu_id=0):
    """Convert numpy arrays to torch tensors (float32 data, int64 target).

    A ``gpu_id`` of -1 keeps the tensors on CPU; any other value moves them
    to the default CUDA device. When `target` is None only the data tensor
    is returned, otherwise a (data, target) pair.
    """
    use_cuda = gpu_id != -1
    data_tensor = torch.from_numpy(data).float()
    if use_cuda:
        data_tensor = data_tensor.cuda()
    if target is None:
        return data_tensor
    target_tensor = torch.from_numpy(target).long()
    if use_cuda:
        target_tensor = target_tensor.cuda()
    return data_tensor, target_tensor
def random_transform_1d(data, finesize, test_flag):
    """Crop a batch of 1-D signals to `finesize` samples along the last axis.

    `data` is expected to be shaped (batch, channel, length). In test mode
    the crop is centered and deterministic. In training mode the crop offset
    is random, the signal is time-reversed with probability 0.5, and the
    amplitude is scaled by a random factor in [0.9, 1.1].
    """
    _, _, signal_len = data.shape
    slack = signal_len - finesize
    if test_flag:
        # Deterministic center crop.
        start = int(slack * 0.5)
        return data[:, :, start:start + finesize]
    # Random crop.
    start = int(slack * random.random())
    cropped = data[:, :, start:start + finesize]
    # Random time flip.
    if random.random() < 0.5:
        cropped = cropped[:, :, ::-1]
    # Random amplitude jitter.
    return cropped * random.uniform(0.9, 1.1)
def random_transform_2d(img, finesize=(224, 244), test_flag=True):
    """Crop a 2-D array (e.g. a spectrogram) to `finesize` = (rows, cols).

    Test mode: deterministic center crop. Training mode: random crop
    position, a 50% horizontal flip, then a random gain in [0.9, 1.1] and a
    random offset in [-0.05, 0.05].
    """
    rows, cols = img.shape[:2]
    target_h, target_w = finesize
    if test_flag:
        top = int((rows - target_h) * 0.5)
        left = int((cols - target_w) * 0.5)
        return img[top:top + target_h, left:left + target_w]
    top = int((rows - target_h) * random.random())
    left = int((cols - target_w) * random.random())
    patch = img[top:top + target_h, left:left + target_w]
    if random.random() < 0.5:
        patch = patch[:, ::-1]
    return patch * random.uniform(0.9, 1.1) + random.uniform(-0.05, 0.05)
def ToInputShape(data, opt, test_flag=False):
    """Transform a raw batch into the shape the selected model expects.

    '1d' models get a (possibly randomly) cropped signal batch; '2d' models
    get one STFT spectrogram per sample/channel, each cropped to
    (stft_h, int(0.9 * stft_w)) and stacked into
    (batchsize, input_nc, stft_h, int(0.9 * stft_w)).

    NOTE(review): for any other ``opt.model_type`` this raises NameError
    because `result` is never bound — presumably unreachable by construction.
    """
    if opt.model_type == '1d':
        result = random_transform_1d(data, opt.finesize, test_flag=test_flag)
    elif opt.model_type == '2d':
        stft_h, stft_w = opt.stft_shape
        crop_w = int(stft_w * 0.9)
        spectra = []
        for sample in range(opt.batchsize):
            for channel in range(opt.input_nc):
                spec = dsp.signal2spectrum(
                    data[sample][channel], opt.stft_size, opt.stft_stride,
                    opt.stft_n_downsample, not opt.stft_no_log)
                spectra.append(
                    random_transform_2d(spec, (stft_h, crop_w), test_flag=test_flag))
        result = np.array(spectra).reshape(opt.batchsize, opt.input_nc, stft_h, crop_w)
    return result
| 34.204918 | 132 | 0.611311 |
aceceb8965552a540c86f5097e4929b0c0aab852 | 2,699 | py | Python | datasets/voxel_dataset.py | sunbelbd/PaddleEBM | 218fabea1a43ebcc208729397aa2d69044ab323b | [
"Apache-2.0"
] | null | null | null | datasets/voxel_dataset.py | sunbelbd/PaddleEBM | 218fabea1a43ebcc208729397aa2d69044ab323b | [
"Apache-2.0"
] | null | null | null | datasets/voxel_dataset.py | sunbelbd/PaddleEBM | 218fabea1a43ebcc208729397aa2d69044ab323b | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import scipy.io
from .builder import DATASETS
from .base_dataset import BaseDataset
@DATASETS.register()
class VoxelDataSet(BaseDataset):
    """Voxel dataset loaded from ModelNet-style ``<category>_<mode>_voxel.mat`` files."""

    # Category lists for the two ModelNet benchmarks.
    _MODELNET40 = ['cup', 'bookshelf', 'lamp', 'stool', 'desk', 'toilet', 'night_stand', 'bowl', 'door', 'flower_pot', 'plant', 'stairs', 'bottle', 'mantel', 'sofa', 'laptop', 'xbox', 'tent', 'piano', 'car', 'wardrobe', 'tv_stand', 'cone', 'range_hood', 'bathtub', 'curtain', 'sink', 'glass_box', 'bed', 'chair', 'person', 'radio', 'dresser', 'bench', 'airplane', 'guitar', 'keyboard', 'table', 'monitor', 'vase']
    _MODELNET10 = ['desk', 'toilet', 'night_stand', 'sofa', 'bathtub', 'bed', 'chair', 'dresser', 'table', 'monitor']

    def __init__(self, dataroot, data_size=100000, resolution=64, mode="train", category="modelnet10"):
        """Initialize this dataset class.

        Args:
            dataroot (str): Directory containing the ``*_voxel.mat`` files.
            data_size (int): Maximum number of voxel grids to keep.
            resolution (int): Output grid resolution. 64 keeps the raw data;
                32 max-pools each spatial axis by 2. Any other value raises
                ValueError.
            mode (str): Dataset split, e.g. "train".
            category (str): A single category name, or "modelnet10" /
                "modelnet40" to concatenate every category of that benchmark.
        """
        super(VoxelDataSet, self).__init__()
        self.dataset = self.load_data(dataroot, mode, category)
        self.dataset = self.dataset[:data_size].astype(np.float32)
        if resolution == 32:
            self.dataset = self._down_sampling(self.dataset)
        elif resolution != 64:
            # Fix: raising a bare string is a TypeError in Python 3
            # ("exceptions must derive from BaseException"); raise a proper
            # exception class instead.
            raise ValueError("Resolution should be 32 or 64")
        self.dataset = self._normalization(self.dataset)

    def load_data(self, dataroot, mode="train", category="modelnet10"):
        """Load voxel grids from .mat files and concatenate them along axis 0.

        Returns:
            np.ndarray: all voxel grids of the requested categories.
        """
        if category == "modelnet40":
            categories = self._MODELNET40
        elif category == "modelnet10":
            categories = self._MODELNET10
        else:
            categories = [category]
        voxels = []
        for cat in categories:
            with open(os.path.join(dataroot, "%s_%s_voxel.mat" % (cat, mode)), "rb") as f:
                voxels.append(scipy.io.loadmat(f)["voxel"])
        # Concatenating a single-element list is a no-op value-wise, so this
        # covers both the benchmark and the single-category case.
        return np.concatenate(voxels)

    def _down_sampling(self, data):
        """Max-pool each spatial axis by 2 (64^3 -> 32^3), keeping batch size."""
        # skimage is only needed for the 32-resolution path, hence the local import.
        import skimage.measure
        return skimage.measure.block_reduce(data, (1, 2, 2, 2), np.max)

    def _normalization(self, data):
        """Center the data by subtracting its global mean."""
        data_mean = data.mean()
        print("Perform normalization, mean = %.4f" % data_mean)
        return data - data_mean
aceceb898d76c25df6149f291eeb9c3df27d0d05 | 1,942 | py | Python | lib/surface/dataplex/assets/set_iam_policy.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/dataplex/assets/set_iam_policy.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/dataplex/assets/set_iam_policy.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud dataplex asset set-iam-policy-binding` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dataplex import asset
from googlecloudsdk.api_lib.util import exceptions as gcloud_exception
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataplex import resource_args
from googlecloudsdk.command_lib.iam import iam_util
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SetIamPolicy(base.Command):
  """Set an IAM policy binding to an asset."""

  detailed_help = {
      'EXAMPLES':
          """\
          To set an IAM policy of an asset, run:
            $ {command} projects/test-project/locations/us-central1/lakes/test-lake/zones/test-zone/assets/test-asset policy.json
          policy.json is the relative path to the json file.
          """,
  }

  @staticmethod
  def Args(parser):
    """Register the asset resource argument and the policy-file positional."""
    resource_args.AddAssetResourceArg(parser, 'to set IAM policy to.')
    iam_util.AddArgForPolicyFile(parser)

  @gcloud_exception.CatchHTTPErrorRaiseHTTPException(
      'Status code: {status_code}. {status_message}.')
  def Run(self, args):
    """Parse the asset reference and apply the IAM policy read from the file."""
    return asset.SetIamPolicyFromFile(args.CONCEPTS.asset.Parse(),
                                      args.policy_file)
| 35.309091 | 129 | 0.747168 |
aceced386f74e5dda3a8dd0fbe3f4235597e8fd7 | 945 | py | Python | tests/life/test_ideas.py | mskymoore/yoda | 3392314e182a1afd98fe46f4928afd44c7ac8b51 | [
"MIT"
] | 747 | 2017-06-28T04:58:53.000Z | 2022-02-14T21:40:52.000Z | tests/life/test_ideas.py | mskymoore/yoda | 3392314e182a1afd98fe46f4928afd44c7ac8b51 | [
"MIT"
] | 235 | 2017-06-30T12:58:02.000Z | 2019-05-02T02:56:18.000Z | tests/life/test_ideas.py | mskymoore/yoda | 3392314e182a1afd98fe46f4928afd44c7ac8b51 | [
"MIT"
] | 237 | 2017-06-12T21:03:03.000Z | 2021-09-16T14:48:59.000Z | # coding=utf-8
from unittest import TestCase
from click.testing import CliRunner
import yoda
class TestHealth(TestCase):
    """
    Test for the following commands:

    | Module: ideas
    | command: ideas (status / show / add / remove subcommands)
    """

    def __init__(self, methodName="runTest"):
        # Forward methodName instead of dropping it, so a harness can still
        # target a specific test method explicitly.
        super(TestHealth, self).__init__(methodName)
        self.runner = CliRunner()

    def runTest(self):
        # Each CLI invocation must exit cleanly; 'remove' is run both before
        # and after 'add' to cover the empty and non-empty idea lists.
        for subcommand in ("status", "show", "remove", "add", "remove"):
            result = self.runner.invoke(yoda.cli, ["ideas", subcommand])
            self.assertEqual(result.exit_code, 0)
| 27 | 66 | 0.628571 |
aceced8cedefa8014c5d092d85c327b38663bc13 | 682 | py | Python | op_builder/__init__.py | vfdev-5/DeepSpeed | e2dfcadf3b30437a6232f6c689e695aa44931a4c | [
"MIT"
] | 1 | 2021-03-08T04:10:17.000Z | 2021-03-08T04:10:17.000Z | op_builder/__init__.py | vfdev-5/DeepSpeed | e2dfcadf3b30437a6232f6c689e695aa44931a4c | [
"MIT"
] | 2 | 2020-05-28T01:37:24.000Z | 2022-02-26T06:51:48.000Z | op_builder/__init__.py | vfdev-5/DeepSpeed | e2dfcadf3b30437a6232f6c689e695aa44931a4c | [
"MIT"
] | 1 | 2021-01-23T16:07:54.000Z | 2021-01-23T16:07:54.000Z | from .cpu_adam import CPUAdamBuilder
from .fused_adam import FusedAdamBuilder
from .fused_lamb import FusedLambBuilder
from .sparse_attn import SparseAttnBuilder
from .transformer import TransformerBuilder
from .stochastic_transformer import StochasticTransformerBuilder
from .utils import UtilsBuilder
from .builder import get_default_compute_capatabilities
# TODO: infer this list instead of hard coded
# List of all available ops
__op_builders__ = [
    CPUAdamBuilder(),
    FusedAdamBuilder(),
    FusedLambBuilder(),
    SparseAttnBuilder(),
    TransformerBuilder(),
    StochasticTransformerBuilder(),
    UtilsBuilder()
]
# Registry of the builders above, keyed by each builder's `name` attribute.
ALL_OPS = {op.name: op for op in __op_builders__}
| 31 | 64 | 0.803519 |
acecedce6c070acda2ca361fc29534f9b282ecad | 2,392 | py | Python | pgoapi/protos/pogoprotos/networking/requests/messages/verify_challenge_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 14 | 2017-03-28T16:32:24.000Z | 2021-03-13T23:03:57.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/verify_challenge_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 8 | 2017-03-01T07:56:09.000Z | 2017-08-15T07:37:12.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/verify_challenge_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 15 | 2017-02-24T01:30:23.000Z | 2021-06-27T08:46:43.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/verify_challenge_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: protoc-generated descriptors (see the DO NOT EDIT header above).
# Regenerate from the .proto definition instead of editing this by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/networking/requests/messages/verify_challenge_message.proto',
  package='pogoprotos.networking.requests.messages',
  syntax='proto3',
  serialized_pb=_b('\nFpogoprotos/networking/requests/messages/verify_challenge_message.proto\x12\'pogoprotos.networking.requests.messages\"\'\n\x16VerifyChallengeMessage\x12\r\n\x05token\x18\x01 \x01(\tb\x06proto3')
)
# Message descriptor: VerifyChallengeMessage carries a single string field
# `token` with field number 1.
_VERIFYCHALLENGEMESSAGE = _descriptor.Descriptor(
  name='VerifyChallengeMessage',
  full_name='pogoprotos.networking.requests.messages.VerifyChallengeMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='token', full_name='pogoprotos.networking.requests.messages.VerifyChallengeMessage.token', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=115,
  serialized_end=154,
)
DESCRIPTOR.message_types_by_name['VerifyChallengeMessage'] = _VERIFYCHALLENGEMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class is synthesized at import time from the descriptor.
VerifyChallengeMessage = _reflection.GeneratedProtocolMessageType('VerifyChallengeMessage', (_message.Message,), dict(
  DESCRIPTOR = _VERIFYCHALLENGEMESSAGE,
  __module__ = 'pogoprotos.networking.requests.messages.verify_challenge_message_pb2'
  # @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.VerifyChallengeMessage)
  ))
_sym_db.RegisterMessage(VerifyChallengeMessage)
# @@protoc_insertion_point(module_scope)
| 34.171429 | 216 | 0.793896 |
acecef26796486e7cdfce61f6aa27d7711f043cf | 3,110 | py | Python | src/FitCurves/FitCurves.py | songrun/VectorSkinning | a19dff78215b51d824adcd39c7dcdf8dc78ec617 | [
"Apache-2.0"
] | 18 | 2015-04-29T20:54:15.000Z | 2021-12-13T17:48:05.000Z | src/FitCurves/FitCurves.py | songrun/VectorSkinning | a19dff78215b51d824adcd39c7dcdf8dc78ec617 | [
"Apache-2.0"
] | null | null | null | src/FitCurves/FitCurves.py | songrun/VectorSkinning | a19dff78215b51d824adcd39c7dcdf8dc78ec617 | [
"Apache-2.0"
] | 8 | 2017-04-23T17:52:13.000Z | 2022-03-14T11:01:56.000Z | import os
from cffi import FFI
from numpy import *
try:
from pydb import debugger
## Also add an exception hook.
import pydb, sys
sys.excepthook = pydb.exception_hook
except ImportError:
import pdb
def debugger():
pdb.set_trace()
## Compile the library with:
'''
# OSX
gcc -fPIC \
FitCurves.c GGVecLib.c \
-dynamiclib -o FitCurves.dylib \
-g -O3 -Wall -Wshadow -Wno-sign-compare
# Linux
gcc -fPIC \
FitCurves.c GGVecLib.c \
-shared -o FitCurves.so \
-g -O2 -Wall -Wshadow -Wno-sign-compare
# Cygwin?
gcc -fPIC \
FirCurve.c GGVecLib.c \
-shared -o FitCurves.dll \
-g -O2 -Wall -Wshadow -Wno-sign-compare
'''
ffi = FFI()
ffi.cdef("""
typedef struct Point2Struct { /* 2d point */
double x, y;
} Point2;
typedef Point2 Vector2;
typedef Point2 *BezierCurve;
void (*DrawBezierCurve)(int n, BezierCurve curve);
void FitCurve(Point2 *d, int nPts, double error);
""")
import ctypes
def platform_shared_library_suffix():
    '''Return the shared-library filename suffix for the running platform:
    '.dylib' on macOS, '.dll' on Windows/Cygwin, '.so' everywhere else.'''
    import sys
    platform = sys.platform.lower()
    ## Checked in order of precedence: 'win' is a substring of 'darwin', so
    ## the darwin test must be decided first.
    if 'darwin' in platform:
        return '.dylib'
    if 'win' in platform:
        return '.dll'
    return '.so'
# Load the compiled FitCurves shared library sitting next to this module.
libFitCurves = ffi.dlopen( os.path.join( os.path.dirname( __file__ ), 'FitCurves' + platform_shared_library_suffix() ) )
# C-side callback: called once per fitted cubic segment; copies the n+1
# control points into the module-level accumulator `curve_in_progress`.
@ffi.callback("void(int,BezierCurve)")
def DrawBezierCurve( n, curve ):
    # FitCurve() must have installed the accumulator before the C code runs.
    assert curve_in_progress is not None
    bezier = []
    for i in xrange( n+1 ):
        bezier.append( ( curve[i].x, curve[i].y ) )
    curve_in_progress.append( bezier )
# Install the Python callback as the library's segment-output hook.
libFitCurves.DrawBezierCurve = DrawBezierCurve
# Accumulator for the segments emitted by DrawBezierCurve; it is non-None
# only while FitCurve() below is executing.
curve_in_progress = None
def FitCurve( vertices, error = 1e-1 ):
    '''
    Given an N-by-2 numpy array 'vertices' of 2D vertices representing a line strip,
    returns an M-by-4-by-2 numpy.array of M cubic bezier curves approximating 'vertices'
    (4 control points per fitted segment; the segment count depends on 'error').
    NOTE(review): not re-entrant -- results are collected through the
    module-level 'curve_in_progress' handshake with the C callback.
    '''
    import numpy
    global curve_in_progress
    assert curve_in_progress is None
    ## Make sure the input values have their data in a way easy to access from C.
    vertices = numpy.ascontiguousarray( numpy.asarray( vertices, dtype = ctypes.c_double ) )
    ## 'vertices' must be 2D
    assert vertices.shape[1] == 2
    ## This calls a callback function that appends to the global variable 'curve_in_progress'.
    curve_in_progress = []
    libFitCurves.FitCurve(
        ffi.cast( 'Point2*', vertices.ctypes.data ),
        len( vertices ),
        error
        )
    # Reset the handshake before returning so the next call's assert passes.
    result = asarray( curve_in_progress )
    curve_in_progress = None
    return result
def test_simple( N = 10 ):
    '''
    Smoke test: fit bezier segments to an N-point straight line strip and
    print both the input points and the fitted control points.
    '''
    ## Parenthesized single-argument print works under both Python 2 and 3;
    ## the bare "print 'x'" statement form is a SyntaxError on Python 3.
    print( 'test_simple( %d )' % N )
    from pprint import pprint
    assert N > 1
    line_strip = zeros( ( N, 2 ) )
    line_strip[:,0] = linspace( 0, 1, N )
    line_strip[:,1] = linspace( -1, 1, N )
    pprint( line_strip )
    beziers = FitCurve( line_strip )
    pprint( beziers )
def main():
    '''Entry point: optional argv[1] overrides the number of strip points.'''
    import sys
    point_count = int( sys.argv[1] ) if len( sys.argv ) > 1 else 10
    test_simple( point_count )
if __name__ == '__main__': main()
acecf0151b9fd490e7b5fd67b1f68c3d16e87ea4 | 22,146 | py | Python | Latent_Space_Transfer/4_manual_mask_CNN_63/train_InstinsicDAE_TF.py | bhushan23/illumination-nets | a7e579489e3ed67c926b27113cf65eec2aea6287 | [
"BSD-2-Clause"
] | null | null | null | Latent_Space_Transfer/4_manual_mask_CNN_63/train_InstinsicDAE_TF.py | bhushan23/illumination-nets | a7e579489e3ed67c926b27113cf65eec2aea6287 | [
"BSD-2-Clause"
] | 5 | 2018-11-03T19:54:23.000Z | 2018-11-28T05:36:23.000Z | Latent_Space_Transfer/4_manual_mask_CNN_63/train_InstinsicDAE_TF.py | bhushan23/illumination-nets | a7e579489e3ed67c926b27113cf65eec2aea6287 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import argparse
import os
import sys
sys.path.insert(0, './core')
sys.path.insert(0, './models')
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from torch.autograd import gradcheck
from torch.autograd import Function
import math
# our data loader
import DAELightTransferDataLoader as lightDL
import gc
# ---- command-line configuration ----
# ON_SERVER toggles between the cluster paths and the local dev paths below.
ON_SERVER = True
parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, help='number of data loading workers', default=8)
parser.add_argument('--batchSize', type=int, default=100, help='input batch size')
parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
# NOTE(review): store_true with default=True means --cuda can never be turned
# off from the command line.
parser.add_argument('--cuda', default = True, action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--gpu_ids', type=int, default=0, help='ids of GPUs to use')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--epoch_iter', type=int,default=600, help='number of epochs on entire dataset')
parser.add_argument('--location', type = int, default=0, help ='where is the code running')
parser.add_argument('-f',type=str,default= '', help='dummy input required for jupyter notebook')
parser.add_argument('--modelPath', default='', help="path to model (to continue training)")
# Output and dataset roots depend on where the script runs.
if ON_SERVER:
    out_path = './results_CNN_Latent_128/' # '/nfs/bigdisk/bsonawane/LightTransfer_Exp/Latent_Space/2_CNN_64/' #cropped_multipie/4_manual_masking_map_approach_for_lighting_only_multipie_cropped_63/'
    # data_path = '/nfs/bigdisk/zhshu/data/fare/real/multipie_select_batches/'
    data_path = '/nfs/bigmind/add_ssd/zhshu/data/Multipie/crops14/'
    # data_path = '/nfs/bigdisk/multipie_cropped/'
    # data_path = '/nfs/bigdisk/bsonawane/multipie-data/'
else:
    out_path = '/home/bhushan/work/thesis/Sem2/source/experiment/illumination-nets/1_lighting_transfer_with_unknown_light_source/output'
    data_path = '/home/bhushan/work/thesis/Sem2/source/experiment/illumination-nets/data/multipie_select_batches/'
parser.add_argument('--dirCheckpoints', default=out_path+'/checkpoints/dae-2', help='folder to model checkpoints')
parser.add_argument('--dirImageoutput', default=out_path+'/images/train', help='folder to output images')
parser.add_argument('--dirTestingoutput', default=out_path+'/images/test', help='folder to testing results/images')
parser.add_argument('--dirDataroot', default=data_path, help='folder to dataroot')
parser.add_argument('--useDense', default = True, help='enables dense net architecture')
parser.add_argument('--use_celeba', default= False, help='If true use celebA else Multipie')
opt = parser.parse_args()
# ---- hard-coded hyperparameters ----
# NOTE(review): these assignments overwrite any values parsed from the CLI
# above (e.g. opt.cuda, opt.ngpu, opt.gpu_ids, opt.useDense).
# size of image
opt.imgSize=64
opt.cuda = True
opt.use_dropout = 0
opt.ngf = 32
opt.ndf = 32
# dimensionality: shading latent code
opt.sdim = 16
# dimensionality: albedo latent code
opt.tdim = 16
# dimensionality: texture (shading*albedo) latent code
opt.idim = opt.sdim + opt.tdim
# dimensionality: warping grid (deformation field) latent code
opt.wdim = 128
# dimensionality of general latent code (before disentangling)
opt.zdim = 128
opt.use_gpu = True
opt.gpu_ids = 0
opt.ngpu = 1
opt.nc = 3
opt.useDense=True
print(opt)
# Create the output directories; an already-existing directory raises
# OSError, which is deliberately ignored.
try:
    os.makedirs(opt.dirCheckpoints)
except OSError:
    pass
try:
    os.makedirs(opt.dirImageoutput)
except OSError:
    pass
try:
    os.makedirs(opt.dirTestingoutput)
except OSError:
    pass
# Seed python and torch RNGs for reproducibility (random seed if none given).
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
    torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
def getBaseGrid(N=64, normalize=True, getbatch=False, batchSize=1):
    """Build the identity sampling grid used as the warping baseline.

    Returns a (2, N, N) tensor: channel 0 varies along the width axis and
    channel 1 along the height axis.  With normalize=True the coordinates
    span [-1, 1]; with getbatch=True the grid is tiled to
    (batchSize, 2, N, N).
    """
    # Odd integers -(N-1), -(N-3), ..., N-1: exactly N evenly spaced samples.
    coords = torch.arange(-(N - 1), N, 2)
    if normalize:
        coords = coords / (N - 1)
    x = coords.repeat(N, 1)  # every row is the coordinate ramp
    y = x.t()                # transposed: every column is the ramp
    grid = torch.stack((x, y), 0)
    if getbatch:
        grid = grid.unsqueeze(0).repeat(batchSize, 1, 1, 1)
    return grid
def weights_init(m):
    """DCGAN-style initializer, intended for use via model.apply(weights_init).

    Conv-like layers get N(0, 0.02) weights; BatchNorm-like layers get
    N(1, 0.02) weights and zero bias.  Other modules are left untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# sample iamges
def visualizeAsImages(img_list, output_dir,
    n_sample=4, id_sample=None, dim=-1,
    filename='myimage', nrow=2,
    normalize=False):
    """Save a grid of samples from a (B, C, H, W) batch to <output_dir>/<filename>.png.

    Takes either the first n_sample images or those indexed by id_sample;
    when dim >= 0 only that channel is kept (written as a 1-channel image).
    """
    if id_sample is None:
        selected = img_list[0:n_sample, :, :, :]
    else:
        selected = img_list[id_sample, :, :, :]
    if dim >= 0:
        # Keep one channel but restore the 4D layout save_image expects.
        selected = selected[:, dim, :, :].unsqueeze(1)
    vutils.save_image(selected,
        '%s/%s'% (output_dir, filename+'.png'),
        nrow=nrow, normalize=normalize, padding=2)
def parseSampledDataPoint(dp0_img, nc):
    """Convert a sampled byte image batch to float NCHW in [0, 1].

    Expected input layout is (B, H, W, C) for nc==3 and (B, H, W) for nc==1;
    in the grayscale case a channel axis is appended before permuting.
    """
    batch = dp0_img.float() / 255  # byte values -> floats in [0, 1]
    if nc == 1:
        batch = batch.unsqueeze(3)  # add the missing channel axis
    # (B, H, W, C) -> (B, C, H, W); contiguous for downstream conv layers.
    return batch.permute(0, 3, 1, 2).contiguous()
def setCuda(*args):
    """Move every tensor in *args onto the default CUDA device; returns a list."""
    return [arg.cuda() for arg in args]
def setAsVariable(*args):
    """Wrap every tensor in *args in an autograd Variable; returns a list."""
    return [Variable(arg) for arg in args]
# ---- The model ---- #
# get the model definition/architecture
# get network
import DAENet
# Dense* variants use the dense-net architecture; opt.useDense selects them.
if opt.useDense:
    encoders = DAENet.Dense_Encoders_Intrinsic(opt)
    decoders = DAENet.Dense_DecodersIntegralWarper2_Intrinsic(opt)
else:
    encoders = DAENet.Encoders_Intrinsic(opt)
    decoders = DAENet.DecodersIntegralWarper2_Intrinsic(opt)
# light_transfer = DAENet.LightingTransfer(opt)
if opt.cuda:
    encoders.cuda()
    decoders.cuda()
    # light_transfer.cuda()
# Resume from '<modelPath>_encoders.pth' / '<modelPath>_decoders.pth' when a
# model path is given; otherwise initialize weights with weights_init above.
if not opt.modelPath=='':
    # rewrite here
    print('Reload previous model at: '+ opt.modelPath)
    encoders.load_state_dict(torch.load(opt.modelPath+'_encoders.pth'))
    decoders.load_state_dict(torch.load(opt.modelPath+'_decoders.pth'))
    # light_transfer.load_state_dict(torch.load(opt.modelPath+'_lighttransfer.pth'))
else:
    print('No previous model found, initializing model weight.')
    encoders.apply(weights_init)
    decoders.apply(weights_init)
    # light_transfer.apply(weights_init)
print(opt.gpu_ids)
# One Adam optimizer per sub-network.
updator_encoders = optim.Adam(encoders.parameters(), lr = opt.lr, betas=(opt.beta1, 0.999))
updator_decoders = optim.Adam(decoders.parameters(), lr = opt.lr, betas=(opt.beta1, 0.999))
# updator_lighttran = optim.Adam(light_transfer.parameters(), lr = opt.lr, betas=(opt.beta1, 0.999))
# criteria/loss
criterionRecon = nn.L1Loss()
criterionTVWarp = DAENet.TotalVaryLoss(opt)
criterionBiasReduce = DAENet.BiasReduceLoss(opt)
criterionSmoothL1 = DAENet.TotalVaryLoss(opt)
criterionSmoothL2 = DAENet.SelfSmoothLoss2(opt)
# Training set
TrainingData = []
# Following dataset was used for previous experiments
# (the triple-quoted block below is intentionally dead code kept for reference)
"""
TrainingData.append(opt.dirDataroot + 'session01_01_select')
TrainingData.append(opt.dirDataroot + 'session01_02_select')
TrainingData.append(opt.dirDataroot + 'session01_03_select')
TrainingData.append(opt.dirDataroot + 'session01_04_select')
TrainingData.append(opt.dirDataroot + 'session01_05_select')
TrainingData.append(opt.dirDataroot + 'session01_06_select')
TrainingData.append(opt.dirDataroot + 'session01_07_select')
TrainingData.append(opt.dirDataroot + 'session02_01_select')
TrainingData.append(opt.dirDataroot + 'session02_02_select')
TrainingData.append(opt.dirDataroot + 'session02_03_select')
TrainingData.append(opt.dirDataroot + 'session02_04_select')
TrainingData.append(opt.dirDataroot + 'session02_05_select')
TrainingData.append(opt.dirDataroot + 'session02_06_select')
TrainingData.append(opt.dirDataroot + 'session02_07_select')
TrainingData.append(opt.dirDataroot + 'session03_01_select')
TrainingData.append(opt.dirDataroot + 'session03_02_select')
TrainingData.append(opt.dirDataroot + 'session03_03_select')
TrainingData.append(opt.dirDataroot + 'session03_04_select')
TrainingData.append(opt.dirDataroot + 'session03_05_select')
TrainingData.append(opt.dirDataroot + 'session04_01_select')
TrainingData.append(opt.dirDataroot + 'session04_02_select')
TrainingData.append(opt.dirDataroot + 'session04_03_select')
TrainingData.append(opt.dirDataroot + 'session04_04_select')
TrainingData.append(opt.dirDataroot + 'session04_05_select')
TrainingData.append(opt.dirDataroot + 'session04_06_select')
TrainingData.append(opt.dirDataroot + 'session04_07_select')
TrainingMask = []
TrainingMask.append(opt.dirDataroot + 'session01_masks')
TrainingMask.append(opt.dirDataroot + 'session02_masks')
TrainingMask.append(opt.dirDataroot + 'session03_masks')
TrainingMask.append(opt.dirDataroot + 'session04_masks')
#
#
# # Testing set
TestingData = []
TestingData.append(opt.dirDataroot + 'session01_select_test')
TestingMask = []
TestingMask.append(opt.dirDataroot + 'session01_masks')
"""
# Active dataset selection: Multipie crops14 session 1 for training, a held
# out crops14 session for testing; no mask directories are used here.
TrainingData = []
# TrainingData.append(opt.dirDataroot + 'small_train')
TrainingData.append(opt.dirDataroot + 'session01_crops14')
# TrainingData.append(opt.dirDataroot + 'session02_crops14')
# TrainingData.append(opt.dirDataroot + 'session03_crops14')
# TrainingData.append(opt.dirDataroot + 'session04_crops14')
TestingData = []
TestingData.append(opt.dirDataroot + 'session_test_crops14')
# TestingData.append(opt.dirDataroot + 'small_test')
TrainingMask = []
TestingMask = []
# ------------ training ------------ #
doTraining = True
doTesting = True
iter_mark=0
print("Loading Dataset")
# Build the train/test loaders: CelebA when opt.use_celeba is set, otherwise
# the Multipie lighting-triplet dataset from DAELightTransferDataLoader.
if opt.use_celeba:
    train_dataset = lightDL.CelebA_DataLoader(dir_path=opt.dirDataroot, batch_size=32, resize=64, is_training=True)
    test_dataset = lightDL.CelebA_DataLoader(dir_path=opt.dirDataroot, batch_size=32, resize=64, is_training=False)
    dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
    dataloader_test = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
else:
    dataset = lightDL.FareMultipieLightingTripletsFrontal(None, root=TrainingData, root_mask=TrainingMask, transform = None, resize = 64)
    # train_amount = train_amount + len(dataset)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
    dataset_test = lightDL.FareMultipieLightingTripletsFrontal(None, root=TestingData, root_mask = TestingMask, transform = None, resize=64)
    dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
print("Dataset Loaded")
# print('# size of the current (sub)dataset is %d' %len(dataset))
# train_amount = train_amount + len(dataset)
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
#
#
# dataset_test = lightDL.FareMultipieLightingTripletsFrontal(None, root=TestingData, root_mask = TestingMask, transform = None, resize=64)
# dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
# _, src_img, _, dest_img = next(iter(dataloader))
#
# src_img = parseSampledDataPoint(src_img, opt.nc)
# src_img = src_img.type(torch.cuda.FloatTensor)
#
# dest_img = parseSampledDataPoint(dest_img, opt.nc)
# dest_img = dest_img.type(torch.cuda.FloatTensor)
#
# visualizeAsImages(src_img.data.clone(),
#         opt.dirImageoutput,
#         filename='TEST_INIT_srcimg0', n_sample = 49, nrow=7, normalize=False)
# visualizeAsImages(dest_img.data.clone(),
#         opt.dirImageoutput,
#         filename='TEST_INIT_destImg', n_sample = 49, nrow=7, normalize=False)
print('Log done')
# ==== main training loop: one full pass over the loader per epoch ====
for epoch in range(opt.epoch_iter):
    train_loss = 0
    # Tiny epsilon, presumably to guard an average against division by zero;
    # train_amount is not updated anywhere in this loop -- TODO confirm.
    train_amount = 0+1e-6
    gc.collect() # collect garbage
    encoders.train()
    decoders.train()
    # for dataroot in TrainingData:
    if not doTraining:
        break
    for batch_idx, data_point in enumerate(dataloader, 0):
        #raw_input("Press Enter to continue...")
        gc.collect() # collect garbage
        ### prepare data ###
        # data_point[0] is unused; [1] source image, [2] target lighting
        # code, [3] target (relit) image.
        dp0_img, dest_light, dest_img = data_point[1], data_point[2], data_point[3]
        # dest_img = dest_img.type(torch.cuda.FloatTensor)
        # dest_img = dest_img.permute(0, 3, 1, 2)
        # print('dest_light: ', dest_light)
        dp0_img = parseSampledDataPoint(dp0_img, opt.nc)
        dp0_img = dp0_img.type(torch.cuda.FloatTensor)
        dest_img = parseSampledDataPoint(dest_img, opt.nc)
        dest_img = dest_img.type(torch.cuda.FloatTensor)
        # Identity warp grid plus an all-zero warp used as the bias target.
        baseg = getBaseGrid(N=opt.imgSize, getbatch = True, batchSize = dp0_img.size()[0])
        zeroWarp = torch.cuda.FloatTensor(1, 2, opt.imgSize, opt.imgSize).fill_(0)
        if opt.cuda:
            dp0_img, baseg, zeroWarp = setCuda(dp0_img, baseg, zeroWarp)
        dp0_img, = setAsVariable(dp0_img)
        baseg = Variable(baseg, requires_grad=False)
        zeroWarp = Variable(zeroWarp, requires_grad=False)
        updator_decoders.zero_grad()
        updator_encoders.zero_grad()
        #updator_lighttran.zero_grad()
        decoders.zero_grad()
        encoders.zero_grad()
        # light_transfer.zero_grad()
        ### forward training points: dp0
        # Encode into shading (zS), albedo/texture (zT) and warp (zW) codes,
        # then decode conditioned on the target lighting.
        dp0_z, dp0_zS, dp0_zT, dp0_zW = encoders(dp0_img)
        baseg = baseg.type(torch.cuda.FloatTensor)
        # new_zS = light_transfer(dest_light, do0_zS)
        dp0_S, dp0_T, dp0_I, dp0_W, dp0_output, dp0_Wact = decoders(dest_light, dp0_zS, dp0_zT, dp0_zW, baseg)
        # reconstruction loss
        loss_recon = criterionRecon(dp0_output, dest_img)
        # smooth warping loss
        loss_tvw = criterionTVWarp(dp0_W, weight=1e-6)
        # bias reduce loss
        loss_br = criterionBiasReduce(dp0_W, zeroWarp, weight=1e-2)
        # intrinsic loss :Shading, L2
        loss_intr_S = criterionSmoothL2(dp0_S, weight = 1e-6)
        # all loss functions
        loss_all = loss_recon + loss_tvw + loss_br + loss_intr_S
        loss_all.backward()
        updator_decoders.step()
        updator_encoders.step()
        # NOTE(review): .data[0] is the pre-0.4 PyTorch scalar accessor; on
        # modern torch this should be .item().
        loss_encdec = loss_recon.data[0] + loss_br.data[0] + loss_tvw.data[0] + loss_intr_S.data[0]
        train_loss += loss_encdec
        iter_mark+=1
        print('Iteration[%d] loss -- all: %.4f .. recon: %.4f .. tvw: %.4f .. br: %.4f .. intr_s: %.4f .. '
            % (iter_mark, loss_encdec, loss_recon.data[0], loss_tvw.data[0], loss_br.data[0], loss_intr_S.data[0]))
    # visualzing training progress
    # Uses whatever tensors are left over from the LAST batch of the epoch.
    gx = (dp0_W.data[:,0,:,:]+baseg.data[:,0,:,:]).unsqueeze(1).clone()
    gy = (dp0_W.data[:,1,:,:]+baseg.data[:,1,:,:]).unsqueeze(1).clone()
    visualizeAsImages(dp0_img.data.clone(),
        opt.dirImageoutput,
        filename='iter_'+str(iter_mark)+'_img0_', n_sample = 49, nrow=7, normalize=False)
    visualizeAsImages(dest_img.data.clone(),
        opt.dirImageoutput,
        filename='iter_'+str(iter_mark)+'_destImg_', n_sample = 49, nrow=7, normalize=False)
    #visualizeAsImages(dp0_I.data.clone(),
    #    opt.dirImageoutput,
    #    filename='iter_'+str(iter_mark)+'_tex0_', n_sample = 49, nrow=7, normalize=False)
    #visualizeAsImages(dp0_S.data.clone(),
    #    opt.dirImageoutput,
    #    filename='iter_'+str(iter_mark)+'_intr_shade0_', n_sample = 49, nrow=7, normalize=False)
    #visualizeAsImages(dp0_T.data.clone(),
    #    opt.dirImageoutput,
    #    filename='iter_'+str(iter_mark)+'_intr_tex0_', n_sample = 49, nrow=7, normalize=False)
    visualizeAsImages(dp0_output.data.clone(),
        opt.dirImageoutput,
        filename='iter_'+str(iter_mark)+'_output0_', n_sample = 49, nrow=7, normalize=False)
    #visualizeAsImages((gx+1)/2,
    #    opt.dirImageoutput,
    #    filename='iter_'+str(iter_mark)+'_warp0x_', n_sample = 49, nrow=7, normalize=False)
    #visualizeAsImages((gy+1)/2,
    #    opt.dirImageoutput,
    #    filename='iter_'+str(iter_mark)+'_warp0y_', n_sample = 49, nrow=7, normalize=False)
    if doTraining:
        # do checkpointing
        # NOTE(review): the same two filenames are overwritten every epoch.
        torch.save(encoders.state_dict(), '%s/wasp_model_epoch_encoders.pth' % (opt.dirCheckpoints))
        torch.save(decoders.state_dict(), '%s/wasp_model_epoch_decoders.pth' % (opt.dirCheckpoints))
    # ------------ testing ------------ #
    # on synthetic image set
    # Runs once per epoch on the first test batch only (see the `break` at
    # the bottom of the loop).
    print('Testing images ... ')
    #raw_input("Press Enter to continue...")
    testing_loss=0
    gc.collect() # collect garbage
    if doTesting:
        # NOTE(review): the networks are put in train() mode here even though
        # this is evaluation -- eval() is presumably intended; confirm.
        encoders.train()
        decoders.train()
        for batch_idx, data_point in enumerate(dataloader_test, 0):
            #raw_input("Press Enter to continue...")
            gc.collect() # collect garbage
            ### prepare data ###
            dp0_img, dest_light, dest_img = data_point[1], data_point[2], data_point[3]
            dest_img = parseSampledDataPoint(dest_img, opt.nc)
            dest_img = dest_img.type(torch.cuda.FloatTensor)
            dp0_img = parseSampledDataPoint(dp0_img, opt.nc)
            baseg = getBaseGrid(N=opt.imgSize, getbatch = True, batchSize = dp0_img.size()[0])
            zeroWarp = torch.cuda.FloatTensor(1, 2, opt.imgSize, opt.imgSize).fill_(0)
            if opt.cuda:
                dp0_img, baseg, zeroWarp = setCuda(dp0_img, baseg, zeroWarp)
            dp0_img, = setAsVariable(dp0_img)
            baseg = Variable(baseg, requires_grad=False)
            zeroWarp = Variable(zeroWarp, requires_grad=False)
            # Gradients are zeroed but no backward()/step() is performed in
            # this evaluation pass.
            updator_decoders.zero_grad()
            updator_encoders.zero_grad()
            decoders.zero_grad()
            encoders.zero_grad()
            dp0_img = dp0_img.type(torch.cuda.FloatTensor)
            ### forward training points: dp0
            dp0_z, dp0_zS, dp0_zT, dp0_zW = encoders(dp0_img)
            baseg = baseg.type(torch.cuda.FloatTensor)
            dp0_S, dp0_T, dp0_I, dp0_W, dp0_output, dp0_Wact = decoders(dest_light, dp0_zS, dp0_zT, dp0_zW, baseg)
            # reconstruction loss
            loss_recon = criterionRecon(dp0_output, dest_img)
            # smooth warping loss
            loss_tvw = criterionTVWarp(dp0_W, weight=1e-6)
            # bias reduce loss
            loss_br = criterionBiasReduce(dp0_W, zeroWarp, weight=1e-2)
            # intrinsic loss :Shading, L2
            loss_intr_S = criterionSmoothL2(dp0_S, weight = 1e-6)
            # all loss functions
            loss_all = loss_recon + loss_tvw + loss_br + loss_intr_S
            loss_encdec = loss_recon.data[0] + loss_br.data[0] + loss_tvw.data[0] + loss_intr_S.data[0]
            testing_loss += loss_encdec
            print('Iteration[%d] loss -- all: %.4f .. recon: %.4f .. tvw: %.4f .. br: %.4f .. intr_s: %.4f .. '
                % (iter_mark, loss_encdec, loss_recon.data[0], loss_tvw.data[0], loss_br.data[0], loss_intr_S.data[0]))
            # visualzing training progress
            print('Storing:' )
            gx = (dp0_W.data[:,0,:,:]+baseg.data[:,0,:,:]).unsqueeze(1).clone()
            gy = (dp0_W.data[:,1,:,:]+baseg.data[:,1,:,:]).unsqueeze(1).clone()
            visualizeAsImages(dp0_img.data.clone(),
                opt.dirTestingoutput,
                filename='img0_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            #visualizeAsImages(dp0_I.data.clone(),
            #    opt.dirTestingoutput,
            #    filename='tex0_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            visualizeAsImages(dest_img.data.clone(),
                opt.dirTestingoutput,
                filename='expected_output0_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            #visualizeAsImages(dp0_S.data.clone(),
            #    opt.dirTestingoutput,
            #    filename='intr_shade0_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            #visualizeAsImages(dp0_T.data.clone(),
            #    opt.dirTestingoutput,
            #    filename='intr_tex0_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            visualizeAsImages(dp0_output.data.clone(),
                opt.dirTestingoutput,
                filename='output0_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            #visualizeAsImages((gx+1)/2,
            #    opt.dirTestingoutput,
            #    filename='warp0x_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            #visualizeAsImages((gy+1)/2,
            #    opt.dirTestingoutput,
            #    filename='warp0y_'+str(iter_mark), n_sample = 49, nrow=7, normalize=False)
            break
    # put testing code here #
    gc.collect() # collect garbage
    ##
| 41.011111 | 202 | 0.663912 |
acecf056c89d10d9a067cc74f6efaf30c117f290 | 1,617 | py | Python | sure_weather/weather/weatherdotcom.py | vamshikr/sure_weather | 100612f1879e4606461283de23adc9192b9d9850 | [
"MIT"
] | null | null | null | sure_weather/weather/weatherdotcom.py | vamshikr/sure_weather | 100612f1879e4606461283de23adc9192b9d9850 | [
"MIT"
] | null | null | null | sure_weather/weather/weatherdotcom.py | vamshikr/sure_weather | 100612f1879e4606461283de23adc9192b9d9850 | [
"MIT"
] | null | null | null | import json
import logging
import os
from http import HTTPStatus
import aiohttp
from sure_weather.exceptions import WeatherServiceException
from sure_weather.weather.base_service import BaseWeatherService
class WeatherDotCom(BaseWeatherService):
    """
    Weather.com service adapter.

    Posts a latitude/longitude pair to the weather.com endpoint configured via
    the ``WEATHERDOTCOM_URL`` environment variable and extracts the current
    temperature from the Yahoo-style JSON response.
    """

    SERVICE_NAME = 'weather.com'
    BASE_URL_KEY = 'WEATHERDOTCOM_URL'

    def __init__(self):
        super().__init__()
        # Fail fast at construction time (KeyError) if the endpoint env var
        # is not configured, rather than on the first request.
        self.url = os.environ[self.BASE_URL_KEY]

    async def _get_current_weather(self, latitude: float, longitude: float) -> dict:
        """
        For a given location gets the current weather report from weather.com.

        :param latitude: location latitude in degrees
        :param longitude: location longitude in degrees
        :return: the decoded JSON weather report
        :raises WeatherServiceException: on a non-200 HTTP response
        """
        data = {
            'lat': str(latitude),
            'lon': str(longitude)
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(self.url + "/weatherdotcom", json=data) as response:
                if response.status != HTTPStatus.OK:
                    # BUGFIX: previously a non-OK response fell through and the
                    # method implicitly returned None, which later surfaced as
                    # an uncaught TypeError in get_current_temperature.
                    logging.error('%s returned HTTP %d', self.SERVICE_NAME, response.status)
                    raise WeatherServiceException()
                return json.loads(await response.text())

    async def get_current_temperature(self, latitude: float, longitude: float) -> float:
        """
        For a given location gets the current temperature in fahrenheit from weather.com.

        :param latitude: location latitude in degrees
        :param longitude: location longitude in degrees
        :return: current temperature in fahrenheit
        :raises WeatherServiceException: if the report is missing or malformed
        """
        try:
            report = await self._get_current_weather(latitude, longitude)
            return float(report["query"]["results"]["channel"]["condition"]["temp"])
        except (KeyError, TypeError) as err:
            # KeyError: missing path in the payload; TypeError: report is not
            # the expected nested-dict shape.
            logging.exception(err)
            raise WeatherServiceException from err
| 30.509434 | 88 | 0.63389 |
acecf0956e2a411ebe83e60579d552ed2ba69836 | 6,433 | py | Python | src/appxs/commonx/templatetags/field_data_tag.py | spunkmars/django-spms | 95ac193891f93da07c3a26feeaf846e6030f3466 | [
"BSD-3-Clause"
] | 23 | 2020-04-14T07:50:38.000Z | 2022-01-27T09:07:19.000Z | src/appxs/commonx/templatetags/field_data_tag.py | bjzhangyong/django-spms | 95ac193891f93da07c3a26feeaf846e6030f3466 | [
"BSD-3-Clause"
] | 8 | 2021-03-19T09:01:16.000Z | 2022-02-10T12:28:55.000Z | src/appxs/commonx/templatetags/field_data_tag.py | bjzhangyong/django-spms | 95ac193891f93da07c3a26feeaf846e6030f3466 | [
"BSD-3-Clause"
] | 6 | 2020-04-14T13:34:29.000Z | 2022-01-25T04:05:16.000Z | # coding=utf-8
from datetime import datetime
from django import template
from django.db.models.fields import DateTimeField, DateField, TimeField
from spmo.common import Common
from spcc.views.common import map_value
# Shared helper instance; not referenced in this module's visible code —
# presumably kept for import-time side effects or other modules (verify).
co = Common()
# Tag registry this module's template tags are attached to (Django convention).
register = template.Library()
def get_model_all_fields(model=None):
    """
    Collect every field declared on *model*, relations included.

    Keeps concrete columns, foreign keys (many-to-one) and many-to-many
    fields; duplicates reported by ``_meta.get_fields()`` are removed.

    :param model: a Django model class
    :return: de-duplicated list of field objects
    """
    wanted = {
        field
        for field in model._meta.get_fields()
        if field.many_to_one or field.many_to_many or field.concrete
    }
    return list(wanted)
def do_filed_data_analysis(parser, token):
    """
    Compile function for the ``{% field_data object show_fields ex_fields %}`` tag.

    :param parser: template parser (unused)
    :param token: tag token; must contain exactly three arguments after the tag name
    :return: a FieldDataNode that renders the instance as ``<td>`` cells
    :raises template.TemplateSyntaxError: if the argument count is wrong
    """
    try:
        tag_name, object_data, object_show_fields, object_ex_fields = token.split_contents()
    except ValueError:
        # BUGFIX: the original ``raise (template.TemplateSyntaxError, "...")``
        # is Python-2 tuple-raise syntax; on Python 3 raising a tuple is itself
        # a TypeError. Raise a proper exception instance instead, and catch
        # only the ValueError the tuple-unpacking can produce.
        raise template.TemplateSyntaxError("%r tags error" % token.split_contents()[0])
    return FieldDataNode(object_data, object_show_fields, object_ex_fields)
class FieldDataNode(template.Node):
    """
    Render one model instance as a row of ``<td>`` cells.

    ``show_fields`` selects which local fields of the instance are emitted
    (in order); ``ex_fields`` describes extra columns pulled from m2m
    relations or other models. Output is a string of ``<td>...</td>``
    fragments: extra columns first, then the shown fields.
    """

    def __init__(self, object_data, object_show_fields, object_ex_fields):
        # Variable names are resolved against the context at render time.
        self.object = template.Variable(object_data)
        self.show_fields = template.Variable(object_show_fields)
        self.ex_fields = template.Variable(object_ex_fields)

    def render(self, context):
        object = self.object.resolve(context)  # the model instance (shadows builtin)
        show_fields = self.show_fields.resolve(context)  # list of field names to emit
        ex_fields = self.ex_fields.resolve(context)  # dict describing extra columns
        model = type(object)
        model_m2m_fs = model._meta.many_to_many
        # data: field name -> display value for this instance
        data = {}
        # names of FK fields, rendered as links in the final loop
        f_keys = []
        items = {}
        if hasattr(object, 'items'):
            # dict-like input: assumed to already yield (field, value) pairs
            items = object.items()
        else:
            items = [(field, field.value_to_string(object)) for field in object._meta.fields]
        for field, value in items:
            data[field.name] = "none"
            if hasattr(field, 'choices') and len(field.choices) > 0:
                # choices field: use its human-readable display label
                data[field.name] = getattr(object, 'get_%s_display' % field.name)()
            elif isinstance(field, DateTimeField):
                t = getattr(object, field.name)
                if t is None:
                    data[field.name] = None
                else:
                    data[field.name] = t.strftime('%Y-%m-%d %H:%M:%S')
            else:
                if hasattr(field, 'related_fields'):
                    # foreign key: keep the related object itself so the final
                    # loop can emit a link via get_absolute_url()
                    data[field.name] = getattr(object, field.name)
                    f_keys.append(field.name)
                else:
                    data[field.name] = value
        # Collapse each m2m relation to a comma-joined string, rendering each
        # related object as a link when it defines get_absolute_url().
        for m2m_field in model_m2m_fs:
            m2m_datas = getattr(object, '%s' % m2m_field.name).all()
            m2m_field_name = m2m_field.name
            for m2m in m2m_datas:
                if hasattr(m2m, 'get_absolute_url'):
                    m2m_t_data = '<a href=%s >%s</a>' % (m2m.get_absolute_url(), m2m.__str__())
                else:
                    m2m_t_data = m2m.__str__()
                # NOTE(review): if a non-m2m field of the same name was stored
                # above, data[name] is a string and .append would raise — this
                # assumes m2m names never collide with local field names.
                if m2m_field.name in data:
                    data[m2m_field_name].append(m2m_t_data)
                else:
                    data[m2m_field_name] = [m2m_t_data]
            if m2m_field_name in data:
                data[m2m_field_name] = ",".join(data[m2m_field_name])
        ex_data = {}
        res_str = ""
        for ex_key in ex_fields:
            if not ex_key in ex_data:
                ex_data[ex_key] = {}
            if ex_key == "m2m":
                # ex_fields['m2m'] shape: {'fields': {relation_name: attr_name, ...}}
                for x, y in ex_fields[ex_key]['fields'].items():
                    m2m_datas = getattr(object, '%s' % x).all()
                    ex_data_tmp = []
                    for m2m in m2m_datas:
                        ex_data_tmp.append(getattr(m2m, "%s" % y))
                    ex_data[ex_key].update({x: ",".join(ex_data_tmp[:])})
                # NOTE(review): this inner loop rebinds the outer loop variable
                # ex_key and iterates ALL keys accumulated in ex_data so far,
                # so earlier extra columns may be emitted again — verify this
                # re-emission is intentional.
                for ex_key in ex_data:
                    for fl in ex_fields[ex_key]['fields'].keys():
                        res_str = u"%s<td>%s</td>" % (res_str, ex_data[ex_key][fl])
            elif "m2m" == ex_key.split("_")[-1]:
                # key of form "<ModelName>_m2m": look up the model by name
                ex_key_va = "_".join(ex_key.split("_")[:-1])
                # NOTE(review): eval of a template-supplied key as a model
                # name — assumes the class is importable in this module's
                # namespace and the key is trusted (template-author-controlled).
                ex_model = eval('%s' % ex_key_va)
                for field_name, m2m_field in ex_fields[ex_key]["fields"].items():
                    # derive the reverse accessor name from the first instance
                    relation_name = getattr(ex_model.objects.all()[0], '%s' % m2m_field).query_field_name
                    m2m_datas = getattr(object, '%s' % relation_name).all()
                    ex_data_tmp = []
                    for m2m in m2m_datas:
                        ex_data_tmp.append(getattr(m2m, "%s" % field_name))
                    ex_data[ex_key].update({field_name: ",".join(ex_data_tmp[:])})
                # NOTE(review): same ex_key shadowing / re-emission as above.
                for ex_key in ex_data:
                    for fl in ex_fields[ex_key]['fields'].keys():
                        res_str = u"%s<td>%s</td>" % (res_str, ex_data[ex_key][fl])
            else:
                # key is a model name; build a filter from 'st' (static) and
                # 'dy' (dynamic, resolved against the current object) entries
                ex_model = eval('%s' % ex_key)
                ex_filter = ex_fields[ex_key]['filter']
                f_words = []
                for ft in ex_filter:
                    for k in ex_filter[ft]:
                        if ft == 'st':
                            f_words.append('%s=%s' % (k, ex_filter[ft][k]))
                        elif ft == 'dy':
                            f_words.append('%s=object.%s' % (k, ex_filter[ft][k]))
                # NOTE(review): on Python 3, exec() inside a function cannot
                # create a local binding — the following read of ex_qset may
                # raise NameError; this code appears written for Python 2.
                # Also a code-injection risk if filter values are untrusted.
                exec("ex_qset=ex_model.objects.filter(%s)" % ','.join(f_words))
                for fl in ex_fields[ex_key]['fields']:
                    if not ex_key in ex_data:
                        ex_data[ex_key] = {}
                    if ex_qset:
                        ex_data[ex_key].update({fl: getattr(ex_qset[0], fl)})
                    else:
                        ex_data[ex_key].update({fl: "None"})
                # NOTE(review): same ex_key shadowing / re-emission as above.
                for ex_key in ex_data:
                    for fl in ex_fields[ex_key]['fields']:
                        res_str = u"%s<td>%s</td>" % (res_str, ex_data[ex_key][fl])
        # Finally emit one cell per requested field, linking FK values when
        # the related object exposes get_absolute_url().
        for key in show_fields:
            if key in data:
                field_o = getattr(object, key)
                if key in f_keys and hasattr(field_o, 'get_absolute_url'):
                    res_str = u"%s<td><a href=%s >%s</a></td>" % (res_str, field_o.get_absolute_url(), data[key])
                else:
                    res_str = u"%s<td>%s</td>" % (res_str, data[key])
            else:
                res_str = u"%s<td>%s</td>" % (res_str, "None")
        return res_str
# Expose the node as the {% field_data object show_fields ex_fields %} tag.
register.tag('field_data', do_filed_data_analysis)
| 41.772727 | 114 | 0.510026 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.