text
stringlengths 2
999k
|
|---|
# -*- coding: utf-8 -*-
#
# Copyright 2017, 2018 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# Single source of truth for the package release version.
version = "0.2.0"

# Packaging metadata for the gh-cc-mirror / gl-cc-mirror command line tools.
setup(name='github-codecommit-mirror',
      version=version,
      description='Mirror all repositories of an organization/group from Github or Gitlab to AWS CodeCommit, including branches.',
      classifiers=[
          "Programming Language :: Python :: 3.5",
          'Development Status :: 4 - Beta',
          'Intended Audience :: System Administrators',
          'Topic :: Terminals',
          "Operating System :: POSIX :: Linux",
          "Environment :: Console",
      ],
      keywords=['git','github','gitlab','codecommit', 'mirror', 'sync'],
      author='dpa-infocom GmbH',
      maintainer='Martin Borho',
      maintainer_email='martin@borho.net',
      url='https://github.com/dpa-newslab/github-codecommit-mirror',
      license='Apache Software License (http://www.apache.org/licenses/LICENSE-2.0)',
      # ship everything except tests and coverage/build artifacts
      packages=find_packages(exclude=['tests', 'htmlcov', 'dist',]),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "GitPython==3.1.2",
          "boto3==1.4.6",
          "requests==2.18.4",
      ],
      # two console entry points: one for Github, one for Gitlab sources
      entry_points="""
[console_scripts]
gh-cc-mirror = gh_cc_mirror:cmd_github
gl-cc-mirror = gh_cc_mirror:cmd_gitlab
""",
      )
|
# coding: utf-8
import re
from ..extractor.nbc import NBCIE as Old
from ..utils import (
smuggle_url,
update_url_query,
int_or_none,
)
class NBCIE(Old):
    """Patched NBC extractor.

    Runs the stock extractor first; if it fails or yields no formats, falls
    back to building a smuggled theplatform.com URL from the page's JSON-LD
    metadata and delegating to the ThePlatform extractor.
    """

    def _real_extract(self, url):
        try:
            result = super(NBCIE, self)._real_extract(url)
            # An empty result or a result without formats counts as a
            # failure so the fallback path below is taken.
            if not result or not result.get('formats', None):
                raise ValueError('stock NBC extractor returned no formats')
            return result
        # Was a bare `raise` (no active exception) caught by a bare
        # `except:`; narrowed so KeyboardInterrupt/SystemExit propagate.
        except Exception:
            permalink, video_id = re.match(self._VALID_URL, url).groups()
            # schema.org URL normalization so _search_json_ld matches
            webpage = self._download_webpage(url, url).replace('https://schema.org', 'http://schema.org')
            video_data = self._search_json_ld(webpage, '', fatal=False)
            query = {
                'mbr': 'true',
                'manifest': 'm3u',
            }
            theplatform_url = smuggle_url(update_url_query(
                'http://link.theplatform.com/s/NnzsPC/media/guid/2410887629/' + video_id,
                query), {'force_smil_url': True})
            # Hand off to the ThePlatform extractor via url_transparent.
            return {
                '_type': 'url_transparent',
                'id': video_id,
                'title': video_data.get('title'),
                'url': theplatform_url,
                'description': video_data.get('description'),
                'keywords': video_data.get('keywords'),
                'season_number': int_or_none(video_data.get('seasonNumber')),
                'episode_number': int_or_none(video_data.get('episodeNumber')),
                'series': video_data.get('showName'),
                'ie_key': 'ThePlatform',
            }
|
import json
import sys

# Usage: python <script> <combined.json>
if len(sys.argv) == 2:
    filename = sys.argv[1]
else:
    print("Error - please specify one file name with the combined JSON")
    # Previously execution fell through and crashed with a NameError on
    # `filename`; exit explicitly with a failure status instead.
    sys.exit(1)

with open(filename) as f:
    data_json = json.load(f)

# sid -> number of tutors available for that slot (time pref > 0 and a
# non-negative office preference).
slot_stats = {}


def process_tutor_slots(tutor):
    """Accumulate one tutor's slot availability into `slot_stats`."""
    timeslots = tutor["timeSlots"]
    officepref = tutor["officePrefs"]
    slots = data_json["slots"]
    for i, (t, o) in enumerate(zip(timeslots, officepref)):
        if t > 0 and o >= 0:
            curr_slot = slots[i]
            # slots are expected to be ordered by their sid
            assert curr_slot["sid"] == i, "sid != i, {} != {}".format(curr_slot["sid"], i)
            slot_stats[curr_slot["sid"]] = slot_stats.get(curr_slot["sid"], 0) + 1


for tutor in data_json["tutors"]:
    process_tutor_slots(tutor)

# -1 marks slots that no tutor can staff.
for slot in data_json["slots"]:
    print(slot_stats.get(slot["sid"], -1), slot["office"], slot["day"], slot["hour"])
|
import os
import sys
import time
import argparse
try:
import configparser
except:
import ConfigParser as configparser
from alize.script import AlizeTestCase
from blue.server import MinicapService
from blue.utility import *
from blue.utility import LOG as L
class TestCase_Unit(AlizeTestCase):
    """Alize test case that drives an Android device through a minicap
    screen-capture service, with configuration loaded from an .ini file."""

    def __init__(self, *args, **kwargs):
        super(TestCase_Unit, self).__init__(*args, **kwargs)
        self.get_config(self.get("args.config"))
        self.get_service()
        self.service = MinicapService("minicap", self.get("args.mobile"),
                                      self.adb.get().HEIGHT, self.adb.get().WIDTH,
                                      self.adb.get().MINICAP_HEIGHT, self.adb.get().MINICAP_WIDTH,
                                      self.adb.get().ROTATE)
        self.service.start()
        time.sleep(1)  # give minicap a moment to come up

    def __del__(self):
        # getattr guards against __init__ having raised before
        # self.service was assigned (previously an AttributeError here).
        if getattr(self, "service", None) is not None:
            self.service.stop()

    def arg_parse(self, parser):
        """Register this test case's command line options on `parser`."""
        parser.add_argument(action='store', dest='testcase',
                            help='TestCase Name.')
        parser.add_argument('-m', action='store', dest='mobile',
                            help='Mobile (Android) Serial ID.')
        parser.add_argument('-a', action='store', dest='attack',
                            help='Attack ID.')
        parser.add_argument('-d', action='store', dest='deploy',
                            help='Deploy Fleet Number.')
        parser.add_argument('-f', action='store', dest='fleet',
                            help='Fleet Number. (1 ~ 4)')
        parser.add_argument('-e', action='store', dest='expedition',
                            help='Expedition ID.')
        parser.add_argument('-j', action='store', dest='job',
                            help='Jenkins Job.')
        parser.add_argument('-t', action='store', dest='timeout',
                            help='Timeout.')
        parser.add_argument('-u', action='store', dest='url',
                            help='target Jenkins URL.')
        parser.add_argument('-s', action='store', dest='slack',
                            help='target slack api token.')
        parser.add_argument('-c', action='store', dest='config',
                            help='Configure File.')
        parser.add_argument('-i', action='store', dest='userid',
                            help='jenkins userid.')
        parser.add_argument('-p', action='store', dest='token',
                            help='jenkins api token.')
        return parser

    @classmethod
    def get_service(cls):
        """Resolve adb / minicap / picture / slack service handles."""
        cls.adb = cls.service["alize.android"].get(cls.get("args.mobile"), PROFILE_DIR)
        cls.minicap = cls.service["alize.minicap"].get(cls.get("minicap.ip"), int(cls.get("minicap.port")))
        cls.pic = cls.service["alize.picture"].get()
        # prefer the explicit command line token; fall back to config
        if cls.get("args.slack") is None:
            serial = cls.get("slack.serial")
        else:
            serial = cls.get("args.slack")
        cls.slack = cls.service["alize.slack"].get(serial)

    # NOTE(review): named `cls` but not a @classmethod — it is called as
    # self.get_config(...), so `cls` is actually the instance here.
    def get_config(cls, conf=None):
        """Load config.ini (or config/<conf>.ini) key/values into the context."""
        if conf is None:
            conf = os.path.join(SCRIPT_DIR, "config.ini")
        else:
            conf = conf + ".ini"
            conf = os.path.join(SCRIPT_DIR, "config", conf)
        try:
            config = configparser.RawConfigParser()
            # context manager guarantees the handle is closed (it leaked before)
            with open(conf, 'r') as cfp:
                config.readfp(cfp)
            for section in config.sections():
                for option in config.options(section):
                    cls.set("%s.%s" % (section, option), config.get(section, option))
        except Exception as e:
            L.warning('error: could not read config file: %s' % str(e))
|
#
# django-weblogparser
#
# Admin
#
from django.contrib import admin
from weblogparser.models import LogFilePath, LogFile, LogEntry
@admin.register(LogFilePath)
class LogFilePathAdmin(admin.ModelAdmin):
    """Admin listing for configured log file search paths."""
    list_display = ['path']
@admin.register(LogFile)
class LogFileAdmin(admin.ModelAdmin):
    """Admin listing for parsed log files and their parse-error counts."""
    list_display = ['path', 'filename', 'created', 'modified', 'errors']
@admin.register(LogEntry)
class LogEntryAdmin(admin.ModelAdmin):
    """Admin listing for individual log entries, filterable by HTTP status."""
    list_display = ['timestamp', 'log_file', 'status', 'bytes_returned']
    list_filter = ['status']
|
import random
import numpy as np
import pyswarms as ps
from pyswarms.utils.functions import single_obj as fx
def run_global_best_pso(n_dims, test_func, n_inds, n_gens, lower_bound, upper_bound,
                        initial_positions=None, random_seed=12345,
                        c1=0.5, c2=0.3, w=0.9
                        ):
    """Run pyswarms' global-best PSO and return the per-generation positions.

    Returns a list of dicts, one per generation (plus gen 0 holding the
    initial positions): {'gen': g, 'individuals': [[...], ...]}.
    """
    # --- validate arguments ---
    assert lower_bound < upper_bound, "Lower bound must be smaller than upper bound."
    if initial_positions is not None:
        assert len(initial_positions) == n_inds
        for candidate in initial_positions:
            assert len(candidate) == n_dims
            assert max(candidate) <= upper_bound
            assert min(candidate) >= lower_bound
    # --- configure and run the optimizer ---
    np.random.seed(random_seed)
    bounds = (np.array([lower_bound] * n_dims), np.array([upper_bound] * n_dims))
    optimizer = ps.single.GlobalBestPSO(n_particles=n_inds, dimensions=n_dims,
                                        bounds=bounds,
                                        options={'c1': c1, 'c2': c2, 'w': w})
    if initial_positions is not None:
        optimizer.pos = np.array(initial_positions).copy()
    stats = optimizer.optimize(test_func, iters=n_gens)
    pos_history = optimizer.get_pos_history
    # --- collect the positions of every generation ---
    history = [{'gen': 0, 'individuals': initial_positions}]  # TODO: better to do it inside pyswarms
    for gen in range(n_gens):
        gen_solutions = [pos_history[gen][ind].tolist()  # np.array -> list
                         for ind in range(n_inds)]
        history.append({'gen': gen + 1, 'individuals': gen_solutions})
    return history
if __name__ == "__main__":
    # Smoke test: optimize the sphere function from random starting points.
    n_inds = 10
    n_gens = 1000
    n_dims = 3
    lower_bound = -3.0
    upper_bound = 3.0
    test_func = fx.sphere_func
    initial_positions = [[random.uniform(lower_bound, upper_bound) for _ in range(n_dims)]
                         for _ in range(n_inds)]
    # Fixed: was a Python 2 `print` statement — a SyntaxError on Python 3,
    # which made the whole module unimportable.
    print('Initial solution: {}'.format(initial_positions))
    results = run_global_best_pso(n_dims=n_dims, test_func=test_func,
                                  n_inds=n_inds, n_gens=n_gens,
                                  lower_bound=lower_bound, upper_bound=upper_bound,
                                  initial_positions=initial_positions)
    # one history entry per generation plus the initial one
    assert len(results) == n_gens + 1
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from torch import nn
from torch.nn.utils.weight_norm import weight_norm
class VisDialDiscriminator(nn.Module):
    """Scores each candidate answer option against the encoder output.

    Options are embedded, projected to the encoder's hidden size, and
    scored by a dot product with the (broadcast) encoder output.
    """

    def __init__(self, config, embedding):
        super().__init__()
        self.config = config
        self.embedding = embedding
        self.emb_out_dim = embedding.text_out_dim
        self.hidden_dim = self.config.hidden_dim
        self.projection_layer = nn.Linear(self.emb_out_dim, self.hidden_dim)

    def forward(self, encoder_output, batch):
        # BATCH_SIZE x DIALOGUES x 100 x SEQ_LEN
        options = batch["answer_options"]
        options_len = batch["answer_options_len"]
        n_batch, n_dial, n_opts, n_seq = options.size()
        # flatten to (B x D x 100) x SEQ_LEN
        flat_options = options.view(-1, n_seq)
        options_len = options_len.view(-1)
        # (B x D x 100) x EMB_OUT_DIM
        embedded = self.embedding(flat_options)
        # (B x D x 100) x HIDDEN_DIM
        projected = self.projection_layer(embedded)
        # (B x D) x 100 x HIDDEN_DIM
        projected = projected.view(n_batch * n_dial, n_opts, self.hidden_dim)
        # broadcast the encoder output across the 100 options
        context = encoder_output.unsqueeze(1).expand(-1, n_opts, -1)
        # dot product over the hidden dim -> (B x D) x 100 scores
        return torch.sum(projected * context, dim=2)
class LanguageDecoder(nn.Module):
    """Language LSTM head that turns attended features into vocab logits.

    Hidden state is shared across timesteps via the global registry, keyed
    by the input tensor's device.
    """

    def __init__(self, in_dim, out_dim, **kwargs):
        super().__init__()
        hidden_dim = kwargs["hidden_dim"]
        self.language_lstm = nn.LSTMCell(in_dim + hidden_dim, hidden_dim, bias=True)
        self.fc = weight_norm(nn.Linear(hidden_dim, out_dim))
        self.dropout = nn.Dropout(p=kwargs["dropout"])
        self.init_weights(kwargs["fc_bias_init"])

    def init_weights(self, fc_bias_init):
        """Constant bias, uniform weights in [-0.1, 0.1] for the output layer."""
        self.fc.bias.data.fill_(fc_bias_init)
        self.fc.weight.data.uniform_(-0.1, 0.1)

    def forward(self, weighted_attn):
        # Fetch the per-device LSTM state stashed in the registry.
        state = registry.get(f"{weighted_attn.device}_lstm_state")
        h_td, _ = state["td_hidden"]
        h_lm, c_lm = state["lm_hidden"]
        # Language LSTM step on [attended features ; top-down hidden].
        lstm_input = torch.cat([weighted_attn, h_td], dim=1)
        h_lm, c_lm = self.language_lstm(lstm_input, (h_lm, c_lm))
        predictions = self.fc(self.dropout(h_lm))
        # Persist the updated hidden state for timestep t+1.
        state["lm_hidden"] = (h_lm, c_lm)
        return predictions
|
# Prompt for the user's name and greet them.
name = input("What's your name? ")
print("hello, {}".format(name))
|
"""
Copyright 2020 EPAM Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
# S3 bucket name length limits (an AWS constraint).
MIN_BUCKET_NAME_LEN = 3
MAX_BUCKET_NAME_LEN = 63

# AWS regions accepted for the `region` configuration value.
ALL_REGIONS = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'sa-east-1',
               'ca-central-1', 'eu-west-1', 'eu-central-1', 'eu-west-2',
               'eu-west-3', 'ap-northeast-1', 'ap-northeast-2',
               'ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'eu-north-1']

# Keys used inside the per-field validator mapping.
REQUIRED = 'required'
VALIDATOR = 'validator'

# Recognized configuration dictionary keys.
PROJECT_PATH_CFG = 'project_path'
ACCOUNT_ID_CFG = 'account_id'
REGION_CFG = 'region'
LAMBDAS_ALIASES_NAME_CFG = 'lambdas_alias_name'
AWS_ACCESS_KEY_ID_CFG = 'aws_access_key_id'
AWS_SECRET_ACCESS_KEY_CFG = 'aws_secret_access_key'
DEPLOY_TARGET_BUCKET_CFG = 'deploy_target_bucket'
PROJECTS_MAPPING_CFG = 'build_projects_mapping'
RESOURCES_SUFFIX_CFG = 'resources_suffix'
RESOURCES_PREFIX_CFG = 'resources_prefix'

# Supported build tool identifiers for the projects mapping.
PYTHON_BUILD_TOOL_NAME = 'python'
NODE_BUILD_TOOL_NAME = 'node'
MVN_BUILD_TOOL_NAME = 'mvn'
ALLOWED_BUILD_TOOLS = [PYTHON_BUILD_TOOL_NAME,
                       MVN_BUILD_TOOL_NAME,
                       NODE_BUILD_TOOL_NAME]

# Error template for a missing required configuration key.
REQUIRED_PARAM_ERROR = 'The required key {} is missing'
class ConfigValidator:
    """Validates a deployment configuration dictionary.

    Each known configuration key maps to a requiredness flag and a
    validator callable; `validate` returns a dict of key -> list of error
    messages (empty dict when the configuration is valid).
    """

    def __init__(self, config_dict) -> None:
        self._config_dict = config_dict
        # key -> {REQUIRED: bool, VALIDATOR: callable(key, value) -> errors}
        self._fields_validators_mapping = {
            PROJECT_PATH_CFG: {
                REQUIRED: True,
                VALIDATOR: self._validate_project_path},
            ACCOUNT_ID_CFG: {
                REQUIRED: True,
                VALIDATOR: self._validate_account_id},
            REGION_CFG: {
                REQUIRED: True,
                VALIDATOR: self._validate_region},
            DEPLOY_TARGET_BUCKET_CFG: {
                REQUIRED: True,
                VALIDATOR: self._validate_bundle_bucket_name},
            PROJECTS_MAPPING_CFG: {
                REQUIRED: True,
                VALIDATOR: self._validate_project_mapping},
            AWS_ACCESS_KEY_ID_CFG: {
                REQUIRED: False,
                VALIDATOR: self._validate_aws_access_key},
            AWS_SECRET_ACCESS_KEY_CFG: {
                REQUIRED: False,
                VALIDATOR: self._validate_aws_secret_access_key},
            RESOURCES_PREFIX_CFG: {
                REQUIRED: False,
                VALIDATOR: self._validate_resources_prefix_suffix},
            RESOURCES_SUFFIX_CFG: {
                REQUIRED: False,
                VALIDATOR: self._validate_resources_prefix_suffix}
        }

    def validate(self):
        """Run every field's validator; return {key: errors} (empty if valid)."""
        error_messages = {}
        for key, value in self._config_dict.items():
            validation_rules = self._fields_validators_mapping.get(key)
            if not validation_rules:
                raise AssertionError(
                    f'There is no validator for the configuration field {key}')
            is_required = validation_rules.get(REQUIRED)
            if is_required:
                if not value:
                    error_messages[key] = REQUIRED_PARAM_ERROR.format(key)
                    continue
            validator_func = validation_rules.get(VALIDATOR)
            validation_errors = validator_func(key, value)
            if validation_errors:
                error_messages[key] = validation_errors
        return error_messages

    def _validate_project_path(self, key, value):
        """The project path must be a non-empty string naming an existing path."""
        str_error = self._assert_value_is_str(key, value)
        if str_error:
            return [str_error]
        errors = []
        if len(value) == 0:
            errors.append(f'{key} must not be empty')
        if not os.path.exists(value):
            errors.append(f'The path {value} specified in {key} must exist')
        return errors

    @staticmethod
    def _validate_account_id(key, value):
        """An AWS account id must be a 12-digit number."""
        errors = []
        try:
            int(value)
        # int() raises ValueError for non-numeric strings (e.g. 'abc') and
        # TypeError for non-convertible types; the original caught only
        # TypeError, so a bad string crashed instead of being reported.
        except (TypeError, ValueError):
            errors.append(f'{key} must be int, not {type(value)}')
            return errors
        if len(str(value)) != 12:
            errors.append(f'{key} must be a 12-digit number')
        return errors

    def _validate_region(self, key, value):
        """The region must be one of the known AWS regions."""
        str_error = self._assert_value_is_str(key, value)
        if str_error:
            return [str_error]
        if value not in ALL_REGIONS:
            return [
                f'{key} value must be one of {ALL_REGIONS}, but is {value}'
            ]

    def _validate_bundle_bucket_name(self, key, value):
        """The bucket name length must be within the AWS S3 limits."""
        str_error = self._assert_value_is_str(key=key,
                                              value=value)
        if str_error:
            return [str_error]
        errors = []
        # check min/max length
        if len(value) < MIN_BUCKET_NAME_LEN or len(
                value) > MAX_BUCKET_NAME_LEN:
            errors.append(f'The length of {key} must be between '
                          f'{MIN_BUCKET_NAME_LEN} and {MAX_BUCKET_NAME_LEN} '
                          f'characters long')
        return errors

    def _validate_project_mapping(self, key, value):
        """Each build tool must be supported and each mapped path must exist."""
        errors = []
        if type(value) is not dict:
            errors.append(f'{key} must be type of dict')
            return errors
        project_path = self._config_dict.get(PROJECT_PATH_CFG)
        # Fixed: the original shadowed `key` with the build-tool name and
        # re-scanned every mapping entry once per tool, which duplicated
        # path errors len(keys) times.
        for build_tool, paths in value.items():
            if build_tool not in ALLOWED_BUILD_TOOLS:
                errors.append(f'{build_tool} is not supported to be built')
                continue
            for path in paths:
                if not os.path.exists(os.path.join(project_path, path)):
                    errors.append(f'The path in {key}:{build_tool} project '
                                  f'mapping does not exists: {path}')
        return errors

    def _validate_aws_access_key(self, key, value):
        """AWS access key ids are 16-128 characters long."""
        str_error = self._assert_value_is_str(key=key,
                                              value=value)
        if str_error:
            return [str_error]
        if len(value) < 16 or len(value) > 128:
            return [
                f'The length of {key} must be in a '
                f'range between 16 and 128 characters']

    def _validate_aws_secret_access_key(self, key, value):
        """Only constraint found: the secret key must be a string."""
        str_error = self._assert_value_is_str(key=key,
                                              value=value)
        if str_error:
            return [str_error]

    def _validate_resources_prefix_suffix(self, key, value):
        """Resource prefixes/suffixes are limited to 5 characters."""
        str_error = self._assert_value_is_str(key=key,
                                              value=value)
        if str_error:
            return [str_error]
        if len(value) > 5:
            return [
                f'The length of {key} must be less or equal to 5 character']

    @staticmethod
    def _assert_value_is_str(key, value):
        """Return an error string when `value` is not a str, else None."""
        if type(value) is not str:
            return f'{key} must be type of string'
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
SAMPLE_RATE = 50  # 50Hz -- frequency of the control/publish loop below


class DBWNode(object):
    """Drive-by-wire ROS node.

    Subscribes to the proposed twist commands and the current velocity,
    feeds them through `twist_controller.Controller`, and publishes
    throttle/brake/steering commands while DBW is enabled.
    """

    def __init__(self):
        rospy.init_node('dbw_node')
        # Vehicle-specific parameters (do not alter in the launch files).
        vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        wheel_base = rospy.get_param('~wheel_base', 2.8498)
        steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
        min_speed = rospy.get_param('~min_speed', 0.5)
        max_speed = rospy.get_param('waypoint_loader/velocity', 40) / 3.6  # convert km/h to m/s
        steering_tau = rospy.get_param('~steering_tau', 0.0)
        # PID gains for the throttle controller.
        throttle_kp = rospy.get_param('~throttle_k_p', 0.5)
        throttle_ki = rospy.get_param('~throttle_k_i', 0.00001)
        throttle_kd = rospy.get_param('~throttle_k_d', 0.0 )
        throttle_gains = [throttle_kp, throttle_ki, throttle_kd]
        # Publishers for the three actuator commands.
        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
                                         SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
                                            ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
                                         BrakeCmd, queue_size=1)
        # State filled in by the subscriber callbacks below.
        # NOTE(review): loop() can run before the first /twist_cmd or
        # /current_velocity message arrives, so control() may receive None —
        # presumably Controller tolerates that; confirm.
        self.dbw_enabled = False
        self.current_velocity = None
        self.twist_cmd = None
        # Create `TwistController` object
        self.controller = Controller(
            wheel_base = wheel_base,
            steer_ratio = steer_ratio,
            min_speed = min_speed,
            max_speed = max_speed,
            decel_limit = decel_limit,
            accel_limit = accel_limit,
            max_lat_accel = max_lat_accel,
            max_steer_angle = max_steer_angle,
            vehicle_mass = vehicle_mass,
            fuel_capacity = fuel_capacity,
            brake_deadband = brake_deadband,
            throttle_gains = throttle_gains,
            wheel_radius = wheel_radius,
            steering_tau = steering_tau,
            sample_rate = SAMPLE_RATE)
        # Subscribe to topics
        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb, queue_size=1)
        rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb, queue_size=1)
        rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cmd_cb, queue_size=1)
        # Blocks here until shutdown.
        self.loop()

    def loop(self):
        """Run the controller at SAMPLE_RATE and publish while DBW is on."""
        rate = rospy.Rate(SAMPLE_RATE)
        while not rospy.is_shutdown():
            if self.dbw_enabled == True:
                throttle, brake, steer = self.controller.control(self.twist_cmd, self.current_velocity)
                self.publish(throttle, brake, steer)
            rate.sleep()

    def publish(self, throttle, brake, steer):
        """Publish one throttle (percent), steering (rad) and brake (torque) command."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)
        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)
        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)

    def dbw_enabled_cb(self, msg):
        """Track DBW toggles; reset the controller so PID error doesn't carry over."""
        self.dbw_enabled = msg.data
        #rospy.logwarn("dbw_enabled_cb: %s", self.dbw_enabled)
        if self.dbw_enabled == True:
            self.controller.reset()

    def current_velocity_cb(self, msg):
        """Cache the latest measured velocity."""
        self.current_velocity = msg.twist
        #rospy.logwarn("twist_velocity_cb: %s", self.current_velocity)

    def twist_cmd_cb(self, msg):
        """Cache the latest proposed twist command."""
        self.twist_cmd = msg.twist
        #rospy.logwarn("twist_cmd_cb: %s", self.twist_cmd)
# Start the node; the constructor enters the publish loop and blocks.
if __name__ == '__main__':
    DBWNode()
|
import datasets
import addTorch
# order blocks
def compile(graph, inputs):
    """Topologically order the graph's blocks.

    Returns (ordered_block_names, inputs); `inputs` may gain entries while
    topologicalSort resolves unbound block inputs.
    """
    done = {name: False for name in graph}
    ordered = []
    for name in graph:
        if not done[name]:
            topologicalSort(graph, name, inputs, ordered, done)
    return ordered, inputs
# recursively stack blocks in order
def topologicalSort(graph, key, inputs, stack, visited):
    """Depth-first post-order visit of `key`'s producers, appending to `stack`.

    Unbound block inputs (None) are resolved against the graph-level
    `inputs` list and rewritten in place as "<block>.<input>".
    """
    visited[key] = True
    for value in graph[key]['inputs']:
        source = graph[key]['inputs'][value]
        if source is None:
            # Unbound input: it must be one of the graph-level inputs.
            if key + "." + value not in inputs:
                print("invalid")
                break
            graph[key]['inputs'][value] = key + "." + value
        elif not visited[source.split('.')[0]]:
            # Visit the producing block first.  (A leftover debug print of
            # the whole node was removed here.)
            topologicalSort(graph, source.split('.')[0], inputs, stack, visited)
    stack.append(key)
stack.append(key)
# write
def write(file, graph):
    """Expand each `@network(...)` marker line in `file` into generated
    PyTorch model source built from `graph`; returns the rewritten text."""
    with open(file, 'r') as f:
        contents = f.read()
    lines = contents.split('\n')
    for line in lines:
        # TODO: this is pretty poor parsing, should use something else
        if '@network' in line:
            # Slice out "(...)" and the two "[...]" input/output name lists.
            start = line.index('(')
            end = line.index(')')
            content = line[start : end]
            input_start = line.index("[")
            input_end = line.index("]") + 1
            input_content = line[input_start + 1 : input_end - 1]
            output_start = line[input_end:].index("[")
            output_end = line[input_end:].index("]") + 1
            output_content = line[input_end + output_start + 1 : input_end + output_end - 1]
            inputs = input_content.split(',')
            outputs = output_content.split(',')
            # Strip spaces and the surrounding quote characters from each name.
            inputs = [i.replace(' ', '')[1:-1] for i in inputs]
            outputs = [o.replace(' ', '')[1:-1] for o in outputs]
            order, inps = compile(graph, inputs)
            # define network
            net = 'class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n'
            # define forward function
            forward = '\n def forward(self, '
            for i in inps:
                forward += i.replace(".", "_") + ", "
            forward = forward[:-2] + "):\n"
            # define initialize model
            initialize = '\nnet = Net()\n'
            # generate script from stack
            while order:
                block = order.pop(0)
                if 'add' == graph[block]['type']:
                    addNet, addForward = addTorch.addSum(block, graph[block]['inputs'], graph[block]['attributes'])
                    net += addNet
                    forward += addForward
                # add blocks to the model
                elif 'conv2d' == graph[block]['type']:
                    addNet, addForward = addTorch.addConv2d(block, graph[block]['inputs'], graph[block]['attributes'])
                    net += addNet
                    forward += addForward
                elif 'maxpool' == graph[block]['type']:
                    addNet, addForward = addTorch.addMaxPool2d(block, graph[block]['inputs'], graph[block]['attributes'])
                    net += addNet
                    forward += addForward
                elif 'flatten' == graph[block]['type']:
                    addNet, addForward = addTorch.addFlatten(block, graph[block]['inputs'], graph[block]['attributes'])
                    net += addNet
                    forward += addForward
                elif 'dense' == graph[block]['type']:
                    addNet, addForward = addTorch.addLinear(block, graph[block]['inputs'], graph[block]['attributes'])
                    net += addNet
                    forward += addForward
                elif 'relu' == graph[block]['type']:
                    forward += addTorch.addReLU(block, graph[block]['inputs'], graph[block]['attributes'])
                elif 'sigmoid' == graph[block]['type']:
                    forward += addTorch.addSigmoid(block, graph[block]['inputs'], graph[block]['attributes'])
                elif 'tanh' == graph[block]['type']:
                    # NOTE(review): 'tanh' dispatches to addReLU — looks like a
                    # copy/paste slip (an addTanh seems intended); confirm.
                    forward += addTorch.addReLU(block, graph[block]['inputs'], graph[block]['attributes'])
                elif 'softmax' == graph[block]['type']:
                    forward += addTorch.addSoftmax(block, graph[block]['inputs'], graph[block]['attributes'])
            # Emit the return statement listing the network outputs.
            forward += "\n return "
            for o in outputs:
                forward += o.replace(".", "_") + ", "
            forward = forward[:-2]
            contents = contents.replace(line, net + forward + initialize)
    return contents
    #return net + forward + initialize
#return net + forward + initialize
if __name__ == '__main__':
    # test
    # Small hand-built graph: conv -> relu -> conv, a residual add, then tanh.
    test = {"conv_1":{"inputs":{"input":None},"attributes":["in_channels=3","out_channels=6","kernel_size=[3, 3]","stride=[1, 1]","padding=[1, 1]","dilation=[1, 1]","groups=1","bias=True","padding_mode=zeros"],"type":"conv2d"},"conv_2":{"inputs":{"input":"relu_1.output"},"attributes":["in_channels=6","out_channels=12","kernel_size=[3, 3]","stride=[1, 1]","padding=[1, 1]","dilation=[1, 1]","groups=1","bias=True","padding_mode=zeros"],"type":"conv2d"},"relu_1":{"inputs":{"input":"conv_1.output"},"attributes":[],"type":"relu"},"add_1":{"inputs":{"input1":"conv_2.output","input2":"conv_1.output"},"attributes":[],"type":"add"},"tanh_1":{"inputs":{"input":"add_1.output"},"attributes":[],"type":"tanh"}}
    #print(addTorch.addConv2d('conv_2', test['conv_2']['inputs'], test['conv_2']['attributes']))
    #print(addTorch.addSum('add_1', test['add_1']['inputs'], test['add_1']['attributes']))
    # view generated code
    print(write('blank.py', test))
|
import discord
from discord.ext import commands
import sys
import io
import os
import json
import datetime
import re
import requests
def to_ascii(string):
    """Encode Finnish umlauts (and the §-as-ß marker) into '/xx' ASCII escapes."""
    return string.translate(
        str.maketrans({"ä": "/ae", "ö": "/oe", "Ä": "/AE", "Ö": "/OE", "§": "/ss"}))
def to_utf8(string):
    """Reverse of to_ascii: repair common mojibake and decode '/xx' escapes.

    Replacement order matters (mojibake repairs first), so the pairs are
    applied sequentially exactly as in the original chain.
    """
    for src, dst in (("ä", "ä"), ("ö", "ö"), ("/ae", "ä"), ("/oe", "ö"),
                     ("Ä", "Ä"), ("Ö", "Ö"), ("§", "§"), ("/ss", "§")):
        string = string.replace(src, dst)
    return string
class CustomCommandsCog(commands.Cog):
    """Cog implementing per-guild custom chat commands.

    Commands are stored in customfiles/custom_commands.json keyed by guild
    id, and every '!'-prefixed message is also appended to a per-guild chat
    log.  User-facing messages are in Finnish.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.group(name="command", pass_context=True)
    async def _command(self, ctx):
        """ Tell users what your group command is all about here"""
        if ctx.invoked_subcommand is None:
            # Group invoked without a subcommand: show usage help.
            print ("Komento annettiin ilman alakomentoa")
            await ctx.send("`!command add/remove hi ""\"hello""\"`", delete_after=30.0)

    # listeners now must have a decorator
    @commands.Cog.listener()
    async def on_message(self, message):
        """Log '!' messages and reply to registered custom commands."""
        guild = message.guild
        channel = message.channel
        path = "{}/customfiles/".format(os.path.dirname(__file__))
        if path == "/":
            # running from the current directory; use relative paths
            path = ""
        try:
            if message.content.startswith('!'):
                if guild:
                    # append the raw message to this guild's chat log
                    with open("{}chatlogs/{}.txt".format(path, guild.name), "a+", encoding="utf-8") as logs:
                        print(to_utf8(str(("{0.created_at} : {0.author.name} : {0.channel} : {0.content} : {0.embeds}".format(message)))), file=logs)
                try:
                    server = message.guild.id
                except AttributeError:
                    # DMs have no guild; nothing to look up
                    return
                command = message.content.replace("!", "")
                #print (command)
                try:
                    with open(f"{path}custom_commands.json") as data_file:
                        data = json.load(data_file)
                        #print("Tiedosto avattu")
                        try:
                            # look up the command for this guild and send it
                            viesti = data[str(server)]["!{}".format(to_ascii(str(command)))]["message"]
                            await channel.send(to_utf8(viesti), delete_after=300.0)
                            return
                        except KeyError: #NameError for fixing, if command manage to go broken
                            # unknown command: ignore silently
                            # (the print below is unreachable after return)
                            return
                            print("Viestin sanomisessa KeyError / path on väärin merkitty")
                except IOError:
                    # commands file missing or path misconfigured
                    print ("Tiedoston avaamisessa on vikaa, path on väärin")
                    return
            else:
                #print("Viesti ei alkanut '!' merkillä")
                #print("Poistetaan käyttäjän lähettämä viesti")
                #await self.message.delete()
                #await bot.process_commands(message)
                pass
                #return
        except AttributeError:
            # message lacked an expected attribute; ignore
            pass

    #@commands.command(name='command-add', aliases=['lisaa'])
    @_command.command()
    @commands.guild_only()
    @commands.has_any_role("Admins", "Mods", "raid")
    async def add(self, message, *, arg):
        """Add a custom command: `!command add <name> "<message>"`."""
        path = "{}/customfiles/".format(os.path.dirname(__file__))
        if path == "/":
            path = ""
        words = "".join(arg)#.replace(" ", " ")
        file = f"{path}custom_commands.json"
        #channel = message.guild.get_channel(521118811198586891)
        channel = message.channel
        server = message.guild.id
        if len(words) < 3:
            # argument too short to contain a command plus a quoted message
            await channel.send("Anna komento, sekä viesti: `!command add hi ""\"hello""\"`", delete_after=40.0)
            #await channel.send(f"{arg}")
            return

        def convert(string):
            """Split `<name> "<msg>"` into (command, message, raw_name).

            Returns None (-> TypeError at the unpacking call site) when the
            quoting is malformed; raises IndexError when the command name
            is empty after stripping '!' characters.
            """
            a = string.find("\"")
            if a == -1 or string[-1] != "\"" or string.count("\"") < 2:
                return
            # mark the quoted region with [start]/[end] sentinels
            string_list = list(string)
            string_list[a], string_list[-1] = "[start]", "[end]"
            if string_list[a - 1] != " ":
                string_list[a - 1] += " "
            string = "".join(string_list)
            start = string.find("[start]") + 7
            end = string.find("[end]")
            viesti_raw = to_ascii(string[start:end]).replace("\\n", "\n")
            komento_raw = to_ascii(" ".join(string[:start - 8].split(" ")[0:]))
            komento = komento_raw.replace("!", "")
            try:
                # ensure the stored key starts with a single '!'
                if not komento[0].isalpha() and not komento[0].isdigit():
                    komento = list(komento)
                    komento[0] = "!"
                    komento = "".join(komento)
                elif komento[0].isalpha() or komento[0].isdigit():
                    komento = "!" + komento
                return komento.lower(), viesti_raw, komento_raw
            except IndexError:
                raise IndexError
        try:
            command, viesti, command_raw = convert(words)
            if len(command_raw) > 30:
                raise ValueError
            if "[end]" in command and "[start]" in command:
                # sentinels leaked into the command name: malformed input
                await channel.send("Annoit vääränlaisen syötteen. Anna ensin komento ja sitten "
                                   "viesti lainausmerkeissä.", delete_after=40.0)
                return
        except TypeError:
            # convert() returned None: message was not properly quoted
            await channel.send("Komennon viestin täytyy alkaa ja päättyä lainausmerkillä. "
                               "Anna ensin komento ja sitten viesti."
                               "`!command add hi ""\"hello""\"`", delete_after=40.0)
            return
        except IndexError:
            # command name was empty after stripping '!' characters
            await channel.send("Komennon nimi ei saa olla pelkkiä huutomerkkejä, sillä ne "
                               "poistetaan siitä joka tapauksessa. Siten tämä komento olisi "
                               "tyhjä merkkijono.", delete_after=40.0)
            return
        except ValueError:
            # command name exceeded the 30 character limit
            await channel.send(f"Komennon maksimipituus on 30 merkkiä. Sinun oli "f"{len(command_raw)}.", delete_after=30.0)
            return
        with open(file) as data_file:
            data = json.load(data_file)
        try:
            server_commands = list(data[str(server)])
            if command in server_commands:
                await channel.send("Komento on jo olemassa.", delete_after=40.0)
                return
            elif len(server_commands) > 199:
                # per-guild limit of 200 commands reached
                await channel.send("Komentojen maksimimäärä on 200 kappaletta, joka on tällä "
                                   "guildilla jo täyttynyt.", delete_after=40.0)
                return
        except KeyError:
            # first command for this guild
            data[str(server)] = {}
        data[str(server)][command] = {"message": viesti}
        with open(file, "w") as data_file:
            json.dump(data, data_file, indent=4)
        await channel.send("Komento `{}` lisätty.".format(to_utf8(command)), delete_after=40.0)
        await message.message.delete()
        if (command_raw[0] == "!" and command_raw.count("!") > 1) or (command_raw[0] != "!" and command_raw.count("!") > 0):
            #await channel.send("Komennon nimessä ei voi olla huutomerkkejä ja ne poistettiin automaattisesti.")
            print ("Komennon nimessä ei voi olla dollarin merkkejä ja ne poistettiin automaattisesti.")

    #@commands.command(name='command-del', aliases=['poista', 'remove'])
    @_command.command()
    @commands.guild_only()
    @commands.has_any_role("Admins", "Mods", "raid")
    async def remove(self, message, *, arg):
        """Remove a custom command: `!command remove <name>`."""
        #channel = message.guild.get_channel(521118811198586891)
        channel = message.channel
        komento = " ".join(arg).replace(" ", "")
        server = message.guild.id
        path = "{}/customfiles/".format(os.path.dirname(__file__))
        if path == "/":
            path = ""
        file = f"{path}custom_commands.json"
        # normalize the name so it starts with a single '!'
        if not komento[0].isalpha() and not komento[0].isdigit():
            komento = list(komento)
            komento[0] = "!"
            komento = "".join(komento)
        elif komento[0].isalpha() or komento[0].isdigit():
            komento = "!" + komento
        komento = to_ascii(komento)
        with open(file) as data_file:
            data = json.load(data_file)
        if str(komento) in list(data[str(server)]):
            del data[str(server)][str(komento)]
            with open(file, "w") as data_file:
                json.dump(data, data_file, indent=4)
            await channel.send("Komento `{}` poistettu.".format(to_utf8(str(komento))), delete_after=40.0)
            await message.message.delete()
        else:
            await channel.send(f"Komentoa `{arg}` ei ole olemassa.", delete_after=40.0)
            await message.message.delete()
# The setup function below is necessary. Remember we give bot.add_cog() the name of the class in this case CommandsCog.
# When we load the cog, we use the name of the file.
def setup(bot):
    # discord.py extension entry point: called by bot.load_extension() with
    # the Bot instance; registers this file's cog.
    bot.add_cog(CustomCommandsCog(bot))
|
import operator
import warnings
from collections.abc import Iterator, Sequence
from functools import wraps, partial
from numbers import Number, Integral
from operator import getitem
from pprint import pformat
import numpy as np
import pandas as pd
from pandas.util import cache_readonly
from pandas.api.types import (
is_bool_dtype,
is_timedelta64_dtype,
is_numeric_dtype,
is_datetime64_any_dtype,
)
from tlz import merge, first, unique, partition_all, remove
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..utils import parse_bytes, partial_by_order, Dispatch, IndexCallable, apply
from .. import threaded
from ..context import globalmethod
from ..utils import (
random_state_data,
pseudorandom,
derived_from,
funcname,
memory_repr,
put_lines,
M,
key_split,
OperatorMethodMixin,
is_arraylike,
typename,
)
from ..array.core import Array, normalize_arg
from ..array.utils import empty_like_safe, zeros_like_safe
from ..blockwise import blockwise, Blockwise
from ..base import DaskMethodsMixin, tokenize, dont_optimize, is_dask_collection
from ..delayed import delayed, Delayed, unpack_collections
from ..highlevelgraph import HighLevelGraph
from . import methods
from .accessor import DatetimeAccessor, StringAccessor
from .categorical import CategoricalAccessor, categorize
from .optimize import optimize
from .utils import (
meta_nonempty,
make_meta,
insert_meta_param_description,
raise_on_meta_error,
clear_known_categories,
is_categorical_dtype,
has_known_categories,
PANDAS_VERSION,
PANDAS_GT_100,
index_summary,
is_dataframe_like,
is_series_like,
is_index_like,
valid_divisions,
hash_object_dispatch,
check_matching_columns,
drop_by_shallow_copy,
)
# Sentinel used throughout this module to distinguish "argument not given"
# from an explicit None/other value.
no_default = "__no_default__"
# Disable pandas' numexpr acceleration: results can differ slightly from
# plain-python evaluation, which breaks dask's meta/partition consistency.
pd.set_option("compute.use_numexpr", False)
def _concat(args, ignore_index=False):
    """Concatenate partition results into a single pandas/NumPy object.

    Used as the final step of compute(): ``args`` is the list of per-partition
    results. Falls back gracefully for empty input, ndarray partitions, and
    scalar (non-frame) partition results.
    """
    if not args:
        # Nothing to concatenate; return the (empty) input unchanged.
        return args
    if isinstance(first(core.flatten(args)), np.ndarray):
        # Array-backed partitions: delegate to dask.array's concatenation.
        return da.core.concatenate3(args)
    if not has_parallel_type(args[0]):
        # Scalar partition results: try to wrap them in a Series; if pandas
        # can't (e.g. heterogeneous objects), hand back the raw list.
        try:
            return pd.Series(args)
        except Exception:
            return args
    # We filter out empty partitions here because pandas frequently has
    # inconsistent dtypes in results between empty and non-empty frames.
    # Ideally this would be handled locally for each operation, but in practice
    # this seems easier. TODO: don't do this.
    args2 = [i for i in args if len(i)]
    return (
        args[0]
        if not args2
        else methods.concat(args2, uniform=True, ignore_index=ignore_index)
    )
def finalize(results):
    # __dask_postcompute__ hook for _Frame: stitch partition results together.
    return _concat(results)
class Scalar(DaskMethodsMixin, OperatorMethodMixin):
    """A Dask object representing a lazily-computed scalar value.

    Parameters
    ----------
    dsk : dict or HighLevelGraph
        Task graph producing the scalar.
    name : str
        Key prefix identifying this scalar's task in the graph.
    meta : scalar
        Example value describing the result's type; must NOT be
        frame-like (DataFrame/Series/Index).
    divisions : ignored
        Present only for signature compatibility with Series/DataFrame.
    """
    def __init__(self, dsk, name, meta, divisions=None):
        # divisions is ignored, only present to be compatible with other
        # objects.
        if not isinstance(dsk, HighLevelGraph):
            dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
        self.dask = dsk
        self._name = name
        meta = make_meta(meta)
        if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):
            # Frame-like meta means the caller should have built a
            # DataFrame/Series/Index collection, not a Scalar.
            raise TypeError(
                "Expected meta to specify scalar, got "
                "{0}".format(typename(type(meta)))
            )
        self._meta = meta
    def __dask_graph__(self):
        # Dask collection protocol: the underlying task graph.
        return self.dask
    def __dask_keys__(self):
        # A scalar has exactly one output key.
        return [self.key]
    def __dask_tokenize__(self):
        return self._name
    def __dask_layers__(self):
        return (self.key,)
    __dask_optimize__ = globalmethod(
        optimize, key="dataframe_optimize", falsey=dont_optimize
    )
    __dask_scheduler__ = staticmethod(threaded.get)
    def __dask_postcompute__(self):
        # One key -> the result is just the first (only) element.
        return first, ()
    def __dask_postpersist__(self):
        return Scalar, (self._name, self._meta, self.divisions)
    @property
    def _meta_nonempty(self):
        # Scalars have no "empty" form; the meta itself is representative.
        return self._meta
    @property
    def dtype(self):
        return self._meta.dtype
    def __dir__(self):
        o = set(dir(type(self)))
        o.update(self.__dict__)
        if not hasattr(self._meta, "dtype"):
            o.remove("dtype")  # dtype only in `dir` if available
        return list(o)
    @property
    def divisions(self):
        """Dummy divisions to be compat with Series and DataFrame"""
        return [None, None]
    def __repr__(self):
        # Truncate long (tokenized) names for readability.
        name = self._name if len(self._name) < 10 else self._name[:7] + "..."
        if hasattr(self._meta, "dtype"):
            extra = ", dtype=%s" % self._meta.dtype
        else:
            extra = ", type=%s" % type(self._meta).__name__
        return "dd.Scalar<%s%s>" % (name, extra)
    def __array__(self):
        # array interface is required to support pandas instance + Scalar
        # Otherwise, above op results in pd.Series of Scalar (object dtype)
        return np.asarray(self.compute())
    @property
    def _args(self):
        return (self.dask, self._name, self._meta)
    def __getstate__(self):
        # Pickle protocol: serialize via the constructor arguments.
        return self._args
    def __setstate__(self, state):
        self.dask, self._name, self._meta = state
    def __bool__(self):
        raise TypeError(
            "Trying to convert {} to a boolean value. Because Dask objects are "
            "lazily evaluated, they cannot be converted to a boolean value or used "
            "in boolean conditions like if statements. Try calling .compute() to "
            "force computation prior to converting to a boolean value or using in "
            "a conditional statement.".format(self)
        )
    @property
    def key(self):
        return (self._name, 0)
    @classmethod
    def _get_unary_operator(cls, op):
        # Build a method implementing unary `op` lazily (one new task).
        def f(self):
            name = funcname(op) + "-" + tokenize(self)
            dsk = {(name, 0): (op, (self._name, 0))}
            # Apply op to meta to infer the result's type.
            meta = op(self._meta_nonempty)
            graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
            return Scalar(graph, name, meta)
        return f
    @classmethod
    def _get_binary_operator(cls, op, inv=False):
        # Binary ops delegate to the module-level _scalar_binary helper.
        return lambda self, other: _scalar_binary(op, self, other, inv=inv)
    def to_delayed(self, optimize_graph=True):
        """Convert into a ``dask.delayed`` object.

        Parameters
        ----------
        optimize_graph : bool, optional
            If True [default], the graph is optimized before converting into
            ``dask.delayed`` objects.
        """
        dsk = self.__dask_graph__()
        if optimize_graph:
            dsk = self.__dask_optimize__(dsk, self.__dask_keys__())
        name = "delayed-" + self._name
        dsk = HighLevelGraph.from_collections(name, dsk, dependencies=())
        return Delayed(self.key, dsk)
def _scalar_binary(op, self, other, inv=False):
    """Build the lazy result of ``op`` between a Scalar and another operand.

    ``inv`` swaps the operand order (for reflected operators). Returns
    NotImplemented for non-Scalar dask collections so their own protocol
    can take over.
    """
    token = "{0}-{1}".format(funcname(op), tokenize(self, other))
    deps = [self]
    return_type = get_parallel_type(other)

    if isinstance(other, Scalar):
        deps.append(other)
        other_key = (other._name, 0)
    elif is_dask_collection(other):
        # Let the other collection's binary-op machinery handle this.
        return NotImplemented
    else:
        other_key = other

    self_key = (self._name, 0)
    operands = (other_key, self_key) if inv else (self_key, other_key)
    dsk = {(token, 0): (op,) + operands}

    # Infer result meta by applying op to representative (non-empty) metas,
    # in the same operand order as the real task.
    nonempty_other = meta_nonempty(make_meta(other))
    if inv:
        meta = op(nonempty_other, self._meta_nonempty)
    else:
        meta = op(self._meta_nonempty, nonempty_other)

    graph = HighLevelGraph.from_collections(token, dsk, dependencies=deps)
    if return_type is Scalar:
        return Scalar(graph, token, meta)
    # Frame-like result (e.g. Scalar op Series): carry index bounds as divisions.
    return return_type(graph, token, meta, [other.index.min(), other.index.max()])
class _Frame(DaskMethodsMixin, OperatorMethodMixin):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
    def __init__(self, dsk, name, meta, divisions):
        # Normalize a plain task dict into a HighLevelGraph so the rest of
        # the class can rely on one graph type.
        if not isinstance(dsk, HighLevelGraph):
            dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
        self.dask = dsk
        self._name = name
        meta = make_meta(meta)
        if not self._is_partition_type(meta):
            # _is_partition_type is supplied by the concrete subclass
            # (Series/DataFrame) — not visible in this chunk.
            raise TypeError(
                "Expected meta to specify type {0}, got type "
                "{1}".format(type(self).__name__, typename(type(meta)))
            )
        self._meta = meta
        self.divisions = tuple(divisions)
    def __dask_graph__(self):
        # Dask collection protocol: the underlying task graph.
        return self.dask
    def __dask_keys__(self):
        # One output key per partition: (name, 0), (name, 1), ...
        return [(self._name, i) for i in range(self.npartitions)]
    def __dask_layers__(self):
        # This collection contributes a single HighLevelGraph layer.
        return (self._name,)
    def __dask_tokenize__(self):
        # The name already encodes a token, so it identifies this collection.
        return self._name
__dask_optimize__ = globalmethod(
optimize, key="dataframe_optimize", falsey=dont_optimize
)
__dask_scheduler__ = staticmethod(threaded.get)
    def __dask_postcompute__(self):
        # After compute, partition results are concatenated by `finalize`.
        return finalize, ()
    def __dask_postpersist__(self):
        # Rebuild an equivalent collection around a persisted graph.
        return type(self), (self._name, self._meta, self.divisions)
    @property
    def _constructor(self):
        # Factory used to build results; dispatches to the right subclass.
        return new_dd_object
    @property
    def npartitions(self):
        """Return number of partitions"""
        # divisions has one more boundary than there are partitions.
        return len(self.divisions) - 1
    @property
    def size(self):
        """Size of the Series or DataFrame as a Delayed object.

        Computed lazily as a tree-free reduction: per-partition sizes
        summed together.

        Examples
        --------
        >>> series.size  # doctest: +SKIP
        dd.Scalar<size-ag..., dtype=int64>
        """
        return self.reduction(
            methods.size, np.sum, token="size", meta=int, split_every=False
        )
    @property
    def _meta_nonempty(self):
        """ A non-empty version of `_meta` with fake data."""
        return meta_nonempty(self._meta)
    @property
    def _args(self):
        # Constructor arguments; used by pickling below.
        return (self.dask, self._name, self._meta, self.divisions)
    def __getstate__(self):
        # Pickle protocol: serialize via the constructor arguments.
        return self._args
    def __setstate__(self, state):
        # Inverse of __getstate__.
        self.dask, self._name, self._meta, self.divisions = state
    def copy(self):
        """ Make a copy of the dataframe

        This is strictly a shallow copy of the underlying computational graph.
        It does not affect the underlying data
        """
        return new_dd_object(self.dask, self._name, self._meta, self.divisions)
    def __array__(self, dtype=None, **kwargs):
        # NumPy array protocol: triggers a full compute.
        # NOTE(review): the computed result is stashed on self._computed —
        # presumably so a companion hook can reuse it; confirm before removing.
        self._computed = self.compute()
        x = np.array(self._computed)
        return x
    def __array_wrap__(self, array, context=None):
        # Old-style NumPy ufunc wrapping is deliberately unsupported.
        raise NotImplementedError
def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
out = kwargs.get("out", ())
for x in inputs + out:
# ufuncs work with 0-dimensional NumPy ndarrays
# so we don't want to raise NotImplemented
if isinstance(x, np.ndarray) and x.shape == ():
continue
elif not isinstance(
x, (Number, Scalar, _Frame, Array, pd.DataFrame, pd.Series, pd.Index)
):
return NotImplemented
if method == "__call__":
if numpy_ufunc.signature is not None:
return NotImplemented
if numpy_ufunc.nout > 1:
# ufuncs with multiple output values
# are not yet supported for frames
return NotImplemented
else:
return elemwise(numpy_ufunc, *inputs, **kwargs)
else:
# ufunc methods are not yet supported for frames
return NotImplemented
    @property
    def _elemwise(self):
        # Hook consumed by dask's generic elementwise machinery.
        return elemwise
    def _repr_data(self):
        # Abstract: subclasses provide the structure table used by __repr__.
        raise NotImplementedError
@property
def _repr_divisions(self):
name = "npartitions={0}".format(self.npartitions)
if self.known_divisions:
divisions = pd.Index(self.divisions, name=name)
else:
# avoid to be converted to NaN
divisions = pd.Index([""] * (self.npartitions + 1), name=name)
return divisions
    def __repr__(self):
        """Render the partition/dtype structure (no data is computed)."""
        data = self._repr_data().to_string(max_rows=5, show_dimensions=False)
        _str_fmt = """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks"""
        if len(self.columns) == 0:
            # Column-less frame: keep only the divisions row and relabel it.
            data = data.partition("\n")[-1].replace("Index", "Divisions")
            _str_fmt = "Empty {}".format(_str_fmt)
        return _str_fmt.format(
            klass=self.__class__.__name__,
            data=data,
            name=key_split(self._name),
            task=len(self.dask),
        )
    @property
    def index(self):
        """Return dask Index instance"""
        # Implemented as a partition-wise attribute access; metadata is the
        # meta frame's own index.
        return self.map_partitions(
            getattr,
            "index",
            token=self._name + "-index",
            meta=self._meta.index,
            enforce_metadata=False,
        )
    @index.setter
    def index(self, value):
        # Assign a new index partition-wise, then adopt the result's graph
        # and metadata in place. Divisions come from the new index.
        self.divisions = value.divisions
        result = map_partitions(
            methods.assign_index, self, value, enforce_metadata=False
        )
        self.dask = result.dask
        self._name = result._name
        self._meta = result._meta
    def reset_index(self, drop=False):
        """Reset the index to the default index.

        Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will
        not be monotonically increasing from 0. Instead, it will restart at 0
        for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
        This is due to the inability to statically know the full length of the
        index.

        For DataFrame with multi-level index, returns a new DataFrame with
        labeling information in the columns under the index names, defaulting
        to 'level_0', 'level_1', etc. if any are None. For a standard index,
        the index name will be used (if set), otherwise a default 'index' or
        'level_0' (if 'index' is already taken) will be used.

        Parameters
        ----------
        drop : boolean, default False
            Do not try to insert index into dataframe columns.
        """
        # Divisions are cleared: the new per-partition integer index no
        # longer corresponds to the old division boundaries.
        return self.map_partitions(
            M.reset_index, drop=drop, enforce_metadata=False
        ).clear_divisions()
    @property
    def known_divisions(self):
        """Whether divisions are already known"""
        # Unknown divisions are represented as a tuple of Nones.
        return len(self.divisions) > 0 and self.divisions[0] is not None
    def clear_divisions(self):
        """ Forget division information """
        # Same graph and meta, but all division boundaries become None.
        divisions = (None,) * (self.npartitions + 1)
        return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = "get-partition-%s-%s" % (str(n), self._name)
divisions = self.divisions[n : n + 2]
layer = {(name, 0): (self._name, n)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
    @derived_from(pd.DataFrame)
    def drop_duplicates(self, subset=None, split_every=None, split_out=1, **kwargs):
        # Validate args eagerly by running pandas' drop_duplicates on the
        # (fake, non-empty) meta, then build the lazy apply-concat-apply.
        if subset is not None:
            # Let pandas error on bad inputs
            self._meta_nonempty.drop_duplicates(subset=subset, **kwargs)
            kwargs["subset"] = subset
            # Shuffle output partitions by the subset columns so duplicates
            # land in the same partition.
            split_out_setup = split_out_on_cols
            split_out_setup_kwargs = {"cols": subset}
        else:
            self._meta_nonempty.drop_duplicates(**kwargs)
            split_out_setup = split_out_setup_kwargs = None
        if kwargs.get("keep", True) is False:
            raise NotImplementedError("drop_duplicates with keep=False")
        # Dropping duplicates per-chunk and again on the combined result is
        # equivalent to a global drop_duplicates.
        chunk = M.drop_duplicates
        return aca(
            self,
            chunk=chunk,
            aggregate=chunk,
            meta=self._meta,
            token="drop-duplicates",
            split_every=split_every,
            split_out=split_out,
            split_out_setup=split_out_setup,
            split_out_setup_kwargs=split_out_setup_kwargs,
            **kwargs
        )
    def __len__(self):
        # Eager: computes immediately (len() must return a concrete int).
        return self.reduction(
            len, np.sum, token="len", meta=int, split_every=False
        ).compute()
    def __bool__(self):
        # Match pandas: truth value of a frame is ambiguous.
        raise ValueError(
            "The truth value of a {0} is ambiguous. "
            "Use a.any() or a.all().".format(self.__class__.__name__)
        )
    __nonzero__ = __bool__  # python 2
    def _scalarfunc(self, cast_type):
        # Build a __float__/__int__/__complex__ implementation that always
        # raises: lazy frames cannot be cast to concrete scalars.
        def wrapper():
            raise TypeError("cannot convert the series to {0}".format(str(cast_type)))
        return wrapper
    def __float__(self):
        # Returns a callable that raises TypeError (see _scalarfunc).
        return self._scalarfunc(float)
    def __int__(self):
        # Returns a callable that raises TypeError (see _scalarfunc).
        return self._scalarfunc(int)
    __long__ = __int__  # python 2
    def __complex__(self):
        # Returns a callable that raises TypeError (see _scalarfunc).
        return self._scalarfunc(complex)
    @insert_meta_param_description(pad=12)
    def map_partitions(self, func, *args, **kwargs):
        """ Apply Python function on each DataFrame partition.

        Note that the index and divisions are assumed to remain unchanged.

        Parameters
        ----------
        func : function
            Function applied to each partition.
        args, kwargs :
            Arguments and keywords to pass to the function. The partition will
            be the first argument, and these will be passed *after*. Arguments
            and keywords may contain ``Scalar``, ``Delayed`` or regular
            python objects. DataFrame-like args (both dask and pandas) will be
            repartitioned to align (if necessary) before applying the function.
        $META

        Examples
        --------
        Given a DataFrame, Series, or Index, such as:

        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
        ...                    'y': [1., 2., 3., 4., 5.]})
        >>> ddf = dd.from_pandas(df, npartitions=2)

        One can use ``map_partitions`` to apply a function on each partition.
        Extra arguments and keywords can optionally be provided, and will be
        passed to the function after the partition.

        Here we apply a function with arguments and keywords to a DataFrame,
        resulting in a Series:

        >>> def myadd(df, a, b=1):
        ...     return df.x + df.y + a + b
        >>> res = ddf.map_partitions(myadd, 1, b=2)
        >>> res.dtype
        dtype('float64')

        By default, dask tries to infer the output metadata by running your
        provided function on some fake data. This works well in many cases, but
        can sometimes be expensive, or even fail. To avoid this, you can
        manually specify the output metadata with the ``meta`` keyword. This
        can be specified in many forms, for more information see
        ``dask.dataframe.utils.make_meta``.

        Here we specify the output is a Series with no name, and dtype
        ``float64``:

        >>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))

        Here we map a function that takes in a DataFrame, and returns a
        DataFrame with a new column:

        >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
        >>> res.dtypes
        x      int64
        y    float64
        z    float64
        dtype: object

        As before, the output metadata can also be specified manually. This
        time we pass in a ``dict``, as the output is a DataFrame:

        >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
        ...                          meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})

        In the case where the metadata doesn't change, you can also pass in
        the object itself directly:

        >>> res = ddf.map_partitions(lambda df: df.head(), meta=df)

        Also note that the index and divisions are assumed to remain unchanged.
        If the function you're mapping changes the index/divisions, you'll need
        to clear them afterwards:

        >>> ddf.map_partitions(func).clear_divisions()  # doctest: +SKIP
        """
        # All real work happens in the module-level map_partitions.
        return map_partitions(func, self, *args, **kwargs)
    @insert_meta_param_description(pad=12)
    def map_overlap(self, func, before, after, *args, **kwargs):
        """Apply a function to each partition, sharing rows with adjacent partitions.

        This can be useful for implementing windowing functions such as
        ``df.rolling(...).mean()`` or ``df.diff()``.

        Parameters
        ----------
        func : function
            Function applied to each partition.
        before : int
            The number of rows to prepend to partition ``i`` from the end of
            partition ``i - 1``.
        after : int
            The number of rows to append to partition ``i`` from the beginning
            of partition ``i + 1``.
        args, kwargs :
            Arguments and keywords to pass to the function. The partition will
            be the first argument, and these will be passed *after*.
        $META

        Notes
        -----
        Given positive integers ``before`` and ``after``, and a function
        ``func``, ``map_overlap`` does the following:

        1. Prepend ``before`` rows to each partition ``i`` from the end of
           partition ``i - 1``. The first partition has no rows prepended.
        2. Append ``after`` rows to each partition ``i`` from the beginning of
           partition ``i + 1``. The last partition has no rows appended.
        3. Apply ``func`` to each partition, passing in any extra ``args`` and
           ``kwargs`` if provided.
        4. Trim ``before`` rows from the beginning of all but the first
           partition.
        5. Trim ``after`` rows from the end of all but the last partition.

        Note that the index and divisions are assumed to remain unchanged.

        Examples
        --------
        Given a DataFrame, Series, or Index, such as:

        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
        ...                    'y': [1., 2., 3., 4., 5.]})
        >>> ddf = dd.from_pandas(df, npartitions=2)

        A rolling sum with a trailing moving window of size 2 can be computed by
        overlapping 2 rows before each partition, and then mapping calls to
        ``df.rolling(2).sum()``:

        >>> ddf.compute()
            x    y
        0   1  1.0
        1   2  2.0
        2   4  3.0
        3   7  4.0
        4  11  5.0
        >>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
              x    y
        0   NaN  NaN
        1   3.0  3.0
        2   6.0  5.0
        3  11.0  7.0
        4  18.0  9.0

        The pandas ``diff`` method computes a discrete difference shifted by a
        number of periods (can be positive or negative). This can be
        implemented by mapping calls to ``df.diff`` to each partition after
        prepending/appending that many rows, depending on sign:

        >>> def diff(df, periods=1):
        ...     before, after = (periods, 0) if periods > 0 else (0, -periods)
        ...     return df.map_overlap(lambda df, periods=1: df.diff(periods),
        ...                           periods, 0, periods=periods)
        >>> diff(ddf, 1).compute()
             x    y
        0  NaN  NaN
        1  1.0  1.0
        2  2.0  1.0
        3  3.0  1.0
        4  4.0  1.0

        If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-
        based windows.

        >>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))
        >>> dts = dd.from_pandas(ts, npartitions=2)
        >>> dts.map_overlap(lambda df: df.rolling('2D').sum(),
        ...                 pd.Timedelta('2D'), 0).compute()
        2017-01-01     0.0
        2017-01-02     1.0
        2017-01-03     3.0
        2017-01-04     5.0
        2017-01-05     7.0
        2017-01-06     9.0
        2017-01-07    11.0
        2017-01-08    13.0
        2017-01-09    15.0
        2017-01-10    17.0
        Freq: D, dtype: float64
        """
        # Import here to avoid a circular import with .rolling.
        from .rolling import map_overlap
        return map_overlap(func, self, before, after, *args, **kwargs)
    def memory_usage_per_partition(self, index=True, deep=False):
        """ Return the memory usage of each partition

        Parameters
        ----------
        index : bool, default True
            Specifies whether to include the memory usage of the index in
            returned Series.
        deep : bool, default False
            If True, introspect the data deeply by interrogating
            ``object`` dtypes for system-level memory consumption, and include
            it in the returned values.

        Returns
        -------
        Series
            A Series whose index is the partition number and whose values
            are the memory usage of each partition in bytes.
        """
        # total_mem_usage is a module-level helper (defined outside this
        # chunk). Divisions are cleared because the result is indexed by
        # partition number, not by the original index values.
        return self.map_partitions(
            total_mem_usage, index=index, deep=deep
        ).clear_divisions()
    @insert_meta_param_description(pad=12)
    def reduction(
        self,
        chunk,
        aggregate=None,
        combine=None,
        meta=no_default,
        token=None,
        split_every=None,
        chunk_kwargs=None,
        aggregate_kwargs=None,
        combine_kwargs=None,
        **kwargs
    ):
        """Generic row-wise reductions.

        Parameters
        ----------
        chunk : callable
            Function to operate on each partition. Should return a
            ``pandas.DataFrame``, ``pandas.Series``, or a scalar.
        aggregate : callable, optional
            Function to operate on the concatenated result of ``chunk``. If not
            specified, defaults to ``chunk``. Used to do the final aggregation
            in a tree reduction.

            The input to ``aggregate`` depends on the output of ``chunk``.
            If the output of ``chunk`` is a:

            - scalar: Input is a Series, with one row per partition.
            - Series: Input is a DataFrame, with one row per partition. Columns
              are the rows in the output series.
            - DataFrame: Input is a DataFrame, with one row per partition.
              Columns are the columns in the output dataframes.

            Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
            scalar.
        combine : callable, optional
            Function to operate on intermediate concatenated results of
            ``chunk`` in a tree-reduction. If not provided, defaults to
            ``aggregate``. The input/output requirements should match that of
            ``aggregate`` described above.
        $META
        token : str, optional
            The name to use for the output keys.
        split_every : int, optional
            Group partitions into groups of this size while performing a
            tree-reduction. If set to False, no tree-reduction will be used,
            and all intermediates will be concatenated and passed to
            ``aggregate``. Default is 8.
        chunk_kwargs : dict, optional
            Keyword arguments to pass on to ``chunk`` only.
        aggregate_kwargs : dict, optional
            Keyword arguments to pass on to ``aggregate`` only.
        combine_kwargs : dict, optional
            Keyword arguments to pass on to ``combine`` only.
        kwargs :
            All remaining keywords will be passed to ``chunk``, ``combine``,
            and ``aggregate``.

        Examples
        --------
        >>> import pandas as pd
        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
        >>> ddf = dd.from_pandas(df, npartitions=4)

        Count the number of rows in a DataFrame. To do this, count the number
        of rows in each partition, then sum the results:

        >>> res = ddf.reduction(lambda x: x.count(),
        ...                     aggregate=lambda x: x.sum())
        >>> res.compute()
        x    50
        y    50
        dtype: int64

        Count the number of rows in a Series with elements greater than or
        equal to a value (provided via a keyword).

        >>> def count_greater(x, value=0):
        ...     return (x >= value).sum()
        >>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
        ...                       chunk_kwargs={'value': 25})
        >>> res.compute()
        25

        Aggregate both the sum and count of a Series at the same time:

        >>> def sum_and_count(x):
        ...     return pd.Series({'count': x.count(), 'sum': x.sum()},
        ...                      index=['count', 'sum'])
        >>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
        >>> res.compute()
        count      50
        sum      1225
        dtype: int64

        Doing the same, but for a DataFrame. Here ``chunk`` returns a
        DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
        index with non-unique entries for both 'x' and 'y'. We groupby the
        index, and sum each group to get the final result.

        >>> def sum_and_count(x):
        ...     return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
        ...                         columns=['count', 'sum'])
        >>> res = ddf.reduction(sum_and_count,
        ...                     aggregate=lambda x: x.groupby(level=0).sum())
        >>> res.compute()
           count   sum
        x     50  1225
        y     50  3725
        """
        if aggregate is None:
            aggregate = chunk
        if combine is None:
            if combine_kwargs:
                raise ValueError("`combine_kwargs` provided with no `combine`")
            combine = aggregate
            combine_kwargs = aggregate_kwargs
        # Copy kwargs dicts before mutating them; the user's callables are
        # smuggled through aca() via reserved "aca_*" keyword slots.
        chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
        chunk_kwargs["aca_chunk"] = chunk
        combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
        combine_kwargs["aca_combine"] = combine
        aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
        aggregate_kwargs["aca_aggregate"] = aggregate
        return aca(
            self,
            chunk=_reduction_chunk,
            aggregate=_reduction_aggregate,
            combine=_reduction_combine,
            meta=meta,
            token=token,
            split_every=split_every,
            chunk_kwargs=chunk_kwargs,
            aggregate_kwargs=aggregate_kwargs,
            combine_kwargs=combine_kwargs,
            **kwargs
        )
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError(
"%s is both the pipe target and a keyword argument" % target
)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
    def random_split(self, frac, random_state=None, shuffle=False):
        """ Pseudorandomly split dataframe into different pieces row-wise

        Parameters
        ----------
        frac : list
            List of floats that should sum to one.
        random_state : int or np.random.RandomState
            If int create a new RandomState with this as the seed.
            Otherwise draw from the passed RandomState.
        shuffle : bool, default False
            If set to True, the dataframe is shuffled (within partition)
            before the split.

        Examples
        --------
        50/50 split

        >>> a, b = df.random_split([0.5, 0.5])  # doctest: +SKIP

        80/10/10 split, consistent random_state

        >>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123)  # doctest: +SKIP

        See Also
        --------
        dask.DataFrame.sample
        """
        if not np.allclose(sum(frac), 1):
            raise ValueError("frac should sum to 1")
        # One independent RNG state per partition for reproducibility.
        state_data = random_state_data(self.npartitions, random_state)
        token = tokenize(self, frac, random_state)
        name = "split-" + token
        # First layer: each partition is split into len(frac) pieces at once.
        layer = {
            (name, i): (pd_split, (self._name, i), frac, state, shuffle)
            for i, state in enumerate(state_data)
        }
        out = []
        # Second layer: one collection per requested fraction, selecting
        # piece i from every partition's split tuple.
        for i in range(len(frac)):
            name2 = "split-%d-%s" % (i, token)
            dsk2 = {
                (name2, j): (getitem, (name, j), i) for j in range(self.npartitions)
            }
            graph = HighLevelGraph.from_collections(
                name2, merge(dsk2, layer), dependencies=[self]
            )
            out_df = type(self)(graph, name2, self._meta, self.divisions)
            out.append(out_df)
        return out
    def head(self, n=5, npartitions=1, compute=True):
        """ First n rows of the dataset

        Parameters
        ----------
        n : int, optional
            The number of rows to return. Default is 5.
        npartitions : int, optional
            Elements are only taken from the first ``npartitions``, with a
            default of 1. If there are fewer than ``n`` rows in the first
            ``npartitions`` a warning will be raised and any found rows
            returned. Pass -1 to use all partitions.
        compute : bool, optional
            Whether to compute the result, default is True.
        """
        # safe=True warns (rather than silently truncating) when fewer than
        # n rows are found in the inspected partitions.
        return self._head(n=n, npartitions=npartitions, compute=compute, safe=True)
    def _head(self, n, npartitions, compute, safe):
        """Shared implementation behind ``head`` (see its docstring)."""
        if npartitions <= -1:
            # -1 means "look at every partition".
            npartitions = self.npartitions
        if npartitions > self.npartitions:
            msg = "only {} partitions, head received {}"
            raise ValueError(msg.format(self.npartitions, npartitions))
        name = "head-%d-%d-%s" % (npartitions, n, self._name)
        # safe_head warns when fewer than n rows are available; M.head does not.
        if safe:
            head = safe_head
        else:
            head = M.head
        if npartitions > 1:
            # Take up to n rows from each inspected partition, concatenate,
            # then take the first n of the combined result.
            name_p = "head-partial-%d-%s" % (n, self._name)
            dsk = {}
            for i in range(npartitions):
                dsk[(name_p, i)] = (M.head, (self._name, i), n)
            concat = (_concat, [(name_p, i) for i in range(npartitions)])
            dsk[(name, 0)] = (head, concat, n)
        else:
            dsk = {(name, 0): (head, (self._name, 0), n)}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        # Result is a single partition spanning the inspected divisions.
        result = new_dd_object(
            graph, name, self._meta, [self.divisions[0], self.divisions[npartitions]]
        )
        if compute:
            result = result.compute()
        return result
    def tail(self, n=5, compute=True):
        """ Last n rows of the dataset

        Caveat: this only checks the last n rows of the last partition.
        """
        name = "tail-%d-%s" % (n, self._name)
        # Single task applied to the final partition only.
        dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        result = new_dd_object(graph, name, self._meta, self.divisions[-2:])
        if compute:
            result = result.compute()
        return result
    @property
    def loc(self):
        """ Purely label-location based indexer for selection by label.

        >>> df.loc["b"]  # doctest: +SKIP
        >>> df.loc["b":"d"]  # doctest: +SKIP
        """
        # Import here to avoid a circular import with .indexing.
        from .indexing import _LocIndexer
        return _LocIndexer(self)
    def _partitions(self, index):
        """Select partitions by (NumPy-style) index; backs the ``partitions`` accessor."""
        if not isinstance(index, tuple):
            index = (index,)
        from ..array.slicing import normalize_index
        # Normalize against a 1-d "shape" of npartitions, then promote bare
        # integers to length-1 slices so at least one partition is kept.
        index = normalize_index(index, (self.npartitions,))
        index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)
        name = "blocks-" + tokenize(self, index)
        # Apply the selection to the key list itself, then alias the chosen keys.
        new_keys = np.array(self.__dask_keys__(), dtype=object)[index].tolist()
        divisions = [self.divisions[i] for _, i in new_keys] + [
            self.divisions[new_keys[-1][1] + 1]
        ]
        dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        return new_dd_object(graph, name, self._meta, divisions)
    @property
    def partitions(self):
        """ Slice dataframe by partitions

        This allows partitionwise slicing of a Dask Dataframe.  You can perform normal
        Numpy-style slicing but now rather than slice elements of the array you
        slice along partitions so, for example, ``df.partitions[:5]`` produces a new
        Dask Dataframe of the first five partitions.

        Examples
        --------
        >>> df.partitions[0]  # doctest: +SKIP
        >>> df.partitions[:3]  # doctest: +SKIP
        >>> df.partitions[::10]  # doctest: +SKIP

        Returns
        -------
        A Dask DataFrame
        """
        # IndexCallable turns obj[...] syntax into a call to self._partitions.
        return IndexCallable(self._partitions)
# Note: iloc is implemented only on DataFrame
def repartition(
self,
divisions=None,
npartitions=None,
partition_size=None,
freq=None,
force=False,
):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. Only used if npartitions and
partition_size isn't specified.
npartitions : int, optional
Number of partitions of output. Only used if partition_size
isn't specified.
partition_size: int or string, optional
Max number of bytes of memory for each partition. Use numbers or
strings like 5MB. If specified npartitions and divisions will be
ignored.
.. warning::
This keyword argument triggers computation to determine
the memory size of each partition, which may be expensive.
freq : str, pd.Timedelta
A period on which to partition timeseries data like ``'7D'`` or
``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Notes
-----
Exactly one of `divisions`, `npartitions`, `partition_size`, or `freq`
should be specified. A ``ValueError`` will be raised when that is
not the case.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
>>> df = df.repartition(freq='7d') # doctest: +SKIP
"""
if (
sum(
[
partition_size is not None,
divisions is not None,
npartitions is not None,
freq is not None,
]
)
!= 1
):
raise ValueError(
"Please provide exactly one of ``npartitions=``, ``freq=``, "
"``divisisions=``, ``partitions_size=`` keyword arguments"
)
if partition_size is not None:
return repartition_size(self, partition_size)
elif npartitions is not None:
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
elif freq is not None:
return repartition_freq(self, freq=freq)
    @derived_from(pd.DataFrame)
    def fillna(self, value=None, method=None, limit=None, axis=None):
        # Fill NA/NaN values with ``value`` or by propagation (``method``).
        axis = self._validate_axis(axis)
        if method is None and limit is not None:
            raise NotImplementedError("fillna with set limit and method=None")
        # Derive a representative fill value to compute output metadata
        if isinstance(value, _Frame):
            test_value = value._meta_nonempty.values[0]
        elif isinstance(value, Scalar):
            test_value = value._meta_nonempty
        else:
            test_value = value
        meta = self._meta_nonempty.fillna(
            value=test_value, method=method, limit=limit, axis=axis
        )
        if axis == 1 or method is None:
            # Control whether or not dask's partition alignment happens.
            # We don't want for a pandas Series.
            # We do want it for a dask Series
            if is_series_like(value) and not is_dask_collection(value):
                args = ()
                kwargs = {"value": value}
            else:
                args = (value,)
                kwargs = {}
            return self.map_partitions(
                M.fillna,
                *args,
                method=method,
                limit=limit,
                axis=axis,
                meta=meta,
                enforce_metadata=False,
                **kwargs
            )
        # Directional fill along the index: normalize the method name and
        # work out how many rows of overlap with neighbors are required.
        if method in ("pad", "ffill"):
            method = "ffill"
            skip_check = 0
            before, after = 1 if limit is None else limit, 0
        else:
            method = "bfill"
            skip_check = self.npartitions - 1
            before, after = 0, 1 if limit is None else limit
        if limit is None:
            # Unlimited fill: first verify, per partition, that a fill value
            # exists to propagate across the partition boundary.
            name = "fillna-chunk-" + tokenize(self, method)
            dsk = {
                (name, i): (
                    methods.fillna_check,
                    (self._name, i),
                    method,
                    i != skip_check,
                )
                for i in range(self.npartitions)
            }
            graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
            parts = new_dd_object(graph, name, meta, self.divisions)
        else:
            parts = self
        return parts.map_overlap(
            M.fillna, before, after, method=method, limit=limit, meta=meta
        )
@derived_from(pd.DataFrame)
def ffill(self, axis=None, limit=None):
return self.fillna(method="ffill", limit=limit, axis=axis)
@derived_from(pd.DataFrame)
def bfill(self, axis=None, limit=None):
return self.fillna(method="bfill", limit=limit, axis=axis)
    def sample(self, n=None, frac=None, replace=False, random_state=None):
        """ Random sample of items
        Parameters
        ----------
        n : int, optional
            Number of items to return is not supported by dask. Use frac
            instead.
        frac : float, optional
            Fraction of axis items to return.
        replace : boolean, optional
            Sample with or without replacement. Default = False.
        random_state : int or ``np.random.RandomState``
            If int we create a new RandomState with this as the seed
            Otherwise we draw from the passed RandomState
        See Also
        --------
        DataFrame.random_split
        pandas.DataFrame.sample
        """
        if n is not None:
            msg = (
                "sample does not support the number of sampled items "
                "parameter, 'n'. Please use the 'frac' parameter instead."
            )
            # Tolerate ``n`` in [0, 1] as a likely-intended ``frac``
            if isinstance(n, Number) and 0 <= n <= 1:
                warnings.warn(msg)
                frac = n
            else:
                raise ValueError(msg)
        if frac is None:
            raise ValueError("frac must not be None")
        if random_state is None:
            random_state = np.random.RandomState()
        name = "sample-" + tokenize(self, frac, replace, random_state)
        # One independent random state per partition for reproducibility
        state_data = random_state_data(self.npartitions, random_state)
        dsk = {
            (name, i): (methods.sample, (self._name, i), state, frac, replace)
            for i, state in enumerate(state_data)
        }
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        return new_dd_object(graph, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def replace(self, to_replace=None, value=None, regex=False):
return self.map_partitions(
M.replace,
to_replace=to_replace,
value=value,
regex=regex,
enforce_metadata=False,
)
    def to_dask_array(self, lengths=None):
        """Convert a dask DataFrame to a dask array.
        Parameters
        ----------
        lengths : bool or Sequence of ints, optional
            How to determine the chunks sizes for the output array.
            By default, the output array will have unknown chunk lengths
            along the first axis, which can cause some later operations
            to fail.
            * True : immediately compute the length of each partition
            * Sequence : a sequence of integers to use for the chunk sizes
              on the first axis.  These values are *not* validated for
              correctness, beyond ensuring that the number of items
              matches the number of partitions.
        Returns
        -------
        A dask array built from this collection's ``.values``.
        """
        if lengths is True:
            # Triggers computation: one ``len`` per partition
            lengths = tuple(self.map_partitions(len, enforce_metadata=False).compute())
        arr = self.values
        chunks = self._validate_chunks(arr, lengths)
        # Overwrite the (possibly unknown) chunk sizes in place
        arr._chunks = chunks
        return arr
def to_hdf(self, path_or_buf, key, mode="a", append=False, **kwargs):
""" See dd.to_hdf docstring for more information """
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, **kwargs)
def to_csv(self, filename, **kwargs):
""" See dd.to_csv docstring for more information """
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_json(self, filename, *args, **kwargs):
""" See dd.to_json docstring for more information """
from .io import to_json
return to_json(self, filename, *args, **kwargs)
    def to_delayed(self, optimize_graph=True):
        """Convert into a list of ``dask.delayed`` objects, one per partition.
        Parameters
        ----------
        optimize_graph : bool, optional
            If True [default], the graph is optimized before converting into
            ``dask.delayed`` objects.
        Examples
        --------
        >>> partitions = df.to_delayed()  # doctest: +SKIP
        See Also
        --------
        dask.dataframe.from_delayed
        """
        keys = self.__dask_keys__()
        graph = self.__dask_graph__()
        if optimize_graph:
            # Apply dataframe optimizations now so each Delayed carries a
            # pre-optimized graph
            graph = self.__dask_optimize__(graph, self.__dask_keys__())
        name = "delayed-" + self._name
        graph = HighLevelGraph.from_collections(name, graph, dependencies=())
        # Each partition key becomes an independent Delayed sharing the graph
        return [Delayed(k, graph) for k in keys]
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
    def rolling(self, window, min_periods=None, center=False, win_type=None, axis=0):
        """Provides rolling transformations.
        Parameters
        ----------
        window : int, str, offset
           Size of the moving window. This is the number of observations used
           for calculating the statistic. When not using a ``DatetimeIndex``,
           the window size must not be so large as to span more than one
           adjacent partition. If using an offset or offset alias like '5D',
           the data must have a ``DatetimeIndex``
           .. versionchanged:: 0.15.0
              Now accepts offsets and string offset aliases
        min_periods : int, default None
            Minimum number of observations in window required to have a value
            (otherwise result is NA).
        center : boolean, default False
            Set the labels at the center of the window.
        win_type : string, default None
            Provide a window type. The recognized window types are identical
            to pandas.
        axis : int, default 0
        Returns
        -------
        a Rolling object on which to call a method to compute a statistic
        """
        # Imported here to avoid a circular import at module load time
        from dask.dataframe.rolling import Rolling
        # Validate eagerly; offset/string windows are validated by Rolling
        if isinstance(window, Integral):
            if window < 0:
                raise ValueError("window must be >= 0")
        if min_periods is not None:
            if not isinstance(min_periods, Integral):
                raise ValueError("min_periods must be an integer")
            if min_periods < 0:
                raise ValueError("min_periods must be >= 0")
        return Rolling(
            self,
            window=window,
            min_periods=min_periods,
            center=center,
            win_type=win_type,
            axis=axis,
        )
    @derived_from(pd.DataFrame)
    def diff(self, periods=1, axis=0):
        """
        .. note::
           Pandas currently uses an ``object``-dtype column to represent
           boolean data with missing values. This can cause issues for
           boolean-specific operations, like ``|``. To enable boolean-
           specific operations, at the cost of metadata that doesn't match
           pandas, use ``.astype(bool)`` after the ``diff``.
        """
        axis = self._validate_axis(axis)
        if not isinstance(periods, Integral):
            raise TypeError("periods must be an integer")
        if axis == 1:
            # Column-wise diff never crosses partition boundaries
            return self.map_partitions(
                M.diff, token="diff", periods=periods, axis=1, enforce_metadata=False
            )
        # Row-wise diff needs |periods| rows of overlap from the neighbor in
        # the direction of the shift
        before, after = (periods, 0) if periods > 0 else (0, -periods)
        return self.map_overlap(M.diff, before, after, token="diff", periods=periods)
    @derived_from(pd.DataFrame)
    def shift(self, periods=1, freq=None, axis=0):
        # Shift values by ``periods`` positions (or by ``freq`` on the index).
        axis = self._validate_axis(axis)
        if not isinstance(periods, Integral):
            raise TypeError("periods must be an integer")
        if axis == 1:
            # Column-wise shift is purely partitionwise
            return self.map_partitions(
                M.shift,
                token="shift",
                periods=periods,
                freq=freq,
                axis=1,
                enforce_metadata=False,
            )
        if freq is None:
            # Positional shift: needs |periods| rows of overlap with neighbor
            before, after = (periods, 0) if periods > 0 else (0, -periods)
            return self.map_overlap(
                M.shift, before, after, token="shift", periods=periods
            )
        # Let pandas error on invalid arguments
        meta = self._meta_nonempty.shift(periods, freq=freq)
        out = self.map_partitions(
            M.shift,
            token="shift",
            periods=periods,
            freq=freq,
            meta=meta,
            enforce_metadata=False,
            transform_divisions=False,
        )
        # Frequency shifts move the index, so divisions may shift as well
        return maybe_shift_divisions(out, periods, freq=freq)
    def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):
        """ Shared implementation for simple reductions (sum, min, all, ...).
        ``name`` is the pandas method name to apply; axis=1 maps partitionwise,
        otherwise a tree reduction over partitions is used.
        """
        axis = self._validate_axis(axis)
        meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
        token = self._token_prefix + name
        method = getattr(M, name)
        if axis == 1:
            # Row-wise: each partition reduces independently
            result = self.map_partitions(
                method, meta=meta, token=token, skipna=skipna, axis=axis
            )
            return handle_out(out, result)
        else:
            result = self.reduction(
                method,
                meta=meta,
                token=token,
                skipna=skipna,
                axis=axis,
                split_every=split_every,
            )
            if isinstance(self, DataFrame):
                # Resulting Series is indexed by column labels
                result.divisions = (self.columns.min(), self.columns.max())
            return handle_out(out, result)
@derived_from(pd.DataFrame)
def abs(self):
_raise_if_object_series(self, "abs")
meta = self._meta_nonempty.abs()
return self.map_partitions(M.abs, meta=meta, enforce_metadata=False)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg(
"all", axis=axis, skipna=skipna, split_every=split_every, out=out
)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg(
"any", axis=axis, skipna=skipna, split_every=split_every, out=out
)
    @derived_from(pd.DataFrame)
    def sum(
        self,
        axis=None,
        skipna=True,
        split_every=False,
        dtype=None,
        out=None,
        min_count=None,
    ):
        # ``dtype`` accepted for pandas signature compatibility only.
        result = self._reduction_agg(
            "sum", axis=axis, skipna=skipna, split_every=split_every, out=out
        )
        # NOTE: truthiness test means ``min_count=0`` behaves like None
        # (no masking), matching the pandas default of 0.
        if min_count:
            return result.where(
                self.notnull().sum(axis=axis) >= min_count, other=np.NaN
            )
        else:
            return result
    @derived_from(pd.DataFrame)
    def prod(
        self,
        axis=None,
        skipna=True,
        split_every=False,
        dtype=None,
        out=None,
        min_count=None,
    ):
        # ``dtype`` accepted for pandas signature compatibility only.
        result = self._reduction_agg(
            "prod", axis=axis, skipna=skipna, split_every=split_every, out=out
        )
        # NOTE: truthiness test means ``min_count=0`` behaves like None
        # (no masking), matching the pandas default of 0.
        if min_count:
            return result.where(
                self.notnull().sum(axis=axis) >= min_count, other=np.NaN
            )
        else:
            return result
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg(
"max", axis=axis, skipna=skipna, split_every=split_every, out=out
)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg(
"min", axis=axis, skipna=skipna, split_every=split_every, out=out
)
    @derived_from(pd.DataFrame)
    def idxmax(self, axis=None, skipna=True, split_every=False):
        # Index label of the maximum value along the requested axis.
        fn = "idxmax"
        axis = self._validate_axis(axis)
        meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
        if axis == 1:
            # Row-wise: resolved independently within each partition
            return map_partitions(
                M.idxmax,
                self,
                meta=meta,
                token=self._token_prefix + fn,
                skipna=skipna,
                axis=axis,
                enforce_metadata=False,
            )
        else:
            # Series input reduces to a scalar; DataFrame to a Series
            scalar = not is_series_like(meta)
            result = aca(
                [self],
                chunk=idxmaxmin_chunk,
                aggregate=idxmaxmin_agg,
                combine=idxmaxmin_combine,
                meta=meta,
                aggregate_kwargs={"scalar": scalar},
                token=self._token_prefix + fn,
                split_every=split_every,
                skipna=skipna,
                fn=fn,
            )
            if isinstance(self, DataFrame):
                result.divisions = (min(self.columns), max(self.columns))
            return result
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = "idxmin"
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis)
if axis == 1:
return map_partitions(
M.idxmin,
self,
meta=meta,
token=self._token_prefix + fn,
skipna=skipna,
axis=axis,
enforce_metadata=False,
)
else:
scalar = not is_series_like(meta)
result = aca(
[self],
chunk=idxmaxmin_chunk,
aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine,
meta=meta,
aggregate_kwargs={"scalar": scalar},
token=self._token_prefix + fn,
split_every=split_every,
skipna=skipna,
fn=fn,
)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
    @derived_from(pd.DataFrame)
    def count(self, axis=None, split_every=False):
        # Count non-NA cells along the requested axis.
        axis = self._validate_axis(axis)
        token = self._token_prefix + "count"
        if axis == 1:
            # Row-wise counts are purely partitionwise
            meta = self._meta_nonempty.count(axis=axis)
            return self.map_partitions(
                M.count, meta=meta, token=token, axis=axis, enforce_metadata=False
            )
        else:
            meta = self._meta_nonempty.count()
            # Need the astype(int) for empty dataframes, which sum to float dtype
            result = self.reduction(
                M.count,
                aggregate=_count_aggregate,
                meta=meta,
                token=token,
                split_every=split_every,
            )
            if isinstance(self, DataFrame):
                result.divisions = (self.columns.min(), self.columns.max())
            return result
    @derived_from(pd.DataFrame)
    def mean(self, axis=None, skipna=True, split_every=False, dtype=None, out=None):
        # ``dtype`` accepted for pandas signature compatibility only.
        axis = self._validate_axis(axis)
        _raise_if_object_series(self, "mean")
        meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
        if axis == 1:
            # Row-wise mean is purely partitionwise
            result = map_partitions(
                M.mean,
                self,
                meta=meta,
                token=self._token_prefix + "mean",
                axis=axis,
                skipna=skipna,
                enforce_metadata=False,
            )
            return handle_out(out, result)
        else:
            # Column-wise mean = sum / count, each computed as a reduction
            num = self._get_numeric_data()
            s = num.sum(skipna=skipna, split_every=split_every)
            n = num.count(split_every=split_every)
            name = self._token_prefix + "mean-%s" % tokenize(self, axis, skipna)
            result = map_partitions(
                methods.mean_aggregate,
                s,
                n,
                token=name,
                meta=meta,
                enforce_metadata=False,
            )
            if isinstance(self, DataFrame):
                result.divisions = (self.columns.min(), self.columns.max())
            return handle_out(out, result)
    @derived_from(pd.DataFrame)
    def var(
        self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None
    ):
        # ``dtype`` accepted for pandas signature compatibility only.
        axis = self._validate_axis(axis)
        _raise_if_object_series(self, "var")
        meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
        if axis == 1:
            # Row-wise variance is purely partitionwise
            result = map_partitions(
                M.var,
                self,
                meta=meta,
                token=self._token_prefix + "var",
                axis=axis,
                skipna=skipna,
                ddof=ddof,
                enforce_metadata=False,
            )
            return handle_out(out, result)
        else:
            if self.ndim == 1:
                result = self._var_1d(self, skipna, ddof, split_every)
                return handle_out(out, result)
            count_timedeltas = len(
                self._meta_nonempty.select_dtypes(include=[np.timedelta64]).columns
            )
            # pandas 1.0+ does not implement var on timedelta
            if not PANDAS_GT_100 and count_timedeltas == len(self._meta.columns):
                result = self._var_timedeltas(skipna, ddof, split_every)
            elif not PANDAS_GT_100 and count_timedeltas > 0:
                result = self._var_mixed(skipna, ddof, split_every)
            else:
                result = self._var_numeric(skipna, ddof, split_every)
            if isinstance(self, DataFrame):
                result.divisions = (self.columns.min(), self.columns.max())
            return handle_out(out, result)
    def _var_numeric(self, skipna=True, ddof=1, split_every=False):
        """ Column-wise variance of the numeric/bool columns via dask.array. """
        num = self.select_dtypes(include=["number", "bool"], exclude=[np.timedelta64])
        values_dtype = num.values.dtype
        array_values = num.values
        # Bool (and other non-numeric numpy) values must be cast for nanvar
        if not np.issubdtype(values_dtype, np.number):
            array_values = num.values.astype("f8")
        var = da.nanvar if skipna or skipna is None else da.var
        array_var = var(array_values, axis=0, ddof=ddof, split_every=split_every)
        name = self._token_prefix + "var-numeric" + tokenize(num, split_every)
        cols = num._meta.columns if is_dataframe_like(num) else None
        # Key of the (possibly 0-d) array result feeding the wrapping task
        var_shape = num._meta_nonempty.values.var(axis=0).shape
        array_var_name = (array_var._name,) + (0,) * len(var_shape)
        layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}
        graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
        return new_dd_object(
            graph, name, num._meta_nonempty.var(), divisions=[None, None]
        )
    def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):
        """ Column-wise variance of the timedelta columns.
        Each column is reduced separately via ``_var_1d`` and the scalar
        results are recombined into a single Series.
        """
        timedeltas = self.select_dtypes(include=[np.timedelta64])
        var_timedeltas = [
            self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)
            for col_idx in timedeltas._meta.columns
        ]
        var_timedelta_names = [(v._name, 0) for v in var_timedeltas]
        name = (
            self._token_prefix + "var-timedeltas-" + tokenize(timedeltas, split_every)
        )
        layer = {
            (name, 0): (
                methods.wrap_var_reduction,
                var_timedelta_names,
                timedeltas._meta.columns,
            )
        }
        graph = HighLevelGraph.from_collections(
            name, layer, dependencies=var_timedeltas
        )
        return new_dd_object(
            graph, name, timedeltas._meta_nonempty.var(), divisions=[None, None]
        )
    def _var_mixed(self, skipna=True, ddof=1, split_every=False):
        """ Column-wise variance for frames mixing numeric and timedelta columns.
        Computes the two groups separately and concatenates the results in the
        original column order.
        """
        data = self.select_dtypes(include=["number", "bool", np.timedelta64])
        timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)
        numeric_vars = self._var_numeric(skipna, ddof, split_every)
        name = self._token_prefix + "var-mixed-" + tokenize(data, split_every)
        layer = {
            (name, 0): (
                methods.var_mixed_concat,
                (numeric_vars._name, 0),
                (timedelta_vars._name, 0),
                data._meta.columns,
            )
        }
        graph = HighLevelGraph.from_collections(
            name, layer, dependencies=[numeric_vars, timedelta_vars]
        )
        return new_dd_object(
            graph, name, self._meta_nonempty.var(), divisions=[None, None]
        )
    def _var_1d(self, column, skipna=True, ddof=1, split_every=False):
        """ Variance of a single Series, returned as a dask Scalar.
        Timedelta and nullable-integer columns are converted to plain numeric
        dtypes before the array-level reduction.
        """
        is_timedelta = is_timedelta64_dtype(column._meta)
        if is_timedelta:
            if not skipna:
                # Preserve NaT positions: cast, then re-mask them as NaN
                is_nan = column.isna()
                column = column.astype("i8")
                column = column.mask(is_nan)
            else:
                column = column.dropna().astype("i8")
        if PANDAS_VERSION >= "0.24.0":
            # Nullable Int64 cannot go through da.var directly
            if pd.Int64Dtype.is_dtype(column._meta_nonempty):
                column = column.astype("f8")
        if not np.issubdtype(column.dtype, np.number):
            column = column.astype("f8")
        name = self._token_prefix + "var-1d-" + tokenize(column, split_every)
        var = da.nanvar if skipna or skipna is None else da.var
        array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)
        layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}
        graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
        return new_dd_object(
            graph, name, column._meta_nonempty.var(), divisions=[None, None]
        )
    @derived_from(pd.DataFrame)
    def std(
        self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None
    ):
        # ``dtype`` accepted for pandas signature compatibility only.
        axis = self._validate_axis(axis)
        _raise_if_object_series(self, "std")
        meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
        if axis == 1:
            # Row-wise std is purely partitionwise
            result = map_partitions(
                M.std,
                self,
                meta=meta,
                token=self._token_prefix + "std",
                axis=axis,
                skipna=skipna,
                ddof=ddof,
                enforce_metadata=False,
            )
            return handle_out(out, result)
        else:
            # Column-wise std = sqrt(var)
            v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
            name = self._token_prefix + "std"
            result = map_partitions(
                np.sqrt, v, meta=meta, token=name, enforce_metadata=False
            )
            return handle_out(out, result)
    @derived_from(pd.DataFrame)
    def sem(self, axis=None, skipna=None, ddof=1, split_every=False):
        # Standard error of the mean: sqrt(var / count).
        axis = self._validate_axis(axis)
        _raise_if_object_series(self, "sem")
        meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
        if axis == 1:
            # Row-wise sem is purely partitionwise
            return map_partitions(
                M.sem,
                self,
                meta=meta,
                token=self._token_prefix + "sem",
                axis=axis,
                skipna=skipna,
                ddof=ddof,
            )
        else:
            num = self._get_numeric_data()
            v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
            n = num.count(split_every=split_every)
            name = self._token_prefix + "sem"
            result = map_partitions(
                np.sqrt, v / n, meta=meta, token=name, enforce_metadata=False
            )
            if isinstance(self, DataFrame):
                result.divisions = (self.columns.min(), self.columns.max())
            return result
    def quantile(self, q=0.5, axis=0, method="default"):
        """ Approximate row-wise and precise column-wise quantiles of DataFrame
        Parameters
        ----------
        q : list/array of floats, default 0.5 (50%)
            Iterable of numbers ranging from 0 to 1 for the desired quantiles
        axis : {0, 1, 'index', 'columns'} (default 0)
            0 or 'index' for row-wise, 1 or 'columns' for column-wise
        method : {'default', 'tdigest', 'dask'}, optional
            What method to use. By default will use dask's internal custom
            algorithm (``'dask'``).  If set to ``'tdigest'`` will use tdigest
            for floats and ints and fallback to the ``'dask'`` otherwise.
        """
        axis = self._validate_axis(axis)
        keyname = "quantiles-concat--" + tokenize(self, q, axis)
        if axis == 1:
            if isinstance(q, list):
                # Not supported, the result will have current index as columns
                raise ValueError("'q' must be scalar when axis=1 is specified")
            # Row-wise quantile is exact and purely partitionwise
            return map_partitions(
                M.quantile,
                self,
                q,
                axis,
                token=keyname,
                enforce_metadata=False,
                meta=(q, "f8"),
            )
        else:
            _raise_if_object_series(self, "quantile")
            meta = self._meta.quantile(q, axis=axis)
            num = self._get_numeric_data()
            # One quantile computation per numeric column
            quantiles = tuple(quantile(self[c], q, method) for c in num.columns)
            qnames = [(_q._name, 0) for _q in quantiles]
            if isinstance(quantiles[0], Scalar):
                # Scalar q: assemble the per-column scalars into one Series
                layer = {
                    (keyname, 0): (pd.Series, qnames, num.columns, None, meta.name)
                }
                graph = HighLevelGraph.from_collections(
                    keyname, layer, dependencies=quantiles
                )
                divisions = (min(num.columns), max(num.columns))
                return Series(graph, keyname, meta, divisions)
            else:
                # List q: concatenate the per-column Series into a DataFrame
                layer = {(keyname, 0): (methods.concat, qnames, 1)}
                graph = HighLevelGraph.from_collections(
                    keyname, layer, dependencies=quantiles
                )
                return DataFrame(graph, keyname, meta, quantiles[0].divisions)
    @derived_from(pd.DataFrame)
    def describe(
        self,
        split_every=False,
        percentiles=None,
        percentiles_method="default",
        include=None,
        exclude=None,
    ):
        # Summary statistics, mirroring pandas' include/exclude semantics.
        if self._meta.ndim == 1:
            return self._describe_1d(self, split_every, percentiles, percentiles_method)
        elif (include is None) and (exclude is None):
            data = self._meta.select_dtypes(include=[np.number, np.timedelta64])
            # when some numerics/timedeltas are found, by default keep them
            if len(data.columns) == 0:
                chosen_columns = self._meta.columns
            else:
                # check if there are timedelta or boolean columns
                bools_and_timedeltas = self._meta.select_dtypes(
                    include=[np.timedelta64, "bool"]
                )
                if len(bools_and_timedeltas.columns) == 0:
                    # Purely numeric frame: use the fast combined path
                    return self._describe_numeric(
                        self, split_every, percentiles, percentiles_method
                    )
                else:
                    chosen_columns = data.columns
        elif include == "all":
            if exclude is not None:
                msg = "exclude must be None when include is 'all'"
                raise ValueError(msg)
            chosen_columns = self._meta.columns
        else:
            chosen_columns = self._meta.select_dtypes(include=include, exclude=exclude)
        # Describe each chosen column independently, then stitch together
        stats = [
            self._describe_1d(
                self[col_idx], split_every, percentiles, percentiles_method
            )
            for col_idx in chosen_columns
        ]
        stats_names = [(s._name, 0) for s in stats]
        name = "describe--" + tokenize(self, split_every)
        layer = {(name, 0): (methods.describe_aggregate, stats_names)}
        graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
        meta = self._meta_nonempty.describe(include=include, exclude=exclude)
        return new_dd_object(graph, name, meta, divisions=[None, None])
    def _describe_1d(
        self, data, split_every=False, percentiles=None, percentiles_method="default"
    ):
        """ Dispatch ``describe`` for a single Series based on its dtype. """
        # bool is checked before numeric: pandas treats bool as non-numeric here
        if is_bool_dtype(data._meta):
            return self._describe_nonnumeric_1d(data, split_every=split_every)
        elif is_numeric_dtype(data._meta):
            return self._describe_numeric(
                data,
                split_every=split_every,
                percentiles=percentiles,
                percentiles_method=percentiles_method,
            )
        elif is_timedelta64_dtype(data._meta):
            # Timedeltas are described on their integer representation
            return self._describe_numeric(
                data.dropna().astype("i8"),
                split_every=split_every,
                percentiles=percentiles,
                percentiles_method=percentiles_method,
                is_timedelta_column=True,
            )
        else:
            return self._describe_nonnumeric_1d(data, split_every=split_every)
    def _describe_numeric(
        self,
        data,
        split_every=False,
        percentiles=None,
        percentiles_method="default",
        is_timedelta_column=False,
    ):
        """ ``describe`` for numeric data: count/mean/std/min/quantiles/max. """
        num = data._get_numeric_data()
        if data.ndim == 2 and len(num.columns) == 0:
            raise ValueError("DataFrame contains only non-numeric data.")
        elif data.ndim == 1 and data.dtype == "object":
            raise ValueError("Cannot compute ``describe`` on object dtype.")
        if percentiles is None:
            percentiles = [0.25, 0.5, 0.75]
        else:
            # always include the 50%tle to calculate the median
            # unique removes duplicates and sorts quantiles
            percentiles = np.array(percentiles)
            percentiles = np.append(percentiles, 0.5)
            percentiles = np.unique(percentiles)
            percentiles = list(percentiles)
        # Each statistic is its own lazy reduction; combined in one final task
        stats = [
            num.count(split_every=split_every),
            num.mean(split_every=split_every),
            num.std(split_every=split_every),
            num.min(split_every=split_every),
            num.quantile(percentiles, method=percentiles_method),
            num.max(split_every=split_every),
        ]
        stats_names = [(s._name, 0) for s in stats]
        colname = data._meta.name if isinstance(data._meta, pd.Series) else None
        name = "describe-numeric--" + tokenize(num, split_every)
        layer = {
            (name, 0): (
                methods.describe_numeric_aggregate,
                stats_names,
                colname,
                is_timedelta_column,
            )
        }
        graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
        meta = num._meta_nonempty.describe()
        return new_dd_object(graph, name, meta, divisions=[None, None])
    def _describe_nonnumeric_1d(self, data, split_every=False):
        """ ``describe`` for non-numeric Series: count/unique/top/freq
        (plus first/last timestamps for datetime data).
        """
        vcounts = data.value_counts(split_every)
        count_nonzero = vcounts[vcounts != 0]
        count_unique = count_nonzero.size
        stats = [
            # nunique
            count_unique,
            # count
            data.count(split_every=split_every),
            # most common value
            vcounts._head(1, npartitions=1, compute=False, safe=False),
        ]
        if is_datetime64_any_dtype(data._meta):
            # Extremes computed on the integer representation of timestamps
            min_ts = data.dropna().astype("i8").min(split_every=split_every)
            max_ts = data.dropna().astype("i8").max(split_every=split_every)
            stats += [min_ts, max_ts]
        stats_names = [(s._name, 0) for s in stats]
        colname = data._meta.name
        name = "describe-nonnumeric-1d--" + tokenize(data, split_every)
        layer = {
            (name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)
        }
        graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
        meta = data._meta_nonempty.describe()
        return new_dd_object(graph, name, meta, divisions=[None, None])
    def _cum_agg(
        self, op_name, chunk, aggregate, axis, skipna=True, chunk_kwargs=None, out=None
    ):
        """ Wrapper for cumulative operation
        ``chunk`` performs the cumulative op within a partition; ``aggregate``
        combines a partition's result with the running value carried over from
        all previous partitions.
        """
        axis = self._validate_axis(axis)
        if axis == 1:
            # Row-wise cumulation never crosses partition boundaries
            name = "{0}{1}(axis=1)".format(self._token_prefix, op_name)
            result = self.map_partitions(chunk, token=name, **chunk_kwargs)
            return handle_out(out, result)
        else:
            # cumulate each partition
            name1 = "{0}{1}-map".format(self._token_prefix, op_name)
            cumpart = map_partitions(
                chunk, self, token=name1, meta=self, **chunk_kwargs
            )
            # last cumulated value of each partition, to carry forward
            name2 = "{0}{1}-take-last".format(self._token_prefix, op_name)
            cumlast = map_partitions(
                _take_last,
                cumpart,
                skipna,
                meta=pd.Series([], dtype="float"),
                token=name2,
            )
            suffix = tokenize(self)
            name = "{0}{1}-{2}".format(self._token_prefix, op_name, suffix)
            cname = "{0}{1}-cum-last-{2}".format(self._token_prefix, op_name, suffix)
            # aggregate cumulated partitions and their previous last element
            layer = {}
            layer[(name, 0)] = (cumpart._name, 0)
            for i in range(1, self.npartitions):
                # store each cumulative step to graph to reduce computation
                if i == 1:
                    layer[(cname, i)] = (cumlast._name, i - 1)
                else:
                    # aggregate with previous cumulation results
                    layer[(cname, i)] = (
                        methods._cum_aggregate_apply,
                        aggregate,
                        (cname, i - 1),
                        (cumlast._name, i - 1),
                    )
                layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
            graph = HighLevelGraph.from_collections(
                name, layer, dependencies=[cumpart, cumlast]
            )
            result = new_dd_object(graph, name, chunk(self._meta), self.divisions)
            return handle_out(out, result)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg(
"cumsum",
chunk=M.cumsum,
aggregate=operator.add,
axis=axis,
skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out,
)
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg(
"cumprod",
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis,
skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out,
)
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True, out=None):
return self._cum_agg(
"cummax",
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis,
skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out,
)
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True, out=None):
return self._cum_agg(
"cummin",
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis,
skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out,
)
    @derived_from(pd.DataFrame)
    def where(self, cond, other=np.nan):
        # cond and other may be dask instance,
        # passing map_partitions via keyword will not be aligned
        return map_partitions(M.where, self, cond, other, enforce_metadata=False)
    @derived_from(pd.DataFrame)
    def mask(self, cond, other=np.nan):
        # Inverse of ``where``; positional args keep dask-partition alignment
        return map_partitions(M.mask, self, cond, other, enforce_metadata=False)
    @derived_from(pd.DataFrame)
    def notnull(self):
        # Elementwise; a partitionwise map preserves the partition structure
        return self.map_partitions(M.notnull, enforce_metadata=False)
    @derived_from(pd.DataFrame)
    def isnull(self):
        # Elementwise; a partitionwise map preserves the partition structure
        return self.map_partitions(M.isnull, enforce_metadata=False)
@derived_from(pd.DataFrame)
def isna(self):
if hasattr(pd, "isna"):
return self.map_partitions(M.isna, enforce_metadata=False)
else:
raise NotImplementedError(
"Need more recent version of Pandas "
"to support isna. "
"Please use isnull instead."
)
    @derived_from(pd.DataFrame)
    def isin(self, values):
        if is_dataframe_like(self._meta):
            # DataFrame.isin does weird alignment stuff
            bad_types = (_Frame, pd.Series, pd.DataFrame)
        else:
            bad_types = (_Frame,)
        if isinstance(values, bad_types):
            raise NotImplementedError("Passing a %r to `isin`" % typename(type(values)))
        meta = self._meta_nonempty.isin(values)
        # We wrap values in a delayed for two reasons:
        # - avoid serializing data in every task
        # - avoid cost of traversal of large list in optimizations
        return self.map_partitions(
            M.isin, delayed(values), meta=meta, enforce_metadata=False
        )
    @derived_from(pd.DataFrame)
    def astype(self, dtype):
        # XXX: Pandas will segfault for empty dataframes when setting
        # categorical dtypes. This operation isn't allowed currently anyway. We
        # get the metadata with a non-empty frame to throw the error instead of
        # segfaulting.
        if is_dataframe_like(self._meta) and is_categorical_dtype(dtype):
            meta = self._meta_nonempty.astype(dtype)
        else:
            meta = self._meta.astype(dtype)
        # Categoricals without explicit categories have unknown categories in
        # dask's metadata; clear them so downstream ops don't assume them.
        if hasattr(dtype, "items"):
            # dict of per-column dtypes
            set_unknown = [
                k
                for k, v in dtype.items()
                if is_categorical_dtype(v) and getattr(v, "categories", None) is None
            ]
            meta = clear_known_categories(meta, cols=set_unknown)
        elif is_categorical_dtype(dtype) and getattr(dtype, "categories", None) is None:
            meta = clear_known_categories(meta)
        return self.map_partitions(
            M.astype, dtype=dtype, meta=meta, enforce_metadata=False
        )
@derived_from(pd.Series)
def append(self, other, interleave_partitions=False):
    # DataFrame overrides this method, so the decorator attaches the
    # pd.Series docstring here.
    from .multi import concat

    if isinstance(other, (list, dict)):
        raise NotImplementedError("append doesn't support list or dict input")
    return concat(
        [self, other], join="outer", interleave_partitions=interleave_partitions
    )
@derived_from(pd.DataFrame)
def align(self, other, join="outer", axis=None, fill_value=None):
    # Run align on the metadata alone to learn the two output schemas
    # without triggering any computation.
    meta1, meta2 = _emulate(
        M.align, self, other, join, axis=axis, fill_value=fill_value
    )
    # Each partition of ``aligned`` holds a (left, right) tuple.
    aligned = self.map_partitions(
        M.align,
        other,
        join=join,
        axis=axis,
        fill_value=fill_value,
        enforce_metadata=False,
    )
    token = tokenize(self, other, join, axis, fill_value)

    # Split the tuple-valued partitions into two collections by indexing
    # element 0 (left) and element 1 (right) of every partition.
    name1 = "align1-" + token
    dsk1 = {
        (name1, i): (getitem, key, 0)
        for i, key in enumerate(aligned.__dask_keys__())
    }
    dsk1.update(aligned.dask)
    result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)

    name2 = "align2-" + token
    dsk2 = {
        (name2, i): (getitem, key, 1)
        for i, key in enumerate(aligned.__dask_keys__())
    }
    dsk2.update(aligned.dask)
    result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
    return result1, result2
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
    # Partitionwise pd.DataFrame.combine with another collection.
    return self.map_partitions(
        M.combine, other, func, fill_value=fill_value, overwrite=overwrite
    )

@derived_from(pd.DataFrame)
def combine_first(self, other):
    # Fill missing entries in ``self`` with values from ``other``.
    return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op, original=pd.DataFrame):
    """ bind operator method like DataFrame.add to this class """
    # Abstract hook: the concrete subclasses (Series, DataFrame) supply
    # the actual binding logic.
    raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, closed=None, label=None):
    # Resampler implements the lazy bin/aggregate logic.
    from .tseries.resample import Resampler

    return Resampler(self, rule, closed=closed, label=label)
@derived_from(pd.DataFrame)
def first(self, offset):
    # Let pandas error on bad args
    self._meta_nonempty.first(offset)

    if not self.known_divisions:
        raise ValueError("`first` is not implemented for unknown divisions")

    offset = pd.tseries.frequencies.to_offset(offset)
    # ``date`` is the cutoff; ``end`` is the partition that contains it.
    date = self.divisions[0] + offset
    end = self.loc._get_partitions(date)

    # ``isAnchored`` was renamed to ``is_anchored`` around pandas 1.0.
    if PANDAS_GT_100:
        is_anchored = offset.is_anchored()
    else:
        is_anchored = offset.isAnchored()

    include_right = is_anchored or not hasattr(offset, "_inc")

    if end == self.npartitions - 1:
        divs = self.divisions
    else:
        divs = self.divisions[: end + 1] + (date,)

    name = "first-" + tokenize(self, offset)
    # Pass partitions before the cutoff through unchanged and slice the
    # boundary partition up to ``date``.
    dsk = {(name, i): (self._name, i) for i in range(end)}
    dsk[(name, end)] = (
        methods.boundary_slice,
        (self._name, end),
        None,
        date,
        include_right,
        True,
        "loc",
    )
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
    return new_dd_object(graph, name, self, divs)
@derived_from(pd.DataFrame)
def last(self, offset):
    # Let pandas error on bad args. Fixed: previously this called
    # ``first(offset)``, validating via the wrong method.
    self._meta_nonempty.last(offset)

    if not self.known_divisions:
        raise ValueError("`last` is not implemented for unknown divisions")

    offset = pd.tseries.frequencies.to_offset(offset)
    # ``date`` is the cutoff; ``start`` is the partition that contains it.
    date = self.divisions[-1] - offset
    start = self.loc._get_partitions(date)

    if start == 0:
        divs = self.divisions
    else:
        divs = (date,) + self.divisions[start + 1 :]

    name = "last-" + tokenize(self, offset)
    # Partitions after the cutoff pass through unchanged, shifted so the
    # sliced boundary partition becomes output partition 0. Fixed an
    # off-by-one: the range must stop at ``npartitions - 1`` — the old
    # ``range(start, self.npartitions)`` produced one extra output key and
    # referenced the nonexistent input key ``(self._name, npartitions)``.
    dsk = {
        (name, i + 1): (self._name, j + 1)
        for i, j in enumerate(range(start, self.npartitions - 1))
    }
    dsk[(name, 0)] = (
        methods.boundary_slice,
        (self._name, start),
        date,
        None,
        True,
        False,
        "loc",
    )
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
    return new_dd_object(graph, name, self, divs)
def nunique_approx(self, split_every=None):
    """Approximate number of unique rows.

    This method uses the HyperLogLog algorithm for cardinality
    estimation to compute the approximate number of unique rows.
    The approximate error is 0.406%.

    Parameters
    ----------
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used.
        Default is 8.

    Returns
    -------
    a float representing the approximate number of elements
    """
    from . import hyperloglog  # here to avoid circular import issues

    # b=16 is the HyperLogLog precision parameter passed to the kernels.
    return aca(
        [self],
        chunk=hyperloglog.compute_hll_array,
        combine=hyperloglog.reduce_state,
        aggregate=hyperloglog.estimate_count,
        split_every=split_every,
        b=16,
        meta=float,
    )
@property
def values(self):
    """ Return a dask.array of the values of this dataframe

    Warning: This creates a dask.array without precise shape information.
    Operations that depend on shape information, like slicing or reshaping,
    will not work.
    """
    return self.map_partitions(methods.values)
def _validate_chunks(self, arr, lengths):
    """Validate user-supplied per-partition ``lengths`` and build chunks.

    Returns a dask.array chunk structure for ``arr`` — derived from
    ``lengths`` when given, otherwise ``arr``'s existing chunks.
    """
    from dask.array.core import normalize_chunks

    if isinstance(lengths, Sequence):
        lengths = tuple(lengths)

        if len(lengths) != self.npartitions:
            raise ValueError(
                "The number of items in 'lengths' does not match "
                "the number of partitions. "
                "{} != {}".format(len(lengths), self.npartitions)
            )

        if self.ndim == 1:
            chunks = normalize_chunks((lengths,))
        else:
            # Rows chunked per partition; all columns in a single chunk.
            chunks = normalize_chunks((lengths, (len(self.columns),)))

        return chunks
    elif lengths is not None:
        raise ValueError("Unexpected value for 'lengths': '{}'".format(lengths))

    return arr._chunks
def _is_index_level_reference(self, key):
    """
    Test whether a key is an index level reference

    To be considered an index level reference, `key` must match the index name
    and must NOT match the name of any column (if a dataframe).
    """
    if self.index.name is None or is_dask_collection(key):
        return False
    if not (np.isscalar(key) or isinstance(key, tuple)):
        return False
    return key == self.index.name and key not in getattr(self, "columns", ())

def _contains_index_name(self, columns_or_index):
    """
    Test whether the input contains a reference to the index of the DataFrame/Series
    """
    if isinstance(columns_or_index, list):
        return any(self._is_index_level_reference(n) for n in columns_or_index)
    return self._is_index_level_reference(columns_or_index)
def _raise_if_object_series(x, funcname):
    """
    Utility function to raise an error if an object column does not support
    a certain operation like `mean`.
    """
    is_object_series = (
        isinstance(x, Series) and hasattr(x, "dtype") and x.dtype == object
    )
    if is_object_series:
        raise ValueError("`%s` not supported with object series" % funcname)
class Series(_Frame):
    """ Parallel Pandas Series

    Do not use this class directly. Instead use functions like
    ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.

    Parameters
    ----------
    dsk: dict
        The dask graph to compute this Series
    _name: str
        The key prefix that specifies which keys in the dask comprise this
        particular Series
    meta: pandas.Series
        An empty ``pandas.Series`` with names, dtypes, and index matching the
        expected output.
    divisions: tuple of index values
        Values along which we partition our blocks on the index

    See Also
    --------
    dask.dataframe.DataFrame
    """

    # pandas type backing each partition and the predicate recognizing it
    _partition_type = pd.Series
    _is_partition_type = staticmethod(is_series_like)
    # prefix for graph-key names produced by this class
    _token_prefix = "series-"
    # names of registered custom accessors
    _accessors = set()
def __array_wrap__(self, array, context=None):
    # Re-wrap a numpy ufunc result as a pandas Series. A scalar-shaped
    # operand means the result carries no index.
    # NOTE(review): ``index`` is unbound when ``context`` is not a
    # non-empty tuple — this relies on numpy always supplying one; confirm.
    if isinstance(context, tuple) and len(context) > 0:
        if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
            index = None
        else:
            index = context[1][0].index
    return pd.Series(array, index=index, name=self.name)
@property
def name(self):
    # The name lives on the metadata; no computation is needed.
    return self._meta.name

@name.setter
def name(self, name):
    self._meta.name = name
    # Renaming changes graph keys, so rebuild the graph.
    renamed = _rename_dask(self, name)
    # update myself
    self.dask = renamed.dask
    self._name = renamed._name
@property
def ndim(self):
    """ Return dimensionality """
    return 1

@property
def shape(self):
    """
    Return a tuple representing the dimensionality of a Series.

    The single element of the tuple is a Delayed result.

    Examples
    --------
    >>> series.shape  # doctest: +SKIP
    # (dd.Scalar<size-ag..., dtype=int64>,)
    """
    return (self.size,)

@property
def dtype(self):
    """ Return data type """
    return self._meta.dtype
@cache_readonly
def dt(self):
    """ Namespace of datetime methods """
    return DatetimeAccessor(self)

@cache_readonly
def cat(self):
    """ Namespace of categorical methods """
    return CategoricalAccessor(self)

@cache_readonly
def str(self):
    """ Namespace for string methods """
    return StringAccessor(self)
def __dir__(self):
    attrs = set(dir(type(self)))
    attrs.update(self.__dict__)
    # Remove the `cat` and `str` accessors if not available. We can't
    # decide this statically for the `dt` accessor, as it works on
    # datetime-like things as well.
    for accessor in ("cat", "str"):
        if not hasattr(self._meta, accessor):
            attrs.remove(accessor)
    return list(attrs)
@property
def nbytes(self):
    """ Number of bytes """
    # Lazy scalar: sum of the per-partition byte counts.
    return self.reduction(
        methods.nbytes, np.sum, token="nbytes", meta=int, split_every=False
    )

def _repr_data(self):
    # Small pandas Series used by the repr machinery; metadata only.
    return _repr_data_series(self._meta, self._repr_divisions)
def __repr__(self):
    """ have to overwrite footer """
    if self.name is not None:
        footer = "Name: {name}, dtype: {dtype}".format(
            name=self.name, dtype=self.dtype
        )
    else:
        footer = "dtype: {dtype}".format(dtype=self.dtype)
    # The continuation lines of this literal are intentionally left-aligned:
    # they are part of the rendered output.
    return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".format(
        klass=self.__class__.__name__,
        data=self.to_string(),
        footer=footer,
        name=key_split(self._name),
        task=len(self.dask),
    )
def rename(self, index=None, inplace=False, sorted_index=False):
    """Alter Series index labels or name

    Function / dict values must be unique (1-to-1). Labels not contained in
    a dict / Series will be left as-is. Extra labels listed don't throw an
    error.

    Alternatively, change ``Series.name`` with a scalar value.

    Parameters
    ----------
    index : scalar, hashable sequence, dict-like or callable, optional
        If dict-like or callable, the transformation is applied to the
        index. Scalar or hashable sequence-like will alter the
        ``Series.name`` attribute.
    inplace : boolean, default False
        Whether to return a new Series or modify this one inplace.
    sorted_index : bool, default False
        If true, the output ``Series`` will have known divisions inferred
        from the input series and the transformation. Ignored for
        non-callable/dict-like ``index`` or when the input series has
        unknown divisions. Note that this may only be set to ``True`` if
        you know that the transformed index is monotonicly increasing. Dask
        will check that transformed divisions are monotonic, but cannot
        check all the values between divisions, so incorrectly setting this
        can result in bugs.

    Returns
    -------
    renamed : Series

    See Also
    --------
    pandas.Series.rename
    """
    from pandas.api.types import is_scalar, is_dict_like, is_list_like
    import dask.dataframe as dd

    if is_scalar(index) or (
        is_list_like(index)
        and not is_dict_like(index)
        and not isinstance(index, dd.Series)
    ):
        # Scalar/sequence: only the name changes; divisions are untouched.
        res = self if inplace else self.copy()
        res.name = index
    else:
        res = self.map_partitions(M.rename, index, enforce_metadata=False)
        if self.known_divisions:
            if sorted_index and (callable(index) or is_dict_like(index)):
                # Apply the transform to the divisions themselves and check
                # they remain sorted before trusting them.
                old = pd.Series(range(self.npartitions + 1), index=self.divisions)
                new = old.rename(index).index
                if not new.is_monotonic_increasing:
                    msg = (
                        "sorted_index=True, but the transformed index "
                        "isn't monotonic_increasing"
                    )
                    raise ValueError(msg)
                res.divisions = tuple(new.tolist())
            else:
                # Transformed index order is unknown — drop the divisions.
                res = res.clear_divisions()
        if inplace:
            # Adopt the new collection's graph/metadata in place.
            self.dask = res.dask
            self._name = res._name
            self.divisions = res.divisions
            self._meta = res._meta
            res = self
    return res
@derived_from(pd.Series)
def round(self, decimals=0):
    # Elementwise; safe as a blockwise operation.
    return elemwise(M.round, self, decimals)

@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how="start", axis=0):
    df = elemwise(M.to_timestamp, self, freq, how, axis)
    # The divisions are period values; convert them to timestamps too.
    df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
    return df
def quantile(self, q=0.5, method="default"):
    """ Approximate quantiles of Series

    Parameters
    ----------
    q : list/array of floats, default 0.5 (50%)
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
    method : {'default', 'tdigest', 'dask'}, optional
        What method to use. By default will use dask's internal custom
        algorithm (``'dask'``).  If set to ``'tdigest'`` will use tdigest
        for floats and ints and fallback to the ``'dask'`` otherwise.
    """
    return quantile(self, q, method=method)

def _repartition_quantiles(self, npartitions, upsample=1.0):
    """ Approximate quantiles of Series used for repartitioning """
    from .partitionquantiles import partition_quantiles

    return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
    # Boolean/positional selection with another Series only works when the
    # two collections are partitioned identically (blockwise getitem).
    if isinstance(key, Series) and self.divisions == key.divisions:
        name = "index-%s" % tokenize(self, key)
        dsk = partitionwise_graph(operator.getitem, name, self, key)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
        return Series(graph, name, self._meta, self.divisions)
    # Fixed grammar in the user-facing error ("in only" -> "is only").
    raise NotImplementedError(
        "Series getitem is only supported for other series objects "
        "with matching partition structure"
    )
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how="any", subset=None):
    # A Series is single-dtype, so there is nothing to filter out.
    return self

@derived_from(pd.Series)
def iteritems(self):
    # Materialize one partition at a time to bound memory use.
    for part in range(self.npartitions):
        yield from self.get_partition(part).compute().iteritems()

@derived_from(pd.Series)
def __iter__(self):
    # Iterate values partition by partition, computing lazily.
    for part in range(self.npartitions):
        yield from self.get_partition(part).compute()
@classmethod
def _validate_axis(cls, axis=0):
    # A Series only has the index axis; accept its aliases and normalize
    # them to the numeric form.
    if axis not in (0, "index", None):
        raise ValueError("No axis named {0}".format(axis))
    return 0 if axis in (None, "index") else axis
@derived_from(pd.Series)
def groupby(self, by=None, **kwargs):
    from dask.dataframe.groupby import SeriesGroupBy

    return SeriesGroupBy(self, by=by, **kwargs)

@derived_from(pd.Series)
def count(self, split_every=False):
    # Delegates to the shared frame-level reduction.
    return super(Series, self).count(split_every=split_every)

@derived_from(pd.Series, version="0.25.0")
def explode(self):
    meta = self._meta.explode()
    return self.map_partitions(M.explode, meta=meta, enforce_metadata=False)
def unique(self, split_every=None, split_out=1):
    """
    Return Series of unique values in the object. Includes NA values.

    Returns
    -------
    uniques : Series
    """
    # ``unique`` is idempotent, so the same kernel serves as both the
    # per-chunk and the aggregate step of the tree reduction.
    return aca(
        self,
        chunk=methods.unique,
        aggregate=methods.unique,
        meta=self._meta,
        token="unique",
        split_every=split_every,
        series_name=self.name,
        split_out=split_out,
    )

@derived_from(pd.Series)
def nunique(self, split_every=None):
    return self.drop_duplicates(split_every=split_every).count()

@derived_from(pd.Series)
def value_counts(self, split_every=None, split_out=1):
    # Per-chunk counts are combined by summing counts for equal values.
    return aca(
        self,
        chunk=M.value_counts,
        aggregate=methods.value_counts_aggregate,
        combine=methods.value_counts_combine,
        meta=self._meta.value_counts(),
        token="value-counts",
        split_every=split_every,
        split_out=split_out,
        split_out_setup=split_out_on_index,
    )
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
    # Taking the n largest of per-chunk n-largest results is exact, so the
    # same pandas method is used at both levels of the tree reduction.
    return aca(
        self,
        chunk=M.nlargest,
        aggregate=M.nlargest,
        meta=self._meta,
        token="series-nlargest",
        split_every=split_every,
        n=n,
    )

@derived_from(pd.Series)
def nsmallest(self, n=5, split_every=None):
    return aca(
        self,
        chunk=M.nsmallest,
        aggregate=M.nsmallest,
        meta=self._meta,
        token="series-nsmallest",
        split_every=split_every,
        n=n,
    )
@derived_from(pd.Series)
def isin(self, values):
    # Added just to get the different docstring for Series
    return super(Series, self).isin(values)

@insert_meta_param_description(pad=12)
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
    # Mapping with another dask Series requires alignment; delegate.
    if is_series_like(arg) and is_dask_collection(arg):
        return series_map(self, arg)
    if not (
        isinstance(arg, dict)
        or callable(arg)
        or is_series_like(arg)
        and not is_dask_collection(arg)
    ):
        raise TypeError(
            "arg must be pandas.Series, dict or callable."
            " Got {0}".format(type(arg))
        )
    name = "map-" + tokenize(self, arg, na_action)
    # One task per partition, each applying pandas Series.map.
    dsk = {
        (name, i): (M.map, k, arg, na_action)
        for i, k in enumerate(self.__dask_keys__())
    }
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
    if meta is no_default:
        meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)
    else:
        meta = make_meta(meta, index=getattr(make_meta(self), "index", None))
    return Series(graph, name, meta, self.divisions)
# Thin partitionwise wrappers around the matching pandas Series methods.
@derived_from(pd.Series)
def dropna(self):
    return self.map_partitions(M.dropna, enforce_metadata=False)

@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
    return self.map_partitions(
        M.between, left=left, right=right, inclusive=inclusive
    )

@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
    # ``out`` is accepted only for numpy-API compatibility; it must be None.
    if out is not None:
        raise ValueError("'out' must be None")
    # np.clip may pass out
    return self.map_partitions(
        M.clip, lower=lower, upper=upper, enforce_metadata=False
    )

@derived_from(pd.Series)
def clip_lower(self, threshold):
    return self.map_partitions(
        M.clip_lower, threshold=threshold, enforce_metadata=False
    )

@derived_from(pd.Series)
def clip_upper(self, threshold):
    return self.map_partitions(
        M.clip_upper, threshold=threshold, enforce_metadata=False
    )
@derived_from(pd.Series)
def align(self, other, join="outer", axis=None, fill_value=None):
    # Same machinery as the frame-level align; redefined only to attach
    # the pd.Series docstring.
    return super(Series, self).align(
        other, join=join, axis=axis, fill_value=fill_value
    )

@derived_from(pd.Series)
def combine(self, other, func, fill_value=None):
    return self.map_partitions(M.combine, other, func, fill_value=fill_value)

@derived_from(pd.Series)
def squeeze(self):
    # A Series is already one-dimensional; squeezing is a no-op.
    return self

@derived_from(pd.Series)
def combine_first(self, other):
    return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
    """ Create a Dask Bag from a Series """
    from .io import to_bag

    return to_bag(self, index)

@derived_from(pd.Series)
def to_frame(self, name=None):
    return self.map_partitions(M.to_frame, name, meta=self._meta.to_frame(name))

@derived_from(pd.Series)
def to_string(self, max_rows=5):
    # option_context doesn't affect
    return self._repr_data().to_string(max_rows=max_rows)
@classmethod
def _bind_operator_method(cls, name, op, original=pd.Series):
    """ bind operator method like Series.add to this class """

    def meth(self, other, level=None, fill_value=None, axis=0):
        if level is not None:
            raise NotImplementedError("level must be None")
        axis = self._validate_axis(axis)
        # Emulate on metadata first so the output schema is known.
        meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
        return map_partitions(
            op, self, other, meta=meta, axis=axis, fill_value=fill_value
        )

    meth.__name__ = name
    setattr(cls, name, derived_from(original)(meth))

@classmethod
def _bind_comparison_method(cls, name, comparison, original=pd.Series):
    """ bind comparison method like Series.eq to this class """

    def meth(self, other, level=None, fill_value=None, axis=0):
        if level is not None:
            raise NotImplementedError("level must be None")
        axis = self._validate_axis(axis)
        # Without a fill value the comparison is purely elementwise; with
        # one, bind it into the callable first.
        if fill_value is None:
            return elemwise(comparison, self, other, axis=axis)
        else:
            op = partial(comparison, fill_value=fill_value)
            return elemwise(op, self, other, axis=axis)

    meth.__name__ = name
    setattr(cls, name, derived_from(original)(meth))
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):
    """ Parallel version of pandas.Series.apply

    Parameters
    ----------
    func : function
        Function to apply
    convert_dtype : boolean, default True
        Try to find better dtype for elementwise function results.
        If False, leave as dtype=object.
    $META
    args : tuple
        Positional arguments to pass to function in addition to the value.

    Additional keyword arguments will be passed as keywords to the function.

    Returns
    -------
    applied : Series or DataFrame if func returns a Series.

    Examples
    --------
    >>> import dask.dataframe as dd
    >>> s = pd.Series(range(5), name='x')
    >>> ds = dd.from_pandas(s, npartitions=2)

    Apply a function elementwise across the Series, passing in extra
    arguments in ``args`` and ``kwargs``:

    >>> def myadd(x, a, b=1):
    ...     return x + a + b
    >>> res = ds.apply(myadd, args=(2,), b=1.5)  # doctest: +SKIP

    By default, dask tries to infer the output metadata by running your
    provided function on some fake data. This works well in many cases, but
    can sometimes be expensive, or even fail. To avoid this, you can
    manually specify the output metadata with the ``meta`` keyword. This
    can be specified in many forms, for more information see
    ``dask.dataframe.utils.make_meta``.

    Here we specify the output is a Series with name ``'x'``, and dtype
    ``float64``:

    >>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))

    In the case where the metadata doesn't change, you can also pass in
    the object itself directly:

    >>> res = ds.apply(lambda x: x + 1, meta=ds)

    See Also
    --------
    dask.Series.map_partitions
    """
    if meta is no_default:
        # Infer output metadata by running the UDF on fake data, and warn
        # the user that inference happened.
        meta = _emulate(
            M.apply,
            self._meta_nonempty,
            func,
            convert_dtype=convert_dtype,
            args=args,
            udf=True,
            **kwds
        )
        warnings.warn(meta_warning(meta))

    return map_partitions(
        M.apply, self, func, convert_dtype, args, meta=meta, **kwds
    )
@derived_from(pd.Series)
def cov(self, other, min_periods=None, split_every=False):
    from .multi import concat

    if not isinstance(other, Series):
        raise TypeError("other must be a dask.dataframe.Series")
    # Pair the two series as columns and reuse the frame-level kernel,
    # asking for a scalar result.
    df = concat([self, other], axis=1)
    return cov_corr(df, min_periods, scalar=True, split_every=split_every)

@derived_from(pd.Series)
def corr(self, other, method="pearson", min_periods=None, split_every=False):
    from .multi import concat

    if not isinstance(other, Series):
        raise TypeError("other must be a dask.dataframe.Series")
    if method != "pearson":
        raise NotImplementedError("Only Pearson correlation has been implemented")
    df = concat([self, other], axis=1)
    return cov_corr(
        df, min_periods, corr=True, scalar=True, split_every=split_every
    )

@derived_from(pd.Series)
def autocorr(self, lag=1, split_every=False):
    if not isinstance(lag, Integral):
        raise TypeError("lag must be an integer")
    # lag == 0 correlates the series with itself (no shift needed).
    return self.corr(self if lag == 0 else self.shift(lag), split_every=split_every)
@derived_from(pd.Series)
def memory_usage(self, index=True, deep=False):
    # Compute per-partition byte counts and sum them into a lazy scalar.
    per_partition = self.map_partitions(
        M.memory_usage, index=index, deep=deep, enforce_metadata=False
    )
    return delayed(sum)(per_partition.to_delayed())

def __divmod__(self, other):
    # divmod(self, other) == (self // other, self % other), elementwise.
    return self // other, self % other

def __rdivmod__(self, other):
    return other // self, other % self
class Index(Series):
    # Each partition is a pandas Index rather than a Series.
    _partition_type = pd.Index
    _is_partition_type = staticmethod(is_index_like)
    _token_prefix = "index-"
    _accessors = set()

    # Datetime attributes forwarded to the ``dt`` accessor by __getattr__.
    _dt_attributes = {
        "nanosecond",
        "microsecond",
        "millisecond",
        "dayofyear",
        "minute",
        "hour",
        "day",
        "dayofweek",
        "second",
        "week",
        "weekday",
        "weekofyear",
        "month",
        "quarter",
        "year",
    }

    # Categorical attributes forwarded to the ``cat`` accessor by __getattr__.
    _cat_attributes = {
        "known",
        "as_known",
        "as_unknown",
        "add_categories",
        "categories",
        "remove_categories",
        "reorder_categories",
        "as_ordered",
        "codes",
        "remove_unused_categories",
        "set_categories",
        "as_unordered",
        "ordered",
        "rename_categories",
    }
def __getattr__(self, key):
    # Forward categorical/datetime attribute access to the accessors so
    # e.g. ``idx.year`` works directly on a datetime index.
    if is_categorical_dtype(self.dtype) and key in self._cat_attributes:
        return getattr(self.cat, key)
    elif key in self._dt_attributes:
        return getattr(self.dt, key)
    raise AttributeError("'Index' object has no attribute %r" % key)

def __dir__(self):
    out = super(Index, self).__dir__()
    out.extend(self._dt_attributes)
    if is_categorical_dtype(self.dtype):
        out.extend(self._cat_attributes)
    return out

@property
def index(self):
    # An Index has no .index of its own; mirror pandas by raising.
    msg = "'{0}' object has no attribute 'index'"
    raise AttributeError(msg.format(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
    # Wrap ufunc results back into a pandas Index, preserving the name.
    return pd.Index(array, name=self.name)

def head(self, n=5, compute=True):
    """ First n items of the Index.

    Caveat, this only checks the first partition.
    """
    name = "head-%d-%s" % (n, self._name)
    # Single task slicing the first partition only.
    dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])

    result = new_dd_object(graph, name, self._meta, self.divisions[:2])

    if compute:
        result = result.compute()
    return result
@derived_from(pd.Index)
def max(self, split_every=False):
    # Per-partition max reduced to a single scalar.
    return self.reduction(
        M.max,
        meta=self._meta_nonempty.max(),
        token=self._token_prefix + "max",
        split_every=split_every,
    )

@derived_from(pd.Index)
def min(self, split_every=False):
    return self.reduction(
        M.min,
        meta=self._meta_nonempty.min(),
        token=self._token_prefix + "min",
        split_every=split_every,
    )

def count(self, split_every=False):
    # Sum of per-partition element counts.
    return self.reduction(
        methods.index_count,
        np.sum,
        token="index-count",
        meta=int,
        split_every=split_every,
    )
@derived_from(pd.Index)
def shift(self, periods=1, freq=None):
    if isinstance(self._meta, pd.PeriodIndex):
        if freq is not None:
            raise ValueError("PeriodIndex doesn't accept `freq` argument")
        # PeriodIndex.shift takes no freq; emulate on non-empty metadata.
        meta = self._meta_nonempty.shift(periods)
        out = self.map_partitions(
            M.shift, periods, meta=meta, token="shift", transform_divisions=False
        )
    else:
        # Pandas will raise for other index types that don't implement shift
        meta = self._meta_nonempty.shift(periods, freq=freq)
        out = self.map_partitions(
            M.shift,
            periods,
            token="shift",
            meta=meta,
            freq=freq,
            transform_divisions=False,
        )
    if freq is None:
        # Fall back on the frequency inferred from the shifted metadata.
        freq = meta.freq
    return maybe_shift_divisions(out, periods, freq=freq)
@derived_from(pd.Index)
def to_series(self):
    return self.map_partitions(M.to_series, meta=self._meta.to_series())

@derived_from(pd.Index, ua_args=["index"])
def to_frame(self, index=True, name=None):
    if not index:
        raise NotImplementedError()

    # The ``name`` keyword only exists on pandas >= 0.24.
    if PANDAS_VERSION >= "0.24.0":
        return self.map_partitions(
            M.to_frame, index, name, meta=self._meta.to_frame(index, name)
        )
    else:
        if name is not None:
            raise ValueError(
                "The 'name' keyword was added in pandas 0.24.0. "
                "Your version of pandas is '{}'.".format(PANDAS_VERSION)
            )
        else:
            return self.map_partitions(M.to_frame, meta=self._meta.to_frame())
class DataFrame(_Frame):
    """
    Parallel Pandas DataFrame

    Do not use this class directly. Instead use functions like
    ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.

    Parameters
    ----------
    dsk: dict
        The dask graph to compute this DataFrame
    name: str
        The key prefix that specifies which keys in the dask comprise this
        particular DataFrame
    meta: pandas.DataFrame
        An empty ``pandas.DataFrame`` with names, dtypes, and index matching
        the expected output.
    divisions: tuple of index values
        Values along which we partition our blocks on the index
    """

    # pandas type backing each partition and the predicate recognizing it
    _partition_type = pd.DataFrame
    _is_partition_type = staticmethod(is_dataframe_like)
    # prefix for graph-key names produced by this class
    _token_prefix = "dataframe-"
    # names of registered custom accessors
    _accessors = set()
def __array_wrap__(self, array, context=None):
    # Re-wrap a numpy ufunc result as a pandas DataFrame with this frame's
    # columns; a scalar-shaped operand carries no index.
    # NOTE(review): ``index`` is unbound when ``context`` is not a
    # non-empty tuple — relies on numpy always supplying one; confirm.
    if isinstance(context, tuple) and len(context) > 0:
        if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
            index = None
        else:
            index = context[1][0].index
    return pd.DataFrame(array, index=index, columns=self.columns)

@property
def columns(self):
    return self._meta.columns

@columns.setter
def columns(self, columns):
    # Renaming columns changes the metadata and the graph keys.
    renamed = _rename_dask(self, columns)
    self._meta = renamed._meta
    self._name = renamed._name
    self.dask = renamed.dask
@property
def iloc(self):
    """Purely integer-location based indexing for selection by position.

    Only indexing the column positions is supported. Trying to select
    row positions will raise a ValueError.

    See :ref:`dataframe.indexing` for more.

    Examples
    --------
    >>> df.iloc[:, [2, 0, 1]]  # doctest: +SKIP
    """
    from .indexing import _iLocIndexer

    return _iLocIndexer(self)

def __len__(self):
    try:
        s = self[self.columns[0]]
    except IndexError:
        # No columns: fall back to the generic length (index-based).
        return super().__len__()
    else:
        # Length of the first column equals the number of rows.
        return len(s)
@property
def empty(self):
    # Deliberately unsupported: answering truthfully would require
    # computing the row count, which can be expensive.
    raise NotImplementedError(
        "Checking whether a Dask DataFrame has any rows may be expensive. "
        "However, checking the number of columns is fast. "
        "Depending on which of these results you need, use either "
        "`len(df.index) == 0` or `len(df.columns) == 0`"
    )
def __getitem__(self, key):
    name = "getitem-%s" % tokenize(self, key)

    if np.isscalar(key) or isinstance(key, (tuple, str)):
        # Scalar/label key: on a datetime/period index, a non-column label
        # is treated as a row-label lookup instead.
        if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
            if key not in self._meta.columns:
                return self.loc[key]

        # error is raised from pandas
        meta = self._meta[_extract_meta(key)]
        dsk = partitionwise_graph(operator.getitem, name, self, key)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        return new_dd_object(graph, name, meta, self.divisions)
    elif isinstance(key, slice):
        from pandas.api.types import is_float_dtype

        is_integer_slice = any(
            isinstance(i, Integral) for i in (key.start, key.step, key.stop)
        )
        # Slicing with integer labels is always iloc based except for a
        # float indexer for some reason
        if is_integer_slice and not is_float_dtype(self.index.dtype):
            # NOTE(review): no ``return`` here — presumably intentional,
            # letting iloc raise on row-position slicing; confirm.
            self.iloc[key]
        else:
            return self.loc[key]

    if isinstance(key, (np.ndarray, list)) or (
        not is_dask_collection(key) and (is_series_like(key) or is_index_like(key))
    ):
        # Concrete list/array of column labels.
        # error is raised from pandas
        meta = self._meta[_extract_meta(key)]
        dsk = partitionwise_graph(operator.getitem, name, self, key)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        return new_dd_object(graph, name, meta, self.divisions)
    if isinstance(key, Series):
        # Boolean-mask selection; align partitions if they differ.
        # do not perform dummy calculation, as columns will not be changed.
        #
        if self.divisions != key.divisions:
            from .multi import _maybe_align_partitions

            self, key = _maybe_align_partitions([self, key])
        dsk = partitionwise_graph(operator.getitem, name, self, key)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
        return new_dd_object(graph, name, self, self.divisions)
    raise NotImplementedError(key)
def __setitem__(self, key, value):
    if isinstance(key, (tuple, list)) and isinstance(value, DataFrame):
        # Assign several columns at once from another DataFrame.
        df = self.assign(**{k: value[c] for k, c in zip(key, value.columns)})

    elif isinstance(key, pd.Index) and not isinstance(value, DataFrame):
        # Broadcast one value to every column named in the Index.
        key = list(key)
        df = self.assign(**{k: value for k in key})
    else:
        df = self.assign(**{key: value})

    # In-place update: adopt the new collection's graph and metadata.
    self.dask = df.dask
    self._name = df._name
    self._meta = df._meta
    self.divisions = df.divisions

def __delitem__(self, key):
    # ``del df[col]`` == drop the column and update in place.
    result = self.drop([key], axis=1)
    self.dask = result.dask
    self._name = result._name
    self._meta = result._meta
def __setattr__(self, key, value):
    try:
        columns = object.__getattribute__(self, "_meta").columns
    except AttributeError:
        # During construction ``_meta`` may not exist yet.
        columns = ()

    # Assigning to an existing column name writes the column, not an attr.
    if key in columns:
        self[key] = value
    else:
        object.__setattr__(self, key, value)

def __getattr__(self, key):
    # Attribute access falls back to column lookup, like pandas.
    if key in self.columns:
        return self[key]
    else:
        raise AttributeError("'DataFrame' object has no attribute %r" % key)

def __dir__(self):
    o = set(dir(type(self)))
    o.update(self.__dict__)
    # Only identifier-like string column names can be attribute-completed.
    o.update(c for c in self.columns if (isinstance(c, str) and c.isidentifier()))
    return list(o)
def __iter__(self):
    # Like pandas, iterating a DataFrame yields the column names —
    # metadata only, no computation.
    return iter(self._meta)

def _ipython_key_completions_(self):
    # Column names for IPython's ``df["<TAB>"]`` completion.
    return self.columns.tolist()

@property
def ndim(self):
    """ Return dimensionality """
    return 2
@property
def shape(self):
    """
    Return a tuple representing the dimensionality of the DataFrame.

    The number of rows is a Delayed result. The number of columns
    is a concrete integer.

    Examples
    --------
    >>> df.size  # doctest: +SKIP
    (Delayed('int-07f06075-5ecc-4d77-817e-63c69a9188a8'), 2)
    """
    col_size = len(self.columns)
    # size == rows * cols, so the lazy row count is size / col_size.
    row_size = delayed(int)(self.size / col_size)
    return (row_size, col_size)

@property
def dtypes(self):
    """ Return data types """
    return self._meta.dtypes
@derived_from(pd.DataFrame)
def get_dtype_counts(self):
    # Metadata-only; no computation required.
    return self._meta.get_dtype_counts()

@derived_from(pd.DataFrame)
def get_ftype_counts(self):
    return self._meta.get_ftype_counts()

@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
    # Column selection is decided from the metadata alone.
    cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
    return self[list(cs)]
def set_index(
    self,
    other,
    drop=True,
    sorted=False,
    npartitions=None,
    divisions=None,
    inplace=False,
    **kwargs
):
    """Set the DataFrame index (row labels) using an existing column.

    This realigns the dataset to be sorted by a new column. This can have a
    significant impact on performance, because joins, groupbys, lookups, etc.
    are all much faster on that column. However, this performance increase
    comes with a cost, sorting a parallel dataset requires expensive shuffles.
    Often we ``set_index`` once directly after data ingest and filtering and
    then perform many cheap computations off of the sorted dataset.

    This function operates exactly like ``pandas.set_index`` except with
    different performance costs (dask dataframe ``set_index`` is much more expensive). Under normal
    operation this function does an initial pass over the index column to
    compute approximate quantiles to serve as future divisions. It then passes
    over the data a second time, splitting up each input partition into several
    pieces and sharing those pieces to all of the output partitions now in
    sorted order.

    In some cases we can alleviate those costs, for example if your dataset is
    sorted already then we can avoid making many small pieces or if you know
    good values to split the new index column then we can avoid the initial
    pass over the data. For example if your new index is a datetime index and
    your data is already sorted by day then this entire operation can be done
    for free. You can control these options with the following parameters.

    Parameters
    ----------
    other: string or Dask Series
        The column (or Series) to use as the new index.
    drop: boolean, default True
        Whether to delete the column used as the new index.
    npartitions: int, None, or 'auto'
        The ideal number of output partitions. If None use the same as
        the input. If 'auto' then decide by memory use.
    shuffle: string, optional
        Either ``'disk'`` for single-node operation or ``'tasks'`` for
        distributed operation. Will be inferred by your current scheduler.
    sorted: bool, optional
        If the index column is already sorted in increasing order.
        Defaults to False
    divisions: list, optional
        Known values on which to separate index values of the partitions.
        See https://docs.dask.org/en/latest/dataframe-design.html#partitions
        Defaults to computing this with a single pass over the data. Note
        that if ``sorted=True``, specified divisions are assumed to match
        the existing partitions in the data. If ``sorted=False``, you should
        leave divisions empty and call ``repartition`` after ``set_index``.
    inplace : bool, optional
        Modifying the DataFrame in place is not supported by Dask.
        Defaults to False.
    compute: bool
        Whether or not to trigger an immediate computation. Defaults to False.
        Note, that even if you set ``compute=False``, an immediate computation
        will still be triggered if ``divisions`` is ``None``.

    Examples
    --------
    >>> df2 = df.set_index('x')  # doctest: +SKIP
    >>> df2 = df.set_index(d.x)  # doctest: +SKIP
    >>> df2 = df.set_index(d.timestamp, sorted=True)  # doctest: +SKIP

    A common case is when we have a datetime column that we know to be
    sorted and is cleanly divided by day. We can set this index for free
    by specifying both that the column is pre-sorted and the particular
    divisions along which it is separated

    >>> import pandas as pd
    >>> divisions = pd.date_range('2000', '2010', freq='1D')
    >>> df2 = df.set_index('timestamp', sorted=True, divisions=divisions)  # doctest: +SKIP
    """
    if inplace:
        raise NotImplementedError("The inplace= keyword is not supported")
    # ``sorted`` shadows the builtin; stash the flag and remove the name.
    pre_sorted = sorted
    del sorted

    if divisions is not None:
        check_divisions(divisions)

    if pre_sorted:
        # Already sorted: just adopt (or compute) divisions, no shuffle.
        from .shuffle import set_sorted_index

        return set_sorted_index(
            self, other, drop=drop, divisions=divisions, **kwargs
        )
    else:
        # General case: full shuffle-based set_index.
        from .shuffle import set_index

        return set_index(
            self,
            other,
            drop=drop,
            npartitions=npartitions,
            divisions=divisions,
            **kwargs
        )
@derived_from(pd.DataFrame)
def pop(self, item):
    """Return column ``item`` and remove it from this frame in place."""
    column = self[item]
    del self[item]
    return column
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
    # Taking the nlargest of per-partition nlargest results is exact,
    # so the same pandas method serves as both chunk and aggregate in
    # the tree reduction.
    return aca(
        self,
        chunk=M.nlargest,
        aggregate=M.nlargest,
        meta=self._meta,
        token="dataframe-nlargest",
        split_every=split_every,
        n=n,
        columns=columns,
    )
@derived_from(pd.DataFrame)
def nsmallest(self, n=5, columns=None, split_every=None):
    # Mirror of nlargest: the reduction is exact because nsmallest of
    # concatenated per-partition nsmallest results equals the global one.
    return aca(
        self,
        chunk=M.nsmallest,
        aggregate=M.nsmallest,
        meta=self._meta,
        token="dataframe-nsmallest",
        split_every=split_every,
        n=n,
        columns=columns,
    )
@derived_from(pd.DataFrame)
def groupby(self, by=None, **kwargs):
    # Imported here to avoid a circular import at module load time.
    from dask.dataframe.groupby import DataFrameGroupBy

    return DataFrameGroupBy(self, by=by, **kwargs)
@wraps(categorize)
def categorize(self, columns=None, index=None, split_every=None, **kwargs):
    # Inside the method body the name ``categorize`` resolves to the
    # module-level function (class namespace is not in scope here), so
    # this simply forwards to it; docstring is inherited via @wraps.
    return categorize(
        self, columns=columns, index=index, split_every=split_every, **kwargs
    )
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
    # Validate and normalize each new column value, then apply all
    # assignments elementwise across aligned partitions.
    for k, v in kwargs.items():
        # Accept scalar-like, series-like, callable, index-like values
        # and 1-D dask Arrays; everything else is rejected up front.
        if not (
            isinstance(v, Scalar)
            or is_series_like(v)
            or callable(v)
            or pd.api.types.is_scalar(v)
            or is_index_like(v)
            or isinstance(v, Array)
        ):
            raise TypeError(
                "Column assignment doesn't support type "
                "{0}".format(typename(type(v)))
            )
        if callable(v):
            # pandas semantics: callables are evaluated against self.
            kwargs[k] = v(self)
        if isinstance(v, Array):
            # NOTE(review): this checks the *original* v, so an Array
            # returned by a callable above is not realigned — confirm
            # that is intended.
            from .io import from_dask_array

            if len(v.shape) > 1:
                raise ValueError("Array assignment only supports 1-D arrays")
            if v.npartitions != self.npartitions:
                raise ValueError(
                    "Number of partitions do not match ({0} != {1})".format(
                        v.npartitions, self.npartitions
                    )
                )
            # Wrap the array as a Series aligned with our index.
            kwargs[k] = from_dask_array(v, index=self.index)
    # Flatten to (key1, val1, key2, val2, ...) for elemwise.
    pairs = list(sum(kwargs.items(), ()))
    # Figure out columns of the output
    df2 = self._meta_nonempty.assign(**_extract_meta(kwargs, nonempty=True))
    return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame, ua_args=["index"])
def rename(self, index=None, columns=None):
    # Renaming the index would require touching every partition's index
    # and the divisions; only column renames are supported.
    if index is not None:
        raise ValueError("Cannot rename index.")
    # *args here is index, columns but columns arg is already used
    return self.map_partitions(M.rename, None, columns=columns)
def query(self, expr, **kwargs):
    """ Filter dataframe with complex expression

    Blocked version of pd.DataFrame.query

    This is like the sequential version except that this will also happen
    in many threads. This may conflict with ``numexpr`` which will use
    multiple threads itself. We recommend that you set numexpr to use a
    single thread

        import numexpr
        numexpr.set_num_threads(1)

    See also
    --------
    pandas.DataFrame.query
    """
    # Row filtering is partition-local, so a plain map_partitions works.
    return self.map_partitions(M.query, expr, **kwargs)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
    """Evaluate ``expr`` on each partition with ``pandas.eval`` semantics.

    Parameters
    ----------
    expr : str
        The expression to evaluate. Assignment expressions (containing
        ``=``) are only supported with ``inplace=False``.
    inplace : bool, optional
        In-place evaluation is not supported; ``None`` (the pandas
        default) is normalized to ``False``.
    """
    # Normalize the pandas-style default first, so the guard below only
    # needs to test the explicit True case. (The previous check of
    # ``inplace in (True, None)`` was partially dead: inplace can never
    # be None after this normalization.)
    if inplace is None:
        inplace = False
    if "=" in expr and inplace:
        raise NotImplementedError(
            "Inplace eval not supported. Please use inplace=False"
        )
    # Evaluate on the metadata to derive the output schema.
    meta = self._meta.eval(expr, inplace=inplace, **kwargs)
    return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how="any", subset=None, thresh=None):
    # Row-wise dropna is embarrassingly parallel across partitions; the
    # schema is unchanged, so metadata enforcement can be skipped.
    return self.map_partitions(
        M.dropna, how=how, subset=subset, thresh=thresh, enforce_metadata=False
    )
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None, out=None):
    # The ndarray-style ``out=`` target is not meaningful for a lazy
    # collection; it is accepted only for signature compatibility.
    if out is not None:
        raise ValueError("'out' must be None")
    return self.map_partitions(
        M.clip, lower=lower, upper=upper, enforce_metadata=False
    )
@derived_from(pd.DataFrame)
def clip_lower(self, threshold):
    # Partition-wise elementwise clip; schema unchanged.
    # NOTE(review): clip_lower is deprecated/removed in newer pandas —
    # confirm the supported pandas range.
    return self.map_partitions(
        M.clip_lower, threshold=threshold, enforce_metadata=False
    )
@derived_from(pd.DataFrame)
def clip_upper(self, threshold):
    # Partition-wise elementwise clip; schema unchanged.
    # NOTE(review): clip_upper is deprecated/removed in newer pandas —
    # confirm the supported pandas range.
    return self.map_partitions(
        M.clip_upper, threshold=threshold, enforce_metadata=False
    )
@derived_from(pd.DataFrame)
def squeeze(self, axis=None):
    """Collapse a one-column frame to a Series (axis=1 or None only)."""
    if axis == 0:
        # Squeezing rows would require knowing the global length.
        raise NotImplementedError(
            "{0} does not support squeeze along axis 0".format(type(self))
        )
    if axis in (None, 1):
        # Collapse only when there is exactly one column.
        return self[self.columns[0]] if len(self.columns) == 1 else self
    raise ValueError("No axis {0} for object type {1}".format(axis, type(self)))
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how="start", axis=0):
    df = elemwise(M.to_timestamp, self, freq, how, axis)
    # The divisions are period values too; convert them in lockstep so
    # the result keeps known, consistent divisions.
    df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
    return df
@derived_from(pd.DataFrame, version="0.25.0")
def explode(self, column):
    # Derive the output schema from the metadata; explode is
    # partition-local (it only lengthens partitions).
    meta = self._meta.explode(column)
    return self.map_partitions(M.explode, column, meta=meta, enforce_metadata=False)
def to_bag(self, index=False):
    """Convert to a dask Bag of tuples of each row.

    Parameters
    ----------
    index : bool, optional
        If True, the index is included as the first element of each tuple.
        Default is False.
    """
    # Delegates to the io helper; imported lazily to avoid cycles.
    from .io import to_bag

    return to_bag(self, index)
def to_parquet(self, path, *args, **kwargs):
    """ See dd.to_parquet docstring for more information """
    # Thin pass-through wrapper; all options handled by the io function.
    from .io import to_parquet

    return to_parquet(self, path, *args, **kwargs)
@derived_from(pd.DataFrame)
def to_string(self, max_rows=5):
    # option_context doesn't affect
    # Renders the *structure* (dtypes per division), not the data.
    return self._repr_data().to_string(max_rows=max_rows, show_dimensions=False)
def _get_numeric_data(self, how="any", subset=None):
    """Select the numeric columns, skipping work when all are numeric."""
    # Decide from the metadata alone which columns survive.
    numerics = self._meta._get_numeric_data()
    if len(numerics.columns) == len(self.columns):
        # Everything is numeric already; no new collection needed.
        return self
    token = self._token_prefix + "-get_numeric_data"
    return self.map_partitions(M._get_numeric_data, meta=numerics, token=token)
@classmethod
def _validate_axis(cls, axis=0):
    """Normalize ``axis`` to the numeric form (0 or 1), raising on
    unrecognized values."""
    numeric_axis = {None: 0, "index": 0, "columns": 1, 0: 0, 1: 1}
    if axis not in numeric_axis:
        raise ValueError("No axis named {0}".format(axis))
    return numeric_axis[axis]
@derived_from(pd.DataFrame)
def drop(self, labels=None, axis=0, columns=None, errors="raise"):
    axis = self._validate_axis(axis)
    if (axis == 1) or (columns is not None):
        # Column drops are metadata-cheap: shallow-copy each partition
        # without the dropped columns.
        # NOTE(review): ``columns or labels`` falls back to ``labels``
        # for any falsy ``columns`` (e.g. an empty list) — confirm that
        # edge case is intended.
        return self.map_partitions(
            drop_by_shallow_copy, columns or labels, errors=errors
        )
    # Row drops would need a global view of the index.
    raise NotImplementedError(
        "Drop currently only works for axis=1 or when columns is not None"
    )
def merge(
    self,
    right,
    how="inner",
    on=None,
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    suffixes=("_x", "_y"),
    indicator=False,
    npartitions=None,
    shuffle=None,
):
    """Merge the DataFrame with another DataFrame

    This will merge the two datasets, either on the indices, a certain column
    in each dataset or the index in one dataset and the column in another.

    Parameters
    ----------
    right: dask.dataframe.DataFrame
    how : {'left', 'right', 'outer', 'inner'}, default: 'inner'
        How to handle the operation of the two objects:

        - left: use calling frame's index (or column if on is specified)
        - right: use other frame's index
        - outer: form union of calling frame's index (or column if on is
          specified) with other frame's index, and sort it
          lexicographically
        - inner: form intersection of calling frame's index (or column if
          on is specified) with other frame's index, preserving the order
          of the calling's one

    on : label or list
        Column or index level names to join on. These must be found in both
        DataFrames. If on is None and not merging on indexes then this
        defaults to the intersection of the columns in both DataFrames.
    left_on : label or list, or array-like
        Column to join on in the left DataFrame. Other than in pandas
        arrays and lists are only supported if their length is 1.
    right_on : label or list, or array-like
        Column to join on in the right DataFrame. Other than in pandas
        arrays and lists are only supported if their length is 1.
    left_index : boolean, default False
        Use the index from the left DataFrame as the join key.
    right_index : boolean, default False
        Use the index from the right DataFrame as the join key.
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and
        right side, respectively
    indicator : boolean or string, default False
        If True, adds a column to output DataFrame called "_merge" with
        information on the source of each row. If string, column with
        information on source of each row will be added to output DataFrame,
        and column will be named value of string. Information column is
        Categorical-type and takes on a value of "left_only" for observations
        whose merge key only appears in `left` DataFrame, "right_only" for
        observations whose merge key only appears in `right` DataFrame,
        and "both" if the observation's merge key is found in both.
    npartitions: int or None, optional
        The ideal number of output partitions. This is only utilised when
        performing a hash_join (merging on columns only). If ``None`` then
        ``npartitions = max(lhs.npartitions, rhs.npartitions)``.
        Default is ``None``.
    shuffle: {'disk', 'tasks'}, optional
        Either ``'disk'`` for single-node operation or ``'tasks'`` for
        distributed operation. Will be inferred by your current scheduler.

    Notes
    -----
    There are three ways to join dataframes:

    1. Joining on indices. In this case the divisions are
       aligned using the function ``dask.dataframe.multi.align_partitions``.
       Afterwards, each partition is merged with the pandas merge function.
    2. Joining one on index and one on column. In this case the divisions of
       dataframe merged by index (:math:`d_i`) are used to divide the column
       merged dataframe (:math:`d_c`) one using
       ``dask.dataframe.multi.rearrange_by_divisions``. In this case the
       merged dataframe (:math:`d_m`) has the exact same divisions
       as (:math:`d_i`). This can lead to issues if you merge multiple rows from
       (:math:`d_c`) to one row in (:math:`d_i`).
    3. Joining both on columns. In this case a hash join is performed using
       ``dask.dataframe.multi.hash_join``.
    """
    if not is_dataframe_like(right):
        raise ValueError("right must be DataFrame")

    # All strategy selection (index-join vs hash-join etc.) lives in
    # dask.dataframe.multi.merge; this method only validates and forwards.
    from .multi import merge

    return merge(
        self,
        right,
        how=how,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        suffixes=suffixes,
        npartitions=npartitions,
        indicator=indicator,
        shuffle=shuffle,
    )
@derived_from(pd.DataFrame)
def join(
    self,
    other,
    on=None,
    how="left",
    lsuffix="",
    rsuffix="",
    npartitions=None,
    shuffle=None,
):
    """Join on the index (or ``on`` column against other's index) by
    delegating to the multi-dataframe merge machinery."""
    if not is_dataframe_like(other):
        raise ValueError("other must be DataFrame")

    from .multi import merge

    # pandas.join semantics: left keys come from ``on`` if given,
    # otherwise from our index; right keys always come from the index.
    join_kwargs = dict(
        how=how,
        left_index=on is None,
        right_index=True,
        left_on=on,
        suffixes=[lsuffix, rsuffix],
        npartitions=npartitions,
        shuffle=shuffle,
    )
    return merge(self, other, **join_kwargs)
@derived_from(pd.DataFrame)
def append(self, other, interleave_partitions=False):
    """Append rows of ``other`` below this DataFrame.

    Raises
    ------
    ValueError
        If ``other`` is a dask Series: appending a lazy Series as a row
        is ambiguous; use an in-memory ``pd.Series`` instead.
    """
    if isinstance(other, Series):
        # Fix: the two adjacent string literals previously concatenated
        # without a separating space ("...dd.DataFrame.Use pd.Series...")
        # and used wrong grammar ("Unable to appending").
        msg = (
            "Unable to append dd.Series to dd.DataFrame. "
            "Use pd.Series to append as row."
        )
        raise ValueError(msg)
    elif is_series_like(other):
        # An in-memory series appends as a single row: frame transpose.
        other = other.to_frame().T
    return super(DataFrame, self).append(
        other, interleave_partitions=interleave_partitions
    )
@derived_from(pd.DataFrame)
def iterrows(self):
    # Materialize one partition at a time so memory use stays bounded
    # by the largest partition, not the whole frame.
    for part_idx in range(self.npartitions):
        pdf = self.get_partition(part_idx).compute()
        yield from pdf.iterrows()
@derived_from(pd.DataFrame)
def itertuples(self, index=True, name="Pandas"):
    # Stream partition by partition; each partition's tuples are yielded
    # before the next partition is computed.
    for part_idx in range(self.npartitions):
        pdf = self.get_partition(part_idx).compute()
        yield from pdf.itertuples(index=index, name=name)
@classmethod
def _bind_operator_method(cls, name, op, original=pd.DataFrame):
    """ bind operator method like DataFrame.add to this class """
    # name must be explicitly passed for div method whose name is truediv

    def meth(self, other, axis="columns", level=None, fill_value=None):
        # MultiIndex levels are not supported in dask.
        if level is not None:
            raise NotImplementedError("level must be None")

        axis = self._validate_axis(axis)

        if axis in (1, "columns"):
            # When axis=1 and other is a series, `other` is transposed
            # and the operator is applied broadcast across rows. This
            # isn't supported with dd.Series.
            if isinstance(other, Series):
                msg = "Unable to {0} dd.Series with axis=1".format(name)
                raise ValueError(msg)
            elif is_series_like(other):
                # Special case for pd.Series to avoid unwanted partitioning
                # of other. We pass it in as a kwarg to prevent this.
                meta = _emulate(
                    op, self, other=other, axis=axis, fill_value=fill_value
                )
                return map_partitions(
                    op,
                    self,
                    other=other,
                    meta=meta,
                    axis=axis,
                    fill_value=fill_value,
                    enforce_metadata=False,
                )

        # General case: align partitions and apply the operator blockwise.
        meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
        return map_partitions(
            op,
            self,
            other,
            meta=meta,
            axis=axis,
            fill_value=fill_value,
            enforce_metadata=False,
        )

    # Install under the requested name with pandas-derived documentation.
    meth.__name__ = name
    setattr(cls, name, derived_from(original)(meth))
@classmethod
def _bind_comparison_method(cls, name, comparison, original=pd.DataFrame):
    """ bind comparison method like DataFrame.eq to this class """

    def meth(self, other, axis="columns", level=None):
        # MultiIndex levels are not supported in dask.
        if level is not None:
            raise NotImplementedError("level must be None")
        return elemwise(comparison, self, other, axis=self._validate_axis(axis))

    # Install under the requested name with pandas-derived documentation.
    meth.__name__ = name
    setattr(cls, name, derived_from(original)(meth))
@insert_meta_param_description(pad=12)
def apply(
    self,
    func,
    axis=0,
    broadcast=None,
    raw=False,
    reduce=None,
    args=(),
    meta=no_default,
    **kwds
):
    """ Parallel version of pandas.DataFrame.apply

    This mimics the pandas version except for the following:

    1. Only ``axis=1`` is supported (and must be specified explicitly).
    2. The user should provide output metadata via the `meta` keyword.

    Parameters
    ----------
    func : function
        Function to apply to each column/row
    axis : {0 or 'index', 1 or 'columns'}, default 0
        - 0 or 'index': apply function to each column (NOT SUPPORTED)
        - 1 or 'columns': apply function to each row
    $META
    args : tuple
        Positional arguments to pass to function in addition to the array/series

    Additional keyword arguments will be passed as keywords to the function

    Returns
    -------
    applied : Series or DataFrame

    Examples
    --------
    >>> import dask.dataframe as dd
    >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
    ...                    'y': [1., 2., 3., 4., 5.]})
    >>> ddf = dd.from_pandas(df, npartitions=2)

    Apply a function to row-wise passing in extra arguments in ``args`` and
    ``kwargs``:

    >>> def myadd(row, a, b=1):
    ...     return row.sum() + a + b
    >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)  # doctest: +SKIP

    By default, dask tries to infer the output metadata by running your
    provided function on some fake data. This works well in many cases, but
    can sometimes be expensive, or even fail. To avoid this, you can
    manually specify the output metadata with the ``meta`` keyword. This
    can be specified in many forms, for more information see
    ``dask.dataframe.utils.make_meta``.

    Here we specify the output is a Series with name ``'x'``, and dtype
    ``float64``:

    >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))

    In the case where the metadata doesn't change, you can also pass in
    the object itself directly:

    >>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)

    See Also
    --------
    dask.DataFrame.map_partitions
    """
    axis = self._validate_axis(axis)
    pandas_kwargs = {"axis": axis, "raw": raw}

    # Forward version-dependent keywords only when pandas understands them.
    if PANDAS_VERSION >= "0.23.0":
        kwds.setdefault("result_type", None)

    if not PANDAS_GT_100:
        # broadcast/reduce were removed in pandas 1.0.
        pandas_kwargs["broadcast"] = broadcast
        pandas_kwargs["reduce"] = None

    kwds.update(pandas_kwargs)

    if axis == 0:
        msg = (
            "dd.DataFrame.apply only supports axis=1\n"
            "  Try: df.apply(func, axis=1)"
        )
        raise NotImplementedError(msg)

    if meta is no_default:
        # Infer the output schema by running func on non-empty fake data;
        # warn so users know to pass meta= for performance/reliability.
        meta = _emulate(
            M.apply, self._meta_nonempty, func, args=args, udf=True, **kwds
        )
        warnings.warn(meta_warning(meta))

    return map_partitions(M.apply, self, func, args=args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta="__no_default__"):
    # Elementwise over every cell; schema inferred unless meta is given.
    return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
    # Pure elementwise operation; partition alignment handled by elemwise.
    return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None, split_every=False):
    # Shared implementation with corr; corr=False is the default.
    return cov_corr(self, min_periods, split_every=split_every)
@derived_from(pd.DataFrame)
def corr(self, method="pearson", min_periods=None, split_every=False):
    # Spearman/Kendall would require a global ranking step.
    if method != "pearson":
        raise NotImplementedError("Only Pearson correlation has been implemented")
    # Third positional argument selects correlation (vs covariance).
    return cov_corr(self, min_periods, True, split_every=split_every)
def info(self, buf=None, verbose=False, memory_usage=False):
    """
    Concise summary of a Dask DataFrame.

    Parameters
    ----------
    buf : writable buffer, optional
        Where to write the summary; defaults to ``sys.stdout``.
    verbose : bool, default False
        If True, compute and show per-column non-null counts (triggers
        a computation over the data).
    memory_usage : bool, default False
        If True, compute and show total memory usage (also triggers a
        computation).
    """
    if buf is None:
        import sys

        buf = sys.stdout

    lines = [str(type(self))]

    if len(self.columns) == 0:
        lines.append("Index: 0 entries")
        lines.append("Empty %s" % type(self).__name__)
        put_lines(buf, lines)
        return

    # Group and execute the required computations
    computations = {}
    if verbose:
        computations.update({"index": self.index, "count": self.count()})
    if memory_usage:
        computations.update(
            {"memory_usage": self.map_partitions(M.memory_usage, index=True)}
        )
    # Single compute call so the graph is only traversed once.
    computations = dict(
        zip(computations.keys(), da.compute(*computations.values()))
    )

    if verbose:
        import textwrap

        index = computations["index"]
        counts = computations["count"]
        lines.append(index_summary(index))
        lines.append("Data columns (total {} columns):".format(len(self.columns)))

        from pandas.io.formats.printing import pprint_thing

        # Column name field width: widest name plus one, minimum 7.
        space = max([len(pprint_thing(k)) for k in self.columns]) + 1
        column_width = max(space, 7)

        # Two-stage format: first bake in the width, then the header text.
        header = (
            textwrap.dedent(
                """\
         #   {{column:<{column_width}}} Non-Null Count  Dtype
        ---  {{underl:<{column_width}}} --------------  -----"""
            )
            .format(column_width=column_width)
            .format(column="Column", underl="------")
        )
        column_template = textwrap.dedent(
            """\
        {{i:^3}}  {{name:<{column_width}}} {{count}} non-null      {{dtype}}""".format(
                column_width=column_width
            )
        )
        column_info = [
            column_template.format(
                i=pprint_thing(i),
                name=pprint_thing(name),
                count=pprint_thing(count),
                dtype=pprint_thing(dtype),
            )
            for i, (name, count, dtype) in enumerate(
                zip(self.columns, counts, self.dtypes)
            )
        ]
        lines.extend(header.split("\n"))
    else:
        column_info = [index_summary(self.columns, name="Columns")]

    lines.extend(column_info)
    # Summarize dtype frequencies, e.g. "float64(2), int64(1)".
    dtype_counts = [
        "%s(%d)" % k
        for k in sorted(self.dtypes.value_counts().iteritems(), key=str)
    ]
    lines.append("dtypes: {}".format(", ".join(dtype_counts)))

    if memory_usage:
        memory_int = computations["memory_usage"].sum()
        lines.append("memory usage: {}\n".format(memory_repr(memory_int)))

    put_lines(buf, lines)
@derived_from(pd.DataFrame)
def memory_usage(self, index=True, deep=False):
    # Per-partition usage first; column labels repeat once per partition,
    # so group by label and sum to get the frame-wide totals.
    per_partition = self.map_partitions(M.memory_usage, index=index, deep=deep)
    return per_partition.groupby(per_partition.index).sum()
def pivot_table(self, index=None, columns=None, values=None, aggfunc="mean"):
    """
    Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
    must have category dtype to infer result's ``columns``.
    ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.

    Parameters
    ----------
    values : scalar
        column to aggregate
    index : scalar
        column to be index
    columns : scalar
        column to be columns
    aggfunc : {'mean', 'sum', 'count'}, default 'mean'

    Returns
    -------
    table : DataFrame
    """
    # Implementation lives in the reshape module; this is a forwarder.
    from .reshape import pivot_table

    return pivot_table(
        self, index=index, columns=columns, values=values, aggfunc=aggfunc
    )
def melt(
    self,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
):
    """
    Unpivots a DataFrame from wide format to long format,
    optionally leaving identifier variables set.

    This function is useful to massage a DataFrame into a format where
    one or more columns are identifier variables (``id_vars``), while
    all other columns, considered measured variables (``value_vars``),
    are "unpivoted" to the row axis, leaving just two non-identifier
    columns, 'variable' and 'value'.

    Parameters
    ----------
    frame : DataFrame
    id_vars : tuple, list, or ndarray, optional
        Column(s) to use as identifier variables.
    value_vars : tuple, list, or ndarray, optional
        Column(s) to unpivot. If not specified, uses all columns that
        are not set as `id_vars`.
    var_name : scalar
        Name to use for the 'variable' column. If None it uses
        ``frame.columns.name`` or 'variable'.
    value_name : scalar, default 'value'
        Name to use for the 'value' column.
    col_level : int or string, optional
        If columns are a MultiIndex then use this level to melt.

    Returns
    -------
    DataFrame
        Unpivoted DataFrame.

    See Also
    --------
    pandas.DataFrame.melt
    """
    # Implementation lives in the reshape module; this is a forwarder.
    from .reshape import melt

    return melt(
        self,
        id_vars=id_vars,
        value_vars=value_vars,
        var_name=var_name,
        value_name=value_name,
        col_level=col_level,
    )
def to_records(self, index=False, lengths=None):
    """Convert to a dask array of records.

    Parameters
    ----------
    index : bool, default False
        NOTE(review): not forwarded to ``to_records`` below — confirm
        whether the index should be included when True.
    lengths : sequence, True, or None
        Known partition lengths, or True to compute them now so the
        resulting array has known chunks.
    """
    from .io import to_records

    if lengths is True:
        # Computing the per-partition lengths gives concrete chunks.
        lengths = tuple(self.map_partitions(len).compute())

    records = to_records(self)

    # Attach validated chunk sizes (first axis only) to the array.
    chunks = self._validate_chunks(records, lengths)
    records._chunks = (chunks[0],)

    return records
@derived_from(pd.DataFrame)
def to_html(self, max_rows=5):
    # pd.Series doesn't have html repr
    # Renders the structure (dtypes per division) plus graph statistics.
    data = self._repr_data().to_html(max_rows=max_rows, show_dimensions=False)
    return self._HTML_FMT.format(
        data=data, name=key_split(self._name), task=len(self.dask)
    )
def _repr_data(self):
    """Build the small pandas frame of per-division dtypes used by the
    textual and HTML reprs."""
    meta = self._meta
    index = self._repr_divisions
    if not len(meta.columns):
        # No columns: an empty frame with one (empty) row per division.
        return pd.DataFrame([[]] * len(index), columns=meta.columns, index=index)
    column_reprs = [_repr_data_series(s, index=index) for _, s in meta.iteritems()]
    return pd.concat(column_reprs, axis=1)
# Template shared by to_html/_repr_html_: structure table plus the dask
# graph name and task count.
_HTML_FMT = """<div><strong>Dask DataFrame Structure:</strong></div>
{data}
<div>Dask Name: {name}, {task} tasks</div>"""
def _repr_html_(self):
    """Notebook HTML repr: structural table only, no data is computed."""
    table = self._repr_data().to_html(
        max_rows=5, show_dimensions=False, notebook=True
    )
    return self._HTML_FMT.format(
        data=table, name=key_split(self._name), task=len(self.dask)
    )
def _select_columns_or_index(self, columns_or_index):
    """
    Parameters
    ----------
    columns_or_index
        Column or index name, or a list of these

    Returns
    -------
    dd.DataFrame
        Dask DataFrame with columns corresponding to each column or
        index level in columns_or_index. If included, the column
        corresponding to the index level is named _index
    """
    # Ensure columns_or_index is a list
    columns_or_index = (
        columns_or_index
        if isinstance(columns_or_index, list)
        else [columns_or_index]
    )

    # Keep only entries that name actual columns; index references are
    # handled separately below.
    column_names = [
        n for n in columns_or_index if self._is_column_label_reference(n)
    ]

    selected_df = self[column_names]
    if self._contains_index_name(columns_or_index):
        # Index name was included
        selected_df = selected_df.assign(_index=self.index)

    return selected_df
def _is_column_label_reference(self, key):
    """
    Test whether a key is a column label reference

    To be considered a column label reference, `key` must match the name of at
    least one column.
    """
    # Dask collections can never be labels; only hashable scalars and
    # tuples (MultiIndex labels) qualify, and they must actually match.
    return (
        not is_dask_collection(key)
        and (np.isscalar(key) or isinstance(key, tuple))
        and key in self.columns
    )
# bind operators
# Attach the standard arithmetic/comparison/unary operators to both the
# collection classes (via _Frame) and Scalar, so expressions like
# ``df + 1`` or ``-scalar`` build lazy graphs.
for op in [
    operator.abs,
    operator.add,
    operator.and_,
    operator.eq,
    operator.gt,
    operator.ge,
    operator.inv,
    operator.lt,
    operator.le,
    operator.mod,
    operator.mul,
    operator.ne,
    operator.neg,
    operator.or_,
    operator.pow,
    operator.sub,
    operator.truediv,
    operator.floordiv,
    operator.xor,
]:
    _Frame._bind_operator(op)
    Scalar._bind_operator(op)
# Bind the pandas-style binary operator *methods* (df.add, df.radd, ...)
# onto DataFrame and Series, reusing the pandas implementations both as
# the per-partition function and for deriving documentation.
for name in [
    "add",
    "sub",
    "mul",
    "div",
    "divide",
    "truediv",
    "floordiv",
    "mod",
    "pow",
    "radd",
    "rsub",
    "rmul",
    "rdiv",
    "rtruediv",
    "rfloordiv",
    "rmod",
    "rpow",
]:
    meth = getattr(pd.DataFrame, name)
    DataFrame._bind_operator_method(name, meth)

    meth = getattr(pd.Series, name)
    Series._bind_operator_method(name, meth)
# Likewise bind the pandas comparison methods (df.lt, df.eq, ...).
for name in ["lt", "gt", "le", "ge", "ne", "eq"]:
    meth = getattr(pd.DataFrame, name)
    DataFrame._bind_comparison_method(name, meth)

    meth = getattr(pd.Series, name)
    Series._bind_comparison_method(name, meth)
def is_broadcastable(dfs, s):
    """
    This Series is broadcastable against another dataframe in the sequence
    """
    # A single-partition Series with known divisions can be broadcast
    # across a DataFrame's columns when its divisions span exactly that
    # frame's column-label range.
    # NOTE(review): comparing ``s.divisions`` against column labels
    # assumes the series' index holds column labels (axis=1 broadcast) —
    # confirm against the callers in elemwise.
    return (
        isinstance(s, Series)
        and s.npartitions == 1
        and s.known_divisions
        and any(
            s.divisions == (min(df.columns), max(df.columns))
            for df in dfs
            if isinstance(df, DataFrame)
        )
    )
def elemwise(op, *args, **kwargs):
    """ Elementwise operation for Dask dataframes

    Parameters
    ----------
    op: callable
        Function to apply across input dataframes
    *args: DataFrames, Series, Scalars, Arrays,
        The arguments of the operation
    **kwargs: scalars
        meta: pd.DataFrame, pd.Series (optional)
            Valid metadata for the operation. Will evaluate on a small piece of
            data if not provided.
        transform_divisions: boolean
            If the input is a ``dask.dataframe.Index`` we normally will also apply
            the function onto the divisions and apply those transformed divisions
            to the output. You can pass ``transform_divisions=False`` to override
            this behavior

    Examples
    --------
    >>> elemwise(operator.add, df.x, df.y)  # doctest: +SKIP
    """
    # Pop the control keywords so only operation kwargs reach ``op``.
    meta = kwargs.pop("meta", no_default)
    out = kwargs.pop("out", None)
    transform_divisions = kwargs.pop("transform_divisions", True)

    _name = funcname(op) + "-" + tokenize(op, *args, **kwargs)

    # Promote raw pandas inputs to one-partition collections, then align
    # all dask inputs to a common partitioning.
    args = _maybe_from_pandas(args)

    from .multi import _maybe_align_partitions

    args = _maybe_align_partitions(args)
    dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar, Array))]
    dfs = [df for df in dasks if isinstance(df, _Frame)]

    # Clean up dask arrays if present
    for i, a in enumerate(dasks):
        if not isinstance(a, Array):
            continue

        # Ensure that they have similar-ish chunk structure
        if not all(not a.chunks or len(a.chunks[0]) == df.npartitions for df in dfs):
            msg = (
                "When combining dask arrays with dataframes they must "
                "match chunking exactly.  Operation: %s" % funcname(op)
            )
            raise ValueError(msg)

        # Rechunk to have a single chunk along all other axes
        if a.ndim > 1:
            a = a.rechunk({i + 1: d for i, d in enumerate(a.shape[1:])})

        dasks[i] = a

    divisions = dfs[0].divisions
    if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:
        # For a single Index input, try applying op to the divisions too
        # so the output keeps meaningful divisions; fall back silently.
        try:
            divisions = op(
                *[pd.Index(arg.divisions) if arg is dfs[0] else arg for arg in args],
                **kwargs
            )
            if isinstance(divisions, pd.Index):
                divisions = divisions.tolist()
        except Exception:
            pass
    else:
        if not valid_divisions(divisions):
            divisions = [None] * (dfs[0].npartitions + 1)

    # Broadcastable single-partition series are not real partitioned
    # inputs; drop them from the partition-count bookkeeping.
    _is_broadcastable = partial(is_broadcastable, dfs)
    dfs = list(remove(_is_broadcastable, dfs))

    # Non-dask arguments, kept with their positions for partial_by_order.
    other = [
        (i, arg)
        for i, arg in enumerate(args)
        if not isinstance(arg, (_Frame, Scalar, Array))
    ]

    # adjust the key length of Scalar
    dsk = partitionwise_graph(op, _name, *args, **kwargs)

    graph = HighLevelGraph.from_collections(_name, dsk, dependencies=dasks)

    if meta is no_default:
        if len(dfs) >= 2 and not all(hasattr(d, "npartitions") for d in dasks):
            # should not occur in current funcs
            msg = "elemwise with 2 or more DataFrames and Scalar is not supported"
            raise NotImplementedError(msg)
        # For broadcastable series, use no rows.
        parts = [
            d._meta
            if _is_broadcastable(d)
            else empty_like_safe(d, (), dtype=d.dtype)
            if isinstance(d, Array)
            else d._meta_nonempty
            for d in dasks
        ]
        with raise_on_meta_error(funcname(op)):
            meta = partial_by_order(*parts, function=op, other=other)

    result = new_dd_object(graph, _name, meta, divisions)
    return handle_out(out, result)
def handle_out(out, result):
    """ Handle out parameters

    If out is a dask.DataFrame, dask.Series or dask.Scalar then
    this overwrites the contents of it with the result
    """
    # numpy-style: a 1-tuple out is unwrapped; longer tuples unsupported.
    if isinstance(out, tuple):
        if len(out) == 1:
            out = out[0]
        elif len(out) > 1:
            raise NotImplementedError("The out parameter is not fully supported")
        else:
            out = None

    if out is not None and type(out) != type(result):
        raise TypeError(
            "Mismatched types between result and out parameter. "
            "out=%s, result=%s" % (str(type(out)), str(type(result)))
        )

    if isinstance(out, DataFrame):
        if len(out.columns) != len(result.columns):
            raise ValueError(
                "Mismatched columns count between result and out parameter. "
                "out=%s, result=%s" % (str(len(out.columns)), str(len(result.columns)))
            )

    if isinstance(out, (Series, DataFrame, Scalar)):
        # In-place overwrite: repoint out at the result's graph/metadata.
        out._meta = result._meta
        out._name = result._name
        out.dask = result.dask

        if not isinstance(out, Scalar):
            out.divisions = result.divisions
    elif out is not None:
        msg = (
            "The out parameter is not fully supported."
            " Received type %s, expected %s "
            % (typename(type(out)), typename(type(result)))
        )
        raise NotImplementedError(msg)
    else:
        return result
    # NOTE(review): when ``out`` is provided and updated, this function
    # implicitly returns None (only the out-is-None path returns result)
    # — confirm callers rely on that.
def _maybe_from_pandas(dfs):
    """Promote any in-memory pandas objects in ``dfs`` to one-partition
    dask collections, leaving everything else untouched."""
    from .io import from_pandas

    def _promote(df):
        # Only raw pandas-like objects (not already dask collections)
        # need wrapping.
        is_raw_pandas = (
            is_series_like(df) or is_dataframe_like(df)
        ) and not is_dask_collection(df)
        return from_pandas(df, 1) if is_raw_pandas else df

    return [_promote(df) for df in dfs]
def hash_shard(df, nparts, split_out_setup=None, split_out_setup_kwargs=None):
    """Partition the rows of ``df`` into ``nparts`` shards by hash.

    Parameters
    ----------
    df : dataframe-like
        The object to shard row-wise.
    nparts : int
        Number of output shards.
    split_out_setup : callable, optional
        If given, called on ``df`` first; its return value (one row per
        input row) is what gets hashed instead of ``df`` itself.
    split_out_setup_kwargs : dict, optional
        Extra keywords for ``split_out_setup``.

    Returns
    -------
    dict mapping shard number -> row subset of ``df``.
    """
    if split_out_setup:
        h = split_out_setup(df, **(split_out_setup_kwargs or {}))
    else:
        h = df

    # Row-wise hash (index excluded), reduced modulo the shard count.
    h = hash_object_dispatch(h, index=False)
    if is_series_like(h):
        h = h.values
    h %= nparts
    return {i: df.iloc[h == i] for i in range(nparts)}
def split_evenly(df, k):
    """ Split dataframe into k roughly equal parts """
    # Evenly spaced integer cut points over the row count; consecutive
    # pairs delimit each chunk, so sizes differ by at most one row.
    bounds = np.linspace(0, len(df), k + 1).astype(int)
    return {
        i: df.iloc[start:stop]
        for i, (start, stop) in enumerate(zip(bounds[:-1], bounds[1:]))
    }
def split_out_on_index(df):
    """Return the per-row values to hash when splitting output on the index."""
    index = df.index
    if isinstance(index, pd.MultiIndex):
        # Flatten the levels into ordinary columns so every row hashes
        # on its full tuple of level values.
        return pd.DataFrame([], index=index).reset_index()
    return index
def split_out_on_cols(df, cols=None):
    """Return the subset of ``df``'s columns used for hash-splitting."""
    subset = df[cols]
    return subset
@insert_meta_param_description
def apply_concat_apply(
    args,
    chunk=None,
    aggregate=None,
    combine=None,
    meta=no_default,
    token=None,
    chunk_kwargs=None,
    aggregate_kwargs=None,
    combine_kwargs=None,
    split_every=None,
    split_out=None,
    split_out_setup=None,
    split_out_setup_kwargs=None,
    sort=None,
    **kwargs
):
    """Apply a function to blocks, then concat, then apply again

    Parameters
    ----------
    args :
        Positional arguments for the `chunk` function. All `dask.dataframe`
        objects should be partitioned and indexed equivalently.
    chunk : function [block-per-arg] -> block
        Function to operate on each block of data
    aggregate : function concatenated-block -> block
        Function to operate on the concatenated result of chunk
    combine : function concatenated-block -> block, optional
        Function to operate on intermediate concatenated results of chunk
        in a tree-reduction. If not provided, defaults to aggregate.
    $META
    token : str, optional
        The name to use for the output keys.
    chunk_kwargs : dict, optional
        Keywords for the chunk function only.
    aggregate_kwargs : dict, optional
        Keywords for the aggregate function only.
    combine_kwargs : dict, optional
        Keywords for the combine function only.
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used,
        and all intermediates will be concatenated and passed to ``aggregate``.
        Default is 8.
    split_out : int, optional
        Number of output partitions. Split occurs after first chunk reduction.
    split_out_setup : callable, optional
        If provided, this function is called on each chunk before performing
        the hash-split. It should return a pandas object, where each row
        (excluding the index) is hashed. If not provided, the chunk is hashed
        as is.
    split_out_setup_kwargs : dict, optional
        Keywords for the `split_out_setup` function only.
    sort : bool, default None
        If allowed, sort the keys of the output aggregation.
    kwargs :
        All remaining keywords will be passed to ``chunk``, ``aggregate``, and
        ``combine``.

    Returns
    -------
    A new dask collection built from the chunk/combine/aggregate graph.

    Examples
    --------
    >>> def chunk(a_block, b_block):
    ...     pass
    >>> def agg(df):
    ...     pass
    >>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
    """
    if chunk_kwargs is None:
        chunk_kwargs = dict()
    if aggregate_kwargs is None:
        aggregate_kwargs = dict()
    # Shared **kwargs are forwarded to every stage of the reduction.
    chunk_kwargs.update(kwargs)
    aggregate_kwargs.update(kwargs)
    if combine is None:
        # Without an explicit combine, intermediate tree levels reuse the
        # aggregate function (and its keyword arguments).
        if combine_kwargs:
            raise ValueError("`combine_kwargs` provided with no `combine`")
        combine = aggregate
        combine_kwargs = aggregate_kwargs
    else:
        if combine_kwargs is None:
            combine_kwargs = dict()
        combine_kwargs.update(kwargs)
    if not isinstance(args, (tuple, list)):
        args = [args]
    dfs = [arg for arg in args if isinstance(arg, _Frame)]
    npartitions = set(arg.npartitions for arg in dfs)
    if len(npartitions) > 1:
        raise ValueError("All arguments must have same number of partitions")
    npartitions = npartitions.pop()
    if split_every is None:
        split_every = 8
    elif split_every is False:
        # split_every=False disables the tree: a single flat aggregate over
        # all chunk outputs.
        split_every = npartitions
    elif split_every < 2 or not isinstance(split_every, Integral):
        raise ValueError("split_every must be an integer >= 2")
    token_key = tokenize(
        token or (chunk, aggregate),
        meta,
        args,
        chunk_kwargs,
        aggregate_kwargs,
        combine_kwargs,
        split_every,
        split_out,
        split_out_setup,
        split_out_setup_kwargs,
    )
    # Chunk
    # Task keys are (name, depth, partition, shard); depth 0 holds the
    # per-partition chunk results.
    a = "{0}-chunk-{1}".format(token or funcname(chunk), token_key)
    if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
        dsk = {
            (a, 0, i, 0): (chunk, key) for i, key in enumerate(args[0].__dask_keys__())
        }
    else:
        dsk = {
            (a, 0, i, 0): (
                apply,
                chunk,
                [(x._name, i) if isinstance(x, _Frame) else x for x in args],
                chunk_kwargs,
            )
            for i in range(npartitions)
        }
    # Split
    if split_out and split_out > 1:
        # Hash-split every chunk result into split_out shards so the final
        # output has split_out partitions.
        split_prefix = "split-%s" % token_key
        shard_prefix = "shard-%s" % token_key
        for i in range(npartitions):
            dsk[(split_prefix, i)] = (
                hash_shard,
                (a, 0, i, 0),
                split_out,
                split_out_setup,
                split_out_setup_kwargs,
            )
            for j in range(split_out):
                dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)
        a = shard_prefix
    else:
        split_out = 1
    # Combine
    # Tree-reduce: concatenate groups of split_every intermediates per shard
    # and apply combine, until at most split_every partitions remain.
    b = "{0}-combine-{1}".format(token or funcname(combine), token_key)
    k = npartitions
    depth = 0
    while k > split_every:
        for part_i, inds in enumerate(partition_all(split_every, range(k))):
            for j in range(split_out):
                conc = (_concat, [(a, depth, i, j) for i in inds])
                if combine_kwargs:
                    dsk[(b, depth + 1, part_i, j)] = (
                        apply,
                        combine,
                        [conc],
                        combine_kwargs,
                    )
                else:
                    dsk[(b, depth + 1, part_i, j)] = (combine, conc)
        k = part_i + 1
        a = b
        depth += 1
    if sort is not None:
        if sort and split_out > 1:
            raise NotImplementedError(
                "Cannot guarentee sorted keys for `split_out>1`."
                " Try using split_out=1, or grouping with sort=False."
            )
        aggregate_kwargs = aggregate_kwargs or {}
        aggregate_kwargs["sort"] = sort
    # Aggregate
    # One final aggregate task per output shard.
    for j in range(split_out):
        b = "{0}-agg-{1}".format(token or funcname(aggregate), token_key)
        conc = (_concat, [(a, depth, i, j) for i in range(k)])
        if aggregate_kwargs:
            dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)
        else:
            dsk[(b, j)] = (aggregate, conc)
    if meta is no_default:
        # Infer meta by emulating chunk then aggregate on (nonempty) metas.
        meta_chunk = _emulate(chunk, *args, udf=True, **chunk_kwargs)
        meta = _emulate(aggregate, _concat([meta_chunk]), udf=True, **aggregate_kwargs)
    meta = make_meta(
        meta, index=(getattr(make_meta(dfs[0]), "index", None) if dfs else None)
    )
    graph = HighLevelGraph.from_collections(b, dsk, dependencies=dfs)
    # Hash-splitting scrambles row placement, so output divisions are unknown.
    divisions = [None] * (split_out + 1)
    return new_dd_object(graph, b, meta, divisions)


# Common abbreviation used throughout this module.
aca = apply_concat_apply
def _extract_meta(x, nonempty=False):
    """
    Recursively replace dask collections in ``x`` with their metadata.

    Scalars and frames yield ``_meta`` (or ``_meta_nonempty`` when
    ``nonempty`` is True); lists/tuples/dicts are rebuilt element-wise;
    Delayed objects are rejected because no metadata can be inferred for
    them; anything else is returned unchanged.
    """
    if isinstance(x, (Scalar, _Frame)):
        return x._meta_nonempty if nonempty else x._meta
    if isinstance(x, list):
        return [_extract_meta(item, nonempty) for item in x]
    if isinstance(x, tuple):
        return tuple(_extract_meta(item, nonempty) for item in x)
    if isinstance(x, dict):
        return {key: _extract_meta(val, nonempty) for key, val in x.items()}
    if isinstance(x, Delayed):
        raise ValueError(
            "Cannot infer dataframe metadata with a `dask.delayed` argument"
        )
    return x
def _emulate(func, *args, **kwargs):
    """
    Apply a function using args / kwargs. If arguments contain dd.DataFrame /
    dd.Series, using internal cache (``_meta``) for calculation
    """
    # Pop the udf flag so it is not forwarded to ``func``; it only controls
    # how meta-inference errors are reported by raise_on_meta_error.
    with raise_on_meta_error(funcname(func), udf=kwargs.pop("udf", False)):
        # Nonempty metas (second argument True) are used so functions that
        # inspect values (e.g. for dtype inference) see representative data.
        return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(
    func,
    *args,
    meta=no_default,
    enforce_metadata=True,
    transform_divisions=True,
    **kwargs
):
    """ Apply Python function on each DataFrame partition.

    Parameters
    ----------
    func : function
        Function applied to each partition.
    args, kwargs :
        Arguments and keywords to pass to the function. At least one of the
        args should be a Dask.dataframe. Arguments and keywords may contain
        ``Scalar``, ``Delayed`` or regular python objects. DataFrame-like args
        (both dask and pandas) will be repartitioned to align (if necessary)
        before applying the function.
    enforce_metadata : bool
        Whether or not to enforce the structure of the metadata at runtime.
        This will rename and reorder columns for each partition,
        and will raise an error if this doesn't work or types don't match.
    $META
    """
    name = kwargs.pop("token", None)
    assert callable(func)
    if name is not None:
        # A user-supplied token fixes the label; don't hash func itself.
        token = tokenize(meta, *args, **kwargs)
    else:
        name = funcname(func)
        token = tokenize(func, meta, *args, **kwargs)
    name = "{0}-{1}".format(name, token)
    from .multi import _maybe_align_partitions

    # Promote raw pandas objects to single-partition collections, then align
    # all collections to a common set of divisions.
    args = _maybe_from_pandas(args)
    args = _maybe_align_partitions(args)
    dfs = [df for df in args if isinstance(df, _Frame)]
    meta_index = getattr(make_meta(dfs[0]), "index", None) if dfs else None
    if meta is no_default:
        # Use non-normalized kwargs here, as we want the real values (not
        # delayed values)
        meta = _emulate(func, *args, udf=True, **kwargs)
    else:
        meta = make_meta(meta, index=meta_index)
    if all(isinstance(arg, Scalar) for arg in args):
        # All-scalar inputs: produce a Scalar via a single task.
        layer = {
            (name, 0): (apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)
        }
        graph = HighLevelGraph.from_collections(name, layer, dependencies=args)
        return Scalar(graph, name, meta)
    elif not (has_parallel_type(meta) or is_arraylike(meta) and meta.shape):
        # If `meta` is not a pandas object, the concatenated results will be a
        # different type
        meta = make_meta(_concat([meta]), index=meta_index)
    # Ensure meta is empty series
    meta = make_meta(meta)
    # Normalize positional arguments, collecting any embedded dask
    # collections as graph dependencies.
    args2 = []
    dependencies = []
    for arg in args:
        if isinstance(arg, _Frame):
            args2.append(arg)
            dependencies.append(arg)
            continue
        arg = normalize_arg(arg)
        arg2, collections = unpack_collections(arg)
        if collections:
            args2.append(arg2)
            dependencies.extend(collections)
        else:
            args2.append(arg)
    kwargs3 = {}
    simple = True
    for k, v in kwargs.items():
        v = normalize_arg(v)
        v, collections = unpack_collections(v)
        dependencies.extend(collections)
        kwargs3[k] = v
        if collections:
            # kwargs contain dask collections, so func must go through apply.
            simple = False
    if enforce_metadata:
        # Wrap func so each partition's output is coerced to match meta.
        dsk = partitionwise_graph(
            apply_and_enforce,
            name,
            *args2,
            dependencies=dependencies,
            _func=func,
            _meta=meta,
            **kwargs3
        )
    elif not simple:
        dsk = partitionwise_graph(
            apply, name, func, *args2, **kwargs3, dependencies=dependencies
        )
    else:
        dsk = partitionwise_graph(
            func, name, *args2, **kwargs, dependencies=dependencies
        )
    divisions = dfs[0].divisions
    if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:
        # For a single Index argument, best-effort: map func over the
        # divisions themselves so the output keeps known divisions.
        try:
            divisions = func(
                *[pd.Index(a.divisions) if a is dfs[0] else a for a in args], **kwargs
            )
            if isinstance(divisions, pd.Index):
                divisions = divisions.tolist()
        except Exception:
            pass
        else:
            if not valid_divisions(divisions):
                divisions = [None] * (dfs[0].npartitions + 1)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    return new_dd_object(graph, name, meta, divisions)
def apply_and_enforce(*args, **kwargs):
    """Apply a function, and enforce the output to match meta

    Ensures the output has the same columns, even if empty."""
    func = kwargs.pop("_func")
    meta = kwargs.pop("_meta")
    out = func(*args, **kwargs)
    if not (is_dataframe_like(out) or is_series_like(out) or is_index_like(out)):
        # Non-frame results (e.g. scalars) pass through untouched.
        return out
    if not len(out):
        # Empty result: substitute the meta so columns/names always match.
        return meta
    if is_dataframe_like(out):
        check_matching_columns(meta, out)
        names = meta.columns
    else:
        names = meta.name
    return _rename(names, out)
def _rename(columns, df):
    """
    Rename columns of pd.DataFrame or name of pd.Series.
    Not for dd.DataFrame or dd.Series.

    Parameters
    ----------
    columns : tuple, string, pd.DataFrame or pd.Series
        Column names, Series name or pandas instance carrying the target
        column names / name.
    df : pd.DataFrame or pd.Series
        target DataFrame / Series to be renamed
    """
    assert not isinstance(df, _Frame)
    if columns is no_default:
        return df
    if isinstance(columns, Iterator):
        columns = list(columns)

    if is_dataframe_like(df):
        if is_dataframe_like(columns):
            columns = columns.columns
        if not isinstance(columns, pd.Index):
            columns = pd.Index(columns)
        already_match = (
            len(columns) == len(df.columns)
            and type(columns) is type(df.columns)
            and columns.equals(df.columns)
        )
        if already_match:
            # Target names are identical, so no rename is needed.
            return df
        # deep=False copies no data/indices, so this is cheap.
        renamed = df.copy(deep=False)
        renamed.columns = columns
        return renamed

    if is_series_like(df) or is_index_like(df):
        if is_series_like(columns) or is_index_like(columns):
            columns = columns.name
        return df if df.name == columns else df.rename(columns)

    # map_partition may pass other types; return them unchanged.
    return df
def _rename_dask(df, names):
    """
    Destructively rename columns of dd.DataFrame or name of dd.Series.
    Not for pd.DataFrame or pd.Series.

    Internally used to overwrite dd.DataFrame.columns and dd.Series.name.
    We can't use map_partitions because it applies the function and then
    renames.

    Parameters
    ----------
    df : dd.DataFrame or dd.Series
        target DataFrame / Series to be renamed
    names : tuple, string
        Column names/Series name
    """
    assert isinstance(df, _Frame)
    meta = _rename(names, df._meta)
    label = "rename-{0}".format(tokenize(df, meta))
    layer = partitionwise_graph(_rename, label, meta, df)
    graph = HighLevelGraph.from_collections(label, layer, dependencies=[df])
    return new_dd_object(graph, label, meta, df.divisions)
def quantile(df, q, method="default"):
    """Approximate quantiles of Series.

    Parameters
    ----------
    q : list/array of floats
        Iterable of numbers ranging from 0 to 100 for the desired quantiles
    method : {'default', 'tdigest', 'dask'}, optional
        What method to use. By default will use dask's internal custom
        algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for
        floats and ints and fallback to the ``'dask'`` otherwise.
    """
    # current implementation needs q to be sorted so
    # sort if array-like, otherwise leave it alone
    q_ndarray = np.array(q)
    if q_ndarray.ndim > 0:
        q_ndarray.sort(kind="mergesort")
        q = q_ndarray
    assert isinstance(df, Series)
    allowed_methods = ["default", "dask", "tdigest"]
    if method not in allowed_methods:
        raise ValueError("method can only be 'default', 'dask' or 'tdigest'")
    if method == "default":
        internal_method = "dask"
    else:
        internal_method = method
    # currently, only Series has quantile method
    if isinstance(df, Index):
        meta = pd.Series(df._meta_nonempty).quantile(q)
    else:
        meta = df._meta_nonempty.quantile(q)
    if is_series_like(meta):
        # Index.quantile(list-like) must be pd.Series, not pd.Index
        df_name = df.name
        finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
        return_type = Series
    else:
        # Scalar q: the merged result is indexed, take its single element.
        finalize_tsk = lambda tsk: (getitem, tsk, 0)
        return_type = Scalar
        q = [q]
    # pandas uses quantile in [0, 1]
    # numpy / everyone else uses [0, 100]
    qs = np.asarray(q) * 100
    token = tokenize(df, qs)
    if len(qs) == 0:
        # No quantiles requested: return an empty float Series immediately.
        name = "quantiles-" + token
        empty_index = pd.Index([], dtype=float)
        return Series(
            {(name, 0): pd.Series([], name=df.name, index=empty_index, dtype="float")},
            name,
            df._meta,
            [None, None],
        )
    else:
        new_divisions = [np.min(q), np.max(q)]
    df = df.dropna()
    if internal_method == "tdigest" and (
        np.issubdtype(df.dtype, np.floating) or np.issubdtype(df.dtype, np.integer)
    ):
        from dask.utils import import_required

        import_required(
            "crick", "crick is a required dependency for using the t-digest method."
        )
        from dask.array.percentile import _tdigest_chunk, _percentiles_from_tdigest

        # Build one t-digest sketch per partition, merged by a single task.
        name = "quantiles_tdigest-1-" + token
        val_dsk = {
            (name, i): (_tdigest_chunk, (getattr, key, "values"))
            for i, key in enumerate(df.__dask_keys__())
        }
        name2 = "quantiles_tdigest-2-" + token
        merge_dsk = {
            (name2, 0): finalize_tsk((_percentiles_from_tdigest, qs, sorted(val_dsk)))
        }
    else:
        from dask.array.percentile import _percentile, merge_percentiles

        # Per-partition percentiles merged with dask's approximate algorithm.
        name = "quantiles-1-" + token
        val_dsk = {
            (name, i): (_percentile, (getattr, key, "values"), qs)
            for i, key in enumerate(df.__dask_keys__())
        }
        name2 = "quantiles-2-" + token
        merge_dsk = {
            (name2, 0): finalize_tsk(
                (merge_percentiles, qs, [qs] * df.npartitions, sorted(val_dsk))
            )
        }
    dsk = merge(val_dsk, merge_dsk)
    graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[df])
    return return_type(graph, name2, meta, new_divisions)
def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):
    """DataFrame covariance and pearson correlation.

    Computes pairwise covariance or correlation of columns, excluding NA/null
    values.

    Parameters
    ----------
    df : DataFrame
    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result.
    corr : bool, optional
        If True, compute the Pearson correlation. If False [default], compute
        the covariance.
    scalar : bool, optional
        If True, compute covariance between two variables as a scalar. Only
        valid if `df` has 2 columns. If False [default], compute the entire
        covariance/correlation matrix.
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used.
        Default is False.
    """
    if min_periods is None:
        min_periods = 2
    elif min_periods < 2:
        raise ValueError("min_periods must be >= 2")
    if split_every is False:
        # No tree-reduction: combine all chunk results in one step.
        split_every = df.npartitions
    elif split_every < 2 or not isinstance(split_every, Integral):
        raise ValueError("split_every must be an integer >= 2")
    df = df._get_numeric_data()
    if scalar and len(df.columns) != 2:
        raise ValueError("scalar only valid for 2 column dataframe")
    token = tokenize(df, min_periods, scalar, split_every)
    # NOTE: this local shadows the module-level funcname() helper inside
    # this function; it is just the label prefix for task names.
    funcname = "corr" if corr else "cov"
    # Per-partition moment statistics.
    a = "{0}-chunk-{1}".format(funcname, df._name)
    dsk = {
        (a, i): (cov_corr_chunk, f, corr) for (i, f) in enumerate(df.__dask_keys__())
    }
    # Tree-reduce the chunk results, split_every at a time.
    prefix = "{0}-combine-{1}-".format(funcname, df._name)
    k = df.npartitions
    b = a
    depth = 0
    while k > split_every:
        b = prefix + str(depth)
        for part_i, inds in enumerate(partition_all(split_every, range(k))):
            dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)
        k = part_i + 1
        a = b
        depth += 1
    # Final aggregation into the matrix (or scalar).
    name = "{0}-{1}".format(funcname, token)
    dsk[(name, 0)] = (
        cov_corr_agg,
        [(a, i) for i in range(k)],
        df.columns,
        min_periods,
        corr,
        scalar,
    )
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])
    if scalar:
        return Scalar(graph, name, "f8")
    meta = make_meta([(c, "f8") for c in df.columns], index=df.columns)
    return DataFrame(graph, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
    """Chunk part of a covariance or correlation computation

    Returns a dict of per-partition sufficient statistics: pairwise sums
    ("sum"), pairwise counts ("count"), scaled covariance ("cov"), and —
    when ``corr`` is True — summed squared deviations from the pairwise
    means ("m").
    """
    shape = (df.shape[1], df.shape[1])
    df = df.astype("float64", copy=False)
    sums = zeros_like_safe(df.values, shape=shape)
    counts = zeros_like_safe(df.values, shape=shape)
    for idx, col in enumerate(df):
        # Row idx holds the sums/counts of every column restricted to rows
        # where column idx is non-null (pairwise-complete statistics).
        mask = df.iloc[:, idx].notnull()
        sums[idx] = df[mask].sum().values
        counts[idx] = df[mask].count().values
    cov = df.cov().values
    # NOTE(review): `dtype` is assembled but never used below — it looks
    # like a leftover from a structured-array return format.
    dtype = [("sum", sums.dtype), ("count", counts.dtype), ("cov", cov.dtype)]
    if corr:
        # Suppress divide-by-zero warnings from all-null column pairs.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            mu = (sums / counts).T
        m = zeros_like_safe(df.values, shape=shape)
        mask = df.isnull().values
        for idx, x in enumerate(df):
            # Avoid using ufunc.outer (not supported by cupy)
            mu_discrepancy = (
                np.subtract(df.iloc[:, idx].values[:, None], mu[idx][None, :]) ** 2
            )
            mu_discrepancy[mask] = np.nan
            m[idx] = np.nansum(mu_discrepancy, axis=0)
        m = m.T
        dtype.append(("m", m.dtype))
    out = {"sum": sums, "count": counts, "cov": cov * (counts - 1)}
    if corr:
        out["m"] = m
    return out
def cov_corr_combine(data_in, corr=False):
    """Combine a list of cov/corr chunk statistics into a single dict.

    Merges partial covariances with a pairwise-update formula over
    cumulative sums/counts, without revisiting the original data.
    """
    data = {"sum": None, "count": None, "cov": None}
    if corr:
        data["m"] = None
    # Stack each statistic from all inputs into one (n_inputs, k, k) array.
    for k in data.keys():
        data[k] = [d[k] for d in data_in]
        data[k] = np.concatenate(data[k]).reshape((len(data[k]),) + data[k][0].shape)
    sums = np.nan_to_num(data["sum"])
    counts = data["count"]
    cum_sums = np.cumsum(sums, 0)
    cum_counts = np.cumsum(counts, 0)
    # Pair each prefix (s1, n1) with the next block (s2, n2).
    s1 = cum_sums[:-1]
    s2 = sums[1:]
    n1 = cum_counts[:-1]
    n2 = counts[1:]
    # Merged covariance = sum of partial covariances plus a between-block
    # correction term derived from the mean differences d.
    with np.errstate(invalid="ignore"):
        d = (s2 / n2) - (s1 / n1)
        C = np.nansum(
            (n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0
        ) + np.nansum(data["cov"], 0)
    out = {"sum": cum_sums[-1], "count": cum_counts[-1], "cov": C}
    if corr:
        # Merge squared-deviation sums the same way for correlation.
        nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
        mu = cum_sums[-1] / nobs
        counts_na = np.where(counts, counts, np.nan)
        m = np.nansum(data["m"] + counts * (sums / counts_na - mu) ** 2, axis=0)
        out["m"] = m
    return out
def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):
    """Final aggregation of DataFrame covariance/correlation.

    Combines all partial statistics, masks entries with too few
    observations, and normalizes to produce the final matrix — or a single
    float when ``scalar`` is True.
    """
    combined = cov_corr_combine(data, corr)
    counts = combined["count"]
    C = combined["cov"]
    # Entries backed by fewer than min_periods observations are undefined.
    C[counts < min_periods] = np.nan
    if corr:
        m2 = combined["m"]
        den = np.sqrt(m2 * m2.T)
    else:
        den = np.where(counts, counts, np.nan) - 1
    with np.errstate(invalid="ignore", divide="ignore"):
        mat = C / den
    if scalar:
        return float(mat[0, 1])
    return pd.DataFrame(mat, columns=cols, index=cols)
def pd_split(df, p, random_state=None, shuffle=False):
    """ Split DataFrame into multiple pieces pseudorandomly

    Parameters
    ----------
    df : pd.DataFrame
        Frame to split.
    p : list of floats
        Relative fraction of rows assigned to each output piece.
    random_state : int or np.random.RandomState, optional
        Seed or generator used for the pseudorandom assignment.
    shuffle : bool, default False
        If True, shuffle the rows before assigning them to pieces.

    >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
    ...                    'b': [2, 3, 4, 5, 6, 7]})

    >>> a, b = pd_split(
    ...     df, [0.5, 0.5], random_state=123, shuffle=True
    ... )  # roughly 50/50 split
    >>> a
       a  b
    3  4  5
    0  1  2
    5  6  7
    >>> b
       a  b
    1  2  3
    4  5  6
    2  3  4
    """
    p = list(p)
    if shuffle:
        if not isinstance(random_state, np.random.RandomState):
            random_state = np.random.RandomState(random_state)
        df = df.sample(frac=1.0, random_state=random_state)
    # Assign each row to piece i with probability p[i], then slice per piece.
    index = pseudorandom(len(df), p, random_state)
    return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
    """
    take last row (Series) of DataFrame / last value of Series
    considering NaN.

    Parameters
    ----------
    a : pd.DataFrame or pd.Series
    skipna : bool, default True
        Whether to exclude NaN
    """

    def _last_valid(s):
        # Scan up to the last 9 values directly (cheap common case); the
        # for-else falls back to a full notna() filter for longer NaN tails.
        for i in range(1, min(10, len(s) + 1)):
            val = s.iloc[-i]
            if not pd.isnull(val):
                return val
        else:
            nonnull = s[s.notna()]
            if not nonnull.empty:
                return nonnull.iloc[-1]
        return None

    if skipna is False:
        return a.iloc[-1]
    else:
        # take last valid value excluding NaN, NaN location may be different
        # in each column
        if is_dataframe_like(a):
            # create Series from appropriate backend dataframe library
            series_typ = type(a.iloc[0:1, 0])
            if a.empty:
                return series_typ([], dtype="float")
            return series_typ(
                {col: _last_valid(a[col]) for col in a.columns}, index=a.columns
            )
        else:
            return _last_valid(a)
def check_divisions(divisions):
    """Validate a divisions sequence.

    Divisions must be a list or tuple, sorted ascending, and contain no
    duplicate values — except that the final element may repeat the one
    before it (a closed right boundary).

    Raises
    ------
    ValueError
        If any of the above conditions is violated.
    """
    if not isinstance(divisions, (list, tuple)):
        raise ValueError("New division must be list or tuple")
    divisions = list(divisions)
    if divisions != sorted(divisions):
        raise ValueError("New division must be sorted")
    # All entries except the last must be unique. Only the count matters,
    # so a stdlib set replaces the external order-preserving `unique` helper.
    if len(set(divisions[:-1])) != len(divisions[:-1]):
        msg = "New division must be unique, except for the last element"
        raise ValueError(msg)
def repartition_divisions(a, b, name, out1, out2, force=False):
    """ dask graph to repartition dataframe by new divisions

    Parameters
    ----------
    a : tuple
        old divisions
    b : tuple, list
        new divisions
    name : str
        name of old dataframe
    out1 : str
        name of temporary splits
    out2 : str
        name of new dataframe
    force : bool, default False
        Allows the expansion of the existing divisions.
        If False then the new divisions lower and upper bounds must be
        the same as the old divisions.

    Examples
    --------
    >>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')  # doctest: +SKIP
    {('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),
     ('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),
     ('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),
     ('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, False)
     ('c', 0): (<function concat at ...>,
                (<type 'list'>, [('b', 0), ('b', 1)])),
     ('c', 1): ('b', 2),
     ('c', 2): ('b', 3)}
    """
    check_divisions(b)
    if len(b) < 2:
        # minimum division is 2 elements, like [0, 0]
        raise ValueError("New division must be longer than 2 elements")
    if force:
        if a[0] < b[0]:
            msg = (
                "left side of the new division must be equal or smaller "
                "than old division"
            )
            raise ValueError(msg)
        if a[-1] > b[-1]:
            msg = (
                "right side of the new division must be equal or larger "
                "than old division"
            )
            raise ValueError(msg)
    else:
        if a[0] != b[0]:
            msg = "left side of old and new divisions are different"
            raise ValueError(msg)
        if a[-1] != b[-1]:
            msg = "right side of old and new divisions are different"
            raise ValueError(msg)

    def _is_single_last_div(x):
        """Whether last division only contains single label"""
        return len(x) >= 2 and x[-1] == x[-2]

    # c: boundaries of the temporary splits; d: the graph under construction.
    c = [a[0]]
    d = dict()
    low = a[0]

    i, j = 1, 1  # indices for old/new divisions
    k = 0  # index for temp divisions

    last_elem = _is_single_last_div(a)

    # process through old division
    # left part of new division can be processed in this loop
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            # tuple is something like:
            # (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)
            low = a[i]
            i += 1
        elif a[i] > b[j]:
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
            low = b[j]
            j += 1
        else:
            # Boundaries coincide; only advance j when the old division is
            # not a zero-width (duplicated) boundary.
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
            low = b[j]
            if len(a) == i + 1 or a[i] < a[i + 1]:
                j += 1
            i += 1
        c.append(low)
        k += 1

    # right part of new division can remain
    if a[-1] < b[-1] or b[-1] == b[-2]:
        for _j in range(j, len(b)):
            # always use right-most of old division
            # because it may contain last element
            m = len(a) - 2
            d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)
            low = b[_j]
            c.append(low)
            k += 1
    else:
        # even if new division is processed through,
        # right-most element of old division can remain
        if last_elem and i < len(a):
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)
            k += 1
        c.append(a[-1])

    # replace last element of tuple with True
    # (the final boundary_slice keeps its right edge closed)
    d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)

    i, j = 0, 1

    last_elem = _is_single_last_div(c)

    while j < len(b):
        # Gather the temporary splits that make up output partition j-1.
        tmp = []
        while c[i] < b[j]:
            tmp.append((out1, i))
            i += 1
        while (
            last_elem
            and c[i] == b[-1]
            and (b[-1] != b[-2] or j == len(b) - 1)
            and i < k
        ):
            # append if last split is not included
            tmp.append((out1, i))
            i += 1
        if len(tmp) == 0:
            # dummy slice to return empty DataFrame or Series,
            # which retain original data attributes (columns / name)
            d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)
        elif len(tmp) == 1:
            d[(out2, j - 1)] = tmp[0]
        else:
            if not tmp:
                # NOTE(review): unreachable — this branch only runs when
                # len(tmp) >= 2, so this guard can never fire.
                raise ValueError(
                    "check for duplicate partitions\nold:\n%s\n\n"
                    "new:\n%s\n\ncombined:\n%s" % (pformat(a), pformat(b), pformat(c))
                )
            d[(out2, j - 1)] = (methods.concat, tmp)
        j += 1
    return d
def repartition_freq(df, freq=None):
    """ Repartition a timeseries dataframe by a new frequency """
    if not isinstance(df.divisions[0], pd.Timestamp):
        raise TypeError("Can only repartition on frequency for timeseries")

    # Round the first division up onto the frequency grid; fall back to the
    # raw division for frequencies that cannot be ceiled.
    try:
        start = df.divisions[0].ceil(freq)
    except ValueError:
        start = df.divisions[0]
    divisions = pd.date_range(start=start, end=df.divisions[-1], freq=freq).tolist()
    if not divisions:
        divisions = [df.divisions[0], df.divisions[-1]]
    else:
        # Preserve the original outer bounds exactly.
        if divisions[-1] != df.divisions[-1]:
            divisions.append(df.divisions[-1])
        if divisions[0] != df.divisions[0]:
            divisions = [df.divisions[0]] + divisions
    return df.repartition(divisions=divisions)
def repartition_size(df, size):
    """
    Repartition dataframe so that new partitions have approximately `size` memory usage each
    """
    if isinstance(size, str):
        size = parse_bytes(size)
    size = int(size)
    # Measured (computed) memory footprint of every current partition.
    mem_usages = df.map_partitions(total_mem_usage, deep=True).compute()
    # 1. split each partition that is larger than partition_size
    nsplits = 1 + mem_usages // size
    if np.any(nsplits > 1):
        split_name = "repartition-split-{}-{}".format(size, tokenize(df))
        df = _split_partitions(df, nsplits, split_name)
        # update mem_usages to account for the split partitions
        split_mem_usages = []
        for n, usage in zip(nsplits, mem_usages):
            # Assume the split pieces share the original usage evenly.
            split_mem_usages.extend([usage / n] * n)
        mem_usages = pd.Series(split_mem_usages)
    # 2. now that all partitions are less than size, concat them up to size
    assert np.all(mem_usages <= size)
    new_npartitions = list(map(len, iter_chunks(mem_usages, size)))
    new_partitions_boundaries = np.cumsum(new_npartitions)
    new_name = "repartition-{}-{}".format(size, tokenize(df))
    return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)
def total_mem_usage(df, index=True, deep=False):
    """Total memory usage of a pandas object as a single number."""
    usage = df.memory_usage(index=index, deep=deep)
    # DataFrame.memory_usage returns a per-column Series; reduce it to one
    # value so callers always get a scalar.
    return usage.sum() if is_series_like(usage) else usage
def iter_chunks(sizes, max_size):
    """Split sizes into chunks of total max_size each

    Parameters
    ----------
    sizes : iterable of numbers
        The sizes to be chunked
    max_size : number
        Maximum total size per chunk.
        It must be greater or equal than each size in sizes
    """
    chunk = []
    running_total = 0
    for size in sizes:
        assert size <= max_size
        if running_total + size > max_size:
            # Current chunk is full; emit it and start a new one.
            yield chunk
            chunk = []
            running_total = 0
        chunk.append(size)
        running_total += size
    if chunk:
        yield chunk
def repartition_npartitions(df, npartitions):
    """ Repartition dataframe to a new number of partitions (handles both
    coarsening to fewer and refining to more partitions) """
    new_name = "repartition-%d-%s" % (npartitions, tokenize(df))
    if df.npartitions == npartitions:
        return df
    elif df.npartitions > npartitions:
        # Coarsen: concatenate evenly-sized runs of existing partitions.
        npartitions_ratio = df.npartitions / npartitions
        new_partitions_boundaries = [
            int(new_partition_index * npartitions_ratio)
            for new_partition_index in range(npartitions + 1)
        ]
        return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)
    else:
        original_divisions = divisions = pd.Series(df.divisions)
        if df.known_divisions and (
            np.issubdtype(divisions.dtype, np.datetime64)
            or np.issubdtype(divisions.dtype, np.number)
        ):
            # Refine by interpolating new boundaries between the known
            # numeric/datetime divisions.
            if np.issubdtype(divisions.dtype, np.datetime64):
                divisions = divisions.values.astype("float64")
            if is_series_like(divisions):
                divisions = divisions.values
            n = len(divisions)
            divisions = np.interp(
                x=np.linspace(0, n, npartitions + 1),
                xp=np.linspace(0, n, n),
                fp=divisions,
            )
            # Convert interpolated floats back to the original division dtype.
            if np.issubdtype(original_divisions.dtype, np.datetime64):
                divisions = (
                    pd.Series(divisions).astype(original_divisions.dtype).tolist()
                )
            elif np.issubdtype(original_divisions.dtype, np.integer):
                divisions = divisions.astype(original_divisions.dtype)
            if isinstance(divisions, np.ndarray):
                divisions = divisions.tolist()
            divisions = list(divisions)
            # Keep the exact original outer bounds.
            divisions[0] = df.divisions[0]
            divisions[-1] = df.divisions[-1]
            return df.repartition(divisions=divisions)
        else:
            # Unknown/non-numeric divisions: split each partition evenly,
            # sending the remainder to the last partition.
            div, mod = divmod(npartitions, df.npartitions)
            nsplits = [div] * df.npartitions
            nsplits[-1] += mod
            return _split_partitions(df, nsplits, new_name)
def _repartition_from_boundaries(df, new_partitions_boundaries, new_name):
    """Concatenate runs of existing partitions delimited by boundary indices."""
    if not isinstance(new_partitions_boundaries, list):
        new_partitions_boundaries = list(new_partitions_boundaries)
    boundaries = new_partitions_boundaries
    # Make sure the boundaries cover every input partition.
    if boundaries[0] > 0:
        boundaries.insert(0, 0)
    if boundaries[-1] < df.npartitions:
        boundaries.append(df.npartitions)
    dsk = {
        (new_name, i): (methods.concat, [(df._name, j) for j in range(start, stop)])
        for i, (start, stop) in enumerate(zip(boundaries, boundaries[1:]))
    }
    divisions = [df.divisions[i] for i in boundaries]
    graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])
    return new_dd_object(graph, new_name, df._meta, divisions)
def _split_partitions(df, nsplits, new_name):
    """ Split a Dask dataframe into new partitions

    Parameters
    ----------
    df: DataFrame or Series
    nsplits: List[int]
        Number of target dataframes for each partition
        The length of nsplits should be the same as df.npartitions
    new_name: str

    See Also
    --------
    repartition_npartitions
    repartition_size
    """
    if len(nsplits) != df.npartitions:
        raise ValueError("nsplits should have len={}".format(df.npartitions))
    dsk = {}
    split_name = "split-{}".format(tokenize(df, nsplits))
    j = 0
    for i, k in enumerate(nsplits):
        if k == 1:
            # No split needed: alias the existing partition directly.
            dsk[new_name, j] = (df._name, i)
            j += 1
        else:
            # Split partition i into k even pieces, then pull each piece out
            # of the resulting dict by its shard index.
            dsk[split_name, i] = (split_evenly, (df._name, i), k)
            for jj in range(k):
                dsk[new_name, j] = (getitem, (split_name, i), jj)
                j += 1
    # Division values are no longer known after splitting.
    divisions = [None] * (1 + sum(nsplits))
    graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])
    return new_dd_object(graph, new_name, df._meta, divisions)
def repartition(df, divisions=None, force=False):
    """ Repartition dataframe along new divisions

    Dask.DataFrame objects are partitioned along their index. Often when
    multiple dataframes interact we need to align these partitionings. The
    ``repartition`` function constructs a new DataFrame object holding the same
    data but partitioned on different values. It does this by performing a
    sequence of ``loc`` and ``concat`` calls to split and merge the previous
    generation of partitions.

    Parameters
    ----------
    divisions : list
        List of partitions to be used
    force : bool, default False
        Allows the expansion of the existing divisions.
        If False then the new divisions lower and upper bounds must be
        the same as the old divisions.

    Examples
    --------
    >>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP

    Also works on Pandas objects

    >>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
    """
    token = tokenize(df, divisions)
    if isinstance(df, _Frame):
        # Dask input: build a split-then-merge graph over the old partitions.
        tmp = "repartition-split-" + token
        out = "repartition-merge-" + token
        dsk = repartition_divisions(
            df.divisions, divisions, df._name, tmp, out, force=force
        )
        graph = HighLevelGraph.from_collections(out, dsk, dependencies=[df])
        return new_dd_object(graph, out, df._meta, divisions)
    elif is_dataframe_like(df) or is_series_like(df):
        # Plain pandas input: shard it by the interior divisions directly.
        name = "repartition-dataframe-" + token
        from .utils import shard_df_on_index

        dfs = shard_df_on_index(df, divisions[1:-1])
        dsk = dict(((name, i), df) for i, df in enumerate(dfs))
        return new_dd_object(dsk, name, df, divisions)
    raise ValueError("Data must be DataFrame or Series")
def _reduction_chunk(x, aca_chunk=None, **kwargs):
    """Apply the per-partition reduction ``aca_chunk`` to ``x``.

    Series results are promoted to one-row DataFrames so that the later
    concatenation step always yields a DataFrame.
    """
    result = aca_chunk(x, **kwargs)
    if is_series_like(result):
        return result.to_frame().T
    return result
def _reduction_combine(x, aca_combine=None, **kwargs):
    """Combine intermediate reduction results with ``aca_combine``.

    A plain list of intermediates is first coerced to a Series; Series
    results are promoted to one-row DataFrames so the concatenated
    result stays a DataFrame.
    """
    values = pd.Series(x) if isinstance(x, list) else x
    combined = aca_combine(values, **kwargs)
    if is_series_like(combined):
        return combined.to_frame().T
    return combined
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def idxmaxmin_chunk(x, fn=None, skipna=True):
    """Per-partition step of idxmax/idxmin.

    Produces a DataFrame pairing each column's extreme index label
    (``idx``) with the extreme value itself (``value``) so later steps
    can reduce the candidates.
    """
    if len(x) == 0:
        # Empty partition: emit an empty placeholder of a concrete dtype.
        idx = value = pd.Series([], dtype="i8")
    else:
        reducer = "max" if fn == "idxmax" else "min"
        idx = getattr(x, fn)(skipna=skipna)
        value = getattr(x, reducer)(skipna=skipna)
    if is_series_like(idx):
        # DataFrame input -> one candidate row per column.
        return pd.DataFrame({"idx": idx, "value": value})
    # Series input -> scalar results; wrap in length-1 columns.
    return pd.DataFrame({"idx": [idx], "value": [value]})
def idxmaxmin_row(x, fn=None, skipna=True):
    """Reduce one group's idx/value candidates to a single winning row.

    ``x`` holds candidate ``idx``/``value`` pairs from earlier steps;
    the winner is the candidate whose value is the overall max (for
    ``idxmax``) or min (for ``idxmin``).
    """
    if len(x) == 0:
        # No candidates: propagate an empty frame with a concrete dtype.
        empty = pd.Series([], dtype="i8")
        return pd.DataFrame({"idx": empty, "value": empty})
    reducer = "max" if fn == "idxmax" else "min"
    indexed = x.set_index("idx")
    winner = [getattr(indexed.value, fn)(skipna=skipna)]
    extreme = [getattr(indexed.value, reducer)(skipna=skipna)]
    return pd.DataFrame({"idx": winner, "value": extreme})
def idxmaxmin_combine(x, fn=None, skipna=True):
    """Combine per-chunk idxmax/idxmin candidates group by group.

    Each level-0 group's candidates are reduced to one row via
    ``idxmaxmin_row``; empty input passes through unchanged.
    """
    if len(x) == 0:
        return x
    reduced = x.groupby(level=0).apply(idxmaxmin_row, fn=fn, skipna=skipna)
    return reduced.reset_index(level=1, drop=True)
def idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):
    """Final aggregation step for idxmax/idxmin.

    Reduces all remaining candidates and returns either the single
    winning index label (``scalar=True``, Series input) or an unnamed
    Series of labels, one per column.
    """
    winners = idxmaxmin_combine(x, fn, skipna=skipna)["idx"]
    if not len(winners):
        raise ValueError("attempt to get argmax of an empty sequence")
    if scalar:
        return winners[0]
    winners.name = None
    return winners
def _count_aggregate(x):
return x.sum().astype("int64")
def safe_head(df, n):
    """Take ``df.head(n)``, warning instead of failing when fewer rows exist."""
    result = M.head(df, n)
    if len(result) != n:
        warnings.warn(
            "Insufficient elements for `head`. {0} elements "
            "requested, only {1} elements available. Try passing larger "
            "`npartitions` to `head`.".format(n, len(result))
        )
    return result
def maybe_shift_divisions(df, periods, freq):
    """Maybe shift divisions by periods of size freq
    Used to shift the divisions for the `shift` method. If freq isn't a fixed
    size (not anchored or relative), then the divisions are shifted
    appropriately. Otherwise the divisions are cleared.
    Parameters
    ----------
    df : dd.DataFrame, dd.Series, or dd.Index
    periods : int
        The number of periods to shift.
    freq : DateOffset, timedelta, or time rule string
        The frequency to shift by.
    """
    if isinstance(freq, str):
        # Accept rule strings like "D" or "2H" and resolve them to offsets.
        freq = pd.tseries.frequencies.to_offset(freq)
    is_offset = isinstance(freq, pd.DateOffset)
    if is_offset:
        if PANDAS_GT_100:
            is_anchored = freq.is_anchored()
        else:
            # Older pandas used the camelCase spelling.
            is_anchored = freq.isAnchored()
        if is_anchored or not hasattr(freq, "delta"):
            # Can't infer divisions on relative or anchored offsets, as
            # divisions may now split identical index value.
            # (e.g. index_partitions = [[1, 2, 3], [3, 4, 5]])
            return df.clear_divisions()
    if df.known_divisions:
        # Fixed-size shift: shift the division boundary values by the
        # same amount, reusing pandas' Series.shift on the boundaries.
        divs = pd.Series(range(len(df.divisions)), index=df.divisions)
        divisions = divs.shift(periods, freq=freq).index
        return type(df)(df.dask, df._name, df._meta, divisions)
    return df
@wraps(pd.to_datetime)
def to_datetime(arg, meta=None, **kwargs):
    if meta is not None:
        # Caller supplied metadata explicitly: use it as-is.
        return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)
    if isinstance(arg, Index):
        # Index input -> empty datetime index carrying the original name.
        meta = pd.DatetimeIndex([])
        meta.name = arg.name
    else:
        # Series-like input -> one-element datetime series whose index
        # mirrors the argument's index dtype and name.
        meta = pd.Series([pd.Timestamp("2000")])
        meta.index = meta.index.astype(arg.index.dtype)
        meta.index.name = arg.index.name
    return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)
@wraps(pd.to_timedelta)
def to_timedelta(arg, unit="ns", errors="raise"):
    # Metadata is a one-element timedelta series in the requested unit.
    return map_partitions(
        pd.to_timedelta,
        arg,
        unit=unit,
        errors=errors,
        meta=pd.Series([pd.Timedelta(1, unit=unit)]),
    )
# Only expose a parallel ``isna`` wrapper when the installed pandas
# actually provides ``pd.isna``.
if hasattr(pd, "isna"):
    @wraps(pd.isna)
    def isna(arg):
        # Element-wise missing-value check, applied partition-wise.
        return map_partitions(pd.isna, arg)
def _repr_data_series(s, index):
    """Build one dtype-plus-placeholders column for the ``_repr_data`` table."""
    npartitions = len(index) - 1
    if is_categorical_dtype(s):
        # Distinguish categoricals whose categories are already known.
        dtype = "category[known]" if has_known_categories(s) else "category[unknown]"
    else:
        dtype = str(s.dtype)
    return pd.Series([dtype] + ["..."] * npartitions, index=index, name=s.name)
# Single-dispatch registry mapping a meta object's type to the matching
# parallel (dask) collection class.
get_parallel_type = Dispatch("get_parallel_type")
@get_parallel_type.register(pd.Series)
def get_parallel_type_series(_):
    # pandas Series meta -> dask Series collection.
    return Series
@get_parallel_type.register(pd.DataFrame)
def get_parallel_type_dataframe(_):
    # pandas DataFrame meta -> dask DataFrame collection.
    return DataFrame
@get_parallel_type.register(pd.Index)
def get_parallel_type_index(_):
    # pandas Index meta -> dask Index collection.
    return Index
@get_parallel_type.register(object)
def get_parallel_type_object(o):
    # Fallback for any unregistered type: treat it as a Scalar.
    return Scalar
@get_parallel_type.register(_Frame)
def get_parallel_type_frame(o):
    # Dask collections dispatch on their own meta object.
    return get_parallel_type(o._meta)
def parallel_types():
    """Return the tuple of types that map to a real dask collection.

    The generic ``object`` fallback registration is excluded so that
    membership tests only match explicitly registered types.
    """
    registry = get_parallel_type._lookup.items()
    return tuple(
        typ for typ, handler in registry if handler is not get_parallel_type_object
    )
def has_parallel_type(x):
    """Return True when ``x`` has a dask dataframe equivalent."""
    # Calling the dispatcher first forces any lazy registrations to run
    # before we inspect the registry contents.
    get_parallel_type(x)
    return isinstance(x, parallel_types())
def new_dd_object(dsk, name, meta, divisions):
    """Generic constructor for dask.dataframe objects.
    Decides the appropriate output class based on the type of `meta` provided.
    """
    if has_parallel_type(meta):
        return get_parallel_type(meta)(dsk, name, meta, divisions)
    elif is_arraylike(meta) and meta.shape:
        import dask.array as da
        # Array-like meta: emit a dask Array instead. Row counts per
        # partition are unknown (NaN); trailing chunk sizes come from
        # the meta's remaining dimensions.
        chunks = ((np.nan,) * (len(divisions) - 1),) + tuple(
            (d,) for d in meta.shape[1:]
        )
        if len(chunks) > 1:
            layer = dsk.layers[name]
            if isinstance(layer, Blockwise):
                # Extend the blockwise layer in place with a new output
                # axis "j" sized to the second chunk dimension.
                layer.new_axes["j"] = chunks[1][0]
                layer.output_indices = layer.output_indices + ("j",)
            else:
                # Materialized layer: rewrite keys (name, i) ->
                # (name, i, 0, ...) to match the added axes.
                suffix = (0,) * (len(chunks) - 1)
                for i in range(len(chunks[0])):
                    layer[(name, i) + suffix] = layer.pop((name, i))
        return da.Array(dsk, name=name, chunks=chunks, dtype=meta.dtype)
    else:
        # Scalar-like meta falls through the same dispatcher.
        return get_parallel_type(meta)(dsk, name, meta, divisions)
def partitionwise_graph(func, name, *args, **kwargs):
    """
    Apply a function partition-wise across arguments to create layer of a graph
    This applies a function, ``func``, in an embarrassingly parallel fashion
    across partitions/chunks in the provided arguments.  It handles Dataframes,
    Arrays, and scalars smoothly, and relies on the ``blockwise`` machinery
    to provide a nicely symbolic graph.
    It is most commonly used in other graph-building functions to create the
    appropriate layer of the resulting dataframe.
    Parameters
    ----------
    func: callable
    name: str
        descriptive name for the operation
    *args:
    **kwargs:
    Returns
    -------
    out: Blockwise graph
    Examples
    --------
    >>> subgraph = partitionwise_graph(function, x, y, z=123)  # doctest: +SKIP
    >>> layer = partitionwise_graph(function, df, x, z=123)  # doctest: +SKIP
    >>> graph = HighLevelGraph.from_collections(name, layer, dependencies=[df, x])  # doctest: +SKIP
    >>> result = new_dd_object(graph, name, metadata, df.divisions)  # doctest: +SKIP
    See Also
    --------
    map_partitions
    """
    # Build the (name, index-string) argument pairs blockwise expects.
    pairs = []
    numblocks = {}
    for arg in args:
        if isinstance(arg, _Frame):
            # Dataframe/series collections: one block per partition, along "i".
            pairs.extend([arg._name, "i"])
            numblocks[arg._name] = (arg.npartitions,)
        elif isinstance(arg, Scalar):
            # Scalars contribute a single block, still indexed along "i".
            pairs.extend([arg._name, "i"])
            numblocks[arg._name] = (1,)
        elif isinstance(arg, Array):
            # Arrays up to 2-D align their first axis with the partitions.
            if arg.ndim == 1:
                pairs.extend([arg.name, "i"])
            elif arg.ndim == 0:
                pairs.extend([arg.name, ""])
            elif arg.ndim == 2:
                pairs.extend([arg.name, "ij"])
            else:
                raise ValueError("Can't add multi-dimensional array to dataframes")
            numblocks[arg._name] = arg.numblocks
        else:
            # Anything else is passed through as a plain, non-indexed value.
            pairs.extend([arg, None])
    return blockwise(
        func, name, "i", *pairs, numblocks=numblocks, concatenate=True, **kwargs
    )
def meta_warning(df):
    """Build the warning text shown when the user did not supply ``meta``."""
    # Render a suggested meta value from the sampled result, when possible.
    if is_dataframe_like(df):
        dtypes = df.dtypes.to_dict()
        meta_str = {column: str(dtype) for column, dtype in dtypes.items()}
    elif is_series_like(df):
        meta_str = (df.name, str(df.dtype))
    else:
        meta_str = None
    msg = (
        "\nYou did not provide metadata, so Dask is running your "
        "function on a small dataset to guess output types. "
        "It is possible that Dask will guess incorrectly.\n"
        "To provide an explicit output types or to silence this message, "
        "please provide the `meta=` keyword, as described in the map or "
        "apply function that you are using."
    )
    if meta_str:
        msg += (
            "\n"
            " Before: .apply(func)\n"
            " After: .apply(func, meta=%s)\n" % str(meta_str)
        )
    return msg
def prefix_reduction(f, ddf, identity, **kwargs):
    """ Computes the prefix sums of f on df
    If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with
    partitions [f(identity, P1),
                f(f(identity, P1), P2),
                f(f(f(identity, P1), P2), P3),
                ...]
    Parameters
    ----------
    f : callable
        an associative function f
    ddf : dd.DataFrame
    identity : pd.DataFrame
        an identity element of f, that is f(identity, df) = f(df, identity) = df
    kwargs : dict
        keyword arguments forwarded to every invocation of ``f``
    """
    dsk = dict()
    name = "prefix_reduction-" + tokenize(f, ddf, identity, **kwargs)
    meta = ddf._meta
    n = len(ddf.divisions) - 1
    divisions = [None] * (n + 1)
    # Pad the number of leaves up to the next power of two; the extra
    # slots are filled with the identity element.
    N = 1
    while N < n:
        N *= 2
    # Leaves: (name, i, 1, 0) holds f(P_i, identity).
    for i in range(n):
        dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, i), identity], kwargs)
    for i in range(n, N):
        dsk[(name, i, 1, 0)] = identity
    # Up-sweep: (name, i, d, 0) keys combine adjacent width-d ranges
    # into width-2d reductions, doubling d each round.
    d = 1
    while d < N:
        for i in range(0, N, 2 * d):
            dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (
                apply,
                f,
                [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, d, 0)],
                kwargs,
            )
        d *= 2
    # Down-sweep: (name, i, d, 1) keys propagate accumulated prefixes
    # back toward the leaves, halving d each round.
    dsk[(name, N - 1, N, 1)] = identity
    while d > 1:
        d //= 2
        for i in range(0, N, 2 * d):
            dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)
            dsk[(name, i + 2 * d - 1, d, 1)] = (
                apply,
                f,
                [(name, i + 2 * d - 1, 2 * d, 1), (name, i + d - 1, d, 0)],
                kwargs,
            )
    # Output: fold each partition's accumulated prefix once more with identity.
    for i in range(n):
        dsk[(name, i)] = (apply, f, [(name, i, 1, 1), identity], kwargs)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
    return new_dd_object(graph, name, meta, divisions)
def suffix_reduction(f, ddf, identity, **kwargs):
    """ Computes the suffix sums of f on df
    If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with
    partitions [f(P1, f(P2, ...f(Pn, identity)...)),
                f(P2, ...f(Pn, identity)...),
                ...f(Pn, identity)...,
                ...]
    Parameters
    ----------
    f : callable
        an associative function f
    ddf : dd.DataFrame
    identity : pd.DataFrame
        an identity element of f, that is f(identity, df) = f(df, identity) = df
    kwargs : dict
        keyword arguments forwarded to every invocation of ``f``
    """
    dsk = dict()
    name = "suffix_reduction-" + tokenize(f, ddf, identity, **kwargs)
    meta = ddf._meta
    n = len(ddf.divisions) - 1
    divisions = [None] * (n + 1)
    # Pad the number of leaves up to the next power of two; the extra
    # slots are filled with the identity element.
    N = 1
    while N < n:
        N *= 2
    # Leaves: partitions are loaded in REVERSE order so the same scan
    # structure as prefix_reduction yields suffix sums.
    for i in range(n):
        dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, n - 1 - i), identity], kwargs)
    for i in range(n, N):
        dsk[(name, i, 1, 0)] = identity
    # Up-sweep: combine adjacent width-d ranges (note the swapped operand
    # order relative to prefix_reduction).
    d = 1
    while d < N:
        for i in range(0, N, 2 * d):
            dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (
                apply,
                f,
                [(name, i + 2 * d - 1, d, 0), (name, i + d - 1, d, 0)],
                kwargs,
            )
        d *= 2
    # Down-sweep: propagate accumulated values back toward the leaves.
    dsk[(name, N - 1, N, 1)] = identity
    while d > 1:
        d //= 2
        for i in range(0, N, 2 * d):
            dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)
            dsk[(name, i + 2 * d - 1, d, 1)] = (
                apply,
                f,
                [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, 2 * d, 1)],
                kwargs,
            )
    # Output: undo the reversal so partition i receives its suffix result.
    for i in range(n):
        dsk[(name, i)] = (apply, f, [(name, n - 1 - i, 1, 1), identity], kwargs)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
    return new_dd_object(graph, name, meta, divisions)
def mapseries(base_chunk, concat_map):
    # Map one base-series shard through the concatenated mapping shards.
    return base_chunk.map(concat_map)
def mapseries_combine(index, concat_result):
    """Reassemble mapped shards into a series aligned with ``index``.

    The concatenated shard results are sorted by index, then looked up
    once per entry of the original index to restore its ordering.
    """
    ordered = concat_result.sort_index()
    return index.to_series().map(ordered)
def series_map(base_series, map_series):
    """Map ``base_series`` through ``map_series`` (index -> value lookup),
    hash-sharding both so that matching keys land in the same shard.
    """
    npartitions = base_series.npartitions
    split_out = map_series.npartitions
    dsk = {}
    # Phase 1: hash-split every base partition into ``split_out`` shards.
    base_token_key = tokenize(base_series, split_out)
    base_split_prefix = "base-split-{}".format(base_token_key)
    base_shard_prefix = "base-shard-{}".format(base_token_key)
    for i, key in enumerate(base_series.__dask_keys__()):
        dsk[(base_split_prefix, i)] = (hash_shard, key, split_out)
        for j in range(split_out):
            dsk[(base_shard_prefix, 0, i, j)] = (getitem, (base_split_prefix, i), j)
    # Phase 2: hash-split the mapping series the same way, sharding on
    # its index (the lookup keys).
    map_token_key = tokenize(map_series)
    map_split_prefix = "map-split-{}".format(map_token_key)
    map_shard_prefix = "map-shard-{}".format(map_token_key)
    for i, key in enumerate(map_series.__dask_keys__()):
        dsk[(map_split_prefix, i)] = (
            hash_shard,
            key,
            split_out,
            split_out_on_index,
            None,
        )
        for j in range(split_out):
            dsk[(map_shard_prefix, 0, i, j)] = (getitem, (map_split_prefix, i), j)
    # Phase 3: map each base shard through the concatenation of the
    # matching map shards (same hash bucket j).
    token_key = tokenize(base_series, map_series)
    map_prefix = "map-series-{}".format(token_key)
    for i in range(npartitions):
        for j in range(split_out):
            dsk[(map_prefix, i, j)] = (
                mapseries,
                (base_shard_prefix, 0, i, j),
                (_concat, [(map_shard_prefix, 0, k, j) for k in range(split_out)]),
            )
    # Phase 4: recombine the shards of each original partition, restoring
    # the original index order via the base series' index.
    final_prefix = "map-series-combine-{}".format(token_key)
    for i, key in enumerate(base_series.index.__dask_keys__()):
        dsk[(final_prefix, i)] = (
            mapseries_combine,
            key,
            (_concat, [(map_prefix, i, j) for j in range(split_out)]),
        )
    # Metadata: mapped values carried on the base series' index.
    meta = map_series._meta.copy()
    meta.index = base_series._meta.index
    meta = make_meta(meta)
    dependencies = [base_series, map_series, base_series.index]
    graph = HighLevelGraph.from_collections(
        final_prefix, dsk, dependencies=dependencies
    )
    divisions = list(base_series.divisions)
    return new_dd_object(graph, final_prefix, meta, divisions)
|
from rawserialised import *
from boostmappings import mappings
class Parser:
    """Parse serialised function descriptions into tuples of mapped type names.

    Each parsed entry is a ``([return_types], [arg_types])`` tuple whose
    elements are rendered type-chain strings (see ``_chain_nested``).
    """

    def __init__(self, funcs):
        # funcs: iterable of serialised function dicts to parse.
        self.funcs = funcs
        self.parsed = []

    def parse(self):
        """Parse every function, appending one (ret, args) tuple per function."""
        for func in self.funcs:
            self.parsed.append(Parser._parse(func))

    ##static methods below as they do not require instance state
    @staticmethod
    def _parse(serialised):
        """Return ([return-type-chain], [arg-type-chains]) for one function.

        Missing or malformed sections degrade to the placeholder 'None'.
        """
        nested_args = []
        nested_ret = []
        ##parse arg types
        # Only the expected failure modes are caught: a missing key
        # (KeyError) or a non-mapping value (TypeError). The previous bare
        # ``except:`` also hid genuine bugs such as NameError.
        try:
            for arg in serialised['type']['arg_types']:
                nested_args.append(Parser._chain_nested(arg))
        except (KeyError, TypeError):
            nested_args.append('None')
        ##parse return types
        try:
            nested_ret.append(Parser._chain_nested(serialised['type']['ret_type']))
        except (KeyError, TypeError):
            nested_ret.append('None')
        return (nested_ret, nested_args)

    @staticmethod
    def _chain_nested(arg):
        """Recursively render a (possibly templated/nested) type description.

        Containers ('args'/'items') render as "outer[inner, ...]"; plain
        entries render as the mapped 'type_ref' name.
        """
        try:
            if 'args' in arg:
                return '%s%s' % (
                    Parser._mapped(arg['type_ref']),
                    [Parser._chain_nested(a) for a in arg['args']],
                )
            elif 'items' in arg:
                return '%s%s' % (
                    Parser._mapped(arg['fallback']['type_ref']),
                    [Parser._chain_nested(i) for i in arg['items']],
                )
            elif 'item' in arg:
                return Parser._chain_nested(arg['item'])
            else:
                return Parser._mapped(arg['type_ref'])
        except (KeyError, TypeError):
            return 'None'

    @staticmethod
    def _mapped(builtin):
        # Translate a type name through the mappings table, falling back
        # to the name itself when no mapping exists.
        return mappings.get(builtin, builtin)
# Ad-hoc smoke test: parse the sample 'inproduct' description (from the
# rawserialised star import) and print the result.
if __name__ == '__main__':
    parser = Parser([inproduct])
    parser.parse()
    print(parser.parsed)
|
import setuptools
# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
name="skender-stock-indicators",
version="0.0.1",
author="Dave Skender",
maintainer="Dong-Geon Lee",
description="Stock indicators. Send in historical price quotes and get back desired technical indicators such as Stochastic RSI, Average True Range, Parabolic SAR, etc. Nothing more.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://daveskender.github.io/Stock.Indicators/wraps/python",
project_urls={
"Bug Tracker": "https://github.com/DaveSkender/Stock.Indicators/issues",
"Documentation": "https://daveskender.github.io/Stock.Indicators/wraps/python",
"Source Code": "https://github.com/DaveSkender/Stock.Indicators/tree/master/wraps/python",
},
license="Apache 2.0",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
platforms=["Windows", "Linux"],
package_dir={"": "."},
packages=setuptools.find_packages(exclude=('tests', 'tests.*')),
package_data={
"SkenderStockIndicators._cslib": ["lib/*.dll"],
},
python_requires=">=3.8",
install_requires=[
'pythonnet',
],
)
|
"""
core layer, the core function module, provide the minimal function or features that data scientist may use
can be customized in services layer design
"""
|
from PhaseNet_Analysis import run_phasenet
def test_run_phasenet(benchmark):
    # Time a full run_phasenet execution via the pytest-benchmark fixture.
    benchmark(run_phasenet)
|
"""Test the Yeelight light."""
import logging
from unittest.mock import ANY, AsyncMock, MagicMock, call, patch
from yeelight import (
BulbException,
BulbType,
HSVTransition,
LightType,
PowerMode,
RGBTransition,
SceneClass,
SleepTransition,
TemperatureTransition,
transitions,
)
from yeelight.flow import Action, Flow
from yeelight.main import _MODEL_SPECS
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
FLASH_LONG,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.components.yeelight import (
ATTR_COUNT,
ATTR_MODE_MUSIC,
ATTR_TRANSITIONS,
CONF_CUSTOM_EFFECTS,
CONF_FLOW_PARAMS,
CONF_MODE_MUSIC,
CONF_NIGHTLIGHT_SWITCH,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DEFAULT_MODE_MUSIC,
DEFAULT_NIGHTLIGHT_SWITCH,
DEFAULT_SAVE_ON_CHANGE,
DEFAULT_TRANSITION,
DOMAIN,
YEELIGHT_HSV_TRANSACTION,
YEELIGHT_RGB_TRANSITION,
YEELIGHT_SLEEP_TRANSACTION,
YEELIGHT_TEMPERATURE_TRANSACTION,
)
from homeassistant.components.yeelight.light import (
ATTR_MINUTES,
ATTR_MODE,
EFFECT_CANDLE_FLICKER,
EFFECT_DATE_NIGHT,
EFFECT_DISCO,
EFFECT_FACEBOOK,
EFFECT_FAST_RANDOM_LOOP,
EFFECT_HAPPY_BIRTHDAY,
EFFECT_HOME,
EFFECT_MOVIE,
EFFECT_NIGHT_MODE,
EFFECT_ROMANCE,
EFFECT_STOP,
EFFECT_SUNRISE,
EFFECT_SUNSET,
EFFECT_TWITTER,
EFFECT_WHATSAPP,
SERVICE_SET_AUTO_DELAY_OFF_SCENE,
SERVICE_SET_COLOR_FLOW_SCENE,
SERVICE_SET_COLOR_SCENE,
SERVICE_SET_COLOR_TEMP_SCENE,
SERVICE_SET_HSV_SCENE,
SERVICE_SET_MODE,
SERVICE_SET_MUSIC_MODE,
SERVICE_START_FLOW,
SUPPORT_YEELIGHT,
YEELIGHT_COLOR_EFFECT_LIST,
YEELIGHT_MONO_EFFECT_LIST,
YEELIGHT_TEMP_ONLY_EFFECT_LIST,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.color import (
color_hs_to_RGB,
color_hs_to_xy,
color_RGB_to_hs,
color_RGB_to_xy,
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import (
CAPABILITIES,
ENTITY_LIGHT,
ENTITY_NIGHTLIGHT,
IP_ADDRESS,
MODULE,
NAME,
PROPERTIES,
UNIQUE_FRIENDLY_NAME,
_mocked_bulb,
_patch_discovery,
_patch_discovery_interval,
)
from tests.common import MockConfigEntry
# Baseline config-entry payload shared by the tests below; individual
# tests override specific keys via dict unpacking.
CONFIG_ENTRY_DATA = {
    CONF_HOST: IP_ADDRESS,
    CONF_TRANSITION: DEFAULT_TRANSITION,
    CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
    CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
    CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
async def test_services(hass: HomeAssistant, caplog):
    """Test Yeelight services."""
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            **CONFIG_ENTRY_DATA,
            CONF_MODE_MUSIC: True,
            CONF_SAVE_ON_CHANGE: True,
            CONF_NIGHTLIGHT_SWITCH: True,
        },
    )
    config_entry.add_to_hass(hass)
    mocked_bulb = _mocked_bulb()
    with _patch_discovery(), _patch_discovery_interval(), patch(
        f"{MODULE}.AsyncBulb", return_value=mocked_bulb
    ):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    async def _async_test_service(
        service,
        data,
        method,
        payload=None,
        domain=DOMAIN,
        failure_side_effect=BulbException,
    ):
        """Call ``service`` and assert the bulb ``method`` was invoked with
        ``payload``; then (optionally) re-call with ``failure_side_effect``
        installed and assert exactly one new error was logged."""
        err_count = len([x for x in caplog.records if x.levelno == logging.ERROR])
        # success
        if method.startswith("async_"):
            mocked_method = AsyncMock()
        else:
            mocked_method = MagicMock()
        setattr(mocked_bulb, method, mocked_method)
        await hass.services.async_call(domain, service, data, blocking=True)
        if payload is None:
            mocked_method.assert_called_once()
        elif type(payload) == list:
            mocked_method.assert_called_once_with(*payload)
        else:
            mocked_method.assert_called_once_with(**payload)
        assert (
            len([x for x in caplog.records if x.levelno == logging.ERROR]) == err_count
        )
        # failure
        if failure_side_effect:
            if method.startswith("async_"):
                mocked_method = AsyncMock(side_effect=failure_side_effect)
            else:
                mocked_method = MagicMock(side_effect=failure_side_effect)
            setattr(mocked_bulb, method, mocked_method)
            await hass.services.async_call(domain, service, data, blocking=True)
            assert (
                len([x for x in caplog.records if x.levelno == logging.ERROR])
                == err_count + 1
            )
    # turn_on rgb_color
    brightness = 100
    rgb_color = (0, 128, 255)
    transition = 2
    mocked_bulb.last_properties["power"] = "off"
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_BRIGHTNESS: brightness,
            ATTR_RGB_COLOR: rgb_color,
            ATTR_FLASH: FLASH_LONG,
            ATTR_EFFECT: EFFECT_STOP,
            ATTR_TRANSITION: transition,
        },
        blocking=True,
    )
    mocked_bulb.async_turn_on.assert_called_once_with(
        duration=transition * 1000,
        light_type=LightType.Main,
        power_mode=PowerMode.NORMAL,
    )
    mocked_bulb.async_turn_on.reset_mock()
    mocked_bulb.start_music.assert_called_once()
    mocked_bulb.start_music.reset_mock()
    mocked_bulb.async_set_brightness.assert_called_once_with(
        brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
    )
    mocked_bulb.async_set_brightness.reset_mock()
    mocked_bulb.async_set_color_temp.assert_not_called()
    mocked_bulb.async_set_color_temp.reset_mock()
    mocked_bulb.async_set_hsv.assert_not_called()
    mocked_bulb.async_set_hsv.reset_mock()
    mocked_bulb.async_set_rgb.assert_called_once_with(
        *rgb_color, duration=transition * 1000, light_type=LightType.Main
    )
    mocked_bulb.async_set_rgb.reset_mock()
    mocked_bulb.async_start_flow.assert_called_once()  # flash
    mocked_bulb.async_start_flow.reset_mock()
    mocked_bulb.async_stop_flow.assert_called_once_with(light_type=LightType.Main)
    mocked_bulb.async_stop_flow.reset_mock()
    # turn_on hs_color
    brightness = 100
    hs_color = (180, 100)
    transition = 2
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_BRIGHTNESS: brightness,
            ATTR_HS_COLOR: hs_color,
            ATTR_FLASH: FLASH_LONG,
            ATTR_EFFECT: EFFECT_STOP,
            ATTR_TRANSITION: transition,
        },
        blocking=True,
    )
    mocked_bulb.async_turn_on.assert_called_once_with(
        duration=transition * 1000,
        light_type=LightType.Main,
        power_mode=PowerMode.NORMAL,
    )
    mocked_bulb.async_turn_on.reset_mock()
    mocked_bulb.start_music.assert_called_once()
    mocked_bulb.start_music.reset_mock()
    mocked_bulb.async_set_brightness.assert_called_once_with(
        brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
    )
    mocked_bulb.async_set_brightness.reset_mock()
    mocked_bulb.async_set_color_temp.assert_not_called()
    mocked_bulb.async_set_color_temp.reset_mock()
    mocked_bulb.async_set_hsv.assert_called_once_with(
        *hs_color, duration=transition * 1000, light_type=LightType.Main
    )
    mocked_bulb.async_set_hsv.reset_mock()
    mocked_bulb.async_set_rgb.assert_not_called()
    mocked_bulb.async_set_rgb.reset_mock()
    mocked_bulb.async_start_flow.assert_called_once()  # flash
    mocked_bulb.async_start_flow.reset_mock()
    mocked_bulb.async_stop_flow.assert_called_once_with(light_type=LightType.Main)
    mocked_bulb.async_stop_flow.reset_mock()
    # turn_on color_temp
    brightness = 100
    color_temp = 200
    transition = 1
    mocked_bulb.last_properties["power"] = "off"
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_BRIGHTNESS: brightness,
            ATTR_COLOR_TEMP: color_temp,
            ATTR_FLASH: FLASH_LONG,
            ATTR_EFFECT: EFFECT_STOP,
            ATTR_TRANSITION: transition,
        },
        blocking=True,
    )
    mocked_bulb.async_turn_on.assert_called_once_with(
        duration=transition * 1000,
        light_type=LightType.Main,
        power_mode=PowerMode.NORMAL,
    )
    mocked_bulb.async_turn_on.reset_mock()
    mocked_bulb.start_music.assert_called_once()
    mocked_bulb.async_set_brightness.assert_called_once_with(
        brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
    )
    # The bulb API takes kelvin, so the mired service value is converted.
    mocked_bulb.async_set_color_temp.assert_called_once_with(
        color_temperature_mired_to_kelvin(color_temp),
        duration=transition * 1000,
        light_type=LightType.Main,
    )
    mocked_bulb.async_set_hsv.assert_not_called()
    mocked_bulb.async_set_rgb.assert_not_called()
    mocked_bulb.async_start_flow.assert_called_once()  # flash
    mocked_bulb.async_stop_flow.assert_called_once_with(light_type=LightType.Main)
    mocked_bulb.last_properties["power"] = "off"
    # turn_on nightlight
    await _async_test_service(
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_NIGHTLIGHT},
        "async_turn_on",
        payload={
            "duration": DEFAULT_TRANSITION,
            "light_type": LightType.Main,
            "power_mode": PowerMode.MOONLIGHT,
        },
        domain="light",
    )
    mocked_bulb.last_properties["power"] = "on"
    # turn_off
    await _async_test_service(
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_TRANSITION: transition},
        "async_turn_off",
        domain="light",
        payload={"duration": transition * 1000, "light_type": LightType.Main},
    )
    # set_mode
    mode = "rgb"
    await _async_test_service(
        SERVICE_SET_MODE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE: "rgb"},
        "async_set_power_mode",
        [PowerMode[mode.upper()]],
    )
    # start_flow
    await _async_test_service(
        SERVICE_START_FLOW,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_TRANSITIONS: [{YEELIGHT_TEMPERATURE_TRANSACTION: [1900, 2000, 60]}],
        },
        "async_start_flow",
    )
    # set_color_scene
    await _async_test_service(
        SERVICE_SET_COLOR_SCENE,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_RGB_COLOR: [10, 20, 30],
            ATTR_BRIGHTNESS: 50,
        },
        "async_set_scene",
        [SceneClass.COLOR, 10, 20, 30, 50],
    )
    # set_hsv_scene
    await _async_test_service(
        SERVICE_SET_HSV_SCENE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_HS_COLOR: [180, 50], ATTR_BRIGHTNESS: 50},
        "async_set_scene",
        [SceneClass.HSV, 180, 50, 50],
    )
    # set_color_temp_scene
    await _async_test_service(
        SERVICE_SET_COLOR_TEMP_SCENE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_KELVIN: 4000, ATTR_BRIGHTNESS: 50},
        "async_set_scene",
        [SceneClass.CT, 4000, 50],
    )
    # set_color_flow_scene
    await _async_test_service(
        SERVICE_SET_COLOR_FLOW_SCENE,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_TRANSITIONS: [{YEELIGHT_TEMPERATURE_TRANSACTION: [1900, 2000, 60]}],
        },
        "async_set_scene",
    )
    # set_auto_delay_off_scene
    await _async_test_service(
        SERVICE_SET_AUTO_DELAY_OFF_SCENE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MINUTES: 1, ATTR_BRIGHTNESS: 50},
        "async_set_scene",
        [SceneClass.AUTO_DELAY_OFF, 50, 1],
    )
    # set_music_mode failure enable
    await _async_test_service(
        SERVICE_SET_MUSIC_MODE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE_MUSIC: "true"},
        "start_music",
        failure_side_effect=AssertionError,
    )
    # set_music_mode disable
    await _async_test_service(
        SERVICE_SET_MUSIC_MODE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE_MUSIC: "false"},
        "stop_music",
        failure_side_effect=None,
    )
    # set_music_mode success enable
    await _async_test_service(
        SERVICE_SET_MUSIC_MODE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE_MUSIC: "true"},
        "start_music",
        failure_side_effect=None,
    )
    # test _cmd wrapper error handler
    mocked_bulb.last_properties["power"] = "off"
    err_count = len([x for x in caplog.records if x.levelno == logging.ERROR])
    type(mocked_bulb).turn_on = MagicMock()
    type(mocked_bulb).set_brightness = MagicMock(side_effect=BulbException)
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_BRIGHTNESS: 50},
        blocking=True,
    )
    assert (
        len([x for x in caplog.records if x.levelno == logging.ERROR]) == err_count + 1
    )
async def test_state_already_set_avoid_ratelimit(hass: HomeAssistant):
    """Ensure we suppress state changes that will increase the rate limit when there is no change."""
    mocked_bulb = _mocked_bulb()
    properties = {**PROPERTIES}
    properties.pop("active_mode")
    properties["color_mode"] = "3"  # HSV
    mocked_bulb.last_properties = properties
    mocked_bulb.bulb_type = BulbType.Color
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False}
    )
    config_entry.add_to_hass(hass)
    with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        # We use asyncio.create_task now to avoid
        # blocking starting so we need to block again
        await hass.async_block_till_done()
    # Turning on with the bulb's current HSV must not issue any bulb call.
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_HS_COLOR: (PROPERTIES["hue"], PROPERTIES["sat"]),
        },
        blocking=True,
    )
    assert mocked_bulb.async_set_hsv.mock_calls == []
    assert mocked_bulb.async_set_rgb.mock_calls == []
    assert mocked_bulb.async_set_color_temp.mock_calls == []
    assert mocked_bulb.async_set_brightness.mock_calls == []
    mocked_bulb.last_properties["color_mode"] = 1
    # Decode the packed rgb property into its byte components.
    rgb = int(PROPERTIES["rgb"])
    blue = rgb & 0xFF
    green = (rgb >> 8) & 0xFF
    red = (rgb >> 16) & 0xFF
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_RGB_COLOR: (red, green, blue)},
        blocking=True,
    )
    assert mocked_bulb.async_set_hsv.mock_calls == []
    assert mocked_bulb.async_set_rgb.mock_calls == []
    assert mocked_bulb.async_set_color_temp.mock_calls == []
    assert mocked_bulb.async_set_brightness.mock_calls == []
    mocked_bulb.async_set_rgb.reset_mock()
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_BRIGHTNESS_PCT: PROPERTIES["current_brightness"],
        },
        blocking=True,
    )
    assert mocked_bulb.async_set_hsv.mock_calls == []
    assert mocked_bulb.async_set_rgb.mock_calls == []
    assert mocked_bulb.async_set_color_temp.mock_calls == []
    assert mocked_bulb.async_set_brightness.mock_calls == []
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_COLOR_TEMP: 250},
        blocking=True,
    )
    assert mocked_bulb.async_set_hsv.mock_calls == []
    assert mocked_bulb.async_set_rgb.mock_calls == []
    # Should call for the color mode change
    assert mocked_bulb.async_set_color_temp.mock_calls == [
        call(4000, duration=350, light_type=ANY)
    ]
    assert mocked_bulb.async_set_brightness.mock_calls == []
    mocked_bulb.async_set_color_temp.reset_mock()
    mocked_bulb.last_properties["color_mode"] = 2
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_COLOR_TEMP: 250},
        blocking=True,
    )
    assert mocked_bulb.async_set_hsv.mock_calls == []
    assert mocked_bulb.async_set_rgb.mock_calls == []
    assert mocked_bulb.async_set_color_temp.mock_calls == []
    assert mocked_bulb.async_set_brightness.mock_calls == []
    mocked_bulb.last_properties["color_mode"] = 3
    # This last change should generate a call even though
    # the color mode is the same since the HSV has changed
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_HS_COLOR: (5, 5)},
        blocking=True,
    )
    assert mocked_bulb.async_set_hsv.mock_calls == [
        call(5.0, 5.0, duration=350, light_type=ANY)
    ]
    assert mocked_bulb.async_set_rgb.mock_calls == []
    assert mocked_bulb.async_set_color_temp.mock_calls == []
    assert mocked_bulb.async_set_brightness.mock_calls == []
async def test_device_types(hass: HomeAssistant, caplog):
    """Test different device types.

    Exercises every supported bulb type (mono, white, color, white-temp,
    white-temp-mood and the ambilight/background light) and asserts the
    exact entity attributes produced for each color mode.
    """
    mocked_bulb = _mocked_bulb()
    properties = {**PROPERTIES}
    properties.pop("active_mode")
    properties["color_mode"] = "3" # HSV
    mocked_bulb.last_properties = properties
    async def _async_setup(config_entry):
        # Set the config entry up with the mocked bulb patched in.
        with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
            assert await hass.config_entries.async_setup(config_entry.entry_id)
            await hass.async_block_till_done()
            # We use asyncio.create_task now to avoid
            # blocking starting so we need to block again
            await hass.async_block_till_done()
    async def _async_test(
        bulb_type,
        model,
        target_properties,
        nightlight_properties=None,
        name=UNIQUE_FRIENDLY_NAME,
        entity_id=ENTITY_LIGHT,
    ):
        # Set up an entry for the given bulb type/model, verify the light's
        # state attributes exactly, then tear the entry down. When
        # nightlight_properties is given, repeat with the nightlight switch
        # enabled and verify the nightlight entity too.
        config_entry = MockConfigEntry(
            domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False}
        )
        config_entry.add_to_hass(hass)
        mocked_bulb.bulb_type = bulb_type
        model_specs = _MODEL_SPECS.get(model)
        type(mocked_bulb).get_model_specs = MagicMock(return_value=model_specs)
        await _async_setup(config_entry)
        state = hass.states.get(entity_id)
        assert state.state == "on"
        # These attributes are common to every device type under test.
        target_properties["friendly_name"] = name
        target_properties["flowing"] = False
        target_properties["night_light"] = True
        target_properties["music_mode"] = False
        assert dict(state.attributes) == target_properties
        await hass.config_entries.async_unload(config_entry.entry_id)
        await config_entry.async_remove(hass)
        registry = er.async_get(hass)
        registry.async_clear_config_entry(config_entry.entry_id)
        # nightlight
        if nightlight_properties is None:
            return
        config_entry = MockConfigEntry(
            domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: True}
        )
        config_entry.add_to_hass(hass)
        await _async_setup(config_entry)
        assert hass.states.get(entity_id).state == "off"
        state = hass.states.get(f"{entity_id}_nightlight")
        assert state.state == "on"
        nightlight_properties["friendly_name"] = f"{name} Nightlight"
        nightlight_properties["icon"] = "mdi:weather-night"
        nightlight_properties["flowing"] = False
        nightlight_properties["night_light"] = True
        nightlight_properties["music_mode"] = False
        assert dict(state.attributes) == nightlight_properties
        await hass.config_entries.async_unload(config_entry.entry_id)
        await config_entry.async_remove(hass)
        registry.async_clear_config_entry(config_entry.entry_id)
        await hass.async_block_till_done()
    # Expected attribute values, derived from the mocked bulb properties.
    # Brightness percentages are converted to the 0-255 scale, color
    # temperatures from Kelvin to mireds, and the packed RGB int is split
    # into its (r, g, b) components.
    bright = round(255 * int(PROPERTIES["bright"]) / 100)
    current_brightness = round(255 * int(PROPERTIES["current_brightness"]) / 100)
    ct = color_temperature_kelvin_to_mired(int(PROPERTIES["ct"]))
    hue = int(PROPERTIES["hue"])
    sat = int(PROPERTIES["sat"])
    rgb = int(PROPERTIES["rgb"])
    rgb_color = ((rgb >> 16) & 0xFF, (rgb >> 8) & 0xFF, rgb & 0xFF)
    hs_color = (hue, sat)
    bg_bright = round(255 * int(PROPERTIES["bg_bright"]) / 100)
    bg_ct = color_temperature_kelvin_to_mired(int(PROPERTIES["bg_ct"]))
    bg_hue = int(PROPERTIES["bg_hue"])
    bg_sat = int(PROPERTIES["bg_sat"])
    bg_rgb = int(PROPERTIES["bg_rgb"])
    bg_hs_color = (bg_hue, bg_sat)
    bg_rgb_color = ((bg_rgb >> 16) & 0xFF, (bg_rgb >> 8) & 0xFF, bg_rgb & 0xFF)
    nl_br = round(255 * int(PROPERTIES["nl_br"]) / 100)
    # Default
    await _async_test(
        None,
        "mono",
        {
            "effect_list": YEELIGHT_MONO_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": bright,
            "color_mode": "brightness",
            "supported_color_modes": ["brightness"],
        },
    )
    # White
    await _async_test(
        BulbType.White,
        "mono",
        {
            "effect_list": YEELIGHT_MONO_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": bright,
            "color_mode": "brightness",
            "supported_color_modes": ["brightness"],
        },
    )
    # Color - color mode CT
    mocked_bulb.last_properties["color_mode"] = "2" # CT
    model_specs = _MODEL_SPECS["color"]
    await _async_test(
        BulbType.Color,
        "color",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_temp": ct,
            "color_mode": "color_temp",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
            "hs_color": (26.812, 34.87),
            "rgb_color": (255, 205, 166),
            "xy_color": (0.421, 0.364),
        },
        {
            "supported_features": 0,
            "color_mode": "onoff",
            "supported_color_modes": ["onoff"],
        },
    )
    # Color - color mode HS
    mocked_bulb.last_properties["color_mode"] = "3" # HSV
    model_specs = _MODEL_SPECS["color"]
    await _async_test(
        BulbType.Color,
        "color",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "hs_color": hs_color,
            "rgb_color": color_hs_to_RGB(*hs_color),
            "xy_color": color_hs_to_xy(*hs_color),
            "color_mode": "hs",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
        },
        {
            "supported_features": 0,
            "color_mode": "onoff",
            "supported_color_modes": ["onoff"],
        },
    )
    # Color - color mode RGB
    mocked_bulb.last_properties["color_mode"] = "1" # RGB
    model_specs = _MODEL_SPECS["color"]
    await _async_test(
        BulbType.Color,
        "color",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "hs_color": color_RGB_to_hs(*rgb_color),
            "rgb_color": rgb_color,
            "xy_color": color_RGB_to_xy(*rgb_color),
            "color_mode": "rgb",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
        },
        {
            "supported_features": 0,
            "color_mode": "onoff",
            "supported_color_modes": ["onoff"],
        },
    )
    # Color - color mode HS but no hue
    mocked_bulb.last_properties["color_mode"] = "3" # HSV
    mocked_bulb.last_properties["hue"] = None
    model_specs = _MODEL_SPECS["color"]
    await _async_test(
        BulbType.Color,
        "color",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_mode": "hs",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
        },
        {
            "supported_features": 0,
            "color_mode": "onoff",
            "supported_color_modes": ["onoff"],
        },
    )
    # Color - color mode RGB but no color
    mocked_bulb.last_properties["color_mode"] = "1" # RGB
    mocked_bulb.last_properties["rgb"] = None
    model_specs = _MODEL_SPECS["color"]
    await _async_test(
        BulbType.Color,
        "color",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_mode": "rgb",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
        },
        {
            "supported_features": 0,
            "color_mode": "onoff",
            "supported_color_modes": ["onoff"],
        },
    )
    # Color - unsupported color_mode
    mocked_bulb.last_properties["color_mode"] = 4 # Unsupported
    model_specs = _MODEL_SPECS["color"]
    await _async_test(
        BulbType.Color,
        "color",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "color_mode": "unknown",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
        },
        {
            "supported_features": 0,
            "color_mode": "onoff",
            "supported_color_modes": ["onoff"],
        },
    )
    # An unsupported color mode must also be reported in the log.
    assert "Light reported unknown color mode: 4" in caplog.text
    # WhiteTemp
    model_specs = _MODEL_SPECS["ceiling1"]
    await _async_test(
        BulbType.WhiteTemp,
        "ceiling1",
        {
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_temp": ct,
            "color_mode": "color_temp",
            "supported_color_modes": ["color_temp"],
            "hs_color": (26.812, 34.87),
            "rgb_color": (255, 205, 166),
            "xy_color": (0.421, 0.364),
        },
        {
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": nl_br,
            "color_mode": "brightness",
            "supported_color_modes": ["brightness"],
        },
    )
    # WhiteTempMood
    properties.pop("power")
    properties["main_power"] = "on"
    model_specs = _MODEL_SPECS["ceiling4"]
    await _async_test(
        BulbType.WhiteTempMood,
        "ceiling4",
        {
            "friendly_name": NAME,
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "flowing": False,
            "night_light": True,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_temp": ct,
            "color_mode": "color_temp",
            "supported_color_modes": ["color_temp"],
            "hs_color": (26.812, 34.87),
            "rgb_color": (255, 205, 166),
            "xy_color": (0.421, 0.364),
        },
        {
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": nl_br,
            "color_mode": "brightness",
            "supported_color_modes": ["brightness"],
        },
    )
    # Background light - color mode CT
    mocked_bulb.last_properties["bg_lmode"] = "2" # CT
    await _async_test(
        BulbType.WhiteTempMood,
        "ceiling4",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(6500),
            "max_mireds": color_temperature_kelvin_to_mired(1700),
            "brightness": bg_bright,
            "color_temp": bg_ct,
            "color_mode": "color_temp",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
            "hs_color": (27.001, 19.243),
            "rgb_color": (255, 228, 205),
            "xy_color": (0.372, 0.35),
        },
        name=f"{UNIQUE_FRIENDLY_NAME} Ambilight",
        entity_id=f"{ENTITY_LIGHT}_ambilight",
    )
    # Background light - color mode HS
    mocked_bulb.last_properties["bg_lmode"] = "3" # HS
    await _async_test(
        BulbType.WhiteTempMood,
        "ceiling4",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(6500),
            "max_mireds": color_temperature_kelvin_to_mired(1700),
            "brightness": bg_bright,
            "hs_color": bg_hs_color,
            "rgb_color": color_hs_to_RGB(*bg_hs_color),
            "xy_color": color_hs_to_xy(*bg_hs_color),
            "color_mode": "hs",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
        },
        name=f"{UNIQUE_FRIENDLY_NAME} Ambilight",
        entity_id=f"{ENTITY_LIGHT}_ambilight",
    )
    # Background light - color mode RGB
    mocked_bulb.last_properties["bg_lmode"] = "1" # RGB
    await _async_test(
        BulbType.WhiteTempMood,
        "ceiling4",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "min_mireds": color_temperature_kelvin_to_mired(6500),
            "max_mireds": color_temperature_kelvin_to_mired(1700),
            "brightness": bg_bright,
            "hs_color": color_RGB_to_hs(*bg_rgb_color),
            "rgb_color": bg_rgb_color,
            "xy_color": color_RGB_to_xy(*bg_rgb_color),
            "color_mode": "rgb",
            "supported_color_modes": ["color_temp", "hs", "rgb"],
        },
        name=f"{UNIQUE_FRIENDLY_NAME} Ambilight",
        entity_id=f"{ENTITY_LIGHT}_ambilight",
    )
async def test_effects(hass: HomeAssistant):
    """Test effects.

    Registers one custom effect via YAML configuration and then verifies
    that every built-in effect name hands the expected Flow object to the
    bulb's async_start_flow, and that unknown effect names do nothing.
    """
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                CONF_CUSTOM_EFFECTS: [
                    {
                        CONF_NAME: "mock_effect",
                        CONF_FLOW_PARAMS: {
                            ATTR_COUNT: 3,
                            ATTR_TRANSITIONS: [
                                {YEELIGHT_HSV_TRANSACTION: [300, 50, 500, 50]},
                                {YEELIGHT_RGB_TRANSITION: [100, 100, 100, 300, 30]},
                                {YEELIGHT_TEMPERATURE_TRANSACTION: [3000, 200, 20]},
                                {YEELIGHT_SLEEP_TRANSACTION: [800]},
                            ],
                        },
                    }
                ]
            }
        },
    )
    config_entry = MockConfigEntry(domain=DOMAIN, data=CONFIG_ENTRY_DATA)
    config_entry.add_to_hass(hass)
    mocked_bulb = _mocked_bulb()
    with _patch_discovery(), _patch_discovery_interval(), patch(
        f"{MODULE}.AsyncBulb", return_value=mocked_bulb
    ):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    # The custom effect must be appended to the built-in effect list.
    assert hass.states.get(ENTITY_LIGHT).attributes.get(
        "effect_list"
    ) == YEELIGHT_COLOR_EFFECT_LIST + ["mock_effect"]
    async def _async_test_effect(name, target=None, called=True):
        # Turn the light on with the named effect and, when *target* is
        # given, compare the Flow passed to the bulb against it.
        async_mocked_start_flow = AsyncMock()
        mocked_bulb.async_start_flow = async_mocked_start_flow
        await hass.services.async_call(
            "light",
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_EFFECT: name},
            blocking=True,
        )
        if not called:
            return
        async_mocked_start_flow.assert_called_once()
        if target is None:
            return
        args, _ = async_mocked_start_flow.call_args
        flow = args[0]
        # Flow objects have no __eq__, so compare the relevant fields.
        assert flow.count == target.count
        assert flow.action == target.action
        assert str(flow.transitions) == str(target.transitions)
    # Mapping of effect name -> expected Flow (None means "call only").
    effects = {
        "mock_effect": Flow(
            count=3,
            transitions=[
                HSVTransition(300, 50, 500, 50),
                RGBTransition(100, 100, 100, 300, 30),
                TemperatureTransition(3000, 200, 20),
                SleepTransition(800),
            ],
        ),
        EFFECT_DISCO: Flow(transitions=transitions.disco()),
        EFFECT_FAST_RANDOM_LOOP: None,
        EFFECT_WHATSAPP: Flow(count=2, transitions=transitions.pulse(37, 211, 102)),
        EFFECT_FACEBOOK: Flow(count=2, transitions=transitions.pulse(59, 89, 152)),
        EFFECT_TWITTER: Flow(count=2, transitions=transitions.pulse(0, 172, 237)),
        EFFECT_HOME: Flow(
            count=0,
            action=Action.recover,
            transitions=[
                TemperatureTransition(degrees=3200, duration=500, brightness=80)
            ],
        ),
        EFFECT_NIGHT_MODE: Flow(
            count=0,
            action=Action.recover,
            transitions=[RGBTransition(0xFF, 0x99, 0x00, duration=500, brightness=1)],
        ),
        EFFECT_DATE_NIGHT: Flow(
            count=0,
            action=Action.recover,
            transitions=[RGBTransition(0xFF, 0x66, 0x00, duration=500, brightness=50)],
        ),
        EFFECT_MOVIE: Flow(
            count=0,
            action=Action.recover,
            transitions=[
                RGBTransition(
                    red=0x14, green=0x14, blue=0x32, duration=500, brightness=50
                )
            ],
        ),
        EFFECT_SUNRISE: Flow(
            count=1,
            action=Action.stay,
            transitions=[
                RGBTransition(
                    red=0xFF, green=0x4D, blue=0x00, duration=50, brightness=1
                ),
                TemperatureTransition(degrees=1700, duration=360000, brightness=10),
                TemperatureTransition(degrees=2700, duration=540000, brightness=100),
            ],
        ),
        EFFECT_SUNSET: Flow(
            count=1,
            action=Action.off,
            transitions=[
                TemperatureTransition(degrees=2700, duration=50, brightness=10),
                TemperatureTransition(degrees=1700, duration=180000, brightness=5),
                RGBTransition(
                    red=0xFF, green=0x4C, blue=0x00, duration=420000, brightness=1
                ),
            ],
        ),
        EFFECT_ROMANCE: Flow(
            count=0,
            action=Action.stay,
            transitions=[
                RGBTransition(
                    red=0x59, green=0x15, blue=0x6D, duration=4000, brightness=1
                ),
                RGBTransition(
                    red=0x66, green=0x14, blue=0x2A, duration=4000, brightness=1
                ),
            ],
        ),
        EFFECT_HAPPY_BIRTHDAY: Flow(
            count=0,
            action=Action.stay,
            transitions=[
                RGBTransition(
                    red=0xDC, green=0x50, blue=0x19, duration=1996, brightness=80
                ),
                RGBTransition(
                    red=0xDC, green=0x78, blue=0x1E, duration=1996, brightness=80
                ),
                RGBTransition(
                    red=0xAA, green=0x32, blue=0x14, duration=1996, brightness=80
                ),
            ],
        ),
        EFFECT_CANDLE_FLICKER: Flow(
            count=0,
            action=Action.recover,
            transitions=[
                TemperatureTransition(degrees=2700, duration=800, brightness=50),
                TemperatureTransition(degrees=2700, duration=800, brightness=30),
                TemperatureTransition(degrees=2700, duration=1200, brightness=80),
                TemperatureTransition(degrees=2700, duration=800, brightness=60),
                TemperatureTransition(degrees=2700, duration=1200, brightness=90),
                TemperatureTransition(degrees=2700, duration=2400, brightness=50),
                TemperatureTransition(degrees=2700, duration=1200, brightness=80),
                TemperatureTransition(degrees=2700, duration=800, brightness=60),
                TemperatureTransition(degrees=2700, duration=400, brightness=70),
            ],
        ),
    }
    for name, target in effects.items():
        await _async_test_effect(name, target)
    # Unknown effect names must not trigger a flow.
    await _async_test_effect("not_existed", called=False)
async def test_state_fails_to_update_triggers_update(hass: HomeAssistant):
    """Ensure we call async_get_properties if the turn on/off fails to update the state."""
    mocked_bulb = _mocked_bulb()
    properties = {**PROPERTIES}
    properties.pop("active_mode")
    properties["color_mode"] = "3" # HSV
    mocked_bulb.last_properties = properties
    mocked_bulb.bulb_type = BulbType.Color
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False}
    )
    config_entry.add_to_hass(hass)
    with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        # We use asyncio.create_task now to avoid
        # blocking starting so we need to block again
        await hass.async_block_till_done()
    # Bulb still reports "off" after turn_on -> a property refresh is expected.
    mocked_bulb.last_properties["power"] = "off"
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
        },
        blocking=True,
    )
    assert len(mocked_bulb.async_turn_on.mock_calls) == 1
    assert len(mocked_bulb.async_get_properties.mock_calls) == 2
    # Bulb still reports "on" after turn_off -> another refresh is expected.
    mocked_bulb.last_properties["power"] = "on"
    await hass.services.async_call(
        "light",
        SERVICE_TURN_OFF,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
        },
        blocking=True,
    )
    assert len(mocked_bulb.async_turn_off.mock_calls) == 1
    assert len(mocked_bulb.async_get_properties.mock_calls) == 3
    # But if the state is correct no calls
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
        },
        blocking=True,
    )
    assert len(mocked_bulb.async_turn_on.mock_calls) == 1
    assert len(mocked_bulb.async_get_properties.mock_calls) == 3
async def test_ambilight_with_nightlight_disabled(hass: HomeAssistant):
    """Test that main light on ambilights with the nightlight disabled shows the correct brightness."""
    mocked_bulb = _mocked_bulb()
    properties = {**PROPERTIES}
    capabilities = {**CAPABILITIES}
    capabilities["model"] = "ceiling10"
    properties["color_mode"] = "3" # HSV
    # Background light off and a zero "current_brightness": the main light
    # must still report its own brightness, not 0.
    properties["bg_power"] = "off"
    properties["current_brightness"] = 0
    properties["bg_lmode"] = "2" # CT
    mocked_bulb.last_properties = properties
    mocked_bulb.bulb_type = BulbType.WhiteTempMood
    main_light_entity_id = "light.yeelight_ceiling10_0x15243f"
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False},
        options={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False},
    )
    config_entry.add_to_hass(hass)
    with _patch_discovery(capabilities=capabilities), patch(
        f"{MODULE}.AsyncBulb", return_value=mocked_bulb
    ):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        # We use asyncio.create_task now to avoid
        # blocking starting so we need to block again
        await hass.async_block_till_done()
    state = hass.states.get(main_light_entity_id)
    assert state.state == "on"
    # bg_power off should not set the brightness to 0
    assert state.attributes[ATTR_BRIGHTNESS] == 128
|
#!usr/bin/env python
import socket
import threading
import select
import time
def main():
    """Run a one-to-one TCP chat on port 1776.

    Prompts for a peer IP (or the word "listen"). In listen mode a server
    thread accepts one inbound connection; otherwise a client thread
    connects out. A third thread forwards stdin lines to whichever socket
    is connected. (Python 2 script: uses raw_input and str sockets.)
    """
    class Chat_Server(threading.Thread):
        """Accepts a single inbound connection and prints what the peer sends."""
        def __init__(self):
            threading.Thread.__init__(self)
            self.running = 1
            self.conn = None   # accepted connection socket (set in run)
            self.addr = None   # peer address (set in run)
        def run(self):
            HOST = ''          # bind on all interfaces
            PORT = 1776
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind((HOST, PORT))
            s.listen(1)
            self.conn, self.addr = s.accept()
            # Select loop: print incoming data until the peer disconnects.
            while self.running == True:
                inputready, outputready, exceptready \
                    = select.select([self.conn], [self.conn], [])
                for input_item in inputready:
                    data = self.conn.recv(1024)
                    if data:
                        # Parenthesized so the statement is valid under
                        # both Python 2 and Python 3 syntax.
                        print("Them: " + data)
                    else:
                        break  # empty recv: peer closed the connection
                time.sleep(0)  # yield to other threads
        def kill(self):
            self.running = 0
    class Chat_Client(threading.Thread):
        """Connects to a listening peer and prints what it sends."""
        def __init__(self):
            threading.Thread.__init__(self)
            self.host = None   # set by the caller before start()
            self.sock = None
            self.running = 1
        def run(self):
            PORT = 1776
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((self.host, PORT))
            # Select loop: print incoming data until the peer disconnects.
            while self.running == True:
                inputready, outputready, exceptready \
                    = select.select([self.sock], [self.sock], [])
                for input_item in inputready:
                    data = self.sock.recv(1024)
                    if data:
                        print("Them: " + data)
                    else:
                        break  # empty recv: peer closed the connection
                time.sleep(0)
        def kill(self):
            self.running = 0
    class Text_Input(threading.Thread):
        """Reads stdin lines and forwards them to whichever side is connected."""
        def __init__(self):
            threading.Thread.__init__(self)
            self.running = 1
        def run(self):
            while self.running == True:
                text = raw_input('')
                # Exactly one of client/server is connected; the other side's
                # socket is None and raises, which we deliberately ignore.
                # BUG FIX: the original used a bare "except:" followed by a
                # no-op "Exception" expression statement, which also swallowed
                # KeyboardInterrupt/SystemExit.
                try:
                    chat_client.sock.sendall(text)
                except Exception:
                    pass
                try:
                    chat_server.conn.sendall(text)
                except Exception:
                    pass
                time.sleep(0)
        def kill(self):
            self.running = 0
    # Prompt, object instantiation, and threads start here.
    # The original duplicated the 'listen'/'Listen' branches verbatim;
    # a case-insensitive compare covers both (and "LISTEN" etc.).
    ip_addr = raw_input('What IP (or type listen)?: ')
    chat_server = Chat_Server()
    chat_client = Chat_Client()
    text_input = Text_Input()
    if ip_addr.lower() == 'listen':
        chat_server.start()
    else:
        chat_client.host = ip_addr
        chat_client.start()
    text_input.start()
if __name__ == "__main__":
    main()
|
import pygame
import os
from typing import List, Tuple
from itertools import count
# Sprite geometry and movement constants. Widths/heights are in base
# (unscaled) pixels; ENEMY_SCALE is recomputed at runtime by
# BuildEnemyGroup to fit the formation to the screen.
PLAYER_WIDTH = 15
PLAYER_SPEED = 3        # horizontal pixels per frame
BLACK = (0, 0, 0)       # background/erase color (RGB)
ENEMY_HEIGHT = 8
ENEMY_WIDTH = 12
ENEMY_SCALE = 1         # mutated globally by BuildEnemyGroup
ENEMY_STEPS_PER_WIDTH = 4   # horizontal steps per enemy width
ENEMY_SIDE_SPACE = 2
CRAB_WIDTH = 11
OCTOPUS_WIDTH = 12
SQUID_WIDTH = 8
CORPSE_WIDTH = 13
ROCKET_WIDTH = 3
ROCKET_SPEED = 5        # vertical pixels per frame
def load_asset(asset):
    """Return the path of *asset* inside the space_invaders assets folder."""
    parts = ('space_invaders', 'assets', asset)
    return os.path.join(*parts)
class Entity(pygame.sprite.Sprite):
    """Base sprite: scales *image* to (width, height) and positions its rect
    so the top-left corner sits at (posX, posY)."""
    def __init__(self, image, width, height, posX, posY):
        super().__init__()
        self.image = pygame.transform.scale(image, (width, height))
        self.rect = self.image.get_rect()
        # Center the rect at the midpoint of the target box.
        self.rect.center = (posX + round(width/2, 0), posY + round(height/2, 0))
    def update(self, window: pygame.Surface):
        return super().update()
class Player(Entity):
    """The player's cannon; moves horizontally between two borders."""
    def __init__(self, scale, posX, posY, leftEdge, rightEdge):
        self.leftEdge = leftEdge
        self.rightEdge = rightEdge
        sprite = pygame.image.load(load_asset('player_1.png'))
        super().__init__(sprite, PLAYER_WIDTH * scale,
                         ENEMY_HEIGHT * scale, posX, posY)
    def move(self):
        pressed = pygame.key.get_pressed()
        # Refuse to move past either border of the play field.
        if pressed[pygame.K_LEFT] and self.rect.left <= self.leftEdge:
            return
        if pressed[pygame.K_RIGHT] and self.rect.right >= self.rightEdge:
            return
        # Key states are 0/1, so the difference gives -1, 0 or +1.
        direction = pressed[pygame.K_RIGHT] - pressed[pygame.K_LEFT]
        self.rect.move_ip(direction * PLAYER_SPEED, 0)
    def draw(self, screen):
        screen.blit(self.image, self.rect)
class Enemy(Entity):
    """An invader that animates between two frames and marches in steps."""
    def __init__(self, images, altImages, width, height, posX, posY, score):
        self.score = score
        self.images = images
        self.altImages = altImages
        self.imageCounter = 0
        self.width = width
        self.height = height
        super().__init__(images[0], width, height, posX, posY)
    def switchImage(self, hasReachedBorder: bool):
        # After the formation bounces off a border, animate with the
        # alternate frame set from then on.
        if hasReachedBorder:
            self.images = self.altImages
        self.imageCounter = (self.imageCounter + 1) % len(self.images)
        self.image = pygame.transform.scale(
            self.images[self.imageCounter], (self.width, self.height))
    def GetScore(self):
        return self.score
    def moveHorizontal(self, moveRight):
        stepWidth = (ENEMY_WIDTH / ENEMY_STEPS_PER_WIDTH) * ENEMY_SCALE
        self.rect.move_ip(stepWidth if moveRight else -stepWidth, 0)
    def moveVertical(self):
        # One full enemy height down.
        self.rect.move_ip(0, ENEMY_HEIGHT * ENEMY_SCALE)
    def update(self, window: pygame.Surface):
        return super().update(window)
class EnemyGroup():
    """All enemy rows plus the shared state of the formation's march.

    Holds the common left/right borders and the current horizontal
    direction; the individual EnemyRow objects do the per-sprite work.
    """
    def __init__(self):
        self.step = 0
        self.moveRight = True
        self.enemyRows: List['EnemyRow'] = []
        self.leftEdge = 0
        self.rightEdge = 0
        super().__init__()
    def addRow(self, enemyRow: 'EnemyRow'):
        """Append a row and widen the formation edges to enclose it."""
        self.enemyRows.append(enemyRow)
        # Keep the widest edges seen so far (rows built by BuildEnemyGroup
        # all share the same edges, so this is effectively an assignment).
        self.leftEdge = max(self.leftEdge, enemyRow.leftEdge)
        self.rightEdge = max(self.rightEdge, enemyRow.rightEdge)
    def isEmpty(self):
        """Return True when no row contains a living enemy."""
        return not any(row.sprites() for row in self.enemyRows)
    def hasReached(self, player: Player):
        """Return True once the formation has descended to the player's row.

        Only the first sprite of each row is sampled; a row's members share
        the same vertical position, so one sample per row suffices.
        """
        for row in reversed(self.enemyRows):
            for enemy in row.sprites():
                if enemy.rect.bottom > player.rect.top:
                    return True
                break  # one sample per row is enough
        return False
    def move(self, screen: pygame.Surface):
        """Advance the formation one step; drop one row height and reverse
        direction when any row has hit a border."""
        hasReachedBorder = False
        moveDown = False
        for row in self.enemyRows:
            hasReachedBorder = row.hasReachedBorder(self.moveRight)
            if hasReachedBorder:
                moveDown = True
                self.moveRight = not self.moveRight
                break
        for row in reversed(self.enemyRows):
            if moveDown:
                row.moveVertical(screen)
            else:
                row.moveHorizontal(self.moveRight, screen)
    def attack(self, playerXCenter: int):
        """Return a Rocket fired by the unobstructed enemy whose center is
        horizontally closest to *playerXCenter*, or None if nobody can shoot.
        """
        # BUG FIX: the original tested "len(self.enemyRows) is 0" — an
        # identity comparison on an int that only works through CPython's
        # small-int caching (SyntaxWarning on modern Python).
        if not self.enemyRows:
            return None
        row: EnemyRow = None
        unobstructedEnemies: List[pygame.sprite.Sprite] = []
        # Walk rows bottom-up and keep every enemy with a clear line of fire
        # (no previously collected enemy overlapping it horizontally).
        for row in reversed(self.enemyRows):
            for enemy in row.sprites():
                isUnobstructed = True
                for unobstructed in unobstructedEnemies:
                    if (enemy.rect.left < unobstructed.rect.left and enemy.rect.right < unobstructed.rect.left) \
                            or (enemy.rect.left > unobstructed.rect.right and enemy.rect.right > unobstructed.rect.right):
                        continue
                    else:
                        isUnobstructed = False
                        break
                if isUnobstructed:
                    unobstructedEnemies.append(enemy)
        shooter: pygame.sprite.Sprite = None
        for enemy in unobstructedEnemies:
            # BUG FIX: "== None" replaced with the idiomatic "is None".
            if shooter is None or abs(playerXCenter - enemy.rect.centerx) < abs(playerXCenter - shooter.rect.centerx):
                shooter = enemy
        if shooter is None:
            return None
        posX = shooter.rect.centerx - ROCKET_WIDTH
        posY = shooter.rect.topleft[1] + ENEMY_HEIGHT * ENEMY_SCALE
        return Rocket(posX, posY, False)
    # handles collisions and returns the score
    def groupcollide(self, group: pygame.sprite.Group) -> Tuple[int, List['Corpse']]:
        """Kill enemies hit by sprites in *group* (both sides are removed)
        and return (score gained, corpse sprites to draw)."""
        score = 0
        corpses: List[Corpse] = []
        for row in self.enemyRows:
            for enemy in (pygame.sprite.groupcollide(group, row, True, True).values()):
                score += row.score
                corpses.append(
                    Corpse(enemy[0].rect.left, enemy[0].rect.top))
        return (score, corpses)
    def draw(self, screen: pygame.Surface):
        """Draw every row onto *screen*."""
        for row in self.enemyRows:
            row.draw(screen)
class EnemyRow(pygame.sprite.Group):
    # One horizontal row of enemies. Every member shares a single score
    # value and the row-wide movement borders.
    def __init__(self, sprites, score, leftEdge, rightEdge):
        self.score = score
        self.leftEdge = leftEdge
        self.rightEdge = rightEdge
        super().__init__(sprites)
    def hasReachedBorder(self, moveRight: bool) -> bool:
        # True when the row's leading sprite (last when moving right, first
        # when moving left) has reached the play-field border.
        if len(self.sprites()) == 0:
            return False
        left = self.leftEdge + (ENEMY_WIDTH * ENEMY_SCALE) / 2
        right = self.rightEdge + (ENEMY_WIDTH * ENEMY_SCALE) / 2
        first: pygame.sprite.Sprite = self.sprites()[0]
        last: pygame.sprite.Sprite = self.sprites()[-1]
        return first.rect.centerx <= left if not moveRight else last.rect.centerx >= right
    def moveVertical(self, screen: pygame.Surface):
        # Erase each sprite, advance its animation frame, move it one enemy
        # height down, redraw it, and flush only the touched rectangles.
        for enemy in self.sprites():
            rect = pygame.draw.rect(screen, BLACK, (enemy.rect.x, enemy.rect.y,
                                    ENEMY_WIDTH*ENEMY_SCALE, ENEMY_HEIGHT*ENEMY_SCALE))
            enemy.switchImage(False)
            Enemy.moveVertical(enemy)
            screen.blit(enemy.image, enemy.rect)
            pygame.display.update((rect, enemy.rect))
    def moveHorizontal(self, moveRight: bool, screen: pygame.Surface):
        # Same erase/animate/move/redraw cycle, but one horizontal step.
        for enemy in self.sprites():
            rect = pygame.draw.rect(screen, BLACK, (enemy.rect.x, enemy.rect.y,
                                    ENEMY_WIDTH*ENEMY_SCALE, ENEMY_HEIGHT*ENEMY_SCALE))
            enemy.switchImage(False)
            Enemy.moveHorizontal(enemy, moveRight)
            screen.blit(enemy.image, enemy.rect)
            pygame.display.update((rect, enemy.rect))
class Rocket(Entity):
    """A projectile: travels up when fired by the player, down otherwise."""
    def __init__(self, posX, posY, moveUp):
        sprite = pygame.image.load(load_asset('rocket_1.png'))
        if moveUp:
            # Player rockets use the sprite flipped upside down.
            sprite = pygame.transform.rotate(sprite, 180)
        self.direction = -1 if moveUp else 1
        super().__init__(sprite, ROCKET_WIDTH * ENEMY_SCALE,
                         ENEMY_HEIGHT * ENEMY_SCALE, posX, posY)
    def update(self, screen: pygame.Surface):
        self.rect.move_ip(0, ROCKET_SPEED * self.direction)
        return super().update(screen)
class Ufo(Enemy):
    # Bonus saucer worth 100 points. Both frame lists hold the same static
    # image, so the animation is a no-op.
    # NOTE(review): the width uses CRAB_WIDTH — no UFO-specific width
    # constant exists, so this is presumably deliberate reuse; confirm
    # against the ufo.png asset's aspect ratio.
    def __init__(self, posX, posY):
        images = [pygame.image.load(load_asset('ufo.png')),
                  pygame.image.load(load_asset('ufo.png'))]
        altImages = images
        super().__init__(images, altImages, CRAB_WIDTH *
                         ENEMY_SCALE, ENEMY_HEIGHT * ENEMY_SCALE, posX, posY, 100)
class Crab(Enemy):
    """Middle-row invader worth 20 points."""
    def __init__(self, posX, posY):
        frames = [pygame.image.load(load_asset(asset))
                  for asset in ('crab_white_1.png', 'crab_white_2.png')]
        altFrames = [pygame.image.load(load_asset(asset))
                     for asset in ('crab_green_1.png', 'crab_green_2.png')]
        super().__init__(frames, altFrames, CRAB_WIDTH * ENEMY_SCALE,
                         ENEMY_HEIGHT * ENEMY_SCALE, posX, posY, 20)
class Octopus(Enemy):
    """Bottom-row invader worth 10 points."""
    def __init__(self, posX, posY):
        frames = [pygame.image.load(load_asset(asset))
                  for asset in ('octopus_white_1.png', 'octopus_white_2.png')]
        altFrames = [pygame.image.load(load_asset(asset))
                     for asset in ('octopus_green_1.png', 'octopus_green_2.png')]
        super().__init__(frames, altFrames, OCTOPUS_WIDTH * ENEMY_SCALE,
                         ENEMY_HEIGHT * ENEMY_SCALE, posX, posY, 10)
class Squid(Enemy):
    """Top-row invader worth 30 points."""
    def __init__(self, posX, posY):
        frames = [pygame.image.load(load_asset(asset))
                  for asset in ('squid_white_1.png', 'squid_white_2.png')]
        altFrames = [pygame.image.load(load_asset(asset))
                     for asset in ('squid_green_1.png', 'squid_green_2.png')]
        super().__init__(frames, altFrames, SQUID_WIDTH * ENEMY_SCALE,
                         ENEMY_HEIGHT * ENEMY_SCALE, posX, posY, 30)
class Corpse(Enemy):
    """Explosion sprite left behind by a destroyed enemy; worth 0 points."""
    def __init__(self, posX, posY):
        frame = pygame.image.load(load_asset('corpse_white.png'))
        altFrame = pygame.image.load(load_asset('corpse_green.png'))
        # Both frame slots show the same image, so the animation is static.
        super().__init__([frame, frame], [altFrame, altFrame],
                         CORPSE_WIDTH * ENEMY_SCALE,
                         ENEMY_HEIGHT * ENEMY_SCALE, posX, posY, 0)
def BuildEnemyGroup(availableWidth, availableHeight, posYStart) -> EnemyGroup:
    """Build the 6x11 enemy formation, scaled to fit the available area.

    Side effect: recomputes the module-level ENEMY_SCALE so that the
    formation (including descent space and side margins) fits both the
    available width and height.
    """
    group = EnemyGroup()
    # Unscaled formation dimensions in base pixels.
    formationHeight = 6 * ENEMY_HEIGHT + 5 * ENEMY_HEIGHT # rows + space
    formationHeight += 12 * ENEMY_HEIGHT # space down
    formationWidth = 11 * ENEMY_WIDTH + 10 * \
        ENEMY_WIDTH / 4 # 11 enemies + space between them
    formationSpace = 2 * ENEMY_WIDTH # space left and right
    formationWidth += formationSpace
    global ENEMY_SCALE
    # Use whichever axis is the tighter fit to pick the scale factor.
    enemyScale = availableHeight / formationHeight
    ENEMY_SCALE = enemyScale if (enemyScale < (
        availableWidth / formationWidth)) else (availableWidth / formationWidth)
    scaledFormationWidth = formationWidth * ENEMY_SCALE
    scaledFormationSpace = formationSpace * ENEMY_SCALE
    scaledWidth = ENEMY_WIDTH * ENEMY_SCALE
    scaledHorizontalSpace = scaledWidth / 4
    scaledHeight = ENEMY_HEIGHT * ENEMY_SCALE
    # Center the formation horizontally; half the margin on each side.
    posX = (availableWidth - scaledFormationWidth) / \
        2 + scaledFormationSpace / 2
    leftEdge = posX - scaledFormationSpace / 2
    rightEdge = leftEdge + scaledFormationWidth
    posY = posYStart
    # 6 rows of 11 enemies; the per-row type comes from BuildEnemy.
    for i in range(6):
        row = []
        score = 0
        for j in range(11):
            enemy = BuildEnemy(i, posX, posY)
            if enemy is not None:
                row.append(enemy)
                score = enemy.score
            posX += scaledWidth + scaledHorizontalSpace
        # Reset X to the row start and drop two enemy heights for the next row.
        posX = (availableWidth - scaledFormationWidth) / \
            2 + scaledFormationSpace / 2
        posY += scaledHeight * 2
        enemyRow = EnemyRow(row, score, leftEdge, rightEdge)
        group.addRow(enemyRow)
    return group
def BuildEnemy(row, posX, posY) -> Enemy:
    """Create the enemy type for formation row *row* (0 = top).

    Rows 0-1 are Squids (30 pts), 2-3 Crabs (20 pts), 4-5 Octopuses (10 pts).

    Raises:
        ValueError: for a row outside 0-5. (BUG FIX: the original fell
        through with ``enemy`` unbound and crashed with UnboundLocalError.)
    """
    if 0 <= row <= 1:
        return Squid(posX, posY)
    if 2 <= row <= 3:
        return Crab(posX, posY)
    if 4 <= row <= 5:
        return Octopus(posX, posY)
    raise ValueError(f"unsupported enemy row: {row}")
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScArroioTrintaSpider(FecamGazetteSpider):
    """Gazette spider for Arroio Trinta (SC), scraped via the FECAM portal."""
    name = "sc_arroio_trinta"
    # FECAM search filter selecting this municipality's entity id.
    FECAM_QUERY = "cod_entidade:24"
    # Territory code for the municipality (IBGE-style numeric code —
    # confirm against the project's territory table).
    TERRITORY_ID = "4201604"
|
from http import HTTPStatus
from django.test import TestCase
from django.urls import resolve, reverse
from apps.core.views import CookiesView, CookieToggle
class CookieToggleTestCase(TestCase):
    """Unit tests for the CookieToggle choice helper."""
    def test_choices(self):
        # choices() yields (key, value) pairs; collect the display labels.
        labels = [choice["label"] for _, choice in CookieToggle().choices()]
        assert len(labels) == 2
        assert "ON" in labels
        assert "OFF" in labels
class CookiesViewTestCase(TestCase):
    """Integration tests for CookiesView: routing, template, and the
    cookies_policy / cookies_preferences_set cookies it sets on POST."""
    def setUp(self):
        self.url = reverse("core:cookies")
    def test_cookies_url_resolves_to_correct_view(self):
        match = resolve("/cookies/")
        assert match.func.view_class == CookiesView
    def test_cookies_view_loads_correct_template(self):
        response = self.client.get(self.url)
        assert HTTPStatus.OK == response.status_code
        self.assertTemplateUsed(response, "pages/cookies.html")
    def test_cookies_set_usage_to_on(self):
        """ usage refers to google analytics """
        data = {"usage": True}
        response = self.client.post(self.url, data)
        # Successful POST redirects and records the choice in two cookies.
        assert HTTPStatus.FOUND == response.status_code
        assert '{"usage": true}' == response.cookies["cookies_policy"].value
        assert "true" == response.cookies["cookies_preferences_set"].value
    def test_cookies_set_usage_to_off(self):
        """ usage refers to google analytics """
        data = {"usage": False}
        response = self.client.post(self.url, data)
        assert HTTPStatus.FOUND == response.status_code
        assert '{"usage": false}' == response.cookies["cookies_policy"].value
        assert "true" == response.cookies["cookies_preferences_set"].value
    def test_cookies_set_usage__invalid_data(self):
        # An unknown form field re-renders the page and sets no cookies.
        data = {"wibble": False}
        response = self.client.post(self.url, data)
        assert HTTPStatus.OK == response.status_code
        assert "cookies_policy" not in response.cookies.keys()
        assert "cookies_preferences_set" not in response.cookies.keys()
    def test_cookies_set_usage_to_on__default_redirect_url(self):
        """ Default redirect url is the index page / """
        data = {"usage": True}
        response = self.client.post(self.url, data)
        assert HTTPStatus.FOUND == response.status_code
        assert "/" == response.url
    def test_cookies_set_usage_to_on__with_next_in_query_params(self):
        """ Take redirect url from next= query param if present """
        data = {"usage": True}
        url = f"{self.url}?next=/wobble/"
        response = self.client.post(url, data)
        assert HTTPStatus.FOUND == response.status_code
        assert "/wobble/" == response.url
|
#
# Minimal settings for ReFrame tutorial on Piz Daint
#
class ReframeSettings:
    """Minimal ReFrame site settings for the Piz Daint tutorial.

    ReFrame reads these attributes (job polling, check discovery paths,
    system/partition layout and logging) through the module-level
    ``settings`` instance created below.
    """

    # Seconds to wait between successive job-state polls (ReFrame steps
    # through this list), and submission timeout in seconds.
    job_poll_intervals = [1, 2, 3]
    job_submit_timeout = 60

    # Where regression checks are discovered, and whether to recurse.
    checks_path = ['checks/']
    checks_path_recurse = True

    # System description: one system ('daint') with login, GPU and
    # multicore partitions, each offering the four Cray PrgEnv stacks.
    site_configuration = {
        'systems': {
            'daint': {
                'descr': 'Piz Daint',
                'hostnames': ['daint'],
                'modules_system': 'tmod',
                'partitions': {
                    'login': {
                        'scheduler': 'local',
                        'modules': [],
                        'access': [],
                        'environs': ['PrgEnv-cray', 'PrgEnv-gnu',
                                     'PrgEnv-intel', 'PrgEnv-pgi'],
                        'descr': 'Login nodes',
                        'max_jobs': 4
                    },
                    'gpu': {
                        'scheduler': 'nativeslurm',
                        'modules': ['daint-gpu'],
                        'access': ['--constraint=gpu'],
                        'environs': ['PrgEnv-cray', 'PrgEnv-gnu',
                                     'PrgEnv-intel', 'PrgEnv-pgi'],
                        'container_platforms': {
                            'Singularity': {
                                'modules': ['Singularity']
                            }
                        },
                        'descr': 'Hybrid nodes (Haswell/P100)',
                        'max_jobs': 100
                    },
                    'mc': {
                        'scheduler': 'nativeslurm',
                        'modules': ['daint-mc'],
                        'access': ['--constraint=mc'],
                        'environs': ['PrgEnv-cray', 'PrgEnv-gnu',
                                     'PrgEnv-intel', 'PrgEnv-pgi'],
                        'container_platforms': {
                            'Singularity': {
                                'modules': ['Singularity']
                            }
                        },
                        'descr': 'Multicore nodes (Broadwell)',
                        'max_jobs': 100
                    }
                }
            }
        },
        'environments': {
            # '*' applies these environment definitions to every system.
            '*': {
                'PrgEnv-cray': {
                    'modules': ['PrgEnv-cray'],
                },
                'PrgEnv-gnu': {
                    'modules': ['PrgEnv-gnu'],
                },
                'PrgEnv-intel': {
                    'modules': ['PrgEnv-intel'],
                },
                'PrgEnv-pgi': {
                    'modules': ['PrgEnv-pgi'],
                }
            }
        }
    }

    # Regular logging: full DEBUG trace to reframe.log, INFO to stdout and
    # to reframe.out (both files are truncated on each run).
    logging_config = {
        'level': 'DEBUG',
        'handlers': [
            {
                'type': 'file',
                'name': 'reframe.log',
                'level': 'DEBUG',
                'format': '[%(asctime)s] %(levelname)s: '
                          '%(check_name)s: %(message)s',
                'append': False,
            },

            # Output handling
            {
                'type': 'stream',
                'name': 'stdout',
                'level': 'INFO',
                'format': '%(message)s'
            },
            {
                'type': 'file',
                'name': 'reframe.out',
                'level': 'INFO',
                'format': '%(message)s',
                'append': False,
            }
        ]
    }

    # Performance logging: one pipe-separated record per performance value,
    # filed under <system>/<partition>/ and appended across runs.
    perf_logging_config = {
        'level': 'DEBUG',
        'handlers': [
            {
                'type': 'filelog',
                'prefix': '%(check_system)s/%(check_partition)s',
                'level': 'INFO',
                'format': (
                    '%(asctime)s|reframe %(version)s|'
                    '%(check_info)s|jobid=%(check_jobid)s|'
                    '%(check_perf_var)s=%(check_perf_value)s|'
                    'ref=%(check_perf_ref)s '
                    '(l=%(check_perf_lower_thres)s, '
                    'u=%(check_perf_upper_thres)s)'
                ),
                'append': True
            }
        ]
    }


# Module-level instance that ReFrame imports from this settings file.
settings = ReframeSettings()
|
# -*- coding: utf-8 -*-
#
# Time-To-Recover Test documentation build configuration file, created by
# sphinx-quickstart on Fri May 4 13:58:22 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autosummary',
              'sphinx.ext.autodoc',
              'sphinx.ext.intersphinx',
              'sphinx.ext.coverage',
              'sphinx.ext.imgmath',
              'sphinx.ext.viewcode',
              'sphinx.ext.inheritance_diagram',
              'sphinxcontrib.plantuml',
              'sphinx.ext.graphviz']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'The Tuna'
copyright = u'2014, russell nakamura'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2014.07.10'
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'bootswatch_theme': 'spacelab',
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = "The Tuna"

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Tuna documentation'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {
#    '**': ['localtoc.html', 'navigation.html', 'searchbox.html']
#}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
basename = "tuna"
htmlhelp_basename = basename + 'doc'

# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    'preamble': '\\usepackage{booktabs}',
    'fontpkg': '\\usepackage[urw-garamond]{mathdesign}'
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'tuna.tex', basename + u' Documentation',
     u'russelln', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', basename, basename + u' Documentation',
     [u'russelln'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', basename, basename + u' Documentation',
     u'russelln', basename, basename + ' Tester.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = basename
epub_author = u'russelln'
epub_publisher = u'russelln'
epub_copyright = u'2013, russelln'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}

autosummary_generate = True
# NOTE: autodoc flag names use hyphens; the original 'show_inheritance'
# (underscore) is not a recognized flag and was silently ignored by autodoc.
autodoc_default_flags = ['members', 'inherited-members', 'show-inheritance']
autoclass_content = 'both'
autodoc_member_order = 'groupwise'
todo_include_todos = True
|
import os
import pickle
import numpy as np
from sklearn import neighbors, svm
# Directory of this module, with a trailing slash so paths can be concatenated.
BASE_DIR = os.path.dirname(__file__) + '/'
# Filename of the bundled pre-trained classifier (relative to BASE_DIR).
PATH_TO_PKL = 'trained_classifier.pkl'

# Hyper-parameter grid for an (optional) SVM grid search; currently unused by
# FaceClassifier — kept for the commented-out GridSearchCV variant below.
param_grid = [
    {'C': [1, 10, 100, 1000],
     'kernel': ['linear']},
    {'C': [1, 10, 100, 1000],
     'gamma': [0.001, 0.0001],
     'kernel': ['rbf']
     }
]
#self.svm = GridSearchCV(SVC(C=1, probability=True), param_grid, cv=10).fit(X, y)
class FaceClassifier:
    """Classifier over face descriptors (k-NN or linear SVM), with pickle
    persistence for trained models."""

    def __init__(self, model_path=None):
        """Optionally load a pre-trained model.

        :param model_path: None (start untrained), 'default' (load the
            bundled pickle), or an explicit path to a pickled model.
        """
        self.model = None
        if model_path is None:
            return
        if model_path == 'default':
            model_path = BASE_DIR + PATH_TO_PKL
        # Load the previously pickled estimator from disk.
        with open(model_path, 'rb') as fh:
            self.model = pickle.load(fh)

    def train(self, X, y, model='knn', save_model_path=None):
        """Fit a classifier on descriptors X with labels y.

        :param model: 'knn' for a 3-neighbor classifier; anything else
            selects a linear SVM with probability estimates.
        :param save_model_path: if given, pickle the fitted model there.
        """
        self.model = (
            neighbors.KNeighborsClassifier(3, weights='uniform')
            if model == 'knn'
            else svm.SVC(kernel='linear', probability=True)
        )
        self.model.fit(X, y)
        if save_model_path is not None:
            with open(save_model_path, 'wb') as fh:
                pickle.dump(self.model, fh)

    def classify(self, descriptor):
        """Return class probabilities for `descriptor` as a flat array,
        or None when no model has been trained/loaded yet."""
        if self.model is None:
            return None
        return self.model.predict_proba(descriptor).ravel()
|
# Copyright (C) 2012 Andy Balaam and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from assert_parser_result import assert_parser_result
def test_call_function():
    """Parsing the tokens of `f()` yields a function call of `f` with an
    empty argument tuple (inputs: lexer tokens, expected token echo, and
    the expected parse-tree repr)."""
    assert_parser_result(
        r"""
0001:0001 SYMBOL(f)
0001:0002 LPAREN
0001:0003 RPAREN
0001:0004 NEWLINE
""",
        r"""
[LPAREN:]
[SYMBOL:f]
[EOF:]
""",
        r"""
PepFunctionCall(
    PepSymbol('f'),
    ()
)
""" )
def test_call_function_with_args():
    """Parsing the tokens of `f( 1, 2, 3 )` yields a function call of `f`
    with the three integer arguments in order."""
    assert_parser_result(
        r"""
0001:0001 SYMBOL(f)
0001:0002 LPAREN
0001:0004 INT(1)
0001:0005 COMMA(,)
0001:0007 INT(2)
0001:0008 COMMA(,)
0001:0010 INT(3)
0001:0012 RPAREN
0001:0013 NEWLINE
""",
        r"""
[LPAREN:]
[SYMBOL:f]
[INT:1]
[COMMA:,]
[INT:2]
[COMMA:,]
[INT:3]
[EOF:]
""",
        r"""
PepFunctionCall(
    PepSymbol('f'),
    (
        PepInt('1'),
        PepInt('2'),
        PepInt('3')
    )
)
""" )
|
import numpy as np
import matplotlib as mpl
from matplotlib import gridspec
import matplotlib.pyplot as plt
from scipy.cluster import hierarchy
import seaborn as sns
import pandas as pd
from .utils import nullity_filter, nullity_sort
import warnings
def matrix(df,
           filter=None, n=0, p=0, sort=None,
           figsize=(25, 10), width_ratios=(15, 1), color=(0.25, 0.25, 0.25),
           fontsize=16, labels=None, sparkline=True, inline=False,
           freq=None, ax=None):
    """
    A matrix visualization of the nullity of the given DataFrame.

    :param df: The `DataFrame` being mapped.
    :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
    :param n: The max number of columns to include in the filtered DataFrame.
    :param p: The max percentage fill of the columns in the filtered DataFrame.
    :param sort: The row sort order to apply. Can be "ascending", "descending", or None.
    :param figsize: The size of the figure to display.
    :param fontsize: The figure's font size. Default to 16.
    :param labels: Whether or not to display the column names. Defaults to the underlying data labels when there are
        50 columns or less, and no labels when there are more than 50 columns.
    :param sparkline: Whether or not to display the sparkline. Defaults to True.
    :param width_ratios: The ratio of the width of the matrix to the width of the sparkline. Defaults to `(15, 1)`.
        Does nothing if `sparkline=False`.
    :param color: The color of the filled columns. Default is `(0.25, 0.25, 0.25)`.
    :param freq: If given, label rows with timestamps at this frequency; the index must then be a
        `DatetimeIndex` or `PeriodIndex`. Otherwise only the first/last row numbers are labelled.
    :param ax: An existing axis to draw on; sparklines are not supported in this mode.
    :param inline: Deprecated; call `plt.show()` yourself instead.
    :return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
    """
    df = nullity_filter(df, filter=filter, n=n, p=p)
    df = nullity_sort(df, sort=sort, axis='columns')

    height = df.shape[0]
    width = df.shape[1]

    # z is the color-mask array, g is a NxNx3 matrix. Apply the z color-mask to set the RGB of each pixel.
    z = df.notnull().values
    g = np.zeros((height, width, 3))
    g[z < 0.5] = [1, 1, 1]  # missing cells rendered white
    g[z > 0.5] = color      # present cells rendered in the fill color

    # Set up the matplotlib grid layout. A unary subplot if no sparkline, a left-right splot if yes sparkline.
    if ax is None:
        plt.figure(figsize=figsize)
        if sparkline:
            gs = gridspec.GridSpec(1, 2, width_ratios=width_ratios)
            gs.update(wspace=0.08)
            ax1 = plt.subplot(gs[1])
        else:
            gs = gridspec.GridSpec(1, 1)
        ax0 = plt.subplot(gs[0])
    else:
        if sparkline is not False:
            warnings.warn(
                "Plotting a sparkline on an existing axis is not currently supported. "
                "To remove this warning, set sparkline=False."
            )
            sparkline = False
        ax0 = ax

    # Create the nullity plot.
    ax0.imshow(g, interpolation='none')

    # Remove extraneous default visual elements.
    ax0.set_aspect('auto')
    ax0.grid(b=False)
    ax0.xaxis.tick_top()
    ax0.xaxis.set_ticks_position('none')
    ax0.yaxis.set_ticks_position('none')
    ax0.spines['top'].set_visible(False)
    ax0.spines['right'].set_visible(False)
    ax0.spines['bottom'].set_visible(False)
    ax0.spines['left'].set_visible(False)

    # Set up and rotate the column ticks. The labels argument is set to None by default. If the user specifies it in
    # the argument, respect that specification. Otherwise display for <= 50 columns and do not display for > 50.
    if labels or (labels is None and len(df.columns) <= 50):
        ha = 'left'
        ax0.set_xticks(list(range(0, width)))
        ax0.set_xticklabels(list(df.columns), rotation=45, ha=ha, fontsize=fontsize)
    else:
        ax0.set_xticks([])

    # Adds Timestamps ticks if freq is not None, else set up the two top-bottom row ticks.
    if freq:
        ts_list = []

        if type(df.index) == pd.PeriodIndex:
            ts_array = pd.date_range(df.index.to_timestamp().date[0],
                                     df.index.to_timestamp().date[-1],
                                     freq=freq).values

            ts_ticks = pd.date_range(df.index.to_timestamp().date[0],
                                     df.index.to_timestamp().date[-1],
                                     freq=freq).map(lambda t:
                                                    t.strftime('%Y-%m-%d'))

        elif type(df.index) == pd.DatetimeIndex:
            ts_array = pd.date_range(df.index.date[0], df.index.date[-1],
                                     freq=freq).values

            ts_ticks = pd.date_range(df.index.date[0], df.index.date[-1],
                                     freq=freq).map(lambda t:
                                                    t.strftime('%Y-%m-%d'))
        else:
            raise KeyError('Dataframe index must be PeriodIndex or DatetimeIndex.')
        try:
            for value in ts_array:
                ts_list.append(df.index.get_loc(value))
        except KeyError:
            raise KeyError('Could not divide time index into desired frequency.')

        ax0.set_yticks(ts_list)
        ax0.set_yticklabels(ts_ticks, fontsize=int(fontsize / 16 * 20), rotation=0)
    else:
        ax0.set_yticks([0, df.shape[0] - 1])
        ax0.set_yticklabels([1, df.shape[0]], fontsize=int(fontsize / 16 * 20), rotation=0)

    # Create the inter-column vertical grid.
    # Fixed: the original shadowed the list with its own loop variable
    # (`for in_between_point in in_between_point`); use distinct names.
    in_between_points = [x + 0.5 for x in range(0, width - 1)]
    for in_between_point in in_between_points:
        ax0.axvline(in_between_point, linestyle='-', color='white')

    if sparkline:
        # Calculate row-wise completeness for the sparkline.
        completeness_srs = df.notnull().astype(bool).sum(axis=1)
        x_domain = list(range(0, height))
        y_range = list(reversed(completeness_srs.values))
        min_completeness = min(y_range)
        max_completeness = max(y_range)
        min_completeness_index = y_range.index(min_completeness)
        max_completeness_index = y_range.index(max_completeness)

        # Set up the sparkline, remove the border element.
        ax1.grid(b=False)
        ax1.set_aspect('auto')
        # GH 25: matplotlib renamed set_axis_bgcolor to set_facecolor in 2.x.
        if int(mpl.__version__[0]) <= 1:
            ax1.set_axis_bgcolor((1, 1, 1))
        else:
            ax1.set_facecolor((1, 1, 1))
        ax1.spines['top'].set_visible(False)
        ax1.spines['right'].set_visible(False)
        ax1.spines['bottom'].set_visible(False)
        ax1.spines['left'].set_visible(False)
        ax1.set_ymargin(0)

        # Plot sparkline---plot is sideways so the x and y axis are reversed.
        ax1.plot(y_range, x_domain, color=color)

        if labels:
            # Figure out what case to display the label in: mixed, upper, lower.
            label = 'Data Completeness'
            if str(df.columns[0]).islower():
                label = label.lower()
            if str(df.columns[0]).isupper():
                label = label.upper()

            # Set up and rotate the sparkline label.
            ha = 'left'
            ax1.set_xticks([min_completeness + (max_completeness - min_completeness) / 2])
            ax1.set_xticklabels([label], rotation=45, ha=ha, fontsize=fontsize)
            ax1.xaxis.tick_top()
            ax1.set_yticks([])
        else:
            ax1.set_xticks([])
            ax1.set_yticks([])

        # Add maximum and minimum labels, circles.
        ax1.annotate(max_completeness,
                     xy=(max_completeness, max_completeness_index),
                     xytext=(max_completeness + 2, max_completeness_index),
                     fontsize=int(fontsize / 16 * 14),
                     va='center',
                     ha='left')
        ax1.annotate(min_completeness,
                     xy=(min_completeness, min_completeness_index),
                     xytext=(min_completeness - 2, min_completeness_index),
                     fontsize=int(fontsize / 16 * 14),
                     va='center',
                     ha='right')

        ax1.set_xlim([min_completeness - 2, max_completeness + 2])  # Otherwise the circles are cut off.
        ax1.plot([min_completeness], [min_completeness_index], '.', color=color, markersize=10.0)
        ax1.plot([max_completeness], [max_completeness_index], '.', color=color, markersize=10.0)

        # Remove tick mark (only works after plotting).
        ax1.xaxis.set_ticks_position('none')

    if inline:
        warnings.warn(
            "The 'inline' argument has been deprecated, and will be removed in a future version "
            "of missingno."
        )
        plt.show()
    else:
        return ax0
def bar(df, figsize=None, fontsize=16, labels=None, log=False, color='dimgray', inline=False,
        filter=None, n=0, p=0, sort=None, ax=None, orientation=None):
    """
    A bar chart visualization of the nullity of the given DataFrame.

    :param df: The input DataFrame.
    :param log: Whether or not to display a logarithmic plot. Defaults to False (linear).
    :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
    :param n: The cap on the number of columns to include in the filtered DataFrame.
    :param p: The cap on the percentage fill of the columns in the filtered DataFrame.
    :param sort: The column sort order to apply. Can be "ascending", "descending", or None.
    :param figsize: The size of the figure to display.
    :param fontsize: The figure's font size. This default to 16.
    :param labels: Whether or not to display the column names. Would need to be turned off on particularly large
        displays. Defaults to True.
    :param color: The color of the filled columns. Default to the RGB multiple `(0.25, 0.25, 0.25)`.
    :param orientation: The way the bar plot is oriented. Defaults to vertical if there are less than or equal to 50
        columns and horizontal if there are more.
    :return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
    """
    df = nullity_filter(df, filter=filter, n=n, p=p)
    df = nullity_sort(df, sort=sort, axis='rows')

    # Non-null count per column; the fraction of this over len(df) is plotted.
    nullity_counts = len(df) - df.isnull().sum()

    # Wide frames read better horizontally ('left'); narrow ones vertically.
    if orientation is None:
        if len(df.columns) > 50:
            orientation = 'left'
        else:
            orientation = 'bottom'

    if ax is None:
        ax1 = plt.gca()
        if figsize is None:
            # NOTE(review): figsize computed here is passed through plot_args
            # below rather than via plt.figure().
            if len(df.columns) <= 50 or orientation == 'top' or orientation == 'bottom':
                figsize = (25, 10)
            else:
                # Grow the long dimension with the column count past 50.
                figsize = (25, (25 + len(df.columns) - 50) * 0.5)
    else:
        ax1 = ax
        figsize = None  # for behavioral consistency with other plot types, re-use the given size

    plot_args = {'figsize': figsize, 'fontsize': fontsize, 'log': log, 'color': color, 'ax': ax1}
    if orientation == 'bottom':
        (nullity_counts / len(df)).plot.bar(**plot_args)
    else:
        (nullity_counts / len(df)).plot.barh(**plot_args)

    axes = [ax1]
    # Start appending elements, starting with a modified bottom x axis.
    if labels or (labels is None and len(df.columns) <= 50):
        ax1.set_xticklabels(ax1.get_xticklabels(), rotation=45, ha='right', fontsize=fontsize)

        # Create the numerical ticks.
        ax2 = ax1.twinx()
        axes.append(ax2)
        if not log:
            ax1.set_ylim([0, 1])
            ax2.set_yticks(ax1.get_yticks())
        else:
            # For some reason when a logarithmic plot is specified `ax1` always contains two more ticks than actually
            # appears in the plot. The fix is to ignore the first and last entries. Also note that when a log scale
            # is used, we have to make it match the `ax1` layout ourselves.
            ax2.set_yscale('log')
            ax2.set_ylim(ax1.get_ylim())
        # Mirror the fractional ticks as absolute row counts on the twin axis.
        ax2.set_yticklabels([int(n*len(df)) for n in ax1.get_yticks()], fontsize=fontsize)

        # Create the third axis, which displays columnar totals above the rest of the plot.
        ax3 = ax1.twiny()
        axes.append(ax3)
        ax3.set_xticks(ax1.get_xticks())
        ax3.set_xlim(ax1.get_xlim())
        ax3.set_xticklabels(nullity_counts.values, fontsize=fontsize, rotation=45, ha='left')
    else:
        # Horizontal layout: the roles of the x and y axes are swapped.
        # Create the numerical ticks.
        ax2 = ax1.twinx()
        axes.append(ax2)
        if not log:
            # Width
            ax1.set_xlim([0, 1])

            # Bottom
            ax2.set_xticks(ax1.get_xticks())
            ax2.set_xticklabels([int(n*len(df)) for n in ax1.get_xticks()], fontsize=fontsize)

            # Right
            ax2.set_yticks(ax1.get_yticks())
            ax2.set_yticklabels(nullity_counts.values, fontsize=fontsize, ha='left')
        else:
            # For some reason when a logarithmic plot is specified `ax1` always contains two more ticks than actually
            # appears in the plot. The fix is to ignore the first and last entries. Also note that when a log scale
            # is used, we have to make it match the `ax1` layout ourselves.
            ax1.set_xscale('log')
            ax1.set_xlim(ax1.get_xlim())

            # Bottom
            ax2.set_xticks(ax1.get_xticks())
            ax2.set_xticklabels([int(n*len(df)) for n in ax1.get_xticks()], fontsize=fontsize)

            # Right
            ax2.set_yticks(ax1.get_yticks())
            ax2.set_yticklabels(nullity_counts.values, fontsize=fontsize, ha='left')

        # Create the third axis, which displays columnar totals above the rest of the plot.
        ax3 = ax1.twiny()
        axes.append(ax3)
        ax3.set_yticks(ax1.get_yticks())
        if log:
            ax3.set_xscale('log')
            ax3.set_xlim(ax1.get_xlim())
        ax3.set_ylim(ax1.get_ylim())
        ax3.grid(False)

    # Strip spines and tick marks from every axis we touched.
    for ax in axes:
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')

    if inline:
        warnings.warn(
            "The 'inline' argument has been deprecated, and will be removed in a future version "
            "of missingno."
        )
        plt.show()
    else:
        return ax1
def heatmap(df, inline=False,
            filter=None, n=0, p=0, sort=None,
            figsize=(20, 12), fontsize=16, labels=True,
            cmap='RdBu', vmin=-1, vmax=1, cbar=True, ax=None
            ):
    """
    Presents a `seaborn` heatmap visualization of nullity correlation in the given DataFrame.

    Note that this visualization has no special support for large datasets. For those, try the dendrogram instead.

    :param df: The DataFrame whose completeness is being heatmapped.
    :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default). See
        `nullity_filter()` for more information.
    :param n: The cap on the number of columns to include in the filtered DataFrame. See `nullity_filter()` for
        more information.
    :param p: The cap on the percentage fill of the columns in the filtered DataFrame. See `nullity_filter()` for
        more information.
    :param sort: The column sort order to apply. Can be "ascending", "descending", or None.
    :param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to (20, 12).
    :param fontsize: The figure's font size.
    :param labels: Whether or not to label each matrix entry with its correlation (default is True).
    :param cmap: What `matplotlib` colormap to use. Defaults to `RdBu`.
    :param vmin: The normalized colormap threshold. Defaults to -1, e.g. the bottom of the color scale.
    :param vmax: The normalized colormap threshold. Defaults to 1, e.g. the bottom of the color scale.
    :param cbar: Whether to draw the colorbar. Defaults to True.
    :param ax: An existing axis to draw on; if None a new figure is created.
    :param inline: Whether or not the figure is inline. If it's not then instead of getting plotted, this method will
        return its figure.
    :return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
    """
    # Apply filters and sorts, set up the figure.
    df = nullity_filter(df, filter=filter, n=n, p=p)
    df = nullity_sort(df, sort=sort, axis='rows')

    if ax is None:
        plt.figure(figsize=figsize)
        ax0 = plt.gca()
    else:
        ax0 = ax

    # Remove completely filled or completely empty variables (their nullity
    # has zero variance, so correlation is undefined for them).
    df = df.iloc[:, [i for i, var in enumerate(np.var(df.isnull(), axis='rows')) if var > 0]]

    # Create and mask the correlation matrix. Construct the base heatmap.
    corr_mat = df.isnull().corr()
    # Mask the upper triangle: the matrix is symmetric, so only show one half.
    mask = np.zeros_like(corr_mat)
    mask[np.triu_indices_from(mask)] = True

    if labels:
        sns.heatmap(corr_mat, mask=mask, cmap=cmap, ax=ax0, cbar=cbar,
                    annot=True, annot_kws={'size': fontsize - 2},
                    vmin=vmin, vmax=vmax)
    else:
        sns.heatmap(corr_mat, mask=mask, cmap=cmap, ax=ax0, cbar=cbar,
                    vmin=vmin, vmax=vmax)

    # Apply visual corrections and modifications.
    ax0.xaxis.tick_bottom()
    ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(), rotation=45, ha='right', fontsize=fontsize)
    # Fixed: the original called set_yticklabels twice in a row with the same
    # arguments (only keyword order differed); one call suffices.
    ax0.set_yticklabels(ax0.yaxis.get_majorticklabels(), fontsize=fontsize, rotation=0)
    ax0.xaxis.set_ticks_position('none')
    ax0.yaxis.set_ticks_position('none')
    ax0.patch.set_visible(False)

    # Round the annotations and collapse near-extreme values to readable tags.
    for text in ax0.texts:
        t = float(text.get_text())
        if 0.95 <= t < 1:
            text.set_text('<1')
        elif -1 < t <= -0.95:
            text.set_text('>-1')
        elif t == 1:
            text.set_text('1')
        elif t == -1:
            text.set_text('-1')
        elif -0.05 < t < 0.05:
            text.set_text('')
        else:
            text.set_text(round(t, 1))

    if inline:
        warnings.warn(
            "The 'inline' argument has been deprecated, and will be removed in a future version "
            "of missingno."
        )
        plt.show()
    else:
        return ax0
def dendrogram(df, method='average',
               filter=None, n=0, p=0,
               orientation=None, figsize=None,
               fontsize=16, inline=False, ax=None
               ):
    """
    Fits a `scipy` hierarchical clustering algorithm to the given DataFrame's variables and visualizes the results as
    a `scipy` dendrogram.

    The default vertical display will fit up to 50 columns. If more than 50 columns are specified and orientation is
    left unspecified the dendrogram will automatically swap to a horizontal display to fit the additional variables.

    :param df: The DataFrame whose completeness is being dendrogrammed.
    :param method: The distance measure being used for clustering. This is a parameter that is passed to
        `scipy.hierarchy`.
    :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
    :param n: The cap on the number of columns to include in the filtered DataFrame.
    :param p: The cap on the percentage fill of the columns in the filtered DataFrame.
    :param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to `(25, 10)`.
    :param fontsize: The figure's font size.
    :param orientation: The way the dendrogram is oriented. Defaults to top-down if there are less than or equal to 50
        columns and left-right if there are more.
    :param inline: Whether or not the figure is inline. If it's not then instead of getting plotted, this method will
        return its figure.
    :return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
    """
    # Pick a default figure size; widen the long dimension past 50 columns.
    if not figsize:
        if len(df.columns) <= 50 or orientation == 'top' or orientation == 'bottom':
            figsize = (25, 10)
        else:
            figsize = (25, (25 + len(df.columns) - 50) * 0.5)

    if ax is None:
        plt.figure(figsize=figsize)
        ax0 = plt.gca()
    else:
        ax0 = ax

    df = nullity_filter(df, filter=filter, n=n, p=p)

    # Link the hierarchical output matrix, figure out orientation, construct base dendrogram.
    # Each column's null-mask (as 0/1 ints) is one observation to cluster.
    x = np.transpose(df.isnull().astype(int).values)
    z = hierarchy.linkage(x, method)

    if not orientation:
        if len(df.columns) > 50:
            orientation = 'left'
        else:
            orientation = 'bottom'

    hierarchy.dendrogram(
        z,
        orientation=orientation,
        labels=df.columns.tolist(),
        distance_sort='descending',
        link_color_func=lambda c: 'black',
        leaf_font_size=fontsize,
        ax=ax0
    )

    # Remove extraneous default visual elements.
    ax0.set_aspect('auto')
    ax0.grid(b=False)
    if orientation == 'bottom':
        ax0.xaxis.tick_top()
    ax0.xaxis.set_ticks_position('none')
    ax0.yaxis.set_ticks_position('none')
    ax0.spines['top'].set_visible(False)
    ax0.spines['right'].set_visible(False)
    ax0.spines['bottom'].set_visible(False)
    ax0.spines['left'].set_visible(False)
    ax0.patch.set_visible(False)

    # Set up the categorical axis labels and draw.
    if orientation == 'bottom':
        ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(), rotation=45, ha='left')
    elif orientation == 'top':
        ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(), rotation=45, ha='right')
    if orientation == 'bottom' or orientation == 'top':
        ax0.tick_params(axis='y', labelsize=int(fontsize / 16 * 20))
    else:
        ax0.tick_params(axis='x', labelsize=int(fontsize / 16 * 20))

    if inline:
        warnings.warn(
            "The 'inline' argument has been deprecated, and will be removed in a future version "
            "of missingno."
        )
        plt.show()
    else:
        return ax0
def geoplot(df,
            filter=None, n=0, p=0,
            x=None, y=None, figsize=(25, 10), inline=False,
            by=None, cmap='YlGn', **kwargs):
    """
    Generates a geographical data nullity heatmap, which shows the distribution of missing data across geographic
    regions. The precise output depends on the inputs provided. If no geographical context is provided, a quadtree
    is computed and nullities are rendered as abstract geographic squares. If geographical context is provided in the
    form of a column of geographies (region, borough, ZIP code, etc.) in the `DataFrame`, convex hulls are computed
    for each of the point groups and the heatmap is generated within them.

    :param df: The DataFrame whose completeness is being geoplotted.
    :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
    :param n: The cap on the number of columns to include in the filtered DataFrame.
    :param p: The cap on the percentage fill of the columns in the filtered DataFrame.
    :param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to `(25, 10)`.
    :param x: The variable in the dataset containing the x-coordinates of the dataset. Required.
    :param y: The variable in the dataset containing the y-coordinates of the dataset. Required.
    :param by: If specified, plot in convex hull mode, using the given column to cluster points in the same area. If
    not specified, plot in quadtree mode.
    :param cmap: The colormap to display the data with. Defaults to `YlGn`.
    :param inline: Whether or not the figure is inline. If it's not then instead of getting plotted, this method will
    return its figure.
    :param kwargs: Additional keyword arguments are passed to the underlying `geoplot` function.
    :return: If `inline` is False, the `matplotlib` axes the plot was drawn on. Else, nothing.
    :raises ImportError: If geoplot is not installed or its version is >= 0.3.0.
    :raises ValueError: If `x` or `y` is not provided.
    """
    # The whole function is deprecated; warn every caller up front.
    warnings.warn(
        "The 'geoplot' function has been deprecated, and will be removed in a future version "
        "of missingno. The 'geoplot' package has an example recipe for a more full-featured "
        "geospatial nullity plot: "
        "https://residentmario.github.io/geoplot/gallery/plot_san_francisco_trees.html"
    )
    try:
        import geoplot as gplt
    except ImportError:
        raise ImportError("Install geoplot <= 0.2.4 (the package) for geoplot function support")
    # NOTE(review): lexicographic string comparison — misorders versions like
    # "0.10.0", but is adequate for the 0.2.x vs 0.3.x split it guards.
    if gplt.__version__ >= "0.3.0":
        raise ImportError(
            "The missingno geoplot function requires geoplot package version 0.2.4 or lower."
            "To use the geoplot function, downgrade to an older version of the geoplot package."
        )
    import geopandas as gpd
    from shapely.geometry import Point
    df = nullity_filter(df, filter=filter, n=n, p=p)
    # Per-row fraction of *filled* (non-null) fields — despite the name.
    nullity = df.notnull().sum(axis='columns') / df.shape[1]
    if x and y:
        gdf = gpd.GeoDataFrame(nullity, columns=['nullity'],
                               geometry=df.apply(lambda srs: Point(srs[x], srs[y]), axis='columns'))
    else:
        raise ValueError("The 'x' and 'y' parameters must be specified.")
    if by:
        # Convex hull mode: drop records with a null grouping key, then drop
        # groups with fewer than three points (a polygon needs >= 3 vertices).
        if df[by].isnull().any():
            warnings.warn('The "{0}" column included null values. The offending records were dropped'.format(by))
            df = df.dropna(subset=[by])
            gdf = gdf.loc[df.index]
        vc = df[by].value_counts()
        if (vc < 3).any():
            warnings.warn('Grouping by "{0}" included clusters with fewer than three points, which cannot be made '
                          'polygonal. The offending records were dropped.'.format(by))
            where = df[by].isin((df[by].value_counts() > 2).where(lambda b: b).dropna().index.values)
            gdf = gdf.loc[where]
        gdf[by] = df[by]
    # Delegate the actual rendering to geoplot's aggregate plot.
    gplt.aggplot(gdf, figsize=figsize, hue='nullity', agg=np.average, cmap=cmap, by=by, edgecolor='None', **kwargs)
    ax = plt.gca()
    if inline:
        warnings.warn(
            "The 'inline' argument has been deprecated, and will be removed in a future version "
            "of missingno."
        )
        plt.show()
    else:
        return ax
|
from chispa import assert_df_equality
from cishouseholds.derive import assign_unique_id_column
def test_assign_unique_id_column(spark_session):
    """The derived id column should combine A and B as 'A-B', falling back to B when A is null."""
    # Build the expected frame first; the input is the same frame minus "id".
    rows = [("XAE-12", "XAE", "12"), ("BSE-53", "BSE", "53"), ("53", None, "53")]
    expected_df = spark_session.createDataFrame(data=rows, schema=["id", "A", "B"])
    input_df = expected_df.drop("id")
    output_df = assign_unique_id_column(input_df, "id", ["A", "B"])
    assert_df_equality(output_df, expected_df, ignore_column_order=True, ignore_nullable=True)
|
import sqlite3
from sqlite3 import Error
import time
from discord import user
import requests
from datetime import datetime
import os
class Connection:
    '''
    A connection to the bot's SQLite database.

    The database contains three tables:
      * ``LastTime``     -- per user: the last time the user uploaded, the
                            current streak and the channel used.
      * ``dailyEntries`` -- every picture/text entry users uploaded.
      * ``LastRemind``   -- per user: whether the user was already reminded
                            and whether reminders are switched on.

    Methods keep the historical convention of returning sentinel codes
    (-1 / -2) or ``(code, error)`` tuples instead of raising, so existing
    callers continue to work.
    '''

    def __init__(self, dir = "/home/hphucs/dailyBot/database/data.db"):
        # Open (and, if needed, create) the database file and its schema.
        super().__init__()
        self.dir = dir
        self.conn = None
        self.cursor = None
        try:
            self.conn = sqlite3.connect(self.dir)
            self.cursor = self.conn.cursor()
            self.cursor.execute("CREATE TABLE IF NOT EXISTS `LastTime` (`id` VARCHAR(25) NOT NULL,`time` VARCHAR(25),`streak` INT,`channel` VARCHAR(25) ,PRIMARY KEY (`id`));")
            self.cursor.execute("CREATE TABLE IF NOT EXISTS `dailyEntries` (`id` INTEGER PRIMARY KEY,`author` VARCHAR(25) NOT NULL,`date` VARCHAR(25) NOT NULL,`message` TEXT,`url` TEXT, `name` VARCHAR(25) NOT NULL);")
            self.cursor.execute("CREATE TABLE IF NOT EXISTS `LastRemind` (`id` VARCHAR(25) NOT NULL,`reminded` INT,`remindSwitch` INT ,PRIMARY KEY (`id`));")
        except Error as e:
            print(e)

    def close(self):
        '''Close the underlying connection. New helper; safe to call twice.'''
        if self.conn is not None:
            self.conn.close()
            self.conn = None
            self.cursor = None

    # ------------------------------------------------------------------
    # Private helpers shared by the public methods below.
    # ------------------------------------------------------------------

    def _getRemindRow(self, id):
        '''Return (reminded, remindSwitch) for `id`, or None if no row exists.'''
        self.cursor.execute("SELECT reminded, remindSwitch FROM LastRemind WHERE id=?", (id,))
        rows = self.cursor.fetchall()
        if len(rows) > 0:
            return int(rows[0][0]), int(rows[0][1])
        return None

    def _upsertRemind(self, id, reminded, remindSwitch):
        '''Insert or replace the user's LastRemind row and commit.'''
        t = (str(id), reminded, remindSwitch)
        self.cursor.execute("INSERT or REPLACE into `LastRemind` (`id`, `reminded`, `remindSwitch`) VALUES (?,?,?)", t)
        self.conn.commit()

    def _upsertLastTime(self, id, streak, channel):
        '''Insert or replace the user's LastTime row (time = now) and commit.'''
        t = (str(id), str(int(time.time())), streak, str(channel))
        self.cursor.execute("INSERT or REPLACE into `LastTime` (`id`,`time`,`streak`,`channel`) VALUES (?,?,?,?)", t)
        self.conn.commit()

    def _getStreakValue(self, id):
        '''Return the stored streak for `id`, or 0 if the user has no row.'''
        self.cursor.execute("SELECT streak FROM LastTime WHERE id=?", (id,))
        rows = self.cursor.fetchall()
        if len(rows) == 1:
            return int(rows[0][0])
        return 0

    # ------------------------------------------------------------------
    # Public API.
    # ------------------------------------------------------------------

    def getAllUser(self):
        '''Return all (id, time, channel) rows; -1 if empty, -2 on DB error.'''
        try:
            self.cursor.execute("SELECT id, time, channel FROM LastTime")
            rows = self.cursor.fetchall()
            if len(rows) < 1:
                return -1
            return rows
        except Error as e:
            print(e)
            return -2

    def getRemindedList(self):
        '''Return ids that should not get (another) reminder: users already
        reminded with reminders on, or users who switched reminders off.
        Returns [] when there are none and -1 on DB error.'''
        try:
            self.cursor.execute("SELECT id FROM LastRemind WHERE (reminded=1 and remindSwitch=1) or remindSwitch = 0")
            rows = self.cursor.fetchall()
            if len(rows) < 1:
                return []
            return rows
        except Error as e:
            print(e)
            return -1

    def updateLastTime(self, id, channel):
        '''Record an upload for `id` now and increment the streak.
        Returns (1, "") on success, (-1, error) on failure.'''
        try:
            streak = self._getStreakValue(id) + 1
            self._upsertLastTime(id, streak, channel)
            return (1,"")
        except Error as e:
            print(e)
            return (-1,e)

    def addRemindList(self, id):
        '''Mark `id` as reminded (reminded=1), keeping their switch setting.'''
        try:
            row = self._getRemindRow(id)
            remindSwitch = row[1] if row is not None else 1
            self._upsertRemind(id, 1, remindSwitch)
            return (1, "")
        except Error as e:
            print(e)
            return (-1, e)

    def removeRemindList(self, id):
        '''Clear the reminded flag for `id`, keeping their switch setting.'''
        try:
            row = self._getRemindRow(id)
            remindSwitch = row[1] if row is not None else 1
            self._upsertRemind(id, 0, remindSwitch)
            return (1, "")
        except Error as e:
            print(e)
            return (-1, e)

    def turnReminder(self, id, switch: int):
        '''Turn the reminder feature on (switch=1) or off (switch=0) for `id`,
        keeping the current reminded flag.'''
        try:
            row = self._getRemindRow(id)
            reminded = row[0] if row is not None else 0
            self._upsertRemind(id, reminded, int(switch))
            return (1,"")
        except Error as e:
            print(e)
            return (-1, e)

    def forceUpdate(self, id, channel):
        '''Like updateLastTime, but leaves the streak unchanged.'''
        try:
            streak = self._getStreakValue(id)
            self._upsertLastTime(id, streak, channel)
            return (1,"")
        except Error as e:
            print(e)
            return (-1,e)

    def getLastTime(self, id):
        '''Return the stored upload timestamp (string) for `id`, or -1 if the
        user is unknown or a DB error occurs.'''
        try:
            self.cursor.execute("SELECT time FROM LastTime WHERE id=?", (id,))
            rows = self.cursor.fetchall()
            if len(rows) < 1:
                return -1
            return rows[0][0]
        except Error as e:
            print(e)
            return -1

    def getLastRemind(self, id):
        '''Return the `reminded` flag (int) for `id`, or -1 if absent/error.

        Bug fix: this used to select a non-existent `lastTime` column, so the
        query always raised OperationalError and the method always returned -1.
        It now reads the `reminded` column that the table actually has.'''
        try:
            self.cursor.execute("SELECT reminded FROM LastRemind WHERE id=?", (id,))
            rows = self.cursor.fetchall()
            if len(rows) < 1:
                return -1
            return rows[0][0]
        except Error as e:
            print(e)
            return -1

    def getChannel(self,id):
        '''Return the channel stored for `id`, or -1 if absent/error.'''
        try:
            self.cursor.execute("SELECT channel FROM LastTime WHERE id=?", (id,))
            rows = self.cursor.fetchall()
            if len(rows) < 1:
                return -1
            return rows[0][0]
        except Error as e:
            print(e)
            return -1

    def resetStreak(self,id):
        '''Set the streak for `id` back to 0. Returns 1, or -1 on DB error.'''
        try:
            self.cursor.execute("UPDATE LastTime SET streak = 0 WHERE id = ?", (id,))
            self.conn.commit()
            return 1
        except Error as e:
            print(e)
            return -1

    def getStreak(self, id):
        '''Return the streak (int) for `id`, or -1 if absent/error.

        Robustness fix: an unknown id previously raised IndexError; it now
        returns -1 like the other getters.'''
        try:
            self.cursor.execute("SELECT streak FROM LastTime WHERE id=?", (id,))
            rows = self.cursor.fetchall()
            if len(rows) < 1:
                return -1
            return rows[0][0]
        except Error as e:
            print(e)
            return -1

    def delete_entries(self, user_id, entries_id):
        '''
        Delete the entry matching user_id and entries_id.
        Returns 1 on success, -1 if no such entry exists, -2 on DB error.
        '''
        try:
            # try to see if can find such entries
            self.cursor.execute("SELECT * FROM dailyEntries WHERE id=? AND author=?", (entries_id,user_id,))
            rows = self.cursor.fetchall()
            # if there's no entries, return -1 and exit the function
            if len(rows) == 0:
                return -1
            # Continue: deleting the entries
            self.cursor.execute("DELETE FROM dailyEntries WHERE id=? AND author=?", (entries_id,user_id,))
            self.conn.commit()
            return 1
        except Error as e:
            print(e)
            return -2

    def addDailyPic2(self, id, message, discordUrl, name):
        '''Re-host `discordUrl` through the external upload API, then store the
        entry. Returns (1, "") on success, (-1, reason) on failure.'''
        day = datetime.today().strftime('%d-%m-%Y')
        baseUrl="https://hphucs.me/dailyBotAPI.php"
        # NOTE(review): the API key is a placeholder, and requests.post raises
        # requests exceptions which are NOT caught by `except Error` below.
        data = {'key':'<api key here>',
                'id' : id,
                'url': discordUrl}
        try:
            # add a new daily entry; the API returns the hosted url, or '-1'
            result = requests.post(baseUrl, data = data)
            if result.text != '-1':
                r = (str(id), str(day), str(message), str(result.text), str(name),)
                self.cursor.execute("INSERT into `dailyEntries` (`author`,`date`,`message`,`url`, `name`) VALUES (?,?,?,?,?)", r)
                self.conn.commit()
                return(1,"")
            else:
                return(-1,"Upload failed")
        except Error as e:
            print(e)
            return (-1,e)

    def addDailyText(self, id, message, name):
        '''Store a text-only entry (url recorded as "none") for `id`.'''
        day = datetime.today().strftime('%d-%m-%Y')
        try:
            r = (str(id), str(day), str(message), "none", str(name),)
            self.cursor.execute("INSERT into `dailyEntries` (`author`,`date`,`message`,`url`, `name`) VALUES (?,?,?,?,?)", r)
            self.conn.commit()
            return(1,"")
        except Error as e:
            print(e)
            return (-1,e)

    def viewDailyPic(self, id):
        '''Return every dailyEntries row authored by `id`.'''
        self.cursor.execute("SELECT * FROM `dailyEntries` WHERE author=?",(id,))
        return self.cursor.fetchall()

    def view_single_pic(self, user_id, entry_id):
        '''Return the dailyEntries row matching both user and entry id.'''
        self.cursor.execute("SELECT * FROM dailyEntries WHERE id=? AND author=?",(entry_id,user_id,))
        return self.cursor.fetchall()
'''
Test = Connection()
Test.updateLastTime("123")
print(Test.getStreak("123"))
'''
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 21:33:22 2016
@author: Tobias Jachowski
"""
import matplotlib.pyplot as plt
import numpy as np
from pyoti.modification.modification import Modification, GraphicalMod
from pyoti import traces as tc
from pyoti.evaluate import signal as sn
class IAttachment(GraphicalMod):
    """
    Subclass of Attachment that provides graphical interfaces to adjust the fit
    parameters.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Define some transient parameters needed for the graphical fitting of
        # the attachment plateaus [excited_psd, excited_position] for left and
        # right stress views
        self.rightposition = None
        self.leftposition = None
        self.rightpsd = None
        self.leftpsd = None
        # Dimensions of the buttons to adjust the plateaus (figure fractions
        # passed to Figure.add_axes):
        self.left = 0.13
        self.bottom = 0.79
        self.width = 0.0625
        self.height = 0.046875
        self.bspace = 0.01
        self.lspace = 0.01333
        # Transient matplotlib objects, (re)populated by _figure()
        self._lines = {}
        self._ax = None
        self.supertitle = None
        self._button = {}
        # create some properties of actions/corresponding buttons:
        # per action a button label, a PSD offset increment (offsetP, V),
        # a stage offset increment (offsetS, m) and a button grid position.
        action = [ 'left', 'right', 'lleft', 'rright', 'up', 'down' ]
        label = [ '<', '>', '<<', '>>', 'up', 'down' ]
        offsetP = [ 0.0, 0.0, 0.0, 0.0, -0.0025, 0.0025 ]
        offsetS = [ 2.5e-9, -2.5e-9, 25e-9, -25e-9, 0.0, 0.0 ]
        row = [ 0, 0, 1, 1, 1, 0 ]
        column = [ 0, 2, 0, 2, 1, 1 ]
        self.action = action
        self.label = dict(list(zip(action, label)))
        self.offsetP = dict(list(zip(action, offsetP)))
        self.offsetS = dict(list(zip(action, offsetS)))
        self.row = dict(list(zip(action, row)))
        self.column = dict(list(zip(action, column)))
    def _figure(self):
        """
        Initialize and show an interactive plot for adjusting the attachment.
        Adjust the plateau correction parameters interactively and set the
        modification accordingly.
        The plot is stored in self.figure.
        """
        # create new figure and axes for adjusting the plateaus
        figure, ax = plt.subplots(1, 1, sharex=True, sharey=True)
        self._ax = ax
        # create buttons for interactive correction of plateaus and assign
        # correct functions
        # see http://math.andrej.com/2009/04/09/pythons-lambda-is-broken/ for
        # explanation of weird function assignment
        ax_button = {}
        for ac in self.action:
            ax_button[ac] = figure.add_axes([self.column[ac] *
                                             (self.lspace + self.width) +
                                             self.left, self.row[ac] *
                                             (self.bspace + self.height) +
                                             self.bottom,
                                             self.width,
                                             self.height])
            self._button[ac] = plt.Button(ax_button[ac], self.label[ac])
            # Default argument ac=ac binds the *current* loop value; a plain
            # closure would late-bind and make every button use the last action.
            def ap(event, ac=ac):
                self._adjust_plateaus(ac)
            # connect button to action, accordingly
            self._button[ac].on_clicked(ap)
        # create lines to plot the plateaus (left in red, right in green)
        self._lines['left'] = ax.plot([0], [0], 'r', alpha=0.75)[0]
        self._lines['right'] = ax.plot([0], [0], 'g', alpha=0.75)[0]
        ax.ticklabel_format(useOffset=False)
        ax.grid(True)
        self.supertitle = figure.suptitle("Adjust plateaus to make them "
                                          "overlap")
        return figure
    def _update_fig(self, **kwargs):
        """
        Update the plot after an offset change.
        """
        # recalculate the plateaus with the new offset and scaling values
        self.calculate_plateaus()
        # plot data (positions scaled by 1e6, i.e. m -> µm, for display)
        self._lines['left'].set_data(self.leftposition * 1e6, self.leftpsd)
        self._lines['right'].set_data(self.rightposition * 1e6, self.rightpsd)
        excited_psd = self.modification.traces_apply[0]
        excited_position = self.modification.traces_apply[1]
        self._ax.set_xlabel(tc.label(excited_position))
        self._ax.set_ylabel(tc.label(excited_psd))
        # recompute ax.dataLim
        self._ax.relim()
        # update ax.viewLim using new dataLim
        self._ax.autoscale_view()
    def _pre_close_fig(self):
        # Store attachment fit plot for documentation
        self.supertitle.set_text('Adjusted plateaus')
        # Drop references to the matplotlib objects created in _figure().
        self._lines.clear()
        self._ax = None
        self._button.clear()
        self.supertitle = None
    def _adjust_plateaus(self, action):
        """
        Adjusts the attachment (offset of excited_position) and the scaling
        factor to correct for differences of left and right DNA overstretching
        plateaus.
        It is interactively called from the data plot (see below) and updates
        the plot accordingly.
        """
        # change offset and scaling for plateaus by this action's increments
        self.modification.iattributes.offsetPsd += self.offsetP[action]
        self.modification.iattributes.offsetStage += self.offsetS[action]
        self.update_fig()
    def calculate_plateaus(self):
        """
        Calculate the plateaus according to the offsets and the scaling of
        data_based.
        """
        # determine the excited axis of position and psd signal
        ex = self.modification._excited()
        excited_psd = self.modification._NAME['psd'][ex]
        excited_position = self.modification._NAME['position'][ex]
        self.modification.traces_apply = [excited_psd, excited_position]
        # recalculate data for plotting
        # data_based: [excited_psd, excited_position]
        data_based = self.modification._get_data_based(
            traces=self.modification.traces_apply, window=False, decimate=True)
        # subtract offsets
        data_based[:, 0] -= self.modification.iattributes.offsetPsd
        data_based[:, 1] -= self.modification.iattributes.offsetStage
        # determine left/right stress regions of DNA
        signal = data_based[:, 1]  # [excited_position]
        resolution = self.modification.view_based.samplingrate \
            / self.modification.decimate
        minima, maxima = sn.get_extrema(signal, resolution)
        rightstress, _, leftstress = \
            sn.get_sections(signal, minima, maxima)[1][0:3]
        # invert the signals of left stress cycle
        data_based[leftstress] *= -1
        # set plateau data arrays
        self.rightposition = data_based[rightstress, 1]
        self.leftposition = data_based[leftstress, 1]
        self.rightpsd = data_based[rightstress, 0]
        self.leftpsd = data_based[leftstress, 0]
class Attachment(Modification):
    """
    Determine attachment point of DNA and scaling of lateral PSD
    """
    # Graphical companion class used to adjust the offsets interactively.
    GRAPHICALMOD = IAttachment
    def __init__(self, db_update=False, **kwargs):
        # db_update=True skips the trace/attribute setup; it is used when the
        # class is instantiated only for a database-version update (see the
        # GAttachment note below this class).
        super().__init__(datapoints=25000, **kwargs)
        # determine the excited axis of position and psd signal
        if not db_update:
            ex = self._excited()
            excited_psd = self._NAME['psd'][ex]
            excited_position = self._NAME['position'][ex]
            self.traces_apply = [excited_psd, excited_position]
            # Define parameters that are used to calculate the modification
            # offset of PSD relative to trap center position of bead
            self.add_iattribute('offsetPsd', description='Offset PSD (V)',
                                value=0.0)
            # offset of the position relative to the attachment point of the DNA
            self.add_iattribute('offsetStage', description='Offset position (m)',
                                value=0.0)
    def _print_info(self):
        # Report which trace the excitation applies to.
        print(" Excited axis is: %s" % self.traces_apply[1])
    def _modify(self, data, samples, data_traces, data_index, mod_index):
        # correct attachment point of DNA: adjust excited_position
        # (set it to 0 where DNA is attached)
        # correct for offset of excited_psd
        offset = np.array([self.iattributes.offsetPsd,  # excited_psd,
                           self.iattributes.offsetStage])  # excited_position
        data[:, data_index] -= offset[np.newaxis, mod_index]
        return data
# The following is only to update to database version 0.8.0
class GAttachment(Attachment):
    # Empty legacy subclass, kept only for the 0.8.0 database update —
    # presumably so stored objects referencing this class name still resolve.
    pass
|
# Depth-estimation training config: ConvSwin backbone initialised from
# Swin-Large (ImageNet-22k) weights, KITTI dataset, cosine "24x" iteration
# schedule. Values below override the inherited _base_ configs.
_base_ = [
    '../../_base_/models/convswin_base.py', '../../_base_/datasets/kitti.py',
    '../../_base_/iter_runtime.py', '../../_base_/schedules/schedule_cos24x_iter.py'
]
model = dict(
    pretrained='./nfs/checkpoints/swin_large_patch4_window7_224_22k.pth',  # noqa
    backbone=dict(
        pretrain_img_size=224,
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48]),
    neck=dict(
        type='DepthFusionMultiLevelNeck',
        in_channels=[64, 192, 384, 768, 1536],
        out_channels=[64, 192, 384, 768, 1536],
        embedding_dim=512,  # NOTE(review): original left a "384?" here — confirm intended value
        scales=[1, 1, 1, 1, 1]),
    decode_head=dict(
        type='UpsampleHead',
        in_channels=[1536, 768, 384, 192, 64],
        # NOTE(review): 4 indices for 5 in_channels entries — confirm the head
        # is meant to consume only the first four neck outputs.
        in_index=[0, 1, 2, 3],
        up_sample_channels=[1536, 768, 384, 192, 64],
        channels=64,
        min_depth=1e-3,
        max_depth=80,
        att_fusion=False,
        loss_decode=dict(
            type='SigLoss', valid_mask=True, loss_weight=1.0, min_depth=1e-3, max_depth=80)
    ))
# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone (lr itself comes from the inherited schedule config)
optimizer = dict(
    type='AdamW',
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
)
find_unused_parameters=True
# Evaluate every 400 iterations and keep the checkpoint with the lowest
# rmse_all ("search the best").
evaluation = dict(by_epoch=False,
                  start=0,
                  interval=400,
                  pre_eval=True,
                  rule='less',
                  save_best='rmse_all',
                  greater_keys=("a1_all", "a2_all", "a3_all"),
                  less_keys=("abs_rel_all", "rmse_all", "silog_all", "sq_rel_all"))
# change 1/10 warmup_ratio to converge
lr_config = dict(
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=1600 * 8,
    warmup_ratio=1.0 / 1000,
    min_lr_ratio=1e-8,
    by_epoch=False)
# change interval to 10 to check convergement
log_config = dict(
    interval=10,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        dict(type='TensorboardLoggerHook')
    ])
|
from robusta.api import *
class StressTestParams(ActionParams):
    """
    Parameters for the http_stress_test action.

    :var n: Number of requests to run. Defaults to 1000.
    :var url: In cluster target url. Required (no default).
    """
    n: int = 1000
    url: str
@action
def http_stress_test(event: ExecutionBaseEvent, action_params: StressTestParams):
    """
    Run an http stress test and send the results
    """
    # TODO: remove timeout?
    # Fire the requests from an in-cluster job running the `hey` load tester.
    job_output = RobustaJob.run_simple_job(
        "williamyeh/hey", f"/hey -n {action_params.n} {action_params.url}", 120
    )
    result_finding = Finding(
        title=f"Done running stress test with {action_params.n} http requests for url {action_params.url}",
        source=FindingSource.MANUAL,
        aggregation_key="http_stress_test",
    )
    # Attach the raw tool output, when the job produced any.
    if job_output:
        result_finding.add_enrichment([FileBlock("result.txt", job_output)])
    event.add_finding(result_finding)
|
# Names re-exported as this package's public API; wildcard imports
# (`from <package> import *`) pick up exactly this list.
__all__ = [
    'send_response',
    'send_scheduledmessages_response',
    'stop_scheduled_messages_response',
    'get_messages_details_response',
    'send_wrapper_response',
    'get_message_query_response',
    'get_scheduled_message_response',
]
|
# This function will take a name and return the first initial of a name
# Create a function to return the first initial of a name
# Parameters:
# name: name of person
# Return value
# first letter of name passed in
def get_initial(name):
    """Return the first letter of *name*, uppercased ('' for an empty name)."""
    return name[:1].upper()
# Prompt for a first name on stdin and print its initial.
first_name = input('Enter your first name: ')
# Call get_initial function to retrieve first letter of name
first_name_initial = get_initial(first_name)
print('Your initial is: ' + first_name_initial)
|
# Databricks notebook source
# MAGIC %run ../../notebooks/_modules/epma_global/functions
# COMMAND ----------
import os
import time
from pyspark.sql.functions import col,when,lit
import pyspark.sql.functions as F
import re
import pyspark.sql.types as pst
# COMMAND ----------
# Reset widgets left over from a previous run of this notebook.
dbutils.widgets.removeAll()
# COMMAND ----------
# Notebook parameters: the source lookup table, the VTM/VMP/AMP reference
# tables, and the two report tables this notebook writes.
dbutils.widgets.text('input_table','epma_autocoding.match_lookup_final','Input table')
dbutils.widgets.text('vtm_table','dss_corporate.vtm','VTM table')
dbutils.widgets.text('vmp_table','dss_corporate.vmp','VMP table')
dbutils.widgets.text('amp_table','dss_corporate.amp','AMP table')
dbutils.widgets.text('invalid_scenario_table', 'epma_autocoding.match_lookup_final_invalid_scenario', 'invalid_scenario_table')
dbutils.widgets.text('update_scenario_table', 'epma_autocoding.match_lookup_final_update_scenario', 'update_scenario_table')
# Collect the widget values into one stage-configuration dict.
stage = {
    'input_table': dbutils.widgets.get('input_table'),
    'vtm_table': dbutils.widgets.get('vtm_table'),
    'vmp_table': dbutils.widgets.get('vmp_table'),
    'amp_table': dbutils.widgets.get('amp_table'),
    'invalid_scenario_table': dbutils.widgets.get('invalid_scenario_table'),
    'update_scenario_table': dbutils.widgets.get('update_scenario_table')
}
exit_message = []
# COMMAND ----------
# Load the configured tables as Spark DataFrames.
data, message = get_all_data(stage, stage_specific_tables_spark=True)
df_lookup, _ = get_data(stage['input_table'], pandas=False)
exit_message = exit_message + message
# COMMAND ----------
# Union the ids of all AMP/VMP/VTM records with their INVALID flag, then keep
# only lookup rows whose matched id is flagged invalid.
df_dss = data['amp'].select(col('APID').alias('match_id'), 'INVALID') \
    .union(data['vmp'].select(col('VPID').alias('match_id'), 'INVALID')) \
    .union(data['vtm'].select(col('VTMID').alias('match_id'), 'INVALID'))
df_lookup_invalid = df_lookup.join(df_dss, 'match_id', 'inner') \
    .filter(col('INVALID')==1)
# COMMAND ----------
# Persist the "matched to an invalid record" report table.
df_lookup_invalid_report = df_lookup_invalid.select('epma_id', 'epma_description', 'match_id', 'id_level', 'match_level', 'match_datetime', 'INVALID')
create_table(df_lookup_invalid_report, stage['invalid_scenario_table'], overwrite=True)
# COMMAND ----------
# Build a previous-id -> current-id mapping from the VMP/VTM *PREV columns and
# rewrite lookup rows that still point at a previous id.
df_dss_prev = data['vmp'].select(col('VPIDPREV').alias('match_id'), col('VPID').alias('match_id_update')) \
    .union(data['vtm'].select(col('VTMIDPREV').alias('match_id'), col('VTMID').alias('match_id_update')))
df_lookup_dss_prev_update = df_lookup.join(df_dss_prev, 'match_id', 'inner') \
    .withColumn('match_id', F.col('match_id_update')) \
    .drop('match_id_update')
# COMMAND ----------
# Persist the "id updated to current id" report table.
df_lookup_update_report = df_lookup_dss_prev_update.select('epma_id','epma_description','match_id','id_level','match_level','match_datetime')
create_table(df_lookup_update_report, stage['update_scenario_table'], overwrite=True)
|
# Production configuration for the flask-seed app.
# NOTE(review): every '<FIXME>' placeholder (secret key, admin address, mail
# credentials, upload keys) must be filled in before deployment.
DOMAIN = 'flask-seed.com'
ENV = 'production'
DEBUG = False
SECRET_KEY = '<FIXME>'
# Caching: in-process simple cache, 300 s default timeout.
CACHE_TYPE = "SimpleCache"
CACHE_DEFAULT_TIMEOUT = 300
CACHE_THRESHOLD = 10240
# Internationalization (BABEL_* settings)
ACCEPT_LANGUAGES = ['en', 'zh']
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
# Log file locations
DEBUG_LOG = 'logs/debug.log'
ERROR_LOG = 'logs/error.log'
ADMINS = ['<FIXME>']
# Outgoing mail: SSL (not TLS) on port 465.
MAIL_SERVER = 'smtp.mxhichina.com'
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = '<FIXME>'
MAIL_PASSWORD = '<FIXME>'
MAIL_DEFAULT_SENDER = '<FIXME>'
# MongoDB connection strings (the second one is for the pytest database).
MONGODB_URI = 'mongodb://localhost:27017/flask-seed'
MONGODB_URI_PYTEST = 'mongodb://localhost:27017/pytest'
# Upload to Storage Service (qiniu endpoint; see the commented "Upload to
# Local" alternative below)
UPLOAD_ENDPOINT = '//upload.qiniup.com/'
UPLOAD_BASE = '//cdn.flask-seed.com'
UPLOAD_BUCKET = 'flask-seed'
UPLOAD_AK = '<FIXME>'
UPLOAD_SK = '<FIXME>'
UPLOAD_MIMES = ['image/jpeg', 'image/png', 'image/gif',
                'video/quicktime', 'video/mp4', 'video/mpeg', 'video/webm',
                'audio/mpeg', 'audio/x-wav', 'audio/webm']
UPLOAD_MAX = 50
# Preview/thumbnail URL suffixes appended to uploaded asset URLs.
UPLOAD_IMAGE_PREVIEW_SM = '?imageMogr2/thumbnail/x200'
UPLOAD_IMAGE_PREVIEW_MD = '?imageMogr2/thumbnail/600x'
UPLOAD_VIDEO_POSTER_SM = '?vframe/jpg/offset/1/h/200'
# Upload to Local
# UPLOAD_ENDPOINT = '/upload'
# UPLOAD_FOLDER = 'uploads'
# UPLOAD_MIMES = ['image/jpeg', 'image/png']
# UPLOAD_MAX = 10
# UPLOAD_IMAGE_PREVIEW_SM = ''
# UPLOAD_IMAGE_PREVIEW_MD = ''
# UPLOAD_VIDEO_COVER_SM = ''
|
import project1 as p1
import utils
import numpy as np
#-------------------------------------------------------------------------------
# Data loading. There is no need to edit code in this section.
#-------------------------------------------------------------------------------
# Load the train/validation/test review datasets from their TSV files.
train_data = utils.load_data('reviews_train.tsv')
val_data = utils.load_data('reviews_val.tsv')
test_data = utils.load_data('reviews_test.tsv')
# Split each sample dict into parallel (text, sentiment-label) tuples.
train_texts, train_labels = zip(*((sample['text'], sample['sentiment']) for sample in train_data))
val_texts, val_labels = zip(*((sample['text'], sample['sentiment']) for sample in val_data))
test_texts, test_labels = zip(*((sample['text'], sample['sentiment']) for sample in test_data))
# Build the bag-of-words dictionary from the training texts only, then
# featurize all three splits with it.
dictionary = p1.bag_of_words(train_texts)
train_bow_features = p1.extract_bow_feature_vectors(train_texts, dictionary)
val_bow_features = p1.extract_bow_feature_vectors(val_texts, dictionary)
test_bow_features = p1.extract_bow_feature_vectors(test_texts, dictionary)
#-------------------------------------------------------------------------------
# Problem 5
#-------------------------------------------------------------------------------
# toy_features, toy_labels = toy_data = utils.load_toy_data('toy_data.tsv')
#
# T = 10
# L = 0.2
#
# thetas_perceptron = p1.perceptron(toy_features, toy_labels, T)
# thetas_avg_perceptron = p1.average_perceptron(toy_features, toy_labels, T)
# thetas_pegasos = p1.pegasos(toy_features, toy_labels, T, L)
#
# def plot_toy_results(algo_name, thetas):
# print('theta for', algo_name, 'is', ', '.join(map(str,list(thetas[0]))))
# print('theta_0 for', algo_name, 'is', str(thetas[1]))
# utils.plot_toy_data(algo_name, toy_features, toy_labels, thetas)
#
# plot_toy_results('Perceptron', thetas_perceptron)
# plot_toy_results('Average Perceptron', thetas_avg_perceptron)
# plot_toy_results('Pegasos', thetas_pegasos)
#-------------------------------------------------------------------------------
# Problem 7
#-------------------------------------------------------------------------------
# T = 10
# L = 0.01
#
# pct_train_accuracy, pct_val_accuracy = \
# p1.classifier_accuracy(p1.perceptron, train_bow_features,val_bow_features,train_labels,val_labels,T=T)
# print("{:35} {:.4f}".format("Training accuracy for perceptron:", pct_train_accuracy))
# print("{:35} {:.4f}".format("Validation accuracy for perceptron:", pct_val_accuracy))
#
# avg_pct_train_accuracy, avg_pct_val_accuracy = \
# p1.classifier_accuracy(p1.average_perceptron, train_bow_features,val_bow_features,train_labels,val_labels,T=T)
# print("{:43} {:.4f}".format("Training accuracy for average perceptron:", avg_pct_train_accuracy))
# print("{:43} {:.4f}".format("Validation accuracy for average perceptron:", avg_pct_val_accuracy))
#
# avg_peg_train_accuracy, avg_peg_val_accuracy = \
# p1.classifier_accuracy(p1.pegasos, train_bow_features,val_bow_features,train_labels,val_labels,T=T,L=L)
# print("{:50} {:.4f}".format("Training accuracy for Pegasos:", avg_peg_train_accuracy))
# print("{:50} {:.4f}".format("Validation accuracy for Pegasos:", avg_peg_val_accuracy))
#-------------------------------------------------------------------------------
# Problem 8
#-------------------------------------------------------------------------------
# data = (train_bow_features, train_labels, val_bow_features, val_labels)
#
# # values of T and lambda to try
# Ts = [1, 5, 10, 15, 25, 50]
# Ls = [0.001, 0.01, 0.1, 1, 10]
#
# pct_tune_results = utils.tune_perceptron(Ts, *data)
# print('perceptron valid:', list(zip(Ts, pct_tune_results[1])))
# print('best = {:.4f}, T={:.4f}'.format(np.max(pct_tune_results[1]), Ts[np.argmax(pct_tune_results[1])]))
#
# avg_pct_tune_results = utils.tune_avg_perceptron(Ts, *data)
# print('avg perceptron valid:', list(zip(Ts, avg_pct_tune_results[1])))
# print('best = {:.4f}, T={:.4f}'.format(np.max(avg_pct_tune_results[1]), Ts[np.argmax(avg_pct_tune_results[1])]))
#
# # fix values for L and T while tuning Pegasos T and L, respective
# fix_L = 0.01
# peg_tune_results_T = utils.tune_pegasos_T(fix_L, Ts, *data)
# print('Pegasos valid: tune T', list(zip(Ts, peg_tune_results_T[1])))
# print('best = {:.4f}, T={:.4f}'.format(np.max(peg_tune_results_T[1]), Ts[np.argmax(peg_tune_results_T[1])]))
#
# fix_T = Ts[np.argmax(peg_tune_results_T[1])]
# peg_tune_results_L = utils.tune_pegasos_L(fix_T, Ls, *data)
# print('Pegasos valid: tune L', list(zip(Ls, peg_tune_results_L[1])))
# print('best = {:.4f}, L={:.4f}'.format(np.max(peg_tune_results_L[1]), Ls[np.argmax(peg_tune_results_L[1])]))
#
# utils.plot_tune_results('Perceptron', 'T', Ts, *pct_tune_results)
# utils.plot_tune_results('Avg Perceptron', 'T', Ts, *avg_pct_tune_results)
# utils.plot_tune_results('Pegasos', 'T', Ts, *peg_tune_results_T)
# utils.plot_tune_results('Pegasos', 'L', Ls, *peg_tune_results_L)
#-------------------------------------------------------------------------------
# Use the best method (perceptron, average perceptron or Pegasos) along with
# the optimal hyperparameters according to validation accuracies to test
# against the test dataset. The test data has been provided as
# test_bow_features and test_labels.
#-------------------------------------------------------------------------------
# Your code here
#-------------------------------------------------------------------------------
# Assign to best_theta, the weights (and not the bias!) learned by your most
# accurate algorithm with the optimal choice of hyperparameters.
#-------------------------------------------------------------------------------
# best_theta = None # Your code here
# wordlist = [word for (idx, word) in sorted(zip(dictionary.values(), dictionary.keys()))]
# sorted_word_features = utils.most_explanatory_word(best_theta, wordlist)
# print("Most Explanatory Word Features")
# print(sorted_word_features[:10])
|
from .operator import CompoundOperator
|
# Copyright 2021 MosaicML. All Rights Reserved.
import os
import pathlib
import sys
import pytest
from torch.utils.data import DataLoader
from composer import Callback, Event, State, Trainer
from composer.loggers import FileLogger, FileLoggerHparams, Logger, LoggerDestination, LogLevel
from tests.common import RandomClassificationDataset, SimpleModel
from composer.utils.collect_env import disable_env_report
class FileArtifactLoggerTracker(LoggerDestination):
    """Logger destination that records every file artifact it is asked to log."""
    def __init__(self) -> None:
        self.logged_artifacts = []
    def log_file_artifact(self, state: State, log_level: LogLevel, artifact_name: str, file_path: pathlib.Path, *,
                          overwrite: bool):
        # Neither the trainer state nor the overwrite flag matters for tracking.
        del state, overwrite  # unused
        record = (log_level, artifact_name, file_path)
        self.logged_artifacts.append(record)
@pytest.mark.parametrize("log_level", [LogLevel.EPOCH, LogLevel.BATCH])
@pytest.mark.timeout(10)
def test_file_logger(dummy_state: State, log_level: LogLevel, tmpdir: pathlib.Path):
    """Replay a scripted sequence of trainer events against a FileLogger and
    assert both the resulting file contents and the number of artifact uploads.

    ``log_interval=3``: BATCH-level data prints every 3rd batch, EPOCH-level
    data every 3rd epoch; FIT-level data is always printed.
    """
    log_file_name = os.path.join(tmpdir, "output.log")
    log_destination = FileLoggerHparams(
        log_interval=3,
        log_level=log_level,
        filename=log_file_name,
        artifact_name="{run_name}/rank{rank}.log",
        buffer_size=1,
        flush_interval=1,
    ).initialize_object()
    file_tracker_destination = FileArtifactLoggerTracker()
    logger = Logger(dummy_state, destinations=[log_destination, file_tracker_destination])
    log_destination.run_event(Event.INIT, dummy_state, logger)
    # --- epoch 0: three batches, no data logged yet ---
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    # --- epoch 1: log at FIT, EPOCH and BATCH levels (batch counter is now 2) ---
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    logger.data_fit({"metric": "fit"})  # should print
    logger.data_epoch({"metric": "epoch"})  # should print on batch level, since epoch calls are always printed
    logger.data_batch({"metric": "batch"})  # should print on batch level, since we print every 3 steps
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    # --- epoch 2: EPOCH-level call only ---
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    logger.data_epoch({"metric": "epoch1"})  # should print, since we log every 3 epochs
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    # --- epoch 3: "batch1" misses the every-3rd-batch interval and must not appear ---
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    logger.data_epoch({"metric": "epoch2"})  # should print on batch level, since epoch calls are always printed
    logger.data_batch({"metric": "batch1"})  # should NOT print
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    log_destination.close(dummy_state, logger)
    with open(log_file_name, 'r') as f:
        if log_level == LogLevel.EPOCH:
            assert f.readlines() == [
                '[FIT][batch=2]: { "metric": "fit", }\n',
                '[EPOCH][batch=2]: { "metric": "epoch1", }\n',
            ]
        else:
            assert log_level == LogLevel.BATCH
            assert f.readlines() == [
                '[FIT][batch=2]: { "metric": "fit", }\n',
                '[EPOCH][batch=2]: { "metric": "epoch", }\n',
                '[BATCH][batch=2]: { "metric": "batch", }\n',
                '[EPOCH][batch=2]: { "metric": "epoch1", }\n',
                '[EPOCH][batch=3]: { "metric": "epoch2", }\n',
            ]
    # Flush interval is 1, so there should be one log_file call per LogLevel
    # Flushes also happen per each eval_start, epoch_start, and close()
    # If the loglevel is batch, flushing also happens every epoch end
    if log_level == LogLevel.EPOCH:
        # epoch flushes + epoch_start flushes + one final flush on close()
        assert len(file_tracker_destination.logged_artifacts) == int(dummy_state.timer.epoch) + int(
            dummy_state.timer.epoch) + 1
    else:
        assert log_level == LogLevel.BATCH
        assert len(file_tracker_destination.logged_artifacts) == int(dummy_state.timer.batch) + int(
            dummy_state.timer.epoch) + int(dummy_state.timer.epoch) + 1
@pytest.mark.timeout(15)  # disk can be slow on Jenkins
def test_file_logger_capture_stdout_stderr(dummy_state: State, tmpdir: pathlib.Path):
    """Verify that stdout/stderr written while the logger exists is captured
    into the log file, line by line, tagged with its originating stream."""
    log_file_name = os.path.join(tmpdir, "output.log")
    log_destination = FileLoggerHparams(filename=log_file_name,
                                        buffer_size=1,
                                        flush_interval=1,
                                        capture_stderr=True,
                                        capture_stdout=True).initialize_object()
    # capturing should start immediately
    print("Hello, stdout!\nExtra Line")
    print("Hello, stderr!\nExtra Line2", file=sys.stderr)
    logger = Logger(dummy_state, destinations=[log_destination])
    log_destination.run_event(Event.INIT, dummy_state, logger)
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    log_destination.close(dummy_state, logger)
    with open(log_file_name, 'r') as f:
        # Each printed line appears individually, prefixed with its stream tag.
        assert f.readlines() == [
            '[stdout]: Hello, stdout!\n',
            '[stdout]: Extra Line\n',
            '[stderr]: Hello, stderr!\n',
            '[stderr]: Extra Line2\n',
        ]
class ExceptionRaisingCallback(Callback):
    """Callback whose only purpose is to raise at the start of fitting."""

    def fit_start(self, state: State, logger: Logger) -> None:
        del state  # unused
        del logger  # unused
        raise RuntimeError("My Exception!")
def test_exceptions_are_printed(tmpdir: pathlib.Path):
    """An unhandled exception printed via sys.excepthook must end up in the
    file logger's output while the logger is open, and not afterwards."""
    # Test that exceptions are printed to stderr, which is captured by the file logger
    # The file logger stops capturing stdout/stderr when it is closed
    # Here, we construct a trainer that raises an exception on Event.FIT_START
    # and assert that the exception is written to the logfile
    exception_raising_callback = ExceptionRaisingCallback()
    logfile_name = str(tmpdir / "logfile.txt")
    file_logger = FileLogger(filename=logfile_name, capture_stderr=True)
    dataloader = DataLoader(RandomClassificationDataset())
    model = SimpleModel()
    trainer = Trainer(model=model,
                      train_dataloader=dataloader,
                      max_duration=1,
                      callbacks=[exception_raising_callback],
                      loggers=[file_logger])
    disable_env_report()  # Printing the full report in this test can cause timeouts
    # manually calling `sys.excepthook` for the exception, as it is impossible to write a test
    # that validates unhandled exceptions are logged, since the test validation code would by definition
    # need to handle the exception!
    try:
        trainer.fit()
    except RuntimeError:
        exc_type, exc_value, tb = sys.exc_info()
        assert exc_type is not None
        assert exc_value is not None
        assert tb is not None
        sys.excepthook(exc_type, exc_value, tb)
    trainer.close()
    with open(logfile_name, "r") as f:
        log_lines = f.readlines()
        # The traceback's final line is the captured exception message.
        assert "[stderr]: RuntimeError: My Exception!\n" == log_lines[-1]
    # Since the trainer was closed, future prints should not appear in the file logger
    print("SHOULD NOT BE CAPTURED")
    with open(logfile_name, "r") as f:
        logfile = f.read()
        assert "SHOULD NOT BE CAPTURED" not in logfile
|
#!/usr/bin/env python
# coding: utf-8
# # Deep Crossentropy method
#
# In this section we'll extend your CEM implementation with neural networks! You will train a multi-layer neural network to solve simple continuous state space games. __Please make sure you're done with tabular crossentropy method from the previous notebook.__
#
# 
#
#
# In[1]:
import sys, os
if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):
    # One-time Colab bootstrap: fetch and run the course setup script,
    # then drop a marker file so this never re-runs.
    get_ipython().system('wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash')
    get_ipython().system('touch .setup_complete')
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
    get_ipython().system('bash ../xvfb start')
    os.environ['DISPLAY'] = ':1'
# In[2]:
import gym
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# if you see "<classname> has no attribute .env", remove .env or update gym
# `.env` unwraps the TimeLimit wrapper so episodes are not truncated.
env = gym.make("CartPole-v0").env
env.reset()
n_actions = env.action_space.n  # number of discrete actions
state_dim = env.observation_space.shape[0]  # observation vector length
plt.imshow(env.render("rgb_array"))
print("state vector dim =", state_dim)
print("n_actions =", n_actions)
# # Neural Network Policy
#
# For this assignment we'll utilize the simplified neural network implementation from __[Scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html)__. Here's what you'll need:
#
# * `agent.partial_fit(states, actions)` - make a single training pass over the data. Maximize the probability of :actions: from :states:
# * `agent.predict_proba(states)` - predict probabilities of all actions, a matrix of shape __[len(states), n_actions]__
#
# In[4]:
from sklearn.neural_network import MLPClassifier
agent = MLPClassifier(
    hidden_layer_sizes=(20, 20),
    activation='tanh',
)
# initialize agent to the dimension of state space and number of actions:
# a first partial_fit with one sample per action declares all classes
# (actions) to sklearn before real training starts.
agent.partial_fit([env.reset()] * n_actions, range(n_actions), range(n_actions))
# In[12]:
def generate_session(env, agent, t_max=1000):
    """Play a single game using the agent's neural-network policy.

    Terminates when the game finishes or after ``t_max`` steps.

    :param env: gym-style environment exposing ``reset()``, ``step(action)``
        and ``action_space.n``
    :param agent: classifier exposing ``predict_proba(states)``
    :param t_max: maximum number of environment steps to take
    :returns: (states, actions, total_reward) of the played episode
    """
    states, actions = [], []
    total_reward = 0
    s = env.reset()
    for t in range(t_max):
        # Predict action probabilities for the current state.
        # reshape(1, -1) works for any state dimensionality (the previous
        # reshape(-1, 4) hard-coded CartPole's 4-dimensional state).
        probs = agent.predict_proba(np.asarray(s).reshape(1, -1)).ravel()
        assert probs.shape == (env.action_space.n,), "make sure probabilities are a vector (hint: np.reshape)"
        # Sample proportionally to the predicted probabilities rather than
        # taking the argmax -- this is what keeps exploration alive.
        # len(probs) is used instead of the module-global n_actions so the
        # function works with any environment passed in.
        a = np.random.choice(len(probs), p=probs)
        new_s, r, done, info = env.step(a)
        # record sessions like you did before
        states.append(s)
        actions.append(a)
        total_reward += r
        s = new_s
        if done:
            break
    return states, actions, total_reward
# In[13]:
# Smoke-test the session generator with a short rollout.
dummy_states, dummy_actions, dummy_reward = generate_session(env, agent, t_max=5)
print("states:", np.stack(dummy_states))
print("actions:", dummy_actions)
print("reward:", dummy_reward)
# ### CEM steps
# Deep CEM uses exactly the same strategy as the regular CEM, so you can copy your function code from previous notebook.
#
# The only difference is that now each observation is not a number but a `float32` vector.
# In[14]:
def select_elites(states_batch, actions_batch, rewards_batch, percentile=50):
    """
    Select states and actions from games that have rewards >= percentile.

    :param states_batch: list of lists of states, states_batch[session_i][t]
    :param actions_batch: list of lists of actions, actions_batch[session_i][t]
    :param rewards_batch: list of rewards, rewards_batch[session_i]
    :returns: (elite_states, elite_actions) -- flat lists of the states and
        corresponding actions from every session whose total reward is at
        least the given percentile of rewards_batch, preserving session
        order and timestep order within each session.
    """
    threshold = np.percentile(rewards_batch, percentile)
    keep = [reward >= threshold for reward in rewards_batch]
    elite_states = [s for flag, session in zip(keep, states_batch) if flag for s in session]
    elite_actions = [a for flag, session in zip(keep, actions_batch) if flag for a in session]
    return elite_states, elite_actions
# # Training loop
# Generate sessions, select N best and fit to those.
# In[15]:
from IPython.display import clear_output
def show_progress(rewards_batch, log, percentile, reward_range=(-990, +10)):
    """
    Display training progress: print the mean reward and elite threshold,
    plot their history, and show a histogram of the current reward batch.
    No cool math here, just charts.

    :param rewards_batch: rewards of the sessions from the current iteration
    :param log: running history; [mean_reward, threshold] is appended to it
    :param percentile: percentile used for the elite threshold
    :param reward_range: (lo, hi) histogram range. A tuple, not a list --
        mutable default arguments are shared across calls and are a classic
        Python pitfall, so the default was changed from [-990, +10].
    """
    mean_reward = np.mean(rewards_batch)
    threshold = np.percentile(rewards_batch, percentile)
    log.append([mean_reward, threshold])
    clear_output(True)
    print("mean reward = %.3f, threshold=%.3f" % (mean_reward, threshold))
    plt.figure(figsize=[8, 4])
    plt.subplot(1, 2, 1)
    plt.plot(list(zip(*log))[0], label='Mean rewards')
    plt.plot(list(zip(*log))[1], label='Reward thresholds')
    plt.legend()
    plt.grid()
    plt.subplot(1, 2, 2)
    plt.hist(rewards_batch, range=reward_range)
    plt.vlines([np.percentile(rewards_batch, percentile)],
               [0], [100], label="percentile", color='red')
    plt.legend()
    plt.grid()
    plt.show()
# In[17]:
# Main CEM training loop: sample sessions, keep the elite ones,
# and fit the policy network to imitate the elite actions.
n_sessions = 100
percentile = 70
log = []
for i in range(100):
    # generate new sessions
    sessions = [generate_session(env, agent) for _ in range(n_sessions)]
    states_batch, actions_batch, rewards_batch = map(np.array, zip(*sessions))
    elite_states, elite_actions = select_elites(states_batch, actions_batch, rewards_batch)
    # <YOUR CODE: partial_fit agent to predict elite_actions(y) from elite_states(X)>
    agent.partial_fit(elite_states, elite_actions)
    show_progress(rewards_batch, log, percentile, reward_range=[0, np.max(rewards_batch)])
    if np.mean(rewards_batch) > 190:
        print("You Win! You may stop training now via KeyboardInterrupt.")
# # Results
# In[19]:
# Record sessions
import gym.wrappers
# Monitor wraps the env and saves recordings of each episode to ./videos.
with gym.wrappers.Monitor(gym.make("CartPole-v0"), directory="videos", force=True) as env_monitor:
    sessions = [generate_session(env_monitor, agent) for _ in range(100)]
# In[20]:
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from base64 import b64encode
from IPython.display import HTML
video_paths = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
video_path = video_paths[-1]  # You can also try other indices
if 'google.colab' in sys.modules:
    # https://stackoverflow.com/a/57378660/1214547
    # Colab cannot serve local files, so inline the video as a base64 data URL.
    with video_path.open('rb') as fp:
        mp4 = fp.read()
    data_url = 'data:video/mp4;base64,' + b64encode(mp4).decode()
else:
    data_url = str(video_path)
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(data_url))
# # Homework part I
#
# ### Tabular crossentropy method
#
# You may have noticed that the taxi problem quickly converges from -100 to a near-optimal score and then descends back into -50/-100. This is in part because the environment has some innate randomness. Namely, the starting points of passenger/driver change from episode to episode.
#
# ### Tasks
# - __1.1__ (2 pts) Find out how the algorithm performance changes if you use a different `percentile` and/or `n_sessions`. Provide here some figures so we can see how the hyperparameters influence the performance.
# - __1.2__ (1 pts) Tune the algorithm to end up with positive average score.
#
# It's okay to modify the existing code.
#
# ```<Describe what you did here>```
# 1. Changed net arch to a bigger one
# In[29]:
# Re-create the environment and a deeper ReLU network for the homework runs.
env = gym.make("CartPole-v0").env
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape[0]
agent = MLPClassifier(
    hidden_layer_sizes=(20, 40, 20),
    activation='relu',
)
# initialize agent to the dimension of state space and number of actions
agent.partial_fit([env.reset()] * n_actions, range(n_actions), range(n_actions))
# In[ ]:
# Hyperparameter sweep over the number of sessions and the elite percentile.
# Bug fix: the option lists were previously assigned to `n_sessions` and
# `percentile` while the loops iterated over the undefined names
# `n_sessions_opts` / `percentile_opts`, raising NameError at runtime.
n_sessions_opts = [20, 50, 100, 300]
percentile_opts = [10, 40, 70, 97]
log = []
plt.subplots(4, 4, figsize=(20, 20))
for n_sessions in n_sessions_opts:
    for percentile in percentile_opts:
        for i in range(100):
            # generate new sessions
            sessions = [generate_session(env, agent) for _ in range(n_sessions)]
            states_batch, actions_batch, rewards_batch = map(np.array, zip(*sessions))
            elite_states, elite_actions = select_elites(states_batch, actions_batch, rewards_batch)
            # <YOUR CODE: partial_fit agent to predict elite_actions(y) from elite_states(X)>
            agent.partial_fit(elite_states, elite_actions)
            show_progress(rewards_batch, log, percentile, reward_range=[0, np.max(rewards_batch)])
            if np.mean(rewards_batch) > 190:
                print("You Win! You may stop training now via KeyboardInterrupt.")
# # Homework part II
#
# ### Deep crossentropy method
#
# By this moment you should have got enough score on [CartPole-v0](https://gym.openai.com/envs/CartPole-v0) to consider it solved (see the link). It's time to try something harder.
#
# * if you have any trouble with CartPole-v0 and feel stuck, feel free to ask us or your peers for help.
#
# ### Tasks
#
# * __2.1__ (3 pts) Pick one of environments: `MountainCar-v0` or `LunarLander-v2`.
# * For MountainCar, get average reward of __at least -150__
# * For LunarLander, get average reward of __at least +50__
#
# See the tips section below, it's kinda important.
# __Note:__ If your agent is below the target score, you'll still get most of the points depending on the result, so don't be afraid to submit it.
#
#
# * __2.2__ (up to 6 pts) Devise a way to speed up training against the default version
# * Obvious improvement: use [`joblib`](https://joblib.readthedocs.io/en/latest/). However, note that you will probably need to spawn a new environment in each of the workers instead of passing it via pickling. (2 pts)
# * Try re-using samples from 3-5 last iterations when computing threshold and training. (2 pts)
# * Experiment with the number of training iterations and learning rate of the neural network (see params). Provide some plots as in 1.1. (2 pts)
#
# __Please list what you did in Anytask submission form__.
#
#
# ### Tips
# * Gym page: [MountainCar](https://gym.openai.com/envs/MountainCar-v0), [LunarLander](https://gym.openai.com/envs/LunarLander-v2)
# * Sessions for MountainCar may last for 10k+ ticks. Make sure ```t_max``` param is at least 10k.
# * Also it may be a good idea to cut rewards via ">" and not ">=". If 90% of your sessions get reward of -10k and 10% are better, then if you use the 20th percentile as threshold, R >= threshold __fails to cut off bad sessions__ while R > threshold works alright.
# * _issue with gym_: Some versions of gym limit game time by 200 ticks. This will prevent cem training in most cases. Make sure your agent is able to play for the specified __t_max__, and if it isn't, try `env = gym.make("MountainCar-v0").env` or otherwise get rid of TimeLimit wrapper.
# * If you use old _swig_ lib for LunarLander-v2, you may get an error. See this [issue](https://github.com/openai/gym/issues/100) for solution.
# * If it won't train it's a good idea to plot reward distribution and record sessions: they may give you some clue. If they don't, call course staff :)
# * 20-neuron network is probably not enough, feel free to experiment.
#
# You may find the following snippet useful:
# In[31]:
def visualize_mountain_car(env, agent):
    """Plot the agent's policy over the (position, velocity) state space of
    MountainCar and overlay one sampled trajectory on top of it."""
    # Compute policy for all possible x and v (with discretization)
    xs = np.linspace(env.min_position, env.max_position, 100)
    vs = np.linspace(-env.max_speed, env.max_speed, 100)
    # vs is reversed so the y-axis grows upward in the imshow rendering.
    grid = np.dstack(np.meshgrid(xs, vs[::-1])).transpose(1, 0, 2)
    grid_flat = grid.reshape(len(xs) * len(vs), 2)
    # Probabilities of MountainCar's 3 actions, mapped onto the RGB channels.
    probs = agent.predict_proba(grid_flat).reshape(len(xs), len(vs), 3).transpose(1, 0, 2)
    # # The above code is equivalent to the following:
    # probs = np.empty((len(vs), len(xs), 3))
    # for i, v in enumerate(vs[::-1]):
    #     for j, x in enumerate(xs):
    #         probs[i, j, :] = agent.predict_proba([[x, v]])[0]
    # Draw policy
    f, ax = plt.subplots(figsize=(7, 7))
    ax.imshow(probs, extent=(env.min_position, env.max_position, -env.max_speed, env.max_speed), aspect='auto')
    ax.set_title('Learned policy: red=left, green=nothing, blue=right')
    ax.set_xlabel('position (x)')
    ax.set_ylabel('velocity (v)')
    # Sample a trajectory and draw it
    states, actions, _ = generate_session(env, agent)
    states = np.array(states)
    ax.plot(states[:, 0], states[:, 1], color='white')
    # Draw every 3rd action from the trajectory
    for (x, v), a in zip(states[::3], actions[::3]):
        if a == 0:
            plt.arrow(x, v, -0.1, 0, color='white', head_length=0.02)
        elif a == 2:
            plt.arrow(x, v, 0.1, 0, color='white', head_length=0.02)
# NOTE(review): `agent_mountain_car` is not defined anywhere in this notebook;
# it must be created/trained above before this cell can run -- confirm.
# Also verify that this gym version supports using an unwrapped env as a
# context manager.
with gym.make('MountainCar-v0').env as env:
    visualize_mountain_car(env, agent_mountain_car)
# ### Bonus tasks
#
# * __2.3 bonus__ (2 pts) Try to find a network architecture and training params that solve __both__ environments above (_Points depend on implementation. If you attempted this task, please mention it in Anytask submission._)
#
# * __2.4 bonus__ (4 pts) Solve continuous action space task with `MLPRegressor` or similar.
# * Since your agent only predicts the "expected" action, you will have to add noise to ensure exploration.
# * Choose one of [MountainCarContinuous-v0](https://gym.openai.com/envs/MountainCarContinuous-v0) (90+ pts to solve), [LunarLanderContinuous-v2](https://gym.openai.com/envs/LunarLanderContinuous-v2) (200+ pts to solve)
# * 4 points for solving. Slightly less for getting some results below solution threshold. Note that discrete and continuous environments may have slightly different rules aside from action spaces.
#
#
# If you're still feeling unchallenged, consider the project (see other notebook in this folder).
# In[ ]:
|
from models.loss.centernet_loss import centernet_Loss
import torch.nn as nn
import torch
class centernet_loss_module(nn.Module):
    """Computes the CenterNet training loss for a model with one or two
    hourglass stacks.

    With ``nstack == 2`` during training, each stack's predictions are scored
    by its own loss head and the per-term losses are summed element-wise; at
    inference (``target is None``) only the final stack's loss head is used.
    """

    def __init__(self, config, stride=4, nstack=2):
        super().__init__()
        self.nstack = nstack
        if nstack == 1:
            self.center_loss = centernet_Loss(config["model"]["classes"], stride, config=config, device_id=config["device_id"])
        elif nstack == 2:
            # One loss head per hourglass stack.
            self.center_loss1 = centernet_Loss(config["model"]["classes"], stride, config=config, device_id=config["device_id"])
            self.center_loss2 = centernet_Loss(config["model"]["classes"], stride, config=config, device_id=config["device_id"])

    def forward(self, input, target=None):
        """
        :param input: list of per-stack prediction triples
            (cls_pred, txty_pred, twth_pred)
        :param target: ground-truth tensor, or None at inference time
        :returns: list with the (possibly merged) loss result
        """
        result = []
        if self.nstack == 1:
            cls_pred, txty_pred, twth_pred = input[0]
            center_loss_input = torch.cat((txty_pred, twth_pred, cls_pred), dim=1)
            result.append(self.center_loss(center_loss_input, target))
        elif self.nstack == 2:
            # Bug fix: use `is None` instead of `== None` -- comparing a
            # tensor target with `==` performs an element-wise comparison
            # rather than a None check.
            if target is None:
                cls_pred, txty_pred, twth_pred = input[0]
                center_loss_input = torch.cat((txty_pred, twth_pred, cls_pred), dim=1)
                result.append(self.center_loss2(center_loss_input, target))
            else:
                # stack 1
                cls_pred1, txty_pred1, twth_pred1 = input[0]
                center_loss_input1 = torch.cat((txty_pred1, twth_pred1, cls_pred1), dim=1)
                result1 = self.center_loss1(center_loss_input1, target)
                # stack 2
                cls_pred2, txty_pred2, twth_pred2 = input[1]
                center_loss_input2 = torch.cat((txty_pred2, twth_pred2, cls_pred2), dim=1)
                result2 = self.center_loss2(center_loss_input2, target)
                # TODO: merge the two stacks' results
                result3 = [a + b for a, b in zip(result1, result2)]
                result.append(result3)
        return result
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Build the setup() keyword arguments from this package's package.xml.
# Standard ROS catkin boilerplate: invoked by catkin, not run directly.
d = generate_distutils_setup(
    packages=['rqt_runtime_monitor'],
    package_dir={'': 'src'}
)
setup(**d)
|
import os
import shutil
import pytest
from jina import Flow, DocumentArray, Document
from .. import DocCache
cur_dir = os.path.dirname(os.path.abspath(__file__))
default_config = os.path.abspath(os.path.join(cur_dir, '..', 'config.yml'))
@pytest.mark.parametrize('cache_fields', ['[content_hash]', '[id]'])
def test_cache(tmpdir, cache_fields):
    """Index two batches of partially-duplicated Documents and verify the
    cache drops duplicates according to the configured cache field."""
    os.environ['CACHE_FIELDS'] = cache_fields
    os.environ['CACHE_WORKSPACE'] = os.path.join(tmpdir, 'cache')
    docs = []
    docs2 = []
    if cache_fields == '[content_hash]':
        docs = [Document(content='a'), Document(content='a')]
        docs2 = [Document(content='b'), Document(content='a')]
    elif cache_fields == '[id]':
        docs = [Document(id='a'), Document(id='a')]
        docs2 = [Document(id='b'), Document(id='a')]
    with Flow().add(uses=os.path.join(cur_dir, 'cache.yml')) as f:
        response = f.post(
            on='/index',
            inputs=DocumentArray(docs),
            return_results=True
        )
        # The in-batch duplicate is dropped; a single Document survives.
        assert len(response[0].docs) == 1
        if cache_fields == '[content_hash]':
            assert set([d.content for d in response[0].docs]) == {'a'}
        elif cache_fields == '[id]':
            assert set([d.id for d in response[0].docs]) == {'a'}
        response = f.post(
            on='/index',
            inputs=DocumentArray(docs2),
            return_results=True
        )
        assert len(response[0].docs) == 1
        # assert the correct docs have been removed
        if cache_fields == '[content_hash]':
            assert set([d.content for d in response[0].docs]) == {'b'}
        elif cache_fields == '[id]':
            assert set([d.id for d in response[0].docs]) == {'b'}
def test_cache_id_content_hash(tmpdir):
    """Caching on (id, content): three fully identical Documents collapse
    into a single one."""
    os.environ['CACHE_FIELDS'] = '[id, content]'
    os.environ['CACHE_WORKSPACE'] = os.path.join(tmpdir, 'cache')
    docs = [
        Document(id='a', content='content'),
        Document(id='a', content='content'),
        Document(id='a', content='content'),
    ]
    with Flow(return_results=True).add(uses=os.path.join(cur_dir, 'cache.yml')) as f:
        response = f.post(
            on='/index',
            inputs=DocumentArray(docs),
            return_results=True
        )
        assert len(response[0].docs) == 1
        # assert the correct docs have been removed
        assert set([d.content for d in response[0].docs]) == {'content'}
        assert set([d.id for d in response[0].docs]) == {'a'}
def test_cache_id_content_hash2(tmpdir):
    """Caching on (id, content_hash): Documents with distinct ids survive
    even when their content matches."""
    os.environ['CACHE_FIELDS'] = '[id, content_hash]'
    os.environ['CACHE_WORKSPACE'] = os.path.join(tmpdir, 'cache')
    docs2 = [
        Document(id='b', content='content'),
        Document(id='a', content='content'),
        Document(id='a', content='content'),
    ]
    with Flow(return_results=True).add(uses=os.path.join(cur_dir, 'cache.yml')) as f:
        response = f.post(
            on='/index',
            inputs=DocumentArray(docs2),
            return_results=True
        )
        # Only the exact (id, content_hash) duplicate is removed.
        assert len(response[0].docs) == 2
def test_cache_crud(tmpdir):
    """Exercise index / update / delete on DocCache and check its counters."""
    cache = DocCache(
        fields=('content_hash',),
        metas={'workspace': os.path.join(tmpdir, 'cache'), 'name': 'cache'},
    )
    initial = DocumentArray([
        Document(id=1, content='content'),
        Document(id=2, content='content'),
        Document(id=3, content='content'),
        Document(id=4, content='content2'),
    ])
    cache.index_or_remove_from_request(initial)
    # we cache all the docs by id, we just remove the ones that have already been "hit"
    assert cache.ids_count == 4
    assert cache.hashes_count == 2
    updated = DocumentArray([
        Document(id=1, content='content3'),
        Document(id=2, content='content4'),
        Document(id=3, content='contentX'),
        Document(id=4, content='contentBLA'),
    ])
    cache.update(updated)
    # After the update every cached Document has a distinct content hash.
    assert cache.ids_count == 4
    assert cache.hashes_count == 4
    to_delete = DocumentArray([Document(id=i) for i in (1, 2, 3, 4, 4, 5, 6, 7)])
    cache.delete(to_delete)
    # Deleting (with repeats and unknown ids) empties the cache entirely.
    assert cache.ids_count == 0
    assert cache.hashes_count == 0
def test_default_config(tmpdir):
    """End-to-end index / update / delete flow through the default config.yml."""
    # Remove any cache workspace left over from a previous run.
    shutil.rmtree(os.path.join(cur_dir, '..', 'cache'), ignore_errors=True)
    docs = DocumentArray([
        Document(id=1, content='🐯'),
        Document(id=2, content='🐯'),
        Document(id=3, content='🐻'),
    ])
    f = Flow(return_results=True).add(uses=default_config)
    with f:
        response = f.post(on='/index', inputs=docs, return_results=True)
        assert len(response[0].data.docs) == 2  # the duplicated Document is removed from the request
        assert set([doc.id for doc in response[0].data.docs]) == set(['1', '3'])
    docs_to_update = DocumentArray([
        Document(id=2, content='🐼')
    ])
    with f:
        response = f.post(on='/update', inputs=docs_to_update, return_results=True)
        assert len(response[0].data.docs) == 1  # the Document with `id=2` is no longer duplicated.
    with f:
        response = f.post(on='/index', inputs=docs[-1], return_results=True)
        assert len(response[0].data.docs) == 0  # the Document has been cached
        f.post(on='/delete', inputs=docs[-1])
        response = f.post(on='/index', inputs=docs[-1], return_results=True)
        assert len(response[0].data.docs) == 1  # the Document is cached again after the deletion
|
from .user import User
__all__ = ("User",)
|
from django.urls import path,include
from . import views
# URL routes for the notices app.
urlpatterns = [
    path('',views.home, name='notice-home'),  # app root -> home view
]
|
from cu2 import exceptions
import threading
import click
import json
import os
import re
import requests
import sys
class BaseConfig(object):
    """Persistent cu2 configuration backed by a JSON file at ``config_path``.

    Attribute assignments made after :meth:`load` are mirrored into
    ``persistent_config`` so that :meth:`write` can round-trip them to disk.
    """

    def __init__(self):
        self.load()

    def __setattr__(self, name, value):
        """Ensures that changes made after loading with default values
        are written back to disk.
        """
        # Before load() finishes, persistent_config does not exist yet, so
        # early assignments are plain attribute writes.
        if hasattr(self, 'persistent_config'):
            self.persistent_config[name] = value
        object.__setattr__(self, name, value)

    @property
    def default_download_directory(self):
        """Returns a platform-specific download directory to use if no download
        directory is specified by the user.
        """
        if sys.platform in ['cygwin', 'win32']:
            return os.path.join(os.environ['USERPROFILE'], 'Downloads')
        else:
            return os.environ['HOME']

    def load(self):
        """Read the JSON config file and populate attributes.

        A missing file yields defaults; malformed JSON raises
        ``exceptions.ConfigError`` carrying the file text and error cursor.
        """
        try:
            f = open(config_path)
        except FileNotFoundError:
            j = {}
        else:
            try:
                j = json.load(f)
            except ValueError as e:
                f.seek(0, 0)
                cfargs = {}
                if hasattr(json.decoder, 'JSONDecodeError'):
                    cfargs = {'config': f.read(),
                              'cursor': (e.lineno, e.colno),
                              'message': 'Error reading config: {}'
                                         .format(e.msg)}
                else:
                    # Remove this hack when we drop Python 3.4 support
                    msg, pos = str(e).split(':')
                    m = re.match(r'\s*line (\d+) column (\d+).*', pos)
                    cur = (int(m.group(1)), int(m.group(2)))
                    cfargs = {'config': f.read(),
                              'cursor': cur,
                              'message': 'Error reading config: {}'
                                         .format(msg)}
                raise exceptions.ConfigError(**cfargs)
            finally:
                f.close()
        self.cbz = j.get('cbz', False)
        self.compact_new = j.get('compact_new', False)
        self.download_directory = j.get('download_directory',
                                        self.default_download_directory)
        self.download_threads = j.get('download_threads', 4)
        self.html_parser = j.get('html_parser', 'html.parser')
        self.madokami = MadokamiConfig(self, j.get('madokami', {}))
        self.relative_latest = j.get('relative_latest', False)
        # Set last: its presence enables the __setattr__ mirroring above.
        self.persistent_config = j

    def serialize(self):
        """Returns the current persistent configuration as a dictionary. All
        private configuration values starting with an underscore are removed
        from the configuration.
        """
        configuration = dict(self.persistent_config)
        configuration['madokami'] = dict(self.madokami.__dict__)
        configuration_keys = list(configuration.keys())
        while configuration_keys:
            key = configuration_keys.pop(0)
            key_levels = key.split('.')
            # Track the dict that owns `value`; starting at `configuration`
            # (instead of None) fixes a crash when a top-level key starts
            # with an underscore.
            dictionary = configuration
            value = configuration[key_levels[0]]
            for level in key_levels[1:]:
                dictionary = value
                value = value[level]
            if key_levels[-1].startswith('_'):
                del dictionary[key_levels[-1]]
                continue
            if isinstance(value, dict):
                # Queue nested keys as dotted paths for later processing.
                configuration_keys += ['.'.join([key, x]) for x in value]
        return configuration

    def write(self):
        """Serialize the persistent configuration to disk as pretty JSON."""
        if hasattr(self, 'persistent_config'):
            configuration = self.serialize()
            with open(config_path, 'w') as file:
                json.dump(configuration, file, sort_keys=True, indent=2)
class MadokamiConfig(object):
    """Madokami credentials section of the configuration.

    :param config: owning BaseConfig (kept privately so it is stripped from
        serialized output)
    :param data: dict of stored values; renamed from ``dict``, which
        shadowed the builtin (only positional callers exist in this module)
    """

    def __init__(self, config, data):
        self._config = config
        self.password = data.get('password', None)
        self.username = data.get('username', None)

    @property
    def login(self):
        """Returns a tuple containing the username and password. Missing values
        will be prompted from the user during runtime.
        """
        if not self.username:
            self.username = click.prompt('Madokami username')
        if not self.password:
            self.password = click.prompt('Madokami password', hide_input=True)
        return (self.username, self.password)
def get():
    """Return the active config object, initializing it on first access."""
    if '_config' not in globals():
        initialize()
    return _config
def initialize(directory=None):
    """Initializes the cu2 directory and config file either with specified
    directory or ~/.cu2.

    :param directory: explicit directory to use instead of the platform default
    """
    global _config, config_path, cu2_dir
    if directory:
        cu2_dir = directory
    elif sys.platform in ['cygwin', 'win32']:
        cu2_dir = os.path.join(os.environ['APPDATA'], 'cu2')
    else:
        cu2_dir = os.path.join(os.environ['HOME'], '.cu2')
    # exist_ok avoids the check-then-create race: the previous
    # exists()/mkdir() pair could crash if another process created the
    # directory in between.
    os.makedirs(cu2_dir, exist_ok=True)
    config_path = os.path.join(cu2_dir, 'config.json')
    _config = BaseConfig()
|
from datetime import datetime, timedelta
from unittest.mock import Mock
from dateutil.tz import tzutc
from mock import patch
from app.data_models import QuestionnaireStore
from app.data_models.session_data import SessionData
from app.data_models.session_store import SessionStore
from app.questionnaire.questionnaire_schema import QuestionnaireSchema
from app.views.handlers.submission import SubmissionHandler
from tests.app.app_context_test_case import AppContextTestCase
class TestSubmissionPayload(AppContextTestCase):
    """Tests for the payload produced by the SubmissionHandler."""

    def setUp(self):
        super().setUp()
        # Runtime language ('cy') deliberately differs from the launch
        # language ('en') so the payload's language field is unambiguous.
        self.session_data = SessionData(
            tx_id="tx_id",
            schema_name="schema_name",
            response_id="response_id",
            period_str="period_str",
            language_code="cy",
            launch_language_code="en",
            survey_url=None,
            ru_name="ru_name",
            ru_ref="ru_ref",
            case_id="0123456789000000",
        )
        self.session_store = SessionStore("user_ik", "pepper", "eq_session_id")
        self.expires_at = datetime.now(tzutc()) + timedelta(seconds=5)

    def test_submission_language_code_in_payload(self):
        """submission_language_code must be the runtime language code."""
        session_store = self.session_store.create(
            "eq_session_id", "user_id", self.session_data, self.expires_at
        )
        storage = Mock()
        storage.get_user_data = Mock(return_value=("{}", 1))
        # Patch out session lookup and answer conversion so only the
        # payload assembly itself is exercised.
        with patch(
            "app.views.handlers.submission.get_session_store",
            return_value=session_store,
        ):
            with patch(
                "app.views.handlers.submission.convert_answers", return_value={}
            ):
                submission_handler = SubmissionHandler(
                    QuestionnaireSchema({}), QuestionnaireStore(storage), {}
                )
                assert (
                    submission_handler.get_payload()["submission_language_code"] == "cy"
                )
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import numpy as np
from .._supported_operators import sklearn_operator_name_map
from ..common._apply_operation import (
apply_cast, apply_concat,
apply_div, apply_reshape,
)
from ..common._registration import register_converter
from ..common._topology import FloatTensorType
from ..proto import onnx_proto
def _calculate_proba(scope, operator, container, model):
    """
    This function calculates class probability scores for
    BaggingClassifier.

    One sub-graph is declared per fitted sub-estimator; each estimator's
    output is reshaped to (1, n_samples, n_outputs), the results are
    concatenated along axis 0 and combined across estimators.

    Parameters
    ----------
    scope: declares intermediate ONNX variables and operators
    operator: topology operator being converted; ``outputs[1]`` receives
        the final probability tensor
    container: receives the generated ONNX nodes and initializers
    model: the fitted sklearn BaggingClassifier

    Returns
    -------
    The ONNX variable name holding the final probabilities.
    """
    final_proba_name = operator.outputs[1].full_name
    proba_list = []
    options = container.get_options(model, dict(raw_scores=False))
    use_raw_scores = options['raw_scores']
    # Probabilities are available when the sub-estimator exposes
    # predict_proba (or decision_function when raw scores are requested);
    # otherwise only hard labels exist and class votes are counted instead.
    has_proba = (hasattr(model.estimators_[0], 'predict_proba')
                 or (use_raw_scores and hasattr(
                     model.estimators_[0], 'decision_function')))
    for index, estimator in enumerate(model.estimators_):
        op_type = sklearn_operator_name_map[type(estimator)]
        this_operator = scope.declare_local_operator(op_type)
        this_operator.raw_operator = estimator
        container.add_options(id(estimator), {'raw_scores': use_raw_scores})
        this_operator.inputs = operator.inputs
        label_name = scope.declare_local_variable('label_%d' % index)
        proba_name = scope.declare_local_variable('proba_%d' % index,
                                                  FloatTensorType())
        this_operator.outputs.append(label_name)
        this_operator.outputs.append(proba_name)
        # Use the probability output when available, else fall back to the
        # predicted-label output (single value per sample).
        proba_output_name = (proba_name.onnx_name if has_proba
                             else label_name.onnx_name)
        reshape_dim_val = len(model.classes_) if has_proba else 1
        reshaped_proba_name = scope.get_unique_variable_name('reshaped_proba')
        apply_reshape(scope, proba_output_name, reshaped_proba_name,
                      container, desired_shape=(1, -1, reshape_dim_val))
        proba_list.append(reshaped_proba_name)
    merged_proba_name = scope.get_unique_variable_name('merged_proba')
    apply_concat(scope, proba_list,
                 merged_proba_name, container, axis=0)
    if has_proba:
        # Average the per-estimator probabilities over axis 0 (estimators).
        container.add_node('ReduceMean', merged_proba_name,
                           final_proba_name,
                           name=scope.get_unique_operator_name('ReduceMean'),
                           axes=[0], keepdims=0)
    else:
        # Majority vote: compare predicted labels against every class label,
        # count matches per class and divide by the number of estimators.
        n_estimators_name = scope.get_unique_variable_name('n_estimators')
        class_labels_name = scope.get_unique_variable_name('class_labels')
        equal_result_name = scope.get_unique_variable_name('equal_result')
        cast_output_name = scope.get_unique_variable_name('cast_output')
        reduced_proba_name = scope.get_unique_variable_name('reduced_proba')
        container.add_initializer(
            n_estimators_name, onnx_proto.TensorProto.FLOAT, [],
            [len(model.estimators_)])
        container.add_initializer(
            class_labels_name, onnx_proto.TensorProto.INT64,
            [1, 1, len(model.estimators_[0].classes_)],
            model.estimators_[0].classes_)
        container.add_node('Equal', [class_labels_name, merged_proba_name],
                           equal_result_name,
                           name=scope.get_unique_operator_name('Equal'))
        apply_cast(scope, equal_result_name, cast_output_name,
                   container, to=onnx_proto.TensorProto.FLOAT)
        container.add_node('ReduceSum', cast_output_name,
                           reduced_proba_name,
                           name=scope.get_unique_operator_name('ReduceSum'),
                           axes=[0], keepdims=0)
        apply_div(scope, [reduced_proba_name, n_estimators_name],
                  final_proba_name, container, broadcast=1)
    return final_proba_name
def convert_sklearn_bagging_classifier(scope, operator, container):
    """
    Converter for BaggingClassifier.

    Emits the per-estimator probability sub-graph (see _calculate_proba),
    then an ArgMax + ArrayFeatureExtractor pair that maps the highest
    probability back to the original class label.  ``outputs[0]`` receives
    the predicted labels, ``outputs[1]`` the probabilities.
    """
    if scope.get_options(operator.raw_operator, dict(nocl=False))['nocl']:
        raise RuntimeError(
            "Option 'nocl' is not implemented for operator '{}'.".format(
                operator.raw_operator.__class__.__name__))
    bagging_op = operator.raw_operator
    # Only the default sampling configuration is supported by this converter.
    if (not (isinstance(bagging_op.max_features, float) and
             bagging_op.max_features == 1.0)):
        raise NotImplementedError(
            "Not default values for max_features is "
            "not supported with BaggingClassifier yet. "
            "You may raise an issue at "
            "https://github.com/onnx/sklearn-onnx/issues")
    if bagging_op.bootstrap_features:
        raise NotImplementedError(
            "bootstrap_features=True is "
            "not supported with BaggingClassifier yet. "
            "You may raise an issue at "
            "https://github.com/onnx/sklearn-onnx/issues")
    classes = bagging_op.classes_
    output_shape = (-1,)
    classes_name = scope.get_unique_variable_name('classes')
    argmax_output_name = scope.get_unique_variable_name('argmax_output')
    array_feature_extractor_result_name = scope.get_unique_variable_name(
        'array_feature_extractor_result')
    # Class labels may be floats, signed integers or strings; pick the
    # matching ONNX tensor type (float labels are converted to int32).
    class_type = onnx_proto.TensorProto.STRING
    if np.issubdtype(bagging_op.classes_.dtype, np.floating):
        class_type = onnx_proto.TensorProto.INT32
        classes = classes.astype(np.int32)
    elif np.issubdtype(bagging_op.classes_.dtype, np.signedinteger):
        class_type = onnx_proto.TensorProto.INT32
    else:
        classes = np.array([s.encode('utf-8') for s in classes])
    container.add_initializer(classes_name, class_type, classes.shape, classes)
    proba_name = _calculate_proba(scope, operator, container, bagging_op)
    # Select the index of the most probable class per sample...
    container.add_node('ArgMax', proba_name,
                       argmax_output_name,
                       name=scope.get_unique_operator_name('ArgMax'), axis=1)
    # ...and translate that index back into the class label.
    container.add_node(
        'ArrayFeatureExtractor', [classes_name, argmax_output_name],
        array_feature_extractor_result_name, op_domain='ai.onnx.ml',
        name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
    if class_type == onnx_proto.TensorProto.INT32:
        # Numeric labels: cast to int64 and flatten to the output shape.
        cast_result_name = scope.get_unique_variable_name('cast_result')
        reshaped_result_name = scope.get_unique_variable_name(
            'reshaped_result')
        apply_cast(scope, array_feature_extractor_result_name,
                   cast_result_name, container,
                   to=onnx_proto.TensorProto.INT64)
        apply_reshape(scope, cast_result_name, reshaped_result_name,
                      container, desired_shape=output_shape)
        apply_cast(scope, reshaped_result_name, operator.outputs[0].full_name,
                   container, to=onnx_proto.TensorProto.INT64)
    else:  # string labels
        apply_reshape(scope, array_feature_extractor_result_name,
                      operator.outputs[0].full_name, container,
                      desired_shape=output_shape)
def convert_sklearn_bagging_regressor(scope, operator, container):
    """
    Converter for BaggingRegressor.

    Declares one sub-graph per fitted sub-estimator, reshapes each
    prediction to (1, n_samples, 1), concatenates along axis 0 and emits a
    ReduceMean so the final output is the average of the estimators.
    """
    bagging_op = operator.raw_operator
    # Only the default sampling configuration is supported by this converter.
    if (not (isinstance(bagging_op.max_features, float) and
             bagging_op.max_features == 1.0)):
        raise NotImplementedError(
            "Not default values for max_features is "
            "not supported with BaggingRegressor yet. "
            "You may raise an issue at "
            "https://github.com/onnx/sklearn-onnx/issues")
    if bagging_op.bootstrap_features:
        raise NotImplementedError(
            "bootstrap_features=True is "
            "not supported with BaggingRegressor yet. "
            "You may raise an issue at "
            "https://github.com/onnx/sklearn-onnx/issues")
    proba_list = []
    for index, estimator in enumerate(bagging_op.estimators_):
        op_type = sklearn_operator_name_map[type(estimator)]
        this_operator = scope.declare_local_operator(op_type)
        this_operator.raw_operator = estimator
        this_operator.inputs = operator.inputs
        label_name = scope.declare_local_variable('label_%d' % index)
        this_operator.outputs.append(label_name)
        reshaped_proba_name = scope.get_unique_variable_name('reshaped_proba')
        apply_reshape(scope, label_name.onnx_name, reshaped_proba_name,
                      container, desired_shape=(1, -1, 1))
        proba_list.append(reshaped_proba_name)
    merged_proba_name = scope.get_unique_variable_name('merged_proba')
    apply_concat(scope, proba_list,
                 merged_proba_name, container, axis=0)
    # Average the stacked predictions over axis 0 (the estimators).
    container.add_node('ReduceMean', merged_proba_name,
                       operator.outputs[0].full_name,
                       name=scope.get_unique_operator_name('ReduceMean'),
                       axes=[0], keepdims=0)
# Register the converters so sklearn-onnx dispatches on the model type.
register_converter('SklearnBaggingClassifier',
                   convert_sklearn_bagging_classifier,
                   options={'zipmap': [True, False],
                            'nocl': [True, False],
                            'raw_scores': [True, False]})
register_converter('SklearnBaggingRegressor',
                   convert_sklearn_bagging_regressor)
|
import numpy as np
import math
def model_evaluate(real_score, predict_score):
    """Evaluate predictions against ground truth.

    Returns a numpy array of
    [AUPR, AUC, f1, accuracy, recall, specificity, precision].
    """
    aupr = get_AUPR(real_score, predict_score)
    auc = get_AUC(real_score, predict_score)
    f1, accuracy, recall, spec, precision = get_Metrics(real_score, predict_score)
    return np.array([aupr, auc, f1, accuracy, recall, spec, precision])
def get_AUPR(real_score, predict_score):
    """Compute the area under the precision-recall curve.

    Thresholds are taken at 999 quantiles of the unique predicted scores;
    precision/recall are computed per threshold and the area is obtained
    with the trapezoidal rule.  The sorted curve points are also written to
    plot_curve/non_attention_AUPR_{X,Y}.csv as a side effect.

    NOTE(review): precision TP/(TP+FP) may divide by zero at extreme
    thresholds and produce NaN entries -- unchanged original behaviour.
    """
    sorted_predict_score = sorted(list(set(np.array(predict_score).flatten())))
    sorted_predict_score_num = len(sorted_predict_score)
    thresholdlist = []
    for i in range(999):
        threshold = sorted_predict_score[int(math.ceil(sorted_predict_score_num * (i + 1) / 1000) - 1)]
        thresholdlist.append(threshold)
    thresholds = np.matrix(thresholdlist)
    TN = np.zeros((1, len(thresholdlist)))
    TP = np.zeros((1, len(thresholdlist)))
    FN = np.zeros((1, len(thresholdlist)))
    FP = np.zeros((1, len(thresholdlist)))
    for i in range(thresholds.shape[1]):
        # Samples predicted positive at this threshold.
        p_index = np.where(predict_score >= thresholds[0, i])
        TP[0, i] = len(np.where(real_score[p_index] == 1)[0])
        FP[0, i] = len(np.where(real_score[p_index] == 0)[0])
        # Samples predicted negative at this threshold.
        n_index = np.where(predict_score < thresholds[0, i])
        FN[0, i] = len(np.where(real_score[n_index] == 1)[0])
        TN[0, i] = len(np.where(real_score[n_index] == 0)[0])
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    x = list(np.array(recall).flatten())
    y = list(np.array(precision).flatten())
    # Sort the (recall, precision) points by recall before integrating.
    # (The original also rebound x/y to the sorted coordinates here, but
    # those values were never used afterwards -- dead code removed.)
    xy = [(x, y) for x, y in zip(x, y)]
    xy.sort()
    new_x = [x for x, y in xy]
    new_y = [y for x, y in xy]
    # Anchor the curve at (0, 1) and (1, 0).
    new_x[0] = 0
    new_y[0] = 1
    new_x.append(1)
    new_y.append(0)
    name1 = 'plot_curve/non_attention_AUPR_X.csv'
    np.savetxt(name1, new_x, delimiter=',')
    name2 = 'plot_curve/non_attention_AUPR_Y.csv'
    np.savetxt(name2, new_y, delimiter=',')
    # Trapezoidal integration of the sorted curve.
    area = 0
    for i in range(thresholds.shape[1]):
        area = area + (new_y[i] + new_y[i + 1]) * (new_x[i + 1] - new_x[i]) / 2
    return area
def get_AUC(real_score, predict_score):
    """Compute the area under the ROC curve.

    Same thresholding scheme as get_AUPR: 999 quantile thresholds over the
    unique predicted scores, then trapezoidal integration of the sorted
    (1 - specificity, sensitivity) points.  The sorted curve points are
    also written to plot_curve/non_attention_AUC_{X,Y}.csv as a side effect.
    """
    sorted_predict_score = sorted(list(set(np.array(predict_score).flatten())))
    sorted_predict_score_num = len(sorted_predict_score)
    thresholdlist = []
    for i in range(999):
        threshold = sorted_predict_score[int(math.ceil(sorted_predict_score_num * (i + 1) / 1000) - 1)]
        thresholdlist.append(threshold)
    thresholds = np.matrix(thresholdlist)
    TN = np.zeros((1, len(thresholdlist)))
    TP = np.zeros((1, len(thresholdlist)))
    FN = np.zeros((1, len(thresholdlist)))
    FP = np.zeros((1, len(thresholdlist)))
    for i in range(thresholds.shape[1]):
        # Confusion-matrix counts at this threshold.
        p_index = np.where(predict_score >= thresholds[0, i])
        TP[0, i] = len(np.where(real_score[p_index] == 1)[0])
        FP[0, i] = len(np.where(real_score[p_index] == 0)[0])
        n_index = np.where(predict_score < thresholds[0, i])
        FN[0, i] = len(np.where(real_score[n_index] == 1)[0])
        TN[0, i] = len(np.where(real_score[n_index] == 0)[0])
    sen = TP / (TP + FN)
    spe = TN / (TN + FP)
    # ROC coordinates: x = false positive rate, y = true positive rate.
    x = list(np.array(1 - spe).flatten())
    y = list(np.array(sen).flatten())
    xy = [(x, y) for x, y in zip(x, y)]
    xy.sort()
    new_x = [x for x, y in xy]
    new_y = [y for x, y in xy]
    # Anchor the curve at (0, 0) and (1, 1).
    new_x[0] = 0
    new_y[0] = 0
    new_x.append(1)
    new_y.append(1)
    name1 = 'plot_curve/non_attention_AUC_X.csv'
    np.savetxt(name1, new_x, delimiter=',')
    name2 = 'plot_curve/non_attention_AUC_Y.csv'
    np.savetxt(name2, new_y, delimiter=',')
    # Trapezoidal integration of the sorted curve.
    area = 0
    for i in range(thresholds.shape[1]):
        area = area + (new_y[i] + new_y[i + 1]) * (new_x[i + 1] - new_x[i]) / 2
    return area
def get_Metrics(real_score, predict_score):
    """Return [f1, accuracy, recall, specificity, precision] at the
    threshold -- out of 999 quantile thresholds over the unique predicted
    scores -- that maximizes F1.

    NOTE(review): precision and f1 can contain NaN when a threshold yields
    no positive predictions; np.argmax then behaves accordingly -- this is
    unchanged original behaviour.
    """
    sorted_predict_score = sorted(list(set(np.array(predict_score).flatten())))
    sorted_predict_score_num = len(sorted_predict_score)
    thresholdlist = []
    for i in range(999):
        threshold = sorted_predict_score[int(math.ceil(sorted_predict_score_num * (i + 1) / 1000) - 1)]
        thresholdlist.append(threshold)
    thresholds = np.matrix(thresholdlist)
    TN = np.zeros((1, len(thresholdlist)))
    TP = np.zeros((1, len(thresholdlist)))
    FN = np.zeros((1, len(thresholdlist)))
    FP = np.zeros((1, len(thresholdlist)))
    for i in range(thresholds.shape[1]):
        # Confusion-matrix counts at this threshold.
        p_index = np.where(predict_score >= thresholds[0, i])
        TP[0, i] = len(np.where(real_score[p_index] == 1)[0])
        FP[0, i] = len(np.where(real_score[p_index] == 0)[0])
        n_index = np.where(predict_score < thresholds[0, i])
        FN[0, i] = len(np.where(real_score[n_index] == 1)[0])
        TN[0, i] = len(np.where(real_score[n_index] == 0)[0])
    accuracy = (TP + TN) / (TP + TN + FP + FN)
    sen = TP / (TP + FN)
    recall = sen
    spec = TN / (TN + FP)
    precision = TP / (TP + FP)
    f1 = 2 * recall * precision / (recall + precision)
    # Report every metric at the threshold index where F1 peaks.
    max_index = np.argmax(f1)
    max_f1 = f1[0, max_index]
    max_accuracy = accuracy[0, max_index]
    max_recall = recall[0, max_index]
    max_spec = spec[0, max_index]
    max_precision = precision[0, max_index]
    return [max_f1, max_accuracy, max_recall, max_spec, max_precision]
|
import dataclasses
import yaml
# Fixed locations of the YAML configuration files (under /etc/cactus-indy)
# consumed by the Settings dataclass below.
pathNodeSettings = "/etc/cactus-indy/node-settings.yaml"
pathNodeValidatorRegistry = "/etc/cactus-indy/node-validator-registry.yaml"
pathValidatorSettings = "/etc/cactus-indy/validator-001-settings.yaml"
pathValidatorSecrets = "/etc/cactus-indy/validator-001-secrets.yaml"
#dataclass for node-settings.yaml
#data members should be equal to the keys in that yaml file
@dataclasses.dataclass
class NodeSettings:
    """Contents of node-settings.yaml; field names must match the YAML keys."""
    port: int  # presumably the node's listening port -- TODO confirm
    logging_dir: str  # directory receiving log files
#dataclass for node-validator-registry.yaml
#data members should be equal to the keys in that yaml file
@dataclasses.dataclass
class NodeValidatorRegistry:
    """Contents of node-validator-registry.yaml; field names must match the YAML keys."""
    proto: str  # presumably the protocol scheme for reaching the validator -- TODO confirm
    url: str  # validator endpoint URL
    publickey: str  # validator's public key
#dataclass for validator-<DLT id>-settings.yaml
#data members should be equal to the keys in that yaml file
@dataclasses.dataclass
class ValidatorSettings:
    """Contents of validator-<DLT id>-settings.yaml; field names must match the YAML keys."""
    port: int  # presumably the validator's listening port -- TODO confirm
#dataclass for validator-<DLT id>-secrets.yaml
#data members should be equal to the keys in that yaml file
@dataclasses.dataclass
class ValidatorSecrets:
    """Contents of validator-<DLT id>-secrets.yaml; field names must match the YAML keys."""
    sign_key: str  # signing key material
    auth_credential: str  # authentication credential
@dataclasses.dataclass
class Settings:
    """Aggregates all node/validator configuration loaded from /etc/cactus-indy."""
    nodeSettings: NodeSettings = None
    nodeValidatorRegistry: NodeValidatorRegistry = None
    validatorSettings: ValidatorSettings = None
    validatorSecrets: ValidatorSecrets = None

    # this method is automatically implemented after generate object
    def __post_init__(self):
        # BUG FIX: previously every line overwrote ``validatorSettings`` with
        # data parsed by the wrong dataclass, and ``loadYaml`` always read
        # ``pathValidatorSettings`` regardless of its argument.  Each YAML
        # file now populates its matching attribute.
        self.nodeSettings = NodeSettings(**(self.loadYaml(pathNodeSettings)))
        self.nodeValidatorRegistry = NodeValidatorRegistry(**(self.loadYaml(pathNodeValidatorRegistry)))
        self.validatorSettings = ValidatorSettings(**(self.loadYaml(pathValidatorSettings)))
        self.validatorSecrets = ValidatorSecrets(**(self.loadYaml(pathValidatorSecrets)))

    def loadYaml(self, yamlFilePath):
        """Parse *yamlFilePath* and return the resulting Python object."""
        with open(yamlFilePath) as yamlFile:
            return yaml.safe_load(yamlFile)
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.random namespace.
"""
from __future__ import print_function as _print_function
from tensorflow.python import categorical
from tensorflow.python import get_seed
from tensorflow.python import multinomial
from tensorflow.python import random_gamma as gamma
from tensorflow.python import random_normal as normal
from tensorflow.python import random_poisson as poisson
from tensorflow.python import random_shuffle as shuffle
from tensorflow.python import random_uniform as uniform
from tensorflow.python import set_random_seed
from tensorflow.python import stateless_categorical
from tensorflow.python import stateless_multinomial
from tensorflow.python import stateless_random_normal as stateless_normal
from tensorflow.python import stateless_random_uniform as stateless_uniform
from tensorflow.python import stateless_truncated_normal
from tensorflow.python import truncated_normal
from tensorflow.python.ops.candidate_sampling_ops import all_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import fixed_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import learned_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import log_uniform_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import uniform_candidate_sampler
del _print_function
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dataset.run import dataset
from starthinker.task.bigquery.run import bigquery
from starthinker.task.dbm.run import dbm
from starthinker.task.census.run import census
def recipe_dv360_segmentology(config, auth_read, recipe_timezone, auth_write, recipe_name, date_range, recipe_slug, partners, advertisers):
  """DV360 funnel analysis using Census data.

     Builds a BigQuery dataset, schedules a DV360 report, loads it into
     BigQuery and correlates the normalized KPIs with US Census data.

     Args:
       config (Configuration) - StarThinker execution settings (project, credentials, verbosity).
       auth_read (authentication) - Credentials used for reading data.
       recipe_timezone (timezone) - Timezone for report dates.
       auth_write (authentication) - Authorization used for writing data.
       recipe_name (string) - Name of report, not needed if ID used.
       date_range (choice) - Timeframe to run the report for.
       recipe_slug (string) - Name of Google BigQuery dataset to create.
       partners (integer_list) - DV360 partner id.
       advertisers (integer_list) - Comma delimited list of DV360 advertiser ids.
  """
  # Step 1: make sure the destination BigQuery dataset exists.
  dataset(config, {
    'description':'Create a dataset for bigquery tables.',
    'hour':[
      4
    ],
    'auth':auth_write,
    'dataset':recipe_slug
  })
  # Step 2: install the Pearson significance test UDF into the dataset.
  bigquery(config, {
    'auth':auth_write,
    'function':'Pearson Significance Test',
    'to':{
      'dataset':recipe_slug
    }
  })
  # Step 3: create the weekly DV360 report filtered to the given
  # partners/advertisers, grouped down to zip-code level.
  dbm(config, {
    'auth':auth_read,
    'report':{
      'filters':{
        'FILTER_PARTNER':{
          'values':partners
        },
        'FILTER_ADVERTISER':{
          'values':advertisers
        }
      },
      'body':{
        'timezoneCode':recipe_timezone,
        'metadata':{
          'title':recipe_name,
          'dataRange':date_range,
          'format':'CSV'
        },
        'params':{
          'type':'TYPE_CROSS_PARTNER',
          'groupBys':[
            'FILTER_PARTNER',
            'FILTER_PARTNER_NAME',
            'FILTER_ADVERTISER',
            'FILTER_ADVERTISER_NAME',
            'FILTER_MEDIA_PLAN',
            'FILTER_MEDIA_PLAN_NAME',
            'FILTER_ZIP_POSTAL_CODE'
          ],
          'metrics':[
            'METRIC_BILLABLE_IMPRESSIONS',
            'METRIC_CLICKS',
            'METRIC_TOTAL_CONVERSIONS'
          ]
        },
        'schedule':{
          'frequency':'WEEKLY'
        }
      }
    }
  })
  # Step 4: download the report results into the DV360_KPI table.
  dbm(config, {
    'auth':auth_read,
    'report':{
      'name':recipe_name
    },
    'out':{
      'bigquery':{
        'auth':auth_write,
        'dataset':recipe_slug,
        'table':'DV360_KPI',
        'header':True,
        'schema':[
          {
            'name':'Partner_Id',
            'type':'INTEGER',
            'mode':'REQUIRED'
          },
          {
            'name':'Partner',
            'type':'STRING',
            'mode':'REQUIRED'
          },
          {
            'name':'Advertiser_Id',
            'type':'INTEGER',
            'mode':'REQUIRED'
          },
          {
            'name':'Advertiser',
            'type':'STRING',
            'mode':'REQUIRED'
          },
          {
            'name':'Campaign_Id',
            'type':'INTEGER',
            'mode':'REQUIRED'
          },
          {
            'name':'Campaign',
            'type':'STRING',
            'mode':'REQUIRED'
          },
          {
            'name':'Zip',
            'type':'STRING',
            'mode':'NULLABLE'
          },
          {
            'name':'Impressions',
            'type':'FLOAT',
            'mode':'NULLABLE'
          },
          {
            'name':'Clicks',
            'type':'FLOAT',
            'mode':'NULLABLE'
          },
          {
            'name':'Conversions',
            'type':'FLOAT',
            'mode':'NULLABLE'
          }
        ]
      }
    }
  })
  # Step 5: normalize the KPIs (share of impressions, CTR, CVR) per zip.
  bigquery(config, {
    'auth':auth_write,
    'from':{
      'query':'''SELECT
         Partner_Id,
         Partner,
         Advertiser_Id,
         Advertiser,
         Campaign_Id,
         Campaign,
         Zip,
         SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression,
         SAFE_DIVIDE(Clicks, Impressions) AS Click,
         SAFE_DIVIDE(Conversions, Impressions) AS Conversion,
         Impressions AS Impressions FROM
         `{dataset}.DV360_KPI`; ''',
      'parameters':{
        'dataset':recipe_slug
      },
      'legacy':False
    },
    'to':{
      'dataset':recipe_slug,
      'view':'DV360_KPI_Normalized'
    }
  })
  # Step 6: materialize normalized census views (2018 ACS 5yr, zip level).
  census(config, {
    'auth':auth_write,
    'normalize':{
      'census_geography':'zip_codes',
      'census_year':'2018',
      'census_span':'5yr'
    },
    'to':{
      'dataset':recipe_slug,
      'type':'view'
    }
  })
  # Step 7: correlate the normalized KPIs with the census data by zip code.
  census(config, {
    'auth':auth_write,
    'correlate':{
      'join':'Zip',
      'pass':[
        'Partner_Id',
        'Partner',
        'Advertiser_Id',
        'Advertiser',
        'Campaign_Id',
        'Campaign'
      ],
      'sum':[
        'Impressions'
      ],
      'correlate':[
        'Impression',
        'Click',
        'Conversion'
      ],
      'dataset':recipe_slug,
      'table':'DV360_KPI_Normalized',
      'significance':80
    },
    'to':{
      'dataset':recipe_slug,
      'type':'view'
    }
  })
if __name__ == "__main__":
  # Command-line entry point: collect credentials and recipe parameters,
  # build a Configuration and run the recipe once.
  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""
      DV360 funnel analysis using Census data.
      1. Wait for <b>BigQuery->->->Census_Join</b> to be created.
      2. Join the <a href='https://groups.google.com/d/forum/starthinker-assets' target='_blank'>StarThinker Assets Group</a> to access the following assets
      3. Copy <a href='https://datastudio.google.com/c/u/0/reporting/3673497b-f36f-4448-8fb9-3e05ea51842f/' target='_blank'>DV360 Segmentology Sample</a>. Leave the Data Source as is, you will change it in the next step.
      4. Click Edit Connection, and change to <b>BigQuery->->->Census_Join</b>.
      5. Or give these intructions to the client.
  """))
  # Generic StarThinker credential/project flags.
  parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
  parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
  parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
  parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
  parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
  parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
  # Recipe-specific parameters (mirrors recipe_dv360_segmentology's Args).
  parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
  parser.add_argument("-recipe_timezone", help="Timezone for report dates.", default='America/Los_Angeles')
  parser.add_argument("-auth_write", help="Authorization used for writing data.", default='service')
  parser.add_argument("-recipe_name", help="Name of report, not needed if ID used.", default='')
  parser.add_argument("-date_range", help="Timeframe to run the report for.", default='LAST_365_DAYS')
  parser.add_argument("-recipe_slug", help="Name of Google BigQuery dataset to create.", default='')
  parser.add_argument("-partners", help="DV360 partner id.", default=[])
  parser.add_argument("-advertisers", help="Comma delimited list of DV360 advertiser ids.", default=[])
  args = parser.parse_args()
  config = Configuration(
    project=args.project,
    user=args.user,
    service=args.service,
    client=args.client,
    key=args.key,
    verbose=args.verbose
  )
  recipe_dv360_segmentology(config, args.auth_read, args.recipe_timezone, args.auth_write, args.recipe_name, args.date_range, args.recipe_slug, args.partners, args.advertisers)
|
class SessionHelper:
    """Login/logout helpers driving the web UI through the app's webdriver."""

    def __init__(self, app):
        self.app = app

    def _fill_field(self, wd, name, text):
        # Click, clear and type into the input field identified by *name*.
        wd.find_element_by_name(name).click()
        wd.find_element_by_name(name).clear()
        wd.find_element_by_name(name).send_keys(text)

    def login(self, username, password):
        """Open the home page and submit the login form."""
        wd = self.app.wd
        self.app.open_home_page()
        self._fill_field(wd, "user", username)
        self._fill_field(wd, "pass", password)
        wd.find_element_by_xpath("//input[@value='Login']").click()

    def logout(self):
        """Click the Logout link."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()

    def ensure_login(self, username, password):
        """Make sure *username* is logged in, switching accounts if needed."""
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            else:
                self.logout()
        self.login(username=username, password=password)

    def ensure_logout(self):
        """Log out if any user is currently logged in."""
        if self.is_logged_in():
            self.logout()

    def is_logged_in(self):
        """True when a Logout link is present on the page."""
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        """True when the currently logged-in user matches *username*."""
        # (Removed an unused local webdriver lookup from the original.)
        return self.get_logged_user() == username

    def get_logged_user(self):
        """Return the username shown in the page header (strips brackets)."""
        wd = self.app.wd
        return wd.find_element_by_xpath("(//div[@id='top']/form/b)[1]").text[1:-1]
|
# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
agent basic tests
"""
import json
from pathlib import Path
from uuid import uuid4
from flask import url_for
from sner.agent.core import main as agent_main
from sner.lib import file_from_zip
from sner.server.scheduler.models import Job, Queue
def test_version(tmpworkdir):  # pylint: disable=unused-argument
    """test print version"""
    # --version must exit cleanly with status 0.
    assert agent_main(['--version']) == 0
def test_commandline_assignment(tmpworkdir):  # pylint: disable=unused-argument
    """test custom assignment passed from command line"""
    assignment = {'id': str(uuid4()), 'config': {'module': 'dummy', 'args': '--arg1'}, 'targets': []}
    # The agent must run the dummy module and pack its output archive.
    assert agent_main(['--assignment', json.dumps(assignment)]) == 0
    assert Path(f'{assignment["id"]}.zip').exists()
def test_exception_in_module(tmpworkdir):  # pylint: disable=unused-argument
    """test exception handling during agent module execution"""
    assignment = {'id': str(uuid4()), 'config': {'module': 'notexist'}, 'targets': []}
    # A failing module yields exit code 1 but the output archive still exists.
    assert agent_main(['--assignment', json.dumps(assignment)]) == 1
    assert Path(f'{assignment["id"]}.zip').exists()
def test_run_with_liveserver(tmpworkdir, live_server, apikey, dummy_target):  # pylint: disable=unused-argument
    """test basic agent's networking codepath; fetch, execute, pack and upload assignment"""
    # Run one fetch-execute-upload cycle (--oneshot) against the live server.
    result = agent_main([
        '--server', url_for('index_route', _external=True),
        '--apikey', apikey,
        '--queue', Queue.query.get(dummy_target.queue_id).name,
        '--caps', 'cap1', 'cap2',
        '--oneshot',
        '--debug',
    ])
    assert result == 0
    # The uploaded job output must contain the assigned target.
    job = Job.query.filter(Job.queue_id == dummy_target.queue_id).one()
    assert dummy_target.target in file_from_zip(job.output_abspath, 'assignment.json').decode('utf-8')
|
import collections
import copy
import typing
from river import stats
from river import optim
from river import utils
from . import base
__all__ = ['Baseline']
class Baseline(base.Recommender):
    """Baseline for recommender systems.

    A first-order approximation of the bias involved in target. The model equation is defined as:

    $$\\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i}$$

    Where $bu_{u}$ and $bi_{i}$ are respectively the user and item biases.

    This model expects a dict input with a `user` and an `item` entries without any type constraint
    on their values (i.e. can be strings or numbers). Other entries are ignored.

    Parameters
    ----------
    optimizer
        The sequential optimizer used for updating the weights.
    loss
        The loss function to optimize for.
    l2
        regularization amount used to push weights towards 0.
    initializer
        Weights initialization scheme.
    clip_gradient
        Clips the absolute value of each gradient value.

    Attributes
    ----------
    global_mean : stats.Mean
        The target arithmetic mean.
    u_biases : collections.defaultdict
        The user bias weights.
    i_biases : collections.defaultdict
        The item bias weights.
    u_optimizer : optim.Optimizer
        The sequential optimizer used for updating the user bias weights.
    i_optimizer : optim.Optimizer
        The sequential optimizer used for updating the item bias weights.

    Examples
    --------

    >>> from river import optim
    >>> from river import reco

    >>> dataset = (
    ...     ({'user': 'Alice', 'item': 'Superman'}, 8),
    ...     ({'user': 'Alice', 'item': 'Terminator'}, 9),
    ...     ({'user': 'Alice', 'item': 'Star Wars'}, 8),
    ...     ({'user': 'Alice', 'item': 'Notting Hill'}, 2),
    ...     ({'user': 'Alice', 'item': 'Harry Potter'}, 5),
    ...     ({'user': 'Bob', 'item': 'Superman'}, 8),
    ...     ({'user': 'Bob', 'item': 'Terminator'}, 9),
    ...     ({'user': 'Bob', 'item': 'Star Wars'}, 8),
    ...     ({'user': 'Bob', 'item': 'Notting Hill'}, 2)
    ... )

    >>> model = reco.Baseline(optimizer=optim.SGD(0.005))

    >>> for x, y in dataset:
    ...     _ = model.learn_one(x, y)

    >>> model.predict_one({'user': 'Bob', 'item': 'Harry Potter'})
    6.538120

    References
    ----------
    [^1]: [Matrix factorization techniques for recommender systems](https://datajobs.com/data-science-repo/Recommender-Systems-[Netflix].pdf)

    """

    def __init__(self, optimizer: optim.Optimizer = None, loss: optim.losses.Loss = None,
                 l2=0., initializer: optim.initializers.Initializer = None, clip_gradient=1e12):
        # Deep-copy the optimizer so user and item biases each get an
        # independent optimizer state; default to plain SGD.
        self.optimizer = optim.SGD() if optimizer is None else copy.deepcopy(optimizer)
        self.u_optimizer = optim.SGD() if optimizer is None else copy.deepcopy(optimizer)
        self.i_optimizer = optim.SGD() if optimizer is None else copy.deepcopy(optimizer)
        self.loss = optim.losses.Squared() if loss is None else loss
        self.l2 = l2
        if initializer is None:
            initializer = optim.initializers.Zeros()
        self.initializer = initializer
        self.clip_gradient = clip_gradient
        self.global_mean = stats.Mean()
        # Biases default to the initializer's value for unseen users/items.
        self.u_biases: typing.DefaultDict[int, optim.initializers.Initializer] = collections.defaultdict(initializer)
        self.i_biases: typing.DefaultDict[int, optim.initializers.Initializer] = collections.defaultdict(initializer)

    def _predict_one(self, user, item):
        # Prediction = global mean + user bias + item bias.
        return self.global_mean.get() + self.u_biases[user] + self.i_biases[item]

    def _learn_one(self, user, item, y):
        # Update the global mean
        self.global_mean.update(y)

        # Calculate the gradient of the loss with respect to the prediction
        g_loss = self.loss.gradient(y, self._predict_one(user, item))

        # Clamp the gradient to avoid numerical instability
        g_loss = utils.math.clamp(g_loss, minimum=-self.clip_gradient, maximum=self.clip_gradient)

        # Calculate bias gradients
        u_grad_bias = {user: g_loss + self.l2 * self.u_biases[user]}
        i_grad_bias = {item: g_loss + self.l2 * self.i_biases[item]}

        # Update biases
        self.u_biases = self.u_optimizer.update_after_pred(self.u_biases, u_grad_bias)
        self.i_biases = self.i_optimizer.update_after_pred(self.i_biases, i_grad_bias)

        return self
|
import pickle
import gzip
import cv2
import face_recognition
#filename= 'c_object.obj'
# filename='newDump.pkl'
def compute(imge):
    """Return the face encoding of the first face found in a BGR image.

    The image is converted BGR -> RGB before being handed to
    face_recognition; raises IndexError when no face is detected.
    """
    rgb = cv2.cvtColor(imge, cv2.COLOR_BGR2RGB)
    return face_recognition.face_encodings(rgb)[0]
def save(object, filename, protocol = 0):
    """Saves a compressed object to disk.

    Args:
        object: any picklable Python object.
        filename: destination path for the gzip-compressed pickle.
        protocol: pickle protocol number (default 0, the ASCII protocol).
    """
    # Using GzipFile as a context manager guarantees the file is closed
    # even when pickling or writing raises, which the manual close did not.
    with gzip.GzipFile(filename, 'wb') as file:
        file.write(pickle.dumps(object, protocol))
def load(filename):
    """Loads a compressed object from disk.

    Args:
        filename: path to a gzip-compressed pickle written by ``save``.

    Returns:
        The unpickled object.
    """
    # Context manager closes the file even if decompression/unpickling fails.
    with gzip.GzipFile(filename, 'rb') as file:
        return pickle.loads(file.read())
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_tensor_slice """
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import Parameter
from mindspore import context
from mindspore import dtype as mstype
from mindspore.nn import Cell
from mindspore.common.parameter import ParameterTuple
from mindspore.ops import composite as C
# Gradient operation returning gradients w.r.t. a parameter list and taking
# an explicit sensitivity input; shared by the slicing tests in this module.
grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)
def setup_module():
    """Pytest module hook: run every test here in PyNative (eager) mode."""
    context.set_context(mode=context.PYNATIVE_MODE)
class NetWorkSlicePositive(Cell):
    """Network exercising start:stop:step tensor slicing (incl. negative start)."""
    def __init__(self):
        super(NetWorkSlicePositive, self).__init__()
        # Constant tensors whose shapes match the slices taken in construct.
        self.tensor_ret0 = Tensor(np.ones([1, 2, 3], np.int32))
        self.tensor_ret1 = Tensor(np.ones([4, 8, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([6, 8, 10], np.int32))
        self.tensor_ret3 = Tensor(np.ones([3, 8, 10], np.int32))
    def construct(self, tensor):
        # Each slice result is shifted by +1 via the matching ones tensor.
        ret0 = tensor[3:4:1, 1:5:2, 3:6:1] + self.tensor_ret0
        ret1 = tensor[-6:4:1, 0:8:1, ::1] + self.tensor_ret1
        ret2 = tensor[::, ::, ::] + self.tensor_ret2
        ret3 = tensor[::2] + self.tensor_ret3
        return ret0, ret1, ret2, ret3
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_slice_positive():
    """Positive-index slicing must match the equivalent numpy slices."""
    net = NetWorkSlicePositive()
    input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
    input_0 = Tensor(input_np)
    output0, output1, output2, output3 = net(input_0)
    assert np.all(output0.asnumpy() == input_np[3:4:1, 1:5:2, 3:6:1] + np.ones([1, 2, 3]))
    assert np.all(output1.asnumpy() == input_np[-6:4:1, 0:8:1, ::1] + np.ones([4, 8, 10]))
    assert np.all(output2.asnumpy() == input_np[::, ::, ::] + np.ones([6, 8, 10]))
    assert np.all(output3.asnumpy() == input_np[::2] + np.ones([3, 8, 10]))
class NetWorkSliceEllipsis(Cell):
    """Cell exercising Ellipsis, ``None`` and ``True`` (bool) index forms."""
    def __init__(self):
        super(NetWorkSliceEllipsis, self).__init__()
        self.tensor_ret0 = Tensor(np.ones([2, 7, 8], np.int32))
        self.tensor_ret1 = Tensor(np.ones([6, 7, 8, 9], np.int32))
        self.tensor_ret2 = Tensor(np.ones([1, 6, 7, 8, 9], np.int32))
    def construct(self, tensor):
        ret0 = tensor[0:4:2, ..., 1] + self.tensor_ret0
        ret1 = tensor[...] + self.tensor_ret1
        # ``None`` and ``True`` both insert a leading axis of length 1.
        ret2 = tensor[None] + self.tensor_ret2
        ret3 = tensor[True] + self.tensor_ret2
        return ret0, ret1, ret2, ret3
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_slice_ellipsis():
    """Ellipsis/None/bool indexing must match the numpy equivalents."""
    net = NetWorkSliceEllipsis()
    input_np = np.arange(6*7*8*9).reshape(6, 7, 8, 9).astype(np.int32)
    input_0 = Tensor(input_np)
    output0, output1, output2, output3 = net(input_0)
    assert np.all(output0.asnumpy() == input_np[0:4:2, ..., 1] + np.ones([2, 7, 8]))
    assert np.all(output1.asnumpy() == input_np[...] + np.ones([6, 7, 8, 9]))
    # input_np[None] has shape (1, 6, 7, 8, 9); broadcasting makes both asserts valid.
    assert np.all(output2.asnumpy() == input_np[None] + np.ones([6, 7, 8, 9]))
    assert np.all(output3.asnumpy() == input_np[True] + np.ones([1, 6, 7, 8, 9]))
class NetWorkReduceDimension(Cell):
    """Cell exercising integer indices that reduce tensor rank."""
    def __init__(self):
        super(NetWorkReduceDimension, self).__init__()
        self.tensor_ret1 = Tensor(np.ones([3, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([6, 8], np.int32))
        # Scalar constant for the fully-indexed (rank-0) result.
        self.tensor_ret3 = Tensor(np.array(8, np.int32))
        self.tensor_ret4 = Tensor(np.ones([8, 10], np.int32))
    def construct(self, tensor):
        ret1 = tensor[::2, 1, ::1] + self.tensor_ret1
        ret2 = tensor[::, ::, 0] + self.tensor_ret2
        ret3 = tensor[3, 2, 5] + self.tensor_ret3
        ret4 = tensor[1] + self.tensor_ret4
        return ret1, ret2, ret3, ret4
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reduce_dimension():
    """Rank-reducing integer indexing must match numpy."""
    net = NetWorkReduceDimension()
    input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
    input_0 = Tensor(input_np)
    output1, output2, output3, output4 = net(input_0)
    assert np.all(output1.asnumpy() == input_np[::2, 1, ::1] + np.ones([3, 10]))
    assert np.all(output2.asnumpy() == input_np[::, ::, 0] + np.ones([6, 8]))
    assert np.all(output3.asnumpy() == input_np[3, 2, 5] + np.array(8, np.int32))
    assert np.all(output4.asnumpy() == input_np[1] + np.ones([8, 10]))
class NetWorkSliceStep(Cell):
    """Cell exercising slicing with negative and greater-than-one steps.

    NOTE(review): this helper class previously carried pytest level/platform
    marks. Marks on a non-collected helper class have no effect (the real
    marks live on ``test_step_negative``), so they were removed as misplaced.
    """
    def __init__(self):
        super(NetWorkSliceStep, self).__init__()
        self.tensor_ret1 = Tensor(np.ones([6, 5, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([3, 5, 5], np.int32))
    def construct(self, tensor):
        # Negative step (::-1) and stride-2 slicing are the forms under test.
        ret1 = tensor[::1, -5::, ::-1] + self.tensor_ret1
        ret2 = tensor[::2, -5::, ::2] + self.tensor_ret2
        return ret1, ret2
@pytest.mark.level0
# ascend op stridedslice has bug, and has not been fixed.
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_step_negative():
    """Negative/strided slice steps must match numpy (GPU only, see note above)."""
    net = NetWorkSliceStep()
    input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
    input_0 = Tensor(input_np)
    output1, output2 = net(input_0)
    assert np.all(output1.asnumpy() == input_np[::1, -5::, ::-1] + np.ones([6, 5, 10]))
    assert np.all(output2.asnumpy() == input_np[::2, -5::, ::2] + np.ones([3, 5, 5]))
class TensorGetItemByThreeTensors(Cell):
    """Cell exercising advanced (tensor) indexing with one, two and three index tensors."""
    def __init__(self):
        super(TensorGetItemByThreeTensors, self).__init__()
        # Ones-constants shaped to the broadcast result of each gather below.
        self.const0 = Tensor(np.ones((4, 5, 8, 10)), mstype.int32)
        self.const1 = Tensor(np.ones((3, 4, 5, 10)), mstype.int32)
        self.const2 = Tensor(np.ones((5, 3, 4, 5)), mstype.int32)
    def construct(self, x, index_0, index_1, index_2):
        ret0 = x[index_0] + self.const0
        ret1 = x[index_0, index_1] + self.const1
        ret2 = x[index_0, index_1, index_2] + self.const2
        return ret0, ret1, ret2
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_getitem_by_tensors():
    """Advanced indexing with 1-3 index tensors must match numpy.

    This testcase may encounter a sync stream error occasionally.
    """
    net = TensorGetItemByThreeTensors()
    input_x = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
    index_0 = np.random.randint(6, size=(3, 4, 5)).astype(np.int32)
    index_1 = np.random.randint(6, size=(4, 5)).astype(np.int32)
    index_2 = np.random.randint(6, size=(5, 3, 4, 5)).astype(np.int32)
    input_x_ms = Tensor(input_x)
    index_0_ms = Tensor(index_0)
    index_1_ms = Tensor(index_1)
    # Renamed from the misleading ``input_2_ms`` — this is the third INDEX tensor.
    index_2_ms = Tensor(index_2)
    output0, output1, output2 = net(input_x_ms, index_0_ms, index_1_ms, index_2_ms)
    assert np.all(output0.asnumpy() == input_x[index_0] + np.ones([4, 5, 8, 10]))
    assert np.all(output1.asnumpy() == input_x[index_0, index_1] + np.ones([3, 4, 5, 10]))
    assert np.all(output2.asnumpy() == input_x[index_0, index_1, index_2] + np.ones([5, 3, 4, 5]))
class TensorGetItemByMixedTensorsBasicCase(Cell):
    """Cell exercising mixed (tensor + slice/int/ellipsis) getitem combinations."""
    def __init__(self, c0, c1, c2, c3, c4, c5):
        super(TensorGetItemByMixedTensorsBasicCase, self).__init__()
        # One constant per mixed-index result; shapes are supplied by the test.
        self.const0 = Tensor(c0)
        self.const1 = Tensor(c1)
        self.const2 = Tensor(c2)
        self.const3 = Tensor(c3)
        self.const4 = Tensor(c4)
        self.const5 = Tensor(c5)
    def construct(self, tensor, index_0, index_1):
        ret0 = tensor[index_0, index_1, 0:3] + self.const0
        ret1 = tensor[0:3, index_0, ...] + self.const1
        ret2 = tensor[0, index_0, index_1] + self.const2
        ret3 = tensor[..., index_0, 0:3] + self.const3
        ret4 = tensor[0:2, index_0, index_1] + self.const4
        ret5 = tensor[..., index_0, index_1] + self.const5
        return ret0, ret1, ret2, ret3, ret4, ret5
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_getitem_by_mixed_tensors():
    """Mixed tensor/slice/int/ellipsis getitem must match numpy."""
    const0 = np.ones((3, 4, 5, 3), np.float32)
    const1 = np.ones((3, 3, 4, 5, 5), np.float32)
    const2 = np.ones((3, 4, 5), np.float32)
    const3 = np.ones((3, 3, 4, 5, 3), np.float32)
    const4 = np.ones((2, 3, 4, 5), np.float32)
    const5 = np.ones((3, 3, 4, 5), np.float32)
    net = TensorGetItemByMixedTensorsBasicCase(const0, const1, const2, const3, const4, const5)
    input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
    input_ms = Tensor(input_np, mstype.float32)
    index_np_0 = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    index_np_1 = np.random.randint(4, size=(4, 5)).astype(np.int32)
    index_0 = Tensor(index_np_0, mstype.int32)
    index_1 = Tensor(index_np_1, mstype.int32)
    out0, out1, out2, out3, out4, out5 = net(input_ms, index_0, index_1)
    assert np.all(out0.asnumpy() == (input_np[index_np_0, index_np_1, 0:3] + const0))
    assert np.all(out1.asnumpy() == (input_np[0:3, index_np_0, ...] + const1))
    assert np.all(out2.asnumpy() == (input_np[0, index_np_0, index_np_1] + const2))
    assert np.all(out3.asnumpy() == (input_np[..., index_np_0, 0:3] + const3))
    assert np.all(out4.asnumpy() == (input_np[0:2, index_np_0, index_np_1] + const4))
    assert np.all(out5.asnumpy() == (input_np[..., index_np_0, index_np_1] + const5))
class TensorItemByNone(Cell):
    """Cell calling ``Tensor.item()`` with no index (valid only for 1-element tensors)."""
    def construct(self, tensor):
        ret = tensor.item()
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_item_by_none():
    """``item()`` works on a single-element tensor and raises ValueError otherwise."""
    # np.ndarray([1]) is an UNINITIALIZED 1-element array; values are arbitrary
    # but consistent between the numpy reference and the Tensor built from it.
    input_1d_np = np.ndarray([1]).astype(np.float32)
    net = TensorItemByNone()
    input_1d_ms = Tensor(input_1d_np, mstype.float32)
    input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    input_3d_ms = Tensor(input_3d_np, mstype.float32)
    output_ms = net(input_1d_ms)
    assert np.all(output_ms.asnumpy() == input_1d_np.item())
    with pytest.raises(ValueError):
        net(input_3d_ms)
class TensorItemByItem(Cell):
    """Cell calling ``Tensor.item(index)`` with a caller-supplied index."""
    def construct(self, tensor, index):
        ret = tensor.item(index)
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_item_by_int():
    """``item(int)`` matches numpy; float indices raise TypeError, out-of-range raise IndexError."""
    net = TensorItemByItem()
    input_1d_np = np.ndarray([1]).astype(np.float32)
    input_1d_ms = Tensor(input_1d_np, mstype.float32)
    input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    input_3d_ms = Tensor(input_3d_np, mstype.float32)
    # valid, non-int, in-range flat, out-of-range flat (3*4*5 == 60)
    index_np_1, index_np_2, index_np_3, index_np_4 = 0, 1.0, 30, 60
    output_1d_ms = net(input_1d_ms, index_np_1)
    output_3d_ms_1 = net(input_3d_ms, index_np_1)
    output_3d_ms_2 = net(input_3d_ms, index_np_3)
    assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))
    assert np.all(output_3d_ms_1.asnumpy() == input_3d_np.item(index_np_1))
    assert np.all(output_3d_ms_2.asnumpy() == input_3d_np.item(index_np_3))
    with pytest.raises(TypeError):
        net(input_1d_ms, index_np_2)
    with pytest.raises(IndexError):
        net(input_1d_ms, index_np_3)
    with pytest.raises(TypeError):
        net(input_3d_ms, index_np_2)
    with pytest.raises(IndexError):
        net(input_3d_ms, index_np_4)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_item_by_tuple():
    """``item(tuple)`` matches numpy; wrong-length tuples raise ValueError, out-of-range IndexError."""
    net = TensorItemByItem()
    input_1d_np = np.ndarray([1]).astype(np.float32)
    input_1d_ms = Tensor(input_1d_np, mstype.float32)
    input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    input_3d_ms = Tensor(input_3d_np, mstype.float32)
    index_np_1 = (0,)
    index_np_2 = (1, 2)
    index_np_3 = (1, 2, 3)
    index_np_4 = (3, 4, 4)
    index_np_5 = (1, 2, 3, 4)
    output_1d_ms = net(input_1d_ms, index_np_1)
    output_3d_ms = net(input_3d_ms, index_np_3)
    assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))
    assert np.all(output_3d_ms.asnumpy() == input_3d_np.item(index_np_3))
    with pytest.raises(ValueError):
        net(input_1d_ms, index_np_2)
    with pytest.raises(ValueError):
        net(input_3d_ms, index_np_2)
    with pytest.raises(IndexError):
        net(input_3d_ms, index_np_4)
    with pytest.raises(ValueError):
        net(input_3d_ms, index_np_5)
class TensorSetItemByMixedTensors_0(Cell):
    """Cell assigning a scalar into a Parameter via (slice, tensor, tensor) index."""
    def __init__(self, value):
        super(TensorSetItemByMixedTensors_0, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float32))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)),
                                      mstype.float32),
                               name="x")
        # Scalar written into the parameter in construct().
        self.value = value
    def construct(self, index_0, index_1, index_2):
        # index_2 is accepted but unused here; kept for a uniform interface
        # across the TensorSetItemByMixedTensors_* variants.
        self.param[0:2, index_0, index_1] = self.value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_mixed_tensors_0():
    """Scalar setitem through (slice, tensor, tensor) index must match numpy."""
    value = 88.0
    net = TensorSetItemByMixedTensors_0(value)
    index_0 = np.random.randint(3, size=(3, 4, 5))
    index_1 = np.random.randint(4, size=(4, 5))
    index_2 = np.random.randint(3, size=(2, 1, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
    const = np.ones((3, 4, 5), np.float32)
    out = net(index_0_ms, index_1_ms, index_2_ms)
    # Mirror the in-graph assignment on the numpy reference.
    input_np[0:2, index_0, index_1] = value
    assert np.all(out.asnumpy() == (input_np + const))
class TensorSetItemByMixedTensors_1(Cell):
    """Cell assigning a scalar into a Parameter via (slice, tensor, ellipsis) index.

    NOTE(review): misplaced pytest level/platform marks previously decorated
    this helper class; marks have no effect on a non-collected Cell and the
    real marks sit on ``test_setitem_by_mixed_tensors_1``, so they were removed.
    """
    def __init__(self, value):
        super(TensorSetItemByMixedTensors_1, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float32))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float32),
                               name="x")
        self.value = value
    def construct(self, index_0, index_1, index_2):
        # index_1/index_2 unused; kept for a uniform interface across variants.
        self.param[0:2, index_0, ...] = self.value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_mixed_tensors_1():
    """Scalar setitem through (slice, tensor, ellipsis) index must match numpy."""
    value = 88.0
    net = TensorSetItemByMixedTensors_1(value)
    index_0 = np.random.randint(3, size=(3, 4, 5))
    index_1 = np.random.randint(4, size=(4, 5))
    index_2 = np.random.randint(3, size=(2, 1, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
    const = np.ones((3, 4, 5), np.float32)
    out = net(index_0_ms, index_1_ms, index_2_ms)
    input_np[0:2, index_0, ...] = value
    assert np.all(out.asnumpy() == (input_np + const))
class TensorSetItemByMixedTensors_2(Cell):
    """Cell assigning a scalar via (ellipsis, tensor, int) index on a float16 Parameter.

    NOTE(review): misplaced pytest level/platform marks previously decorated
    this helper class; they are inert on a non-collected Cell (the real marks
    sit on ``test_setitem_by_mixed_tensors_2``) and were removed.
    """
    def __init__(self, value):
        super(TensorSetItemByMixedTensors_2, self).__init__()
        # float16 variant — exercises setitem on a half-precision parameter.
        self.const = Tensor(np.ones((3, 4, 5), np.float16))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float16),
                               name="x")
        self.value = value
    def construct(self, index_0, index_1, index_2):
        # index_1/index_2 unused; kept for a uniform interface across variants.
        self.param[..., index_0, 1] = self.value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_mixed_tensors_2():
    """Scalar setitem through (ellipsis, tensor, int) index must match numpy."""
    value = 88.0
    net = TensorSetItemByMixedTensors_2(value)
    index_0 = np.random.randint(3, size=(3, 4, 5))
    index_1 = np.random.randint(4, size=(4, 5))
    index_2 = np.random.randint(3, size=(2, 1, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    # NOTE(review): the net uses float16 internally while this reference is
    # float32; values here (<= 148) are exactly representable in float16, so
    # the equality still holds — confirm if the fixture sizes ever change.
    input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
    const = np.ones((3, 4, 5), np.float32)
    out = net(index_0_ms, index_1_ms, index_2_ms)
    input_np[..., index_0, 1] = value
    assert np.all(out.asnumpy() == (input_np + const))
class TensorGetItemByMixedTensorsIndexError(Cell):
    """Cell whose mixed index (with a python list component) should raise IndexError."""
    def construct(self, x, index_0, index_1):
        ret = x[index_0, index_1, 0:3, ..., 0:5, [1, 2, 3, 4]]
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_getitem_by_mixed_tensor_exception():
    """An unsupported mixed-index combination must raise IndexError."""
    input_ms = Tensor(np.arange(3 * 4 * 5 * 6 * 7 * 8 * 9).reshape((3, 4, 5, 6, 7, 8, 9)), mstype.int32)
    index_0 = Tensor(np.random.randint(3, size=(3, 4, 5)), mstype.int32)
    index_1 = Tensor(np.random.randint(4, size=(3, 4, 5)), mstype.int32)
    net1 = TensorGetItemByMixedTensorsIndexError()
    with pytest.raises(IndexError):
        net1(input_ms, index_0, index_1)
class TensorSetItemByOneTensorWithNumber(Cell):
    """Cell assigning a scalar into a Parameter through a single index tensor."""
    def __init__(self, value):
        super(TensorSetItemByOneTensorWithNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        self.value = value
    def construct(self, index):
        self.param[index] = self.value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_one_tensor_with_number():
    """Scalar setitem through one index tensor must match numpy."""
    value = 0.0
    net = TensorSetItemByOneTensorWithNumber(value)
    index_np = np.random.randint(4, size=(5, 4))
    index = Tensor(index_np, mstype.int32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8))
    const = np.ones((6, 7, 8)).astype(np.float32)
    out = net(index)
    input_data[index_np] = value
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByOneTensorWithTensor(Cell):
    """Cell assigning a tensor value into a Parameter through a single index tensor."""
    def __init__(self):
        super(TensorSetItemByOneTensorWithTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
    def construct(self, index, value):
        self.param[index] = value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_one_tensor_with_tensor():
    """Tensor-valued setitem through one index tensor must match numpy."""
    net = TensorSetItemByOneTensorWithTensor()
    index_np = np.random.randint(4, size=(5, 4))
    index = Tensor(index_np, mstype.int32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8))
    const = np.ones((6, 7, 8)).astype(np.float32)
    # Value broadcasts over the (5, 4)-shaped index selection.
    value = np.zeros((4, 7, 8)).astype(np.float32)
    value_ms = Tensor(value, mstype.float32)
    out = net(index, value_ms)
    input_data[index_np] = value
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByOneTensorWithTupleOfNumber(Cell):
    """Cell assigning a tuple of scalars into a Parameter through one index tensor."""
    def __init__(self, value):
        super(TensorSetItemByOneTensorWithTupleOfNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        # Tuple of numbers; broadcast along the last axis on assignment.
        self.value = value
    def construct(self, index):
        self.param[index] = self.value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_one_tensor_with_tuple_number():
    """Tuple-of-scalars setitem through one index tensor must match numpy."""
    value = (0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7)
    net = TensorSetItemByOneTensorWithTupleOfNumber(value)
    input_np = np.random.randint(5, size=(5, 4))
    input_ms = Tensor(input_np, mstype.int32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    const = np.ones((6, 7, 8)).astype(np.float32)
    out = net(input_ms)
    input_data[input_np] = value
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByOneTensorWithTupleOfTensor(Cell):
    """Cell assigning a tuple of tensors into a Parameter through one index tensor."""
    def __init__(self):
        super(TensorSetItemByOneTensorWithTupleOfTensor, self).__init__()
        self.const = Tensor(np.ones((6, 3, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 3 * 8).reshape((6, 3, 8)), mstype.float32), name="x")
    def construct(self, index, value_0, value_1, value_2):
        self.param[index] = (value_0, value_1, value_2)
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_one_tensor_with_tuple_tensors():
    """Tuple-of-tensors setitem through one index tensor must match numpy."""
    net = TensorSetItemByOneTensorWithTupleOfTensor()
    input_np = np.random.randint(6, size=(5, 4)).astype(np.int32)
    input_ms = Tensor(input_np, mstype.int32)
    input_data = np.arange(6 * 3 * 8).reshape((6, 3, 8)).astype(np.float32)
    value_0_np = np.zeros((8,), np.float32)
    value_1_np = np.ones((8,), np.float32)
    value_2_np = np.ones((8,), np.float32)*2
    value_0 = Tensor(value_0_np)
    value_1 = Tensor(value_1_np)
    value_2 = Tensor(value_2_np)
    const = np.ones((6, 3, 8)).astype(np.float32)
    out = net(input_ms, value_0, value_1, value_2)
    input_data[input_np] = (value_0_np, value_1_np, value_2_np)
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithNumber(Cell):
    """Cell assigning a scalar into a Parameter through three index tensors."""
    def __init__(self, value):
        super(TensorSetItemByTensorsWithNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        self.value = value
    def construct(self, index_0, index_1, index_2):
        self.param[index_0, index_1, index_2] = self.value
        ret = self.param + self.const
        return ret
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.level0
def test_setitem_by_tensors_with_number():
    """Scalar setitem through three index tensors must match numpy.

    NOTE(review): this test previously carried both ``level1`` and
    ``level0`` marks; the conflicting ``level1`` was dropped, keeping
    ``level0`` (the mark added closest to the function).
    """
    value = 0.0
    net = TensorSetItemByTensorsWithNumber(value)
    index_0 = np.random.randint(6, size=(3, 4, 5))
    index_1 = np.random.randint(7, size=(4, 5))
    index_2 = np.random.randint(8, size=(5, 3, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    out = net(index_0_ms, index_1_ms, index_2_ms)
    const = np.ones((6, 7, 8)).astype(np.float32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    input_data[index_0, index_1, index_2] = value
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithTensor(Cell):
    """Cell assigning a tensor value into a Parameter through three index tensors."""
    def __init__(self):
        super(TensorSetItemByTensorsWithTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
    def construct(self, index_0, index_1, index_2, value):
        self.param[index_0, index_1, index_2] = value
        ret = self.param + self.const
        return ret
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tensor():
    """Tensor-valued setitem through three index tensors must match numpy."""
    net = TensorSetItemByTensorsWithTensor()
    index_0 = np.random.randint(6, size=(3, 4, 5))
    index_1 = np.random.randint(7, size=(4, 5))
    index_2 = np.random.randint(8, size=(5, 3, 4, 5))
    # Value broadcasts over the (5, 3, 4, 5) fancy-index result.
    value = np.zeros((4, 5)).astype(np.float32)
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    value_ms = Tensor(value, mstype.float32)
    out = net(index_0_ms, index_1_ms, index_2_ms, value_ms)
    const = np.ones((6, 7, 8)).astype(np.float32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    input_data[index_0, index_1, index_2] = value
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithTensorNumberError(Cell):
    """Cell using FOUR index tensors on a rank-3 Parameter — should raise IndexError."""
    def __init__(self):
        super(TensorSetItemByTensorsWithTensorNumberError, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
    def construct(self, index_0, index_1, index_2, index_3, value):
        self.param[index_0, index_1, index_2, index_3] = value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tensor_error():
    """Setitem with more index tensors than dimensions must raise IndexError."""
    index_0 = Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32)
    index_1 = Tensor(np.random.randint(7, size=(4, 5)), mstype.int32)
    index_2 = Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32)
    index_3 = Tensor(np.random.randint(8, size=(1, 3, 4, 5)), mstype.int32)
    value = Tensor(np.zeros((2, 5)), mstype.float32)
    net = TensorSetItemByTensorsWithTensorNumberError()
    with pytest.raises(IndexError):
        net(index_0, index_1, index_2, index_3, value)
class TensorSetItemByTensorsWithTupleOfNumber(Cell):
    """Cell assigning a tuple of scalars into a Parameter through three index tensors."""
    def __init__(self, value):
        super(TensorSetItemByTensorsWithTupleOfNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        self.value = value
    def construct(self, index_0, index_1, index_2):
        self.param[index_0, index_1, index_2] = self.value
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
# GPU op has bug, and has not been fixed.
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tuple_of_number():
    """Tuple-of-scalars setitem through three index tensors must match numpy (Ascend only, see note)."""
    value = (0.0, 1.1, 2.2, 3.3, 4.4)
    net = TensorSetItemByTensorsWithTupleOfNumber(value)
    index_0 = np.random.randint(6, size=(3, 4, 5))
    index_1 = np.random.randint(7, size=(4, 5))
    index_2 = np.random.randint(8, size=(5, 3, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    input_data[index_0, index_1, index_2] = value
    const = np.ones((6, 7, 8)).astype(np.float32)
    out = net(index_0_ms, index_1_ms, index_2_ms)
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithTupleOfTensor(Cell):
    """Cell assigning a tuple of tensors into a Parameter through three index tensors."""
    def __init__(self):
        super(TensorSetItemByTensorsWithTupleOfTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
    def construct(self, index_0, index_1, index_2, value_0, value_1, value_2):
        self.param[index_0, index_1, index_2] = (value_0, value_1, value_2)
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
# GPU op has bug, and has not been fixed.
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tuple_of_tensor():
    """Tuple-of-tensors setitem through three index tensors must match numpy (Ascend only, see note)."""
    value_0 = np.zeros((4, 5))
    value_1 = np.ones((4, 5))
    value_2 = np.ones((4, 5)) * 2
    value_0_ms = Tensor(value_0, mstype.float32)
    value_1_ms = Tensor(value_1, mstype.float32)
    value_2_ms = Tensor(value_2, mstype.float32)
    net = TensorSetItemByTensorsWithTupleOfTensor()
    index_0 = np.random.randint(6, size=(3, 4, 5))
    index_1 = np.random.randint(7, size=(4, 5))
    index_2 = np.random.randint(8, size=(5, 3, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    input_data[index_0, index_1, index_2] = (value_0, value_1, value_2)
    const = np.ones((6, 7, 8)).astype(np.float32)
    out = net(index_0_ms, index_1_ms, index_2_ms, value_0_ms, value_1_ms, value_2_ms)
    assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithTupleOfTensorNumberError(Cell):
    """Cell assigning a 2-tuple where the index expects 3 values — should raise ValueError."""
    def __init__(self):
        super(TensorSetItemByTensorsWithTupleOfTensorNumberError, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
    def construct(self, index_0, index_1, index_2, value_0, value_1):
        self.param[index_0, index_1, index_2] = (value_0, value_1)
        ret = self.param + self.const
        return ret
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_tensor_with_tuple_of_tensor_error():
    """A value tuple of the wrong length must raise ValueError."""
    net = TensorSetItemByTensorsWithTupleOfTensorNumberError()
    index_0_ms = Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32)
    index_1_ms = Tensor(np.random.randint(7, size=(4, 5)), mstype.int32)
    index_2_ms = Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32)
    value_0 = np.zeros((4, 5))
    value_1 = np.ones((4, 5))
    value_0_ms = Tensor(value_0, mstype.float32)
    value_1_ms = Tensor(value_1, mstype.float32)
    with pytest.raises(ValueError):
        net(index_0_ms, index_1_ms, index_2_ms, value_0_ms, value_1_ms)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_grad():
    """Gradient computation through a slice assignment must not raise."""
    class Net(Cell):
        # Forward net: slice-assign b into a, then add a trainable weight.
        def __init__(self):
            super(Net, self).__init__()
            self.weight = Parameter(
                Tensor(np.ones([4, 4, 5]), dtype=mstype.float32), "b1", requires_grad=True)
        def construct(self, a, b):
            a[1:3:1, ::] = b
            c = a + self.weight
            return c
    class GradNet(Cell):
        # Wraps Net to compute grads w.r.t. its trainable params with explicit sens.
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net
            self.weights = ParameterTuple(net.trainable_params())
        def construct(self, x, y, sens):
            return grad_by_list_with_sens(self.net, self.weights)(x, y, sens)
    net = GradNet(Net())
    x = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)
    y = Tensor(np.array([3]).astype(np.float32), mstype.float32)
    sens = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)
    # Smoke test: only checks that the backward pass executes without error.
    net(x, y, sens)
class TensorAssignWithSliceError1(Cell):
    """Cell assigning into an empty (negative-step) multi-dim slice; kept for error tests."""
    def construct(self, a, b):
        a[1:3:-1, ::] = b
        return a
class TensorAssignWithSliceError2(Cell):
    """Cell assigning into an empty (negative-step) 1-dim slice; kept for error tests."""
    def construct(self, a, b):
        a[1:3:-1] = b
        return a
class TensorAssignWithSlice2(Cell):
    """Cell chaining several 1-D slice assignments (tensor and scalar values)."""
    def construct(self, a, b, ck):
        a[1:5] = b
        a[3:4] = 5
        a[-1:1:-1] = b
        a[-1:3:-1] = 5
        a[::] = b
        a[::] = 9
        z = a + ck
        return z
class TensorAssignWithSlice(Cell):
    """Cell chaining several multi-dim slice assignments (tensor and scalar values)."""
    def __init__(self):
        super(TensorAssignWithSlice, self).__init__()
        # Scalar used for the constant-assignment variants below.
        self.c = 2.0
    def construct(self, a, b, ck):
        a[1:3, ::] = b
        a[2:3:, 3:] = b
        a[::] = b
        a[::] = self.c
        a[::, ::] = b
        a[::, ::] = self.c
        a[2:3:, 0:, 4:1:-1] = b
        a[2:3:, 0:, 4:1:-1] = self.c
        z = a + ck
        return z
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_slice_value_1():
    """Multi-dim slice assignment sequence must match the same sequence in numpy."""
    net = TensorAssignWithSlice()
    a = np.arange(60).reshape(3, 4, 5)
    b = np.array([1]).astype(np.float32)  # Tensor([1], dtype=mstype.float32)
    ck = np.arange(60).reshape(3, 4, 5)
    ta = Tensor(a, dtype=mstype.float32)
    tb = Tensor(b, dtype=mstype.float32)
    tck = Tensor(ck, dtype=mstype.float32)
    out = net(ta, tb, tck)
    # Replay exactly the assignments done inside the net, in the same order.
    a[1:3, ::] = b
    a[2:3:, 3:] = b
    a[::] = b
    a[::] = 2.0
    a[::, ::] = b
    a[::, ::] = 2.0
    a[2:3:, 0:, 4:1:-1] = b
    a[2:3:, 0:, 4:1:-1] = 2.0
    z = a + ck
    assert np.all(z == out.asnumpy())
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_slice_value_2():
    """1-D slice assignment sequence must match the same sequence in numpy."""
    net2 = TensorAssignWithSlice2()
    a = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    ck = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    b = np.array([1]).astype(np.float32)  # Tensor([1], dtype=mstype.float32)
    tb = Tensor(b, dtype=mstype.float32)
    ta = Tensor(a, dtype=mstype.float32)
    tck = Tensor(ck, dtype=mstype.float32)
    out = net2(ta, tb, tck)
    # Replay exactly the assignments done inside the net, in the same order.
    a[1:5] = b
    a[3:4] = 5
    a[-1:1:-1] = b
    a[-1:3:-1] = 5
    a[::] = b
    a[::] = 9
    z = a + ck
    assert np.all(z == out.asnumpy())
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_exception():
    """Invalid setitem shapes/indices must raise ValueError or IndexError.

    NOTE: TensorAssignWithInteger / TensorAssignWithTupleInteger /
    TensorAssignWithEllipsis / TensorAssignWithTupleEllipsis are defined
    later in this module; they are resolved at call time, which is fine.
    """
    net = TensorAssignWithSlice()
    net2 = TensorAssignWithSlice2()
    # The test case is no longer appropriate since x[1:3:-1] = np.array(2) does
    # not incur an error in numpy, which leaves the original array unchanged after
    # the assign operation.
    # net_e1 = TensorAssignWithSliceError1()
    # net_e2 = TensorAssignWithSliceError2()
    a = np.arange(60).reshape(3, 4, 5)
    ck = np.arange(60).reshape(3, 4, 5)
    b = Tensor([1], dtype=mstype.float32)
    Ta = Tensor(a, dtype=mstype.float32)
    Tck = Tensor(ck, dtype=mstype.float32)
    Ta4d = Tensor(a.reshape(1, 3, 4, 5), dtype=mstype.float32)
    Ta4d_ck = Tensor(ck.reshape(1, 3, 4, 5), dtype=mstype.float32)
    Tb = Tensor([1, 3], dtype=mstype.float32)
    Tc = Tensor([], dtype=mstype.float32)
    t = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)
    tck = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)
    # Error for A[Slice] = Number
    # 1. A[Slice] = Number, Slice error
    # with pytest.raises(ValueError):
    #     net_e2(t, 2)
    # Error for A[Slice] = U, U is a Tensor
    # 1. A[Slice] = U, u.size is error
    with pytest.raises(ValueError):
        net2(t, Tb, tck)
    # 2. A[Slice] = U, U is empty
    with pytest.raises(ValueError):
        net2(t, Tc, tck)
    # 3. A[Slice] = U, U.size error
    with pytest.raises(ValueError):
        net2(t, Tb, tck)
    # Error for A[Tuple(Slice...)] = Tensor
    # 1. A[Tuple(Slice...)] = U, U is empty
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    # 2. A[Tuple(Slice...)] = U, U.size error
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    # 3. A[Tuple(Slice...)] = U, Slice error
    # with pytest.raises(IndexError):
    #     net_e1(Ta, b)
    # Error for A[Tuple(Slice...)] = Number
    # 1. A[Tuple(Slice...)] = Number, Slice error
    # with pytest.raises(IndexError):
    #     net_e1(Ta, 2)
    net = TensorAssignWithInteger()
    # Error for A[Number] = scalar/Tensor
    # 1. A[Number] = U, U is a Tensor, u.size not match
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    # 2. A[Number] = U, the number index error
    with pytest.raises(IndexError):
        net(Ta4d, b, Ta4d_ck)
    # Error for A[(n,m)] = scalar/Tensor
    # 1. A[(n,m)] = U, U is a tensor. u.size not match
    net = TensorAssignWithTupleInteger()
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    # 2. A[(n,m)] = U, the number index error
    with pytest.raises(IndexError):
        net(Ta4d, b, Ta4d_ck)
    # Error for A[...] = U or A[1:, ...] = u
    # 1. A[...] = scalar/tensor
    net = TensorAssignWithEllipsis()
    net(Ta, Ta4d)
    with pytest.raises(ValueError):
        net(Ta, Tc)
    with pytest.raises(ValueError):
        net(Ta, Tb)
    # 2. A[::, 1:, ...] = scalar/tensor
    net = TensorAssignWithTupleEllipsis()
    net(Ta, b)
    with pytest.raises(ValueError):
        net(Ta, Tb)
class TensorAssignWithTupleEllipsis2(Cell):
    """Cell that assigns a tensor through a mixed (slice, ellipsis, slice) index."""
    def construct(self, a, b):
        # b is broadcast into every position selected by a[1:, ..., ::].
        a[1:, ..., ::] = b
        return a
class TensorAssignWithTupleEllipsis(Cell):
    """Cell that assigns a scalar and then a tensor through (slice, ellipsis) indices."""
    def construct(self, a, b):
        # Scalar fill of the first two rows, then broadcast b into the rest.
        a[:2, ...] = 1.0
        a[1:, ...] = b
        return a
class TensorAssignWithEllipsis(Cell):
    """Cell that assigns to the whole tensor via a bare ellipsis index."""
    def construct(self, a, b):
        # First a scalar fill, then a broadcast tensor fill of all elements.
        a[...] = 1
        a[...] = b
        return a
class TensorAssignWithInteger(Cell):
    """Cell that assigns via plain integer indices, then adds a check tensor."""
    def construct(self, a, b, ck):
        # Scalar store into row 1, tensor store into row 0.
        a[1] = 1
        a[0] = b
        # Adding ck forces the in-place updates to be materialized.
        return a + ck
class TensorAssignWithTupleInteger(Cell):
    """Cell that assigns via parenthesized-int and tuple-of-int indices."""
    def construct(self, a, b, ck):
        # (1) is just a parenthesized int index; (1, 1) is a genuine tuple index.
        a[(1)] = 1
        a[(1)] = b
        a[(1, 1)] = b
        a[(1, 1)] = 1
        # Adding ck forces the in-place updates to be materialized.
        return a + ck
class TensorAssignWithBoolTensorIndex(Cell):
    """Cell that assigns a scalar and a tensor through boolean-mask indices."""
    def __init__(self):
        super(TensorAssignWithBoolTensorIndex, self).__init__()
        # Constant offset added after the masked assignments.
        self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
        self.u_scalar = 5
    def construct(self, a, b, c, u_tensor):
        # Mask c gets a scalar, mask b gets a (broadcast) tensor.
        a[c] = self.u_scalar
        a[b] = u_tensor
        return a + self.t
class TensorAssignWithBoolTensorIndexError(Cell):
    """Cell with chained boolean indexing; the tests expect it to raise IndexError."""
    def construct(self, a, b, c, u_tensor):
        # Chained mask assignment a[b][c] = ... is the error case under test.
        a[b][c] = u_tensor
        return a
class TensorAssignWithBoolTensorIndex2(Cell):
    """Cell that assigns through masks built from every comparison operator."""
    def __init__(self):
        super(TensorAssignWithBoolTensorIndex2, self).__init__()
        # Constant offset added after the masked assignments.
        self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
        self.u_scalar = 5
    def construct(self, a, u_tensor):
        # Each comparison (>, >=, <, <=, ==) produces a fresh boolean mask.
        a[a > 8] = u_tensor
        a[a >= 6] = self.u_scalar
        a[a < 3] = self.u_scalar
        a[a <= 5] = u_tensor
        a[a == 5] = self.u_scalar
        return a + self.t
class TensorAssignWithBoolTensorIndex2Error(Cell):
    """Cell with chained comparison-mask indexing; tests expect IndexError."""
    def construct(self, a, u_tensor):
        # a[a > 8] yields a new tensor, so indexing it again is the error case.
        a[a > 8][a > 5] = u_tensor
        return a
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_bool_index_0():
    """Boolean-mask assignment must match the equivalent numpy update."""
    base = np.arange(60).reshape(3, 4, 5)
    gt_mask = base > 5
    lt_mask = base < 3
    net = TensorAssignWithBoolTensorIndex()
    out = net(Tensor(base, dtype=mstype.float32),
              Tensor(gt_mask),
              Tensor(lt_mask),
              Tensor([1], dtype=mstype.float32))
    # Reference result computed with plain numpy semantics.
    expected = np.arange(60).reshape(3, 4, 5)
    expected[lt_mask] = 5
    expected[gt_mask] = 1
    expected = expected + np.ones([3, 4, 5])
    assert np.all(out.asnumpy() == expected)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_bool_index_1():
    """Comparison-driven mask assignment must match numpy semantics."""
    base = np.arange(60).reshape(3, 4, 5)
    net = TensorAssignWithBoolTensorIndex2()
    out = net(Tensor(base, dtype=mstype.float32),
              Tensor([1], dtype=mstype.float32))
    # Reference result: replay the same sequence of masked stores in numpy.
    expected = np.arange(60).reshape(3, 4, 5)
    expected[expected > 8] = 1
    expected[expected >= 6] = 5
    expected[expected < 3] = 5
    expected[expected <= 5] = 1
    expected[expected == 5] = 5
    expected = expected + np.ones([3, 4, 5])
    assert np.all(out.asnumpy() == expected)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_bool_index_exception():
    """Invalid boolean-index assignments must raise ValueError/IndexError."""
    a = np.arange(60).reshape(3, 4, 5)
    b = a > 5
    c = a < 3
    Ta = Tensor(a, dtype=mstype.float32)
    Tb = Tensor(b)
    Tc = Tensor(c)
    Td = Tensor([True, True])  # mask whose shape does not match a
    u_tensor = Tensor([1], dtype=mstype.float32)
    u_tensor_error = Tensor([1, 2], dtype=mstype.float32)  # not broadcastable
    u_scalar = 5
    net1 = TensorAssignWithBoolTensorIndex()
    net2 = TensorAssignWithBoolTensorIndex2()
    # Mask shape mismatch -> ValueError.
    with pytest.raises(ValueError):
        net1(Ta, Td, Tc, u_tensor)
    # Non-bool tensor used as a mask -> IndexError.
    with pytest.raises(IndexError):
        net1(Ta, u_tensor, Tc, u_tensor)
    with pytest.raises(ValueError):
        net1(Ta, Tb, Td, u_tensor)
    with pytest.raises(IndexError):
        net1(Ta, Tb, Ta, u_tensor)
    # Assigned value not broadcastable to the masked region -> ValueError.
    with pytest.raises(ValueError):
        net1(Ta, Tb, Tc, u_tensor_error)
    # net1(Ta, u_tensor, Tc, u_tensor_error, u_scalar)
    with pytest.raises(ValueError):
        net2(Ta, u_tensor_error)
    # Chained boolean indexing (a[b][c] = ...) is rejected with IndexError.
    net3 = TensorAssignWithBoolTensorIndexError()
    with pytest.raises(IndexError):
        net3(Ta, Tb, Tc, u_tensor)
    with pytest.raises(IndexError):
        net3(Ta, Tb, Tc, u_scalar)
    net4 = TensorAssignWithBoolTensorIndex2Error()
    with pytest.raises(IndexError):
        net4(Ta, u_tensor)
    with pytest.raises(IndexError):
        net4(Ta, u_scalar)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_slice_reduce_out_of_bounds_neg():
    """An index below -dim must raise IndexError with an exact message."""
    class NetWork(Cell):
        def __init__(self):
            super(NetWork, self).__init__()
            self.tensor_ret = Tensor(np.array(9, np.int32))
        def construct(self, tensor):
            # -7 is out of range for the first axis (size 6).
            ret = tensor[-7, 3, 4]
            return ret
    input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
    net = NetWork()
    with pytest.raises(IndexError) as ex:
        net(input_tensor)
    assert "'begin' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
           "but got 'shrink_axis_mask': 7, 'strides': 1, 'begin': -7." in str(ex.value)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_slice_reduce_out_of_bounds_positive():
    """An index equal to dim must raise IndexError with an exact message."""
    class NetWork(Cell):
        def __init__(self):
            super(NetWork, self).__init__()
            self.tensor_ret = Tensor(np.array(9, np.int32))
        def construct(self, tensor):
            # 6 is one past the end of the first axis (size 6).
            ret = tensor[6, 3, 4]
            return ret
    input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
    net = NetWork()
    with pytest.raises(IndexError) as ex:
        net(input_tensor)
    assert "'begin' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
           "but got 'shrink_axis_mask': 7, 'strides': 1, 'begin': 6." in str(ex.value)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_range():
    """Iterating a Tensor must yield the same rows as iterating numpy."""
    data = np.arange(4 * 5 * 6).reshape(4, 5, 6).astype(np.float32)
    tensor = Tensor(data, mstype.float32)
    ms_rows = [row for row in tensor]
    np_rows = [row for row in data]
    for ms_row, np_row in zip(ms_rows, np_rows):
        assert np.all(ms_row.asnumpy() == np_row)
|
import numpy as np
import math
class Section:
    """Axis-aligned rectangular region of an image.

    Coordinates are pixel bounds (x1, y1) top-left inclusive and
    (x2, y2) bottom-right exclusive, matching numpy slicing in crop().
    """
    def __init__(self, x1, y1, x2, y2):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
    def crop(self, img):
        """Return the sub-image covered by this section (numpy-style slice)."""
        return img[self.y1: self.y2, self.x1: self.x2]
    def coordinates(self):
        """Return (x1, y1, x2, y2)."""
        return self.x1, self.y1, self.x2, self.y2
    def translate(self, dx, dy):
        '''returns new section transformed into new coordinates'''
        return Section(self.x1 + dx, self.y1 + dy,
                       self.x2 + dx, self.y2 + dy)
    def height(self):
        """Return the vertical extent y2 - y1."""
        return self.y2 - self.y1
    @staticmethod
    def of(section, shift=None):
        """Return a copy of *section*, optionally grown/shifted.

        shift is None      -> exact copy.
        shift == [dx, dy]  -> grow symmetrically by dx horizontally and
                              dy vertically.
        shift == [dx1, dy1, dx2, dy2] -> shift each coordinate individually.
        """
        x1, y1, x2, y2 = section.coordinates()
        if shift is None:
            return Section(x1, y1, x2, y2)
        elif len(shift) == 2:  # [dx,dy]
            dx, dy = shift
            # BUGFIX: the right edge previously used x1 + dx, collapsing the
            # section's width; symmetric growth must expand from x2.
            return Section(x1 - dx, y1 - dy, x2 + dx, y2 + dy)
        else:  # [dx1, dy1, dx2, dy2]
            return Section(x1 + shift[0], y1 + shift[1], x2 + shift[2], y2 + shift[3])
class OmrConfiguration:
    """Tuning constants for the OMR sheet: regions, marker geometry, filters.
    All coordinates are pixels of the resized sheet (see rshape).
    """
    # Target sheet size after rescaling: [width, height].
    rshape = [1000, 1500]
    # Fixed regions of the sheet (student id, exam type, answer columns).
    sec_id = Section(260, 35, 485, 333)
    sec_type = Section(478, 35, 566, 246)
    sec_answers = Section(15, 260, 500, 1270)
    sec_one = Section(15, 260, 265, 1270)
    sec_two = Section(260, 260, 500, 1270)
    y_step = 20
    y_window = 100
    # Horizontal band containing the registration-marker column.
    marker_x0_bound = 0
    marker_x1_bound = 55
    # sec_marker = Section(0, 0, marker_r_shift - marker_l_shift, rshape[1])
    sec_marker_column = Section(marker_x0_bound, 0, marker_x1_bound, rshape[1])
    num_markers = 63
    marker_filter_median_blur = 3
    # Markers outside the vertical band [padding_top, padding_down] are dropped.
    marker_y_padding_top = 45
    marker_y_padding_down = rshape[1] - 30
    marker_smooth_window = 110
    marker_threshold_spacing = 2
    # Acceptable geometry (in pixels) for a single marker.
    marker_height_range = range(3, 12)
    marker_space_range = range(20, 25)
    marker_width_range = range(7, 27)
    # top_marker = Section(0, -5, 300, 15)
    sec_marker = Section(0, -3, 70, 12)
    sec_marker_shift = [0, -20, 237, 20]
    marker_calibre_range = (195, 205)
# Module-level shorthand used by Marker defaults below.
conf = OmrConfiguration
class Marker:
    """A detected registration mark: vertical span [y0, y1] plus optional x span.
    NOTE(review): __init__ stores the mark id in self.id, which shadows the
    id() method defined below — that method is effectively unreachable.
    """
    def __init__(self, y0, y1, x0=None, x1=None, id=None):
        assert y1 > y0
        self.y0 = y0
        self.y1 = y1
        self.x0 = x0
        self.x1 = x1
        self.id = id
        # Vertical correction applied later via set_shift_y().
        self.shift_y = 0
    def set_id(self, id):
        # Fluent setter: returns self for chaining.
        self.id = id
        return self
    def id(self):
        # NOTE(review): shadowed by the instance attribute of the same name
        # assigned in __init__; dead code on ordinary instances.
        return self.id
    def set_shift_y(self, dy):
        self.shift_y = dy
    def translate(self, dx, dy):
        '''returns new section transformed into new coordinates'''
        return Marker(self.y0 + dy, self.y1 + dy,
                      self.x0 + dx, self.x1 + dx, self.id)
    def coordinates(self):
        # Order mirrors Section.coordinates(): (x0, y0, x1, y1).
        return self.x0, self.y0, self.x1, self.y1
    def center_y(self):
        return (self.y0 + self.y1) / 2
    def height(self):
        return self.y1 - self.y0
    def is_in_h_range(self, h_r=conf.marker_height_range):
        # True when the marker's height lies inside the configured range.
        return (self.y1 - self.y0) in h_r
    def is_lower_than(self, that):
        # NOTE(review): compares x coordinates although the name suggests a
        # vertical relation — confirm the intended axis convention.
        return self.x0 > that.x1
    def is_in_h_space(self, that, space=conf.marker_space_range):
        # True when the gap between the two markers (both edges) falls into
        # the expected marker-spacing range.
        upper, lower = Marker.upper_lower(self, that)
        return (lower.y0 - upper.y0) in space \
               and (lower.y1 - upper.y1) in space
    def __repr__(self):
        return 'Marker (id:{}, y0:{}, y1:{}, x0:{}, x1:{})' \
            .format(self.id, self.y0, self.y1, self.x0, self.x1)
    def y0_y1_shift(self):
        return self.y0, self.y1, self.shift_y
    def set_x0_x1(self, x0, x1):
        self.x0 = x0
        self.x1 = x1
    def x0_x1(self):
        return self.x0, self.x1
    @staticmethod
    def upper_lower(m1, m2):
        # Order two markers using is_lower_than (see note on that method).
        if m2.is_lower_than(m1):
            return m1, m2
        else:
            return m2, m1
    @staticmethod
    def can_acept(y0, y1):
        # NOTE(review): name looks like a typo for "can_accept", but it is
        # part of the public interface so it is left unchanged here.
        return y0 > conf.marker_y_padding_top \
               and y1 < conf.marker_y_padding_down \
               and y1 - y0 in conf.marker_height_range
def is_valid_marker(marker):
    """Return False for markers outside the padding band or height range.
    NOTE(review): no `return True` is visible at the end of this function,
    so a marker passing both checks yields None (falsy) — confirm the
    success path was not truncated.
    """
    # Reject markers outside the vertical padding band.
    if marker.y0 < conf.marker_y_padding_top \
            or marker.y1 > conf.marker_y_padding_down:
        return False
    # Reject markers whose height is outside the expected range.
    if not marker.height() in conf.marker_height_range:
        return False
|
from flask_restx import fields
from recipes.restx import api
# flask-restx marshalling models for the recipes API. The commented-out
# 'id' fields were intentionally left out of the public payloads.
category_model = api.model('Category', {
    # 'id': fields.Integer(readonly=True, description='The category unique identifier'),
    'categoryName': fields.String(required=True, description='The category name'),
})
source_model = api.model('Source', {
    # 'id': fields.Integer(readonly=True, description='The source unique identifier'),
    'sourceName': fields.String(required=True, description='The source name')
})
# One ingredient line of a recipe; quantity/measurement are free-form strings.
ingredient_model = api.model('Ingredient', {
    # 'id': fields.Integer(readonly=True, description='The ingredient\'s unique identifier'),
    'ingredientName': fields.String(required=True, description='ingredient name'),
    'ingredientQuantity': fields.String(required=True, description='How much?'),
    'ingredientMeasurement': fields.String(required=True, description='cups? Tbsp?'),
    'ingredientPreparation': fields.String(description='any special preparation?')
})
# maybe not needed:
# measurement_units_model = api.model('Measurement units', {
#     'id': fields.Integer(readonly=True, description='The measurement units unique identifier'),
#     'measurement_units_name': fields.String(required=True, description='Measurement units for an ingredient')
# })
# measurement_quantity_model = api.model('Measurement quantity', {
#     'id': fields.Integer(readonly=True, description='The measurement quantity unique identifier'),
#     'measurement_units_quantity': fields.Integer(required=True, description='Measurement units for an ingredient')
# })
# Top-level recipe payload; embeds a list of ingredient_model objects.
recipe_model = api.model('Recipe', {
    # 'id': fields.Integer(readonly=True, description='a recipe'),
    'recipeName': fields.String(required=True, description='a recipe name'),
    'recipeNotes': fields.String(required=False, description='any notes?'),
    'recipeRating': fields.Integer(required=False, description='rating'),
    'recipeInstructions': fields.String(required=True, description='instructions'),
    'ingredients': fields.List(fields.Nested(ingredient_model), required=True),
    'recipeImage': fields.String(required=False, description='source of image from web'),
    'categories': fields.List(fields.String()),
    'source': fields.String(required=True, description='where is this from?')
})
|
import getpass
userdb = {}  # In-memory store mapping username -> password.
def register():
    """Interactively register a new user into the in-memory userdb.

    Prints a message and stores nothing when the username is empty,
    non-alphanumeric, or already taken.
    """
    username = input('用户名: ').strip()
    if username == '':
        print('用户名不能为空')
    elif not username.isalnum():
        print('用户名只能包含字母和数字')
    elif username in userdb:
        print('用户已存在')
    else:
        # BUGFIX: read the password without echoing and strip surrounding
        # whitespace, matching what login() compares against. Previously the
        # raw input() value was stored, so a password typed with stray
        # whitespace could never be used to log in.
        password = getpass.getpass('密码: ').strip()
        userdb[username] = password
def login():
    """Prompt for credentials and print a colored success/failure message."""
    username = input('用户名: ').strip()
    password = getpass.getpass('密码: ').strip()
    # A missing user and a wrong password are reported identically.
    ok = userdb.get(username) == password
    if ok:
        print('\033[32;1m登陆成功\033[0m')
    else:
        print('\033[31;1m登陆失败\033[0m')
def show_menu():
    """Run the interactive menu loop until the user chooses to quit."""
    prompt = """(0) 注册
(1) 登陆
(2) 退出
请选择(0/1/2): """
    # Dispatch table: menu key -> handler function.
    actions = {'0': register, '1': login}
    while True:
        choice = input(prompt).strip()
        if choice == '2':
            print('Bye-bye')
            break
        if choice in actions:
            actions[choice]()
        else:
            print('无效的选择,请重试。')
# Entry point: run the interactive menu only when executed as a script.
if __name__ == '__main__':
    show_menu()
|
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
|
"""Gaussian LSTM Policy.
A policy represented by a Gaussian distribution
which is parameterized by a Long short-term memory (LSTM).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import GaussianLSTMModel
from garage.tf.policies.policy import StochasticPolicy
class GaussianLSTMPolicy(StochasticPolicy):
    """Gaussian LSTM Policy.
    A policy represented by a Gaussian distribution
    which is parameterized by a Long short-term memory (LSTM).
    Args:
        env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
        name (str): Model name, also the variable scope.
        hidden_dim (int): Hidden dimension for LSTM cell for mean.
        hidden_nonlinearity (Callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
        hidden_w_init (Callable): Initializer function for the weight
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (Callable): Initializer function for the bias
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        recurrent_nonlinearity (Callable): Activation function for recurrent
            layers. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        recurrent_w_init (Callable): Initializer function for the weight
            of recurrent layer(s). The function should return a
            tf.Tensor.
        output_nonlinearity (Callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        output_w_init (Callable): Initializer function for the weight
            of output dense layer(s). The function should return a
            tf.Tensor.
        output_b_init (Callable): Initializer function for the bias
            of output dense layer(s). The function should return a
            tf.Tensor.
        hidden_state_init (Callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
        hidden_state_init_trainable (bool): Bool for whether the initial
            hidden state is trainable.
        cell_state_init (Callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
        cell_state_init_trainable (bool): Bool for whether the initial
            cell state is trainable.
        forget_bias (bool): If True, add 1 to the bias of the forget gate at
            initialization. It's used to reduce the scale of forgetting at the
            beginning of the training.
        learn_std (bool): Is std trainable.
        std_share_network (bool): Boolean for whether mean and std share
            the same network.
        init_std (float): Initial value for std.
        layer_normalization (bool): Bool for using layer normalization or not.
        state_include_action (bool): Whether the state includes action.
            If True, input dimension will be
            (observation dimension + action dimension).
    """
    def __init__(self,
                 env_spec,
                 hidden_dim=32,
                 name='GaussianLSTMPolicy',
                 hidden_nonlinearity=tf.nn.tanh,
                 hidden_w_init=tf.initializers.glorot_uniform(
                     seed=deterministic.get_tf_seed_stream()),
                 hidden_b_init=tf.zeros_initializer(),
                 recurrent_nonlinearity=tf.nn.sigmoid,
                 recurrent_w_init=tf.initializers.glorot_uniform(
                     seed=deterministic.get_tf_seed_stream()),
                 output_nonlinearity=None,
                 output_w_init=tf.initializers.glorot_uniform(
                     seed=deterministic.get_tf_seed_stream()),
                 output_b_init=tf.zeros_initializer(),
                 hidden_state_init=tf.zeros_initializer(),
                 hidden_state_init_trainable=False,
                 cell_state_init=tf.zeros_initializer(),
                 cell_state_init_trainable=False,
                 forget_bias=True,
                 learn_std=True,
                 std_share_network=False,
                 init_std=1.0,
                 layer_normalization=False,
                 state_include_action=True):
        if not isinstance(env_spec.action_space, akro.Box):
            raise ValueError('GaussianLSTMPolicy only works with '
                             'akro.Box action space, but not {}'.format(
                                 env_spec.action_space))
        super().__init__(name, env_spec)
        self._obs_dim = env_spec.observation_space.flat_dim
        self._action_dim = env_spec.action_space.flat_dim
        self._hidden_dim = hidden_dim
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._recurrent_nonlinearity = recurrent_nonlinearity
        self._recurrent_w_init = recurrent_w_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._hidden_state_init = hidden_state_init
        self._hidden_state_init_trainable = hidden_state_init_trainable
        self._cell_state_init = cell_state_init
        self._cell_state_init_trainable = cell_state_init_trainable
        self._forget_bias = forget_bias
        self._learn_std = learn_std
        self._std_share_network = std_share_network
        self._init_std = init_std
        self._layer_normalization = layer_normalization
        self._state_include_action = state_include_action
        self._f_step_mean_std = None
        if state_include_action:
            self._input_dim = self._obs_dim + self._action_dim
        else:
            self._input_dim = self._obs_dim
        self.model = GaussianLSTMModel(
            output_dim=self._action_dim,
            hidden_dim=hidden_dim,
            name='GaussianLSTMModel',
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_w_init=hidden_w_init,
            hidden_b_init=hidden_b_init,
            recurrent_nonlinearity=recurrent_nonlinearity,
            recurrent_w_init=recurrent_w_init,
            output_nonlinearity=output_nonlinearity,
            output_w_init=output_w_init,
            output_b_init=output_b_init,
            hidden_state_init=hidden_state_init,
            hidden_state_init_trainable=hidden_state_init_trainable,
            cell_state_init=cell_state_init,
            cell_state_init_trainable=cell_state_init_trainable,
            forget_bias=forget_bias,
            layer_normalization=layer_normalization,
            learn_std=learn_std,
            std_share_network=std_share_network,
            init_std=init_std)
        self._prev_actions = None
        self._prev_hiddens = None
        self._prev_cells = None
        self._dist = None
        self._init_hidden = None
        self._init_cell = None
        self._initialize()
    def _initialize(self):
        """Initialize policy."""
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            state_input = tf.compat.v1.placeholder(shape=(None, None,
                                                          self._input_dim),
                                                   name='state_input',
                                                   dtype=tf.float32)
            step_input_var = tf.compat.v1.placeholder(shape=(None,
                                                             self._input_dim),
                                                      name='step_input',
                                                      dtype=tf.float32)
            step_hidden_var = tf.compat.v1.placeholder(
                shape=(None, self._hidden_dim),
                name='step_hidden_input',
                dtype=tf.float32)
            step_cell_var = tf.compat.v1.placeholder(shape=(None,
                                                            self._hidden_dim),
                                                     name='step_cell_input',
                                                     dtype=tf.float32)
            (self._dist, step_mean, step_log_std, step_hidden, step_cell,
             self._init_hidden,
             self._init_cell) = self.model.build(state_input, step_input_var,
                                                 step_hidden_var,
                                                 step_cell_var).outputs
            # NOTE(review): assumes a default TF session already exists here;
            # get_default_session() returns None otherwise. _initialize() is
            # re-run on unpickling (see __setstate__).
            self._f_step_mean_std = tf.compat.v1.get_default_session(
            ).make_callable(
                [step_mean, step_log_std, step_hidden, step_cell],
                feed_list=[step_input_var, step_hidden_var, step_cell_var])
    def build(self, state_input, name=None):
        """Build policy.
        Args:
            state_input (tf.Tensor) : State input.
            name (str): Name of the policy, which is also the name scope.
        Returns:
            tfp.distributions.MultivariateNormalDiag: Policy distribution.
            tf.Tensor: Step means, with shape :math:`(N, S^*)`.
            tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
            tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
            tf.Tensor: Step cell state, with shape :math:`(N, S^*)`.
            tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
            tf.Tensor: Initial cell state, with shape :math:`(S^*)`
        """
        with tf.compat.v1.variable_scope(self._variable_scope):
            _, step_input, step_hidden, step_cell = self.model.inputs
            return self.model.build(state_input,
                                    step_input,
                                    step_hidden,
                                    step_cell,
                                    name=name)
    @property
    def input_dim(self):
        """int: Dimension of the policy input."""
        return self._input_dim
    @property
    def vectorized(self):
        """Vectorized or not.
        Returns:
            Bool: True if primitive supports vectorized operations.
        """
        return True
    def reset(self, do_resets=None):
        """Reset the policy.
        Note:
            If `do_resets` is None, it will be by default np.array([True]),
            which implies the policy will not be "vectorized", i.e. number of
            parallel environments for training data sampling = 1.
        Args:
            do_resets (numpy.ndarray): Bool that indicates terminal state(s).
        """
        if do_resets is None:
            do_resets = np.array([True])
        if self._prev_actions is None or len(do_resets) != len(
                self._prev_actions):
            self._prev_actions = np.zeros(
                (len(do_resets), self.action_space.flat_dim))
            self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
            self._prev_cells = np.zeros((len(do_resets), self._hidden_dim))
        self._prev_actions[do_resets] = 0.
        self._prev_hiddens[do_resets] = self._init_hidden.eval()
        self._prev_cells[do_resets] = self._init_cell.eval()
    def get_action(self, observation):
        """Get single action from this policy for the input observation.
        Args:
            observation (numpy.ndarray): Observation from environment.
        Returns:
            numpy.ndarray: Actions
            dict: Predicted action and agent information.
        Note:
            It returns an action and a dict, with keys
            - mean (numpy.ndarray): Mean of the distribution.
            - log_std (numpy.ndarray): Log standard deviation of the
                distribution.
            - prev_action (numpy.ndarray): Previous action, only present if
                self._state_include_action is True.
        """
        actions, agent_infos = self.get_actions([observation])
        return actions[0], {k: v[0] for k, v in agent_infos.items()}
    def get_actions(self, observations):
        """Get multiple actions from this policy for the input observations.
        Args:
            observations (numpy.ndarray): Observations from environment.
        Returns:
            numpy.ndarray: Actions
            dict: Predicted action and agent information.
        Note:
            It returns an action and a dict, with keys
            - mean (numpy.ndarray): Means of the distribution.
            - log_std (numpy.ndarray): Log standard deviations of the
                distribution.
            - prev_action (numpy.ndarray): Previous action, only present if
                self._state_include_action is True.
        """
        observations = self.observation_space.flatten_n(observations)
        if self._state_include_action:
            assert self._prev_actions is not None
            all_input = np.concatenate([observations, self._prev_actions],
                                       axis=-1)
        else:
            all_input = observations
        means, log_stds, hidden_vec, cell_vec = self._f_step_mean_std(
            all_input, self._prev_hiddens, self._prev_cells)
        # Reparameterized sample: mean + std * N(0, 1).
        rnd = np.random.normal(size=means.shape)
        samples = rnd * np.exp(log_stds) + means
        samples = self.action_space.unflatten_n(samples)
        prev_actions = self._prev_actions
        self._prev_actions = samples
        self._prev_hiddens = hidden_vec
        self._prev_cells = cell_vec
        agent_infos = dict(mean=means, log_std=log_stds)
        if self._state_include_action:
            agent_infos['prev_action'] = np.copy(prev_actions)
        return samples, agent_infos
    @property
    def distribution(self):
        """Policy distribution.
        Returns:
            tfp.Distribution.MultivariateNormalDiag: Policy distribution.
        """
        return self._dist
    @property
    def state_info_specs(self):
        """State info specification.
        Returns:
            List[str]: keys and shapes for the information related to the
                policy's state when taking an action.
        """
        if self._state_include_action:
            return [
                ('prev_action', (self._action_dim, )),
            ]
        return []
    def clone(self, name):
        """Return a clone of the policy.
        It copies the configuration of the primitive and also the parameters.
        Args:
            name (str): Name of the newly created policy. It has to be
                different from source policy if cloned under the same
                computational graph.
        Returns:
            garage.tf.policies.GaussianLSTMPolicy: Newly cloned policy.
        """
        new_policy = self.__class__(
            name=name,
            env_spec=self._env_spec,
            hidden_dim=self._hidden_dim,
            hidden_nonlinearity=self._hidden_nonlinearity,
            hidden_w_init=self._hidden_w_init,
            hidden_b_init=self._hidden_b_init,
            recurrent_nonlinearity=self._recurrent_nonlinearity,
            recurrent_w_init=self._recurrent_w_init,
            output_nonlinearity=self._output_nonlinearity,
            output_w_init=self._output_w_init,
            output_b_init=self._output_b_init,
            hidden_state_init=self._hidden_state_init,
            hidden_state_init_trainable=self._hidden_state_init_trainable,
            cell_state_init=self._cell_state_init,
            cell_state_init_trainable=self._cell_state_init_trainable,
            forget_bias=self._forget_bias,
            learn_std=self._learn_std,
            std_share_network=self._std_share_network,
            init_std=self._init_std,
            layer_normalization=self._layer_normalization,
            state_include_action=self._state_include_action)
        new_policy.model.parameters = self.model.parameters
        return new_policy
    def __getstate__(self):
        """Object.__getstate__.
        Returns:
            dict: the state to be pickled for the instance.
        """
        new_dict = super().__getstate__()
        # Unpicklable TF-session-bound members are rebuilt in __setstate__.
        del new_dict['_f_step_mean_std']
        del new_dict['_dist']
        del new_dict['_init_hidden']
        del new_dict['_init_cell']
        return new_dict
    def __setstate__(self, state):
        """Object.__setstate__.
        Args:
            state (dict): Unpickled state.
        """
        super().__setstate__(state)
        self._initialize()
|
"""Provide access to Python's configuration information.
"""
import sys
import os
from os.path import pardir, realpath
# Install-path templates per platform/scheme. The {placeholders} are expanded
# later by _expand_vars()/_subst_vars() from the config/user variables
# (e.g. {base}, {userbase}, {implementation_lower}, {py_version_short}).
_INSTALL_SCHEMES = {
    'posix_prefix': {
        'stdlib': '{base}/lib/{implementation_lower}{py_version_short}',
        'platstdlib': '{platbase}/lib/{implementation_lower}{py_version_short}',
        'purelib': '{base}/lib/{implementation_lower}{py_version_short}/site-packages',
        'platlib': '{platbase}/lib/{implementation_lower}{py_version_short}/site-packages',
        'include': '{base}/include/{implementation_lower}{py_version_short}',
        'platinclude': '{platbase}/include/{implementation_lower}{py_version_short}',
        'scripts': '{base}/bin',
        'data': '{base}',
        },
    'posix_home': {
        'stdlib': '{base}/lib/{implementation_lower}',
        'platstdlib': '{base}/lib/{implementation_lower}',
        'purelib': '{base}/lib/{implementation_lower}',
        'platlib': '{base}/lib/{implementation_lower}',
        'include': '{base}/include/{implementation_lower}',
        'platinclude': '{base}/include/{implementation_lower}',
        'scripts': '{base}/bin',
        'data' : '{base}',
        },
    # PyPy uses a flat layout: one site-packages shared by pure/platform code.
    'pypy': {
        'stdlib': '{base}/lib-{implementation_lower}/{py_version_short}',
        'platstdlib': '{base}/lib-{implementation_lower}/{py_version_short}',
        'purelib': '{base}/site-packages',
        'platlib': '{base}/site-packages',
        'include': '{base}/include',
        'platinclude': '{base}/include',
        'scripts': '{base}/bin',
        'data' : '{base}',
        },
    'pypy_nt': {
        'stdlib': '{base}/lib-{implementation_lower}/{py_version_short}',
        'platstdlib': '{base}/lib-{implementation_lower}/{py_version_short}',
        'purelib': '{base}/site-packages',
        'platlib': '{base}/site-packages',
        'include': '{base}/include',
        'platinclude': '{base}/include',
        'scripts': '{base}/Scripts',
        'data' : '{base}',
        },
    'nt': {
        'stdlib': '{base}/Lib',
        'platstdlib': '{base}/Lib',
        'purelib': '{base}/Lib/site-packages',
        'platlib': '{base}/Lib/site-packages',
        'include': '{base}/Include',
        'platinclude': '{base}/Include',
        'scripts': '{base}/Scripts',
        'data' : '{base}',
        },
    'os2': {
        'stdlib': '{base}/Lib',
        'platstdlib': '{base}/Lib',
        'purelib': '{base}/Lib/site-packages',
        'platlib': '{base}/Lib/site-packages',
        'include': '{base}/Include',
        'platinclude': '{base}/Include',
        'scripts': '{base}/Scripts',
        'data' : '{base}',
        },
    'os2_home': {
        'stdlib': '{userbase}/lib/{implementation_lower}{py_version_short}',
        'platstdlib': '{userbase}/lib/{implementation_lower}{py_version_short}',
        'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages',
        'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages',
        'include': '{userbase}/include/{implementation_lower}{py_version_short}',
        'scripts': '{userbase}/bin',
        'data' : '{userbase}',
        },
    'nt_user': {
        'stdlib': '{userbase}/{implementation}{py_version_nodot}',
        'platstdlib': '{userbase}/{implementation}{py_version_nodot}',
        'purelib': '{userbase}/{implementation}{py_version_nodot}/site-packages',
        'platlib': '{userbase}/{implementation}{py_version_nodot}/site-packages',
        'include': '{userbase}/{implementation}{py_version_nodot}/Include',
        'scripts': '{userbase}/Scripts',
        'data' : '{userbase}',
        },
    'posix_user': {
        'stdlib': '{userbase}/lib/{implementation_lower}{py_version_short}',
        'platstdlib': '{userbase}/lib/{implementation_lower}{py_version_short}',
        'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages',
        'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages',
        'include': '{userbase}/include/{implementation_lower}{py_version_short}',
        'scripts': '{userbase}/bin',
        'data' : '{userbase}',
        },
    'osx_framework_user': {
        'stdlib': '{userbase}/lib/{implementation_lower}',
        'platstdlib': '{userbase}/lib/{implementation_lower}',
        'purelib': '{userbase}/lib/{implementation_lower}/site-packages',
        'platlib': '{userbase}/lib/{implementation_lower}/site-packages',
        'include': '{userbase}/include',
        'scripts': '{userbase}/bin',
        'data' : '{userbase}',
        },
    }
# Keys every scheme is expected to provide (platinclude is optional).
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
                'scripts', 'data')
# Version strings used to expand the {py_version*} placeholders above.
_PY_VERSION = sys.version.split()[0]
# BUGFIX: derive "X.Y" from sys.version_info instead of slicing sys.version,
# which breaks for two-digit minor versions ("3.10..."[:3] == "3.1").
_PY_VERSION_SHORT = '%d.%d' % sys.version_info[:2]
_PY_VERSION_SHORT_NO_DOT = '%d%d' % sys.version_info[:2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Lazily populated caches (filled on first use).
_CONFIG_VARS = None
_USER_BASE = None
def _get_implementation():
    """Return 'PyPy' when running under PyPy, else 'Python'."""
    return 'PyPy' if '__pypy__' in sys.builtin_module_names else 'Python'
def _safe_realpath(path):
    """Return the canonical path, or *path* unchanged if resolution fails."""
    try:
        resolved = realpath(path)
    except OSError:
        return path
    return resolved
# _PROJECT_BASE: the directory holding the interpreter binary, adjusted below
# when running from inside a Windows build tree or a cross-build directory.
if sys.executable:
    _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
    # sys.executable can be empty if argv[0] has been changed and Python is
    # unable to retrieve the real program name
    _PROJECT_BASE = _safe_realpath(os.getcwd())
# PCbuild (Windows in-tree build): hop up to the project root.
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/VS9.0/amd64
if (os.name == "nt"
    and os.path.basename(os.path.dirname(os.path.dirname(_PROJECT_BASE))).lower() == "pc"
    and os.path.basename(os.path.dirname(_PROJECT_BASE)).lower() == "vs9.0"):
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
    # the build directory for posix builds
    _PROJECT_BASE = os.path.normpath(os.path.abspath("."))
def is_python_build():
    """Return True when running from a source checkout (Modules/Setup* exists)."""
    return any(
        os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn))
        for fn in ("Setup.dist", "Setup.local"))
_PYTHON_BUILD = is_python_build()
if _PYTHON_BUILD:
    # When running from a source tree, headers live in the checkout rather
    # than under the install prefix.
    for scheme in ('posix_prefix', 'posix_home'):
        _INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
        _INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
def _subst_vars(s, local_vars):
    """Expand {placeholders} in *s* from local_vars, falling back to os.environ.

    Raises AttributeError naming the first placeholder that cannot be
    resolved from either mapping.
    """
    try:
        return s.format(**local_vars)
    except KeyError:
        try:
            return s.format(**os.environ)
        # BUGFIX: `except KeyError, var` is Python-2-only syntax (a
        # SyntaxError on Python 3); `as` works on Python 2.6+ and 3.x.
        except KeyError as var:
            raise AttributeError('{%s}' % var)
def _extend_dict(target_dict, other_dict):
    """Copy entries of other_dict into target_dict without overwriting."""
    for key, value in other_dict.items():
        # Existing keys win; only fill in the missing ones.
        if key not in target_dict:
            target_dict[key] = value
def _expand_vars(scheme, vars):
    """Expand every path template of an install scheme using vars + config vars."""
    if vars is None:
        vars = {}
    expanded = {}
    # vars entries take precedence over the global config vars.
    _extend_dict(vars, get_config_vars())
    for key, value in _INSTALL_SCHEMES[scheme].items():
        if os.name in ('posix', 'nt'):
            value = os.path.expanduser(value)
        expanded[key] = os.path.normpath(_subst_vars(value, vars))
    return expanded
def _get_default_scheme():
    """Pick the install-scheme name matching this platform and interpreter."""
    on_pypy = '__pypy__' in sys.builtin_module_names
    if os.name == 'posix':
        # the default scheme for posix is posix_prefix
        return 'pypy' if on_pypy else 'posix_prefix'
    if os.name == 'nt' and on_pypy:
        return 'pypy_nt'
    return os.name
def _getuserbase():
    """Return the per-user base directory.

    The PYTHONUSERBASE environment variable, when set and non-empty,
    always wins; otherwise a platform-specific default is used.
    """
    env_base = os.environ.get("PYTHONUSERBASE", None)
    if env_base:
        return env_base

    def joinuser(*args):
        return os.path.expanduser(os.path.join(*args))

    # what about 'os2emx', 'riscos' ?
    if os.name == "nt":
        return joinuser(os.environ.get("APPDATA") or "~", "Python")
    if sys.platform == "darwin":
        framework = get_config_var("PYTHONFRAMEWORK")
        if framework:
            return joinuser("~", "Library", framework,
                            "%d.%d" % sys.version_info[:2])
    return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.

    Values that look like integers are converted; '$(NAME)' and
    '${NAME}' references are resolved iteratively, falling back to
    os.environ like make does.
    """
    import re
    # Regexes needed for parsing Makefile (and similar syntaxes,
    # like old-style Setup files).
    _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
    if vars is None:
        vars = {}
    done = {}      # fully resolved variables
    notdone = {}   # values still containing $(...)/${...} references
    with open(filename) as f:
        lines = f.readlines()
    for line in lines:
        if line.startswith('#') or line.strip() == '':
            continue
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')
            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v
    # do variable interpolation here
    # NOTE: Python 2 semantics -- .keys() returns a list snapshot, so
    # deleting entries from `notdone` while looping over it is safe.
    while notdone:
        for name in notdone.keys():
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                else:
                    done[n] = item = ""
                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        # more references remain; retry on a later round
                        notdone[name] = value
                    else:
                        try: value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        del notdone[name]
            else:
                # bogus variable reference; just drop it since we can't deal
                del notdone[name]
    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()
    # save the results in the global dictionary
    vars.update(done)
    return vars
def get_makefile_filename():
    """Return the path of the Makefile (in-tree for source builds)."""
    if _PYTHON_BUILD:
        base = _PROJECT_BASE
    else:
        base = os.path.join(get_path('platstdlib'), "config")
    return os.path.join(base, "Makefile")
# Issue #22199: retain undocumented private name for compatibility
# (third-party code imported sysconfig._get_makefile_filename directly).
_get_makefile_filename = get_makefile_filename
def _generate_posix_vars():
    """Generate the Python module containing build-time variables.

    Writes _sysconfigdata.py into the build directory and records that
    directory in pybuilddir.txt for the interpreter's sys.path fixup.
    """
    import pprint
    vars = {}
    # load the installed Makefile:
    makefile = get_makefile_filename()
    try:
        _parse_makefile(makefile, vars)
    except IOError, e:
        msg = "invalid Python installation: unable to open %s" % makefile
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # load the installed pyconfig.h:
    config_h = get_config_h_filename()
    try:
        with open(config_h) as f:
            parse_config_h(f, vars)
    except IOError, e:
        msg = "invalid Python installation: unable to open %s" % config_h
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if _PYTHON_BUILD:
        vars['LDSHARED'] = vars['BLDSHARED']
    # There's a chicken-and-egg situation on OS X with regards to the
    # _sysconfigdata module after the changes introduced by #15298:
    # get_config_vars() is called by get_platform() as part of the
    # `make pybuilddir.txt` target -- which is a precursor to the
    # _sysconfigdata.py module being constructed. Unfortunately,
    # get_config_vars() eventually calls _init_posix(), which attempts
    # to import _sysconfigdata, which we won't have built yet. In order
    # for _init_posix() to work, if we're on Darwin, just mock up the
    # _sysconfigdata module manually and populate it with the build vars.
    # This is more than sufficient for ensuring the subsequent call to
    # get_platform() succeeds.
    name = '_sysconfigdata'
    if 'darwin' in sys.platform:
        import imp
        module = imp.new_module(name)
        module.build_time_vars = vars
        sys.modules[name] = module
    pybuilddir = 'build/lib.%s-%s' % (get_platform(), sys.version[:3])
    if hasattr(sys, "gettotalrefcount"):
        # gettotalrefcount only exists in --with-pydebug builds
        pybuilddir += '-pydebug'
    try:
        os.makedirs(pybuilddir)
    except OSError:
        # directory already exists; a real failure surfaces in open() below
        pass
    destfile = os.path.join(pybuilddir, name + '.py')
    with open(destfile, 'wb') as f:
        # Python 2: writing str to a 'wb' file is fine.
        f.write('# system configuration generated and used by'
                ' the sysconfig module\n')
        f.write('build_time_vars = ')
        pprint.pprint(vars, stream=f)
    # Create file used for sys.path fixup -- see Modules/getpath.c
    with open('pybuilddir.txt', 'w') as f:
        f.write(pybuilddir)
def _init_posix(vars):
    """Initialize the module as appropriate for POSIX systems."""
    # _sysconfigdata is generated at build time by _generate_posix_vars().
    from _sysconfigdata import build_time_vars
    vars.update(build_time_vars)
def _init_non_posix(vars):
    """Initialize the module as appropriate for NT"""
    # set basic install directories
    vars['LIBDEST'] = get_path('stdlib')
    vars['BINLIBDEST'] = get_path('platstdlib')
    vars['INCLUDEPY'] = get_path('include')
    vars['SO'] = '.pyd'
    vars['EXE'] = '.exe'
    vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
    vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
    # pypy only: give us control over the ABI tag in a wheel name
    if '__pypy__' in sys.builtin_module_names:
        import imp
        so_ext = imp.get_suffixes()[0][0]
        # first two dash-separated parts of the extension suffix,
        # e.g. '.pypy-41.pyd' -> 'pypy-41'
        vars['SOABI']= '-'.join(so_ext.split('.')[1].split('-')[:2])
#
# public APIs
#
def parse_config_h(fp, vars=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.  '#define NAME VALUE' lines are
    stored (VALUE converted to int when possible); '/* #undef NAME */'
    lines are stored as 0.
    """
    import re
    if vars is None:
        vars = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    for line in iter(fp.readline, ''):
        m = define_rx.match(line)
        if m:
            name, value = m.group(1, 2)
            try:
                value = int(value)
            except ValueError:
                pass  # keep non-numeric values verbatim
            vars[name] = value
            continue
        m = undef_rx.match(line)
        if m:
            vars[m.group(1)] = 0
    return vars
def get_config_h_filename():
    """Returns the path of pyconfig.h."""
    if _PYTHON_BUILD:
        # Source build: the header lives in the checkout (PC/ on Windows).
        inc_dir = os.path.join(_PROJECT_BASE, "PC") if os.name == "nt" else _PROJECT_BASE
    else:
        inc_dir = get_path('platinclude')
    return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
    """Returns a tuple containing the schemes names."""
    # sorted() over the dict iterates its keys; equivalent to the
    # sort-a-key-list idiom but works the same on Python 2 and 3.
    return tuple(sorted(_INSTALL_SCHEMES))
def get_path_names():
    """Returns a tuple containing the paths names."""
    # _SCHEME_KEYS is the fixed set of keys every install scheme defines.
    return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
    """Returns a mapping containing an install scheme.

    ``scheme`` is the install scheme name. If not provided, it will
    return the default scheme for the current platform.
    (The default is captured once, at function-definition time.)
    """
    if not expand:
        return _INSTALL_SCHEMES[scheme]
    return _expand_vars(scheme, vars)
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
    """Returns a path corresponding to the scheme.

    ``scheme`` is the install scheme name.
    """
    paths = get_paths(scheme, vars, expand)
    return paths[name]
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.

    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.
    """
    import re
    global _CONFIG_VARS
    # The dictionary is computed once and cached for the process lifetime.
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # Distutils.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
        _CONFIG_VARS['implementation'] = _get_implementation()
        _CONFIG_VARS['implementation_lower'] = _get_implementation().lower()
        _CONFIG_VARS['LIBRARY'] = ''
        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)
        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        _CONFIG_VARS['userbase'] = _getuserbase()
        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
        # OS X platforms require special customization to handle
        # multi-architecture, multi-os-version installers
        if sys.platform == 'darwin':
            import _osx_support
            # PyPy only - hardcode to 10.7, like in distutils/sysconfig_pypy.py
            _CONFIG_VARS['MACOSX_DEPLOYMENT_TARGET'] = '10.7'
            _osx_support.customize_config_vars(_CONFIG_VARS)
        # PyPy: derive SOABI from the first C-extension suffix,
        # e.g. '.pypy-41.so' -> 'pypy-41'.
        import imp
        for suffix, mode, type_ in imp.get_suffixes():
            if type_ == imp.C_EXTENSION:
                _CONFIG_VARS['SOABI'] = suffix.split('.')[1]
                break
        _CONFIG_VARS['INCLUDEPY'] = os.path.join(_CONFIG_VARS['prefix'],
                                                 'include')
    if args:
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
def get_config_var(name):
    """Return the value of a single variable using the dictionary returned by
    'get_config_vars()'.

    Equivalent to get_config_vars().get(name)
    """
    cfg = get_config_vars()
    return cfg.get(name)
def get_platform():
    """Return a string that identifies the current platform.

    This is used mainly to distinguish platform-specific build directories and
    platform-specific built distributions. Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    import re
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform
    # Set for cross builds explicitly
    if "_PYTHON_HOST_PLATFORM" in os.environ:
        return os.environ["_PYTHON_HOST_PLATFORM"]
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform
    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()
    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')
    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5": # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
            # We can't use "platform.architecture()[0]" because a
            # bootstrap problem. We use a dict to get an error
            # if some suspicious happens.
            # NOTE: sys.maxint is Python 2 only.
            bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
            machine += ".%s" % bitness[sys.maxint]
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix": # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        # AIX reports major in 'version' and minor in 'release'
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        rel_re = re.compile (r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        import _osx_support
        osname, release, machine = _osx_support.get_platform_osx(
                                            get_config_vars(),
                                            osname, release, machine)
    return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
    """Return the short Python version string, e.g. "2.7"."""
    return _PY_VERSION_SHORT
def _print_dict(title, data):
    """Print *data* as 'key = "value"' lines under a *title* heading.

    (Python 2 print statements; used only by _main.)
    """
    for index, (key, value) in enumerate(sorted(data.items())):
        if index == 0:
            # emit the heading once, before the first item
            print '%s: ' % (title)
        print '\t%s = "%s"' % (key, value)
def _main():
    """Script entry point: print platform, version, paths and variables.

    With --generate-posix-vars, only (re)generate _sysconfigdata instead.
    """
    if '--generate-posix-vars' in sys.argv:
        _generate_posix_vars()
        return
    print 'Platform: "%s"' % get_platform()
    print 'Python version: "%s"' % get_python_version()
    print 'Current installation scheme: "%s"' % _get_default_scheme()
    print
    _print_dict('Paths', get_paths())
    print
    _print_dict('Variables', get_config_vars())
    print
    _print_dict('User', get_paths('%s_user' % os.name))
# Allow running as a script: `python sysconfig.py [--generate-posix-vars]`.
if __name__ == '__main__':
    _main()
|
from verbs import (
Verb1, Verb1B, Verb1C,
Verb2, Verb2B, Verb2C, Verb2D, Verb2E, Verb2F, Verb2G
)
class LUW(Verb1):
    # Paradigm data for the lemma mapped as "λύω" in VERBS below.
    stem1 = "λυ+"
class TIMAW(Verb1B):
    # Paradigm data for "τιμῶ" (Verb1B base).
    stem1 = "τιμα"
class POIEW(Verb1C):
    # Paradigm data for "ποιῶ" (Verb1C base).
    stem1 = "ποιε"
class DHLOW(Verb1B):
    # Paradigm data for "δηλῶ"; shares the Verb1B base with TIMAW.
    stem1 = "δηλο"
class DIDWMI(Verb2):
    # Paradigm data for "δίδωμι": two stems, as required by Verb2.
    stem1 = "διδο"
    stem2 = "δο"
class TIQHMI(Verb2B):
    # Paradigm data for "τίθημι" (Verb2B base).
    stem1 = "τιθε"
    stem2 = "θε"
class hIHMI(Verb2C):
    # Paradigm data for "ἵημι" (Verb2C base).
    stem1 = "ἱε"
    stem2 = "ἑ"
class hISTHMI(Verb2D):
    # Paradigm data for "ἵστημι" (Verb2D base).
    stem1 = "ἱστα"
    stem2 = "στα"
class hISTHMI1(Verb2G):
    # Alternate "ἵστημι" paradigm ("ἵστημι/1" in VERBS); Verb2G only needs stem2.
    stem2 = "στα"
class DEIKNUMI(Verb2E):
    # Paradigm data for "δείκνυμι" (Verb2E base).
    stem1 = "δεικνυ"
class GIGNWSKW(Verb2F):
    # Paradigm data for "γιγνώσκω"; Verb2F only needs stem2.
    stem2 = "γνο"
# Registry mapping a verb's dictionary (lemma) form to its paradigm class.
VERBS = {
    "λύω": LUW,
    "τιμῶ": TIMAW,
    "ποιῶ": POIEW,
    "δηλῶ": DHLOW,
    "δίδωμι": DIDWMI,
    "τίθημι": TIQHMI,
    "ἵημι": hIHMI,
    "ἵστημι": hISTHMI,
    "ἵστημι/1": hISTHMI1,
    "δείκνυμι": DEIKNUMI,
    "γιγνώσκω": GIGNWSKW,
}
|
# coding: utf-8
"""
NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
    OpenAPI spec version: 0.3.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.access_policy import AccessPolicy
from .models.access_policy_summary import AccessPolicySummary
from .models.batch_size import BatchSize
from .models.bucket import Bucket
from .models.bucket_item import BucketItem
from .models.bundle import Bundle
from .models.component_difference import ComponentDifference
from .models.component_difference_group import ComponentDifferenceGroup
from .models.connectable_component import ConnectableComponent
from .models.controller_service_api import ControllerServiceAPI
from .models.current_user import CurrentUser
from .models.fields import Fields
from .models.link import Link
from .models.permissions import Permissions
from .models.position import Position
from .models.resource import Resource
from .models.resource_permissions import ResourcePermissions
from .models.tenant import Tenant
from .models.uri_builder import UriBuilder
from .models.user import User
from .models.user_group import UserGroup
from .models.versioned_connection import VersionedConnection
from .models.versioned_controller_service import VersionedControllerService
from .models.versioned_flow import VersionedFlow
from .models.versioned_flow_coordinates import VersionedFlowCoordinates
from .models.versioned_flow_difference import VersionedFlowDifference
from .models.versioned_flow_snapshot import VersionedFlowSnapshot
from .models.versioned_flow_snapshot_metadata import VersionedFlowSnapshotMetadata
from .models.versioned_funnel import VersionedFunnel
from .models.versioned_label import VersionedLabel
from .models.versioned_port import VersionedPort
from .models.versioned_process_group import VersionedProcessGroup
from .models.versioned_processor import VersionedProcessor
from .models.versioned_property_descriptor import VersionedPropertyDescriptor
from .models.versioned_remote_group_port import VersionedRemoteGroupPort
from .models.versioned_remote_process_group import VersionedRemoteProcessGroup
# import apis into sdk package
from .apis.access_api import AccessApi
from .apis.bucket_flows_api import BucketFlowsApi
from .apis.buckets_api import BucketsApi
from .apis.flows_api import FlowsApi
from .apis.items_api import ItemsApi
from .apis.policies_api import PoliciesApi
from .apis.tenants_api import TenantsApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
# Module-level default Configuration instance shared by the generated API clients.
configuration = Configuration()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from flask import Flask

# WSGI application object for this module.
app = Flask(__name__)
# Optional external configuration hooks (currently disabled):
# app.config.from_pyfile('config.ini')
# app.config.from_envvar('FLASKCONFIG')
@app.route('/')
def index():
    """Root endpoint: return a plain-text greeting."""
    return 'hello python'
if __name__ == '__main__':
    # Show the registered routes, then serve on all interfaces.
    print(app.url_map)
    app.run(host="0.0.0.0", port=5000, debug=True)
|
import asyncio
import discord
from discord.ext import commands
from constants import *
from utils import *
from sqlite import Sql
from tibia import *
from guildstatseu import GuildStats
class Test(commands.Cog):
    """Experimental cog exercising Tibia / TibiaData character lookups.

    Fixes over the previous revision:
    * Two coroutine methods were both named ``tibia``; the second class
      attribute silently replaced the first, so the registered 'tibia'
      command object was lost.  The 'test' command now uses a distinct
      method name (the user-facing command names are unchanged).
    * ``'\\N{SMILE}'`` is not a valid Unicode character name and made the
      module fail to compile; replaced with ``'\\N{WHITE SMILING FACE}'``.
    """

    def __init__(self, bot):
        self.bot = bot
        self.config = Config.load_config()
        self.sql = Sql()

    @commands.command(name='tibiaData', aliases=['td'], brief="TibiaData test", pass_context=True)
    async def tibiaData(self, ctx, *, name):
        """Look up *name* via tibiadata.com and post the result as an embed."""
        name_from_msg = ctx.message.content[len(ctx.prefix) + len(ctx.invoked_with) + 1:]  # not used; alternative way to read the argument
        embed = discord.Embed(
            title='Tibia Discord Bot Test',
            description=LOGIN_MESSAGE.format(self.bot.user.name, self.bot.user.id, self.config["PREFIX"])+"\n"+LOADING_DEFAULT_WHITELIST.format(self.config["DEFAULT_WHITELIST"]),
            colour=0xffffff
        )
        msg = await ctx.send(content=LOADING_MESSAGE, embed=embed)
        # Get character information from tibiadata.com
        character = await TibiaData.get_character(name)
        print(character.to_json())
        embed.add_field(name="Name", value=character.name, inline=True)
        embed.add_field(name="World", value=character.world, inline=True)
        embed.add_field(name="Level", value=character.level, inline=True)
        if character.house is not None:
            embed.add_field(name="House", value=HOUSE.format(name=character.house.name, town=character.house.town), inline=True)
        # Check if user has a custom thumbnail image and add it to embed message
        Utils.add_thumbnail(embed, character.name, self.config["DEFAULT_WHITELIST"])
        await msg.edit(content="Done", embed=embed)
        emoji1 = '\N{THUMBS UP SIGN}'
        emoji2 = '\N{THUMBS DOWN SIGN}'
        await msg.add_reaction(emoji1)
        await msg.add_reaction(emoji2)
        # Wait for the first reaction from anyone except the bot itself.
        reaction, user = await self.bot.wait_for('reaction_add', check=lambda reaction, user: user != self.bot.user)
        await ctx.send(content="You responded with {}".format(reaction.emoji))

    @commands.command(name='tibia', aliases=['t'], brief="Tibia test", pass_context=True)
    async def tibia(self, ctx, *, name):
        """Look up *name* via tibia.com and post the result as an embed."""
        embed = discord.Embed(
            title='Tibia Discord Bot Test',
            description=LOGIN_MESSAGE.format(self.bot.user.name, self.bot.user.id, self.config["PREFIX"])+"\n"+LOADING_DEFAULT_WHITELIST.format(self.config["DEFAULT_WHITELIST"]),
            colour=0xffffff
        )
        msg = await ctx.send(content=LOADING_MESSAGE, embed=embed)
        # Get character information from tibia.com
        character = await Tibia.get_character(name)
        print(character.to_json())
        embed.add_field(name="Name", value=character.name, inline=True)
        embed.add_field(name="World", value=character.world, inline=True)
        embed.add_field(name="Level", value=character.level, inline=True)
        if character.house is not None:
            embed.add_field(name="House", value=HOUSE.format(name=character.house.name, town=character.house.town), inline=True)
        emoji1 = '\N{THUMBS UP SIGN}'
        await msg.add_reaction(emoji1)
        # Block until someone (not the bot) reacts before finishing the embed.
        reaction, user = await self.bot.wait_for('reaction_add', check=lambda reaction, user: user != self.bot.user)
        # Check if user has a custom thumbnail image and add it to embed message
        Utils.add_thumbnail(embed, character.name, self.config["DEFAULT_WHITELIST"])
        await msg.edit(content="Done", embed=embed)

    @commands.command(name='test', aliases=['te'], brief="Test command", pass_context=True)
    async def test_command(self, ctx, *, name):
        """Look up *name* and progressively add its highscore ranks to an embed.

        Renamed from ``tibia`` so it no longer clobbers the command above.
        """
        embed = discord.Embed(
            title='Tibia Discord Bot Test',
            colour=0xffffff,
        )
        msg = await ctx.send(content=LOADING_MESSAGE, embed=embed)
        # Get character information from tibia.com
        character = await Tibia.get_character(name)
        print(character.to_json())
        # Highscores
        # Check Experience
        experience = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.EXPERIENCE)
        if experience is not None:
            embed.add_field(name=HIGHSCORE_EXP_MESSAGE.format(experience.rank), value=str(experience.value), inline=True)
            await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check magic level if druid or sorcerer
        if (character.vocation in [tibiapy.Vocation.DRUID, tibiapy.Vocation.ELDER_DRUID, tibiapy.Vocation.SORCERER, tibiapy.Vocation.MASTER_SORCERER]):
            magic = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.MAGIC_LEVEL)
            if magic is not None:
                embed.add_field(name=HIGHSCORE_MAGIC_MESSAGE.format(magic.rank), value=str(magic.value), inline=True)
                await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check distance skill if paladin
        if (character.vocation in [tibiapy.Vocation.PALADIN, tibiapy.Vocation.ROYAL_PALADIN]):
            distance = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.DISTANCE_FIGHTING)
            if distance is not None:
                embed.add_field(name=HIGHSCORE_DISTANCE_MESSAGE.format(distance.rank), value=str(distance.value), inline=True)
                await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check melee skills if knight
        if (character.vocation in [tibiapy.Vocation.KNIGHT, tibiapy.Vocation.ELITE_KNIGHT]):
            # Sword skill
            sword = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.SWORD_FIGHTING)
            if sword is not None:
                embed.add_field(name=HIGHSCORE_SWORD_MESSAGE.format(sword.rank), value=str(sword.value), inline=True)
                await msg.edit(content=LOADING_MESSAGE, embed=embed)
            # Axe skill
            axe = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.AXE_FIGHTING)
            if axe is not None:
                embed.add_field(name=HIGHSCORE_AXE_MESSAGE.format(axe.rank), value=str(axe.value), inline=True)
                await msg.edit(content=LOADING_MESSAGE, embed=embed)
            # Club skill
            club = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.CLUB_FIGHTING)
            if club is not None:
                embed.add_field(name=HIGHSCORE_CLUB_MESSAGE.format(club.rank), value=str(club.value), inline=True)
                await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check Shielding all vocations
        shielding = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.SHIELDING)
        if shielding is not None:
            embed.add_field(name=HIGHSCORE_SHIELDING_MESSAGE.format(shielding.rank), value=str(shielding.value), inline=True)
            await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check Fist all vocations
        fist = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.FIST_FIGHTING)
        if fist is not None:
            embed.add_field(name=HIGHSCORE_FIST_MESSAGE.format(fist.rank), value=str(fist.value), inline=True)
            await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check Fishing all vocations
        fishing = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.FISHING)
        if fishing is not None:
            embed.add_field(name=HIGHSCORE_FISHING_MESSAGE.format(fishing.rank), value=str(fishing.value), inline=True)
            await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check Achievements all vocations
        achievements = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.ACHIEVEMENTS)
        if achievements is not None:
            embed.add_field(name=HIGHSCORE_ACHIEVEMENTS_MESSAGE.format(achievements.rank), value=str(achievements.value), inline=True)
            await msg.edit(content=LOADING_MESSAGE, embed=embed)
        # Check Loyalty points all vocations
        loyalty = await TibiaData.check_player_highscore(character.name, character.world, tibiapy.Category.LOYALTY_POINTS)
        if loyalty is not None:
            embed.add_field(name=HIGHSCORE_LOYALTY_POINTS_MESSAGE.format(loyalty.rank), value=str(loyalty.value), inline=True)
            await msg.edit(content=LOADING_MESSAGE, embed=embed)
        embed.set_author(name=character.name, url=character.url)
        #if character.deaths:
        #    for num, item in enumerate(character.deaths):
        #        embed.description = KILL_MESSAGE.format(date=Utils.utc_to_local(item.time), level=item.level, killers=", ".join([killer.name for killer in item.killers if killer.name != item.name]), assists=", ".join([killer.name for killer in item.assists if killer.name != item.name]) if item.assists else EMBED_BLANK)
        #        #embed.add_field(name="Deaths" if num == 0 else EMBED_BLANK, value=kill_message if num == 0 else EMBED_BLANK, inline=True)
        #        break
        await msg.edit(content="Done", embed=embed)

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        """Log thumbs-up / smiley reactions (debug listener)."""
        if str(reaction.emoji) == "\N{THUMBS UP SIGN}":
            print(str(reaction.emoji))
            pass
        # NOTE: was '\N{SMILE}', which is not a valid Unicode name (SyntaxError).
        if str(reaction.emoji) == "\N{WHITE SMILING FACE}":
            print(str(reaction.emoji))
            pass

    @commands.Cog.listener()
    @is_channel(Config.load_config()['CHANNEL_IDS'])  # not working -- decorator order with Cog.listener likely wrong; TODO confirm
    async def on_message(self, ctx):
        """Debug listener: print every incoming message object."""
        print(ctx)
        #await self.bot.process_commands(ctx)
def setup(bot):
    # discord.py extension entry point: register the Test cog on load.
    bot.add_cog(Test(bot))
|
# Generated by Django 2.2.6 on 2019-10-25 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`: adds Permission.state with a single
    # allowed choice that is also the default ("user permission").

    dependencies = [("surfsara", "0020_auto_20191024_1516")]
    operations = [
        migrations.AddField(
            model_name="permission",
            name="state",
            field=models.CharField(
                choices=[("user permission", "user permission")],
                default="user permission",
                max_length=255,
            ),
        )
    ]
|
# c:\Python35\python -m venv c:\path\to\myenv
|
from googletrans import Translator

translator = Translator()

# Read the source text first: the previous version opened the output file
# in 'w' mode up front, truncating it even when the translation failed.
with open(r'C:\Users\Lonely_Wolf\OneDrive\Рабочий стол\work\number.txt', 'r', encoding='utf-8') as num:
    numeral = num.read()

try:
    # googletrans raises AttributeError on a malformed API response.
    translated = translator.translate(numeral, dest='ru').text
except AttributeError:
    print('Что-то пошло не так')
else:
    # Only (over)write the output file when we actually have a result.
    with open('number.txt', 'w', encoding='utf-8') as num_2:
        num_2.write(translated)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
import pytest
import copy
import collections
import json
import functools
import time
import sys
import test_config
@pytest.fixture(scope="function")
def device_id(brand_new_client):
    # TODO: suggest adding device_id and module_id to client object
    # Reaches into private pipeline config because the client does not
    # expose its identity publicly (see TODO above).
    return brand_new_client._mqtt_pipeline._pipeline.pipeline_configuration.device_id
@pytest.fixture(scope="function")
def module_id(brand_new_client):
    # Same private-attribute access as device_id above; None for device clients.
    return brand_new_client._mqtt_pipeline._pipeline.pipeline_configuration.module_id
@pytest.fixture(scope="function")
def watches_events(service_helper, device_id, module_id):
    # Start service-side event watching for the duration of the test,
    # and always stop it again on teardown.
    service_helper.start_watching(device_id, module_id)
    yield
    service_helper.stop_watching(device_id, module_id)
@pytest.fixture(scope="function")
def connection_retry():
    # Default for the client's connection_retry kwarg; override per-test.
    return True
@pytest.fixture(scope="function")
def auto_connect():
    # Default for the client's auto_connect kwarg; override per-test.
    return True
@pytest.fixture(scope="function")
def websockets():
    # True when the configured transport is MQTT-over-WebSockets.
    return test_config.config.transport == test_config.TRANSPORT_MQTT_WS
@pytest.fixture(scope="function")
def extra_client_kwargs():
    # Extra kwargs merged into client_kwargs; override per-test to customize.
    return {}
@pytest.fixture(scope="function")
def client_kwargs(extra_client_kwargs, auto_connect, connection_retry, websockets):
    """Assemble the kwargs for client creation.

    Entries from extra_client_kwargs override the base values.
    """
    kwargs = {
        "auto_connect": auto_connect,
        "connection_retry": connection_retry,
        "websockets": websockets,
    }
    kwargs.update(extra_client_kwargs)
    return kwargs
# Directories pytest should skip collecting.
collect_ignore = []

# Ignore Async tests if below Python 3.5
if sys.version_info < (3, 5):
    collect_ignore.append("aio")
|
# Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
import numpy as np
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import pycocotools.mask as mask_util
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from detectron2.data import MetadataCatalog
from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
from detectron2.utils.file_io import PathManager
from .colormap import random_color
logger = logging.getLogger(__name__)

__all__ = ["ColorMode", "VisImage", "Visualizer"]

# Thresholds used by the visualizer; units are pixel areas unless noted.
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
# Colors as RGB floats in [0, 1].
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
# Minimum confidence for drawing a keypoint — presumably a score cutoff; TODO confirm against Visualizer usage.
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
    """
    Enum of different color modes to use for instance visualizations.
    """

    IMAGE = 0
    """
    Picks a random color for every instance and overlay segmentations with low opacity.
    """
    SEGMENTATION = 1
    """
    Let instances of the same category have similar colors
    (from metadata.thing_colors), and overlay them with
    high opacity. This provides more attention on the quality of segmentation.
    """
    IMAGE_BW = 2
    """
    Same as IMAGE, but convert all areas without masks to gray-scale.
    Only available for drawing per-instance mask predictions.
    """
class GenericMask:
    """
    Attribute:
        polygons (list[ndarray]): list[ndarray]: polygons for this mask.
            Each ndarray has format [x, y, x, y, ...]
        mask (ndarray): a binary mask

    Accepts a COCO-style RLE dict, a list of polygons, or a binary ndarray,
    and converts lazily between the mask and polygon representations.
    """

    def __init__(self, mask_or_polygons, height, width):
        # Lazily-computed representations; only one is filled in here.
        self._mask = self._polygons = self._has_holes = None
        self.height = height
        self.width = width

        m = mask_or_polygons
        if isinstance(m, dict):
            # RLEs
            assert "counts" in m and "size" in m
            if isinstance(m["counts"], list):  # uncompressed RLEs
                h, w = m["size"]
                assert h == height and w == width
                m = mask_util.frPyObjects(m, h, w)
            self._mask = mask_util.decode(m)[:, :]
            return

        if isinstance(m, list):  # list[ndarray]
            self._polygons = [np.asarray(x).reshape(-1) for x in m]
            return

        if isinstance(m, np.ndarray):  # assumed to be a binary mask
            assert m.shape[1] != 2, m.shape
            assert m.shape == (height, width), m.shape
            self._mask = m.astype("uint8")
            return

        raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))

    @property
    def mask(self):
        # Convert from polygons on first access.
        if self._mask is None:
            self._mask = self.polygons_to_mask(self._polygons)
        return self._mask

    @property
    def polygons(self):
        # Convert from the binary mask on first access.
        if self._polygons is None:
            self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
        return self._polygons

    @property
    def has_holes(self):
        if self._has_holes is None:
            if self._mask is not None:
                self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
            else:
                self._has_holes = False  # if original format is polygon, does not have holes
        return self._has_holes

    def mask_to_polygons(self, mask):
        # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
        # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
        # Internal contours (holes) are placed in hierarchy-2.
        # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 does not support incontiguous arr
        res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        # findContours returns 2 or 3 values depending on the cv2 version;
        # hierarchy is always last, contours second-to-last.
        hierarchy = res[-1]
        if hierarchy is None:  # empty mask
            return [], False
        has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
        res = res[-2]
        res = [x.flatten() for x in res]
        # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
        # We add 0.5 to turn them into real-value coordinate space. A better solution
        # would be to first +0.5 and then dilate the returned polygon by 0.5.
        res = [x + 0.5 for x in res if len(x) >= 6]
        return res, has_holes

    def polygons_to_mask(self, polygons):
        # Rasterize via COCO RLE: polygons -> RLE -> binary mask.
        rle = mask_util.frPyObjects(polygons, self.height, self.width)
        rle = mask_util.merge(rle)
        return mask_util.decode(rle)[:, :]

    def area(self):
        # Number of foreground pixels.
        return self.mask.sum()

    def bbox(self):
        # Axis-aligned bounding box in XYXY format (toBbox gives XYWH).
        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
        p = mask_util.merge(p)
        bbox = mask_util.toBbox(p)
        bbox[2] += bbox[0]
        bbox[3] += bbox[1]
        return bbox
class _PanopticPrediction:
    """
    Unify different panoptic annotation/prediction formats.

    Args:
        panoptic_seg (torch.Tensor): (H, W) tensor where each value is a segment id.
        segments_info (list[dict] or None): per-segment metadata with keys
            "id", "category_id", "isthing". If None, segment info is derived
            from the pixel values using ``metadata.label_divisor``.
        metadata (Metadata or None): only required when ``segments_info`` is None.
    """
    def __init__(self, panoptic_seg, segments_info, metadata=None):
        if segments_info is None:
            assert metadata is not None
            # If "segments_info" is None, we assume "panoptic_img" is a
            # H*W int32 image storing the panoptic_id in the format of
            # category_id * label_divisor + instance_id. We reserve -1 for
            # VOID label.
            label_divisor = metadata.label_divisor
            segments_info = []
            for panoptic_label in np.unique(panoptic_seg.numpy()):
                if panoptic_label == -1:
                    # VOID region.
                    continue
                pred_class = panoptic_label // label_divisor
                isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
                segments_info.append(
                    {
                        "id": int(panoptic_label),
                        "category_id": int(pred_class),
                        "isthing": bool(isthing),
                    }
                )
        del metadata
        self._seg = panoptic_seg
        self._sinfo = {s["id"]: s for s in segments_info}  # seg id -> seg info
        segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
        areas = areas.numpy()
        # sort segments by decreasing area so larger ones are drawn first
        sorted_idxs = np.argsort(-areas)
        self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
        self._seg_ids = self._seg_ids.tolist()
        for sid, area in zip(self._seg_ids, self._seg_areas):
            if sid in self._sinfo:
                self._sinfo[sid]["area"] = float(area)
    def non_empty_mask(self):
        """
        Returns:
            (H, W) array, a mask for all pixels that have a prediction
        """
        # segment ids present in the image but absent from segments_info are
        # "no prediction" (e.g. id 0 in PanopticFPN)
        empty_ids = []
        for sid in self._seg_ids:  # don't shadow the builtin `id`
            if sid not in self._sinfo:
                empty_ids.append(sid)
        if len(empty_ids) == 0:
            return np.zeros(self._seg.shape, dtype=np.uint8)
        assert (
            len(empty_ids) == 1
        ), ">1 ids corresponds to no labels. This is currently not supported"
        # `np.bool` was removed in NumPy 1.24; use the builtin `bool` dtype
        return (self._seg != empty_ids[0]).numpy().astype(bool)
    def semantic_masks(self):
        """Yield (binary mask, segment info) for every "stuff" segment."""
        for sid in self._seg_ids:
            sinfo = self._sinfo.get(sid)
            if sinfo is None or sinfo["isthing"]:
                # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
                continue
            yield (self._seg == sid).numpy().astype(bool), sinfo
    def instance_masks(self):
        """Yield (binary mask, segment info) for every non-empty "thing" segment."""
        for sid in self._seg_ids:
            sinfo = self._sinfo.get(sid)
            if sinfo is None or not sinfo["isthing"]:
                continue
            mask = (self._seg == sid).numpy().astype(bool)
            if mask.sum() > 0:
                yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
    """
    Compose per-instance text labels out of class ids, scores and crowd flags.
    Args:
        classes (list[int] or None):
        scores (list[float] or None):
        class_names (list[str] or None):
        is_crowd (list[bool] or None):
    Returns:
        list[str] or None
    """
    labels = None
    if classes is not None:
        # prefer human-readable names when available, else the raw class id
        use_names = class_names is not None and len(class_names) > 0
        labels = [class_names[c] if use_names else str(c) for c in classes]
    if scores is not None:
        if labels is None:
            labels = ["{:.0f}%".format(s * 100) for s in scores]
        else:
            labels = ["{} {:.0f}%".format(name, s * 100) for name, s in zip(labels, scores)]
    if labels is not None and is_crowd is not None:
        # mark crowd annotations so they are distinguishable in the drawing
        labels = [name + ("|crowd" if crowd else "") for name, crowd in zip(labels, is_crowd)]
    return labels
class VisImage:
    """
    Wraps an RGB image in a frameless matplotlib figure so primitives
    (patches, text, lines) can be drawn on top and the result exported
    as a uint8 RGB array or saved to disk.
    """
    def __init__(self, img, scale=1.0):
        """
        Args:
            img (ndarray): an RGB image of shape (H, W, 3).
            scale (float): scale the input image
        """
        self.img = img
        self.scale = scale
        self.width, self.height = img.shape[1], img.shape[0]
        self._setup_figure(img)
    def _setup_figure(self, img):
        """
        Args:
            Same as in :meth:`__init__()`.
        Returns:
            fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
            ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
        """
        fig = mplfigure.Figure(frameon=False)
        self.dpi = fig.get_dpi()
        # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
        # (https://github.com/matplotlib/matplotlib/issues/15363)
        fig.set_size_inches(
            (self.width * self.scale + 1e-2) / self.dpi,
            (self.height * self.scale + 1e-2) / self.dpi,
        )
        self.canvas = FigureCanvasAgg(fig)
        # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
        # axes fill the whole figure: no margins, ticks, or frame
        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
        ax.axis("off")
        # Need to imshow this first so that other patches can be drawn on top
        ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
        self.fig = fig
        self.ax = ax
    def save(self, filepath):
        """
        Args:
            filepath (str): a string that contains the absolute path, including the file name, where
                the visualized image will be saved.
        """
        self.fig.savefig(filepath)
    def get_image(self):
        """
        Returns:
            ndarray:
                the visualized image of shape (H, W, 3) (RGB) in uint8 type.
                The shape is scaled w.r.t the input image using the given `scale` argument.
        """
        canvas = self.canvas
        s, (width, height) = canvas.print_to_buffer()
        # buf = io.BytesIO()  # works for cairo backend
        # canvas.print_rgba(buf)
        # width, height = self.width, self.height
        # s = buf.getvalue()
        buffer = np.frombuffer(s, dtype="uint8")
        # the Agg buffer is RGBA; drop the alpha channel before returning
        img_rgba = buffer.reshape(height, width, 4)
        rgb, alpha = np.split(img_rgba, [3], axis=2)
        return rgb.astype("uint8")
class Visualizer:
"""
Visualizer that draws data about detection/segmentation on images.
It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
that draw primitive objects to images, as well as high-level wrappers like
`draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
that draw composite data in some pre-defined style.
Note that the exact visualization style for the high-level wrappers are subject to change.
Style such as color, opacity, label contents, visibility of labels, or even the visibility
of objects themselves (e.g. when the object is too small) may change according
to different heuristics, as long as the results still look visually reasonable.
To obtain a consistent style, you can implement custom drawing functions with the
abovementioned primitive methods instead. If you need more customized visualization
styles, you can process the data yourself following their format documented in
tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
intend to satisfy everyone's preference on drawing styles.
This visualizer focuses on high rendering quality rather than performance. It is not
designed to be used for real-time applications.
"""
# TODO implement a fast, rasterized version using OpenCV
    def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
        """
        Args:
            img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
                the height and width of the image respectively. C is the number of
                color channels. The image is required to be in RGB format since that
                is a requirement of the Matplotlib library. The image is also expected
                to be in the range [0, 255].
            metadata (Metadata): dataset metadata (e.g. class names and colors)
            scale (float): scale the visualization w.r.t the input image.
            instance_mode (ColorMode): defines one of the pre-defined style for drawing
                instances on an image.
        """
        self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
        if metadata is None:
            # fall back to an empty metadata object so `.get(...)` lookups still work
            metadata = MetadataCatalog.get("__nonexist__")
        self.metadata = metadata
        self.output = VisImage(self.img, scale=scale)
        self.cpu_device = torch.device("cpu")
        # too small texts are useless; enforce a minimum font size of 10 // scale
        self._default_font_size = max(
            np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
        )
        self._instance_mode = instance_mode
    def draw_instance_predictions(self, predictions):
        """
        Draw instance-level prediction results on an image.
        Args:
            predictions (Instances): the output of an instance detection/segmentation
                model. Following fields will be used to draw:
                "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
        Returns:
            output (VisImage): image object with visualizations.
        """
        # every field is optional; missing fields are simply not drawn
        boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
        labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
        keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
        if predictions.has("pred_masks"):
            masks = np.asarray(predictions.pred_masks)
            masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
        else:
            masks = None
        if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
            # SEGMENTATION mode: color each instance near its category color
            # (jittered to tell instances apart) and use higher opacity
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
            ]
            alpha = 0.8
        else:
            colors = None
            alpha = 0.5
        if self._instance_mode == ColorMode.IMAGE_BW:
            # IMAGE_BW mode: gray out everything outside the predicted masks
            self.output.img = self._create_grayscale_image(
                (predictions.pred_masks.any(dim=0) > 0).numpy()
                if predictions.has("pred_masks")
                else None
            )
            alpha = 0.3
        self.overlay_instances(
            masks=masks,
            boxes=boxes,
            labels=labels,
            keypoints=keypoints,
            assigned_colors=colors,
            alpha=alpha,
        )
        return self.output
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
"""
Draw semantic segmentation predictions/labels.
Args:
sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
Each value is the integer label of the pixel.
area_threshold (int): segments with less than `area_threshold` are not drawn.
alpha (float): the larger it is, the more opaque the segmentations are.
Returns:
output (VisImage): image object with visualizations.
"""
if isinstance(sem_seg, torch.Tensor):
sem_seg = sem_seg.numpy()
labels, areas = np.unique(sem_seg, return_counts=True)
sorted_idxs = np.argsort(-areas).tolist()
labels = labels[sorted_idxs]
for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
except (AttributeError, IndexError):
mask_color = None
binary_mask = (sem_seg == label).astype(np.uint8)
text = self.metadata.stuff_classes[label]
self.draw_binary_mask(
binary_mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
return self.output
    def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
        """
        Draw panoptic prediction annotations or results.
        Args:
            panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
                segment.
            segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
                If it is a ``list[dict]``, each dict contains keys "id", "category_id".
                If None, category id of each pixel is computed by
                ``pixel // metadata.label_divisor``.
            area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the segmentations are.
        Returns:
            output (VisImage): image object with visualizations.
        """
        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
        if self._instance_mode == ColorMode.IMAGE_BW:
            # gray out pixels that have no prediction at all
            self.output.img = self._create_grayscale_image(pred.non_empty_mask())
        # draw mask for all semantic segments first i.e. "stuff"
        for mask, sinfo in pred.semantic_masks():
            category_idx = sinfo["category_id"]
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
            except AttributeError:
                # metadata has no stuff_colors; draw_binary_mask will pick a random color
                mask_color = None
            text = self.metadata.stuff_classes[category_idx]
            self.draw_binary_mask(
                mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )
        # draw mask for all instances second
        all_instances = list(pred.instance_masks())
        if len(all_instances) == 0:
            return self.output
        masks, sinfo = list(zip(*all_instances))
        category_ids = [x["category_id"] for x in sinfo]
        try:
            scores = [x["score"] for x in sinfo]
        except KeyError:
            # ground-truth segments carry no "score" field
            scores = None
        labels = _create_text_labels(
            category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
        )
        try:
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
            ]
        except AttributeError:
            colors = None
        self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
        return self.output
    draw_panoptic_seg_predictions = draw_panoptic_seg  # backward compatibility
    def draw_dataset_dict(self, dic):
        """
        Draw annotations/segmentaions in Detectron2 Dataset format.
        Args:
            dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
        Returns:
            output (VisImage): image object with visualizations.
        """
        annos = dic.get("annotations", None)
        if annos:
            if "segmentation" in annos[0]:
                masks = [x["segmentation"] for x in annos]
            else:
                masks = None
            if "keypoints" in annos[0]:
                keypts = [x["keypoints"] for x in annos]
                # keypoints are stored flat as [x0, y0, v0, x1, y1, v1, ...]
                keypts = np.array(keypts).reshape(len(annos), -1, 3)
            else:
                keypts = None
            # length-4 boxes are converted to XYXY_ABS; rotated (length-5)
            # boxes are passed through unchanged
            boxes = [
                BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
                if len(x["bbox"]) == 4
                else x["bbox"]
                for x in annos
            ]
            colors = None
            category_ids = [x["category_id"] for x in annos]
            if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
                colors = [
                    self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
                    for c in category_ids
                ]
            names = self.metadata.get("thing_classes", None)
            labels = _create_text_labels(
                category_ids,
                scores=None,
                class_names=names,
                is_crowd=[x.get("iscrowd", 0) for x in annos],
            )
            self.overlay_instances(
                labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
            )
        sem_seg = dic.get("sem_seg", None)
        if sem_seg is None and "sem_seg_file_name" in dic:
            # lazily load the semantic segmentation from disk when only a path is given
            with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
                sem_seg = Image.open(f)
                sem_seg = np.asarray(sem_seg, dtype="uint8")
        if sem_seg is not None:
            self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
        pan_seg = dic.get("pan_seg", None)
        if pan_seg is None and "pan_seg_file_name" in dic:
            with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
                pan_seg = Image.open(f)
                pan_seg = np.asarray(pan_seg)
                # COCO panoptic PNGs encode the segment id across RGB channels
                from panopticapi.utils import rgb2id
                pan_seg = rgb2id(pan_seg)
        if pan_seg is not None:
            segments_info = dic["segments_info"]
            pan_seg = torch.tensor(pan_seg)
            self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
        return self.output
    def overlay_instances(
        self,
        *,
        boxes=None,
        labels=None,
        masks=None,
        keypoints=None,
        assigned_colors=None,
        alpha=0.5
    ):
        """
        Args:
            boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
                or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
                or a :class:`RotatedBoxes`,
                or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
                for the N objects in a single image,
            labels (list[str]): the text to be displayed for each instance.
            masks (masks-like object): Supported types are:
                * :class:`detectron2.structures.PolygonMasks`,
                  :class:`detectron2.structures.BitMasks`.
                * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
                  The first level of the list corresponds to individual instances. The second
                  level to all the polygon that compose the instance, and the third level
                  to the polygon coordinates. The third level should have the format of
                  [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
                * list[ndarray]: each ndarray is a binary mask of shape (H, W).
                * list[dict]: each dict is a COCO-style RLE.
            keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
                where the N is the number of instances and K is the number of keypoints.
                The last dimension corresponds to (x, y, visibility or score).
            assigned_colors (list[matplotlib.colors]): a list of colors, where each color
                corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
                for full list of formats that the colors are accepted in.
            alpha (float): mask blending coefficient; smaller is more transparent.
        Returns:
            output (VisImage): image object with visualizations.
        """
        # determine the number of instances from whichever input is given,
        # checking that all given inputs agree on it
        num_instances = 0
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if masks is not None:
            masks = self._convert_masks(masks)
            if num_instances:
                assert len(masks) == num_instances
            else:
                num_instances = len(masks)
        if keypoints is not None:
            if num_instances:
                assert len(keypoints) == num_instances
            else:
                num_instances = len(keypoints)
            keypoints = self._convert_keypoints(keypoints)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
        if num_instances == 0:
            return self.output
        if boxes is not None and boxes.shape[1] == 5:
            # Nx5 boxes are rotated boxes; delegate to the rotated-box path
            return self.overlay_rotated_instances(
                boxes=boxes, labels=labels, assigned_colors=assigned_colors
            )
        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
        elif masks is not None:
            areas = np.asarray([x.area() for x in masks])
        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            # All parallel lists must be permuted with the same index order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs] if labels is not None else None
            masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            keypoints = keypoints[sorted_idxs] if keypoints is not None else None
        for i in range(num_instances):
            color = assigned_colors[i]
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=color)
            if masks is not None:
                for segment in masks[i].polygons:
                    self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    text_pos = (x0, y0)  # if drawing boxes, put text on the box corner.
                    horiz_align = "left"
                elif masks is not None:
                    # skip small mask without polygon
                    if len(masks[i].polygons) == 0:
                        continue
                    x0, y0, x1, y1 = masks[i].bbox()
                    # draw text in the center (defined by median) when box is not drawn
                    # median is less sensitive to outliers.
                    text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
                    horiz_align = "center"
                else:
                    continue  # drawing the box confidence for keypoints isn't very useful.
                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (
                    instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                    or y1 - y0 < 40 * self.output.scale
                ):
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)
                # font size grows with the instance's relative height, clamped
                height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
                lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
                font_size = (
                    np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                    * 0.5
                    * self._default_font_size
                )
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                )
        # draw keypoints
        if keypoints is not None:
            for keypoints_per_instance in keypoints:
                self.draw_and_connect_keypoints(keypoints_per_instance)
        return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
    def draw_and_connect_keypoints(self, keypoints):
        """
        Draws keypoints of an instance and follows the rules for keypoint connections
        to draw lines between appropriate keypoints. This follows color heuristics for
        line color.
        Args:
            keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
                and the last dimension corresponds to (x, y, probability).
        Returns:
            output (VisImage): image object with visualizations.
        """
        # map of keypoint name -> (x, y), populated only for confident keypoints
        visible = {}
        keypoint_names = self.metadata.get("keypoint_names")
        for idx, keypoint in enumerate(keypoints):
            # draw keypoint
            x, y, prob = keypoint
            if prob > _KEYPOINT_THRESHOLD:
                self.draw_circle((x, y), color=_RED)
                if keypoint_names:
                    keypoint_name = keypoint_names[idx]
                    visible[keypoint_name] = (x, y)
        if self.metadata.get("keypoint_connection_rules"):
            # connect each pair of keypoints whose endpoints are both visible
            for kp0, kp1, color in self.metadata.keypoint_connection_rules:
                if kp0 in visible and kp1 in visible:
                    x0, y0 = visible[kp0]
                    x1, y1 = visible[kp1]
                    color = tuple(x / 255.0 for x in color)
                    self.draw_line([x0, x1], [y0, y1], color=color)
        # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
        # Note that this strategy is specific to person keypoints.
        # For other keypoints, it should just do nothing
        try:
            ls_x, ls_y = visible["left_shoulder"]
            rs_x, rs_y = visible["right_shoulder"]
            mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
        except KeyError:
            # shoulders not both visible: skip the torso lines entirely
            pass
        else:
            # draw line from nose to mid-shoulder
            nose_x, nose_y = visible.get("nose", (None, None))
            if nose_x is not None:
                self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
            try:
                # draw line from mid-shoulder to mid-hip
                lh_x, lh_y = visible["left_hip"]
                rh_x, rh_y = visible["right_hip"]
            except KeyError:
                pass
            else:
                mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
                self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
        return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
    def draw_rotated_box_with_label(
        self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
    ):
        """
        Draw a rotated box with label on its top-left corner.
        Args:
            rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
                where cnt_x and cnt_y are the center coordinates of the box.
                w and h are the width and height of the box. angle represents how
                many degrees the box is rotated CCW with regard to the 0-degree box.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.
            label (string): label for rotated box. It will not be rendered when set to None.
        Returns:
            output (VisImage): image object with box drawn.
        """
        cnt_x, cnt_y, w, h, angle = rotated_box
        area = w * h
        # use thinner lines when the box is small
        linewidth = self._default_font_size / (
            6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
        )
        theta = angle * math.pi / 180.0
        c = math.cos(theta)
        s = math.sin(theta)
        # corners of the unrotated box, relative to its center
        rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
        # x: left->right ; y: top->down
        # rotate each corner CCW about the center, then translate to (cnt_x, cnt_y)
        rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
        for k in range(4):
            j = (k + 1) % 4
            self.draw_line(
                [rotated_rect[k][0], rotated_rect[j][0]],
                [rotated_rect[k][1], rotated_rect[j][1]],
                color=edge_color,
                # dash one edge so the box orientation is visible
                linestyle="--" if k == 1 else line_style,
                linewidth=linewidth,
            )
        if label is not None:
            text_pos = rotated_rect[1]  # topleft corner
            height_ratio = h / np.sqrt(self.output.height * self.output.width)
            label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
            font_size = (
                np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
            )
            self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
        return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
    def draw_binary_mask(
        self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=0
    ):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
                W is the image width. Each value in the array is either a 0 or 1 value of uint8
                type.
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            text (str): if None, will be drawn in the object's center of mass.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component small than this will not be shown.
        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)
        has_valid_segment = False
        binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height, self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])
        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                # drop connected components smaller than area_threshold
                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < (area_threshold or 0):
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
        else:
            # masks with holes cannot be drawn as simple polygons:
            # overlay a semi-transparent RGBA image instead (no area filtering here)
            # TODO: Use Path/PathPatch to draw vector graphics:
            # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
            rgba = np.zeros(shape2d + (4,), dtype="float32")
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
        if text is not None and has_valid_segment:
            # TODO sometimes drawn on wrong objects. the heuristics here can improve.
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
            # component 0 is the background, hence the +1 / range starting at 1
            largest_component_id = np.argmax(stats[1:, -1]) + 1
            # draw text on the largest component, as well as other very large components.
            for cid in range(1, _num_cc):
                if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
                    # median is more stable than centroid
                    # center = centroids[largest_component_id]
                    center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
                    self.draw_text(text, center, color=lighter_color)
        return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
    """
    Return a color randomly perturbed from the given one.

    Args:
        color (tuple[double]): RGB triple with each component in [0.0, 1.0].

    Returns:
        jittered_color (tuple[double]): RGB triple in [0.0, 1.0], offset from
        the input color by a small random vector.
    """
    base = mplc.to_rgb(color)
    # NOTE: jittering directly in RGB; a perceptual color space would be better.
    offset = np.random.rand(3)
    offset = 0.5 * offset / np.linalg.norm(offset)
    return tuple(np.clip(offset + base, 0, 1))
def _create_grayscale_image(self, mask=None):
    """
    Build a grayscale copy of the original image.

    Args:
        mask: optional boolean index array; pixels selected by it keep their
            original colors instead of being desaturated.
    """
    gray = self.img.astype("f4").mean(axis=2)
    gray = np.stack((gray, gray, gray), axis=2)
    if mask is not None:
        gray[mask] = self.img[mask]
    return gray
def _change_color_brightness(self, color, brightness_factor):
    """
    Return a lighter or darker variant of ``color``, i.e. a color with
    less or more saturation than the original color.

    Args:
        color: color of the polygon. Refer to `matplotlib.colors` for a full
            list of formats that are accepted.
        brightness_factor (float): a value in [-1.0, 1.0] range. A factor of
            0 means no change, a factor in [-1.0, 0) darkens the color and a
            factor in (0, 1.0] lightens it.

    Returns:
        modified_color (tuple[double]): a tuple containing the RGB values of
        the modified color. Each value is in the [0.0, 1.0] range.
    """
    assert -1.0 <= brightness_factor <= 1.0
    # mplc.to_rgb already returns an RGB triple; the original converted twice.
    hue, lightness, saturation = colorsys.rgb_to_hls(*mplc.to_rgb(color))
    # Scale lightness by the factor and clamp into the valid [0, 1] range.
    lightness = min(1.0, max(0.0, lightness + brightness_factor * lightness))
    return colorsys.hls_to_rgb(hue, lightness, saturation)
def _convert_boxes(self, boxes):
    """
    Convert different formats of boxes to an NxB array, where B = 4 or 5
    is the box dimension.
    """
    if isinstance(boxes, (Boxes, RotatedBoxes)):
        # .cpu() makes this work for box tensors living on a CUDA device,
        # where .numpy() would otherwise raise.
        return boxes.tensor.detach().cpu().numpy()
    return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
    """
    Normalize masks/polygons given in various formats into GenericMask objects.

    Returns:
        list[GenericMask]:
    """
    items = masks_or_polygons
    if isinstance(items, PolygonMasks):
        items = items.polygons
    if isinstance(items, BitMasks):
        items = items.tensor.numpy()
    if isinstance(items, torch.Tensor):
        items = items.numpy()
    return [
        entry
        if isinstance(entry, GenericMask)
        else GenericMask(entry, self.output.height, self.output.width)
        for entry in items
    ]
def _convert_keypoints(self, keypoints):
    """Unwrap a Keypoints object (if given) and return a plain numpy array."""
    if isinstance(keypoints, Keypoints):
        keypoints = keypoints.tensor
    return np.asarray(keypoints)
def get_output(self):
    """
    Returns:
        output (VisImage): the image object holding every visualization
        added so far.
    """
    return self.output
|
from logging import captureWarnings
import unittest
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../app'))
from app import db, app
from models import *
from seeder import seeder
class BasicTest(unittest.TestCase):
    """CRUD smoke tests for the Customer model against the seeded database."""

    @classmethod
    def setUpClass(cls):
        # First argument of a classmethod is the class object; name it
        # `cls`, not `self`. No per-class setup is currently required.
        # print('-----setUp-----')
        pass

    # Re-run the seeder after this class finishes so the database is
    # restored to a known state for subsequent runs.
    @classmethod
    def tearDownClass(cls):
        print("---tearDown---")
        seeder()

    def test_get_costomers(self):
        """Load every Customer and walk down to all invoice/quotation items."""
        print('---Customer全件読み込み---')
        customers = Customer.query.all()
        c_count = len(customers)
        self.assertTrue(c_count)
        # Traverse Customer -> Invoice -> Invoice_Item.
        print('Customer→Invoice_Item全件取得')
        invoiceItemCount = 0
        quotationItemCount = 0
        for customer in customers:
            for invoice in customer.invoices:
                for invoiceItem in invoice.invoice_items:
                    invoiceItemCount += 1
        self.assertGreaterEqual(invoiceItemCount, 1)
        print('Customer→Quotation_Item全件取得')
        # Traverse Customer -> Quotation -> Quotation_Item.
        for customer in customers:
            for quotation in customer.quotations:
                for quotationItem in quotation.quotation_items:
                    quotationItemCount += 1
        self.assertGreaterEqual(quotationItemCount, 1)

    def test_get_customers_dict(self):
        """Serialize all Customers through CustomerSchema to dicts."""
        print('---Customer全件読み込み→Dict---')
        customers = Customer.query.all()
        sch = CustomerSchema(many=True).dump(customers)
        self.assertEqual(sch[0]['customerName'], '○○株式会社')

    def test_get_customer_byId(self):
        """Fetch one Customer by id; a missing id yields an empty result."""
        print('---Customer一件読み込み---')
        customer = Customer.query.filter(Customer.id == 1).first()
        self.assertTrue(customer)
        self.assertEqual(customer.customerName, '○○株式会社')
        print('---Customer一件読み込み失敗---')
        customers = Customer.query.filter(Customer.id == 9999).all()
        self.assertFalse(customers)
        self.assertEqual(len(customers), 0)

    def test_update_customer(self):
        """Update a Customer's name and read it back."""
        print('---Customer一件更新---')
        customer = Customer.query.filter(Customer.id == 2).first()
        customer.customerName = 'テスト株式会社'
        db.session.commit()
        customer = Customer.query.filter(Customer.id == 2).first()
        self.assertEqual(customer.customerName, "テスト株式会社")

    def test_create_customer(self):
        """Insert two new Customers in a single commit."""
        print('---Customer新規作成---')
        customers = [
            Customer(customerName='テストクリエイト株式会社', honorificTitle='御中', postNumber='000-0000', address='鹿沼市板荷000', telNumber='000-0000-0000',
                     faxNumber='000-0000-0000', url='example.com', email='example@co.jp', manager='田中太郎', representative='田中代表', memo='これは○○株式会社のメモです'),
            Customer(customerName='テストクリエイト株式会社2', honorificTitle='御中', postNumber='000-0000', address='鹿沼市板荷000', telNumber='000-0000-0000',
                     faxNumber='000-0000-0000', url='example.com', email='example@co.jp', manager='田中太郎', representative='田中代表', memo='これは○○株式会社のメモです'),
        ]
        db.session.add_all(customers)
        db.session.commit()
        self.assertGreaterEqual(len(Customer.query.all()), 2)

    def test_delete_customer(self):
        """Create a Customer, delete it by id, and verify it is gone."""
        print('---Customer一件削除---')
        customer = Customer(customerName='デリートテスト会社', honorificTitle='御中', postNumber='000-0000', address='鹿沼市板荷000', telNumber='000-0000-0000',
                            faxNumber='000-0000-0000', url='example.com', email='example@co.jp', manager='田中太郎', representative='田中代表', memo='これは○○株式会社のメモです')
        db.session.add(customer)
        db.session.commit()
        newId = customer.id
        customer = Customer.query.filter(Customer.id == newId).delete()
        db.session.commit()
        customer = Customer.query.filter(Customer.id == newId).all()
        self.assertEqual(len(customer), 0)
if __name__ == '__main__':
unittest.main()
|
def search(re, chars):
    """Given a regular expression and an iterator of chars, return True if
    re matches some prefix of ''.join(chars); but only consume chars
    up to the end of the match."""
    states = set([re])
    # A nullable state already matches a (possibly empty) prefix. Check
    # before consuming any input, so e.g. search(empty, 'abc') is True:
    # the original code only recognized the bare `empty` state, missing
    # the empty-prefix match and nullable alternatives like either(empty, r).
    if any(nullable(state) for state in states):
        return True
    for ch in chars:
        states = set(sum((after(ch, state) for state in states), []))
        if any(nullable(state) for state in states):
            return True
    return False

def nullable(re):
    """Return True iff re matches the empty string."""
    tag, r, s = re
    if tag == 'empty': return True
    elif tag == 'literal': return False
    elif tag == 'chain': return nullable(r) and nullable(s)
    elif tag == 'either': return nullable(r) or nullable(s)
    else: assert False

def after(ch, re):
    """Imagine all strings starting with ch that re matches; return a list
    of regexes that among them match the remainders of those strings. (For
    example, say ch is 'c', and re matches 'x', 'ca', 'cat', and 'cow': then
    a result of [q,r,s] would be correct if q|r|s matches 'a', 'at', and 'ow'.)
    This is called the Antimirov derivative."""
    tag, r, s = re
    if tag == 'empty': return []
    elif tag == 'literal': return [empty] if r == ch else []
    elif tag == 'chain': return [chain(r_rest, s) for r_rest in after(ch, r)]
    elif tag == 'either': return after(ch, r) + after(ch, s)
    else: assert False

# Regular-expression constructors; the re above is built by these.
empty = ('empty', None, None)
def literal(char): return ('literal', char, None)
def chain(r, s): return s if r is empty else ('chain', r, s)
def either(r, s): return ('either', r, s)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyAuditPolicyRequest(RpcRequest):
    """Request object for the Dds ``ModifyAuditPolicy`` API (version 2015-12-01).

    Each accessor pair below reads or writes a single query parameter on
    the underlying RpcRequest.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Dds', '2015-12-01', 'ModifyAuditPolicy', 'Dds')

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_AuditStatus(self):
        return self.get_query_params().get('AuditStatus')

    def set_AuditStatus(self, AuditStatus):
        self.add_query_param('AuditStatus', AuditStatus)

    def get_StoragePeriod(self):
        return self.get_query_params().get('StoragePeriod')

    def set_StoragePeriod(self, StoragePeriod):
        self.add_query_param('StoragePeriod', StoragePeriod)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_DBInstanceId(self):
        return self.get_query_params().get('DBInstanceId')

    def set_DBInstanceId(self, DBInstanceId):
        self.add_query_param('DBInstanceId', DBInstanceId)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
|
import os
import torch
import numpy as np
from utils.loggers import Logger
class Exp_Basic(object):
    """Base class for experiments: owns the args, logger, device, and model.

    Subclasses must implement :meth:`_build_model`; the remaining hooks
    (:meth:`_get_data`, :meth:`vali`, :meth:`train`, :meth:`test`) are
    optional no-ops to be overridden as needed.
    """

    def __init__(self, args, logger: Logger):
        self.args = args
        self.logger = logger
        self.device = self._acquire_device()
        self.model = self._build_model().to(self.device)

    def _build_model(self):
        # Subclass responsibility; must return a torch.nn.Module.
        # (The original had an unreachable `return None` after the raise.)
        raise NotImplementedError

    def _acquire_device(self):
        """Pick the torch device based on ``args`` and set CUDA visibility."""
        if self.args.use_gpu:
            # Restrict visible GPUs before creating the device handle.
            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
            device = torch.device('cuda:{}'.format(self.args.gpu))
            print('Use GPU: cuda:{}'.format(self.args.gpu))
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

    def _get_data(self):
        pass

    def vali(self):
        pass

    def train(self):
        pass

    def test(self):
        pass
|
import typing as ty
from logging import getLogger
from xoto3.dynamodb.types import TableResource, ItemKey, Item
logger = getLogger(__name__)
def logged_update_item(
    Table: TableResource, Key: ItemKey, update_args: ty.Mapping[str, ty.Any]
) -> Item:
    """A logged wrapper for Table.update_item.

    Returns the updated item merged with its key when ``ReturnValues`` was
    requested, otherwise an empty dict. On failure, logs the arguments,
    attaches them to the exception, and re-raises.
    """
    try:
        dyn_resp = Table.update_item(**update_args)
        if update_args.get("ReturnValues", "NONE") != "NONE":
            return make_item_dict_from_updateItem_response(Key, dyn_resp)
        return {}
    except Exception as e:
        # verbose logging if an error occurs
        logger.info("UpdateItem arguments", extra=dict(json=dict(update_args)))
        e.update_item_arguments = update_args  # type: ignore
        # Bare `raise` re-raises with the original traceback intact.
        raise
def make_item_dict_from_updateItem_response(item_key: ItemKey, update_resp: dict) -> dict:
    """Merge the item's key with the Attributes returned by update_item.

    Simple utility for callers that want the full object back, which is a
    common pattern.
    """
    merged = dict(item_key)
    merged.update(update_resp["Attributes"])
    return merged
|
import logging
log = logging.getLogger("wfcli")
class NodeStore:
    """A shallow wrapper around a dict of nodes keyed by uuid.

    Unlike a plain dict it exposes a ``digest`` summarizing the whole
    store; History keeps these objects in their entirety.
    """

    def __init__(self):
        self.nodes = {}

    def __eq__(self, other_nds):
        # Equal iff the digests agree. Narrowed from a blanket
        # `except Exception`: only "no digest attribute" or an
        # unhashable digest should mean "not equal".
        try:
            return self.digest == other_nds.digest
        except (AttributeError, TypeError):
            return False

    def get_node(self, node_id):
        return self.nodes[node_id]

    def add_node(self, node):
        self.nodes[node.uuid] = node

    def __contains__(self, id):
        return id in self.nodes

    def __delitem__(self, id):
        del self.nodes[id]

    def __len__(self):
        return len(self.nodes)

    @property
    def digest(self):
        # Order-independent hash over (uuid, node-digest) pairs.
        pairs = frozenset((key, node.digest) for key, node in self.nodes.items())
        return hash(pairs)

    @property
    def flat_format(self):
        # Only the values are needed; don't unpack unused keys (PERF102).
        return [node.flat_format for node in self.nodes.values()]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AssociationsOperations(object):
    """AssociationsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.customproviders.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _create_or_update_initial(
        self,
        scope,  # type: str
        association_name,  # type: str
        association,  # type: "_models.Association"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Association"
        """Send the initial (non-polling) PUT for create-or-update.

        Raises HttpResponseError on any status other than 200/201.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Association"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-09-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: the scope is itself a resource path and must not be URL-encoded.
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(association, 'Association')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) responses carry an Association body.
        if response.status_code == 200:
            deserialized = self._deserialize('Association', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Association', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}'}  # type: ignore

    def begin_create_or_update(
        self,
        scope,  # type: str
        association_name,  # type: str
        association,  # type: "_models.Association"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Association"]
        """Create or update an association.

        :param scope: The scope of the association. The scope can be any valid REST resource instance.
         For example, use '/subscriptions/{subscription-id}/resourceGroups/{resource-group-
         name}/providers/Microsoft.Compute/virtualMachines/{vm-name}' for a virtual machine resource.
        :type scope: str
        :param association_name: The name of the association.
        :type association_name: str
        :param association: The parameters required to create or update an association.
        :type association: ~azure.mgmt.customproviders.models.Association
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Association or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.customproviders.models.Association]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Association"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT, then poll on its response.
            raw_result = self._create_or_update_initial(
                scope=scope,
                association_name=association_name,
                association=association,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polling response into the model type.
            deserialized = self._deserialize('Association', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-saved poller.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}'}  # type: ignore

    def _delete_initial(
        self,
        scope,  # type: str
        association_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial (non-polling) DELETE request.

        Raises HttpResponseError on any status other than 200/202/204.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-09-01-preview"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}'}  # type: ignore

    def begin_delete(
        self,
        scope,  # type: str
        association_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Delete an association.

        :param scope: The scope of the association.
        :type scope: str
        :param association_name: The name of the association.
        :type association_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial DELETE, then poll on its response.
            raw_result = self._delete_initial(
                scope=scope,
                association_name=association_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the custom callback, if any.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-saved poller.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}'}  # type: ignore

    def get(
        self,
        scope,  # type: str
        association_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Association"
        """Get an association.

        :param scope: The scope of the association.
        :type scope: str
        :param association_name: The name of the association.
        :type association_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Association, or the result of cls(response)
        :rtype: ~azure.mgmt.customproviders.models.Association
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Association"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-09-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Association', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}'}  # type: ignore

    def list_all(
        self,
        scope,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.AssociationsList"]
        """Gets all association for the given scope.

        :param scope: The scope of the association.
        :type scope: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AssociationsList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.customproviders.models.AssociationsList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AssociationsList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-09-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build a GET for the first page, or follow the server-supplied next link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string; send it verbatim.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Turn one page of results into (next-page link, iterator of items).
            deserialized = self._deserialize('AssociationsList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/{scope}/providers/Microsoft.CustomProviders/associations'}  # type: ignore
|
import json
import os
import zipfile
import shutil
from datetime import date
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.template import RequestContext, loader
from django.template.defaultfilters import slugify
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import ensure_csrf_cookie
from django.forms.models import modelformset_factory, modelform_factory
from django.forms import CheckboxSelectMultiple
from django.core.urlresolvers import reverse
from event_cal.models import InterviewShift
from electees.models import (
ElecteeGroup,
ElecteeGroupEvent,
ElecteeResource,
EducationalBackgroundForm,
ElecteeInterviewSurvey,
SurveyPart,
SurveyQuestion,
SurveyAnswer,
ElecteeInterviewFollowup,
ElecteeProcessVisibility
)
from mig_main.models import MemberProfile, AcademicTerm
from mig_main.utility import (
Permissions,
get_previous_page,
get_message_dict,
zipdir
)
from member_resources.views import get_permissions as get_member_permissions
from history.models import Officer
from electees.forms import (
get_unassigned_electees,
InstituteFormset,
BaseElecteeGroupForm,
AddSurveyQuestionsForm,
ElecteeSurveyForm,
InterviewFollowupForm,
ManualElecteeGroupMembersFormSet
)
from requirements.models import EventCategory, ProgressItem
from django.conf import settings
def ELECTEE_RESUME_LOCATION():
    """Directory under MEDIA_ROOT where electee resumes are staged.

    Kept as a callable (not a module constant) so settings.MEDIA_ROOT is
    read lazily at call time; a def replaces the original lambda
    assignment (PEP 8 E731) without changing the call sites.
    """
    return os.path.sep.join([settings.MEDIA_ROOT, 'electee_resumes'])
def compile_electee_resumes():
    """Rebuild the on-disk tree of electee resumes, grouped by standing.

    Clears ELECTEE_RESUME_LOCATION() and repopulates it with a copy of each
    electee's resume PDF, filed under a slugified standing-name directory.
    """
    try:
        shutil.rmtree(ELECTEE_RESUME_LOCATION())
    except OSError:
        # Directory may not exist yet; it is recreated below.
        pass
    # Resume .url paths appear to be rooted two directory levels above
    # MEDIA_ROOT -- TODO confirm this matches the project's MEDIA_ROOT /
    # MEDIA_URL layout before changing either setting.
    media_parent = '/'.join(settings.MEDIA_ROOT.split('/')[:-2])+'/'
    os.makedirs(ELECTEE_RESUME_LOCATION())
    electees = MemberProfile.get_electees()
    for electee in electees:
        if electee.resume:
            # One subdirectory per academic standing (e.g. undergrad/grad).
            standing_dir = os.path.sep.join([ELECTEE_RESUME_LOCATION(),slugify(electee.standing.name)])
            if not os.path.exists(standing_dir):
                os.makedirs(standing_dir)
            # File name: <last>_<first>_<uniqname>.pdf, slugified.
            resume_name=slugify(electee.last_name+'_'+electee.first_name+'_'+electee.uniqname)+'.pdf'
            shutil.copy(media_parent+electee.resume.url,os.path.sep.join([standing_dir,resume_name]))
def update_electee_resume_zips():
    """Regenerate the TBP_electee_resumes.zip archive under MEDIA_ROOT."""
    compile_electee_resumes()
    # zipdir archives paths relative to the current directory, so we
    # temporarily chdir into the resume tree and restore the cwd after.
    # NOTE(review): not safe if other threads depend on the process cwd.
    current_path = os.getcwd()
    zip_file_name = os.sep.join([settings.MEDIA_ROOT,'TBP_electee_resumes.zip'])
    try:
        os.remove(zip_file_name)
    except OSError:
        # No previous archive to remove.
        pass
    zip_f = zipfile.ZipFile(zip_file_name,'w')
    os.chdir(ELECTEE_RESUME_LOCATION())
    zipdir('.',zip_f)
    zip_f.close()
    os.chdir(current_path)
def can_submit_background_form(user):
    """Members only: superusers always may; otherwise only graduate electees."""
    if not user_is_member(user):
        return False
    if user.is_superuser:
        return True
    profile = user.userprofile.memberprofile
    is_graduate = profile.standing.name == 'Graduate'
    is_electee = profile.status.name == 'Electee'
    return is_graduate and is_electee
def user_is_member(user):
    """Return True iff the user has a profile that reports membership."""
    profile = getattr(user, 'userprofile', None)
    return bool(profile is not None and profile.is_member())
def get_permissions(user):
    """Extend the member permission dict with electee-app-specific flags."""
    perms = get_member_permissions(user)
    perms.update({
        'can_edit_resources': Permissions.can_manage_electee_progress(user),
        'can_edit_surveys': Permissions.can_manage_electee_progress(user),
        'can_complete_surveys': Permissions.can_complete_electee_survey(user),
        'can_submit_background_form': can_submit_background_form(user),
        'can_submit_interview_followups': (
            user_is_member(user)
            and user.userprofile.memberprofile.status.name == 'Active'
        ),
        'can_view_interview_pairings': Permissions.can_view_interview_pairings(user),
        'can_view_followups': Permissions.can_see_follow_up(user),
    })
    return perms
def get_common_context(request):
    """Base template context shared by all electee views."""
    context = get_message_dict(request)
    context.update(
        main_nav='members',
        request=request,
        subnav='electees',
        new_bootstrap=True,
    )
    return context
def view_electee_groups(request):
    """List electee teams plus current and archived packets/resources.

    Also remembers this URL in the session so later actions can redirect
    back here via get_previous_page().
    """
    request.session['current_page']=request.path
    e_groups = ElecteeGroup.objects.filter(term=AcademicTerm.get_current_term()).order_by('points')
    # Resources are split two ways: full packets vs. individual parts
    # (resource_type__is_packet), and current term vs. any other term.
    packets = ElecteeResource.objects.filter(term=AcademicTerm.get_current_term(),resource_type__is_packet=True).order_by('resource_type')
    resources = ElecteeResource.objects.filter(term=AcademicTerm.get_current_term(),resource_type__is_packet=False).order_by('resource_type')
    old_packets = ElecteeResource.objects.exclude(
        term=AcademicTerm.get_current_term()
    ).filter(resource_type__is_packet=True).order_by('resource_type','-term')
    old_resources = ElecteeResource.objects.exclude(
        term=AcademicTerm.get_current_term()
    ).filter(resource_type__is_packet=False).order_by('resource_type','-term')
    template = loader.get_template('electees/view_electee_groups.html')
    context_dict = {
        'groups':e_groups,
        'resources':resources,
        'old_resources':old_resources,
        'packets':packets,
        'old_packets':old_packets,
        # File name of the archive built by update_electee_resume_zips().
        'electee_resumes':'TBP_electee_resumes.zip',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def edit_electee_groups(request):
    """Formset view to create/update/delete this term's electee teams.

    Restricted to users who can manage electee progress. New teams are
    stamped with the current term and zero points before saving.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit electee groups'
        return redirect('electees:view_electee_groups')
    e_groups = ElecteeGroup.objects.filter(term=AcademicTerm.get_current_term())
    ElecteeGroupFormSet = modelformset_factory(ElecteeGroup,form =BaseElecteeGroupForm,can_delete=True)
    if request.method =='POST':
        formset = ElecteeGroupFormSet(request.POST,prefix='groups')
        if formset.is_valid():
            # commit=False so brand-new groups (no id yet) can be stamped
            # with term/points before their first save.
            instances=formset.save(commit=False)
            for obj in formset.deleted_objects:
                obj.delete()
            for instance in instances:
                if not instance.id:
                    instance.term = AcademicTerm.get_current_term()
                    instance.points = 0
                instance.save()
            # Required after save(commit=False) to persist m2m relations.
            formset.save_m2m()
            request.session['success_message']='Electee teams successfully updated'
            return redirect('electees:view_electee_groups')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors'
    else:
        formset = ElecteeGroupFormSet(queryset=e_groups,prefix='groups')
    template = loader.get_template('generic_formset.html')
    context_dict = {
        'formset':formset,
        'prefix':'groups',
        'subsubnav':'groups',
        'has_files':False,
        'submit_name':'Update Electee Teams',
        'form_title':'Update/Add/Remove Electee Teams',
        'help_text':'Create the electee teams for this semester, and specify the leaders and officers. You can also remove or edit here.',
        'can_add_row':True,
        'base':'electees/base_electees.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
@ensure_csrf_cookie
def edit_electee_group_membership(request):
    """Drag-and-drop editor for electee team membership.

    POST expects 'electee_groups': a JSON object mapping group id to a
    list of member uniqnames; each listed group's membership is replaced
    wholesale with the posted roster. GET (and POST) then renders the
    editor with current groups and still-unassigned electees.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit electee teams'
        return redirect('electees:view_electee_groups')
    if request.method =='POST':
        electee_groups_json=request.POST['electee_groups']
        electee_groups = json.loads(electee_groups_json)
        for group_id in electee_groups:
            members = electee_groups[group_id]
            group = ElecteeGroup.objects.get(id=group_id)
            # Replace membership wholesale with the posted roster.
            group.members.clear()
            for member in members:
                group.members.add(MemberProfile.objects.get(uniqname=member))
        request.session['success_message']='Your changes have been saved'
    e_groups = ElecteeGroup.objects.filter(term=AcademicTerm.get_current_term())
    template = loader.get_template('electees/edit_electee_group_membership.html')
    context_dict = {
        'electee_groups':e_groups,
        'unassigned_electees':get_unassigned_electees(),
        'subsubnav':'members',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def manually_edit_electee_group_membership(request):
    """Formset view to add members to electee teams for the current term.

    Intended for initial bulk addition; the drag-and-drop view
    (edit_electee_group_membership) is for subsequent edits. Restricted
    to users who can manage electee progress.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit electee teams'
        return redirect('electees:view_electee_groups')
    # NOTE: a dead local (an unused duplicate queryset of this term's
    # groups) was removed here; the formset builds its own queryset below.
    prefix = 'manual_groups'
    term =AcademicTerm.get_current_term()
    formset=ManualElecteeGroupMembersFormSet(request.POST or None,prefix=prefix,queryset=ElecteeGroup.objects.filter(term=term))
    if request.method=='POST':
        if formset.is_valid():
            formset.save()
            request.session['success_message']='Electee team membership updated successfully'
            return redirect('electees:view_electee_groups')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    template = loader.get_template('generic_formset.html')
    context_dict = {
        'formset':formset,
        'prefix':prefix,
        'subsubnav':'members',
        'has_files':False,
        'submit_name':'Update Electee Team Membership',
        'form_title':'Add Electee Team Members',
        'help_text':'Add members to electee teams. This is for initial addition only, for edits use the drag-and-drop interface.',
        'can_add_row':False,
        'base':'electees/base_electees.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def edit_electee_group_points(request):
    """Formset view for manually-awarded electee team points.

    Only rows with related_event_id=None are editable here: points tied
    to an actual event are computed elsewhere and must not be hand-edited.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit electee team points.'
        return redirect('electees:view_electee_groups')
    term =AcademicTerm.get_current_term()
    GroupPointsFormSet = modelformset_factory(ElecteeGroupEvent,exclude=('related_event_id',),can_delete=True)
    # Limit the group chooser to this term's teams.
    GroupPointsFormSet.form.base_fields['electee_group'].queryset=ElecteeGroup.objects.filter(term=term)
    if request.method =='POST':
        formset = GroupPointsFormSet(request.POST,prefix='group_points',queryset=ElecteeGroupEvent.objects.filter(related_event_id=None,electee_group__term=term))
        if formset.is_valid():
            formset.save()
            request.session['success_message']='Electee team points updated successfully'
            return redirect('electees:view_electee_groups')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    else:
        formset = GroupPointsFormSet(prefix='group_points',queryset=ElecteeGroupEvent.objects.filter(related_event_id=None,electee_group__term=term))
    template = loader.get_template('generic_formset.html')
    context_dict = {
        'formset':formset,
        'prefix':'group_points',
        'subsubnav':'points',
        'has_files':False,
        'submit_name':'Update Electee Team Points',
        'form_title':'Update/Add Remove Electee Team Points',
        'help_text':'Track the electee team points. You should not note any points from threshold participation at service or social events here. Those are tabulated automatically.',
        'can_add_row':True,
        'base':'electees/base_electees.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def submit_background_form(request):
    """Create or update a graduate electee's educational background form.

    The form has an inline formset of prior institutions/degrees. On a
    successful first submission a ProgressItem is recorded so the step
    counts toward electee requirements (only once per member/term).
    """
    if not can_submit_background_form(request.user):
        request.session['error_message']='You are not authorized to submit an educational background form.'
        return redirect('electees:view_electee_groups')
    BackgroundForm = modelform_factory(EducationalBackgroundForm,exclude=('member','term',))
    profile=request.user.userprofile.memberprofile
    term =AcademicTerm.get_current_term()
    existing_form = EducationalBackgroundForm.objects.filter(member=profile,term=term)
    # Re-edit the existing submission if there is one; otherwise bind the
    # form/formset to an unsaved instance stamped with member and term.
    if existing_form.exists():
        form = BackgroundForm(request.POST or None, prefix='background',instance=existing_form[0])
        formset= InstituteFormset(request.POST or None, prefix='institute',instance=existing_form[0])
    else:
        blank_form = EducationalBackgroundForm(member=request.user.userprofile.memberprofile,term=AcademicTerm.get_current_term())
        form = BackgroundForm(request.POST or None,prefix='background',instance=blank_form)
        formset= InstituteFormset(request.POST or None,prefix='institute',instance=blank_form)
    if request.method == 'POST':
        if form.is_valid():
            background_form = form.save(commit=False)
            # Force at least one institution row to be filled in.
            # NOTE(review): assumes the formset always renders >= 1 form
            # (extra >= 1); formset[0] would raise IndexError otherwise.
            formset[0].empty_permitted=False
            if formset.is_valid():
                background_form.save()
                form.save_m2m()
                formset.save()
                request.session['success_message']='Background form successfully submitted'
                existing_progress_background_form= ProgressItem.objects.filter(member=profile,term=term,event_type__name='Educational Background Form')
                if not existing_progress_background_form.exists():
                    p = ProgressItem(member=profile,term=term,amount_completed=1,date_completed=date.today(),name='Educational Background Form Completed')
                    p.event_type = EventCategory.objects.get(name='Educational Background Form')
                    p.save()
                return redirect('electees:view_electee_groups')
            else:
                request.session['error_message']='Either there were errors in your prior degrees or you forgot to include one.'
        else:
            request.session['error_message']='There were errors in the submitted form, please correct the errors noted below.'
    template = loader.get_template('electees/submit_education_form.html')
    # Collect the per-row date-field ids so the template can attach
    # datepicker widgets.
    dp_ids=[]
    for count in range(len(formset)):
        dp_ids.append('id_institute-%d-degree_start_date'%(count))
        dp_ids.append('id_institute-%d-degree_end_date'%(count))
    context_dict = {
        'form':form,
        'formset':formset,
        'prefix':'institute',
        'dp_ids':dp_ids,
        'dp_ids_dyn':['degree_start_date', 'degree_end_date'],
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def edit_electee_resources(request):
    """Formset view to manage this term's electee resources (with uploads).

    Saved instances are stamped with the current term before persisting.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit electee resources.'
        return redirect('electees:view_electee_groups')
    ResourceFormSet = modelformset_factory(ElecteeResource,exclude=('term',),can_delete=True)
    term =AcademicTerm.get_current_term()
    if request.method =='POST':
        # request.FILES is required here: resources carry file uploads.
        formset = ResourceFormSet(request.POST,request.FILES,prefix='resources',queryset=ElecteeResource.objects.filter(term=term))
        if formset.is_valid():
            # commit=False so every instance can be stamped with the term.
            instances=formset.save(commit=False)
            for obj in formset.deleted_objects:
                obj.delete()
            for instance in instances:
                instance.term=term
                instance.save()
            request.session['success_message']='Electee resources updated successfully'
            return redirect('electees:view_electee_groups')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    else:
        formset = ResourceFormSet(prefix='resources',queryset=ElecteeResource.objects.filter(term=term))
    template = loader.get_template('generic_formset.html')
    context_dict = {
        'formset':formset,
        'prefix':'resources',
        'has_files':True,
        'submit_name':'Update Electee Resources',
        'form_title':'Update/Add/Remove Electee Resources for %s'%(unicode(term)),
        'help_text':'These are the full packets and their constituent parts. If you need a part that isn\'t listed here, contact the web chair.',
        'can_add_row':True,
        'base':'electees/base_electees.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def manage_survey(request):
    """Landing page for electee interview survey administration.

    Reports which pieces of the survey machinery (survey object, parts,
    questions) already exist for the current term so the template can
    enable the right management links.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit the electee survey.'
        return redirect('electees:view_electee_groups')
    term = AcademicTerm.get_current_term()
    surveys_for_term = ElecteeInterviewSurvey.objects.filter(term=term)
    has_survey = surveys_for_term.exists()
    # A survey "has questions" only once it exists and has some attached.
    has_questions = surveys_for_term[0].questions.all().exists() if has_survey else False
    template = loader.get_template('electees/manage_survey.html')
    context_dict = {
        'survey_exists': has_survey,
        'parts_exist': SurveyPart.objects.all().exists(),
        'questions_exist': SurveyQuestion.objects.all().exists(),
        'survey_has_questions': has_questions,
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def edit_survey_for_term(request,term_id):
    """Create or edit the electee interview survey object for a term.

    The survey's questions are managed separately
    (add_survey_questions_for_term); this form covers the remaining
    fields (e.g. due date).
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit the electee survey.'
        return redirect('electees:view_electee_groups')
    SurveyForm = modelform_factory(ElecteeInterviewSurvey,exclude=('term','questions'))
    term = get_object_or_404(AcademicTerm,id=term_id)
    current_surveys = ElecteeInterviewSurvey.objects.filter(term = term)
    prefix='survey'
    # Edit the existing survey or bind to a fresh unsaved one for this term.
    if current_surveys.exists():
        current_survey=current_surveys[0]
        existed=True
    else:
        current_survey = ElecteeInterviewSurvey(term=term)
        existed = False
    if request.method =='POST':
        form = SurveyForm(request.POST,prefix=prefix,instance=current_survey)
        if form.is_valid():
            form.save()
            request.session['success_message']='Electee interview survey updated successfully'
            return redirect('electees:manage_survey')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    else:
        form = SurveyForm(prefix=prefix,instance=current_survey)
    template = loader.get_template('generic_form.html')
    verb = 'Update' if existed else 'Add'
    context_dict = {
        'form':form,
        'prefix':prefix,
        'has_files':False,
        'submit_name':'Update Electee Survey',
        'form_title':verb+' Electee Interview Survey for %s'%(unicode(term)),
        'help_text':'This is the meta survey object that will group the questions for a particular term.',
        'base':'electees/base_electees.html',
        'dp_ids':['id_survey-due_date'],
        'back_button':{'link':reverse('electees:manage_survey'),'text':'To Survey Manager'},
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def edit_survey(request):
    """Shortcut: edit the interview survey for the current academic term."""
    current_term = AcademicTerm.get_current_term()
    return redirect('electees:edit_survey_for_term', current_term.id)
def edit_survey_parts(request):
    """Formset view for the (term-independent) survey part definitions.

    Parts group questions; a part appears in a rendered survey only when
    at least one of its questions is attached to that survey.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit the electee survey.'
        return redirect('electees:view_electee_groups')
    SurveyPartFormSet = modelformset_factory(SurveyPart, exclude=[])
    prefix='surveyparts'
    if request.method =='POST':
        formset = SurveyPartFormSet(request.POST,prefix=prefix,queryset=SurveyPart.objects.all())
        if formset.is_valid():
            formset.save()
            request.session['success_message']='Electee interview survey parts updated successfully'
            return redirect('electees:manage_survey')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    else:
        formset = SurveyPartFormSet(prefix=prefix,queryset=SurveyPart.objects.all())
    template = loader.get_template('generic_formset.html')
    context_dict = {
        'formset':formset,
        'prefix':prefix,
        'has_files':False,
        'can_add_row':True,
        'submit_name':'Update Electee Survey Parts',
        'form_title':'Update Electee Interview Survey Parts',
        'help_text':'Add or edit the different parts of the survey. Questions will be associated with a particular part. Only those parts that have questions which appear in a given survey will be included in that survey. There should be no need to remove survey parts. If all questions in a part are required, leave that field blank.',
        'base':'electees/base_electees.html',
        'back_button':{'link':reverse('electees:manage_survey'),'text':'To Survey Manager'},
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def edit_survey_questions(request):
    """Formset view for the global pool of survey questions.

    Questions defined here only show up once attached to a term's survey
    via add_survey_questions_for_term.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit the electee survey.'
        return redirect('electees:view_electee_groups')
    SurveyQuestionFormSet = modelformset_factory(SurveyQuestion, exclude=[])
    prefix='surveyquestions'
    if request.method =='POST':
        formset = SurveyQuestionFormSet(request.POST,prefix=prefix,queryset=SurveyQuestion.objects.all())
        if formset.is_valid():
            formset.save()
            request.session['success_message']='Electee interview survey questions updated successfully'
            return redirect('electees:manage_survey')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    else:
        formset = SurveyQuestionFormSet(prefix=prefix,queryset=SurveyQuestion.objects.all())
    template = loader.get_template('generic_formset.html')
    context_dict = {
        'formset':formset,
        'prefix':prefix,
        'has_files':False,
        'can_add_row':True,
        'submit_name':'Update Electee Survey Questions',
        'form_title':'Update Electee Interview Survey Questions',
        'help_text':'Add or edit the different questions for the survey. Questions will only be displayed if they are added to the current survey. There should be no need to remove survey parts. If there is no word limit for a question, leave that field blank.',
        'base':'electees/base_electees.html',
        'back_button':{'link':reverse('electees:manage_survey'),'text':'To Survey Manager'},
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def add_survey_questions_for_term(request,term_id):
    """Attach questions from the global pool to a term's survey.

    404s when the survey object for the term does not exist yet (create
    it first via edit_survey_for_term).
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit the electee survey.'
        return redirect('electees:view_electee_groups')
    term = get_object_or_404(AcademicTerm,id=term_id)
    current_surveys = ElecteeInterviewSurvey.objects.filter(term = term)
    prefix='survey'
    if current_surveys.exists():
        current_survey=current_surveys[0]
        # NOTE: always True on the non-404 path, so verb is always 'Update'.
        existed=True
    else:
        raise Http404
    if request.method =='POST':
        form = AddSurveyQuestionsForm(request.POST,prefix=prefix,instance=current_survey)
        if form.is_valid():
            form.save()
            request.session['success_message']='Electee survey questions updated successfully'
            return redirect('electees:manage_survey')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    else:
        form = AddSurveyQuestionsForm(prefix=prefix,instance=current_survey)
    template = loader.get_template('generic_form.html')
    verb = 'Update' if existed else 'Add'
    context_dict = {
        'form':form,
        'prefix':prefix,
        'has_files':False,
        'submit_name':'Update Electee Survey Questions',
        'form_title':verb+' Electee Survey Questions for %s'%(unicode(term)),
        'help_text':'Add questions for the particular term\'s survey.',
        'base':'electees/base_electees.html',
        'back_button':{'link':reverse('electees:manage_survey'),'text':'To Survey Manager'},
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def add_survey_questions(request):
    """Shortcut: attach survey questions for the current academic term."""
    current_term = AcademicTerm.get_current_term()
    return redirect('electees:add_survey_questions_for_term', current_term.id)
def preview_survey_for_term(request,term_id):
    """Render a read-only preview of a term's electee interview survey.

    Only progress managers may preview; 404s when no survey exists for
    the requested term. (Removed a dead local flag that was assigned but
    never read.)
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to preview the electee survey.'
        return redirect('electees:view_electee_groups')
    term = get_object_or_404(AcademicTerm,id=term_id)
    current_surveys = ElecteeInterviewSurvey.objects.filter(term = term)
    if current_surveys.exists():
        current_survey=current_surveys[0]
    else:
        raise Http404
    template = loader.get_template('electees/preview_survey.html')
    context_dict = {
        # real_form=False: the template renders questions without inputs.
        'real_form':False,
        'questions':current_survey.questions.all(),
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def preview_survey(request):
    """Shortcut: preview the survey for the current academic term."""
    current_term = AcademicTerm.get_current_term()
    return redirect('electees:preview_survey_for_term', current_term.id)
def complete_survey_for_term(request,term_id):
    """Let an electee fill out (or revise) a term's interview survey.

    Answers are upserted per question: a non-blank answer updates the
    existing row or creates a new one; a blanked-out answer deletes any
    previously saved row. 404s when no survey exists for the term.

    Fixes: removed a stray debug ``print`` of cleaned_data left in the
    POST path, removed a dead local flag, and corrected the copy-pasted
    "preview" wording in the authorization error message.
    """
    if not Permissions.can_complete_electee_survey(request.user):
        request.session['error_message']='You are not authorized to complete the electee survey.'
        return redirect('electees:view_electee_groups')
    term = get_object_or_404(AcademicTerm,id=term_id)
    current_surveys = ElecteeInterviewSurvey.objects.filter(term = term)
    submitter=request.user.userprofile.memberprofile
    if current_surveys.exists():
        current_survey=current_surveys[0]
    else:
        raise Http404
    questions = current_survey.questions.all()
    if request.method =='POST':
        form = ElecteeSurveyForm(request.POST,questions=questions)
        if form.is_valid():
            for (question, answer) in form.get_answers():
                existing_answer = SurveyAnswer.objects.filter(term=term,submitter=submitter,question=question)
                if existing_answer.exists():
                    old_answer = existing_answer[0]
                    if len(answer):
                        old_answer.answer=answer
                        old_answer.save()
                    else:
                        # Blanking out an answer removes it entirely.
                        existing_answer.delete()
                else:
                    if len(answer):
                        new_answer = SurveyAnswer(term=term,submitter=submitter,answer=answer,question=question)
                        new_answer.save()
            request.session['success_message']='Electee survey updated successfully'
            return redirect('electees:view_electee_groups')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    else:
        # Pre-populate the form with any answers already on file.
        answers = SurveyAnswer.objects.filter(submitter=submitter,term=term,question__in=questions).distinct()
        form = ElecteeSurveyForm(questions=questions,answers=answers)
    template = loader.get_template('electees/complete_survey.html')
    context_dict = {
        'real_form':True,
        'form':form,
        'survey':current_survey,
        'questions':questions,
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def complete_survey(request):
    """Shortcut: complete the survey for the current academic term."""
    current_term = AcademicTerm.get_current_term()
    return redirect('electees:complete_survey_for_term', current_term.id)
def complete_interview_followup(request,interview_id):
    """Let an interviewer submit or revise their followup for an interview.

    Authorization is delegated to InterviewShift.user_can_followup (only
    interviewers, only once the interview has started). An existing
    followup by this member is edited in place; otherwise the form binds
    to a fresh unsaved instance.
    """
    interview = get_object_or_404(InterviewShift,id=interview_id)
    if not interview.user_can_followup(request.user):
        request.session['error_message']='Only interviewers may submit evaluations and only after the interview has started'
        return get_previous_page(request,alternate='electees:view_electee_groups')
    profile=request.user.userprofile.memberprofile
    previous_followup=ElecteeInterviewFollowup.objects.filter(interview=interview, member=profile)
    prefix='followup'
    if previous_followup.exists():
        verb = 'Update'
        form = InterviewFollowupForm(request.POST or None, prefix=prefix,instance=previous_followup[0])
    else:
        verb='Add'
        blank_form = ElecteeInterviewFollowup(interview=interview, member=profile)
        form = InterviewFollowupForm(request.POST or None,prefix=prefix,instance=blank_form)
    if request.method =='POST':
        if form.is_valid():
            form.save()
            request.session['success_message']='Electee interview followup updated successfully'
            return get_previous_page(request,alternate='electees:view_electee_groups')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    template = loader.get_template('generic_form.html')
    # Markdown-formatted guidance shown above the form (rendered by the
    # template); kept as a raw string so the markup passes through intact.
    help_text = r'''YOUR EVALUATION HERE IS ONE OF THE MOST IMPORTANT CRITERIA PERMITTING THE ELECTEE TO CONTINUE THE ELECTING PROCESS.
**Recommend**: You are confident that the electee has demonstrated exemplary character and would be a great member of Tau Beta Pi
**Not Sure**: This should only be selected in the extreme case, in which even after the interview you still have absolutely no idea whether or not the electee would be a good candidate.
We trust your judgment as TBP members, so please make a decision (Recommend or Not) if at all possible. Please explain this choice *in detail* so that we can better understand your decision.
**Do Not Recommend**: You are confident that the electee does not demonstrate exemplary character and would not be a good member of Tau Beta Pi. Please explain this choice *in detail* so that we can better understand your decision.
Remember the [eligibility code of the association](http://www.tbp.org/off/eligCode.cfm), particularly that "the fact that people may not have shown unselfish activity to an appreciable degree throughout their courses of study is no infallible indication that they would not if the opportunity offered."
###Submission of this form constitutes your signature that the information contained herein is an accurate representation of the interview conducted.
'''
    context_dict = {
        'form':form,
        'prefix':prefix,
        'has_files':False,
        'submit_name':verb+' Interview Followup',
        'form_title':verb+' Submit Interview Followup for Electee: '+','.join([unicode(user_profile) for user_profile in interview.interviewee_shift.attendees.all()])+'---'+interview.interviewee_shift.event.name[:-10],
        'help_text':help_text,
        'base':'electees/base_electees.html',
        'back_button':{'link':reverse('electees:view_electee_groups'),'text':'To Electee Resources'},
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def view_interview_follow_up(request,follow_up_id):
    """Display a single interview followup to an authorized viewer.

    Followups are only shown while the interviewee is still an electee.
    An interview shift with no recorded attendees is treated the same as
    an unauthorized request instead of crashing (previously
    ``attendees.all()[0]`` raised IndexError on an empty shift).
    """
    follow_up = get_object_or_404(ElecteeInterviewFollowup,id=follow_up_id)
    if not Permissions.can_see_follow_up(request.user):
        request.session['error_message']='You are not authorized to view this followup'
        return get_previous_page(request,alternate='electees:view_electee_groups')
    attendees = follow_up.interview.interviewee_shift.attendees.all()
    # Deny (rather than 500) when the shift has no attendees, or when the
    # first attendee is no longer an electee.
    if not attendees or not attendees[0].is_electee():
        request.session['error_message']='You are not authorized to view this followup'
        return get_previous_page(request,alternate='electees:view_electee_groups')
    template = loader.get_template('electees/interview_followup.html')
    context_dict = {
        'follow_up':follow_up,
        'base':'electees/base_electees.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def view_my_interview_forms(request):
    """List the current member's interviews with followup status/links.

    Only active members may access. Each entry records whether the
    followup can be submitted yet and whether one already exists.
    """
    if not user_is_member(request.user) or not request.user.userprofile.memberprofile.status.name=='Active':
        request.session['error_message']='Only active members can fill out interview followups'
        return get_previous_page(request,alternate='electees:view_electee_groups')
    userprofile =request.user.userprofile
    # Interviews this member conducted this term that actually had an
    # interviewee signed up.
    my_interviews = InterviewShift.objects.filter(term=AcademicTerm.get_current_term(),interviewer_shift__attendees__in=[userprofile]).exclude(interviewee_shift__attendees=None)
    unpacked_interviews =[]
    for interview in my_interviews:
        unpacked_interviews.append({'interview':interview,'enabled':interview.user_can_followup(request.user),'completed':ElecteeInterviewFollowup.objects.filter(interview=interview,member=userprofile.memberprofile).exists()})
    template = loader.get_template('electees/interview_forms.html')
    context_dict = {
        'interviews':unpacked_interviews,
        'back_button':{'link':reverse('electees:view_electee_groups'),'text':'To Electee Resources'},
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def edit_electee_process_visibility(request):
    """Edit which electee-process items are visible to all actives.

    A visibility row for the current term is created on demand.
    """
    if not Permissions.can_manage_electee_progress(request.user):
        request.session['error_message']='You are not authorized to edit the electee process visibility settings.'
        return redirect('electees:view_electee_groups')
    # get_or_create returns (object, created); the object is current_vis[0].
    current_vis = ElecteeProcessVisibility.objects.get_or_create(term=AcademicTerm.get_current_term())
    VisibilityForm = modelform_factory(ElecteeProcessVisibility,exclude=['term'])
    prefix='visibility'
    form = VisibilityForm(request.POST or None ,prefix=prefix,instance=current_vis[0])
    if request.method =='POST':
        if form.is_valid():
            form.save()
            request.session['success_message']='Electee settings updated successfully'
            return redirect('electees:manage_survey')
        else:
            request.session['error_message']='Form is invalid. Please correct the noted errors.'
    template = loader.get_template('generic_form.html')
    context_dict = {
        'form':form,
        'prefix':prefix,
        'has_files':False,
        'submit_name':'Update Visibility Settings',
        'form_title':'Update Electee Visibility Settings for %s'%(unicode(AcademicTerm.get_current_term())),
        'help_text':'Change whether certain electee items are visible to all actives.',
        'base':'electees/base_electees.html',
        'back_button':{'link':reverse('electees:manage_survey'),'text':'To Survey Manager'},
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
def view_interview_follow_up_table(request):
    """Summarize interview followups per electee, bucketed by severity.

    Buckets (first match wins): red = at least one 'N' recommendation,
    yellow = at least one 'M', green = has followups but none negative,
    blank = no followups yet. Recommendation code 'X' is excluded
    outright — presumably an abstain/blank marker; confirm against the
    ElecteeInterviewFollowup model.
    """
    if not Permissions.can_see_follow_up(request.user):
        request.session['error_message']='You are not authorized to view followups'
        return get_previous_page(request,alternate='electees:view_electee_groups')
    electees = MemberProfile.get_electees()
    green_electees=[]
    yellow_electees=[]
    red_electees=[]
    blank_electees=[]
    # Tracks the widest followup count so the table header spans enough
    # interviewer columns.
    num_followups=0
    for electee in electees:
        follow_ups = ElecteeInterviewFollowup.objects.filter(interview__interviewee_shift__attendees=electee).exclude(recommendation='X')
        num_followups=follow_ups.count() if follow_ups.count()>num_followups else num_followups
        num_red = follow_ups.filter(recommendation='N').count()
        num_yellow = follow_ups.filter(recommendation='M').count()
        if num_red:
            red_electees.append({'electee':electee,'followups':follow_ups})
        elif num_yellow:
            yellow_electees.append({'electee':electee,'followups':follow_ups})
        elif follow_ups.count():
            green_electees.append({'electee':electee,'followups':follow_ups})
        else:
            blank_electees.append({'electee':electee,'followups':follow_ups})
    template = loader.get_template('electees/interview_followup_table.html')
    interviewer_headers = ['Interviewer %d'%count for count in range(1,num_followups+1)]
    context_dict = {
        'interviewer_headers':interviewer_headers,
        'green_electees':green_electees,
        'yellow_electees':yellow_electees,
        'red_electees':red_electees,
        'blank_electees':blank_electees,
        'base':'electees/base_electees.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    return HttpResponse(template.render(context_dict, request))
|
def add_subject_pronoun(sentence, lemma, gender=None, is_plural=None, position=None):
    """Register a subject pronoun in *sentence* and tag its agreement features.

    Gender and number are inferred from the lemma when it is unambiguous;
    the *gender* / *is_plural* arguments act as fallbacks for ambiguous
    lemmas (e.g. "vous"). The word is appended to the token list, or
    inserted at *position* when given. Returns the registered word.
    """
    word = sentence.register_word(lemma)
    word.set_tag("pronoun")
    # Grammatical gender is unambiguous only for il/ils and elle/elles.
    if lemma in ("il", "ils"):
        word.set_tag("gender", value="masc")
    elif lemma in ("elle", "elles"):
        word.set_tag("gender", value="fem")
    elif gender is not None:
        word.set_tag("gender", value=gender)
    # Number inventories for the unambiguous lemmas.
    singular_lemmas = ("je", "tu", "il", "elle", "on", "ça", "cela", "ceci")
    plural_lemmas = ("nous", "ils", "elles")
    if lemma in singular_lemmas:
        word.set_tag("is_plural", value=False)
    elif lemma in plural_lemmas:
        word.set_tag("is_plural", value=True)
    elif is_plural is not None:
        word.set_tag("is_plural", value=is_plural)
    if position is None:
        sentence.tokens.append(word)
    else:
        sentence.tokens.insert(position, word)
    return word
def add_reflexive_pronoun(sentence, subject_id, position=None):
    """Insert the reflexive pronoun agreeing with an existing subject word.

    Looks up the subject by *subject_id* in ``sentence.words`` and picks
    the matching reflexive form (me/te/nous/vous, or "se" for all third
    person subjects). Returns the registered pronoun word, tagged as
    agreeing with *subject_id*, or None when the subject lemma has no
    known reflexive form (in which case the sentence is left untouched).
    """
    subject = sentence.words[subject_id]
    first_second_person = {'je': 'me', 'tu': 'te', 'nous': 'nous', 'vous': 'vous'}
    third_person = ("il", "elle", "on", "ça", "cela", "ceci", "ce", "ils", "elles")
    if subject.lemma in third_person:
        lemma = 'se'
    else:
        lemma = first_second_person.get(subject.lemma)
    if lemma is None:
        return None
    word = sentence.register_word(lemma)
    word.set_tag("pronoun")
    word.set_tag("agrees_with", subject_id)
    if position is None:
        sentence.tokens.append(word)
    else:
        sentence.tokens.insert(position, word)
    return word
|
# Demo script exercising attention modules on random tensors.
# Shapes here follow the (batch, dim) vector / (batch, seq_len, dim)
# matrix convention — presumably AllenNLP's Attention/MatrixAttention
# API; confirm against the imports at the top of the file.
embedding_dim1 = 8
embedding_dim2 = 16
sequence_length = 10
# Attention
# dot product attention only allows vector/matrix of the same size
vector = torch.rand((1, embedding_dim1,))
matrix = torch.rand((1, sequence_length, embedding_dim1))
attention = DotProductAttention()
output = attention(vector, matrix)
print('Output from DotProductAttention:', output)
# bilinear & linear attention allows inputs of different sizes
vector = torch.rand((1, embedding_dim1,))
matrix = torch.rand((1, sequence_length, embedding_dim2))
attention = BilinearAttention(vector_dim=embedding_dim1, matrix_dim=embedding_dim2)
output = attention(vector, matrix)
print('Output from BilinearAttention:', output)
# LinearAttention scores by a feed-forward over the chosen combination
# of the two inputs ('x,y' = plain concatenation), then the activation.
tanh = Activation.by_name('tanh')()
attention = LinearAttention(
    tensor_1_dim=embedding_dim1, tensor_2_dim=embedding_dim2,
    combination='x,y', activation=tanh)
output = attention(vector, matrix)
print('Output from LinearAttention:', output)
# MatrixAttention
sequence_length1 = 10
sequence_length2 = 15
# dot product attention only allows matrices of the same size
matrix1 = torch.rand((1, sequence_length1, embedding_dim1))
matrix2 = torch.rand((1, sequence_length2, embedding_dim1))
matrix_attention = DotProductMatrixAttention()
output = matrix_attention(matrix1, matrix2)
print('Output shape of DotProductMatrixAttention:', output.shape)
# bilinear & linear attention allows inputs of different sizes
matrix1 = torch.rand((1, sequence_length1, embedding_dim1))
matrix2 = torch.rand((1, sequence_length2, embedding_dim2))
matrix_attention = BilinearMatrixAttention(
    matrix_1_dim=embedding_dim1, matrix_2_dim=embedding_dim2)
output = matrix_attention(matrix1, matrix2)
print('Output shape of BilinearMatrixAttention:', output.shape)
# Reuses the tanh activation constructed above.
matrix_attention = LinearMatrixAttention(
    tensor_1_dim=embedding_dim1, tensor_2_dim=embedding_dim2,
    combination='x,y', activation=tanh)
output = matrix_attention(matrix1, matrix2)
print('Output shape of LinearMatrixAttention:', output.shape)
|
import numpy as np
import tqdm
from losses.dsm import dsm_score_estimation
import torch.nn.functional as F
import logging
import torch
import os
import shutil
import tensorboardX
import torch.optim as optim
from torchvision.datasets import MNIST, CIFAR10, FashionMNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Subset
from datasets.celeba import CelebA
from models.refinenet_dilated_baseline import RefineNetDilated
# Public API of this module.
__all__ = ['BaselineRunner']
class BaselineRunner():
    """Trains a RefineNet score network with denoising score matching (DSM)
    and draws samples from it with Langevin dynamics."""

    def __init__(self, args, config):
        # args: runtime options (run/log/image dirs, resume flag, ...).
        # config: experiment configuration with data/optim/training sections.
        self.args = args
        self.config = config

    def get_optimizer(self, parameters):
        """Build the optimizer named by config.optim.optimizer for `parameters`."""
        if self.config.optim.optimizer == 'Adam':
            return optim.Adam(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay,
                              betas=(self.config.optim.beta1, 0.999), amsgrad=self.config.optim.amsgrad)
        elif self.config.optim.optimizer == 'RMSProp':
            return optim.RMSprop(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay)
        elif self.config.optim.optimizer == 'SGD':
            return optim.SGD(parameters, lr=self.config.optim.lr, momentum=0.9)
        else:
            raise NotImplementedError('Optimizer {} not understood.'.format(self.config.optim.optimizer))

    def logit_transform(self, image, lam=1e-6):
        """Map pixel values from (0, 1) into logit space; lam bounds the
        input away from exactly 0/1 so the logs stay finite."""
        image = lam + (1 - 2 * lam) * image
        return torch.log(image) - torch.log1p(-image)

    def train(self):
        """Train the score network with DSM on the configured dataset,
        logging train/test losses to TensorBoard and writing checkpoints."""
        if self.config.data.random_flip is False:
            tran_transform = test_transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])
        else:
            tran_transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ToTensor()
            ])
            test_transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])
        # NOTE(review): an unrecognized dataset name falls through every branch
        # and raises NameError on `dataset` below — confirm this is intended.
        if self.config.data.dataset == 'CIFAR10':
            dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True,
                              transform=tran_transform)
            test_dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10_test'), train=False, download=True,
                                   transform=test_transform)
        elif self.config.data.dataset == 'MNIST':
            dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True,
                            transform=tran_transform)
            test_dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist_test'), train=False, download=True,
                                 transform=test_transform)
        elif self.config.data.dataset == 'CELEBA':
            if self.config.data.random_flip:
                dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train',
                                 transform=transforms.Compose([
                                     transforms.CenterCrop(140),
                                     transforms.Resize(self.config.data.image_size),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                 ]), download=True)
            else:
                dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train',
                                 transform=transforms.Compose([
                                     transforms.CenterCrop(140),
                                     transforms.Resize(self.config.data.image_size),
                                     transforms.ToTensor(),
                                 ]), download=True)
            test_dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba_test'), split='test',
                                  transform=transforms.Compose([
                                      transforms.CenterCrop(140),
                                      transforms.Resize(self.config.data.image_size),
                                      transforms.ToTensor(),
                                  ]), download=True)
        dataloader = DataLoader(dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4)
        test_loader = DataLoader(test_dataset, batch_size=self.config.training.batch_size, shuffle=True,
                                 num_workers=4, drop_last=True)
        test_iter = iter(test_loader)
        self.config.input_dim = self.config.data.image_size ** 2 * self.config.data.channels
        # Start each run with a fresh TensorBoard directory.
        tb_path = os.path.join(self.args.run, 'tensorboard', self.args.doc)
        if os.path.exists(tb_path):
            shutil.rmtree(tb_path)
        tb_logger = tensorboardX.SummaryWriter(log_dir=tb_path)
        score = RefineNetDilated(self.config).to(self.config.device)
        score = torch.nn.DataParallel(score)
        optimizer = self.get_optimizer(score.parameters())
        if self.args.resume_training:
            # states = [model_state, optimizer_state]; see checkpoint saving below.
            states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'))
            score.load_state_dict(states[0])
            optimizer.load_state_dict(states[1])
        step = 0
        for epoch in range(self.config.training.n_epochs):
            for i, (X, y) in enumerate(dataloader):
                step += 1
                score.train()
                X = X.to(self.config.device)
                # Uniform dequantization: map 8-bit pixels into continuous [0, 1).
                X = X / 256. * 255. + torch.rand_like(X) / 256.
                if self.config.data.logit_transform:
                    X = self.logit_transform(X)
                loss = dsm_score_estimation(score, X, sigma=0.01)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                tb_logger.add_scalar('loss', loss, global_step=step)
                logging.info("step: {}, loss: {}".format(step, loss.item()))
                if step >= self.config.training.n_iters:
                    return 0
                if step % 100 == 0:
                    # Periodically evaluate the DSM loss on one held-out batch.
                    score.eval()
                    try:
                        test_X, test_y = next(test_iter)
                    except StopIteration:
                        # Restart the test iterator once it is exhausted.
                        test_iter = iter(test_loader)
                        test_X, test_y = next(test_iter)
                    test_X = test_X.to(self.config.device)
                    test_X = test_X / 256. * 255. + torch.rand_like(test_X) / 256.
                    if self.config.data.logit_transform:
                        test_X = self.logit_transform(test_X)
                    with torch.no_grad():
                        test_dsm_loss = dsm_score_estimation(score, test_X, sigma=0.01)
                    tb_logger.add_scalar('test_dsm_loss', test_dsm_loss, global_step=step)
                if step % self.config.training.snapshot_freq == 0:
                    states = [
                        score.state_dict(),
                        optimizer.state_dict(),
                    ]
                    # Keep a per-step snapshot plus a rolling 'latest' checkpoint.
                    torch.save(states, os.path.join(self.args.log, 'checkpoint_{}.pth'.format(step)))
                    torch.save(states, os.path.join(self.args.log, 'checkpoint.pth'))

    def Langevin_dynamics(self, x_mod, scorenet, n_steps=1000, step_lr=0.00002):
        """Run unadjusted Langevin dynamics from x_mod under scorenet.

        Returns one CPU tensor per step (the state clamped to [0, 1]),
        starting with the initial state.
        """
        images = []
        with torch.no_grad():
            for _ in range(n_steps):
                images.append(torch.clamp(x_mod, 0.0, 1.0).to('cpu'))
                # Noise std sqrt(2 * step size) is the standard Langevin scale.
                noise = torch.randn_like(x_mod) * np.sqrt(step_lr * 2)
                grad = scorenet(x_mod)
                x_mod = x_mod + step_lr * grad + noise
                print("modulus of grad components: mean {}, max {}".format(grad.abs().mean(), grad.abs().max()))
            return images

    def test(self):
        """Load the latest checkpoint and write Langevin sampling trajectories
        (one tensor file per step) into args.image_folder."""
        states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'), map_location=self.config.device)
        score = RefineNetDilated(self.config).to(self.config.device)
        score = torch.nn.DataParallel(score)
        score.load_state_dict(states[0])
        if not os.path.exists(self.args.image_folder):
            os.makedirs(self.args.image_folder)
        score.eval()
        if self.config.data.dataset == 'MNIST' or self.config.data.dataset == 'FashionMNIST':
            transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])
            if self.config.data.dataset == 'MNIST':
                dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True,
                                transform=transform)
            else:
                dataset = FashionMNIST(os.path.join(self.args.run, 'datasets', 'fmnist'), train=True, download=True,
                                       transform=transform)
            dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=4)
            data_iter = iter(dataloader)
            samples, _ = next(data_iter)
            samples = samples.cuda()
            # The data batch only fixes the tensor shape; sampling starts
            # from uniform noise.
            samples = torch.rand_like(samples)
            all_samples = self.Langevin_dynamics(samples, score, 1000, 0.00002)
            for i, sample in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size,
                                     self.config.data.image_size)
                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)
                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
        elif self.config.data.dataset == 'CELEBA':
            dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='test',
                             transform=transforms.Compose([
                                 transforms.CenterCrop(140),
                                 transforms.Resize(self.config.data.image_size),
                                 transforms.ToTensor(),
                             ]), download=True)
            dataloader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)
            samples, _ = next(iter(dataloader))
            # NOTE(review): the loaded batch is discarded here; 100 uniform
            # noise samples are generated directly on the device instead.
            samples = torch.rand(100, 3, self.config.data.image_size, self.config.data.image_size,
                                 device=self.config.device)
            all_samples = self.Langevin_dynamics(samples, score, 1000, 0.00002)
            for i, sample in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size,
                                     self.config.data.image_size)
                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)
                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
        else:
            transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])
            if self.config.data.dataset == 'CIFAR10':
                dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True,
                                  transform=transform)
            dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=4)
            data_iter = iter(dataloader)
            samples, _ = next(data_iter)
            samples = samples.cuda()
            samples = torch.rand_like(samples)
            all_samples = self.Langevin_dynamics(samples, score, 1000, 0.00002)
            for i, sample in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size,
                                     self.config.data.image_size)
                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)
                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
|
"""
Trains and validates models
"""
import os
import torch
import random
import pandas
import models
import warnings
import datasets
import argparse
import itertools
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, recall_score
warnings.filterwarnings('always')
# Reproducibility
# Fix all RNG seeds and force deterministic cuDNN kernels so runs repeat
# exactly (at the cost of some GPU throughput).
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(0)
def str2bool(value):
    """Parse a command-line boolean value.

    argparse's ``type=bool`` treats ANY non-empty string — including
    'False' — as True, so boolean options need an explicit converter.

    Raises:
        argparse.ArgumentTypeError: if the value is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def main():
    """Parse options, then run domain adaptation in both transfer directions
    (MSP-Improv -> IEMOCAP and IEMOCAP -> MSP-Improv) and print the results."""
    parser = argparse.ArgumentParser()
    # Names, paths, logs
    parser.add_argument('--logger_path', default='checkpoints/sidann', help='relative path to log')
    parser.add_argument('--source_domain', default='', help='MSP-Improv or IEMOCAP')
    parser.add_argument('--target_domain', default='', help='MSP-Improv or IEMOCAP')
    # BUG FIX: was `type=bool`, which parsed '--verbose False' as True.
    parser.add_argument('--verbose', type=str2bool, default=False, help='True or False')
    # Data parameters
    parser.add_argument('--workers_num', type=int, default=4, help='number of workers for data loading')
    # Training and optimization
    parser.add_argument('--epochs_num', type=int, default=25, help='number of training epochs')
    parser.add_argument('--batch_size', type=int, default=256, help='size of a mini-batch')
    parser.add_argument('--learning_rate', type=float, default=3e-4, help='initial learning rate')
    parser.add_argument('--domain_weight', type=float, default=10)
    parser.add_argument('--subject_weight', type=float, default=0.1)
    # Modality (BUG FIX: same `type=bool` issue as --verbose)
    parser.add_argument('--acoustic_modality', type=str2bool, default=True)
    parser.add_argument('--visual_modality', type=str2bool, default=True)
    parser.add_argument('--lexical_modality', type=str2bool, default=False)
    # Model parameters
    parser.add_argument('--visual_feature_dim', type=int, default=2048)
    parser.add_argument('--acoustic_feature_dim', type=int, default=40)
    parser.add_argument('--lexical_feature_dim', type=int, default=768)
    parser.add_argument('--conv_width_v', type=int, default=64, help='64 or 128')
    parser.add_argument('--conv_width_a', type=int, default=128, help='64 or 128')
    parser.add_argument('--kernel_size_v', type=int, default=3, help='2 or 3')
    parser.add_argument('--kernel_size_a', type=int, default=2, help='2 or 3')
    parser.add_argument('--max_pool_width', type=int, default=2)
    parser.add_argument('--rnn_layer_num_v', type=int, default=3, help='2 or 3')
    parser.add_argument('--rnn_layer_num_a', type=int, default=2, help='2 or 3')
    parser.add_argument('--rnn_width', type=int, default=32)
    parser.add_argument('--linear_width_l', type=int, default=32, help='32')
    parser.add_argument('--linear_width', type=int, default=32, help='32 or 64')
    parser.add_argument('--dropout_rate', type=float, default=0.3, help='0.3')
    # GPU
    parser.add_argument('--gpu_num', default='cuda:0', help='GPU device')
    opt = parser.parse_args()
    if opt.verbose:
        print('Training and validating models')
        for arg in vars(opt):
            print(arg + ' = ' + str(getattr(opt, arg)))
    # Both transfer directions are evaluated; the source/target CLI values
    # are deliberately overridden here.
    opt.source_domain = 'MSP-Improv'
    opt.target_domain = 'IEMOCAP'
    acc_1, uar_1, acc_std_1, uar_std_1 = domain_adaptation(opt)
    opt.source_domain = 'IEMOCAP'
    opt.target_domain = 'MSP-Improv'
    acc_2, uar_2, acc_std_2, uar_std_2 = domain_adaptation(opt)
    print(acc_1, ',', uar_1, ',', acc_2, ',', uar_2, ',', acc_1+uar_1+acc_2+uar_2, ',', acc_std_1, uar_std_1, acc_std_2, uar_std_2)
def domain_adaptation(opt):
    """Adapt a source-domain emotion recognizer to opt.target_domain using
    adversarial domain and speaker discriminators.

    Returns:
        (best_acc, best_uar, best_acc_std, best_uar_std): best test accuracy
        and UAR over epochs (epoch >= 3 only), with per-fold std deviations.
    """
    # Use specific GPU
    device = torch.device(opt.gpu_num)
    # Each training batch is built from half source + half target samples.
    half_batch = opt.batch_size // 2
    opt.batch_size = half_batch
    # Dataloaders
    test_dataset_file_path = os.path.join('../dataset', opt.target_domain, 'dataset.csv')
    test_loader = get_dataloader(test_dataset_file_path, 'test', opt)
    # Per-fold test loaders (MSP-Improv has 6 folds, IEMOCAP 5).
    if opt.target_domain == 'MSP-Improv':
        folder_num = 6
    else:
        folder_num = 5
    test_loader_list = []
    for i in range(folder_num):
        dataset_file_path = os.path.join('../dataset', opt.target_domain, str(i), 'test.csv')
        loader = get_dataloader(dataset_file_path, 'test', opt)
        test_loader_list.append(loader)
    # Model, optimizer and loss function
    # Start from the baseline recognizer pretrained on the source domain.
    checkpoint = torch.load(os.path.join('checkpoints/bl', opt.source_domain, 'model.pth.tar'), map_location=device)
    emotion_recognizer = models.Model(opt)
    emotion_recognizer.load_state_dict(checkpoint['emotion_recognizer'])
    for param in emotion_recognizer.parameters():
        param.requires_grad = True
    emotion_recognizer.to(device)
    domain_discriminator = models.DomainDiscriminator(opt)
    domain_discriminator.apply(models.init_weights)
    for param in domain_discriminator.parameters():
        param.requires_grad = True
    domain_discriminator.to(device)
    speaker_discriminator = models.SpeakerDiscriminator(opt)
    speaker_discriminator.apply(models.init_weights)
    for param in speaker_discriminator.parameters():
        param.requires_grad = True
    speaker_discriminator.to(device)
    # One optimizer over the recognizer and both discriminators.
    optimizer = torch.optim.Adam( list(emotion_recognizer.parameters())
                                 +list(domain_discriminator.parameters())
                                 +list(speaker_discriminator.parameters()),
                                 lr=opt.learning_rate)
    criterion = torch.nn.CrossEntropyLoss()
    best_acc = 0.
    best_acc_std = 0.
    best_uar = 0.
    best_uar_std = 0.
    # Train and validate
    for epoch in range(opt.epochs_num):
        if opt.verbose:
            print('epoch: {}/{}'.format(epoch + 1, opt.epochs_num))
        # Fresh paired source/target batch iterator each epoch.
        batch_iterator, n_batches = get_batch_iterator(opt)
        domain_loss, domain_acc, speaker_loss, speaker_acc, train_loss, train_acc \
            = train(batch_iterator, n_batches, emotion_recognizer,
                    domain_discriminator, speaker_discriminator,
                    optimizer, criterion, device, opt)
        test_loss, test_acc, test_uar = test(test_loader, emotion_recognizer, criterion, device, opt)
        # Per-fold metrics provide the std deviations reported alongside
        # the headline accuracy/UAR.
        acc_list = []
        uar_list = []
        for i in range(folder_num):
            loader = test_loader_list[i]
            _, acc, uar = test(loader, emotion_recognizer, criterion, device, opt)
            acc_list.append(acc)
            uar_list.append(uar)
        acc_list = np.array(acc_list)
        uar_list = np.array(uar_list)
        acc_std = np.std(acc_list)
        uar_std = np.std(uar_list)
        if opt.verbose:
            print( 'domain_loss: {0:.5f}'.format(domain_loss),\
                   'domain_acc: {0:.3f}'.format(domain_acc),
                   'speaker_loss: {0:.5f}'.format(speaker_loss),
                   'speaker_acc: {0:.3f}'.format(speaker_acc),
                   'train_loss: {0:.5f}'.format(train_loss),
                   'train_acc: {0:.3f}'.format(train_acc),
                   'test_loss: {0:.5f}'.format(test_loss),
                   'test_acc: {0:.3f}'.format(test_acc),
                   'test_uar: {0:.3f}'.format(test_uar))
        os.makedirs(os.path.join(opt.logger_path, opt.source_domain), exist_ok=True)
        # Always persist the latest epoch's weights...
        model_file_name = os.path.join(opt.logger_path, opt.source_domain, 'checkpoint.pth.tar')
        state = { 'epoch': epoch+1, 'emotion_recognizer': emotion_recognizer.state_dict(),
                  'domain_discriminator' : domain_discriminator.state_dict(), 'opt': opt}
        torch.save(state, model_file_name)
        # ...and snapshot the best-accuracy model after a 3-epoch warm-up.
        if test_acc > best_acc and epoch >= 3:
            model_file_name = os.path.join(opt.logger_path, opt.source_domain, 'model.pth.tar')
            torch.save(state, model_file_name)
            best_acc = test_acc
            best_acc_std = acc_std
        if test_uar > best_uar and epoch >= 3:
            best_uar = test_uar
            best_uar_std = uar_std
    return best_acc, best_uar, best_acc_std, best_uar_std
def get_dataloader(dataset_file_path, loader_type, opt):
    """Build a temporal-dataset loader for the files listed in the CSV."""
    frame = pandas.read_csv(dataset_file_path)
    names = frame['file_name_list'].tolist()
    return datasets.get_loaders_temporal_dataset(
        dataset_file_path, names, loader_type, opt)
def get_batch_iterator(opt):
    """Zip the source- and target-domain train loaders into paired batches.

    Returns the paired-batch iterator and the number of usable pairs
    (the shorter loader's length).
    """
    source_path = os.path.join('../dataset', opt.source_domain, 'dataset.csv')
    target_path = os.path.join('../dataset', opt.target_domain, 'dataset.csv')
    source_loader = get_dataloader(source_path, 'train', opt)
    target_loader = get_dataloader(target_path, 'train', opt)
    paired = zip(source_loader, target_loader)
    return paired, min(len(source_loader), len(target_loader))
def train(batches, n_batches, model, domain_discriminator, speaker_discriminator, optimizer, criterion, device, opt):
    """Run one adversarial training pass over paired source/target batches.

    Args:
        batches: iterable of ((source fields...), (target fields...)) pairs,
            each side a 10-tuple as produced by get_batch_iterator.
        n_batches: number of batch pairs to consume (min of the two loaders).
        model: emotion recognizer exposing .encoder(...) and .recognizer(...).
        domain_discriminator / speaker_discriminator: adversarial heads.
        optimizer: optimizer over all trainable parameters.
        criterion: classification loss (CrossEntropyLoss).
        device: torch device for all tensors.
        opt: parsed options (only .verbose is read here).

    Returns:
        (domain_loss, domain_acc, speaker_loss, speaker_acc,
         label_loss, label_acc), each averaged over n_batches.
    """
    model.train()
    total_domain_loss = 0
    domain_acc = 0
    total_speaker_loss = 0
    speaker_acc = 0
    total_label_loss = 0
    label_acc = 0
    # BUG FIX: when n_batches < 10, int(n_batches / 10) is 0 and the progress
    # modulo below raised ZeroDivisionError; clamp the step to at least 1.
    progress_step = max(1, n_batches // 10)
    for i, train_data in enumerate(batches):
        (source_x_v, _, source_x_a, _, source_x_l, _, _, source_y_a, _, source_speaker), \
        (target_x_v, _, target_x_a, _, target_x_l, _, _, _, _, target_speaker) = train_data
        source_x_v = source_x_v.to(device)
        source_x_a = source_x_a.to(device)
        source_x_l = source_x_l.to(device)
        source_y_a = source_y_a.to(device)
        source_speaker = source_speaker.to(device)
        target_x_v = target_x_v.to(device)
        target_x_a = target_x_a.to(device)
        target_x_l = target_x_l.to(device)
        target_speaker = target_speaker.to(device)
        source_encoded_x = model.encoder(source_x_v, source_x_a, source_x_l)
        target_encoded_x = model.encoder(target_x_v, target_x_a, target_x_l)
        encoded_x = torch.cat([source_encoded_x, target_encoded_x])
        encoded_x = encoded_x.to(device)
        # Domain labels: 1 for source samples, 0 for target samples.
        domain_y = torch.cat([ torch.ones(source_encoded_x.shape[0], dtype=torch.int64),
                               torch.zeros(target_encoded_x.shape[0], dtype=torch.int64)]).to(device)
        speaker_y = torch.cat([source_speaker, target_speaker])
        speaker_y = speaker_y.to(device)
        label_y = source_y_a.to(device)
        domain_preds = domain_discriminator(encoded_x)
        speaker_preds = speaker_discriminator(encoded_x)
        # Emotion labels exist only for the source domain.
        label_preds = model.recognizer(source_encoded_x)
        domain_loss = criterion(domain_preds, domain_y)
        speaker_loss = criterion(speaker_preds, speaker_y)
        label_loss = criterion(label_preds, label_y)
        # NOTE(review): opt.domain_weight / opt.subject_weight are never
        # applied here — confirm the unweighted sum is intentional.
        loss = domain_loss + speaker_loss + label_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_domain_loss += domain_loss.item()
        total_speaker_loss += speaker_loss.item()
        total_label_loss += label_loss.item()
        domain_preds = domain_preds.argmax(dim=1, keepdim=True)
        domain_acc += domain_preds.eq(domain_y.view_as(domain_preds)).sum().item() / len(domain_preds)
        speaker_preds = speaker_preds.argmax(dim=1, keepdim=True)
        # BUG FIX: speaker accuracy was compared against domain_y; compare
        # the speaker predictions with the speaker labels instead.
        speaker_acc += speaker_preds.eq(speaker_y.view_as(speaker_preds)).sum().item() / len(speaker_preds)
        label_preds = label_preds.argmax(dim=1, keepdim=True)
        label_acc += label_preds.eq(label_y.view_as(label_preds)).sum().item() / len(label_preds)
        if opt.verbose and i > 0 and i % progress_step == 0:
            print('.', flush=True, end='')
        if i >= n_batches:
            break
    domain_loss = total_domain_loss / n_batches
    domain_acc = domain_acc / n_batches
    speaker_loss = total_speaker_loss / n_batches
    speaker_acc = speaker_acc / n_batches
    label_loss = total_label_loss / n_batches
    label_acc = label_acc / n_batches
    return domain_loss, domain_acc, speaker_loss, speaker_acc, label_loss, label_acc
def test(test_loader, model, criterion, device, opt):
    """Evaluate `model` on `test_loader`.

    Args:
        test_loader: DataLoader yielding 10-tuples of features and labels.
        model: callable emotion recognizer (eval mode is set here).
        criterion: classification loss.
        device: torch device for the tensors.
        opt: parsed options (unused; kept for a uniform call signature).

    Returns:
        (test_loss, test_acc, test_uar): mean loss, accuracy, and unweighted
        average recall over the whole loader.
    """
    model.eval()
    running_loss = 0.
    with torch.no_grad():
        groundtruth = []
        prediction = []
        for i, test_data in enumerate(test_loader):
            visual_features, _, acoustic_features, _, lexical_features, _, _, a_labels, _, speakers = test_data
            visual_features = visual_features.to(device)
            acoustic_features = acoustic_features.to(device)
            lexical_features = lexical_features.to(device)
            labels = a_labels.to(device)
            predictions = model(visual_features, acoustic_features, lexical_features)
            loss = criterion(predictions, labels)
            running_loss += loss.item()
            groundtruth.append(labels.tolist())
            predictions = predictions.argmax(dim=1, keepdim=True)
            prediction.append(predictions.view_as(labels).tolist())
    test_loss = running_loss / len(test_loader)
    groundtruth = list(itertools.chain.from_iterable(groundtruth))
    prediction = list(itertools.chain.from_iterable(prediction))
    # BUG FIX: sklearn metrics take (y_true, y_pred); the arguments were
    # swapped, which silently turned macro recall (UAR) into macro precision.
    test_acc = accuracy_score(groundtruth, prediction)
    test_uar = recall_score(groundtruth, prediction, average='macro')
    return test_loss, test_acc, test_uar
# Script entry point.
if __name__ == '__main__':
    main()
|
#! /usr/bin/python3
import os, sys, re
# Load the codepoint-to-pinyin lookup table once at import time. Each line
# maps an uppercase hex codepoint to pinyin letters plus an optional tone
# digit (assumed format "<HEX>\t<pinyin><tone>" — TODO confirm).
with open('uni2pinyin.txt') as f:
    u2p_table = f.read()
def unicode2pinyin(dir_name):
    """Recursively rename entries under dir_name, replacing each CJK
    character with its title-cased pinyin from the module-level u2p_table.

    NOTE: traverses with os.chdir, so this is not safe to call concurrently.
    """
    os.chdir(dir_name)
    filenames = os.listdir(u'.')
    for filename in filenames:
        if os.path.isdir(filename):
            unicode2pinyin(filename)
        filename_tmp = ''
        for x in filename:
            if 0x4e00 <= ord(x) <= 0x9fff:  # CJK Unified Ideographs range
                # Strip the leading '0x' and uppercase to match the table.
                hexCH = (hex(ord(x))[2:]).upper()
                # BUG FIX: raw string for the regex (the '\d' escape is
                # invalid in a plain string); tone digits are ignored.
                p = re.compile(hexCH + r'\t([a-z]+)\d*')
                mp = p.search(u2p_table)
                if mp is None:
                    # BUG FIX: a character missing from the table used to
                    # crash on mp.group(1); keep it unchanged instead.
                    filename_tmp += x
                else:
                    filename_tmp += mp.group(1).title()
            else:
                filename_tmp += x
        # Only touch the filesystem when the name actually changed.
        if filename_tmp != filename:
            os.rename(filename, filename_tmp)
    os.chdir('..')
# Script entry point: rename every directory given on the command line.
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("Usage: {} path/to/dir1 path/to/dir2 ...\n\t".format(sys.argv[0]),
              "dir1, dir2, ... will be renamed as well")
    for dirname in sys.argv[1:]:
        if os.path.isdir(dirname):
            unicode2pinyin(dirname)
        else:
            # BUG FIX: the message was missing the space before "is".
            print(dirname + ' is not a directory, skipping')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Package metadata for the soft-era Pygments style; registers the style
# under the "soft-era" name via the pygments.styles entry point.
setup(
    name='pygments-style-soft-era',
    version='1.0.3',
    description='Pygments version of the soft-era theme.',
    keywords=['pygments', 'style', 'soft-era'],
    author='Audrey Moon',
    maintainer='GinShio',
    maintainer_email='ginshio78@gmail.com',
    # BUG FIX: this keyword was misspelled 'utl', so the project URL never
    # reached the package metadata.
    url='http://soft-aesthetic.club/soft-era.html',
    download_url='https://github.com/GinShio/pygments',
    license='MIT',
    packages=find_packages(),
    install_requires=['pygments >= 1.5'],
    zip_safe=False,
    entry_points="""[pygments.styles]
soft-era=pygments_style_soft_era.soft_era:SoftEraStyle""",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Plugins',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
import argparse
import random
def main():
    """Entry point: read N and E from the CLI and emit the edge file."""
    node_count, edge_count = parse_args()
    generate_graph(node_count, edge_count)
def parse_args():
    """Read the node count (N) and edge count (E) from the command line."""
    parser = argparse.ArgumentParser(
        description='Generate Graph for the FDEB benchmark')
    for dest, label, text in (('num_nodes', 'N', 'The number of nodes'),
                              ('num_edges', 'E', 'The number of edges')):
        parser.add_argument(dest, metavar=label, type=int, help=text)
    parsed = parser.parse_args()
    return parsed.num_nodes, parsed.num_edges
def generate_graph(n, e):
    """Write e random edges over n random nodes to '<n>x<e>.csv'.

    Each output line is 'x1,y1,x2,y2,' — the trailing comma is kept for
    compatibility with the existing benchmark reader.
    """
    random.seed(0)  # fixed seed so benchmark inputs are reproducible
    scale = 10
    nodes = [(random.random() * scale, random.random() * scale)
             for _ in range(n)]
    # BUG FIX: the file handle was never closed; 'with' guarantees the CSV
    # is flushed and closed even if an error occurs mid-write.
    with open(str(n) + 'x' + str(e) + '.csv', 'w') as f_edges:
        for _ in range(e):
            src = random.randint(0, n - 1)
            dst = random.randint(0, n - 1)
            f_edges.write(str(nodes[src][0]) + ',' +
                          str(nodes[src][1]) + ',' +
                          str(nodes[dst][0]) + ',' +
                          str(nodes[dst][1]) + ',' +
                          '\n')
# Script entry point.
if __name__ == '__main__':
    main()
|
# Rasterize a shapefile with PNGCanvas
import shapefile
import pngcanvas

# Read the shapefile and compute the width/height of its bounding box.
r = shapefile.Reader("hancock.shp")
xdist = r.bbox[2] - r.bbox[0]
ydist = r.bbox[3] - r.bbox[1]
iwidth = 400
iheight = 600
# Pixels per map unit along each axis.
xratio = iwidth/xdist
yratio = iheight/ydist
# Project each point of the first shape into image coordinates
# (y is flipped because image rows grow downward).
pixels = []
for x,y in r.shapes()[0].points:
    px = int(iwidth - ((r.bbox[2] - x) * xratio))
    py = int((r.bbox[3] - y) * yratio)
    pixels.append([px,py])
c = pngcanvas.PNGCanvas(iwidth,iheight)
c.polyline(pixels)
# BUG FIX: the builtin file() was removed in Python 3; use open() in a
# context manager so the PNG is flushed and closed.
with open("hancock_pngcvs.png", "wb") as f:
    f.write(c.dump())
|
from distutils.version import LooseVersion
import six
from django.template import RequestContext
from django.utils.translation import override
import cms
from cms.api import add_plugin
from ..models import SelectedCategory
from .test_base import AldrynFaqTest
def _render_plugin(request, plugin):
    """Render `plugin` for `request`, dispatching on the installed
    django-cms version: < 3.4 renders through the plugin itself, newer
    versions go through ContentRenderer."""
    def _render_via_django():
        # Pre-3.4 path: run the default engine's context processors by
        # hand, then let the plugin render itself with that context.
        from django.template import Engine
        context = RequestContext(request)
        updates = {}
        engine = Engine.get_default()
        for processor in engine.template_context_processors:
            updates.update(processor(context.request))
        context.dicts[context._processors_index] = updates
        return plugin.render_plugin(context)
    def _render_via_cms():
        from cms.plugin_rendering import ContentRenderer
        renderer = ContentRenderer(request)
        context = RequestContext(request)
        # Avoid errors if plugin require a request object
        # when rendering.
        context['request'] = request
        return renderer.render_plugin(plugin, context)
    cms_lt_3_4 = LooseVersion(cms.__version__) < LooseVersion('3.4')  # COMPAT: CMS3.4
    if cms_lt_3_4:
        return _render_via_django()
    else:
        return _render_via_cms()
class TestQuestionListPlugin(AldrynFaqTest):
    def test_plugin(self):
        """QuestionListPlugin renders an empty notice, then added questions,
        and copy_relations() copies the selected questions."""
        page1 = self.get_or_create_page("Page One")
        ph = page1.placeholders.get(slot="content")
        plugin = add_plugin(ph, "QuestionListPlugin", language="en")
        # First test that it is initially empty
        request = self.get_page_request(
            page1, self.user, None, lang_code="en", edit=False)
        rendered = _render_plugin(request, plugin)
        self.assertTrue(rendered.find("No entry found.") > -1)
        # Now, add a question, and test that it renders.
        question1 = self.reload(self.question1, "en")
        plugin.questions.add(question1)
        plugin.save()
        request = self.get_page_request(
            page1, self.user, None, lang_code="en", edit=False)
        rendered = _render_plugin(request, plugin)
        self.assertTrue(rendered.find(question1.title) > -1)
        # Test its unicode method
        self.assertEqual(str(plugin), "1 question selected")
        # Test its copy_relations. To do this, we'll create another instance
        # that is empty, then copy_relations to it, and prove that it contains
        # questions.
        plugin2 = add_plugin(ph, "QuestionListPlugin", language="en")
        plugin2.copy_relations(plugin)
        # BUG FIX: assertTrue(a, b) treats b as the failure message and never
        # compared the two plugins; compare the copied question lists instead.
        self.assertEqual(
            list(plugin.get_questions()), list(plugin2.get_questions()))
class TestLatestQuestionsPlugin(AldrynFaqTest):
    def test_plugin(self):
        """LatestQuestionsPlugin lists question URLs, newest first."""
        with override("de"):
            page1 = self.get_or_create_page("Page One")
            ph = page1.placeholders.get(slot="content")
            plugin = add_plugin(ph, "LatestQuestionsPlugin", language="de")
            request = self.get_page_request(
                page1, self.user, None, lang_code="de", edit=False)
            url1 = self.reload(self.question1, "de").get_absolute_url()
            url2 = self.reload(self.question2, "de").get_absolute_url()
            rendered = _render_plugin(request, plugin)
            # Both fixture questions must render.
            self.assertTrue(rendered.find(url1) > -1)
            self.assertTrue(rendered.find(url2) > -1)
            # Test that question2 appears before question1
            self.assertTrue(rendered.find(url2) < rendered.find(url1))
class TestTopQuestionsPlugin(AldrynFaqTest):
    def test_plugin(self):
        """TopQuestionsPlugin renders nothing until a question is flagged
        is_top, then shows that question."""
        page1 = self.get_or_create_page("Page One")
        ph = page1.placeholders.get(slot="content")
        plugin = add_plugin(ph, "TopQuestionsPlugin", language="en")
        # First test that no plugins are found initially
        request = self.get_page_request(
            page1, self.user, None, lang_code="en", edit=False)
        rendered = _render_plugin(request, plugin)
        self.assertTrue(rendered.find("No entry found") > -1)
        # Now test, set a question to be "top", then test that it appears.
        self.question1.is_top = True
        self.question1.save()
        request = self.get_page_request(
            page1, self.user, None, lang_code="en", edit=False)
        question1 = self.reload(self.question1, "en")
        rendered = _render_plugin(request, plugin)
        self.assertTrue(rendered.find(question1.title) > -1)
class TestMostReadQuestionsPlugin(AldrynFaqTest):
    def test_plugin(self):
        """MostReadQuestionsPlugin orders questions by number_of_visits,
        most-visited first."""
        # Prepare the questions...
        self.question1.number_of_visits = 5
        self.question1.save()
        self.question2.number_of_visits = 10
        self.question2.save()
        with override("de"):
            page1 = self.get_or_create_page("Page One")
            ph = page1.placeholders.get(slot="content")
            plugin = add_plugin(ph, "MostReadQuestionsPlugin", language="de")
            request = self.get_page_request(
                page1, self.user, None, lang_code="de", edit=False)
            url1 = self.reload(self.question1, "de").get_absolute_url()
            url2 = self.reload(self.question2, "de").get_absolute_url()
            rendered = _render_plugin(request, plugin)
            # Ensure both questions appear...
            self.assertTrue(rendered.find(url1) > -1)
            self.assertTrue(rendered.find(url2) > -1)
            # Test that question2 appears before question1
            self.assertTrue(rendered.find(url2) < rendered.find(url1))
class TestCategoryListPlugin(AldrynFaqTest):
    def test_plugin(self):
        """CategoryListPlugin renders only explicitly selected categories,
        and copy_relations() copies the selection."""
        page1 = self.get_or_create_page("Page One")
        ph = page1.placeholders.get(slot='content')
        plugin = add_plugin(ph, 'CategoryListPlugin', language="de")
        request = self.get_page_request(
            page1, self.user, None, lang_code="de", edit=False)
        category1 = self.category1
        category1.save()
        category2 = self.category2
        category2.save()
        url = category1.get_absolute_url(language="de")
        # Nothing is selected yet, so the category URL must not render.
        rendered = _render_plugin(request, plugin)
        self.assertFalse(rendered.find(url) > -1)
        # Add some selected categories
        categories = [self.category1, self.category2]
        sc = None
        for idx, category in enumerate(categories):
            sc = SelectedCategory(
                category=category, position=idx, cms_plugin=plugin)
            sc.save()
        self.assertEqualItems(
            [c.id for c in plugin.get_categories()],
            [c.id for c in categories]
        )
        # While we're here, let's test that SelectedCategory's __str__ works
        # (unicode() on Python 2, str() on Python 3).
        if six.PY2:
            self.assertEqual(unicode(sc), categories[-1].name)
        else:
            self.assertEqual(str(sc), categories[-1].name)
        # Test that copy_relations works
        plugin2 = add_plugin(ph, "CategoryListPlugin", language="de")
        plugin2.copy_relations(plugin)
        self.assertEqualItems(
            [c.id for c in plugin.get_categories()],
            [c.id for c in plugin2.get_categories()]
        )
|
# Legacy author metadata for this test module.
__author__ = 'ddustin'
import time
from twisted.trial import unittest
from market.btcprice import BtcPrice
class MarketProtocolTest(unittest.TestCase):
    """Smoke tests for BtcPrice: each test starts the price thread, reads a
    USD rate, and shuts the thread down again.

    NOTE(review): these tests appear to hit live price endpoints and rely on
    a 10 ms sleep for the first fetch — inherently racy; confirm whether a
    mock transport should be injected instead.
    """

    def _check_loader(self, loader_name):
        """Run BtcPrice restricted to a single load source and verify that a
        non-negative USD rate is returned. (Extracted from four copy-pasted
        test bodies; behavior is unchanged.)"""
        btcPrice = BtcPrice()
        btcPrice.loadPriorities = [loader_name]
        btcPrice.start()
        time.sleep(0.01)
        rate = btcPrice.get("USD")
        self.assertGreaterEqual(rate, 0)
        btcPrice.closethread()
        btcPrice.join()

    def test_BtcPrice(self):
        # Default load priorities; the singleton accessor must return a
        # strictly positive rate.
        btcPrice = BtcPrice()
        btcPrice.start()
        time.sleep(0.01)
        rate = BtcPrice.instance().get("USD")
        self.assertGreater(rate, 0)
        btcPrice.closethread()
        btcPrice.join()

    def test_BtcPrice_loadbitcoinaverage(self):
        self._check_loader("loadbitcoinaverage")

    def test_BtcPrice_loadbitpay(self):
        self._check_loader("loadbitpay")

    def test_BtcPrice_loadblockchain(self):
        self._check_loader("loadblockchain")

    def test_BtcPrice_loadbitcoincharts(self):
        self._check_loader("loadbitcoincharts")
|
# -*- coding: utf-8 -*-
"""Timesketch scaffolder that generates analyzer plugins."""
import os
import logging
from typing import Dict
from typing import Iterator
from typing import Tuple
from l2tscaffolder.lib import definitions
from l2tscaffolder.lib import mapping_helper
from l2tscaffolder.scaffolders import interface
class TimesketchBaseScaffolder(interface.Scaffolder):
    """The Timesketch base scaffolder interface.

    Attributes:
        class_name (str): class name of the Timesketch analyzer to be generated.
    """
    # The name of the plugin this scaffolder plugin provides.
    NAME = 'timesketch_base'
    # One liner describing what the scaffolder provides.
    DESCRIPTION = 'This is a scaffolder for Timesketch analyzers'
    # Define which project this particular scaffolder belongs to.
    PROJECT = definitions.DEFINITION_TIMESKETCH
    # Filename of templates (subclasses are expected to set these).
    TEMPLATE_PLUGIN_FILE = ''
    TEMPLATE_PLUGIN_TEST = ''
    # Questions, a list that contains all the needed questions that the
    # user should be prompted about before the plugin or parser is created.
    # Each element in the list should be of the named tuple question.
    QUESTIONS = []
def __init__(self):
"""Initializes the Timesketch scaffolder."""
super(TimesketchBaseScaffolder, self).__init__()
self._plugin_path = os.path.join('timesketch', 'lib', 'analyzers')
self._plugin_test_path = os.path.join('timesketch', 'lib', 'analyzers')
# Timesketch uses 4 spaces instead of 2, thus we need to set a different
# formatter.
self._mapping_helper = mapping_helper.MappingHelper(
formatter_path='.style.ts.yapf')
self.class_name = ''
def _GeneratePlugin(self) -> str:
"""Generates the plugin file."""
return self._mapping_helper.RenderTemplate(
self.TEMPLATE_PLUGIN_FILE, self.GetJinjaContext())
def _GeneratePluginTest(self) -> str:
"""Generates the plugin test file."""
return self._mapping_helper.RenderTemplate(
self.TEMPLATE_PLUGIN_TEST, self.GetJinjaContext())
def GetInitFileChanges(self) -> Iterator[Tuple[str, str]]:
"""Generate a list of init files that need changing and the changes to them.
Yields:
Tuple[str, str]: path to the init file and the entry to add to it.
"""
plugin_path = self._plugin_path.replace(os.sep, '.')
plugin_string = 'from {0:s} import {1:s}\n'.format(
plugin_path, self._output_name)
plugin_init_path = os.path.join(self._plugin_path, '__init__.py')
yield plugin_init_path, plugin_string
def GetFilesToCopy(self) -> Iterator[Tuple[str, str]]:
"""Return a list of files that need to be copied.
Returns:
an empty iterator.
"""
return iter(())
def GetJinjaContext(self) -> Dict[str, object]:
"""Returns a dict that can be used as a context for Jinja2 templates.
Returns:
dict: containing:
str: name of Jinja argument.
object: Jinja argument value.
"""
context = super(TimesketchBaseScaffolder, self).GetJinjaContext()
context['class_name'] = self.class_name
context['plugin_name'] = self._output_name
return context
def GenerateFiles(self) -> Iterator[Tuple[str, str]]:
"""Generates all the files required for a Timesketch analyzer plugin.
Yields:
list[tuple]: containing:
str: file name.
str: file content.
"""
plugin_name = '{0:s}.py'.format(self._output_name)
self.class_name = self._mapping_helper.GenerateClassName(
self._output_name)
try:
plugin_path = os.path.join(self._plugin_path, plugin_name)
plugin_content = self._GeneratePlugin()
yield plugin_path, plugin_content
except SyntaxError as exception:
logging.error((
'Syntax error while attempting to generate plugin, error '
'message: {0!s}').format(exception))
test_file_name = '{0:s}_test.py'.format(self._output_name)
test_path = os.path.join(self._plugin_test_path, test_file_name)
try:
test_content = self._GeneratePluginTest()
yield test_path, test_content
except SyntaxError as exception:
logging.error((
'Syntax error while attempting to generate plugin test, error '
'message: {0!s}').format(exception))
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
import os
class Ftgl(AutotoolsPackage):
    """Library to use arbitrary fonts in OpenGL applications."""

    homepage = "http://ftgl.sourceforge.net/docs/html/"
    url = "https://sourceforge.net/projects/ftgl/files/FTGL%20Source/2.1.2/ftgl-2.1.2.tar.gz/download"
    list_url = "https://sourceforge.net/projects/ftgl/files/FTGL%20Source/"
    list_depth = 1

    version('2.1.2', 'f81c0a7128192ba11e036186f9a968f2')

    # Upstream sources contain an unnecessary qualifier that modern GCC rejects.
    patch('remove-extra-qualifier.diff')

    # The release tarball ships without a configure script, so the whole
    # autotools chain is required at build time.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    depends_on('pkgconfig', type='build')
    depends_on('gl')
    depends_on('glu')
    depends_on('freetype@2.0.9:')
    # "make install" currently fails if the docs were not built, hence the
    # doxygen build dependency.
    # FIXME: Can someone with autotools experience fix the build system
    # so that it doesn't fail when that happens?
    depends_on('doxygen', type='build')

    @property
    @when('@2.1.2')
    def configure_directory(self):
        """Platform-specific subdirectory that holds the configure sources."""
        platform_subdir = 'mac' if sys.platform == 'darwin' else 'unix'
        return os.path.join(self.stage.source_path, platform_subdir)
|
# Script purpose: count the total number of lines in a source file and report
# blank lines and comment lines separately.
# NOTE: end-of-line (trailing) comments are not taken into account.
import re, os
import string

# File analysed when this script is executed directly.
filename = './2_Gen_ActiveCode.py'

# A line opening a triple-quoted block: optional leading whitespace then '''.
_DOC_OPEN = re.compile(r"\s*'''")
# A one-line docstring such as '''text''' (opens and closes on the same line).
_DOC_ONE_LINE = re.compile(r"\s*'''.*'''\s*$")
# A line closing a triple-quoted block (ends with ''').
_DOC_CLOSE = re.compile(r".*'''\s*$")


def count_code_lines(lines):
    """Return (total, blank, note) line counts for *lines*.

    A "note" line is either a '#' comment line or any line belonging to a
    '''...''' block (opening and closing lines included). Counted note lines
    are echoed to stdout, preserving the original script's behaviour.

    Fixes over the original flat script:
      * a one-line '''x''' docstring no longer swallows the following lines;
      * the closing ''' line is counted as a note line;
      * an unterminated ''' block at end of file no longer raises IndexError.
    """
    total_line = len(lines)
    blank_line = 0
    note_line = 0
    line_index = 0
    while line_index < total_line:
        line = lines[line_index]
        if line.strip().startswith('#'):
            note_line += 1
            print(line)
        elif _DOC_OPEN.match(line) is not None:
            note_line += 1
            print(line)
            # Only scan ahead for the closing ''' when the block actually
            # spans multiple lines.
            if _DOC_ONE_LINE.match(line) is None:
                line_index += 1
                while line_index < total_line:
                    note_line += 1
                    print(lines[line_index])
                    if _DOC_CLOSE.match(lines[line_index]) is not None:
                        break
                    line_index += 1
        elif line.isspace():
            blank_line += 1
        line_index += 1
    return total_line, blank_line, note_line


def main():
    """Read *filename* and print its line statistics."""
    with open(filename, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    total_line, blank_line, note_line = count_code_lines(lines)
    print('代码总行数为', total_line)
    print(' 空行数为', blank_line)
    print(' 注释行数为', note_line)


# Guard keeps the file importable (e.g. for testing) without doing any I/O.
if __name__ == '__main__':
    main()
|
#! /usr/bin/python3.5
import sys
import linecache
def main():
    # NOTE(review): "prin" is an undefined name, so calling this raises
    # NameError. This definition is also shadowed by the second `def main()`
    # further below in this file, so it is dead code — confirm whether it is
    # a leftover that can be removed.
    prin("HELLO THERE")
# >>>>>>>>>>>>>>>>>>>>>>> MYDIE MODULE USED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def mydie(exitCont_):
    """Print the failure report *exitCont_*, run rollback steps and exit.

    Args:
        exitCont_: textual error/exception report to display before exiting.

    Exits the interpreter with a non-zero status so callers (shells, cron,
    CI) can detect that the program failed.
    """
    print(exitCont_)
    print("*** ERROR OCCURRED *** : ROLL BACK PROCEDURES EXECUTING BELOW")
    # DEFINE DB DISCONNECT HERE
    print("*** ROLL BACK *** : CLOSING DB CONNECTION")
    # DEFINE CLOSE FTP CONNECTION HERE
    # DEFINE CLEARING THE TEMPORARY FILES HERE
    # DEFINE CREATING A PICKLE HERE
    # Bug fix: this is the error path, so exit with a failure status; the
    # original exited with 0, which signals success to the OS.
    sys.exit(1)
# >>>>>>>>>>>>>>>>>>>>>>> EXCEPTION BRIEFER USED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def ExceptionBrief():
    """Build a one-line summary of the exception currently being handled.

    Must be called from inside an ``except`` block; reports the file name,
    line number, source text and exception object.
    """
    exc_type, exc_value, traceback_obj = sys.exc_info()
    frame = traceback_obj.tb_frame
    line_number = traceback_obj.tb_lineno
    source_file = frame.f_code.co_filename
    # Refresh linecache so the reported source line matches the file on disk.
    linecache.checkcache(source_file)
    source_line = linecache.getline(source_file, line_number, frame.f_globals)
    return 'EXCEPTION CAPTURED : ({}, LINE {} "{}"): {}'.format(
        source_file, line_number, source_line.strip(), exc_value)
def main():
    # NOTE(review): "prit" is undefined and raises NameError at call time,
    # which the generic `except` in the __main__ block below routes through
    # ExceptionBrief()/mydie(). This may be a deliberate demonstration of the
    # error-catch mechanism — confirm before "fixing" the typo to print().
    prit("hellow world")
# >>>>>>>>>>>>>>>>>>>>>>> DECLARE THE MAIN FUNCTION ERROR CATCH MECHANISM HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
if __name__=="__main__":
    try:
        main()
    # check the type of the exception
    # use hash attribute to print of hash type values
    # and if it is a non hash type attribute,
    # you can print the exception normally
    except KeyboardInterrupt:
        # Ctrl-C: annotate the report so the operator knows the run was
        # interrupted externally, then roll back and exit via mydie().
        full_execption_report = ExceptionBrief()
        full_execption_report = full_execption_report+" Program Interrupted By External Source"
        mydie(full_execption_report)
    except :
        full_execption_report=ExceptionBrief()
        # DO NOTHING, USED AS DEFAULT EXIT FOR PROGRAM
        # NOTE(review): ExceptionBrief() returns a string, and a string
        # compared with the integer 0 is always unequal, so the condition
        # below is always True and mydie() always runs — confirm whether a
        # genuine "ignore" path was intended here.
        if full_execption_report != 0:
            mydie(full_execption_report)
|
from .discovery import RoombaDiscovery
from .getpassword import RoombaPassword
from .roomba import Roomba, RoombaConnectionError, RoombaInfo
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class coinmate (Exchange):
    """ccxt exchange adapter for CoinMate (coinmate.io).

    NOTE: this file is auto-generated (see header); changes belong in the
    transpiler sources, not here.
    """

    def describe(self):
        """Return the static exchange metadata (ids, URLs, endpoints, markets, fees)."""
        return self.deep_extend(super(coinmate, self).describe(), {
            'id': 'coinmate',
            'name': 'CoinMate',
            'countries': ['GB', 'CZ', 'EU'], # UK, Czech Republic
            'rateLimit': 1000,
            'has': {
                'CORS': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27811229-c1efb510-606c-11e7-9a36-84ba2ce412d8.jpg',
                'api': 'https://coinmate.io/api',
                'www': 'https://coinmate.io',
                'doc': [
                    'http://docs.coinmate.apiary.io',
                    'https://coinmate.io/developers',
                ],
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'orderBook',
                        'ticker',
                        'transactions',
                    ],
                },
                'private': {
                    'post': [
                        'balances',
                        'bitcoinWithdrawal',
                        'bitcoinDepositAddresses',
                        'buyInstant',
                        'buyLimit',
                        'cancelOrder',
                        'cancelOrderWithInfo',
                        'createVoucher',
                        'openOrders',
                        'redeemVoucher',
                        'sellInstant',
                        'sellLimit',
                        'transactionHistory',
                        'unconfirmedBitcoinDeposits',
                    ],
                },
            },
            'markets': {
                'BTC/EUR': {'id': 'BTC_EUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'precision': {'amount': 4, 'price': 2}},
                'BTC/CZK': {'id': 'BTC_CZK', 'symbol': 'BTC/CZK', 'base': 'BTC', 'quote': 'CZK', 'precision': {'amount': 4, 'price': 2}},
                'LTC/BTC': {'id': 'LTC_BTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'precision': {'amount': 4, 'price': 5}},
            },
            'fees': {
                'trading': {
                    'maker': 0.0005,
                    'taker': 0.0035,
                },
            },
        })

    async def fetch_balance(self, params={}):
        """Fetch account balances and map them into the unified ccxt balance structure."""
        response = await self.privatePostBalances()
        balances = response['data']
        result = {'info': balances}
        currencies = list(self.currencies.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            account = self.account()
            # Only currencies present in the response get real numbers; the
            # rest keep the empty template returned by self.account().
            if currency in balances:
                account['free'] = balances[currency]['available']
                account['used'] = balances[currency]['reserved']
                account['total'] = balances[currency]['balance']
            result[currency] = account
        return self.parse_balance(result)

    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for *symbol* (bids/asks with price and amount)."""
        response = await self.publicGetOrderBook(self.extend({
            'currencyPair': self.market_id(symbol),
            'groupByPriceLimit': 'False',
        }, params))
        orderbook = response['data']
        # API reports seconds; ccxt uses milliseconds.
        timestamp = orderbook['timestamp'] * 1000
        return self.parse_order_book(orderbook, timestamp, 'bids', 'asks', 'price', 'amount')

    async def fetch_ticker(self, symbol, params={}):
        """Fetch the latest ticker for *symbol* in the unified ccxt shape."""
        response = await self.publicGetTicker(self.extend({
            'currencyPair': self.market_id(symbol),
        }, params))
        ticker = response['data']
        # API reports seconds; ccxt uses milliseconds.
        timestamp = ticker['timestamp'] * 1000
        last = float(ticker['last'])
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['bid']),
            'bidVolume': None,
            'ask': float(ticker['ask']),
            'vwap': None,
            'askVolume': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': float(ticker['amount']),
            'quoteVolume': None,
            'info': ticker,
        }

    def parse_trade(self, trade, market=None):
        """Convert a raw exchange trade dict into the unified ccxt trade structure."""
        if not market:
            market = self.markets_by_id[trade['currencyPair']]
        return {
            'id': trade['transactionId'],
            'info': trade,
            'timestamp': trade['timestamp'],
            'datetime': self.iso8601(trade['timestamp']),
            'symbol': market['symbol'],
            'type': None,
            'side': None,
            'price': trade['price'],
            'amount': trade['amount'],
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol* (last 10 minutes of history)."""
        market = self.market(symbol)
        response = await self.publicGetTransactions(self.extend({
            'currencyPair': market['id'],
            'minutesIntoHistory': 10,
        }, params))
        return self.parse_trades(response['data'], market, since, limit)

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; dispatches to buy/sell + Instant/Limit endpoints."""
        method = 'privatePost' + self.capitalize(side)
        order = {
            'currencyPair': self.market_id(symbol),
        }
        if type == 'market':
            if side == 'buy':
                order['total'] = amount # amount in fiat
            else:
                order['amount'] = amount # amount in fiat
            method += 'Instant'
        else:
            order['amount'] = amount # amount in crypto
            order['price'] = price
            method += self.capitalize(type)
        response = await getattr(self, method)(self.extend(order, params))
        return {
            'info': response,
            'id': str(response['data']),
        }

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel the order identified by *id*."""
        return await self.privatePostCancelOrder({'orderId': id})

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed (private) or plain (public) HTTP request descriptor."""
        url = self.urls['api'] + '/' + path
        if api == 'public':
            if params:
                url += '?' + self.urlencode(params)
        else:
            self.check_required_credentials()
            # Signature is an HMAC over nonce + clientId + publicKey,
            # transmitted uppercase alongside the form-encoded parameters.
            nonce = str(self.nonce())
            auth = nonce + self.uid + self.apiKey
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            body = self.urlencode(self.extend({
                'clientId': self.uid,
                'nonce': nonce,
                'publicKey': self.apiKey,
                'signature': signature.upper(),
            }, params))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Perform the HTTP request and raise ExchangeError on API-level errors."""
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'error' in response:
            if response['error']:
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
|
from typing import Optional
from apis.version1.route_login import get_current_user_from_token
from db.models.users import User
from db.repository.jobs import create_new_job
from db.repository.jobs import list_jobs
from db.repository.jobs import retreive_job
from db.repository.jobs import search_job
from db.session import get_db
from fastapi import APIRouter
from fastapi import Depends
from fastapi import Request
from fastapi import responses
from fastapi import status
from fastapi.security.utils import get_authorization_scheme_param
from fastapi.templating import Jinja2Templates
from schemas.jobs import JobCreate
from sqlalchemy.orm import Session
from webapps.jobs.forms import JobCreateForm
# Jinja2 template loader rooted at the project's templates/ directory.
templates = Jinja2Templates(directory="templates")
# These webapp routes render HTML, so they are kept out of the OpenAPI schema.
router = APIRouter(include_in_schema=False)
@router.get("/")
async def home(request: Request, db: Session = Depends(get_db), msg: Optional[str] = None):
    """Render the homepage listing all jobs; *msg* is an optional flash message."""
    jobs = list_jobs(db=db)
    return templates.TemplateResponse(
        "general_pages/homepage.html", {"request": request, "jobs": jobs, "msg": msg}
    )
@router.get("/details/{id}")
def job_detail(id: int, request: Request, db: Session = Depends(get_db)):
    """Render the detail page for a single job posting."""
    job = retreive_job(db=db, id=id)
    context = {"request": request, "job": job}
    return templates.TemplateResponse("jobs/detail.html", context)
@router.get("/post-a-job/")
def create_job(request: Request, db: Session = Depends(get_db)):
    """Serve the empty job-creation form."""
    context = {"request": request}
    return templates.TemplateResponse("jobs/create_job.html", context)
@router.post("/post-a-job/")
async def create_job(request: Request, db: Session = Depends(get_db)):
    """Handle the job-creation form: validate, authenticate, persist, redirect.

    NOTE(review): this re-uses the name ``create_job`` of the GET handler
    above; FastAPI registers both routes at decoration time so both work,
    but only this definition remains importable by name.
    """
    form = JobCreateForm(request)
    await form.load_data()
    if form.is_valid():
        try:
            # The JWT is stored in a cookie as "Bearer <token>".
            token = request.cookies.get("access_token")
            scheme, param = get_authorization_scheme_param(
                token
            ) # scheme will hold "Bearer" and param will hold actual token value
            current_user: User = get_current_user_from_token(token=param, db=db)
            job = JobCreate(**form.__dict__)
            job = create_new_job(job=job, db=db, owner_id=current_user.id)
            # On success, send the browser to the new job's detail page.
            return responses.RedirectResponse(
                f"/details/{job.id}", status_code=status.HTTP_302_FOUND
            )
        except Exception as e:
            print(e)
            # Any failure (bad/missing token, DB error) re-renders the form
            # with a generic error appended.
            form.__dict__.get("errors").append(
                "You might not be logged in, In case problem persists please contact us."
            )
            return templates.TemplateResponse("jobs/create_job.html", form.__dict__)
    return templates.TemplateResponse("jobs/create_job.html", form.__dict__)
@router.get("/delete-job/")
def show_jobs_to_delete(request: Request, db: Session = Depends(get_db)):
    """Render the page that lists every job with delete controls."""
    all_jobs = list_jobs(db=db)
    context = {"request": request, "jobs": all_jobs}
    return templates.TemplateResponse("jobs/show_jobs_to_delete.html", context)
@router.get("/search/")
def search_jobs(
    request: Request, db: Session = Depends(get_db), query: Optional[str] = None
):
    """Render the homepage filtered to jobs matching *query*."""
    matching_jobs = search_job(query=query, db=db)
    context = {"request": request, "jobs": matching_jobs}
    return templates.TemplateResponse("general_pages/homepage.html", context)
@router.get("/autocomplete/")
def autocomplete(term: Optional[str] = None, db: Session = Depends(get_db)):
    """Return job titles matching *term* for typeahead suggestions.

    FastAPI serializes the returned list of strings as a JSON array.
    """
    jobs = search_job(term, db=db)
    # Idiomatic list comprehension replaces the manual append loop.
    return [job.title for job in jobs]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/services/account_budget_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v2.proto.resources import account_budget_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_account__budget__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/services/account_budget_service.proto',
package='google.ads.googleads.v2.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v2.servicesB\031AccountBudgetServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V2.Services\312\002 Google\\Ads\\GoogleAds\\V2\\Services\352\002$Google::Ads::GoogleAds::V2::Services'),
serialized_pb=_b('\nCgoogle/ads/googleads_v2/proto/services/account_budget_service.proto\x12 google.ads.googleads.v2.services\x1a<google/ads/googleads_v2/proto/resources/account_budget.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\"0\n\x17GetAccountBudgetRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xef\x01\n\x14\x41\x63\x63ountBudgetService\x12\xb9\x01\n\x10GetAccountBudget\x12\x39.google.ads.googleads.v2.services.GetAccountBudgetRequest\x1a\x30.google.ads.googleads.v2.resources.AccountBudget\"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v2/{resource_name=customers/*/accountBudgets/*}\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x80\x02\n$com.google.ads.googleads.v2.servicesB\x19\x41\x63\x63ountBudgetServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V2.Services\xca\x02 Google\\Ads\\GoogleAds\\V2\\Services\xea\x02$Google::Ads::GoogleAds::V2::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_account__budget__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,])
_GETACCOUNTBUDGETREQUEST = _descriptor.Descriptor(
name='GetAccountBudgetRequest',
full_name='google.ads.googleads.v2.services.GetAccountBudgetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.services.GetAccountBudgetRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=222,
serialized_end=270,
)
DESCRIPTOR.message_types_by_name['GetAccountBudgetRequest'] = _GETACCOUNTBUDGETREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAccountBudgetRequest = _reflection.GeneratedProtocolMessageType('GetAccountBudgetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETACCOUNTBUDGETREQUEST,
__module__ = 'google.ads.googleads_v2.proto.services.account_budget_service_pb2'
,
__doc__ = """Request message for
[AccountBudgetService.GetAccountBudget][google.ads.googleads.v2.services.AccountBudgetService.GetAccountBudget].
Attributes:
resource_name:
The resource name of the account-level budget to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.GetAccountBudgetRequest)
))
_sym_db.RegisterMessage(GetAccountBudgetRequest)
DESCRIPTOR._options = None
_ACCOUNTBUDGETSERVICE = _descriptor.ServiceDescriptor(
name='AccountBudgetService',
full_name='google.ads.googleads.v2.services.AccountBudgetService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=273,
serialized_end=512,
methods=[
_descriptor.MethodDescriptor(
name='GetAccountBudget',
full_name='google.ads.googleads.v2.services.AccountBudgetService.GetAccountBudget',
index=0,
containing_service=None,
input_type=_GETACCOUNTBUDGETREQUEST,
output_type=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_account__budget__pb2._ACCOUNTBUDGET,
serialized_options=_b('\202\323\344\223\0022\0220/v2/{resource_name=customers/*/accountBudgets/*}'),
),
])
_sym_db.RegisterServiceDescriptor(_ACCOUNTBUDGETSERVICE)
DESCRIPTOR.services_by_name['AccountBudgetService'] = _ACCOUNTBUDGETSERVICE
# @@protoc_insertion_point(module_scope)
|
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
# Number of capture channels configured per TC instance by this script.
NUM_CAPTURE_CHANNELS = 2
global tcSym_Capture_Channel
# Per-channel symbol lists, indexed by capture channel number.
tcSym_Capture_Channel = []           # channel enable symbols
tcSym_Capture_Trigger_Source = []    # trigger source selection symbols
tcSym_Capture_Trigger_Edge = []      # trigger edge selection symbols
tcSym_Capture_Trigger_Action = []    # trigger action (PPW/PWP) symbols
tcSym_Capture_INTENSET_MC = []       # per-channel match/capture interrupt enables
tcSym_Capture_EVCTRL_MCEO = []       # per-channel event-out enables
###################################################################################################
########################################## Callbacks #############################################
###################################################################################################
def updateCaptureMenuVisibleProperty(symbol, event):
    """Show the Capture menu only while the TC operation mode is "Capture"."""
    symbol.setVisible(event["value"] == "Capture")
def tcChannelVisible(symbol, event):
    """Show *symbol* only when its capture channel is enabled.

    The channel number is taken from the trailing digit of the symbol ID.
    """
    channel = int(symbol.getID()[-1:])
    channel_enabled = tcSym_Capture_Channel[channel].getValue() == True
    symbol.setVisible(channel_enabled)
def updateTCCaptureInterruptValue(symbol, event):
    """Aggregate the individual capture interrupt enables into one symbol."""
    component = tcInstanceName.getValue().lower()
    interrupt_ids = [
        "TC_CAPTURE_ERR_INTERRUPT_MODE",
        "TC_CAPTURE_OVF_INTERRUPT_MODE",
        "TC_CAPTURE_INTSET_MC0",
        "TC_CAPTURE_INTSET_MC1",
    ]
    # Read all four enables first (same order as the original code), then
    # publish whether any of them is set.
    enables = [Database.getSymbolValue(component, sym_id) for sym_id in interrupt_ids]
    symbol.clearValue()
    symbol.setValue(any(enables), 2)
def tcCaptureEvsys(symbol, event):
    """Keep the EVSYS generator/user entries in sync with capture settings."""
    instance = tcInstanceName.getValue()
    event_id = event["id"]
    if event_id == "TC_CAPTURE_EVCTRL_MCEO0":
        Database.setSymbolValue("evsys", "GENERATOR_" + instance + "_MC_0_ACTIVE", event["value"], 2)
    if event_id == "TC_CAPTURE_EVCTRL_MCEO1":
        Database.setSymbolValue("evsys", "GENERATOR_" + instance + "_MC_1_ACTIVE", event["value"], 2)
    if event_id == "TC_OPERATION_MODE" and event["value"] == "Capture":
        Database.setSymbolValue("evsys", "USER_" + instance + "_EVU_READY", True, 2)
###################################################################################################
######################################## Capture Mode #############################################
###################################################################################################
#capture menu: only visible while the TC operation mode is "Capture"
tcSym_CaptureMenu = tcComponent.createMenuSymbol("TC_CAPTURE_MENU", tcSym_OperationMode)
tcSym_CaptureMenu.setLabel("Capture")
tcSym_CaptureMenu.setVisible(False)
tcSym_CaptureMenu.setDependencies(updateCaptureMenuVisibleProperty, ["TC_OPERATION_MODE"])
#hidden integer symbol exposing the channel count to templates
tcSym_CaptureNumChannels = tcComponent.createIntegerSymbol("TC_NUM_CHANNELS", tcSym_OperationMode)
tcSym_CaptureNumChannels.setLabel("Number of capture channels")
tcSym_CaptureNumChannels.setVisible(False)
tcSym_CaptureNumChannels.setDefaultValue(int(NUM_CAPTURE_CHANNELS))
# Create the per-channel symbol set for each capture channel.
# NOTE(review): the channel enable symbols are created with IDs
# "TC_CAPTURE_CTRLC_CPTEN<n>", but several setDependencies() calls below
# reference "TC_CAPTURE_CTRLA_CPTEN<n>", and the trigger-source symbol ID is
# "TC_CAPTURE_CTRLA_COPEN<n>" — confirm these IDs are intentional; mismatched
# IDs would leave those visibility callbacks without a trigger.
for channelID in range (0, NUM_CAPTURE_CHANNELS):
    #capture channel 0
    tcSym_Capture_Channel.append(channelID)
    tcSym_Capture_Channel[channelID] = tcComponent.createBooleanSymbol("TC_CAPTURE_CTRLC_CPTEN"+str(channelID), tcSym_CaptureMenu)
    tcSym_Capture_Channel[channelID].setLabel("Enable Capture Channel "+str(channelID))
    tcSym_Capture_Channel[channelID].setDefaultValue(True)
    # Channel 1 is forced on (read-only) alongside channel 0.
    if (channelID == 1):
        tcSym_Capture_Channel[channelID].setReadOnly(True)
    #capture channel trigger source (only channel 0 carries the selector)
    if (channelID == 0):
        tcSym_Capture_Trigger_Source.append(channelID)
        tcSym_Capture_Trigger_Source[channelID] = tcComponent.createKeyValueSetSymbol("TC_CAPTURE_CTRLA_COPEN"+str(channelID), tcSym_Capture_Channel[channelID])
        tcSym_Capture_Trigger_Source[channelID].setLabel("Capture Trigger Source")
        tcSym_Capture_Trigger_Source[channelID].setReadOnly(True)
        tcSym_Capture_Trigger_Source[channelID].addKey("EVENT", "0", "Input Event")
        tcSym_Capture_Trigger_Source[channelID].setDefaultValue(0)
        tcSym_Capture_Trigger_Source[channelID].setOutputMode("Value")
        tcSym_Capture_Trigger_Source[channelID].setDisplayMode("Description")
        tcSym_Capture_Trigger_Source[channelID].setDependencies(tcChannelVisible, ["TC_CAPTURE_CTRLA_CPTEN"+str(channelID)])
    #capture trigger edge
    tcSym_Capture_Trigger_Edge.append(channelID)
    tcSym_Capture_Trigger_Edge[channelID] = tcComponent.createKeyValueSetSymbol("TC_CAPTURE_TRIGGER_EDGE"+str(channelID), tcSym_Capture_Channel[channelID])
    tcSym_Capture_Trigger_Edge[channelID].setLabel("Capture Trigger Edge")
    tcSym_Capture_Trigger_Edge[channelID].addKey("RISE_EDGE", "0", "Rising Edge")
    tcSym_Capture_Trigger_Edge[channelID].addKey("FALL_EDGE", "1", "Falling Edge")
    tcSym_Capture_Trigger_Edge[channelID].setDefaultValue(0)
    tcSym_Capture_Trigger_Edge[channelID].setOutputMode("Value")
    tcSym_Capture_Trigger_Edge[channelID].setDisplayMode("Description")
    tcSym_Capture_Trigger_Edge[channelID].setDependencies(tcChannelVisible, ["TC_CAPTURE_CTRLA_CPTEN"+str(channelID)])
    #capture event trigger action
    tcSym_Capture_Trigger_Action.append(channelID)
    tcSym_Capture_Trigger_Action[channelID] = tcComponent.createKeyValueSetSymbol("TC_CAPTURE_TRIGGER_ACTION"+str(channelID), tcSym_Capture_Channel[channelID])
    tcSym_Capture_Trigger_Action[channelID].setLabel("Capture Trigger Action")
    tcSym_Capture_Trigger_Action[channelID].addKey("PPW", "5", "Period captured in CC0, pulse width in CC1")
    tcSym_Capture_Trigger_Action[channelID].addKey("PWP", "6", "Period captured in CC1, pulse width in CC0")
    tcSym_Capture_Trigger_Action[channelID].setDefaultValue(0)
    tcSym_Capture_Trigger_Action[channelID].setVisible(True)
    tcSym_Capture_Trigger_Action[channelID].setOutputMode("Key")
    tcSym_Capture_Trigger_Action[channelID].setDisplayMode("Description")
    tcSym_Capture_Trigger_Action[channelID].setDependencies(tcChannelVisible, ["TC_CAPTURE_CTRLA_CPTEN"+str(channelID)])
    #capture channel counter/compare interrupt
    tcSym_Capture_INTENSET_MC.append(channelID)
    tcSym_Capture_INTENSET_MC[channelID] = tcComponent.createBooleanSymbol("TC_CAPTURE_INTSET_MC"+str(channelID), tcSym_Capture_Channel[channelID])
    tcSym_Capture_INTENSET_MC[channelID].setLabel("Enable Capture " + str(channelID) + " Interrupt")
    tcSym_Capture_INTENSET_MC[channelID].setDefaultValue(False)
    tcSym_Capture_INTENSET_MC[channelID].setDependencies(tcChannelVisible, ["TC_CAPTURE_CTRLC_CPTEN"+str(channelID)])
    #capture event out
    tcSym_Capture_EVCTRL_MCEO.append(channelID)
    tcSym_Capture_EVCTRL_MCEO[channelID] = tcComponent.createBooleanSymbol("TC_CAPTURE_EVCTRL_MCEO"+str(channelID), tcSym_Capture_Channel[channelID])
    tcSym_Capture_EVCTRL_MCEO[channelID].setLabel("Enable Capture " + str(channelID) + " Event Out")
    tcSym_Capture_EVCTRL_MCEO[channelID].setDefaultValue(False)
    tcSym_Capture_EVCTRL_MCEO[channelID].setDependencies(tcChannelVisible, ["TC_CAPTURE_CTRLC_CPTEN"+str(channelID)])
#capture error interrupt
tcSym_Capture_INTENSET_ERR = tcComponent.createBooleanSymbol("TC_CAPTURE_ERR_INTERRUPT_MODE", tcSym_CaptureMenu)
tcSym_Capture_INTENSET_ERR.setLabel("Enable Capture Error Interrupt")
#capture overflow interrupt
tcSym_Capture_INTENSET_OVF = tcComponent.createBooleanSymbol("TC_CAPTURE_OVF_INTERRUPT_MODE", tcSym_CaptureMenu)
tcSym_Capture_INTENSET_OVF.setLabel("Enable Capture Overflow Interrupt")
#capture interrupt: hidden aggregate of all individual interrupt enables,
#recomputed by updateTCCaptureInterruptValue whenever any of them changes
global tcSym_Capture_InterruptMode
tcSym_Capture_InterruptMode = tcComponent.createBooleanSymbol("TC_CAPTURE_INTERRUPT", tcSym_CaptureMenu)
tcSym_Capture_InterruptMode.setVisible(False)
tcSym_Capture_InterruptMode.setDependencies(updateTCCaptureInterruptValue, ["TC_CAPTURE_ERR_INTERRUPT_MODE", "TC_CAPTURE_OVF_INTERRUPT_MODE", "TC_CAPTURE_INTSET_MC0", "TC_CAPTURE_INTSET_MC1"])
#hidden symbol whose callback keeps the EVSYS configuration in sync
tcSym_Capture_EVSYS_CONFIGURE = tcComponent.createIntegerSymbol("TC_CAPTURE_EVSYS_CONFIGURE", tcSym_CaptureMenu)
tcSym_Capture_EVSYS_CONFIGURE.setVisible(False)
tcSym_Capture_EVSYS_CONFIGURE.setDependencies(tcCaptureEvsys, ["TC_OPERATION_MODE", "TC_CAPTURE_EVCTRL_MCEO0", "TC_CAPTURE_EVCTRL_MCEO1"])
|
# Copyright (c) Facebook, Inc. and its affiliates.
import csv
import json
import os
import torch
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
from mmf.common.batch_collator import BatchCollator
from mmf.common.registry import registry
from mmf.utils.configuration import get_mmf_env
from mmf.utils.distributed import gather_tensor, is_dist_initialized, is_master
from mmf.utils.file_io import PathManager
from mmf.utils.general import (
ckpt_name_from_core_args,
foldername_from_config_override,
get_batch_size,
)
from mmf.utils.timer import Timer
class TestReporter(Dataset):
    def __init__(self, multi_task_instance):
        """Set up report state and resolve the folder predictions are written to.

        Args:
            multi_task_instance: task object exposing ``dataset_type`` and
                ``get_datasets()``; its datasets are iterated one at a time.
        """
        self.test_task = multi_task_instance
        self.task_type = multi_task_instance.dataset_type
        self.config = registry.get("config")
        self.writer = registry.get("writer")
        # Accumulated prediction rows; flushed to disk per dataset.
        self.report = []
        self.timer = Timer()
        self.training_config = self.config.training
        self.num_workers = self.training_config.num_workers
        self.batch_size = self.training_config.batch_size
        self.report_folder_arg = get_mmf_env(key="report_dir")
        self.experiment_name = self.training_config.experiment_name
        self.datasets = []
        for dataset in self.test_task.get_datasets():
            self.datasets.append(dataset)
        # Start at -1 so the first next_dataset() call selects index 0
        # without flushing a (nonexistent) previous report.
        self.current_dataset_idx = -1
        self.current_dataset = self.datasets[self.current_dataset_idx]
        self.save_dir = get_mmf_env(key="save_dir")
        # Derived path: <save_dir>/<ckpt name + config override>/reports
        self.report_folder = ckpt_name_from_core_args(self.config)
        self.report_folder += foldername_from_config_override(self.config)
        self.report_folder = os.path.join(self.save_dir, self.report_folder)
        self.report_folder = os.path.join(self.report_folder, "reports")
        # An explicit report_dir env setting overrides the derived path.
        if self.report_folder_arg:
            self.report_folder = self.report_folder_arg
        PathManager.mkdirs(self.report_folder)
def next_dataset(self):
if self.current_dataset_idx >= 0:
self.flush_report()
self.current_dataset_idx += 1
if self.current_dataset_idx == len(self.datasets):
return False
else:
self.current_dataset = self.datasets[self.current_dataset_idx]
self.writer.write("Predicting for " + self.current_dataset.dataset_name)
return True
    def flush_report(self):
        """Write accumulated predictions for the current dataset to disk.

        The output format (csv or json) follows
        ``config.evaluation.predict_file_format``; the file name encodes the
        dataset, optional experiment name, task type and a timestamp. No-op
        on non-master processes; clears ``self.report`` afterwards.
        """
        # Only the master process writes; workers have partial data.
        if not is_master():
            return
        name = self.current_dataset.dataset_name
        time_format = "%Y-%m-%dT%H:%M:%S"
        time = self.timer.get_time_hhmmss(None, format=time_format)
        filename = name + "_"
        if len(self.experiment_name) > 0:
            filename += self.experiment_name + "_"
        filename += self.task_type + "_"
        filename += time
        if self.config.evaluation.predict_file_format == "csv":
            filepath = os.path.join(self.report_folder, filename + ".csv")
            self.csv_dump(filepath)
        else:
            filepath = os.path.join(self.report_folder, filename + ".json")
            self.json_dump(filepath)
        self.writer.write(
            "Wrote evalai predictions for {} to {}".format(
                name, os.path.abspath(filepath)
            )
        )
        # Reset the buffer for the next dataset.
        self.report = []
def csv_dump(self, filepath):
    """Dump ``self.report`` to *filepath* as CSV.

    The header row is taken from the keys of the first entry, so all
    entries are assumed to share the same keys. A no-op when the report
    is empty.
    """
    # Guard: flush_report() calls this unconditionally; an empty report
    # previously crashed with IndexError on self.report[0].
    if not self.report:
        return
    with PathManager.open(filepath, "w") as f:
        title = self.report[0].keys()
        cw = csv.DictWriter(f, title, delimiter=",", quoting=csv.QUOTE_MINIMAL)
        cw.writeheader()
        cw.writerows(self.report)
def json_dump(self, filepath):
    """Serialize the accumulated report to *filepath* as JSON."""
    with PathManager.open(filepath, "w") as out_file:
        out_file.write(json.dumps(self.report))
def get_dataloader(self):
    """Build a DataLoader over the current dataset for prediction."""
    loader_kwargs = self._add_extra_args_for_dataloader()
    collator = BatchCollator(
        self.current_dataset.dataset_name, self.current_dataset.dataset_type
    )
    return DataLoader(
        dataset=self.current_dataset,
        collate_fn=collator,
        num_workers=self.num_workers,
        pin_memory=self.config.training.pin_memory,
        **loader_kwargs
    )
def _add_extra_args_for_dataloader(self, other_args=None):
    """Return extra DataLoader kwargs (sampler/shuffle plus batch size).

    In distributed runs a non-shuffling DistributedSampler is attached so
    each rank sees its own shard; otherwise shuffling is simply disabled.
    Mutates and returns *other_args* when one is supplied.
    """
    args = {} if other_args is None else other_args

    if is_dist_initialized():
        # Each rank gets a disjoint, deterministic shard of the data.
        args["sampler"] = DistributedSampler(self.current_dataset, shuffle=False)
    else:
        args["shuffle"] = False

    args["batch_size"] = get_batch_size()
    return args
def prepare_batch(self, batch):
    """Delegate batch preparation to the dataset currently under report."""
    current = self.current_dataset
    return current.prepare_batch(batch)
def __len__(self):
    """Length of the dataset currently being reported on."""
    current = self.current_dataset
    return len(current)
def __getitem__(self, idx):
    """Index directly into the current dataset."""
    current = self.current_dataset
    return current[idx]
def add_to_report(self, report, model):
    """Gather a batch report across processes and buffer its predictions.

    COCO captioning gathers captions (and tensor image ids); every other
    dataset gathers the score matrix. A fixed set of id/token fields is
    gathered as well. Only the master process keeps the formatted results
    (non-master ranks still participate in the collective gathers).
    """
    # TODO: Later gather whole report for no opinions
    if self.current_dataset.dataset_name == "coco":
        report.captions = gather_tensor(report.captions)
        if isinstance(report.image_id, torch.Tensor):
            report.image_id = gather_tensor(report.image_id).view(-1)
    else:
        report.scores = gather_tensor(report.scores).view(
            -1, report.scores.size(-1)
        )

    for field in ("id", "question_id", "image_id", "context_tokens"):
        report = self.reshape_and_gather(report, field)

    if not is_master():
        return

    results = self.current_dataset.format_for_prediction(report)

    # Let the model (or its DDP-wrapped inner module) post-process results.
    if hasattr(model, "format_for_prediction"):
        results = model.format_for_prediction(results, report)
    elif hasattr(model.module, "format_for_prediction"):
        results = model.module.format_for_prediction(results, report)

    self.report = self.report + results
def reshape_and_gather(self, report, key):
    """Gather ``report[key]`` across processes, flattening to 1-D or to
    (N, enc_size) depending on its rank. Keys absent from the report are
    left untouched; tensors with more than two dims raise RuntimeError.
    """
    if key not in report:
        return report

    ndims = report[key].dim()
    if ndims == 1:
        report[key] = gather_tensor(report[key]).view(-1)
    elif ndims == 2:
        # Preserve the trailing encoding dimension, merge everything else.
        _, enc_size = report[key].size()
        report[key] = gather_tensor(report[key]).view(-1, enc_size)
    else:
        raise RuntimeError(
            "Expect 1 or 2 dimensions for {} in report for 'reshape and gather'"
            " in 'TestReporter', but got {} instead.".format(key, ndims)
        )
    return report
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.