hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70ed4d4893404d136e150e76527e68e303195838 | 5,871 | py | Python | substrate_uptake_kinetics.py | Chaowu88/zymomonas_modeling | 90d0c9c3b081c1e2421321c77d3ddffa2ea02bba | [
"MIT"
] | null | null | null | substrate_uptake_kinetics.py | Chaowu88/zymomonas_modeling | 90d0c9c3b081c1e2421321c77d3ddffa2ea02bba | [
"MIT"
] | null | null | null | substrate_uptake_kinetics.py | Chaowu88/zymomonas_modeling | 90d0c9c3b081c1e2421321c77d3ddffa2ea02bba | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'Chao Wu'
__date__ = '05/25/2020'
__version__ = '1.0'
r'''
This script estimates the kinetic parameters of glucose facilitator protein (glf) which transports both glucose and xylose with competitive inhibition of each other
Usage:
python path\to\substrate_uptake_kinetics.py
'''
OUT_DIR = r'output\directory'
DATA_File = r'path\to\measured_kinetics.xlsx'
OD2BIOMASS = 0.33
GLC_MW = 180.156 # g/mol
XYL_MW = 150.13 # g/mol
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.integrate import solve_ivp, odeint
from scipy.optimize import least_squares
from openopt import NLP
import matplotlib.pyplot as plt
class ParamsFitting:
def __init__(self, data_file):
self.data = pd.read_excel(data_file, index_col = 0, usecols = [0, 1, 2, 3], names = ['time', 'OD', 'glucose', 'xylose'])
self.timepoints = self.data.index.values
self.exp_biom = (self.data['OD'] * OD2BIOMASS).values # g/L
self.exp_glc = (self.data['glucose'] / GLC_MW * 1000).values # mmol/L
self.exp_xyl = (self.data['xylose'] / XYL_MW * 1000).values # mmol/L
self.ini_concs = [self.exp_glc[0], self.exp_xyl[0]]
self.interp_biomass = interp1d(self.timepoints, self.exp_biom, kind = 'linear', fill_value = 'extrapolate') # don't use cubic, or biomass will be negative
@staticmethod
def v_glucose(c_glc, c_xyl, vmax_glc, km_glc, ki_xyl):
return vmax_glc * c_glc / (km_glc * (1 + c_xyl/ki_xyl) + c_glc)
@staticmethod
def v_xylose(c_xyl, c_glc, vmax_xyl, km_xyl, ki_glc):
return vmax_xyl * c_xyl / (km_xyl * (1 + c_glc/ki_glc) + c_xyl)
def _concs_der(self, t, y, *args):
c_glc, c_xyl = y
vmax_glc, vmax_xyl, km_glc, km_xyl, ki_glc, ki_xyl = args
biomass = self.interp_biomass(t)
dglcdt = -self.v_glucose(c_glc, c_xyl, vmax_glc, km_glc, ki_xyl) * biomass
dxyldt = -self.v_xylose(c_xyl, c_glc, vmax_xyl, km_xyl, ki_glc) * biomass
return dglcdt, dxyldt
def _concs(self, params, ts):
glucose, xylose = odeint(self._concs_der, y0 = self.ini_concs, t = ts, tfirst = True, args = tuple(params)).T
return glucose, xylose
def fit(self):
def f(x):
glucose, xylose = self._concs(x, self.timepoints)
resid_glucose = glucose - self.exp_glc
resid_xylose = xylose - self.exp_xyl
SSR = np.sum(resid_glucose**2) + np.sum(resid_xylose**2)
return SSR
x0 = [10, 2, 50, 100, 10, 10]
res = NLP(f, x0 = x0, lb = [0]*len(x0), ub = [np.inf]*len(x0)).solve('ralg')
self.fitted_params = pd.Series(res.xf, index = ['Vmax_glc(mmol/gDCW/L)', 'Vmax_xyl(mmol/gDCW/L)',
'Km_glc(mmol/L)', 'Km_xyl(mmol/L)', 'Ki_glc(mmol/L)',
'Ki_xyl(mmol/L)'])
sim_glc, sim_xyl = self._concs(self.fitted_params.values, self.timepoints)
R2 = 1 - (np.sum((self.exp_glc - sim_glc)**2) + np.sum((self.exp_xyl - sim_xyl)**2)) / \
(np.sum((self.exp_glc - self.exp_glc.mean())**2) + np.sum((self.exp_xyl - self.exp_xyl.mean())**2))
print('R2 = %.3f' % R2)
def save_fitted_parameters(self, out_dir):
if hasattr(self, 'fitted_params'):
self.fitted_params.to_csv('%s/fitted_params.tsv' % out_dir, sep = '\t', header = False)
else:
raise AttributeError('run fit method first')
def plot_fitted_vs_measured_curve(self, out_dir):
if hasattr(self, 'fitted_params'):
ts = np.linspace(self.timepoints.min(), self.timepoints.max(), 1000)
sim_glc, sim_xyl = self._concs(self.fitted_params.values, ts)
plt.plot(ts, sim_glc, 'forestgreen', linewidth = 3)
plt.plot(ts, sim_xyl, 'royalblue', linewidth = 3)
plt.plot(self.timepoints, self.exp_glc, 'forestgreen', linestyle = '', marker = '.', markersize = 15)
plt.plot(self.timepoints, self.exp_xyl, 'royalblue', linestyle = '', marker = '.', markersize = 15)
plt.tick_params(labelsize = 15)
plt.plot([], [], 'forestgreen', marker = '.', label = 'Glucose')
plt.plot([], [], 'royalblue', marker = '.', label = 'Xylose')
plt.legend(loc = 'center', bbox_to_anchor = (0.75, 0.3), fontsize = 18)
names = ['$V_{max,glc}$',
'$V_{max,xyl}$',
'$K_{m,glc}$',
'$K_{m,xyl}$',
'$K_{i,glc}$',
'$K_{i,xyl}$']
units = ['mmol gDCW$^{-1}$ h$^{-1}$',
'mmol gDCW$^{-1}$ h$^{-1}$',
'mmol L$^{-1}$',
'mmol L$^{-1}$',
'mmol L$^{-1}$',
'mmol L$^{-1}$']
paramsStr = ['%s: %.2f %s' % (name, value, unit) for name, value, unit in zip(names, self.fitted_params.values, units)]
msg = 'Fitted parameters:\n' + '\n'.join(paramsStr)
plt.text(0.35, 0.5, msg, transform = plt.gca().transAxes, fontsize = 13.5)
plt.xlabel('Time (h)', fontsize = 20)
plt.ylabel('Substrate conc. (mmol $L^{-1}$)', fontsize = 20)
plt.savefig('%s/fitted_vs_measured_data.jpg' % out_dir, dpi = 300, bbox_inches = 'tight')
plt.close()
else:
raise AttributeError('run fit method first')
def save_kinetics_data(self, out_dir):
if hasattr(self, 'fitted_params'):
ts = np.linspace(self.timepoints.min(), self.timepoints.max(), 100)
c_glc, c_xyl = self._concs(self.fitted_params.values, ts)
vmax_glc, vmax_xyl, km_glc, km_xyl, ki_glc, ki_xyl = self.fitted_params
v_glc = self.v_glucose(c_glc, c_xyl, vmax_glc, km_glc, ki_xyl)
v_xyl = self.v_xylose(c_xyl, c_glc, vmax_xyl, km_xyl, ki_glc)
kineticData = pd.DataFrame({'time': ts,
'glc_c': c_glc,
'xyl_c': c_xyl,
'glc_v': v_glc,
'xyl_v': v_xyl})
kineticData.to_csv('%s/kinetic_data.tsv' % out_dir, sep = '\t', index = False)
else:
raise AttributeError('run fit method first')
if __name__ == '__main__':
glfFitting = ParamsFitting(DATA_File)
glfFitting.fit()
glfFitting.save_fitted_parameters(OUT_DIR)
glfFitting.plot_fitted_vs_measured_curve(OUT_DIR)
glfFitting.save_kinetics_data(OUT_DIR)
| 31.395722 | 165 | 0.648612 |
39d06f11ff733db3b39a5406178f2f84020a092d | 1,916 | py | Python | source/plot_theta_time.py | raj-krishnan/SDES_project_pendulum_with_friction | 6f24614914d58d8912346b43bd7e1faa925574f8 | [
"Apache-2.0"
] | null | null | null | source/plot_theta_time.py | raj-krishnan/SDES_project_pendulum_with_friction | 6f24614914d58d8912346b43bd7e1faa925574f8 | [
"Apache-2.0"
] | null | null | null | source/plot_theta_time.py | raj-krishnan/SDES_project_pendulum_with_friction | 6f24614914d58d8912346b43bd7e1faa925574f8 | [
"Apache-2.0"
] | null | null | null | import numpy
from matplotlib import pyplot as plt
from matplotlib import lines as mlines
from oscillator import Oscillator
def plot_underdamped_pendulum():
pendulum = Oscillator(alpha=0.8, radius=1)
time_steps = 400
time_max = 10.0
time = numpy.linspace(0, time_max, time_steps + 1)
plt.axes(xlim=(0, 10), ylim=(-0.2, 0.2))
trajectory = pendulum.get_trajectory(time_max, time_steps)
theta = [position for position, velocity in trajectory]
plt.plot(time, theta, "r")
def plot_overdamped_pendulum():
pendulum = Oscillator(alpha=30, radius=1)
time_steps = 400
time_max = 10.0
time = numpy.linspace(0, time_max, time_steps + 1)
plt.axes(xlim=(0, 10), ylim=(-0.2, 0.2))
trajectory = pendulum.get_trajectory(time_max, time_steps)
theta = [position for position, velocity in trajectory]
plt.plot(time, theta, "b")
def plot_critically_damped_pendulum():
pendulum = Oscillator(alpha=6, radius=1)
time_steps = 400
time_max = 10.0
time = numpy.linspace(0, time_max, time_steps + 1)
plt.axes(xlim=(0, 10), ylim=(-0.2, 0.2))
trajectory = pendulum.get_trajectory(time_max, time_steps)
theta = [position for position, velocity in trajectory]
plt.plot(time, theta, "g")
plot_underdamped_pendulum()
plot_overdamped_pendulum()
plot_critically_damped_pendulum()
plt.xlabel("Time", fontsize=13, family='monospace')
plt.ylabel("Angular Displacement",fontsize=13, family='monospace')
red_line = mlines.Line2D([], [], color='red',
markersize=15, label='Underdamped Pendulum')
blue_line = mlines.Line2D([], [], color='blue',
markersize=15, label='Overdamped Pendulum')
green_line = mlines.Line2D([], [], color='green',
markersize=15, label='Critically Damped Pendulum')
plt.legend(handles=[red_line, blue_line, green_line])
plt.savefig("pendulum.png")
| 33.034483 | 77 | 0.682672 |
233f929ef4916a2646b4a6885abdd38c1ed77ec1 | 533 | py | Python | pythontutorials/Udacity/CS101/Lesson 05 - How to Repeat/Q19-More Friends.py | JoseALermaIII/python-tutorials | 9d6cb78beec0bb55e27c49da1217317ba4d5f4fc | [
"MIT"
] | 2 | 2017-04-20T02:57:19.000Z | 2018-10-12T20:15:47.000Z | pythontutorials/Udacity/CS101/Lesson 05 - How to Repeat/Q19-More Friends.py | JoseALermaIII/python-tutorials | 9d6cb78beec0bb55e27c49da1217317ba4d5f4fc | [
"MIT"
] | 8 | 2021-03-18T21:50:16.000Z | 2022-03-11T23:38:01.000Z | pythontutorials/Udacity/CS101/Lesson 05 - How to Repeat/Q19-More Friends.py | JoseALermaIII/python-tutorials | 9d6cb78beec0bb55e27c49da1217317ba4d5f4fc | [
"MIT"
] | 3 | 2018-08-30T20:30:50.000Z | 2022-01-18T13:40:51.000Z | # Define a procedure, is_friend, that takes
# a string as its input, and returns a
# Boolean indicating if the input string
# is the name of a friend. Assume
# I am friends with everyone whose name
# starts with either 'D' or 'N', but no one
# else. You do not need to check for
# lower case 'd' or 'n'
def is_friend(name):
if name[0] == 'D':
return True
if name[0] == 'N':
return True
return False
print is_friend('Diane')
#>>> True
print is_friend('Ned')
#>>> True
print is_friend('Moe')
#>>> False
| 21.32 | 43 | 0.649156 |
bc9902e6a5d99a78c9befe2d892f997baaf50bc7 | 17,948 | py | Python | neutron/services/qos/qos_plugin.py | kklimonda/neutron | ccdddad358a4bf802d59b3fbbfe88a1e9881c96c | [
"Apache-2.0"
] | 4 | 2018-08-05T00:43:03.000Z | 2021-10-13T00:45:45.000Z | neutron/services/qos/qos_plugin.py | kklimonda/neutron | ccdddad358a4bf802d59b3fbbfe88a1e9881c96c | [
"Apache-2.0"
] | 8 | 2018-06-14T14:50:16.000Z | 2018-11-13T16:30:42.000Z | neutron/services/qos/qos_plugin.py | kklimonda/neutron | ccdddad358a4bf802d59b3fbbfe88a1e9881c96c | [
"Apache-2.0"
] | 7 | 2018-06-12T18:57:04.000Z | 2019-05-09T15:42:30.000Z | # Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events as callbacks_events
from neutron_lib.callbacks import registry as callbacks_registry
from neutron_lib.callbacks import resources as callbacks_resources
from neutron_lib import exceptions as lib_exc
from neutron.common import exceptions as n_exc
from neutron.db import api as db_api
from neutron.db import db_base_plugin_common
from neutron.extensions import qos
from neutron.objects import base as base_obj
from neutron.objects import network as network_object
from neutron.objects import ports as ports_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import qos_policy_validator as checker
from neutron.objects.qos import rule_type as rule_type_object
from neutron.services.qos.drivers import manager
from neutron.services.qos import qos_consts
class QoSPlugin(qos.QoSPluginBase):
"""Implementation of the Neutron QoS Service Plugin.
This class implements a Quality of Service plugin that provides quality of
service parameters over ports and networks.
"""
supported_extension_aliases = ['qos',
'qos-bw-limit-direction',
'qos-default',
'qos-rule-type-details']
__native_pagination_support = True
__native_sorting_support = True
def __init__(self):
super(QoSPlugin, self).__init__()
self.driver_manager = manager.QosServiceDriverManager()
callbacks_registry.subscribe(
self._validate_create_port_callback,
callbacks_resources.PORT,
callbacks_events.PRECOMMIT_CREATE)
callbacks_registry.subscribe(
self._validate_update_port_callback,
callbacks_resources.PORT,
callbacks_events.PRECOMMIT_UPDATE)
callbacks_registry.subscribe(
self._validate_update_network_callback,
callbacks_resources.NETWORK,
callbacks_events.PRECOMMIT_UPDATE)
def _get_ports_with_policy(self, context, policy):
networks_ids = policy.get_bound_networks()
ports_with_net_policy = ports_object.Port.get_objects(
context, network_id=networks_ids)
# Filter only this ports which don't have overwritten policy
ports_with_net_policy = [
port for port in ports_with_net_policy if
port.qos_policy_id is None
]
ports_ids = policy.get_bound_ports()
ports_with_policy = ports_object.Port.get_objects(
context, id=ports_ids)
return list(set(ports_with_policy + ports_with_net_policy))
def _validate_create_port_callback(self, resource, event, trigger,
**kwargs):
context = kwargs['context']
port_id = kwargs['port']['id']
port = ports_object.Port.get_object(context, id=port_id)
network = network_object.Network.get_object(context,
id=port.network_id)
policy_id = port.qos_policy_id or network.qos_policy_id
if policy_id is None:
return
policy = policy_object.QosPolicy.get_object(context, id=policy_id)
self.validate_policy_for_port(policy, port)
def _validate_update_port_callback(self, resource, event, trigger,
**kwargs):
context = kwargs['context']
original_policy_id = kwargs['original_port'].get(
qos_consts.QOS_POLICY_ID)
policy_id = kwargs['port'].get(qos_consts.QOS_POLICY_ID)
if policy_id is None or policy_id == original_policy_id:
return
updated_port = ports_object.Port.get_object(
context, id=kwargs['port']['id'])
policy = policy_object.QosPolicy.get_object(context, id=policy_id)
self.validate_policy_for_port(policy, updated_port)
def _validate_update_network_callback(self, resource, event, trigger,
**kwargs):
context = kwargs['context']
original_network = kwargs['original_network']
updated_network = kwargs['network']
original_policy_id = original_network.get(qos_consts.QOS_POLICY_ID)
policy_id = updated_network.get(qos_consts.QOS_POLICY_ID)
if policy_id is None or policy_id == original_policy_id:
return
policy = policy_object.QosPolicy.get_object(context, id=policy_id)
ports = ports_object.Port.get_objects(
context, network_id=updated_network['id'])
# Filter only this ports which don't have overwritten policy
ports = [
port for port in ports if port.qos_policy_id is None
]
self.validate_policy_for_ports(policy, ports)
def validate_policy(self, context, policy):
ports = self._get_ports_with_policy(context, policy)
self.validate_policy_for_ports(policy, ports)
def validate_policy_for_ports(self, policy, ports):
for port in ports:
self.validate_policy_for_port(policy, port)
def validate_policy_for_port(self, policy, port):
for rule in policy.rules:
if not self.driver_manager.validate_rule_for_port(rule, port):
raise n_exc.QosRuleNotSupported(rule_type=rule.rule_type,
port_id=port['id'])
@db_base_plugin_common.convert_result_to_dict
def create_policy(self, context, policy):
"""Create a QoS policy.
:param context: neutron api request context
:type context: neutron_lib.context.Context
:param policy: policy data to be applied
:type policy: dict
:returns: a QosPolicy object
"""
# NOTE(dasm): body 'policy' contains both tenant_id and project_id
# but only latter needs to be used to create QosPolicy object.
# We need to remove redundant keyword.
# This cannot be done in other place of stacktrace, because neutron
# needs to be backward compatible.
policy['policy'].pop('tenant_id', None)
policy_obj = policy_object.QosPolicy(context, **policy['policy'])
with db_api.context_manager.writer.using(context):
policy_obj.create()
self.driver_manager.call(qos_consts.CREATE_POLICY_PRECOMMIT,
context, policy_obj)
self.driver_manager.call(qos_consts.CREATE_POLICY, context, policy_obj)
return policy_obj
@db_base_plugin_common.convert_result_to_dict
def update_policy(self, context, policy_id, policy):
"""Update a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to update
:param policy_id: str uuid
:param policy: new policy data to be applied
:type policy: dict
:returns: a QosPolicy object
"""
policy_data = policy['policy']
with db_api.context_manager.writer.using(context):
policy_obj = self._get_policy_obj(context, policy_id)
policy_obj.update_fields(policy_data, reset_changes=True)
policy_obj.update()
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy_obj)
self.driver_manager.call(qos_consts.UPDATE_POLICY,
context, policy_obj)
return policy_obj
def delete_policy(self, context, policy_id):
"""Delete a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to delete
:type policy_id: str uuid
:returns: None
"""
with db_api.context_manager.writer.using(context):
policy = policy_object.QosPolicy(context)
policy.id = policy_id
policy.delete()
self.driver_manager.call(qos_consts.DELETE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.DELETE_POLICY,
context, policy)
def _get_policy_obj(self, context, policy_id):
"""Fetch a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to fetch
:type policy_id: str uuid
:returns: a QosPolicy object
:raises: n_exc.QosPolicyNotFound
"""
obj = policy_object.QosPolicy.get_object(context, id=policy_id)
if obj is None:
raise n_exc.QosPolicyNotFound(policy_id=policy_id)
return obj
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy(self, context, policy_id, fields=None):
"""Get a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to update
:type policy_id: str uuid
:returns: a QosPolicy object
"""
return self._get_policy_obj(context, policy_id)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policies(self, context, filters=None, fields=None, sorts=None,
limit=None, marker=None, page_reverse=False):
"""Get QoS policies.
:param context: neutron api request context
:type context: neutron.context.Context
:param filters: search criteria
:type filters: dict
:returns: QosPolicy objects meeting the search criteria
"""
filters = filters or dict()
pager = base_obj.Pager(sorts, limit, page_reverse, marker)
return policy_object.QosPolicy.get_objects(context, _pager=pager,
**filters)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_rule_type(self, context, rule_type_name, fields=None):
if not context.is_admin:
raise lib_exc.NotAuthorized()
return rule_type_object.QosRuleType.get_object(rule_type_name)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_rule_types(self, context, filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False):
if not filters:
filters = {}
return rule_type_object.QosRuleType.get_objects(**filters)
def supported_rule_type_details(self, rule_type_name):
return self.driver_manager.supported_rule_type_details(rule_type_name)
@property
def supported_rule_types(self):
return self.driver_manager.supported_rule_types
@db_base_plugin_common.convert_result_to_dict
def create_policy_rule(self, context, rule_cls, policy_id, rule_data):
"""Create a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param policy_id: the id of the QosPolicy for which to create the rule
:type policy_id: str uuid
:param rule_data: the rule data to be applied
:type rule_data: dict
:returns: a QoS policy rule object
"""
rule_type = rule_cls.rule_type
rule_data = rule_data[rule_type + '_rule']
with db_api.autonested_transaction(context.session):
# Ensure that we have access to the policy.
policy = self._get_policy_obj(context, policy_id)
checker.check_bandwidth_rule_conflict(policy, rule_data)
rule = rule_cls(context, qos_policy_id=policy_id, **rule_data)
rule.create()
policy.obj_load_attr('rules')
self.validate_policy(context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
return rule
@db_base_plugin_common.convert_result_to_dict
def update_policy_rule(self, context, rule_cls, rule_id, policy_id,
rule_data):
"""Update a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QoS policy rule to update
:type rule_id: str uuid
:param policy_id: the id of the rule's policy
:type policy_id: str uuid
:param rule_data: the new rule data to update
:type rule_data: dict
:returns: a QoS policy rule object
"""
rule_type = rule_cls.rule_type
rule_data = rule_data[rule_type + '_rule']
with db_api.autonested_transaction(context.session):
# Ensure we have access to the policy.
policy = self._get_policy_obj(context, policy_id)
# Ensure the rule belongs to the policy.
checker.check_bandwidth_rule_conflict(policy, rule_data)
policy.get_rule_by_id(rule_id)
rule = rule_cls(context, id=rule_id)
rule.update_fields(rule_data, reset_changes=True)
rule.update()
policy.obj_load_attr('rules')
self.validate_policy(context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
return rule
def delete_policy_rule(self, context, rule_cls, rule_id, policy_id):
"""Delete a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QosPolicy Rule to delete
:type rule_id: str uuid
:param policy_id: the id of the rule's policy
:type policy_id: str uuid
:returns: None
"""
with db_api.autonested_transaction(context.session):
# Ensure we have access to the policy.
policy = self._get_policy_obj(context, policy_id)
rule = policy.get_rule_by_id(rule_id)
rule.delete()
policy.obj_load_attr('rules')
self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
context, policy)
self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_rule(self, context, rule_cls, rule_id, policy_id,
fields=None):
"""Get a QoS policy rule.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param rule_id: the id of the QoS policy rule to get
:type rule_id: str uuid
:param policy_id: the id of the rule's policy
:type policy_id: str uuid
:returns: a QoS policy rule object
:raises: n_exc.QosRuleNotFound
"""
with db_api.autonested_transaction(context.session):
# Ensure we have access to the policy.
self._get_policy_obj(context, policy_id)
rule = rule_cls.get_object(context, id=rule_id)
if not rule:
raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
return rule
# TODO(QoS): enforce rule types when accessing rule objects
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_rules(self, context, rule_cls, policy_id, filters=None,
fields=None, sorts=None, limit=None, marker=None,
page_reverse=False):
"""Get QoS policy rules.
:param context: neutron api request context
:type context: neutron.context.Context
:param rule_cls: the rule object class
:type rule_cls: a class from the rule_object (qos.objects.rule) module
:param policy_id: the id of the QosPolicy for which to get rules
:type policy_id: str uuid
:returns: QoS policy rule objects meeting the search criteria
"""
with db_api.autonested_transaction(context.session):
# Ensure we have access to the policy.
self._get_policy_obj(context, policy_id)
filters = filters or dict()
filters[qos_consts.QOS_POLICY_ID] = policy_id
pager = base_obj.Pager(sorts, limit, page_reverse, marker)
return rule_cls.get_objects(context, _pager=pager, **filters)
| 40.790909 | 79 | 0.657622 |
4d47bff40784994c85806fe1fe3bf479090628d9 | 13,352 | py | Python | models/mobile_net_v3.py | chuliuT/MobileNet_V3_SSD.pytorch | f28e38fd197e8eff72346dc70f2b8430111edac1 | [
"MIT"
] | 4 | 2020-04-01T02:17:04.000Z | 2022-01-16T07:17:28.000Z | models/mobile_net_v3.py | chuliuT/MobileNet_V3_SSD.pytorch | f28e38fd197e8eff72346dc70f2b8430111edac1 | [
"MIT"
] | null | null | null | models/mobile_net_v3.py | chuliuT/MobileNet_V3_SSD.pytorch | f28e38fd197e8eff72346dc70f2b8430111edac1 | [
"MIT"
] | 5 | 2020-01-07T03:52:46.000Z | 2020-05-17T12:36:11.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
# from https://github.com/kuan-wang/pytorch-mobilenet-v3/blob/master/mobilenetv3.py
# modified by chuliuT
# date:2019.12.9
__all__ = ['MobileNetV3', 'mobilenetv3']
def conv_bn(inp, oup, stride, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
return nn.Sequential(
conv_layer(inp, oup, 3, stride, 1, bias=False),
norm_layer(oup),
nlin_layer(inplace=True)
)
def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
return nn.Sequential(
conv_layer(inp, oup, 1, 1, 0, bias=False),
norm_layer(oup),
nlin_layer(inplace=True)
)
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3., inplace=self.inplace) / 6.
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3., inplace=self.inplace) / 6.
class SEModule(nn.Module):
def __init__(self, channel, reduction=4):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
Hsigmoid()
# nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
class Identity(nn.Module):
def __init__(self, channel):
super(Identity, self).__init__()
def forward(self, x):
return x
def make_divisible(x, divisible_by=8):
import numpy as np
return int(np.ceil(x * 1. / divisible_by) * divisible_by)
class MobileBottleneck(nn.Module):
def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE'):
super(MobileBottleneck, self).__init__()
assert stride in [1, 2]
assert kernel in [3, 5]
padding = (kernel - 1) // 2
self.use_res_connect = stride == 1 and inp == oup
conv_layer = nn.Conv2d
norm_layer = nn.BatchNorm2d
if nl == 'RE':
nlin_layer = nn.ReLU # or ReLU6
elif nl == 'HS':
nlin_layer = Hswish
else:
raise NotImplementedError
if se:
SELayer = SEModule
else:
SELayer = Identity
self.conv = nn.Sequential(
# pw
conv_layer(inp, exp, 1, 1, 0, bias=False),
norm_layer(exp),
nlin_layer(inplace=True),
# dw
conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
norm_layer(exp),
SELayer(exp),
nlin_layer(inplace=True),
# pw-linear
conv_layer(exp, oup, 1, 1, 0, bias=False),
norm_layer(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True):
super(BasicConv, self).__init__()
self.out_channels = out_planes
if bn:
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
self.relu = nn.ReLU(inplace=True) if relu else None
else:
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True)
self.bn = None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class BasicRFB(nn.Module):
    """Receptive Field Block: three parallel conv branches with increasing
    dilation, concatenated, projected back to ``out_planes`` and fused with
    a 1x1 shortcut as ``out = out * scale + shortcut``.
    NOTE(review): uses a ``BasicConv`` helper defined elsewhere in this file
    (distinct from ``BasicConv2d``) -- confirm its (relu, bn) defaults.
    """
    def __init__(self, in_planes, out_planes, stride=1, scale = 0.1, map_reduce=8, vision=1, groups=1):
        super(BasicRFB, self).__init__()
        self.scale = scale
        self.out_channels = out_planes
        # Shared bottleneck width for all branches.
        inter_planes = in_planes // map_reduce
        # Branch 0: 1x1 reduce -> 3x3 (carries the stride) -> dilated 3x3.
        self.branch0 = nn.Sequential(
                BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
                BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3), stride=stride, padding=(1, 1), groups=groups),
                BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=vision+1, dilation=vision+1, relu=False, groups=groups)
                )
        # Branch 1: like branch 0 but with a larger dilation (vision + 2).
        self.branch1 = nn.Sequential(
                BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
                BasicConv(inter_planes, 2*inter_planes, kernel_size=(3,3), stride=stride, padding=(1,1), groups=groups),
                BasicConv(2*inter_planes, 2*inter_planes, kernel_size=3, stride=1, padding=vision + 2, dilation=vision + 2, relu=False, groups=groups)
                )
        # Branch 2: two stacked 3x3s (larger effective kernel), then the
        # widest dilation (vision + 4).
        self.branch2 = nn.Sequential(
                BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
                BasicConv(inter_planes, (inter_planes//2)*3, kernel_size=3, stride=1, padding=1, groups=groups),
                BasicConv((inter_planes//2)*3, 2*inter_planes, kernel_size=3, stride=stride, padding=1, groups=groups),
                BasicConv(2*inter_planes, 2*inter_planes, kernel_size=3, stride=1, padding=vision + 4, dilation=vision + 4, relu=False, groups=groups)
                )
        # Project the 6*inter_planes concatenation back to out_planes.
        self.ConvLinear = BasicConv(6*inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
        self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
        self.relu = nn.ReLU(inplace=False)
    def forward(self,x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        # Concatenate branch outputs along the channel axis and project.
        out = torch.cat((x0,x1,x2),1)
        out = self.ConvLinear(out)
        short = self.shortcut(x)
        # Residual-style fusion weighted by ``scale``.
        out = out*self.scale + short
        out = self.relu(out)
        return out
class MobileNetV3(nn.Module):
    """MobileNetV3 backbone modified to emit six intermediate feature maps
    ("stages") for a detection-style head instead of classification logits.
    NOTE(review): ``n_class`` and ``dropout`` are currently unused -- the
    classifier head is commented out below.
    """
    def __init__(self, n_class=1000, input_size=224, dropout=0.8, mode='large', width_mult=1.0):
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        if mode == 'large':
            # refer to Table 1 in paper
            mobile_setting = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, False, 'RE', 1],
                [3, 64, 24, False, 'RE', 2],
                [3, 72, 24, False, 'RE', 1],
                [5, 72, 40, True, 'RE', 2],
                [5, 120, 40, True, 'RE', 1],
                [5, 120, 40, True, 'RE', 1],
                [3, 240, 80, False, 'HS', 2],
                [3, 200, 80, False, 'HS', 1],
                [3, 184, 80, False, 'HS', 1],
                [3, 184, 80, False, 'HS', 1],
                [3, 480, 112, True, 'HS', 1],
                [3, 672, 112, True, 'HS', 1],
                [5, 672, 160, True, 'HS', 2],
                [5, 960, 160, True, 'HS', 1],
                [5, 960, 160, True, 'HS', 1],
            ]
        elif mode == 'small':
            # refer to Table 2 in paper
            mobile_setting = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, True, 'RE', 2],
                [3, 72, 24, False, 'RE', 2],
                [3, 88, 24, False, 'RE', 1],
                [5, 96, 40, True, 'HS', 2],
                [5, 240, 40, True, 'HS', 1],
                [5, 240, 40, True, 'HS', 1],
                [5, 120, 48, True, 'HS', 1],
                [5, 144, 48, True, 'HS', 1],
                [5, 288, 96, True, 'HS', 2],
                [5, 576, 96, True, 'HS', 1],
                [5, 576, 96, True, 'HS', 1],
            ]
        else:
            raise NotImplementedError
        # building first layer
        assert input_size % 32 == 0
        last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2, nlin_layer=Hswish)]
        self.classifier = []
        # building mobile blocks
        for k, exp, c, se, nl, s in mobile_setting:
            output_channel = make_divisible(c * width_mult)
            exp_channel = make_divisible(exp * width_mult)
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl))
            input_channel = output_channel
        # # building last several layers
        # if mode == 'large':
        # last_conv = make_divisible(960 * width_mult)
        # self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
        # self.features.append(nn.AdaptiveAvgPool2d(1))
        # self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
        # self.features.append(Hswish(inplace=True))
        # elif mode == 'small':
        # last_conv = make_divisible(576 * width_mult)
        # self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
        # # self.features.append(SEModule(last_conv)) # refer to paper Table2, but I think this is a mistake
        # self.features.append(nn.AdaptiveAvgPool2d(1))
        # self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
        # self.features.append(Hswish(inplace=True))
        # else:
        # raise NotImplementedError
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # print(type(self.features))
        # Slice the sequential backbone into stages so forward() can return
        # intermediate feature maps (indices assume the 'large' table above).
        self.stage1=self.features[:7]
        # print(self.stage1)
        self.stage2 = self.features[7:13]
        # print(self.stage2)
        self.stage3 = self.features[13:16]
        # print(self.stage3)
        # Extra RFB stages progressively downsample the 160-channel map.
        self.stage4 = BasicRFB(160, 160, stride=2, scale=1.0)
        self.stage5 = BasicRFB(160, 160, stride=2, scale=1.0)
        self.stage6 = BasicRFB(160, 160, stride=2, scale=1.0)
        self.AdaptiveAvgPool2d = nn.AdaptiveAvgPool2d(1)
        self.stage7 = nn.Conv2d(160, 160,kernel_size=1)
        # building classifier
        # self.classifier = nn.Sequential(
        # nn.Dropout(p=dropout), # refer to paper section 6
        # nn.Linear(last_channel, n_class),
        # )
        self._initialize_weights()
    def forward(self, x):
        # Returns six feature maps of decreasing spatial size; the last one
        # is a 1x1 "image-level" feature (global pool + 1x1 conv).
        # x = self.features(x)
        x=self.stage1(x)
        Scale1=x
        x = self.stage2(x)
        Scale2 = x
        x = self.stage3(x)
        Scale3 = x
        x = self.stage4(x)
        Scale4 = x
        x = self.stage5(x)
        Scale5 = x
        x = self.stage6(x)
        x = self.AdaptiveAvgPool2d(x)
        x = self.stage7(x)
        Scale6 = x
        # print(Scale1.shape)
        # print(Scale2.shape)
        # print(Scale3.shape)
        # print(Scale4.shape)
        # print(Scale5.shape)
        # print(Scale6.shape)
        return Scale1,Scale2,Scale3,Scale4,Scale5,Scale6
    def _initialize_weights(self):
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
def mobilenetv3(pretrained=False, **kwargs):
    """Build a MobileNetV3 backbone, optionally loading pretrained weights.

    NOTE(review): the checkpoint filename is the *small* variant while
    MobileNetV3 defaults to mode='large' -- confirm callers pass
    mode='small' when pretrained=True, otherwise strict loading fails.
    """
    net = MobileNetV3(**kwargs)
    if not pretrained:
        return net
    weights = torch.load('mobilenetv3_small_67.4.pth.tar')
    net.load_state_dict(weights, strict=True)
    # raise NotImplementedError
    return net
if __name__ == '__main__':
    # Smoke test: build the network, report its size, run one forward pass.
    net = mobilenetv3()
    print('mobilenetv3:\n', net)
    param_count = sum(p.numel() for p in net.parameters())
    print('Total params: %.2fM' % (param_count / 1000000.0))
    # Optional FLOPs/params profiling is available via the "thop" package
    # (pytorch-OpCounter); omitted from this smoke test.
    shape = (1, 3, 300, 300)
    x = torch.randn(shape)
    out = net(x)
| 37.296089 | 159 | 0.566058 |
7754e78d2d6ce6119ccd11287e2263ceb3f883d2 | 488 | py | Python | actioneer/errors.py | vbe0201/Actioneer | 96df066ea40d2a51b1abb4bde2504af23c2c7c82 | [
"MIT"
] | null | null | null | actioneer/errors.py | vbe0201/Actioneer | 96df066ea40d2a51b1abb4bde2504af23c2c7c82 | [
"MIT"
] | null | null | null | actioneer/errors.py | vbe0201/Actioneer | 96df066ea40d2a51b1abb4bde2504af23c2c7c82 | [
"MIT"
] | null | null | null |
class ConvertingError(Exception):
    """Raised when converting an argument to its target type fails."""
class NoClosingQuote(Exception):
    """Raised when a quoted command argument has no closing quote."""
class NoCommandFound(Exception):
    """Raised when no command is registered under the given name."""
class AlreadyAActionWithThatName(Exception):
    """Raised when an Action is created with a name that is already
    used by an existing Action."""
class CheckFailed(Exception):
    """Raised when a check for a command fails."""
| 23.238095 | 68 | 0.717213 |
eb37818c34fafcdd99d543c455e6d37b72513743 | 2,967 | py | Python | tests/utils/test_experiment.py | google/timecast | 11f01d81e240d38ff194df54bbf219e6367f3cf5 | [
"Apache-2.0"
] | 11 | 2020-07-16T15:22:38.000Z | 2021-09-23T01:08:19.000Z | tests/utils/test_experiment.py | google/timecast | 11f01d81e240d38ff194df54bbf219e6367f3cf5 | [
"Apache-2.0"
] | 3 | 2020-08-26T00:26:37.000Z | 2021-01-02T19:29:21.000Z | tests/utils/test_experiment.py | google/timecast | 11f01d81e240d38ff194df54bbf219e6367f3cf5 | [
"Apache-2.0"
] | 1 | 2020-10-13T17:07:10.000Z | 2020-10-13T17:07:10.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""timecast/tests/utils/test_experiment.py"""
import jax
import numpy as np
import pytest
from timecast.utils.experiment import experiment
from timecast.utils import random
@pytest.mark.parametrize("shape", [(), (1,), (1, 2), (1, 2, 3)])
@pytest.mark.parametrize("num_args", [1, 2, 10])
def test_experiment(shape, num_args):
    """Test normal experiment behavior"""
    # Build num_args random (a, b) pairs of the given shape.
    args = [
        (
            jax.random.uniform(random.generate_key(), shape=shape),
            jax.random.uniform(random.generate_key(), shape=shape),
        )
        for _ in range(num_args)
    ]
    @experiment("a,b", args)
    def dummy(a, b):
        """dummy"""
        return a + b
    # run() applies dummy to every (a, b) pair; each result must equal
    # the elementwise sum of that pair.
    results = dummy.run()
    print(results)
    for i in range(len(results)):
        np.testing.assert_array_almost_equal(results[i], np.sum(args[i], axis=0))
@pytest.mark.parametrize("times", [1, 2, 10])
def test_experiment_call(times):
    """Tests repeated decorator calls"""
    def dummy(a, b):
        """dummy"""
        return a + b
    # Re-wrapping with the decorator repeatedly must keep the wrapped
    # function callable and still yield an experiment instance.
    for _ in range(times):
        dummy = experiment("a,b", [(1, 2)])(dummy)
        dummy._func(1, 2)
    assert isinstance(dummy, experiment)
def test_experiment_duplicated_argname():
    """Tests duplicated arguments"""
    # Stacking two experiment decorators that both declare "a" must raise.
    # NOTE(review): the ValueError appears to be raised at decoration time,
    # making the trailing calls unreachable -- confirm against the decorator.
    with pytest.raises(ValueError):
        @experiment("a", [1])
        @experiment("a,b", [(1, 2)])
        def dummy(a, b):
            """dummy"""
            return a + b
        dummy._func(1, 2)
        dummy._validate()
def test_experiment_missing_argument():
    """Test missing arguments"""
    # Declaring only "a" for a two-parameter function must raise.
    with pytest.raises(ValueError):
        @experiment("a", [1])
        def dummy(a, b):
            """dummy"""
            return a + b
        dummy._func(1, 2)
        dummy._validate()
def test_experiment_unused_arguments():
    """Testing unused arguments"""
    # Declaring an extra "c" that the function does not accept must raise.
    with pytest.raises(ValueError):
        @experiment("a,b,c", [(1, 2, 3)])
        def dummy(a, b):
            """dummy"""
            return a + b
        dummy._func(1, 2)
        dummy._validate()
def test_experiment_list_args():
    """Testing args as list"""
    # Argument names may be given as a list instead of a comma string.
    @experiment(["a", "b"], [(1, 2)])
    def dummy(a, b):
        """dummy"""
        return a + b
    dummy.run()
def test_experiment_list_atoms():
    """Testing atoms"""
    # A single-argument experiment accepts bare (non-tuple) values.
    @experiment(["a"], [1])
    def dummy(a):
        """dummy"""
        return a
    assert 1 == dummy.run()[0]
| 24.725 | 81 | 0.601618 |
6f42e35d957f987764a9e74c3371506cb7cc89d9 | 1,761 | py | Python | hca/upload/cli/select_command.py | mshadbolt/dcp-cli | 4d844f3c3a299162c68e25e9ffc6ffe7e8bf7ce8 | [
"MIT"
] | null | null | null | hca/upload/cli/select_command.py | mshadbolt/dcp-cli | 4d844f3c3a299162c68e25e9ffc6ffe7e8bf7ce8 | [
"MIT"
] | null | null | null | hca/upload/cli/select_command.py | mshadbolt/dcp-cli | 4d844f3c3a299162c68e25e9ffc6ffe7e8bf7ce8 | [
"MIT"
] | null | null | null | from hca.upload import UploadException, UploadAreaURI, UploadConfig
from .common import UploadCLICommand
class SelectCommand(UploadCLICommand):
    """
    Select upload area to which you wish to upload files.
    """
    @classmethod
    def add_parser(cls, upload_subparsers):
        # Register the "select" subcommand; argparse dispatches back to this
        # class via the entry_point default.
        select_parser = upload_subparsers.add_parser(
            'select',
            help=cls.__doc__,
            description=cls.__doc__
        )
        select_parser.add_argument('uri_or_alias',
                                   help="S3 URI of an upload area, or short alias.")
        select_parser.set_defaults(entry_point=SelectCommand)
    def __init__(self, args):
        # Dispatch on the argument's form: full S3 URI vs. short alias.
        if args.uri_or_alias.startswith('s3://'): # URI
            self._save_and_select_area_by_uri(args.uri_or_alias)
        else: # alias
            self._select_area_by_alias(args.uri_or_alias)
    def _save_and_select_area_by_uri(self, uri_string):
        # Normalize to a trailing slash so equivalent URIs map to one area.
        if not uri_string.endswith('/'):
            uri_string += '/'
        uri = UploadAreaURI(uri_string)
        config = UploadConfig()
        if uri.area_uuid in config.areas:
            config.select_area(uri.area_uuid)
        else:
            # First time this area is seen: persist it, then select it.
            config.add_area(uri)
            config.select_area(uri.area_uuid)
        print("Upload area %s selected." % uri.area_uuid)
        print("In future you may refer to this upload area using the alias \"%s\"" %
              config.unique_prefix(uri.area_uuid))
    def _select_area_by_alias(self, alias):
        # The alias is a unique prefix of the area UUID; resolve then select.
        try:
            config = UploadConfig()
            uuid = config.area_uuid_from_partial_uuid(partial_uuid=alias)
            config.select_area(uuid)
            print("Upload area %s selected." % uuid)
        except UploadException as e:
            print(str(e))
| 36.6875 | 84 | 0.626349 |
60c5320df00de17ef04dd01719df6c02f50b6f02 | 165 | py | Python | wasm/webapi/status.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 8 | 2019-04-09T21:13:05.000Z | 2021-11-23T17:25:18.000Z | wasm/webapi/status.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 21 | 2021-03-31T19:48:22.000Z | 2022-03-12T00:24:53.000Z | wasm/webapi/status.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 11 | 2019-04-12T01:20:16.000Z | 2021-11-23T17:25:02.000Z | def main(request, response):
status = int(request.GET["status"])
module = b"\0asm\1\0\0\0"
return status, [("Content-Type", "application/wasm")], module
| 33 | 65 | 0.642424 |
10a1dc879a279ae21acfed4d6161e14472ade388 | 2,860 | py | Python | scripts/gen_tbtf_outputs.py | duncanmmacleod/feedstock-outputs | 5f069f6dec1f62b9053f3e2b84a1ff3bd71f646d | [
"BSD-3-Clause"
] | null | null | null | scripts/gen_tbtf_outputs.py | duncanmmacleod/feedstock-outputs | 5f069f6dec1f62b9053f3e2b84a1ff3bd71f646d | [
"BSD-3-Clause"
] | 12 | 2020-03-06T23:06:16.000Z | 2021-12-10T14:36:48.000Z | scripts/gen_tbtf_outputs.py | duncanmmacleod/feedstock-outputs | 5f069f6dec1f62b9053f3e2b84a1ff3bd71f646d | [
"BSD-3-Clause"
] | 9 | 2020-06-30T15:55:03.000Z | 2021-12-09T19:28:17.000Z | import sys
import pprint
import requests
import json
from conda_forge_tick.utils import load_graph
# 1. load the graph
gx = load_graph()
# 2. read the nodes
with open("../feedstock-outputs/scripts/tbtf_nodes.txt", "r") as fp:
tbtf_nodes = [n.strip() for n in fp.readlines()]
# 3. add things that seem to be missed...
tbtf_nodes = set(tbtf_nodes)
tbtf_nodes |= set([
"clang-compiler-activation",
"clang-win-activation",
"gfortran_osx-64",
"gfortran_impl_osx-64",
"_openmp_mutex",
"intel_repack",
"numba",
"cython",
"pybind11",
])
seen = set()
tbtf_outputs = {}
for n in tbtf_nodes:
tbtf_outputs[n] = set()
if "feedstock_name" not in gx.nodes[n]["payload"]:
print(n, gx.nodes[n]["payload"].data)
sys.exit(1)
assert n == gx.nodes[n]["payload"]["feedstock_name"]
outs = gx.nodes[n]["payload"].get("outputs_names", [])
if outs:
for out in outs:
tbtf_outputs[n].add(out)
else:
tbtf_outputs[n].add(n)
if tbtf_outputs[n] & seen:
for bado in tbtf_outputs[n] & seen:
print("OUTPUT CONFLICT: %s" % bado)
for k, v in tbtf_outputs.items():
if bado in tbtf_outputs[k]:
print(" %s: %s" % (k, bado))
seen |= tbtf_outputs[n]
# 3. now check to make sure we have the right names
tails = ["linux-64", "win-64", "osx-64", "linux-aarch64", "linux-ppc64le"]
for p in tbtf_outputs:
final_outs = set()
for out in tbtf_outputs[p]:
if out.endswith("_"):
for tail in tails:
# a little special casing
if p.startswith('ctng') and tail == 'osx-64':
continue
if p == "clang-compiler-activation" and tail == "win-64":
continue
r = requests.get(
"https://api.anaconda.org/package/conda-forge/%s%s" % (
out, tail))
if r.status_code == 200:
final_outs |= set([out + tail])
r = requests.get(
"https://api.anaconda.org/package/conda-forge/%s%s" % (
out, ""))
if r.status_code == 200:
final_outs |= set([out])
else:
final_outs.add(out)
if final_outs != tbtf_outputs[p]:
print("MUNGED NEW OUTPUTS:", p)
tbtf_outputs[p] = final_outs
with open("../feedstock-outputs/scripts/final_outputs.txt", "w") as fp:
fp.write(pprint.pformat(tbtf_outputs))
# 4. now reverse the mapping and write out
rev = {}
for p, outs in tbtf_outputs.items():
for out in outs:
if out not in rev:
rev[out] = set()
rev[out].add(p)
for p in rev:
with open("../feedstock-outputs/outputs/%s.json" % p, "w") as fp:
json.dump({"feedstocks": list(rev[p])}, fp)
| 28.6 | 75 | 0.554545 |
98a99e3cd3a01f744c79f77faae87f39fe12070d | 14,713 | py | Python | tests/test_sql_to_python.py | nickolay/ctds | 50cd3bb993c7bea7a8a13619fcee367c6e389460 | [
"MIT"
] | null | null | null | tests/test_sql_to_python.py | nickolay/ctds | 50cd3bb993c7bea7a8a13619fcee367c6e389460 | [
"MIT"
] | null | null | null | tests/test_sql_to_python.py | nickolay/ctds | 50cd3bb993c7bea7a8a13619fcee367c6e389460 | [
"MIT"
] | null | null | null | from datetime import date, datetime, time
from binascii import hexlify, unhexlify
from decimal import Decimal
import platform
import uuid
import ctds
from .base import TestExternalDatabase
from .compat import unichr_, unicode_
class TestSQLToPython(TestExternalDatabase): # pylint: disable=too-many-public-methods
'''Unit tests related to SQL to Python type conversion.
'''
    def setUp(self):
        # One shared connection + cursor per test case.
        TestExternalDatabase.setUp(self)
        self.connection = self.connect()
        self.cursor = self.connection.cursor()
    def tearDown(self):
        TestExternalDatabase.tearDown(self)
        self.cursor.close()
        self.connection.close()
    def test_notsupportederror(self):
        # Fetching a column of a type the driver cannot convert (type id 98,
        # per the expected message) must raise ctds.NotSupportedError.
        self.cursor.execute("SELECT sql_variant_property(1, 'BaseType')")
        try:
            self.cursor.fetchone()
        except ctds.NotSupportedError as ex:
            self.assertEqual(str(ex), 'unsupported type 98 for column ""')
        else:
            self.fail('.fetchone() did not fail as expected') # pragma: nocover
def test_bit(self):
self.cursor.execute(
'''
SELECT
CONVERT(BIT, NULL),
CONVERT(BIT, 1),
CONVERT(BIT, 0)
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(None, True, False)
)
def test_binary(self):
self.cursor.execute(
'''
SELECT
CONVERT(BINARY(8), NULL),
CONVERT(BINARY(8), 0xdeadbeef),
CONVERT(VARBINARY(8), NULL),
CONVERT(VARBINARY(16), 0xdeadbeef)
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
unhexlify('deadbeef00000000'),
None,
unhexlify('deadbeef'),
)
)
def test_char(self):
self.cursor.execute(
'''
SELECT
CONVERT(CHAR(4), NULL),
CONVERT(CHAR(4), '1234'),
CONVERT(VARCHAR(4), NULL),
CONVERT(VARCHAR(4), '1234'),
CONVERT(VARCHAR(4), ''),
CONVERT(VARCHAR(4), '1'),
REPLICATE(CONVERT(VARCHAR(MAX), 'x'), 8001),
CONVERT(CHAR(1), CHAR(189))
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
unicode_('1234'),
None,
unicode_('1234'),
unicode_(''),
unicode_('1'),
unicode_('x' * 8001),
unicode_(b'\xc2\xbd', encoding='utf-8'),
)
)
def test_nchar(self):
non_ucs2_emoji = unichr_(127802) if self.UCS4_SUPPORTED else self.UNICODE_REPLACEMENT
self.cursor.execute(
'''
SELECT
CONVERT(NCHAR(4), NULL),
CONVERT(NCHAR(4), '1234'),
CONVERT(NVARCHAR(4), NULL),
CONVERT(NVARCHAR(4), N'1234'),
CONVERT(NVARCHAR(4), N''),
CONVERT(NVARCHAR(4), N'1'),
REPLICATE(CONVERT(NVARCHAR(MAX), N'x'), 8001),
NCHAR(189),
NCHAR(256),
CONVERT(NVARCHAR(100), 0x{0})
'''.format(hexlify(non_ucs2_emoji.encode('utf-16le')).decode('ascii'))
)
# Windows builds properly decode codepoints from the supplementary plane.
# Possibly due to the iconv implementation??
decoded = self.use_utf16 or platform.system() == 'Windows' or not self.UCS4_SUPPORTED
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
unicode_('1234'),
None,
unicode_('1234'),
unicode_(''),
unicode_('1'),
unicode_('x' * 8001),
unicode_(b'\xc2\xbd', encoding='utf-8'),
unicode_(b'\xc4\x80', encoding='utf-8'),
non_ucs2_emoji if decoded else unicode_('??')
)
)
def test_text(self):
self.cursor.execute(
'''
SELECT
CONVERT(TEXT, NULL),
CONVERT(TEXT, N''),
CONVERT(TEXT, N'1234')
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
unicode_(''),
unicode_('1234')
)
)
def test_ntext(self):
self.cursor.execute(
'''
SELECT
CONVERT(NTEXT, NULL),
CONVERT(NTEXT, N''),
CONVERT(NTEXT, N'1234')
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
unicode_(''),
unicode_('1234')
)
)
def test_int(self):
ints = (2 ** 8 - 1, 2 ** 15 - 1, 2 ** 31 - 1, 2 ** 63 - 1)
self.cursor.execute(
'''
SELECT
CONVERT(TINYINT, NULL),
CONVERT(TINYINT, {0}),
CONVERT(SMALLINT, NULL),
CONVERT(SMALLINT, {1}),
CONVERT(SMALLINT, -{1}),
CONVERT(INT, NULL),
CONVERT(INT, {2}),
CONVERT(INT, -{2}),
CONVERT(BIGINT, NULL),
CONVERT(BIGINT, {3}),
CONVERT(BIGINT, -{3})
'''.format(*ints)
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None, ints[0],
None, ints[1], -ints[1],
None, ints[2], -ints[2],
None, ints[3], -ints[3],
)
)
def test_float(self):
self.cursor.execute(
'''
SELECT
CONVERT(REAL, NULL),
CONVERT(REAL, '-3.40E+38'),
CONVERT(REAL, '-1.18E-38'),
CONVERT(REAL, 0),
CONVERT(REAL, '1.18E-38'),
CONVERT(REAL, '3.40E+38'),
CONVERT(FLOAT(24), NULL),
CONVERT(FLOAT(24), '-3.40E+38'),
CONVERT(FLOAT(24), '-1.18E-38'),
CONVERT(FLOAT(24), 0),
CONVERT(FLOAT(24), '1.18E-38'),
CONVERT(FLOAT(24), '3.40E+38'),
CONVERT(FLOAT(53), NULL),
CONVERT(FLOAT(53), '-1.79E+308'),
CONVERT(FLOAT(53), '-2.23E-308'),
CONVERT(FLOAT(53), 0),
CONVERT(FLOAT(53), '2.23E-308'),
CONVERT(FLOAT(53), '1.79E+308')
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
-3.3999999521443642e+38,
-1.179999945774631e-38,
float(0),
1.179999945774631e-38,
3.3999999521443642e+38,
None,
-3.3999999521443642e+38,
-1.179999945774631e-38,
float(0),
1.179999945774631e-38,
3.3999999521443642e+38,
None,
-1.79e+308,
-2.23e-308,
float(0),
2.23e-308,
1.79e+308,
)
)
def test_numeric(self):
self.cursor.execute(
'''
SELECT
CONVERT(NUMERIC(5,3), NULL),
CONVERT(NUMERIC(5,3), '12.345'),
CONVERT(NUMERIC(5,3), '12.34567'),
CONVERT(NUMERIC(5,3), '12.34543'),
CONVERT(NUMERIC(5,3), 0),
CONVERT(NUMERIC(5,3), 66),
CONVERT(DECIMAL(5,3), NULL),
CONVERT(DECIMAL(5,3), '12.345'),
CONVERT(DECIMAL(5,3), '12.34567'),
CONVERT(DECIMAL(5,3), '12.34543'),
CONVERT(DECIMAL(5,3), 0),
CONVERT(DECIMAL(5,3), 66)
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
Decimal('12.345'),
Decimal('12.346'),
Decimal('12.345'),
Decimal('0.000'),
Decimal('66.000'),
None,
Decimal('12.345'),
Decimal('12.346'),
Decimal('12.345'),
Decimal('0.000'),
Decimal('66.000'),
)
)
def test_money(self):
self.cursor.execute(
'''
SELECT
CONVERT(MONEY, NULL),
CONVERT(MONEY, '-922,337,203,685,477.5808'),
CONVERT(MONEY, '922,337,203,685,477.5807'),
CONVERT(SMALLMONEY, NULL),
CONVERT(SMALLMONEY, '-214,748.3648'),
CONVERT(SMALLMONEY, '214,748.3647')
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
self.round_money(Decimal('-922337203685477.5808')),
self.round_money(Decimal('922337203685477.5807')),
None,
# SMALLMONEY seems to be rounded properly by FreeTDS ...
Decimal('-214748.3648'),
Decimal('214748.3647'),
)
)
def test_date(self):
self.cursor.execute(
'''
SELECT
CONVERT(DATE, NULL),
CONVERT(DATE, '0001-01-01'),
CONVERT(DATE, '9999-12-31')
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
(
'0001-01-01'
if self.connection.tds_version < '7.3'
else date(1, 1, 1)
),
(
'9999-12-31'
if self.connection.tds_version < '7.3'
else date(9999, 12, 31)
),
)
)
def test_time(self):
self.cursor.execute(
'''
SELECT
CONVERT(TIME, NULL),
CONVERT(TIME, '01:02:03.01'),
CONVERT(TIME, '23:59:59.99')
'''
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
(
'01:02:03.0100000'
if self.connection.tds_version < '7.3'
else time(1, 2, 3, 10000)
),
(
'23:59:59.9900000'
if self.connection.tds_version < '7.3'
else time(23, 59, 59, 990000)
),
)
)
def test_datetime(self):
self.cursor.execute(
'''
SELECT
CONVERT(DATETIME, NULL),
CONVERT(DATETIME, :0),
CONVERT(DATETIME, :1)
''',
(
datetime(1753, 1, 1),
datetime(9999, 12, 31, 23, 59, 59, 997000)
)
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
datetime(1753, 1, 1),
datetime(9999, 12, 31, 23, 59, 59, 997000)
)
)
def test_smalldatetime(self):
self.cursor.execute(
'''
SELECT
CONVERT(SMALLDATETIME, NULL),
CONVERT(SMALLDATETIME, :0),
CONVERT(SMALLDATETIME, :1)
''',
(
datetime(1900, 1, 1),
datetime(2076, 6, 6, 23, 59, 59)
)
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
datetime(1900, 1, 1),
# SMALLDATETIME only has minute accuracy.
datetime(2076, 6, 7)
)
)
def test_datetime2(self):
self.cursor.execute(
'''
SELECT
CONVERT(DATETIME2, NULL),
CONVERT(DATETIME2, :0),
CONVERT(DATETIME2, :1)
''',
(
datetime(1, 1, 1),
# $future: fix rounding issues. DB lib doesn't expose a good way to access
# the more precise DATETIME2 structure
datetime(9999, 12, 31, 23, 59, 59, 997 * 1000)
)
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
(
'2001-01-01 00:00:00.0000000'
if self.connection.tds_version < '7.3'
else datetime(2001, 1, 1)
),
(
'9999-12-31 23:59:59.9966667'
if self.connection.tds_version < '7.3'
else datetime(9999, 12, 31, 23, 59, 59, 997 * 1000)
),
)
)
def test_guid(self):
uuid1 = uuid.uuid4()
uuid2 = uuid.uuid4()
self.cursor.execute(
'''
SELECT
CONVERT(UNIQUEIDENTIFIER, NULL),
CONVERT(UNIQUEIDENTIFIER, :0),
CONVERT(UNIQUEIDENTIFIER, :1)
''',
(
uuid1,
uuid2
)
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
uuid1,
uuid2
)
)
def test_xml(self):
xml = '<foo><bar>1</bar><baz/></foo>'
self.cursor.execute(
'''
SELECT
CONVERT(XML, NULL),
CONVERT(XML, :0)
''',
(
xml,
)
)
self.assertEqual(
tuple(self.cursor.fetchone()),
(
None,
xml
)
)
def test_unsupported(self):
obj = object()
try:
self.cursor.execute('SELECT :0', (obj,))
except ctds.InterfaceError as ex:
self.assertEqual(
str(ex),
'could not implicitly convert Python type "{0}" to SQL'.format(type(obj))
)
else:
self.fail('.execute() did not fail as expected') # pragma: nocover
| 28.624514 | 93 | 0.413444 |
dcc0eeb0556460922ec096f696351c2b0bc1dffe | 1,142 | py | Python | tests/utils.py | greggles/cutadapt | fb3e8f0828c2cb92bfaea69d91ffb539ab24cdd0 | [
"MIT"
] | 375 | 2015-01-16T14:04:50.000Z | 2022-03-16T02:19:43.000Z | tests/utils.py | greggles/cutadapt | fb3e8f0828c2cb92bfaea69d91ffb539ab24cdd0 | [
"MIT"
] | 589 | 2015-03-05T20:06:03.000Z | 2022-03-29T22:49:56.000Z | tests/utils.py | greggles/cutadapt | fb3e8f0828c2cb92bfaea69d91ffb539ab24cdd0 | [
"MIT"
] | 150 | 2015-02-10T12:19:40.000Z | 2022-03-25T05:06:50.000Z | import sys
import os.path
import subprocess
def datapath(path):
    """Return *path* resolved inside this test directory's ``data`` folder."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', path)
def cutpath(path):
    """Return *path* resolved inside this test directory's ``cut`` folder."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'cut', path)
class FilesDifferent(Exception):
    """Raised by assert_files_equal when ``diff`` reports a difference."""
def assert_files_equal(path1, path2, ignore_trailing_space: bool = False):
    """Compare two files using the external ``diff`` tool.

    Raises FilesDifferent with the unified diff as the message when the
    files differ; returns None when they are equal.
    """
    cmd = ["diff", "-u"]
    if sys.platform == "win32":
        # Tolerate CRLF vs LF line endings on Windows.
        cmd.append("--strip-trailing-cr")
    if ignore_trailing_space:
        if sys.platform == "darwin":
            # Ignores too much, but macOS doesn't have the option below
            cmd.append("-b")
        else:
            cmd.append("--ignore-trailing-space")
    try:
        subprocess.check_output(cmd + [path1, path2], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # diff exits non-zero on differences; surface its output directly.
        raise FilesDifferent('\n' + e.output.decode()) from None
def binomial(n, k):
    """
    Return the binomial coefficient ('n choose k').

    Computed multiplicatively (no factorials), using the symmetry
    C(n, k) == C(n, n - k) to shorten the loop. Returns 0 when the
    coefficient is undefined (k < 0 or k > n).
    """
    smaller = min(k, n - k)
    if smaller < 0:
        return 0
    result = 1
    # result * factor is always divisible by step, so // is exact here.
    for step, factor in enumerate(range(n, n - smaller, -1), start=1):
        result = result * factor // step
    return result
| 24.297872 | 79 | 0.608581 |
16fb19fbea73a10863caa6e5d20b629e41840799 | 4,820 | py | Python | test/programytest/parser/template/node_tests/test_input.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 2 | 2018-06-16T09:32:22.000Z | 2019-07-21T13:16:00.000Z | test/programytest/parser/template/node_tests/test_input.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 3 | 2020-07-16T04:00:42.000Z | 2021-03-31T18:52:22.000Z | test/programytest/parser/template/node_tests/test_input.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 4 | 2018-06-29T23:50:44.000Z | 2020-11-05T08:13:47.000Z | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.input import TemplateInputNode
from programy.dialog.dialog import Conversation, Question
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateInputNode(TemplateInputNode):
def __init__(self):
TemplateInputNode.__init__(self)
def resolve_to_string(self, context):
raise Exception("This is an error")
class TemplateInputNodeTests(ParserTestsBaseClass):
def test_to_str_defaults(self):
node = TemplateInputNode()
self.assertEquals("INPUT", node.to_string())
def test_to_str_no_defaults(self):
node = TemplateInputNode(index=2)
self.assertEquals("INPUT index=2", node.to_string())
def test_to_xml_defaults(self):
root = TemplateNode()
node = TemplateInputNode()
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template><input /></template>", xml_str)
def test_to_xml_no_defaults(self):
root = TemplateNode()
node = TemplateInputNode(index=3)
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><input index="3" /></template>', xml_str)
def test_resolve_with_defaults(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateInputNode()
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual(0, node.index)
conversation = Conversation(self._client_context)
question = Question.create_from_text(self._client_context.brain.tokenizer, "Hello world")
question.current_sentence()._response = "Hello matey"
conversation.record_dialog(question)
self._client_context.bot._conversations["testid"] = conversation
response = root.resolve(self._client_context)
self.assertIsNotNone(response)
self.assertEqual(response, "Hello world")
def test_resolve_no_defaults(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateInputNode(index=1)
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual(1, node.index)
conversation = Conversation(self._client_context)
question = Question.create_from_text(self._client_context.brain.tokenizer, "Hello world")
question.current_sentence()._response = "Hello matey"
conversation.record_dialog(question)
question = Question.create_from_text(self._client_context.brain.tokenizer, "How are you. Are you well")
question.current_sentence()._response = "Fine thanks"
conversation.record_dialog(question)
self._client_context.bot._conversations["testid"] = conversation
response = root.resolve(self._client_context)
self.assertIsNotNone(response)
self.assertEqual(response, "How are you")
def test_resolve_no_sentence(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateInputNode(index=3)
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual(3, node.index)
conversation = Conversation(self._client_context)
question = Question.create_from_text(self._client_context.brain.tokenizer, "Hello world")
question.current_sentence()._response = "Hello matey"
conversation.record_dialog(question)
question = Question.create_from_text(self._client_context.brain.tokenizer, "How are you. Are you well")
question.current_sentence()._response = "Fine thanks"
conversation.record_dialog(question)
self._client_context.bot._conversations["testid"] = conversation
response = root.resolve(self._client_context)
self.assertIsNotNone(response)
self.assertEqual(response, "")
def test_node_exception_handling(self):
root = TemplateNode()
node = MockTemplateInputNode()
root.append(node)
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEquals("", result) | 34.927536 | 111 | 0.689419 |
9712d7814086a89724d5c144fbc68bdff58466ae | 23,797 | py | Python | riscvmodel/insn.py | bluwireless/riscv-python-model | a5328f44e8ad82dc9bf3d7e4d6d1580ff2d18f2f | [
"MIT"
] | null | null | null | riscvmodel/insn.py | bluwireless/riscv-python-model | a5328f44e8ad82dc9bf3d7e4d6d1580ff2d18f2f | [
"MIT"
] | null | null | null | riscvmodel/insn.py | bluwireless/riscv-python-model | a5328f44e8ad82dc9bf3d7e4d6d1580ff2d18f2f | [
"MIT"
] | null | null | null | # Copyright Stefan Wallentowitz
# Licensed under the MIT License, see LICENSE for details.
# SPDX-License-Identifier: MIT
"""
Instructions
"""
from .isa import *
from .variant import *
from .model import State
@isa("lui", RV32I, opcode=0b0110111)
class InstructionLUI(InstructionUType):
    """
    The Load Upper Immediate (LUI) instruction loads the given immediate (unsigned 20 bit) to the upper 20 bit
    of the destination register. The lower bits are set to zero in the destination register. This instruction
    can be used to efficiently form constants, as a sequence of LUI and ORI for example.
    """
    def execute(self, model: Model):
        # rd <- imm << 12 (the low 12 bits are zero)
        model.state.intreg[self.rd] = (self.imm << 12)
@isa("auipc", RV32I, opcode=0b0010111)
class InstructionAUIPC(InstructionUType):
    """
    Add Upper Immediate to PC (AUIPC): rd <- pc + (imm << 12), used to
    build pc-relative addresses.
    """
    def execute(self, model: Model):
        model.state.intreg[self.rd] = model.state.pc + (self.imm << 12)
@isa("jal", RV32I, opcode=0b1101111)
class InstructionJAL(InstructionJType):
    """
    Jump And Link (JAL): write the address of the following instruction
    (pc + 4) to rd, then jump pc-relative by the immediate.
    """
    def execute(self, model: Model):
        model.state.intreg[self.rd] = model.state.pc + 4
        model.state.pc += self.imm
@isa("jalr", RV32I, opcode=0b1100111, funct3=0b000)
class InstructionJALR(InstructionIType):
    """
    Jump And Link Register (JALR): jump to rs1 + imm and write the return
    address (pc + 4) to rd.
    """
    def execute(self, model: Model):
        # Read the jump target *before* writing the link register: with the
        # previous ordering, rd == rs1 (e.g. "jalr ra, ra, 0") clobbered the
        # base address and produced a wrong target.
        target = model.state.intreg[self.rs1] + self.imm
        # NOTE(review): the ISA spec additionally clears bit 0 of the target
        # ((rs1 + imm) & ~1); not done here -- confirm whether the pc model
        # enforces instruction alignment itself.
        model.state.intreg[self.rd] = model.state.pc + 4
        model.state.pc = target
@isa("beq", RV32I, opcode=0b1100011, funct3=0b000)
class InstructionBEQ(InstructionBType):
    """Branch if EQual: pc-relative branch when rs1 == rs2."""
    def execute(self, model: Model):
        # todo: problem with __cmp__
        # The raw .value fields are compared to work around the register
        # class comparison issue noted above.
        if model.state.intreg[self.rs1].value == model.state.intreg[self.rs2].value:
            model.state.pc = model.state.pc + self.imm
@isa("bne", RV32I, opcode=0b1100011, funct3=0b001)
class InstructionBNE(InstructionBType):
    """Branch if Not Equal: pc-relative branch when rs1 != rs2."""
    def execute(self, model: Model):
        if model.state.intreg[self.rs1].value != model.state.intreg[self.rs2].value:
            model.state.pc = model.state.pc + self.imm
@isa("blt", RV32I, opcode=0b1100011, funct3=0b100)
class InstructionBLT(InstructionBType):
    """Branch if Less Than (signed compare of rs1 and rs2)."""
    def execute(self, model: Model):
        if model.state.intreg[self.rs1].value < model.state.intreg[self.rs2].value:
            model.state.pc = model.state.pc + self.imm
@isa("bge", RV32I, opcode=0b1100011, funct3=0b101)
class InstructionBGE(InstructionBType):
    """Branch if Greater or Equal (signed compare of rs1 and rs2)."""
    def execute(self, model: Model):
        if model.state.intreg[self.rs1].value >= model.state.intreg[self.rs2].value:
            model.state.pc = model.state.pc + self.imm
@isa("bltu", RV32I, opcode=0b1100011, funct3=0b110)
class InstructionBLTU(InstructionBType):
    """Branch if Less Than, Unsigned (compares .unsigned() values)."""
    def execute(self, model: Model):
        if model.state.intreg[self.rs1].unsigned() < model.state.intreg[
                self.rs2].unsigned():
            model.state.pc = model.state.pc + self.imm
@isa("bgeu", RV32I, opcode=0b1100011, funct3=0b111)
class InstructionBGEU(InstructionBType):
    """Branch if Greater or Equal, Unsigned (compares .unsigned() values)."""
    def execute(self, model: Model):
        if model.state.intreg[self.rs1].unsigned() >= model.state.intreg[
                self.rs2].unsigned():
            model.state.pc = model.state.pc + self.imm
@isa("lb", RV32I, opcode=0b0000011, funct3=0b000)
class InstructionLB(InstructionILType):
    """Load byte: read one byte from memory at rs1 + imm and sign-extend it into rd."""
    def execute(self, model: Model):
        data = model.state.memory.lb((model.state.intreg[self.rs1] + self.imm).unsigned())
        # Sign-extend: if bit 7 of the loaded byte is set, replicate it
        # into bits 31..8 of the 32-bit result.
        if (data >> 7) & 0x1:
            data |= 0xFFFFFF00
        model.state.intreg[self.rd] = data
@isa("lh", RV32I, opcode=0b0000011, funct3=0b001)
class InstructionLH(InstructionILType):
def execute(self, model: Model):
data = model.state.memory.lh((model.state.intreg[self.rs1] + self.imm).unsigned())
if (data >> 15) & 0x1:
data |= 0xFFFF0000
model.state.intreg[self.rd] = data
@isa("lw", RV32I, opcode=0b0000011, funct3=0b010)
class InstructionLW(InstructionILType):
def execute(self, model: Model):
data = model.state.memory.lw((model.state.intreg[self.rs1] + self.imm).unsigned())
model.state.intreg[self.rd] = data
@isa("lbu", RV32I, opcode=0b0000011, funct3=0b100)
class InstructionLBU(InstructionILType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.memory.lb(
(model.state.intreg[self.rs1] + self.imm).unsigned())
@isa("lhu", RV32I, opcode=0b0000011, funct3=0b101)
class InstructionLHU(InstructionILType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.memory.lh(
(model.state.intreg[self.rs1] + self.imm).unsigned())
@isa("sb", RV32I, opcode=0b0100011, funct3=0b000)
class InstructionSB(InstructionSType):
def execute(self, model: Model):
model.state.memory.sb((model.state.intreg[self.rs1] + self.imm).unsigned(),
model.state.intreg[self.rs2])
@isa("sh", RV32I, opcode=0b0100011, funct3=0b001)
class InstructionSH(InstructionSType):
def execute(self, model: Model):
model.state.memory.sh((model.state.intreg[self.rs1] + self.imm).unsigned(),
model.state.intreg[self.rs2])
@isa("sw", RV32I, opcode=0b0100011, funct3=0b010)
class InstructionSW(InstructionSType):
def execute(self, model: Model):
model.state.memory.sw((model.state.intreg[self.rs1] + self.imm).unsigned(),
model.state.intreg[self.rs2])
@isa("addi", RV32I, opcode=0b0010011, funct3=0b000)
class InstructionADDI(InstructionIType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] + self.imm
@isa("slti", RV32I, opcode=0b0010011, funct3=0b010)
class InstructionSLTI(InstructionIType):
def execute(self, model: Model):
if model.state.intreg[self.rs1] < self.imm:
model.state.intreg[self.rd] = 1
else:
model.state.intreg[self.rd] = 0
@isa("sltiu", RV32I, opcode=0b0010011, funct3=0b011)
class InstructionSLTIU(InstructionIType):
def execute(self, model: Model):
if model.state.intreg[self.rs1].unsigned() < int(self.imm):
model.state.intreg[self.rd] = 1
else:
model.state.intreg[self.rd] = 0
@isa("xori", RV32I, opcode=0b0010011, funct3=0b100)
class InstructionXORI(InstructionIType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] ^ self.imm
@isa("ori", RV32I, opcode=0b0010011, funct3=0b110)
class InstructionORI(InstructionIType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] | self.imm
@isa("andi", RV32I, opcode=0b0010011, funct3=0b111)
class InstructionANDI(InstructionIType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] & self.imm
@isa("slli", RV32I, opcode=0b0010011, funct3=0b001, funct7=0b0000000)
class InstructionSLLI(InstructionISType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] << self.shamt
@isa("srli", RV32I, opcode=0b0010011, funct3=0b101, funct7=0b0000000)
class InstructionSRLI(InstructionISType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1].unsigned() >> int(
self.shamt)
@isa("srai", RV32I, opcode=0b0010011, funct3=0b101, funct7=0b0100000)
class InstructionSRAI(InstructionISType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] >> self.shamt
@isa("add", RV32I, opcode=0b0110011, funct3=0b000, funct7=0b0000000)
class InstructionADD(InstructionRType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] + model.state.intreg[self.rs2]
@isa("sub", RV32I, opcode=0b0110011, funct3=0b000, funct7=0b0100000)
class InstructionSUB(InstructionRType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] - model.state.intreg[self.rs2]
@isa("sll", RV32I, opcode=0b0110011, funct3=0b001, funct7=0b0000000)
class InstructionSLL(InstructionRType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] << (
model.state.intreg[self.rs2] & 0x1f)
@isa("slt", RV32I, opcode=0b0110011, funct3=0b010, funct7=0b0000000)
class InstructionSLT(InstructionRType):
def execute(self, model: Model):
if model.state.intreg[self.rs1] < model.state.intreg[self.rs2]:
model.state.intreg[self.rd] = 1
else:
model.state.intreg[self.rd] = 0
@isa("sltu", RV32I, opcode=0b0110011, funct3=0b011, funct7=0b0000000)
class InstructionSLTU(InstructionRType):
def execute(self, state: State):
if state.intreg[self.rs1].unsigned() < state.intreg[
self.rs2].unsigned():
state.intreg[self.rd] = 1
else:
state.intreg[self.rd] = 0
@isa("xor", RV32I, opcode=0b0110011, funct3=0b100, funct7=0b0000000)
class InstructionXOR(InstructionRType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] ^ model.state.intreg[self.rs2]
@isa("srl", RV32I, opcode=0b0110011, funct3=0b101, funct7=0b0000000)
class InstructionSRL(InstructionRType):
def execute(self, model: Model):
model.state.intreg[
self.rd] = model.state.intreg[self.rs1] >> model.state.intreg[self.rs2]
@isa("sra", RV32I, opcode=0b0110011, funct3=0b101, funct7=0b0100000)
class InstructionSRA(InstructionRType):
def execute(self, model: Model):
model.state.intreg[
self.rd] = model.state.intreg[self.rs1] >> model.state.intreg[self.rs2]
@isa("or", RV32I, opcode=0b0110011, funct3=0b110, funct7=0b0000000)
class InstructionOR(InstructionRType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] | model.state.intreg[self.rs2]
@isa("and", RV32I, opcode=0b0110011, funct3=0b111, funct7=0b0000000)
class InstructionAND(InstructionRType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] & model.state.intreg[self.rs2]
@isa("fence", RV32I, opcode=0b0001111, funct3=0b000)
class InstructionFENCE(InstructionIType):
isa_format_id = "FENCE"
def execute(self, model: Model):
pass
@isa("fence.i", RV32IZifencei, opcode=0b0001111, funct3=0b001)
class InstructionFENCEI(InstructionIType):
def execute(self, model: Model):
pass
@isa("ecall", RV32I, opcode=0b1110011, funct3=0b000, imm=0b000000000000, rd=0b00000, rs1=0b00000)
class InstructionECALL(InstructionIType):
def execute(self, model: Model):
model.environment.call(model.state)
def __str__(self):
return "ecall"
@isa("wfi", RV32I, opcode=0b1110011, funct3=0b000, imm=0b000100000101, rs1=0b00000, rd=0b00000)
class InstructionWFI(InstructionIType):
def execute(self, model: Model):
pass
@isa("ebreak", RV32I, opcode=0b1110011, funct3=0b000, imm=0b000000000001)
class InstructionEBREAK(InstructionIType):
def execute(self, model: Model):
pass
def __str__(self):
return "ebreak"
@isa("csrrw", RV32IZicsr, opcode=0b1110011, funct3=0b001)
class InstructionCSRRW(InstructionIType):
def execute(self, model: Model):
pass
@isa("csrrs", RV32IZicsr, opcode=0b1110011, funct3=0b010)
class InstructionCSRRS(InstructionIType):
def execute(self, model: Model):
pass
@isa("csrrc", RV32IZicsr, opcode=0b1110011, funct3=0b011)
class InstructionCSRRC(InstructionIType):
def execute(self, model: Model):
pass
#@isa("csrrwi", RV32IZicsr, opcode=0b1110011, funct3=0b101)
#class InstructionCSRRWI(Instruction):
# def execute(self, model: Model):
# pass
#@isa("csrrsi", RV32IZicsr, opcode=0b1110011, funct3=0b110)
#class InstructionCSRRSI(Instruction):
# def execute(self, model: Model):
# pass
#@isa("csrrci", RV32IZicsr, opcode=0b1110011, funct3=0b111)
#class InstructionCSRRCI(Instruction):
# def execute(self, model: Model):
# pass
@isa("lwu", RV64I, opcode=0b0000011, funct3=0b110)
class InstructionLWU(InstructionIType):
def execute(self, model: Model):
pass
@isa("ld", RV64I, opcode=0b0000011, funct3=0b011)
class InstructionLD(InstructionIType):
def execute(self, model: Model):
pass
@isa("sd", RV64I, opcode=0b0100011, funct3=0b011)
class InstructionSD(InstructionISType):
def execute(self, model: Model):
pass
@isa_pseudo()
class InstructionNOP(InstructionADDI):
def __init__(self):
super().__init__(0, 0, 0)
def __str__(self):
return "nop"
@isa("mul", RV32IM, opcode=0b0110011, funct3=0b000, funct7=0b0000001)
class InstructionMUL(InstructionRType):
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs1] * model.state.intreg[self.rs2]
@isa("mulh", RV32IM, opcode=0b0110011, funct3=0b001, funct7=0b0000001)
class InstructionMULH(InstructionRType):
def execute(self, model: Model):
# TODO: implement
pass
@isa("mulhsu", RV32IM, opcode=0b0110011, funct3=0b010, funct7=0b0000001)
class InstructionMULHSU(InstructionRType):
def execute(self, model: Model):
# TODO: implement
pass
@isa("mulhu", RV32IM, opcode=0b0110011, funct3=0b011, funct7=0b0000001)
class InstructionMULHU(InstructionRType):
def execute(self, model: Model):
# TODO: implement
pass
@isa("div", RV32IM, opcode=0b0110011, funct3=0b100, funct7=0b0000001)
class InstructionDIV(InstructionRType):
def execute(self, model: Model):
# TODO: implement
pass
@isa("divu", RV32IM, opcode=0b0110011, funct3=0b101, funct7=0b0000001)
class InstructionDIVU(InstructionRType):
def execute(self, model: Model):
# TODO: implement
pass
@isa("rem", RV32IM, opcode=0b0110011, funct3=0b110, funct7=0b0000001)
class InstructionREM(InstructionRType):
def execute(self, model: Model):
# TODO: implement
pass
@isa("remu", RV32IM, opcode=0b0110011, funct3=0b111, funct7=0b0000001)
class InstructionREMU(InstructionRType):
def execute(self, model: Model):
# TODO: implement
pass
@isa_c("c.addi", RV32IC, opcode=1, funct3=0b000)
class InstructionCADDI(InstructionCIType):
def expand(self):
pass
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rd] + self.imm
@isa_c("c.andi", RV32IC, opcode=1, funct3=0b100)
class InstructionCANDI(InstructionCBType):
def expand(self):
pass
def execute(self, model: Model):
pass
@isa_c("c.swsp", RV32IC, opcode=2, funct3=6)
class InstructionCSWSP(InstructionCSSType):
def expand(self):
pass
def decode(self, machinecode: int):
self.rs = (machinecode >> 2) & 0x1f
imm12to9 = (machinecode >> 9) & 0xf
imm8to7 = (machinecode >> 7) & 0x3
self.imm.set_from_bits((imm8to7 << 4) | imm12to9)
def execute(self, model: Model):
pass
@isa_c("c.li", RV32IC, opcode=1, funct3=2)
class InstructionCLI(InstructionCIType):
def expand(self):
pass
def execute(self, model: Model):
model.state.intreg[self.rd] = self.imm
@isa_c("c.mv", RV32IC, opcode=2, funct4=8)
class InstructionCMV(InstructionCRType):
def expand(self):
pass
def execute(self, model: Model):
model.state.intreg[self.rd] = model.state.intreg[self.rs]
@isa("lr", RV32A, opcode=0b0101111, funct5=0b00010, funct3=0b010)
class InstructionLR(InstructionAMOType):
""" Load reserved """
def execute(self, model: Model):
# Perform a normal load
data = model.state.memory.lw(model.state.intreg[self.rs1].unsigned())
model.state.intreg[self.rd] = data
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("sc", RV32A, opcode=0b0101111, funct5=0b00011, funct3=0b010)
class InstructionSC(InstructionAMOType):
""" Store conditional """
def execute(self, model: Model):
# Check if this address is reserved
if model.state.atomic_reserved(model.state.intreg[self.rs1]):
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
model.state.intreg[self.rs2]
)
model.state.intreg[self.rd] = 0
else:
model.state.intreg[self.rd] = 1
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amoadd", RV32A, opcode=0b0101111, funct5=0b00000, funct3=0b010)
class InstructionAMOADD(InstructionAMOType):
""" Atomic add operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
(model.state.intreg[self.rs2] + model.state.intreg[self.rd])
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amoxor", RV32A, opcode=0b0101111, funct5=0b00100, funct3=0b010)
class InstructionAMOXOR(InstructionAMOType):
""" Atomic XOR operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
(model.state.intreg[self.rs2] ^ model.state.intreg[self.rd])
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amoor", RV32A, opcode=0b0101111, funct5=0b01000, funct3=0b010)
class InstructionAMOOR(InstructionAMOType):
""" Atomic OR operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
(model.state.intreg[self.rs2] | model.state.intreg[self.rd])
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amoand", RV32A, opcode=0b0101111, funct5=0b01100, funct3=0b010)
class InstructionAMOAND(InstructionAMOType):
""" Atomic AND operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
(model.state.intreg[self.rs2] & model.state.intreg[self.rd])
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amomin", RV32A, opcode=0b0101111, funct5=0b10000, funct3=0b010)
class InstructionAMOMIN(InstructionAMOType):
""" Atomic minimum operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
min(model.state.intreg[self.rs2], model.state.intreg[self.rd])
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amomax", RV32A, opcode=0b0101111, funct5=0b10100, funct3=0b010)
class InstructionAMOMAX(InstructionAMOType):
""" Atomic maximum operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
max(model.state.intreg[self.rs2], model.state.intreg[self.rd])
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amominu", RV32A, opcode=0b0101111, funct5=0b11000, funct3=0b010)
class InstructionAMOMINU(InstructionAMOType):
""" Atomic unsigned minimum operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
min(
model.state.intreg[self.rs2].unsigned(),
model.state.intreg[self.rd].unsigned()
)
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amomaxu", RV32A, opcode=0b0101111, funct5=0b11100, funct3=0b010)
class InstructionAMOMAXU(InstructionAMOType):
""" Atomic unsigned maximum operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
max(
model.state.intreg[self.rs2].unsigned(),
model.state.intreg[self.rd].unsigned()
)
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
@isa("amoswap", RV32A, opcode=0b0101111, funct5=0b00001, funct3=0b010)
class InstructionAMOSWAP(InstructionAMOType):
""" Atomic swap operation """
def execute(self, model: Model):
# This models a single HART with 1 stage pipeline, so will always succeed
model.state.intreg[self.rd] = model.state.memory.lw(
model.state.intreg[self.rs1].unsigned()
)
model.state.memory.sw(
model.state.intreg[self.rs1].unsigned(),
model.state.intreg[self.rs2]
)
# Perform correct lock or release actions
if self.rl: model.state.atomic_release(model.state.intreg[self.rs1])
elif self.aq: model.state.atomic_acquire(model.state.intreg[self.rs1])
| 35.359584 | 110 | 0.67206 |
3bf87a72438832d4b0d5b9b3dc71ad4e159b810a | 2,678 | py | Python | shred-1/karatsuba10.py | farallons/shark | ab155b0f9b2ddf71d7cd70bd76f683be51c14f1f | [
"MIT"
] | null | null | null | shred-1/karatsuba10.py | farallons/shark | ab155b0f9b2ddf71d7cd70bd76f683be51c14f1f | [
"MIT"
] | null | null | null | shred-1/karatsuba10.py | farallons/shark | ab155b0f9b2ddf71d7cd70bd76f683be51c14f1f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import argparse
import os
base_dir = os.getcwd()
join = os.path.join
exists = os.path.exists
def main(x, y, algo):
    """Multiply x by y and print the result.

    Uses Karatsuba's algorithm when *algo* is truthy, otherwise Python's
    built-in multiplication. A marker line ("algo"/"no algo") is printed
    first to show which path was taken.
    """
    if algo:
        print("algo")
        product = calculate_product(x,y)
    else:
        print("no algo")
        product = x * y
    print(product)
def digits(a):
    """Return the base-10 digits of *a*, most significant first."""
    return list(map(int, str(a)))
def smallest_multiple_of_two(n):
    """Return the smallest power of two that is >= n.

    Despite the name, the result is a power of two; for n <= 1 the
    result is 1.
    """
    width = 1
    while width < n:
        width <<= 1
    return width
def left_pad(a, n):
    """Return *a* prefixed with zeros so its length is at least n."""
    shortfall = n - len(a)
    return [0] * shortfall + a
def calculate_product(x, y):
    """Multiply two non-negative integers with Karatsuba's algorithm.

    Both operands are converted to equal-length digit lists, padded on
    the left to a power-of-two width, before the recursive multiply.
    """
    xd = digits(x)
    yd = digits(y)
    # Ensure xd is the longer list so the pad width covers both.
    if len(yd) > len(xd):
        xd, yd = yd, xd
    width = smallest_multiple_of_two(len(xd))
    return karatsuba(left_pad(xd, width), left_pad(yd, width), width)
def add(a, b, n):
    """Digit-wise add two length-n digit lists (most significant first).

    Returns (carry, digits): *digits* is the length-n sum and *carry* is
    True if the addition overflowed past n digits.
    """
    result = [0] * n
    carry = False
    for index in reversed(range(n)):
        total = a[index] + b[index] + (1 if carry else 0)
        carry = total > 9
        result[index] = total - 10 if carry else total
    return carry, result
def add_zeroes_on_right(x, n):
    """Append n decimal zeros to x, i.e. multiply it by 10**n."""
    return x * 10 ** n
def number(a):
    """Interpret a digit list (most significant first) as an integer."""
    return int("".join(str(digit) for digit in a))
def karatsuba(x, y, n):
    """Recursively multiply two length-n digit lists (most significant first).

    n must be a power of two with len(x) == len(y) == n. Returns the
    product as a plain int, using the Karatsuba identity:
        x*y = ac*10^n + ((a+b)(c+d) - ac - bd)*10^(n/2) + bd
    where a/c are the high halves and b/d the low halves of x and y.
    """
    if n == 1:
        # Base case: single-digit multiply.
        return x[0] * y[0]
    else:
        n >>= 1
        # n is now the half-width used by every step below.
        a = x[:n]  # high half of x
        b = x[n:]  # low half of x
        c = y[:n]  # high half of y
        d = y[n:]  # low half of y
        ac = karatsuba(a, c, n)
        bd = karatsuba(b, d, n)
        step1 = ac
        step2 = bd
        # (a+b) and (c+d) can overflow n digits; add() returns only n
        # digits plus a carry flag, so the carries are re-applied below.
        carry_a_b, sum_a_b = add(a, b, n)
        carry_c_d, sum_c_d = add(c, d, n)
        partial_product = karatsuba(sum_a_b, sum_c_d, n)
        # Reconstruct the true (a+b)(c+d): each dropped carry represents
        # an extra 10^n term, e.g. (10^n + A)(10^n + C)
        #   = 10^(2n) + (A + C)*10^n + A*C.
        if carry_a_b and carry_c_d:
            product3 = add_zeroes_on_right(1, n<<1) + add_zeroes_on_right(number(sum_a_b), n) + add_zeroes_on_right(number(sum_c_d), n) + partial_product
        elif carry_a_b:
            product3 = add_zeroes_on_right(number(sum_c_d), n) + partial_product
        elif carry_c_d:
            product3 = add_zeroes_on_right(number(sum_a_b), n) + partial_product
        else:
            product3 = partial_product
        step3 = product3
        # step4 is the middle coefficient (ad + bc).
        step4 = step3 - step1 - step2
        step5 = add_zeroes_on_right(step1, n<<1) + add_zeroes_on_right(step4, n) + step2
        return step5
if __name__ == '__main__':
    # CLI entry point: karatsuba10.py X Y [--no_algo]
    parser = argparse.ArgumentParser(description="Calculate the product of two numbers using Karatsuba's algorithm")
    parser.add_argument('x', type=int)
    parser.add_argument('y', type=int)
    # --no_algo falls back to Python's built-in multiplication.
    parser.add_argument('--no_algo', action="store_true")
    args = parser.parse_args()
    x = args.x
    y = args.y
    no_algo = args.no_algo
    main(x, y, not no_algo)
b46909d434a1dee5d7ffe00f662fab1babfa05a7 | 6,366 | py | Python | d2go/utils/testing/data_loader_helper.py | apivovarov/d2go | 44e410843fb10508c911022a80f15276c76d9e60 | [
"Apache-2.0"
] | null | null | null | d2go/utils/testing/data_loader_helper.py | apivovarov/d2go | 44e410843fb10508c911022a80f15276c76d9e60 | [
"Apache-2.0"
] | null | null | null | d2go/utils/testing/data_loader_helper.py | apivovarov/d2go | 44e410843fb10508c911022a80f15276c76d9e60 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import itertools
import json
import os
import uuid
from d2go.data.datasets import register_dataset_split
from d2go.runner import create_runner
from detectron2.data import DatasetCatalog, MetadataCatalog
from mobile_cv.common.misc.file_utils import make_temp_directory
from PIL import Image
IM_DIR = "image_directory"
ANN_FN = "annotation_file"
def create_toy_dataset(
    image_generator, num_images, num_classes=-1, num_keypoints=0, is_rotated=False
):
    """Build a COCO-style toy dataset dict plus metadata.

    One image is generated per index via *image_generator*; each image
    gets a single annotation whose box covers the image center. With
    num_classes == -1 every image gets its own class; otherwise class ids
    cycle through range(num_classes). Returns
    (dataset_dict, meta_data) where dataset_dict has "categories",
    "images" and "annotations" keys.
    """
    categories = []
    images = []
    annotations = []
    meta_data = {}
    if num_classes == -1:
        num_classes = num_images
    for i in range(num_images):
        # Write the image file to disk, then record its COCO image entry.
        image_generator.prepare_image(i)
        image_dict = image_generator.get_image_dict(i)
        width = image_dict["width"]
        height = image_dict["height"]
        images.append(image_dict)
        if i < num_classes:
            categories.append({"name": "class_{}".format(i), "id": i})
        # Axis-aligned box in XYWH_ABS, or center/size/angle for rotated.
        bbox = (
            [width / 4, height / 4, width / 2, height / 2]  # XYWH_ABS
            if not is_rotated
            else [width / 2, height / 2, width / 2, height / 2, 45]  # cXcYWHO_ABS
        )
        annotations.append(
            {
                "image_id": i,
                "category_id": i % num_classes,
                "id": i,
                "bbox": bbox,
                # Flat [x0, y0, v0, x1, y1, v1, ...] keypoint triples,
                # all marked visible (v == 1).
                "keypoints": list(
                    itertools.chain.from_iterable(
                        [
                            (
                                float(idx) / width / 2 + width / 4,
                                float(idx) / height / 2 + height / 4,
                                1,
                            )
                            for idx in range(num_keypoints)
                        ]
                    )
                ),
                "area": width * height,
                "iscrowd": 0,
                "ignore": 0,
                "segmentation": [],
            }
        )
    if num_keypoints > 0:
        keypoint_names = [f"kp_{idx}" for idx in range(num_keypoints)]
        meta_data.update({"keypoint_names": keypoint_names, "keypoint_flip_map": ()})
    return (
        {"categories": categories, "images": images, "annotations": annotations},
        meta_data,
    )
@contextlib.contextmanager
def register_toy_dataset(
    dataset_name, image_generator, num_images, num_classes=-1, num_keypoints=0
):
    """Context manager that registers a toy dataset under *dataset_name*.

    The COCO json file lives in a temporary directory for the duration of
    the context; on exit the dataset is removed from Detectron2's
    DatasetCatalog/MetadataCatalog (and the temp directory is cleaned up).
    """
    json_dataset, meta_data = create_toy_dataset(
        image_generator,
        num_images=num_images,
        num_classes=num_classes,
        num_keypoints=num_keypoints,
    )
    with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
        json_file = os.path.join(tmp_dir, "{}.json".format(dataset_name))
        with open(json_file, "w") as f:
            json.dump(json_dataset, f)
        split_dict = {
            IM_DIR: image_generator.get_image_dir(),
            ANN_FN: json_file,
            "meta_data": meta_data,
        }
        register_dataset_split(dataset_name, split_dict)
        try:
            yield
        finally:
            # Always unregister, even if the body raises.
            DatasetCatalog.remove(dataset_name)
            MetadataCatalog.remove(dataset_name)
def create_local_dataset(
    out_dir,
    num_images,
    image_width,
    image_height,
    num_classes=-1,
    num_keypoints=0,
    is_rotated=False,
):
    """Create and register a toy dataset under *out_dir*; return its name.

    Unlike register_toy_dataset this is not a context manager: the json
    file and images persist in out_dir and the dataset stays registered.
    The dataset name is randomized so repeated calls don't collide.
    """
    dataset_name = "_test_ds_" + str(uuid.uuid4())
    img_gen = LocalImageGenerator(out_dir, image_width, image_height)
    json_dataset, meta_data = create_toy_dataset(
        img_gen,
        num_images=num_images,
        num_classes=num_classes,
        num_keypoints=num_keypoints,
        # Fix: is_rotated was accepted (and used to pick the evaluator
        # below) but never forwarded, so rotated datasets still got
        # axis-aligned XYWH boxes.
        is_rotated=is_rotated,
    )
    json_file = os.path.join(out_dir, "{}.json".format(dataset_name))
    with open(json_file, "w") as f:
        json.dump(json_dataset, f)
    split_dict = {
        IM_DIR: img_gen.get_image_dir(),
        ANN_FN: json_file,
        "meta_data": meta_data,
    }
    if is_rotated:
        split_dict["evaluator_type"] = "rotated_coco"
    register_dataset_split(dataset_name, split_dict)
    return dataset_name
class LocalImageGenerator:
    """Writes blank JPEG images of a fixed size into a local directory."""

    def __init__(self, image_dir, width, height):
        # Fixed dimensions used for every generated image.
        self._width = width
        self._height = height
        self._image_dir = image_dir

    def get_image_dir(self):
        """Return the directory images are written to."""
        return self._image_dir

    def get_image_dict(self, i):
        """Return the COCO image entry for index *i*."""
        return {
            "file_name": "{}.jpg".format(i),
            "width": self._width,
            "height": self._height,
            "id": i,
        }

    def prepare_image(self, i):
        """Write an all-black RGB image for index *i* into the image dir."""
        image = Image.new("RGB", (self._width, self._height))
        image.save(os.path.join(self._image_dir, self.get_image_dict(i)["file_name"]))
@contextlib.contextmanager
def create_fake_detection_data_loader(height, width, is_train):
    """Yield a D2Go detection data loader over a synthetic 3-image dataset.

    Builds a default GeneralizedRCNNRunner config whose resize limits pin
    inputs to (height, width), registers a temporary toy dataset, and
    yields a train or test loader depending on *is_train*.

    Fix: the original opened a second, redundant make_temp_directory()
    context whose directory name was immediately shadowed by the inner
    `with` and never used; only one temp directory is created now.
    """
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    cfg.DATASETS.TRAIN = ["default_dataset_train"]
    cfg.DATASETS.TEST = ["default_dataset_test"]

    # Constrain resizing so loaded images keep the requested dimensions.
    min_size = min(width, height)
    max_size = max(width, height)
    cfg.INPUT.MIN_SIZE_TRAIN = (min_size,)
    cfg.INPUT.MAX_SIZE_TRAIN = max_size
    cfg.INPUT.MIN_SIZE_TEST = min_size
    cfg.INPUT.MAX_SIZE_TEST = max_size

    with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
        image_dir = os.path.join(dataset_dir, "images")
        os.makedirs(image_dir)
        image_generator = LocalImageGenerator(image_dir, width=width, height=height)

        if is_train:
            with register_toy_dataset(
                "default_dataset_train", image_generator, num_images=3
            ):
                train_loader = runner.build_detection_train_loader(cfg)
                yield train_loader
        else:
            with register_toy_dataset(
                "default_dataset_test", image_generator, num_images=3
            ):
                test_loader = runner.build_detection_test_loader(
                    cfg, dataset_name="default_dataset_test"
                )
                yield test_loader
| 31.205882 | 88 | 0.590638 |
ef360cc877789caa3cb7dbff692a1d1c4c571b86 | 15,498 | py | Python | deploy/determined_deploy/aws/aws.py | ryantd/determined | b4f3be3c1878a9a7fdad4775647018753b39ef21 | [
"Apache-2.0"
] | 1 | 2021-03-29T13:39:45.000Z | 2021-03-29T13:39:45.000Z | deploy/determined_deploy/aws/aws.py | ZithaChitra/determined | 1466d46dfd6abc56ad65d9904d4173ea62cff771 | [
"Apache-2.0"
] | null | null | null | deploy/determined_deploy/aws/aws.py | ZithaChitra/determined | 1466d46dfd6abc56ad65d9904d4173ea62cff771 | [
"Apache-2.0"
] | null | null | null | import sys
import time
from typing import Any, Dict, List, Optional, Tuple
import boto3
import tqdm
from botocore.exceptions import ClientError, WaiterError
from determined_deploy.aws import constants
# Try waiting for stack to delete this many times. We break up the waiting so the delete job
# will not fail CI.
NUM_WAITS = 5
def get_user(boto3_session: boto3.session.Session) -> str:
    """Return the IAM user name of the session's caller identity."""
    identity = boto3_session.client("sts").get_caller_identity()
    arn = identity["Arn"]
    assert isinstance(arn, str), f"expected a string Arn but got {arn}"
    # The user name is the last path component of the ARN.
    return arn.rsplit("/", 1)[-1]
def stop_master(master_id: str, boto3_session: boto3.session.Session) -> None:
    """Stop the master EC2 instance and wait until it is fully stopped.

    Also clears termination protection (disableApiTermination) so a later
    stack delete can terminate the instance. Raises WaiterError if the
    instance has not stopped after NUM_WAITS waiter rounds.
    """
    ec2 = boto3_session.client("ec2")
    waiter = ec2.get_waiter("instance_stopped")
    ec2.stop_instances(InstanceIds=[master_id])
    ec2.modify_instance_attribute(
        Attribute="disableApiTermination", Value="false", InstanceId=master_id
    )
    # The wait is broken into NUM_WAITS rounds (see module comment) so a
    # slow stop does not silently block forever in one waiter call.
    for n in range(NUM_WAITS):
        print("Waiting For Master Instance To Stop")
        try:
            waiter.wait(InstanceIds=[master_id], WaiterConfig={"Delay": 10})
            break
        except WaiterError as e:
            # Re-raise only on the final attempt.
            if n == NUM_WAITS - 1:
                raise e
    print("Master Instance Stopped")
def delete(stack_name: str, boto3_session: boto3.session.Session) -> None:
    """Tear down a Determined deployment stack in dependency order.

    Order matters: stop the master (so no new agents launch), terminate
    agents / spot requests (so nothing writes to the checkpoint bucket),
    empty the checkpoint bucket, then delete the CloudFormation stack.
    """
    ec2 = boto3_session.client("ec2")
    # First, shut down the master so no new agents are started.
    stack_output = get_output(stack_name, boto3_session)
    master_id = stack_output[constants.cloudformation.MASTER_ID]
    describe_instance_response = ec2.describe_instances(
        Filters=[{"Name": "instance-id", "Values": [master_id]}],
    )
    # Empty Reservations means the master no longer exists; skip stopping.
    if describe_instance_response["Reservations"]:
        print("Stopping Master Instance")
        stop_master(master_id, boto3_session)
    # Second, terminate the agents so nothing can write to the checkpoint bucket. We create agent
    # instances outside of cloudformation, so we have to manually terminate them.
    if stack_uses_spot(stack_name, boto3_session):
        print("Terminating Running Agents and Pending Spot Requests")
        clean_up_spot(stack_name, boto3_session)
        print("Agents and Spot Requests Terminated")
    else:
        print("Terminating Running Agents")
        terminate_running_agents(
            stack_output[constants.cloudformation.AGENT_TAG_NAME], boto3_session
        )
        print("Agents Terminated")
    # Third, empty the bucket that was created for this stack.
    # (.get(): older stacks may have no checkpoint-bucket output.)
    bucket_name = get_output(stack_name, boto3_session).get(
        constants.cloudformation.CHECKPOINT_BUCKET
    )
    if bucket_name:
        print("Emptying Checkpoint Bucket")
        empty_bucket(bucket_name, boto3_session)
        print("Checkpoint Bucket Empty")
    delete_stack(stack_name, boto3_session)
# Cloudformation
def stack_exists(stack_name: str, boto3_session: boto3.session.Session) -> bool:
    """Return True if the CloudFormation stack can be described."""
    cfn = boto3_session.client("cloudformation")
    print(f"Checking if the CloudFormation Stack ({stack_name}) exists:", end=" ")
    try:
        cfn.describe_stacks(StackName=stack_name)
        return True
    except ClientError:
        # describe_stacks raises when the stack is unknown.
        return False
def delete_stack(stack_name: str, boto3_session: boto3.session.Session) -> None:
    """Delete the CloudFormation stack and block until deletion completes.

    Prints whether the stack existed; the delete call itself is issued
    unconditionally (deleting a non-existent stack is a no-op for AWS).
    """
    cfn = boto3_session.client("cloudformation")
    delete_waiter = cfn.get_waiter("stack_delete_complete")
    if stack_exists(stack_name, boto3_session):
        # "True"/"False" completes the "exists:" line stack_exists printed.
        print(
            f"True - Deleting stack {stack_name}. This may take a few minutes... "
            f"Check the CloudFormation Console for updates"
        )
    else:
        print(f"False. {stack_name} does not exist")
    cfn.delete_stack(StackName=stack_name)
    delete_waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 10})
def update_stack(
    stack_name: str,
    template_body: str,
    boto3_session: boto3.session.Session,
    parameters: Optional[List] = None,
) -> None:
    """Update an existing stack in place and wait for completion.

    Stops the master and terminates agents first so the update happens on
    a quiescent cluster. If CloudFormation reports there is nothing to
    update, the master is restarted and the function returns early.
    """
    cfn = boto3_session.client("cloudformation")
    ec2 = boto3_session.client("ec2")
    update_waiter = cfn.get_waiter("stack_update_complete")
    print(
        f"Updating stack {stack_name}. This may take a few minutes... "
        f"Check the CloudFormation Console for updates"
    )
    # Quiesce the cluster: stop the master, then remove all agents.
    stack_output = get_output(stack_name, boto3_session)
    stop_master(stack_output[constants.cloudformation.MASTER_ID], boto3_session)
    if stack_uses_spot(stack_name, boto3_session):
        clean_up_spot(stack_name, boto3_session, disable_tqdm=True)
    else:
        terminate_running_agents(
            stack_output[constants.cloudformation.AGENT_TAG_NAME], boto3_session
        )
    try:
        # Parameters may only be passed when supplied, so branch on it.
        if parameters:
            cfn.update_stack(
                StackName=stack_name,
                TemplateBody=template_body,
                Parameters=parameters,
                Capabilities=["CAPABILITY_IAM"],
            )
        else:
            cfn.update_stack(
                StackName=stack_name, TemplateBody=template_body, Capabilities=["CAPABILITY_IAM"]
            )
    except ClientError as e:
        # Any error other than "nothing to update" is fatal.
        if e.response["Error"]["Message"] != "No updates are to be performed.":
            raise e
        print(e.response["Error"]["Message"])
        # No template change: restart the master we stopped above and
        # wait for it to come back before returning.
        ec2.start_instances(InstanceIds=[stack_output[constants.cloudformation.MASTER_ID]])
        start_waiter = ec2.get_waiter("instance_running")
        start_waiter.wait(
            InstanceIds=[stack_output[constants.cloudformation.MASTER_ID]],
            WaiterConfig={"Delay": 10},
        )
        return
    update_waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 10})
def create_stack(
    stack_name: str,
    template_body: str,
    boto3_session: boto3.session.Session,
    parameters: Optional[List] = None,
) -> None:
    """Create a CloudFormation stack and block until creation completes.

    The stack is tagged with the Determined management tag so it can later be
    discovered by ``list_stacks``.

    Args:
        stack_name: Name of the stack to create.
        template_body: Full CloudFormation template (JSON/YAML) as a string.
        boto3_session: Session used to build the CloudFormation client.
        parameters: Optional list of CloudFormation parameter dicts.
    """
    print(
        f"Creating stack {stack_name}. This may take a few minutes... "
        f"Check the CloudFormation Console for updates"
    )
    cfn = boto3_session.client("cloudformation")
    create_waiter = cfn.get_waiter("stack_create_complete")
    # Build the request once instead of duplicating the whole call for the
    # with- and without-parameters cases.
    create_kwargs = {
        "StackName": stack_name,
        "TemplateBody": template_body,
        "Capabilities": ["CAPABILITY_IAM"],
        "Tags": [
            {
                "Key": constants.defaults.STACK_TAG_KEY,
                "Value": constants.defaults.STACK_TAG_VALUE,
            }
        ],
    }
    if parameters:
        create_kwargs["Parameters"] = parameters
    cfn.create_stack(**create_kwargs)
    create_waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 10})
def list_stacks(boto3_session: boto3.session.Session) -> List[Dict[str, Any]]:
    """Return every CloudFormation stack carrying the Determined management tag."""
    cfn = boto3_session.client("cloudformation")
    stacks = cfn.describe_stacks()["Stacks"]
    return [
        stack
        for stack in stacks
        if any(
            tag["Key"] == constants.defaults.STACK_TAG_KEY
            and tag["Value"] == constants.defaults.STACK_TAG_VALUE
            for tag in stack["Tags"]
        )
    ]
def get_output(stack_name: str, boto3_session: boto3.session.Session) -> Dict[str, str]:
    """Return the stack's CloudFormation outputs as an OutputKey -> OutputValue dict."""
    cfn = boto3_session.client("cloudformation")
    stack = cfn.describe_stacks(StackName=stack_name)["Stacks"][0]
    return {entry["OutputKey"]: entry["OutputValue"] for entry in stack["Outputs"]}
def get_params(stack_name: str, boto3_session: boto3.session.Session) -> Dict[str, str]:
    """Return the stack's CloudFormation parameters as a ParameterKey -> ParameterValue dict."""
    cfn = boto3_session.client("cloudformation")
    stack = cfn.describe_stacks(StackName=stack_name)["Stacks"][0]
    return {param["ParameterKey"]: param["ParameterValue"] for param in stack["Parameters"]}
def stack_uses_spot(stack_name: str, boto3_session: boto3.session.Session) -> bool:
    """Return True if the stack was deployed with spot instances enabled.

    Looks up the stack's SpotEnabled CloudFormation parameter; a missing
    parameter or any value other than (case-insensitive) "true" counts as
    disabled.
    """
    params = get_params(stack_name, boto3_session)
    spot_enabled = params.get(constants.cloudformation.SPOT_ENABLED)
    if spot_enabled is None:
        return False
    # CloudFormation parameters are strings, so compare case-insensitively.
    return spot_enabled.lower() == "true"
def get_management_tag_key_value(stack_name: str) -> Tuple[str, str]:
    """Return the (key, value) EC2 tag pair marking agents managed by ``stack_name``."""
    return f"det-{stack_name}", f"det-agent-{stack_name}"
def deploy_stack(
    stack_name: str,
    template_body: str,
    keypair: str,
    boto3_session: boto3.session.Session,
    parameters: Optional[List] = None,
) -> None:
    """Create or update the stack, whichever is appropriate.

    Validates the template and verifies the SSH key pair exists (exiting the
    process if it does not), then dispatches to ``update_stack`` when the
    stack already exists and ``create_stack`` otherwise.
    """
    cfn = boto3_session.client("cloudformation")
    # Fail fast on a malformed template before touching any resources.
    cfn.validate_template(TemplateBody=template_body)
    check_keypair(keypair, boto3_session)
    if stack_exists(stack_name, boto3_session):
        print("True - Updating Stack")
        update_stack(stack_name, template_body, boto3_session, parameters)
    else:
        print("False - Creating Stack")
        create_stack(stack_name, template_body, boto3_session, parameters)
# EC2
def get_ec2_info(instance_id: str, boto3_session: boto3.session.Session) -> Dict:
    """Describe a single EC2 instance and return its raw instance-info dict."""
    ec2 = boto3_session.client("ec2")
    reservations = ec2.describe_instances(InstanceIds=[instance_id])["Reservations"]
    instance_info = reservations[0]["Instances"][0]
    assert isinstance(
        instance_info, dict
    ), f"expected a dict of instance info but got {instance_info}"
    return instance_info
def check_keypair(name: str, boto3_session: boto3.session.Session) -> bool:
    """Verify that the named EC2 SSH key pair exists in the session's region.

    Returns True when the key pair exists. When it does not, prints guidance
    and exits the process with status 1 — despite the ``bool`` annotation,
    this function never returns False.
    """
    ec2 = boto3_session.client("ec2")
    print(f"Checking if the SSH Keypair ({name}) exists:", end=" ")
    all_keys = ec2.describe_key_pairs()["KeyPairs"]
    names = [x["KeyName"] for x in all_keys]
    if name in names:
        print("True")
        return True
    print("False")
    print(
        f"Key pair {name} not found in {boto3_session.region_name}. "
        f"Please create the key pair {name} in {boto3_session.region_name} first"
    )
    sys.exit(1)
def terminate_running_agents(agent_tag_name: str, boto3_session: boto3.session.Session) -> None:
    """Terminate every running/pending EC2 agent tagged ``Name=agent_tag_name``.

    Blocks until the instances report terminated, retrying the waiter up to
    NUM_WAITS times before re-raising the final WaiterError.
    """
    ec2 = boto3_session.client("ec2")
    waiter = ec2.get_waiter("instance_terminated")
    response = ec2.describe_instances(
        Filters=[
            {"Name": "tag:Name", "Values": [agent_tag_name]},
            {"Name": "instance-state-name", "Values": ["running", "pending"]},
        ]
    )
    reservations = response["Reservations"]
    instance_ids = []
    for reservation in reservations:
        for instance in reservation["Instances"]:
            instance_ids.append(instance["InstanceId"])

    if instance_ids:
        ec2.terminate_instances(InstanceIds=instance_ids)
        for n in range(NUM_WAITS):
            print("Waiting For Agents To Terminate")
            try:
                waiter.wait(InstanceIds=instance_ids, WaiterConfig={"Delay": 10})
                break
            except WaiterError as e:
                # Each waiter.wait already polls internally; retry a few times
                # before treating the timeout as fatal.
                if n == NUM_WAITS - 1:
                    raise e
# EC2 Spot
def list_spot_requests_for_stack(
    stack_name: str, boto3_session: boto3.session.Session
) -> List[Dict]:
    """Return a summary dict for each open/active spot request managed by this stack."""
    tag_key, tag_val = get_management_tag_key_value(stack_name)
    ec2 = boto3_session.client("ec2")
    response = ec2.describe_spot_instance_requests(
        Filters=[
            {"Name": f"tag:{tag_key}", "Values": [tag_val]},
            {"Name": "state", "Values": ["open", "active"]},
        ]
    )
    return [
        {
            "id": request["SpotInstanceRequestId"],
            "state": request["State"],
            "statusCode": request["Status"]["Code"],
            "statusMessage": request["Status"]["Message"],
            # Unfulfilled requests have no instance yet.
            "instanceId": request.get("InstanceId", None),
        }
        for request in response["SpotInstanceRequests"]
    ]
def delete_spot_requests_and_agents(
    stack_name: str, boto3_session: boto3.session.Session
) -> List[str]:
    """
    List all spot requests. Requests with an associated instance have the
    instance terminated (which automatically cancels the spot request);
    requests without an instance are cancelled directly.

    Returns the list of instance ids that were terminated so that, at the end
    of spot cleanup, callers can wait until all instances are gone.
    """
    spot_requests = list_spot_requests_for_stack(stack_name, boto3_session)
    instances_to_del = [r["instanceId"] for r in spot_requests if r["instanceId"] is not None]
    requests_to_term = [r["id"] for r in spot_requests if r["instanceId"] is None]

    ec2 = boto3_session.client("ec2")
    if instances_to_del:
        ec2.terminate_instances(InstanceIds=instances_to_del)
    if requests_to_term:
        ec2.cancel_spot_instance_requests(SpotInstanceRequestIds=requests_to_term)
    return instances_to_del
def clean_up_spot(
    stack_name: str, boto3_session: boto3.session.Session, disable_tqdm: bool = False
) -> None:
    """Repeatedly cancel spot requests/agents for ``stack_name`` until it is safe to stop.

    Shows a tqdm progress bar while sweeping for SPOT_WAIT_SECONDS, then does
    one final sweep and waits for the last batch of terminated instances.
    """
    # The spot API is eventually consistent and the only way to guarantee
    # that we don't leave any spot requests alive (that may eventually be
    # fulfilled and lead to running EC2 instances) is to wait a long enough
    # period that any created spot requests will have shown up in the API.
    # 60 seconds seems like a relatively safe amount of time.
    SPOT_WAIT_SECONDS = 60

    start_time = time.time()
    all_terminated_instance_ids = set()

    format_str = "{l_bar}{bar}| (remaining time: {remaining})"
    pbar = tqdm.tqdm(
        total=SPOT_WAIT_SECONDS,
        desc="Cleaning up spot instances and spot instance requests",
        bar_format=format_str,
        disable=disable_tqdm,
    )
    progress_bar_state = 0.0

    while True:
        elapsed_time = time.time() - start_time
        if elapsed_time >= SPOT_WAIT_SECONDS:
            pbar.update(SPOT_WAIT_SECONDS - progress_bar_state)  # Exit TQDM with it showing 100%
            pbar.close()
            break
        tqdm_update = elapsed_time - progress_bar_state
        pbar.update(tqdm_update)
        progress_bar_state = elapsed_time

        instance_ids = delete_spot_requests_and_agents(stack_name, boto3_session)
        for i in instance_ids:
            all_terminated_instance_ids.add(i)

    # Final cleanup
    instance_ids = delete_spot_requests_and_agents(stack_name, boto3_session)
    for i in instance_ids:
        all_terminated_instance_ids.add(i)

    # NOTE(review): only the *last* batch of instance ids is waited on below;
    # all_terminated_instance_ids is accumulated but never used. Confirm
    # whether the waiter should cover the full set.
    if len(instance_ids) > 0:
        ec2 = boto3_session.client("ec2")
        waiter = ec2.get_waiter("instance_terminated")
        for n in range(NUM_WAITS):
            print("Waiting For Spot Agents To Terminate")
            try:
                waiter.wait(InstanceIds=instance_ids, WaiterConfig={"Delay": 10})
                break
            except WaiterError as e:
                if n == NUM_WAITS - 1:
                    raise e
# S3
def empty_bucket(bucket_name: str, boto3_session: boto3.session.Session) -> None:
    """Delete every object in the S3 bucket; a missing bucket is a no-op."""
    s3 = boto3_session.resource("s3")
    try:
        s3.Bucket(bucket_name).objects.all().delete()
    except ClientError as error:
        # Only an absent bucket is acceptable; re-raise everything else.
        if error.response["Error"]["Code"] != "NoSuchBucket":
            raise error
| 33.044776 | 97 | 0.656085 |
5b998ac6991d1b365b4651cd6198b9eaf9da1bfc | 2,252 | py | Python | crequest.py | kendricktan/flaskrestful-custom-request | 1ffb95d1a9e066c5852879dacdd49d998f6a31e1 | [
"MIT"
] | 3 | 2017-06-11T11:53:19.000Z | 2021-09-23T20:09:56.000Z | crequest.py | kendricktan/flaskrestful-custom-request | 1ffb95d1a9e066c5852879dacdd49d998f6a31e1 | [
"MIT"
] | null | null | null | crequest.py | kendricktan/flaskrestful-custom-request | 1ffb95d1a9e066c5852879dacdd49d998f6a31e1 | [
"MIT"
] | null | null | null | import msgpack
from flask import Request, _request_ctx_stack
from flask.wrappers import _get_data
from werkzeug.exceptions import BadRequest
class RequestWithMsgPack(Request):
"""
Extending on Flask's Request class to support msgpack mimetype
"""
@property
def is_msgpack(self):
"""
Checks if request is msgpack type or not.
"""
mt = self.mimetype
return mt.startswith('application/') and mt.endswith('msgpack')
def msgpack(self, force=False, silent=False):
"""
NOTE: This function name needs to be the same name specified on the
'location' variable of the request parser. e.g.
parser.add_argument('data', location='msgpack') `location needs to have the same
name as the callable function
Parses the incoming request data and decodes it from msgpack to python
__dict__ type. By default this function will return `None` if the mimetype
is not `application/msgpack` but can be overridden by the ``force`` parameter.
If parsing fails the
:param force: if set to ``True`` the mimetype is ignored
:param silent: if set to ``True`` this method will fail silently and return ``None``
"""
if not (force or self.is_msgpack):
return None
request_charset = self.mimetype_params.get('charset', 'utf-8')
try:
data = _get_data(self, False)
rv = msgpack.unpackb(data, encoding=request_charset)
except ValueError as e:
if silent:
return None
else:
rv = self.on_msgpack_loading_failed(e)
# Returns a converted dictionary (byte literal dict to unicode dict)
# reason why this is done is because in Python3
# my_dict[u'key'] is different to my_dict['key']
return rv
def on_msgpack_loading_failed(self, e):
"""
Called if decoding of msgpack data failed
"""
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.config.get('DEBUG', False):
raise BadRequest('Failed to decode msgpack object: {0}'.format(e))
raise BadRequest()
| 34.646154 | 92 | 0.619893 |
e4fe713c9a4fc6a9f7556d2e3133f1fbc7cbafa1 | 1,664 | py | Python | examples/demo.py | willwx/XDream | ee7022a35e94f00d08fdb1e49ca784fc497740c0 | [
"MIT"
] | 38 | 2019-04-19T16:37:37.000Z | 2022-02-15T21:42:24.000Z | examples/demo.py | willwx/XDream | ee7022a35e94f00d08fdb1e49ca784fc497740c0 | [
"MIT"
] | null | null | null | examples/demo.py | willwx/XDream | ee7022a35e94f00d08fdb1e49ca784fc497740c0 | [
"MIT"
] | 12 | 2019-05-01T20:29:26.000Z | 2021-04-30T07:49:25.000Z | """
Runs an experiment to maximize activity of a 'neuron,' simulated
by a unit in a CNN.
Demonstrates a high-level API: `CNNExperiment`, which is designed to simulate an
electrophysiology experiment and behaves similarly to `EphysExperiment`
"""
from pathlib import Path
import sys
sys.path.append('../xdream')    # or, add to path in your environment

from Experiments import CNNExperiment


# Experiment parameters: `CNNExperiment` maximizes the activation of one CNN
# unit ("neuron") by evolving generator codes with a genetic optimizer.
exp_settings = {
    'project_dir': 'demo',    # change as needed
    'target_neuron': ('alexnet', 'classifier.6', 1),
    # 'target_neuron': ('caffenet', 'fc8', 1),    # toggle with above to use CaffeNet
    'scorer_parameters': {'engine': 'pytorch'},    # comment out to use CaffeNet
    'optimizer_name': 'genetic',
    'optimizer_parameters': {
        'generator_parameters': {'engine': 'pytorch'},    # comment out to use caffe
        'generator_name': 'deepsim-fc6',
        'population_size': 20,
        'mutation_rate': 0.5,
        'mutation_size': 0.5,
        'selectivity': 2,
        'heritability': 0.5,
        'n_conserve': 0},
    'with_write': True,    # simulates file-writing behavior of EphysExperiment
    'image_size': 128,    # comment out to use default generator output size
    'max_optimize_images': 1000,    # query at most this many images
    'random_seed': 0,
    'config_file_path': __file__,    # makes a copy of this file in exp_dir, for debugging/recordkeeping
    # 'stochastic': False,    # simulates neuronal noise
    # 'wait_each_step': 0    # simulates a delay of image presentation
}

# Ensure the output directory exists, then run the optimization loop.
Path(exp_settings['project_dir']).mkdir(exist_ok=True)
experiment = CNNExperiment(**exp_settings)
experiment.run()
| 38.697674 | 104 | 0.675481 |
114a9ba4999b275e79957ec7a8cdeb5f134ae4b6 | 1,422 | py | Python | python/updatePermisos.py | maestromark55/bust-radio | d3552304e9e0f551359b3a6b72f0f2bc31e863f5 | [
"Apache-2.0"
] | null | null | null | python/updatePermisos.py | maestromark55/bust-radio | d3552304e9e0f551359b3a6b72f0f2bc31e863f5 | [
"Apache-2.0"
] | null | null | null | python/updatePermisos.py | maestromark55/bust-radio | d3552304e9e0f551359b3a6b72f0f2bc31e863f5 | [
"Apache-2.0"
] | null | null | null | import urllib2
import json
import sqlite3
import os
from classes.classErrorLog import classErrorLog
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
DBpath = os.path.join(current_dir, 'RFID_Lock.sqlite')
def updateSQLite():
    # Fetch the remote permissions JSON and insert door-002 records into the
    # local seguridad_permisos table. NOTE: this is Python 2 code (urllib2,
    # print statements).
    try:
        jsonRequest = urllib2.urlopen("http://www.desa-net.com/TOTAI/dbm/seguridad_permios/").read()
        JSONs = json.loads(jsonRequest)
        db = sqlite3.connect(DBpath)
        with db:
            cur = db.cursor()
            print "DB was modified 1"
            # NOTE(review): the table is never cleared, so repeated runs will
            # accumulate duplicate rows — confirm whether the DELETE below
            # should be re-enabled.
            #cur.execute("DELETE FROM seguridad_permisos")
            for item in JSONs:
                i1 = item["SEQ"]
                i2 = item["puerta_SEQ"]
                i3 = item["tarjeta_RFID"]
                i4 = item["persona_SEQ"]
                i5 = item["persona"]
                i6 = item["persona_PIN"]
                i7 = item["permiso"]
                i8 = item["domingo"]
                i9 = item["noche_inicio"]
                i10 = item["noche_fin"]
                i11 = item["fecha_vencida"]
                # NOTE(review): `all` shadows the Python builtin of the same name.
                all = [i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11]
                print "DB was modified"
                # Only rows for door '002' are imported.
                if i2 == '002':
                    cur.execute("INSERT INTO seguridad_permisos values(?,?,?,?,?,?,?,?,?,?,?)", all)
            print "SUCESS: " + str(len(JSONs)) + " records"
    except Exception:
        # Any failure (network, JSON, SQLite) is logged, never raised.
        classErrorLog(sys.exc_info())


updateSQLite()
cdc4ae4db30e79a4d6ecd1b622870a863e28462e | 4,328 | py | Python | tc_images/tc_img_torch_trainer/train.py | cabukela/iorek-byrnison | a683d0e07f6de6ca568a0941ced6570feb2a6aa4 | [
"Apache-2.0"
] | 1 | 2022-03-18T11:00:18.000Z | 2022-03-18T11:00:18.000Z | tc_images/tc_img_torch_trainer/train.py | cabukela/iorek-byrnison | a683d0e07f6de6ca568a0941ced6570feb2a6aa4 | [
"Apache-2.0"
] | null | null | null | tc_images/tc_img_torch_trainer/train.py | cabukela/iorek-byrnison | a683d0e07f6de6ca568a0941ced6570feb2a6aa4 | [
"Apache-2.0"
] | 2 | 2022-03-18T11:00:21.000Z | 2022-03-30T04:08:05.000Z |
import os
import subprocess
import datetime
import fire
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
class TrainData(Dataset):
    """Minimal map-style dataset pairing feature rows with their labels."""

    def __init__(self, X_data, y_data):
        # Stored as-is; any indexable pair of equal-length sequences works.
        self.X_data = X_data
        self.y_data = y_data

    def __getitem__(self, index):
        # Return the (features, label) pair at ``index``.
        return self.X_data[index], self.y_data[index]

    def __len__(self):
        return len(self.X_data)
class BinaryClassifier(nn.Module):
    """Two-hidden-layer MLP emitting a single logit for binary classification."""

    def __init__(self):
        super(BinaryClassifier, self).__init__()
        # 48 input features -> 64 -> 64 -> 1 logit (sigmoid is applied by the
        # BCEWithLogitsLoss during training, not here).
        self.h1 = nn.Linear(48, 64)
        self.h2 = nn.Linear(64, 64)
        self.output_layer = nn.Linear(64, 1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.1)
        self.batchnorm1 = nn.BatchNorm1d(64)
        self.batchnorm2 = nn.BatchNorm1d(64)

    def forward(self, inputs):
        # Linear -> ReLU -> BatchNorm for both hidden layers, then dropout
        # before the final projection (same op order as a plain sequential).
        hidden = self.batchnorm1(self.relu(self.h1(inputs)))
        hidden = self.batchnorm2(self.relu(self.h2(hidden)))
        return self.output_layer(self.dropout(hidden))
def binary_acc(y_pred, y_true):
    """Percentage accuracy (rounded, as a tensor) of logits ``y_pred`` vs. 0/1 targets."""
    predicted_labels = torch.round(torch.sigmoid(y_pred))
    num_correct = (predicted_labels == y_true).sum().float()
    return torch.round(100 * num_correct / y_true.shape[0])
AIP_MODEL_DIR = os.environ["AIP_MODEL_DIR"]
def train_evaluate(training_dataset_path, validation_dataset_path, batch_size, num_epochs):
    """Train the churn classifier on train+validation CSVs and upload it to GCS.

    batch_size/num_epochs arrive as strings from the CLI (python-fire) and are
    coerced to ints. The final model state_dict is written locally and copied
    to ``AIP_MODEL_DIR`` via gsutil.
    """
    batch_size = int(batch_size)
    num_epochs = int(num_epochs)

    # Read in train/validation data and concat
    df_train = pd.read_csv(training_dataset_path)
    df_validation = pd.read_csv(validation_dataset_path)
    df = pd.concat([df_train, df_validation])

    # NOTE(review): 'SeniorCitizen' appears twice in this list — confirm
    # whether that is intentional (it affects get_dummies below).
    categorical_features = ['SeniorCitizen', 'Contract', 'TechSupport', 'OnlineSecurity',
                            'InternetService', 'PaperlessBilling', 'PaymentMethod',
                            'StreamingMovies', 'OnlineBackup', 'SeniorCitizen', 'MultipleLines',
                            'Dependents', 'StreamingTV', 'Partner', 'gender', 'PhoneService', 'DeviceProtection']
    target='Churn'

    # One-hot encode categorical variables
    df = pd.get_dummies(df,columns=categorical_features)
    # NOTE(review): 'Yes' maps to 0 and everything else to 1 — i.e. the
    # positive class is "did not churn". Verify this is the intended encoding.
    df[target] = df[target].apply(lambda x: 0 if x=='Yes' else 1)

    # Split features and labels into 2 different vars
    X_train = df.loc[:, df.columns != target]
    y_train = np.array(df[target])

    # Normalize features
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)

    # Training data
    train_data = TrainData(torch.FloatTensor(X_train),
                           torch.FloatTensor(y_train))

    # Use torch DataLoader to feed data to model
    train_loader = DataLoader(dataset=train_data, batch_size=batch_size, drop_last=True)

    # Instantiate model
    model = BinaryClassifier()

    # Loss is binary crossentropy w/ logits. Must manually implement sigmoid for inference
    criterion = nn.BCEWithLogitsLoss()

    # Adam optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    model.train()
    for e in range(1, num_epochs+1):
        epoch_loss = 0
        epoch_acc = 0
        for X_batch, y_batch in train_loader:
            optimizer.zero_grad()

            y_pred = model(X_batch)

            # Targets need a trailing dim to match the (batch, 1) logits.
            loss = criterion(y_pred, y_batch.unsqueeze(1))
            acc = binary_acc(y_pred, y_batch.unsqueeze(1))

            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            epoch_acc += acc.item()

        print(f'Epoch {e}: Loss = {epoch_loss/len(train_loader):.5f} | Acc = {epoch_acc/len(train_loader):.3f}')

    # Save the model locally
    model_filename='model.pt'
    torch.save(model.state_dict(), model_filename)

    #EXPORT_PATH = os.path.join(AIP_MODEL_DIR, 'savedmodel')
    EXPORT_PATH = os.path.join(AIP_MODEL_DIR)

    # Copy the model to GCS
    gcs_model_path = '{}/{}'.format(EXPORT_PATH, model_filename)
    subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path])
    print('Saved model in: {}'.format(gcs_model_path))
if __name__ == '__main__':
fire.Fire(train_evaluate)
| 31.136691 | 112 | 0.643253 |
325466ca8f3dfcfcc9dd0436f9a89ca16ede8d45 | 48,110 | py | Python | tests/test_fx.py | va6996/moviepy | 60b95c37816413da6bf304e85f8c0ba8e2d2c6e7 | [
"MIT"
] | null | null | null | tests/test_fx.py | va6996/moviepy | 60b95c37816413da6bf304e85f8c0ba8e2d2c6e7 | [
"MIT"
] | null | null | null | tests/test_fx.py | va6996/moviepy | 60b95c37816413da6bf304e85f8c0ba8e2d2c6e7 | [
"MIT"
] | null | null | null | """MoviePy video and audio effects tests."""
import decimal
import importlib
import math
import numbers
import os
import random
import sys
import cupy as np
import pytest
from moviepy import (
AudioClip,
AudioFileClip,
BitmapClip,
ColorClip,
VideoClip,
VideoFileClip,
)
from moviepy.audio.fx import (
audio_delay,
audio_fadein,
audio_fadeout,
audio_normalize,
multiply_stereo_volume,
multiply_volume,
)
from moviepy.tools import convert_to_seconds
from moviepy.video.fx import (
blackwhite,
crop,
even_size,
fadein,
fadeout,
freeze,
freeze_region,
invert_colors,
loop,
lum_contrast,
make_loopable,
margin,
mask_and,
mask_or,
mirror_x,
mirror_y,
multiply_color,
multiply_speed,
resize,
rotate,
time_mirror,
time_symmetrize,
)
def test_accel_decel():
    """Placeholder: the accel_decel FX has no test coverage yet."""
    pass
def test_blackwhite():
    """blackwhite FX maps every pixel into the grayscale spectrum for all
    RGB-weighting modes (default, custom tuple, and "CRT_phosphor")."""
    # Create black/white spectrum ``bw_color_dict`` to compare against it.
    # Colors after ``blackwhite`` FX must be inside this dictionary
    # Note: black/white spectrum is made of colors with same numbers
    # [(0, 0, 0), (1, 1, 1), (2, 2, 2)...]
    bw_color_dict = {}
    for num in range(0, 256):
        bw_color_dict[chr(num + 255)] = (num, num, num)
    color_dict = bw_color_dict.copy()
    # update dictionary with default BitmapClip color_dict values
    color_dict.update(BitmapClip.DEFAULT_COLOR_DICT)

    # add row with random colors in b/w spectrum
    random_row = ""
    for num in range(512, 515):
        # use unique unicode representation for each color
        char = chr(num)
        random_row += char

        # random colors in the b/w spectrum
        color_dict[char] = tuple(random.randint(0, 255) for i in range(3))

    # clip converted below to black/white
    clip = BitmapClip([["RGB", random_row]], color_dict=color_dict, fps=1)

    # for each possible ``preserve_luminosity`` boolean argument value
    for preserve_luminosity in [True, False]:
        # default argument (``RGB=None``)
        clip_bw = blackwhite(clip, preserve_luminosity=preserve_luminosity)
        bitmap = clip_bw.to_bitmap()
        assert bitmap

        for i, row in enumerate(bitmap[0]):
            for char in row:
                # all characters returned by ``to_bitmap`` are in the b/w spectrum
                assert char in bw_color_dict

            if i == 0:  # pure "RGB" colors are converted to [85, 85, 85]
                assert char == row[0]  # so are equal

        # custom random ``RGB`` argument
        clip_bw_custom_rgb = blackwhite(
            clip,
            RGB=(random.randint(0, 255), 0, 0),
            preserve_luminosity=preserve_luminosity,
        )
        bitmap = clip_bw_custom_rgb.to_bitmap()
        for i, row in enumerate(bitmap[0]):
            for i2, char in enumerate(row):
                # all characters returned by ``to_bitmap`` are in the b/w spectrum
                assert char in bw_color_dict

                # for clip "RGB" row, two latest converted colors are equal
                if i == 0 and i2 > 0:
                    assert char == row[1] and char == row[2]

        # ``RGB="CRT_phosphor"`` argument
        clip_bw_crt_phosphor = blackwhite(
            clip, RGB="CRT_phosphor", preserve_luminosity=preserve_luminosity
        )
        bitmap = clip_bw_crt_phosphor.to_bitmap()
        assert bitmap
        for row in bitmap[0]:
            for char in row:
                # all characters returned by ``to_bitmap`` are in the b/w spectrum
                assert char in bw_color_dict
# This currently fails with a with_mask error!
# def test_blink(util):
# with VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0,10) as clip:
# clip1 = blink(clip, 1, 1)
# clip1.write_videofile(os.path.join(util.TMP_DIR,"blink1.webm"))
def test_multiply_color():
    """multiply_color scales pixel values by the factor, clipping at 255."""
    color_dict = {"H": (0, 0, 200), "L": (0, 0, 50), "B": (0, 0, 255), "O": (0, 0, 0)}
    clip = BitmapClip([["LLO", "BLO"]], color_dict=color_dict, fps=1)

    clipfx = multiply_color(clip, 4)
    target = BitmapClip([["HHO", "BHO"]], color_dict=color_dict, fps=1)
    assert target == clipfx
def test_crop():
    """crop FX supports corner coordinates, width/height, and center-based crops."""
    # x: 0 -> 4, y: 0 -> 3 inclusive
    clip = BitmapClip([["ABCDE", "EDCBA", "CDEAB", "BAEDC"]], fps=1)

    clip1 = crop(clip)
    target1 = BitmapClip([["ABCDE", "EDCBA", "CDEAB", "BAEDC"]], fps=1)
    assert clip1 == target1

    clip2 = crop(clip, x1=1, y1=1, x2=3, y2=3)
    target2 = BitmapClip([["DC", "DE"]], fps=1)
    assert clip2 == target2

    clip3 = crop(clip, y1=2)
    target3 = BitmapClip([["CDEAB", "BAEDC"]], fps=1)
    assert clip3 == target3

    clip4 = crop(clip, x1=2, width=2)
    target4 = BitmapClip([["CD", "CB", "EA", "ED"]], fps=1)
    assert clip4 == target4

    # TODO x_center=1 does not perform correctly
    clip5 = crop(clip, x_center=2, y_center=2, width=3, height=3)
    target5 = BitmapClip([["ABC", "EDC", "CDE"]], fps=1)
    assert clip5 == target5

    clip6 = crop(clip, x_center=2, width=2, y1=1, y2=2)
    target6 = BitmapClip([["DC"]], fps=1)
    assert clip6 == target6
def test_even_size():
    """even_size trims a clip with an odd width and/or height down to even dims."""
    clip1 = BitmapClip([["ABC", "BCD"]], fps=1)  # Width odd
    clip1even = even_size(clip1)
    target1 = BitmapClip([["AB", "BC"]], fps=1)
    assert clip1even == target1

    clip2 = BitmapClip([["AB", "BC", "CD"]], fps=1)  # Height odd
    clip2even = even_size(clip2)
    target2 = BitmapClip([["AB", "BC"]], fps=1)
    assert clip2even == target2

    clip3 = BitmapClip([["ABC", "BCD", "CDE"]], fps=1)  # Width and height odd
    clip3even = even_size(clip3)
    target3 = BitmapClip([["AB", "BC"]], fps=1)
    assert clip3even == target3
def test_fadein():
    """fadein replaces the first frame(s) with the (default or custom) initial color."""
    color_dict = {
        "I": (0, 0, 0),
        "R": (255, 0, 0),
        "G": (0, 255, 0),
        "B": (0, 0, 255),
        "W": (255, 255, 255),
    }
    clip = BitmapClip([["R"], ["G"], ["B"]], color_dict=color_dict, fps=1)

    clip1 = fadein(clip, 1)  # default initial color
    target1 = BitmapClip([["I"], ["G"], ["B"]], color_dict=color_dict, fps=1)
    assert clip1 == target1

    clip2 = fadein(clip, 1, initial_color=(255, 255, 255))  # different initial color
    target2 = BitmapClip([["W"], ["G"], ["B"]], color_dict=color_dict, fps=1)
    assert clip2 == target2
def test_fadeout(util, video):
    """Smoke test: fadeout over the whole clip writes a video without error."""
    clip = video(end_time=0.5)
    clip1 = fadeout(clip, 0.5)
    clip1.write_videofile(os.path.join(util.TMP_DIR, "fadeout1.webm"))
@pytest.mark.parametrize(
    (
        "t",
        "freeze_duration",
        "total_duration",
        "padding_end",
        "output_frames",
    ),
    (
        # at start, 1 second (default t == 0)
        (
            None,
            1,
            None,
            None,
            ["R", "R", "G", "B"],
        ),
        # at start, 1 second (explicit t)
        (
            0,
            1,
            None,
            None,
            ["R", "R", "G", "B"],
        ),
        # at end, 1 second
        (
            "end",
            1,
            None,
            None,
            ["R", "G", "B", "B"],
        ),
        # at end 1 second, padding end 1 second
        (
            "end",
            1,
            None,
            1,
            ["R", "G", "G", "B"],
        ),
        # at 2nd frame, 1 second
        (
            1,  # second 0 is frame 1, second 1 is frame 2...
            1,
            None,
            None,
            ["R", "G", "G", "B"],
        ),
        # at 2nd frame, 2 seconds
        (
            1,
            2,
            None,
            None,
            ["R", "G", "G", "G", "B"],
        ),
        # `freeze_duration`, `total_duration` are None
        (1, None, None, None, ValueError),
        # `total_duration` 5 at start (2 seconds)
        (None, None, 5, None, ["R", "R", "R", "G", "B"]),
        # total duration 5 at end
        ("end", None, 5, None, ["R", "G", "B", "B", "B"]),
        # total duration 5 padding end
        ("end", None, 5, 1, ["R", "G", "G", "G", "B"]),
    ),
    ids=[
        "at start, 1 second (default t == 0)",
        "at start, 1 second (explicit t)",
        "at end, 1 second",
        "at end 1 second, padding end 1 second",
        "at 2nd frame, 1 second",
        "at 2nd frame, 2 seconds",
        "`freeze_duration`, `total_duration` are None",
        "`total_duration` 5 at start (2 seconds)",
        "`total_duration` 5 at end",
        "`total_duration` 5 padding end",
    ],
)
def test_freeze(t, freeze_duration, total_duration, padding_end, output_frames):
    """freeze FX holds the frame at ``t`` for the requested duration; an
    exception class in ``output_frames`` marks a case expected to raise."""
    input_frames = ["R", "G", "B"]
    clip_duration = len(input_frames)

    # create BitmapClip with predefined set of colors, during 1 second each one
    clip = BitmapClip([list(color) for color in input_frames], fps=1).with_duration(
        clip_duration
    )

    # build kwargs passed to `freeze`
    possible_kwargs = {
        "t": t,
        "freeze_duration": freeze_duration,
        "total_duration": total_duration,
        "padding_end": padding_end,
    }
    kwargs = {
        kw_name: kw_value
        for kw_name, kw_value in possible_kwargs.items()
        if kw_value is not None
    }

    # freeze clip
    if hasattr(output_frames, "__traceback__"):
        with pytest.raises(output_frames):
            freeze(clip, **kwargs)
        return
    else:
        freezed_clip = freeze(clip, **kwargs)

    # assert new duration
    expected_freeze_duration = (
        freeze_duration
        if freeze_duration is not None
        else total_duration - clip_duration
    )
    assert freezed_clip.duration == clip_duration + expected_freeze_duration

    # assert colors are the expected
    for i, color in enumerate(freezed_clip.iter_frames()):
        expected_color = list(BitmapClip.DEFAULT_COLOR_DICT[output_frames[i]])
        assert list(color[0][0]) == expected_color
def test_freeze_region():
    """freeze_region freezes only the given (or everything but the given) region."""
    clip = BitmapClip([["AAB", "CCC"], ["BBR", "DDD"], ["CCC", "ABC"]], fps=1)

    # Test region
    clip1 = freeze_region(clip, t=1, region=(2, 0, 3, 1))
    target1 = BitmapClip([["AAR", "CCC"], ["BBR", "DDD"], ["CCR", "ABC"]], fps=1)
    assert clip1 == target1

    # Test outside_region
    clip2 = freeze_region(clip, t=1, outside_region=(2, 0, 3, 1))
    target2 = BitmapClip([["BBB", "DDD"], ["BBR", "DDD"], ["BBC", "DDD"]], fps=1)
    assert clip2 == target2
def test_gamma_corr():
    """Placeholder: the gamma_corr FX has no test coverage yet."""
    pass
def test_headblur():
    """Placeholder: the headblur FX has no test coverage yet."""
    pass
def test_invert_colors():
    """invert_colors maps every channel value v to 255 - v."""
    clip = BitmapClip(
        [["AB", "BC"]],
        color_dict={"A": (0, 0, 0), "B": (50, 100, 150), "C": (255, 255, 255)},
        fps=1,
    )

    clip1 = invert_colors(clip)
    target1 = BitmapClip(
        [["CD", "DA"]],
        color_dict={"A": (0, 0, 0), "D": (205, 155, 105), "C": (255, 255, 255)},
        fps=1,
    )
    assert clip1 == target1
def test_loop(util, video):
    """loop FX repeats a clip by count, by target duration, or infinitely
    (bounded by with_duration); also smoke-tests file output and audio looping."""
    clip = BitmapClip([["R"], ["G"], ["B"]], fps=1)

    clip1 = loop(clip, n=2)  # loop 2 times
    target1 = BitmapClip([["R"], ["G"], ["B"], ["R"], ["G"], ["B"]], fps=1)
    assert clip1 == target1

    clip2 = loop(clip, duration=8)  # loop 8 seconds
    target2 = BitmapClip(
        [["R"], ["G"], ["B"], ["R"], ["G"], ["B"], ["R"], ["G"]], fps=1
    )
    assert clip2 == target2

    clip3 = loop(clip).with_duration(5)  # infinite loop
    target3 = BitmapClip([["R"], ["G"], ["B"], ["R"], ["G"]], fps=1)
    assert clip3 == target3

    clip = video(start_time=0.2, end_time=0.3)  # 0.1 seconds long
    clip1 = loop(clip).with_duration(0.5)  # infinite looping
    clip1.write_videofile(os.path.join(util.TMP_DIR, "loop1.webm"))

    clip2 = loop(clip, duration=0.5)  # loop for 1 second
    clip2.write_videofile(os.path.join(util.TMP_DIR, "loop2.webm"))

    clip3 = loop(clip, n=3)  # loop 3 times
    clip3.write_videofile(os.path.join(util.TMP_DIR, "loop3.webm"))

    # Test audio looping
    clip = AudioClip(
        lambda t: np.sin(440 * 2 * np.pi * t) * (t % 1) + 0.5, duration=2.5, fps=44100
    )
    clip1 = clip.loop(2)
    # TODO fix AudioClip.__eq__()
    # assert concatenate_audioclips([clip, clip]) == clip1
def test_lum_contrast(util, video):
    """Smoke test: lum_contrast with defaults writes a video without error."""
    clip = video()
    clip1 = lum_contrast(clip)
    clip1.write_videofile(os.path.join(util.TMP_DIR, "lum_contrast1.webm"))

    # what are the correct value ranges for function arguments lum,
    # contrast and contrast_thr? Maybe we should check for these in
    # lum_contrast.
def test_make_loopable(util, video):
    """Smoke test: make_loopable with a 0.4s crossfade writes a video without error."""
    clip = video()
    clip1 = make_loopable(clip, 0.4)
    clip1.write_videofile(os.path.join(util.TMP_DIR, "make_loopable1.webm"))
@pytest.mark.parametrize(
    ("ClipClass"),
    (ColorClip, BitmapClip),
    ids=("ColorClip", "BitmapClip"),
)
@pytest.mark.parametrize(
    (
        "margin_size",
        "margins",  # [left, right, top, bottom]
        "color",
        "expected_result",
    ),
    (
        pytest.param(
            None,
            None,
            None,
            [["RRR", "RRR"], ["RRR", "RRR"]],
            id="default arguments",
        ),
        pytest.param(
            1,
            None,
            None,
            [
                ["OOOOO", "ORRRO", "ORRRO", "OOOOO"],
                ["OOOOO", "ORRRO", "ORRRO", "OOOOO"],
            ],
            id="margin_size=1,color=(0, 0, 0)",
        ),
        pytest.param(
            1,
            None,
            (0, 255, 0),
            [
                ["GGGGG", "GRRRG", "GRRRG", "GGGGG"],
                ["GGGGG", "GRRRG", "GRRRG", "GGGGG"],
            ],
            id="margin_size=1,color=(0, 255, 0)",
        ),
        pytest.param(
            None,
            [1, 0, 0, 0],
            (0, 255, 0),
            [["GRRR", "GRRR"], ["GRRR", "GRRR"]],
            id="left=1,color=(0, 255, 0)",
        ),
        pytest.param(
            None,
            [0, 1, 0, 0],
            (0, 255, 0),
            [["RRRG", "RRRG"], ["RRRG", "RRRG"]],
            id="right=1,color=(0, 255, 0)",
        ),
        pytest.param(
            None,
            [1, 0, 1, 0],
            (0, 255, 0),
            [["GGGG", "GRRR", "GRRR"], ["GGGG", "GRRR", "GRRR"]],
            id="left=1,top=1,color=(0, 255, 0)",
        ),
        pytest.param(
            None,
            [0, 1, 1, 1],
            (0, 255, 0),
            [["GGGG", "RRRG", "RRRG", "GGGG"], ["GGGG", "RRRG", "RRRG", "GGGG"]],
            id="right=1,top=1,bottom=1,color=(0, 255, 0)",
        ),
        pytest.param(
            None,
            [3, 0, 0, 0],
            (255, 255, 255),
            [["WWWRRR", "WWWRRR"], ["WWWRRR", "WWWRRR"]],
            id="left=3,color=(255, 255, 255)",
        ),
        pytest.param(
            None,
            [0, 0, 0, 4],
            (255, 255, 255),
            [
                ["RRR", "RRR", "WWW", "WWW", "WWW", "WWW"],
                ["RRR", "RRR", "WWW", "WWW", "WWW", "WWW"],
            ],
            id="bottom=4,color=(255, 255, 255)",
        ),
    ),
)
def test_margin(ClipClass, margin_size, margins, color, expected_result):
    """margin FX pads a clip with a colored border, uniformly (margin_size)
    or per-side (left/right/top/bottom), for both ColorClip and BitmapClip."""
    if ClipClass is BitmapClip:
        clip = BitmapClip([["RRR", "RRR"], ["RRR", "RRR"]], fps=1)
    else:
        clip = ColorClip(color=(255, 0, 0), size=(3, 2), duration=2).with_fps(1)

    # if None, set default argument values
    if color is None:
        color = (0, 0, 0)

    if margins is None:
        margins = [0, 0, 0, 0]
    left, right, top, bottom = margins

    new_clip = margin(
        clip,
        margin_size=margin_size,
        left=left,
        right=right,
        top=top,
        bottom=bottom,
        color=color,
    )

    assert new_clip == BitmapClip(expected_result, fps=1)
@pytest.mark.parametrize("image_from", ("np.ndarray", "ImageClip"))
@pytest.mark.parametrize("duration", (None, "random"))
@pytest.mark.parametrize(
("color", "mask_color", "expected_color"),
(
(
(0, 0, 0),
(255, 255, 255),
(0, 0, 0),
),
(
(255, 0, 0),
(0, 0, 255),
(0, 0, 0),
),
(
(255, 255, 255),
(0, 10, 20),
(0, 10, 20),
),
(
(10, 10, 10),
(20, 0, 20),
(10, 0, 10),
),
),
)
def test_mask_and(image_from, duration, color, mask_color, expected_color):
    """Checks ``mask_and`` FX behaviour.

    ``expected_color`` is the element-wise minimum of ``color`` and
    ``mask_color`` (see the parametrization above).
    """
    # random frame size so the FX is not pinned to one fixed shape
    clip_size = tuple(random.randint(3, 10) for i in range(2))
    if duration == "random":
        duration = round(random.uniform(0, 0.5), 2)
    # test ImageClip and np.ndarray types as mask argument
    clip = ColorClip(color=color, size=clip_size).with_duration(duration)
    mask_clip = ColorClip(color=mask_color, size=clip.size)
    masked_clip = mask_and(
        clip, mask_clip if image_from == "ImageClip" else mask_clip.get_frame(0)
    )
    assert masked_clip.duration == clip.duration
    # only the first pixel of the first frame needs checking: the clip is a
    # uniform color fill
    assert np.array_equal(masked_clip.get_frame(0)[0][0], np.array(expected_color))
    # test VideoClip as mask argument
    color_frame, mask_color_frame = (np.array([[color]]), np.array([[mask_color]]))
    clip = VideoClip(lambda t: color_frame).with_duration(duration)
    mask_clip = VideoClip(lambda t: mask_color_frame).with_duration(duration)
    masked_clip = mask_and(clip, mask_clip)
    assert np.array_equal(masked_clip.get_frame(0)[0][0], np.array(expected_color))
def test_mask_color():
    """Placeholder: the ``mask_color`` FX has no coverage yet."""
@pytest.mark.parametrize("image_from", ("np.ndarray", "ImageClip"))
@pytest.mark.parametrize("duration", (None, "random"))
@pytest.mark.parametrize(
("color", "mask_color", "expected_color"),
(
(
(0, 0, 0),
(255, 255, 255),
(255, 255, 255),
),
(
(255, 0, 0),
(0, 0, 255),
(255, 0, 255),
),
(
(255, 255, 255),
(0, 10, 20),
(255, 255, 255),
),
(
(10, 10, 10),
(20, 0, 20),
(20, 10, 20),
),
),
)
def test_mask_or(image_from, duration, color, mask_color, expected_color):
    """Checks ``mask_or`` FX behaviour.

    ``expected_color`` is the element-wise maximum of ``color`` and
    ``mask_color`` (see the parametrization above).
    """
    # random frame size so the FX is not pinned to one fixed shape
    clip_size = tuple(random.randint(3, 10) for i in range(2))
    if duration == "random":
        duration = round(random.uniform(0, 0.5), 2)
    # test ImageClip and np.ndarray types as mask argument
    clip = ColorClip(color=color, size=clip_size).with_duration(duration)
    mask_clip = ColorClip(color=mask_color, size=clip.size)
    masked_clip = mask_or(
        clip, mask_clip if image_from == "ImageClip" else mask_clip.get_frame(0)
    )
    assert masked_clip.duration == clip.duration
    # only the first pixel of the first frame needs checking: the clip is a
    # uniform color fill
    assert np.array_equal(masked_clip.get_frame(0)[0][0], np.array(expected_color))
    # test VideoClip as mask argument
    color_frame, mask_color_frame = (np.array([[color]]), np.array([[mask_color]]))
    clip = VideoClip(lambda t: color_frame).with_duration(duration)
    mask_clip = VideoClip(lambda t: mask_color_frame).with_duration(duration)
    masked_clip = mask_or(clip, mask_clip)
    assert np.array_equal(masked_clip.get_frame(0)[0][0], np.array(expected_color))
def test_mirror_x():
    """A horizontal flip must reverse each row of pixels."""
    flipped = mirror_x(BitmapClip([["AB", "CD"]], fps=1))
    assert flipped == BitmapClip([["BA", "DC"]], fps=1)
def test_mirror_y():
    """A vertical flip must reverse the order of the pixel rows."""
    flipped = mirror_y(BitmapClip([["AB", "CD"]], fps=1))
    assert flipped == BitmapClip([["CD", "AB"]], fps=1)
def test_painting():
    """Placeholder: the ``painting`` FX has no coverage yet."""
@pytest.mark.parametrize("library", ("PIL", "cv2", "scipy"))
@pytest.mark.parametrize("apply_to_mask", (True, False))
@pytest.mark.parametrize(
(
"size",
"duration",
"new_size",
"width",
"height",
),
(
(
[8, 2],
1,
[4, 1],
None,
None,
),
(
[8, 2],
1,
None,
4,
None,
),
(
[2, 8],
1,
None,
None,
4,
),
# neither 'new_size', 'height' or 'width'
(
[2, 2],
1,
None,
None,
None,
),
# `new_size` as scaling factor
(
[5, 5],
1,
2,
None,
None,
),
(
[5, 5],
1,
decimal.Decimal(2.5),
None,
None,
),
# arguments as functions
(
[2, 2],
4,
lambda t: {0: [4, 4], 1: [8, 8], 2: [11, 11], 3: [5, 8]}[t],
None,
None,
),
(
[2, 4],
2,
None,
None,
lambda t: {0: 3, 1: 4}[t],
),
(
[5, 2],
2,
None,
lambda t: {0: 3, 1: 4}[t],
None,
),
),
)
def test_resize(
    library, apply_to_mask, size, duration, new_size, height, width, monkeypatch
):
    """Checks ``resize`` FX behaviours using all argument and third party
    implementation combinations.
    """
    # force the FX to use the requested third-party resizer implementation
    resize_fx_mod = sys.modules[resize.__module__]
    resizer_func, error_msgs = {
        "PIL": resize_fx_mod._get_PIL_resizer,
        "cv2": resize_fx_mod._get_cv2_resizer,
        "scipy": resize_fx_mod._get_scipy_resizer,
    }[library]()
    # if function is not available, skip test for implementation
    if error_msgs:
        pytest.skip(error_msgs[0].split(" (")[0])
    monkeypatch.setattr(resize_fx_mod, "resizer", resizer_func)
    # build expected sizes (using `width` or `height` arguments will be proportional
    # to original size)
    if new_size:
        if hasattr(new_size, "__call__"):
            # function of time -> one expected size per integer second
            expected_new_sizes = [new_size(t) for t in range(duration)]
        elif isinstance(new_size, numbers.Number):
            # scaling factor
            expected_new_sizes = [[int(size[0] * new_size), int(size[1] * new_size)]]
        else:
            # tuple or list
            expected_new_sizes = [new_size]
    elif height:
        if hasattr(height, "__call__"):
            expected_new_sizes = []
            for t in range(duration):
                new_height = height(t)
                expected_new_sizes.append(
                    [int(size[0] * new_height / size[1]), new_height]
                )
        else:
            expected_new_sizes = [[size[0] * height / size[1], height]]
    elif width:
        if hasattr(width, "__call__"):
            expected_new_sizes = []
            for t in range(duration):
                new_width = width(t)
                expected_new_sizes.append(
                    [new_width, int(size[1] * new_width / size[0])]
                )
        else:
            expected_new_sizes = [[width, size[1] * width / size[0]]]
    else:
        expected_new_sizes = None
    clip = ColorClip(size=size, color=(0, 0, 0), duration=duration)
    clip.fps = 1
    mask = ColorClip(size=size, color=0, is_mask=True)
    clip = clip.with_mask(mask)
    # when NO resizing argument was passed, `resize` must raise `ValueError`;
    # the clip is then kept unresized so the size-check loop below still runs
    if expected_new_sizes is None:
        with pytest.raises(ValueError):
            resized_clip = clip.resize(
                new_size=new_size,
                height=height,
                width=width,
                apply_to_mask=apply_to_mask,
            )
        resized_clip = clip
        expected_new_sizes = [size]
    else:
        resized_clip = clip.resize(
            new_size=new_size, height=height, width=width, apply_to_mask=apply_to_mask
        )
    # assert new size for each frame
    # (non-callable cases use duration == 1 in the parametrization, so
    # `expected_new_sizes[t]` only ever indexes the single entry)
    for t in range(duration):
        expected_width = expected_new_sizes[t][0]
        expected_height = expected_new_sizes[t][1]
        clip_frame = resized_clip.get_frame(t)
        assert len(clip_frame[0]) == expected_width
        assert len(clip_frame) == expected_height
        mask_frame = resized_clip.mask.get_frame(t)
        if apply_to_mask:
            assert len(mask_frame[0]) == expected_width
            assert len(mask_frame) == expected_height
@pytest.mark.parametrize("PIL_installed", (True, False))
@pytest.mark.parametrize("angle_offset", [-360, 0, 360, 720])
@pytest.mark.parametrize("unit", ["deg", "rad"])
@pytest.mark.parametrize("resample", ["bilinear", "nearest", "bicubic", "unknown"])
@pytest.mark.parametrize(
(
"angle",
"translate",
"center",
"bg_color",
"expected_frames",
),
(
(
0,
None,
None,
None,
[["AAAA", "BBBB", "CCCC"], ["ABCD", "BCDE", "CDEA"]],
),
(
90,
None,
None,
None,
[["ABC", "ABC", "ABC", "ABC"], ["DEA", "CDE", "BCD", "ABC"]],
),
(
lambda t: 90,
None,
None,
None,
[["ABC", "ABC", "ABC", "ABC"], ["DEA", "CDE", "BCD", "ABC"]],
),
(
180,
None,
None,
None,
[["CCCC", "BBBB", "AAAA"], ["AEDC", "EDCB", "DCBA"]],
),
(
270,
None,
None,
None,
[["CBA", "CBA", "CBA", "CBA"], ["CBA", "DCB", "EDC", "AED"]],
),
(
45,
(50, 50),
None,
(0, 255, 0),
[
["GGGGGG", "GGGGGG", "GGGGGG", "GGGGGG", "GGGGGG", "GGGGGG"],
["GGGGGG", "GGGGGG", "GGGGGG", "GGGGGG", "GGGGGG", "GGGGGG"],
],
),
(
45,
(50, 50),
(20, 20),
(255, 0, 0),
[
["RRRRRR", "RRRRRR", "RRRRRR", "RRRRRR", "RRRRRR"],
["RRRRRR", "RRRRRR", "RRRRRR", "RRRRRR", "RRRRRR"],
],
),
(
135,
(-100, -100),
None,
(0, 0, 255),
[
["BBBBBB", "BBBBBB", "BBBBBB", "BBBBBB", "BBBBBB"],
["BBBBBB", "BBBBBB", "BBBBBB", "BBBBBB", "BBBBBB"],
],
),
),
)
def test_rotate(
    PIL_installed,
    angle_offset,
    angle,
    unit,
    resample,
    translate,
    center,
    bg_color,
    expected_frames,
    monkeypatch,
):
    """Check ``rotate`` FX behaviour against possible combinations of arguments."""
    # NOTE(review): `angle_offset` is parametrized ([-360, 0, 360, 720]) but
    # never applied to `angle` below — confirm whether the rotation angle was
    # meant to be offset by it (a full-turn offset should not change frames).
    original_frames = [["AAAA", "BBBB", "CCCC"], ["ABCD", "BCDE", "CDEA"]]
    # angles are defined in degrees, so convert to radians testing ``unit="rad"``
    if unit == "rad":
        if hasattr(angle, "__call__"):
            _angle = lambda t: math.radians(angle(0))
        else:
            _angle = math.radians(angle)
    else:
        _angle = angle
    clip = BitmapClip(original_frames, fps=1)
    kwargs = {
        "unit": unit,
        "resample": resample,
        "translate": translate,
        "center": center,
        "bg_color": bg_color,
    }
    # an unknown resample filter must be rejected before anything else
    if resample not in ["bilinear", "nearest", "bicubic"]:
        with pytest.raises(ValueError) as exc:
            clip.rotate(_angle, **kwargs)
        assert (
            "'resample' argument must be either 'bilinear', 'nearest' or 'bicubic'"
        ) == str(exc.value)
        return
    # if the scenario implies that PIL is not installed, monkeypatch the
    # module in which 'rotate' function resides
    if not PIL_installed:
        rotate_module = importlib.import_module("moviepy.video.fx.rotate")
        monkeypatch.setattr(rotate_module, "Image", None)
        rotate_func = rotate_module.rotate
    else:
        rotate_func = rotate
    # resolve the angle, because if it is a multiple of 90, the rotation
    # can be computed even without an available PIL installation
    if hasattr(_angle, "__call__"):
        _resolved_angle = _angle(0)
    else:
        _resolved_angle = _angle
    if unit == "rad":
        _resolved_angle = math.degrees(_resolved_angle)
    # without Pillow, only right-angle rotations with no extras are possible
    if not PIL_installed and (
        (_resolved_angle % 90 != 0) or center or translate or bg_color
    ):
        with pytest.raises(ValueError) as exc:
            rotated_clip = clip.fx(rotate_func, _angle, **kwargs)
        assert (
            'Without "Pillow" installed, only angles that are a multiple of 90'
        ) in str(exc.value)
    else:
        rotated_clip = clip.fx(rotate_func, _angle, **kwargs)
        expected_clip = BitmapClip(expected_frames, fps=1)
        assert rotated_clip.to_bitmap() == expected_clip.to_bitmap()
def test_rotate_nonstandard_angles(util):
    """Rotating by an angle that is not a multiple of 90 must still render
    and encode without errors."""
    source = ColorClip([600, 400], [150, 250, 100]).with_duration(1).with_fps(5)
    rotated = rotate(source, 20)
    rotated.write_videofile(os.path.join(util.TMP_DIR, "color_rotate.webm"))
def test_rotate_mask():
    """Regression test for https://github.com/Zulko/moviepy/pull/1399:
    rotating a mask clip must not zero out all of its pixels."""
    mask = ColorClip(color=0.5, size=(1, 1), is_mask=True)
    rotated = rotate(mask.with_fps(1).with_duration(1), 45)
    assert rotated.get_frame(0)[1][1] != 0
@pytest.mark.parametrize(
("unsupported_kwargs",),
(
(["bg_color"],),
(["center"],),
(["translate"],),
(["translate", "center"],),
(["center", "bg_color", "translate"],),
),
ids=(
"bg_color",
"center",
"translate",
"translate,center",
"center,bg_color,translate",
),
)
def test_rotate_supported_PIL_kwargs(
    unsupported_kwargs,
    monkeypatch,
):
    """Test supported 'rotate' FX arguments by PIL version.

    Marks the parametrized kwargs as unsupported in the FX module's support
    table, then asserts one UserWarning (with the right message) per ignored
    kwarg.
    """
    rotate_module = importlib.import_module("moviepy.video.fx.rotate")
    # patch supported kwargs data by PIL version
    new_PIL_rotate_kwargs_supported, min_version_by_kwarg_name = ({}, {})
    for kwarg, (
        kw_name,
        supported,
        min_version,
    ) in rotate_module.PIL_rotate_kwargs_supported.items():
        # overwrite the real support flag: supported only if not in the
        # parametrized "unsupported" list
        supported = kw_name not in unsupported_kwargs
        new_PIL_rotate_kwargs_supported[kwarg] = [kw_name, supported, min_version]
        min_version_by_kwarg_name[kw_name] = ".".join(str(n) for n in min_version)
    monkeypatch.setattr(
        rotate_module,
        "PIL_rotate_kwargs_supported",
        new_PIL_rotate_kwargs_supported,
    )
    # rotation passing every optional kwarg, so each "unsupported" one warns
    with pytest.warns(UserWarning) as record:
        BitmapClip([["R", "G", "B"]], fps=1).fx(
            rotate_module.rotate,
            45,
            bg_color=(10, 10, 10),
            center=(1, 1),
            translate=(1, 0),
        )
    # assert number of warnings
    assert len(record.list) == len(unsupported_kwargs)
    # assert messages contents
    messages = []
    for warning in record.list:
        messages.append(warning.message.args[0])
    for unsupported_kwarg in unsupported_kwargs:
        expected_message = (
            f"rotate '{unsupported_kwarg}' argument is not supported by your"
            " Pillow version and is being ignored. Minimum Pillow version"
            f" required: v{min_version_by_kwarg_name[unsupported_kwarg]}"
        )
        assert expected_message in messages
def test_scroll():
    """Placeholder: the ``scroll`` FX has no coverage yet."""
def test_multiply_speed():
    """Check ``multiply_speed`` driven by both a ``factor`` and an
    equivalent ``final_duration``."""
    clip = BitmapClip([["A"], ["B"], ["C"], ["D"]], fps=1)

    # 1/2x speed: every frame shown twice
    half_speed_frames = [["A"], ["A"], ["B"], ["B"], ["C"], ["C"], ["D"], ["D"]]
    assert multiply_speed(clip, 0.5) == BitmapClip(half_speed_frames, fps=1)
    assert multiply_speed(clip, final_duration=8) == BitmapClip(
        half_speed_frames, fps=1
    )

    # 1/3x speed via final_duration: every frame shown three times
    third_speed_frames = [[letter] for letter in "AAABBBCCCDDD"]
    assert multiply_speed(clip, final_duration=12) == BitmapClip(
        third_speed_frames, fps=1
    )

    # 2x speed: every other frame dropped
    double_speed_frames = [["A"], ["C"]]
    assert multiply_speed(clip, 2) == BitmapClip(double_speed_frames, fps=1)
    assert multiply_speed(clip, final_duration=2) == BitmapClip(
        double_speed_frames, fps=1
    )

    # 4x speed: single frame left
    quadruple = multiply_speed(clip, 4)
    target = BitmapClip([["A"]], fps=1)
    assert (
        quadruple == target
    ), f"{quadruple.duration} {target.duration} {quadruple.fps} {target.fps}"
def test_supersample():
    """Placeholder: the ``supersample`` FX has no coverage yet."""
def test_time_mirror():
    """``time_mirror`` must play the frames in reverse order, for both an
    odd and an even number of frames."""
    three_frames = [["AA", "AA"], ["BB", "BB"], ["CC", "CC"]]
    mirrored = time_mirror(BitmapClip(three_frames, fps=1))
    assert mirrored == BitmapClip(three_frames[::-1], fps=1)

    four_frames = [["AA", "AA"], ["BB", "BB"], ["CC", "CC"], ["DD", "DD"]]
    mirrored = time_mirror(BitmapClip(four_frames, fps=1))
    assert mirrored == BitmapClip(four_frames[::-1], fps=1)
def test_time_symmetrize():
    """``time_symmetrize`` must append the time-reversed clip after the
    original one."""
    frames = [["AA", "AA"], ["BB", "BB"], ["CC", "CC"]]
    symmetrized = time_symmetrize(BitmapClip(frames, fps=1))
    assert symmetrized == BitmapClip(frames + frames[::-1], fps=1)
def test_audio_normalize():
    """After normalization the loudest sample must reach full scale."""
    normalized = audio_normalize(AudioFileClip("media/crunching.mp3"))
    assert normalized.max_volume() == 1
def test_audio_normalize_muted():
    """A completely silent clip must pass through normalization unchanged
    (i.e. no crash on a zero maximum volume)."""
    silence = np.array([0.0])
    clip = audio_normalize(AudioClip(lambda t: silence, duration=1, fps=44100))
    assert np.array_equal(clip.to_soundarray(), silence)
@pytest.mark.parametrize(
("sound_type", "factor", "duration", "start_time", "end_time"),
(
pytest.param(
"stereo",
0,
None,
None,
None,
id="stereo-0",
),
pytest.param(
"stereo",
2,
None,
None,
None,
id="stereo-2",
),
pytest.param(
"mono",
3,
None,
None,
None,
id="mono-3",
),
pytest.param(
"stereo",
0,
0.2,
"00:00:00,1",
None,
id="stereo-0-start=.1",
),
pytest.param(
"stereo",
0,
0.3,
None,
(0, 0, 0.2),
id="stereo-0-end=.2",
),
pytest.param(
"stereo",
0,
0.3,
0.1,
0.2,
id="stereo-0-start=.1-end=.2",
),
pytest.param(
"mono",
0,
0.3,
0.2,
None,
id="mono-0-start=.2",
),
pytest.param(
"mono",
0,
0.2,
None,
"00:00:00.1",
id="mono-0-end=.1",
),
pytest.param(
"mono",
2,
0.3,
0.1,
0.2,
id="mono-0-start=.1-end=.2",
),
),
)
def test_multiply_volume_audioclip(
    sound_type,
    factor,
    duration,
    start_time,
    end_time,
):
    """Check ``multiply_volume`` scaling mono and stereo clips by ``factor``,
    either over the whole clip or only inside the ``start_time``/``end_time``
    window; samples outside the window must be untouched.
    """
    if sound_type == "stereo":
        make_frame = lambda t: np.array(
            [
                np.sin(440 * 2 * np.pi * t),
                np.sin(160 * 2 * np.pi * t),
            ]
        ).T.copy(order="C")
    else:
        make_frame = lambda t: [np.sin(440 * 2 * np.pi * t)]
    clip = AudioClip(
        make_frame,
        duration=duration if duration else 0.1,
        fps=22050,
    )
    clip_array = clip.to_soundarray()
    clip_transformed = multiply_volume(
        clip,
        factor,
        start_time=start_time,
        end_time=end_time,
    )
    clip_transformed_array = clip_transformed.to_soundarray()
    assert len(clip_transformed_array)
    if hasattr(clip_array, "shape") and len(clip_array.shape) > 1:
        # stereo clip: check both channels independently
        left_channel_transformed = clip_transformed_array[:, 0]
        right_channel_transformed = clip_transformed_array[:, 1]
        if start_time is None and end_time is None:
            # no window -> the whole clip is scaled
            expected_left_channel_transformed = clip_array[:, 0] * factor
            expected_right_channel_transformed = clip_array[:, 1] * factor
        else:
            # build the expectation sample by sample: scale only samples
            # whose timestamp falls inside [start_time, end_time]
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end
            expected_left_channel_transformed = np.array([])
            expected_right_channel_transformed = np.array([])
            for i, frame in enumerate(clip_array):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1
                )
                expected_left_channel_transformed = np.append(
                    expected_left_channel_transformed,
                    transformed_frame[0],
                )
                expected_right_channel_transformed = np.append(
                    expected_right_channel_transformed,
                    transformed_frame[1],
                )
        assert len(left_channel_transformed)
        assert len(expected_left_channel_transformed)
        assert np.array_equal(
            left_channel_transformed,
            expected_left_channel_transformed,
        )
        assert len(right_channel_transformed)
        assert len(expected_right_channel_transformed)
        assert np.array_equal(
            right_channel_transformed,
            expected_right_channel_transformed,
        )
    else:
        # mono clip
        if start_time is None and end_time is None:
            expected_clip_transformed_array = clip_array * factor
        else:
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end
            expected_clip_transformed_array = np.array([])
            for i, frame in enumerate(clip_array[0]):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1
                )
                expected_clip_transformed_array = np.append(
                    expected_clip_transformed_array,
                    transformed_frame,
                )
            # rewrap as a 2D (1, n) array to match `to_soundarray`'s shape
            expected_clip_transformed_array = np.array(
                [
                    expected_clip_transformed_array,
                ]
            )
        assert len(expected_clip_transformed_array)
        assert np.array_equal(
            expected_clip_transformed_array,
            clip_transformed_array,
        )
def test_multiply_volume_videoclip():
    """Muting a sub-range of a video's audio must silence exactly that
    range and leave the rest audible."""
    start_time, end_time = 0.1, 0.2
    muted = multiply_volume(
        VideoFileClip("media/chaplin.mp4").subclip(0, 0.3),
        0,
        start_time=start_time,
        end_time=end_time,
    )
    samples = muted.audio.to_soundarray()
    assert len(samples)
    silence = np.zeros(samples.shape[1])
    for index, frame in enumerate(samples):
        t = index / muted.audio.fps
        if start_time <= t <= end_time:
            assert np.array_equal(frame, silence)
        else:
            assert not np.array_equal(frame, silence)
def test_multiply_stereo_volume():
    """Check ``multiply_stereo_volume`` muting/scaling each channel
    independently, for both stereo and mono clips."""
    clip = AudioFileClip("media/crunching.mp3")
    # stereo mute
    clip_left_channel_muted = multiply_stereo_volume(clip, left=0)
    clip_right_channel_muted = multiply_stereo_volume(clip, right=0, left=2)
    left_channel_muted = clip_left_channel_muted.to_soundarray()[:, 0]
    right_channel_muted = clip_right_channel_muted.to_soundarray()[:, 1]
    z_channel = np.zeros(len(left_channel_muted))
    assert np.array_equal(left_channel_muted, z_channel)
    assert np.array_equal(right_channel_muted, z_channel)
    # stereo level doubled (left channel of the right-muted clip got left=2)
    left_channel_doubled = clip_right_channel_muted.to_soundarray()[:, 0]
    expected_left_channel_doubled = clip.to_soundarray()[:, 0] * 2
    assert np.array_equal(left_channel_doubled, expected_left_channel_doubled)
    # mono muted
    sinus_wave = lambda t: [np.sin(440 * 2 * np.pi * t)]
    mono_clip = AudioClip(sinus_wave, duration=1, fps=22050)
    muted_mono_clip = multiply_stereo_volume(mono_clip, left=0)
    mono_channel_muted = muted_mono_clip.to_soundarray()
    z_channel = np.zeros(len(mono_channel_muted))
    assert np.array_equal(mono_channel_muted, z_channel)
    # mono doubled
    mono_clip = AudioClip(sinus_wave, duration=1, fps=22050)
    doubled_mono_clip = multiply_stereo_volume(
        mono_clip, left=None, right=2
    )  # using right
    mono_channel_doubled = doubled_mono_clip.to_soundarray()
    d_channel = mono_clip.to_soundarray() * 2
    assert np.array_equal(mono_channel_doubled, d_channel)
@pytest.mark.parametrize(
("duration", "offset", "n_repeats", "decay"),
(
(0.1, 0.2, 11, 0),
(0.4, 2, 5, 2),
(0.5, 0.6, 3, -1),
(0.3, 1, 7, 4),
),
)
def test_audio_delay(stereo_wave, duration, offset, n_repeats, decay):
    """Check that creating a short pulse of audio, the delay converts to a sound
    with the volume level in the form `-_-_-_-_-`, being `-` pulses expressed by
    `duration` argument and `_` being chunks of muted audio. Keep in mind that this
    way of test the FX only works if `duration <= offset`, but as does not make sense
    create a delay with `duration > offset`, this is enough for our purposes.

    Note that decayment values are not tested here, but are created using
    `multiply_volume`, should be OK.
    """
    # limits of this test
    assert n_repeats > 0  # some repetition, if not does not make sense
    assert duration <= offset  # avoid wave distorsion
    assert not offset * 1000000 % 2  # odd offset -> no accurate muted chunk size
    # stereo audio clip
    clip = AudioClip(
        make_frame=stereo_wave(left_freq=440, right_freq=880),
        duration=duration,
        fps=44100,
    )
    clip_array = clip.to_soundarray()
    # stereo delayed clip
    delayed_clip = audio_delay(clip, offset=offset, n_repeats=n_repeats, decay=decay)
    delayed_clip_array = delayed_clip.to_soundarray()
    # size of chunks with audios
    sound_chunk_size = clip_array.shape[0]
    # muted chunks size
    muted_chunk_size = int(sound_chunk_size * offset / duration) - sound_chunk_size
    zeros_expected_chunk_as_muted = np.zeros((muted_chunk_size, 2))
    # expected per-repetition volume factors, linearly decaying from 1
    decayments = np.linspace(1, max(0, decay), n_repeats)
    for i in range(n_repeats + 1):  # first clip, is not part of the repeated ones
        if i == n_repeats:
            # the delay ends in sound, so last muted chunk does not exists
            break
        # sound chunk
        sound_start_at = i * sound_chunk_size + i * muted_chunk_size
        sound_ends_at = sound_start_at + sound_chunk_size
        # first sound chunk
        if i == 0:
            assert np.array_equal(
                delayed_clip_array[:, :][sound_start_at:sound_ends_at],
                multiply_volume(clip, decayments[i]).to_soundarray(),
            )
        # muted chunk (starts one sample after the sound chunk; the bounds
        # checks below assert exactly that the neighbouring samples are loud)
        mute_starts_at = sound_ends_at + 1
        mute_ends_at = mute_starts_at + muted_chunk_size
        assert np.array_equal(
            delayed_clip_array[:, :][mute_starts_at:mute_ends_at],
            zeros_expected_chunk_as_muted,
        )
        # check muted bounds
        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at - 1 : mute_ends_at],
            zeros_expected_chunk_as_muted,
        )
        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at : mute_ends_at + 1],
            zeros_expected_chunk_as_muted,
        )
@pytest.mark.parametrize("sound_type", ("stereo", "mono"))
@pytest.mark.parametrize("fps", (44100, 22050))
@pytest.mark.parametrize(
("clip_duration", "fadein_duration"),
(
(
(0.2, 0.1),
(1, "00:00:00,4"),
(0.3, 0.13),
)
),
)
def test_audio_fadein(
    mono_wave, stereo_wave, sound_type, fps, clip_duration, fadein_duration
):
    """Check that ``audio_fadein`` ramps the volume up linearly: the first
    frame is muted, each tenth of the fade-in window peaks roughly 0.1
    higher than the previous one, and audio after the window keeps full
    volume.
    """
    if sound_type == "stereo":
        make_frame = stereo_wave(left_freq=440, right_freq=160)
    else:
        make_frame = mono_wave(440)
    clip = AudioClip(make_frame, duration=clip_duration, fps=fps)
    new_clip = audio_fadein(clip, fadein_duration)
    # first frame is muted
    first_frame = new_clip.get_frame(0)
    if sound_type == "stereo":
        assert len(first_frame) > 1
        for value in first_frame:
            assert value == 0.0
    else:
        assert first_frame == 0.0
    # the parametrization may pass the duration as a "HH:MM:SS,mmm" string
    fadein_duration = convert_to_seconds(fadein_duration)
    n_parts = 10
    # cut transformed part into subclips and check the expected max_volume for
    # each one
    time_foreach_part = fadein_duration / n_parts
    start_times = np.arange(0, fadein_duration, time_foreach_part)
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        possible_value = (i + 1) / n_parts
        assert round(subclip_max_volume, 2) in [
            possible_value,
            round(possible_value - 0.01, 5),
        ]
    # cut non transformed part into subclips and check the expected max_volume
    # for each one (almost 1)
    time_foreach_part = (clip_duration - fadein_duration) / n_parts
    start_times = np.arange(fadein_duration, clip_duration, time_foreach_part)
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        assert round(subclip_max_volume, 4) == 1
@pytest.mark.parametrize("sound_type", ("stereo", "mono"))
@pytest.mark.parametrize("fps", (44100, 22050))
@pytest.mark.parametrize(
("clip_duration", "fadeout_duration"),
(
(
(0.2, 0.1),
(0.7, "00:00:00,4"),
(0.3, 0.13),
)
),
)
def test_audio_fadeout(
    mono_wave, stereo_wave, sound_type, fps, clip_duration, fadeout_duration
):
    """Check that ``audio_fadeout`` ramps the volume down linearly: each
    tenth of the fade-out window peaks roughly 0.1 lower than the previous
    one, while audio before the window keeps full volume.
    """
    if sound_type == "stereo":
        make_frame = stereo_wave(left_freq=440, right_freq=160)
    else:
        make_frame = mono_wave(440)
    clip = AudioClip(make_frame, duration=clip_duration, fps=fps)
    new_clip = audio_fadeout(clip, fadeout_duration)
    # the parametrization may pass the duration as a "HH:MM:SS,mmm" string
    fadeout_duration = convert_to_seconds(fadeout_duration)
    n_parts = 10
    # cut transformed part into subclips and check the expected max_volume for
    # each one
    time_foreach_part = fadeout_duration / n_parts
    start_times = np.arange(
        clip_duration - fadeout_duration,
        clip_duration,
        time_foreach_part,
    )
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        possible_value = 1 - i * 0.1
        assert round(subclip_max_volume, 2) in [
            round(possible_value, 2),
            round(possible_value - 0.01, 5),
        ]
    # cut non transformed part into subclips and check the expected max_volume
    # for each one (almost 1)
    time_foreach_part = (clip_duration - fadeout_duration) / n_parts
    start_times = np.arange(0, clip_duration - fadeout_duration, time_foreach_part)
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        assert round(subclip_max_volume, 4) == 1
if __name__ == "__main__":
    # Allow running this test module directly; delegates to pytest's CLI.
    pytest.main()
| 29.299635 | 87 | 0.550613 |
f43a961310e3ac52654866231f8cfac70589d42d | 36,861 | py | Python | ckan/ckanext-spatial/ckanext/spatial/model/harvested_metadata.py | lorenzoeusepi77/ckan | 3a620e9c81ba1750d12941c02184092d507f71df | [
"Apache-2.0"
] | null | null | null | ckan/ckanext-spatial/ckanext/spatial/model/harvested_metadata.py | lorenzoeusepi77/ckan | 3a620e9c81ba1750d12941c02184092d507f71df | [
"Apache-2.0"
] | null | null | null | ckan/ckanext-spatial/ckanext/spatial/model/harvested_metadata.py | lorenzoeusepi77/ckan | 3a620e9c81ba1750d12941c02184092d507f71df | [
"Apache-2.0"
] | null | null | null | from lxml import etree
import logging
log = logging.getLogger(__name__)
class MappedXmlObject(object):
    """Base for objects mapped onto XML; subclasses declare ``elements``."""
    # List of MappedXmlElement instances describing what to extract.
    elements = []
class MappedXmlDocument(MappedXmlObject):
    """An XML document plus the ``elements`` describing how to read it."""

    def __init__(self, xml_str=None, xml_tree=None):
        """Store the document as a string and/or a parsed lxml tree.

        At least one of ``xml_str``/``xml_tree`` must be provided.
        """
        assert (xml_str or xml_tree is not None), 'Must provide some XML in one format or another'
        self.xml_str = xml_str
        self.xml_tree = xml_tree

    def read_values(self):
        '''For all of the elements listed, finds the values of them in the
        XML and returns them.'''
        values = {}
        tree = self.get_xml_tree()
        for element in self.elements:
            values[element.name] = element.read_value(tree)
        # Hook for subclasses to derive extra values from the raw ones.
        self.infer_values(values)
        return values

    def read_value(self, name):
        '''For the given element name, find the value in the XML and return
        it.
        '''
        tree = self.get_xml_tree()
        for element in self.elements:
            if element.name == name:
                return element.read_value(tree)
        # Include the missing name in the exception for easier debugging
        # (still a KeyError, so existing callers are unaffected).
        raise KeyError(name)

    def get_xml_tree(self):
        """Lazily parse ``xml_str`` and cache the resulting lxml tree."""
        if self.xml_tree is None:
            parser = etree.XMLParser(remove_blank_text=True)
            # Encode unicode to UTF-8 bytes before parsing (Python 2
            # codebase; lxml prefers byte input here).
            if isinstance(self.xml_str, unicode):
                xml_str = self.xml_str.encode('utf8')
            else:
                xml_str = self.xml_str
            self.xml_tree = etree.fromstring(xml_str, parser=parser)
        return self.xml_tree

    def infer_values(self, values):
        """Post-process extracted values; default implementation is a no-op."""
        pass
class MappedXmlElement(MappedXmlObject):
    """One mapped element: a name plus the XPath(s) used to locate its value."""

    namespaces = {}

    def __init__(self, name, search_paths=[], multiplicity="*", elements=[]):
        # NOTE: the mutable defaults are safe here because neither list is
        # ever mutated in place, only read or replaced.
        self.name = name
        self.search_paths = search_paths
        self.multiplicity = multiplicity
        self.elements = elements or self.elements

    def read_value(self, tree):
        """Try each search path in turn; return the first path's matches,
        normalised according to ``self.multiplicity``."""
        values = []
        for xpath in self.get_search_paths():
            elements = self.get_elements(tree, xpath)
            values = self.get_values(elements)
            if values:
                break
        return self.fix_multiplicity(values)

    def get_search_paths(self):
        """Return the search paths as a list (a single path may be given)."""
        if isinstance(self.search_paths, list):
            return self.search_paths
        return [self.search_paths]

    def get_elements(self, tree, xpath):
        """Evaluate ``xpath`` against ``tree`` using this element's namespaces."""
        return tree.xpath(xpath, namespaces=self.namespaces)

    def get_values(self, elements):
        """Convert every matched node to a value; empty input yields []."""
        return [self.get_value(element) for element in elements]

    def get_value(self, element):
        """Convert one matched node into a value: a dict of sub-element
        values, a plain (unicode) string, or serialised XML."""
        if self.elements:
            value = {}
            for child in self.elements:
                value[child.name] = child.read_value(element)
            return value
        elif type(element) == etree._ElementStringResult:
            value = str(element)
        elif type(element) == etree._ElementUnicodeResult:
            value = unicode(element)
        else:
            value = self.element_tostring(element)
        return value

    def element_tostring(self, element):
        return etree.tostring(element, pretty_print=False)

    def fix_multiplicity(self, values):
        '''
        When a field contains multiple values, yet the spec says
        it should contain only one, then return just the first value,
        rather than a list.

        In the ISO19115 specification, multiplicity relates to:
        * 'Association Cardinality'
        * 'Obligation/Condition' & 'Maximum Occurence'
        '''
        if self.multiplicity == "0":
            # 0 = None
            if values:
                log.warning("Values found for element '%s' when multiplicity should be 0: %s", self.name, values)
            return ""
        elif self.multiplicity == "1":
            # 1 = Mandatory, maximum 1 = Exactly one
            if not values:
                log.warning("Value not found for element '%s'", self.name)
                return ''
            return values[0]
        elif self.multiplicity == "*":
            # * = 0..* = zero or more
            return values
        elif self.multiplicity == "0..1":
            # 0..1 = Mandatory, maximum 1 = optional (zero or one)
            if values:
                return values[0]
            else:
                return ""
        elif self.multiplicity == "1..*":
            # 1..* = one or more
            return values
        else:
            log.warning('Multiplicity not specified for element: %s',
                        self.name)
            return values
class ISOElement(MappedXmlElement):
    """MappedXmlElement preconfigured with the ISO 19139 / GML namespace
    prefixes used by every XPath in this module."""
    namespaces = {
        "gts": "http://www.isotc211.org/2005/gts",
        "gml": "http://www.opengis.net/gml",
        "gml32": "http://www.opengis.net/gml/3.2",
        "gmx": "http://www.isotc211.org/2005/gmx",
        "gsr": "http://www.isotc211.org/2005/gsr",
        "gss": "http://www.isotc211.org/2005/gss",
        "gco": "http://www.isotc211.org/2005/gco",
        "gmd": "http://www.isotc211.org/2005/gmd",
        "srv": "http://www.isotc211.org/2005/srv",
        "xlink": "http://www.w3.org/1999/xlink",
        "xsi": "http://www.w3.org/2001/XMLSchema-instance",
    }
class ISOResourceLocator(ISOElement):
    """Maps an online resource locator: URL plus function, name,
    description and protocol."""
    elements = [
        ISOElement(
            name="url",
            search_paths=[
                "gmd:linkage/gmd:URL/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="function",
            search_paths=[
                "gmd:function/gmd:CI_OnLineFunctionCode/@codeListValue",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="name",
            search_paths=[
                "gmd:name/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="description",
            search_paths=[
                "gmd:description/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="protocol",
            search_paths=[
                "gmd:protocol/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
    ]
class ISOResponsibleParty(ISOElement):
    """Maps a responsible party: individual/organisation/position names,
    nested contact info (email, online resource) and role code."""
    elements = [
        ISOElement(
            name="individual-name",
            search_paths=[
                "gmd:individualName/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="organisation-name",
            search_paths=[
                "gmd:organisationName/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="position-name",
            search_paths=[
                "gmd:positionName/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="contact-info",
            search_paths=[
                "gmd:contactInfo/gmd:CI_Contact",
            ],
            multiplicity="0..1",
            elements = [
                ISOElement(
                    name="email",
                    search_paths=[
                        "gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString/text()",
                    ],
                    multiplicity="0..1",
                ),
                ISOResourceLocator(
                    name="online-resource",
                    search_paths=[
                        "gmd:onlineResource/gmd:CI_OnlineResource",
                    ],
                    multiplicity="0..1",
                ),
            ]
        ),
        ISOElement(
            name="role",
            search_paths=[
                "gmd:role/gmd:CI_RoleCode/@codeListValue",
            ],
            multiplicity="0..1",
        ),
    ]
class ISODataFormat(ISOElement):
    """Maps a data format: name and version."""
    elements = [
        ISOElement(
            name="name",
            search_paths=[
                "gmd:name/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="version",
            search_paths=[
                "gmd:version/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
    ]
class ISOReferenceDate(ISOElement):
    """Maps a reference date: the date type code plus the date value
    (gco:Date or gco:DateTime)."""
    elements = [
        ISOElement(
            name="type",
            search_paths=[
                "gmd:dateType/gmd:CI_DateTypeCode/@codeListValue",
                "gmd:dateType/gmd:CI_DateTypeCode/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="value",
            search_paths=[
                "gmd:date/gco:Date/text()",
                "gmd:date/gco:DateTime/text()",
            ],
            multiplicity="1",
        ),
    ]
class ISOCoupledResources(ISOElement):
    """Maps coupled-resource references via xlink title/href and uuidref
    attributes."""
    elements = [
        ISOElement(
            name="title",
            search_paths=[
                "@xlink:title",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="href",
            search_paths=[
                "@xlink:href",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="uuid",
            search_paths=[
                "@uuidref",
            ],
            multiplicity="*",
        ),
    ]
class ISOBoundingBox(ISOElement):
    """Maps a geographic bounding box: west/east/north/south decimal
    bounds."""
    elements = [
        ISOElement(
            name="west",
            search_paths=[
                "gmd:westBoundLongitude/gco:Decimal/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="east",
            search_paths=[
                "gmd:eastBoundLongitude/gco:Decimal/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="north",
            search_paths=[
                "gmd:northBoundLatitude/gco:Decimal/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="south",
            search_paths=[
                "gmd:southBoundLatitude/gco:Decimal/text()",
            ],
            multiplicity="1",
        ),
    ]
class ISOBrowseGraphic(ISOElement):
    """Preview image (gmd:MD_BrowseGraphic): file name, description and file type."""
    elements = [
        ISOElement(
            name="file",
            search_paths=[
                "gmd:fileName/gco:CharacterString/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="description",
            search_paths=[
                "gmd:fileDescription/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="type",
            search_paths=[
                "gmd:fileType/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
    ]
class ISOKeyword(ISOElement):
    """Keyword group (gmd:MD_Keywords): keyword strings plus their type code."""
    elements = [
        ISOElement(
            name="keyword",
            search_paths=[
                "gmd:keyword/gco:CharacterString/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="type",
            search_paths=[
                "gmd:type/gmd:MD_KeywordTypeCode/@codeListValue",
                "gmd:type/gmd:MD_KeywordTypeCode/text()",
            ],
            multiplicity="0..1",
        ),
        # If Thesaurus information is needed at some point, this is the
        # place to add it
    ]
class ISOUsage(ISOElement):
    """Resource usage (gmd:MD_Usage): specific-usage text and the user's contact party."""
    elements = [
        ISOElement(
            name="usage",
            search_paths=[
                "gmd:specificUsage/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOResponsibleParty(
            name="contact-info",
            search_paths=[
                "gmd:userContactInfo/gmd:CI_ResponsibleParty",
            ],
            multiplicity="0..1",
        ),
    ]
class ISOAggregationInfo(ISOElement):
    """Aggregation info (gmd:MD_AggregateInformation): linked dataset name/identifier
    and the association/initiative type codes."""
    elements = [
        ISOElement(
            name="aggregate-dataset-name",
            search_paths=[
                "gmd:aggregateDatasetName/gmd:CI_Citation/gmd:title/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="aggregate-dataset-identifier",
            search_paths=[
                "gmd:aggregateDatasetIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="association-type",
            search_paths=[
                "gmd:associationType/gmd:DS_AssociationTypeCode/@codeListValue",
                "gmd:associationType/gmd:DS_AssociationTypeCode/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="initiative-type",
            search_paths=[
                "gmd:initiativeType/gmd:DS_InitiativeTypeCode/@codeListValue",
                "gmd:initiativeType/gmd:DS_InitiativeTypeCode/text()",
            ],
            multiplicity="0..1",
        ),
    ]
class ISODocument(MappedXmlDocument):
    """Maps an ISO 19139 metadata XML document onto a flat dict of values.

    Each entry in ``elements`` declares one output key, the XPath(s) searched
    for it (data identification under gmd:MD_DataIdentification, services
    under srv:SV_ServiceIdentification), and its expected multiplicity.
    """
    # Attribute specifications from "XPaths for GEMINI" by Peter Parslow.
    elements = [
        ISOElement(
            name="guid",
            search_paths="gmd:fileIdentifier/gco:CharacterString/text()",
            multiplicity="0..1",
        ),
        ISOElement(
            name="metadata-language",
            search_paths=[
                "gmd:language/gmd:LanguageCode/@codeListValue",
                "gmd:language/gmd:LanguageCode/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="metadata-standard-name",
            search_paths="gmd:metadataStandardName/gco:CharacterString/text()",
            multiplicity="0..1",
        ),
        ISOElement(
            name="metadata-standard-version",
            search_paths="gmd:metadataStandardVersion/gco:CharacterString/text()",
            multiplicity="0..1",
        ),
        ISOElement(
            name="resource-type",
            search_paths=[
                "gmd:hierarchyLevel/gmd:MD_ScopeCode/@codeListValue",
                "gmd:hierarchyLevel/gmd:MD_ScopeCode/text()",
            ],
            multiplicity="*",
        ),
        ISOResponsibleParty(
            name="metadata-point-of-contact",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty",
            ],
            multiplicity="1..*",
        ),
        ISOElement(
            name="metadata-date",
            search_paths=[
                "gmd:dateStamp/gco:DateTime/text()",
                "gmd:dateStamp/gco:Date/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="spatial-reference-system",
            search_paths=[
                "gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="title",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="alternate-title",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString/text()",
            ],
            multiplicity="*",
        ),
        ISOReferenceDate(
            name="dataset-reference-date",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date",
            ],
            multiplicity="1..*",
        ),
        ISOElement(
            name="unique-resource-identifier",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:MD_Identifier/gmd:code/gco:CharacterString/text()",
                "gmd:identificationInfo/gmd:SV_ServiceIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:MD_Identifier/gmd:code/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="presentation-form",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:presentationForm/gmd:CI_PresentationFormCode/text()",
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:presentationForm/gmd:CI_PresentationFormCode/@codeListValue",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:citation/gmd:CI_Citation/gmd:presentationForm/gmd:CI_PresentationFormCode/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:citation/gmd:CI_Citation/gmd:presentationForm/gmd:CI_PresentationFormCode/@codeListValue",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="abstract",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:abstract/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:abstract/gco:CharacterString/text()",
            ],
            multiplicity="1",
        ),
        ISOElement(
            name="purpose",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:purpose/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:purpose/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOResponsibleParty(
            name="responsible-organisation",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty",
                "gmd:contact/gmd:CI_ResponsibleParty",
            ],
            multiplicity="1..*",
        ),
        ISOElement(
            name="frequency-of-update",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceMaintenance/gmd:MD_MaintenanceInformation/gmd:maintenanceAndUpdateFrequency/gmd:MD_MaintenanceFrequencyCode/@codeListValue",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:resourceMaintenance/gmd:MD_MaintenanceInformation/gmd:maintenanceAndUpdateFrequency/gmd:MD_MaintenanceFrequencyCode/@codeListValue",
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceMaintenance/gmd:MD_MaintenanceInformation/gmd:maintenanceAndUpdateFrequency/gmd:MD_MaintenanceFrequencyCode/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:resourceMaintenance/gmd:MD_MaintenanceInformation/gmd:maintenanceAndUpdateFrequency/gmd:MD_MaintenanceFrequencyCode/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="maintenance-note",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceMaintenance/gmd:MD_MaintenanceInformation/gmd:maintenanceNote/gco:CharacterString/text()",
                "gmd:identificationInfo/gmd:SV_ServiceIdentification/gmd:resourceMaintenance/gmd:MD_MaintenanceInformation/gmd:maintenanceNote/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="progress",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:status/gmd:MD_ProgressCode/@codeListValue",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:status/gmd:MD_ProgressCode/@codeListValue",
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:status/gmd:MD_ProgressCode/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:status/gmd:MD_ProgressCode/text()",
            ],
            multiplicity="*",
        ),
        ISOKeyword(
            name="keywords",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords",
            ],
            multiplicity="*"
        ),
        ISOElement(
            name="keyword-inspire-theme",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:keyword/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:keyword/gco:CharacterString/text()",
            ],
            multiplicity="*",
        ),
        # Deprecated: kept for backwards compatibility
        ISOElement(
            name="keyword-controlled-other",
            search_paths=[
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:keywords/gmd:MD_Keywords/gmd:keyword/gco:CharacterString/text()",
            ],
            multiplicity="*",
        ),
        ISOUsage(
            name="usage",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceSpecificUsage/gmd:MD_Usage",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:resourceSpecificUsage/gmd:MD_Usage",
            ],
            multiplicity="*"
        ),
        ISOElement(
            name="limitations-on-public-access",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="access-constraints",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode/@codeListValue",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode/@codeListValue",
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="use-constraints",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString/text()",
            ],
            multiplicity="*",
        ),
        ISOAggregationInfo(
            name="aggregation-info",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:aggregationInfo/gmd:MD_AggregateInformation",
                "gmd:identificationInfo/gmd:SV_ServiceIdentification/gmd:aggregationInfo/gmd:MD_AggregateInformation",
            ],
            multiplicity="*"
        ),
        ISOElement(
            name="spatial-data-service-type",
            search_paths=[
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:serviceType/gco:LocalName/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="spatial-resolution",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="spatial-resolution-units",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance/@uom",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance/@uom",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="equivalent-scale",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="dataset-language",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:language/gmd:LanguageCode/@codeListValue",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:language/gmd:LanguageCode/@codeListValue",
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:language/gmd:LanguageCode/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:language/gmd:LanguageCode/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="topic-category",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="extent-controlled",
            search_paths=[
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="extent-free-text",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString/text()",
            ],
            multiplicity="*",
        ),
        ISOBoundingBox(
            name="bbox",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicBoundingBox",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicBoundingBox",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="temporal-extent-begin",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition/text()",
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="temporal-extent-end",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition/text()",
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition/text()",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition/text()",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="vertical-extent",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:verticalElement/gmd:EX_VerticalExtent",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:extent/gmd:EX_Extent/gmd:verticalElement/gmd:EX_VerticalExtent",
            ],
            multiplicity="*",
        ),
        ISOCoupledResources(
            name="coupled-resource",
            search_paths=[
                "gmd:identificationInfo/srv:SV_ServiceIdentification/srv:operatesOn",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="additional-information-source",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:supplementalInformation/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISODataFormat(
            name="data-format",
            search_paths=[
                "gmd:distributionInfo/gmd:MD_Distribution/gmd:distributionFormat/gmd:MD_Format",
            ],
            multiplicity="*",
        ),
        ISOResponsibleParty(
            name="distributor",
            search_paths=[
                "gmd:distributionInfo/gmd:MD_Distribution/gmd:distributor/gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty",
            ],
            multiplicity="*",
        ),
        ISOResourceLocator(
            name="resource-locator",
            search_paths=[
                "gmd:distributionInfo/gmd:MD_Distribution/gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource",
                "gmd:distributionInfo/gmd:MD_Distribution/gmd:distributor/gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource"
            ],
            multiplicity="*",
        ),
        ISOResourceLocator(
            name="resource-locator-identification",
            search_paths=[
                "gmd:identificationInfo//gmd:CI_OnlineResource",
            ],
            multiplicity="*",
        ),
        ISOElement(
            name="conformity-specification",
            search_paths=[
                "gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="conformity-pass",
            search_paths=[
                "gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="conformity-explanation",
            search_paths=[
                "gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:explanation/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOElement(
            name="lineage",
            search_paths=[
                "gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString/text()",
            ],
            multiplicity="0..1",
        ),
        ISOBrowseGraphic(
            name="browse-graphic",
            search_paths=[
                "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:graphicOverview/gmd:MD_BrowseGraphic",
                "gmd:identificationInfo/srv:SV_ServiceIdentification/gmd:graphicOverview/gmd:MD_BrowseGraphic",
            ],
            multiplicity="*",
        ),
    ]
def infer_values(self, values):
# Todo: Infer name.
self.infer_date_released(values)
self.infer_date_updated(values)
self.infer_date_created(values)
self.infer_url(values)
# Todo: Infer resources.
self.infer_tags(values)
self.infer_publisher(values)
self.infer_contact(values)
self.infer_contact_email(values)
return values
def infer_date_released(self, values):
value = ''
for date in values['dataset-reference-date']:
if date['type'] == 'publication':
value = date['value']
break
values['date-released'] = value
def infer_date_updated(self, values):
value = ''
dates = []
# Use last of several multiple revision dates.
for date in values['dataset-reference-date']:
if date['type'] == 'revision':
dates.append(date['value'])
if len(dates):
if len(dates) > 1:
dates.sort(reverse=True)
value = dates[0]
values['date-updated'] = value
def infer_date_created(self, values):
value = ''
for date in values['dataset-reference-date']:
if date['type'] == 'creation':
value = date['value']
break
values['date-created'] = value
def infer_url(self, values):
value = ''
for locator in values['resource-locator']:
if locator['function'] == 'information':
value = locator['url']
break
values['url'] = value
def infer_tags(self, values):
tags = []
for key in ['keyword-inspire-theme', 'keyword-controlled-other']:
for item in values[key]:
if item not in tags:
tags.append(item)
values['tags'] = tags
def infer_publisher(self, values):
value = ''
for responsible_party in values['responsible-organisation']:
if responsible_party['role'] == 'publisher':
value = responsible_party['organisation-name']
if value:
break
values['publisher'] = value
def infer_contact(self, values):
value = ''
for responsible_party in values['responsible-organisation']:
value = responsible_party['organisation-name']
if value:
break
values['contact'] = value
def infer_contact_email(self, values):
value = ''
for responsible_party in values['responsible-organisation']:
if isinstance(responsible_party, dict) and \
isinstance(responsible_party.get('contact-info'), dict) and \
responsible_party['contact-info'].has_key('email'):
value = responsible_party['contact-info']['email']
if value:
break
values['contact-email'] = value
class GeminiDocument(ISODocument):
    '''
    Deprecated alias of ISODocument, kept for backwards compatibility:
    GEMINI documents are parsed exactly like generic ISO documents.
    '''
| 38.158385 | 225 | 0.581102 |
bb9a7f20ec0069533f783ef57ff0e95fef68986b | 174 | py | Python | tacotron/models/__init__.py | mib32/Tacotron-2 | f440b1fa41636084c78d2cae7d4c9d23e4a9d871 | [
"MIT"
] | 2,154 | 2017-12-28T08:02:58.000Z | 2022-03-30T09:14:04.000Z | tacotron/models/__init__.py | mib32/Tacotron-2 | f440b1fa41636084c78d2cae7d4c9d23e4a9d871 | [
"MIT"
] | 481 | 2018-01-24T08:50:39.000Z | 2022-02-14T01:39:00.000Z | tacotron/models/__init__.py | mib32/Tacotron-2 | f440b1fa41636084c78d2cae7d4c9d23e4a9d871 | [
"MIT"
] | 813 | 2017-12-29T02:15:01.000Z | 2022-03-31T15:49:08.000Z | from .tacotron import Tacotron
def create_model(name, hparams):
    """Instantiate the model registered under ``name``.

    Args:
        name: Model identifier; only 'Tacotron' is currently supported.
        hparams: Hyperparameter object forwarded to the model constructor.

    Returns:
        A constructed Tacotron instance.

    Raises:
        ValueError: If ``name`` is not a known model. (ValueError is more
            precise than the bare Exception raised previously and remains
            backward-compatible for callers catching Exception.)
    """
    if name == 'Tacotron':
        return Tacotron(hparams)
    raise ValueError('Unknown model: ' + name)
| 19.333333 | 45 | 0.695402 |
13bbbc0bacdbdb747aea6cb03478d9fab53cfa8f | 6,485 | py | Python | sdk/python/pulumi_aws/secretsmanager/secret_policy.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/secretsmanager/secret_policy.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/secretsmanager/secret_policy.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['SecretPolicy']
class SecretPolicy(pulumi.CustomResource):
    """Manages an AWS Secrets Manager secret resource policy.

    Auto-generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 block_public_policy: Optional[pulumi.Input[bool]] = None,
                 policy: Optional[pulumi.Input[str]] = None,
                 secret_arn: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Provides a resource to manage AWS Secrets Manager secret policy.
        ## Example Usage
        ### Basic
        ```python
        import pulumi
        import pulumi_aws as aws
        example_secret = aws.secretsmanager.Secret("exampleSecret")
        example_secret_policy = aws.secretsmanager.SecretPolicy("exampleSecretPolicy",
            secret_arn=example_secret.arn,
            policy=\"\"\"{
          "Version": "2012-10-17",
          "Statement": [
        	{
        	  "Sid": "EnableAllPermissions",
        	  "Effect": "Allow",
        	  "Principal": {
        		"AWS": "*"
        	  },
        	  "Action": "secretsmanager:GetSecretValue",
        	  "Resource": "*"
        	}
          ]
        }
        \"\"\")
        ```
        ## Import
        `aws_secretsmanager_secret_policy` can be imported by using the secret Amazon Resource Name (ARN), e.g.
        ```sh
         $ pulumi import aws:secretsmanager/secretPolicy:SecretPolicy example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] block_public_policy: Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
        :param pulumi.Input[str] policy: A valid JSON document representing a [resource policy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html).
        :param pulumi.Input[str] secret_arn: Secret ARN.
        """
        # ``__name__`` / ``__opts__`` are legacy aliases for resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['block_public_policy'] = block_public_policy
            if policy is None and not opts.urn:
                raise TypeError("Missing required property 'policy'")
            __props__['policy'] = policy
            if secret_arn is None and not opts.urn:
                raise TypeError("Missing required property 'secret_arn'")
            __props__['secret_arn'] = secret_arn
        super(SecretPolicy, __self__).__init__(
            'aws:secretsmanager/secretPolicy:SecretPolicy',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            block_public_policy: Optional[pulumi.Input[bool]] = None,
            policy: Optional[pulumi.Input[str]] = None,
            secret_arn: Optional[pulumi.Input[str]] = None) -> 'SecretPolicy':
        """
        Get an existing SecretPolicy resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] block_public_policy: Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
        :param pulumi.Input[str] policy: A valid JSON document representing a [resource policy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html).
        :param pulumi.Input[str] secret_arn: Secret ARN.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["block_public_policy"] = block_public_policy
        __props__["policy"] = policy
        __props__["secret_arn"] = secret_arn
        return SecretPolicy(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="blockPublicPolicy")
    def block_public_policy(self) -> pulumi.Output[Optional[bool]]:
        """
        Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
        """
        return pulumi.get(self, "block_public_policy")
    @property
    @pulumi.getter
    def policy(self) -> pulumi.Output[str]:
        """
        A valid JSON document representing a [resource policy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html).
        """
        return pulumi.get(self, "policy")
    @property
    @pulumi.getter(name="secretArn")
    def secret_arn(self) -> pulumi.Output[str]:
        """
        Secret ARN.
        """
        return pulumi.get(self, "secret_arn")
    # Translate between the provider's camelCase keys and Python snake_case.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.83871 | 202 | 0.641326 |
7bfd06f39e87c395019446aa5d054957b0626892 | 5,260 | py | Python | airflow/api_connexion/exceptions.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | airflow/api_connexion/exceptions.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | airflow/api_connexion/exceptions.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
import werkzeug
from connexion import FlaskApi, ProblemException, problem
from airflow import version
# Pre-release builds (version strings containing 'dev', 'a' or 'b') link to the
# staged "latest" docs bucket; released versions link to their versioned docs.
if any(suffix in version.version for suffix in ['dev', 'a', 'b']):
    doc_link = (
        "http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com/docs/apache-airflow/latest"
        "/stable-rest-api-ref.html"
    )
else:
    doc_link = f'https://airflow.apache.org/docs/{version.version}/stable-rest-api-ref.html'
# HTTP status code -> anchor in the stable REST API error documentation.
# Used by common_error_handler and the exception classes below to populate
# the problem response's ``type`` field.
EXCEPTIONS_LINK_MAP = {
    400: f"{doc_link}#section/Errors/BadRequest",
    404: f"{doc_link}#section/Errors/NotFound",
    405: f"{doc_link}#section/Errors/MethodNotAllowed",
    401: f"{doc_link}#section/Errors/Unauthenticated",
    409: f"{doc_link}#section/Errors/AlreadyExists",
    403: f"{doc_link}#section/Errors/PermissionDenied",
    500: f"{doc_link}#section/Errors/Unknown",
}
def common_error_handler(exception):
    """
    Used to capture connexion exceptions and add link to the type field

    :type exception: Exception
    """
    if isinstance(exception, ProblemException):
        # The previous implementation duplicated the problem() call in two
        # branches that differed only in the ``type`` argument; fall back to
        # the exception's own type when no documentation link is known.
        link = EXCEPTIONS_LINK_MAP.get(exception.status)
        response = problem(
            status=exception.status,
            title=exception.title,
            detail=exception.detail,
            type=link if link else exception.type,
            instance=exception.instance,
            headers=exception.headers,
            ext=exception.ext,
        )
    else:
        if not isinstance(exception, werkzeug.exceptions.HTTPException):
            exception = werkzeug.exceptions.InternalServerError()
        response = problem(title=exception.name, detail=exception.description, status=exception.code)
    return FlaskApi.get_response(response)
class NotFound(ProblemException):
    """Raise when the requested object cannot be found (HTTP 404)."""

    def __init__(
        self, title: str = 'Not Found', detail: Optional[str] = None, headers: Optional[Dict] = None, **kwargs
    ):
        super().__init__(
            status=404,
            type=EXCEPTIONS_LINK_MAP[404],
            title=title,
            detail=detail,
            headers=headers,
            **kwargs,
        )
class BadRequest(ProblemException):
    """Raise when the server cannot process the request (HTTP 400)."""

    def __init__(
        self,
        title: str = 'Bad Request',
        detail: Optional[str] = None,
        headers: Optional[Dict] = None,
        **kwargs,
    ):
        super().__init__(
            status=400,
            type=EXCEPTIONS_LINK_MAP[400],
            title=title,
            detail=detail,
            headers=headers,
            **kwargs,
        )
class Unauthenticated(ProblemException):
    """Raise when the user is not authenticated (HTTP 401)."""

    def __init__(
        self,
        title: str = 'Unauthorized',
        detail: Optional[str] = None,
        headers: Optional[Dict] = None,
        **kwargs,
    ):
        super().__init__(
            status=401,
            type=EXCEPTIONS_LINK_MAP[401],
            title=title,
            detail=detail,
            headers=headers,
            **kwargs,
        )
class PermissionDenied(ProblemException):
    """Raise when the user lacks the required permissions (HTTP 403)."""

    def __init__(
        self, title: str = 'Forbidden', detail: Optional[str] = None, headers: Optional[Dict] = None, **kwargs
    ):
        super().__init__(
            status=403,
            type=EXCEPTIONS_LINK_MAP[403],
            title=title,
            detail=detail,
            headers=headers,
            **kwargs,
        )
class AlreadyExists(ProblemException):
    """Raise when the object already exists"""
    def __init__(
        self, title: str = 'Conflict', detail: Optional[str] = None, headers: Optional[Dict] = None, **kwargs
    ):
        # ``title: str`` annotation added for consistency with the sibling
        # exception classes in this module.
        super().__init__(
            status=409, type=EXCEPTIONS_LINK_MAP[409], title=title, detail=detail, headers=headers, **kwargs
        )
class Unknown(ProblemException):
    """Returns a response body and status code for HTTP 500 exception"""

    def __init__(
        self,
        title: str = 'Internal Server Error',
        detail: Optional[str] = None,
        headers: Optional[Dict] = None,
        **kwargs,
    ):
        super().__init__(
            status=500,
            type=EXCEPTIONS_LINK_MAP[500],
            title=title,
            detail=detail,
            headers=headers,
            **kwargs,
        )
| 33.503185 | 110 | 0.648099 |
a8acf84ac542aeeeb76ac2ba2deb4db73a02cc26 | 7,595 | py | Python | netket/optimizer/qgt/qgt_jacobian_dense.py | pesvut/netket | 7f19574ddc567748344bb75a4ddd507578d94b0d | [
"Apache-2.0"
] | null | null | null | netket/optimizer/qgt/qgt_jacobian_dense.py | pesvut/netket | 7f19574ddc567748344bb75a4ddd507578d94b0d | [
"Apache-2.0"
] | 26 | 2021-08-06T15:27:57.000Z | 2022-03-30T16:55:18.000Z | netket/optimizer/qgt/qgt_jacobian_dense.py | pesvut/netket | 7f19574ddc567748344bb75a4ddd507578d94b0d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
from functools import partial
import jax
from jax import numpy as jnp
from flax import struct
from netket.utils.types import PyTree
from netket.utils import mpi
import netket.jax as nkjax
from ..linear_operator import LinearOperator, Uninitialized
from .qgt_jacobian_dense_logic import prepare_centered_oks, vec_to_real
from .qgt_jacobian_common import choose_jacobian_mode
def QGTJacobianDense(
    vstate=None,
    *,
    mode: Optional[str] = None,
    holomorphic: Optional[bool] = None,
    rescale_shift=False,
    **kwargs,
) -> "QGTJacobianDenseT":
    """
    Semi-lazy representation of an S Matrix where the Jacobian O_k is precomputed
    and stored as a dense matrix.

    The matrix of gradients O is computed on initialisation, but not S,
    which can be computed by calling :code:`to_dense`.
    The details on how the ⟨S⟩⁻¹⟨F⟩ system is solved are contained in
    the field `sr`.

    Args:
        vstate: The variational state
        mode: "real", "complex" or "holomorphic": specifies the implementation
            used to compute the jacobian. "real" discards the imaginary part
            of the output of the model. "complex" splits the real and imaginary
            part of the parameters and output. It works also for non holomorphic
            models. holomorphic works for any function assuming it's holomorphic
            or real valued.
        holomorphic: a flag to indicate that the function is holomorphic.
        rescale_shift: If True rescales the diagonal shift
    """
    # Called without a state: return a configured factory so the same options
    # can be applied to a vstate later.
    if vstate is None:
        return partial(
            QGTJacobianDense,
            mode=mode,
            holomorphic=holomorphic,
            rescale_shift=rescale_shift,
            **kwargs,
        )
    if mode is None:
        # Auto-detect the differentiation mode from the model; `holomorphic`
        # serves only as a hint for the detection.
        mode = choose_jacobian_mode(
            vstate._apply_fun,
            vstate.parameters,
            vstate.model_state,
            vstate.samples,
            mode=mode,
            holomorphic=holomorphic,
        )
    elif holomorphic is not None:
        # `mode` already fixes the implementation, so the hint is redundant
        # (and possibly contradictory) once mode is given explicitly.
        raise ValueError("Cannot specify both `mode` and `holomorphic`.")
    # Precompute the centered jacobian (and optional per-column scale) over
    # all samples, flattened across batch dimensions.
    O, scale = prepare_centered_oks(
        vstate._apply_fun,
        vstate.parameters,
        vstate.samples.reshape(-1, vstate.samples.shape[-1]),
        vstate.model_state,
        mode,
        rescale_shift,
    )
    return QGTJacobianDenseT(O=O, scale=scale, mode=mode, **kwargs)
@struct.dataclass
class QGTJacobianDenseT(LinearOperator):
    """
    Semi-lazy representation of an S Matrix behaving like a linear operator.

    The matrix of gradients O is computed on initialisation, but not S,
    which can be computed by calling :code:`to_dense`.
    The details on how the ⟨S⟩⁻¹⟨F⟩ system is solved are contained in
    the field `sr`.
    """
    O: jnp.ndarray = Uninitialized
    """Gradients O_ij = ∂log ψ(σ_i)/∂p_j of the neural network
    for all samples σ_i at given values of the parameters p_j
    Average <O_j> subtracted for each parameter
    Divided through with sqrt(#samples) to normalise S matrix
    If scale is not None, columns normalised to unit norm
    """
    scale: Optional[jnp.ndarray] = None
    """If not None, contains 2-norm of each column of the gradient matrix,
    i.e., the sqrt of the diagonal elements of the S matrix
    """
    mode: str = struct.field(pytree_node=False, default=Uninitialized)
    """Differentiation mode:
    - "real": for real-valued R->R and C->R ansatze, splits the complex inputs
    into real and imaginary part.
    - "complex": for complex-valued R->C and C->C ansatze, splits the complex
    inputs and outputs into real and imaginary part
    - "holomorphic": for any ansatze. Does not split complex values.
    - "auto": autoselect real or complex.
    """
    _in_solve: bool = struct.field(pytree_node=False, default=False)
    """Internal flag used to signal that we are inside the _solve method and matmul should
    not take apart into real and complex parts the other vector"""
    def __matmul__(self, vec: Union[PyTree, jnp.ndarray]) -> Union[PyTree, jnp.ndarray]:
        # Delegates to the jitted module-level implementation.
        return _matmul(self, vec)
    def _solve(self, solve_fun, y: PyTree, *, x0: Optional[PyTree] = None) -> PyTree:
        # Delegates to the jitted module-level implementation.
        return _solve(self, solve_fun, y, x0=x0)
    def to_dense(self) -> jnp.ndarray:
        """
        Convert the lazy matrix representation to a dense matrix representation.

        Returns:
            A dense matrix representation of this S matrix.
        """
        return _to_dense(self)
    def __repr__(self):
        return (
            f"QGTJacobianDense(diag_shift={self.diag_shift}, "
            f"scale={self.scale}, mode={self.mode})"
        )
########################################################################################
##### QGT Logic #####
########################################################################################
@jax.jit
def _matmul(
    self: QGTJacobianDenseT, vec: Union[PyTree, jnp.ndarray]
) -> Union[PyTree, jnp.ndarray]:
    """Apply (S + diag_shift·I) to `vec`, returning the same structure as the input."""
    # PyTree inputs are flattened to a single vector; `unravel` restores them.
    unravel = None
    if not hasattr(vec, "ndim"):
        vec, unravel = nkjax.tree_ravel(vec)
    # Real-imaginary split RHS in R→R and R→C modes
    reassemble = None
    if self.mode != "holomorphic" and not self._in_solve:
        vec, reassemble = vec_to_real(vec)
    # With per-column rescaling, S = D·(Oᵀ O)·D where D = diag(scale), so the
    # vector is scaled on entry and the result rescaled on exit.
    if self.scale is not None:
        vec = vec * self.scale
    # (Oᵀ O) vec, summed over MPI ranks, plus the diagonal regularization.
    result = (
        mpi.mpi_sum_jax(((self.O @ vec).T.conj() @ self.O).T.conj())[0]
        + self.diag_shift * vec
    )
    if self.scale is not None:
        result = result * self.scale
    # Undo the real/imag split and the flattening, in reverse order.
    if reassemble is not None:
        result = reassemble(result)
    if unravel is not None:
        result = unravel(result)
    return result
@jax.jit
def _solve(
    self: QGTJacobianDenseT, solve_fun, y: PyTree, *, x0: Optional[PyTree] = None
) -> PyTree:
    """Solve (S + diag_shift·I) x = y with `solve_fun`, handling scaling and pytrees."""
    # Ravel input PyTrees, record unravelling function too
    y, unravel = nkjax.tree_ravel(y)
    # Non-holomorphic modes solve in the real/imag-split representation.
    if self.mode != "holomorphic":
        y, reassemble = vec_to_real(y)
    if x0 is not None:
        x0, _ = nkjax.tree_ravel(x0)
        # The initial guess lives in the unscaled space; map it into the
        # scaled space the solver operates in.
        if self.scale is not None:
            x0 = x0 * self.scale
    if self.scale is not None:
        y = y / self.scale
    # to pass the object LinearOperator itself down
    # but avoid rescaling, we pass down an object with
    # scale = None
    unscaled_self = self.replace(scale=None, _in_solve=True)
    out, info = solve_fun(unscaled_self, y, x0=x0)
    # Map the solution back to the unscaled space and original structure.
    if self.scale is not None:
        out = out / self.scale
    if self.mode != "holomorphic":
        out = reassemble(out)
    return unravel(out), info
@jax.jit
def _to_dense(self: QGTJacobianDenseT) -> jnp.ndarray:
    """Materialize S + diag_shift·(diagonal) as a dense matrix, MPI-summed."""
    if self.scale is None:
        O = self.O
        diag = jnp.eye(self.O.shape[1])
    else:
        # Undo the unit-norm column scaling; the shift is then applied to the
        # true diagonal of S, i.e. scale², instead of the identity.
        O = self.O * self.scale[jnp.newaxis, :]
        diag = jnp.diag(self.scale ** 2)
    return mpi.mpi_sum_jax(O.T.conj() @ O)[0] + self.diag_shift * diag
| 32.046414 | 90 | 0.63239 |
f778b0b0bab85f0e3b1bcd83c7c18aad72a567ef | 1,598 | py | Python | test/test_list_xrp_ripple_transactions_by_block_height_r_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | test/test_list_xrp_ripple_transactions_by_block_height_r_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | test/test_list_xrp_ripple_transactions_by_block_height_r_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.list_xrp_ripple_transactions_by_block_height_ri import ListXRPRippleTransactionsByBlockHeightRI
globals()['ListXRPRippleTransactionsByBlockHeightRI'] = ListXRPRippleTransactionsByBlockHeightRI
from cryptoapis.model.list_xrp_ripple_transactions_by_block_height_r_data import ListXRPRippleTransactionsByBlockHeightRData
class TestListXRPRippleTransactionsByBlockHeightRData(unittest.TestCase):
    """ListXRPRippleTransactionsByBlockHeightRData unit test stubs"""
    def setUp(self):
        # Generated stub: no shared fixtures are required yet.
        pass
    def tearDown(self):
        # Generated stub: nothing to release.
        pass
    def testListXRPRippleTransactionsByBlockHeightRData(self):
        """Test ListXRPRippleTransactionsByBlockHeightRData"""
        # FIXME: construct object with mandatory attributes with example values
        # model = ListXRPRippleTransactionsByBlockHeightRData()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow executing this generated test module directly.
    unittest.main()
| 40.974359 | 484 | 0.797247 |
3bf755be2d5efd518be7e3f1a0a9da042cb72a8d | 36 | py | Python | src/__init__.py | JacksonianDemocracy/url-rewrite-tracer | 67398c5daa8f4627ca374a0db717102062f1e658 | [
"MIT"
] | null | null | null | src/__init__.py | JacksonianDemocracy/url-rewrite-tracer | 67398c5daa8f4627ca374a0db717102062f1e658 | [
"MIT"
] | null | null | null | src/__init__.py | JacksonianDemocracy/url-rewrite-tracer | 67398c5daa8f4627ca374a0db717102062f1e658 | [
"MIT"
] | null | null | null | from .tracer import UrlRewriteTracer | 36 | 36 | 0.888889 |
7ab1b0d75428beb014ccba71f433ac5755b55815 | 1,252 | py | Python | Iris_recog/iris.py | Theocrat/Iris | 5aaba5dc915f53d148106c0c6bca57e09c548d9c | [
"MIT"
] | null | null | null | Iris_recog/iris.py | Theocrat/Iris | 5aaba5dc915f53d148106c0c6bca57e09c548d9c | [
"MIT"
] | null | null | null | Iris_recog/iris.py | Theocrat/Iris | 5aaba5dc915f53d148106c0c6bca57e09c548d9c | [
"MIT"
] | null | null | null | import cv2
from PIL import Image
from numpy import zeros, arange
from matplotlib.pyplot import figure, show
from skimage.color import rgb2grey
import cv2 as cv
from sys import argv
from morph import *
from imworks import *
def iris_detect(fname):
    """Locate the iris region in the image at `fname`.

    Builds a binary mask of pixels whose R, G and B values all fall in
    (30, 120] (mid-dark pixels), then cleans the mask with the project's
    `dilate`/`erode` morphology helpers and returns it.

    NOTE(review): `pos1` and `count` are assigned but never used; the image is
    loaded twice (via cv2 and PIL); the per-pixel Python loops are very slow
    compared to vectorized numpy operations. `pix` (PIL) is RGB while `img`
    (cv2) is BGR, but the threshold is identical for all three channels, so
    the channel order does not affect the mask.
    """
    img = cv2.imread(fname)
    # Keep a float copy of the original image (only used by the commented-out
    # plotting code below).
    orig = zeros(img.shape)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            for k in range(img.shape[2]):
                orig[i,j,k] = img[i,j,k]
    pos1 = zeros([img.shape[0],img.shape[1]])
    pos2 = zeros([img.shape[0],img.shape[1]])
    im = Image.open(fname)
    pix = im.load()
    #cv2.imshow('detected Edge',img)
    height, width = img.shape[:2]
    #print(height,width)
    height=height-1
    width=width-1
    count=0
    #print(pix[width,height])
    #print(pix[0,0])
    # Mark every mid-dark pixel both on the debug image and on the mask.
    for eh in range(height):
        for ew in range(width):
            r,g,b=pix[ew,eh]
            if r<=120 and r>30 and g<= 120 and g> 30 and b<= 120 and b>30:
                #print(eh,ew)
                cv2.circle(img,(ew,eh),1,(255,0,0),1)
                cv2.circle(pos2,(ew,eh),1,255,1)
    # pos2 is the iris
    # Normalize the mask to {0, 1} before morphology.
    pos2 /= 255
    # Close gaps then shrink the blob back — presumably tuned empirically;
    # kernel sizes 15/8 are magic numbers from the project (TODO confirm).
    iris = dilate(pos2,15)
    iris = erode(iris,8)
    #fig = figure()
    #ii = fig.add_subplot(121)
    #pd = fig.add_subplot(122)
    #ii.imshow(orig/orig.max())
    #pd.imshow(iris, cmap="Greys_r")
    #show()
    return iris
| 21.964912 | 71 | 0.635783 |
b06a084812cefc24ac5c06c6faf1e6adff215d59 | 5,050 | py | Python | higl/models.py | junsu-kim97/HIGL | fd8926f850552d032a6692747d1dd030ffc7ac84 | [
"MIT"
] | 7 | 2021-11-06T11:13:48.000Z | 2022-03-30T23:59:01.000Z | higl/models.py | junsu-kim97/HIGL | fd8926f850552d032a6692747d1dd030ffc7ac84 | [
"MIT"
] | 1 | 2021-11-04T13:15:23.000Z | 2021-11-04T15:06:01.000Z | higl/models.py | junsu-kim97/HIGL | fd8926f850552d032a6692747d1dd030ffc7ac84 | [
"MIT"
] | 2 | 2021-12-19T02:01:30.000Z | 2022-01-13T04:08:11.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
    """Deterministic policy network mapping (state[, goal]) to a bounded action.

    The output is squashed with tanh and scaled by `max_action`, so every
    component lies in [-max_action, max_action].
    """
    def __init__(self, state_dim, goal_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Layer creation order is fixed (l1, l2, l3) for reproducible init.
        self.l1 = nn.Linear(state_dim + goal_dim, 300)
        self.l2 = nn.Linear(300, 300)
        self.l3 = nn.Linear(300, action_dim)
        self.max_action = max_action
    def forward(self, x, g=None):
        # When a goal is supplied it is concatenated onto the state; otherwise
        # `x` must already have state_dim + goal_dim columns.
        net_input = x if g is None else torch.cat([x, g], 1)
        hidden = F.relu(self.l1(net_input))
        hidden = F.relu(self.l2(hidden))
        return self.max_action * torch.tanh(self.l3(hidden))
class Critic(nn.Module):
    """Twin Q-networks (TD3 style) over a (state[, goal], action) input."""
    def __init__(self, state_dim, goal_dim, action_dim):
        super(Critic, self).__init__()
        in_dim = state_dim + goal_dim + action_dim
        # Q1 head (layer creation order matters for reproducible init).
        self.l1 = nn.Linear(in_dim, 300)
        self.l2 = nn.Linear(300, 300)
        self.l3 = nn.Linear(300, 1)
        # Q2 head.
        self.l4 = nn.Linear(in_dim, 300)
        self.l5 = nn.Linear(300, 300)
        self.l6 = nn.Linear(300, 1)
    @staticmethod
    def _join(x, g, u):
        # Goal is optional; concatenate whichever inputs are present.
        parts = [x, u] if g is None else [x, g, u]
        return torch.cat(parts, 1)
    def forward(self, x, g=None, u=None):
        xu = self._join(x, g, u)
        q1 = F.relu(self.l1(xu))
        q1 = F.relu(self.l2(q1))
        q1 = self.l3(q1)
        q2 = F.relu(self.l4(xu))
        q2 = F.relu(self.l5(q2))
        q2 = self.l6(q2)
        return q1, q2
    def Q1(self, x, g=None, u=None):
        # First Q head only.
        xu = self._join(x, g, u)
        q1 = F.relu(self.l1(xu))
        q1 = F.relu(self.l2(q1))
        return self.l3(q1)
class ControllerActor(nn.Module):
    """Low-level (controller) policy: a unit-bounded Actor whose output is
    rescaled elementwise by the fixed, non-trainable `scale` parameter.

    Args:
        state_dim: width of the state input.
        goal_dim: width of the goal input.
        action_dim: width of the produced action.
        scale: scalar, sequence or tensor broadcastable against the
            (batch, action_dim) actor output; `None` means unit scale.
    """
    def __init__(self, state_dim, goal_dim, action_dim, scale=1):
        super(ControllerActor, self).__init__()
        if scale is None:
            # The scale multiplies the actor output, which has action_dim
            # entries, so the default must be sized by action_dim. (The old
            # torch.ones(state_dim) default only worked when the two dims
            # happened to coincide and raised a broadcast error otherwise;
            # sibling ManagerActor already sizes its default by action_dim.)
            scale = torch.ones(action_dim)
        self.scale = nn.Parameter(torch.tensor(scale).float(),
                                  requires_grad=False)
        self.actor = Actor(state_dim, goal_dim, action_dim, 1)
    def forward(self, x, g):
        # Elementwise rescaling of the tanh-bounded action.
        return self.scale*self.actor(x, g)
class ControllerCritic(nn.Module):
    """Twin-Q critic for the low-level controller.

    Thin delegation wrapper around :class:`Critic`, evaluated on
    (state, subgoal, action) triples.
    """
    def __init__(self, state_dim, goal_dim, action_dim):
        super(ControllerCritic, self).__init__()
        self.critic = Critic(state_dim, goal_dim, action_dim)
    def forward(self, x, sg, u):
        # Both Q estimates (Q1, Q2) from the wrapped twin critic.
        return self.critic(x, sg, u)
    def Q1(self, x, sg, u):
        # Expose only the first Q head.
        return self.critic.Q1(x, sg, u)
class ManagerActor(nn.Module):
    """High-level (manager) policy that proposes goals for the controller.

    The unit-bounded Actor output is rescaled elementwise by the first
    `action_dim` entries of `scale`; in absolute-goal mode the result is
    additionally shifted by `absolute_goal_scale`.
    """
    def __init__(self, state_dim, goal_dim, action_dim, scale=None, absolute_goal=False, absolute_goal_scale=8.):
        super(ManagerActor, self).__init__()
        if scale is None:
            scale = torch.ones(action_dim)
        # Only the leading action_dim entries of the supplied scale are used.
        self.scale = nn.Parameter(torch.tensor(scale[:action_dim]).float(), requires_grad=False)
        self.actor = Actor(state_dim, goal_dim, action_dim, 1)
        self.absolute_goal = absolute_goal
        self.absolute_goal_scale = absolute_goal_scale
    def forward(self, x, g):
        scaled_goal = self.scale * self.actor(x, g)
        if self.absolute_goal:
            # Absolute goals are shifted by a constant offset.
            return scaled_goal + self.absolute_goal_scale
        return scaled_goal
class ManagerCritic(nn.Module):
    """Twin-Q critic for the high-level manager.

    Thin delegation wrapper around :class:`Critic`, evaluated on
    (state, goal, action) triples.
    """
    def __init__(self, state_dim, goal_dim, action_dim):
        super(ManagerCritic, self).__init__()
        self.critic = Critic(state_dim, goal_dim, action_dim)
    def forward(self, x, g, u):
        # Both Q estimates (Q1, Q2) from the wrapped twin critic.
        return self.critic(x, g, u)
    def Q1(self, x, g, u):
        # Expose only the first Q head.
        return self.critic.Q1(x, g, u)
class ANet(nn.Module):
    """MLP encoder embedding a raw state into a low-dimensional latent space."""
    def __init__(self, state_dim, hidden_dim, embedding_dim):
        super().__init__()
        # Three hidden layers plus a linear output projection; creation order
        # is fixed for reproducible initialization.
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.fc4 = nn.Linear(hidden_dim, embedding_dim)
    def forward(self, x):
        # ReLU-activated hidden stack, then an unactivated projection.
        for hidden_layer in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden_layer(x))
        return self.fc4(x)
class RndPredictor(nn.Module):
    """Predictor MLP for Random Network Distillation novelty estimation."""
    def __init__(self, state_dim, hidden_dim=300, output_dim=128):
        super().__init__()
        self.l1 = nn.Linear(state_dim, hidden_dim)
        self.l2 = nn.Linear(hidden_dim, hidden_dim)
        self.l3 = nn.Linear(hidden_dim, output_dim)
    def forward(self, x):
        # Two ReLU layers followed by a linear output head.
        hidden = F.relu(self.l1(x))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class ValueFunction(nn.Module):
    """State(-goal) value network V(s[, g]) producing a scalar per sample."""
    def __init__(self, state_dim, goal_dim):
        super(ValueFunction, self).__init__()
        self.l1 = nn.Linear(state_dim + goal_dim, 300)
        self.l2 = nn.Linear(300, 300)
        self.l3 = nn.Linear(300, 1)
    def forward(self, x, g=None):
        # With a goal, concatenate it onto the state; otherwise `x` must
        # already span state_dim + goal_dim columns.
        net_input = x if g is None else torch.cat([x, g], 1)
        value = F.relu(self.l1(net_input))
        value = F.relu(self.l2(value))
        return self.l3(value)
| 28.370787 | 113 | 0.577426 |
8be6960de668dc19c349d211c507796db5e6d68e | 6,390 | py | Python | utils/extract_batch.py | linshoa/siamese-cnn | 2194c097c932a605582d2ee9d58ef31bfa83eaff | [
"Apache-2.0"
] | 1 | 2018-10-27T10:03:06.000Z | 2018-10-27T10:03:06.000Z | utils/extract_batch.py | linshoa/siamese-cnn | 2194c097c932a605582d2ee9d58ef31bfa83eaff | [
"Apache-2.0"
] | null | null | null | utils/extract_batch.py | linshoa/siamese-cnn | 2194c097c932a605582d2ee9d58ef31bfa83eaff | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from random import shuffle
from utils.image_precess import *
from utils import config
"""
for DukeMTMC example:
0001_c2_f0046302.jpg--> {{'id':0001},{location:c2},{time:0046302},}
"""
name_dir = config.DukeMTMC_name_dir  # directory holding the split list files
DukeMTMC_directory = config.DukeMTMC_img_dir  # root directory of the image folders
# Maps a split name to the text file listing that split's image names.
diff_set = {'train': 'bounding_box_train.txt', 'test': 'bounding_box_test.txt', 'query': 'query.txt'}
def get_id(name_select):
    """
    Get the set of unique person ids.
    :param name_select: which split to select ('train', 'test' or 'query').
        NOTE(review): currently unused — the live code path always reads the
        cached 'person_set_id.json' file; the split-specific extraction is
        the commented-out code below.
    :return: list of person id strings with no repeats.
    """
    """save as the json."""
    # person_set_id = []
    # name_select = diff_set[name_select]
    # with open(name_dir + name_select, 'r') as f:
    #     for line in f:
    #         person_set_id.append(line.split('_')[0])
    # # set: unordered collection without duplicates
    # person_set_id = list(set(person_set_id))
    # with open('./person_set_id.json', 'w') as w:
    #     json.dump(person_set_id, w)
    with open('person_set_id.json', 'r') as r:
        person_set_id = json.load(r)
    return person_set_id
def get_identity(_person_id):
    """Build a length-702 one-hot vector for `_person_id` based on its
    position in the module-level `set_id` list.

    NOTE(review): `identity[int(index)-1]` sets position index-1, so the id at
    list position 0 marks the LAST slot (index -1 wraps around) — looks like
    an off-by-one; confirm intended label layout before changing.
    """
    # identity = [0. for i in range(7140)]
    identity = np.zeros([702])
    index = set_id.index(_person_id)
    # NOTE(review): debug print left in a data-pipeline hot path.
    print(index)
    identity[int(index)-1] = float(1)
    return identity
def get_id_corresponding_name():
    """
    Reload the cached json mapping each person id to its image file names,
    e.g. '0178': ['0178_c1_f0086107.jpg', '0178_c1_f0086227.jpg', ...].
    :return: dict mapping id string -> list of image names.
    """
    # NOTE(review): the file handle is never closed — should use `with open(...)`.
    save_id_name = open(name_dir + 'train_id_name.json', 'r')
    id_person = json.load(save_id_name)
    # print(id_person)
    return id_person
def get_id_spatio_temporal(line):
    """
    Parse an image file name into identity/space/time information.
    :param line: img_name, e.g. '0001_c2_f0046302.jpg'
    :return: [one_hot_identity, camera_id, frame_time]
    """
    # Strip the 4-character extension, then split 'id_cX_fTIME'.
    fields = line[:-4].split('_')
    camera = fields[1][1:]       # drop the leading 'c'
    frame_time = fields[2][1:]   # drop the leading 'f'
    one_hot = get_identity(fields[0])
    return [one_hot, camera, frame_time]
# another reference : https://blog.csdn.net/appleml/article/details/57413615
# Also note: the first three approaches traverse the whole corpus only once,
# while the last approach traverses the whole corpus num_epochs times.
# reference: https://github.com/digitalbrain79/person-reid
# here comes a idea, a image in the batch may be the one the that have appear in the last batch
def get_exact_id_pair(person_id, positive):
    """Sample one image pair anchored at `person_id`.

    :param person_id: anchor identity string (key into module-level `id_dict`).
    :param positive: if True, both images come from `person_id`; otherwise the
        right image comes from a different, randomly chosen identity.
    :return: (left_name, right_name, label) with label [1, 0] for positive
        pairs and [0, 1] for negative pairs.

    NOTE(review): np.random.choice samples with replacement by default, so a
    positive pair can consist of the same image twice — confirm if intended.
    """
    set_id_copy = set_id.copy()
    candidate = id_dict[person_id]
    if positive:
        left_name, right_name = list(np.random.choice(candidate, 2))
        # positive
        label = [1, 0]
    else:
        left_name = np.random.choice(candidate, 1)[0]
        # Remove the anchor so the second identity is guaranteed different.
        set_id_copy.remove(person_id)
        person_id_another = np.random.choice(set_id_copy, 1)[0]
        candidate_another = id_dict[person_id_another]
        right_name = np.random.choice(candidate_another, 1)[0]
        # negative
        label = [0, 1]
    return left_name, right_name, label
def get_pair(_ids, start, end):
    """Build a shuffled half-positive / half-negative batch of image pairs.

    :param _ids: full list of person ids; the batch uses the circular slice
        [start, end) of it (wrapping when start >= end).
    :param start: inclusive start index into `_ids`.
    :param end: exclusive end index into `_ids`.
    :return: (left_names, right_names, labels, left_info, right_info) where
        labels are float32 arrays of shape (1, 2) and *_info entries come
        from get_id_spatio_temporal.
    """
    left_imgs = list()
    right_imgs = list()
    labels = list()
    if start < end:
        person_id = _ids[start:end]
    else:
        # Wrap around the end of the id list.
        person_id = _ids[start:] + _ids[:end]
    # split into positive and negative
    for i in person_id:
        # NOTE(review): `person_id.index(i)` is O(n) per item and returns the
        # FIRST occurrence — correct only while ids in the slice are unique;
        # `enumerate` would be both safer and faster.
        if person_id.index(i) < len(person_id) // 2:
            # positive
            left, right, label = get_exact_id_pair(i, positive=True)
        else:
            # negative
            left, right, label = get_exact_id_pair(i, positive=False)
        left_imgs.append(left)
        right_imgs.append(right)
        labels.append(label)
    # here comes the shuffle
    shuffle_index = np.arange(len(labels))
    shuffle(shuffle_index)
    shuffle_left_imgs = []
    shuffle_right_imgs = []
    shuffle_labels = []
    left_info = []
    right_info = []
    # Apply one common permutation so pairs and labels stay aligned.
    for index in shuffle_index:
        shuffle_left_imgs.append(left_imgs[index])
        shuffle_right_imgs.append(right_imgs[index])
        shuffle_labels.append(labels[index])
    # labels should convert to row data like (2,1)
    # # in the networks labels should convert it to float32.
    # print(np.asarray(shuffle_labels, dtype='float32')[:, np.newaxis].shape)
    shuffle_labels = list(np.asarray(shuffle_labels, dtype='float32')[:, np.newaxis])
    for left_name in shuffle_left_imgs:
        left_info.append(get_id_spatio_temporal(left_name))
    for right_name in shuffle_right_imgs:
        right_info.append(get_id_spatio_temporal(right_name))
    return shuffle_left_imgs, shuffle_right_imgs, shuffle_labels, left_info, right_info
def precess_to_array(left_imgs_name, right_imgs_name, target_size, name_select):
    """
    Load and preprocess the paired image name lists into arrays.
    :param left_imgs_name: file names for the left branch.
    :param right_imgs_name: file names for the right branch.
    :param target_size: target (height, width) passed to load_img.
    :param name_select: which split to read ('train', 'test' or 'query').
    :return: (left_arrays, right_arrays)
    """
    # Split folder name is the list file name without its '.txt' extension.
    data_dir = DukeMTMC_directory + diff_set[name_select][:-4]
    def _load(img_name):
        img = load_img(data_dir + '/' + img_name, target_size=target_size)
        return preprocess_input(img_2_array(img))
    left = [_load(img_name) for img_name in left_imgs_name]
    right = [_load(img_name) for img_name in right_imgs_name]
    return left, right
def next_batch(batch_size, target_size, is_train, start):
    """
    Get the next batch of paired, preprocessed images.
    :param batch_size: number of identities consumed per batch.
    :param target_size: target (height, width) for image loading.
    :param is_train: bool; only the training split is currently supported.
        NOTE(review): when False, `name_select` is never assigned and the
        next line raises NameError — decide whether to support other splits
        or raise a clear ValueError.
    :param start: inclusive start index into the id list (wraps circularly).
    :return: left_imgs_array, right_imgs_array, labels, info_left,
        info_right, end (the start index for the next batch).
    """
    if is_train:
        name_select = 'train'
    _ids = get_id(name_select)
    # you'd better just get the name of the img here
    # remember the rank you get positive and negative should not only
    # make sure the ratio is 1:1 but also shuffle the order.
    end = (start+batch_size) % len(_ids)
    # positive pair add in sequence of the set!!(so just take care the odd),
    # while negative just randomly select pairs
    # take care start > end !!!
    # todo the format of imgs_array is wrong ! while labels not sure!!
    left_imgs_name, right_imgs_name, labels, info_left, info_right = get_pair(_ids, start, end)
    left_imgs_array, right_imgs_array = precess_to_array(left_imgs_name, right_imgs_name, target_size, name_select)
    return left_imgs_array, right_imgs_array, labels, info_left, info_right, end
# Module-level caches, populated once at import time.
# NOTE(review): importing this module therefore reads json files from disk.
# load just once!!!
set_id = get_id('train')
# just load once !!
id_dict = get_id_corresponding_name()
if __name__ == '__main__':
# 475
print(get_identity('0001')) | 33.631579 | 119 | 0.673709 |
1eca0a0c9dc3deffe5b9b5c01c84b71468d91ec5 | 3,109 | py | Python | tests/config/test_converter.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | 2 | 2020-09-02T13:46:06.000Z | 2020-10-11T16:11:02.000Z | tests/config/test_converter.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | null | null | null | tests/config/test_converter.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | 4 | 2018-10-15T07:08:34.000Z | 2019-11-26T01:52:47.000Z | # -*- coding: utf-8 -*-
import textwrap
from typing import List, Dict, Optional, Union
from unittest import TestCase
from xTool.config.converter import DictConfigConverter
from xTool.codec.yaml_codec import YamlCodec
class TestDictConfigConverter(TestCase):
    """Tests DictConfigConverter.unmarshal against nested objects and lists."""
    def test_unmarshal(self):
        # YAML fixture mixing scalars, lists, dicts and nested object trees.
        yaml_config = textwrap.dedent("""\
            object1:
              b: 1
              c: 2
              d:
                - d1
                - d2
              e:
                e1: 1
                e2: "2"
              f: [f1, f2]
              object2:
                a: 1
                b: 2
              object3:
                - c: 1
                  d: 2
                - c: 3
                  d: 4
            """)
        dict_config = YamlCodec.decode(yaml_config)
        # Sanity check: the YAML decodes to the expected plain dict first.
        assert dict_config == {
            'object1': {
                'b': 1,
                'c': 2,
                'd': [
                    'd1',
                    'd2'],
                'e': {
                    'e1': 1,
                    'e2': '2'},
                'f': ['f1', 'f2'],
                'object2': {'a': 1, 'b': 2},
                'object3': [{'c': 1, 'd': 2}, {'c': 3, 'd': 4}]
            }
        }
        converter = DictConfigConverter(dict_config)
        # Local target classes; their type annotations drive the conversion
        # (e.g. b: str makes the integer 1 arrive as '1').
        class Object2:
            def __init__(self, a: int = None, b: str = None):
                self.a = a
                self.b = b
            def __eq__(self, other):
                return self.a == other.a and self.b == other.b
        class Object3:
            def __init__(self, c: int = None, d: str = None):
                self.c = c
                self.d = d
            def __eq__(self, other):
                return self.c == other.c and self.d == other.d
        class Object1:
            def __init__(self,
                         a=None,
                         b: str = None,
                         c: int = None,
                         d: Optional[List] = None,
                         e: Union[None, Dict] = None,
                         object2: Object2 = None,
                         object3: List[Object3] = None):
                self.a = a
                self.b = b
                self.c = c
                self.d = d
                self.e = e
                self.object2 = object2 if object2 else {}
                self.object3 = object3 if object3 else []
            def __eq__(self, other):
                return (self.a == other.a and self.b == other.b and self.c == other.c and self.d ==
                        other.d and self.e == other.e and
                        self.object2 == other.object2 and self.object3 == other.object3)
        # Unmarshal the 'object1' subtree and compare field by field.
        object1 = converter.unmarshal("object1", Object1())
        assert object1.object2 == Object2(1, '2')
        assert object1.object3 == [Object3(1, '2'), Object3(3, '4')]
        assert object1 == Object1(
            a=None,
            b='1',
            c=2,
            d=['d1', 'd2'],
            e={'e1': 1, 'e2': '2'},
            object2=Object2(1, '2'),
            object3=[Object3(1, '2'), Object3(3, '4')]
        )
| 30.480392 | 99 | 0.389836 |
e96227646e6023c38aa7c2e6a8b3f1a46b69b211 | 2,496 | py | Python | tests/actors/test_performance.py | reapler/geckordp | 29dab2e6e691954a473e054fa95ba40a3ad10e53 | [
"MIT"
] | 1 | 2021-12-24T04:37:02.000Z | 2021-12-24T04:37:02.000Z | tests/actors/test_performance.py | jpramosi/geckordp | 29dab2e6e691954a473e054fa95ba40a3ad10e53 | [
"MIT"
] | 1 | 2021-07-23T13:38:36.000Z | 2021-08-07T14:17:54.000Z | tests/actors/test_performance.py | reapler/geckordp | 29dab2e6e691954a473e054fa95ba40a3ad10e53 | [
"MIT"
] | 1 | 2021-10-31T17:31:35.000Z | 2021-10-31T17:31:35.000Z | # pylint: disable=unused-import
import pytest
import tests.helpers.constants as constants
from tests.helpers.utils import *
from geckordp.rdp_client import RDPClient
from geckordp.actors.root import RootActor
from geckordp.actors.descriptors.tab import TabActor
from geckordp.actors.performance import PerformanceActor
from geckordp.logger import log, logdict
def init():
    """Connect to the remote debugging host and return (client, performance actor).

    Walks the RDP actor hierarchy: root -> current tab -> tab target ->
    performance actor, then calls connect() on the performance actor.
    The caller owns the returned client and must disconnect it.
    """
    cl = RDPClient(3)
    cl.connect(constants.REMOTE_HOST, constants.REMOTE_PORT)
    root = RootActor(cl)
    current_tab = root.current_tab()
    tab = TabActor(cl, current_tab["actor"])
    actor_ids = tab.get_target()
    performance = PerformanceActor(cl, actor_ids["performanceActor"])
    performance.connect()
    return cl, performance
def test_connect():
    """connect() must report the performance actor's traits."""
    cl = None
    try:
        cl, performance = init()
        val = performance.connect()
        assert val.get("traits", None) is not None
    finally:
        # init() may fail before `cl` is assigned; the previous unconditional
        # disconnect() then raised AttributeError on None, masking the error.
        if cl is not None:
            cl.disconnect()
def test_can_currently_record():
    """can_currently_record() must return a payload with a 'value' field."""
    cl = None
    try:
        cl, performance = init()
        val = performance.can_currently_record()
        assert val.get("value", None) is not None
    finally:
        # Guard: don't call disconnect() on None if init() failed early.
        if cl is not None:
            cl.disconnect()
def test_start_recording():
    """start_recording() must return the new recording actor id."""
    cl = None
    try:
        cl, performance = init()
        val = performance.start_recording()
        assert val.get("actor", None) is not None
    finally:
        # Guard: don't call disconnect() on None if init() failed early.
        if cl is not None:
            cl.disconnect()
def test_stop_recording():
    """stop_recording() on a just-started recording must return its actor id."""
    cl = None
    try:
        cl, performance = init()
        val = performance.start_recording()
        val = performance.stop_recording(val.get("actor", ""))
        assert val.get("actor", None) is not None
    finally:
        # Guard: don't call disconnect() on None if init() failed early.
        if cl is not None:
            cl.disconnect()
def test_is_recording():
    """is_recording() must return a boolean."""
    cl = None
    try:
        cl, performance = init()
        val = performance.is_recording()
        assert isinstance(val, bool)
    finally:
        # Guard: don't call disconnect() on None if init() failed early.
        if cl is not None:
            cl.disconnect()
def test_get_recordings():
    """get_recordings() must return a list."""
    cl = None
    try:
        cl, performance = init()
        val = performance.get_recordings()
        assert isinstance(val, list)
    finally:
        # Guard: don't call disconnect() on None if init() failed early.
        if cl is not None:
            cl.disconnect()
def test_get_configuration():
    """get_configuration() must return a dict containing 'interval'."""
    cl = None
    try:
        cl, performance = init()
        val = performance.get_configuration()
        assert isinstance(val, dict)
        assert val.get("interval", None) is not None
    finally:
        # Guard: don't call disconnect() on None if init() failed early.
        if cl is not None:
            cl.disconnect()
def test_set_profiler_status_interval():
    """set_profiler_status_interval() must complete without raising."""
    cl = None
    try:
        cl, performance = init()
        performance.set_profiler_status_interval(100)
    finally:
        # Guard: don't call disconnect() on None if init() failed early.
        if cl is not None:
            cl.disconnect()
| 24.23301 | 69 | 0.645433 |
a3d68a86f99cdb0f58b209eaa04840f7e07c2904 | 127 | py | Python | 2021/python/tests/test_day17.py | shalgrim/advent-of-code | d3bd1c9f7eeaebff4153f6fd73ef8fc32d2b1ea8 | [
"MIT"
] | null | null | null | 2021/python/tests/test_day17.py | shalgrim/advent-of-code | d3bd1c9f7eeaebff4153f6fd73ef8fc32d2b1ea8 | [
"MIT"
] | null | null | null | 2021/python/tests/test_day17.py | shalgrim/advent-of-code | d3bd1c9f7eeaebff4153f6fd73ef8fc32d2b1ea8 | [
"MIT"
] | null | null | null | from day17_1 import main as main1
def test_main1():
    """AoC 2021 day 17, part one: the sample target area scores 45."""
    sample_input = 'target area: x=20..30, y=-10..-5'
    assert main1(sample_input) == 45
| 18.142857 | 44 | 0.622047 |
094d6b3452d0f18e7ac597b22fa89f0ba391e4b8 | 865 | py | Python | utilities/test_chromosome.py | Platygator/visually-supervised-learning-for-robotic-manipulation | d509bc7a747622dd4d585887ba92f04843e2a531 | [
"MIT"
] | 1 | 2021-06-01T18:53:52.000Z | 2021-06-01T18:53:52.000Z | utilities/test_chromosome.py | Platygator/visually-supervised-learning-for-robotic-manipulation | d509bc7a747622dd4d585887ba92f04843e2a531 | [
"MIT"
] | 1 | 2021-02-04T10:21:49.000Z | 2021-02-04T10:21:49.000Z | utilities/test_chromosome.py | Platygator/visually-supervised-learning-for-robotic-manipulation | d509bc7a747622dd4d585887ba92f04843e2a531 | [
"MIT"
] | null | null | null | # Implementation to Test Chromosome
import os
import numpy as np
from EvaluateChromosome import evaluateChromosome
def testChromosome(x, y, z, inputFile="bestChromosome.npy"):
    """
    Evaluate the saved best chromosome at a target position.

    Parameters
    ----------
    x : x-position of the Hubert Robot
    y : y-position of the Hubert Robot
    z : z-position of the Hubert Robot
    inputFile : Name of the Input File (Default: bestChromosome.npy)

    Returns
    -------
    theta1 : Angle 1 input to the Hubert Robot
    theta2 : Angle 2 input to the Hubert Robot
    theta3 : Angle 3 input to the Hubert Robot
    """
    best_chromosome = np.load(inputFile)
    # Registered constants and variable count must match the values used
    # during training of the chromosome.
    registered_constants = np.array([1, -1, 2])
    variable_count = 7
    return evaluateChromosome(x, y, z, best_chromosome, variable_count, registered_constants)
a4b707b8aaa0b416a5bec6d1bcf0a1cf9bb4b8c6 | 17,680 | py | Python | goemotions/ppca.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | goemotions/ppca.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | goemotions/ppca.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Script for running PPCA on the emotion dataset and generating plots.
The goal of this analysis is to understand which dimensions of the emotion label
space are significant via Principal Preserved Component Analysis
(Cowen et al., 2019). PPCA seeks to identify dimensions of the latent space
that maximally covary across two datasets (in our case, randomly split raters).
Reference:
Cowen, A. S., Laukka, P., Elfenbein, H. A., Liu, R., & Keltner, D. (2019).
The primacy of categories in the recognition of 12 emotions in speech prosody
across two cultures. Nature human behaviour, 3(4), 369.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import random
from absl import app
from absl import flags
import altair as alt
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg as LA
import pandas as pd
from scipy.stats import spearmanr
from scipy.stats import wilcoxon
import seaborn as sns
from sklearn.manifold import TSNE
from statsmodels.stats.multitest import multipletests
# Global plotting style: white grid, no visible tick marks, high-res figures.
sns.set_style("whitegrid")
plt.rcParams["xtick.major.size"] = 0
plt.rcParams["ytick.major.size"] = 0
plt.rcParams["figure.dpi"] = 1000
# Command-line flags: input data locations and plotting assets.
FLAGS = flags.FLAGS
flags.DEFINE_string("data", "data/full_dataset",
                    "Directory containing full dataset.")
flags.DEFINE_string("plot_dir", "plots", "Directory for saving plots.")
flags.DEFINE_string("emotion_file", "data/emotions.txt",
                    "File containing list of emotions.")
flags.DEFINE_string("rgb_colors", "plots/colors.tsv",
                    "File containing list of distinctive rgb colors.")
flags.DEFINE_string(
    "emotion_color_order", "plots/color_order.txt",
    "File containing emotions in order for coloring based on FLAGS.rgb_colors.")
def PPCA(x, y):
  """Principal Preserved Component Analysis of two rating matrices.

  Demeans both matrices, forms the symmetrized cross-covariance
  x'y + y'x, and eigendecomposes it.

  Args:
    x: [num_examples, num_dims] ratings from one rater split.
    y: [num_examples, num_dims] ratings from the other rater split.

  Returns:
    A tuple (w, v): w holds the PPCs as columns and v the component
    covariances (eigenvalues), both ordered by descending covariance.
  """
  x_centered = x - x.mean(axis=0)
  y_centered = y - y.mean(axis=0)
  # Symmetrized cross-covariance between the two splits.
  crosscov = np.matmul(x_centered.T, y_centered) + np.matmul(
      y_centered.T, x_centered)
  # eigh returns results in ascending eigenvalue order; reverse both so the
  # first component explains the most covariance.
  eigenvalues, eigenvectors = LA.eigh(crosscov)
  return np.flip(eigenvectors, 1), np.flip(eigenvalues)
def Demean(m):
  """Center each column of m at zero by subtracting its column mean."""
  column_means = m.mean(axis=0)
  return m - column_means
def PartialCorr(x, y, covar):
  """Spearman correlation of x and y after regressing out covar.

  Args:
    x: 1-D array of scores.
    y: 1-D array of scores.
    covar: [num_examples, num_covariates] matrix of variables to partial out.

  Returns:
    The (correlation, pvalue) result of scipy.stats.spearmanr applied to
    the least-squares residuals of x and y.
  """
  covariates = np.atleast_2d(covar)
  # Least-squares fit of each variable on the covariates; the residuals are
  # the parts of x and y not explained by covar.
  coeff_x = np.linalg.lstsq(covariates, x, rcond=None)[0]
  coeff_y = np.linalg.lstsq(covariates, y, rcond=None)[0]
  residual_x = x - covariates.dot(coeff_x)
  residual_y = y - covariates.dot(coeff_y)
  return spearmanr(residual_x, residual_y)
def Varimax(phi, gamma=1, q=20, tol=1e-6):
  """Varimax rotation of the loading matrix phi.

  Adapted from
  https://stackoverflow.com/questions/17628589/perform-varimax-rotation-in-python-using-numpy

  Args:
    phi: [num_variables, num_components] loading matrix.
    gamma: 1 gives varimax (0 would give quartimax).
    q: maximum number of rotation iterations.
    tol: relative-improvement tolerance for early stopping.

  Returns:
    The rotated loading matrix np.dot(phi, r), with r orthogonal.
  """
  p, k = phi.shape
  r = np.eye(k)
  d = 0
  for _ in range(q):
    d_old = d
    l = np.dot(phi, r)
    u, s, vh = LA.svd(
        np.dot(
            phi.T,
            np.asarray(l)**3 -
            (gamma / p) * np.dot(l, np.diag(np.diag(np.dot(l.T, l))))))
    r = np.dot(u, vh)
    d = np.sum(s)
    # Converged once the objective stops improving. The previous check
    # `d / d_old < tol` divided by zero on the first pass (d_old == 0) and,
    # since the ratio tends to 1, effectively never triggered; the reference
    # implementation breaks when successive objectives are within tol of 1.
    if d_old != 0 and d / d_old < 1 + tol:
      break
  return np.dot(phi, r)
def LeaveOut(ratings, rater_msk, worker2examples, worker_id):
  """Calculate correlations and partial correlations for a particular rater.

  Holds out `worker_id`, randomly splits the remaining raters of every
  example that rater labeled into two halves, extracts PPCs from the two
  halves, and correlates the held-out rater's component scores with the
  averaged in-sample scores.

  Args:
    ratings: [num_examples, max_num_raters, num_emotions] rating array.
    rater_msk: [num_examples, max_num_raters] 0/1 mask of existing raters.
    worker2examples: dict mapping worker ids to (example, rater id) tuples.
    worker_id: id of the rater to leave out.

  Returns:
    Tuple (partial_corrs, corrs) of per-component partial and plain Spearman
    correlations; the lists may be shorter than the number of components if a
    correlation is undefined (NaN) for some component.
  """
  examples = worker2examples[worker_id]
  use_examples = copy.deepcopy(ratings[[idx for idx, _ in examples]])
  # Number of actual raters per selected example (sum of the 0/1 mask).
  use_examples_msk = copy.deepcopy(rater_msk[[idx for idx, _ in examples
                                             ]]).sum(axis=1).astype(int)
  rater_indices = [rater_idx for _, rater_idx in examples]
  # Take remaining raters and split them randomly
  x = []
  y = []
  exclude = []  # the held-out rater's ratings, one row per example
  average_insample = []  # mean of the remaining raters, one row per example
  for i, ex in enumerate(use_examples):
    # Separate leave-out rater from others
    keep = []
    for j, worker_rating in enumerate(ex[:use_examples_msk[i]]):
      if j != rater_indices[i]:
        keep.append(list(worker_rating))
      else:
        exclude.append(list(worker_rating))
    # Calculate average of insample ratings
    avg_insample_ex = np.array(keep).mean(axis=0)
    assert np.isnan(avg_insample_ex).sum() == 0
    average_insample.append(list(avg_insample_ex))
    # Shuffle raters randomly
    random.shuffle(keep)
    # If there are two in-sample raters, just separate them
    num_raters = len(keep)
    if num_raters == 2:
      x.append(keep[0])
      y.append(keep[1])
    else:
      x.append(list(np.array(keep[:int(num_raters / 2)]).mean(axis=0)))
      y.append(list(np.array(keep[int(num_raters / 2):]).mean(axis=0)))
  assert np.isnan(x).sum() == 0
  assert np.isnan(y).sum() == 0
  x = np.array(x)
  y = np.array(y)
  w, _ = PPCA(x, y)  # get PPCs
  exclude = np.array(exclude)
  assert np.isnan(exclude).sum() == 0
  average_insample = np.array(average_insample)
  exclude = Demean(exclude)  # demean held out rater's values
  average_insample = Demean(average_insample)  # demean in sample rater's values
  assert np.isnan(exclude).sum() == 0
  assert np.isnan(average_insample).sum() == 0
  left_out_scores = exclude.dot(w)  # get scores for leave-out rater
  insample_scores = average_insample.dot(w)  # scores for in-sample raters
  assert np.isnan(left_out_scores).sum() == 0
  assert np.isnan(insample_scores).sum() == 0
  # Run partial Spearman correlation for each component, doing a regular
  # Spearman correlation for the first dimension
  first_corr = spearmanr(left_out_scores[:, 0], insample_scores[:, 0])[0]
  partial_corrs = [first_corr]
  corrs = [first_corr]
  for i in range(1, left_out_scores.shape[1]):
    # each column represents a component
    # partial out insample raters' scores for previous components
    pc = PartialCorr(left_out_scores[:, i], insample_scores[:, i],
                     insample_scores[:, :i])[0]
    # regular spearman correlation
    c = spearmanr(left_out_scores[:, i], insample_scores[:, i])[0]
    # if no correlation (i.e. the standard deviation of the vectors is 0, this
    # happens when the rater only labeled a 1-2 examples) ignore that rater
    if np.isnan(pc):
      break
    partial_corrs.append(pc)
    corrs.append(c)
  return partial_corrs, corrs
def PlotCovar(v, num_components=None):
  """Plot the percentage of covariance explained by each PPC.

  Saves the figure to FLAGS.plot_dir/covar_explained.pdf.

  Args:
    v: 1-D array of component covariances (eigenvalues), descending.
    num_components: how many leading components to plot. Defaults to all of
      them. (The value was previously hard-coded to 28 — the number of
      emotion categories — which broke for any other label-space size.)
  """
  if num_components is None:
    num_components = len(v)
  var_explained = v / np.sum(v) * 100  # convert covariances to percentages
  fig = plt.figure()
  plt.plot(np.arange(num_components), var_explained[:num_components],
           marker="o")
  plt.ylabel("Percentage of Covariance Explained", fontsize="x-large")
  plt.xlabel("Component", fontsize="x-large")
  plt.xlim(-1, num_components)
  fig.savefig(
      FLAGS.plot_dir + "/covar_explained.pdf",
      dpi=600,
      format="pdf",
      bbox_inches="tight")
def main(_):
  """End-to-end PPCA analysis of the emotion ratings.

  Loads the rating CSVs, runs leave-one-rater-out significance testing of the
  PPCA components, extracts and varimax-rotates the final components, and
  writes agreement/loadings/t-SNE plots under FLAGS.plot_dir.
  """
  print("Loading data...")
  dfs = []
  for filename in os.listdir(FLAGS.data):
    if filename.endswith(".csv"):
      dfs.append(
          pd.read_csv(os.path.join(FLAGS.data, filename), encoding="utf-8"))
  data = pd.concat(dfs)
  print("%d Examples" % (len(set(data["id"]))))
  print("%d Annotations" % len(data))
  if not os.path.isdir(FLAGS.plot_dir):
    os.makedirs(FLAGS.plot_dir)
  with open(FLAGS.emotion_file, "r") as f:
    all_emotions = f.read().splitlines()
  all_emotions_neutral = all_emotions + ["neutral"]
  emotion2idx = {e: i for i, e in enumerate(all_emotions)}
  print("%d emotion Categories" % len(all_emotions))
  print("Processing data...")
  # Remove neutral labels
  data = data[data["neutral"] == 0]
  # Remove examples with no ratings (difficult examples)
  data = data[data[all_emotions_neutral].sum(axis=1) != 0]
  # Convert into num_examples x num_raters x num_ratings format
  # (keep only examples rated by at least 3 raters).
  data = data.groupby("id").filter(lambda x: len(x) >= 3)
  id_groups = data.groupby("id")
  worker2examples = {}  # dict mapping worker ids to (example, rater id) tuples
  max_num_raters = data.groupby("id").size().max()
  ratings = np.zeros(
      (len(id_groups), max_num_raters, len(all_emotions)))  # ignore "neutral"
  rater_msk = np.zeros(
      (len(id_groups), max_num_raters))  # for masking out non-existent raters
  print("Ratings shape", ratings.shape)
  # Get ratings and rater mask
  texts = []
  for ex_idx, (_, g) in enumerate(id_groups):
    texts.append(g.iloc[0]["text"])
    rater_count = 0
    # iterate through workers
    for _, row in g.iterrows():
      for e in all_emotions:
        ratings[ex_idx, rater_count, emotion2idx[e]] = row[e]
      rater_msk[ex_idx, rater_count] = 1
      worker_id = row["rater_id"]
      if worker_id in worker2examples:
        worker2examples[worker_id].append((ex_idx, rater_count))
      else:
        worker2examples[worker_id] = [(ex_idx, rater_count)]
      rater_count += 1
  print("Calculating leave-out (partial) correlations...")
  partial_corr_per_rater = []
  corr_per_rater = []
  for worker_id in worker2examples:
    partial_corrs, corrs = LeaveOut(ratings, rater_msk, worker2examples,
                                    worker_id)
    # Skip raters whose correlations are undefined for some component.
    if len(partial_corrs) < len(all_emotions):
      continue
    partial_corr_per_rater.append(partial_corrs)
    corr_per_rater.append(corrs)
  corr_per_rater = np.array(corr_per_rater)
  partial_corr_per_rater = np.array(partial_corr_per_rater)
  # Verify that there are no NaN values
  assert np.isnan(corr_per_rater).sum() == 0
  # Apply Wilcoxon signed rank test to test significance of each dimension
  p_vals = np.apply_along_axis(wilcoxon, 0, partial_corr_per_rater)[1]
  # Apply Bonferroni correction
  reject, corr_pvals, _, newalpha = multipletests(
      p_vals, alpha=0.05, method="bonferroni")
  print("Which dimensions to keep?")
  print(reject)
  print(corr_pvals)
  print(newalpha)
  print("Running PPCA on all the data...")
  # Take all raters and split them randomly
  x = []
  y = []
  rater_counts = rater_msk.sum(axis=1).astype(int)
  all_ratings_avg = []
  for i, ex in enumerate(ratings):
    # Get actual raters based on mask
    keep = []
    for worker_rating in ex[:rater_counts[i]]:
      keep.append(list(worker_rating))
    all_ratings_avg.append(list(np.array(keep).mean(axis=0)))
    # Shuffle raters randomly
    random.shuffle(keep)
    num_raters = len(keep)
    x.append(list(np.array(keep[:int(num_raters / 2)]).mean(axis=0)))
    y.append(list(np.array(keep[int(num_raters / 2):]).mean(axis=0)))
  x = np.array(x)
  y = np.array(y)
  all_ratings_avg = np.array(all_ratings_avg)
  w, v = PPCA(x, y)  # final components (p-values determine which ones to keep)
  print("Plotting percentage of covariance explained...")
  PlotCovar(v)
  # Apply varimax rotation
  w_vari = Varimax(w)
  # Get mapping between ppcs and emotions
  map_df = pd.DataFrame(
      w_vari, index=all_emotions, columns=np.arange(len(all_emotions))).round(4)
  # Sort to move values to diagonal
  map_df = map_df[list(
      np.argsort(map_df.apply(lambda x: pd.Series.nonzero(x)[0]).values)[0])]
  f = plt.figure(figsize=(10, 6), dpi=300)
  sns.heatmap(
      map_df,
      center=0,
      cmap=sns.diverging_palette(240, 10, n=50),
      yticklabels=all_emotions)
  plt.xlabel("Component")
  plt.savefig(
      FLAGS.plot_dir + "/component_loadings.pdf",
      dpi=600,
      format="pdf",
      bbox_inches="tight")
  # Each PPC is named after the emotion with the largest absolute loading.
  ppc2emotion = map_df.abs().idxmax().to_dict()
  emotion2ppc = {e: i for i, e in ppc2emotion.items()}
  print(ppc2emotion)
  print("Plotting frequency and mean left-out rater correlations...")
  corr_mean = corr_per_rater.mean(axis=0)
  corr_mean_ordered = [corr_mean[emotion2ppc[e]] for e in all_emotions]
  df_plot = pd.DataFrame({
      "emotion": all_emotions,
      "agreement": corr_mean_ordered
  })
  df_plot["count"] = df_plot["emotion"].map(
      data[all_emotions].sum(axis=0).to_dict())
  df_plot.sort_values("count", ascending=False, inplace=True)
  df_plot.to_csv(FLAGS.plot_dir + "/emotion_agreements.csv", index=False)
  # Get colors
  norm = plt.Normalize(df_plot["agreement"].min(), df_plot["agreement"].max())
  sm = plt.cm.ScalarMappable(cmap="BuPu", norm=norm)
  sm.set_array([])
  # Generate figure
  fig = plt.figure(dpi=600, figsize=(5, 6))
  ax = sns.barplot(
      data=df_plot,
      y="emotion",
      x="count",
      orient="h",
      hue="agreement",
      palette="BuPu",
      dodge=False,
      edgecolor="black",
      linewidth=1)
  ax.get_legend().remove()
  ax.figure.colorbar(sm)
  plt.text(18000, 31, "Interrater\nCorrelation", ha="center")
  plt.xlabel("Number of Examples")
  plt.ylabel("")
  plt.draw()
  labels = [item.get_text() for item in ax.get_xticklabels()]
  ax.set_xticklabels(["%dk" % (int(int(label) / 1000)) for label in labels])
  plt.tight_layout()
  fig.savefig(
      FLAGS.plot_dir + "/label_distr_agreement.pdf",
      dpi=600,
      format="pdf",
      bbox_inches="tight")
  print("Generating t-SNE plot...")
  # Get PPC scores for all examples
  all_ratings_avg = Demean(all_ratings_avg)  # demean all ratings
  ppc_scores = all_ratings_avg.dot(w_vari)  # project onto ppcs
  ppc_scores_abs = np.absolute(ppc_scores)
  # Load maximally distinct colors
  colors = pd.read_csv(
      FLAGS.rgb_colors, sep="\t", header=None, names=np.arange(3))
  # Set colors (todo(ddemszky): add names to colors in file)
  palette_rgb = colors.values
  with open(FLAGS.emotion_color_order) as f:
    color_order = f.read().splitlines()
  ppc2color = {emotion2ppc[e]: i for i, e in enumerate(color_order)}
  # get rgb value for each example based on weighted average of top emotions
  rgb_vals = []
  hex_vals = []
  top_categories = []
  threshold = 0.5  # exclude points not loading on any of the top 10 categories
  counter = 0
  rgb_max = 255
  other_color = palette_rgb[len(all_emotions), :]
  for i, scores in enumerate(ppc_scores_abs):
    # Up to two components exceeding the threshold define the point's label.
    top_ppcs = [
        idx for idx in (-scores).argsort()[:2] if scores[idx] > threshold
    ]
    top_emotions = ",".join([ppc2emotion[idx] for idx in top_ppcs
                            ]) if top_ppcs else "other"
    top_categories.append(top_emotions)
    if len(top_ppcs) < 1:  # doesn't have top emotions from list
      color = other_color  # use grey
      counter += 1
    else:
      # Weighted average of top emotions (square->weighted average->square root)
      color_ids = [ppc2color[idx] for idx in top_ppcs]
      weights = [scores[idx] for idx in top_ppcs]
      # Need to round, otherwise floating point precision issues will result
      # in values slightly above 1
      avg = np.round(
          np.sqrt(
              np.average(
                  np.power(palette_rgb[color_ids] * rgb_max, 2),
                  axis=0,
                  weights=weights)) / rgb_max, 4)
      if (avg > 1).sum() > 0:
        print(avg)
      color = avg
    rgb_vals.append(list(color))
    hex_vals.append("#%02x%02x%02x" %
                    tuple(np.array(color * rgb_max, dtype=int)))
  rgb_vals = np.array(rgb_vals)
  # Create t-SNE model
  tsne_model = TSNE(
      perplexity=30,
      n_components=2,
      n_iter=1000,
      random_state=23,
      learning_rate=500,
      init="pca")
  new_values = tsne_model.fit_transform(ppc_scores)
  x = []
  y = []
  for value in new_values:
    x.append(value[0])
    y.append(value[1])
  # Put data in dataframe
  df = pd.DataFrame({
      "x": x,
      "y": y,
      "color": hex_vals,
      "label(s)": top_categories,
      "text": texts
  })
  df = df[df["label(s)"] != "other"]
  df["top_label"] = df["label(s)"].str.split(",").str[0]
  # Two selections:
  # - a brush that is active on the top panel
  # - a multi-click that is active on the bottom panel
  brush = alt.selection(type="interval")
  click = alt.selection_multi(encodings=["color"])
  sample = df.sample(5000)  # max 5000 examples can be plotted
  points = alt.Chart(sample).mark_point(
      filled=True, size=50).encode(
          x="x:Q",
          y="y:Q",
          color=alt.Color("color", scale=None),
          tooltip=["label(s)", "text"]).properties(
              width=700, height=600).add_selection(brush)
  # Bottom panel is a bar chart
  bars = alt.Chart(sample).mark_bar().encode(
      x="count()",
      y="top_label:N",
      color=alt.condition(click, alt.Color("color:N", scale=None),
                          alt.value("lightgray")),
  ).transform_filter(brush.ref()).properties(
      width=700, selection=click)
  chart = alt.vconcat(
      points, bars, data=sample, title="t-SNE Projection of Examples")
  chart.save(FLAGS.plot_dir + "/tsne.html", format="html")
if __name__ == "__main__":
  app.run(main)
| 33.295669 | 108 | 0.670023 |
81c978bb7282a97e03eaf94c47e7d18a842eaf7b | 5,823 | py | Python | gcn/train.py | NicolasMeseguer/gcn | 6323856b5c98e7eb59079479d2e11840e0e04fe0 | [
"MIT"
] | null | null | null | gcn/train.py | NicolasMeseguer/gcn | 6323856b5c98e7eb59079479d2e11840e0e04fe0 | [
"MIT"
] | null | null | null | gcn/train.py | NicolasMeseguer/gcn | 6323856b5c98e7eb59079479d2e11840e0e04fe0 | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import sys
import subprocess
from gcn.utils import *
from gcn.models import GCN, MLP
# Set random seed for reproducible runs.
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings (TF1-style command-line flags).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'cora', 'Dataset string.')  # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_string('model', 'gcn', 'Model string.')  # 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')
flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
# Load data
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)
# METIS FLAG: when set to 1, export the adjacency matrix in METIS .graph
# format, partition it with the external METIS scripts, and convert the output.
metis = 1
if(metis==1):
    # Parse data to METIS: count vertices/edges, then write the header line
    # "<num_vert> <num_edges>" followed by one adjacency line per vertex.
    print("Starting to parse adj_matrix to .graph file...")
    adj_array = adj.toarray()
    num_vert = 0
    num_edges = 0
    bool_flag = 0
    for vert in adj_array:
        for edg in vert:
            if(edg == 1):
                bool_flag = 1
                num_edges+=1
        # NOTE(review): bool_flag is never reset per vertex, so after the
        # first vertex with an edge every subsequent vertex is counted, even
        # isolated ones — confirm whether a per-vertex reset was intended.
        if(bool_flag == 1):
            num_vert += 1
    # edges count as a mirror (the adjacency matrix is symmetric, so each
    # undirected edge was counted twice above)
    num_edges = int(num_edges/2)
    graphFile = open("gcn_dataset.graph", "w")
    graphFile.write(str(num_vert) + " " + str(num_edges))
    graphFile.write("\n")
    vert_counter = 0
    edg_idx = 1
    for vert in adj_array:
        vert_counter += 1
        # Write the 1-based indices of this vertex's neighbors.
        for edg in vert:
            if(edg == 1):
                graphFile.write(str(edg_idx) + " ")
            edg_idx += 1
        edg_idx = 1
        if(vert_counter < num_vert):
            graphFile.write("\n")
    graphFile.close()
    # METIS Call (delegated to an external shell script).
    num_partitions = 4
    print("Parse finalized. Calling METIS to do the partitioning...")
    subprocess.call("./METISscript.sh " + str(num_partitions), shell=True)
    metis_file_name = "gcn_dataset.graph.part." + str(num_partitions)
    # Convert output of METIS
    print("METIS Partition finalized. Calling converter...")
    subprocess.call("./METISConverter.sh " + metis_file_name, shell=True)
# Some preprocessing: row-normalize features and build the model's support
# (normalized adjacency or Chebyshev polynomials).
features = preprocess_features(features)
if FLAGS.model == 'gcn':
    support = [preprocess_adj(adj)]
    num_supports = 1
    model_func = GCN
elif FLAGS.model == 'gcn_cheby':
    support = chebyshev_polynomials(adj, FLAGS.max_degree)
    num_supports = 1 + FLAGS.max_degree
    model_func = GCN
elif FLAGS.model == 'dense':
    support = [preprocess_adj(adj)]  # Not used
    num_supports = 1
    model_func = MLP
else:
    raise ValueError('Invalid argument for model: ' + str(FLAGS.model))
# Define placeholders (TF1 graph-mode inputs fed at each step).
placeholders = {
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
    'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
}
# Create model
print("\n*******************************\n(1) train.py: function model_func() for the creation of the model.\n*******************************\n\n")
model = model_func(placeholders, input_dim=features[2][1], logging=True)
# Initialize session
print("\n*******************************\n(2) train.py: function initialize() session.\n*******************************\n\n")
sess = tf.Session()
def evaluate(features, support, labels, mask, placeholders):
    """Run one evaluation pass and return (loss, accuracy, wall_time_seconds).

    Uses the enclosing module's `sess` and `model`; `mask` selects which
    nodes contribute to the masked loss/accuracy.
    """
    t_test = time.time()
    feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
    outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
    return outs_val[0], outs_val[1], (time.time() - t_test)
# Init variables
print("\n*******************************\n(3) train.py: function initialize for all variables in session().\n*******************************\n\n")
sess.run(tf.global_variables_initializer())
cost_val = []  # validation-loss history, used for early stopping
# Train model
print("\n*******************************\n(4) train.py: trainning model (for-loop).\n*******************************\n\n")
for epoch in range(FLAGS.epochs):
    t = time.time()
    # Construct feed dictionary
    feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)
    feed_dict.update({placeholders['dropout']: FLAGS.dropout})
    # Training step
    outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)
    # Validation
    cost, acc, duration = evaluate(features, support, y_val, val_mask, placeholders)
    cost_val.append(cost)
    # Print results
    print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
          "train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
          "val_acc=", "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t))
    # Stop when validation loss exceeds the mean of the last
    # FLAGS.early_stopping epochs.
    if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
        print("Early stopping...")
        break
print("Optimization Finished!")
# Testing
test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, placeholders)
print("Test set results:", "cost=", "{:.5f}".format(test_cost),
      "accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration))
| 35.506098 | 147 | 0.64417 |
0037aca82fc46cf766ae1f4c006baf28ac626176 | 12,709 | py | Python | cluster_pack/uploader.py | isabella232/cluster-pack | 0052e0e2d37a209f9983bdb8117e423f840dafa0 | [
"Apache-2.0"
] | null | null | null | cluster_pack/uploader.py | isabella232/cluster-pack | 0052e0e2d37a209f9983bdb8117e423f840dafa0 | [
"Apache-2.0"
] | 1 | 2022-02-21T12:33:11.000Z | 2022-02-21T12:33:11.000Z | cluster_pack/uploader.py | isabella232/cluster-pack | 0052e0e2d37a209f9983bdb8117e423f840dafa0 | [
"Apache-2.0"
] | null | null | null | import getpass
import hashlib
import json
import logging
import os
import sys
import pathlib
import tempfile
from typing import (
Tuple,
Dict,
Collection,
List,
Any
)
from urllib import parse, request
from pex.pex_info import PexInfo
from wheel_filename import parse_wheel_filename
from cluster_pack import filesystem, packaging
_logger = logging.getLogger(__name__)
def _get_archive_metadata_path(package_path: str) -> str:
url = parse.urlparse(package_path)
return url._replace(path=str(pathlib.Path(url.path).with_suffix('.json'))).geturl()
def _is_archive_up_to_date(package_path: str,
                           current_packages_list: List[str],
                           resolved_fs: Any = None
                           ) -> bool:
    """Check whether the uploaded archive matches the current package list.

    Compares the package list stored in the archive's sidecar metadata file
    (see _get_archive_metadata_path) with current_packages_list.

    Args:
        package_path: remote path of the archive.
        current_packages_list: requirement strings describing the local env.
        resolved_fs: filesystem object used to access package_path.

    Returns:
        True iff both the archive and its metadata exist and the stored list
        equals the current one (order-insensitive).
    """
    if not resolved_fs.exists(package_path):
        return False
    archive_meta_data = _get_archive_metadata_path(package_path)
    # No metadata next to the archive: we cannot prove it is current.
    if not resolved_fs.exists(archive_meta_data):
        _logger.debug(f'metadata for archive {package_path} does not exist')
        return False
    with resolved_fs.open(archive_meta_data, "rb") as fd:
        packages_installed = json.loads(fd.read())
    return sorted(packages_installed) == sorted(current_packages_list)
def _dump_archive_metadata(package_path: str,
                           current_packages_list: List[str],
                           resolved_fs: Any = None
                           ) -> None:
    """Write the package list as a JSON sidecar file next to the archive.

    The file is later read by _is_archive_up_to_date to decide whether a
    re-upload is needed.

    Args:
        package_path: remote path of the archive.
        current_packages_list: requirement strings to record.
        resolved_fs: filesystem object used to access package_path.
    """
    archive_meta_data = _get_archive_metadata_path(package_path)
    with tempfile.TemporaryDirectory() as tempdir:
        tempfile_path = os.path.join(tempdir, "metadata.json")
        with open(tempfile_path, "w") as fd:
            fd.write(json.dumps(current_packages_list, sort_keys=True, indent=4))
        # Remove any stale metadata before uploading the fresh copy.
        if resolved_fs.exists(archive_meta_data):
            resolved_fs.rm(archive_meta_data)
        resolved_fs.put(tempfile_path, archive_meta_data)
def upload_zip(
        zip_file: str,
        package_path: str = None,
        force_upload: bool = False,
        fs_args: Dict[str, Any] = {}
) -> str:
    """Upload an existing archive (zip/pex) to the target filesystem.

    Args:
        zip_file: local path or http URL of the archive.
        package_path: destination path; derived from the packer when None.
        force_upload: upload even if an identical archive is already there.
        fs_args: extra arguments for special file systems (like S3).

    Returns:
        The resolved package_path of the uploaded archive.
    """
    packer = packaging.detect_packer_from_file(zip_file)
    package_path, _, _ = packaging.detect_archive_names(packer, package_path)
    resolved_fs, _ = filesystem.resolve_filesystem_and_path(package_path, **fs_args)
    with tempfile.TemporaryDirectory() as tempdir:
        parsed_url = parse.urlparse(zip_file)
        if parsed_url.scheme == "http":
            # Remote archives are first fetched locally, then pushed to the
            # target filesystem.
            tmp_zip_file = os.path.join(tempdir, os.path.basename(parsed_url.path))
            request.urlretrieve(zip_file, tmp_zip_file)
            zip_file = tmp_zip_file
        _upload_zip(zip_file, package_path, resolved_fs, force_upload)
        return package_path
def upload_env(
        package_path: str = None,
        packer: packaging.Packer = None,
        additional_packages: Dict[str, str] = {},
        ignored_packages: Collection[str] = [],
        force_upload: bool = False,
        include_editable: bool = False,
        fs_args: Dict[str, Any] = {}
) -> Tuple[str, str]:
    """Package the current Python environment and upload it.

    When running from a regular virtual env the env is packed with `packer`
    and uploaded; when already running from a pex, the current pex file
    itself is uploaded.

    Args:
        package_path: destination path; derived from the packer when None.
        additional_packages: packages to force into the environment.
        ignored_packages: package names to exclude from the environment.
        force_upload: upload even when the remote archive is up to date.
        include_editable: also package editable (pip install -e) requirements.
        fs_args: extra arguments for special file systems (like S3).

    Returns:
        Tuple (package_path, env_name).
    """
    if packer is None:
        packer = packaging.detect_packer_from_env()
    package_path, env_name, pex_file = packaging.detect_archive_names(packer, package_path)
    resolved_fs, _ = filesystem.resolve_filesystem_and_path(package_path, **fs_args)
    if not packaging._running_from_pex():
        _upload_env_from_venv(
            package_path, packer,
            additional_packages, ignored_packages,
            resolved_fs,
            force_upload,
            include_editable
        )
    else:
        # Already inside a pex: upload that pex directly.
        _upload_zip(pex_file, package_path, resolved_fs, force_upload)
    return (package_path,
            env_name)
def upload_spec(
        spec_file: str,
        package_path: str = None,
        force_upload: bool = False,
        fs_args: Dict[str, Any] = {}
) -> str:
    """Upload an environment from a spec file

    :param spec_file: the spec file, must be requirements.txt or conda.yaml
    :param package_path: the path where to upload the package
    :param force_upload: whether the cache should be cleared
    :param fs_args: specific arguments for special file systems (like S3)
    :return: package_path
    """
    packer = packaging.detect_packer_from_spec(spec_file)
    # Default destination: the current user's envs dir on the default fs.
    if not package_path:
        package_path = (f"{packaging.get_default_fs()}/user/{getpass.getuser()}"
                        f"/envs/{_unique_filename(spec_file, packer)}")
    elif not package_path.endswith(packer.extension()):
        package_path = os.path.join(package_path, _unique_filename(spec_file, packer))
    resolved_fs, path = filesystem.resolve_filesystem_and_path(package_path, **fs_args)
    # The sha1 of the spec content acts as the cache key: the env is only
    # rebuilt and re-uploaded when the spec file changes.
    hash = _get_hash(spec_file)
    _logger.info(f"Packaging from {spec_file} with hash={hash}")
    reqs = [hash]
    if force_upload or not _is_archive_up_to_date(package_path, reqs, resolved_fs):
        _logger.info(
            f"Zipping and uploading your env to {package_path}"
        )
        with tempfile.TemporaryDirectory() as tempdir:
            archive_local = packer.pack_from_spec(
                spec_file=spec_file,
                output=f"{tempdir}/{packer.env_name()}.{packer.extension()}")
            dir = os.path.dirname(package_path)
            if not resolved_fs.exists(dir):
                resolved_fs.mkdir(dir)
            resolved_fs.put(archive_local, package_path)
            _dump_archive_metadata(package_path, reqs, resolved_fs)
    else:
        _logger.info(f"{package_path} already exists")
    return package_path
def _unique_filename(spec_file: str, packer: packaging.Packer) -> str:
repo = os.path.basename(os.path.dirname(spec_file))
if repo:
repo = "_" + repo
return f"cluster_pack{repo}.{packer.extension()}"
def _get_hash(spec_file: str) -> str:
with open(spec_file) as f:
return hashlib.sha1(f.read().encode()).hexdigest()
def _upload_zip(
        zip_file: str, package_path: str,
        resolved_fs: Any = None, force_upload: bool = False
) -> None:
    """Upload a local archive to package_path, skipping identical pex files.

    For pex archives the code hash embedded in PEX-INFO is compared with the
    already-uploaded archive and the upload is skipped when they match
    (unless force_upload). Stale sidecar metadata is removed after upload.
    """
    packer = packaging.detect_packer_from_file(zip_file)
    if packer == packaging.PEX_PACKER and resolved_fs.exists(package_path):
        with tempfile.TemporaryDirectory() as tempdir:
            # Fetch the remote pex locally to read its PEX-INFO code hash.
            local_copy_path = os.path.join(tempdir, os.path.basename(package_path))
            resolved_fs.get(package_path, local_copy_path)
            info_from_storage = PexInfo.from_pex(local_copy_path)
            into_to_upload = PexInfo.from_pex(zip_file)
            if not force_upload and info_from_storage.code_hash == into_to_upload.code_hash:
                _logger.info(f"skip upload of current {zip_file}"
                             f" as it is already uploaded on {package_path}")
                return
    _logger.info(f"upload current {zip_file} to {package_path}")
    dir = os.path.dirname(package_path)
    if not resolved_fs.exists(dir):
        resolved_fs.mkdir(dir)
    resolved_fs.put(zip_file, package_path)
    # Remove previous metadata
    archive_meta_data = _get_archive_metadata_path(package_path)
    if resolved_fs.exists(archive_meta_data):
        resolved_fs.rm(archive_meta_data)
def _handle_packages(
current_packages: Dict[str, str],
additional_packages: Dict[str, str] = {},
ignored_packages: Collection[str] = []
) -> None:
if len(additional_packages) > 0:
additional_package_names = list(additional_packages.keys())
current_packages_names = list(current_packages.keys())
for name in current_packages_names:
for additional_package_name in additional_package_names:
if name in additional_package_name:
_logger.debug(f"Replace existing package {name} by {additional_package_name}")
current_packages.pop(name)
current_packages.update(additional_packages)
if len(ignored_packages) > 0:
for name in ignored_packages:
if name in current_packages:
_logger.debug(f"Remove package {name}")
current_packages.pop(name)
def _upload_env_from_venv(
        package_path: str,
        packer: packaging.Packer = packaging.PEX_PACKER,
        additional_packages: Dict[str, str] = {},
        ignored_packages: Collection[str] = [],
        resolved_fs: Any = None,
        force_upload: bool = False,
        include_editable: bool = False
) -> None:
    """Pack the current virtual env and upload the archive to package_path.

    The upload is skipped when the metadata stored next to the remote archive
    already matches the current requirements. If the C_PACK_ENV_FALLBACK_PATH
    environment variable points to a pre-built pex whose embedded
    requirements match the current env, that pex is reused instead of
    packing a new one.

    Args:
        package_path: destination path of the archive.
        packer: packer used to build the archive.
        additional_packages: packages to force into the environment.
        ignored_packages: package names to exclude from the environment.
        resolved_fs: filesystem object for package_path.
        force_upload: rebuild and re-upload even when up to date.
        include_editable: also package editable (pip install -e) requirements.
    """
    executable = packaging.get_current_pex_filepath() \
        if packaging._running_from_pex() else sys.executable
    current_packages = packaging.get_non_editable_requirements(executable)
    _handle_packages(
        current_packages,
        additional_packages,
        ignored_packages
    )
    reqs = packaging.format_requirements(current_packages)
    _logger.debug(f"Packaging current_packages={reqs}")
    # Fast path: the remote archive already matches these requirements.
    if not force_upload and _is_archive_up_to_date(package_path, reqs, resolved_fs):
        _logger.info(f"{package_path} already exists")
        return
    with tempfile.TemporaryDirectory() as tempdir:
        env_copied_from_fallback_location = False
        local_package_path = f'{tempdir}/{packer.env_name()}.{packer.extension()}'
        local_fs, local_package_path = filesystem.resolve_filesystem_and_path(local_package_path)
        fallback_path = os.environ.get('C_PACK_ENV_FALLBACK_PATH')
        if not force_upload and fallback_path and packer.extension() == 'pex':
            _logger.info(f"Copying pre-built env from {fallback_path} to {local_package_path}")
            if fallback_path.startswith("http://") or fallback_path.startswith("https://"):
                request.urlretrieve(fallback_path, local_package_path)
            else:
                fallback_fs, fallback_path = filesystem.resolve_filesystem_and_path(fallback_path)
                fallback_fs.get(fallback_path, local_package_path)
            _logger.info(f'Checking requirements in {local_package_path}')
            pex_info = PexInfo.from_pex(local_package_path)
            # Compare the pre-built pex's embedded requirements with the
            # current env, ignoring packaging tools and normalizing names.
            req_from_pex = _filter_out_requirements(
                _sort_requirements(
                    _normalize_requirements(
                        _format_pex_requirements(pex_info)
                    )
                )
            )
            req_from_venv = _filter_out_requirements(
                _sort_requirements(
                    _normalize_requirements(reqs)
                )
            )
            if (req_from_pex == req_from_venv):
                env_copied_from_fallback_location = True
                _dump_archive_metadata(local_package_path, reqs, local_fs)
                _logger.info('Env copied from fallback location')
            else:
                _logger.warning(f'Requirements not met for pre-built {local_package_path}')
                _logger.info(f'Requirements from pex {req_from_pex}')
                _logger.info(f'Requirements from venv {req_from_venv}')
        if not env_copied_from_fallback_location:
            # No usable pre-built env: pack the current env from scratch.
            if include_editable:
                editable_requirements = packaging.get_editable_requirements(executable)
            else:
                editable_requirements = {}
            _logger.info(f"Generating and zipping your env to {local_package_path}")
            local_package_path = packer.pack(
                output=local_package_path,
                reqs=reqs,
                additional_packages=additional_packages,
                ignored_packages=ignored_packages,
                editable_requirements=editable_requirements
            )
        dir = os.path.dirname(package_path)
        if not resolved_fs.exists(dir):
            resolved_fs.mkdir(dir)
        _logger.info(f'Uploading env at {local_package_path} to {package_path}')
        resolved_fs.put(local_package_path, package_path)
        _dump_archive_metadata(package_path, reqs, resolved_fs)
def _sort_requirements(a: List[str]) -> List[str]:
return sorted([item.lower() for item in a])
def _format_pex_requirements(pex_info: PexInfo) -> List[str]:
    """Render the distributions embedded in a pex as 'name==version' strings."""
    wheels = [parse_wheel_filename(dist) for dist in pex_info.distributions.keys()]
    return [f"{wheel.project}=={wheel.version}" for wheel in wheels]
def _normalize_requirements(reqs: List[str]) -> List[str]:
return [req.replace('_', '-') for req in reqs]
def _filter_out_requirements(reqs: List[str]) -> List[str]:
def _keep(req: str) -> bool:
return all([d not in req for d in ["wheel", "pip", "setuptools"]])
return [req for req in reqs if _keep(req)]
| 37.937313 | 99 | 0.645527 |
ed72152785c8606b3a21daf78cf5eb64074db90b | 433 | py | Python | taskbuster/settings/tb_dev.py | bas-innovations/taskbuster_tutorial | c27c6c616e289aa960f1f87af9360340863957b2 | [
"MIT"
] | null | null | null | taskbuster/settings/tb_dev.py | bas-innovations/taskbuster_tutorial | c27c6c616e289aa960f1f87af9360340863957b2 | [
"MIT"
] | null | null | null | taskbuster/settings/tb_dev.py | bas-innovations/taskbuster_tutorial | c27c6c616e289aa960f1f87af9360340863957b2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Local development database. Credentials come from environment variables;
# get_env_variable is provided by the base settings via the star import.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': get_env_variable('DATABASE_NAME'),
        'USER': get_env_variable('DATABASE_USER'),
        'PASSWORD': get_env_variable('DATABASE_PASSWORD'),
        'HOST': 'localhost',
        'PORT': '',  # empty string selects the driver's default port
    }
}
0e445b0f144db9c61d97b3cb63d34e840e8046c1 | 8,271 | py | Python | tf_agents/utils/example_encoding_dataset.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | 1 | 2021-09-22T12:04:03.000Z | 2021-09-22T12:04:03.000Z | tf_agents/utils/example_encoding_dataset.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | null | null | null | tf_agents/utils/example_encoding_dataset.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for for interacting with datasets of encoded examples of TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import trajectory
from tf_agents.utils import eager_utils
from tf_agents.utils import example_encoding
from tf_agents.utils import nest_utils
from tensorflow.core.protobuf import struct_pb2 # pylint:disable=g-direct-tensorflow-import # TF internal
# File extension used when saving data specs to file
_SPEC_FILE_EXTENSION = '.spec'
def encode_spec_to_file(output_path, tensor_data_spec):
"""Save a tensor data spec to a tfrecord file.
Args:
output_path: The path to the TFRecord file which will contain the spec.
tensor_data_spec: Nested list/tuple or dict of TensorSpecs, describing the
shape of the non-batched Tensors.
"""
spec_proto = tensor_spec.to_proto(tensor_data_spec)
with tf.io.TFRecordWriter(output_path) as writer:
writer.write(spec_proto.SerializeToString())
def parse_encoded_spec_from_file(input_path):
"""Returns the tensor data spec stored at a path.
Args:
input_path: The path to the TFRecord file which contains the spec.
Returns:
`TensorSpec` nested structure parsed from the TFRecord file.
Raises:
IOError: File at input path does not exist.
"""
if not tf.io.gfile.exists(input_path):
raise IOError('Could not find spec file at %s.' % input_path)
dataset = tf.data.TFRecordDataset(input_path, buffer_size=1)
dataset_iterator = eager_utils.dataset_iterator(dataset)
signature_proto_string = eager_utils.get_next(dataset_iterator)
if tf.executing_eagerly():
signature_proto = struct_pb2.StructuredValue.FromString(
signature_proto_string.numpy())
else:
# In non-eager mode a session must be run in order to get the value
with tf.Session() as sess:
signature_proto_string_value = sess.run(signature_proto_string)
signature_proto = struct_pb2.StructuredValue.FromString(
signature_proto_string_value)
return tensor_spec.from_proto(signature_proto)
class TFRecordObserver(object):
"""Observer for writing experience to TFRecord file.
To use this observer, create an instance using a trajectory spec object
and a dataset path:
trajectory_spec = agent.collect_data_spec
dataset_path = '/tmp/my_example_dataset'
tfrecord_observer = TFRecordObserver(dataset_path, trajectory_spec)
Then add it to the observers kwarg for the driver:
collect_op = MyDriver(
...
observers=[..., tfrecord_observer],
num_steps=collect_steps_per_iteration).run()
*Note*: Depending on your driver you may have to do
`common.function(tfrecord_observer)` to handle the use of a callable with no
return within a `tf.group` operation.
"""
def __init__(self, output_path, tensor_data_spec, py_mode=False):
"""Creates observer object.
Args:
output_path: The path to the TFRecords file.
tensor_data_spec: Nested list/tuple or dict of TensorSpecs, describing the
shape of the non-batched Tensors.
py_mode: Whether the observer is being used in a py_driver.
Raises:
ValueError: if the tensors and specs have incompatible dimensions or
shapes.
"""
self._py_mode = py_mode
self._array_data_spec = tensor_spec.to_nest_array_spec(tensor_data_spec)
self._encoder = example_encoding.get_example_serializer(
self._array_data_spec)
# Two output files: a tfrecord file and a file with the serialized spec
self.output_path = output_path
tf.io.gfile.makedirs(os.path.dirname(self.output_path))
self._writer = tf.io.TFRecordWriter(self.output_path)
logging.info('Writing dataset to TFRecord at %s', self.output_path)
# Save the tensor spec used to write the dataset to file
spec_output_path = self.output_path + _SPEC_FILE_EXTENSION
encode_spec_to_file(spec_output_path, tensor_data_spec)
def write(self, *data):
"""Encodes and writes (to file) a batch of data.
Args:
*data: (unpacked) list/tuple of batched np.arrays.
"""
if self._py_mode:
structured_data = data
else:
data = nest_utils.unbatch_nested_array(data)
structured_data = tf.nest.pack_sequence_as(self._array_data_spec, data)
self._writer.write(self._encoder(structured_data))
def flush(self):
"""Manually flush TFRecord writer."""
self._writer.flush()
def close(self):
"""Close the TFRecord writer."""
self._writer.close()
logging.info('Closing TFRecord file at %s', self.output_path)
def __del__(self):
self.close()
def __call__(self, data):
"""If not in py_mode Wraps write() into a TF op for eager execution."""
if self._py_mode:
self.write(data)
else:
flat_data = tf.nest.flatten(data)
tf.numpy_function(self.write, flat_data, [], name='encoder_observer')
def load_tfrecord_dataset(dataset_files, buffer_size=1000, as_experience=False,
as_trajectories=False, add_batch_dim=True,
decoder=None):
"""Loads a TFRecord dataset from file, sequencing samples as Trajectories.
Args:
dataset_files: List of paths to one or more datasets
buffer_size: (int) number of bytes in the read buffer. 0 means no buffering.
as_experience: (bool) Returns dataset as a pair of Trajectories. Samples
will be shaped as if they had been pulled from a replay buffer with
`num_steps=2`. These samples can be fed directly to agent's `train`
method.
as_trajectories: (bool) Remaps the data into trajectory objects. This should
be enabled when the resulting types must be trajectories as expected by
agents.
add_batch_dim: (bool) If True the data will have a batch dim of 1 to conform
with the expected tensor batch convention. Set to false if you want to
batch the data on your own.
decoder: Optional, a custom decoder to use rather than using the default
spec path.
Returns:
A dataset of type tf.data.Dataset. Samples follow the dataset's spec nested
structure. Samples are generated with a leading batch dim of 1
(or 2 if as_experience is enabled).
Raises:
IOError: One or more of the dataset files does not exist.
"""
if not decoder:
specs = []
for dataset_file in dataset_files:
spec_path = dataset_file + _SPEC_FILE_EXTENSION
dataset_spec = parse_encoded_spec_from_file(spec_path)
specs.append(dataset_spec)
if not all([dataset_spec == spec for spec in specs]):
raise IOError('One or more of the encoding specs do not match.')
decoder = example_encoding.get_example_decoder(specs[0])
logging.info('Loading TFRecord dataset...')
dataset = tf.data.TFRecordDataset(
dataset_files,
buffer_size=buffer_size,
num_parallel_reads=len(dataset_files))
def decode_fn(proto):
"""Decodes a proto object."""
return decoder(proto)
def decode_and_batch_fn(proto):
"""Decodes a proto object, and batch output tensors."""
sample = decoder(proto)
return nest_utils.batch_nested_tensors(sample)
if as_experience:
dataset = dataset.map(decode_fn).batch(2, drop_remainder=True)
elif add_batch_dim:
dataset = dataset.map(decode_and_batch_fn)
else:
dataset = dataset.map(decode_fn)
if as_trajectories:
as_trajectories_fn = lambda sample: trajectory.Trajectory(*sample)
dataset = dataset.map(as_trajectories_fn)
return dataset
| 36.276316 | 107 | 0.735703 |
f0a17a4e8fca591e7b7d85f6f158f09c0faf59f7 | 14,941 | py | Python | .executor/weather.py | gh0zialfat1h/dotfiles | d9b3f93ea6301ec65ed8140b6c6180d7166f3623 | [
"MIT"
] | 3 | 2021-06-02T04:54:09.000Z | 2021-06-06T04:29:01.000Z | .executor/weather.py | 0xft1h/dotfiles | d9b3f93ea6301ec65ed8140b6c6180d7166f3623 | [
"MIT"
] | null | null | null | .executor/weather.py | 0xft1h/dotfiles | d9b3f93ea6301ec65ed8140b6c6180d7166f3623 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
This script retrieves weather data from http://openweathermap.org © 2012 — 2018 OpenWeatherMap, Inc.
1. Obtain API key at http://openweathermap.org;
2. find your city ID at https://openweathermap.org/find;
3. enter both values in the ~/t2ecol/weatherrc file;
4. edit other values if necessary.
README: https://github.com/nwg-piotr/tint2-executors/tree/master/arch-package
Author: Piotr Miller
e-mail: nwg.piotr@gmail.com
Website: http://nwg.pl
Project: https://github.com/nwg-piotr/tint2-executors
License: GPL3
Arguments to override some ~/t2ecol/weatherrc config file values:
[-I<items>] [-A<api_key>] [-C<city_id>] [-U<metric>|<imperial>] [-L<lang>] [-D[<city_id>]]
items: [s]hort description, [d]escription, [t]emperature, [p]ressure, [h]umidity, [w]ind, [c]ity ID
-D[<city_id>] shows details as a notification.
Dependencies: wget
"""
import subprocess
import json
from collections import namedtuple
import locale
import os
import sys
import re
import time
def main():
t2ec_dir = os.getenv("HOME") + "/.t2ecol"
response = None
name = None
img_path = "~/PycharmProjects/tint2-executors/images/" # todo change to /usr/share/t2ec/
settings = Settings()
if len(sys.argv) > 1:
for i in range(1, len(sys.argv)):
if sys.argv[i].upper() == '-H' or sys.argv[i].upper() == '--HELP':
print_help()
exit(0)
if sys.argv[i].upper() == '-N':
name = settings.dict["_weather"] + ":"
if sys.argv[i].upper().startswith('-M'):
name = sys.argv[i][2::]
if sys.argv[i].startswith("-I"):
settings.items = sys.argv[i][2::]
if sys.argv[i].startswith("-A"):
settings.api_key = sys.argv[i][2::]
if sys.argv[i].startswith("-C"):
settings.city_id = sys.argv[i][2::]
if sys.argv[i].startswith("-U"):
settings.units = sys.argv[i][2::]
if sys.argv[i].startswith("-L"):
settings.lang = sys.argv[i][2::]
if sys.argv[i].startswith("-D"):
c_id = sys.argv[i][2::]
if c_id:
show_details(t2ec_dir, c_id)
else:
show_details(t2ec_dir, settings.city_id)
if settings.img_path is not None:
img_path = settings.img_path
if name is not None:
os.system("echo Checking...")
else:
os.system("echo /usr/share/t2ec/refresh.svg")
os.system("echo ''")
request_url = "http://api.openweathermap.org/data/2.5/weather?id=" + settings.city_id + "&appid=" + \
settings.api_key + "&units=" + settings.units + "&lang=" + settings.lang
try:
response = subprocess.check_output("wget -qO- '" + request_url + "'", shell=True)
except subprocess.CalledProcessError as exitcode:
if name is None:
os.system("echo /usr/share/t2ec/refresh.svg")
os.system("echo Exit code: " + str(exitcode.returncode))
exit(0)
if response is not None:
# Convert JSON to object - after DS. at https://stackoverflow.com/a/15882054/4040598
owm = json.loads(response, object_hook=lambda d: namedtuple('t', d.keys())(*d.values()))
icons = {'01d': 'ow-01d.svg',
'01n': 'ow-01n.svg',
'02d': 'ow-02d.svg',
'02n': 'ow-02n.svg',
'03d': 'ow-03d.svg',
'03n': 'ow-03d.svg',
'04d': 'ow-04d.svg',
'04n': 'ow-04d.svg',
'09d': 'ow-09d.svg',
'09n': 'ow-09d.svg',
'10d': 'ow-10d.svg',
'10n': 'ow-10n.svg',
'11d': 'ow-11d.svg',
'11n': 'ow-11d.svg',
'13d': 'ow-13d.svg',
'13n': 'ow-13d.svg',
'50d': 'ow-50d.svg',
'50n': 'ow-50d.svg'}
if owm.cod == 200:
# Prepare panel items
icon = "/usr/share/t2ec/refresh.svg"
try:
icon = img_path + icons[str(getattr(owm.weather[0], "icon"))]
except KeyError:
pass
city, s_desc, desc, temp, pressure, humidity, wind, deg, sunrise, sunset, cloudiness \
= None, None, None, None, None, None, None, None, None, None, None
try:
city = str(owm.name + ", " + getattr(owm.sys, "country"))
except AttributeError:
pass
try:
s_desc = str(getattr(owm.weather[0], "main"))
except AttributeError:
pass
try:
desc = str(getattr(owm.weather[0], "description"))
except AttributeError:
pass
unit = "℉" if settings.units == "imperial" else "℃"
try:
temp = str(round(float(str(getattr(owm.main, "temp"))), 1)) + unit
except AttributeError:
pass
try:
pressure = str(int(round(float(str(getattr(owm.main, "pressure"))), 0))) + " hpa"
except AttributeError:
pass
try:
humidity = str(int(round(float(str(getattr(owm.main, "humidity"))), 0))) + "%"
except AttributeError:
pass
unit = " m/h" if settings.units == "imperial" else " m/s"
try:
wind = str(getattr(owm.wind, "speed")) + unit
except AttributeError:
pass
try:
deg = str(getattr(owm.wind, "deg"))
except AttributeError:
pass
if deg is not None:
wind += ", " + wind_dir(float(deg))
# Values below will only be used in the details view (notification)
try:
sunrise = time.strftime('%H:%M', time.localtime(getattr(owm.sys, "sunrise")))
except AttributeError:
pass
try:
sunset = time.strftime('%H:%M', time.localtime(getattr(owm.sys, "sunset")))
except AttributeError:
pass
try:
cloudiness = str(getattr(owm.clouds, "all")) + "%"
except AttributeError:
pass
output = ""
if name is None:
os.system("echo " + icon)
else:
output += name
for i in range(len(settings.items)):
if settings.items[i] == "c" and city is not None:
output += " " + city + " "
if settings.items[i] == "s" and s_desc is not None:
output += " " + s_desc + " "
if settings.items[i] == "d" and desc is not None:
output += " " + desc + " "
if settings.items[i] == "t" and temp is not None:
output += " " + temp + " "
if settings.items[i] == "p" and pressure is not None:
output += " " + pressure + " "
if settings.items[i] == "h" and humidity is not None:
output += " " + humidity + " "
if settings.items[i] == "w" and wind is not None:
output += " " + wind + " "
print(re.sub(' +', ' ', output).strip())
details = icon + "\n"
if city is not None:
details += settings.dict["_in_weather"] + " " + city + "\n"
if temp is not None:
details += temp
if desc is not None:
details += ", " + desc
details += "\n"
if wind is not None:
details += settings.dict["_wind"] + ": " + wind + "\n"
if cloudiness is not None:
details += settings.dict["_cloudiness"] + ": " + cloudiness + "\n"
if pressure is not None:
details += settings.dict["_pressure"] + ": " + pressure + "\n"
if humidity is not None:
details += settings.dict["_humidity"] + ": " + humidity + "\n"
if sunrise is not None:
details += settings.dict["_sunrise"] + ": " + sunrise + "\n"
if sunset is not None:
details += settings.dict["_sunset"] + ": " + sunset + "\n"
subprocess.call(["echo '" + str(details) + "' > " + t2ec_dir + "/.weather-" + settings.city_id], shell=True)
else:
if name is None:
os.system("echo /usr/share/t2ec/refresh.svg")
os.system("echo HTTP status: " + str(owm.cod))
exit(0)
def show_details(t2ec_dir, city):
details = ""
try:
details = open(t2ec_dir + "/.weather-" + city, 'r').read().rstrip().splitlines()
except FileNotFoundError:
exit(0)
if details:
icon = details[0]
title = details[1]
message = '\n'.join(details[2:])
message = message.replace("-", "\\-")
os.system("notify-send '" + title + "' " + "'" + message + "' -i " + icon)
def print_help():
print("\nFor multiple executors you may override /home/user/.t2ecol/weatherrc settings with arguments:")
print("\nt2ec --weather [-I<items>] [-A<api_key>] [-C<city_id>] [-U<units>] [-L<lang>]")
print("\n<items>: [s]hort description, [d]escription, [t]emperature, [p]ressure, [h]umidity, [w]ind, [c]ity name")
print("\nTo show details as a notification:")
print("\nt2ec --weather -D[<city_id>]")
print("\nAdd [<city_id>] if varies from weatherrc city_id field.\n")
class Settings:
def __init__(self):
super().__init__()
t2ec_dir = os.getenv("HOME") + "/.t2ecol"
# Create settings file if not found
if not os.path.isdir(t2ec_dir):
os.makedirs(t2ec_dir)
if not os.path.isfile(t2ec_dir + "/weatherrc"):
config = [
"# Items: [s]hort description, [d]escription, [t]emperature, [p]ressure, [h]umidity, [w]ind, [c]ity name\n",
"# API key: go to http://openweathermap.org and get one\n",
"# city_id you will find at http://openweathermap.org/find\n",
"# units may be metric or imperial\n",
"# Uncomment lang to override system $LANG value\n",
"# Uncomment img_path to override built-in icons\n",
"# \n",
"# Delete this file if something goes wrong :)\n",
"# ------------------------------------------------\n",
"items = tpw\n",
"api_key = your_key_here\n",
"city_id = 2643743\n",
"units = metric\n",
"#lang = en\n",
"#img_path = /home/user/my_custom_icons/\n",
"\n",
"# You may translate your output below:\n",
"#\n",
"_weather = Weather\n",
"_in_weather = Weather in\n",
"_wind = Wind\n",
"_cloudiness = Cloudiness\n",
"_pressure = Pressure\n",
"_humidity = Humidity\n",
"_sunrise = Sunrise\n",
"_sunset = Sunset\n"]
subprocess.call(["echo '" + ''.join(config) + "' > " + t2ec_dir + "/weatherrc"], shell=True)
# Set default values
self.items = "tpw"
self.api_key = ""
self.city_id = "2643743" # London, UK
self.units = "metric"
self.lang = None
self.img_path = None
self.dict = {'_weather': 'Weather',
'_in_weather': 'Weather in',
'_wind': 'Wind',
'_cloudiness': 'Cloudiness',
'_pressure': 'Pressure',
'_humidity': 'Humidity',
'_sunrise': 'Sunrise',
'_sunset': 'Sunset'}
# Override defaults with config file values, if found
lines = open(t2ec_dir + "/weatherrc", 'r').read().rstrip().splitlines()
for line in lines:
if not line.startswith("#"):
if line.startswith("items"):
self.items = line.split("=")[1].strip()
elif line.startswith("api_key"):
self.api_key = line.split("=")[1].strip()
elif line.startswith("city_id"):
self.city_id = line.split("=")[1].strip()
elif line.startswith("units"):
self.units = line.split("=")[1].strip()
elif line.startswith("lang"):
self.lang = line.split("=")[1].strip()
elif line.startswith("img_path"):
self.img_path = line.split("=")[1].strip()
elif line.startswith("_weather"):
self.dict["_weather"] = line.split("=")[1].strip()
elif line.startswith("_in_weather"):
self.dict["_in_weather"] = line.split("=")[1].strip()
elif line.startswith("_wind"):
self.dict["_wind"] = line.split("=")[1].strip()
elif line.startswith("_cloudiness"):
self.dict["_cloudiness"] = line.split("=")[1].strip()
elif line.startswith("_pressure"):
self.dict["_pressure"] = line.split("=")[1].strip()
elif line.startswith("_humidity"):
self.dict["_humidity"] = line.split("=")[1].strip()
elif line.startswith("_sunrise"):
self.dict["_sunrise"] = line.split("=")[1].strip()
elif line.startswith("_sunset"):
self.dict["_sunset"] = line.split("=")[1].strip()
if self.lang is None:
loc = locale.getdefaultlocale()[0][:2]
self.lang = loc if loc else "en"
def wind_dir(deg):
if deg >= 348.75 or deg <= 11.25:
return "N"
elif 11.25 < deg <= 33.75:
return "NNE"
elif 33.75 < deg <= 56.25:
return "NE"
elif 56.25 < deg <= 78.75:
return "ENE"
elif 78.75 < deg <= 101.25:
return "E"
elif 101.25 < deg <= 123.75:
return "ESE"
elif 123.75 < deg <= 146.25:
return "SE"
elif 146.25 < deg <= 168.75:
return "SSE"
elif 168.75 < deg <= 191.25:
return "S"
elif 191.25 < deg <= 213.75:
return "SSW"
elif 213.75 < deg <= 236.25:
return "SW"
elif 236.25 < deg <= 258.75:
return "WSW"
elif 258.75 < deg <= 281.25:
return "W"
elif 281.25 < deg <= 303.75:
return "WNW"
elif 303.75 < deg <= 326.25:
return "NW"
elif 326.25 < deg <= 348.75:
return "NNW"
else:
return "WTF"
if __name__ == "__main__":
main()
| 36.00241 | 124 | 0.487986 |
49cafd0edbdd16629d58ea5ea728861782c45f8d | 1,586 | py | Python | montyHall/getDoors.py | code-mage/pythonTest | 34f2c7fe72c527b7ff8fa903ac18265d1f169c3c | [
"WTFPL"
] | null | null | null | montyHall/getDoors.py | code-mage/pythonTest | 34f2c7fe72c527b7ff8fa903ac18265d1f169c3c | [
"WTFPL"
] | null | null | null | montyHall/getDoors.py | code-mage/pythonTest | 34f2c7fe72c527b7ff8fa903ac18265d1f169c3c | [
"WTFPL"
] | 1 | 2021-03-17T15:35:11.000Z | 2021-03-17T15:35:11.000Z | import random
def getGates( ):
gates = [0,0,0]
moneyIndex = random.randint(0,2)
gates[moneyIndex] = 1;
return gates;
def reveal(gates, selected):
if (gates[selected] == 0):
for i in range(0,3):
if (i != selected and gates[i]==0):
return i
else:
index = random.randint(0,1)
for i in range(0,3):
if (gates[i]==0):
if (i == index):
return i;
index+= 1
def chooseGate():
return random.randint(0,2)
def product(gates, selected, revealed, switch):
if (switch):
for i in range(0,3):
if (i != selected and i != revealed):
return gates[i]
else:
return gates[selected]
times = 100000
predictSwitch = 0
for i in range(0,times):
gates = getGates();
firstUserSelection = chooseGate();
gateRevealedByHost = reveal(gates, firstUserSelection)
predictSwitch += product(gates, firstUserSelection, gateRevealedByHost, True)
predictNotSwitch = 0
for i in range(0,times):
gates = getGates();
firstUserSelection = chooseGate();
gateRevealedByHost = reveal(gates, firstUserSelection)
predictNotSwitch += product(gates, firstUserSelection, gateRevealedByHost, False)
# print("Switch: {0}, Not Switch: {1}".format(predictSwitch, predictNotSwitch))
print("Switch Count: {0}, Switch Probability: {1}".format(predictSwitch, predictSwitch/times))
print("Not Switch Count: {0}, Not Switch Probability: {1}".format(predictNotSwitch, predictNotSwitch/times)) | 31.098039 | 108 | 0.616646 |
85f917c555b06ccb058a12615320e3deca4fcf09 | 24 | py | Python | shapely/__init__.py | snorfalorpagus/Shapely | 088f0911451070456b46df035e600bc71e3a63b4 | [
"BSD-3-Clause"
] | 2 | 2019-05-16T09:10:01.000Z | 2021-09-03T14:56:09.000Z | shapely/__init__.py | paladin74/Shapely | 1f5c11918a3d26d17aaade63da3e1bd6d5b348db | [
"BSD-3-Clause"
] | null | null | null | shapely/__init__.py | paladin74/Shapely | 1f5c11918a3d26d17aaade63da3e1bd6d5b348db | [
"BSD-3-Clause"
] | null | null | null | __version__ = "1.6dev0"
| 12 | 23 | 0.708333 |
f5f7167c865a88aa4cc066dd1469dab6136b53d8 | 2,045 | py | Python | tests/test_blockchain.py | voycey/python-stashpay | 4f2961c7bf334bd0ff3740fa4e1c027c3ddf0795 | [
"MIT"
] | null | null | null | tests/test_blockchain.py | voycey/python-stashpay | 4f2961c7bf334bd0ff3740fa4e1c027c3ddf0795 | [
"MIT"
] | null | null | null | tests/test_blockchain.py | voycey/python-stashpay | 4f2961c7bf334bd0ff3740fa4e1c027c3ddf0795 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#####
# Part of `libstashpay`
#
# Copyright 2018 dustinface
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####
import os, logging
from stashpay.rpc import RPCConfig
from stashpay.blockchain import StashpayBlockchain
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
if __name__ == '__main__':
directory = os.path.dirname(os.path.realpath(__file__))
rpcConfig = RPCConfig('smart','cash')
# Test SQLITE
#chain = StashpayBlockchain('sqlite:////' + directory + '/tt.db', rpcConfig)
# Test MYSQL - You need to create the database before!
chain = StashpayBlockchain('mysql+mysqlconnector://root:stashpay@localhost/stashpay')
chain.run()
while True:
address = input("Address: ")
print("Outputs: {}".format(chain.getNumerOfOutputs(address)))
print("Balance {}".format(chain.getBalance(address)))
# Test POSTGRESS
#chain = StashpayBlockchain('sqlite:////' + directory + '/tt.db', rpcConfig)
| 38.584906 | 103 | 0.730562 |
2971c44b573e205c34c256daa23481db703e22bf | 5,828 | py | Python | pylearn2/scripts/ecog/make_model_plots.py | BouchardLab/pylearn2 | 4cab785b870d22cd9e85a5f536d4cac234b6bf60 | [
"BSD-3-Clause"
] | null | null | null | pylearn2/scripts/ecog/make_model_plots.py | BouchardLab/pylearn2 | 4cab785b870d22cd9e85a5f536d4cac234b6bf60 | [
"BSD-3-Clause"
] | null | null | null | pylearn2/scripts/ecog/make_model_plots.py | BouchardLab/pylearn2 | 4cab785b870d22cd9e85a5f536d4cac234b6bf60 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from pylearn2.datasets import ecog_neuro
import os, h5py, argparse, cPickle
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg')
from pylab import rcParams
import matplotlib.pyplot as plt
import analysis
import plotting
rcParams.update({'figure.autolayout': True})
def main(subject, bands, model_folders, plot_folder,
model_file_base='.pkl', overwrite=False, randomize_labels=False,
audio=False):
print(subject)
print(model_folders)
print('audio', audio)
run = '_'.join([os.path.basename(f) for f in model_folders])
fname_base = subject + '_' + run
data_folder = os.path.join(plot_folder, 'data')
files = [sorted([f for f in os.listdir(model_folder)
if model_file_base in f])
for model_folder in model_folders]
print(files)
with h5py.File(os.path.join(os.environ['PYLEARN2_DATA_PATH'],
'ecog/EC2_CV.h5'), 'r') as f:
ecog_E_lbls = f['Descriptors']['Event_ELbls'].value
kwargs = {'consonant_prediction': False,
'vowel_prediction': False,
'randomize_labels': randomize_labels,
'audio': audio}
data_fname = os.path.join(data_folder, fname_base + '_model_output.pkl')
if (not os.path.exists(data_fname) or overwrite):
# Run data through the models
accuracy_dicts = []
indices_dicts = []
y_hat_dicts = []
logits_dicts = []
hidden_dicts = []
for file_list in files:
accuracy_dict = {}
accuracy_dicts.append(accuracy_dict)
indices_dict = {}
indices_dicts.append(indices_dict)
y_hat_dict = {}
y_hat_dicts.append(y_hat_dict)
logits_dict = {}
logits_dicts.append(logits_dict)
hidden_dict = {}
hidden_dicts.append(hidden_dict)
for ii, filename in enumerate(file_list):
misclass, indices, y_hat, logits, hidden = analysis.get_model_results(filename,
model_folder,
subject,
bands,
ii,
kwargs)
accuracy_dict[filename] = [1.-m for m in misclass]
indices_dict[filename] = indices
y_hat_dict[filename] = y_hat
logits_dict[filename] = logits
hidden_dict[filename] = hidden
# Format model data
y_dims = None
for yd in y_hat_dicts:
for key in yd.keys():
ydim = tuple(ydi.shape[1] for ydi in yd[key])
if y_dims == None:
y_dims = ydim
else:
assert all(yds == ydi for yds, ydi in zip(y_dims, ydim))
ds = ecog_neuro.ECoG(subject,
bands,
'train',
**kwargs)
has_data = []
for ii in range(len(ecog_E_lbls)):
if (ds.y == ii).sum() > 0:
has_data.append(ii)
y_dims = [57]
dicts = (accuracy_dicts, indices_dicts, y_hat_dicts, logits_dicts,
hidden_dicts)
dicts2 = analysis.condensed_2_dense(indices_dicts,
y_hat_dicts, logits_dicts, ds)
with open(data_fname, 'w') as f:
cPickle.dump((dicts, dicts2, y_dims, has_data), f)
else:
with open(data_fname) as f:
dicts, dicts2, y_dims, has_data = cPickle.load(f)
(accuracy_dicts, indices_dicts, y_hat_dicts, logits_dicts,
hidden_dicts) = dicts
indices_dicts2, y_hat_dicts2, logits_dicts2 = dicts2
mats = analysis.indx_dict2conf_mat(indices_dicts2, y_dims)
c_mat, v_mat, cv_mat = mats
accuracy = analysis.conf_mat2accuracy(c_mat, v_mat, cv_mat)
(c_accuracy, v_accuracy, cv_accuracy, accuracy_per_cv,
p_accuracy, m_accuracy) = accuracy
if cv_accuracy is not None:
print('cv: ',cv_accuracy)
print('cv mean: ',cv_accuracy.mean())
print('cv std: ',cv_accuracy.std())
if c_accuracy is not None:
print('c mean: ',c_accuracy.mean())
print('c std: ',c_accuracy.std())
if v_accuracy is not None:
print('v mean: ',v_accuracy.mean())
print('v std: ',v_accuracy.std())
if p_accuracy is not None:
print('p mean: ',np.nanmean(p_accuracy))
print('p std: ',np.nanstd(p_accuracy))
if m_accuracy is not None:
print('m mean: ',np.nanmean(m_accuracy))
print('m std: ',np.nanstd(m_accuracy))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Make plots for an ECoG DNN model.')
parser.add_argument('subject', choices=['ec2', 'ec9', 'gp31', 'gp33'], default='ec2')
parser.add_argument('bands', type=str)
parser.add_argument('model_folder')
parser.add_argument('-p', '--plot_folder', type=str,
default=os.path.join(os.environ['HOME'], 'plots', 'model'))
parser.add_argument('-o', '--overwrite', action='store_true')
parser.add_argument('-r', '--randomize_labels', action='store_true')
parser.add_argument('-a', '--audio', action='store_true')
args = parser.parse_args()
main(args.subject, args.bands, [args.model_folder],
args.plot_folder, overwrite=args.overwrite,
randomize_labels=args.randomize_labels, audio=args.audio)
| 40.193103 | 100 | 0.554221 |
a6b9179d57ae341436ece93a61e88319d95258aa | 640 | py | Python | plts/phs_funcs.py | molaruna/rcs_lfp_analysis | 14b3950299f08c0d08d20175c05a471328bf074f | [
"MIT"
] | 1 | 2021-12-04T00:03:22.000Z | 2021-12-04T00:03:22.000Z | plts/phs_funcs.py | molaruna/rcs_lfp_analysis | 14b3950299f08c0d08d20175c05a471328bf074f | [
"MIT"
] | null | null | null | plts/phs_funcs.py | molaruna/rcs_lfp_analysis | 14b3950299f08c0d08d20175c05a471328bf074f | [
"MIT"
] | null | null | null | import scipy.signal as signal
import matplotlib.pyplot as plt
def fit_aps(freqs, signal_arr):
local_max_freqs_i = signal.find_peaks(signal_arr)[0]
local_base_signal = get_base(signal_arr, local_max_freqs_i)
plt.semilogy(freqs, signal_arr)
plt.plot(freqs[local_max_freqs_i],signal_arr[local_max_freqs_i], '^')
plt.plot(freqs[local_min_freqs_i],signal_arr[local_min_freqs_i], '+')
plt.show()
def get_base(signal_arr, i):
local_min_freqs_i = signal.find_peaks(-signal_arr)[0]
local_min_freqs_d = np.diff(signal_arr(local_min_freqs_i))
signal_d_infl_i = signal_d[signal_d > 0].index()
| 640 | 640 | 0.735938 |
9b4655ae475f14c718aaa47e390801993a2a9568 | 958 | py | Python | var/spack/repos/builtin/packages/dateutils/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/dateutils/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/dateutils/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Dateutils(AutotoolsPackage):
"""Dateutils are a bunch of tools that revolve around fiddling with dates
and times in the command line with a strong focus on use cases that arise
when dealing with large amounts of financial data."""
homepage = "https://www.fresse.org/dateutils/"
url = "https://github.com/hroptatyr/dateutils/releases/download/v0.4.6/dateutils-0.4.6.tar.xz"
version('0.4.7', sha256='49725457f5bef45ea424baade8999a6e54496e357f64280474ff7134a54f599a')
version('0.4.6', sha256='26a071317ae5710f226a3e6ba9a54d3764cd9efe3965aecc18e75372088757cd')
version('0.4.5', sha256='16d6a0fe7b7d49ddbb303f33538dd7304a0d4af5a0369bcbf275db6a5060cbde')
build_directory = 'spack-build'
| 45.619048 | 103 | 0.769311 |
825152899adf9d25c69b79b1083dd689522bb197 | 1,085 | py | Python | myconnectome/openfmri/washu_fieldmap.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 28 | 2015-04-02T16:43:14.000Z | 2020-06-17T20:04:26.000Z | myconnectome/openfmri/washu_fieldmap.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 11 | 2015-05-19T02:57:22.000Z | 2017-03-17T17:36:16.000Z | myconnectome/openfmri/washu_fieldmap.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 10 | 2015-05-21T17:01:26.000Z | 2020-11-11T04:28:08.000Z | import json,glob,os
import dicom
outdir='/scratch/01329/poldrack/selftracking/ds031/sub00001/ses105/fieldmap'
washubase='/scratch/01329/poldrack/selftracking/washu'
basedirs=['vc39556','vc39556_2']
fmseries=[[5,10,21],[5,17]]
ctr=1
for i in range(2):
basedir=os.path.join(washubase,basedirs[i])
for j in range(len(fmseries[i])):
seriesnum=fmseries[i][j]
dcmfile=glob.glob('%s/DICOM/VC*.MR.HEAD_LAUMANN.%04d.*.IMA'%(basedir,seriesnum))[0]
print dcmfile
dcmdata=dicom.read_file(dcmfile)
dcmdict={}
for k in dcmdata.dir():
dd=dcmdata.data_element(k)
try:
dd.value.decode('ascii')
dcmdict[dd.name]=dd.value
except:
try:
dd.value.original_string.decode('ascii')
dcmdict[dd.name]=dd.value
except:
pass
jsonfile='%s/sub00001_ses105_%03d.json'%(outdir,ctr)
ctr+=1
f=open(jsonfile,'w')
f.write(json.dumps(dcmdict,indent=4))
f.close()
| 30.138889 | 91 | 0.574194 |
e3d0d5529d91222878f4549782552eabd5cc60b3 | 1,638 | py | Python | hazelcast/protocol/codec/transactional_map_key_set_with_predicate_codec.py | murdockn/hazelcast-python-client | 597d90be5414cd56340fafcff916191704dcb86d | [
"Apache-2.0"
] | null | null | null | hazelcast/protocol/codec/transactional_map_key_set_with_predicate_codec.py | murdockn/hazelcast-python-client | 597d90be5414cd56340fafcff916191704dcb86d | [
"Apache-2.0"
] | null | null | null | hazelcast/protocol/codec/transactional_map_key_set_with_predicate_codec.py | murdockn/hazelcast-python-client | 597d90be5414cd56340fafcff916191704dcb86d | [
"Apache-2.0"
] | null | null | null | from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.transactional_map_message_type import *
# Protocol message-type id of the request this codec encodes.
REQUEST_TYPE = TRANSACTIONALMAP_KEYSETWITHPREDICATE
# Expected message-type id of the matching response.
RESPONSE_TYPE = 106
# Marks the request as not safe to retry automatically (transactional operation).
RETRYABLE = False
def calculate_size(name, txn_id, thread_id, predicate):
    """Return the request payload size in bytes.

    The payload carries the map name, transaction id, a long thread id and
    the serialized predicate, in that order.
    """
    return (calculate_size_str(name)
            + calculate_size_str(txn_id)
            + LONG_SIZE_IN_BYTES
            + calculate_size_data(predicate))
def encode_request(name, txn_id, thread_id, predicate):
    """ Encode request into client_message"""
    # Size the frame up-front so the message buffer is allocated once.
    client_message = ClientMessage(payload_size=calculate_size(name, txn_id, thread_id, predicate))
    client_message.set_message_type(REQUEST_TYPE)
    client_message.set_retryable(RETRYABLE)
    # Field order must match calculate_size() and the server-side decoder.
    client_message.append_str(name)
    client_message.append_str(txn_id)
    client_message.append_long(thread_id)
    client_message.append_data(predicate)
    client_message.update_frame_length()
    return client_message
def decode_response(client_message, to_object=None):
    """ Decode response from client message.

    Reads a length-prefixed list of serialized keys from *client_message*
    and wraps it in an ImmutableLazyDataList so each entry is deserialized
    with *to_object* only on access.
    """
    parameters = dict(response=None)
    response_size = client_message.read_int()
    response = []
    # 'xrange' (used previously) only exists on Python 2 and raised a
    # NameError on Python 3; 'range' iterates identically on both.
    for _ in range(response_size):
        response.append(client_message.read_data())
    parameters['response'] = ImmutableLazyDataList(response, to_object)
    return parameters
| 34.125 | 99 | 0.778388 |
fdd51b929202fb1df0e9e3d27cfefccbb7c6aced | 14,277 | py | Python | barbot/core.py | frdfsnlght/Barbot-server | 4e3cfb9218a1d1d1c35288aaa5e6c756be83f84c | [
"MIT"
] | 1 | 2020-03-31T15:33:35.000Z | 2020-03-31T15:33:35.000Z | barbot/core.py | frdfsnlght/Barbot-server | 4e3cfb9218a1d1d1c35288aaa5e6c756be83f84c | [
"MIT"
] | null | null | null | barbot/core.py | frdfsnlght/Barbot-server | 4e3cfb9218a1d1d1c35288aaa5e6c756be83f84c | [
"MIT"
] | null | null | null |
import datetime
import logging
import os
import random
import re
import subprocess
import time
from threading import Thread, Event

from .bus import bus
from .config import config
from .db import db, ModelError
from . import serial
from . import utils
from .models.Drink import Drink
from .models.DrinkOrder import DrinkOrder
from .models.DrinkIngredient import DrinkIngredient
from .models.Pump import Pump, anyPumpsRunning
# Dispense state machine states (published via the 'core/dispenseState' event).
ST_START = 'start'              # waiting for the user to start or cancel
ST_DISPENSE = 'dispense'        # actively pumping ingredients
ST_PICKUP = 'pickup'            # done; waiting for the glass to be taken
ST_GLASS_CLEAR = 'glassClear'   # glass removed mid-dispense; waiting for clear
ST_CANCEL_CLEAR = 'cancelClear' # cancelled mid-dispense; waiting for clear
# Control commands set by the UI through setDispenseControl().
CTL_START = 'start'
CTL_CANCEL = 'cancel'
CTL_OK = 'ok'
# Matches controller sensor events such as "S0"/"S1" (glass sensor state).
_sensorEventPattern = re.compile(r"(?i)S(\d)")
_logger = logging.getLogger('Barbot')
# Signals the core worker thread to exit.
_exitEvent = Event()
_thread = None
_requestPumpSetup = False
_suppressMenuRebuild = False
# Woken whenever glassReady or dispenseControl changes.
_dispenseEvent = Event()
_lastDrinkOrderCheckTime = time.time()
_lastIdleAudioCheckTime = time.time()
# Public (UI-visible) dispenser state.
dispenserHold = False
pumpSetup = False
glassReady = False
dispenseState = None
dispenseControl = None
dispenseDrinkOrder = None
@bus.on('server/start')
def _bus_serverStart():
    """Rebuild the drinks menu and start the core worker thread."""
    global _thread
    _rebuildMenu()
    _exitEvent.clear()
    _thread = Thread(target = _threadLoop, name = 'CoreThread', daemon = True)
    _thread.start()
@bus.on('server/stop')
def _bus_serverStop():
    """Signal the core worker thread to exit on server shutdown."""
    _exitEvent.set()
@bus.on('serial/event')
def _bus_serialEvent(e):
    """Track glass-sensor events ("S0"/"S1") arriving from the controller.

    Updates the module-level glassReady flag, rebroadcasts it on the bus,
    and wakes the dispense state machine.
    """
    global glassReady
    m = _sensorEventPattern.match(e)
    if m:
        newGlassReady = m.group(1) == '1'
        # only react (and emit) on actual state changes
        if newGlassReady != glassReady:
            glassReady = newGlassReady
            bus.emit('core/glassReady', glassReady)
            _dispenseEvent.set()
@bus.on('socket/consoleConnect')
def _bus_consoleConnect():
    """When the console UI connects, wake the controller board over serial."""
    try:
        serial.write('RO', timeout = 1) # power on, turn off lights
    except serial.SerialError as e:
        # best-effort: the console can still run without the board responding
        _logger.error(e)
#-----------------
# TODO: remove this temp code someday
#glassThread = None
#import os.path
#@bus.on('server/start')
#def _bus_startGlassThread():
# global glassThread
# glassThread = Thread(target = _glassThreadLoop, name = 'BarbotGlassThread', daemon = True)
# glassThread.start()
#def _glassThreadLoop():
# global glassReady
# while not _exitEvent.is_set():
# newGlassReady = os.path.isfile(os.path.join(os.path.dirname(__file__), '..', 'var', 'glass'))
# if newGlassReady != glassReady:
# glassReady = newGlassReady
# bus.emit('core/glassReady', glassReady)
# _dispenseEvent.set()
# time.sleep(1)
# end of temp code
#---------------------
def restart():
    """Restart the host using the configured restart command.

    On command failure the error output is logged and no effects are played;
    on success the restart light/audio effects are triggered.
    """
    args = config.get('server', 'restartCommand').split(' ')
    result = subprocess.run(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True)
    if result.returncode:
        _logger.error('Error trying to restart: {}'.format(result.stdout))
        return
    for channel in ('lights/play', 'audio/play'):
        bus.emit(channel, 'restart')
def shutdown():
    """Shut down the host using the configured shutdown command.

    On success, asks the controller board (over serial) to start its
    power-off timer and plays the shutdown light/audio effects.
    """
    cmd = config.get('server', 'shutdownCommand').split(' ')
    out = subprocess.run(cmd,
        stdout = subprocess.PIPE,
        stderr = subprocess.STDOUT,
        universal_newlines = True)
    if out.returncode != 0:
        _logger.error('Error trying to shutdown: {}'.format(out.stdout))
        return
    try:
        serial.write('RT{}'.format(config.get('server', 'shutdownTimer')))
    except serial.SerialError as e:
        # was "except SerialError", a NameError: this module imports the
        # serial module, not the exception name (cf. _bus_consoleConnect)
        _logger.error(e)
    bus.emit('lights/play', 'shutdown')
    bus.emit('audio/play', 'shutdown')
def toggleDispenserHold():
    """Flip the dispenser-hold flag and broadcast the new state."""
    global dispenserHold
    dispenserHold = not dispenserHold
    bus.emit('core/dispenserHold', dispenserHold)
def startPumpSetup():
    """Ask the core thread to enter pump-setup mode at its next iteration."""
    global _requestPumpSetup
    _requestPumpSetup = True
def stopPumpSetup():
    """Leave pump-setup mode, rebuild the menu and broadcast the new state."""
    global _requestPumpSetup, pumpSetup
    _requestPumpSetup = False
    pumpSetup = False
    # pump contents may have changed during setup, so the menu must be rebuilt
    _rebuildMenu()
    bus.emit('core/pumpSetup', pumpSetup)
def setParentalLock(code):
    """Set or clear the parental-lock code.

    A falsy *code* removes the stored code file (unlocking); otherwise the
    code is written to the configured file.  Emits 'core/parentalLock' with
    the resulting locked state.
    """
    if not code:
        try:
            # 'os' was referenced here without being imported anywhere in
            # this module, so clearing the lock raised a NameError; 'os' is
            # now imported at module level
            os.remove(config.getpath('core', 'parentalCodeFile'))
        except OSError:
            # file absent (already unlocked) or not removable - nothing to do
            pass
    else:
        # use a context manager so the file handle is closed deterministically
        with open(config.getpath('core', 'parentalCodeFile'), 'w') as f:
            f.write(code)
    bus.emit('core/parentalLock', True if code else False)
def getParentalCode():
    """Return the stored parental-lock code, or False if no lock is set."""
    try:
        # context manager fixes the previously-leaked file handle
        with open(config.getpath('core', 'parentalCodeFile')) as f:
            return f.read().rstrip()
    except OSError:
        # missing/unreadable file means no parental lock is configured
        return False
def submitDrinkOrder(item):
    """Validate and queue a drink order described by the dict *item*.

    Alcoholic drinks require the correct parental code when a lock is set.
    Emits 'core/drinkOrderSubmitted' and plays a per-session audio cue.
    """
    d = Drink.get(Drink.id == item['drinkId'])
    if d.isAlcoholic:
        code = getParentalCode()
        if code:
            # NOTE(review): CoreError is not defined in this module's visible
            # imports - confirm it is defined elsewhere in the file
            if not 'parentalCode' in item:
                raise CoreError('Parental code required!')
            if item['parentalCode'] != code:
                raise CoreError('Invalid parental code!')
    o = DrinkOrder.submitFromDict(item)
    bus.emit('core/drinkOrderSubmitted', o)
    bus.emit('audio/play', 'drinkOrderSubmitted', sessionId = o.sessionId)
def cancelDrinkOrder(id):
    """Cancel the queued drink order *id* and notify bus/audio listeners."""
    o = DrinkOrder.cancelById(id)
    bus.emit('core/drinkOrderCancelled', o)
    bus.emit('audio/play', 'drinkOrderCancelled', sessionId = o.sessionId)
def toggleDrinkOrderHold(id):
    """Toggle the user-hold flag of order *id* and play the matching cue."""
    o = DrinkOrder.toggleHoldById(id)
    bus.emit('core/drinkOrderHoldToggled', o)
    bus.emit('audio/play', 'drinkOrderOnHold' if o.userHold else 'drinkOrderOffHold', sessionId = o.sessionId)
def setDispenseControl(ctl):
    """Record a UI control command (CTL_*) and wake the dispense state machine."""
    global dispenseControl
    dispenseControl = ctl
    _dispenseEvent.set()
def _threadLoop():
    """Core worker loop.

    Handles pump-setup requests, waits out holds and running pumps, polls
    for the next pending drink order at the configured interval, and plays
    idle audio when nothing is happening.
    """
    # The original global statement declared _lastDrinkOrderCheckTime twice
    # and omitted _lastIdleAudioCheckTime, so the assignment after a dispense
    # created a dead local and the module-level idle timer never advanced.
    global _lastDrinkOrderCheckTime, _lastIdleAudioCheckTime, _requestPumpSetup, pumpSetup
    _logger.info('Core thread started')
    while not _exitEvent.is_set():
        if _requestPumpSetup:
            _requestPumpSetup = False
            pumpSetup = True
            bus.emit('core/pumpSetup', pumpSetup)
        # block (with idle audio) while setup/hold is active or pumps run
        while pumpSetup or dispenserHold or anyPumpsRunning():
            _checkIdle()
            time.sleep(1)
        t = time.time()
        if (t - _lastDrinkOrderCheckTime) > config.getfloat('core', 'drinkOrderCheckInterval'):
            _lastDrinkOrderCheckTime = t
            o = DrinkOrder.getFirstPending()
            if o:
                _dispenseDrinkOrder(o)
                # reset both timers after a (possibly long) dispense cycle
                t = time.time()
                _lastDrinkOrderCheckTime = t
                _lastIdleAudioCheckTime = t
                continue
        _checkIdle()
        time.sleep(1)
    _logger.info('Core thread stopped')
def _checkIdle():
    """Occasionally play an idle audio clip when the bar has been quiet.

    Runs at most once per configured interval, and then only with the
    configured random chance.
    """
    global _lastIdleAudioCheckTime
    if (time.time() - _lastIdleAudioCheckTime) > config.getfloat('core', 'idleAudioInterval'):
        _lastIdleAudioCheckTime = time.time()
        if random.random() <= config.getfloat('core', 'idleAudioChance'):
            bus.emit('audio/play', 'idle')
def _dispenseDrinkOrder(o):
    """Run the full dispense state machine for drink order *o*.

    Phases: wait for the user to place a glass and press start (or cancel);
    run the pumps step by step; then wait for pickup/clear acknowledgement.
    Cancellation or glass removal at any point places the order back on hold.
    """
    global dispenseState, dispenseDrinkOrder, dispenseControl
    # this gets the drink out of the queue
    o.startedDate = datetime.datetime.now()
    o.save()
    dispenseDrinkOrder = o
    _logger.info('Preparing to dispense {}'.format(dispenseDrinkOrder.desc()))
    # wait for user to start or cancel the order
    dispenseState = ST_START
    _dispenseEvent.clear()
    dispenseControl = None
    bus.emit('core/dispenseState', dispenseState, dispenseDrinkOrder)
    bus.emit('lights/play', 'waitForDispense')
    bus.emit('audio/play', 'waitForDispense')
    while True:
        _dispenseEvent.wait()
        _dispenseEvent.clear()
        # glassReady or dispenseControl changed
        if dispenseControl == CTL_CANCEL:
            _logger.info('Cancelled dispensing {}'.format(dispenseDrinkOrder.desc()))
            dispenseDrinkOrder.placeOnHold()
            bus.emit('lights/play', None)
            bus.emit('audio/play', 'cancelledDispense')
            bus.emit('audio/play', 'drinkOrderOnHold', sessionId = dispenseDrinkOrder.sessionId)
            dispenseDrinkOrder = None
            dispenseState = None
            bus.emit('core/dispenseState', dispenseState, dispenseDrinkOrder)
            return
        # starting requires a glass to actually be in place
        if dispenseControl == CTL_START and glassReady:
            dispenseState = ST_DISPENSE
            bus.emit('core/dispenseState', dispenseState, dispenseDrinkOrder)
            bus.emit('lights/play', 'startDispense')
            bus.emit('audio/play', 'startDispense')
            _logger.info('Starting to dispense {}'.format(dispenseDrinkOrder.desc()))
            break
    drink = dispenseDrinkOrder.drink
    dispenseControl = None
    # ingredients are grouped into ordered steps; all pumps of a step run together
    for step in sorted({i.step for i in drink.ingredients}):
        ingredients = [di for di in drink.ingredients if di.step == step]
        _logger.info('Executing step {}, {} ingredients'.format(step, len(ingredients)))
        pumps = []
        # start the pumps
        for di in ingredients:
            ingredient = di.ingredient
            pump = ingredient.pump.first()
            amount = utils.toML(di.amount, di.units)
            pump.forward(amount)
            ingredient.timesDispensed = ingredient.timesDispensed + 1
            ingredient.amountDispensed = ingredient.amountDispensed + amount
            ingredient.save()
            pumps.append(pump)
        # wait for the pumps to stop, glass removed, order cancelled
        # (pumps are drained from the end of the list; each is popped once it stops)
        while True and dispenseState == ST_DISPENSE:
            if not pumps[-1].running:
                pumps.pop()
            if not len(pumps):
                # all pumps have stopped
                break
            if _dispenseEvent.wait(0.1):
                _dispenseEvent.clear()
                if not glassReady:
                    _logger.warning('Glass removed while dispensing {}'.format(dispenseDrinkOrder.desc()))
                    for pump in pumps:
                        pump.stop()
                    bus.emit('lights/play', 'glassRemovedDispense')
                    bus.emit('audio/play', 'glassRemovedDispense')
                    bus.emit('audio/play', 'drinkOrderOnHold', sessionId = dispenseDrinkOrder.sessionId)
                    dispenseDrinkOrder.placeOnHold()
                    dispenseDrinkOrder = None
                    dispenseState = ST_GLASS_CLEAR
                # NOTE(review): if glass removal and CTL_CANCEL arrive in the same
                # wakeup, dispenseDrinkOrder is already None here - confirm intended
                if dispenseControl == CTL_CANCEL:
                    _logger.info('Cancelled dispensing {}'.format(dispenseDrinkOrder.desc()))
                    for pump in pumps:
                        pump.stop()
                    bus.emit('lights/play', None)
                    bus.emit('audio/play', 'cancelledDispense')
                    bus.emit('audio/play', 'drinkOrderOnHold', sessionId = dispenseDrinkOrder.sessionId)
                    dispenseDrinkOrder.placeOnHold()
                    dispenseDrinkOrder = None
                    dispenseState = ST_CANCEL_CLEAR
        if dispenseState != ST_DISPENSE:
            break
        # proceed to next step...
    # all done!
    if dispenseState == ST_DISPENSE:
        _logger.info('Done dispensing {}'.format(dispenseDrinkOrder.desc()))
        drink.timesDispensed = drink.timesDispensed + 1
        # promote frequently-made drinks to favorites
        if not drink.isFavorite and drink.timesDispensed >= config.getint('core', 'favoriteDrinkCount'):
            drink.isFavorite = True
        drink.save()
        dispenseDrinkOrder.completedDate = datetime.datetime.now()
        dispenseDrinkOrder.save()
        dispenseState = ST_PICKUP
        bus.emit('lights/play', 'endDispense')
        bus.emit('audio/play', 'endDispense')
        bus.emit('audio/play', 'endDispense', sessionId = dispenseDrinkOrder.sessionId)
    _dispenseEvent.clear()
    bus.emit('core/dispenseState', dispenseState, dispenseDrinkOrder)
    # wait until the glass is removed (pickup/cancel-clear) or the user acks
    while dispenseState is not None:
        if _dispenseEvent.wait(0.5):
            _dispenseEvent.clear()
            if dispenseState == ST_CANCEL_CLEAR or dispenseState == ST_PICKUP:
                if not glassReady:
                    dispenseState = None
                    dispenseDrinkOrder = None
            elif dispenseControl == CTL_OK:
                dispenseState = None
    bus.emit('core/dispenseState', dispenseState, dispenseDrinkOrder)
    bus.emit('lights/play', None)
    # pump levels changed, so menu availability may have changed too
    _rebuildMenu()
    DrinkOrder.deleteOldCompleted(config.getint('core', 'maxDrinkOrderAge'))
@bus.on('model/drink/saved')
def _bus_drinkSaved(drink):
    """Rebuild the menu when a drink changes, unless a rebuild triggered the save."""
    if not _suppressMenuRebuild:
        _rebuildMenu()
@bus.on('model/drink/deleted')
def _bus_drinkDeleted(drink):
    """Rebuild the menu whenever a drink is deleted."""
    _rebuildMenu()
@db.atomic()
def _rebuildMenu():
    """Recompute which drinks are on the menu from current pump contents.

    A drink is on the menu only if every ingredient has a ready, non-empty
    pump holding at least the required amount.  Saves within this function
    set _suppressMenuRebuild so the 'model/drink/saved' handler does not
    recurse back into a rebuild.
    """
    global _suppressMenuRebuild
    _logger.info('Rebuilding drinks menu')
    _suppressMenuRebuild = True
    # NOTE(review): menuUpdated is computed but never read - confirm whether
    # it was meant to gate the 'barbot/drinksMenuUpdated' emit below
    menuUpdated = False
    ingredients = Pump.getReadyIngredients()
    menuDrinks = Drink.getMenuDrinks()
    # trivial case
    if not ingredients:
        for drink in menuDrinks:
            drink.isOnMenu = False
            drink.save()
            menuUpdated = True
    else:
        for drink in Drink.getDrinksWithIngredients(ingredients):
            # remove this drink from the existing menu drinks
            menuDrinks = [d for d in menuDrinks if d.id != drink.id]
            onMenu = True
            # check for all the drink's ingredients
            for di in drink.ingredients:
                pump = Pump.getPumpWithIngredientId(di.ingredient_id)
                if not pump or pump.state == Pump.EMPTY or utils.toML(pump.amount, pump.units) < utils.toML(di.amount, di.units):
                    onMenu = False
                    break
            if onMenu != drink.isOnMenu:
                drink.isOnMenu = onMenu
                drink.save()
                menuUpdated = True
        # any drinks in the original list are no longer on the menu
        for drink in menuDrinks:
            drink.isOnMenu = False
            drink.save()
            menuUpdated = True
    bus.emit('barbot/drinksMenuUpdated')
    _updateDrinkOrders()
    _suppressMenuRebuild = False
@db.atomic()
def _updateDrinkOrders():
    """Sync each waiting order's ingredient-hold flag with menu availability."""
    _logger.info('Updating drink orders')
    # NOTE(review): readyPumps is fetched but never used - confirm it can go
    readyPumps = Pump.getReadyPumps()
    for o in DrinkOrder.getWaiting():
        # hold the order exactly when its drink is off the menu
        if o.drink.isOnMenu == o.ingredientHold:
            o.ingredientHold = not o.drink.isOnMenu
            o.save()
| 33.912114 | 129 | 0.616936 |
09b99992d1dc01e285e6eabe5b4ef0e9a2ca26ae | 4,374 | py | Python | runtests.py | The-Compiler/mypy | f220ce50725ed7f117832b9f03aed4d1a9508e00 | [
"PSF-2.0"
] | null | null | null | runtests.py | The-Compiler/mypy | f220ce50725ed7f117832b9f03aed4d1a9508e00 | [
"PSF-2.0"
] | null | null | null | runtests.py | The-Compiler/mypy | f220ce50725ed7f117832b9f03aed4d1a9508e00 | [
"PSF-2.0"
] | null | null | null | #!/usr/bin/env python3
import subprocess
from subprocess import Popen
from os import system
from sys import argv, exit, platform, executable, version_info

# Use the Python provided to execute the script, or fall back to a sane default
if version_info >= (3, 5, 0):
    python_name = executable
else:
    # running under an old interpreter: pick a Python 3 launcher by platform
    if platform == 'win32':
        python_name = 'py -3'
    else:
        python_name = 'python3'
# Slow test suites
CMDLINE = 'PythonCmdline'
SAMPLES = 'SamplesSuite'
TYPESHED = 'TypeshedSuite'
PEP561 = 'PEP561Suite'
EVALUATION = 'PythonEvaluation'
DAEMON = 'testdaemon'
STUBGEN_CMD = 'StubgenCmdLine'
STUBGEN_PY = 'StubgenPythonSuite'
MYPYC_RUN = 'TestRun'
MYPYC_RUN_MULTI = 'TestRunMultiFile'
MYPYC_EXTERNAL = 'TestExternal'
MYPYC_COMMAND_LINE = 'TestCommandLine'
ERROR_STREAM = 'ErrorStreamSuite'

# Every slow suite above; used to exclude them from the fast pytest run.
ALL_NON_FAST = [
    CMDLINE,
    SAMPLES,
    TYPESHED,
    PEP561,
    EVALUATION,
    DAEMON,
    STUBGEN_CMD,
    STUBGEN_PY,
    MYPYC_RUN,
    MYPYC_RUN_MULTI,
    MYPYC_EXTERNAL,
    MYPYC_COMMAND_LINE,
    ERROR_STREAM,
]

# These must be enabled by explicitly including 'mypyc-extra' on the command line.
MYPYC_OPT_IN = [MYPYC_RUN, MYPYC_RUN_MULTI]

# We split the pytest run into three parts to improve test
# parallelization. Each run should have tests that each take a roughly similar
# time to run.
cmds = {
    # Self type check
    'self': python_name + ' -m mypy --config-file mypy_self_check.ini -p mypy',
    # Lint
    'lint': 'flake8 -j0',
    # Fast test cases only (this is the bulk of the test suite)
    'pytest-fast': 'pytest -k "not (%s)"' % ' or '.join(ALL_NON_FAST),
    # Test cases that invoke mypy (with small inputs)
    'pytest-cmdline': 'pytest -k "%s"' % ' or '.join([CMDLINE,
                                                      EVALUATION,
                                                      STUBGEN_CMD,
                                                      STUBGEN_PY]),
    # Test cases that may take seconds to run each
    'pytest-slow': 'pytest -k "%s"' % ' or '.join(
        [SAMPLES,
         TYPESHED,
         PEP561,
         DAEMON,
         MYPYC_EXTERNAL,
         MYPYC_COMMAND_LINE,
         ERROR_STREAM]),
    # Mypyc tests that aren't run by default, since they are slow and rarely
    # fail for commits that don't touch mypyc
    'mypyc-extra': 'pytest -k "%s"' % ' or '.join(MYPYC_OPT_IN),
}

# Stop run immediately if these commands fail
FAST_FAIL = ['self', 'lint']

# Everything except the opt-in 'mypyc-extra' runs by default.
DEFAULT_COMMANDS = [cmd for cmd in cmds if cmd != 'mypyc-extra']

# Sanity check: every fast-fail name must be a real command.
assert all(cmd in cmds for cmd in FAST_FAIL)
def run_cmd(name: str) -> int:
    """Run the named test command synchronously and return its exit status.

    Prints the command before running it; on failure prints a FAILED marker
    and, for fast-fail commands, aborts the whole test run immediately.
    """
    command = cmds[name]
    print('run %s: %s' % (name, command))
    # os.system returns a wait()-style status word; extract the exit code byte
    exit_code = (system(command) & 0x7F00) >> 8
    if not exit_code:
        return 0
    print('\nFAILED: %s' % name)
    if name in FAST_FAIL:
        exit(exit_code)
    return exit_code
def start_background_cmd(name: str) -> Popen:
    """Launch the named test command in the background, capturing all output."""
    return subprocess.Popen(cmds[name],
                            shell=True,
                            stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE)
def wait_background_cmd(name: str, proc: Popen) -> int:
    """Wait for a process started by start_background_cmd and report on it.

    Prints the command; on failure also prints its captured output, and
    fast-fail commands abort the whole run.  Returns the exit status.
    """
    captured, _ = proc.communicate()
    print('run %s: %s' % (name, cmds[name]))
    rc = proc.returncode
    if rc:
        print(captured.decode().rstrip())
        print('\nFAILED: %s' % name)
        if name in FAST_FAIL:
            exit(rc)
    return rc
def main() -> None:
    """Entry point: parse command names from argv and run the selected tests.

    With no arguments, every command except 'mypyc-extra' runs.  Exits with
    the last nonzero command status (or 0 if everything passed).
    """
    prog, *args = argv
    if not set(args).issubset(cmds):
        print("usage:", prog, " ".join('[%s]' % k for k in cmds))
        print()
        print('Run the given tests. If given no arguments, run everything except mypyc-extra.')
        exit(1)
    if not args:
        args = DEFAULT_COMMANDS[:]
    status = 0
    if 'self' in args and 'lint' in args:
        # Perform lint and self check in parallel as it's faster.
        proc = start_background_cmd('lint')
        cmd_status = run_cmd('self')
        if cmd_status:
            status = cmd_status
        cmd_status = wait_background_cmd('lint', proc)
        if cmd_status:
            status = cmd_status
        # both have already run; drop them from the sequential pass below
        args = [arg for arg in args if arg not in ('self', 'lint')]
    for arg in args:
        cmd_status = run_cmd(arg)
        if cmd_status:
            status = cmd_status
    exit(status)
# Script entry point.
if __name__ == '__main__':
    main()
| 27.509434 | 95 | 0.599909 |
704dd2e5c29dfebe33a0eba9d70b76c791ddad6d | 22,623 | py | Python | electrum/tests/test_lnrouter.py | y-chan/electrum | af4a3328f78ccc0d50efa3f6fa88facb328ef436 | [
"MIT"
] | 7 | 2020-03-20T10:20:29.000Z | 2021-04-21T12:46:25.000Z | electrum/tests/test_lnrouter.py | y-chan/electrum | af4a3328f78ccc0d50efa3f6fa88facb328ef436 | [
"MIT"
] | 19 | 2019-11-19T14:34:30.000Z | 2022-03-10T21:21:07.000Z | electrum/tests/test_lnrouter.py | y-chan/electrum | af4a3328f78ccc0d50efa3f6fa88facb328ef436 | [
"MIT"
] | 8 | 2018-04-11T01:05:12.000Z | 2020-03-24T22:26:30.000Z | import unittest
import tempfile
import shutil
import asyncio
from electrum.util import bh2u, bfh, create_and_start_event_loop
from electrum.lnonion import (OnionHopsDataSingle, new_onion_packet, OnionPerHop,
process_onion_packet, _decode_onion_error, decode_onion_error,
OnionFailureCode)
from electrum import bitcoin, lnrouter
from electrum.constants import BitcoinTestnet
from electrum.simple_config import SimpleConfig
from . import TestCaseForTestnet
class Test_LNRouter(TestCaseForTestnet):
#@staticmethod
#def parse_witness_list(witness_bytes):
# amount_witnesses = witness_bytes[0]
# witness_bytes = witness_bytes[1:]
# res = []
# for i in range(amount_witnesses):
# witness_length = witness_bytes[0]
# this_witness = witness_bytes[1:witness_length+1]
# assert len(this_witness) == witness_length
# witness_bytes = witness_bytes[witness_length+1:]
# res += [bytes(this_witness)]
# assert witness_bytes == b"", witness_bytes
# return res
    def setUp(self):
        """Start a fresh event loop thread and a SimpleConfig for each test."""
        super().setUp()
        self.asyncio_loop, self._stop_loop, self._loop_thread = create_and_start_event_loop()
        self.config = SimpleConfig({'electrum_path': self.electrum_path})
    def tearDown(self):
        """Stop the event loop started in setUp and wait for its thread."""
        self.asyncio_loop.call_soon_threadsafe(self._stop_loop.set_result, 1)
        self._loop_thread.join(timeout=1)
        super().tearDown()
    def test_find_path_for_payment(self):
        """Build a small 5-node / 6-channel graph in a ChannelDB and check that
        LNPathFinder finds the cheapest a->e path and can turn it into a route.
        """
        # Minimal stand-in for the Network object that ChannelDB expects.
        class fake_network:
            config = self.config
            asyncio_loop = asyncio.get_event_loop()
            trigger_callback = lambda *args: None
            register_callback = lambda *args: None
            interface = None
        fake_network.channel_db = lnrouter.ChannelDB(fake_network())
        cdb = fake_network.channel_db
        path_finder = lnrouter.LNPathFinder(cdb)
        self.assertEqual(cdb.num_channels, 0)
        # Channels: b-c(1), b-e(2), a-b(3), c-d(4), d-e(5), a-d(6)
        cdb.add_channel_announcement({'node_id_1': b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'node_id_2': b'\x02cccccccccccccccccccccccccccccccc',
                                     'bitcoin_key_1': b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'bitcoin_key_2': b'\x02cccccccccccccccccccccccccccccccc',
                                     'short_channel_id': bfh('0000000000000001'),
                                     'chain_hash': BitcoinTestnet.rev_genesis_bytes(),
                                     'len': b'\x00\x00', 'features': b''}, trusted=True)
        self.assertEqual(cdb.num_channels, 1)
        cdb.add_channel_announcement({'node_id_1': b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'node_id_2': b'\x02eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
                                     'bitcoin_key_1': b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'bitcoin_key_2': b'\x02eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
                                     'short_channel_id': bfh('0000000000000002'),
                                     'chain_hash': BitcoinTestnet.rev_genesis_bytes(),
                                     'len': b'\x00\x00', 'features': b''}, trusted=True)
        cdb.add_channel_announcement({'node_id_1': b'\x02aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'node_id_2': b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
                                     'bitcoin_key_1': b'\x02aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'bitcoin_key_2': b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
                                     'short_channel_id': bfh('0000000000000003'),
                                     'chain_hash': BitcoinTestnet.rev_genesis_bytes(),
                                     'len': b'\x00\x00', 'features': b''}, trusted=True)
        cdb.add_channel_announcement({'node_id_1': b'\x02cccccccccccccccccccccccccccccccc', 'node_id_2': b'\x02dddddddddddddddddddddddddddddddd',
                                     'bitcoin_key_1': b'\x02cccccccccccccccccccccccccccccccc', 'bitcoin_key_2': b'\x02dddddddddddddddddddddddddddddddd',
                                     'short_channel_id': bfh('0000000000000004'),
                                     'chain_hash': BitcoinTestnet.rev_genesis_bytes(),
                                     'len': b'\x00\x00', 'features': b''}, trusted=True)
        cdb.add_channel_announcement({'node_id_1': b'\x02dddddddddddddddddddddddddddddddd', 'node_id_2': b'\x02eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
                                     'bitcoin_key_1': b'\x02dddddddddddddddddddddddddddddddd', 'bitcoin_key_2': b'\x02eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
                                     'short_channel_id': bfh('0000000000000005'),
                                     'chain_hash': BitcoinTestnet.rev_genesis_bytes(),
                                     'len': b'\x00\x00', 'features': b''}, trusted=True)
        cdb.add_channel_announcement({'node_id_1': b'\x02aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'node_id_2': b'\x02dddddddddddddddddddddddddddddddd',
                                     'bitcoin_key_1': b'\x02aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'bitcoin_key_2': b'\x02dddddddddddddddddddddddddddddddd',
                                     'short_channel_id': bfh('0000000000000006'),
                                     'chain_hash': BitcoinTestnet.rev_genesis_bytes(),
                                     'len': b'\x00\x00', 'features': b''}, trusted=True)
        o = lambda i: i.to_bytes(8, "big")
        # Channel policies: channel 5 (one direction) and channel 6 carry much
        # higher proportional fees, steering the best a->e path via b-c-d-e.
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000001'), 'message_flags': b'\x00', 'channel_flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000001'), 'message_flags': b'\x00', 'channel_flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000002'), 'message_flags': b'\x00', 'channel_flags': b'\x00', 'cltv_expiry_delta': o(99), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000002'), 'message_flags': b'\x00', 'channel_flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000003'), 'message_flags': b'\x00', 'channel_flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000003'), 'message_flags': b'\x00', 'channel_flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000004'), 'message_flags': b'\x00', 'channel_flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000004'), 'message_flags': b'\x00', 'channel_flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000005'), 'message_flags': b'\x00', 'channel_flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000005'), 'message_flags': b'\x00', 'channel_flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(999), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000006'), 'message_flags': b'\x00', 'channel_flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(99999999), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        cdb.add_channel_update({'short_channel_id': bfh('0000000000000006'), 'message_flags': b'\x00', 'channel_flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150), 'chain_hash': BitcoinTestnet.rev_genesis_bytes(), 'timestamp': b'\x00\x00\x00\x00'})
        path = path_finder.find_path_for_payment(b'\x02aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', b'\x02eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee', 100000)
        self.assertEqual([(b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', b'\x00\x00\x00\x00\x00\x00\x00\x03'),
                          (b'\x02cccccccccccccccccccccccccccccccc', b'\x00\x00\x00\x00\x00\x00\x00\x01'),
                          (b'\x02dddddddddddddddddddddddddddddddd', b'\x00\x00\x00\x00\x00\x00\x00\x04'),
                          (b'\x02eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee', b'\x00\x00\x00\x00\x00\x00\x00\x05')
                          ], path)
        start_node = b'\x02bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
        route = path_finder.create_route_from_path(path, start_node)
        self.assertEqual(route[0].node_id, start_node)
        self.assertEqual(route[0].short_channel_id, bfh('0000000000000003'))
        # need to duplicate tear_down here, as we also need to wait for the sql thread to stop
        self.asyncio_loop.call_soon_threadsafe(self._stop_loop.set_result, 1)
        self._loop_thread.join(timeout=1)
        cdb.sql_thread.join(timeout=1)
def test_new_onion_packet(self):
# test vector from bolt-04
payment_path_pubkeys = [
bfh('02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619'),
bfh('0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c'),
bfh('027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007'),
bfh('032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991'),
bfh('02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145'),
]
session_key = bfh('4141414141414141414141414141414141414141414141414141414141414141')
associated_data = bfh('4242424242424242424242424242424242424242424242424242424242424242')
hops_data = [
OnionHopsDataSingle(OnionPerHop(
bfh('0000000000000000'), bfh('0000000000000000'), bfh('00000000')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0101010101010101'), bfh('0000000000000001'), bfh('00000001')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0202020202020202'), bfh('0000000000000002'), bfh('00000002')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0303030303030303'), bfh('0000000000000003'), bfh('00000003')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0404040404040404'), bfh('0000000000000004'), bfh('00000004')
)),
]
packet = new_onion_packet(payment_path_pubkeys, session_key, hops_data, associated_data)
self.assertEqual(bfh('0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a71da571226458c510bbadd1276f045c21c520a07d35da256ef75b4367962437b0dd10f7d61ab590531cf08000178a333a347f8b4072e216400406bdf3bf038659793a86cae5f52d32f3438527b47a1cfc54285a8afec3a4c9f3323db0c946f5d4cb2ce721caad69320c3a469a202f3e468c67eaf7a7cda226d0fd32f7b48084dca885d15222e60826d5d971f64172d98e0760154400958f00e86697aa1aa9d41bee8119a1ec866abe044a9ad635778ba61fc0776dc832b39451bd5d35072d2269cf9b040d6ba38b54ec35f81d7fc67678c3be47274f3c4cc472aff005c3469eb3bc140769ed4c7f0218ff8c6c7dd7221d189c65b3b9aaa71a01484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace63
22d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f917265f21f9190c70217774a6fbaaa7d63ad64199f4664813b955cff954949076dcf'),
packet.to_bytes())
def test_process_onion_packet(self):
# this test is not from bolt-04, but is based on the one there;
# except here we have the privkeys for these pubkeys
payment_path_pubkeys = [
bfh('03d75c0ee70f68d73d7d13aeb6261d8ace11416800860c7e59407afe4e2e2d42bb'),
bfh('03960a0b830c7b8e76de745b819f252c62508346196b916f5e813cdb0773283cce'),
bfh('0385620e0a571cbc3552620f8bf1bdcdab2d1a4a59c36fa10b8249114ccbdda40d'),
bfh('02ee242cf6c38b7285f0152c33804ff777f5c51fd352ca8132e845e2cf23b3d8ba'),
bfh('025c585fd2e174bf8245b2b4a119e52a417688904228643ea3edaa1728bf2a258e'),
]
payment_path_privkeys = [
bfh('3463a278617b3dd83f79bda7f97673f12609c54386e1f0d2b67b1c6354fda14e'),
bfh('7e1255fddb52db1729fc3ceb21a46f95b8d9fe94cc83425e936a6c5223bb679d'),
bfh('c7ce8c1462c311eec24dff9e2532ac6241e50ae57e7d1833af21942136972f23'),
bfh('3d885f374d79a5e777459b083f7818cdc9493e5c4994ac9c7b843de8b70be661'),
bfh('dd72ab44729527b7942e195e7a835e7c71f9c0ff61844eb21274d9c26166a8f8'),
]
session_key = bfh('4141414141414141414141414141414141414141414141414141414141414141')
associated_data = bfh('4242424242424242424242424242424242424242424242424242424242424242')
hops_data = [
OnionHopsDataSingle(OnionPerHop(
bfh('0000000000000000'), bfh('0000000000000000'), bfh('00000000')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0101010101010101'), bfh('0000000000000001'), bfh('00000001')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0202020202020202'), bfh('0000000000000002'), bfh('00000002')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0303030303030303'), bfh('0000000000000003'), bfh('00000003')
)),
OnionHopsDataSingle(OnionPerHop(
bfh('0404040404040404'), bfh('0000000000000004'), bfh('00000004')
)),
]
packet = new_onion_packet(payment_path_pubkeys, session_key, hops_data, associated_data)
self.assertEqual(bfh('0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f28368661954176cd9869da33d713aa219fcef1e5c806fef11e696bcc66844de8271c27974a0fd57c2dbcb2c6dd4e8ef35d96db28d5a0e49b6ab3d6de31af65950723b8cddc108390bebf8d149002e31bdc283056477ba27c8054c248ad7306de31663a7c99ec659da15d0f6fbc7e1687485b39e9be0ec3b70164cb3618a9b546317e7c2d62ae9f0f840704535729262d30c6132d1b390f073edec8fa057176c6268b6ad06a82ff0d16d4c662194873e8b4ecf46eb2c9d4d58d2ee2021adb19840605ac5afd8bd942dd71e8244c83e28b2ed5a3b09e9e7df5c8c747e5765ba366a4f7407a6c6b0a32f74bc5e428f7fa4c3cf70e13ed91563177d94190d5149aa4b9c96d00e40d2ac35ab9c4a621ce0f6f5df7d64a9c8d435db19de192d9db522c7f7b4e201fc1b61a9bd3efd062ae24455d463818b01e2756c7d0691bc3ac4c017be34c9a8b2913bb1b937e31e0ae40f650a7cd820bcb4996825b1cbad1ff7ccc2b513b1104524c34f6573e1b59201c005a632ee5dccd3711a32e3ba1ff00fcffbe636e4b3a84bbe491b836a57ccec138b8cc2ec733846904d872f305d538d51db8e56232ec6e07877075328874cb7b09c7e799100a9ff085dead253886b174fc408a0ea7b48bce2c5d8992285011960af088f7e006ef60089d46ac9aa15acfac6c87c3cf6904764dd785419292fbafa9cca09c8ade24a6cd63f12d1cfc83fa35cf2f1cf503c39cbf78293f06c68a3cece7177169cd872bb49bf69d933a27a887dd9daefa9239fca9f0c3e309ec61d9df947211da98cf11a6e0fb77252629cdf9f2226dd69ca73fa51be4df224592f8d471b69a1aebbdaa2f3a798b3581253d97feb0a12e6606043ca0fc5efc0f49b8061d6796eff31cd8638499e2f25ffb96eec32837438ed7ebebbe587886648f63e35d80f41869f4c308f2e6970bd65fead5e8544e3239a6acc9d996b08d1546455bcafbe88ed3ed547714841946fe2e77180e4d7bf1452414e4b1745a7897184a2c4cbc3ac46f83342a55a48e29dc8f17cf595dd28f51e297ba89fd25ed0dbd1c0081a810beaab09758a36fbfd16fbdc3daa9fe05c8a73195f244ef2743a5df761f01ee6e693eb6c7f1a7834fab3671391e5ddebf611e119a2ae4456e2cee7a6d4f27a2246cdb1f8ef35f0b3d7044b3799d8d0ed0a6470557fd807c065d6d83acba07e96e10770ada8c0b4d4921522944188d5f30086a6ee0a4795331273f32beaaa43363fc58208a257e5c5c434c7325b583642219d81c7d67b908d5263b42ac1991edc69a777da60f38eff138c844af9e549374e8b29b166211bfded24587a29394e33828b784da7e7b62ab7e49ea269
3fcdd17fa96186a5ef11ef1a8adffa50f93a3119e95e6c09014f3e3b0709183fa08a826ced6deb4608b7d986ebbcf99ad58e25451d4d9d38d0059734d8501467b97182cd11e0c07c91ca50f61cc31255a3147ade654976a5989097281892aafd8df595c63bd14f1e03f5955a9398d2dd6368bbcae833ae1cc2df31eb0980b4817dfd130020ffb275743fcc01df40e3ecda1c5988e8e1bde965353b0b1bf34ea05f095000c45b6249618d275905a24d3eb58c600aeab4fb552fbf1ccdb2a5c80ace220310f89829d7e53f78c126037b6d8d500220c7a118d9621b4d6bd5379edd7e24bcf540e87aba6b88862db16fa4ee00b009fda80577be67ab94910fd8a7807dfe4ebe66b8fdcd040aa2dc17ec22639298be56b2a2c9d8940647b75f2f6d81746df16e1cb2f05e23397a8c63baea0803441ff4b7d517ff172980a056726235e2f6af85e8aa9b91ba85f14532272d6170df3166b91169dc09d4f4a251610f57ff0885a93364cfaf650bdf436c89795efed5ca934bc7ffc0a4'),
packet.to_bytes())
for i, privkey in enumerate(payment_path_privkeys):
processed_packet = process_onion_packet(packet, associated_data, privkey)
self.assertEqual(hops_data[i].per_hop.to_bytes(), processed_packet.hop_data.per_hop.to_bytes())
packet = processed_packet.next_packet
    def test_decode_onion_error(self):
        # test vector from bolt-04
        # Public keys of the five hops along the payment path.
        payment_path_pubkeys = [
            bfh('02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619'),
            bfh('0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c'),
            bfh('027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007'),
            bfh('032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991'),
            bfh('02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145'),
        ]
        session_key = bfh('4141414141414141414141414141414141414141414141414141414141414141')
        # Error packet as received back at the origin (node 0); per bolt-04 this
        # is presumably obfuscated once per traversed hop — see the spec.
        error_packet_for_node_0 = bfh('9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d')
        # Low-level API: returns the raw de-obfuscated payload plus the index of
        # the hop that generated the error.
        decoded_error, index_of_sender = _decode_onion_error(error_packet_for_node_0, payment_path_pubkeys, session_key)
        self.assertEqual(bfh('4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'),
                         decoded_error)
        self.assertEqual(4, index_of_sender)
        # High-level API: additionally parses the payload into a failure message.
        failure_msg, index_of_sender = decode_onion_error(error_packet_for_node_0, payment_path_pubkeys, session_key)
        self.assertEqual(4, index_of_sender)
        self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, failure_msg.code)
        self.assertEqual(b'', failure_msg.data)
| 107.728571 | 2,765 | 0.766919 |
4d62244adbd1375d592651dfaadfc249bf81f841 | 1,292 | py | Python | working_with_pdf_csv/Working with CVS.py | alok8765/basic_python_practicse | 9bd61f0b03fc1e703a75df39862a24692bb3fdb7 | [
"MIT"
] | null | null | null | working_with_pdf_csv/Working with CVS.py | alok8765/basic_python_practicse | 9bd61f0b03fc1e703a75df39862a24692bb3fdb7 | [
"MIT"
] | null | null | null | working_with_pdf_csv/Working with CVS.py | alok8765/basic_python_practicse | 9bd61f0b03fc1e703a75df39862a24692bb3fdb7 | [
"MIT"
] | null | null | null | #Working with CVS
import csv
#open the file
data=open('example.csv',encoding='utf-8')
#csv.reader
csv_data=csv.reader(data)
#reformatted into python obj (list of list)
data_line=list(csv_data)
#print(data_line)
#check first row item
print(data_line[0])
#check number of rows
print(len(data_line))
#print few rows in list of list
#for line in data_line[1:5]:
#print(line)
#Extract any single row
#print(data_line[10])
#Extract any single value of any line
#print(data_line[20][3])
#Extracting all email in table
all_email=[]
for x in data_line[1:15]:
all_email.append(x[3])
#print(all_email)
#get a full name from first and last name
full_name=[]
for line in data_line[1:]:
full_name.append(line[1]+' '+ line[2])
#print(full_name)
#write csv file
file_to_output = open('save_file.csv','w',newline='')
csv_writer = csv.writer(file_to_output,delimiter=',')
csv_writer.writerow(['a','b','c'])
file_to_output.close()
#save multiple list
file_to_output = open('save_file.csv','w',newline='')
csv_writer = csv.writer(file_to_output,delimiter=',')
csv_writer.writerows([['1','2','3'],['4','5','6']])
file_to_output.close()
#appending on existing file
f = open('to_save_file.csv','a',newline='')
csv_writer = csv.writer(f)
csv_writer.writerow(['new','new','new'])
f.close() | 23.071429 | 53 | 0.713622 |
3daef2e6adc20c1c235f2d629685e994940e3502 | 9,676 | py | Python | drqa/reader/utils.py | felixgaschi/experiment-on-DrQA | fe276df098bc797a20607930f12ad7079659575d | [
"BSD-3-Clause"
] | null | null | null | drqa/reader/utils.py | felixgaschi/experiment-on-DrQA | fe276df098bc797a20607930f12ad7079659575d | [
"BSD-3-Clause"
] | null | null | null | drqa/reader/utils.py | felixgaschi/experiment-on-DrQA | fe276df098bc797a20607930f12ad7079659575d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""DrQA reader utilities."""
import json
import time
import logging
import string
import regex as re
from collections import Counter
from .data import Dictionary
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Data loading
# ------------------------------------------------------------------------------
def load_data(args, filename, skip_no_answer=False):
    """Load preprocessed examples from a JSON-lines file.

    Each line of the file is one JSON-encoded example dict.
    """
    with open(filename) as handle:
        examples = list(map(json.loads, handle))

    # Optionally lower-case the question and/or document tokens.
    if args.uncased_question or args.uncased_doc:
        for example in examples:
            if args.uncased_question:
                example['question'] = [tok.lower() for tok in example['question']]
            if args.uncased_doc:
                example['document'] = [tok.lower() for tok in example['document']]

    # Optionally drop examples with no parsed (start/end) answer spans.
    if skip_no_answer:
        examples = [example for example in examples if example['answers']]

    return examples
def load_text(filename):
    """Load only the paragraphs of a SQuAD dataset, keyed as qid -> text."""
    with open(filename) as handle:
        dataset = json.load(handle)['data']

    texts = {}
    for article in dataset:
        for paragraph in article['paragraphs']:
            # All questions of a paragraph share the same context.
            context = paragraph['context']
            for qa in paragraph['qas']:
                texts[qa['id']] = context
    return texts
def load_answers(filename):
    """Load only the answers of a SQuAD dataset, keyed as qid -> [answer texts]."""
    with open(filename) as handle:
        dataset = json.load(handle)['data']

    ans = {}
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                # Keep just the answer strings, dropping span offsets.
                ans[qa['id']] = [entry['text'] for entry in qa['answers']]
    return ans
# ------------------------------------------------------------------------------
# Dictionary building
# ------------------------------------------------------------------------------
def index_embedding_words(embedding_file, lower=False, remove=None):
    """Collect all words of a space-separated embedding file into a set.

    The word is the first field of each line; it is normalized the same way
    the Dictionary normalizes tokens.
    """
    words = set()
    with open(embedding_file) as handle:
        for line in handle:
            token = line.rstrip().split(' ')[0]
            words.add(Dictionary.normalize(token, lower=lower, remove=remove))
    return words
def index_embedding_words_tsv(vocab_file, lower=False, remove=None):
    """Collect the vocabulary column of a TSV file into a set.

    The first row is treated as a header and skipped; the word is the first
    tab-separated field of every subsequent row.
    """
    words = set()
    with open(vocab_file) as handle:
        for row_num, line in enumerate(handle):
            if row_num == 0:
                # Header row — skip.
                continue
            token = line.rstrip().split("\t")[0]
            words.add(Dictionary.normalize(token, lower=lower, remove=remove))
    return words
def load_words(args, examples):
    """Index all words appearing in the examples (documents + questions).

    If args.restrict_vocab is set and an embedding file is given, only words
    present in the embedding vocabulary are kept.
    """
    valid_words = None
    if args.restrict_vocab and args.embedding_file:
        logger.info('Restricting to words in %s' % args.embedding_file)
        if args.use_tsv:
            valid_words = index_embedding_words_tsv(
                args.vocab_file, lower=args.lower, remove=args.remove)
        else:
            valid_words = index_embedding_words(
                args.embedding_file, lower=args.lower, remove=args.remove)
        logger.info('Num words in set = %d' % len(valid_words))

    words = set()

    def _insert(iterable, words, valid_words):
        # Normalize each token and keep it only if it survives the vocab filter.
        for token in iterable:
            token = Dictionary.normalize(
                token, lower=args.lower, remove=args.remove)
            if valid_words and token not in valid_words:
                continue
            words.add(token)
        return words

    for ex in examples:
        words = _insert(ex['question'], words, valid_words)
        words = _insert(ex['document'], words, valid_words)
    return words
def build_word_dict(args, examples):
    """Build a Dictionary from all question and document words in the
    provided examples.
    """
    collected = load_words(args, examples)
    word_dict = Dictionary(lower=args.lower, remove=args.remove)
    for token in collected:
        word_dict.add(token)
    return word_dict
def top_question_words(args, examples, word_dict):
    """Return the args.tune_partial most common question words (with counts)
    among those present in word_dict.
    """
    counts = Counter()
    for ex in examples:
        for token in ex['question']:
            normalized = Dictionary.normalize(
                token, lower=args.lower, remove=args.remove)
            # Only count words the dictionary actually knows about.
            if normalized in word_dict:
                counts[normalized] += 1
    return counts.most_common(args.tune_partial)
def build_feature_dict(args, examples):
    """Map each enabled (one-hot) feature name to a consecutive index."""
    feature_dict = {}

    def _insert(feature):
        # Assign the next index only to features not seen before.
        feature_dict.setdefault(feature, len(feature_dict))

    # Exact match features.
    if args.use_in_question:
        _insert('in_question')
        _insert('in_question_uncased')
        if args.use_lemma:
            _insert('in_question_lemma')

    # Part of speech tag features (one per distinct tag in the data).
    if args.use_pos:
        for ex in examples:
            for tag in ex['pos']:
                _insert('pos=%s' % tag)

    # Named entity tag features (one per distinct tag in the data).
    if args.use_ner:
        for ex in examples:
            for tag in ex['ner']:
                _insert('ner=%s' % tag)

    # Term frequency feature.
    if args.use_tf:
        _insert('tf')
    return feature_dict
# ------------------------------------------------------------------------------
# Evaluation. Follows official evalutation script for v1.1 of the SQuAD dataset.
# ------------------------------------------------------------------------------
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    punctuation = set(string.punctuation)

    text = s.lower()
    # Strip punctuation characters.
    text = ''.join(ch for ch in text if ch not in punctuation)
    # Drop English articles (whole words only).
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    # Collapse runs of whitespace to single spaces.
    return ' '.join(text.split())
def f1_score(prediction, ground_truth):
    """Compute token-level F1 (harmonic mean of precision and recall)."""
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts each shared token min(#pred, #gold) times.
    overlap = sum((Counter(pred_tokens) & Counter(gold_tokens)).values())
    if overlap == 0:
        return 0
    precision = 1.0 * overlap / len(pred_tokens)
    recall = 1.0 * overlap / len(gold_tokens)
    return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth):
    """Check if the prediction is a (soft) exact match with the ground truth."""
    # Both sides are normalized identically before comparison.
    left, right = normalize_answer(prediction), normalize_answer(ground_truth)
    return left == right
def regex_match_score(prediction, pattern):
    """Check if the prediction matches the given regular expression.

    Matching is case-insensitive, Unicode-aware and multi-line, anchored at
    the start of `prediction` (via `.match`). Returns False (after logging a
    warning) when the pattern fails to compile.
    """
    try:
        compiled = re.compile(
            pattern,
            # `|` is the idiomatic way to combine flags (was `+`).
            flags=re.IGNORECASE | re.UNICODE | re.MULTILINE
        )
    except re.error:
        # Was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; only compilation errors belong here.
        logger.warning('Regular expression failed to compile: %s' % pattern)
        return False
    return compiled.match(prediction) is not None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Given a prediction and multiple valid answers, return the best
    prediction-answer score under the given metric function.
    """
    # max over a generator raises ValueError on an empty answer list,
    # matching the original behavior.
    return max(metric_fn(prediction, gt) for gt in ground_truths)
# ------------------------------------------------------------------------------
# Utility classes
# ------------------------------------------------------------------------------
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Zero out the latest value and the running statistics.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val`, folding `n` occurrences of it into the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class Timer(object):
    """Stopwatch that accumulates elapsed wall-clock time across stop/resume."""

    def __init__(self):
        # A freshly constructed timer is already running.
        self.reset()

    def reset(self):
        """Restart from zero in the running state."""
        self.running = True
        self.total = 0
        self.start = time.time()
        return self

    def resume(self):
        """Continue timing after a stop(); no-op if already running."""
        if self.running:
            return self
        self.running = True
        self.start = time.time()
        return self

    def stop(self):
        """Pause timing, folding the current span into the total."""
        if self.running:
            self.running = False
            self.total += time.time() - self.start
        return self

    def time(self):
        """Elapsed seconds so far, including the in-progress span if running."""
        elapsed = self.total
        if self.running:
            elapsed += time.time() - self.start
        return elapsed
| 29.772308 | 80 | 0.56604 |
e1a5b4637489993898fa17e3756d3650c4f88620 | 14,870 | py | Python | trpo.py | sagerpascal/deep-rl-bootcamp-lab4 | 1058a41ef0a7fa909b02bc8e760b1b48c0cae6f8 | [
"MIT"
] | null | null | null | trpo.py | sagerpascal/deep-rl-bootcamp-lab4 | 1058a41ef0a7fa909b02bc8e760b1b48c0cae6f8 | [
"MIT"
] | null | null | null | trpo.py | sagerpascal/deep-rl-bootcamp-lab4 | 1058a41ef0a7fa909b02bc8e760b1b48c0cae6f8 | [
"MIT"
] | null | null | null | """
This project was developed by Rocky Duan, Peter Chen, Pieter Abbeel for the Berkeley Deep RL Bootcamp, August 2017. Bootcamp website with slides and lecture videos: https://sites.google.com/view/deep-rl-bootcamp/.
Copyright 2017 Deep RL Bootcamp Organizers.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from alg_utils import *
from simplepg.simple_utils import test_once
import tests.trpo_tests
def fvp(policy, f_kl, grad0, v, eps=1e-5, damping=1e-8):
    """Approximate the Fisher-vector product F(x)v via finite differences.

    Let g(x) be the gradient of the KL divergence (f_kl) at parameter x.
    Taylor expansion gives g(x + eps*v) ≈ g(x) + eps*F(x)v, hence
    F(x)v ≈ (g(x + eps*v) - g(x)) / eps. Since x is always the current
    parameter, g(x) is cached by the caller and passed in as grad0.

    :param policy: The policy whose Fisher-vector product is computed.
    :param f_kl: Zero-argument function computing the average KL divergence.
    :param grad0: Gradient of the KL divergence at the current parameters.
    :param v: Vector to form the product with.
    :param eps: Finite-difference perturbation size.
    :param damping: Small multiple of the parameters added to keep the
        (implicit) Fisher matrix positive definite.
    :return: Flat vector approximating F(x)v (+ damping term).
    """
    theta = get_flat_params(policy)

    # Evaluate the KL gradient at the perturbed point x + eps*v.
    set_flat_params(policy, theta + eps * v)
    policy.cleargrads()
    f_kl().backward()
    grad_eps = get_flat_grad(policy)

    # Restore the original parameters before returning.
    set_flat_params(policy, theta)

    # Finite difference plus damping.
    return (grad_eps - grad0) / (eps) + damping * theta
def linesearch(f, x0, dx, expected_improvement, y0=None, backtrack_ratio=0.8, max_backtracks=15, accept_ratio=0.1,
               atol=1e-7):
    """Backtracking line search on f starting from x0 along -dx.

    Geometrically shrinks the step (ratio = backtrack_ratio**k) until the
    actual improvement is at least accept_ratio of the expected improvement
    scaled by the same ratio. Returns x0 unchanged when expected_improvement
    is below atol or no candidate is accepted.

    :param f: Function being searched.
    :param x0: Current parameter value.
    :param dx: Full descent direction to shrink along.
    :param expected_improvement: Predicted improvement for the full step,
        typically (grad f at x0).dot(dx).
    :param y0: Optional precomputed f(x0).
    :param backtrack_ratio: Shrink factor per backtracking step.
    :param max_backtracks: Maximum number of backtracking steps.
    :param accept_ratio: Minimum actual/expected improvement ratio to accept.
    :param atol: Below this expected improvement the search is skipped.
    :return: The accepted point, or x0 if none was accepted.
    """
    if expected_improvement >= atol:
        if y0 is None:
            y0 = f(x0)
        for ratio in backtrack_ratio ** np.arange(max_backtracks):
            candidate = x0 - ratio * dx
            actual_improvement = y0 - f(candidate)
            if actual_improvement / (expected_improvement * ratio) >= accept_ratio:
                logger.logkv("ExpectedImprovement",
                             expected_improvement * ratio)
                logger.logkv("ActualImprovement", actual_improvement)
                logger.logkv("ImprovementRatio", actual_improvement /
                             (expected_improvement * ratio))
                return candidate
    # No acceptable step (or negligible expected improvement): keep x0.
    logger.logkv("ExpectedImprovement", expected_improvement)
    logger.logkv("ActualImprovement", 0.)
    logger.logkv("ImprovementRatio", 0.)
    return x0
def trpo(env, env_maker, policy, baseline, n_envs=mp.cpu_count(), last_iter=-1, n_iters=100, batch_size=1000,
         discount=0.99, gae_lambda=0.97, step_size=0.01, use_linesearch=True, kl_subsamp_ratio=1., snapshot_saver=None):
    """
    This method implements Trust Region Policy Optimization. Without the line search step, this algorithm is equivalent
    to an approximate procedure for computing natural gradient using conjugate gradients, where it performs approximate
    Hessian-vector product computation using finite differences.
    :param env: An environment instance, which should have the same class as what env_maker.make() returns.
    :param env_maker: An object such that calling env_maker.make() will generate a new environment.
    :param policy: A stochastic policy which we will be optimizing.
    :param baseline: A baseline used for variance reduction and estimating future returns for unfinished trajectories.
    :param n_envs: Number of environments running simultaneously.
    :param last_iter: The index of the last iteration. This is normally -1 when starting afresh, but may be different when
        loaded from a snapshot.
    :param n_iters: The total number of iterations to run.
    :param batch_size: The number of samples used per iteration.
    :param discount: Discount factor.
    :param gae_lambda: Lambda parameter used for generalized advantage estimation. For details see
        "High-Dimensional Continuous Control Using Generalized Advantage Estimation" (Schulman et al., 2016).
    :param step_size: The maximum value of average KL divergence allowed per iteration.
    :param use_linesearch: Whether to perform line search using the surrogate loss derived in the TRPO algorithm.
        Without this step, the algorithm is equivalent to an implementation of natural policy gradient where we use
        conjugate gradient algorithm to approximately compute F^{-1}g, where F is the Fisher information matrix, and
        g is the policy gradient.
    :param kl_subsamp_ratio: The ratio we use to subsample data in computing the Hessian-vector products. This can
        potentially save a lot of time.
    :param snapshot_saver: An object for saving snapshots.
    """
    logger.info("Starting env pool")
    with EnvPool(env_maker, n_envs=n_envs) as env_pool:
        # NOTE(review): `iter` shadows the builtin of the same name inside this loop.
        for iter in range(last_iter + 1, n_iters):
            logger.info("Starting iteration {}".format(iter))
            logger.logkv('Iteration', iter)
            # --- Sample collection ---
            logger.info("Start collecting samples")
            trajs = parallel_collect_samples(env_pool, policy, batch_size)
            # --- Advantage / distribution computation ---
            logger.info("Computing input variables for policy optimization")
            all_obs, all_acts, all_advs, all_dists = compute_pg_vars(
                trajs, policy, baseline, discount, gae_lambda)
            logger.info("Performing policy update")
            # subsample for kl divergence computation
            # NOTE(review): np.bool was removed in NumPy >= 1.24; newer NumPy
            # needs np.bool_ or plain bool here.
            mask = np.zeros(len(all_obs), dtype=np.bool)
            mask_ids = np.random.choice(len(all_obs), size=int(
                np.ceil(len(all_obs) * kl_subsamp_ratio)), replace=False)
            mask[mask_ids] = 1
            if kl_subsamp_ratio < 1:
                subsamp_obs = all_obs[mask]
                subsamp_dists = policy.distribution.from_dict(
                    {k: v[mask] for k, v in all_dists.as_dict().items()})
            else:
                # No subsampling requested: use the full batch.
                subsamp_obs = all_obs
                subsamp_dists = all_dists
            # Define helper functions to compute surrogate loss and/or KL divergence. They share part of the computation
            # graph, so we use a common function to decide whether we should compute both (which is needed in the line
            # search phase)
            def f_loss_kl_impl(need_loss, need_kl):
                retval = dict()
                if need_loss:
                    new_dists = policy.compute_dists(all_obs)
                    old_dists = all_dists
                elif need_kl:
                    # if only kl is needed, compute distribution from sub-sampled data
                    new_dists = policy.compute_dists(subsamp_obs)
                    old_dists = subsamp_dists
                def compute_surr_loss(old_dists, new_dists, all_acts, all_advs):
                    """
                    Negative importance-weighted advantage (TRPO surrogate loss).
                    :param old_dists: An instance of subclass of Distribution
                    :param new_dists: An instance of subclass of Distribution
                    :param all_acts: A chainer variable, which should be a matrix of size N * |A|
                    :param all_advs: A chainer variable, which should be a vector of size N
                    :return: A chainer variable, which should be a scalar
                    """
                    surr_loss = Variable(np.array(0., dtype=np.float32))
                    "*** YOUR CODE HERE ***"
                    likelihood_ratio = new_dists.likelihood_ratio(old_dists, all_acts)
                    surr_loss -= F.mean(likelihood_ratio * all_advs)
                    return surr_loss
                def compute_kl(old_dists, new_dists):
                    """
                    Average KL divergence D_KL(old || new) over the batch.
                    :param old_dists: An instance of subclass of Distribution
                    :param new_dists: An instance of subclass of Distribution
                    :return: A chainer variable, which should be a scalar
                    """
                    kl = Variable(np.array(0., dtype=np.float32))
                    "*** YOUR CODE HERE ***"
                    kl += F.mean(old_dists.kl_div(new_dists))
                    return kl
                test_once(compute_surr_loss)
                test_once(compute_kl)
                if need_loss:
                    retval["surr_loss"] = compute_surr_loss(
                        old_dists, new_dists, all_acts, all_advs)
                if need_kl:
                    retval["kl"] = compute_kl(old_dists, new_dists)
                return retval
            def f_surr_loss():
                return f_loss_kl_impl(need_loss=True, need_kl=False)["surr_loss"]
            def f_kl():
                return f_loss_kl_impl(need_loss=False, need_kl=True)["kl"]
            def f_surr_loss_kl():
                retval = f_loss_kl_impl(need_loss=True, need_kl=True)
                return retval["surr_loss"], retval["kl"]
            # Step 1: compute gradient in Euclidean space
            logger.info("Computing gradient in Euclidean space")
            policy.cleargrads()
            surr_loss = f_surr_loss()
            surr_loss.backward()
            # obtain the flattened gradient vector
            flat_grad = get_flat_grad(policy)
            # Optimize memory usage: after getting the gradient, we do not need the rest of the computation graph
            # anymore
            surr_loss.unchain_backward()
            # Step 2: Perform conjugate gradient to compute approximate natural gradient
            logger.info(
                "Computing approximate natural gradient using conjugate gradient algorithm")
            policy.cleargrads()
            f_kl().backward()
            flat_kl_grad = get_flat_grad(policy)
            # Fisher-vector product closure used by the conjugate gradient solver.
            def Fx(v):
                return fvp(policy, f_kl, flat_kl_grad, v)
            descent_direction = cg(Fx, flat_grad)
            # Step 3: Compute initial step size
            # We'd like D_KL(old||new) <= step_size
            # The 2nd order approximation gives 1/2*d^T*H*d <= step_size, where d is the descent step
            # Hence given the initial direction d_0 we can rescale it so that this constraint is tight
            # Let this scaling factor be \alpha, i.e. d = \alpha*d_0
            # Solving 1/2*\alpha^2*d_0^T*H*d_0 = step_size we get \alpha = \sqrt(2 * step_size / d_0^T*H*d_0)
            scale = np.sqrt(
                2.0 * step_size *
                (1. / (descent_direction.dot(Fx(descent_direction)) + 1e-8))
            )
            descent_step = descent_direction * scale
            cur_params = get_flat_params(policy)
            if use_linesearch:
                # Step 4: Perform line search
                logger.info("Performing line search")
                expected_improvement = flat_grad.dot(descent_step)
                # Surrogate loss with a huge penalty whenever the KL constraint
                # is violated, so the line search rejects infeasible steps.
                def f_barrier(x):
                    set_flat_params(policy, x)
                    with chainer.no_backprop_mode():
                        surr_loss, kl = f_surr_loss_kl()
                    return surr_loss.data + 1e100 * max(kl.data - step_size, 0.)
                new_params = linesearch(
                    f_barrier,
                    x0=cur_params,
                    dx=descent_step,
                    y0=surr_loss.data,
                    expected_improvement=expected_improvement
                )
            else:
                # Plain natural gradient step (no feasibility check).
                new_params = cur_params - descent_step
            set_flat_params(policy, new_params)
            logger.info("Updating baseline")
            baseline.update(trajs)
            # log statistics
            logger.info("Computing logging information")
            with chainer.no_backprop_mode():
                mean_kl = f_kl().data
            logger.logkv('MeanKL', mean_kl)
            log_action_distribution_statistics(all_dists)
            log_reward_statistics(env)
            log_baseline_statistics(trajs)
            logger.dumpkvs()
            if snapshot_saver is not None:
                logger.info("Saving snapshot")
                # Persist everything needed to resume training from this iteration.
                snapshot_saver.save_state(
                    iter,
                    dict(
                        alg=trpo,
                        alg_state=dict(
                            env_maker=env_maker,
                            policy=policy,
                            baseline=baseline,
                            n_envs=n_envs,
                            last_iter=iter,
                            n_iters=n_iters,
                            batch_size=batch_size,
                            discount=discount,
                            gae_lambda=gae_lambda,
                            step_size=step_size,
                            use_linesearch=use_linesearch,
                            kl_subsamp_ratio=kl_subsamp_ratio,
                        )
                    )
                )
| 47.813505 | 460 | 0.620444 |
84134d45ff1ab7f2f202dd2ffc16ab73fae2c0e6 | 977 | py | Python | gallery/views.py | lilian-2021/my-gallery | aaa1ac356f96258c1272ee9418be8e212fbe863b | [
"Unlicense"
] | null | null | null | gallery/views.py | lilian-2021/my-gallery | aaa1ac356f96258c1272ee9418be8e212fbe863b | [
"Unlicense"
] | null | null | null | gallery/views.py | lilian-2021/my-gallery | aaa1ac356f96258c1272ee9418be8e212fbe863b | [
"Unlicense"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse,Http404
from .models import gallery
# Create your views here.
def all_photos(request):
    """Render the landing page with every gallery image."""
    context = {'images': gallery.objects.all()}
    return render(request, 'index.html', context)
def photo_description(request, Gallery_image_id):
    """Render the page for a single image looked up by primary key.

    Raises Http404 (imported at the top of this module) for an unknown id,
    instead of the unhandled DoesNotExist / HTTP 500 the original produced.
    """
    try:
        image = gallery.objects.get(id=Gallery_image_id)
    except gallery.DoesNotExist:
        raise Http404('Image not found')
    return render(request, 'index.html', {'image': image})
def search_results(request):
    """Render search results for the 'images' query parameter."""
    search_term = request.GET.get("images")
    if not search_term:
        # Missing or empty search term.
        message = "You haven't searched for any term"
        return render(request, 'search.html', {"message": message})
    searched_picture = gallery.search_by_category(search_term)
    return render(request, 'search.html',
                  {"message": search_term, "images": searched_picture})
def profile(request):
    """Placeholder profile page."""
    body = 'This is my profile page'
    return HttpResponse(body)
| 29.606061 | 94 | 0.696008 |
3ec1432e389c944f7f9398746f6a3194398a81f4 | 443 | py | Python | bookwyrm/migrations/0114_importjob_source.py | mouse-reeve/fedireads | e3471fcc3500747a1b1deaaca662021aae5b08d4 | [
"CC0-1.0"
] | 270 | 2020-01-27T06:06:07.000Z | 2020-06-21T00:28:18.000Z | bookwyrm/migrations/0114_importjob_source.py | mouse-reeve/fedireads | e3471fcc3500747a1b1deaaca662021aae5b08d4 | [
"CC0-1.0"
] | 158 | 2020-02-10T20:36:54.000Z | 2020-06-26T17:12:54.000Z | bookwyrm/migrations/0114_importjob_source.py | mouse-reeve/fedireads | e3471fcc3500747a1b1deaaca662021aae5b08d4 | [
"CC0-1.0"
] | 15 | 2020-02-13T21:53:33.000Z | 2020-06-17T16:52:46.000Z | # Generated by Django 3.2.5 on 2021-11-13 00:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a `source` CharField to ImportJob.

    The default 'Import' is used to backfill existing rows;
    preserve_default=False removes the default from the model afterwards.
    """

    dependencies = [
        ("bookwyrm", "0113_auto_20211110_2104"),
    ]

    operations = [
        migrations.AddField(
            model_name="importjob",
            name="source",
            field=models.CharField(default="Import", max_length=100),
            preserve_default=False,
        ),
    ]
bcd50cd546991918e4d417c120ad285793289eee | 338 | py | Python | src/griffe/__main__.py | rohankumardubey/griffe | 3c026d3b8c9714d4b15a6ad2e003718e2e46a3e4 | [
"0BSD"
] | 29 | 2022-01-31T21:39:28.000Z | 2022-03-24T04:03:27.000Z | src/griffe/__main__.py | rohankumardubey/griffe | 3c026d3b8c9714d4b15a6ad2e003718e2e46a3e4 | [
"0BSD"
] | 56 | 2022-01-31T20:41:23.000Z | 2022-03-31T19:03:07.000Z | src/griffe/__main__.py | rohankumardubey/griffe | 3c026d3b8c9714d4b15a6ad2e003718e2e46a3e4 | [
"0BSD"
] | 5 | 2022-02-19T16:58:55.000Z | 2022-03-21T18:28:03.000Z | """
Entry-point module, in case you use `python -m griffe`.
Why does this file exist, and why `__main__`? For more info, read:
- https://www.python.org/dev/peps/pep-0338/
- https://docs.python.org/3/using/cmdline.html#cmdoption-m
"""
import sys
from griffe.cli import main
# Entry point for `python -m griffe`: forward CLI args (minus the program
# name) to main() and propagate its return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| 21.125 | 66 | 0.695266 |
319939c33a3a60dc69da4e47414a6368bb852108 | 25 | py | Python | python/zpmanga/manga_tools/__init__.py | zpace/zpmanga | 320ac59e5cdf4c62b24165945d9a194b80ea07a2 | [
"BSD-3-Clause"
] | null | null | null | python/zpmanga/manga_tools/__init__.py | zpace/zpmanga | 320ac59e5cdf4c62b24165945d9a194b80ea07a2 | [
"BSD-3-Clause"
] | null | null | null | python/zpmanga/manga_tools/__init__.py | zpace/zpmanga | 320ac59e5cdf4c62b24165945d9a194b80ea07a2 | [
"BSD-3-Clause"
] | null | null | null | __all__ = ['manga_tools'] | 25 | 25 | 0.72 |
4f018d05affa6f3da20273ee00d72e2bd1e36807 | 57,928 | py | Python | tests/testflows/ldap/role_mapping/tests/mapping.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
] | 1 | 2022-02-08T03:09:51.000Z | 2022-02-08T03:09:51.000Z | tests/testflows/ldap/role_mapping/tests/mapping.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
] | 1 | 2022-03-21T07:27:34.000Z | 2022-03-21T07:27:34.000Z | tests/testflows/ldap/role_mapping/tests/mapping.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from testflows.core import *
from testflows.asserts import error
from helpers.common import Pool
from ldap.role_mapping.requirements import *
from ldap.role_mapping.tests.common import *
from ldap.external_user_directory.tests.common import randomword
from ldap.external_user_directory.tests.authentications import login_with_valid_username_and_password
from ldap.external_user_directory.tests.authentications import login_with_invalid_username_and_valid_password
from ldap.external_user_directory.tests.authentications import login_with_valid_username_and_invalid_password
def remove_ldap_groups_in_parallel(groups, i, iterations=10):
    """Worker that deletes the given LDAP groups over and over.

    Makes `iterations` passes; on each pass every group in `groups` is
    deleted with errors ignored (exitcode=None). `i` only labels the
    test step for this worker.
    """
    with When(f"LDAP groups are removed #{i}"):
        j = 0
        while j < iterations:
            for group in groups:
                with When(f"I delete group #{j}", description=f"{group}"):
                    delete_group_from_ldap(group, exitcode=None)
            j += 1
def add_ldap_groups_in_parallel(ldap_user, names, i, iterations=10):
    """Worker that repeatedly creates LDAP groups and adds the user to them.

    Makes `iterations` passes; on each pass a group is created for every
    name in `names` and `ldap_user` is added to it, errors ignored
    (exitcode=None). `i` only labels the test step for this worker.
    """
    with When(f"LDAP groups are added #{i}"):
        j = 0
        while j < iterations:
            for name in names:
                with When(f"I add group {name} #{j}", description=f"{name}"):
                    new_group = add_group_to_ldap(cn=name, exitcode=None)
                    with When(f"I add user to the group"):
                        add_user_to_group_in_ldap(user=ldap_user, group=new_group, exitcode=None)
            j += 1
def add_user_to_ldap_groups_in_parallel(ldap_user, groups, i, iterations=10):
    """Worker that repeatedly adds the user to the given LDAP groups.

    Makes `iterations` passes over `groups`, adding `ldap_user` to each
    group with errors ignored (exitcode=None). `i` only labels the
    test step for this worker.
    """
    with When(f"user is added to LDAP groups #{i}"):
        j = 0
        while j < iterations:
            for group in groups:
                with When(f"I add user to the group {group['dn']} #{j}"):
                    add_user_to_group_in_ldap(user=ldap_user, group=group, exitcode=None)
            j += 1
def remove_user_from_ldap_groups_in_parallel(ldap_user, groups, i, iterations=10):
    """Worker that repeatedly removes the user from the given LDAP groups.

    Makes `iterations` passes over `groups`, removing `ldap_user` from
    each group with errors ignored (exitcode=None). `i` only labels the
    test step for this worker.
    """
    with When(f"user is removed from LDAP groups #{i}"):
        j = 0
        while j < iterations:
            for group in groups:
                with When(f"I remove user from the group {group['dn']} #{j}"):
                    delete_user_from_group_in_ldap(user=ldap_user, group=group, exitcode=None)
            j += 1
def add_roles_in_parallel(role_names, i, iterations=10):
    """Worker that repeatedly (re)creates the given RBAC roles.

    Makes `iterations` passes, issuing CREATE ROLE OR REPLACE for every
    name in `role_names`. `i` only labels the test step for this worker.
    """
    with When(f"roles are added #{i}"):
        j = 0
        while j < iterations:
            for role_name in role_names:
                with When(f"I add role {role_name} #{j}"):
                    current().context.node.query(f"CREATE ROLE OR REPLACE {role_name}")
            j += 1
def remove_roles_in_parallel(role_names, i, iterations=10):
    """Worker that repeatedly drops the given RBAC roles.

    Makes `iterations` passes, issuing DROP ROLE IF EXISTS for every
    name in `role_names`. `i` only labels the test step for this worker.
    """
    with When(f"roles are removed #{i}"):
        j = 0
        while j < iterations:
            for role_name in role_names:
                with When(f"I remove role {role_name} #{j}"):
                    current().context.node.query(f"DROP ROLE IF EXISTS {role_name}")
            j += 1
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Map_MultipleRoles("1.0")
)
def multiple_roles(self, ldap_server, ldap_user):
    """Check that users authenticated using LDAP external user directory
    can be assigned multiple LDAP mapped roles.

    :param ldap_server: LDAP server name used in the external user directory config
    :param ldap_user: LDAP user entry; must provide `username` and `password`
    """
    uid = getuid()
    # Map each LDAP group's cn directly to a role name (empty prefix);
    # the filter selects groups whose uniquemember is the user's bind DN.
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix":""
        }
    ]
    with Given("I add LDAP groups"):
        groups = add_ldap_groups(groups=({"cn": f"role0_{uid}"}, {"cn": f"role1_{uid}"}))
    with And("I add LDAP user to each LDAP group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
        add_user_to_group_in_ldap(user=ldap_user, group=groups[1])
    with And("I add RBAC roles"):
        roles = add_rbac_roles(roles=(f"role0_{uid}", f"role1_{uid}"))
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])])
    with Then("I expect the user to have mapped LDAP roles"):
        with By(f"checking that first role is assigned", description=f"{roles[0]}"):
            assert roles[0] in r.output, error()
        with And(f"checking that second role is also assigned", description=f"{roles[1]}"):
            assert roles[1] in r.output, error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_WithFixedRoles("1.0")
)
def with_fixed_roles(self, ldap_server, ldap_user):
    """Check that LDAP users can be assigned roles dynamically
    and statically using the `<roles>` section.

    :param ldap_server: LDAP server name used in the external user directory config
    :param ldap_user: LDAP user entry; must provide `username` and `password`
    """
    uid = getuid()
    role_name = f"role_{uid}"
    fixed_role_name = f"role_fixed_{uid}"
    # Dynamic mapping: LDAP group cn -> role name, no prefix.
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add matching RBAC role"):
        mapped_roles = add_rbac_roles(roles=(f"{role_name}",))
    with And("I add an RBAC role that will be added statically"):
        roles = add_rbac_roles(roles=(f"{fixed_role_name}",))
    with And("I add LDAP external user directory configuration"):
        # `roles` supplies the static <roles> section in addition
        # to the dynamic role_mappings.
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, roles=roles, restart=True)
    with When(f"I login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])])
    with Then("I expect the user to have mapped and fixed roles"):
        with By("checking that mapped role is assigned"):
            assert mapped_roles[0].strip("'") in r.output, error()
        with And("checking that fixed role is assigned"):
            assert roles[0] in r.output, error()
@TestOutline
def map_role(self, role_name, ldap_server, ldap_user, rbac_role_name=None, role_mappings=None):
    """Check that we can map a role with a given name.

    :param role_name: cn of the LDAP group to create
    :param ldap_server: LDAP server name used in the external user directory config
    :param ldap_user: LDAP user entry; must provide `username` and `password`
    :param rbac_role_name: RBAC role name to create; defaults to `role_name`
        (may differ when the LDAP group name contains escaped characters)
    :param role_mappings: role mapping sections; defaults to a single
        cn -> role mapping with no prefix
    """
    if role_mappings is None:
        role_mappings = [
            {
                "base_dn": "ou=groups,dc=company,dc=com",
                "attribute": "cn",
                "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
                "prefix": ""
            }
        ]
    if rbac_role_name is None:
        rbac_role_name = role_name
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add matching RBAC role"):
        # The role name is quoted so that special characters survive the
        # CREATE ROLE statement; quotes are stripped when checking output.
        roles = add_rbac_roles(roles=(f"'{rbac_role_name}'",))
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])])
    with Then("I expect the user to have mapped LDAP role"):
        with By(f"checking that the role is assigned", description=f"{role_name}"):
            assert roles[0].strip("'") in r.output, error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithUTF8Characters("1.0")
)
def role_name_with_utf8_characters(self, ldap_server, ldap_user):
    """Check that we can map a role that contains UTF8 characters.

    Delegates the actual check to the `map_role` outline.
    """
    uid = getuid()
    role_name = f"role_{uid}_Gãńdåłf_Thê_Gręât"
    map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_Long("1.0")
)
def role_name_with_more_than_128_characters(self, ldap_server, ldap_user):
    """Check that we can map a role that contains more than 128 characters.

    Delegates the actual check to the `map_role` outline.
    """
    uid = getuid()
    # 128 extra 'r' characters push the name past the 128-character mark.
    role_name = f"role_{uid}_{'r'*128}"
    map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithSpecialXMLCharacters("1.0")
)
def role_name_with_special_xml_characters(self, ldap_server, ldap_user):
    """Check that we can map a role that contains special XML
    characters that must be escaped.

    The LDAP group name carries escaped `<`/`>` while the RBAC role
    uses the unescaped form.
    """
    uid = getuid()
    role_name = f"role_{uid}_\\<\\>"
    rbac_role_name = f"role_{uid}_<>"
    map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user, rbac_role_name=rbac_role_name)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithSpecialRegexCharacters("1.0")
)
def role_name_with_special_regex_characters(self, ldap_server, ldap_user):
    """Check that we can map a role that contains special regex
    characters that must be escaped.

    The LDAP group name carries an escaped `+` while the RBAC role
    uses the unescaped form.
    """
    uid = getuid()
    role_name = f"role_{uid}_\\+.?$"
    rbac_role_name = f"role_{uid}_+.?$"
    map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user, rbac_role_name=rbac_role_name)
@TestOutline
def map_groups_with_prefixes(self, prefixes, group_names, role_names,
        expected, not_expected, ldap_server, ldap_user):
    """Check that we can map multiple groups to roles whith one or more prefixes.

    :param prefixes: list of prefixes; one role mapping section is created
        per prefix
    :param group_names: cn values of the LDAP groups to create
    :param role_names: RBAC role names to create
    :param expected: role names that must appear in SHOW GRANTS output
    :param not_expected: role names that must NOT appear in SHOW GRANTS output
    :param ldap_server: LDAP server name used in the external user directory config
    :param ldap_user: LDAP user entry; must provide `username` and `password`
    """
    # One identical mapping section per prefix; only the prefix differs.
    role_mappings = []
    for prefix in prefixes:
        role_mappings.append({
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": prefix
        })
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": name} for name in group_names))
    with And("I add LDAP user to the group"):
        for group in groups:
            add_user_to_group_in_ldap(user=ldap_user, group=group)
    with And("I add RBAC roles"):
        roles = add_rbac_roles(roles=(f"'{name}'" for name in role_names))
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])])
    with Then("I expect the user to have mapped roles"):
        with By(f"checking that the roles are assigned", description=f"{', '.join(expected)}"):
            for name in expected:
                assert name in r.output, error()
    with And("I expect the user not to have mapped roles"):
        with By(f"checking that the roles are not assigned", description=f"{', '.join(not_expected)}"):
            for name in not_expected:
                assert name not in r.output, error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Syntax("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix("1.0")
)
def prefix_non_empty(self, ldap_server, ldap_user):
    """Check that only group names with specified prefix are mapped to roles
    when prefix is not empty.

    Delegates the actual check to the `map_groups_with_prefixes` outline.
    """
    uid = getuid()
    with Given("I define group names"):
        # Only the first group carries the "clickhouse_" prefix.
        group_names=[
            f"clickhouse_role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define role names"):
        role_names=[
            f"role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define group prefixes to be mapped"):
        prefixes = ["clickhouse_"]
    with And("I define the expected mapped and not mapped roles"):
        expected=[f"role_{uid}"]
        not_expected=[f"role0_{uid}"]
    map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user,
        prefixes=prefixes, group_names=group_names, role_names=role_names,
        expected=expected, not_expected=not_expected)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_Default("1.0")
)
def prefix_default_value(self, ldap_server, ldap_user):
    """Check that when prefix is not specified the default value of prefix
    is empty and therefore ldap groups are mapped directly to roles.

    Delegates the actual check to the `map_role` outline with a mapping
    section that deliberately omits the `prefix` key.
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
        }
    ]
    map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user, role_mappings=role_mappings)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithUTF8Characters("1.0")
)
def prefix_with_utf8_characters(self, ldap_server, ldap_user):
    """Check that we can map a role when prefix contains UTF8 characters.

    Delegates the actual check to the `map_groups_with_prefixes` outline.
    """
    uid = getuid()
    with Given("I define group names"):
        # Only the first group carries the UTF8 prefix.
        group_names=[
            f"Gãńdåłf_Thê_Gręât_role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define role names"):
        role_names=[
            f"role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define group prefixes to be mapped"):
        prefixes = ["Gãńdåłf_Thê_Gręât_"]
    with And("I define the expected mapped and not mapped roles"):
        expected=[f"role_{uid}"]
        not_expected=[f"role0_{uid}"]
    map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user,
        prefixes=prefixes, group_names=group_names, role_names=role_names,
        expected=expected, not_expected=not_expected)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_SpecialCharactersEscaping("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialXMLCharacters("1.0")
)
def prefix_with_special_xml_characters(self, ldap_server, ldap_user):
    """Check that we can map a role when prefix contains special XML characters.

    The LDAP group name carries escaped `<`/`>` while the configured
    prefix uses the unescaped form.
    Delegates the actual check to the `map_groups_with_prefixes` outline.
    """
    uid = getuid()
    with Given("I define group names"):
        group_names=[
            f"clickhouse\\<\\>_role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define role names"):
        role_names=[
            f"role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define group prefixes to be mapped"):
        prefixes = ["clickhouse<>_"]
    with And("I define the expected mapped and not mapped roles"):
        expected=[f"role_{uid}"]
        not_expected=[f"role0_{uid}"]
    map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user,
        prefixes=prefixes, group_names=group_names, role_names=role_names,
        expected=expected, not_expected=not_expected)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialRegexCharacters("1.0")
)
def prefix_with_special_regex_characters(self, ldap_server, ldap_user):
    """Check that we can map a role when prefix contains special regex characters.

    The LDAP group name and the configured prefix use different escaping
    of the regex metacharacters.
    Delegates the actual check to the `map_groups_with_prefixes` outline.
    """
    uid = getuid()
    with Given("I define group names"):
        group_names=[
            f"clickhouse\\+.?\\$_role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define role names"):
        role_names=[
            f"role_{uid}",
            f"role0_{uid}"
        ]
    with And("I define group prefixes to be mapped"):
        prefixes = ["clickhouse+.?\\$_"]
    with And("I define the expected mapped and not mapped roles"):
        expected=[f"role_{uid}"]
        not_expected=[f"role0_{uid}"]
    map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user,
        prefixes=prefixes, group_names=group_names, role_names=role_names,
        expected=expected, not_expected=not_expected)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections("1.0")
)
def multiple_sections_with_different_prefixes(self, ldap_server, ldap_user):
    """Check that we can map multiple roles with multiple role mapping sections
    that use different prefixes.

    Delegates the actual check to the `map_groups_with_prefixes` outline;
    the third group has no matching prefix and must not be mapped.
    """
    uid = getuid()
    with Given("I define group names"):
        group_names=[
            f"clickhouse0_role0_{uid}",
            f"clickhouse1_role1_{uid}",
            f"role2_{uid}"
        ]
    with And("I define role names"):
        role_names=[
            f"role0_{uid}",
            f"role1_{uid}",
            f"role2_{uid}"
        ]
    with And("I define group prefixes to be mapped"):
        prefixes = ["clickhouse0_", "clickhouse1_"]
    with And("I define the expected mapped and not mapped roles"):
        expected=[f"role0_{uid}", f"role1_{uid}"]
        not_expected=[f"role2_{uid}"]
    map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user,
        prefixes=prefixes, group_names=group_names, role_names=role_names,
        expected=expected, not_expected=not_expected)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_Removed("1.0")
)
def group_removed(self, ldap_server, ldap_user):
    """Check that roles are not mapped after the corresponding LDAP group
    is removed.

    :param ldap_server: LDAP server name used in the external user directory config
    :param ldap_user: LDAP user entry; must provide `username` and `password`
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    try:
        with Given("I add LDAP group"):
            group = add_group_to_ldap(**{"cn": role_name})
        with And("I add LDAP user to the group"):
            add_user_to_group_in_ldap(user=ldap_user, group=group)
        with And("I add matching RBAC role"):
            roles = add_rbac_roles(roles=(f"{role_name}",))
        with And("I add LDAP external user directory configuration"):
            add_ldap_external_user_directory(server=ldap_server,
                role_mappings=role_mappings, restart=True)
        with When(f"I login as an LDAP user"):
            r = self.context.node.query(f"SHOW GRANTS", settings=[
                ("user", ldap_user["username"]), ("password", ldap_user["password"])])
        with Then("I expect the user to have mapped LDAP role"):
            with By(f"checking that the role is assigned", description=f"{role_name}"):
                assert role_name in r.output, error()
    finally:
        # Group removal and re-login happen in `finally` so the group is
        # cleaned up even if the first part of the scenario fails.
        with Finally("I remove LDAP group"):
            delete_group_from_ldap(group)
        with When(f"I login as an LDAP user after LDAP group is removed"):
            r = self.context.node.query(f"SHOW GRANTS", settings=[
                ("user", ldap_user["username"]), ("password", ldap_user["password"])])
        with Then("I expect the user not to have mapped LDAP role"):
            with By(f"checking that the role is not assigned", description=f"{role_name}"):
                assert role_name not in r.output, error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_UserRemoved("1.0")
)
def user_removed_from_group(self, ldap_server, ldap_user):
    """Check that roles are not mapped after the user has been removed
    from the corresponding LDAP group.

    :param ldap_server: LDAP server name used in the external user directory config
    :param ldap_user: LDAP user entry; must provide `username` and `password`
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add matching RBAC role"):
        roles = add_rbac_roles(roles=(f"{role_name}",))
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])])
    with Then("I expect the user to have mapped LDAP role"):
        with By(f"checking that the role is assigned", description=f"{role_name}"):
            assert role_name in r.output, error()
    with When("I remove user from the LDAP group"):
        delete_user_from_group_in_ldap(user=ldap_user, group=groups[0])
    with And(f"I login as an LDAP user after user has been removed from the group"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])])
    with Then("I expect the user not to have mapped LDAP role"):
        with By(f"checking that the role is not assigned", description=f"{role_name}"):
            assert role_name not in r.output, error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NotPresent("1.0")
)
def role_not_present(self, ldap_server, ldap_user):
    """Check that LDAP users can still be authenticated even if
    the mapped role is not present.

    No RBAC role is created, so the mapping resolves to nothing; the
    login must still succeed with an empty SHOW GRANTS output.
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group for which no matching roles are present"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as an LDAP user"):
        # no_checks=True so that the exit code can be asserted manually.
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True)
    with Then("I expect the login to succeed"):
        assert r.exitcode == 0, error()
    with And("the user not to have any mapped LDAP role"):
        assert r.output == "", error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NotPresent("1.0")
)
def add_new_role_not_present(self, ldap_server, ldap_user):
    """Check that LDAP user can still authenticate when the LDAP
    user is added to a new LDAP group that does not match any existing
    RBAC roles while having other role being already mapped.

    Finally verifies that once the missing RBAC role is created, it is
    also granted to the user.
    """
    uid = getuid()
    role_name = f"role_{uid}"
    # Only groups whose cn starts with "clickhouse_" are mapped.
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": "clickhouse_"
        }
    ]
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": "clickhouse_" + role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add matching RBAC role"):
        roles = add_rbac_roles(roles=(f"{role_name}",))
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True)
    with Then("I expect the login to succeed"):
        assert r.exitcode == 0, error()
    with And("the user should have the mapped LDAP role"):
        assert f"{role_name}" in r.output, error()
    with When("I add LDAP group that maps to unknown role"):
        unknown_groups = add_ldap_groups(groups=({"cn": "clickhouse_" + role_name + "_unknown"},))
    with And("I add LDAP user to the group that maps to unknown role"):
        add_user_to_group_in_ldap(user=ldap_user, group=unknown_groups[0])
    with And(f"I again login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True)
    with Then("I expect the login to succeed"):
        assert r.exitcode == 0, error()
    with And("the user should still have the present mapped LDAP role"):
        assert f"{role_name}" in r.output, error()
    with When("I add matching previously unknown RBAC role"):
        unknown_roles = add_rbac_roles(roles=(f"{role_name}_unknown",))
    with And(f"I again login as an LDAP user after previously unknown RBAC role has been added"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True)
    with Then("I expect the login to succeed"):
        assert r.exitcode == 0, error()
    with And("the user should still have the first mapped LDAP role"):
        assert f"{role_name}" in r.output, error()
    with And("the user should have the previously unknown mapped LDAP role"):
        assert f"{role_name}_unknown" in r.output, error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Removed("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Readded("1.0")
)
def role_removed_and_readded(self, ldap_server, ldap_user):
    """Check that when a mapped role is removed the privileges provided by the role
    are revoked from all the authenticated LDAP users and when the role
    is added back the privileges to the authenticated LDAP users are re-granted.

    Uses an interactive clickhouse-client session so that the same
    authenticated connection is checked before and after the role change.
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add matching RBAC role"):
        roles = add_rbac_roles(roles=(f"{role_name}",))
    with And("I create a table for which the role will provide privilege"):
        table_name = create_table(name=f"table_{uid}",
            create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()")
    with And("I grant select privilege on the table to the role"):
        self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}")
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as LDAP user using clickhouse-client"):
        with self.context.cluster.shell(node=self.context.node.name) as shell:
            with shell(
                    f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}",
                    asynchronous=True, name="client") as client:
                # Each expect() waits for the interactive prompt before
                # sending the next statement.
                client.app.expect("clickhouse1 :\) ")

                with When("I execute SHOW GRANTS"):
                    client.app.send(f"SHOW GRANTS")
                with Then("I expect the user to have the mapped role"):
                    client.app.expect(f"{role_name}")
                    client.app.expect("clickhouse1 :\) ")

                with When("I execute select on the table"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get no errors"):
                    client.app.expect("Ok\.")
                    client.app.expect("clickhouse1 :\) ")

                with When("I remove the role that grants the privilege"):
                    self.context.node.query(f"DROP ROLE {role_name}")

                with And("I re-execute select on the table"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get not enough privileges error"):
                    client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.")
                    client.app.expect("clickhouse1 :\) ")

                with When("I add the role that grant the privilege back"):
                    self.context.node.query(f"CREATE ROLE {role_name}")
                    self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}")

                with And("I execute select on the table after role is added back"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get no errors"):
                    client.app.expect("Ok\.")
                    client.app.expect("clickhouse1 :\) ")
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NewPrivilege("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedPrivilege("1.0")
)
def privilege_new_and_removed(self, ldap_server, ldap_user):
    """Check that when a new privilege is added to the mapped role
    it is granted to all authenticated LDAP users and when
    the privilege is removed from the role it is also revoked
    from all authenticated LDAP users.

    Uses an interactive clickhouse-client session so that the same
    authenticated connection is checked before and after GRANT/REVOKE.
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add matching RBAC role"):
        roles = add_rbac_roles(roles=(f"{role_name}",))
    with And("I create a table for which the role will provide privilege"):
        table_name = create_table(name=f"table_{uid}",
            create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()")
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as LDAP user using clickhouse-client"):
        with self.context.cluster.shell(node=self.context.node.name) as shell:
            with shell(
                    f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}",
                    asynchronous=True, name="client") as client:
                # Each expect() waits for the interactive prompt before
                # sending the next statement.
                client.app.expect("clickhouse1 :\) ")

                with When("I execute SHOW GRANTS"):
                    client.app.send(f"SHOW GRANTS")
                with Then("I expect the user to have the mapped role"):
                    client.app.expect(f"{role_name}")
                    client.app.expect("clickhouse1 :\) ")

                with And("I execute select on the table when the mapped role does not provide this privilege"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get not enough privileges error"):
                    client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.")
                    client.app.expect("clickhouse1 :\) ")

                with When("I grant select privilege on the table to the mapped role"):
                    self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}")

                with And("I execute select on the table"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get no errors"):
                    client.app.expect("Ok\.")
                    client.app.expect("clickhouse1 :\) ")

                with When("I remove the privilege from the mapped role"):
                    self.context.node.query(f"REVOKE SELECT ON {table_name} FROM {role_name}")

                with And("I re-execute select on the table"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get not enough privileges error"):
                    client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.")
                    client.app.expect("clickhouse1 :\) ")
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Added("1.0")
)
def role_added(self, ldap_server, ldap_user):
    """Check that when the mapped role is not present during LDAP user authentication but
    is later added then the authenticated LDAP users is granted the privileges provided
    by the mapped role.

    Uses an interactive clickhouse-client session authenticated while the
    role is still missing.
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I create a table for which the role will provide privilege"):
        table_name = create_table(name=f"table_{uid}",
            create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()")
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as LDAP user using clickhouse-client"):
        with self.context.cluster.shell(node=self.context.node.name) as shell:
            with shell(
                    f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}",
                    asynchronous=True, name="client") as client:
                # Each expect() waits for the interactive prompt before
                # sending the next statement.
                client.app.expect("clickhouse1 :\) ")

                with When("I execute SHOW GRANTS"):
                    client.app.send(f"SHOW GRANTS")
                with Then("I expect the user not to have any mapped role"):
                    client.app.expect(f"Ok\.")
                    client.app.expect("clickhouse1 :\) ")

                with And("I execute select on the table"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get not enough privileges error"):
                    client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.")
                    client.app.expect("clickhouse1 :\) ")

                with When("I add the role that grant the privilege"):
                    self.context.node.query(f"CREATE ROLE {role_name}")
                    self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}")

                with And("I execute select on the table after role is added"):
                    client.app.send(f"SELECT * FROM {table_name} LIMIT 1")
                with Then("I expect to get no errors"):
                    client.app.expect("Ok\.")
                    client.app.expect("clickhouse1 :\) ")
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_New("1.0")
)
def role_new(self, ldap_server, ldap_user):
    """Check that no new roles can be granted to LDAP authenticated users.

    A GRANT to an LDAP-directory user must fail because that user
    storage is read-only.
    """
    uid = getuid()
    role_name = f"role_{uid}"
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    # Expected server-side rejection of the GRANT statement.
    message = f"DB::Exception: Cannot update user `{ldap_user['username']}` in ldap because this storage is readonly"
    exitcode = 239

    with Given("I a have RBAC role that is not mapped"):
        roles = add_rbac_roles(roles=(f"{role_name}",))
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as LDAP user using clickhouse-client"):
        with self.context.cluster.shell(node=self.context.node.name) as shell:
            with shell(
                    f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}",
                    asynchronous=True, name="client") as client:
                client.app.expect("clickhouse1 :\) ")

                with When("I try to grant new role to user"):
                    self.context.node.query(f"GRANT {role_name} TO {ldap_user['username']}",
                        message=message, exitcode=exitcode)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections_IdenticalParameters("1.0")
)
def multiple_sections_with_identical_parameters(self, ldap_server, ldap_user):
    """Check behaviour when multiple role mapping sections
    have exactly the same parameters.

    :param ldap_server: name of the LDAP server backing the external user directory
    :param ldap_user: dict with the LDAP user's ``username`` and ``password``
    """
    uid = getuid()
    role_name = f"role_{uid}"
    # Four identical role-mapping sections (list repetition shares the same
    # dict object four times; harmless here since the config is read-only).
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ] * 4
    with Given("I add LDAP group"):
        groups = add_ldap_groups(groups=({"cn": role_name},))
    with And("I add LDAP user to the group"):
        add_user_to_group_in_ldap(user=ldap_user, group=groups[0])
    with And("I add matching RBAC role"):
        roles = add_rbac_roles(roles=(f"{role_name}",))
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    with When(f"I login as an LDAP user"):
        r = self.context.node.query(f"SHOW GRANTS", settings=[
            ("user", ldap_user["username"]), ("password", ldap_user["password"])])
    # Despite the duplicated sections the role must be mapped exactly as with
    # a single section.
    with Then("I expect the user to have mapped LDAP role"):
        with By(f"checking that the role is assigned", description=f"{role_name}"):
            assert roles[0].strip("'") in r.output, error()
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_RemovedAndAdded_Parallel("1.0")
)
def group_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, timeout=200):
    """Check that user can be authenticated successfully when LDAP groups
    are removed and added in parallel.

    :param ldap_server: name of the LDAP server backing the external user directory
    :param ldap_user: dict with the LDAP user's ``username`` and ``password``
    :param count: number of LDAP groups / mapped roles to create
    :param timeout: seconds to wait for each parallel task to finish
    """
    uid = getuid()
    role_names = [f"role{i}_{uid}" for i in range(count)]
    users = [{"cn": ldap_user["username"], "userpassword": ldap_user["password"]}]
    groups = []
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    try:
        with Given("I initially add all LDAP groups"):
            for role_name in role_names:
                # fixed typo in step description: "groop" -> "group"
                with When(f"I add LDAP group {role_name}"):
                    group = add_group_to_ldap(**{"cn": role_name})
                with And(f"I add LDAP user to the group {role_name}"):
                    add_user_to_group_in_ldap(user=ldap_user, group=group)
                groups.append(group)
        with And("I add RBAC roles"):
            add_rbac_roles(roles=role_names)
        with And("I add LDAP external user directory configuration"):
            add_ldap_external_user_directory(server=ldap_server,
                role_mappings=role_mappings, restart=True)
        tasks = []
        # Run logins concurrently with group removal/re-creation to exercise
        # the race between authentication and group changes.
        with Pool(4) as pool:
            try:
                with When("user try to login while LDAP groups are added and removed in parallel"):
                    for i in range(10):
                        tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,)))
                        tasks.append(pool.submit(remove_ldap_groups_in_parallel, (groups, i, 10,)))
                        tasks.append(pool.submit(add_ldap_groups_in_parallel,(ldap_user, role_names, i, 10,)))
            finally:
                with Finally("it should work", flags=TE):
                    for task in tasks:
                        task.result(timeout=timeout)
    finally:
        # Groups may or may not exist at this point, so ignore delete errors.
        with Finally("I clean up all LDAP groups"):
            for group in groups:
                delete_group_from_ldap(group, exitcode=None)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_UserRemovedAndAdded_Parallel("1.0")
)
def user_removed_and_added_in_ldap_groups_in_parallel(self, ldap_server, ldap_user, count=20, timeout=200):
    """Check that user can be authenticated successfully when it is
    removed and added from mapping LDAP groups in parallel.

    :param ldap_server: name of the LDAP server backing the external user directory
    :param ldap_user: dict with the LDAP user's ``username`` and ``password``
    :param count: number of LDAP groups / mapped roles to create
    :param timeout: seconds to wait for each parallel task to finish
    """
    uid = getuid()
    role_names = [f"role{i}_{uid}" for i in range(count)]
    users = [{"cn": ldap_user["username"], "userpassword": ldap_user["password"]}]
    groups = [{"cn": role_name} for role_name in role_names]
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    with Given("I add all LDAP groups"):
        # rebinds `groups` from plain dicts to the created LDAP group records
        groups = add_ldap_groups(groups=groups)
        for group in groups:
            with And(f"I add LDAP user to the group {group['dn']}"):
                add_user_to_group_in_ldap(user=ldap_user, group=group)
    with And("I add RBAC roles"):
        add_rbac_roles(roles=role_names)
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    tasks = []
    # Concurrently log in while the user's group membership is being
    # removed and re-added, to exercise the race with role mapping lookups.
    with Pool(4) as pool:
        try:
            with When("user try to login while user is added and removed from LDAP groups in parallel"):
                for i in range(10):
                    tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,)))
                    tasks.append(pool.submit(remove_user_from_ldap_groups_in_parallel, (ldap_user, groups, i, 1,)))
                    tasks.append(pool.submit(add_user_to_ldap_groups_in_parallel, (ldap_user, groups, i, 1,)))
        finally:
            with Finally("it should work", flags=TE):
                for task in tasks:
                    task.result(timeout=timeout)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedAndAdded_Parallel("1.0")
)
def roles_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, timeout=200):
    """Check that user can be authenticated successfully when roles that are mapped
    by the LDAP groups are removed and added in parallel.

    :param ldap_server: name of the LDAP server backing the external user directory
    :param ldap_user: dict with the LDAP user's ``username`` and ``password``
    :param count: number of LDAP groups / mapped roles to create
    :param timeout: seconds to wait for each parallel task to finish
    """
    uid = getuid()
    role_names = [f"role{i}_{uid}" for i in range(count)]
    users = [{"cn": ldap_user["username"], "userpassword": ldap_user["password"]}]
    groups = [{"cn": role_name} for role_name in role_names]
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": ""
        }
    ]
    # Scenario is deliberately marked as failing; everything below is
    # effectively disabled until the referenced bug is investigated.
    fail("known bug that needs to be investigated")
    with Given("I add all LDAP groups"):
        groups = add_ldap_groups(groups=groups)
        for group in groups:
            with And(f"I add LDAP user to the group {group['dn']}"):
                add_user_to_group_in_ldap(user=ldap_user, group=group)
    with And("I add RBAC roles"):
        add_rbac_roles(roles=role_names)
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    tasks = []
    # Concurrently log in while the mapped RBAC roles are dropped and
    # re-created, to exercise the race with role resolution.
    with Pool(4) as pool:
        try:
            with When("user try to login while mapped roles are added and removed in parallel"):
                for i in range(10):
                    tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,)))
                    tasks.append(pool.submit(remove_roles_in_parallel, (role_names, i, 10,)))
                    tasks.append(pool.submit(add_roles_in_parallel, (role_names, i, 10,)))
        finally:
            with Finally("it should work", flags=TE):
                for task in tasks:
                    task.result(timeout=timeout)
            with And("I clean up all the roles"):
                for role_name in role_names:
                    with By(f"dropping role {role_name}", flags=TE):
                        self.context.node.query(f"DROP ROLE IF EXISTS {role_name}")
@TestOutline
def parallel_login(self, ldap_server, ldap_user, user_count=10, timeout=200, role_count=10):
    """Check that login of valid and invalid LDAP authenticated users
    with mapped roles works in parallel.

    :param ldap_server: name of the LDAP server backing the external user directory
    :param ldap_user: dict with the LDAP user's ``username`` and ``password``
    :param user_count: number of LDAP users to create
    :param timeout: seconds to wait for each parallel task to finish
    :param role_count: number of LDAP groups / mapped roles to create
    """
    uid = getuid()
    role_names = [f"role{i}_{uid}" for i in range(role_count)]
    # fresh users with random passwords for this outline run
    users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)]
    # group names carry the "clickhouse_" prefix that the mapping strips off
    groups = [{"cn": f"clickhouse_{role_name}"} for role_name in role_names]
    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": "clickhouse_"
        }
    ]
    with Given("I add LDAP users"):
        users = add_ldap_users(users=users)
    with And("I add all LDAP groups"):
        groups = add_ldap_groups(groups=groups)
    # every user is made a member of every group
    for group in groups:
        for user in users:
            with And(f"I add LDAP user {user['dn']} to the group {group['dn']}"):
                add_user_to_group_in_ldap(user=user, group=group)
    with And("I add RBAC roles"):
        add_rbac_roles(roles=role_names)
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=ldap_server,
            role_mappings=role_mappings, restart=True)
    tasks = []
    with Pool(4) as pool:
        try:
            with When("users try to login in parallel", description="""
                * with valid username and password
                * with invalid username and valid password
                * with valid username and invalid password
                """):
                for i in range(10):
                    tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,)))
                    tasks.append(pool.submit(login_with_valid_username_and_invalid_password, (users, i, 50,)))
                    tasks.append(pool.submit(login_with_invalid_username_and_valid_password, (users, i, 50,)))
        finally:
            with Then("it should work"):
                for task in tasks:
                    task.result(timeout=timeout)
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0")
)
def parallel_login_of_multiple_users(self, ldap_server, ldap_user, timeout=200, role_count=10):
    """Run the parallel login outline for ten distinct LDAP users with
    mapped roles, exercising both valid and invalid credentials.
    """
    parallel_login(
        ldap_server=ldap_server,
        ldap_user=ldap_user,
        user_count=10,
        role_count=role_count,
        timeout=timeout,
    )
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_SameUser("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0")
)
def parallel_login_of_the_same_user(self, ldap_server, ldap_user, timeout=200, role_count=10):
    """Run the parallel login outline for the same-user requirement,
    exercising both valid and invalid credentials.

    NOTE(review): this passes user_count=10, identical to the multiple-users
    scenario — confirm whether the same-user case should use user_count=1.
    """
    parallel_login(
        ldap_server=ldap_server,
        ldap_user=ldap_user,
        user_count=10,
        role_count=role_count,
        timeout=timeout,
    )
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_MultipleServers("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0")
)
def parallel_login_of_ldap_users_with_multiple_servers(self, ldap_server, ldap_user, timeout=200):
    """Run the multiple-servers parallel login outline with LDAP users only
    (no local users), covering valid and invalid credentials.
    """
    parallel_login_with_multiple_servers(
        ldap_server=ldap_server,
        ldap_user=ldap_user,
        user_count=10,
        role_count=10,
        timeout=timeout,
        with_ldap_users=True,
        with_local_users=False,
    )
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_LocalAndMultipleLDAP("1.0"),
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0")
)
def parallel_login_of_local_and_ldap_users_with_multiple_servers(self, ldap_server, ldap_user, timeout=200):
    """Run the multiple-servers parallel login outline with both local and
    LDAP users, covering valid and invalid credentials.
    """
    parallel_login_with_multiple_servers(
        ldap_server=ldap_server,
        ldap_user=ldap_user,
        user_count=10,
        role_count=10,
        timeout=timeout,
        with_ldap_users=True,
        with_local_users=True,
    )
@TestScenario
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_LocalOnly("1.0")
)
def parallel_login_of_local_users(self, ldap_server, ldap_user, timeout=200):
    """Run the multiple-servers parallel login outline with local users only,
    while LDAP external user directories with role mapping stay configured.
    """
    parallel_login_with_multiple_servers(
        ldap_server=ldap_server,
        ldap_user=ldap_user,
        user_count=10,
        role_count=10,
        timeout=timeout,
        with_ldap_users=False,
        with_local_users=True,
    )
@TestOutline
def parallel_login_with_multiple_servers(self, ldap_server, ldap_user, user_count=10,
        role_count=10, timeout=200, with_ldap_users=True, with_local_users=False):
    """Check that login of valid and invalid local users or LDAP users that have mapped roles
    works in parallel using multiple LDAP external user directories.

    :param ldap_server: name of the primary LDAP server (directories are defined inline below)
    :param ldap_user: dict with an LDAP user's ``username`` and ``password``
    :param user_count: users to create per user group
    :param role_count: LDAP groups / mapped roles to create
    :param timeout: seconds to wait for each parallel task to finish
    :param with_ldap_users: create users on both LDAP servers when True
    :param with_local_users: create local RBAC users when True
    """
    uid = getuid()
    cluster = self.context.cluster
    # user group name -> list of user dicts; filled in depending on flags
    user_groups = {}
    with Given("I define role names"):
        role_names = [f"role{i}_{uid}" for i in range(role_count)]
    with And("I define corresponding group names"):
        groups = [{"cn": f"clickhouse_{role_name}"} for role_name in role_names]
    if with_ldap_users:
        with And("I define a group of users to be created on each LDAP server"):
            user_groups["openldap1_users"] = [
                {"cn": f"openldap1_parallel_user{i}_{uid}", "userpassword": randomword(20)} for i in range(user_count)
            ]
            user_groups["openldap2_users"] = [
                {"cn": f"openldap2_parallel_user{i}_{uid}", "userpassword": randomword(20)} for i in range(user_count)
            ]
    if with_local_users:
        with And("I define a group of local users to be created"):
            user_groups["local_users"] = [
                {"cn": f"local_parallel_user{i}_{uid}", "userpassword": randomword(20)} for i in range(user_count)
            ]
    with And("I have a list of checks that I want to run for each user group"):
        checks = [
            login_with_valid_username_and_password,
            login_with_valid_username_and_invalid_password,
            login_with_invalid_username_and_valid_password
        ]
    # One <ldap> user directory per server, each with an identical
    # role-mapping section using the "clickhouse_" group-name prefix.
    with And("I create config file to define LDAP external user directory for each LDAP server"):
        entries = {
            "user_directories": [
                {"ldap": [
                    {"server": "openldap1"},
                    {"role_mappings" : [
                        {
                            "base_dn": "ou=groups,dc=company,dc=com",
                            "attribute": "cn",
                            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
                            "prefix": "clickhouse_"
                        }
                    ]}
                ]},
                {"ldap": [
                    {"server": "openldap2"},
                    {"role_mappings": [
                        {
                            "base_dn": "ou=groups,dc=company,dc=com",
                            "attribute": "cn",
                            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
                            "prefix": "clickhouse_"
                        }
                    ]}
                ]}
            ]
        }
        config = create_entries_ldap_external_user_directory_config_content(entries)
    with And("I add LDAP external user directory configuration"):
        add_ldap_external_user_directory(server=None, restart=True, config=config)
    if with_ldap_users:
        with And("I add LDAP users to each LDAP server"):
            openldap1_users = add_ldap_users(users=user_groups["openldap1_users"], node=cluster.node("openldap1"))
            openldap2_users = add_ldap_users(users=user_groups["openldap2_users"], node=cluster.node("openldap2"))
        with And("I add all LDAP groups to each LDAP server"):
            openldap1_groups = add_ldap_groups(groups=groups, node=cluster.node("openldap1"))
            openldap2_groups = add_ldap_groups(groups=groups, node=cluster.node("openldap2"))
        with And("I add all users to LDAP groups on the first LDAP server"):
            for group in openldap1_groups:
                for user in openldap1_users:
                    with By(f"adding LDAP user {user['dn']} to the group {group['dn']}"):
                        add_user_to_group_in_ldap(user=user, group=group, node=cluster.node("openldap1"))
        with And("I add all users to LDAP groups on the second LDAP server"):
            for group in openldap2_groups:
                for user in openldap2_users:
                    with By(f"adding LDAP user {user['dn']} to the group {group['dn']}"):
                        add_user_to_group_in_ldap(user=user, group=group, node=cluster.node("openldap2"))
    with And("I add RBAC roles"):
        add_rbac_roles(roles=role_names)
    if with_local_users:
        with And("I add local users"):
            add_rbac_users(users=user_groups["local_users"])
        with And("I grant the same RBAC roles to local users"):
            for user in user_groups["local_users"]:
                for role_name in role_names:
                    self.context.node.query(f"GRANT {role_name} TO {user['cn']}")
    tasks = []
    # Every check is run against every user group, 10 rounds in parallel.
    with Pool(4) as pool:
        try:
            with When("users in each group try to login in parallel", description="""
                * with valid username and password
                * with invalid username and valid password
                * with valid username and invalid password
                """):
                for i in range(10):
                    for users in user_groups.values():
                        for check in checks:
                            tasks.append(pool.submit(check, (users, i, 50,)))
        finally:
            with Then("it should work"):
                for task in tasks:
                    task.result(timeout=timeout)
@TestFeature
@Name("mapping")
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping_Search("1.0")
)
def feature(self):
    """Check role LDAP role mapping.

    Configures two LDAP servers (plain and TLS) plus one LDAP test user,
    then runs every Scenario in this module against the first server.
    """
    self.context.node = self.context.cluster.node("clickhouse1")
    self.context.ldap_node = self.context.cluster.node("openldap1")
    servers = {
        "openldap1": {
            "host": "openldap1",
            "port": "389",
            "enable_tls": "no",
            "bind_dn": "cn={user_name},ou=users,dc=company,dc=com"
        },
        # second server uses LDAPS on 636 without certificate verification
        "openldap2": {
            "host": "openldap2",
            "port": "636",
            "enable_tls": "yes",
            "bind_dn": "cn={user_name},ou=users,dc=company,dc=com",
            "tls_require_cert": "never",
        }
    }
    users = [
        {"server": "openldap1", "username": "user1", "password": "user1", "login": True,
         "dn": "cn=user1,ou=users,dc=company,dc=com"},
    ]
    with Given("I fix LDAP access permissions"):
        fix_ldap_permissions()
    with And("I add LDAP servers configuration", description=f"{servers}"):
        add_ldap_servers_configuration(servers=servers)
    # dynamically discover and run every Scenario defined in this module
    for scenario in loads(current_module(), Scenario):
        scenario(ldap_server="openldap1", ldap_user=users[0])
| 40.005525 | 118 | 0.632596 |
4a905b3e862140e9a368c15e4bab1d523ff03e77 | 943 | py | Python | oops_fhir/r4/value_set/v3_act_relationship_subset.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/v3_act_relationship_subset.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/v3_act_relationship_subset.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_act_relationship_subset import (
v3ActRelationshipSubset as v3ActRelationshipSubset_,
)
# Only the wrapper class below is part of this module's public API.
__all__ = ["v3ActRelationshipSubset"]
# Parse the FHIR ValueSet resource from the sibling ``.json`` file at import
# time; the file is expected next to this module with the same stem.
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3ActRelationshipSubset(v3ActRelationshipSubset_):
    """
    v3 Code System ActRelationshipSubset

     <ns1:p>Used to indicate that the target of the relationship will be a
filtered subset of the total related set of targets.</ns1:p><ns1:p>Used
when there is a need to limit the number of components to the first, the
last, the next, the total, the average or some other filtered or
calculated subset.</ns1:p>

    Status: active - Version: 2018-08-12

    http://terminology.hl7.org/ValueSet/v3-ActRelationshipSubset
    """

    class Meta:
        # the parsed FHIR ValueSet resource backing this value set
        resource = _resource
| 26.942857 | 74 | 0.765642 |
032cf19503dad5f63b6ae50ecf0b06b10339600b | 4,089 | py | Python | tests/providers/amazon/aws/sensors/test_sqs.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 3 | 2015-08-25T13:56:44.000Z | 2020-03-21T10:26:58.000Z | tests/providers/amazon/aws/sensors/test_sqs.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 37 | 2020-07-21T07:50:02.000Z | 2022-03-29T22:31:28.000Z | tests/providers/amazon/aws/sensors/test_sqs.py | vuppalli/airflow | dfe8337ca2d3ed173d9ecc112938271519792c40 | [
"Apache-2.0"
] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from moto import mock_sqs
from airflow.exceptions import AirflowException
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.hooks.sqs import SQSHook
from airflow.providers.amazon.aws.sensors.sqs import SQSSensor
from airflow.utils import timezone
# Fixed, deterministic execution/start date shared by every test DAG below.
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestSQSSensor(unittest.TestCase):
    """Unit tests for :class:`SQSSensor` using moto to mock the SQS backend."""

    def setUp(self):
        """Create a minimal DAG, the sensor under test, and a real SQSHook."""
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG('test_dag_id', default_args=args)
        self.sensor = SQSSensor(
            task_id='test_task',
            dag=self.dag,
            sqs_queue='test',
            aws_conn_id='aws_default'
        )
        # MagicMock stands in for the task context passed to poke(); the
        # sensor pushes received messages through context['ti'].
        self.mock_context = mock.MagicMock()
        self.sqs_hook = SQSHook()

    @mock_sqs
    def test_poke_success(self):
        """poke() returns True and pushes the received message into the context."""
        self.sqs_hook.create_queue('test')
        self.sqs_hook.send_message(queue_url='test', message_body='hello')
        result = self.sensor.poke(self.mock_context)
        self.assertTrue(result)
        # assertIn gives a far more useful failure message than
        # assertTrue("..." in ...)
        self.assertIn("'Body': 'hello'", str(self.mock_context['ti'].method_calls),
                      "context call should contain message hello")

    @mock_sqs
    def test_poke_no_message_failed(self):  # renamed: fixed "messsage" typo
        """poke() returns False and leaves the context untouched when the queue is empty."""
        self.sqs_hook.create_queue('test')
        result = self.sensor.poke(self.mock_context)
        self.assertFalse(result)
        # assertEqual reports the actual calls on failure, unlike assertTrue(==)
        self.assertEqual(self.mock_context['ti'].method_calls, [],
                         "context call should be same")

    @mock.patch.object(SQSHook, 'get_conn')
    def test_poke_delete_raise_airflow_exception(self, mock_conn):
        """poke() raises AirflowException when deleting received messages fails."""
        message = {'Messages': [{'MessageId': 'c585e508-2ea0-44c7-bf3e-d1ba0cb87834',
                                 'ReceiptHandle': 'mockHandle',
                                 'MD5OfBody': 'e5a9d8684a8edfed460b8d42fd28842f',
                                 'Body': 'h21'}],
                   'ResponseMetadata': {'RequestId': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411',
                                        'HTTPStatusCode': 200,
                                        'HTTPHeaders': {
                                            'x-amzn-requestid': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411',
                                            'date': 'Mon, 18 Feb 2019 18:41:52 GMT',
                                            'content-type': 'text/xml', 'mock_sqs_hook-length': '830'},
                                        'RetryAttempts': 0}}
        mock_conn.return_value.receive_message.return_value = message
        # Simulate a partial failure from SQS: one message could not be deleted.
        mock_conn.return_value.delete_message_batch.return_value = \
            {'Failed': [{'Id': '22f67273-4dbc-4c19-83b5-aee71bfeb832'}]}
        with self.assertRaises(AirflowException) as context:
            self.sensor.poke(self.mock_context)
        self.assertIn('Delete SQS Messages failed', context.exception.args[0])

    @mock.patch.object(SQSHook, 'get_conn')
    def test_poke_receive_raise_exception(self, mock_conn):
        """poke() propagates exceptions raised while receiving messages."""
        mock_conn.return_value.receive_message.side_effect = Exception('test exception')
        with self.assertRaises(Exception) as context:
            self.sensor.poke(self.mock_context)
        self.assertIn('test exception', context.exception.args[0])
| 39.317308 | 110 | 0.638542 |
e8bbb3424900c812876008860afa817808ceaf96 | 632 | py | Python | Random-Number-Checker.py | Ethan-Francolla/Random-Number-Checker | 40419954cf6f99b184f84c00a600ef1d384cba99 | [
"MIT"
] | 5 | 2021-11-26T18:39:57.000Z | 2022-02-17T04:17:45.000Z | Random-Number-Checker.py | Ethan-Francolla/Random-Number-Checker | 40419954cf6f99b184f84c00a600ef1d384cba99 | [
"MIT"
] | null | null | null | Random-Number-Checker.py | Ethan-Francolla/Random-Number-Checker | 40419954cf6f99b184f84c00a600ef1d384cba99 | [
"MIT"
] | null | null | null | import random
list = []
def print_list():
print("This is the list:")
print(*list, sep = ', ')
for i in range(100):
rand_num = int(random.randint(1,500))
list.append(rand_num)
list.sort()
list.reverse()
user_num = int(input("Please enter a number to check if it is in the array: "))
if user_num in list:
print("That nuber is in the array!")
print("This is how many times it appears in the array: ")
print(list.count(user_num))
else:
print("Sorry, that is not in the array.")
print("This is the largest number:", max(list))
print("This is the smallest number:", min(list))
print_list()
| 21.793103 | 79 | 0.653481 |
173936dac745115efa8b08779ca59bea5ffd88d0 | 7,783 | py | Python | benchmarking/TriVec/experiment_TriVec.py | hpi-sam/GNN-Effectants | e1204cb78bb91ffe3126df62d2d14b20da950694 | [
"MIT"
] | 1 | 2021-03-29T20:50:38.000Z | 2021-03-29T20:50:38.000Z | benchmarking/TriVec/experiment_TriVec.py | hpi-sam/GNN-Effectants | e1204cb78bb91ffe3126df62d2d14b20da950694 | [
"MIT"
] | null | null | null | benchmarking/TriVec/experiment_TriVec.py | hpi-sam/GNN-Effectants | e1204cb78bb91ffe3126df62d2d14b20da950694 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import itertools
import gzip
import numpy as np
import time
from sklearn.pipeline import Pipeline
from tqdm import tqdm
from libkge.embedding import TransE, DistMult, ComplEx, TriModel, DistMult_MCL, ComplEx_MCL, TriModel_MCL
from libkge import KgDataset
from libkge.metrics.classification import auc_roc, auc_pr
from libkge.metrics.ranking import precision_at_k, average_precision
from libkge.metrics.classification import auc_pr, auc_roc
import argparse
from sklearn.metrics import accuracy_score
def scale(x, out_range=(-1, 1)):
    """Linearly rescale *x* from its own [min, max] domain into *out_range*."""
    lo, hi = np.min(x), np.max(x)
    # map x into a zero-centered unit interval, then stretch/shift to out_range
    centered = (x - (hi + lo) / 2) / (hi - lo)
    span = out_range[1] - out_range[0]
    midpoint = (out_range[1] + out_range[0]) / 2
    return centered * span + midpoint
# Command-line interface: --sideeffect selects which polypharmacy
# side-effect split (train/val/test .txt.gz files) the model is trained on.
parser = argparse.ArgumentParser(description='Set necessary values to train different types of predefined models')
parser.add_argument("--sideeffect", "-se", help="train model for given side-effect")
# NOTE: args are parsed at import time, so importing this module requires
# the CLI arguments to be present on sys.argv.
args = parser.parse_args()
def main():
    """Train a TriModel KGE model on one polypharmacy side-effect split and
    evaluate it per side-effect on the held-out test set.

    Reads ``{args.sideeffect}_{train,val,test}.txt.gz`` triple files from the
    preprocessing data directory, fits the model, then prints AC / AP /
    AUC-ROC / AUC-PR / P@50 per side-effect plus averages and total runtime.
    """
    start = time.time()
    seed = 1234
    data_name = "pse"
    kg_dp_path = "../../preprocessing/data/"

    def read_triples(path):
        # Read whitespace-separated (drug, side-effect, drug) triples from a
        # gzipped text file. Using a context manager closes the descriptor;
        # the original left all three file handles open (resource leak).
        with gzip.open(path, "rt") as fd:
            return np.array([l.strip().split() for l in fd.readlines()])

    print("Importing dataset files ... ")
    benchmark_train = read_triples(os.path.join(kg_dp_path, f"{args.sideeffect}_train.txt.gz"))
    benchmark_valid = read_triples(os.path.join(kg_dp_path, f"{args.sideeffect}_val.txt.gz"))
    benchmark_test = read_triples(os.path.join(kg_dp_path, f"{args.sideeffect}_test.txt.gz"))

    benchmark_triples = np.array([[d1, se, d2] for d1, se, d2 in
                                  np.concatenate([benchmark_train, benchmark_valid, benchmark_test])])

    pse_drugs = list(set(list(np.concatenate([benchmark_triples[:, 0], benchmark_triples[:, 2]]))))
    pse_list = set(list(benchmark_triples[:, 1]))

    # Count facts per side-effect type.
    rel_dict = dict()
    for s, p, o in benchmark_triples:
        if p not in rel_dict:
            rel_dict[p] = 1
        else:
            rel_dict[p] += 1

    # Count facts per unordered drug pair.
    pair_dict = dict()
    for s, p, o in benchmark_triples:
        if s > o:
            pair = (s, o)
        else:
            pair = (o, s)
        # BUG FIX: the original tested ``pair not in rel_dict`` — always true
        # for drug pairs — so every count was reset to 1 instead of
        # accumulating (and the else branch was unreachable).
        if pair not in pair_dict:
            pair_dict[pair] = 1
        else:
            pair_dict[pair] += 1

    # All ordered drug pairs (d1 != d2); used later to sample negatives.
    drug_combinations = np.array([[d1, d2] for d1, d2 in list(itertools.product(pse_drugs, pse_drugs)) if d1 != d2])

    print("Processing dataset files to generate a knowledge graph ... ")

    # delete raw polypharmacy data
    del benchmark_triples

    dataset = KgDataset(name=data_name)
    dataset.load_triples(benchmark_train, tag="bench_train")
    dataset.load_triples(benchmark_valid, tag="bench_valid")
    dataset.load_triples(benchmark_test, tag="bench_test")

    del benchmark_train
    del benchmark_valid
    del benchmark_test

    nb_entities = dataset.get_ents_count()
    nb_relations = dataset.get_rels_count()
    pse_indices = dataset.get_rel_indices(list(pse_list))

    # Map drug labels to entity indices for the combination table.
    d1 = np.array(dataset.get_ent_indices(list(drug_combinations[:, 0]))).reshape([-1, 1])
    d2 = np.array(dataset.get_ent_indices(list(drug_combinations[:, 1]))).reshape([-1, 1])
    drug_combinations = np.concatenate([d1, d2], axis=1)
    del d1
    del d2

    # grouping side effect information by the side effect type
    train_data = dataset.data["bench_train"]
    valid_data = dataset.data["bench_valid"]
    test_data = dataset.data["bench_test"]

    bench_idx_data = np.concatenate([train_data, valid_data, test_data])
    se_facts_full_dict = {se: set() for se in pse_indices}
    for s, p, o in bench_idx_data:
        se_facts_full_dict[p].add((s, p, o))

    print("Initializing the knowledge graph embedding model... ")
    # model pipeline definition
    model = TriModel(seed=seed, verbose=2)
    pipe_model = Pipeline([('kge_model', model)])

    # set model parameters
    model_params = {
        'kge_model__em_size': 100,
        'kge_model__lr': 0.01,
        'kge_model__optimiser': "AMSgrad",
        'kge_model__log_interval': 10,
        'kge_model__nb_epochs': 100,
        'kge_model__nb_negs': 6,
        'kge_model__batch_size': 5000,
        'kge_model__initialiser': 'xavier_uniform',
        'kge_model__nb_ents': nb_entities,
        'kge_model__nb_rels': nb_relations
    }

    # add parameters to the model then call fit method
    pipe_model.set_params(**model_params)

    print("Training ... ")
    pipe_model.fit(X=train_data, y=None)

    metrics_per_se = {se_idx: {"ap": .0, "auc-roc": .0, "auc-pr": .0, "p@50": .0} for se_idx in pse_indices}

    se_ap_list = []
    se_auc_roc_list = []
    se_auc_pr_list = []
    se_p50_list = []
    se_ac_list = []
    print("================================================================================")
    for se in tqdm(pse_indices, desc="Evaluating test data for each side-effect"):
        se_name = dataset.get_rel_labels([se])[0]
        se_all_facts_set = se_facts_full_dict[se]
        se_test_facts_pos = np.array([[s, p, o] for s, p, o in test_data if p == se])
        se_test_facts_pos_size = len(se_test_facts_pos)

        # Negatives: drug pairs never observed with this side effect in
        # either direction.
        se_test_facts_neg = np.array([[d1, se, d2] for d1, d2 in drug_combinations
                                      if (d1, se, d2) not in se_all_facts_set
                                      and (d2, se, d1) not in se_all_facts_set])

        # shuffle and keep negatives with size equal to positive instances so positive to negative ratio is 1:1
        np.random.shuffle(se_test_facts_neg)
        se_test_facts_neg = se_test_facts_neg[:se_test_facts_pos_size, :]

        set_test_facts_all = np.concatenate([se_test_facts_pos, se_test_facts_neg])
        se_test_facts_labels = np.concatenate([np.ones([len(se_test_facts_pos)]), np.zeros([len(se_test_facts_neg)])])
        se_test_facts_scores = model.predict(set_test_facts_all)

        se_ap = average_precision(se_test_facts_labels, se_test_facts_scores)
        se_p50 = precision_at_k(se_test_facts_labels, se_test_facts_scores, k=50)
        se_auc_pr = auc_pr(se_test_facts_labels, se_test_facts_scores)
        se_auc_roc = auc_roc(se_test_facts_labels, se_test_facts_scores)
        # Accuracy: rescale raw scores to [0, 1] and threshold at 0.5.
        se_ac = accuracy_score(se_test_facts_labels, (scale(se_test_facts_scores, out_range=(0, 1)) > 0.5))

        se_ap_list.append(se_ap)
        se_auc_roc_list.append(se_auc_roc)
        se_auc_pr_list.append(se_auc_pr)
        se_p50_list.append(se_p50)
        se_ac_list.append(se_ac)

        se_code = se_name.replace("SE:", "")
        metrics_per_se[se] = {"ac": se_ac, "ap": se_ap, "auc-roc": se_auc_roc, "auc-pr": se_auc_pr, "p@50": se_p50}
        print("AC: %1.4f - AP: %1.4f - AUC-ROC: %1.4f - AUC-PR: %1.4f - P@50: %1.4f > %s (%-4d)" %
              (se_ac, se_ap, se_auc_roc, se_auc_pr, se_p50, se_code, se_test_facts_pos_size), flush=True)

    se_ap_list_avg = np.average(se_ap_list)
    se_auc_roc_list_avg = np.average(se_auc_roc_list)
    se_auc_pr_list_avg = np.average(se_auc_pr_list)
    se_p50_list_avg = np.average(se_p50_list)
    se_ac_list_avg = np.average(se_ac_list)

    print("================================================================================")
    print("[AVERAGE] AC: %1.4f - AP: %1.4f - AUC-ROC: %1.4f - AUC-PR: %1.4f - P@50: %1.4f" %
          (se_ac_list_avg, se_ap_list_avg, se_auc_roc_list_avg, se_auc_pr_list_avg, se_p50_list_avg), flush=True)
    print("================================================================================")
    end = time.time()
    time_taken = end - start
    print('This run has taken {} seconds to execute.'.format(time_taken))
main()
| 40.748691 | 118 | 0.647951 |
a34d08bb021b913763df8aafe21dfda822fe1dc4 | 11,055 | py | Python | test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/aio/operations/_subscription_in_credentials_operations.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/aio/operations/_subscription_in_credentials_operations.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/aio/operations/_subscription_in_credentials_operations.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._subscription_in_credentials_operations import (
build_post_method_global_not_provided_valid_request,
build_post_method_global_null_request,
build_post_method_global_valid_request,
build_post_path_global_valid_request,
build_post_swagger_global_valid_request,
)
T = TypeVar("T")
# Optional response callback (the ``cls`` kwarg): receives the pipeline
# response, the deserialized result, and a header dict; its return value
# replaces the operation's result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionInCredentialsOperations:
    """SubscriptionInCredentialsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azurespecialproperties.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _run_post(self, build_request, template_url, **kwargs: Any) -> None:
        # Shared implementation for every POST operation in this group:
        # build the request using the credential's subscriptionId, send it
        # through the pipeline, and map error responses to exceptions.
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        request = build_request(
            subscription_id=self._config.subscription_id,
            template_url=template_url,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    @distributed_trace_async
    async def post_method_global_valid(self, **kwargs: Any) -> None:
        """POST method with subscriptionId modeled in credentials. Set the credential subscriptionId to
        '1234-5678-9012-3456' to succeed.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return await self._run_post(
            build_post_method_global_valid_request,
            self.post_method_global_valid.metadata["url"],
            **kwargs,
        )

    post_method_global_valid.metadata = {"url": "/azurespecials/subscriptionId/method/string/none/path/global/1234-5678-9012-3456/{subscriptionId}"}  # type: ignore

    @distributed_trace_async
    async def post_method_global_null(self, **kwargs: Any) -> None:
        """POST method with subscriptionId modeled in credentials. Set the credential subscriptionId to
        null, and client-side validation should prevent you from making this call.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return await self._run_post(
            build_post_method_global_null_request,
            self.post_method_global_null.metadata["url"],
            **kwargs,
        )

    post_method_global_null.metadata = {"url": "/azurespecials/subscriptionId/method/string/none/path/global/null/{subscriptionId}"}  # type: ignore

    @distributed_trace_async
    async def post_method_global_not_provided_valid(self, **kwargs: Any) -> None:
        """POST method with subscriptionId modeled in credentials. Set the credential subscriptionId to
        '1234-5678-9012-3456' to succeed.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return await self._run_post(
            build_post_method_global_not_provided_valid_request,
            self.post_method_global_not_provided_valid.metadata["url"],
            **kwargs,
        )

    post_method_global_not_provided_valid.metadata = {"url": "/azurespecials/subscriptionId/method/string/none/path/globalNotProvided/1234-5678-9012-3456/{subscriptionId}"}  # type: ignore

    @distributed_trace_async
    async def post_path_global_valid(self, **kwargs: Any) -> None:
        """POST method with subscriptionId modeled in credentials. Set the credential subscriptionId to
        '1234-5678-9012-3456' to succeed.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return await self._run_post(
            build_post_path_global_valid_request,
            self.post_path_global_valid.metadata["url"],
            **kwargs,
        )

    post_path_global_valid.metadata = {"url": "/azurespecials/subscriptionId/path/string/none/path/global/1234-5678-9012-3456/{subscriptionId}"}  # type: ignore

    @distributed_trace_async
    async def post_swagger_global_valid(self, **kwargs: Any) -> None:
        """POST method with subscriptionId modeled in credentials. Set the credential subscriptionId to
        '1234-5678-9012-3456' to succeed.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return await self._run_post(
            build_post_swagger_global_valid_request,
            self.post_swagger_global_valid.metadata["url"],
            **kwargs,
        )

    post_swagger_global_valid.metadata = {"url": "/azurespecials/subscriptionId/swagger/string/none/path/global/1234-5678-9012-3456/{subscriptionId}"}  # type: ignore
| 48.065217 | 188 | 0.708186 |
f647bab2f0d77d8b958f1c111387de8a243e6867 | 31,533 | py | Python | flax/core/lift.py | davisyoshida/flax | d03a262249671db92f9924f990437907cac36b21 | [
"Apache-2.0"
] | null | null | null | flax/core/lift.py | davisyoshida/flax | d03a262249671db92f9924f990437907cac36b21 | [
"Apache-2.0"
] | null | null | null | flax/core/lift.py | davisyoshida/flax | d03a262249671db92f9924f990437907cac36b21 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jax transform lifting."""
import collections
from dataclasses import dataclass
import functools
import jax
from jax import random
from typing import Any, Callable, Sequence, Union, Iterable, Optional, Mapping, TypeVar, Generic
from .frozen_dict import freeze
from .frozen_dict import FrozenDict
from .frozen_dict import unfreeze
from .scope import Scope, CollectionFilter, PRNGSequenceFilter, in_filter, union_filters, intersect_filters, subtract_filters, group_collections
from . import axes_scan
T = TypeVar('T')
def _dedup_scopes(scopes):
paths = []
# must preseve insertion order for duplication to work correctly
minimal_set = collections.OrderedDict((s, ()) for s in scopes)
for leaf in scopes:
scope = leaf.parent
max_parent = leaf
max_parent_path = ()
path = [leaf.name]
while scope is not None:
if scope in minimal_set:
max_parent = scope
max_parent_path = tuple(reversed(path))
path.append(scope.name)
scope = scope.parent
if max_parent is not leaf and leaf in minimal_set:
del minimal_set[leaf]
paths.append((max_parent, max_parent_path))
return tuple(minimal_set), tuple(paths)
def _dup_scopes(orig_scopes, scopes, paths):
mapping = dict(zip(orig_scopes, scopes))
scopes = []
for root, path in paths:
scope = mapping[root]
for name in path:
scope = scope.push(name, reuse=True)
scopes.append(scope)
return scopes
def _transpose(xs):
return tuple(zip(*xs))
def pack(fn: Callable[..., Any],
         in_variable_filters: Sequence[CollectionFilter],
         out_variable_filters: Sequence[CollectionFilter],
         rng_filters: Sequence[PRNGSequenceFilter],
         name=None) -> Callable[..., Any]:
  """Pack variables and rngs for functional transformations.

  The pack function is the building block for all other lifted transformations.

  Args:
    fn: the function to be transformed. It is called as
      ``fn(scope_fn, repack, variable_groups_t, rng_groups_t, *args)`` and
      must return ``(y, out_variable_groups_t)``.
    in_variable_filters: one collection filter per lifted input variable group.
    out_variable_filters: one collection filter per output variable group;
      collections not matched here are frozen on the way in.
    rng_filters: one PRNG-sequence filter per lifted rng group.
    name: optional transform name, woven into the scope path for debugging.

  Returns:
    A wrapped version of ``fn`` taking a scope (or pytree of scopes) first.
  """
  @functools.wraps(fn)
  def wrapper(scope_tree: Scope, *args):
    # pylint: disable=protected-access
    # A pytree of scopes may be passed; scopes nested inside other passed-in
    # scopes are removed here and restored later via `paths`.
    scopes, treedef = jax.tree_flatten(scope_tree)
    scopes, paths = _dedup_scopes(scopes)
    # Group each scope's variable collections by the input filters.
    # The `_t` suffix marks the transposed (group-major) layout.
    variable_groups_xs = []
    for scope in scopes:
      scope._validate_trace_level()
      scope._populate_collections()
      variable_groups_xs.append(group_collections(
          scope._variables, in_variable_filters))
    variable_groups_xs_t = _transpose(variable_groups_xs)
    # Make sure that in-only variable collections are frozen
    for variable_group_xs in variable_groups_xs_t:
      for variable_group in variable_group_xs:
        for col_name, collection in variable_group.items():
          col_in_out = any(
              in_filter(col_filter, col_name)
              for col_filter in out_variable_filters)
          if not col_in_out:
            variable_group[col_name] = freeze(collection)
    # Draw one fresh PRNG key per rng group and kind from the outer scope.
    rng_groups_xs = []
    for scope in scopes:
      rng_groups = group_collections(scope.rngs, rng_filters)
      for rng_group in rng_groups:
        for kind in rng_group:
          rng_group[kind] = scope.make_rng(kind)
      rng_groups_xs.append(rng_groups)
    rng_groups_xs_t = _transpose(rng_groups_xs)
    inner_scopes = []
    def scope_fn(variable_groups_xs_t, rng_groups_xs_t):
      # Reconstructs fresh inner scopes from (possibly transformed) variable
      # and rng groups; called by `fn` inside the transformation.
      nonlocal inner_scopes
      for inner_scope in inner_scopes:
        inner_scope.invalidate()
      inner_scopes = []
      mutable = False
      for out_filter in out_variable_filters:
        mutable = union_filters(mutable, out_filter)
      # could be () in the edge case where no rngs or variable_groups are lifted
      # in this case fallback to ((),) * len(scopes) to make sure the zip has something
      # to iterate over for each scope.
      variable_groups_xs = _transpose(variable_groups_xs_t) or ((),) * len(scopes)
      rng_groups_xs = _transpose(rng_groups_xs_t) or ((),) * len(scopes)
      assert len(variable_groups_xs) == len(scopes)
      assert len(rng_groups_xs) == len(scopes)
      for variable_groups, rng_groups, scope in zip(variable_groups_xs, rng_groups_xs, scopes):
        variables = {}
        rngs = {}
        for variable_group in variable_groups:
          variables.update(variable_group)
        for rng_group in rng_groups:
          rngs.update(rng_group)
        # make sure variable dicts are cloned and can't be manipulated by ref sharing.
        variables = jax.tree_map(lambda x: x, variables)
        scope_mutable = intersect_filters(scope.root.mutable, mutable)
        # Annotate the scope path with the transform name for error messages.
        new_path = scope.path
        if name:
          if new_path:
            new_path = new_path[:-1] + (f'{name}({new_path[-1]})',)
          else:
            new_path = (f'{name}()',)
        inner_scope = Scope(
            variables, name=scope.name, rngs=rngs,
            mutable=scope_mutable, parent=None,
            path=new_path)
        inner_scopes.append(inner_scope)
      inner_scopes = _dup_scopes(scopes, inner_scopes, paths)
      return treedef.unflatten(inner_scopes)
    def repack(inner_scope_tree):
      # Extracts the mutated variable collections out of the inner scopes,
      # grouped by the output filters.
      inner_scopes = treedef.flatten_up_to(inner_scope_tree)
      inner_scopes, inner_paths = _dedup_scopes(inner_scopes)
      inner_scopes = list(inner_scopes)
      assert [p for _, p in paths] == [p for _, p in inner_paths]
      out_variable_groups_xs = []
      for inner_scope in inner_scopes:
        inner_scope.invalidate()
        inner_scope._validate_trace_level()
        mutable_variables = {key: val for key, val
                             in inner_scope._variables.items()
                             if in_filter(inner_scope.mutable, key)}
        out_variable_groups = group_collections(
            mutable_variables, tuple(out_variable_filters) + (True,))
        remainder = tuple(out_variable_groups[-1].keys())
        if remainder:
          raise ValueError(f'unmapped output variables: {remainder}')
        out_variable_groups_xs.append(out_variable_groups[:-1])
      return _transpose(out_variable_groups_xs)
    try:
      y, out_variable_groups_xs_t = fn(
          scope_fn, repack,
          variable_groups_xs_t, rng_groups_xs_t,
          *args)
    finally:
      # Inner scopes must never leak out of the transformation.
      for inner_scope in inner_scopes:
        inner_scope.invalidate()
    # Write the transformed output variables back into the original scopes.
    out_variable_groups_xs = _transpose(out_variable_groups_xs_t)
    for scope, out_variable_groups in zip(scopes, out_variable_groups_xs):
      for out_variable_group in out_variable_groups:
        for col_name, collection in out_variable_group.items():
          for var_name, value in collection.items():
            scope.put_variable(col_name, var_name, value)
    return y
  return wrapper
id_fn = lambda x: x
def transform(
    fn: Callable[..., Any],
    target: CollectionFilter,
    trans_in_fn: Callable[..., Any] = id_fn,
    trans_out_fn: Callable[..., Any] = id_fn,
    init: bool = False, mutable: bool = False,
    rngs: PRNGSequenceFilter = True, variables: CollectionFilter = True):
  """Locally transform Variables inside a scope.

  Args:
    fn: the function to be transformed.
    target: the collection(s) to be transformed.
    trans_in_fn: creates a view of the target variables.
    trans_out_fn: transforms the updated variables in the view after mutation.
    init: If True, variables are initialized before transformation.
    mutable: If True, the target variables are written back after `fn` runs.
    rngs: PRNGSequences added to the transformed scope (default: all).
    variables: Additional Variable collections added to the transformed scope.
      Besides those specified by `target` (default: all).
  """
  def wrapper(scope_fn, repack, variable_groups, rng_groups, treedef, *args):
    target, variables = variable_groups
    if init:
      # Run fn once untransformed so the target collection gets initialized,
      # then push the initialized variables through the out-transformation.
      scope = scope_fn((target, variables), rng_groups)
      fn(scope, *args)
      target, _ = repack(scope)
      target_tree = trans_out_fn(treedef.unflatten(target))
      target = treedef.flatten_up_to(target_tree)
    # Build the (mutable) transformed view of the target that `fn` will see.
    target_tree = treedef.unflatten(map(unfreeze, target))
    target_tree = trans_in_fn(target_tree)
    target = treedef.flatten_up_to(target_tree)
    if not is_target_out:
      # In-only targets are frozen so `fn` cannot mutate them.
      target = tuple(map(freeze, target))
    scope = scope_fn((target, variables), rng_groups)
    y = fn(scope, *args)
    out_target, out_vars = repack(scope)
    if is_target_out:
      # Map the mutated view back to its stored representation.
      out_target_tree = trans_out_fn(treedef.unflatten(out_target))
      out_target = treedef.flatten_up_to(out_target_tree)
    return y, (out_target, out_vars)
  is_target_out = mutable or init
  in_vars = (target, variables)
  out_vars = in_vars if is_target_out else (False, subtract_filters(variables, target))
  wrapper = pack(wrapper, in_vars, out_vars, (rngs,), name='transform')
  @functools.wraps(wrapper)
  def catch_treedef(scopes, *args):
    # Capture the scope pytree structure so wrapper can (un)flatten targets.
    treedef = jax.tree_structure(scopes)
    return wrapper(scopes, treedef, *args)
  return catch_treedef
def transform_module(fn: Callable[..., Any],
                     target: CollectionFilter = 'params',
                     trans_in_fn: Callable[..., Any] = id_fn,
                     trans_out_fn: Callable[..., Any] = id_fn,
                     mutable: bool = False,
                     rngs: PRNGSequenceFilter = True,
                     variables: CollectionFilter = True):
  """Wrapper around `transform` with automatic init detection.

  If the target collection is absent from the scope (or empty), the wrapped
  transform is run with `init=True` so the variables get initialized first.
  See `transform` for the meaning of the remaining arguments.
  """
  def wrapper(scope, *args, **kwargs):
    current_vars = scope.variables()
    needs_init = target not in current_vars or not current_vars[target]
    bound_fn = functools.partial(fn, **kwargs)
    lifted = transform(
        bound_fn,
        target,
        trans_in_fn=trans_in_fn,
        trans_out_fn=trans_out_fn,
        init=needs_init,
        mutable=mutable,
        rngs=rngs,
        variables=variables)
    return lifted(scope, *args)
  return wrapper
def swap_collection(fn: Callable[..., Any], col_a: str, col_b: str):
  """Swap the contents of two variable collections inside `fn`'s scope."""
  def exchange(target):
    # A collection missing from the target is treated as empty.
    contents_a = target[col_a] if col_a in target else {}
    contents_b = target[col_b] if col_b in target else {}
    target[col_a] = contents_b
    target[col_b] = contents_a
    return target
  # The same exchange is applied symmetrically on the way in and on the
  # way out of the transformed scope.
  return transform(fn, (col_a, col_b), exchange, exchange, mutable=True)
@dataclass(frozen=True)
class In(Generic[T]):
  """Specifies a variable collection should only be lifted as input.

  Wraps an axis value inside a `variable_axes` mapping; unwrapped again
  by `_split_in_out_axes`.
  """
  axis: Any # pytype does not support generic variable annotation
@dataclass(frozen=True)
class Out(Generic[T]):
  """Specifies a variable collection should only be lifted as output.

  Wraps an axis value inside a `variable_axes` mapping; unwrapped again
  by `_split_in_out_axes`.
  """
  axis: Any # pytype does not support generic variable annotation
def _split_in_out_axes(xs: Mapping[CollectionFilter, Any]):
  """Partition a filter->axis mapping into input and output axis mappings.

  `In`-wrapped entries appear only in the input mapping, `Out`-wrapped
  entries only in the output mapping; plain entries appear in both.
  """
  def _axis_of(value):
    if isinstance(value, (In, Out)):
      return value.axis
    return value
  in_axes = {}
  out_axes = {}
  for key, value in xs.items():
    if not isinstance(value, Out):
      in_axes[key] = _axis_of(value)
    if not isinstance(value, In):
      out_axes[key] = _axis_of(value)
  return in_axes, out_axes
# Axis specification for `vmap`: an axis index, or `None` to broadcast.
Axis = Optional[int]
# An axis that may additionally be marked input-only (`In`) or output-only (`Out`).
InOutAxis = Union[Axis, In[Axis], Out[Axis]]
def vmap(fn: Callable[..., Any],
         variable_axes: Mapping[CollectionFilter, InOutAxis],
         split_rngs: Mapping[PRNGSequenceFilter, bool],
         in_axes=0, out_axes=0,
         axis_size: Optional[int] = None,
         axis_name: Optional[str] = None) -> Callable[..., Any]:
  """A lifted version of ``jax.vmap``.

  See ``jax.vmap`` for the unlifted batch transform in Jax.

  ``vmap`` can be used to add a batch axis to a scope function.
  For example we could create a version of ``dense`` with
  a batch axis that does not share parameters::

    batch_dense = lift.vmap(
        nn.dense,
        in_axes=(0, None),
        variable_axes={'params': 0},
        split_rngs={'params': True})

  By using ``variable_axes={'params': 0}``, we indicate that the
  parameters themselves are mapped over and therefore not shared along
  the mapped axis. Consequently, we also split the 'params' RNG,
  otherwise the parameters would be initialized identically along
  the mapped axis.

  Similarly, ``vmap`` could be used to add a batch axis with parameter
  sharing::

    batch_foo = lift.vmap(
        foo,
        in_axes=0, out_axes=0,
        variable_axes={'params': None},
        split_rngs={'params': False})

  Here we use ``variable_axes={'params': None}`` to indicate the parameter
  variables are shared along the mapped axis. Consequently, the 'params'
  RNG must also be shared.

  Args:
    fn: the function to be transformed.
    variable_axes: the variable collections that are lifted into the
      batching transformation. Use `None` to indicate a broadcasted
      collection or an integer to map over an axis.
    split_rngs: Split PRNG sequences will be different for each index
      of the batch dimension. Unsplit PRNGs will be broadcasted.
    in_axes: Specifies the mapping of the input arguments (see `jax.vmap`).
    out_axes: Specifies the mapping of the return value (see `jax.vmap`).
    axis_size: Specifies the size of the batch axis. This only needs
      to be specified if it cannot be derived from the input arguments.
    axis_name: Specifies a name for the batch axis. Can be used together
      with parallel reduction primitives (e.g. `jax.lax.pmean`,
      `jax.lax.ppermute`, etc.)
  """
  variable_in_axes, variable_out_axes = _split_in_out_axes(variable_axes)
  variable_in_groups, variable_in_axes = _unzip2(variable_in_axes.items())
  variable_out_groups, variable_out_axes = _unzip2(variable_out_axes.items())
  rng_groups, rng_splits = _unzip2(split_rngs.items())
  # Split rngs are mapped over axis 0; unsplit ones are broadcast (None).
  rng_axes = tuple(0 if rng_split else None for rng_split in rng_splits)
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    def find_axis_size(axis, x):
      # Returns the size of the mapped axis of `x`, or () for unmapped leaves.
      if axis is not None:
        leaves = jax.tree_leaves(x)
        if leaves:
          return leaves[0].shape[axis]
      return ()
    # Determine the batch size from the mapped variables/arguments, then
    # split the PRNG keys that requested it.
    axis_sizes = jax.tree_multimap(find_axis_size, (variable_in_axes, in_axes), (variable_groups, args))
    axis_sizes = set(jax.tree_leaves(axis_sizes))
    if axis_size is None and len(axis_sizes) == 1:
      d_axis_size, = axis_sizes
    elif len(axis_sizes) > 1:
      raise ValueError(f'Inconsistent batch axis sizes: {axis_sizes}')
    elif axis_size is None:
      raise ValueError('axis_size should be specified manually.')
    else:
      d_axis_size = axis_size
    split_fn = lambda rng: random.split(rng, d_axis_size)
    rng_groups = tuple(
        jax.tree_map(split_fn, rng_group) if split else rng_group
        for rng_group, split in zip(rng_groups, rng_splits))
    @functools.partial(jax.vmap,
                       in_axes=(variable_in_axes, rng_axes, in_axes),
                       out_axes=(out_axes, variable_out_axes),
                       axis_name=axis_name)
    @functools.wraps(fn)
    def mapped(variable_groups, rng_groups, args):
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args)
      return y, repack_fn(scope)
    return mapped(variable_groups, rng_groups, args)
  return pack(
      inner, variable_in_groups, variable_out_groups, rng_groups,
      name='vmap')
# Scan axes must be concrete integers (enforced by the asserts in `scan`).
ScanAxis = int
InOutScanAxis = Union[ScanAxis, In[ScanAxis], Out[ScanAxis]]
def scan(fn: Callable[..., Any],
         variable_axes: Mapping[CollectionFilter, InOutScanAxis] = {},
         variable_broadcast: CollectionFilter = False,
         variable_carry: CollectionFilter = False,
         split_rngs: Mapping[PRNGSequenceFilter, bool] = {},
         in_axes=0, out_axes=0,
         length: Optional[int] = None,
         reverse: bool = False) -> Callable[..., Any]:
  """A lifted version of ``jax.lax.scan``.

  See ``jax.lax.scan`` for the unlifted scan in Jax.

  To improve consistency with ``vmap``, this version of scan
  uses ``in_axes`` and ``out_axes`` to determine which arguments
  are scanned over and along which axis.

  ``scan`` distinguishes between 3 different types of values inside the loop:

  1. **scan**: a value that is iterated over in a loop. All scan values must
     have the same size in the axis they are scanned over. Scanned outputs
     will be stacked along the scan axis.
  2. **carry**: A carried value is updated at each loop iteration. It must
     have the same shape and dtype throughout the loop.
  3. **broadcast**: a value that is closed over by the loop. When a variable
     is broadcasted they are typically initialized inside the loop body but
     independent of the loop variables.

  The loop body should have the signature
  ``(scope, body, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys``
  are the scan values that go in and out of the loop.

  Example::

    scope.variable('counter', 'i', jnp.zeros, ())
    def body_fn(scope, c, x):
      counter = scope.variable('counter', 'i', jnp.zeros, ())
      counter.value += 1
      x = scope.child(nn.dense)(x, 1)
      return c, x

    _, ys = lift.scan(
        body_fn,
        variable_carry='counter',
        variable_broadcast='params',
        split_rngs={'params': False})(scope, (), xs)

  Args:
    fn: the function to be transformed.
    variable_axes: the variable collections that are scanned over.
    variable_broadcast: Specifies the broadcasted variable collections.
      A broadcasted variable should not depend on any computation that cannot be lifted out of the loop.
      This is typically used to define shared parameters inside the fn.
    variable_carry: Specifies the variable collections that are carried through the loop.
      Mutations to these variables are carried to the next iteration and will be preserved
      when the scan finishes.
    split_rngs: Split PRNG sequences will be different for each loop iterations.
      If split is False the PRNGs will be the same across iterations.
    in_axes: Specifies the axis to scan over for the arguments. Should be a prefix
      tree of the arguments. Use `flax.core.broadcast` to feed an entire input
      to each iteration of the scan body.
    out_axes: Specifies the axis to scan over for the return value. Should be a prefix
      tree of the return value.
    length: Specifies the number of loop iterations. This only needs
      to be specified if it cannot be derived from the scan arguments.
    reverse: If true, scan from end to start in reverse order.

  Returns:
    The scan function with the signature ``(scope, carry, *xxs) -> (carry, yys)``,
    where ``xxs`` and ``yys`` are the scan values that go in and out of the loop.
  """
  variable_in_axes, variable_out_axes = _split_in_out_axes(variable_axes)
  variable_in_groups, variable_in_axes = _unzip2(variable_in_axes.items())
  variable_out_groups, variable_out_axes = _unzip2(variable_out_axes.items())
  # Scanned variables must use concrete axes; broadcasting is expressed via
  # `variable_broadcast` instead of a `None` axis.
  assert all(isinstance(ax, int) for ax in variable_in_axes)
  assert all(isinstance(ax, int) for ax in variable_out_axes)
  rng_groups, rng_splits = _unzip2(split_rngs.items())
  rng_axes = tuple(0 if rng_split else axes_scan.broadcast
                   for rng_split in rng_splits)
  def inner(scope_fn, repack_fn,
            variable_groups, rng_groups,
            init, *args):
    def find_length(axis, x):
      # Returns the scan length along `axis` of `x`, or () for broadcasts.
      if axis is not axes_scan.broadcast:
        leaves = jax.tree_leaves(x)
        if leaves:
          return leaves[0].shape[axis]
      return ()
    # Determine the number of iterations from the scanned arguments, then
    # split the PRNG keys that requested per-iteration values.
    lengths = jax.tree_multimap(find_length, in_axes, args)
    lengths = set(jax.tree_leaves(lengths))
    if length is None and len(lengths) == 1:
      d_length, = lengths
    elif len(lengths) > 1:
      raise ValueError(f'Inconsistent scan lengths: {lengths}')
    elif length is None:
      raise ValueError('length should be specified manually.')
    else:
      d_length = length
    split_fn = lambda rng: random.split(rng, d_length)
    rng_groups = tuple(
        jax.tree_map(split_fn, rng_group) if split else rng_group
        for rng_group, split in zip(rng_groups, rng_splits))
    @functools.partial(axes_scan.scan,
                       in_axes=(variable_in_axes, rng_axes, in_axes),
                       out_axes=(out_axes, variable_out_axes),
                       length=length, reverse=reverse)
    def scanned(broadcast_vars, carry, scan_variable_groups, rng_groups, args):
      carry_vars, c = carry
      variable_groups = (broadcast_vars, carry_vars) + scan_variable_groups
      scope = scope_fn(variable_groups, rng_groups)
      c, y = fn(scope, c, *args)
      out_vars = repack_fn(scope)
      broadcast_vars_out = out_vars[0]
      carry_vars = out_vars[1]
      scan_vars = out_vars[2:]
      # add immutable broadcast vars back to broadcast output
      # otherwise they won't be fed to the actual scan body
      for in_group, out_group in zip(broadcast_vars, broadcast_vars_out):
        for col in in_group:
          if col not in out_group:
            out_group[col] = in_group[col]
      return broadcast_vars_out, (carry_vars, c), (y, scan_vars)
    broadcast_vars = variable_groups[0]
    carry_vars = variable_groups[1]
    scan_vars = variable_groups[2:]
    broadcast_vars, (carry_vars, c), (ys, scan_vars) = scanned(
        broadcast_vars, (carry_vars, init), scan_vars, rng_groups, args)
    # remove immutable broadcast vars otherwise they will be updated
    # with their own value which will cause an error
    for out_group in broadcast_vars:
      for name, col in tuple(out_group.items()):
        if isinstance(col, FrozenDict):
          del out_group[name]
    out_vars = (broadcast_vars, carry_vars,) + scan_vars
    return (c, ys), out_vars
  return pack(
      inner,
      (variable_broadcast, variable_carry) + variable_in_groups,
      (variable_broadcast, variable_carry) + variable_out_groups,
      rng_groups,
      name='scan')
def custom_vjp(fn: Callable[..., Any], backward_fn: Callable[..., Any],
               grad_kind: CollectionFilter = 'params',
               nondiff_argnums=()):
  """Lifted version of `jax.custom_vjp`.

  `backward_fn` defines a custom vjp (backward gradient) for `fn`.

  Example::

    def fwd(scope, x, features):
      y = nn.dense(scope, x, features)
      return y, x

    def bwd(features, scope_fn, params, res, g):
      x = res
      fn = lambda params, x: nn.dense(scope_fn(params), x, features)
      _, pullback = jax.vjp(fn, params, x)
      g_param, g_x = pullback(g)
      g_param = jax.tree_map(jnp.sign, g_param)
      return g_param, g_x

    dense_sign_grad = lift.custom_vjp(fwd, backward_fn=bwd, nondiff_argnums=(2,))

  Args:
    fn: should return a tuple of output and auxiliary data for the backward pass.
    backward_fn: arguments are passed as (*nondiff_args, scope_fn, grad_variables, aux, g_y)
      where scope_fn takes grad_variables to create the scope,
      aux is the auxiliary data returned by `fn`,
      and g_y is the tangent of y.
    grad_kind: the variable collection whose gradient is custom-defined.
    nondiff_argnums: positional arguments that are not differentiated.
  """
  # TODO(jheek) is this transform general/flexible enough?
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    grad_variables, other_variables = variable_groups
    def simple_scope_fn(grad_variables):
      # Rebuilds a scope from (frozen) grad variables; handed to backward_fn.
      grad_variables = tuple(freeze(x) for x in grad_variables)
      return scope_fn((grad_variables, other_variables), rng_groups)
    def f(grad_variables, *args):
      # Primal computation: drops the auxiliary data returned by `fn`.
      scope = scope_fn((grad_variables, other_variables), rng_groups)
      y, _ = fn(scope, *args)
      vars_out = repack_fn(scope)
      return y, vars_out
    f = jax.custom_vjp(f, nondiff_argnums=nondiff_argnums)
    def f_fwd(grad_variables, *args):
      # Forward pass: keep fn's auxiliary data and the grad variables as
      # residuals for the backward pass.
      scope = simple_scope_fn(grad_variables)
      y, res = fn(scope, *args)
      vars_out = repack_fn(scope)
      return (y, vars_out), (res, grad_variables)
    def f_bwd(*args):
      # The non-differentiated args come first, followed by the residuals
      # and the output cotangent.
      nondiff_args = args[:-2]
      res, g = args[-2:]
      g_y, _ = g
      user_res, grad_variables = res
      return backward_fn(*nondiff_args, simple_scope_fn, grad_variables, user_res, g_y)
    f.defvjp(f_fwd, f_bwd)
    return f(grad_variables, *args)
  variable_in_groups = (grad_kind, True,)
  variable_out_groups = (grad_kind, True,)
  rng_groups = (True,)
  return pack(
      inner, variable_in_groups, variable_out_groups, rng_groups,
      name='custom_vjp')
def checkpoint(fn: Callable[..., Any],
               variables: CollectionFilter = True,
               rngs: PRNGSequenceFilter = True,
               concrete: bool = False,
               ) -> Callable[..., Any]:
  """Lifted version of ``jax.checkpoint`` (also aliased as ``lift.remat``).

  Gradients computed through the returned function re-compute the
  intermediate values of ``fn`` instead of storing them.

  Args:
    fn: scope function whose intermediate computations should be re-computed
      when computing gradients.
    variables: the variable collections that are lifted (default: all).
    rngs: the PRNG sequences that are lifted (default: all).
    concrete: whether ``fn`` may involve value-dependent Python control flow;
      disabled by default because in some edge-case compositions with
      :func:`jax.jit` it can lead to extra computation.

  Returns:
    A wrapped version of ``fn`` whose backward pass recomputes intermediates.
  """
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    @functools.wraps(fn)
    def core(variable_groups, rng_groups, *args):
      scope = scope_fn(variable_groups, rng_groups)
      return fn(scope, *args), repack_fn(scope)
    rematted = jax.remat(core, concrete=concrete)
    return rematted(variable_groups, rng_groups, *args)
  return pack(inner, (variables,), (variables,), (rngs,), name='remat')
def jit(fn: Callable[..., Any],
        variables: CollectionFilter = True,
        rngs: PRNGSequenceFilter = True,
        static_argnums: Union[int, Iterable[int]] = (),
        donate_argnums: Union[int, Iterable[int]] = (),
        device=None,
        backend: Union[str, None] = None,
        ) -> Callable[..., Any]:
  """Lifted version of ``jax.jit``.

  Args:
    fn: Scope function to be jitted.
    variables: The variable collections that are lifted. By default all
      collections are lifted.
    rngs: The PRNG sequences that are lifted. By default all PRNG sequences
      are lifted.
    static_argnums: An int or collection of ints specifying which positional
      arguments to treat as static (compile-time constant). Operations that only
      depend on static arguments will be constant-folded in Python (during
      tracing), and so the corresponding argument values can be any Python
      object. Static arguments should be hashable, meaning both ``__hash__`` and
      ``__eq__`` are implemented, and immutable. Calling the jitted function
      with different values for these constants will trigger recompilation. If
      the jitted function is called with fewer positional arguments than
      indicated by ``static_argnums`` then an error is raised. Arguments that
      are not arrays or containers thereof must be marked as static.
      Defaults to ().
    device: This is an experimental feature and the API is likely to change.
      Optional, the Device the jitted function will run on. (Available devices
      can be retrieved via :py:func:`jax.devices`.) The default is inherited from
      XLA's DeviceAssignment logic and is usually to use ``jax.devices()[0]``.
    backend: a string representing the XLA backend: ``'cpu'``, ``'gpu'``, or
      ``'tpu'``.
    donate_argnums: Specify which arguments are "donated" to the computation.
      It is safe to donate arguments if you no longer need them once the
      computation has finished. In some cases XLA can make use of donated
      buffers to reduce the amount of memory needed to perform a computation,
      for example recycling one of your input buffers to store a result. You
      should not reuse buffers that you donate to a computation, JAX will raise
      an error if you try to.
  Returns:
    A wrapped version of ``fn``, set up for just-in-time compilation.
  """
  if not isinstance(static_argnums, Iterable):
    static_argnums = (static_argnums,)
  if not isinstance(donate_argnums, Iterable):
    donate_argnums = (donate_argnums,)
  # offset argnums by one because first argument is the scope.
  # NOTE(review): the `if i > 0` filter silently *drops* argnum 0 instead of
  # remapping it to 1 -- presumably because the user function's argument 0 is
  # the scope itself, but confirm against upstream flax before relying on
  # static/donated argnum 0.
  static_argnums = tuple(i + 1 for i in static_argnums if i > 0)
  donate_argnums = tuple(i + 1 for i in donate_argnums if i > 0)
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    @functools.partial(jax.jit,
                       static_argnums=static_argnums,
                       donate_argnums=donate_argnums,
                       device=device, backend=backend)
    @functools.wraps(fn)
    def jitted(variable_groups, rng_groups, *args):
      # Re-create the scope inside the jitted function so variable state is
      # threaded through as traced inputs/outputs.
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args)
      return y, repack_fn(scope)
    return jitted(variable_groups, rng_groups, *args)
  return pack(inner, (variables,), (variables,), (rngs,), name='jit')
remat = checkpoint
def remat_scan(body_fn: Callable[..., Any], scope: Scope, carry: Any,
               lengths: Sequence[int],
               variable_broadcast: CollectionFilter = False,
               variable_carry: CollectionFilter = False,
               variable_axes: Mapping[CollectionFilter, InOutScanAxis] = {},
               split_rngs: Mapping[PRNGSequenceFilter, bool] = {}):
  """Combines `lift.remat` and `lift.scan` for memory efficient scans.

  The scan is applied ``prod(lengths)`` times in total, as a set of nested
  scans (one level per entry in ``lengths``); every inner level is wrapped
  in ``remat`` so its intermediates are recomputed during the backward pass
  instead of being stored.

  Example::
    def body_fn(scope, x):
      return nn.dense(scope, x, features=x.shape[-1])
    # 100x dense with O(sqrt(N)) memory for gradient computation
    y = lift.remat_scan(
        body_fn, scope, x, lengths=(10, 10),
        variable_axes={'params': 0},
        split_rngs={'params': True})
  """
  # TODO(jheek) should remat scan have scan inputs/outputs?
  scan_fn = functools.partial(
      scan,
      variable_broadcast=variable_broadcast,
      variable_carry=variable_carry,
      variable_axes=variable_axes,
      split_rngs=split_rngs)
  if len(lengths) == 1:
    # Base case: one plain scan; the body's per-step output is discarded,
    # only the carry is threaded through.
    def wrapper(scope, carry):
      return body_fn(scope, carry), ()
    carry, _ = scan_fn(wrapper, length=lengths[0])(scope, carry)
  else:
    # Recursive case: the inner nested scan is rematerialized as a unit.
    @remat
    def inner_loop(scope, carry):
      carry = remat_scan(body_fn, scope, carry, lengths[1:],
                         variable_broadcast, variable_carry,
                         variable_axes, split_rngs)
      return carry, ()
    carry, _ = scan_fn(inner_loop, length=lengths[0])(scope, carry)
  return carry
def named_call(fn: Callable[..., Any], name: str) -> Callable[..., Any]:
  """Adds a name scope to `fn` during profiling.

  Returns a wrapped scope function that behaves like ``fn`` but whose traced
  computation carries ``name`` (via ``jax.named_call``) so it is visible in
  profiler output.
  """
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, args, kwargs):
    @functools.wraps(fn)
    def named(variable_groups, rng_groups):
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args, **kwargs)
      return y, repack_fn(scope)
    # Attach the profiling name to the traced computation.
    named = jax.named_call(named, name=name)
    return named(variable_groups, rng_groups)
  lifted = pack(inner, (True,), (True,), (True,))
  def wrapper(scope, *args, **kwargs):
    # pack expects a fixed positional signature, so args/kwargs are bundled
    # into two extra positional leaves.
    return lifted(scope, args, kwargs)
  return wrapper
def _unzip2(xs):
ys = tuple(zip(*xs))
return ys if ys else ((), ())
| 39.465582 | 144 | 0.682016 |
76cd1b816416bfb5baa1d17db3552016b5c40749 | 5,650 | py | Python | keystoneclient/fixture/v2.py | jamielennox/python-keystoneclient | a1bc48c0fc475db6bca761a9023c35adab740dab | [
"Apache-1.1"
] | null | null | null | keystoneclient/fixture/v2.py | jamielennox/python-keystoneclient | a1bc48c0fc475db6bca761a9023c35adab740dab | [
"Apache-1.1"
] | null | null | null | keystoneclient/fixture/v2.py | jamielennox/python-keystoneclient | a1bc48c0fc475db6bca761a9023c35adab740dab | [
"Apache-1.1"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from keystoneclient.fixture import exception
from keystoneclient.openstack.common import timeutils
class _Service(dict):
def add_endpoint(self, public, admin=None, internal=None,
tenant_id=None, region=None):
data = {'tenantId': tenant_id or uuid.uuid4().hex,
'publicURL': public,
'adminURL': admin or public,
'internalURL': internal or public,
'region': region}
self.setdefault('endpoints', []).append(data)
return data
class Token(dict):
    """A V2 Keystone token that can be used for testing.

    This object is designed to allow clients to generate a correct V2 token
    for use in their test code. It should prevent clients from having to know
    the correct token format and allow them to test the portions of token
    handling that matter to them and not copy and paste samples.
    """

    def __init__(self, token_id=None, expires=None, issued=None,
                 tenant_id=None, tenant_name=None, user_id=None,
                 user_name=None):
        super(Token, self).__init__()

        # Any identifier not supplied is replaced with a random hex string.
        self.token_id = token_id or uuid.uuid4().hex
        self.user_id = user_id or uuid.uuid4().hex
        self.user_name = user_name or uuid.uuid4().hex

        if not issued:
            # Backdate slightly so the token is already valid at creation.
            issued = timeutils.utcnow() - datetime.timedelta(minutes=2)
        if not expires:
            expires = issued + datetime.timedelta(hours=1)

        try:
            self.issued = issued
        except (TypeError, AttributeError):
            # issued should be able to be passed as a string so ignore
            self.issued_str = issued

        try:
            self.expires = expires
        except (TypeError, AttributeError):
            # expires should be able to be passed as a string so ignore
            self.expires_str = expires

        if tenant_id or tenant_name:
            self.set_scope(tenant_id, tenant_name)

    @property
    def root(self):
        # All V2 token data lives under the top-level 'access' key.
        return self.setdefault('access', {})

    @property
    def _token(self):
        return self.root.setdefault('token', {})

    @property
    def token_id(self):
        return self._token['id']

    @token_id.setter
    def token_id(self, value):
        self._token['id'] = value

    @property
    def expires_str(self):
        return self._token['expires']

    @expires_str.setter
    def expires_str(self, value):
        self._token['expires'] = value

    @property
    def expires(self):
        return timeutils.parse_isotime(self.expires_str)

    @expires.setter
    def expires(self, value):
        self.expires_str = timeutils.isotime(value)

    @property
    def issued_str(self):
        return self._token['issued_at']

    @issued_str.setter
    def issued_str(self, value):
        self._token['issued_at'] = value

    @property
    def issued(self):
        return timeutils.parse_isotime(self.issued_str)

    @issued.setter
    def issued(self, value):
        self.issued_str = timeutils.isotime(value)

    @property
    def _user(self):
        return self.root.setdefault('user', {})

    @property
    def user_id(self):
        return self._user['id']

    @user_id.setter
    def user_id(self, value):
        self._user['id'] = value

    @property
    def user_name(self):
        return self._user['name']

    @user_name.setter
    def user_name(self, value):
        self._user['name'] = value

    @property
    def tenant_id(self):
        return self._token.get('tenant', {}).get('id')

    @tenant_id.setter
    def tenant_id(self, value):
        self._token.setdefault('tenant', {})['id'] = value

    @property
    def tenant_name(self):
        return self._token.get('tenant', {}).get('name')

    @tenant_name.setter
    def tenant_name(self, value):
        self._token.setdefault('tenant', {})['name'] = value

    @property
    def _metadata(self):
        return self.root.setdefault('metadata', {})

    def validate(self):
        """Check the fixture's internal consistency.

        Raises:
            exception.FixtureValidationError: if a service catalog is
                present on an unscoped token, or if a scoped token carries
                no roles.
        """
        # BUG FIX: the previous implementation read the non-existent
        # ``self.token`` / ``self.user`` attributes (only the private
        # ``_token`` / ``_user`` accessors exist), so validate() raised
        # AttributeError instead of performing its checks.
        scoped = 'tenant' in self._token
        catalog = self.root.get('serviceCatalog')

        if catalog and not scoped:
            msg = 'You cannot have a service catalog on an unscoped token'
            raise exception.FixtureValidationError(msg)

        if scoped and not self._user.get('roles'):
            msg = 'You must have roles on a token to scope it'
            raise exception.FixtureValidationError(msg)

    def add_role(self, name=None, id=None):
        """Add a role by name to the user and record its id in metadata."""
        id = id or uuid.uuid4().hex
        name = name or uuid.uuid4().hex
        roles = self._user.setdefault('roles', [])
        # V2 tokens list only role *names* on the user; ids go in metadata.
        roles.append({'name': name})
        self._metadata.setdefault('roles', []).append(id)
        return {'id': id, 'name': name}

    def add_service(self, type, name=None):
        """Append a new catalog service of the given type and return it."""
        name = name or uuid.uuid4().hex
        service = _Service(name=name, type=type)
        self.root.setdefault('serviceCatalog', []).append(service)
        return service

    def set_scope(self, id=None, name=None):
        """Scope the token to a tenant, generating ids/names as needed."""
        self.tenant_id = id or uuid.uuid4().hex
        self.tenant_name = name or uuid.uuid4().hex
| 29.89418 | 79 | 0.630442 |
8cbe9d6ad2acb90980dd1ac9c32ba0ab046f2ba6 | 5,919 | py | Python | exec.py | wellhowtosay/Smbop_mt | f4b0feb9308632cfe31d4dc47efdbae672439bf7 | [
"MIT"
] | null | null | null | exec.py | wellhowtosay/Smbop_mt | f4b0feb9308632cfe31d4dc47efdbae672439bf7 | [
"MIT"
] | null | null | null | exec.py | wellhowtosay/Smbop_mt | f4b0feb9308632cfe31d4dc47efdbae672439bf7 | [
"MIT"
] | null | null | null | import json
import argparse
import contextlib
import sh
import subprocess
import pathlib
from allennlp.commands.train import train_model
from allennlp.common import Params
# from smbop.dataset_readers.spider import SmbopSpiderDatasetReader
from smbop.dataset_readers.cosql import SmbopCoSqlDatasetReader
from smbop.models.smbop import SmbopParser
from smbop.modules.relation_transformer import RelationTransformer
from smbop.modules.lxmert import LxmertCrossAttentionLayer
import namegenerator
def to_string(value):
    """Render *value* as Jsonnet-friendly string(s), recursively.

    Lists are converted element-wise, booleans become the lowercase
    literals ``"true"``/``"false"``, and everything else goes through
    ``str``.
    """
    if isinstance(value, list):
        return list(map(to_string, value))
    if isinstance(value, bool):
        return "true" if value else "false"
    return str(value)
def run():
    """Parse CLI flags, build the experiment config, and launch training.

    Builds a unique experiment name from a random generated name plus any
    non-default flags, backs up the working tree, and hands the rendered
    jsonnet config to allennlp's ``train_model`` (skipped under --profile).
    """
    parser = argparse.ArgumentParser(allow_abbrev=True)
    parser.add_argument("--name", nargs="?")
    parser.add_argument("--force", action="store_true")
    parser.add_argument("--gpu", type=str, default="0")
    parser.add_argument("--recover", action="store_true")
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--detect_anomoly", action="store_true")
    parser.add_argument("--profile", action="store_true")
    parser.add_argument("--is_oracle", action="store_true")
    parser.add_argument("--tiny_dataset", action="store_true")
    parser.add_argument("--load_less", action="store_true")
    parser.add_argument("--cntx_rep", action="store_true")
    parser.add_argument("--cntx_beam", action="store_true")
    parser.add_argument("--disable_disentangle_cntx", action="store_true")
    parser.add_argument("--disable_cntx_reranker", action="store_true")
    parser.add_argument("--disable_value_pred", action="store_true")
    parser.add_argument("--disable_use_longdb", action="store_true")
    parser.add_argument("--uniquify", action="store_true")
    parser.add_argument("--use_bce", action="store_true")
    parser.add_argument("--tfixup", action="store_true")
    parser.add_argument("--train_as_dev", action="store_true")
    parser.add_argument("--disable_amp", action="store_true")
    parser.add_argument("--disable_utt_aug", action="store_true")
    parser.add_argument("--should_rerank", action="store_true")
    parser.add_argument("--use_treelstm", action="store_true")
    parser.add_argument("--disable_db_content", action="store_true",
        help="Run with this argument (once) before pre-proccessing to reduce the pre-proccessing time by half \
        This argument causes EncPreproc to not perform IR on the largest tables. ")
    parser.add_argument("--lin_after_cntx", action="store_true")
    parser.add_argument("--optimizer", type=str, default="adam")
    parser.add_argument("--rat_layers", type=int, default=8)
    parser.add_argument("--beam_size", default=30, type=int)
    parser.add_argument("--base_dim", default=32, type=int)
    parser.add_argument("--num_heads", default=8, type=int)
    parser.add_argument("--beam_encoder_num_layers", default=1, type=int)
    parser.add_argument("--tree_rep_transformer_num_layers", default=1, type=int)
    parser.add_argument("--dropout", default=0.1, type=float)
    parser.add_argument("--rat_dropout", default=0.2, type=float)
    parser.add_argument("--lm_lr", default=3e-6, type=float)
    parser.add_argument("--lr", type=float, default=0.000186)
    parser.add_argument("--batch_size", default=20, type=int)
    parser.add_argument("--grad_acum", default=4, type=int)
    parser.add_argument("--max_steps", default=60000, type=int)
    parser.add_argument("--power", default=0.5, type=float)
    parser.add_argument("--temperature", default=1.0, type=float)
    parser.add_argument("--grad_clip", default=-1, type=float)
    parser.add_argument("--grad_norm", default=-1, type=float)

    # Map every flag name to its declared default so non-default flags can
    # be detected and folded into the experiment name.
    default_dict = {k.option_strings[0][2:]: k.default for k in parser._actions}
    args = parser.parse_args()
    diff = "_".join(
        [
            f"{key}{value}"
            for key, value in vars(args).items()
            if (key != "name" and value != default_dict[key])
        ]
    )

    # External variables handed to the jsonnet config; "disable_x" flags are
    # inverted and renamed to "x" so the config reads positively.
    ext_vars = {}
    for key, value in vars(args).items():
        if key.startswith("disable"):
            new_key = key.replace("disable_", "")
            ext_vars[new_key] = to_string(not value)
        else:
            ext_vars[key] = to_string(value)
    print(ext_vars)

    default_config_file = "configs/defaults.jsonnet"
    overrides_dict = {}
    if args.profile:
        # BUG FIX: overrides_dict starts empty, so the previous
        # overrides_dict["trainer"]["num_epochs"] = 1 raised KeyError
        # whenever --profile was passed; create the nested dict first.
        overrides_dict.setdefault("trainer", {})["num_epochs"] = 1

    experiment_name_parts = []
    experiment_name_parts.append(namegenerator.gen())
    if diff:
        experiment_name_parts.append(diff)
    if args.name:
        experiment_name_parts.append(args.name)
    experiment_name = "_".join(experiment_name_parts)
    print(f"experiment_name: {experiment_name}")
    ext_vars["experiment_name"] = experiment_name

    overrides_json = json.dumps(overrides_dict)
    settings = Params.from_file(
        default_config_file,
        ext_vars=ext_vars,
        params_overrides=overrides_json,
    )

    prefix = ""
    # prefix = "/home/ohadr/"
    prefix = "/media/disk1/ohadr/"
    # Refuse to clobber an existing experiment directory.
    assert not pathlib.Path(f"{prefix}experiments/{experiment_name}").exists()
    # sh.ln("-s", f"{prefix}/experiments/{experiment_name}", f"experiments/{experiment_name}")
    pathlib.Path(f"backup").mkdir(exist_ok=True)
    pathlib.Path(f"cache").mkdir(exist_ok=True)
    # pathlib.Path(f"experiments/{experiment_name}").mkdir(exist_ok=True)
    # Snapshot all git-tracked files for reproducibility.
    subprocess.check_call(
        f"git ls-files | tar Tzcf - backup/{experiment_name}.tgz", shell=True
    )

    if args.profile:
        pass
    else:
        cntx = contextlib.nullcontext()
        with cntx:
            train_model(
                params=settings,
                serialization_dir=f"{prefix}experiments/{experiment_name}",
                recover=args.recover,
                force=True,
            )
if __name__ == "__main__":
run()
| 39.993243 | 127 | 0.687954 |
92f9c0a40502b50b922102d5b072e06101fef4c3 | 416 | py | Python | aws-dev/awsdev8/Scripts/pip-script.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 13 | 2020-02-02T13:53:50.000Z | 2022-03-20T19:50:02.000Z | aws-dev/awsdev63/Scripts/pip-script.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 2 | 2020-03-29T19:08:04.000Z | 2021-06-02T00:57:44.000Z | aws-dev/awsdev63/Scripts/pip-script.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 10 | 2019-12-25T20:42:37.000Z | 2021-11-17T15:19:00.000Z | #!"C:\Users\malcolm orr\Documents\GitHub\awsdev63\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| 32 | 69 | 0.665865 |
a31007b71926238dfc8438696161d1b4703dd6bb | 1,850 | py | Python | python/petitBloc/chain.py | sol-ansano-kim/unitBlock | ba95a5e5625359d4bbab97cbf18df5ba259e1aee | [
"MIT"
] | 24 | 2018-01-17T02:58:10.000Z | 2021-08-20T20:34:08.000Z | python/petitBloc/chain.py | sol-ansano-kim/unitBlock | ba95a5e5625359d4bbab97cbf18df5ba259e1aee | [
"MIT"
] | 2 | 2018-12-05T08:02:49.000Z | 2021-05-21T06:57:02.000Z | python/petitBloc/chain.py | sol-ansano-kim/unitBlock | ba95a5e5625359d4bbab97cbf18df5ba259e1aee | [
"MIT"
] | 5 | 2018-02-06T05:40:17.000Z | 2022-03-19T06:30:20.000Z | from numbers import Number
from . import core
from . import packet
from . import workerManager
class Chain(core.ChainBase):
    """A connection between a source port and a destination port.

    Packets travel through a queue obtained from the WorkerManager; the
    queue is created lazily in activate() and torn down in clear().
    """

    def __init__(self, srcPort, dstPort):
        super(Chain, self).__init__(srcPort, dstPort)
        # Pending-packet queue; None means the chain is inactive.
        self.__packets = None

    def empty(self):
        """Return True when no packets are pending (or the chain is inactive)."""
        if self.__packets is None:
            return True

        return self.__packets.empty()

    def clear(self):
        """Drop all pending packets and release the underlying queue."""
        if self.__packets is not None:
            while (not self.__packets.empty()):
                self.__packets.get().drop()

            workerManager.WorkerManager.DeleteQueue(self.__packets)
            self.__packets = None

    def disconnect(self):
        """Detach from both ports and discard any buffered packets."""
        super(Chain, self).disconnect()
        self.clear()

    def activate(self):
        """Create the packet queue, unless an endpoint is missing or by-passing."""
        src = self.src()
        dst = self.dst()

        if src is None or dst is None:
            return

        if src.isByPassing():
            return

        if dst.isByPassing():
            return

        if self.__packets is None:
            self.__packets = workerManager.WorkerManager.CreateQueue()

    def terminate(self):
        """Tear down the chain's queue (alias for clear())."""
        self.clear()

    def send(self, pack):
        """Queue *pack*; return False when disconnected or inactive."""
        if self.dst() is None:
            return False

        if self.__packets is None:
            return False

        self.__packets.put(pack)

        return True

    def sendEOP(self):
        """Send the end-of-packet sentinel to signal stream completion."""
        return self.send(packet.EndOfPacket)

    def receive(self, timeout=None):
        """Pop the next packet, casting it to the destination port's type.

        Returns packet.EndOfPacket when the chain is disconnected or was
        never activated.  NOTE(review): presumably blocks up to *timeout*
        on an empty queue -- depends on the WorkerManager queue semantics;
        confirm before relying on it.
        """
        if self.src() is None:
            return packet.EndOfPacket

        if self.__packets is None:
            return packet.EndOfPacket

        p = self.__packets.get(timeout=timeout)

        if self.needToCast() and not p.isEOP():
            tc = self.dst().typeClass()
            # Any/DasType destinations keep the packet's own type: pass
            # typeClass=None so no concrete cast target is forced.
            if issubclass(tc, (core.Any, core.DasTypeBase)):
                tc = None
            p = packet.CastedPacket(p, typeClass=tc)

        return p
14d4501cfd83d3045828c6655cf15513aa823657 | 4,344 | py | Python | src/tools/voc_eval_lib/utils/visualization.py | papkov/CenterNet | 91fe229756881fe8f77697b73407e2337b6fc65e | [
"MIT"
] | null | null | null | src/tools/voc_eval_lib/utils/visualization.py | papkov/CenterNet | 91fe229756881fe8f77697b73407e2337b6fc65e | [
"MIT"
] | null | null | null | src/tools/voc_eval_lib/utils/visualization.py | papkov/CenterNet | 91fe229756881fe8f77697b73407e2337b6fc65e | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
from six.moves import range
STANDARD_COLORS = [
"AliceBlue",
"Chartreuse",
"Aqua",
"Aquamarine",
"Azure",
"Beige",
"Bisque",
"BlanchedAlmond",
"BlueViolet",
"BurlyWood",
"CadetBlue",
"AntiqueWhite",
"Chocolate",
"Coral",
"CornflowerBlue",
"Cornsilk",
"Crimson",
"Cyan",
"DarkCyan",
"DarkGoldenRod",
"DarkGrey",
"DarkKhaki",
"DarkOrange",
"DarkOrchid",
"DarkSalmon",
"DarkSeaGreen",
"DarkTurquoise",
"DarkViolet",
"DeepPink",
"DeepSkyBlue",
"DodgerBlue",
"FireBrick",
"FloralWhite",
"ForestGreen",
"Fuchsia",
"Gainsboro",
"GhostWhite",
"Gold",
"GoldenRod",
"Salmon",
"Tan",
"HoneyDew",
"HotPink",
"IndianRed",
"Ivory",
"Khaki",
"Lavender",
"LavenderBlush",
"LawnGreen",
"LemonChiffon",
"LightBlue",
"LightCoral",
"LightCyan",
"LightGoldenRodYellow",
"LightGray",
"LightGrey",
"LightGreen",
"LightPink",
"LightSalmon",
"LightSeaGreen",
"LightSkyBlue",
"LightSlateGray",
"LightSlateGrey",
"LightSteelBlue",
"LightYellow",
"Lime",
"LimeGreen",
"Linen",
"Magenta",
"MediumAquaMarine",
"MediumOrchid",
"MediumPurple",
"MediumSeaGreen",
"MediumSlateBlue",
"MediumSpringGreen",
"MediumTurquoise",
"MediumVioletRed",
"MintCream",
"MistyRose",
"Moccasin",
"NavajoWhite",
"OldLace",
"Olive",
"OliveDrab",
"Orange",
"OrangeRed",
"Orchid",
"PaleGoldenRod",
"PaleGreen",
"PaleTurquoise",
"PaleVioletRed",
"PapayaWhip",
"PeachPuff",
"Peru",
"Pink",
"Plum",
"PowderBlue",
"Purple",
"Red",
"RosyBrown",
"RoyalBlue",
"SaddleBrown",
"Green",
"SandyBrown",
"SeaGreen",
"SeaShell",
"Sienna",
"Silver",
"SkyBlue",
"SlateBlue",
"SlateGray",
"SlateGrey",
"Snow",
"SpringGreen",
"SteelBlue",
"GreenYellow",
"Teal",
"Thistle",
"Tomato",
"Turquoise",
"Violet",
"Wheat",
"White",
"WhiteSmoke",
"Yellow",
"YellowGreen",
]
NUM_COLORS = len(STANDARD_COLORS)
try:
FONT = ImageFont.truetype("arial.ttf", 24)
except IOError:
FONT = ImageFont.load_default()
def _draw_single_box(
    image, xmin, ymin, xmax, ymax, display_str, font, color="black", thickness=4
):
    """Draw one labelled box on a PIL image; mutates and returns *image*.

    The label is rendered inside a filled rectangle anchored to the bottom
    edge of the box.
    """
    draw = ImageDraw.Draw(image)
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    draw.line(
        [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
        width=thickness,
        fill=color,
    )
    text_bottom = bottom
    # Reverse list and print from bottom to top.
    # NOTE(review): ImageFont.getsize was removed in Pillow 10 (replaced by
    # getbbox/getlength); confirm the pinned Pillow version before upgrading.
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    draw.rectangle(
        [
            (left, text_bottom - text_height - 2 * margin),
            (left + text_width, text_bottom),
        ],
        fill=color,
    )
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        display_str,
        fill="black",
        font=font,
    )
    return image
def draw_bounding_boxes(image, gt_boxes, im_info):
    """Render ground-truth boxes onto the first image of a batch.

    Assumptions (not checked here -- confirm against callers):
      - image: numpy batch indexed as image[0]; modified in place and
        returned.
      - gt_boxes: (N, 5) array whose rows are [x1, y1, x2, y2, class] in
        the network's scaled coordinates.
      - im_info[2]: the resize scale; coordinates are divided by it to map
        boxes back onto `image`.
    """
    num_boxes = gt_boxes.shape[0]
    gt_boxes_new = gt_boxes.copy()
    # Undo the input scaling so boxes line up with the stored image.
    gt_boxes_new[:, :4] = np.round(gt_boxes_new[:, :4].copy() / im_info[2])
    disp_image = Image.fromarray(np.uint8(image[0]))

    for i in range(num_boxes):
        this_class = int(gt_boxes_new[i, 4])
        disp_image = _draw_single_box(
            disp_image,
            gt_boxes_new[i, 0],
            gt_boxes_new[i, 1],
            gt_boxes_new[i, 2],
            gt_boxes_new[i, 3],
            "N%02d-C%02d" % (i, this_class),
            FONT,
            color=STANDARD_COLORS[this_class % NUM_COLORS],
        )

    image[0, :] = np.array(disp_image)
    return image
| 21.294118 | 82 | 0.561924 |
2deda0f8d1c9335e7dd52042d5e5544dd37fcac3 | 3,193 | py | Python | monk/pytorch/training/params.py | Sanskar329/monk_v1 | 51a497a925ec1fb2c8fef1d51245ea7040a5a65a | [
"Apache-2.0"
] | 7 | 2020-07-26T08:37:29.000Z | 2020-10-30T10:23:11.000Z | monk/pytorch/training/params.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | null | null | null | monk/pytorch/training/params.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | null | null | null | from pytorch.training.imports import *
from system.imports import *
@accepts(int, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_num_epochs(num_epochs, system_dict):
    '''
    Record how many epochs the network should be trained for.

    Args:
        num_epochs (int): Total number of training epochs
        system_dict (dict): System dictionary storing experiment state and set variables

    Returns:
        dict: updated system dict
    '''
    system_dict["hyper-parameters"]["num_epochs"] = num_epochs
    return system_dict
@accepts(bool, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_display_progress_realtime(value, system_dict):
    '''
    Toggle per-iteration progress display during training.

    Args:
        value (bool): If True, progress is shown for every iteration in the epoch
        system_dict (dict): System dictionary storing experiment state and set variables

    Returns:
        dict: updated system dict
    '''
    system_dict["training"]["settings"]["display_progress_realtime"] = value
    return system_dict
@accepts(bool, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_display_progress(value, system_dict):
    '''
    Toggle the per-epoch summary display.

    Args:
        value (bool): If True, a summary is displayed after every epoch
        system_dict (dict): System dictionary storing experiment state and set variables

    Returns:
        dict: updated system dict
    '''
    system_dict["training"]["settings"]["display_progress"] = value
    return system_dict
@accepts(bool, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_save_intermediate_models(value, system_dict):
    '''
    Choose whether model weights are saved after every epoch.

    Args:
        value (bool): If True, model weights are written out post every epoch
        system_dict (dict): System dictionary storing experiment state and set variables

    Returns:
        dict: updated system dict
    '''
    system_dict["training"]["settings"]["save_intermediate_models"] = value
    return system_dict
@accepts(bool, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_save_training_logs(value, system_dict):
    '''
    Choose whether training/validation metrics are persisted.

    Args:
        value (bool): If True, all training and validation metrics are saved
            (required for later experiment comparison)
        system_dict (dict): System dictionary storing experiment state and set variables

    Returns:
        dict: updated system dict
    '''
    system_dict["training"]["settings"]["save_training_logs"] = value
    return system_dict
@accepts(str, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_intermediate_model_prefix(value, system_dict):
    '''
    Set the filename prefix used for intermediate model checkpoints.

    Args:
        value (str): Prefix prepended to intermediate weight file names
        system_dict (dict): System dictionary storing experiment state and set variables

    Returns:
        dict: updated system dict
    '''
    system_dict["training"]["settings"]["intermediate_model_prefix"] = value
    return system_dict
9d28b7134f8c58cd85c41e693789022d7beb0cbb | 352 | py | Python | corsempy/__init__.py | iaousse/corsempy_project | e369016e1edd9372556e13d0038088628dc7bb40 | [
"MIT"
] | null | null | null | corsempy/__init__.py | iaousse/corsempy_project | e369016e1edd9372556e13d0038088628dc7bb40 | [
"MIT"
] | null | null | null | corsempy/__init__.py | iaousse/corsempy_project | e369016e1edd9372556e13d0038088628dc7bb40 | [
"MIT"
] | null | null | null | """corsempy is a Python package for implementing the iterative methods for the computations of the covariance
matrix implied by an SEM model. The package still under development."""
"""from .optimizer import Optimizer
from .model import Model
from .identifier import Identifier
from .stats import Statistics"""
name = "corsempy"
__version__ = "1.0.0"
| 35.2 | 109 | 0.784091 |
2dbefc0019ffb5976d6187a2d79e30d5948a0d28 | 1,464 | py | Python | aliyun-python-sdk-ros/aliyunsdkros/request/v20190910/DescribeRegionsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-ros/aliyunsdkros/request/v20190910/DescribeRegionsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-ros/aliyunsdkros/request/v20190910/DescribeRegionsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkros.endpoint import endpoint_data
class DescribeRegionsRequest(RpcRequest):
    """Request wrapper for the ROS (2019-09-10) DescribeRegions API."""

    def __init__(self):
        RpcRequest.__init__(self, 'ROS', '2019-09-10', 'DescribeRegions','ros')
        self.set_method('POST')

        # Attach the product endpoint routing tables when the installed core
        # SDK exposes them (older cores lack these attributes).
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_AcceptLanguage(self):
        # Value of the AcceptLanguage query parameter, or None if unset.
        return self.get_query_params().get('AcceptLanguage')

    def set_AcceptLanguage(self,AcceptLanguage):
        self.add_query_param('AcceptLanguage',AcceptLanguage)
1c3c1b6d1bf3418e30f84f8cf25639559ef31943 | 3,970 | py | Python | utils/polus-imagej-util/{{cookiecutter.project_slug}}/src/ij_converter.py | LabShare/polus-plugin-utils | 9332ca6c229401d57b063a81973d48ce718c654c | [
"MIT"
] | null | null | null | utils/polus-imagej-util/{{cookiecutter.project_slug}}/src/ij_converter.py | LabShare/polus-plugin-utils | 9332ca6c229401d57b063a81973d48ce718c654c | [
"MIT"
] | null | null | null | utils/polus-imagej-util/{{cookiecutter.project_slug}}/src/ij_converter.py | LabShare/polus-plugin-utils | 9332ca6c229401d57b063a81973d48ce718c654c | [
"MIT"
] | null | null | null |
'''
A conversion utility built to convert abstract to primitive
'''
import imagej
import logging
import imglyb
import jpype
import numpy as np
import scyjava
# Initialize the logger
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("ij_converter")
logger.setLevel(logging.INFO)
## fill in types to convert
ABSTRACT_ITERABLES = [
'IterableInterval',
'Iterable',
]
IMG_ARRAYS = [
'ArrayImg'
]
ABSTRACT_SCALARS = [
'RealType',
]
SCALARS = [
'double',
'float',
'long', #long type (int64) not supported by bfio
'int',
'short',
'char',
'byte',
'boolean'
]
## recognize array objects as scalar objects + '[]'
ARRAYS = [s+'[]' for s in SCALARS]
def _java_setup():
    """Populate the Java type lookup tables.

    jpype/imglyb classes can only be referenced after the JVM has started,
    so this is registered via ``scyjava.when_jvm_starts`` rather than being
    executed at import time.
    """
    global IMGLYB_PRIMITIVES, PRIMITIVES, PRIMITIVE_ARRAYS
    # NumPy dtype name -> imglib2 scalar wrapper type.
    IMGLYB_PRIMITIVES = {
        'float32': imglyb.types.FloatType,
        'float64': imglyb.types.DoubleType,
        'int8': imglyb.types.ByteType,
        'int16': imglyb.types.ShortType,
        'int32': imglyb.types.IntType,
        'int64': imglyb.types.LongType,
        'uint8': imglyb.types.UnsignedByteType,
        'uint16': imglyb.types.UnsignedShortType,
        'uint32': imglyb.types.UnsignedIntType,
        'uint64': imglyb.types.UnsignedLongType
    }
    # Java primitive type name -> jpype primitive constructor.
    PRIMITIVES = {
        'double': jpype.JDouble,
        'float': jpype.JFloat,
        'long': jpype.JLong,
        'int': jpype.JInt,
        'short': jpype.JShort,
        'char': jpype.JChar,
        'byte': jpype.JByte,
        'boolean': jpype.JBoolean
    }
    # Java primitive array type name -> jpype array factory.
    PRIMITIVE_ARRAYS = {
        'double[]': jpype.JDouble[:],
        'float[]': jpype.JFloat[:],
        'long[]': jpype.JLong[:],
        'int[]': jpype.JInt[:],
        'short[]': jpype.JShort[:],
        'char[]': jpype.JChar[:],
        'byte[]': jpype.JByte[:],
        'boolean[]': jpype.JBoolean[:]
    }
# Run the table setup as soon as the JVM is live.
scyjava.when_jvm_starts(_java_setup)

# Dispatch table: Java type name -> converter callable.
# Scalar-style entries take (value, type_name, java_dtype); image-style
# entries take (value, ij). ``to_java`` picks the calling convention based
# on whether a java_dtype was supplied.
JAVA_CONVERT = {}
JAVA_CONVERT.update({
    t: lambda s, t, st: IMGLYB_PRIMITIVES[str(st)](st.type(s)) for t in ABSTRACT_SCALARS
})
JAVA_CONVERT.update({
    t: lambda s, t, st: PRIMITIVES[t](float(s)) for t in SCALARS
})
# Array inputs arrive as comma-separated strings, e.g. "1.0,2.0,3.0".
JAVA_CONVERT.update({
    t: lambda s, t, st: PRIMITIVE_ARRAYS[t]([float(si) for si in s.split(',')]) for t in ARRAYS
})
JAVA_CONVERT.update({
    t: lambda s, ij: imglyb.util.Views.iterable(ij.py.to_java(s)) for t in ABSTRACT_ITERABLES
})
JAVA_CONVERT.update({
    t: lambda s, ij: imglyb.util._to_imglib(s) for t in IMG_ARRAYS
})
def to_java(ij, np_array, java_type, java_dtype=None):
    """Convert a Python value to the requested Java type.

    Parameters
    ----------
    ij : running imagej gateway instance (must not be None).
    np_array : value to convert (scalar, string, or ndarray). ``None``
        passes straight through as ``None``.
    java_type : simple name of the target Java type, e.g. ``'double'``,
        ``'double[]'``, ``'IterableInterval'``.
    java_dtype : optional imglib2 scalar type used by the scalar converters.

    Returns
    -------
    The converted Java object, ``None`` for ``None`` input, or
    ``ij.py.to_java``'s default conversion for unrecognized type names.

    Raises
    ------
    ValueError : if ``ij`` is None.
    """
    if ij is None:
        raise ValueError('No imagej instance found.')
    if np_array is None:
        return None
    converter = JAVA_CONVERT.get(java_type)
    if converter is None:
        logger.warning('Did not recognize type, {}, will pass default.'.format(java_type))
        return ij.py.to_java(np_array)
    # Scalar/array converters take (value, type, dtype); image converters
    # take (value, ij). The string comparison mirrors the original check;
    # java_dtype may be a Java object, so plain `is not None` is presumably
    # not equivalent -- keep as-is. TODO confirm.
    if str(java_dtype) != 'None':
        return converter(np_array, java_type, java_dtype)
    return converter(np_array, ij)
def from_java(ij, java_array, java_type):
    """Convert a Java value/image back to a Python (NumPy) value.

    Parameters
    ----------
    ij : running imagej gateway instance (must not be None).
    java_array : the Java object to convert.
    java_type : unused; kept for interface symmetry with ``to_java``.

    Raises
    ------
    ValueError : if ``ij`` is None.
    """
    if ij is None:
        raise ValueError('No imagej instance found.')
    # Boolean images cannot be transferred directly; widen to uint8 first.
    if ij.py.dtype(java_array) == bool:
        java_array = ij.op().convert().uint8(java_array)
    return ij.py.from_java(java_array)
65e033b7b694bd8b38b15bb918eaba18f76b8443 | 8,650 | py | Python | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/SGIX/fragment_lighting.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/SGIX/fragment_lighting.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/SGIX/fragment_lighting.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | '''OpenGL extension SGIX.fragment_lighting
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/SGIX/fragment_lighting.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIX_fragment_lighting'
GL_FRAGMENT_LIGHTING_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHTING_SGIX', 0x8400 )
GL_FRAGMENT_COLOR_MATERIAL_SGIX = constant.Constant( 'GL_FRAGMENT_COLOR_MATERIAL_SGIX', 0x8401 )
GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX = constant.Constant( 'GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX', 0x8402 )
GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX = constant.Constant( 'GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX', 0x8403 )
GL_MAX_FRAGMENT_LIGHTS_SGIX = constant.Constant( 'GL_MAX_FRAGMENT_LIGHTS_SGIX', 0x8404 )
GL_MAX_ACTIVE_LIGHTS_SGIX = constant.Constant( 'GL_MAX_ACTIVE_LIGHTS_SGIX', 0x8405 )
GL_CURRENT_RASTER_NORMAL_SGIX = constant.Constant( 'GL_CURRENT_RASTER_NORMAL_SGIX', 0x8406 )
GL_LIGHT_ENV_MODE_SGIX = constant.Constant( 'GL_LIGHT_ENV_MODE_SGIX', 0x8407 )
GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX', 0x8408 )
GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX', 0x8409 )
GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX', 0x840A )
GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX', 0x840B )
GL_FRAGMENT_LIGHT0_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT0_SGIX', 0x840C )
GL_FRAGMENT_LIGHT1_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT1_SGIX', 0x840D )
GL_FRAGMENT_LIGHT2_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT2_SGIX', 0x840E )
GL_FRAGMENT_LIGHT3_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT3_SGIX', 0x840F )
GL_FRAGMENT_LIGHT4_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT4_SGIX', 0x8410 )
GL_FRAGMENT_LIGHT5_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT5_SGIX', 0x8411 )
GL_FRAGMENT_LIGHT6_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT6_SGIX', 0x8412 )
GL_FRAGMENT_LIGHT7_SGIX = constant.Constant( 'GL_FRAGMENT_LIGHT7_SGIX', 0x8413 )
glFragmentColorMaterialSGIX = platform.createExtensionFunction(
'glFragmentColorMaterialSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum,),
doc = 'glFragmentColorMaterialSGIX( GLenum(face), GLenum(mode) ) -> None',
argNames = ('face', 'mode',),
)
glFragmentLightfSGIX = platform.createExtensionFunction(
'glFragmentLightfSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLfloat,),
doc = 'glFragmentLightfSGIX( GLenum(light), GLenum(pname), GLfloat(param) ) -> None',
argNames = ('light', 'pname', 'param',),
)
glFragmentLightfvSGIX = platform.createExtensionFunction(
'glFragmentLightfvSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glFragmentLightfvSGIX( GLenum(light), GLenum(pname), GLfloatArray(params) ) -> None',
argNames = ('light', 'pname', 'params',),
)
glFragmentLightiSGIX = platform.createExtensionFunction(
'glFragmentLightiSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLint,),
doc = 'glFragmentLightiSGIX( GLenum(light), GLenum(pname), GLint(param) ) -> None',
argNames = ('light', 'pname', 'param',),
)
glFragmentLightivSGIX = platform.createExtensionFunction(
'glFragmentLightivSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLintArray,),
doc = 'glFragmentLightivSGIX( GLenum(light), GLenum(pname), GLintArray(params) ) -> None',
argNames = ('light', 'pname', 'params',),
)
glFragmentLightModelfSGIX = platform.createExtensionFunction(
'glFragmentLightModelfSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLfloat,),
doc = 'glFragmentLightModelfSGIX( GLenum(pname), GLfloat(param) ) -> None',
argNames = ('pname', 'param',),
)
glFragmentLightModelfvSGIX = platform.createExtensionFunction(
'glFragmentLightModelfvSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, arrays.GLfloatArray,),
doc = 'glFragmentLightModelfvSGIX( GLenum(pname), GLfloatArray(params) ) -> None',
argNames = ('pname', 'params',),
)
glFragmentLightModeliSGIX = platform.createExtensionFunction(
'glFragmentLightModeliSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLint,),
doc = 'glFragmentLightModeliSGIX( GLenum(pname), GLint(param) ) -> None',
argNames = ('pname', 'param',),
)
glFragmentLightModelivSGIX = platform.createExtensionFunction(
'glFragmentLightModelivSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, arrays.GLintArray,),
doc = 'glFragmentLightModelivSGIX( GLenum(pname), GLintArray(params) ) -> None',
argNames = ('pname', 'params',),
)
glFragmentMaterialfSGIX = platform.createExtensionFunction(
'glFragmentMaterialfSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLfloat,),
doc = 'glFragmentMaterialfSGIX( GLenum(face), GLenum(pname), GLfloat(param) ) -> None',
argNames = ('face', 'pname', 'param',),
)
glFragmentMaterialfvSGIX = platform.createExtensionFunction(
'glFragmentMaterialfvSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glFragmentMaterialfvSGIX( GLenum(face), GLenum(pname), GLfloatArray(params) ) -> None',
argNames = ('face', 'pname', 'params',),
)
glFragmentMaterialiSGIX = platform.createExtensionFunction(
'glFragmentMaterialiSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLint,),
doc = 'glFragmentMaterialiSGIX( GLenum(face), GLenum(pname), GLint(param) ) -> None',
argNames = ('face', 'pname', 'param',),
)
glFragmentMaterialivSGIX = platform.createExtensionFunction(
'glFragmentMaterialivSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLintArray,),
doc = 'glFragmentMaterialivSGIX( GLenum(face), GLenum(pname), GLintArray(params) ) -> None',
argNames = ('face', 'pname', 'params',),
)
glGetFragmentLightfvSGIX = platform.createExtensionFunction(
'glGetFragmentLightfvSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glGetFragmentLightfvSGIX( GLenum(light), GLenum(pname), GLfloatArray(params) ) -> None',
argNames = ('light', 'pname', 'params',),
)
glGetFragmentLightivSGIX = platform.createExtensionFunction(
'glGetFragmentLightivSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLintArray,),
doc = 'glGetFragmentLightivSGIX( GLenum(light), GLenum(pname), GLintArray(params) ) -> None',
argNames = ('light', 'pname', 'params',),
)
glGetFragmentMaterialfvSGIX = platform.createExtensionFunction(
'glGetFragmentMaterialfvSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glGetFragmentMaterialfvSGIX( GLenum(face), GLenum(pname), GLfloatArray(params) ) -> None',
argNames = ('face', 'pname', 'params',),
)
glGetFragmentMaterialivSGIX = platform.createExtensionFunction(
'glGetFragmentMaterialivSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLintArray,),
doc = 'glGetFragmentMaterialivSGIX( GLenum(face), GLenum(pname), GLintArray(params) ) -> None',
argNames = ('face', 'pname', 'params',),
)
glLightEnviSGIX = platform.createExtensionFunction(
'glLightEnviSGIX', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLint,),
doc = 'glLightEnviSGIX( GLenum(pname), GLint(param) ) -> None',
argNames = ('pname', 'param',),
)
def glInitFragmentLightingSGIX():
    '''Return boolean indicating whether this extension is available'''
    # Queries the PyOpenGL extension registry for GL_SGIX_fragment_lighting.
    return extensions.hasGLExtension( EXTENSION_NAME )
| 43.467337 | 132 | 0.784509 |
ec6dc8fcc13c3741cae03b46f280bb409058cc09 | 3,107 | py | Python | hub_module/modules/text/text_generation/ernie_gen_poetry/model/modeling_ernie_gen.py | 18621579069/PaddleHub-yu | 15e8bcef2addf239081e235bdcfd039de12330e0 | [
"Apache-2.0"
] | 4 | 2021-02-25T03:27:38.000Z | 2021-05-15T03:20:23.000Z | hub_module/modules/text/text_generation/ernie_gen_poetry/model/modeling_ernie_gen.py | 18621579069/PaddleHub-yu | 15e8bcef2addf239081e235bdcfd039de12330e0 | [
"Apache-2.0"
] | null | null | null | hub_module/modules/text/text_generation/ernie_gen_poetry/model/modeling_ernie_gen.py | 18621579069/PaddleHub-yu | 15e8bcef2addf239081e235bdcfd039de12330e0 | [
"Apache-2.0"
] | 2 | 2021-03-01T07:04:01.000Z | 2021-05-14T05:54:18.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as F
import paddle.fluid.layers as L
from ernie_gen_poetry.model.modeling_ernie import ErnieModel
from ernie_gen_poetry.model.modeling_ernie import _build_linear, _build_ln, append_name
class ErnieModelForGeneration(ErnieModel):
    """ERNIE encoder with a masked-LM head, used for text generation.

    Adds a transform + layer-norm + tied-embedding projection on top of the
    base ``ErnieModel`` so token logits can be produced over the vocabulary.
    """

    def __init__(self, cfg, name=None):
        # Generation needs the encoder's per-token outputs, not the pooled one.
        cfg['return_additional_info'] = True
        cfg['has_pooler'] = False
        super(ErnieModelForGeneration, self).__init__(cfg, name=name)
        initializer = F.initializer.TruncatedNormal(
            scale=cfg['initializer_range'])
        d_model = cfg['hidden_size']
        d_vocab = cfg['vocab_size']
        # Masked-LM transform layer (hidden -> hidden, with activation).
        self.mlm = _build_linear(
            d_model,
            d_model,
            append_name(name, 'mask_lm_trans_fc'),
            initializer,
            act=cfg['hidden_act'])
        self.mlm_ln = _build_ln(
            d_model, name=append_name(name, 'mask_lm_trans'))
        # Output bias; the projection weight itself is tied to the word
        # embedding matrix (see forward).
        self.mlm_bias = L.create_parameter(
            dtype='float32',
            shape=[d_vocab],
            attr=F.ParamAttr(
                name=append_name(name, 'mask_lm_out_fc.b_0'),
                initializer=F.initializer.Constant(value=0.0)),
            is_bias=True,
        )

    def forward(self, src_ids, *args, **kwargs):
        """Run the encoder and the masked-LM head.

        Extra kwargs (popped before delegating to ``ErnieModel.forward``):
            tgt_labels: gold token ids (or soft-label distributions) for loss.
            tgt_pos: positions at which to compute logits/loss.
            encode_only: if True, only run the encoder.

        Returns (by mode):
            encode_only       -> (None, None, info)
            no tgt_labels     -> (output_ids, logits, info)   # greedy argmax ids
            tgt_labels given  -> (loss, logits_2d, info)      # training
        """
        tgt_labels = kwargs.pop('tgt_labels', None)
        tgt_pos = kwargs.pop('tgt_pos', None)
        encode_only = kwargs.pop('encode_only', False)
        _, encoded, info = ErnieModel.forward(self, src_ids, *args, **kwargs)
        if encode_only:
            return None, None, info
        elif tgt_labels is None:
            # Inference over every position: project against the tied
            # embedding matrix and take the argmax token id.
            encoded = self.mlm(encoded)
            encoded = self.mlm_ln(encoded)
            logits = L.matmul(
                encoded, self.word_emb.weight, transpose_y=True) + self.mlm_bias
            output_ids = L.argmax(logits, -1)
            return output_ids, logits, info
        else:
            # Training: only score the requested target positions.
            encoded_2d = L.gather_nd(encoded, tgt_pos)
            encoded_2d = self.mlm(encoded_2d)
            encoded_2d = self.mlm_ln(encoded_2d)
            logits_2d = L.matmul(
                encoded_2d, self.word_emb.weight,
                transpose_y=True) + self.mlm_bias
            if len(tgt_labels.shape) == 1:
                tgt_labels = L.reshape(tgt_labels, [-1, 1])
            # Hard labels when the last dim is 1, soft labels otherwise.
            loss = L.reduce_mean(
                L.softmax_with_cross_entropy(
                    logits_2d,
                    tgt_labels,
                    soft_label=(tgt_labels.shape[-1] != 1)))
            return loss, logits_2d, info
| 39.329114 | 87 | 0.621178 |
1dbc6e57080adc9016ef1de86c6328795eec509e | 5,413 | py | Python | meli/models.py | rmarcacini/cc-meli2019 | 57139212b0afce5ada550c903516e5b919347b7b | [
"MIT"
] | 5 | 2019-10-13T21:32:55.000Z | 2021-01-12T15:47:36.000Z | meli/models.py | pablozivic/cc-meli2019 | 57139212b0afce5ada550c903516e5b919347b7b | [
"MIT"
] | null | null | null | meli/models.py | pablozivic/cc-meli2019 | 57139212b0afce5ada550c903516e5b919347b7b | [
"MIT"
] | 2 | 2019-10-07T15:43:05.000Z | 2019-10-25T23:36:26.000Z | import keras
from keras.models import Sequential
from keras.layers import Embedding
from keras.layers import Dropout
from keras.layers import Bidirectional
from keras.layers import GRU
from keras.layers import LSTM
from keras.layers import Dense
from keras.callbacks import EarlyStopping
from keras.layers import Input
from keras.layers import Conv1D
from keras.layers import SpatialDropout1D
from keras.layers import MaxPool1D
from keras.layers.merge import concatenate
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import MaxPool1D
from keras.models import Model
import numpy as np
import random
def TextGRU(num_words, input_length, embedding_dim, embedding_matrix, number_of_classes):
    """Build and compile a bidirectional-GRU text classifier.

    Args:
        num_words: vocabulary size for the Embedding layer.
        input_length: padded input sequence length.
        embedding_dim: embedding vector size (also used as the RNN width).
        embedding_matrix: pretrained embedding weights, shape (num_words, embedding_dim).
        number_of_classes: size of the softmax output layer.

    Returns:
        A compiled keras Sequential model.

    Note: embedding trainability, dropout rates and the optimizer are sampled
    at random, so repeated calls act as a simple random architecture search.
    """
    model = Sequential()
    model.add(Embedding(num_words, embedding_dim, input_length=input_length, trainable=random.choice([True, False]), weights=[embedding_matrix]))
    model.add(Bidirectional(GRU(embedding_dim, return_sequences=False)))
    model.add(Dense(max(3*number_of_classes, 3*embedding_dim), activation='relu'))
    model.add(Dropout(random.choice([0.1, 0.2, 0.3])))
    model.add(Dense(max(2*number_of_classes, 2*embedding_dim), activation='relu'))
    model.add(Dropout(random.choice([0.1, 0.2, 0.3])))
    model.add(Dense(number_of_classes, activation='softmax'))
    L = []
    # NOTE: 0.001 appears twice, making the smaller Adam lr twice as likely.
    L.append(keras.optimizers.Adam(lr=random.choice([0.001, 0.001, 0.002, 0.003]), beta_1=0.9, beta_2=0.999, amsgrad=False))
    L.append(keras.optimizers.Adadelta(lr=1.0, rho=0.95))
    opt = random.choice(L)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    model.summary()
    return model
def TextLSTM(num_words, input_length, embedding_dim, embedding_matrix, number_of_classes):
    """Build and compile a bidirectional-LSTM text classifier.

    Identical architecture to ``TextGRU`` with the recurrent layer swapped
    for an LSTM; see that function for parameter documentation. Dropout
    rates, embedding trainability and the optimizer are randomly sampled.
    """
    model = Sequential()
    model.add(Embedding(num_words, embedding_dim, input_length=input_length, trainable=random.choice([True, False]), weights=[embedding_matrix]))
    model.add(Bidirectional(LSTM(embedding_dim, return_sequences=False)))
    model.add(Dense(max(3*number_of_classes, 3*embedding_dim), activation='relu'))
    model.add(Dropout(random.choice([0.1, 0.2, 0.3])))
    model.add(Dense(max(2*number_of_classes, 2*embedding_dim), activation='relu'))
    model.add(Dropout(random.choice([0.1, 0.2, 0.3])))
    model.add(Dense(number_of_classes, activation='softmax'))
    L = []
    # NOTE: 0.001 appears twice, making the smaller Adam lr twice as likely.
    L.append(keras.optimizers.Adam(lr=random.choice([0.001, 0.001, 0.002, 0.003]), beta_1=0.9, beta_2=0.999, amsgrad=False))
    L.append(keras.optimizers.Adadelta(lr=1.0, rho=0.95))
    opt = random.choice(L)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    model.summary()
    return model
def TextCNN(num_words, input_length, embedding_dim, embedding_matrix, number_of_classes):
    """Build and compile a CNN + bidirectional-GRU text classifier.

    Args:
        num_words: vocabulary size for the Embedding layer.
        input_length: padded input sequence length.
        embedding_dim: embedding vector size.
        embedding_matrix: pretrained embedding weights, shape (num_words, embedding_dim).
        number_of_classes: size of the softmax output layer.

    Returns:
        A compiled keras Model.

    Note: filter count, dropout rates, embedding trainability and the
    optimizer are randomly sampled (simple random architecture search).
    """
    sequence_input = Input(shape=(input_length,), dtype='int32')
    embedding_layer = Embedding(
        num_words,
        embedding_dim,
        embeddings_initializer=keras.initializers.random_uniform(minval=-0.25, maxval=0.25),
        input_length=input_length,
        trainable=random.choice([True, False]),
        weights=[embedding_matrix])
    embedded_sequences = embedding_layer(sequence_input)

    # One convolution + max-pool branch per filter size.
    NUM_FILTERS = random.choice([64, 128, 256])
    FILTER_SIZES = [2, 3, 4]
    pooled_outputs = []
    for filter_size in FILTER_SIZES:
        branch = Conv1D(NUM_FILTERS, filter_size, activation='relu')(embedded_sequences)
        branch = MaxPool1D(int(branch.shape[1]))(branch)
        pooled_outputs.append(branch)
    merged = concatenate(pooled_outputs)

    # Bug fix: the original fed only the LAST pooled branch into the GRU,
    # leaving `merged` unused. Feed the concatenation of all branches.
    x = Bidirectional(GRU(embedding_dim, return_sequences=False))(merged)
    x = Dense(max(3*number_of_classes, 3*embedding_dim), activation='relu')(x)
    x = Dropout(random.choice([0.1, 0.2, 0.3]))(x)
    x = Dense(max(2*number_of_classes, 2*embedding_dim), activation='relu')(x)
    x = Dropout(random.choice([0.1, 0.2, 0.3]))(x)
    outputs = Dense(number_of_classes, activation='softmax')(x)

    model = Model(sequence_input, outputs)
    # Optimizer sampling made consistent with TextGRU/TextLSTM (random.choice).
    # NOTE: 0.001 appears twice, making the smaller Adam lr twice as likely.
    optimizer_choices = []
    optimizer_choices.append(keras.optimizers.Adam(lr=random.choice([0.001, 0.001, 0.002, 0.003]), beta_1=0.9, beta_2=0.999, amsgrad=False))
    optimizer_choices.append(keras.optimizers.Adadelta(lr=1.0, rho=0.95))
    opt = random.choice(optimizer_choices)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    model.summary()
    return model
def data_input(df, embeddings_index):
    """Fit a tokenizer on df['title_clean'] and build its embedding matrix.

    Args:
        df: DataFrame with a 'title_clean' text column.
        embeddings_index: mapping word -> pretrained 300-d embedding vector.

    Returns:
        (tokenizer, embedding_matrix, MAX_NB_WORDS, MAX_SEQUENCE_LENGTH, nb_words)

    Vocabulary size and sequence length are randomly sampled (random search);
    MAX_SEQUENCE_LENGTH is returned for later padding, it is not used here.
    """
    # The maximum number of words to be used. (most frequent)
    MAX_NB_WORDS = random.randrange(100000, 250000, 10000)
    MAX_SEQUENCE_LENGTH = int(random.randrange(10, 15))
    EMBEDDING_DIM = 300
    tokenizer = keras.preprocessing.text.Tokenizer(num_words=MAX_NB_WORDS, lower=True)
    tokenizer.fit_on_texts(df['title_clean'].apply(str))
    word_index = tokenizer.word_index
    words_not_found = []
    nb_words = min(MAX_NB_WORDS, len(word_index))
    # Row i holds the pretrained vector for tokenizer index i.
    embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
    for word, i in word_index.items():
        if i >= nb_words:
            continue
        embedding_vector = embeddings_index.get(word)
        if (embedding_vector is not None) and len(embedding_vector) > 0:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
        else:
            words_not_found.append(word)
    return tokenizer, embedding_matrix, MAX_NB_WORDS, MAX_SEQUENCE_LENGTH, nb_words
c9d1e65e7def3edcef46e672dc626b21ff962c88 | 876 | py | Python | Download/validate_unusual_video.py | jyun790430/CrawlerH | 4329c42dfbf3eacba9711eaf1dc90106166ecdcc | [
"MIT"
] | null | null | null | Download/validate_unusual_video.py | jyun790430/CrawlerH | 4329c42dfbf3eacba9711eaf1dc90106166ecdcc | [
"MIT"
] | 3 | 2021-03-31T19:49:20.000Z | 2021-12-13T20:38:02.000Z | Download/validate_unusual_video.py | jyun790430/CrawlerH | 4329c42dfbf3eacba9711eaf1dc90106166ecdcc | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import subprocess
from model import CrawlVideoModel
from setting.config import VIDEO_FILE_PATH

# One-off Python 2 script for probing video files with ffmpeg.
files = os.listdir(VIDEO_FILE_PATH)
a = subprocess.Popen(['ffmpeg', '-i', '/Users/bryson/videoH/46ca6786ceaf078653acf3e9b0cd13bb.mp4'])
# NOTE(review): returncode is read before the process finishes, so this
# presumably prints None -- a wait()/communicate() call looks missing.
print a.returncode
# # Process with a loop
# for _f in files:
#     # Build the file's absolute path
#     fullpath = os.path.join(VIDEO_FILE_PATH, _f)
#     # Check whether fullpath is a file or a directory
#     if os.path.isfile(fullpath):
#         print(fullpath)
#         result = subprocess.Popen(['ffmpeg', '-i', fullpath])
#         text = result.communicate()
#         print(result.returncode)
#         #break
#
# for k, row in enumerate(data):
#     filename = row[0]
#     filename += '.mp4'
#     filepath = VIDEO_FILE_PATH
#
#     _file = os.path.isfile(filepath, filename)
#
#     if os.path.isfile(_file):
#         print 'exists'
#         #os.remove(_file)
#     else:
#         pass
| 22.461538 | 99 | 0.650685 |
b6d827d520391be4c035fc2a2259d66b7b9f2834 | 1,325 | py | Python | builders/bidirectional_rnn_builder.py | ChenCongGit/Chinese-aster | 3e4ad7e1cc36ad071e4e5dd1017d7d415d686d8d | [
"MIT"
] | 6 | 2019-08-05T08:54:46.000Z | 2021-06-29T08:36:58.000Z | builders/bidirectional_rnn_builder.py | ChenCongGit/Chinese-aster | 3e4ad7e1cc36ad071e4e5dd1017d7d415d686d8d | [
"MIT"
] | null | null | null | builders/bidirectional_rnn_builder.py | ChenCongGit/Chinese-aster | 3e4ad7e1cc36ad071e4e5dd1017d7d415d686d8d | [
"MIT"
] | null | null | null | from Chinese_aster.core import bidirectional_rnn
from Chinese_aster.protos import hyperparams_pb2
from Chinese_aster.protos import bidirectional_rnn_pb2
from Chinese_aster.builders import hyperparams_builder
from Chinese_aster.builders import rnn_cell_builder
def build(config, is_training):
  """Construct a bidirectional RNN object from its protobuf config.

  Args:
    config: a bidirectional_rnn_pb2.BidirectionalRnn message.
    is_training: whether the model is being built for training.

  Returns:
    A Static- or DynamicBidirectionalRnn instance.

  Raises:
    ValueError: if the config has the wrong type, or if output units are
      requested with non-FC hyperparams.
  """
  if not isinstance(config, bidirectional_rnn_pb2.BidirectionalRnn):
    raise ValueError('config not of type bidirectional_rnn_pb2.BidirectionalRnn')

  brnn_class = (bidirectional_rnn.StaticBidirectionalRnn
                if config.static
                else bidirectional_rnn.DynamicBidirectionalRnn)

  # Forward and backward cells share one cell config.
  forward_cell = rnn_cell_builder.build(config.fw_bw_rnn_cell)
  backward_cell = rnn_cell_builder.build(config.fw_bw_rnn_cell)
  regularizer = hyperparams_builder._build_regularizer(config.rnn_regularizer)

  # An output projection layer is only built when output units are requested.
  fc_hyperparams = None
  if config.num_output_units > 0:
    if config.fc_hyperparams.op != hyperparams_pb2.Hyperparams.FC:
      raise ValueError('op type must be FC')
    fc_hyperparams = hyperparams_builder.build(config.fc_hyperparams, is_training)

  return brnn_class(
      forward_cell, backward_cell,
      rnn_regularizer=regularizer,
      num_output_units=config.num_output_units,
      fc_hyperparams=fc_hyperparams,
      summarize_activations=config.summarize_activations)
| 41.40625 | 89 | 0.830943 |
7fd22447fd2213aa234138016d281d59d5604bb5 | 179 | py | Python | celeryconfig.py | urkonn/shogun | fbbd9dfd5773d223949d1030c954e81fd353f175 | [
"MIT"
] | null | null | null | celeryconfig.py | urkonn/shogun | fbbd9dfd5773d223949d1030c954e81fd353f175 | [
"MIT"
] | null | null | null | celeryconfig.py | urkonn/shogun | fbbd9dfd5773d223949d1030c954e81fd353f175 | [
"MIT"
] | null | null | null | import sys
import os

# Make the project root importable when the worker is started from elsewhere.
sys.path.append('.')

# Broker connection string comes from the environment (e.g. a hosted Redis).
BROKER_URL = os.getenv('REDIS_URL')
# Accept and emit JSON only -- avoids pickle's deserialization risks.
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
| 17.9 | 35 | 0.759777 |
d80b61e6864643ddb4bd5f866b090d53a14bd2dc | 69 | py | Python | stonesoup/models/__init__.py | Red-Portal/Stone-Soup-1 | 267621c86161a839da9b144c2745d28d9166d903 | [
"MIT"
] | 157 | 2019-04-14T20:43:11.000Z | 2022-03-30T08:30:33.000Z | stonesoup/models/__init__.py | Red-Portal/Stone-Soup-1 | 267621c86161a839da9b144c2745d28d9166d903 | [
"MIT"
] | 364 | 2019-04-18T15:54:49.000Z | 2022-03-31T09:50:02.000Z | stonesoup/models/__init__.py | Red-Portal/Stone-Soup-1 | 267621c86161a839da9b144c2745d28d9166d903 | [
"MIT"
] | 86 | 2019-04-20T02:01:18.000Z | 2022-03-28T01:03:11.000Z | # -*- coding: utf-8 -*-
from .base import Model
__all__ = ['Model']
| 13.8 | 23 | 0.594203 |
9d82929a79d530b762249270118da3a7cfcb74d2 | 679 | py | Python | Algorithms/2_Implementation/50.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | [
"MIT"
] | 1 | 2021-11-25T13:39:30.000Z | 2021-11-25T13:39:30.000Z | Algorithms/2_Implementation/50.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | [
"MIT"
] | null | null | null | Algorithms/2_Implementation/50.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/minimum-distances/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'minimumDistances' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY a as parameter.
#
def minimumDistances(a):
    """Return the minimum index distance between any two equal values in a.

    Returns -1 when no value occurs more than once (including empty input).

    O(n) single pass: for equal values, the closest pair is always two
    consecutive occurrences, so tracking each value's last-seen index
    suffices (the original compared all O(n^2) pairs).
    """
    last_seen = {}
    best = -1
    for i, v in enumerate(a):
        if v in last_seen:
            d = i - last_seen[v]
            if best == -1 or d < best:
                best = d
        last_seen[v] = i
    return best
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    # First line: declared array length (not needed by the solver itself).
    n = int(input().strip())

    a = list(map(int, input().rstrip().split()))

    result = minimumDistances(a)

    fptr.write(str(result) + '\n')

    fptr.close()
fb6037a424f2e01ba085af32ef7ef6d2f3e35f8f | 3,194 | py | Python | meetup_api/app/master4b.py | albertolusoli/meetup_api | c631adf509d4ba1a7a4e26be4bca78060daee0a6 | [
"MIT"
] | 1 | 2019-08-23T00:06:50.000Z | 2019-08-23T00:06:50.000Z | meetup_api/app/master4b.py | albertolusoli/meetup_api | c631adf509d4ba1a7a4e26be4bca78060daee0a6 | [
"MIT"
] | null | null | null | meetup_api/app/master4b.py | albertolusoli/meetup_api | c631adf509d4ba1a7a4e26be4bca78060daee0a6 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import psycopg2
from config import Config
import find_group_ids as find_group_ids
import find_topic_ids as topics
import find_event_ids as find_event_ids
import api_groups as api_groups
import api_past_events as api_past_events
import api_upcoming_events as api_upcoming_events
import api_group_members as api_group_members
import api_rsvp as api_rsvp
import api_script_topics as api_script_topics
import params
import fuckit
import requests
import json
import time
import codecs
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import datetime
UTF8Writer = codecs.getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)
import settings
def main():
    """Run the pipeline stages enabled in params, suppressing their errors.

    Each stage runs under ``with fuckit``, which swallows any exception so
    one failing stage does not stop the crawl cycle. Disabled stages are
    kept as commented-out code for reference.
    """
    # if params.topics_run == True:
    #     with fuckit:
    #         add_topics(params.topics)
    if params.groups_run == True:
        with fuckit:
            groups()
    # if params.events_and_members_run == True:
    #     with fuckit:
    #         events_and_members()
    # if params.rsvps_run == True:
    #     with fuckit:
    #         rsvp()
    # print '---'
# def cron
# schedule.dow.on('SUN').at("00:00").do(job)
def test():
    """Smoke-test hook: just prints a separator line."""
    print '---'
# def add_topics(*argv):
# for arg in argv:
# for topic in arg:
# api_script_topics.main(topic)
def groups():
    """Select topic ids from the DB; the per-topic API crawl is disabled.

    The actual ``api_groups.main`` call is commented out, so currently this
    only fetches topic ids and prints them.
    """
    topic_ids = topics.select_topic_ids(params.topic_order, params.topic_limit)
    # NOTE: the trailing comma makes this a 1-tuple holding the timestamp string.
    created_time = (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),)
    # for topic_id in topic_ids:
    #     api_groups.main(topic_id, created_time)
    print topic_ids, 'yes'
# def events_and_members():
# today = datetime.date.today()
# DD = datetime.timedelta(days=4)
# compare_date=today-DD
# ids, urlnames, created_at =find_group_ids.select_group_ids(params.groups_order, params.groups_limit)
# if params.events_run == True:
# for (index, date) in zip(urlnames, created_at):
# api_past_events.main(index, "upcoming")
# if date > compare_date:
# api_past_events.main(index, "past")
# print index, '********** urlname here *****'
# # for status in params.event_statuses:
# # api_past_events.main(index, status)
# if params.members_run == True:
# for index in ids:
# print index
# api_group_members.main(index)
# def rsvp():
# event_ids, group_urlnames, event_date, created_at=find_event_ids.select_group_ids()
# todayR = datetime.date.today()
# DD = datetime.timedelta(days=1)
# compare_date=todayR-DD
# #print event_ids, group_urlnames, 'event_ids'
# i=0
# for pairs in event_ids:
# # print group_urlnames[i], event_ids[i],event_date[i], created_at[i]
# print '-----'
# if event_date[i] > todayR or created_at[i] > compare_date:
# print 'yes'
# with fuckit:
# api_rsvp.main(group_urlnames[i], event_ids[i] )
# i+=1
def heading():
    """Print a start-of-cycle banner with the current timestamp, centred in a 50-char box."""
    stars = '*' * 50
    date = 'CYCLE:' + str(datetime.datetime.now())
    date_len = len(date)
    # Padding stars on each side of the centred date line.
    stars2 = int((50 - date_len) / 2.0) - 1
    print stars, '\n'
    print stars2 * '*', date, stars2 * '*', '\n'
    print stars, '\n'
def footer():
    """Print an end-of-cycle banner with the current timestamp, centred in a 50-char box."""
    stars = '*' * 50
    date = 'END OF CYCLE:' + str(datetime.datetime.now())
    date_len = len(date)
    # Padding stars on each side of the centred date line.
    stars2 = int((50 - date_len) / 2.0) - 1
    print stars, '\n'
    print stars2 * '*', date, stars2 * '*', '\n'
    print stars, '\n'
if __name__ == "__main__":
    # One crawl cycle: opening banner, pipeline, closing banner.
    heading()
    main()
    footer()
| 22.335664 | 103 | 0.69474 |
182d7e5ce8fc3b4eff8600a2bb88c07f30970066 | 4,469 | py | Python | scrapy_crawl_once/middlewares.py | SmirnovStepan/scrapy-crawl-once | 8ee8004d7effbf05cb7d0590f91b1880c98731a9 | [
"MIT"
] | null | null | null | scrapy_crawl_once/middlewares.py | SmirnovStepan/scrapy-crawl-once | 8ee8004d7effbf05cb7d0590f91b1880c98731a9 | [
"MIT"
] | null | null | null | scrapy_crawl_once/middlewares.py | SmirnovStepan/scrapy-crawl-once | 8ee8004d7effbf05cb7d0590f91b1880c98731a9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import time
import logging
from sqlitedict import SqliteDict
from scrapy import signals
from scrapy.utils.project import data_path
from scrapy.utils.request import request_fingerprint
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class CrawlOnceMiddleware(object):
    """
    This spider and downloader middleware allows to avoid re-crawling pages
    which were already downloaded in previous crawls.

    To enable it, modify your settings.py::

        SPIDER_MIDDLEWARES = {
            # ...
            'scrapy_crawl_once.CrawlOnceMiddleware': 100,
            # ...
        }

        DOWNLOADER_MIDDLEWARES = {
            # ...
            'scrapy_crawl_once.CrawlOnceMiddleware': 50,
            # ...
        }

    By default it does nothing. To avoid crawling a particular page
    multiple times set ``request.meta['crawl_once'] = True``. Other
    ``request.meta`` keys:

    * ``crawl_once_value`` - a value to store in DB. By default, timestamp
      is stored.
    * ``crawl_once_key`` - request unique id; by default request_fingerprint
      is used.

    Settings:

    * ``CRAWL_ONCE_ENABLED`` - set it to False to disable middleware.
      Default is True.
    * ``CRAWL_ONCE_PATH`` - a path to a folder with crawled requests database.
      By default ``.scrapy/crawl_once/`` path is used; this folder contains
      ``<spider_name>.sqlite`` files with databases of seen requests.
    * ``CRAWL_ONCE_DEFAULT`` - default value for ``crawl_once`` meta key
      (False by default). When True, all requests are handled by
      this middleware unless disabled explicitly using
      ``request.meta['crawl_once'] = False``.
    * ``CRAWL_ONCE_RESET`` - reset the state, clearing out all seen requests
      Default is False.

    This middleware puts all requests to the Scheduler, and then filters
    them out at Downloader.
    """
    def __init__(self, path, stats, default, reset=False):
        self.path = path
        self.stats = stats
        self.default = default
        self.reset = reset

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from crawler settings; raise NotConfigured if disabled."""
        s = crawler.settings
        if not s.getbool('CRAWL_ONCE_ENABLED', True):
            raise NotConfigured()
        path = data_path(s.get('CRAWL_ONCE_PATH', 'crawl_once'),
                         createdir=True)
        default = s.getbool('CRAWL_ONCE_DEFAULT', default=False)
        reset = s.getbool('CRAWL_ONCE_RESET', default=False)
        o = cls(path, crawler.stats, default, reset)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
        return o

    def spider_opened(self, spider):
        """Open (and optionally reset) the per-spider seen-requests database."""
        self.db, dbpath = self._spider_db(spider)
        # A per-spider ``crawl_once_reset`` attribute overrides the setting.
        reset = self.reset or getattr(spider, 'crawl_once_reset', False)
        if reset:
            self.db.clear()
        num_records = len(self.db)
        logger.info("Opened crawl database %r with %d existing records" % (
            dbpath, num_records
        ))
        self.stats.set_value('crawl_once/initial', num_records)

    def spider_closed(self, spider):
        """Flush and close the seen-requests database."""
        self.db.close()

    def _spider_db(self, spider):
        """Return (SqliteDict, path) for this spider's seen-requests DB."""
        dbpath = os.path.join(self.path, '%s.sqlite' % spider.name)
        db = SqliteDict(
            filename=dbpath,
            tablename='requests',
            autocommit=True,
        )
        return db, dbpath

    def _get_key(self, request):
        """Return the DB key for a request: explicit meta key or fingerprint."""
        return (request.meta.get('crawl_once_key') or
                request_fingerprint(request))

    # spider middleware interface
    def process_spider_output(self, response, result, spider):
        for r in result:
            yield r
        # response is crawled, store its fingerprint in DB if crawl_once
        # is requested.
        if response.meta.get('crawl_once', self.default):
            key = self._get_key(response.request)
            self.db[key] = response.meta.get('crawl_once_value', time.time())
            self.stats.inc_value('crawl_once/stored')

    # downloader middleware interface
    def process_request(self, request, spider):
        """Drop (IgnoreRequest) requests whose key is already in the DB."""
        if not request.meta.get('crawl_once', self.default):
            return
        if self._get_key(request) in self.db:
            self.stats.inc_value('crawl_once/ignored')
            raise IgnoreRequest()
| 35.188976 | 78 | 0.633028 |
af4d765b05c6e72a633f53f47e93181f40e92ed2 | 2,136 | py | Python | examples/listdevs.py | DCC-Lab/libusb | 40e49a03cf17fafaa03c6b02a4aa5e920631442c | [
"Zlib"
] | null | null | null | examples/listdevs.py | DCC-Lab/libusb | 40e49a03cf17fafaa03c6b02a4aa5e920631442c | [
"Zlib"
] | null | null | null | examples/listdevs.py | DCC-Lab/libusb | 40e49a03cf17fafaa03c6b02a4aa5e920631442c | [
"Zlib"
] | null | null | null | # Copyright (c) 2016-2020 Adam Karpierz
# Licensed under the zlib/libpng License
# https://opensource.org/licenses/Zlib
# libusb example program to list devices on the bus
# Copyright © 2007 Daniel Drake <dsd@gentoo.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import ctypes as ct
import libusb as usb
def print_devs(devs):
    """Print one line per device: vendor:product ids, bus/device numbers,
    and (when available) the USB port path."""
    port_path = (ct.c_uint8 * 8)()
    idx = 0
    while devs[idx]:
        dev = devs[idx]
        desc = usb.device_descriptor()
        if usb.get_device_descriptor(dev, ct.byref(desc)) < 0:
            print("failed to get device descriptor", file=sys.stderr)
            return
        print("{:04x}:{:04x} (bus {:d}, device {:d})".format(
            desc.idVendor, desc.idProduct,
            usb.get_bus_number(dev), usb.get_device_address(dev)), end="")
        n_ports = usb.get_port_numbers(dev, port_path, ct.sizeof(port_path))
        if n_ports > 0:
            print(" path: {:d}".format(port_path[0]), end="")
            for k in range(1, n_ports):
                print(".{:d}".format(port_path[k]), end="")
        print()
        idx += 1
def main(argv=sys.argv):
    """Initialize libusb, enumerate and print all devices, then clean up.

    Returns 0 on success, or the negative libusb status code on failure.
    """
    status = usb.init(None)
    if status < 0:
        return status
    try:
        dev_list = ct.POINTER(ct.POINTER(usb.device))()
        count = usb.get_device_list(None, ct.byref(dev_list))
        if count < 0:
            return count
        print_devs(dev_list)
        usb.free_device_list(dev_list, 1)
    finally:
        # Always release the libusb context, even on early errors above.
        usb.exit(None)
    return 0
# Run the example and propagate main()'s status code as the process exit code.
sys.exit(main())
| 27.74026 | 78 | 0.632022 |
cb24039d6887d5914ed43ab1944a2a5a423c36f3 | 16,500 | py | Python | tests/test_modeling_roberta.py | malteos/transformers | cafa6a9e29f3e99c67a1028f8ca779d439bc0689 | [
"Apache-2.0"
] | 23 | 2020-10-26T11:10:30.000Z | 2022-03-21T10:18:08.000Z | tests/test_modeling_roberta.py | malteos/transformers | cafa6a9e29f3e99c67a1028f8ca779d439bc0689 | [
"Apache-2.0"
] | 2 | 2020-10-29T07:59:57.000Z | 2021-09-08T14:49:44.000Z | tests/test_modeling_roberta.py | malteos/transformers | cafa6a9e29f3e99c67a1028f8ca779d439bc0689 | [
"Apache-2.0"
] | 8 | 2020-12-31T03:30:57.000Z | 2022-03-21T08:12:54.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import require_torch, slow, torch_device
# Import torch-dependent symbols only when torch is installed, so this test
# module can still be collected (and skipped) in a torch-less environment.
if is_torch_available():
    import torch

    from transformers import (
        RobertaConfig,
        RobertaModel,
        RobertaForMaskedLM,
        RobertaForSequenceClassification,
        RobertaForTokenClassification,
    )
    from transformers.modeling_roberta import RobertaEmbeddings, RobertaForMultipleChoice, RobertaForQuestionAnswering
    from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    from transformers.modeling_utils import create_position_ids_from_input_ids
@require_torch
class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
    """Unit tests for the RoBERTa model family, driven by a tiny random
    configuration (see the nested ``RobertaModelTester``)."""

    # Model classes exercised by the shared ModelTesterMixin tests;
    # empty tuple when torch is not available.
    all_model_classes = (RobertaForMaskedLM, RobertaModel) if is_torch_available() else ()

    class RobertaModelTester(object):
        """Builds a small RobertaConfig plus random inputs/labels and runs
        per-head forward passes, asserting output shapes."""

        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            num_hidden_layers=5,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            # parent: the enclosing unittest.TestCase, used for assertions.
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope

        def prepare_config_and_inputs(self):
            # Random ids/masks/labels sized by the tester's tiny dimensions.
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = RobertaConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def check_loss_output(self, result):
            # A scalar loss has an empty size() list.
            self.parent.assertListEqual(list(result["loss"].size()), [])

        def create_and_check_roberta_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = RobertaModel(config=config)
            model.to(torch_device)
            model.eval()
            # Exercise the three supported call signatures; only the last
            # result is checked below.
            sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
            sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids)
            sequence_output, pooled_output = model(input_ids)

            result = {
                "sequence_output": sequence_output,
                "pooled_output": pooled_output,
            }
            self.parent.assertListEqual(
                list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
            )
            self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])

        def create_and_check_roberta_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = RobertaForMaskedLM(config=config)
            model.to(torch_device)
            model.eval()
            loss, prediction_scores = model(
                input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
            )
            result = {
                "loss": loss,
                "prediction_scores": prediction_scores,
            }
            self.parent.assertListEqual(
                list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
            )
            self.check_loss_output(result)

        def create_and_check_roberta_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = RobertaForTokenClassification(config=config)
            model.to(torch_device)
            model.eval()
            loss, logits = model(
                input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
            )
            result = {
                "loss": loss,
                "logits": logits,
            }
            self.parent.assertListEqual(
                list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels]
            )
            self.check_loss_output(result)

        def create_and_check_roberta_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = RobertaForMultipleChoice(config=config)
            model.to(torch_device)
            model.eval()
            # Replicate each input across the choice dimension:
            # (batch, seq) -> (batch, num_choices, seq).
            multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            loss, logits = model(
                multiple_choice_inputs_ids,
                attention_mask=multiple_choice_input_mask,
                token_type_ids=multiple_choice_token_type_ids,
                labels=choice_labels,
            )
            result = {
                "loss": loss,
                "logits": logits,
            }
            self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
            self.check_loss_output(result)

        def create_and_check_roberta_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = RobertaForQuestionAnswering(config=config)
            model.to(torch_device)
            model.eval()
            loss, start_logits, end_logits = model(
                input_ids,
                attention_mask=input_mask,
                token_type_ids=token_type_ids,
                start_positions=sequence_labels,
                end_positions=sequence_labels,
            )
            result = {
                "loss": loss,
                "start_logits": start_logits,
                "end_logits": end_logits,
            }
            self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
            self.check_loss_output(result)

        def prepare_config_and_inputs_for_common(self):
            # Adapter for ModelTesterMixin: (config, inputs_dict) shape.
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = RobertaModelTest.RobertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_roberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_roberta_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_roberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_roberta_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_roberta_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only the first archive entry is loaded to keep the test quick.
        for model_name in list(ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = RobertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Ensure default position ids are assigned sequentially only to
        non-padding tokens. This is a regression test for
        https://github.com/huggingface/transformers/issues/1761

        The position ids should be masked with the embedding object's padding
        index. Therefore, the first available non-padding position index is
        RobertaEmbeddings.padding_idx + 1.
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = RobertaEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Ensure position ids built from input embeddings start at
        padding_idx + 1 and increase sequentially. Regression test for
        https://github.com/huggingface/transformers/issues/1761
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = RobertaEmbeddings(config=config)

        inputs_embeds = torch.Tensor(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
class RobertaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests: load real pretrained checkpoints and compare
    a slice of the outputs against reference values (the commented fairseq
    snippets show how each expected slice was produced)."""

    @slow
    def test_inference_masked_lm(self):
        model = RobertaForMaskedLM.from_pretrained("roberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 50265))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
        )

        # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
        # roberta.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = RobertaModel.from_pretrained("roberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
        )

        # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
        # roberta.eval()
        # expected_slice = roberta.extract_features(input_ids)[:, :3, :3].detach()

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_classification_head(self):
        model = RobertaForSequenceClassification.from_pretrained("roberta-large-mnli")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]])

        # roberta = torch.hub.load('pytorch/fairseq', 'roberta.large.mnli')
        # roberta.eval()
        # expected_tensor = roberta.predict("mnli", input_ids, return_logits=True).detach()

        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 44.117647 | 119 | 0.656909 |
650a49d05c92fb6fba0cc2bef6c78878e4178be0 | 192 | py | Python | backend/workers/main.py | jmeisele/celery-farm | 7e2638a24717b37f1886a816c45b9d88f89cc726 | [
"MIT"
] | 3 | 2022-01-07T20:30:31.000Z | 2022-02-09T19:11:27.000Z | backend/workers/main.py | jmeisele/celery-farm | 7e2638a24717b37f1886a816c45b9d88f89cc726 | [
"MIT"
] | null | null | null | backend/workers/main.py | jmeisele/celery-farm | 7e2638a24717b37f1886a816c45b9d88f89cc726 | [
"MIT"
] | null | null | null | from celery import Celery
from .config import settings
# Celery application for the "workers" package: broker and result-backend
# URLs come from the shared settings object; task modules are auto-imported
# from ``workers.tasks``.
celery_app = Celery(
    "workers",
    broker=settings.BROKER_URL,
    include=["workers.tasks"],
    backend=settings.BACKEND_URL,
)
| 17.454545 | 33 | 0.71875 |
fb9982db309834bb844ab22be28730b654696caa | 15,362 | py | Python | main_verb_try_adamexp.py | thilinicooray/mac-network-pytorch | 0e4bf3f7f301570b652490f697758361c866f3c1 | [
"MIT"
] | null | null | null | main_verb_try_adamexp.py | thilinicooray/mac-network-pytorch | 0e4bf3f7f301570b652490f697758361c866f3c1 | [
"MIT"
] | null | null | null | main_verb_try_adamexp.py | thilinicooray/mac-network-pytorch | 0e4bf3f7f301570b652490f697758361c866f3c1 | [
"MIT"
] | null | null | null | import torch
from imsitu_encoder_verb import imsitu_encoder
from imsitu_loader import imsitu_loader_verb
from imsitu_scorer_log import imsitu_scorer
import json
import model_verb_directcnn
import os
import utils
import torchvision as tv
#from torchviz import make_dot
#from graphviz import Digraph
def train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, max_epoch, model_dir, encoder, gpu_mode, clip_norm, lr_max, model_name, args,eval_frequency=4000):
    """Train the verb-prediction model, periodically evaluating on dev and
    saving the best-scoring checkpoint to ``model_dir``.

    Every ``print_freq`` steps the running top-1/top-5 scores and loss are
    printed; every ``eval_frequency`` steps the model is evaluated on
    ``dev_loader`` and saved if the combined top-1/top-5 score is a new best.
    NOTE(review): ``clip_norm`` and ``lr_max`` are accepted but unused here —
    the gradient-clipping call is commented out below.
    """
    model.train()
    train_loss = 0
    total_steps = 0
    print_freq = 400
    dev_score_list = []

    '''if model.gpu_mode >= 0 :
        ngpus = 2
        device_array = [i for i in range(0,ngpus)]
        pmodel = torch.nn.DataParallel(model, device_ids=device_array)
    else:
        pmodel = model'''
    pmodel = model

    # Running scorers, reset after every dev evaluation.
    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)

    '''print('init param data check :')
    for f in model.parameters():
        if f.requires_grad:
            print(f.data.size())'''

    for epoch in range(max_epoch):
        #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())
        #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1
        mx = len(train_loader)
        for i, (id, img, verb) in enumerate(train_loader):
            #print("epoch{}-{}/{} batches\r".format(epoch,i+1,mx)) ,
            total_steps += 1

            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                verb = torch.autograd.Variable(verb.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)

            #optimizer.zero_grad()

            '''print('all inputs')
            print(img)
            print('=========================================================================')
            print(verb)
            print('=========================================================================')
            print(roles)
            print('=========================================================================')
            print(labels)'''

            verb_predict = pmodel(img)
            '''g = make_dot(verb_predict, model.state_dict())
            g.view()'''

            loss = model.calculate_loss(verb_predict, verb)
            #print('current loss = ', loss)

            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)
            # Gradients are cleared after the step (not before backward).
            optimizer.step()
            optimizer.zero_grad()

            train_loss += loss.item()

            top1.add_point_verb_only(verb_predict, verb)
            top5.add_point_verb_only(verb_predict, verb)

            if total_steps % print_freq == 0:
                top1_a = top1.get_average_results()
                top5_a = top5.get_average_results()
                print ("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}"
                       .format(total_steps-1,epoch,i, utils.format_dict(top1_a, "{:.2f}", "1-"),
                               utils.format_dict(top5_a,"{:.2f}","5-"), loss.item(),
                               train_loss / ((total_steps-1)%eval_frequency) ))

            if total_steps % eval_frequency == 0:
                top1, top5, val_loss = eval(model, dev_loader, encoder, gpu_mode)
                model.train()

                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()

                #todo : top 5
                # Combined (un-normalized) sum of top-1 and top-5 metrics is
                # used as the model-selection score.
                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
                #avg_score = top1_avg["value*"]
                #avg_score /= 3

                print ('Dev {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,
                                                             utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                             utils.format_dict(top5_avg, '{:.2f}', '5-')))
                #print('Dev loss :', val_loss)

                dev_score_list.append(avg_score)
                max_score = max(dev_score_list)

                if max_score == dev_score_list[-1]:
                    torch.save(model.state_dict(), model_dir + "/{}_verb_directcnn_bn_featfreeze_adamexplr.model".format(model_name))
                    print ('New best model saved! {0}'.format(max_score))

                #eval on the trainset

                '''top1, top5, val_loss = eval(model, traindev_loader, encoder, gpu_mode)
                model.train()
                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()
                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
                avg_score /= 8
                print ('TRAINDEV {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,
                                                                  utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                                  utils.format_dict(top5_avg, '{:.2f}', '5-')))'''

                print('current train loss', train_loss)
                train_loss = 0
                top1 = imsitu_scorer(encoder, 1, 3)
                top5 = imsitu_scorer(encoder, 5, 3)

            del loss, img, verb
            #break
        print('Epoch ', epoch, ' completed!')
        scheduler.step()
        #break
def eval(model, dev_loader, encoder, gpu_mode, write_to_file = False):
    """Evaluate verb predictions on ``dev_loader``.

    Returns (top1_scorer, top5_scorer, 0) — the third element is a loss
    placeholder; the loss computation is commented out below.
    """
    model.eval()
    val_loss = 0

    print ('evaluating model...')
    top1 = imsitu_scorer(encoder, 1, 3, write_to_file)
    top5 = imsitu_scorer(encoder, 5, 3)
    with torch.no_grad():
        mx = len(dev_loader)
        for i, (id, img, verb) in enumerate(dev_loader):
            #prit("{}/{} batches\r".format(i+1,mx)) ,
            '''im_data = torch.squeeze(im_data,0)
            im_info = torch.squeeze(im_info,0)
            gt_boxes = torch.squeeze(gt_boxes,0)
            num_boxes = torch.squeeze(num_boxes,0)
            verb = torch.squeeze(verb,0)
            roles = torch.squeeze(roles,0)
            labels = torch.squeeze(labels,0)'''
            #print('handling batch :', id)
            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                verb = torch.autograd.Variable(verb.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)

            verb_predict = model(img)
            '''loss = model.calculate_eval_loss(verb_predict, verb, role_predict, labels)
            val_loss += loss.item()'''
            top1.add_point_verb_only_eval(id, verb_predict, verb)
            top5.add_point_verb_only_eval(id, verb_predict, verb)

            del img, verb
            #break

    #return top1, top5, val_loss/mx
    return top1, top5, 0
def main():
    """Entry point: parse CLI args, build the imSitu data loaders and the
    verb model, then run evaluation, testing, or training depending on the
    --evaluate / --test flags."""
    import argparse
    parser = argparse.ArgumentParser(description="imsitu VSRL. Training, evaluation and prediction.")
    parser.add_argument("--gpuid", default=-1, help="put GPU id > -1 in GPU mode", type=int)
    #parser.add_argument("--command", choices = ["train", "eval", "resume", 'predict'], required = True)
    parser.add_argument('--resume_training', action='store_true', help='Resume training from the model [resume_model]')
    parser.add_argument('--resume_model', type=str, default='', help='The model we resume')
    parser.add_argument('--verb_module', type=str, default='', help='pretrained verb module')
    parser.add_argument('--train_role', action='store_true', help='cnn fix, verb fix, role train from the scratch')
    parser.add_argument('--finetune_verb', action='store_true', help='cnn fix, verb finetune, role train from the scratch')
    parser.add_argument('--finetune_cnn', action='store_true', help='cnn finetune, verb finetune, role train from the scratch')
    parser.add_argument('--output_dir', type=str, default='./trained_models', help='Location to output the model')
    parser.add_argument('--evaluate', action='store_true', help='Only use the testing mode')
    parser.add_argument('--test', action='store_true', help='Only use the testing mode')
    parser.add_argument('--dataset_folder', type=str, default='./imSitu', help='Location of annotations')
    parser.add_argument('--imgset_dir', type=str, default='./resized_256', help='Location of original images')
    parser.add_argument('--frcnn_feat_dir', type=str, help='Location of output from detectron')
    #todo: train role module separately with gt verbs

    args = parser.parse_args()

    # NOTE(review): several of these hyperparameters (batch_size, lr, lr_max,
    # lr_gamma, lr_step, weight_decay) are defined but not used below — the
    # DataLoaders hard-code batch_size=64 and the optimizer hard-codes lr=0.001.
    batch_size = 640
    #lr = 5e-6
    lr = 0.0001
    lr_max = 5e-4
    lr_gamma = 0.1
    lr_step = 25
    clip_norm = 50
    weight_decay = 1e-4
    n_epoch = 500
    n_worker = 3

    #dataset_folder = 'imSitu'
    #imgset_folder = 'resized_256'
    dataset_folder = args.dataset_folder
    imgset_folder = args.imgset_dir

    print('model spec :, verb role with context ')

    train_set = json.load(open(dataset_folder + "/updated_train_new.json"))
    encoder = imsitu_encoder(train_set)

    model = model_verb_directcnn.BaseModel(encoder, args.gpuid)

    # To group up the features
    #all verb and role feat are under role as it's a single unit
    #cnn_features, role_features = utils.group_features_noun(model)

    train_set = imsitu_loader_verb(imgset_folder, train_set, encoder, model.train_preprocess())
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True, num_workers=n_worker)

    dev_set = json.load(open(dataset_folder +"/dev.json"))
    dev_set = imsitu_loader_verb(imgset_folder, dev_set, encoder, model.dev_preprocess())
    dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=64, shuffle=True, num_workers=n_worker)

    test_set = json.load(open(dataset_folder +"/test.json"))
    test_set = imsitu_loader_verb(imgset_folder, test_set, encoder, model.dev_preprocess())
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True, num_workers=n_worker)

    # traindev: a small loader over dev.json used for train-time diagnostics.
    traindev_set = json.load(open(dataset_folder +"/dev.json"))
    traindev_set = imsitu_loader_verb(imgset_folder, traindev_set, encoder, model.dev_preprocess())
    traindev_loader = torch.utils.data.DataLoader(traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)

    #utils.set_trainable(model, False)
    # Select which pretrained weights to load and name the run accordingly.
    if args.train_role:
        print('CNN fix, Verb fix, train role from the scratch from: {}'.format(args.verb_module))
        args.train_all = False
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb], ['conv', 'verb'])
        optimizer_select = 1
        model_name = 'cfx_vfx_rtrain'
    elif args.finetune_verb:
        print('CNN fix, Verb finetune, train role from the scratch from: {}'.format(args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb], ['conv', 'verb'])
        optimizer_select = 2
        model_name = 'cfx_vft_rtrain'
    elif args.finetune_cnn:
        print('CNN finetune, Verb finetune, train role from the scratch from: {}'.format(args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb], ['conv', 'verb'])
        optimizer_select = 3
        model_name = 'cft_vft_rtrain'
    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        args.train_all = True
        if len(args.resume_model) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.resume_model, [model])
        optimizer_select = 0
        model_name = 'resume_all'
    else:
        print('Training from the scratch.')
        optimizer_select = 0
        args.train_all = True
        model_name = 'train_full'

    '''optimizer = utils.get_optimizer_noun(lr,weight_decay,optimizer_select,
                                         cnn_features, role_features)'''

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    torch.manual_seed(1234)
    if args.gpuid >= 0:
        #print('GPU enabled')
        model.cuda()
        torch.cuda.manual_seed(1234)
        torch.backends.cudnn.deterministic = True

    '''optimizer = torch.optim.Adamax([{'params': cnn_features, 'lr': 5e-5},
                                    {'params': role_features}],
                                   lr=1e-3)
    #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)
    #gradient clipping, grad check
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)'''

    # Adam with exponentially decaying learning rate (hence the file name).
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

    if args.evaluate:
        top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid, write_to_file = True)

        top1_avg = top1.get_average_results()
        top5_avg = top5.get_average_results()

        avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                    top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
        avg_score /= 8

        print ('Dev average :{:.2f} {} {}'.format( avg_score*100,
                                                   utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                   utils.format_dict(top5_avg, '{:.2f}', '5-')))

        all = top1.all_res

        with open('all_pred_verbfreezefeat.json', 'w') as fp:
            json.dump(all, fp, indent=4)
        print('Writing predictions to file completed !')

    elif args.test:
        top1, top5, val_loss = eval(model, test_loader, encoder, args.gpuid, write_to_file = True)

        top1_avg = top1.get_average_results_nouns()
        top5_avg = top5.get_average_results_nouns()

        avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                    top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
        avg_score /= 8

        print ('Test average :{:.2f} {} {}'.format( avg_score*100,
                                                    utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                    utils.format_dict(top5_avg, '{:.2f}', '5-')))

    else:
        print('Model training started!')
        train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, n_epoch, args.output_dir, encoder, args.gpuid, clip_norm, lr_max, model_name, args)
# Run training/evaluation only when executed as a script.
if __name__ == "__main__":
    main()
| 41.184987 | 180 | 0.588921 |
1b871222e2fc5998800eb387b6b062df925d655a | 2,005 | py | Python | CapGenerator/train_model.py | edwardcruzcruz/Describiendo_imagenes_keras | 5c9a0ffed365098d0df880dd73c1908d2228cc28 | [
"MIT"
] | 64 | 2018-10-25T06:19:49.000Z | 2022-02-17T11:23:56.000Z | CapGenerator/train_model.py | edwardcruzcruz/Describiendo_imagenes_keras | 5c9a0ffed365098d0df880dd73c1908d2228cc28 | [
"MIT"
] | 12 | 2018-09-10T14:56:35.000Z | 2022-03-28T14:48:45.000Z | CapGenerator/train_model.py | edwardcruzcruz/Describiendo_imagenes_keras | 5c9a0ffed365098d0df880dd73c1908d2228cc28 | [
"MIT"
] | 55 | 2018-11-12T08:33:43.000Z | 2022-01-22T15:35:53.000Z | import load_data as ld
import generate_model as gen
from keras.callbacks import ModelCheckpoint
from pickle import dump
def train_model(weight=None, epochs=10):
    """Train the image-caption generator end to end.

    Loads the train/test splits, fits a tokenizer on the training
    captions (persisting it and an index->word lookup for inference),
    builds the model, and trains it with per-epoch checkpointing.

    Args:
        weight: optional path to pre-trained weights (HDF5) to resume from.
        epochs: number of training epochs.
    """
    # Load pre-extracted image features and caption descriptions.
    data = ld.prepare_dataset('train')
    train_features, train_descriptions = data[0]
    test_features, test_descriptions = data[1]
    # Fit the tokenizer on the training captions only.
    tokenizer = gen.create_tokenizer(train_descriptions)
    # Persist the tokenizer and an index->word map for decoding predictions.
    # (was: dump(tokenizer, open(...)) — the file handle was never closed)
    with open('models/tokenizer.pkl', 'wb') as fh:
        dump(tokenizer, fh)
    index_word = {v: k for k, v in tokenizer.word_index.items()}
    with open('models/index_word.pkl', 'wb') as fh:
        dump(index_word, fh)
    vocab_size = len(tokenizer.word_index) + 1
    print('Vocabulary Size: %d' % vocab_size)
    # Longest caption bounds the model's input sequence length.
    max_length = gen.max_length(train_descriptions)
    print('Description Length: %d' % max_length)
    model = gen.define_model(vocab_size, max_length)
    # Resume from pre-trained weights when provided.  (was: weight != None)
    if weight is not None:
        model.load_weights(weight)
    # Checkpoint only when validation loss improves.
    filepath = 'models/model-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='min')
    steps = len(train_descriptions)
    val_steps = len(test_descriptions)
    # Progressive loading: one image's caption batch per generator step.
    train_generator = gen.data_generator(train_descriptions, train_features, tokenizer, max_length)
    val_generator = gen.data_generator(test_descriptions, test_features, tokenizer, max_length)
    model.fit_generator(train_generator, epochs=epochs, steps_per_epoch=steps, verbose=1,
                        callbacks=[checkpoint], validation_data=val_generator, validation_steps=val_steps)
    try:
        model.save('models/wholeModel.h5', overwrite=True)
        model.save_weights('models/weights.h5', overwrite=True)
    except Exception:  # was a bare except: don't mask SystemExit/KeyboardInterrupt
        print("Error in saving model.")
    print("Training complete...\n")
if __name__ == '__main__':
    # Script entry point: train from scratch for 20 epochs.
    train_model(epochs=20)
| 33.983051 | 97 | 0.740648 |
3f6d10abf82896213b63769f2928895be5227b02 | 895 | py | Python | altair_examples/poly_fit_regression.py | progressivis/altair_examples | 9a453ecebdf0bd4776696d4233bee2a8f2eb3606 | [
"BSD-3-Clause"
] | 1 | 2021-10-30T03:55:24.000Z | 2021-10-30T03:55:24.000Z | altair_examples/poly_fit_regression.py | progressivis/altair_examples | 9a453ecebdf0bd4776696d4233bee2a8f2eb3606 | [
"BSD-3-Clause"
] | null | null | null | altair_examples/poly_fit_regression.py | progressivis/altair_examples | 9a453ecebdf0bd4776696d4233bee2a8f2eb3606 | [
"BSD-3-Clause"
] | null | null | null | """
Polynomial Fit Plot with Regression Transform
=============================================
This example shows how to overlay data with multiple fitted polynomials using
the regression transform.
"""
# category: scatter plots
import numpy as np
import altair as alt
# Generate some random data
rng = np.random.RandomState(1)
x = rng.rand(40) ** 2
y = 10 - 1.0 / (x + 0.1) + rng.randn(40)
source = alt.pd.DataFrame({"x": x, "y": y})
# Define the degree of the polynomial fits
degree_list = [1, 3, 5]
base = (
alt.Chart(source).mark_circle(color="black").encode(alt.X("x"), alt.Y("y"))
)
polynomial_fit = [
base.transform_regression(
"x", "y", method="poly", order=order, as_=["x", str(order)]
)
.mark_line()
.transform_fold([str(order)], as_=["degree", "y"])
.encode(alt.Color("degree:N"))
for order in degree_list
]
alt.layer(base, *polynomial_fit)
| 24.861111 | 79 | 0.627933 |
55a74bbd6dce1a0c413c69bb675b15e31c59f032 | 19,020 | py | Python | desktop/core/ext-py/Beaker/beaker/container.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 19 | 2015-05-01T19:59:03.000Z | 2021-12-09T08:03:16.000Z | desktop/core/ext-py/Beaker/beaker/container.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 1 | 2018-01-03T15:26:49.000Z | 2018-01-03T15:26:49.000Z | desktop/core/ext-py/Beaker/beaker/container.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 30 | 2015-03-25T19:40:07.000Z | 2021-05-28T22:59:26.000Z | """Container and Namespace classes"""
import anydbm
import cPickle
import logging
import os.path
import time
import beaker.util as util
from beaker.exceptions import CreationAbortedError, MissingCacheParameter
from beaker.synchronization import _threading, file_synchronizer, \
mutex_synchronizer, NameLock, null_synchronizer
# Public API of this module.  (Python 2 era Beaker code: anydbm/cPickle above.)
__all__ = ['Value', 'Container', 'ContainerContext',
           'MemoryContainer', 'DBMContainer', 'NamespaceManager',
           'MemoryNamespaceManager', 'DBMNamespaceManager', 'FileContainer',
           'OpenResourceNamespaceManager',
           'FileNamespaceManager', 'CreationAbortedError']

logger = logging.getLogger('beaker.container')

# Bind ``debug`` once at import time: the real logger method when DEBUG is
# enabled, otherwise a no-op so hot code paths skip formatting entirely.
if logger.isEnabledFor(logging.DEBUG):
    debug = logger.debug
else:
    def debug(message, *args):
        pass
class NamespaceManager(object):
    """Handles dictionary operations and locking for a namespace of
    values.

    The implementation for setting and retrieving the namespace data is
    handled by subclasses.

    NamespaceManager may be used alone, or may be privately accessed by
    one or more Container objects.  Container objects provide per-key
    services like expiration times and automatic recreation of values.

    Multiple NamespaceManagers created with a particular name will all
    share access to the same underlying datasource and will attempt to
    synchronize against a common mutex object.  The scope of this
    sharing may be within a single process or across multiple
    processes, depending on the type of NamespaceManager used.

    The NamespaceManager itself is generally threadsafe, except in the
    case of the DBMNamespaceManager in conjunction with the gdbm dbm
    implementation.

    """
    def __init__(self, namespace):
        self.namespace = namespace

    def get_creation_lock(self, key):
        # Subclasses return a lock guarding creation of the value for ``key``.
        raise NotImplementedError()

    def do_remove(self):
        # Subclasses drop the entire backing datasource for this namespace.
        raise NotImplementedError()

    # The read/write lock methods are no-ops by default; subclasses that
    # need synchronization (e.g. OpenResourceNamespaceManager) override them.
    def acquire_read_lock(self):
        pass

    def release_read_lock(self):
        pass

    def acquire_write_lock(self, wait=True):
        # Returns whether the lock was acquired (trivially True here).
        return True

    def release_write_lock(self):
        pass

    def has_key(self, key):
        # Python 2 style alias for ``key in self``.
        return self.__contains__(key)

    def __getitem__(self, key):
        raise NotImplementedError()

    def __setitem__(self, key, value):
        raise NotImplementedError()

    def set_value(self, key, value, expiretime=None):
        """Optional set_value() method called by Value.

        Allows an expiretime to be passed, for namespace
        implementations which can prune their collections
        using expiretime.

        """
        self[key] = value

    def __contains__(self, key):
        raise NotImplementedError()

    def __delitem__(self, key):
        raise NotImplementedError()

    def keys(self):
        raise NotImplementedError()

    def remove(self):
        self.do_remove()
class OpenResourceNamespaceManager(NamespaceManager):
    """A NamespaceManager where read/write operations require opening/
    closing of a resource which is possibly mutexed.

    """
    def __init__(self, namespace):
        NamespaceManager.__init__(self, namespace)
        self.access_lock = self.get_access_lock()
        # Reference count of open() calls; the resource is really opened on
        # the first and really closed on the last (see open()/close()).
        self.openers = 0
        # Protects ``openers`` and the do_open/do_close transitions.
        self.mutex = _threading.Lock()

    def get_access_lock(self):
        # Subclasses return a read/write lock guarding the whole resource.
        raise NotImplementedError()

    def do_open(self, flags):
        # Subclasses open the backing resource ('r' = read, 'c' = create/write).
        raise NotImplementedError()

    def do_close(self):
        raise NotImplementedError()

    def acquire_read_lock(self):
        self.access_lock.acquire_read_lock()
        try:
            self.open('r', checkcount = True)
        except:
            # Undo the outer lock if opening fails, then re-raise.
            self.access_lock.release_read_lock()
            raise

    def release_read_lock(self):
        try:
            self.close(checkcount = True)
        finally:
            self.access_lock.release_read_lock()

    def acquire_write_lock(self, wait=True):
        r = self.access_lock.acquire_write_lock(wait)
        try:
            # Only open when we actually hold the lock (a blocking call, or a
            # successful non-blocking attempt).
            if (wait or r):
                self.open('c', checkcount = True)
            return r
        except:
            self.access_lock.release_write_lock()
            raise

    def release_write_lock(self):
        try:
            self.close(checkcount=True)
        finally:
            self.access_lock.release_write_lock()

    def open(self, flags, checkcount=False):
        self.mutex.acquire()
        try:
            if checkcount:
                # Refcounted open: only the first opener touches the resource.
                if self.openers == 0:
                    self.do_open(flags)
                self.openers += 1
            else:
                # Unconditional (re)open; resets the refcount to one.
                self.do_open(flags)
                self.openers = 1
        finally:
            self.mutex.release()

    def close(self, checkcount=False):
        self.mutex.acquire()
        try:
            if checkcount:
                # Refcounted close: only the last closer touches the resource.
                self.openers -= 1
                if self.openers == 0:
                    self.do_close()
            else:
                # Force-close regardless of outstanding openers.
                if self.openers > 0:
                    self.do_close()
                self.openers = 0
        finally:
            self.mutex.release()

    def remove(self):
        self.access_lock.acquire_write_lock()
        try:
            self.close(checkcount=False)
            self.do_remove()
        finally:
            self.access_lock.release_write_lock()
class Value(object):
    """A single cached value addressed by ``key`` within a namespace.

    Wraps a NamespaceManager entry with expiration bookkeeping and
    optional automatic (re)creation via ``createfunc``, including
    protection against the dogpile effect (see get_value()).
    """
    __slots__ = 'key', 'createfunc', 'expiretime', 'expire_argument', 'starttime', 'storedtime',\
                'namespace'

    def __init__(self, key, namespace, createfunc=None, expiretime=None, starttime=None):
        self.key = key
        self.createfunc = createfunc
        self.expire_argument = expiretime
        self.starttime = starttime
        # -1 means "never loaded".  Note: self.expiretime is *not* set here;
        # it is assigned lazily in __get_value() before _is_expired() runs.
        self.storedtime = -1
        self.namespace = namespace

    def has_value(self):
        """return true if the container has a value stored.

        This is regardless of it being expired or not.

        """
        self.namespace.acquire_read_lock()
        try:
            return self.namespace.has_key(self.key)
        finally:
            self.namespace.release_read_lock()

    def can_have_value(self):
        # True if a usable value exists now or could be created on demand.
        return self.has_current_value() or self.createfunc is not None

    def has_current_value(self):
        self.namespace.acquire_read_lock()
        try:
            has_value = self.namespace.has_key(self.key)
            if has_value:
                # __get_value() is called for its side effect of loading
                # storedtime/expiretime, which _is_expired() relies on.
                value = self.__get_value()
                return not self._is_expired()
            else:
                return False
        finally:
            self.namespace.release_read_lock()

    def _is_expired(self):
        """Return true if this container's value is expired.

        Note that this method is only correct if has_current_value()
        or get_value() have been called already.

        """
        return (
            (
                # Invalidated: stored before the configured start time.
                self.starttime is not None and
                self.storedtime < self.starttime
            )
            or
            (
                # Timed out: the expiration window has elapsed.
                self.expiretime is not None and
                time.time() >= self.expiretime + self.storedtime
            )
        )

    def get_value(self):
        # Fast path: return the stored value if present and fresh.
        self.namespace.acquire_read_lock()
        try:
            has_value = self.has_value()
            if has_value:
                try:
                    value = self.__get_value()
                    if not self._is_expired():
                        return value
                except KeyError:
                    # guard against un-mutexed backends raising KeyError
                    pass
            if not self.createfunc:
                raise KeyError(self.key)
        finally:
            self.namespace.release_read_lock()

        # Value is missing or stale and we can recreate it.  Dogpile
        # protection: while one thread regenerates, other threads keep
        # serving the stale value instead of blocking on the creation lock.
        has_createlock = False
        creation_lock = self.namespace.get_creation_lock(self.key)
        if has_value:
            if not creation_lock.acquire(wait=False):
                debug("get_value returning old value while new one is created")
                return value
            else:
                debug("lock_creatfunc (didnt wait)")
                has_createlock = True

        if not has_createlock:
            # No stale value to serve: block until we may create.
            debug("lock_createfunc (waiting)")
            creation_lock.acquire()
            debug("lock_createfunc (waited)")

        try:
            # see if someone created the value already
            self.namespace.acquire_read_lock()
            try:
                if self.has_value():
                    try:
                        value = self.__get_value()
                        if not self._is_expired():
                            return value
                    except KeyError:
                        # guard against un-mutexed backends raising KeyError
                        pass
            finally:
                self.namespace.release_read_lock()

            debug("get_value creating new value")
            v = self.createfunc()
            self.set_value(v)
            return v
        finally:
            creation_lock.release()
            debug("released create lock")

    def __get_value(self):
        # Load (storedtime, expiretime, value) from the namespace; upgrades
        # 2-tuple entries written by older versions that lacked expiretime.
        value = self.namespace[self.key]
        try:
            self.storedtime, self.expiretime, value = value
        except ValueError:
            if not len(value) == 2:
                raise
            # Old format: upgrade
            self.storedtime, value = value
            self.expiretime = self.expire_argument = None
            debug("get_value upgrading time %r expire time %r", self.storedtime, self.expire_argument)
            # Rewrite in the new 3-tuple format; set_value() needs the write
            # lock, so temporarily drop the read lock held by our caller.
            self.namespace.release_read_lock()
            self.set_value(value)
            self.namespace.acquire_read_lock()
        except TypeError:
            # occurs when the value is None. memcached
            # may yank the rug from under us in which case
            # that's the result
            raise KeyError(self.key)
        return value

    def set_value(self, value):
        self.namespace.acquire_write_lock()
        try:
            self.storedtime = time.time()
            debug("set_value stored time %r expire time %r", self.storedtime, self.expire_argument)
            # Persist expire_argument (not expiretime) alongside the value.
            self.namespace.set_value(self.key, (self.storedtime, self.expire_argument, value))
        finally:
            self.namespace.release_write_lock()

    def clear_value(self):
        self.namespace.acquire_write_lock()
        try:
            debug("clear_value")
            if self.namespace.has_key(self.key):
                try:
                    del self.namespace[self.key]
                except KeyError:
                    # guard against un-mutexed backends raising KeyError
                    pass
            self.storedtime = -1
        finally:
            self.namespace.release_write_lock()
class MemoryNamespaceManager(NamespaceManager):
    """In-process NamespaceManager backed by a plain dict per namespace.

    Every instance created with the same namespace name shares a single
    dictionary, held in the class-level ``namespaces`` registry.
    """
    namespaces = util.SyncDict()

    def __init__(self, namespace, **kwargs):
        NamespaceManager.__init__(self, namespace)
        # get() lazily creates the shared dict for this namespace name.
        self.dictionary = MemoryNamespaceManager.namespaces.get(self.namespace, dict)

    def get_creation_lock(self, key):
        """Return a reentrant per-key lock for value creation."""
        lock_name = "memorycontainer/funclock/%s/%s" % (self.namespace, key)
        return NameLock(identifier=lock_name, reentrant=True)

    def __getitem__(self, key):
        return self.dictionary[key]

    def __setitem__(self, key, value):
        self.dictionary[key] = value

    def __delitem__(self, key):
        del self.dictionary[key]

    def __contains__(self, key):
        return key in self.dictionary

    def has_key(self, key):
        # Python 2 style alias for ``key in self``.
        return key in self.dictionary

    def do_remove(self):
        self.dictionary.clear()

    def keys(self):
        return self.dictionary.keys()
class DBMNamespaceManager(OpenResourceNamespaceManager):
    """Namespace backed by a dbm file (anydbm by default), with values
    pickled per key and file-based inter-process locking.
    """
    def __init__(self, namespace, dbmmodule=None, data_dir=None,
            dbm_dir=None, lock_dir=None, digest_filenames=True, **kwargs):
        self.digest_filenames = digest_filenames

        # Resolve the dbm directory: an explicit dbm_dir wins, otherwise it
        # is derived from data_dir; one of the two is mandatory.
        if not dbm_dir and not data_dir:
            raise MissingCacheParameter("data_dir or dbm_dir is required")
        elif dbm_dir:
            self.dbm_dir = dbm_dir
        else:
            self.dbm_dir = data_dir + "/container_dbm"
        util.verify_directory(self.dbm_dir)

        # Same resolution scheme for the lock directory.
        if not lock_dir and not data_dir:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        elif lock_dir:
            self.lock_dir = lock_dir
        else:
            self.lock_dir = data_dir + "/container_dbm_lock"
        util.verify_directory(self.lock_dir)

        self.dbmmodule = dbmmodule or anydbm
        self.dbm = None

        # The base __init__ calls get_access_lock(), which needs lock_dir.
        OpenResourceNamespaceManager.__init__(self, namespace)

        self.file = util.encoded_path(root= self.dbm_dir,
                                      identifiers=[self.namespace],
                                      extension='.dbm',
                                      digest_filenames=self.digest_filenames)

        debug("data file %s", self.file)
        self._checkfile()

    def get_access_lock(self):
        return file_synchronizer(identifier=self.namespace,
                                 lock_dir=self.lock_dir)

    def get_creation_lock(self, key):
        # NOTE(review): the lock identifier is per-namespace, not per-key —
        # ``key`` is unused here; confirm whether that is intentional.
        return file_synchronizer(
                    identifier = "dbmcontainer/funclock/%s" % self.namespace,
                    lock_dir=self.lock_dir
                )

    def file_exists(self, file):
        # Some dbm implementations store data under derived filenames
        # (e.g. foo.db / foo.pag / foo.dir), so probe those variants too.
        if os.access(file, os.F_OK):
            return True
        else:
            for ext in ('db', 'dat', 'pag', 'dir'):
                if os.access(file + os.extsep + ext, os.F_OK):
                    return True
        return False

    def _checkfile(self):
        # Create the dbm file on first use.
        if not self.file_exists(self.file):
            g = self.dbmmodule.open(self.file, 'c')
            g.close()

    def get_filenames(self):
        # Every on-disk file belonging to this dbm database.
        # (The local name ``list`` shadows the builtin; left unchanged here.)
        list = []
        if os.access(self.file, os.F_OK):
            list.append(self.file)
        for ext in ('pag', 'dir', 'db', 'dat'):
            if os.access(self.file + os.extsep + ext, os.F_OK):
                list.append(self.file + os.extsep + ext)
        return list

    def do_open(self, flags):
        debug("opening dbm file %s", self.file)
        try:
            self.dbm = self.dbmmodule.open(self.file, flags)
        except:
            # The file may have been removed externally; recreate and retry.
            self._checkfile()
            self.dbm = self.dbmmodule.open(self.file, flags)

    def do_close(self):
        if self.dbm is not None:
            debug("closing dbm file %s", self.file)
            self.dbm.close()

    def do_remove(self):
        for f in self.get_filenames():
            os.remove(f)

    def __getitem__(self, key):
        # Values are pickled on the way in (__setitem__) and unpickled here.
        return cPickle.loads(self.dbm[key])

    def __contains__(self, key):
        return self.dbm.has_key(key)

    def __setitem__(self, key, value):
        self.dbm[key] = cPickle.dumps(value)

    def __delitem__(self, key):
        del self.dbm[key]

    def keys(self):
        return self.dbm.keys()
class FileNamespaceManager(OpenResourceNamespaceManager):
    """Namespace persisted as a single pickled dict in one file; the dict
    is loaded into ``self.hash`` on open and flushed back on close.
    """
    def __init__(self, namespace, data_dir=None, file_dir=None, lock_dir=None,
                 digest_filenames=True, **kwargs):
        self.digest_filenames = digest_filenames

        # Resolve the storage directory: explicit file_dir wins, otherwise
        # derive from data_dir; one of the two is mandatory.
        if not file_dir and not data_dir:
            raise MissingCacheParameter("data_dir or file_dir is required")
        elif file_dir:
            self.file_dir = file_dir
        else:
            self.file_dir = data_dir + "/container_file"
        util.verify_directory(self.file_dir)

        # Same resolution scheme for the lock directory.
        if not lock_dir and not data_dir:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        elif lock_dir:
            self.lock_dir = lock_dir
        else:
            self.lock_dir = data_dir + "/container_file_lock"
        util.verify_directory(self.lock_dir)

        OpenResourceNamespaceManager.__init__(self, namespace)

        self.file = util.encoded_path(root=self.file_dir,
                                      identifiers=[self.namespace],
                                      extension='.cache',
                                      digest_filenames=self.digest_filenames)
        # Entire namespace held in memory between do_open()/do_close().
        self.hash = {}

        debug("data file %s", self.file)

    def get_access_lock(self):
        return file_synchronizer(identifier=self.namespace,
                                 lock_dir=self.lock_dir)

    def get_creation_lock(self, key):
        # NOTE(review): the lock identifier is per-namespace, not per-key —
        # ``key`` is unused here; confirm whether that is intentional.
        return file_synchronizer(
                identifier = "filecontainer/funclock/%s" % self.namespace,
                lock_dir = self.lock_dir
                )

    def file_exists(self, file):
        return os.access(file, os.F_OK)

    def do_open(self, flags):
        if self.file_exists(self.file):
            fh = open(self.file, 'rb')
            try:
                # A corrupt/truncated pickle just means an empty namespace.
                self.hash = cPickle.load(fh)
            except (IOError, OSError, EOFError, cPickle.PickleError, ValueError):
                pass
            fh.close()

        self.flags = flags

    def do_close(self):
        # Only flush to disk if the namespace was opened writable.
        if self.flags == 'c' or self.flags == 'w':
            fh = open(self.file, 'wb')
            cPickle.dump(self.hash, fh)
            fh.close()

        self.hash = {}
        self.flags = None

    def do_remove(self):
        os.remove(self.file)
        self.hash = {}

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return self.hash.has_key(key)

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
#### legacy stuff to support the old "Container" class interface

# Maps each Container subclass to the NamespaceManager class it uses;
# populated by ContainerMeta as each subclass is defined.
namespace_classes = {}

ContainerContext = dict

class ContainerMeta(type):
    """Metaclass that turns the legacy ``Container(...)`` call into a
    factory: "instantiating" a Container actually returns a Value bound
    to a (possibly shared) NamespaceManager cached in ``context``.
    """
    def __init__(cls, classname, bases, dict_):
        namespace_classes[cls] = cls.namespace_class
        return type.__init__(cls, classname, bases, dict_)

    def __call__(self, key, context, namespace, createfunc=None,
                 expiretime=None, starttime=None, **kwargs):
        # Reuse the namespace manager from ``context`` if one exists,
        # otherwise build it with the class registered for this Container.
        if namespace in context:
            ns = context[namespace]
        else:
            nscls = namespace_classes[self]
            context[namespace] = ns = nscls(namespace, **kwargs)
        return Value(key, ns, createfunc=createfunc,
                     expiretime=expiretime, starttime=starttime)

class Container(object):
    # Python 2 metaclass declaration.
    __metaclass__ = ContainerMeta
    namespace_class = NamespaceManager

class FileContainer(Container):
    namespace_class = FileNamespaceManager

class MemoryContainer(Container):
    namespace_class = MemoryNamespaceManager

class DBMContainer(Container):
    namespace_class = DBMNamespaceManager

# Backwards-compatible alias.
DbmContainer = DBMContainer
| 31.647255 | 102 | 0.576919 |
37bd6b211c4ec1e5bf03db04fb1220ec57de137d | 9,624 | py | Python | _OutputOshaberi/train.py | Geson-anko/JARVIS3 | bc599a352401a7e135ebaabead4d8e6d8835747e | [
"MIT"
] | null | null | null | _OutputOshaberi/train.py | Geson-anko/JARVIS3 | bc599a352401a7e135ebaabead4d8e6d8835747e | [
"MIT"
] | null | null | null | _OutputOshaberi/train.py | Geson-anko/JARVIS3 | bc599a352401a7e135ebaabead4d8e6d8835747e | [
"MIT"
] | null | null | null | from MemoryManager import MemoryManager
from MasterConfig import Config as mconf
from Sensation6 import Sensation
from Sensation6.sensation_models import KikitoriEncoder,Encoder
from Sensation6.torch_KMeans import torch_KMeans
from Sensation6.config import config
from .output import Output
from .output_models import Oshaberi,TextGenerator
from torch_model_fit import Fit
import numpy as np
import multiprocessing as mp
import torch
import sentencepiece as spm
import os
from pydub import AudioSegment
from typing import Tuple,Union
class Train(MemoryManager):
    """Offline trainer for the Oshaberi speech model and the TextGenerator.

    ``activation`` is the entry point: it runs both training phases
    sequentially, saving updated weights back to their files.  The
    ``Get*Data`` helpers build the training tensors from recorded voice
    files and a text corpus.
    """
    MemoryFormat = Sensation.MemoryFormat
    LogTitle:str = f'TrainOshaberi'

    def __init__(self,device:torch.device,debug_mode:bool=False) -> None:
        """Set up logging, the target device, training dtype and fit helper."""
        super().__init__(log_title=self.LogTitle, debug_mode=debug_mode)
        self.device = torch.device(device)
        self.dtype = Sensation.Training_dtype
        self.fit = Fit(self.LogTitle,debug_mode)

    def activation(self,shutdown:mp.Value,sleep:mp.Value) -> None:
        """Train Oshaberi, then TextGenerator; abort between phases when
        ``shutdown`` is set or ``sleep`` is cleared."""
        # ----- Oshaberi Training ------
        data,ans = self.GetOshaberiData()
        self.log('shape of data shape',data.shape,ans.shape)
        model = Oshaberi()
        # Resume from the previously saved weights.
        model.load_state_dict(torch.load(Output.Oshaberi_file,map_location=self.device))
        batch_size = Output.OshaberiBatchSize
        epochs = Output.OshaberiEpochs
        criterion = torch.nn.MSELoss()
        optimizer = torch.optim.Adam(model.parameters(),lr=Output.OshaberiLearningRate)
        self.fit.Train(
            shutdown,sleep,
            model=model,
            epochs=epochs,
            batch_size=batch_size,
            optimizer=optimizer,
            criterion=criterion,
            device=self.device,
            train_x=data,
            train_y=ans,
        )
        # Persist updated weights, then free memory before the next phase.
        torch.save(model.state_dict(),Output.Oshaberi_file)
        del model,data,ans
        self.release_system_memory()
        self.log('trained Oshaberi.')
        # --- end of Oshaberi Training ---
        if shutdown.value or not sleep.value:
            self.log('Train process was stopped')
            return
        # ------ TextGenerator Training -------
        current,memory,answer= self.GetTextGeneratorData()
        self.log('shape of current,memory,answer',current.shape,memory.shape,answer.shape)
        model = TextGenerator()
        model.load_state_dict(torch.load(Output.TextGenerator_file,map_location=self.device))
        batch_size =Output.TextGeneratorBatchSize
        epochs = Output.TextGeneratorEpochs
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(),lr=Output.TextGeneratorLearningRate)
        self.fit.Train(
            shutdown,sleep,
            model=model,
            epochs=epochs,
            batch_size=batch_size,
            optimizer=optimizer,
            criterion=criterion,
            device=self.device,
            train_x=[current,memory],
            train_y=answer,
            metrics=self.fit.CE_Accuracy,
        )
        torch.save(model.state_dict(),Output.TextGenerator_file)
        del model,current,memory,answer
        self.log('trained Text Generator.')
        self.release_system_memory()
        # --- end of TextGenerator Training ---
        if shutdown.value or not sleep.value:
            self.log('Train process was stopped')
            return

    @torch.no_grad()
    def GetOshaberiData(self) -> Tuple[torch.Tensor,...]:
        """Build (input, target) tensors for Oshaberi from recorded voice files."""
        # The KikitoriEncoder plus KMeans centroids convert raw audio into
        # the same discrete representation used elsewhere in Sensation6.
        self.kencoder = KikitoriEncoder().to(self.device).type(torch.float32)
        self.kencoder.load_state_dict(torch.load(Sensation.KikitoriEncoder_params,map_location=self.device))
        self.kencoder.eval()
        self.centroids = torch.load(Sensation.Centroids_file,map_location=self.device).to(self.device)
        self.kmeans = torch_KMeans(0)
        self.inlen = int(config.speak_second*config.frame_rate)
        self.outlen = int(config.speak_seq_len*self.centroids.size(-1))
        self.batch_size = 512
        self.log('loaded KikitoriEncoder,Centroids,')
        files = [os.path.join(Output.Voice_folder,i) for i in os.listdir(Output.Voice_folder)]
        self.log('Using file number is',len(files))
        data,ans = [],[]
        # NOTE(review): only the first voice file is used (files[:1]) —
        # confirm whether this truncation is intentional.
        for f in files[:1]:
            i,j = self.get_a_oshaberi_data(f)
            data.append(i)
            ans.append(j)
        data = torch.cat(data)[:Output.OshaberiDataSize]
        ans = torch.cat(ans)[:Output.OshaberiDataSize]
        # Drop the heavyweight helpers before training starts.
        del self.kencoder,self.centroids
        self.release_system_memory()
        return data,ans

    def get_a_oshaberi_data(self,indata) -> Tuple[torch.Tensor,...]:
        """Convert one audio file into (centroid-sequence input, waveform target)."""
        sound = AudioSegment.from_file(indata)
        # Normalize channel count / sample width to the configured format.
        if sound.channels != config.channels:
            sound = sound.set_channels(config.channels)
        if sound.sample_width != config.sample_width:
            sound = sound.set_sample_width(config.sample_width)
        # Input side: resample, zero-pad to a multiple of inlen, window into
        # recognize_length frames scaled by sample_range.
        insound = np.array(sound.set_frame_rate(config.frame_rate).get_array_of_samples())
        padlen = self.inlen - (insound.shape[0] % self.inlen)
        pad = np.zeros(padlen,dtype=insound.dtype)
        insound = np.concatenate([insound,pad]).reshape(-1,config.recognize_length)
        insound = (insound / config.sample_range).astype('float32')
        insound = torch.from_numpy(insound).unsqueeze(1)
        # Encode the frames and snap each one to its nearest KMeans centroid.
        encoded = Fit.Predict(self.kencoder,insound,self.batch_size,self.device)
        encoded = encoded.type(torch.float32).to(self.device)
        classes = self.kmeans.predict(self.centroids,encoded,batch_size=self.batch_size,device=self.device)
        data = self.centroids.to('cpu')[classes].type(torch.float16).view(-1,1,self.outlen)
        # Target side: the waveform resampled at speak_fps, padded and windowed.
        outsound = np.array(sound.set_frame_rate(config.speak_fps).get_array_of_samples())
        outsound = (outsound/config.speak_range).astype('float16')
        padlen = config.speak_length - (outsound.shape[0] % config.speak_length)
        pad = np.zeros(padlen,dtype=outsound.dtype)
        ans= np.concatenate([outsound,pad]).reshape(-1,1,config.speak_length)
        ans = torch.from_numpy(ans).type(self.dtype)
        return data,ans

    @torch.no_grad()
    def GetTextGeneratorData(self) -> Tuple[torch.Tensor,...]:
        """Build (current, memory, answer) tensors from a random corpus sample."""
        self.FTmodel = self.load_python_obj(Sensation.FastText_file)
        self.separator = spm.SentencePieceProcessor()
        self.separator.Load(Sensation.Separator_file)
        self.bos_id = self.separator.bos_id()
        self.eos_id = self.separator.eos_id()
        self.unk_id = self.separator.unk_id()
        self.bos = self.separator.IdToPiece(self.bos_id)
        self.eos = self.separator.IdToPiece(self.eos_id)
        self.encoder = Encoder().to(self.device).type(self.dtype)
        self.encoder.load_state_dict(torch.load(Sensation.Encoder_params,map_location=self.device))
        self.encoder.eval()
        # Zero vector used to pad word sequences to fixed lengths.
        self.padvec = np.zeros((config.word_dim,),dtype='float16')
        self.log('loaded FastText Sentencepiece,TextEncoder')
        with open(Sensation.Corpus_file,'r',encoding='utf-8') as f:
            corpus = f.read().split('\n')
        # Random subsample of corpus lines, capped at CorpusUseLength.
        idx = np.random.permutation(len(corpus))[:Output.CorpusUseLength]
        corpus = [corpus[i] for i in idx]
        self.log('Using corpus length is',len(corpus))
        cur,mem,ans = [],[],[]
        for c in corpus:
            i,j,k = self.get_a_textgenerator_data(c)
            cur.append(i)
            mem.append(j)
            ans.append(k)
        cur = torch.cat(cur)[:Output.TextGeneratorDataSize]
        mem = torch.cat(mem)[:Output.TextGeneratorDataSize]
        ans = torch.cat(ans)[:Output.TextGeneratorDataSize]
        del self.FTmodel,self.separator,corpus,self.encoder,self.padvec
        self.release_system_memory()
        return cur,mem,ans

    def get_a_textgenerator_data(self,indata:str) -> Tuple[torch.Tensor,...]:
        """Turn one corpus line into (current, memory, answer) training samples."""
        pieces = [self.bos,*self.separator.EncodeAsPieces(indata),self.eos]
        piecesid,vectors = [],[]
        # Keep only pieces that have both a FastText vector and a known id.
        for i in pieces:
            pid =self.separator.PieceToId(i)
            if i in self.FTmodel.wv and pid != self.separator.unk_id():
                piecesid.append(pid)
                vectors.append(self.FTmodel.wv[i])
        # Targets: the id of the *next* piece at each position.
        answer = np.array(piecesid[1:],dtype='int64')
        answer = torch.from_numpy(answer)
        current= []
        veclen = len(vectors)
        # Inputs: for each position, the preceding word vectors padded or
        # truncated to generate_max_words.
        for i in range(1,veclen):
            vec = vectors[:i]
            if len(vec) < config.generate_max_words:
                vec += [self.padvec]*(config.generate_max_words - len(vec))
            else:
                vec = vec[-config.generate_max_words:]
            current.append(np.stack(vec))
        current = np.stack(current)
        current = torch.from_numpy(current).type(self.dtype)
        # Memory: encode sliding text_seq_len windows with the text Encoder.
        memvec = []
        for i in range(veclen):
            vec = vectors[i:i+config.text_seq_len]
            if len(vec) < config.text_seq_len:
                vec += [self.padvec] * (config.text_seq_len- len(vec))
            memvec.append(np.stack(vec))
        memvec = torch.from_numpy(np.stack(memvec)).type(self.dtype)
        memvec = Fit.Predict(self.encoder,memvec,Output.MaxSamples,self.device)
        mvl = memvec.size(0)
        # Tile when there are fewer memory vectors than use_mem_len.
        if mvl < config.use_mem_len:
            memvec = memvec.repeat(((config.use_mem_len//mvl)+1,1))
            mvl = memvec.size(0)
        # A random use_mem_len-sized memory set for every training sample.
        memory = torch.stack([memvec[np.random.permutation(mvl)[:config.use_mem_len]] for _ in range(current.size(0))])
        # Randomly keep at most MaxSamples positions from this line.
        sample_idx = np.random.permutation(answer.size(0))[:Output.MaxSamples]
        memory = memory[sample_idx]
        current = current[sample_idx]
        answer = answer[sample_idx]
        return current,memory,answer
| 41.662338 | 119 | 0.64682 |
992d0765381f15774b31cfcf41cc17079d35d1ff | 1,449 | py | Python | tests/python/pants_test/core_tasks/test_substitute_target_aliases_integration.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | 1 | 2020-06-13T22:01:39.000Z | 2020-06-13T22:01:39.000Z | tests/python/pants_test/core_tasks/test_substitute_target_aliases_integration.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/core_tasks/test_substitute_target_aliases_integration.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | 2 | 2020-05-18T18:43:11.000Z | 2020-05-19T02:47:47.000Z | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
class AliasTargetIntegrationTest(PantsRunIntegrationTest):
    """End-to-end checks for ``alias`` targets in BUILD files."""

    test_module = "testprojects/src/java/org/pantsbuild/testproject/aliases"

    def test_jvm_binary_alias(self):
        """Running an alias of a jvm_binary runs the underlying binary."""
        pants_run = self.run_pants(["run", f"{self.test_module}:convenient"])
        self.assert_success(pants_run)
        self.assertIn("AliasedBinaryMain is up and running.", pants_run.stdout_data)

    def test_intransitive_target_alias(self):
        """An alias pointing at an intransitive target is still runnable."""
        pants_run = self.run_pants(["run", f"{self.test_module}:run-use-intransitive"])
        self.assert_success(pants_run)

    def test_alias_missing_target(self):
        """An alias with no `target` field fails and names the offending alias."""
        with self.file_renamed(self.test_module, "TEST_NO_TARGET", "BUILD.test"):
            pants_run = self.run_pants(["bootstrap", f"{self.test_module}::"])
            self.assert_failure(pants_run)
            self.assertIn('must have a "target"', pants_run.stderr_data)
            self.assertIn("aliases:missing-target", pants_run.stderr_data)

    def test_alias_missing_name(self):
        """An alias with no name is reported with a `?` placeholder."""
        with self.file_renamed(self.test_module, "TEST_NO_NAME", "BUILD.test"):
            pants_run = self.run_pants(["bootstrap", f"{self.test_module}::"])
            self.assert_failure(pants_run)
            self.assertIn("aliases:?", pants_run.stderr_data)
| 45.28125 | 86 | 0.711525 |
c5431177b2a5e2790f2b1d922fee4d2c002becda | 935 | py | Python | data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/gen_data.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/gen_data.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/gen_data.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import os
import subprocess
# Build a small per-class sample of the Kaggle Microsoft Malware data: for
# each of the 9 class labels, copy the first file seen into train/ and the
# second into test/, writing matching label files.  Expects the full data
# one directory up ('..').
seen_in_train = set()  # labels already represented in the train sample
seen_in_test = set()   # labels already represented in the test sample
path = '..'

# Context managers close all four files even if a copy/getsize fails
# (the original leaked the handles on any exception).
with open(path + '/trainLabels.csv') as f, \
     open('trainLabels.csv', 'w') as fo, \
     open(path + '/sampleSubmission.csv') as f1, \
     open('sampleSubmission.csv', 'w') as fo1:
    # Copy the header rows verbatim.
    fo.write(f.readline())
    fo1.write(f1.readline())
    for line in f:
        xx = line.split(',')
        label = int(xx[-1])
        idx = xx[0][1:-1]  # strip the surrounding quotes from the file id
        if label not in seen_in_train:
            # getsize also asserts the .asm file exists before copying.
            size = os.path.getsize(path + '/train/' + idx + '.asm')
            #if size>1000000:
            #    continue
            seen_in_train.add(label)
            # NOTE: shell=True with a concatenated path — fine for this
            # offline helper, but the ids are interpolated unquoted.
            cmd = 'cp ' + path + '/train/' + idx + '.* train/'
            subprocess.call(cmd, shell=True)
            fo.write(line)
        elif label not in seen_in_test:
            seen_in_test.add(label)
            cmd = 'cp ' + path + '/train/' + idx + '.* test/'
            subprocess.call(cmd, shell=True)
            # One-hot submission row: column (label-1) set to '1'.
            fo1.write(idx + ',' + ','.join(['0' if i != label - 1 else '1' for i in range(9)]) + '\n')
| 25.972222 | 89 | 0.542246 |
1aadb2fa87d7c4822c5fd54c6b02836b376f35e2 | 3,057 | py | Python | src/aws_cloudwatch_log_minder/delete_empty_log_groups.py | owitplat/aws-cloudwatch-log-minder | b33dbaee2b46b0e65371b6ca17bd0259cca31bc7 | [
"Apache-2.0"
] | 51 | 2019-09-17T20:43:09.000Z | 2022-03-29T10:50:35.000Z | src/aws_cloudwatch_log_minder/delete_empty_log_groups.py | aagutu/aws-cloudwatch-log-minder | 1ffc750eb50632405a6bd3441054241f1cb155dc | [
"Apache-2.0"
] | 24 | 2019-10-11T20:55:17.000Z | 2022-03-04T09:30:07.000Z | src/aws_cloudwatch_log_minder/delete_empty_log_groups.py | aagutu/aws-cloudwatch-log-minder | 1ffc750eb50632405a6bd3441054241f1cb155dc | [
"Apache-2.0"
] | 26 | 2019-10-12T09:02:25.000Z | 2022-03-01T08:30:43.000Z | import json
from datetime import datetime, timedelta
from typing import List
import boto3
from botocore.config import Config
from .logger import log
cw_logs = None
def delete_empty_log_groups(
    log_group_name_prefix: str = None,
    dry_run: bool = False,
    region: str = None,
    profile: str = None,
):
    """Delete every CloudWatch log group (optionally under a prefix) that
    contains no log streams.

    Args:
        log_group_name_prefix: only consider groups whose name starts with this.
        dry_run: when True, only log what would be deleted.
        region: boto3 session region override.
        profile: boto3 session profile override.

    Side effect: rebinds the module-global ``cw_logs`` client that the other
    helpers in this module use.
    """
    global cw_logs
    boto_session = boto3.Session(region_name=region, profile_name=profile)
    cw_logs = boto_session.client("logs", config=Config(retries=dict(max_attempts=10)))

    kwargs = {"PaginationConfig": {"PageSize": 50}}
    if log_group_name_prefix:
        kwargs["logGroupNamePrefix"] = log_group_name_prefix
        log.info("finding log groups with prefix %r", log_group_name_prefix)

    for page in cw_logs.get_paginator("describe_log_groups").paginate(**kwargs):
        for group in page["logGroups"]:
            log_group_name = group["logGroupName"]
            # limit=1: we only need to know whether *any* stream exists,
            # not the full (default-sized) page of stream descriptions.
            # (Also fixes the original shadowing of the paginator variable.)
            streams = cw_logs.describe_log_streams(
                logGroupName=log_group_name, limit=1
            )
            if len(streams["logStreams"]) == 0:
                log.info(
                    "%s deleting empty log group %s",
                    ("dry run" if dry_run else ""),
                    log_group_name,
                )
                if dry_run:
                    continue
                cw_logs.delete_log_group(logGroupName=log_group_name)
            else:
                log.info(
                    "%s keeping log group %s as it is not empty",
                    ("dry run" if dry_run else ""),
                    log_group_name,
                )
def get_all_log_group_names() -> List[str]:
    """Return the names of every log group visible to the ``cw_logs`` client."""
    names: List[str] = []
    paginator = cw_logs.get_paginator("describe_log_groups")
    for page in paginator.paginate(PaginationConfig={"PageSize": 50}):
        names.extend(group["logGroupName"] for group in page["logGroups"])
    return names
def fan_out(function_arn: str, log_group_names: List[str], dry_run: bool):
    """Asynchronously re-invoke this Lambda once per log group.

    Each invocation receives a single-group prefix, so the per-group work is
    spread over many short Lambda executions instead of one long one.
    """
    awslambda = boto3.client("lambda")
    log.info(
        "recursively invoking %s to delete empty groups from %d log groups",
        function_arn,
        len(log_group_names),
    )
    for name in log_group_names:
        event = {"log_group_name_prefix": name, "dry_run": dry_run}
        awslambda.invoke(
            FunctionName=function_arn,
            InvocationType="Event",
            Payload=json.dumps(event),
        )
def handle(request, context):
    """Lambda entry point.

    With a `log_group_name_prefix` in the event, deletes empty groups under
    that prefix; without one, fans out one async invocation per log group.
    """
    global cw_logs
    cw_logs = boto3.client("logs", config=Config(retries=dict(max_attempts=10)))

    dry_run = request.get("dry_run", False)
    if "dry_run" in request and not isinstance(dry_run, bool):
        raise ValueError(f"'dry_run' is not a boolean value, {request}")

    prefix = request.get("log_group_name_prefix")
    if prefix:
        delete_empty_log_groups(prefix, dry_run)
    else:
        fan_out(context.invoked_function_arn, get_all_log_group_names(), dry_run)
| 31.84375 | 87 | 0.620216 |
016ed9d2efd9c6673988e75615e156e3013625dc | 6,275 | py | Python | deployment/train/train.py | christian-steinmeyer/deep-learning-v2-pytorch | 04e7a994f461dd69b6c119056be86f411ef74ce4 | [
"MIT"
] | null | null | null | deployment/train/train.py | christian-steinmeyer/deep-learning-v2-pytorch | 04e7a994f461dd69b6c119056be86f411ef74ce4 | [
"MIT"
] | null | null | null | deployment/train/train.py | christian-steinmeyer/deep-learning-v2-pytorch | 04e7a994f461dd69b6c119056be86f411ef74ce4 | [
"MIT"
] | null | null | null | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
def model_fn(model_dir):
    """Load the trained LSTMClassifier (and its word dictionary) from `model_dir`.

    SageMaker calls this at container start-up; `model_dir` holds the artifacts
    written at the end of training: model_info.pth (constructor hyperparameters),
    model.pth (state dict) and word_dict.pkl (vocabulary mapping).
    """
    print("Loading model.")

    # First, load the hyperparameters that were used to construct the model.
    # (The original pre-initialized `model_info = {}` — a dead assignment,
    # immediately overwritten — which is dropped here.)
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)
    print("model_info: {}".format(model_info))

    # Determine the device and construct the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])

    # Load the stored model parameters. map_location='cpu' lets a checkpoint
    # saved on a GPU host be restored on a CPU-only host; .to(device) below
    # moves the model onto the GPU when one is available.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f, map_location='cpu'))

    # Load the saved word_dict used to encode reviews at inference time.
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)

    model.to(device).eval()

    print("Done loading model.")
    return model
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()
train_ds = torch.utils.data.TensorDataset(train_X, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
def train(model, train_loader, epochs, optimizer, loss_fn, device):
    """Run the standard training loop and print the mean loss per epoch.

    Args:
        model: the PyTorch model to train (mutated in place).
        train_loader: DataLoader yielding (inputs, targets) batches.
        epochs: total number of passes over the training data.
        optimizer: optimizer stepping the model's parameters.
        loss_fn: loss function applied to (prediction, target).
        device: torch.device the batches should be moved to.
    """
    for epoch in range(1, epochs + 1):
        model.train()
        running_loss = 0
        for batch_X, batch_y in train_loader:
            batch_X, batch_y = batch_X.to(device), batch_y.to(device)

            optimizer.zero_grad()
            output = model(batch_X)
            loss = loss_fn(output, batch_y)
            loss.backward()
            optimizer.step()

            running_loss += loss.data.item()
        print("Epoch: {}, BCELoss: {}".format(epoch, running_loss / len(train_loader)))
if __name__ == '__main__':
    # All of the model parameters and training parameters are sent as arguments when the script
    # is executed. Here we set up an argument parser to easily access the parameters.
    parser = argparse.ArgumentParser()

    # Training Parameters
    parser.add_argument('--batch-size', type=int, default=512, metavar='N',
                        help='input batch size for training (default: 512)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    # Model Parameters
    parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',
                        help='size of the word embeddings (default: 32)')
    parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',
                        help='size of the hidden dimension (default: 100)')
    parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',
                        help='size of the vocabulary (default: 5000)')

    # SageMaker Parameters: the SM_* environment variables are injected by the
    # SageMaker training container, so these defaults only resolve when running
    # inside (or simulating) that environment.
    parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
    parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])

    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device {}.".format(device))

    torch.manual_seed(args.seed)

    # Load the training data.
    train_loader = _get_train_data_loader(args.batch_size, args.data_dir)

    # Build the model and attach the pickled vocabulary produced during
    # preprocessing (model_fn expects word_dict alongside the weights).
    model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)

    with open(os.path.join(args.data_dir, "word_dict.pkl"), "rb") as f:
        model.word_dict = pickle.load(f)

    print("Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.".format(
        args.embedding_dim, args.hidden_dim, args.vocab_size
    ))

    # Train the model.
    optimizer = optim.Adam(model.parameters())
    loss_fn = torch.nn.BCELoss()

    train(model, train_loader, args.epochs, optimizer, loss_fn, device)

    # Save the parameters used to construct the model, so model_fn can rebuild
    # an identically-shaped LSTMClassifier before loading the state dict.
    model_info_path = os.path.join(args.model_dir, 'model_info.pth')
    with open(model_info_path, 'wb') as f:
        model_info = {
            'embedding_dim': args.embedding_dim,
            'hidden_dim': args.hidden_dim,
            'vocab_size': args.vocab_size,
        }
        torch.save(model_info, f)

    # Save the word_dict
    word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'wb') as f:
        pickle.dump(model.word_dict, f)

    # Save the model parameters (moved to CPU so the checkpoint loads anywhere)
    model_path = os.path.join(args.model_dir, 'model.pth')
    with open(model_path, 'wb') as f:
        torch.save(model.cpu().state_dict(), f)
| 37.801205 | 107 | 0.656255 |
05b031bbb883d8fe0908178a4eb432e961f4956e | 478 | py | Python | 2018/12b.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | 4 | 2018-12-04T23:33:46.000Z | 2021-12-07T17:33:27.000Z | 2018/12b.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | 17 | 2018-12-12T23:32:09.000Z | 2020-01-04T15:50:31.000Z | 2018/12b.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pytest
DAY=12
def get_sum(i):
    """Return the pot-number sum after *i* generations (AoC 2018 day 12, part 2).

    From generation 7447 onward the simulation grows linearly: the sum is
    485011 at i == 7447 and increases by exactly 65 per generation, so any
    later generation can be extrapolated in O(1).

    Raises:
        ValueError: if *i* is before the linear regime (i < 7447).
    """
    # Explicit exception instead of `assert`, which is stripped under `-O`.
    if i < 7447:
        raise ValueError("closed form only valid for i >= 7447, got {}".format(i))
    return 485011 + (i - 7447) * 65
def test_get_sum():
    """The anchor point and the per-generation increment of 65 must hold."""
    base = get_sum(7447)
    assert base == 485011
    assert get_sum(7447 + 5) == base + 65 * 5
if __name__ == '__main__':
    # Observed while running the full simulation loop and printing progress:
    #   i: 7447 -> Sum: 485011, Diff: 65
    # i.e. from generation 7447 onward the sum increases by exactly 65 per
    # iteration, so the answer for 50 billion generations is extrapolated.
    print(f'Answer: {get_sum(50000000000)}')
| 19.12 | 91 | 0.675732 |
4679c38e2f720c37f303f8e837126e5157de23a1 | 378 | py | Python | backend/app/tests/api.py | sweeneyngo/furspect | 2753fd9c7a3436fe1e058707b4d9037e34561c26 | [
"MIT"
] | null | null | null | backend/app/tests/api.py | sweeneyngo/furspect | 2753fd9c7a3436fe1e058707b4d9037e34561c26 | [
"MIT"
] | 7 | 2021-08-10T06:31:07.000Z | 2021-08-22T02:39:38.000Z | backend/app/tests/api.py | sweeneyngo/furspect | 2753fd9c7a3436fe1e058707b4d9037e34561c26 | [
"MIT"
] | null | null | null | from flask.ext.testing import TestCase
from app.shared.models import db
class MyTest(TestCase):
    # flask-testing TestCase configured against a transient in-memory SQLite
    # database, so each test run starts from an empty schema.
    SQLALCHEMY_DATABASE_URI = "sqlite://"
    TESTING = True

    def create_app(self):
        # pass in test configuration
        # NOTE(review): `create_app` is not defined or imported at module level
        # in this file, so this call will raise NameError as written — confirm
        # the intended app-factory import (likely `from app import create_app`).
        return create_app(self)

    def setUp(self):
        # Create all tables before every test.
        db.create_all()

    def tearDown(self):
        # Drop the session and schema so tests stay isolated.
        db.session.remove()
        db.drop_all()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.