id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6691170 | nums = [float(x) for x in input("Please enter three numbers: ").split()]
print("Average is %0.3f" % (sum(nums)/3, )) | StarcoderdataPython |
9612288 | from utils import hex_to_lbin, lbin_to_hex
def ccf(cpu):
    """CCF: complement the carry flag (flag pattern -00c).

    Per the flag layout used here, bit index 4 of the decoded F register is
    the carry flag and index 7 is the zero flag; Z is preserved, the other
    written bits are cleared, and C is inverted.
    """
    # Decode F once instead of twice (resolves the old TODO about the
    # duplicated flag extraction).
    f_bits = hex_to_lbin(cpu.registers['F'])
    carry, z = f_bits[4], f_bits[7]
    carry = 0 if carry else 1
    cpu.registers['F'] = lbin_to_hex([0, 0, 0, 0, carry, 0, 0, z])
def scf(cpu):
    """SCF: set the carry flag (flag pattern -001); the zero flag is kept."""
    zero_bit = hex_to_lbin(cpu.registers['F'])[7]
    cpu.registers['F'] = lbin_to_hex([0, 0, 0, 0, 1, 0, 0, zero_bit])
def nop(cpu):
    # nop: no operation.
    pass

def halt(cpu):
    # HALT: not implemented yet (placeholder).
    pass

def stop(cpu):
    # stop: not implemented yet (placeholder).
    pass

def di(cpu):
    # di: disable interrupts - not implemented yet (placeholder).
    pass

def ei(cpu):
    # ei: enable interrupts - not implemented yet (placeholder).
    pass
| StarcoderdataPython |
12820882 | import ipaddress
import click
from tabulate import tabulate # pylint: disable=import-error
from helpers.api import APIv2 # pylint: disable=import-error,no-name-in-module
from helpers.logger import Logger # pylint: disable=import-error,no-name-in-module
from helpers.config import config # pylint: disable=import-error,no-name-in-module
api = APIv2()  # shared API client used by every subcommand below
log = Logger().get_logger()  # module-wide logger

@click.group()
def layer7():
    """Methods to access and modify the layer 7 filter settings"""
@click.command(name="set-routing")
@click.argument("routing_type", type=click.Choice(['only_on','only_off','activate','deactivate']))
@click.argument("ipv4", default=config.get("CB_DEFAULT_IP"))
def l7_routing(routing_type, ipv4):
    """Set the Layer 7 routing mode of the specified IPv4
    Valid routing types are only_on, only_off, activate, deactivate"""
    try:
        # Validate/normalise the address first; raises ValueError on bad input.
        ipaddr = str(ipaddress.ip_address(ipv4))
        response = api.request(component="antiddos", method="layer7",
                               action="routing", routing=routing_type, ipaddr=ipaddr)
        if "status" in response:
            res_status = api.parse_status(response["status"])
            log.log(res_status["level"], res_status["message"])
        else:
            # Fixed typo in the user-facing message ("occured" -> "occurred").
            log.fatal("An unknown error occurred.")
    except ValueError as error:
        # AddressValueError and NetmaskValueError are ValueError subclasses,
        # so catching ValueError covers all three of the original cases.
        log.error(error)
layer7.add_command(l7_routing)
@click.command(name="domain-add")
@click.argument("domain")
@click.argument("protector", type=click.Choice(['aes','button','captcha']), default='button')
def l7_domain_add(domain, protector):
    """Adds a DOMAIN to the layer 7 filtering infrastructure, optionally setting the PROTECTOR
    DOMAIN is a FQDN, whose A-Record points to an IPv4 address owned by your account.
    PROTECTOR is one of aes, button, captcha - defaults to button.
    """
    try:
        reply = api.request(component="antiddos", method="layer7",
                            action="add", domain=domain, protector=protector)
        if "status" not in reply:
            log.fatal("An unknown error occured.")
        else:
            parsed = api.parse_status(reply["status"])
            log.log(parsed["level"], parsed["message"])
    except ValueError as error:
        log.error(error)
layer7.add_command(l7_domain_add)
@click.command(name="domain-remove")
@click.argument("domain")
def l7_domain_remove(domain):
    """Removes a DOMAIN from the layer 7 filtering infrastructure.
    """
    try:
        reply = api.request(component="antiddos", method="layer7",
                            action="delete", domain=domain)
        if "status" not in reply:
            log.fatal("An unknown error occured.")
        else:
            parsed = api.parse_status(reply["status"])
            log.log(parsed["level"], parsed["message"])
    except ValueError as error:
        log.error(error)
layer7.add_command(l7_domain_remove)
@click.command(name="ssl-add")
@click.argument("domain")
@click.argument("certificate", type=click.File("r"))
@click.argument("private-key", type=click.File("r"))
@click.argument("protector", type=click.Choice(['aes','button','captcha']), default='button')
def l7_ssl_add(domain, certificate, private_key, protector):
    """Adds an SSL secured DOMAIN to the layer 7 filtering infrastructure, optionally setting the PROTECTOR.
    Requires the path to CERTIFICATE and PRIVATE-KEY. PROTECTOR is one of aes, button, captcha - defaults to button."""
    try:
        # Certificate and key files are read eagerly and sent in one request.
        response = api.request(component = 'antiddos', method = 'layer7', action = 'ssl_add', domain = domain, cert = certificate.read(), key = private_key.read(), protector = protector)
        if "status" in response:
            res_status = api.parse_status(response["status"])
            log.log(res_status["level"], res_status["message"])
        else:
            # NOTE(review): unlike the domain-* commands this echoes the raw
            # response instead of calling log.fatal - confirm intentional.
            click.echo(response)
    except ValueError as err:
        log.error(err)
layer7.add_command(l7_ssl_add)
@click.command(name="ssl-remove")
@click.argument("domain")
def l7_ssl_remove(domain):
    """Removes an SSL secured DOMAIN from the layer 7 filtering infrastructure."""
    try:
        response = api.request(component = 'antiddos', method = 'layer7', action = 'ssl_delete', domain = domain)
        if "status" in response:
            res_status = api.parse_status(response["status"])
            log.log(res_status["level"], res_status["message"])
        else:
            # NOTE(review): echoes the raw response on a missing "status" key,
            # unlike the domain-* commands which log.fatal - confirm intentional.
            click.echo(response)
    except ValueError as err:
        log.error(err)
layer7.add_command(l7_ssl_remove)
@click.command(name="ssl-view")
@click.option("-f", "--format", "output_format", type=click.Choice(['table','json'], case_sensitive=False), default='table')
def l7_ssl_view(output_format):
    """Shows all SSL secured DOMAINs on the layer 7 filtering infrastructure."""
    try:
        response = api.request(component='antiddos', method='layer7', action='ssl_view')
        if "status" not in response:
            # Success: render the domain list in the requested format.
            if output_format == "json":
                click.echo(response)
            elif output_format == "table":
                rows = ([entry["domain"]] for entry in response)
                click.echo(tabulate(rows, headers=["Domains"]))
        else:
            status = api.parse_status(response["status"])
            log.log(status["level"], status["message"])
    except ValueError as err:
        log.error(err)
layer7.add_command(l7_ssl_view)
| StarcoderdataPython |
4921539 | #
# Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from fdk import constants
def get_delayed_module_init_class():
    """Pick the delayed-import implementation for the running interpreter."""
    if constants.is_py37():
        return Python37DelayedImport
    return Python35plusDelayedImport
class PythonDelayedImportAbstraction(object):
    """Base class for lazily importing a function module from a file path.

    Subclasses implement get_module(); the `executed` flag tracks whether
    the import has already happened.
    """

    def __init__(self, func_module_path):
        # Path of the module that will be imported on first use.
        self._mod_path = func_module_path
        self._executed = False

    @property
    def executed(self):
        """Whether the module import has already been performed."""
        return self._executed

    @executed.setter
    def executed(self, done):
        self._executed = done

    def get_module(self):
        # Subclasses must override this with the actual import logic.
        raise Exception("Not implemented")
class Python35plusDelayedImport(PythonDelayedImportAbstraction):
    """Delayed import via SourceFileLoader (pre-3.7 code path)."""

    def __init__(self, func_module_path):
        # Cached module object, populated on first get_module() call.
        self._func_module = None
        super(Python35plusDelayedImport, self).__init__(func_module_path)

    def get_module(self):
        """Import the module on first call and return it (cached afterwards)."""
        if not self.executed:
            from importlib.machinery import SourceFileLoader
            fname, ext = os.path.splitext(
                os.path.basename(self._mod_path))
            # NOTE(review): load_module() is deprecated (removed in Python
            # 3.12); the spec-based path in Python37DelayedImport is the
            # modern equivalent - consider unifying.
            self._func_module = SourceFileLoader(fname, self._mod_path)\
                .load_module()
            self.executed = True
        return self._func_module
class Python37DelayedImport(PythonDelayedImportAbstraction):
    """Delayed import using importlib's spec-based loading (3.7+)."""

    def import_from_source(self):
        # Build (but do not yet execute) a module object from the source file.
        from importlib import util
        spec = util.spec_from_file_location(
            "func", self._mod_path
        )
        self._func_module_spec = spec
        self._func_module = util.module_from_spec(spec)

    def get_module(self):
        """Import the module on first call and return it (cached afterwards)."""
        if not self.executed:
            self.import_from_source()
            self._func_module_spec.loader.exec_module(self._func_module)
            self.executed = True
        return self._func_module
class Function(object):
    """Wraps a function module path and resolves its entrypoint lazily."""

    def __init__(self, func_module_path, entrypoint="handler"):
        delayed_cls = get_delayed_module_init_class()
        self._delayed_module_class = delayed_cls(func_module_path)
        self._entrypoint = entrypoint

    def handler(self):
        """Return the entrypoint callable from the (lazily imported) module."""
        module = self._delayed_module_class.get_module()
        return getattr(module, self._entrypoint)
| StarcoderdataPython |
140460 | import numpy as np
import teaserpp_python
from Config import Config
import gtsam as gt
from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2,
NonlinearFactorGraph, NonlinearISAM, Pose3,
PriorFactorPoint3, PriorFactorPose3, Rot3,
PinholeCameraCal3_S2, Values, Point3) # symbol_shorthand_X, symbol_shorthand_L)
from gtsam.symbol_shorthand import X, L
import matplotlib.pyplot as plt
# import g2o
# class PoseOptimizer(g2o.SparseOptimizer):
# def __init__(self, ):
# super().__init__()
# solver = g2o.BlockSolverX(g2o.LinearSolverDenseX())
# solver = g2o.OptimizationAlgorithmLevenberg(solver)
# super().set_algorithm(solver)
# self.edge_list = []
# self.edge_outlier = np.array([], dtype=bool)
# self.v_se3 = g2o.VertexSE3Expmap()
# self.v_se3.set_id(0) # internal id
# self.v_se3.set_fixed(False)
# super().add_vertex(self.v_se3)
# self.pose = []
# self.inv_lvl_sigma2 = np.zeros((8,), dtype=np.float)
# for idx in np.arange(8):
# self.inv_lvl_sigma2[idx] = 1./1.2**(2*idx-2)
#
# def optimize(self, max_iterations=10):
# self.edge_outlier = np.full(len(self.edge_list), False)
# for iteration in range(4):
# # self.v_se3.set_estimate(self.pose)
# super().initialize_optimization(0)
# super().optimize(max_iterations)
# print("ITER", self.vertex(0).estimate().to_vector())
# print("Initial Correspondence: ", np.count_nonzero(1-self.edge_outlier))
# n_bad = 0
# for idx in range(len(self.edge_list)):
# e = self.edge_list[idx]
# e.compute_error()
# chi2 = e.chi2()
# # print("Iter ", iteration, "Chi: " ,chi2)
# if chi2 > 7.815:
# self.edge_outlier[idx] = True
# e.set_level(1)
# n_bad += 1
# else:
# self.edge_outlier[idx] = False
# e.set_level(0)
# if iteration == 2:
# e.set_robust_kernel(None)
#
# print("NUM BADS: ", n_bad, ":", len(self.edge_list))
# return self.edge_outlier
#
# def add_pose(self, pose, fixed=False):
# self.v_se3.set_estimate(pose)
# self.pose = pose
#
# def add_point(self, world_pos,
# measurement_cam,
# octave,
# robust_kernel=g2o.RobustKernelHuber(np.sqrt(7.815))): # ??% CI
#
# edge = g2o.EdgeStereoSE3ProjectXYZOnlyPose()
# edge.set_vertex(0, self.vertex(0))
#
# fx = Config().fx
# fy = Config().fy
# cx = Config().cx
# cy = Config().cy
# bf = Config().bf
#
# edge.fx = fx
# edge.fy = fy
# edge.cx = cx
# edge.cy = cy
# edge.bf = bf
# edge.Xw = world_pos
#
# edge.set_measurement(measurement_cam) # projection
# information = self.inv_lvl_sigma2[octave]*np.identity(3)
# edge.set_information(information)
#
# if robust_kernel is not None:
# edge.set_robust_kernel(robust_kernel)
#
# super().add_edge(edge)
#
# self.edge_list.append(edge)
#
# def get_pose(self):
# return self.vertex(0).estimate()
class PoseOptimizerTeaser:
    """Rigid registration between two 3D point sets using TEASER++ (GNC-TLS)."""

    def __init__(self):
        self.NOISE_BOUND = 0.1  # 0.05
        self.solver_params = teaserpp_python.RobustRegistrationSolver.Params()
        self.solver_params.cbar2 = 0.6  # 1
        self.solver_params.noise_bound = self.NOISE_BOUND
        # Pure rigid transform: scale estimation disabled.
        self.solver_params.estimate_scaling = False
        self.solver_params.rotation_estimation_algorithm = \
            teaserpp_python.RobustRegistrationSolver.ROTATION_ESTIMATION_ALGORITHM.GNC_TLS
        self.solver_params.rotation_gnc_factor = 1.4
        self.solver_params.rotation_max_iterations = 200
        self.solver_params.rotation_cost_threshold = 1e-12
        self.solver = teaserpp_python.RobustRegistrationSolver(self.solver_params)

    def optimize(self, src, dst):
        """Solve for the rigid transform mapping src onto dst.

        Returns a 4x4 homogeneous transformation matrix [R | t; 0 0 0 1].
        """
        # start = time.time()
        self.solver.solve(src, dst)
        # end = time.time()
        solution = self.solver.getSolution()
        # Assemble the 3x4 [R | t] block, then append the homogeneous row.
        trans = np.hstack((solution.rotation, np.expand_dims(solution.translation, axis=1)))
        trans = np.concatenate((trans, np.expand_dims(np.array([0, 0, 0, 1]), axis=1).T), axis=0)
        return trans
class PoseOptimizerGTSAM:
    """Single-pose optimizer over mono/stereo projection factors using GTSAM.

    Landmarks are held fixed via NonlinearEquality constraints; only the
    pose X(1) is optimized. Outlier factors are pruned over four LM rounds
    with progressively tighter chi2 thresholds (ORB-SLAM style).
    """

    def __init__(self):
        fx = Config().fx
        fy = Config().fy
        cx = Config().cx
        cy = Config().cy
        bf = Config().bf
        # Create realistic calibration and measurement noise model
        # format: fx fy skew cx cy baseline
        baseline = bf / fx
        self.K_stereo = gt.Cal3_S2Stereo(fx, fy, 0.0, cx, cy, baseline)
        self.K_mono = gt.Cal3_S2(fx, fy, 0.0, cx, cy)
        # Huber deltas: sqrt of the 95% chi2 quantiles for 2 and 3 DOF.
        self.deltaMono = np.sqrt(5.991)
        self.deltaStereo = np.sqrt(7.815)
        # Points deeper than ~60x the stereo baseline are treated as mono.
        self.depth_threshold = bf / fx * 60
        # Create graph container and add factors to it
        self.graph = gt.NonlinearFactorGraph()
        # Create initial estimate for camera poses and landmarks
        self.initialEstimate = gt.Values()
        # Per-octave inverse measurement variance.
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        self.inv_lvl_sigma2 = np.zeros((8,), dtype=float)
        for idx in np.arange(8):
            self.inv_lvl_sigma2[idx] = 1. / 1.2 ** (2 * idx - 2)
        # point counter for landmarks and octave container
        self.counter = 1
        self.octave = []
        self.is_stereo = []

    def add_pose(self, R, t):
        """Insert the initial pose estimate X(1) from a 3x3 rotation R and a
        3-vector translation t."""
        t = t.reshape((3, 1))
        self.initialEstimate.insert(X(1), gt.Pose3(np.concatenate((R, t), axis=1)))

    def add_point(self, pointsInitial, measurements, octave):
        """Add a fixed landmark plus a mono or stereo projection factor.

        pointsInitial: 3D point (camera frame; last entry is depth).
        measurements: stereo tuple (uL, uR, v).
        octave: pyramid level, scales the measurement noise.
        """
        if pointsInitial[-1] > self.depth_threshold:
            # Far point: use a monocular projection factor on (uL, v) only.
            information = self.inv_lvl_sigma2[octave] * np.identity(2)
            stereo_model = gt.noiseModel_Diagonal.Information(information)
            huber = gt.noiseModel_mEstimator_Huber.Create(self.deltaMono)
            robust_model = gt.noiseModel_Robust(huber, stereo_model)
            factor = gt.GenericProjectionFactorCal3_S2(
                gt.Point2(measurements[0], measurements[2]), robust_model,
                X(1), L(self.counter), self.K_mono)
            self.is_stereo.append(False)
        else:
            information = self.inv_lvl_sigma2[octave] * np.identity(3)
            stereo_model = gt.noiseModel_Diagonal.Information(information)
            huber = gt.noiseModel_mEstimator_Huber.Create(self.deltaStereo)
            robust_model = gt.noiseModel_Robust(huber, stereo_model)
            factor = gt.GenericStereoFactor3D(
                gt.StereoPoint2(*tuple(measurements)), robust_model,
                X(1), L(self.counter), self.K_stereo)
            self.is_stereo.append(True)
        # Landmarks are pinned with equality constraints; only X(1) is free.
        self.graph.add(gt.NonlinearEqualityPoint3(L(self.counter), gt.Point3(pointsInitial)))
        self.initialEstimate.insert(L(self.counter), gt.Point3(pointsInitial))
        self.graph.add(factor)
        self.octave.append(octave)
        self.counter += 1

    def optimize(self, flag_verbose=False):
        """Run four LM rounds, pruning outlier factors between rounds.

        Returns (pose, edge_outlier): the optimized Pose3 for X(1) and a
        boolean array marking each landmark's factor as an outlier.
        """
        edge_outlier = np.full(self.counter - 1, False)
        error_th_stereo = [7.815, 7.815, 5, 5]
        error_th_mono = [5.991, 5.991, 3.5, 3.5]
        for iteration in range(4):
            if flag_verbose:
                errors = []
            optimizer = gt.LevenbergMarquardtOptimizer(self.graph, self.initialEstimate)
            result = optimizer.optimize()
            n_bad = 0
            if flag_verbose:
                print(f"Number of Factors: {self.graph.nrFactors()-self.graph.size()//2, self.graph.size()//2}")
            error_s = error_th_stereo[iteration]
            error_m = error_th_mono[iteration]
            # Factors alternate (equality, projection), so the projection
            # factor for landmark k lives at graph index 2k+1 and k == idx//2.
            for idx in range(1, self.graph.size(), 2):
                try:
                    # Fixed indexing: is_stereo holds one entry per landmark,
                    # so it must be indexed with idx//2 (the old code used the
                    # raw graph index, matching the wrong landmark and going
                    # out of range for the second half of the graph).
                    if self.is_stereo[idx // 2]:
                        factor = gt.dynamic_cast_GenericStereoFactor3D_NonlinearFactor(self.graph.at(idx))
                    else:
                        factor = gt.dynamic_cast_GenericProjectionFactorCal3_S2_NonlinearFactor(self.graph.at(idx))
                except Exception:
                    # Factor was removed in a previous round (or the cast
                    # failed); keep index alignment and move on.
                    if flag_verbose:
                        errors.append(0)
                    continue
                error = factor.error(result)
                if flag_verbose:
                    errors.append(error)
                if (self.is_stereo[idx // 2] and error > error_s) or \
                        (not self.is_stereo[idx // 2] and error > error_m):
                    edge_outlier[idx // 2] = True
                    self.graph.remove(idx)
                    n_bad += 1
                else:
                    edge_outlier[idx // 2] = False
                    if iteration == 2:
                        # After round 2, swap the robust (Huber) noise model
                        # for a plain Gaussian one on the surviving inliers.
                        if self.is_stereo[idx // 2]:
                            information = self.inv_lvl_sigma2[self.octave[idx // 2]] * np.identity(3)
                            stereo_model = gt.noiseModel_Diagonal.Information(information)
                            new_factor = gt.GenericStereoFactor3D(
                                factor.measured(), stereo_model, X(1),
                                L(idx // 2 + 1), self.K_stereo)
                        else:
                            information = self.inv_lvl_sigma2[self.octave[idx // 2]] * np.identity(2)
                            stereo_model = gt.noiseModel_Diagonal.Information(information)
                            new_factor = gt.GenericProjectionFactorCal3_S2(
                                factor.measured(), stereo_model, X(1),
                                L(idx // 2 + 1), self.K_mono)
                        self.graph.replace(idx, new_factor)
            if flag_verbose:
                fig, ax = plt.subplots()
                ax.bar(np.arange(0, len(errors)).tolist(), errors)
                plt.show()
                print("NUM BADS: ", n_bad)
        pose = result.atPose3(X(1))
        # marginals = gt.Marginals(self.graph, result)
        # cov = marginals.marginalCovariance(gt.X(1))
        return pose, edge_outlier  # self.graph, result
class PoseGraphOptimizerGTSAM:
    """Incremental pose-graph optimizer over keyframe-to-keyframe constraints."""

    def __init__(self):
        # Create graph container and add factors to it
        self.graph = gt.NonlinearFactorGraph()
        # Create initial estimate for camera poses and landmarks
        self.initialEstimate = gt.Values()
        # Fixed odometry noise: 5 deg on each rotation axis, 5 cm translation.
        sigmas = np.array([5*np.pi/180, 5*np.pi/180, 5*np.pi/180, 0.05, 0.05, 0.05])
        self.covariance = gt.noiseModel.Diagonal.Sigmas(sigmas)
        # Anchor the first keyframe at the identity pose.
        self.graph.add(gt.NonlinearEqualityPose3(X(0), gt.Pose3(np.eye(4))))
        self.result = None
        self.marginals = None

    def add_node(self, kf):
        """Insert keyframe kf and between-factors to its earlier neighbors."""
        self.initialEstimate.insert(X(kf.kfID), gt.Pose3(kf.pose_matrix()))
        for kf_n, rel_pose, _ in kf.neighbors:
            # Only link to already-inserted (lower-ID) keyframes to avoid
            # referencing poses that are not in the graph yet.
            if kf_n.kfID > kf.kfID:
                continue
            self.graph.add(gt.BetweenFactorPose3(X(kf.kfID), X(kf_n.kfID),
                                                 gt.Pose3(rel_pose), self.covariance))

    def add_node_optimize(self, kf):
        """Convenience wrapper: add kf and immediately re-optimize."""
        self.add_node(kf)
        result, marginals = self.optimize()
        return result, marginals

    def optimize(self):
        """Run Levenberg-Marquardt; returns (result values, marginals)."""
        optimizer = gt.LevenbergMarquardtOptimizer(self.graph, self.initialEstimate)
        result = optimizer.optimize()
        marginals = gt.Marginals(self.graph, result)
        return result, marginals
class PoseOptimizerRANSAC:
def __init__(self):
self.n_iteration = 100
@classmethod
def procrustes(cls, X, Y, scaling=True, reflection='best'):
"""
A port of MATLAB's `procrustes` function to Numpy.
Procrustes analysis determines a linear transformation (translation,
reflection, orthogonal rotation and scaling) of the points in Y to best
conform them to the points in matrix X, using the sum of squared errors
as the goodness of fit criterion.
d, Z, [tform] = procrustes(X, Y)
Inputs:
------------
X, Y
matrices of target and input coordinates. they must have equal
numbers of points (rows), but Y may have fewer dimensions
(columns) than X.
scaling
if False, the scaling component of the transformation is forced
to 1
reflection
if 'best' (default), the transformation solution may or may not
include a reflection component, depending on which fits the data
best. setting reflection to True or False forces a solution with
reflection or no reflection respectively.
Outputs
------------
d
the residual sum of squared errors, normalized according to a
measure of the scale of X, ((X - X.mean(0))**2).sum()
Z
the matrix of transformed Y-values
tform
a dict specifying the rotation, translation and scaling that
maps X --> Y
"""
n, m = X.shape
ny, my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0 ** 2.).sum()
ssY = (Y0 ** 2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
Y0 = np.concatenate((Y0, np.zeros(n, m - my)), 0)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U, s, Vt = np.linalg.svd(A, full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
if reflection is not 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:, -1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
# standarised distance between X and b*Y*T + c
d = 1 - traceTA ** 2
# transformed coords
Z = normX * traceTA * np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY / ssX - 2 * traceTA * normY / normX
Z = normY * np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my, :]
c = muX - b * np.dot(muY, T)
# transformation values
tform = {'rotation': T, 'scale': b, 'translation': c}
return d, Z, tform | StarcoderdataPython |
4893769 | from . import _dbg
from . import gs
from .margo import mg
from .margo_render import render_src
from .margo_state import actions, ViewPathName
import os
import sublime
import sublime_plugin
class MargoEvents(sublime_plugin.EventListener):
    """Forwards every Sublime Text editor event to the margo agent singleton."""

    def on_query_completions(self, view, prefix, locations):
        return mg.event('query_completions', view, mg.on_query_completions, [view, prefix, locations])

    def on_activated(self, view):
        return mg.event('activated', view, mg.on_activated, [view])

    def on_modified(self, view):
        return mg.event('modified', view, mg.on_modified, [view])

    def on_selection_modified(self, view):
        return mg.event('selection_modified', view, mg.on_selection_modified, [view])

    def on_pre_save(self, view):
        return mg.event('pre_save', view, mg.on_pre_save, [view])

    def on_post_save(self, view):
        return mg.event('post_save', view, mg.on_post_save, [view])

    def on_load(self, view):
        return mg.event('load', view, mg.on_load, [view])

    def on_new(self, view):
        return mg.event('new', view, mg.on_new, [view])

    def on_pre_close(self, view):
        return mg.event('pre_close', view, mg.on_pre_close, [view])

    def on_hover(self, view, point, hover_zone):
        return mg.event('hover', view, mg.on_hover, [view, point, hover_zone])
class MargoRenderSrcCommand(sublime_plugin.TextCommand):
    """Replaces the view's contents with src (margo uses this to apply edits)."""

    def run(self, edit, src):
        render_src(self.view, edit, src)
class MargoUserCmdsCommand(sublime_plugin.TextCommand):
    """Queries the margo agent for user commands, lets the user pick one from
    a quick panel, collects the command's prompts via input panels, and then
    runs it in a 9o window."""

    def enabled(self):
        return mg.enabled(self.view)

    def run(self, edit, action='QueryUserCmds'):
        act = getattr(actions, action)
        mg.send(view=self.view, actions=[act], cb=lambda rs: self._cb(rs=rs, action=action))

    def _on_done(self, *, win, cmd, prompts):
        # Recursively collect one prompt answer at a time until all are in.
        if len(prompts) >= len(cmd.prompts):
            self._on_done_call(win=win, cmd=cmd, prompts=prompts)
            return

        def on_done(s):
            prompts.append(s)
            self._on_done(win=win, cmd=cmd, prompts=prompts)

        # NOTE(review): with no answers yet this shows cmd.prompts[-1] (the
        # last prompt) first; looks like an off-by-one and was probably meant
        # to be cmd.prompts[len(prompts)] - confirm before changing.
        win.show_input_panel('%d/%d %s' % (
            len(prompts) + 1,
            len(cmd.prompts),
            cmd.prompts[len(prompts)-1],
        ), '', on_done, None, None)

    def _on_done_call(self, *, win, cmd, prompts):
        # All prompts answered: run the command in the 9o output window.
        win.run_command('gs9o_win_open', {
            'run': [cmd.name] + cmd.args,
            'action_data': {
                'Prompts': prompts,
            },
            'save_hist': False,
            'focus_view': False,
            'show_view': True,
        })

    def _cb(self, *, rs, action):
        # Build the quick-panel items from the agent's user-command list.
        win = self.view.window() or sublime.active_window()
        selected = 0
        flags = sublime.MONOSPACE_FONT
        items = []
        cmds = rs.state.user_cmds
        for c in cmds:
            desc = c.desc or '`%s`' % ' '.join([c.name] + c.args)
            items.append([c.title, desc])

        def on_done(i):
            if i >= 0 and i < len(cmds):
                self._on_done(win=win, cmd=cmds[i], prompts=[])

        def on_highlight(i):
            pass

        win.show_quick_panel(items or ['%s returned no results' % action], on_done, flags, selected, on_highlight)
class margo_display_issues(sublime_plugin.TextCommand):
    """Shows the current issue list, falling back to the GoSublime errors
    palette when margo is not enabled for this view."""

    def run(self, edit, **action):
        if mg.enabled(self.view):
            self._run()
        else:
            self.view.run_command('gs_palette', {
                'palette': 'errors', 'direct': True,
            })

    def _run(self):
        mg.send(view=self.view, actions=[actions.QueryIssues], cb=self._cb)

    def _cb(self, rs):
        show_issues(self.view, rs.state.issues)
class margo_issues(margo_display_issues):
    # Alias command name; keeps the shorter `margo_issues` invocation working.
    pass
def issues_to_items(view, issues):
    """Convert issues into (items, index, selected) for a ST quick panel.

    items: list of equal-length row lists for show_quick_panel;
    index: the issues (with non-empty messages) in display order;
    selected: index of the issue closest to the caret in this file, or -1.
    """
    vp = ViewPathName(view)
    dir = os.path.dirname(vp.path)  # NOTE(review): shadows the builtin dir()
    index = []
    for isu in issues:
        # Issues without a message are not worth displaying.
        if isu.message:
            index.append(isu)
    if not index:
        return ([], [], -1)

    def sort_key(isu):
        # Issues in the current file sort first, then by path and row.
        if vp.match(isu):
            return (-1, '', isu.row)
        return (1, isu.relpath(dir), isu.row)

    index.sort(key=sort_key)
    row, _ = gs.rowcol(view)
    items = []
    selected = []
    for idx, isu in enumerate(index):
        if vp.match(isu):
            title = '%s:%d' % (isu.basename(), isu.row + 1)
            # Track distance from the caret to pick the default selection.
            selected.append((abs(isu.row - row), idx))
        else:
            title = '%s:%d' % (isu.relpath(dir) or isu.name, isu.row + 1)
            selected.append((999999999, -1))
        rows = [title]
        rows.extend(s.strip() for s in isu.message.split('\n'))
        rows.append(' '.join(
            '[%s]' % s for s in filter(bool, (isu.tag, isu.label))
        ))
        # hack: ST sometimes decide to truncate the message because it's longer
        # than the top row... and we don't want the message up there
        rows[0] = rows[0].ljust(max(len(s) for s in rows))
        items.append(rows)
    # hack: if the items don't have the same length, ST throws an exception
    n = max(len(l) for l in items)
    for l in items:
        l += [''] * (n - len(l))
    return (items, index, min(selected)[1])
def show_issues(view, issues):
    """Display issues in a quick panel; selecting one jumps to its location,
    cancelling restores the original caret position."""
    orig_row, orig_col = gs.rowcol(view)
    flags = sublime.MONOSPACE_FONT
    items, index, selected = issues_to_items(view, issues)

    def on_done(i):
        if not index or i >= len(index):
            return
        if i < 0:
            # Panel was cancelled: jump back to where the caret was.
            vp = ViewPathName(view)
            fn = vp.path or vp.name
            gs.focus(fn, row=orig_row, col=orig_col, win=view.window())
            return
        isu = index[i]
        gs.focus(isu.path or isu.name, row=isu.row, col=isu.col, win=view.window())

    def on_highlight(i):
        # Live-preview the highlighted issue while navigating the panel.
        on_done(i)

    view.window().show_quick_panel(items or ['No Issues'], on_done, flags, selected, on_highlight)
class MargoFmtCommand(sublime_plugin.TextCommand):
    """Formats the view through margo, falling back to GoSublime's gs_fmt."""

    def run(self, edit):
        if mg.enabled(self.view):
            mg.fmt(self.view)
        else:
            self.view.run_command('gs_fmt')
class MargoRestartAgentCommand(sublime_plugin.WindowCommand):
    """Restarts the margo agent process."""

    def run(self):
        mg.restart()
class MargoOpenExtensionCommand(sublime_plugin.WindowCommand):
    """Opens the user's margo extension file (creating it if requested)."""

    def run(self):
        fn = mg.extension_file(True)
        if fn:
            gs.focus(fn, row=-1, focus_pat='')
class margo_show_hud(sublime_plugin.WindowCommand):
    """Shows the margo HUD output panel without stealing focus from the view."""

    def run(self):
        self.window.run_command('show_panel', {'panel': 'output.%s' % mg.hud_name})
        self.window.focus_view(self.window.active_view())
| StarcoderdataPython |
# Python 2 script: raw_input reads a line as a str.
s = raw_input('string: ')
rotate = raw_input('rotate by: ')
r = int(rotate)  # rotation amount; raises ValueError on non-numeric input
def rotate_word(s, r):
    """
    Rotate each letter in a string through the alphabet by the given amount.
    Wraps around to the beginning (e.g. 'z' rotated by 1 becomes 'a'), as a
    Caesar cipher should; the old version used a bare chr(ord(c) + r) and
    walked off the end of the alphabet. Case is preserved and non-letter
    characters are left unchanged. Negative rotations are supported.
    """
    r_s = ''
    for c in s:
        if 'a' <= c <= 'z':
            base = ord('a')
            r_s += chr(base + (ord(c) - base + r) % 26)
        elif 'A' <= c <= 'Z':
            base = ord('A')
            r_s += chr(base + (ord(c) - base + r) % 26)
        else:
            # Digits, punctuation and whitespace pass through untouched.
            r_s += c
    return r_s
print rotate_word(s, r)
| StarcoderdataPython |
1868481 | <gh_stars>0
class MergeSort(object):
    """Merge sort that also counts "important reverse pairs": pairs (i, j)
    with i < j and list[i] > 2 * list[j] (LeetCode 493).

    After sort(), self.list is sorted ascending and self.numReversePairs
    holds the pair count. Runs in O(n log n).
    """

    def __init__(self, list):
        # Parameter name `list` kept for backward compatibility even though
        # it shadows the builtin.
        self.list = list
        self.numReversePairs = 0

    def sort(self):
        """Sort self.list in place, counting reverse pairs as a side effect."""
        aux = [0 for _ in self.list]
        self.mergesort(self.list, 0, len(self.list), aux)

    def mergesort(self, list, l, h, aux):
        # Recursively sort the half-open range [l, h).
        if h <= l+1:
            return
        mid = (l+h)//2
        self.mergesort(list, l, mid, aux)
        self.mergesort(list, mid, h, aux)
        self.merge(list, l, mid, h, aux)

    def merge(self, list, l, m, h, aux):
        aux[l:h] = list[l:h]  # copy all items to aux
        # Count cross-half reverse pairs before merging: for each element in
        # the right half, advance r1 past left elements that are <= 2x it.
        r1 = l
        # range() instead of the Python-2-only xrange() so this runs on
        # Python 3 (identical behavior on Python 2).
        for r2 in range(m, h):
            while r1 < m and list[r1] <= list[r2] * 2:
                r1 += 1
            if r1 == m:
                break
            self.numReversePairs += (m-r1)
        # Standard two-way merge of aux[l:m] and aux[m:h] back into list.
        w = l
        r1, r2 = l, m
        while r1 < m and r2 < h:
            if aux[r1] <= aux[r2]:
                list[w] = aux[r1]
                r1 += 1
            else:
                list[w] = aux[r2]
                r2 += 1
            w += 1
        while r1 < m:
            list[w] = aux[r1]
            w += 1
            r1 += 1
class Solution(object):
    def reversePairs(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Delegates to MergeSort, which counts pairs (i, j) with i < j and
        # nums[i] > 2 * nums[j] while sorting (O(n log n)). Note: sorts nums
        # in place as a side effect.
        M = MergeSort(nums)
        M.sort()
        return M.numReversePairs
print Solution().reversePairs([1,3,2,3,1])
print Solution().reversePairs([2,4,3,5,1]) | StarcoderdataPython |
9721950 | import random
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QDialog, QCheckBox, QPushButton, QMessageBox, QTextEdit
from src.UI.util import create_line, create_multi_line
from src.backend.word_book import WordBook
class UiReview(QDialog):
    """Flash-card review dialog: shows a translation and four candidate
    words; the user must tick the word that matches. Wrong answers re-queue
    the word for one extra round."""

    def __init__(self, parent, group_id, word_book: WordBook):
        super().__init__(parent)
        self.setWindowTitle("复习生词")
        self.word_book = word_book
        # Bug fix: words/current/right_index used to be *class* attributes,
        # so every UiReview instance appended into the same shared list and
        # review state leaked between dialogs. They are instance state now.
        self.words = []
        self.current = -1
        self.right_index = -1
        for word in word_book.get_words(group_id, for_review=True):
            self.words.append({
                "id": word[0],
                "word": word[1],
                "translate": word[2],
                "review_count": word[4]
            })
        self.font_word = QFont()
        self.font_word.setFamily("Mono")
        self.font_word.setPointSize(16)
        self.font_word.setWeight(75)
        self.font_word.setBold(True)
        self.setFixedSize(QSize(800, 600))
        self.label_text = QTextEdit()
        self.label_text.setReadOnly(True)
        self.label_text.setFont(self.font_word)
        self.label_text.setAcceptRichText(False)
        self.checkbox = []
        for i in range(4):
            chk = QCheckBox()
            chk.setFont(self.font_word)
            self.checkbox.append(chk)
        self.btn_next = QPushButton(" 下一个 ")
        self.btn_next.setFont(self.font_word)
        self.btn_next.clicked.connect(self.on_next)
        items = [
            self.label_text,
            create_line([1, create_multi_line(self.checkbox), 1]),
            create_line([1, self.btn_next, 1])
        ]
        self.setLayout(create_multi_line(items))
        self.on_next()

    def get_error_word(self, exclude):
        """Return (index, word) for a random distractor not listed in exclude."""
        while True:
            r = random.Random()
            index = r.randint(0, len(self.words) - 1)
            if index in exclude:
                continue
            return index, self.words[index]

    def on_next(self):
        """Check the current answer (if any), then show the next word."""
        # Need at least as many words as checkboxes to build a choice set.
        if len(self.words) < len(self.checkbox):
            return
        if self.current >= 0:
            curr = self.words[self.current]
            for i in range(len(self.checkbox)):
                if self.checkbox[i].isChecked() and i != self.right_index:
                    QMessageBox.information(self, "错误", "选错了。")
                    # Answered incorrectly: re-queue the word once for an
                    # extra review pass.
                    if self.words[-1] != curr:
                        self.words.append(curr)
                    return
            self.word_book.review_word(curr["id"], curr["review_count"] + 1)
        self.current += 1
        if self.current == len(self.words) - 1:
            self.btn_next.setText(" 完成 ")
        elif self.current >= len(self.words):
            self.accept()
            return
        curr = self.words[self.current]
        r = random.Random()
        self.right_index = r.randint(0, len(self.checkbox) - 1)
        self.label_text.setText(curr["translate"])
        self.checkbox[self.right_index].setText(curr["word"])
        self.checkbox[self.right_index].setChecked(False)
        exclude = [self.current]
        for i in range(len(self.checkbox)):
            if i == self.right_index:
                continue
            index, word = self.get_error_word(exclude)
            self.checkbox[i].setText(word["word"])
            self.checkbox[i].setChecked(False)
            exclude.append(index)
5062178 | <gh_stars>100-1000
from .kwta import kWTA
from .lp_relu import LP_ReLU1, LP_ReLU2
from .frelu import FReLU | StarcoderdataPython |
268830 | <filename>mecache/core.py
import time
import pickle
import hmac
from hashlib import sha1
from functools import wraps
class Aboriginal:
    """Shared key-building helper for the cache base classes."""

    @staticmethod
    def keyf(*args, **kwargs):
        """Build a stable cache key from arbitrary (picklable) call arguments.

        The args/kwargs are pickled and fed through HMAC-SHA1, producing a
        40-character hex string.
        """
        payload = pickle.dumps([args, kwargs])
        digest = hmac.new(b'mecache', msg=payload, digestmod=sha1)
        return str(digest.hexdigest())
class BaseCache(Aboriginal):
    """Synchronous cache base class; subclasses provide the storage backend."""

    def get_cache(self, func, key, max_time):
        # Must return the cached value, or None on a miss / expired entry.
        raise NotImplementedError("You must overwrite 'get_cache'!")

    def set_cache(self, result, func, key, max_time):
        # Must store `result` under `key` with a max_time lifetime.
        raise NotImplementedError("You must overwrite 'set_cache'!")

    def cache(self, max_time, keyf=None):
        """
        if *args or **kwargs can't be pickle.dumps, use keyf to transform them
        example:
        def calc(*args, **kwargs):
            return str(args)+str(kwargs)
        c = Cache()
        @c.cache(max_time=10, keyf=calc)
        def add(x, y):
            return x + y
        """
        def timeout(func):
            @wraps(func)
            def warpper(*args, **kwargs):
                if keyf is None:
                    key = self.keyf(*args, **kwargs)
                else:
                    key = keyf(*args, **kwargs)
                # get result from cache
                result = self.get_cache(func, key, max_time)
                # NOTE(review): a None result is indistinguishable from a
                # cache miss, so functions that return None are never cached.
                if result is None:  # cache invalid
                    result = func(*args, **kwargs)
                    # set new cache
                    self.set_cache(result, func, key, max_time)
                return result
            return warpper
        return timeout
class AioBaseCache(Aboriginal):
    """Asynchronous cache base class; subclasses provide the storage backend."""

    async def get_cache(self, func, key, max_time):
        # Must return the cached value, or None on a miss / expired entry.
        raise NotImplementedError("You must overwrite 'get_cache'!")

    async def set_cache(self, result, func, key, max_time):
        # Must store `result` under `key` with a max_time lifetime.
        raise NotImplementedError("You must overwrite 'set_cache'!")

    def cache(self, max_time, keyf=None):
        """
        if *args or **kwargs can't be pickle.dumps, use keyf to transform them
        example:
        def calc(*args, **kwargs):
            return str(args)+str(kwargs)
        c = Cache()
        @c.cache(max_time=10, keyf=calc)
        async def add(x, y):
            return await do.something
        """
        def timeout(func):
            @wraps(func)
            async def warpper(*args, **kwargs):
                if keyf is None:
                    key = self.keyf(*args, **kwargs)
                else:
                    key = keyf(*args, **kwargs)
                # get result from cache
                result = await self.get_cache(func, key, max_time)
                # NOTE(review): as in BaseCache, a None result is treated as
                # a miss, so coroutines returning None are never cached.
                if result is None:  # cache invalid
                    result = await func(*args, **kwargs)
                    # set new cache
                    await self.set_cache(result, func, key, max_time)
                return result
            return warpper
        return timeout
| StarcoderdataPython |
8125160 | from .correlation import fig_correlation
from .metric import fig_metric
from .posterior import fig_posterior
| StarcoderdataPython |
8016978 | #!/usr/bin/env python
#
### INSTRUCTOR COMMENTS:
# Note that the phrases "Module description" and "Function docstring" should not
# actually appear in docstrings. Instead, you should put the name and/or brief
# description of what the function/module does.
#
"""Module Description
This function takes the input the user puts in, n, and outputs
the first n sequences of the Fibonacci number sequences
For example, putting in the number 5 will yield the list 1, 1, 2, 3, 5
Please do not input a negative number.
"""
def fibonacci(n):
    """Return a list of the first *n* Fibonacci numbers.

    fibonacci(int) -> list

    Parameters
    ----------
    n : int
        How many Fibonacci numbers to generate; must be a positive integer.

    Returns
    -------
    list
        The first ``n`` Fibonacci numbers, e.g. ``fibonacci(5)`` gives
        ``[1, 1, 2, 3, 5]``.

    Raises
    ------
    TypeError
        If ``n`` is not a positive integer.
    """
    # Reject bad input loudly instead of silently "fixing" it: the old code
    # negated a negative n (hiding caller errors, as the instructor comments
    # pointed out) and returned [1, 1] for n == 0.
    if not (isinstance(n, int) and n > 0):
        raise TypeError("Not a positive integer")
    sequence = []
    a, b = 1, 1
    # Generate terms iteratively; this single loop also covers the n == 1
    # and n == 2 cases that the previous version special-cased.
    while len(sequence) < n:
        sequence.append(a)
        a, b = b, a + b
    return sequence
def main(argv):
    """Compute and display the Fibonacci sequence of length ``argv``.

    Despite its name, ``argv`` is a single int here (already converted from
    the command line by the caller), not a list of strings.
    """
    sequence = fibonacci(argv)
    print(sequence)
if __name__ == "__main__":
from sys import argv
if len(argv) != 2:
#
### INSTRUCTOR COMMENT:
# This is one possible error. But the check done above was just that the length
# wasn't 2. It didn't actually check whether a number was input. Make sure your
# error messages actually match the checks being done in the code.
#
print ("Error: did not input a number. Will set a default argument of 5")
main(5)
else:
#
### INSTRUCTOR COMMENT:
# For example, int() here could raise an exception if it cannot convert to an int
#
main(int(argv[1]))
| StarcoderdataPython |
3516173 | import json
import gzip
from twython import TwythonError
from hoover.auth import twython_from_key_and_auth
from hoover.rate_control import RateControl
def json_split(json_str):
    """Split a string of concatenated JSON objects into a list of parts.

    Splitting is done by brace-depth counting, so it assumes '{' and '}'
    do not appear inside JSON string values.
    """
    text = json_str.strip()
    # Fast path: no '}{' boundary means there is only one object.
    if '}{' not in json_str:
        return [text]
    parts = []
    buf = []
    depth = 0
    for ch in text:
        buf.append(ch)
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
        if depth == 0:
            # A balanced chunk just closed; flush it as one part.
            parts.append(''.join(buf))
            buf = []
    return parts
class Hydrate(RateControl):
    """Rehydrate tweets from a gzipped input file of ids and/or cached tweets.

    Reads ``infile`` (one tweet id per line, or concatenated JSON tweet
    objects), re-fetches id-only/truncated tweets from the Twitter API in
    batches of up to 100, and appends the combined, id-sorted results to
    the gzipped ``outfile``.  API and parse errors are appended to
    ``errfile``.
    """
    def __init__(self, infile, outfile, errfile, key_file, auth_file):
        # rate_limit=500 throttles requests via RateControl.pre_request().
        super().__init__(rate_limit=500)
        self.infile = infile
        self.outfile = outfile
        self.errfile = errfile
        self.twitter = twython_from_key_and_auth(key_file, auth_file)
        # Running totals used only for progress reporting.
        self.retrieved = 0
        self.lost = 0
    def get_tweets(self, tweet_ids):
        """Fetch up to 100 tweets by id; returns [] and logs on API error."""
        ids = ','.join(tweet_ids)
        try:
            tweets = self.twitter.lookup_status(id=ids,
                                                include_rt=True,
                                                tweet_mode='extended')
            return tweets
        except TwythonError as e:
            print('*** {}'.format(len(tweet_ids)))
            print('ERROR: {}'.format(e))
            with open(self.errfile, 'a') as file:
                file.write('ERROR: {}\n'.format(e))
            return []
    def _hydrate_and_write(self, truncated_ids, non_truncated_tweets):
        """Fetch the pending ids, merge with cached tweets, and append to outfile."""
        if len(truncated_ids) > 0:
            # Respect the API rate limit before each lookup batch.
            self.pre_request()
            tweets = self.get_tweets(truncated_ids)
        else:
            tweets = []
        # Tweets the API no longer returns (deleted/suspended) count as lost.
        self.retrieved += len(tweets)
        self.lost += len(truncated_ids) - len(tweets)
        print('{} tweets retrieved, {} tweets lost.'.format(
            self.retrieved, self.lost))
        tweets += non_truncated_tweets
        tweets = sorted(tweets, key=lambda k: k['id'])
        with gzip.open(self.outfile, 'at') as f:
            for tweet in tweets:
                f.write('{}\n'.format(json.dumps(tweet)))
    def retrieve(self):
        """Stream the input file, batching ids (100) and cached tweets (100k)."""
        ids = []
        tweets = []
        with gzip.open(self.infile, 'rt') as f:
            for line in f:
                try:
                    # A line holding a bare integer is a tweet id to hydrate.
                    tid = int(line.strip())
                    ids.append(str(tid))
                    if len(ids) >= 100:
                        self._hydrate_and_write(ids, tweets)
                        ids = []
                        tweets = []
                except ValueError:
                    # Otherwise the line holds one or more JSON tweet objects;
                    # truncated ones are re-fetched, full ones passed through.
                    for json_str in json_split(line):
                        try:
                            tweet = json.loads(json_str)
                            if tweet['truncated']:
                                ids.append(tweet['id_str'])
                            else:
                                tweets.append(tweet)
                        except Exception as e:
                            print('ERROR: {}'.format(e))
                            with open(self.errfile, 'a') as file:
                                file.write('ERROR: {}\n'.format(e))
                if len(ids) >= 100 or len(tweets) >= 100000:
                    self._hydrate_and_write(ids, tweets)
                    ids = []
                    tweets = []
        # Flush whatever remains after the input is exhausted.
        self._hydrate_and_write(ids, tweets)
def hydrate_file(key_file, auth_file, infile, outfile, errfile):
    """Convenience wrapper: build a Hydrate worker and run it to completion."""
    Hydrate(infile, outfile, errfile, key_file, auth_file).retrieve()
| StarcoderdataPython |
6689819 | <filename>test.py
import sockslib
# Demo: open a socket tunnelled through a local SOCKS proxy (port 9050, e.g.
# Tor), offering both no-auth and username/password authentication methods.
# NOTE(review): the name 'socket' shadows the stdlib module; consider renaming.
socket = sockslib.SocksSocket()
socket.set_proxy(('127.0.0.1', 9050), [sockslib.NoAuth(), sockslib.UserPassAuth('username', 'password')])
# Fetch this host's apparent external IP through the proxy and print the reply.
socket.connect(('myexternalip.com', 80))
socket.sendall(b"GET /raw HTTP/1.1\r\nHost: myexternalip.com\r\n\r\n")
print(socket.recv(1024))
| StarcoderdataPython |
5187723 | <filename>lifecycle/ingest/tooling/unknown.py
def unknown(command):
    """Report that *command*'s zone has no implementation; always returns 1."""
    message = command.zone + " is not implemented.\n"
    print(message)
    return 1
1971880 |
data="""Ground, 2
Vegetation, 5
Building, 6
Water, 9
Transmission Tower, 15
Bridge Deck, 17
Overhead Structure, 19
Wire, 64
Car, 65
Truck, 66
Boat, 67
Barrier, 68
Railroad Car, 69
Elevated Walkway, 70
Covered Walkway, 71
Pier/Dock, 72
Fence, 73
Tower, 74
Crane, 75
Silo/Storage, 76
Bridge Structure, 77"""
lines = data.split('\n')
import struct
import base64
import json
fmt = '<B15c'
limit = 15
vlr = None
for line in lines:
description, value = line.split(',')
desc = description[:limit].ljust(limit, '\0')
desc = [bytes(i, 'utf-8') for i in desc]
p = struct.pack(fmt, int(value), *desc)
if not vlr:
vlr = p
vlr = vlr + p
b64 = base64.b64encode(vlr)
b64 = b64.decode('utf-8')
vlr = {
"description": "ASPRS Classification Lookup",
"record_id": 0,
"user_id": "LASF_Spec",
"data": b64
}
print (json.dumps(vlr))
| StarcoderdataPython |
11316162 | '''
------------------------------------
Assignment 10 - EE2703 (Jan-May 2020)
Done by <NAME> (EE18B122)
Created on 02/05/20
Last Modified on 02/05/20
------------------------------------
'''
# Imports
import csv
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
import scipy.signal as sgnl
# Global Variables
plotsDir = 'plots/'  # output directory for saved figures
PI = np.pi           # shorthand for pi used in the signal definitions
figNum = 0           # running figure counter shared by the plot helpers below
showAll = True       # master switch: also display figures interactively
# Helper Functions
def plotSignal(t, x, figTitle=None, style='b-', blockFig=False, showFig=False, saveFig=True, stemPlot=True, xLimit=None, yLimit=None, xLabel=r"$n\ \to$", yLabel=None):
    """Plot signal ``x`` against ``t`` (stem plot by default) and save/show it.

    Uses and increments the module-global ``figNum`` to number figures; when
    ``saveFig`` is True the figure is written to ``plotsDir + "FigN.png"``.
    """
    global figNum
    plt.figure(figNum)
    plt.title(figTitle)
    plt.grid()
    plt.ylabel(yLabel)
    plt.xlabel(xLabel)
    if(stemPlot):
        plt.stem(t, x, linefmt='b-', markerfmt='bo')
    else:
        # Continuous-line plot using the caller-supplied style string.
        plt.plot(t, x, style)
    if(xLimit):
        plt.xlim(xLimit)
    if(yLimit):
        plt.ylim(yLimit)
    if(saveFig):
        plt.savefig(plotsDir + "Fig"+str(figNum)+".png")
    if(showFig):
        plt.show(block=blockFig)
    # Advance the shared counter so the next helper call gets a new figure.
    figNum+=1
def plotSpectrum(w, Y, figTitle=None, magStyle='b-', phaseStyle='ro', xLimit=None, yLimit=None, showFig=False, saveFig=True, blockFig=False, type="Y"):
    """Plot magnitude (top) and phase (bottom) of spectrum ``Y`` versus ``w``.

    ``type`` is the symbol used in the axis labels (e.g. "H" or "Y").
    NOTE(review): the parameter name ``type`` shadows the builtin; kept for
    call-site compatibility.  Uses/increments the module-global ``figNum``.
    """
    global figNum
    plt.figure(figNum)
    plt.suptitle(figTitle)
    plt.subplot(211)
    plt.grid()
    plt.plot(w, abs(Y), magStyle, lw=2)
    plt.ylabel(r"$\| "+type+"\|$")
    if (xLimit):
        plt.xlim(xLimit)
    if (yLimit):
        plt.ylim(yLimit)
    plt.subplot(212)
    plt.grid()
    plt.plot(w, np.angle(Y), phaseStyle, lw=2)
    plt.xlim(xLimit)
    plt.ylabel(r"$\angle "+type+"$")
    plt.xlabel(r"$\omega\ \to$")
    if(saveFig):
        plt.savefig(plotsDir + "Fig"+str(figNum)+".png")
    if(showFig):
        plt.show(block=blockFig)
    figNum+=1
# Question 1: load the FIR filter coefficients from h.csv.
# NOTE(review): the name 'filter' shadows the builtin; consider renaming.
filter = np.genfromtxt("h.csv")
# Question 2: plot the impulse response and its frequency response.
plotSignal(range(len(filter)), filter, "FIR Filter ($h[n]$)", showFig=showAll, yLabel=r"$h[n]$")
w, H = sgnl.freqz(filter, 1)
plotSpectrum(w, H, "Frequency Response of FIR Filter ($H(e^{j\omega}))$", type="H", showFig=showAll)
# Question 3: build the two-tone test input x[n].
n = np.linspace(1, 2**10, 2**10)
x = np.cos(0.2*PI*n) + np.cos(0.85*PI*n)
plotSignal(n, x, figTitle="$x[n] = cos(0.2\pi n) + cos(0.85\pi n)$", xLimit=[0, 50], showFig=showAll, yLabel=r"$x[n]$")
# Question 4: direct (linear) convolution of x with the filter.
y = np.convolve(x, filter)
plotSignal(list(range(len(y))), y, figTitle=r"$y[n] = x[n]\ast h[n]$", xLimit=[0, 100], showFig=showAll, yLabel=r"$y[n]$")
# Question 5: circular convolution via the DFT (filter zero-padded to len(x)).
numZeros = len(x)-len(filter)
y = fft.ifft(fft.fft(x)*fft.fft(np.concatenate((filter, np.zeros(numZeros,)))))
plotSignal(list(range(len(y))), y, figTitle=r"$y[n] = x[n]\otimes h[n]$ (N = 1024)", xLimit=[0, 100], showFig=showAll, yLabel=r"$y[n]$")
# Question 6: linear convolution via the DFT by zero-padding both sequences
# to length len(x)+len(h)-1 (N = 1034), avoiding circular wrap-around.
numZerosForX = len(filter) - 1
numZerosForH = len(x) - 1
paddedX = np.concatenate((x, np.zeros(numZerosForX,)))
paddedH = np.concatenate((filter, np.zeros(numZerosForH,)))
y = fft.ifft(fft.fft(paddedX)*fft.fft(paddedH))
plotSignal(list(range(len(y))), y, figTitle=r"$y[n] = x[n]\otimes h[n]$ (N = 1034), with zero-padding of $x[n]$ and $h[n]$", xLimit=[0, 100], showFig=showAll, yLabel=r"$y[n]$")
def readComplexNumbers(fileName):
    """Read one complex number per line from *fileName* into a list.

    Each line must be parseable by ``complex()`` (e.g. ``1+2j``);
    surrounding whitespace/newlines are tolerated by ``complex()`` itself.

    Parameters
    ----------
    fileName : str
        Path of the text file to read.

    Returns
    -------
    list of complex
    """
    # Use a context manager so the file is always closed (the previous
    # version leaked the file handle).
    with open(fileName, "r") as p:
        return [complex(line) for line in p.readlines()]
# Question 7: load the Zadoff-Chu sequence and verify its correlation
# property: correlating with a 5-sample cyclic shift peaks at lag 5.
zChu = readComplexNumbers("x1.csv")
plotSpectrum(list(range(len(zChu))), np.asarray(zChu, dtype=np.complex), r"Zadoff-Chu Sequence", phaseStyle='r-', showFig=showAll, type=r"zChu[n]", yLimit=[-0.5, 1.5])
zChuShifted = np.roll(zChu, 5)
y = fft.ifftshift(np.correlate(zChuShifted, zChu, "full"))
plotSignal(list(range(len(y))), abs(y), figTitle=r"Correlation of $ZC[n]$ with $ZC[n-5]$", showFig=showAll, yLabel=r"$cor[n]$")
# Zoomed view of the same correlation to show the peak location clearly.
plotSignal(list(range(len(y))), abs(y), figTitle=r"Correlation of $ZC[n]$ with $ZC[n-5]$", xLimit=[0, 15], showFig=showAll, blockFig=True, yLabel=r"$cor[n]$")
| StarcoderdataPython |
79749 | <reponame>tranconbv/ironpython-stubs
# Auto-generated IronPython stub for System.ApplicationId: bodies are mocks
# that exist only to document the .NET API surface for tooling/IDEs.
class ApplicationId(object):
    """
    Contains information used to uniquely identify a manifest-based application. This class cannot be inherited.
    ApplicationId(publicKeyToken: Array[Byte],name: str,version: Version,processorArchitecture: str,culture: str)
    """
    def ZZZ(self):
        """hardcoded/mock instance of the class"""
        return ApplicationId()
    instance=ZZZ()
    """hardcoded/returns an instance of the class"""
    def Copy(self):
        """
        Copy(self: ApplicationId) -> ApplicationId
        Creates and returns an identical copy of the current application identity.
        Returns: An System.ApplicationId object that represents an exact copy of the original.
        """
        pass
    def Equals(self,o):
        """
        Equals(self: ApplicationId,o: object) -> bool
        Determines whether the specified System.ApplicationId object is equivalent to the current System.ApplicationId.
        o: The System.ApplicationId object to compare to the current System.ApplicationId.
        Returns: true if the specified System.ApplicationId object is equivalent to the current System.ApplicationId; otherwise,false.
        """
        pass
    def GetHashCode(self):
        """
        GetHashCode(self: ApplicationId) -> int
        Gets the hash code for the current application identity.
        Returns: The hash code for the current application identity.
        """
        pass
    def ToString(self):
        """
        ToString(self: ApplicationId) -> str
        Creates and returns a string representation of the application identity.
        Returns: A string representation of the application identity.
        """
        pass
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    @staticmethod
    def __new__(self,publicKeyToken,name,version,processorArchitecture,culture):
        """ __new__(cls: type,publicKeyToken: Array[Byte],name: str,version: Version,processorArchitecture: str,culture: str) """
        pass
    def __ne__(self,*args):
        """ x.__ne__(y) <==> x!=y """
        pass
    # Read-only .NET properties exposed through Python property objects.
    Culture=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets a string representing the culture information for the application.
    Get: Culture(self: ApplicationId) -> str
    """
    Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the name of the application.
    Get: Name(self: ApplicationId) -> str
    """
    ProcessorArchitecture=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the target processor architecture for the application.
    Get: ProcessorArchitecture(self: ApplicationId) -> str
    """
    PublicKeyToken=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the public key token for the application.
    Get: PublicKeyToken(self: ApplicationId) -> Array[Byte]
    """
    Version=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the version of the application.
    Get: Version(self: ApplicationId) -> Version
    """
| StarcoderdataPython |
1944386 | <reponame>Sieva-cmd/Myblog<filename>config.py
import os
class Config:
    """Base Flask configuration shared by all environments."""
    RANDOM_QUOTE_API_KEY =os.environ.get('RANDOM_QUOTE_API_KEY')
    SECRET_KEY =os.environ.get('SECRET_KEY')
    # NOTE(review): database credentials are hard-coded in source control;
    # consider reading this URI from an environment variable instead.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:sieva@localhost/blog'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    UPLOADED_PHOTOS_DEST ='app/static/photos'
    # Mail settings kept for reference; currently disabled.
    # MAIL_SERVER = 'smtp.googlemail.com'
    # MAIL_PORT = 587
    # MAIL_USE_TLS = True
    # MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    # MAIL_PASSWORD = <PASSWORD>("MAIL_PASSWORD")
class ProdConfig(Config):
    """Production configuration; no overrides beyond the Config defaults yet."""
    pass
class DevConfig(Config):
    """Development configuration: enables Flask's debug mode."""
    # Flask only honors UPPERCASE config keys, so the old ``Debug = True``
    # attribute was silently ignored; ``DEBUG`` is the key Flask reads.
    DEBUG = True
    Debug = True  # kept for backward compatibility with any existing references
# Map environment names to their config classes (used by the app factory).
config_options ={
    'development':DevConfig,
    'production':ProdConfig
}
3493528 | """Riemannian Adam optimizer implementation.
Becigneul, Gary, and <NAME>. "Riemannian Adaptive Optimization
Methods." International Conference on Learning Representations. 2018.
"""
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import gen_training_ops
from keras.optimizer_v2.optimizer_v2 import OptimizerV2
from tensorflow_riemopt.variable import get_manifold
@generic_utils.register_keras_serializable(name="RiemannianAdam")
class RiemannianAdam(OptimizerV2):
"""Optimizer that implements the Riemannian Adam algorithm."""
_HAS_AGGREGATE_GRAD = True
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
stabilize=None,
name="RiemannianAdam",
**kwargs,
):
"""Construct a new Riemannian Adam optimizer.
Becigneul, Gary, and <NAME>. "Riemannian Adaptive
Optimization Methods." International Conference on Learning
Representations. 2018.
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that
takes no arguments and returns the actual value to use, The learning
rate. Defaults to 0.001.
beta_1: A float value or a constant float tensor, or a callable that takes
no arguments and returns the actual value to use. The exponential decay
rate for the 1st moment estimates. Defaults to 0.9.
beta_2: A float value or a constant float tensor, or a callable that takes
no arguments and returns the actual value to use, The exponential decay
rate for the 2nd moment estimates. Defaults to 0.999.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond". Defaults to `False`.
stabilize: Project variables back to manifold every `stabilize` steps.
Defaults to `None`.
name: Optional name for the operations created when applying gradients.
Defaults to "RiemannianAdam".
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super(RiemannianAdam, self).__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("decay", self._initial_decay)
self._set_hyper("beta_1", beta_1)
self._set_hyper("beta_2", beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self.amsgrad = amsgrad
self.stabilize = stabilize
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, "m")
for var in var_list:
self.add_slot(var, "v")
if self.amsgrad:
for var in var_list:
self.add_slot(var, "vhat")
def _prepare_local(self, var_device, var_dtype, apply_state):
super(RiemannianAdam, self)._prepare_local(
var_device, var_dtype, apply_state
)
local_step = math_ops.cast(self.iterations + 1, var_dtype)
beta_1_t = array_ops.identity(self._get_hyper("beta_1", var_dtype))
beta_2_t = array_ops.identity(self._get_hyper("beta_2", var_dtype))
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_power = math_ops.pow(beta_2_t, local_step)
lr = apply_state[(var_device, var_dtype)]["lr_t"] * (
math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)
)
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
beta_2_power=beta_2_power,
one_minus_beta_2_t=1 - beta_2_t,
)
)
def set_weights(self, weights):
params = self.weights
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[: len(params)]
super(RiemannianAdam, self).set_weights(weights)
@def_function.function(experimental_compile=True)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
manifold = get_manifold(var)
grad = manifold.egrad2rgrad(var, grad)
alpha = (
coefficients["lr_t"]
* math_ops.sqrt(1 - coefficients["beta_2_power"])
/ (1 - coefficients["beta_1_power"])
)
m.assign_add((grad - m) * (1 - coefficients["beta_1_t"]))
v.assign_add(
(manifold.inner(var, grad, grad, keepdims=True) - v)
* (1 - coefficients["beta_2_t"])
)
if self.amsgrad:
vhat = self.get_slot(var, "vhat")
vhat.assign(math_ops.maximum(vhat, v))
v = vhat
var_t = manifold.retr(
var, -(m * alpha) / (math_ops.sqrt(v) + coefficients["epsilon"])
)
m.assign(manifold.transp(var, var_t, m))
var.assign(var_t)
if self.stabilize is not None:
self._stabilize(var)
@def_function.function(experimental_compile=True)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
manifold = get_manifold(var)
grad = manifold.egrad2rgrad(var, grad)
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * coefficients["one_minus_beta_1_t"]
m_t_values = (
array_ops.gather(m, indices) * coefficients["beta_1_t"]
+ m_scaled_g_values
)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (
manifold.inner(var, grad, grad, keepdims=True)
* coefficients["one_minus_beta_2_t"]
)
v_t_values = (
array_ops.gather(v, indices) * coefficients["beta_2_t"]
+ v_scaled_g_values
)
if self.amsgrad:
vhat = self.get_slot(var, "vhat")
vhat.scatter_max(ops.IndexedSlices(v_t_values, indices))
v_t_values = array_ops.gather(vhat, indices)
var_values = array_ops.gather(var, indices)
var_t_values = manifold.retr(
var_values,
-(m_t_values * coefficients["lr"])
/ (math_ops.sqrt(v_t_values) + coefficients["epsilon"]),
)
m_t_transp = manifold.transp(var_values, var_t_values, m_t_values)
m.scatter_update(ops.IndexedSlices(m_t_transp, indices))
v.scatter_update(ops.IndexedSlices(v_t_values, indices))
var.scatter_update(ops.IndexedSlices(var_t_values, indices))
if self.stabilize is not None:
self._stabilize(var)
@def_function.function(experimental_compile=True)
def _stabilize(self, var):
if math_ops.floor_mod(self.iterations, self.stabilize) == 0:
manifold = get_manifold(var)
m = self.get_slot(var, "m")
var.assign(manifold.projx(var))
m.assign(manifold.proju(var, m))
def get_config(self):
config = super(RiemannianAdam, self).get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
"learning_rate"
),
"decay": self._serialize_hyperparameter("decay"),
"beta_1": self._serialize_hyperparameter("beta_1"),
"beta_2": self._serialize_hyperparameter("beta_2"),
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
"stabilize": self.stabilize,
}
)
return config
| StarcoderdataPython |
6420135 | <reponame>dougn/trtis-cidmgr
# Copyright (c) 2019, <NAME>. All rights reserved.
import sys, os, os.path
import argparse
import shutil
from . import util
from .version import __version__
_LIB= util.name2lib('cidmgr')
_template = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'config.pbtxt.in')
def dirtype(dirname):
    """argparse type: expand *dirname* and require it to be an existing directory."""
    full_path = util.expand(dirname)
    # os.path.isdir is False for non-existent paths, so one check suffices.
    if os.path.isdir(full_path):
        return full_path
    raise argparse.ArgumentTypeError('Could not find directory.')
# Command-line interface.  The ``version=`` keyword of ArgumentParser was
# removed in Python 3 (it raised TypeError there); expose the version via an
# explicit ``--version`` action instead, which also works on Python 2.7.
parser = argparse.ArgumentParser(add_help=True,
    description="""Install the cidmgr model into the trtserver model-store.
This tool does not contain the custom backend shared library.
You can specify the --i option to search for this library on the system
and if found, install that. Otherwise this tool will just set up the
directory structure and the config.pbtxt in the TRTIS Model Repository
(a.k.a. model-store.)
""")
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("store", metavar='model-store', type=dirtype,
                    help="trtserver model repository directory")
parser.add_argument("-o", "--overwrite", action='store_true',
                    help="overwrite to an existing model of NAME if present")
parser.add_argument("-n", "--name", nargs='?', default="cidmgr",
                    help="model name (DEFAULT: cidmgr)")
parser.add_argument("-m", "--modver", dest='version',
                    nargs='?', type=int, default=1,
                    help="model version (DEFAULT: 1)")
parser.add_argument("-l", "--library", nargs='?', default=_LIB,
                    help="model version (DEFAULT: "+_LIB+")")
parser.add_argument("-i", "--install", action='store_true',
                    help="search for LIBRARY and install in the model")
parser.add_argument("-p", "--path", nargs="+", default=[],
                    help="additional search paths for finding LIBRARY (implicit '-i')")
def main():
    """Install the cidmgr model: create the model/version dirs, render
    config.pbtxt from the template, and optionally copy the backend library."""
    args = parser.parse_args()
    if args.path:
        # Extra search paths imply the install step.
        args.install=True
    modeldir = os.path.join(args.store, args.name)
    modelvdir = os.path.join(modeldir, str(args.version))
    modlib = None
    if args.install:
        modlib = util.find_library(args.library, args.path)
        if not modlib:
            parser.error('Could not find library to install: ' + args.library)
        # overwrite library name to get the exact library name found.
        # e.g.: -l foo becomes libfoo.so.0.1
        args.library = os.path.basename(modlib)
    if os.path.exists(modeldir):
        if not os.path.isdir(modeldir):
            parser.error("Supplied model directory exists "
                "but is not a directory:\n "+ modeldir)
        elif not args.overwrite:
            parser.error("Model directory already exists:\n "+
                modeldir+"\nUse '-o' to overwrite")
    else:
        os.mkdir(modeldir)
    if not os.path.exists(modelvdir):
        os.mkdir(modelvdir)
    _config = os.path.join(modeldir, 'config.pbtxt')
    # Render the config template with the model name and library filename.
    # NOTE(review): mode 'rU' is deprecated (removed in Python 3.11); 'r'
    # with universal newlines is the default — confirm before changing.
    with open(_template, 'rU') as t:
        template = t.read()
    config = template % (args.name, args.library)
    with open(_config, 'w') as c:
        c.write(config)
    print("Wrote config: "+_config)
    if modlib:
        # Copy the located backend library into the model version directory.
        dest = os.path.join(modelvdir, args.library)
        shutil.copyfile(modlib, dest)
        print("Wrote custom backend: " + dest)
if __name__ == '__main__':
    main()
1858539 |
#
# Convert a csv to a multiline json
# utility to convert also with the goal to use the
# json2cerberus utility afterwatrs to create a schema from a csv
# (csv->json->schema)
#
# khz 2018
# update: 18.3.2019: added option to convert str types to int and float. See --convstr
import csv
import simplejson as json
import click
@click.command()
@click.option('--infile', help='csv input filename')
@click.option('--convstr', default=False, is_flag=True, help='try to convert strings to numbers first')
@click.option('--skipcol', multiple=True, help='skip a column by column name')
@click.option('--startrow', default=1, help='set the startrow number. [first=1]')
@click.option('--skiprow', multiple=True, help='skip row by rownumber')
@click.option('--delimiter', default=",", help='csv delimiter')
def csv_to_json(infile, convstr, delimiter, skipcol, startrow, skiprow):
    """Convert a CSV file to a JSON array of row objects in <infile>.json.

    Fixes vs. the previous version:
    * the row counter was only incremented for emitted rows, so with the
      default --startrow=1 the condition never became true and the output
      was always an empty list;
    * rows are numbered from 1 and emitted from --startrow onward
      (inclusive, matching the "[first=1]" help text);
    * the --skiprow option was accepted but never applied;
    * input/output files are now closed via context managers.
    """
    with open(infile, 'r') as csvfile, open(infile + '.json', 'w') as jsonfile:
        reader = csv.reader(csvfile, delimiter=delimiter)
        # Consume the header row to obtain the column names.
        columns = next(reader)
        print(columns)
        reader = csv.DictReader(csvfile, columns, delimiter=delimiter)
        olist = []
        rowcount = 1
        for row in reader:
            if rowcount >= startrow and str(rowcount) not in skiprow:
                print(row)
                if convstr:
                    # Try int first, then float; leave the string otherwise.
                    for elem in row:
                        if isinstance(row[elem], str):
                            try:
                                row[elem] = int(row[elem])
                            except (TypeError, ValueError):
                                try:
                                    row[elem] = float(row[elem])
                                except (TypeError, ValueError):
                                    pass
                drow = dict(row)
                for col in skipcol:
                    # Silently ignore skip columns that are not present.
                    drow.pop(col, None)
                olist.append(drow)
            # Count every data row, including skipped ones.
            rowcount += 1
        json.dump(olist, jsonfile)
if __name__ == "__main__":
csv_to_json()
| StarcoderdataPython |
3534474 | <filename>tracewing.py
import os
import sys

import numpy as np
from astropy.io import ascii
from matplotlib import pyplot
from matplotlib.patches import Polygon

import kinematics
import lacewing
def traceback(argv=None):
    """Trace stars back in time and plot their distance to a moving group.

    Python 2 code (uses ``xrange`` and print statements).  Expects argv as:
    [_, star csv file, group name, method (ballistic|epicyclic|potential),
    group age, group age 2, timespan (Myr), number of Monte Carlo iterations].
    Writes one traceback plot per star into a directory named after the group.
    """
    if argv is None:
        argv = sys.argv
    name,coord,era,edec,pmra,epmra,pmdec,epmdec,rv,erv,plx,eplx,note = lacewing.csv_loader(argv[1])
    withmgp = argv[2].replace(' ','_')
    method = argv[3]
    mgpage = argv[4]
    mgpage2 = argv[5]
    timespan = float(argv[6])
    # Negative step: tracing backward in time in 0.1 Myr increments.
    timestep = -0.1
    # NOTE(review): np.int is deprecated in modern NumPy; int() would do.
    n_int = np.int(argv[7])
    # load the association for comparison
    mgp = ascii.read('Moving_Group_'+withmgp+'_'+method+'.dat')
    #print timespan/timestep
    # NOTE(review): timespan/timestep is negative, so these slices drop the
    # last |timespan/timestep| entries rather than taking a prefix — confirm
    # this matches the layout of the Moving_Group_*.dat tables.
    time = mgp['Time'][0:int(timespan/timestep)]
    mgpx = mgp['X'][0:int(timespan/timestep)]
    mgpy = mgp['Y'][0:int(timespan/timestep)]
    mgpz = mgp['Z'][0:int(timespan/timestep)]
    mgpa = mgp['A'][0:int(timespan/timestep)]
    mgpea = mgp['eA'][0:int(timespan/timestep)]
    mgpb = mgp['B'][0:int(timespan/timestep)]
    mgpeb = mgp['eB'][0:int(timespan/timestep)]
    mgpc = mgp['C'][0:int(timespan/timestep)]
    mgpec = mgp['eC'][0:int(timespan/timestep)]
    # AR 2013.1122 The equivalent radius should be a RADIUS (4/3 pi a b c=4/3 pi r^3)
    mgprad = (mgpa * mgpb * mgpc)**(1./3.)
    mgpradmin = ((mgpa - mgpea) * (mgpb - mgpeb) * (mgpc - mgpec))**(1./3.)
    mgpradmax = ((mgpa + mgpea) * (mgpb + mgpeb) * (mgpc + mgpec))**(1./3.)
    # AR 2014.0321 This is to prevent ill-behaved associations like Argus from screwing up everything.
    mgpradmin[np.where(np.logical_not(np.isfinite(mgpradmin)))] = 0.0
    # Only stars with a complete kinematic solution (pm, parallax, rv) can be traced.
    good_stars = [x for x in xrange(len(coord)) if ((pmra[x] is not None) & (pmdec[x] is not None) & (plx[x] is not None) & (rv[x] is not None))]
    n_stars = len(good_stars)
    # Make directory for the moving group
    os.mkdir(withmgp)
    for i in good_stars:
        #print name[i],coord[i].ra.degree,era[i],coord[i].dec.degree,edec[i],plx[i],eplx[i],pmra[i],epmra[i],pmdec[i],epmdec[i],rv[i],erv[i]
        print '({0:2d}) {1:16} {2:08.4f} {3:+07.4f} {4:6.2f} {5:+.4f} {6:+.4f} {7:+6.2f}'.format(i,name[i],coord[i].ra.degree,coord[i].dec.degree,1/plx[i],pmra[i],pmdec[i],rv[i])
        # Row 0 holds the zero-error trace; rows 1-3 the 1/2/3-sigma envelopes.
        sigmin = np.zeros((4,len(time)))+99999.99
        sigmax = np.zeros((4,len(time)))
        if method == 'ballistic':
            px,py,pz = kinematics.ballistic_uniform(coord[i].ra.degree,0,coord[i].dec.degree,0,1/plx[i],0,pmra[i],0,pmdec[i],0,rv[i],0,timespan,timestep,1)
        elif method == 'epicyclic':
            px,py,pz = kinematics.epicyclic_uniform(coord[i].ra.degree,0,coord[i].dec.degree,0,1/plx[i],0,pmra[i],0,pmdec[i],0,rv[i],0,timespan,timestep,1)
        elif method == 'potential':
            px,py,pz = kinematics.potential_uniform(coord[i].ra.degree,0,coord[i].dec.degree,0,1/plx[i],0,pmra[i],0,pmdec[i],0,rv[i],0,timespan,timestep,1)
        distance = np.sqrt((px-mgpx)**2 + (py-mgpy)**2 + (pz-mgpz)**2)
        sigmin[0] = distance
        sigmax[0] = distance
        # now run monte carlos in three distributions
        for k in [1,2,3]:
            # Scale all measurement uncertainties by k sigma for this envelope.
            if method == 'ballistic':
                px,py,pz = kinematics.ballistic_uniform(coord[i].ra.degree,era[i]*k,coord[i].dec.degree,edec[i]*k,1/plx[i],k*eplx[i]/(plx[i]**2),pmra[i],epmra[i]*k,pmdec[i],epmdec[i]*k,rv[i],erv[i]*k,timespan,timestep,n_int)
            elif method == 'epicyclic':
                px,py,pz = kinematics.epicyclic_uniform(coord[i].ra.degree,era[i]*k,coord[i].dec.degree,edec[i]*k,1/plx[i],k*eplx[i]/(plx[i]**2),pmra[i],epmra[i]*k,pmdec[i],epmdec[i]*k,rv[i],erv[i]*k,timespan,timestep,n_int)
            elif method == 'potential':
                px,py,pz = kinematics.potential_uniform(coord[i].ra.degree,era[i]*k,coord[i].dec.degree,edec[i]*k,1/plx[i],k*eplx[i]/(plx[i]**2),pmra[i],epmra[i]*k,pmdec[i],epmdec[i]*k,rv[i],erv[i]*k,timespan,timestep,n_int)
            # We must rotate these so we are slicing across time, not different stars
            px = np.rot90(px,3)
            py = np.rot90(py,3)
            pz = np.rot90(pz,3)
            # loop through time
            for j in range(len(time)):
                distance = np.sqrt((px[j]-mgpx[j])**2 + (py[j]-mgpy[j])**2 + (pz[j]-mgpz[j])**2)
                sigmin[k,j] = np.amin(distance)
                sigmax[k,j] = np.amax(distance)
        #print sigmin[1]
        # Build closed polygons (forward along min, back along max) for the
        # 1/2/3-sigma envelopes, the group radius band, and the age spread.
        x = np.concatenate((time,time[::-1],[time[0]]))
        y1 = np.concatenate((sigmin[1],sigmax[1][::-1],[sigmin[1][0]]))
        y2 = np.concatenate((sigmin[2],sigmax[2][::-1],[sigmin[2][0]]))
        y3 = np.concatenate((sigmin[3],sigmax[3][::-1],[sigmin[3][0]]))
        mg = np.concatenate((np.zeros_like(mgpradmin),mgpradmax[::-1],[0]))
        fig = pyplot.figure(figsize=(7,5))
        ax = fig.add_subplot(111)
        poly1 = Polygon(zip(x,y1),facecolor="#000000",edgecolor='none',alpha=0.25)
        poly2 = Polygon(zip(x,y2),facecolor="#1F1F1F",edgecolor='none',alpha=0.25)
        poly3 = Polygon(zip(x,y3),facecolor="#3F3F3F",edgecolor='none',alpha=0.25)
        polym = Polygon(zip(x,mg),facecolor="#FF0000",edgecolor='none',alpha=0.3)
        polya = Polygon(zip([mgpage,mgpage,mgpage2,mgpage2,mgpage],[0,500,500,0,0]),facecolor="#0000FF",edgecolor='none',alpha=0.3)
        ax.add_patch(poly3)
        ax.add_patch(poly2)
        ax.add_patch(poly1)
        ax.add_patch(polym)
        ax.add_patch(polya)
        ax.plot(time,sigmin[0],color="#000000")
        ax.plot(time,mgprad,color="#FF0000")
        ax.set_xlim(0,timespan)
        ax.set_ylim(0,200)
        ax.set_xlabel('Time (Myr)')
        ax.set_ylabel('Distance between Star and Moving Group (pc)')
        #ax.set_title('Traceback for {0:} and {3:}, {4:}'.format(name,ra,dec,withmgp,method))
        #ax.xaxis.set_ticks(np.arange(0,timespan,20))
        ax.yaxis.set_ticks(np.arange(0,200,50))
        ax.grid(b=True,which='minor', color="#EFEFEF")
        # In-plot legend rendered as colored text labels.
        ax.text(timespan/20.,170,'Real Values',color="#000000")
        ax.text(timespan/20.,160,'1 $\sigma$',color="#9F9F9F")
        ax.text(timespan/20.,150,'2 $\sigma$',color="#BFBFBF")
        ax.text(timespan/20.,140,'3 $\sigma$',color="#DFDFDF")
        ax.text(timespan/20.,130,'Moving Group Volume-Radius (pc)',color="#FF0000")
        ax.text(timespan/20.,120,'Group Age Spread',color="#0000FF")
        #ax.vlines([mgpage],0,500)
        pyplot.savefig("{3:}/traceback_{0:}_{1:07.3f}_{2:+07.3f}_{3:}_{4:}.png".format(name[i].replace(' ','_'),coord[i].ra.degree,coord[i].dec.degree,withmgp,method),dpi=100)
        pyplot.clf()
        pyplot.close()
if __name__ == "__main__":
if len(sys.argv) == 0:
print "tracewing.py <inputfile> <group> <method> <minage> <maxage> <maxplotage> <iterations>"
else:
traceback()
| StarcoderdataPython |
126686 | <filename>hubblestack/utils/signing.py
# coding: utf-8
"""
hubblestack/utils/signing.py is a collection of tools that facility repo
signing and verification.
The settings for the signing and verification (and their defaults) are as
follows.
repo_signing:
# defaults
require_verify: false
ca_crt: /etc/hubble/sign/ca-root.crt
public_crt: /etc/hubble/sign/public.crt
private_key: /etc/hubble/sign/private.key
# alternatively, ca_crt can be a list
ca_crt:
# there should be exactly one trusted cert
# (only the first cert found in this file will count)
- /etc/hubble/sign/ca-root.crt
# all additional entries in the list (and all certs in each file)
# will be included as untrusted certs; wqich (if a path can be found
# to the root) may become trusted before verification. Normally these
# would be intermediate or chain certs.
- /etc/hubble/sign/untrusted.crt
For verification purposes, only the ca_crt and the public_crt are required. The
private key is only used for actually signing repos.
Signing can be accomplished with shell scripting and openssl packages. Only the
MANIFEST file is signed (contained in the SIGNATURE file). The MANIFEST is
simply a list of files preceeded by a SHA-256 hash digest.
To sign a repo simply (without having to write shell scripts, etc), issue
something like the following in a repo root.
hubble signing.msign ./sign.this.file ./and-this-dir/
"""
import os
import getpass
import logging
import re
import json
import inspect
import cStringIO
from collections import OrderedDict, namedtuple
# In any case, pycrypto won't do the job. The below requires pycryptodome.
# (M2Crypto is the other choice; but the docs are weaker, mostly non-existent.)
from Crypto.IO import PEM
from Crypto.Util import asn1
from Crypto.Hash import SHA256
import OpenSSL.crypto as ossl
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, utils
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_pem_private_key
# Matches one MANIFEST line: "<hex sha-256 digest> <filename>".
MANIFEST_RE = re.compile(r'^\s*(?P<digest>[0-9a-fA-F]+)\s+(?P<fname>.+)$')
log = logging.getLogger(__name__)
class STATUS:
    """String constants naming the possible verification outcomes."""
    FAIL = 'fail'          # signature or digest check positively failed
    VERIFIED = 'verified'  # signature checked out against a trusted certificate
    UNKNOWN = 'unknown'    # could not be verified either way (missing info)
class Options(object):
    """
    The Options class is simply a convenience interface for interacting with repo_signing options.
    Instead of `__salt__['config.get']('repo_signing:public_crt')`, write `Options.public_crt`.
    """
    class defaults:
        # Fallback values used when `repo_signing:<name>` is not configured.
        require_verify = False
        ca_crt = '/etc/hubble/sign/ca-root.crt'
        public_crt = '/etc/hubble/sign/public.crt'
        private_key = '/etc/hubble/sign/private.key'
    def __getattribute__(self, name):
        """ If the option exists in the default pseudo meta class
        Try to find the option with config.get under repo_signing.
        Failing that, return the default from the pseudo meta class.
        If the option name isn't in the defaults, raise the exception.
        """
        try:
            # Real attributes (including `defaults` itself) win outright.
            return object.__getattribute__(self, name)
        except AttributeError:
            pass
        try:
            default = getattr(self.defaults, name)
            # NOTE(review): if the salt dunder `__salt__` has not been injected
            # yet, this raises NameError (not AttributeError) — confirm callers
            # only touch Options after loader initialization.
            return __salt__['config.get']('repo_signing:{}'.format(name), default)
        # except NameError:
        #     # __salt__ isn't defined: return the default?
        #     # should we just raise an exception cuz this was called too early??
        #     return default
        except AttributeError:
            raise
# Replace the class with a singleton instance so plain attribute access
# (Options.public_crt) goes through __getattribute__ above.
Options = Options()
def split_certs(fh):
    """Yield decoded PEM objects found in the open file-like `fh`.

    Lines are accumulated from a '----BEGIN ...' marker through the matching
    '----END ...' marker; each complete block is decoded first as an X509
    certificate and, failing that, as a PEM-encoded private key.
    """
    ret = None
    for line in fh.readlines():
        if ret is None:
            # Skip noise until a '----BEGIN ...' marker starts a PEM block.
            if line.startswith('----'):
                ret = line
        else:
            ret += line
            if line.startswith('----'):
                # '----END ...' closes the block: decode and reset.
                try:
                    yield ossl.load_certificate(ossl.FILETYPE_PEM, ret)
                except Exception as e:
                    log.debug('decoding item as certificate failed: %s; trying as PEM encoded private key', e)
                    yield load_pem_private_key(ret, password=None, backend=default_backend())
                ret = None
def read_certs(*fnames):
    """Yield certificate/key objects from each source in `fnames`.

    A source that itself looks like PEM text (starts with '--' and contains a
    newline) is parsed inline; otherwise it is treated as a path and parsed
    from disk when the file exists. Unreadable files are logged and skipped.
    """
    for source in fnames:
        inline_pem = source.strip().startswith('--') and '\x0a' in source
        if inline_pem:
            for item in split_certs(cStringIO.StringIO(source)):
                yield item
        elif os.path.isfile(source):
            try:
                with open(source, 'r') as handle:
                    for item in split_certs(handle):
                        yield item
            except Exception as e:
                log.error('error while reading "%s": %s', source, e)
class X509AwareCertBucket:
    """
    A wrapper around the various operations required to verify certificate authenticity.
    We assume the `Options.ca_crt` is correct. We can check that the signature
    is valid, that the signature was generated by the given public.crt and that
    the public.crt is signed by the ca.crt.
    """
    PublicCertObj = namedtuple('PublicCertObj', ['crt', 'txt', 'status'])
    public_crt = tuple()
    def authenticate_cert(self):
        # Aggregate status across all leaf certs: any FAIL wins, then
        # all-VERIFIED, otherwise UNKNOWN (note: empty public_crt yields
        # VERIFIED, since all() of an empty sequence is True).
        if any( x.status == STATUS.FAIL for x in self.public_crt ):
            return STATUS.FAIL
        if all( x.status == STATUS.VERIFIED for x in self.public_crt ):
            return STATUS.VERIFIED
        return STATUS.UNKNOWN
    def __init__(self, public_crt, ca_crt):
        """Build an X509 store from `ca_crt` (first entry trusted; the rest
        treated as untrusted intermediates that may become trusted if they
        chain to the root) and classify every cert in `public_crt` as
        VERIFIED / UNKNOWN / FAIL against that store.
        """
        try:
            import hubblestack.pre_packaged_certificates as HPPC
            # iff we have hardcoded certs then we're meant to ignore any other
            # configured value
            if hasattr(HPPC, 'public_crt'):
                log.debug('using pre-packaged-public_crt')
                public_crt = HPPC.public_crt
            if hasattr(HPPC, 'ca_crt'):
                log.debug('using pre-packaged-ca_crt')
                ca_crt = HPPC.ca_crt
        except ImportError:
            pass
        if isinstance(ca_crt, (list, tuple)):
            untrusted_crt = ca_crt[1:]
            ca_crt = ca_crt[0]
        else:
            untrusted_crt = list()
        if not isinstance(public_crt, (list, tuple)):
            public_crt = [ public_crt ]
        self.store = ossl.X509Store()
        self.trusted = list()
        # NOTE: trusted is mostly useless. We do use it in
        # testing, and that's probably about it
        # `already` de-duplicates certs by SHA-1 digest across all three loops.
        already = set()
        for c in read_certs(ca_crt):
            d = c.digest('sha1')
            if d in already:
                continue
            already.add(d)
            d += " " + stringify_ossl_cert(c)
            log.debug('adding %s as a trusted certificate approver', d)
            self.store.add_cert(c)
            self.trusted.append(d)
        for c in read_certs(*untrusted_crt):
            d = c.digest('sha1')
            if d in already:
                continue
            already.add(d)
            d += " " + stringify_ossl_cert(c)
            log.debug('checking to see if %s is trustworthy', d)
            try:
                # Only promote an intermediate into the store if it chains to
                # something already trusted.
                ossl.X509StoreContext(self.store, c).verify_certificate()
                self.store.add_cert(c)
                self.trusted.append(d)
                log.debug(' added to verify store')
            except ossl.X509StoreContextError as e:
                log.debug(' not trustworthy: %s', e)
        self.public_crt = list()
        for c in read_certs(*public_crt):
            status = STATUS.FAIL
            d = c.digest('sha1')
            if d in already:
                continue
            already.add(d)
            d += " " + stringify_ossl_cert(c)
            log.debug('checking to see if %s is a valid leaf cert', d)
            try:
                ossl.X509StoreContext(self.store, c).verify_certificate()
                status = STATUS.VERIFIED
                self.trusted.append(d)
                log.debug(' marking verified')
            except ossl.X509StoreContextError as e:
                code, depth, message = e.args[0]
                log.debug('authentication of %s failed: code=%s depth=%s, message=%s',
                    d, code, depth, message)
                # from openssl/x509_vfy.h
                # define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT 2
                # define X509_V_ERR_UNABLE_TO_GET_CRL 3
                # define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY 20
                # define X509_V_ERR_CERT_UNTRUSTED 27
                # define X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER 33
                if code in (2,3,20,27,33):
                    # we just don't have the required info, it's not failing to
                    # verify not exactly, but it's definitely not verified
                    # either
                    log.debug(' code=%d is alright-ish though. setting status to UNKNOWN', code)
                    status = STATUS.UNKNOWN
                # define X509_V_ERR_CERT_HAS_EXPIRED 10
                # define X509_V_ERR_CRL_HAS_EXPIRED 12
                # XX  # if code in (10,12):
                # XX  #     return  # .... is this even the right idea? should we do this through conext flags?
            self.public_crt.append(self.PublicCertObj(c, d, status))
def stringify_ossl_cert(c):
    """Render a cert subject as 'K=V/K=V'; lists/tuples are ', '-joined."""
    if isinstance(c, (list, tuple)):
        return ', '.join(stringify_ossl_cert(item) for item in c)
    pairs = ['='.join(component) for component in c.get_subject().get_components()]
    return '/'.join(pairs)
def jsonify(obj, indent=2):
    """Serialize `obj` to a JSON string using `indent` spaces of indentation."""
    rendered = json.dumps(obj, indent=indent)
    return rendered
def normalize_path(path, trunc=None):
    """Collapse '.', '..' and repeated separators in `path`; when `trunc` is
    given, also strip it (with any surrounding separators) from the front.

    The first matching prefix form wins, checked from most to least specific.
    """
    norm = os.path.normpath(path)
    if trunc:
        sep = os.path.sep
        prefix_forms = (
            (sep + trunc + sep, len(trunc) + 2),
            (trunc + sep, len(trunc) + 1),
            (sep + trunc, len(trunc) + 1),
            (trunc, len(trunc)),
        )
        for prefix, cut in prefix_forms:
            if norm.startswith(prefix):
                norm = norm[cut:]
                break
    return norm
def hash_target(fname, obj_mode=False, chosen_hash=None):
    """Hash the contents of `fname` with SHA-256 (or `chosen_hash`).

    Returns the hex digest string by default; with obj_mode=True, returns the
    (hasher, chosen_hash) pair so callers can finalize it themselves (used by
    the signing path). A missing file hashes as empty input rather than raising.
    """
    if chosen_hash is None:
        chosen_hash = hashes.SHA256()
    hasher = hashes.Hash(chosen_hash, default_backend())
    if os.path.isfile(fname):
        with open(fname, 'rb') as fh:
            chunk = fh.read(1024)
            while chunk:
                hasher.update(chunk)
                chunk = fh.read(1024)
    if obj_mode:
        return hasher, chosen_hash
    digest = hasher.finalize()
    hex_digest = ''.join('{:02x}'.format(ord(ch)) for ch in digest)
    log.debug('hashed %s: %s', fname, hex_digest)
    return hex_digest
def descend_targets(targets, cb):
    """Invoke cb(path) for every file in `targets`, recursing into directories."""
    for target in targets:
        if os.path.isfile(target):
            cb(target)
        if os.path.isdir(target):
            for dirpath, _dirnames, filenames in os.walk(target):
                for leaf in filenames:
                    cb(os.path.join(dirpath, leaf))
def manifest(targets, mfname='MANIFEST'):
    """Write `mfname` with one '<sha256 digest> <normalized name>' line per
    file found under `targets` (files and directories, recursively)."""
    with open(mfname, 'w') as out:
        def record(path):
            # Normalize first so the name written (and hashed) is stable.
            clean = normalize_path(path)
            digest = hash_target(clean)
            out.write('{} {}\n'.format(digest, clean))
            log.debug('wrote %s %s to %s', digest, clean, mfname)
        descend_targets(targets, record)
def sign_target(fname, ofname, private_key='private.key', **kw):
    """
    Sign a given `fname` and write the signature to `ofname`.
    """
    # read_certs must yield exactly one key here; the trailing comma unpacks
    # (and enforces) a single-item result.
    k0, = read_certs(private_key)
    hasher, chosen_hash = hash_target(fname, obj_mode=True)
    args = { 'data': hasher.finalize() }
    if isinstance(k0, rsa.RSAPrivateKey):
        # RSA requires explicit PSS padding, and Prehashed because we pass the
        # finalized digest rather than the raw file data.
        args['padding'] = padding.PSS( mgf=padding.MGF1(hashes.SHA256()),
            salt_length=padding.PSS.MAX_LENGTH)
        args['algorithm'] = utils.Prehashed(chosen_hash)
    sig = k0.sign(**args)
    with open(ofname, 'w') as fh:
        log.debug('writing signature of %s to %s', os.path.abspath(fname), os.path.abspath(ofname))
        fh.write(PEM.encode(sig, 'Detached Signature of {}'.format(fname)))
        fh.write('\n')
def verify_signature(fname, sfname, public_crt='public.crt', ca_crt='ca-root.crt', **kw):
    """
    Given the fname, sfname public_crt and ca_crt:
        return STATUS.FAIL if the signature doesn't match
        return STATUS.UNKNOWN if the certificate signature can't be verified with the ca cert
        return STATUS.VERIFIED if both the signature and the CA sig match
    """
    log.debug("verify_signature(fname=%s, sfname=%s, public_crt=%s, ca_crt=%s", fname, sfname, public_crt, ca_crt)
    try:
        with open(sfname, 'r') as fh:
            sig,_,_ = PEM.decode(fh.read()) # also returns header and decrypted-status
    except IOError:
        # A missing signature file is not an outright failure — we simply
        # cannot verify anything.
        log.info('verify_signature() failed to find sfname=%s for fname=%s', sfname, fname)
        return STATUS.UNKNOWN
    x509 = X509AwareCertBucket(public_crt, ca_crt)
    hasher, chosen_hash = hash_target(fname, obj_mode=True)
    digest = hasher.finalize()
    args = { 'signature': sig, 'data': digest }
    for crt,txt,status in x509.public_crt:
        log.debug('trying to check %s with %s', sfname, txt)
        pubkey = crt.get_pubkey().to_cryptography_key()
        if isinstance(pubkey, rsa.RSAPublicKey):
            args['padding'] = padding.PSS( mgf=padding.MGF1(hashes.SHA256()),
                salt_length=padding.PSS.MAX_LENGTH)
            args['algorithm'] = utils.Prehashed(chosen_hash)
        try:
            pubkey.verify(**args)
            # Signature matches this cert: report that cert's own trust status
            # (VERIFIED / UNKNOWN) as the result.
            return status
        except InvalidSignature:
            pass
    log.error('fname=%s failed signature check (sfname=%s)', fname, sfname)
    return STATUS.FAIL
def iterate_manifest(mfname):
    """Yield the normalized filename from every digest line of the MANIFEST
    file; non-matching lines are silently skipped."""
    with open(mfname, 'r') as fh:
        for raw_line in fh.readlines():
            hit = MANIFEST_RE.match(raw_line)
            if not hit:
                continue
            _digest, listed_fname = hit.groups()
            yield normalize_path(listed_fname)
def verify_files(targets, mfname='MANIFEST', sfname='SIGNATURE', public_crt='public.crt', ca_crt='ca-root.crt'):
    """ given a list of `targets`, a MANIFEST, and a SIGNATURE file:
    1. Check the signature of the manifest, mark the 'MANIFEST' item of the return as:
         STATUS.FAIL if the signature doesn't match
         STATUS.UNKNOWN if the certificate signature can't be verified with the ca cert
         STATUS.VERIFIED if both the signature and the CA sig match
    2. mark all targets as STATUS.UNKNOWN
    3. check the digest of each target against the manifest, mark each file as
         STATUS.FAIL if the digest doesn't match
         STATUS.*, the status of the MANIFEST file above
    return a mapping from the input target list to the status values (a dict of filename: status)
    """
    log.debug("verify_files(%s, mfname=%s, sfname=%s, public_crt=%s, ca_crt=%s", targets, mfname, sfname, public_crt, ca_crt)
    ret = OrderedDict()
    ret[mfname] = verify_signature(mfname, sfname=sfname, public_crt=public_crt, ca_crt=ca_crt)
    # ret[mfname] is the strongest claim we can make about the files we're
    # verifiying if they match their hash in the manifest, the best we can say
    # is whatever is the status of the manifest iteslf.
    # When MANIFEST and SIGNATURE share a directory, strip that directory
    # prefix from target names before matching against manifest entries.
    mf_dir, _ = os.path.split(mfname)
    sf_dir, _ = os.path.split(sfname)
    if mf_dir and mf_dir == sf_dir:
        trunc = mf_dir + '/'
    else:
        trunc = None
    # prepopulate digests with STATUS.UNKNOWN, skip things that shouldn't be
    # digested (MANIFEST, SIGNATURE, etc) and build a database mapping
    # normalized names back to given target names.
    xlate = dict()
    digests = OrderedDict()
    if not targets:
        targets = list(iterate_manifest(mfname))
    for otarget in targets:
        target = normalize_path(otarget, trunc=trunc)
        log.debug('found manifest for %s (%s)', otarget, target)
        if otarget != target:
            xlate[target] = otarget
        if target in digests or target in (mfname, sfname):
            continue
        digests[target] = STATUS.UNKNOWN
    # populate digests with the hashes from the MANIFEST
    if os.path.isfile(mfname):
        with open(mfname, 'r') as fh:
            for line in fh.readlines():
                matched = MANIFEST_RE.match(line)
                if matched:
                    digest,manifested_fname = matched.groups()
                    manifested_fname = normalize_path(manifested_fname)
                    if manifested_fname in digests:
                        digests[manifested_fname] = digest
    # compare actual digests of files (if they exist) to the manifested digests
    for vfname in digests:
        digest = digests[vfname]
        htname = os.path.join(trunc, vfname) if trunc else vfname
        if digest == STATUS.UNKNOWN:
            # digests[vfname] is either UNKNOWN (from the targets population)
            # or it's a digest from the MANIFEST. If UNKNOWN, we have nothing to compare
            # so we return UNKNOWN
            ret[vfname] = STATUS.UNKNOWN
        elif digest == hash_target(htname):
            # Cool, the digest matches, but rather than mark STATUS.VERIFIED,
            # we mark it with the same status as the MANIFEST it self --
            # presumably it's signed (STATUS.VERIFIED); but perhaps it's only
            # UNKNOWN or even FAIL.
            ret[vfname] = ret[mfname]
        else:
            # We do have a MANIFEST entry and it doesn't match: FAIL with or
            # without a matching SIGNATURE
            ret[vfname] = STATUS.FAIL
    # fix any normalized names so the caller gets back their specified targets
    for k,v in xlate.iteritems():
        ret[v] = ret.pop(k)
    return ret
#### wrappers:
def find_wrapf(not_found={'path': '', 'rel': ''}, real_path='path'):
    """
    Wrap a filesystem find_file function and return the original result if the
    MANIFEST and SIGNATURE indicate the file is valid. If the file is not verified
    and Options.require_verify is False (the default); but the file did not
    explicity fail to match the MANIFEST, continue to return the original find result.
    Otherwise, return a pretend not-found result instead of the original repo result.
    """
    # NOTE(review): mutable default argument `not_found` — safe here because
    # inner() returns a fresh dict(**not_found) copy, but a literal would be
    # more conventional.
    def wrapper(find_file_f):
        def _p(fnd):
            # Extract the on-disk path from a find_file result dict.
            return fnd.get(real_path, fnd.get('path', ''))
        def inner(path, saltenv, *a, **kw):
            # Locate the repo's MANIFEST/SIGNATURE alongside the requested file.
            f_mani = find_file_f('MANIFEST', saltenv, *a, **kw )
            f_sign = find_file_f('SIGNATURE', saltenv, *a, **kw )
            f_path = find_file_f(path, saltenv, *a, **kw)
            real_path = _p(f_path)
            mani_path = _p(f_mani)
            sign_path = _p(f_sign)
            log.debug('path=%s rpath=%s manifest=%s signature=%s',
                path, real_path, mani_path, sign_path)
            verify_res = verify_files([real_path],
                mfname=mani_path, sfname=sign_path,
                public_crt=Options.public_crt, ca_crt=Options.ca_crt)
            log.debug('verify: %s', dict(**verify_res))
            vrg = verify_res.get(real_path, STATUS.UNKNOWN)
            if vrg == STATUS.VERIFIED:
                return f_path
            if vrg == STATUS.UNKNOWN and not Options.require_verify:
                return f_path
            # Verification failed (or is required but absent): hide the file.
            log.debug('claiming not found')
            return dict(**not_found)
        return inner
    return wrapper
| StarcoderdataPython |
8175736 | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List, Tuple, Any, Dict
import numpy as np
from tensorflow import Tensor
import tensorflow as tf
# As from Tensorflow 2.6, keras is a separate package and some classes should be imported differently.
from model_compression_toolkit.common.quantization.candidate_node_quantization_config import \
CandidateNodeQuantizationConfig
from model_compression_toolkit.keras.quantizer.mixed_precision.selective_activation_quantizer import \
SelectiveActivationQuantizer
if tf.__version__ < "2.6":
from tensorflow.python.keras.layers import Layer
else:
from keras.engine.base_layer import Layer
from tensorflow.python.training.tracking.data_structures import ListWrapper
from tensorflow_model_optimization.python.core.quantization.keras.quantize_config import QuantizeConfig
from model_compression_toolkit.keras.quantizer.mixed_precision.selective_weights_quantizer import SelectiveWeightsQuantizer
class SelectiveQuantizeConfig(QuantizeConfig):
    """
    SelectiveQuantizeConfig to use as a QuantizeCong for layers that are wrapped
    for MP models. SelectiveQuantizeConfig holds a SelectiveWeightsQuantizer and uses
    it to use quantized weight from a set of quantized weights (each one of the
    quantized weights was quantized with different bitwidth).
    At any given time, the SelectiveQuantizeConfig uses only one quantized weight
    according to an "active" index - the index of a candidate weight quantization configuration
    from a list of candidates that was passed to the SelectiveQuantizeConfig when it was initialized.
    The "active" index can be configured as part of the SelectiveQuantizeConfig's API,
    so a different quantized weight can be used in another time.
    """
    def __init__(self,
                 node_q_cfg: List[CandidateNodeQuantizationConfig],
                 float_weights: List[np.ndarray] = None,
                 weight_attrs: List[str] = None):
        """
        Init a SelectiveQuantizeConfig instance.
        Args:
            weight_attrs: Attributes of the layer's weights to quantize, the
            SelectiveQuantizeConfig is attached to.
            float_weights: Float weights of the layer, the SelectiveQuantizeConfig is attached to.
            node_q_cfg: Candidates quantization config the node has (the node from which
            we built the layer that is attached to SelectiveQuantizeConfig).
        """
        # Make sure the candidates configurations arrived in a descending order.
        # Tuple comparison enforces strictly-descending lexicographic order of
        # (weights_n_bits, activation_n_bits).
        curmax = (np.inf, np.inf)
        n_candidate_bits = [(x.weights_quantization_cfg.weights_n_bits, x.activation_quantization_cfg.activation_n_bits)
                            for x in node_q_cfg]
        for candidate_bits in n_candidate_bits:
            assert candidate_bits < curmax
            curmax = candidate_bits
        self.weight_attrs = weight_attrs
        self.float_weights = float_weights
        assert len(node_q_cfg) > 0, 'SelectiveQuantizeConfig has to receive' \
                                    'at least one quantization configuration'
        # Either both weight lists are absent, or they must pair one-to-one.
        assert (not weight_attrs and not float_weights) or len(weight_attrs) == len(float_weights)
        for qc in node_q_cfg:
            assert qc.weights_quantization_cfg.enable_weights_quantization == \
                   node_q_cfg[0].weights_quantization_cfg.enable_weights_quantization \
                   and qc.activation_quantization_cfg.enable_activation_quantization == \
                   node_q_cfg[0].activation_quantization_cfg.enable_activation_quantization, \
                "Candidates with different weights/activation enabled properties is currently not supported"
        self.node_q_cfg = node_q_cfg
        self.enable_weights_quantization = node_q_cfg[0].weights_quantization_cfg.enable_weights_quantization
        self.enable_activation_quantization = node_q_cfg[0].activation_quantization_cfg.enable_activation_quantization
        # Initialize a SelectiveWeightsQuantizer for each weight that should be quantized.
        self.weight_quantizers = []
        if self.enable_weights_quantization:
            self.weight_quantizers = [SelectiveWeightsQuantizer(node_q_cfg,
                                                                float_weight=float_weight) for float_weight
                                      in float_weights]
        # Single selective quantizer for the layer's output activation (or None).
        self.activation_selective_quantizer = None if not self.enable_activation_quantization else \
            SelectiveActivationQuantizer(node_q_cfg)
    def get_candidate_nbits(self) -> List[Tuple[int, int]]:
        """
        Returns: All possible (weights, activation) bitwidth pairs the
        SelectiveQuantizeConfig holds, in candidate order.
        """
        return [(x.weights_quantization_cfg.weights_n_bits, x.activation_quantization_cfg.activation_n_bits)
                for x in self.node_q_cfg]
    def set_bit_width_index(self,
                            index: int,
                            attr: str = None):
        """
        Change the "active" bitwidth index the SelectiveQuantizeConfig uses, so
        a different quantized weight and activation will be used.
        If attr is passed, only the quantizer that was created for this attribute will be configured.
        Otherwise, all quantizers the SelectiveQuantizeConfig holds will be configured
        using the passed index.
        Args:
            index: Bitwidth index to use.
            attr: Name of the layer's weights attribute to configure its corresponding quantizer.
        """
        self.set_weights_bit_width_index(index, attr)
        self.set_activation_bit_width_index(index)
    def set_weights_bit_width_index(self,
                                    index: int,
                                    attr: str = None):
        """
        Change the "active" bitwidth index the SelectiveQuantizeConfig uses, so
        a different quantized weight will be used.
        If attr is passed, only the quantizer that was created for this attribute will be configured.
        Otherwise, all quantizers the SelectiveQuantizeConfig holds will be configured
        using the passed index.
        Args:
            index: Bitwidth index to use.
            attr: Name of the layer's attribute to configure its corresponding quantizer.
        """
        if self.enable_weights_quantization:
            if attr is None:  # set bit width to all weights of the layer
                for q in self.weight_quantizers:
                    q.set_active_quantization_config_index(index)
            else:  # set bit width to a specific selectivequantizer
                i = self.weight_attrs.index(attr)
                q = self.weight_quantizers[i]
                q.set_active_quantization_config_index(index)
    def set_activation_bit_width_index(self,
                                       index: int):
        """
        Change the "active" bitwidth index the activation quantizer uses, so a
        different activation quantization configuration takes effect. No-op when
        activation quantization is disabled for this layer.
        Args:
            index: Bitwidth index to use.
        """
        if self.enable_activation_quantization:
            self.activation_selective_quantizer.set_active_quantization_config_index(index)
    def get_weights_and_quantizers(self, layer: Layer) -> List[Tuple[Tensor, Any]]:
        """
        Get a list of tuples with weights and the weights quantizers.
        The layer's attributes are used to get the weights.
        Args:
            layer: The layer the SelectiveQuantizeConfig is attached to when is wrapped.
        Returns:
            List of tuples of the layer's weights and the weights quantizers.
        """
        return [] if not self.enable_weights_quantization else \
            [(getattr(layer, self.weight_attrs[i]), self.weight_quantizers[i]) for i in range(len(self.weight_attrs))]
    def get_activations_and_quantizers(self, layer: Layer) -> list:
        # For configurable activations we use get_output_quantizers,
        # Therefore, we do not need to implement this method.
        return []
    def set_quantize_weights(self, layer: Layer, quantize_weights: List[Tensor]):
        """
        Set the layer weights with new passed weights.
        Args:
            layer: Layer to set its attributes.
            quantize_weights: Quantized weights to set as new weights.
        """
        if self.enable_weights_quantization:
            if len(self.weight_attrs) != len(quantize_weights):
                raise ValueError(
                    '`set_quantize_weights` called on layer {} with {} '
                    'weight parameters, but layer expects {} values.'.format(
                        layer.name, len(quantize_weights), len(self.weight_attrs)))
            for weight_attr, weight in zip(self.weight_attrs, quantize_weights):
                current_weight = getattr(layer, weight_attr)
                # Shapes must agree, otherwise the replacement weight cannot
                # stand in for the original variable.
                if current_weight.shape != weight.shape:
                    raise ValueError('Existing layer weight shape {} is incompatible with'
                                     'provided weight shape {}'.format(
                        current_weight.shape, weight.shape))
                setattr(layer, weight_attr, weight)
    def set_quantize_activations(self, layer, quantize_activations: ListWrapper):
        # Not used: output quantization is handled via get_output_quantizers.
        pass
    def get_output_quantizers(self, layer: Layer) -> list:
        # Single selective activation quantizer on the layer output (if enabled).
        return [] if not self.enable_activation_quantization else [self.activation_selective_quantizer]
    def get_config(self) -> Dict[str, Any]:
        """
        Returns: The SelectiveQuantizeConfig configuration.
        """
        return {
            'weight_attrs': self.weight_attrs,
            'float_weights': self.float_weights,
            'node_q_cfg': self.node_q_cfg
        }
6631434 | """ run the comparisons using asyncio """
import asyncio
import asks
import regex
import settingsmay2021 as settings
import aiohttp
import langdetect
import os
import schedule
from time import sleep
from flask_bootstrap import Bootstrap
from collections import OrderedDict
from flask_wtf import FlaskForm
from wtforms import TextAreaField, SubmitField
from wtforms.validators import Length, ValidationError
from flask import Flask, render_template, request, url_for, Response, abort
from datetime import datetime
from redislite import StrictRedis
# Application wiring: Flask app with Bootstrap styling, CSRF secret from
# settings, and an embedded redislite store used as an hourly rate-limit counter.
app = Flask(__name__, static_url_path="/static")
Bootstrap(app)
app.config["SECRET_KEY"] = settings.csrf
REDIS = os.path.join("/tmp/redis.db")
r = StrictRedis(REDIS, charset="utf-8", decode_responses=True)
# Start the request counter at zero on process start.
r.hset("counter", "increment", 0)
def reset_redis():
    """Zero the hourly request counter used for rate limiting."""
    r.hset("counter", "increment", 0)
# Reset the counter every hour; schedule.run_pending() is polled inside index().
schedule.every().hour.do(reset_redis)
class WebForm(FlaskForm):
    """ for validation """
    # Abstract text box; length is validated server-side (150-10,000 chars).
    webabstract = TextAreaField(
        validators=[
            Length(
                min=150,
                max=10000,
                message="Your abstract must be between 150 and 10,000 characters.",
            )
        ]
    )
    def validate_webabstract(form, field):
        """Inline WTForms validator: reject abstracts whose language cannot be
        detected or that are not written in English."""
        try:
            language = langdetect.detect(field.data)
        except langdetect.lang_detect_exception.LangDetectException:
            # Detection fails on degenerate input (e.g. only symbols/digits);
            # surface it as the same length-style message rather than a 500.
            raise ValidationError(
                "Your abstract must be between 150 and 10,000 characters."
            )
        # NOTE(review): debug print left in the request path — consider logging.
        print(language)
        if language != "en":
            raise ValidationError(
                "The Open Journal Matcher only works with abstracts written in English."
            )
    submit = SubmitField("Search")
@app.route("/", methods=["GET", "POST"])
def index():
""" display index page """
form = WebForm()
valid = form.validate_on_submit()
schedule.run_pending()
if request.method == "POST" and valid:
# check to ensure not over rate limit
counter = int(r.hget("counter", "increment"))
counter += 1
print("counter:", counter)
if counter >= 10:
rate_error = {
"webabstract": [
"The application is experiencing peak load. Please try again later."
]
}
print("Turnaway due to load")
return render_template(
"index.html", form=form, errors=rate_error, output=""
)
r.hset("counter", "increment", counter)
# lay the groundwork
comp = {}
unordered_scores = {}
inp = form.webabstract.data
t0 = datetime.now()
# do the work
asyncio.run(parent1(inp, comp))
asyncio.run(parent2(comp, unordered_scores))
# sort the results
scores = OrderedDict(
sorted(unordered_scores.items(), key=lambda t: t[0], reverse=True)
)
# calculate running time
t1 = datetime.now()
print(t1 - t0)
return render_template("index.html", form=form, errors={}, output=scores)
elif request.method == "POST" and not valid:
return render_template("index.html", form=form, errors=form.errors, output="")
else:
return render_template("index.html", form=form, errors={}, output="")
@app.after_request
def add_security_headers(resp):
    """Attach the standard browser security headers to every response."""
    security_headers = {
        "X-Content-Type-Options": "nosniff",
        "X-Frame-Options": "SAMEORIGIN",
        "X-XSS-Protection": "1; mode=block",
        "Strict-Transport-Security": "max-age=31536000; includeSubDomains",
        "Content-Security-Policy": "script-src 'self'; style-src 'self'; default-src 'none'",
    }
    for header_name, header_value in security_headers.items():
        resp.headers[header_name] = header_value
    return resp
async def parent1(inp, comp):
    """ manage the async calls to GCP """
    # One cloud_work task per abstract-collection blob; scores land in `comp`
    # keyed by blob name. The trailing 0 is the initial reconnect-retry count.
    await asyncio.gather(
        *[cloud_work(blob, inp, comp, 0) for blob in settings.bucket_list]
    )
    return
async def cloud_work(blob, inp, comp, count):
    """Score abstract `inp` against corpus `blob` via the cloud function.

    On success stores the response text in comp[blob]. HTTP 500 is retried up
    to 5 times, HTTP 429 waits briefly and retries, and connection errors
    re-enter this coroutine up to 5 times via `count`. Other statuses are
    logged and abandoned.
    """
    max_out = 0
    try:
        async with aiohttp.ClientSession() as session:
            while max_out < 6:
                async with session.post(
                    settings.cloud_function,
                    json={"d": inp, "f": blob, "t": settings.token},
                ) as resp:
                    if max_out >= 5:
                        raise Exception("Max out")
                    if resp.status == 200:
                        comp[blob] = await resp.text()
                        break
                    elif resp.status == 500:
                        max_out += 1
                    elif resp.status == 429:
                        # Rate limited: yield to the event loop. The previous
                        # time.sleep(0.01) blocked the loop and stalled every
                        # other in-flight request.
                        await asyncio.sleep(0.01)
                    else:
                        raise Exception(str(resp.status))
    except (
        aiohttp.client_exceptions.ClientConnectorError,
        aiohttp.client_exceptions.ServerDisconnectedError,
        asyncio.TimeoutError,
    ) as e:
        # Transient network failure: retry the whole request a few times.
        if count < 5:
            await cloud_work(blob, inp, comp, count + 1)
    except Exception as e:
        print(type(e), e)
    return
async def parent2(comp, unordered_scores):
    """ manage the async calls to the DOAJ api """
    # Keep only blobs whose score parses as a nonzero float.
    valid_pairs = [pair for pair in comp.items() if test_response(pair[1])]
    print("Journals checked:" + str(len(valid_pairs)))
    # Take the five best matches so we only hit the DOAJ API five times.
    # NOTE(review): scores are compared as strings here, same as before.
    best = sorted(valid_pairs, key=lambda kv: kv[1], reverse=True)[:5]
    lookups = [titles(rank, entry, unordered_scores) for rank, entry in enumerate(best)]
    await asyncio.gather(*lookups)
    return
def test_response(resp):
    """Return *resp* parsed as a float score, or False when it cannot be parsed.

    Some abstract collections raise ValueErrors; those are treated as invalid.
    A score of 0.0 is returned as-is and is falsy to the caller.
    """
    try:
        score = float(resp)
    except ValueError:
        return False
    return score
async def titles(idx, item, unordered_scores):
    """Resolve one (issn, score) pair to a journal title via the DOAJ API.

    :param idx: rank of the item (unused; kept for the gather() call shape)
    :param item: (issn, score) tuple
    :param unordered_scores: dict mapping percentage score -> (title, issn, url)
    :raises Exception: if the ISSN is not in the standard NNNN-NNNC format
    """
    if regex.match(r"^[0-9]{4}-[0-9]{3}[0-9Xx]$", item[0]):
        issn = item[0]
    else:
        raise Exception("ISSN does not match regex")
    journal_data = await asks.get(
        "https://doaj.org/api/v2/search/journals/issn%3A" + issn
    )
    journal_json = journal_data.json()
    try:
        title = journal_json["results"][0]["bibjson"]["title"]
        # Trim a single trailing space some DOAJ titles carry.
        if title[-1:] == " ":
            title = title[:-1]
        url = "https://doaj.org/toc/" + issn
    except Exception:
        # Missing or unexpected payload shape: fall back to a placeholder.
        # (Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.)
        title = "Title lookup failed. Try finding this item by ISSN instead.."
        url = ""
    score = float(item[1]) * 100
    unordered_scores[score] = (title, issn, url)
    return
if __name__ == "__main__":
    # Local development entry point; production should run under a WSGI server.
    app.run()
| StarcoderdataPython |
def move(n, a, b, c):
    """Print the Tower of Hanoi move sequence for n disks from peg a to peg c."""
    if n == 1:
        print(a, '-->', c)
    else:
        move(n - 1, a, c, b)   # park n-1 disks on the spare peg
        move(1, a, b, c)       # move the largest disk to the target
        move(n - 1, b, a, c)   # restack the parked disks onto it


move(10, 'a', 'b', 'c')
1716520 | #
# @lc app=leetcode id=203 lang=python3
#
# [203] Remove Linked List Elements
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def removeElements(self, head: ListNode, val: int) -> ListNode:
        """Remove every node whose value equals val; return the new head."""
        # A sentinel in front of the list makes removing the head uniform.
        sentinel = ListNode(0)
        sentinel.next = head
        prev, node = sentinel, head
        while node:
            if node.val == val:
                prev.next = node.next
            else:
                prev = node
            node = node.next
        return sentinel.next
# @lc code=end
# Accepted
# 65/65 cases passed(68 ms)
# Your runtime beats 93.97 % of python3 submissions
# Your memory usage beats 100 % of python3 submissions(15.6 MB)
| StarcoderdataPython |
3560578 | <reponame>anubhab-code/Competitive-Programming
def reverseWords(string):
    """Return the words of *string* in reverse order, joined by single spaces."""
    words = string.split()
    words.reverse()
    return ' '.join(words)
347389 | <gh_stars>0
# ----------------------------------------------------------------------------
# Copyright 2014-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Nervana's deep learning library
"""
from __future__ import print_function
from builtins import zip
try:
from neon.version import VERSION as __version__ # noqa
except ImportError:
import sys
print("ERROR: Version information not found. Ensure you have built "
"the software.\n From the top level dir issue: 'make'")
sys.exit(1)
from copy import deepcopy
import inspect
import logging
# Custom "DISPLAY" logging severity, one notch above logging.ERROR (40).
DISPLAY_LEVEL_NUM = 41
logging.addLevelName(DISPLAY_LEVEL_NUM, "DISPLAY")


def display(self, message, *args, **kwargs):
    """Log *message* at the custom DISPLAY level when that level is enabled."""
    if not self.isEnabledFor(DISPLAY_LEVEL_NUM):
        return
    self._log(DISPLAY_LEVEL_NUM, message, args, **kwargs)


# Expose the helper as a method on every Logger instance.
logging.Logger.display = display

# setup a preliminary stream based logger
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
def get_args(func):
    """
    returns a dictionary of arg_name:default_values for the input function

    Arguments without an explicit default are mapped to None.
    """
    # inspect.getargspec() was removed in Python 3.11; getfullargspec() is the
    # drop-in replacement for positional args and their defaults.
    spec = inspect.getfullargspec(func)
    defaults = list(reversed(spec.defaults)) if spec.defaults is not None else []
    args = list(reversed(spec.args))
    # Pad (in reversed order) so every argument has a default slot.
    while len(defaults) != len(args):
        defaults.append(None)
    return dict(list(zip(args, defaults)))
class NervanaObject(object):
    """
    Base (global) object available to all other classes.
    Attributes:
        be (Backend): Hardware backend being used.
    """
    be = None  # shared hardware backend, assigned once at startup
    __counter = 0  # per-class instance count, used for auto-generated names
    def __init__(self, name=None):
        """
        Class constructor.
        Args:
            name (str, optional): Name to assign instance of this class.
                Defaults to "<ClassName>_<counter>".
        """
        if name is None:
            name = '{}_{}'.format(self.classnm, self.__counter)
        self.name = name
        self._desc = None  # cache of the last get_description() result
        type(self).__counter += 1
    @classmethod
    def gen_class(cls, pdict):
        # Deserialization hook: rebuild an instance from a parameter dict.
        return cls(**pdict)
    def __del__(self):
        # Keep the auto-naming counter in sync when instances are collected.
        type(self).__counter -= 1
    @property
    def classnm(self):
        """
        Returns the class name.
        """
        return self.__class__.__name__
    @property
    def modulenm(self):
        """
        Returns the full module path.
        """
        return self.__class__.__module__ + '.' + self.__class__.__name__
    def get_description(self, skip=[], **kwargs):
        """
        Returns a ``dict`` that contains all necessary information needed
        to serialize this object.
        Arguments:
            skip (list): Objects to omit from the dictionary.
        Returns:
            (dict): Dictionary format for object information.
        """
        # NOTE: the mutable default for `skip` is safe here — it is copied
        # below before being mutated.
        if type(skip) is not list:
            skip = list(skip)
        else:
            skip = deepcopy(skip)
        skip.append('self')
        config = {}
        # Only constructor arguments whose value differs from the default are
        # recorded, so the description stays minimal.
        defaults = get_args(self.__init__)
        for arg in defaults:
            if arg in skip:
                continue
            # all args need to go in the __dict__ so we can read
            # them out the way they were read in. alternatively,
            # you can override get_description to say how to
            # put them in to the description dictionary.
            if arg in self.__dict__:
                if self.__dict__[arg] != defaults[arg]:
                    if isinstance(self.__dict__[arg], NervanaObject):
                        config[arg] = self.__dict__[arg].get_description()
                    else:
                        config[arg] = self.__dict__[arg]
            else:
                logger.warning("can't describe argument '{}' to {}".format(arg, self))
        desc = {'type': self.modulenm, 'config': config}
        self._desc = desc
        return desc
| StarcoderdataPython |
6669268 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 3 23:20:58 2021
@author: <NAME>
"""
"""
Bingo is played on a set of boards each consisting of a 5x5 grid of numbers. Numbers are chosen at random, and the chosen number is marked on all boards on which it appears. (Numbers may not appear on all boards.) If all numbers in any row or any column of a board are marked, that board wins. (Diagonals don't count.)
The submarine has a bingo subsystem to help passengers (currently, you and the giant squid) pass the time. It automatically generates a random order in which to draw numbers and a random set of boards (your puzzle input).
The score of the winning board can now be calculated. Start by finding the sum of all unmarked numbers on that board; in this case, the sum is 188. Then, multiply that sum by the number that was just called when the board won, 24, to get the final score, 188 * 24 = 4512.
"""
input = '''
0,56,39,4,52,7,73,57,65,13,3,72,69,96,18,9,49,83,24,31,12,64,29,21,80,71,66,95,2,62,68,46,11,33,74,88,17,15,5,6,98,30,51,78,76,75,28,53,87,48,20,22,55,86,82,90,47,19,25,1,27,60,94,38,97,58,70,10,43,40,89,26,34,32,23,45,50,91,61,44,35,85,63,16,99,92,8,36,81,84,79,37,93,67,59,54,41,77,42,14
63 5 10 69 57
64 88 27 40 76
59 20 58 90 6
74 32 72 16 26
50 17 7 93 94
92 77 33 44 14
35 25 47 91 7
99 6 56 82 10
41 93 70 5 85
81 97 58 96 29
24 53 4 8 23
0 13 48 47 83
55 56 72 50 52
82 33 58 16 11
91 7 89 9 81
86 70 16 4 34
49 69 37 78 11
22 47 59 20 38
33 82 60 63 56
18 74 36 7 99
64 45 72 86 7
34 50 94 0 85
15 69 2 26 32
62 96 41 17 78
63 5 99 79 47
62 63 24 37 50
89 80 40 41 13
32 64 95 93 66
45 3 23 78 48
60 26 31 61 99
6 63 66 67 15
33 43 62 95 89
72 61 60 2 10
29 7 9 50 18
28 36 3 53 30
91 4 57 74 66
49 36 54 7 89
33 65 59 14 92
63 42 0 20 11
64 32 96 18 58
85 59 33 18 99
90 4 1 51 35
2 57 9 5 78
30 53 25 23 80
74 76 20 19 21
5 42 50 72 90
58 63 49 17 31
39 76 8 19 41
9 59 61 23 54
91 57 18 70 69
90 6 36 71 78
73 75 56 43 35
92 31 21 47 86
69 10 52 80 55
68 30 22 45 34
4 15 88 6 38
46 37 34 23 91
50 98 89 3 79
90 93 60 56 20
40 8 30 69 1
13 62 39 56 78
6 7 17 94 5
44 77 76 81 20
91 64 34 99 45
63 37 3 2 66
57 19 63 59 8
83 51 58 21 4
54 61 56 9 95
92 52 84 67 66
80 34 1 97 69
0 95 89 40 51
58 90 97 85 61
64 47 10 37 26
19 63 7 71 81
20 25 57 55 21
72 6 39 97 58
50 63 2 46 86
73 12 59 37 66
93 77 47 34 67
5 4 98 51 48
96 5 17 68 73
55 13 88 3 52
95 62 18 83 63
31 15 99 20 93
29 50 0 74 22
11 84 79 92 67
36 23 76 14 80
82 72 53 3 85
46 71 89 25 40
51 81 29 0 65
68 24 96 87 5
53 10 95 89 81
88 80 23 12 50
65 16 45 29 62
33 97 91 60 43
36 77 68 20 51
93 71 28 70 97
10 9 16 15 67
42 78 62 34 38
60 74 18 91 53
65 35 40 34 71
0 84 13 81 95
2 31 46 24 76
67 28 83 63 25
62 93 10 14 68
17 36 72 65 49
29 9 22 42 58
76 20 57 3 54
13 37 88 62 24
66 78 55 30 48
55 18 97 40 30
76 69 22 86 98
48 96 20 65 1
77 45 91 82 25
56 70 66 34 58
15 59 37 69 66
51 16 25 0 79
76 72 68 70 20
95 33 82 27 52
53 65 40 45 92
65 18 54 9 28
47 11 84 89 71
52 96 83 57 86
55 0 56 72 20
26 19 81 60 64
97 28 89 55 11
33 92 50 86 79
81 37 0 94 64
44 76 68 58 26
57 65 60 78 93
62 4 55 50 74
86 46 89 20 68
1 52 78 73 19
14 10 0 40 28
69 35 26 22 7
29 15 9 27 8
98 22 69 14 44
75 24 66 63 90
62 72 87 32 31
26 59 85 82 77
90 42 40 10 48
73 8 57 18 29
67 76 5 72 93
43 27 28 82 80
62 41 36 61 21
25 64 69 6 65
40 36 16 81 34
63 38 80 55 29
20 50 90 21 72
4 17 83 27 92
96 89 9 62 78
18 47 82 80 73
75 38 51 3 50
48 19 99 54 6
4 28 63 98 43
37 68 13 30 23
82 14 65 60 27
43 74 62 46 99
80 26 15 9 20
58 44 92 76 64
2 80 99 17 43
37 48 65 52 40
81 90 83 78 72
77 21 56 66 68
92 22 10 61 49
72 27 88 7 57
51 73 31 55 23
39 48 12 91 2
92 42 71 93 4
26 38 36 22 75
37 2 9 10 52
33 45 11 67 25
31 6 5 79 14
70 39 99 8 81
65 87 83 68 77
54 89 78 49 63
39 33 27 98 28
79 61 20 2 25
92 12 13 29 30
51 77 94 38 46
28 87 30 75 9
48 8 23 60 89
79 2 21 18 6
25 69 24 15 71
44 36 59 31 68
16 0 69 19 9
68 15 90 8 87
75 21 12 97 39
5 83 55 23 72
43 60 58 13 76
53 29 98 73 13
58 30 10 68 21
32 81 66 6 82
97 45 15 7 92
19 75 90 36 67
66 14 39 62 89
94 42 20 46 48
0 7 92 4 86
3 84 60 37 55
27 64 1 30 82
78 82 57 44 47
86 11 62 52 99
7 70 17 60 15
45 19 4 91 75
3 6 24 94 81
38 2 59 51 17
76 64 19 78 7
55 42 39 47 56
79 65 37 57 40
53 66 73 83 68
38 49 65 79 82
15 63 53 32 21
48 1 3 81 69
94 87 20 6 59
8 50 96 71 76
17 28 41 24 69
20 96 9 57 85
70 15 53 38 52
79 84 37 73 64
40 30 25 56 1
68 11 49 37 46
24 63 72 35 29
92 62 89 73 28
64 58 9 3 39
13 45 10 19 20
54 41 49 33 60
85 56 0 77 51
81 12 13 20 27
36 24 69 39 80
14 83 57 50 91
19 68 61 56 11
3 74 6 25 22
71 10 21 7 29
92 12 51 84 30
41 72 85 36 91
16 86 37 88 22
48 18 4 89 55
58 83 44 7 43
28 76 15 11 35
81 52 29 23 64
52 42 98 0 31
92 47 41 87 33
6 35 69 44 17
91 50 89 75 3
57 61 81 60 21
40 86 78 2 58
76 73 31 19 14
50 21 53 83 45
68 9 22 70 69
54 1 85 90 44
13 20 96 89 22
85 62 19 99 66
18 46 28 14 39
12 21 34 1 81
40 77 25 4 7
21 76 60 10 9
34 29 59 48 40
30 2 36 82 66
12 95 80 72 58
74 3 46 37 49
6 30 25 12 22
33 65 4 89 59
86 94 70 49 16
11 76 66 84 45
50 31 46 73 36
89 4 99 23 84
72 90 83 44 20
33 66 91 35 26
81 85 24 10 55
45 43 7 78 53
55 62 19 44 63
12 90 77 28 7
80 92 15 41 11
58 24 47 66 82
48 88 37 60 46
48 16 86 94 14
22 43 46 67 1
91 88 49 79 28
19 12 35 85 42
13 26 68 95 97
23 67 33 85 82
21 71 84 8 58
20 41 53 22 99
92 89 59 16 19
79 93 39 83 80
98 96 24 47 15
79 97 19 64 84
62 91 38 30 65
74 25 92 16 50
51 41 34 18 35
55 9 27 95 90
31 11 50 84 71
37 61 62 60 88
24 94 29 42 77
17 83 47 74 91
74 19 3 13 29
48 17 26 42 22
9 25 8 55 38
33 52 90 84 39
82 50 60 41 35
33 7 28 66 21
98 32 41 81 19
46 34 10 50 47
20 68 2 93 25
5 85 69 53 39
91 83 59 37 8
51 39 81 48 19
55 86 73 54 20
5 7 18 1 36
44 75 43 76 23
6 38 71 57 77
95 75 0 32 5
64 41 51 91 30
99 79 8 16 55
88 97 53 47 85
58 23 14 93 59
67 47 49 2 24
75 92 31 52 29
30 7 10 40 55
20 19 35 72 84
75 6 67 76 13
18 32 27 17 71
60 4 48 53 84
8 23 33 91 68
24 42 56 50 45
52 55 68 62 89
9 64 14 58 50
46 8 94 12 24
72 47 42 76 61
97 40 25 7 31
13 83 14 29 58
70 33 28 71 9
16 90 45 30 99
84 0 41 79 51
60 55 35 31 57
93 21 42 67 20
88 73 40 9 1
92 58 15 83 57
30 86 36 97 89
38 22 72 37 24
37 38 78 93 50
44 65 52 54 79
73 27 6 35 91
8 26 63 7 12
25 17 60 4 14
0 30 70 99 23
2 75 51 10 87
12 91 4 69 8
81 62 26 72 33
31 17 46 73 96
53 44 78 46 19
40 85 77 98 50
71 30 68 0 73
31 43 83 92 6
52 45 11 37 1
91 67 8 35 36
23 52 51 83 70
92 32 27 72 16
63 54 75 38 97
45 96 11 13 79
2 62 8 53 3
63 94 81 61 25
10 91 9 87 84
85 59 80 54 0
43 77 21 89 75
12 34 29 68 14
77 81 67 15 9
17 85 26 30 86
33 10 31 23 69
59 42 24 88 89
14 80 84 22 23
85 93 83 91 6
57 27 51 95 46
67 1 24 76 86
55 19 94 8 61
87 42 82 81 98
43 8 51 92 60
17 14 96 36 34
2 19 70 15 78
93 35 74 53 3
53 34 22 41 59
58 18 30 71 37
60 8 74 1 5
96 52 87 17 85
57 31 24 72 32
58 51 98 28 29
81 76 49 24 60
3 33 22 57 86
1 67 46 20 56
5 16 2 8 6
67 51 43 89 94
4 96 50 9 8
22 87 77 38 35
39 37 17 59 32
5 25 26 83 81
15 12 6 27 76
80 70 87 36 55
69 35 91 98 18
89 59 92 5 29
84 10 86 63 39
3 94 38 93 28
88 57 42 97 14
89 36 35 85 9
5 29 51 31 69
46 10 25 0 15
87 46 49 60 22
30 64 93 20 66
95 86 71 21 11
0 58 53 18 97
62 63 3 1 96
56 86 71 30 36
19 27 16 94 53
46 81 25 44 55
75 7 97 76 96
93 79 22 78 50
30 14 68 16 59
23 60 77 37 4
22 43 53 34 81
7 54 38 39 96
25 86 64 46 44
2 72 69 90 58
50 77 16 0 14
75 1 92 66 29
71 59 54 67 4
3 53 49 9 46
21 22 25 42 7
4 95 82 91 27
29 33 30 64 61
74 80 26 83 70
31 88 93 52 96
51 45 43 70 87
48 77 27 53 19
41 83 17 99 49
94 59 95 58 55
75 86 44 91 82
64 88 91 68 19
57 60 80 46 98
20 4 38 32 69
8 9 22 70 39
85 28 97 2 71
65 42 46 75 48
26 66 97 16 74
51 67 94 89 4
61 9 54 22 2
82 8 83 5 90
18 51 61 57 11
41 79 30 78 69
75 14 81 40 88
93 76 25 64 47
24 34 94 46 89
74 8 3 64 59
67 49 26 32 83
85 0 5 71 90
16 27 81 98 56
79 23 76 20 43
37 6 24 16 69
41 82 51 4 35
79 94 99 42 12
30 81 60 3 36
8 22 11 32 48
68 9 1 47 21
61 55 5 19 73
29 0 48 4 31
63 50 93 15 72
39 98 57 70 65
34 55 82 26 10
62 85 68 69 36
5 46 54 50 17
86 95 72 49 29
76 91 43 37 97
41 76 5 33 59
81 51 99 86 34
0 39 64 27 83
40 69 37 91 45
55 49 54 9 61
92 34 81 10 80
64 85 69 28 66
89 93 22 45 7
8 35 90 16 87
4 78 44 13 67
30 21 24 26 40
59 17 4 47 73
10 31 88 12 29
56 98 69 2 7
13 58 91 55 36
25 28 69 4 19
45 62 32 16 98
20 88 6 97 18
91 71 10 80 31
1 66 89 12 21
85 34 16 30 71
47 15 46 24 61
79 69 23 38 96
0 14 80 97 86
48 92 22 26 98
96 94 98 27 56
64 83 46 30 5
3 43 70 67 21
62 0 92 1 65
36 26 35 61 76
'''.strip().split('\n')
#%% Set up the Bingo class
import numpy as np
import re
import copy
class Bingo:
    """A single bingo board: a size x size number grid plus a marked mask."""

    def __init__(self, size):
        """Create an empty board of the given side length."""
        self.size = size
        self.board = np.zeros([size, size])
        self.marked = np.zeros([size, size])

    def initialize(self, arr):
        """Load the board numbers from a size x size nested list."""
        assert len(arr) == self.size
        assert len(arr[0]) == self.size
        self.board = np.array(arr)

    def mark(self, num):
        """Mark every cell holding *num* (vectorized mask instead of loops)."""
        self.marked[self.board == num] = 1

    def is_solved(self):
        """Return True when any full row or column is marked (no diagonals)."""
        full_cols = (self.marked.sum(axis=0) == self.size).any()
        full_rows = (self.marked.sum(axis=1) == self.size).any()
        return bool(full_cols or full_rows)

    def unmarked_sum(self):
        """Return the sum of all numbers on unmarked cells."""
        return self.board[self.marked == 0].sum()
#%% Set up the boards and numbers
# First line of the puzzle input is the comma-separated draw order.
numbers = [int(x) for x in input[0].split(',')]
BOARD_SIZE = 5
# Boards start after the blank separator line following the draws.
boards_raw = input[2:]
boards = []
while boards_raw:
    try:
        # Peel off 5 board rows plus the blank separator line.
        b1, _, boards_raw = boards_raw[:5], boards_raw[5], boards_raw[6:]
    except:
        # The last board has no trailing separator; consume what is left.
        b1, boards_raw = boards_raw, []
    b2 = [[int(y) for y in re.split(r'\s+', x.strip())] for x in b1]
    b3 = Bingo(BOARD_SIZE)
    b3.initialize(b2)
    boards.append(b3)
# Keep a pristine copy so each puzzle part can replay from unmarked boards.
boards_orig = copy.deepcopy(boards)
#%% Part 1
# Go through the boards to find the first winning board
boards = copy.deepcopy(boards_orig)
is_found = False
for num in numbers:
    print(num)
    for board in boards:
        board.mark(num)
        if board.is_solved():
            print('--')
            # Final score: called number times the sum of unmarked cells.
            print(num * board.unmarked_sum())
            print(board.board)
            print(board.marked)
            is_found = True
    if is_found:
        break
#%% Part 2: what's the last board to win?
boards = copy.deepcopy(boards_orig)
solved_boards = [0] * len(boards)  # flag per board: 1 once it has won
final_board_found = False
for num in numbers:
    for i, board in enumerate(boards):
        board.mark(num)
        if board.is_solved():
            solved_boards[i] = 1
            # if this was the final board to be solved, do the calculations
            if sum(solved_boards) == len(solved_boards):
                final_board_found = True
                print(num * board.unmarked_sum())
                print(board.board)
                break
    if final_board_found:
        break
| StarcoderdataPython |
5059973 | <reponame>ismailalptug/Image-Quantization
import numpy as np
import cv2
def quantizeImage(image, q):
    """Quantize *image* to q gray levels and display it until a key is pressed."""
    # Bucket each pixel into one of q levels of width 256/q.
    levels = np.uint8(np.floor(np.double(image) / (256 / q)))
    # Stretch the level indices back to the full 0-255 display range.
    stretched = cv2.normalize(levels, None, 0, 255, norm_type=cv2.NORM_MINMAX)
    cv2.imshow("Quantized", stretched)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| StarcoderdataPython |
6471437 | <reponame>taxmeifyoucan/lnbits
async def m001_initial(db):
    """
    Initial streamalerts migration: create the Services and Donations tables.
    """
    await db.execute(
        f"""
        CREATE TABLE IF NOT EXISTS streamalerts.Services (
            id {db.serial_primary_key},
            state TEXT NOT NULL,
            twitchuser TEXT NOT NULL,
            client_id TEXT NOT NULL,
            client_secret TEXT NOT NULL,
            wallet TEXT NOT NULL,
            onchain TEXT,
            servicename TEXT NOT NULL,
            authenticated BOOLEAN NOT NULL,
            token TEXT
        );
    """
    )
    await db.execute(
        f"""
        CREATE TABLE IF NOT EXISTS streamalerts.Donations (
            id TEXT PRIMARY KEY,
            wallet TEXT NOT NULL,
            name TEXT NOT NULL,
            message TEXT NOT NULL,
            cur_code TEXT NOT NULL,
            sats INT NOT NULL,
            amount FLOAT NOT NULL,
            service INTEGER NOT NULL,
            posted BOOLEAN NOT NULL,
            FOREIGN KEY(service) REFERENCES {db.references_schema}Services(id)
        );
    """
    )
| StarcoderdataPython |
6431014 | import json
import shutil
import glob
from os import makedirs
from os import listdir
from os.path import isdir
from os.path import basename
from datetime import datetime
class DBmanage:
    """File-system backed store for crawl results.

    Results live under ``./database/<YYYYMMDD>/`` with one JSON file per
    board crawl ("boardResult...") and per article crawl ("articleResult...").
    """

    def __init__(self):
        self.databasePath = './database/'
        self.subFolderPath = self.databasePath + self.getCrawlDate() + '/'
        self.checkAndCreateDatabaseFolder()

    def getDBPath(self):
        """Return the database root folder path."""
        return self.databasePath

    def getSubFolderPath(self):
        """Return today's dated sub-folder path."""
        return self.subFolderPath

    def checkAndCreateDatabaseFolder(self):
        """Create the database root folder if it does not exist yet."""
        if not isdir(self.databasePath):
            print('Database folder not exists!')
            makedirs(self.databasePath)
            print('Created database.')

    def checkAndCreateSubFolder(self):
        """Create today's dated sub-folder if it does not exist yet."""
        if not isdir(self.subFolderPath):
            print('Sub-folder not exists!')
            makedirs(self.subFolderPath)
            print('Created subfolder at', self.subFolderPath)

    def removeDatabase(self):
        """Delete the whole database folder tree."""
        shutil.rmtree(self.databasePath)
        print('Database removed.')

    def saveResultFile(self, crawlResult, crawlResultFilePath):
        """Serialize *crawlResult* as pretty-printed JSON at the given path."""
        self.checkAndCreateSubFolder()
        with open(crawlResultFilePath, 'w', encoding='utf-8') as f:
            json.dump(crawlResult, f, sort_keys=True,
                      indent=4, ensure_ascii=False)
        print('Crawl result saved at:', crawlResultFilePath)
        print()

    def getCrawlDate(self):
        """Return today's date as a YYYYMMDD string (used as the version id)."""
        return datetime.now().strftime('%Y%m%d')

    def saveCrawledBoardResult(self, crawlResult):
        """Save a board-level crawl result under today's folder."""
        boardName = crawlResult['boardName']
        crawlResultFilePath = self.getBoardResultFilePath(boardName)
        self.saveResultFile(crawlResult, crawlResultFilePath)

    def getBoardResultFilePath(self, boardName):
        """Return today's file path for a board-level result."""
        crawlDate = self.getCrawlDate()
        crawlResultText = 'boardResult' + crawlDate
        crawlResultFileName = crawlResultText + '_' + boardName + '.json'
        return self.subFolderPath + crawlResultFileName

    def saveCrawledArticleResult(self, crawlResult):
        """Save an article-level crawl result under today's folder."""
        boardName = crawlResult['boardName']
        crawlResultFilePath = self.getArticleResultFilePath(boardName)
        self.saveResultFile(crawlResult, crawlResultFilePath)

    def getArticleResultFilePath(self, boardName):
        """Return today's file path for an article-level result."""
        crawlDate = self.getCrawlDate()
        crawlResultText = 'articleResult' + crawlDate
        crawlResultFileName = crawlResultText + '_' + boardName + '.json'
        return self.subFolderPath + crawlResultFileName

    def loadResultFile(self, path):
        """Load and return a JSON result file."""
        with open(path, encoding='utf8') as f:
            result = json.load(f)
        return result

    def loadCrawledBoardResult(self, boardResultFilePath):
        """Load a board-level result file."""
        return self.loadResultFile(boardResultFilePath)

    def loadCrawledArticleResult(self, articleResultFilePath):
        """Load an article-level result file."""
        return self.loadResultFile(articleResultFilePath)

    def getLatestVersion(self):
        """Return the newest dated version folder name.

        listdir() returns entries in arbitrary OS-dependent order, so the
        names (YYYYMMDD strings) are sorted before taking the last one.
        """
        return sorted(listdir(self.databasePath))[-1]

    def getLatestSubFolder(self):
        """Return the path of the newest dated version folder."""
        return self.databasePath + self.getLatestVersion() + '/'

    def getAllLatestBoardResultPath(self):
        """Return all board-result file paths in the newest version folder."""
        latestSubFolder = self.getLatestSubFolder()
        boardResultPattern = latestSubFolder + 'boardResult*.json'
        # glob order is arbitrary; sort for deterministic listings.
        allBoardResult = sorted(glob.glob(boardResultPattern))
        return allBoardResult

    def getAllLatestArticleResultPath(self):
        """Return all article-result file paths in the newest version folder."""
        latestSubFolder = self.getLatestSubFolder()
        articleResultPattern = latestSubFolder + 'articleResult*.json'
        allArticleResult = sorted(glob.glob(articleResultPattern))
        return allArticleResult

    def getLatestBoardResultPath(self, boardName):
        """Return the newest board-result path for *boardName*."""
        latestSubFolder = self.getLatestSubFolder()
        pattern = latestSubFolder + 'boardResult*' + boardName + '.json'
        # Sort so [-1] deterministically picks the latest-dated file.
        return sorted(glob.glob(pattern))[-1]

    def getLatestArticleResultPath(self, boardName):
        """Return the newest article-result path for *boardName*."""
        latestSubFolder = self.getLatestSubFolder()
        pattern = latestSubFolder + 'articleResult*' + boardName + '.json'
        return sorted(glob.glob(pattern))[-1]

    def getLatestBoardLists(self):
        """Print the board names available in the newest version folder."""
        print('=== Available boards in database ===')
        print('( database version:', self.getLatestVersion(), ')')
        allArticleResultPath = self.getAllLatestArticleResultPath()
        for resultPath in allArticleResultPath:
            fileName = basename(resultPath).replace('.json', '')
            print(fileName.split('_', 1)[1])
| StarcoderdataPython |
1684136 | <filename>dependencytrack/__init__.py
# Copyright 2020 <NAME> <EMAIL>
# SPDX-License-Identifier: GPL-2.0+
import logging
import requests
from .projects import Projects
from .components import Components
from .licenses import Licenses
from .bom import Bom
from .exceptions import AuthenticationError, DependencyTrackApiError
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class DependencyTrack(Projects, Components, Licenses, Bom):
    """Main DependencyTrack API class

    Manipulation against a running DependencyTrack instance is performed using an API key.

    :Example:

    >>> from dependencytrack import DependencyTrack
    >>> dt = DependencyTrack(url, api_key)

    :param url: URL of the DependencyTrack instance
    :param api_key: The API key generated using the DependencyTrack UI
    :type url: str
    :type api_key: str
    """

    def __init__(self, url, api_key):
        self.host = url
        self.api_key = api_key
        self.api = self.host + "/api/v1"
        self.session = requests.Session()
        self.session.headers.update({"X-Api-Key": f"{self.api_key}", "X-Total-Count": '5aa0'})
        # Most APIs are paginated; the X-Total-Count header does not raise the
        # page size, so a large pageSize query parameter is the workaround.
        # 10000 should fit most situations.
        self.paginated_param_payload = {'pageSize': "10000", 'pageNumber': "1"}
        logger.info(
            f"DependencyTrack instance against {self.host} using {self.api}"
        )

    def close(self):
        """Close the underlying HTTP session."""
        self.session.close()

    def _get_paginated(self, path, description):
        """Shared GET helper for the search endpoints.

        :param path: path (with query string) appended to the API base URL
        :param description: error message used when the call fails
        :return: the decoded JSON response body
        :raises DependencyTrackApiError: if the REST call failed
        """
        response = self.session.get(self.api + path, params=self.paginated_param_payload)
        if response.status_code == 200:
            return response.json()
        raise DependencyTrackApiError(description, response)

    def search(self, query):
        """Search from the server

        API endpoint: GET /search/{query}

        :Example:

        >>> dt.search('dnsmasq-2.78')['results']['component']

        :return: the search result
        :rtype: dict {'license': [], 'project': [], 'component': [], 'vulnerability': []}
        :raises DependencyTrackApiError: if the REST call failed
        """
        return self._get_paginated(f"/search/{query}", "Error while searching")["results"]

    def search_component(self, query):
        """Search component from the server

        API endpoint: GET /component/?searchText={query}

        :return: the search result
        :rtype: dict
        :raises DependencyTrackApiError: if the REST call failed
        """
        return self._get_paginated(
            f"/component/?searchText={query}", "Error while component searching"
        )

    def search_project(self, query):
        """Search project from the server

        API endpoint: GET /project/?searchText={query}

        :return: the search result
        :rtype: dict
        :raises DependencyTrackApiError: if the REST call failed
        """
        return self._get_paginated(
            f"/project/?searchText={query}", "Error while project searching"
        )

    def search_vulnerability(self, query):
        """Search vulnerability from the server

        API endpoint: GET /vulnerability/?searchText={query}

        :return: the search result
        :rtype: dict
        :raises DependencyTrackApiError: if the REST call failed
        """
        return self._get_paginated(
            f"/vulnerability/?searchText={query}", "Error while vulnerability searching"
        )

    def search_license(self, query):
        """Search license from the server

        API endpoint: GET /license/?searchText={query}

        :return: the search result
        :rtype: dict
        :raises DependencyTrackApiError: if the REST call failed
        """
        return self._get_paginated(
            f"/license/?searchText={query}", "Error while license searching"
        )
| StarcoderdataPython |
9795363 | from django.core.management import call_command
from django.test import TestCase
from models import Game
class TestGame(Game):
    # Proxy model: exercises Game behaviour in tests without a new DB table.
    class Meta:
        proxy = True
class TestGameModule(TestCase):
    def test_game_instance(self):
        # A fresh proxy game is named after its class and starts with no
        # formulas, coins, modifiers, or URL.
        gi = TestGame.get_instance()
        self.assertEqual(gi.name, 'TestGame')
        self.assertFalse(TestGame.get_formulas())
        self.assertFalse(TestGame.get_coins())
        self.assertFalse(TestGame.get_modifiers())
        self.assertFalse(gi.get_game_absolute_url())
        self.assertEqual(gi.__unicode__(), gi.name)
| StarcoderdataPython |
1846766 | <filename>tests/test_parser.py
import pytest
from awkwardql.parser import (parse, Symbol, Literal, Assignment,
Pack, Histogram, Cut, Call, With, Axis,
GetAttr, Block, Vary, Trial)
def test_whitespace():
    """Parsing ignores surrounding whitespace and all three comment styles."""
    assert parse(r"") == []
    assert parse(r"""x
""") == [Symbol("x")]
    assert parse(r"""
x""") == [Symbol("x")]
    assert parse(r"""
x
""") == [Symbol("x")]
    assert parse(r"""
    x
""") == [Symbol("x")]
    assert parse(r"""
x
    """) == [Symbol("x")]
    assert parse(r"""
    x
    """) == [Symbol("x")]
    assert parse(r"x # comment") == [Symbol("x")]
    assert parse(r"x // comment") == [Symbol("x")]
    assert parse(r"""
x /* multiline
comment */
""") == [Symbol("x")]
    assert parse(r"""# comment
x""") == [Symbol("x")]
    assert parse(r"""// comment
x""") == [Symbol("x")]
    assert parse(r"""/* multiline
comment */
x""") == [Symbol("x")]
def test_expressions():
    """AST shapes for literals, calls, attribute access, operators, and if."""
    assert parse(r"x") == [Symbol("x")]
    assert parse(r"?x") == [Symbol("x", True)]
    assert parse(r"1") == [Literal(1)]
    assert parse(r"3.14") == [Literal(3.14)]
    assert parse(r'"hello"') == [Literal("hello")]
    assert parse(r"f(x)") == [Call(Symbol("f"), [Symbol("x")])]
    assert parse(r"f(x, 1, 3.14)") == [Call(Symbol("f"), [Symbol("x"), Literal(1), Literal(3.14)])]
    # parse(r"a[0]")
    # assert parse(r"a[0]") == [GetItem(Symbol("a"), [Literal(0)])]
    # assert parse(r"a[0][i]") == [GetItem(GetItem(Symbol("a"), [Literal(0)]), [Symbol("i")])]
    # assert parse(r"a[0, i]") == [GetItem(Symbol("a"), [Literal(0), Symbol("i")])]
    assert parse(r"a.b") == [GetAttr(Symbol("a"), "b", False)]
    assert parse(r"a.b.c") == [GetAttr(GetAttr(Symbol("a"), "b", False), "c", False)]
    assert parse(r"a?.b") == [GetAttr(Symbol("a"), "b", True)]
    assert parse(r"a?.b?.c") == [GetAttr(GetAttr(Symbol("a"), "b", True), "c", True)]
    assert parse(r"x**2") == [Call(Symbol("**"), [Symbol("x"), Literal(2)])]
    assert parse(r"2*x") == [Call(Symbol("*"), [Literal(2), Symbol("x")])]
    assert parse(r"x/10") == [Call(Symbol("/"), [Symbol("x"), Literal(10)])]
    assert parse(r"x + y") == [Call(Symbol("+"), [Symbol("x"), Symbol("y")])]
    assert parse(r"x - y") == [Call(Symbol("-"), [Symbol("x"), Symbol("y")])]
    assert parse(r"x + 2*y") == [Call(Symbol("+"), [Symbol("x"), Call(Symbol("*"), [Literal(2), Symbol("y")])])]
    assert parse(r"(x + 2)*y") == [Call(Symbol("*"), [Call(Symbol("+"), [Symbol("x"), Literal(2)]), Symbol("y")])]
    assert parse(r"x + y + z") == [Call(Symbol("+"), [Call(Symbol("+"), [Symbol("x"), Symbol("y")]), Symbol("z")])]
    assert parse(r"+x") == [Call(Symbol("*1"), [Symbol("x")])]
    assert parse(r"-x") == [Call(Symbol("*-1"), [Symbol("x")])]
    assert parse(r"+3.14") == [Call(Symbol("*1"), [Literal(3.14)])]
    assert parse(r"-3.14") == [Call(Symbol("*-1"), [Literal(3.14)])]
    assert parse(r"x == 0") == [Call(Symbol("=="), [Symbol("x"), Literal(0)])]
    assert parse(r"x != 0") == [Call(Symbol("!="), [Symbol("x"), Literal(0)])]
    assert parse(r"x > 0") == [Call(Symbol(">"), [Symbol("x"), Literal(0)])]
    assert parse(r"x >= 0") == [Call(Symbol(">="), [Symbol("x"), Literal(0)])]
    assert parse(r"x < 0") == [Call(Symbol("<"), [Symbol("x"), Literal(0)])]
    assert parse(r"x <= 0") == [Call(Symbol("<="), [Symbol("x"), Literal(0)])]
    assert parse(r"x in table") == [Call(Symbol(".in"), [Symbol("x"), Symbol("table")])]
    assert parse(r"x not in table") == [Call(Symbol(".not in"), [Symbol("x"), Symbol("table")])]
    assert parse(r"p and q") == [Call(Symbol(".and"), [Symbol("p"), Symbol("q")])]
    assert parse(r"p or q") == [Call(Symbol(".or"), [Symbol("p"), Symbol("q")])]
    assert parse(r"not p") == [Call(Symbol(".not"), [Symbol("p")])]
    assert parse(r"p or q and r") == [Call(Symbol(".or"), [Symbol("p"), Call(Symbol(".and"), [Symbol("q"), Symbol("r")])])]
    assert parse(r"(p or q) and r") == [Call(Symbol(".and"), [Call(Symbol(".or"), [Symbol("p"), Symbol("q")]), Symbol("r")])]
    assert parse(r"if x > 0 then 1 else -1") == [Call(Symbol(".if"), [Call(Symbol(">"), [Symbol("x"), Literal(0)]), Literal(1), Call(Symbol("*-1"), [Literal(1)])])]
    assert parse(r"if p then if q then 1 else 2 else 3") == [Call(Symbol(".if"), [Symbol("p"), Call(Symbol(".if"), [Symbol("q"), Literal(1), Literal(2)]), Literal(3)])]
    assert parse(r"if p then { if q then 1 else 2 } else 3") == [Call(Symbol(".if"), [Symbol("p"), Block([Call(Symbol(".if"), [Symbol("q"), Literal(1), Literal(2)])]), Literal(3)])]
    assert parse(r"if p then 1 else if q then 2 else 3") == [Call(Symbol(".if"), [Symbol("p"), Literal(1), Call(Symbol(".if"), [Symbol("q"), Literal(2), Literal(3)])])]
    assert parse(r"if p then 1 else { if q then 2 else 3 }") == [Call(Symbol(".if"), [Symbol("p"), Literal(1), Block([Call(Symbol(".if"), [Symbol("q"), Literal(2), Literal(3)])])])]
def test_assign():
    """Assignments and { ... } blocks produce Assignment and Block nodes."""
    assert parse(r"""
x = 5
x + 2
""") == [Assignment("x", Literal(5)), Call(Symbol("+"), [Symbol("x"), Literal(2)])]
    assert parse(r"""{
x = 5
x + 2
}""") == [Block([Assignment("x", Literal(5)), Call(Symbol("+"), [Symbol("x"), Literal(2)])])]
    assert parse(r"""
y = {
    x = 5
    x + 2
}
y""") == [Assignment("y", Block([Assignment("x", Literal(5)), Call(Symbol("+"), [Symbol("x"), Literal(2)])])), Symbol("y")]
    assert parse(r"{x + 2}") == [Block([Call(Symbol("+"), [Symbol("x"), Literal(2)])])]
    assert parse(r"if x > 0 then {1} else {-1}") == [Call(Symbol(".if"), [Call(Symbol(">"), [Symbol("x"), Literal(0)]), Block([Literal(1)]), Block([Call(Symbol("*-1"), [Literal(1)])])])]
def test_table():
    """Table operators: as/with/where, join/cross/union/except precedence
    and associativity, and group/min/max by."""
    assert parse(r"table as x") == [Pack(Symbol("table"), ["x"])]
    assert parse(r"table as (x, y)") == [Pack(Symbol("table"), ["x", "y"])]
    assert parse(r"table with { x = 3 }") == [With(Symbol("table"), [Assignment("x", Literal(3))], False)]
    assert parse(r"table with { x = 3; y = x }") == [With(Symbol("table"), [Assignment("x", Literal(3)), Assignment("y", Symbol("x"))], False)]
    assert parse(r"table where x > 0") == [Call(Symbol(".where"), [Symbol("table"), Call(Symbol(">"), [Symbol("x"), Literal(0)])])]
    assert parse(r"table with { x = 3 } where x > 0") == [Call(Symbol(".where"), [With(Symbol("table"), [Assignment("x", Literal(3))], False), Call(Symbol(">"), [Symbol("x"), Literal(0)])])]
    assert parse(r"a join b") == [Call(Symbol(".join"), [Symbol("a"), Symbol("b")])]
    assert parse(r"a cross b") == [Call(Symbol(".cross"), [Symbol("a"), Symbol("b")])]
    # join/cross bind tighter than union/except; each pair below checks that
    # the unparenthesized form equals the explicitly parenthesized one.
    assert parse(r"a cross b join c") == [Call(Symbol(".join"), [Call(Symbol(".cross"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"(a cross b) join c") == [Call(Symbol(".join"), [Call(Symbol(".cross"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a except b union c") == [Call(Symbol(".union"), [Call(Symbol(".except"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"(a except b) union c") == [Call(Symbol(".union"), [Call(Symbol(".except"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a union b cross c") == [Call(Symbol(".union"), [Symbol("a"), Call(Symbol(".cross"), [Symbol("b"), Symbol("c")])])]
    assert parse(r"(a union b) cross c") == [Call(Symbol(".cross"), [Call(Symbol(".union"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a union b join c") == [Call(Symbol(".union"), [Symbol("a"), Call(Symbol(".join"), [Symbol("b"), Symbol("c")])])]
    assert parse(r"(a union b) join c") == [Call(Symbol(".join"), [Call(Symbol(".union"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a except b cross c") == [Call(Symbol(".except"), [Symbol("a"), Call(Symbol(".cross"), [Symbol("b"), Symbol("c")])])]
    assert parse(r"(a except b) cross c") == [Call(Symbol(".cross"), [Call(Symbol(".except"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a except b join c") == [Call(Symbol(".except"), [Symbol("a"), Call(Symbol(".join"), [Symbol("b"), Symbol("c")])])]
    assert parse(r"(a except b) join c") == [Call(Symbol(".join"), [Call(Symbol(".except"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    # All four binary table operators are left-associative.
    assert parse(r"a join b join c") == [Call(Symbol(".join"), [Call(Symbol(".join"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a cross b cross c") == [Call(Symbol(".cross"), [Call(Symbol(".cross"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a union b union c") == [Call(Symbol(".union"), [Call(Symbol(".union"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"a except b except c") == [Call(Symbol(".except"), [Call(Symbol(".except"), [Symbol("a"), Symbol("b")]), Symbol("c")])]
    assert parse(r"table group by x") == [Call(Symbol(".group"), [Symbol("table"), Symbol("x")])]
    assert parse(r"(table group by x) with { y = 4 }") == [With(Call(Symbol(".group"), [Symbol("table"), Symbol("x")]), [Assignment("y", Literal(4))], False)]
    assert parse(r"table min by x") == [Call(Symbol(".min"), [Symbol("table"), Symbol("x")])]
    assert parse(r"table max by x") == [Call(Symbol(".max"), [Symbol("table"), Symbol("x")])]
def test_histogram():
    """`hist` statements: axes, optional binning (`by`), `weight by`,
    `named`, and `titled` clauses in every supported combination."""
    assert parse(r"hist pt") == [Histogram([Axis(Symbol("pt"), None)], None, None, None)]
    assert parse(r"hist pt, eta") == [Histogram([Axis(Symbol("pt"), None), Axis(Symbol("eta"), None)], None, None, None)]
    assert parse(r"hist pt by regular(100, 0, 150)") == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)]))], None, None, None)]
    assert parse(r"hist pt by regular(100, 0, 150), eta by regular(100, -5, 5)") == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)])), Axis(Symbol("eta"), Call(Symbol("regular"), [Literal(100), Call(Symbol("*-1"), [Literal(5)]), Literal(5)]))], None, None, None)]
    assert parse(r"hist pt weight by w") == [Histogram([Axis(Symbol("pt"), None)], Symbol("w"), None, None)]
    assert parse(r"hist pt, eta weight by w") == [Histogram([Axis(Symbol("pt"), None), Axis(Symbol("eta"), None)], Symbol("w"), None, None)]
    assert parse(r"hist pt by regular(100, 0, 150), eta weight by w") == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)])), Axis(Symbol("eta"), None)], Symbol("w"), None, None)]
    assert parse(r'hist pt named "hello"') == [Histogram([Axis(Symbol("pt"), None)], None, Literal("hello"), None)]
    assert parse(r'hist pt, eta named "hello"') == [Histogram([Axis(Symbol("pt"), None), Axis(Symbol("eta"), None)], None, Literal("hello"), None)]
    assert parse(r'hist pt weight by w named "hello"') == [Histogram([Axis(Symbol("pt"), None)], Symbol("w"), Literal("hello"), None)]
    assert parse(r'hist pt by regular(100, 0, 150) named "hello"') == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)]))], None, Literal("hello"), None)]
    assert parse(r'hist pt by regular(100, 0, 150) weight by w named "hello"') == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)]))], Symbol("w"), Literal("hello"), None)]
    assert parse(r'hist pt titled "there"') == [Histogram([Axis(Symbol("pt"), None)], None, None, Literal("there"))]
    assert parse(r'hist pt, eta titled "there"') == [Histogram([Axis(Symbol("pt"), None), Axis(Symbol("eta"), None)], None, None, Literal("there"))]
    assert parse(r'hist pt weight by w titled "there"') == [Histogram([Axis(Symbol("pt"), None)], Symbol("w"), None, Literal("there"))]
    assert parse(r'hist pt by regular(100, 0, 150) titled "there"') == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)]))], None, None, Literal("there"))]
    assert parse(r'hist pt by regular(100, 0, 150) weight by w titled "there"') == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)]))], Symbol("w"), None, Literal("there"))]
    assert parse(r'hist pt named "hello" titled "there"') == [Histogram([Axis(Symbol("pt"), None)], None, Literal("hello"), Literal("there"))]
    assert parse(r'hist pt, eta named "hello" titled "there"') == [Histogram([Axis(Symbol("pt"), None), Axis(Symbol("eta"), None)], None, Literal("hello"), Literal("there"))]
    assert parse(r'hist pt weight by w named "hello" titled "there"') == [Histogram([Axis(Symbol("pt"), None)], Symbol("w"), Literal("hello"), Literal("there"))]
    assert parse(r'hist pt by regular(100, 0, 150) named "hello" titled "there"') == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)]))], None, Literal("hello"), Literal("there"))]
    assert parse(r'hist pt by regular(100, 0, 150) weight by w named "hello" titled "there"') == [Histogram([Axis(Symbol("pt"), Call(Symbol("regular"), [Literal(100), Literal(0), Literal(150)]))], Symbol("w"), Literal("hello"), Literal("there"))]
def test_cutvary():
    """`cut` and `vary` blocks: weight/named/titled modifiers, multiple
    trials, and nesting of cut/vary in either order."""
    assert parse(r"""
cut x > 0 {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, None, None, [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 {
hist x
}
cut x <= 0 {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, None, None, [Histogram([Axis(Symbol('x'), None)], None, None, None)]), Cut(Call(Symbol("<="), [Symbol("x"), Literal(0)]), None, None, None, [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 weight by w {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), Symbol("w"), None, None, [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 named "hello" {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, Literal("hello"), None, [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 weight by w named "hello" {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), Symbol("w"), Literal("hello"), None, [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 titled "there" {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, None, Literal("there"), [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 weight by w titled "there" {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), Symbol("w"), None, Literal("there"), [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 named "hello" titled "there" {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, Literal("hello"), Literal("there"), [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    assert parse(r"""
cut x > 0 weight by w named "hello" titled "there" {
hist x
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), Symbol("w"), Literal("hello"), Literal("there"), [Histogram([Axis(Symbol('x'), None)], None, None, None)])]
    # Nested cuts.
    assert parse(r"""
cut x > 0 {
cut y > 0 {
hist z
}
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, None, None, [Cut(Call(Symbol(">"), [Symbol("y"), Literal(0)]), None, None, None, [Histogram([Axis(Symbol("z"), None)], None, None, None)])])]
    assert parse(r"""
vary by {epsilon = 0} {
hist epsilon
}
""") == [Vary([Trial([Assignment("epsilon", Literal(0))], None)], [Histogram([Axis(Symbol("epsilon"), None)], None, None, None)])]
    assert parse(r"""
vary by {x = 0; y = 0} {
hist x + y
}
""") == [Vary([Trial([Assignment("x", Literal(0)), Assignment("y", Literal(0))], None)], [Histogram([Axis(Call(Symbol("+"), [Symbol("x"), Symbol("y")]), None)], None, None, None)])]
    assert parse(r"""
vary by {epsilon = 0} named "hello" {
hist epsilon
}
""") == [Vary([Trial([Assignment("epsilon", Literal(0))], Literal("hello"))], [Histogram([Axis(Symbol("epsilon"), None)], None, None, None)])]
    # Multiple `by {...}` clauses produce multiple trials.
    assert parse(r"""
vary by {epsilon = 0} by {epsilon = 0.001} {
hist epsilon
}
""") == [Vary([Trial([Assignment("epsilon", Literal(0))], None), Trial([Assignment("epsilon", Literal(0.001))], None)], [Histogram([Axis(Symbol("epsilon"), None)], None, None, None)])]
    assert parse(r"""
vary by {epsilon = 0} named "one"
by {epsilon = 0.001} {
hist epsilon
}
""") == [Vary([Trial([Assignment("epsilon", Literal(0))], Literal("one")), Trial([Assignment("epsilon", Literal(0.001))], None)], [Histogram([Axis(Symbol("epsilon"), None)], None, None, None)])]
    assert parse(r"""
vary by {epsilon = 0} named "one"
by {epsilon = 0.001} named "two" {
hist epsilon
}
""") == [Vary([Trial([Assignment("epsilon", Literal(0))], Literal("one")), Trial([Assignment("epsilon", Literal(0.001))], Literal("two"))], [Histogram([Axis(Symbol("epsilon"), None)], None, None, None)])]
    # cut and vary nest in either order.
    assert parse(r"""
cut x > 0 {
vary by {epsilon = 0} {
hist epsilon
}
}
""") == [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, None, None, [Vary([Trial([Assignment("epsilon", Literal(0))], None)], [Histogram([Axis(Symbol("epsilon"), None)], None, None, None)])])]
    assert parse(r"""
vary by {epsilon = 0} {
cut x > 0 {
hist epsilon
}
}
""") == [Vary([Trial([Assignment("epsilon", Literal(0))], None)], [Cut(Call(Symbol(">"), [Symbol("x"), Literal(0)]), None, None, None, [Histogram([Axis(Symbol("epsilon"), None)], None, None, None)])])]
def test_macro():
"Macros haven't been fully tested, but I'll leave that for later."
assert parse(r"""
def f() {
x
}
hist f()
""") == [Histogram([Axis(Symbol("x"), None)], None, None, None)]
assert parse(r"""
def f() {
hist x
}
f()
""") == [Histogram([Axis(Symbol("x"), None)], None, None, None)]
assert parse(r"""
def f(y) {
hist y
}
f(x)
""") == [Histogram([Axis(Symbol("x"), None)], None, None, None)]
def test_benchmark8():
    """For events with at least three leptons and a same-flavor
    opposite-sign lepton pair, find the same-flavor opposite-sign
    lepton pair with the mass closest to 91.2 GeV and plot the pT
    of the leading other lepton.

    Parse-only smoke test (no expected AST is asserted)."""
    assert parse(r"""
leptons = electrons union muons
cut count(leptons) >= 3 named "three_leptons" {
Z = electrons as (lep1, lep2) union muons as (lep1, lep2)
where lep1.charge != lep2.charge
min by abs(mass(lep1, lep2) - 91.2)
third = leptons except [Z.lep1, Z.lep2] max by pt
hist third.pt by regular(100, 0, 250) named "third_pt" titled "Leading other lepton pT"
}
""")
| StarcoderdataPython |
3442709 | <reponame>gubschk/CDEWIP
# -*- coding: utf-8 -*-
"""
chemdataextractor.nlp
~~~~~~~~~~~~~~~~~~~~~
Chemistry-aware natural language processing framework.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .abbrev import AbbreviationDetector, ChemAbbreviationDetector
from .tokenize import SentenceTokenizer, ChemSentenceTokenizer, WordTokenizer, ChemWordTokenizer, FineWordTokenizer
from .pos import ApPosTagger, ChemApPosTagger, CrfPosTagger, ChemCrfPosTagger
from .cem import CemTagger, CiDictCemTagger, CsDictCemTagger, CrfCemTagger
from .tag import NoneTagger, ApTagger, CrfTagger, DictionaryTagger, RegexTagger
| StarcoderdataPython |
9665514 | <filename>learning_tools/keras-rl/dqn/dqn_complex.py
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, LinearAnnealedPolicy
from rl.memory import SequentialMemory
from oscar.env.envs.general_learning_env import GeneralLearningEnv
from ML_homework.policy_iteration.complex_policy_iteration import policy_iteration_iterator, state_from_obs

# Scenario configuration and derived CSV log path (CONFIG_FILE[7:-5] strips
# the 'config/' prefix and '.json' suffix).
CONFIG_FILE = 'config/learning_complex.json'
LOG_FILE = 'learning_tools/learning_nn/keras-rl/duel_dqn_{}.csv'.format(CONFIG_FILE[7:-5])

# Get the environment and extract the number of actions.
env = GeneralLearningEnv(CONFIG_FILE, False, log_file_path=None, publish_stats=False)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n

# Build a very simple model regardless of the dueling architecture: when
# dueling is enabled, DQNAgent builds the dueling network on top of this
# model automatically. Alternatively, build a dueling network yourself and
# turn off the dueling option in DQNAgent.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())

# Configure and compile the agent. Any built-in Keras optimizer and metric
# can be used here.
memory = SequentialMemory(limit=50000, window_length=1)
boltzmann_policy = BoltzmannQPolicy(tau=20.0, clip=(0.0, 500.0))
# Anneal the Boltzmann temperature `tau` from 1.0 towards 20.0 over the
# first 10000 steps.
policy = LinearAnnealedPolicy(boltzmann_policy, 'tau', 1.0, 20.0, 1.0, 10000)

# Enable the dueling network; dueling_type is one of {'avg','max','naive'}.
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
               enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])

# Warm up: run policy iteration, then pre-fill the replay memory with 20
# episodes played under the resulting policy.
# NOTE(review): source indentation was lost; the episode loop is assumed to
# run AFTER the policy-iteration loop finishes -- confirm against the repo.
pi = None
for p in policy_iteration_iterator(10, 0.5, file_path="/tmp/state_table.csv", save_path="/tmp/OSCAR/"):
    pi = p  # keep only the last (most converged) policy
for i in range(20):
    obs = env.reset()
    while True:
        s = state_from_obs(obs)
        a = pi[s.id()]
        old_obs = obs
        obs, r, done, debug_dict = env.step(a)
        memory.append(old_obs, a, r, done, False)
        if done:
            break

# Recreate the environment with CSV logging enabled for the training run.
env.close()
env = GeneralLearningEnv(CONFIG_FILE, False, log_file_path=LOG_FILE, publish_stats=False)

# Okay, now it's time to learn something! Visualizing training slows it down
# a lot; training can always be safely aborted early with Ctrl + C.
dqn.fit(env, nb_steps=500000, visualize=False, verbose=2)

# After training is done, we save the final weights.
dqn.save_weights('learning_tools/learning_nn/keras-rl/duel_dqn_{}_weights.h5f'.format(CONFIG_FILE[7:-5]),
                 overwrite=True)

env.close()
del env

# env = GeneralLearningEnv(CONFIG_FILE, True, publish_stats=False)
#
# # Finally, evaluate our algorithm for 5 episodes.
# dqn.test(env, nb_episodes=5, visualize=False)
8045839 | <reponame>marcortiz11/FastComposedModels<gh_stars>0
import numpy as np
def __spin_roulette(fit_vals, mask=None):
    """
    Spin the roulette wheel once: each individual owns a slice of the wheel
    proportional to its fitness, and a uniform draw picks the winner.

    :param fit_vals: Fitness values of the population (must all be positive)
    :param mask: Boolean vector. Tells which individuals consider this time
    :return: index of selected individual among population
    """
    population_size = len(fit_vals)
    assert population_size > 0, "ERROR: Fitness values empty list"
    assert min(fit_vals) > 0, "ERROR: Roulette selection only works with positive fitness values"
    if mask is not None:
        assert len(mask) == population_size, "ERROR: Length of mask should be length of fitness values"

    weights = np.array(fit_vals)
    wheel_total = sum(weights * mask if mask is not None else weights)

    # Draw a point on the wheel, then walk the (unmasked) slots until the
    # accumulated weight passes it.
    target = np.random.uniform(0, wheel_total)
    covered = 0
    slot = 0
    while covered <= target and slot < population_size:
        if mask is None or mask[slot]:
            covered += weights[slot]
        slot += 1
    return slot - 1
def roulette_selection(fit_vals, num_population):
    """
    Pick ``num_population`` distinct individuals by repeatedly spinning the
    roulette wheel, masking out each winner before the next spin.

    :param fit_vals: Vector of fitting values
    :param num_population: Number of individuals to select
    :return: ids of the population for the next generation
    """
    n_select = min(num_population, len(fit_vals))
    available = [1] * len(fit_vals)  # 1 = still eligible for selection
    winners = []
    for _ in range(n_select):
        chosen = __spin_roulette(fit_vals, available)
        winners.append(chosen)
        available[chosen] = 0  # exclude the winner from subsequent spins
    return winners
def most_fit_selection(fit_vals, num_population):
    """
    Elitist selection: keep the ``num_population`` individuals with the
    highest fitness values.

    :param fit_vals: Vector of fitting values
    :param num_population: Number of individuals to select
    :return: ids of the population for the next generation
    """
    n_keep = min(num_population, len(fit_vals))
    ranked_ascending = np.argsort(fit_vals)
    return ranked_ascending[-n_keep:]
def linear_rank_selection(fit_vals, n_survivors):
    """
    Linear-rank selection: rank individuals by fitness and sample
    ``n_survivors`` of them without replacement with probability
    proportional to rank. If ``n_survivors`` covers the whole population,
    every index is returned.
    """
    n_individuals = len(fit_vals)
    if n_survivors >= n_individuals:
        return list(range(n_individuals))
    ranking = np.argsort(fit_vals)
    # Rank i (ascending fitness) gets probability 2*i / (N*(N-1)); the exact
    # arithmetic expression is kept so the floats (and RNG draws) match.
    rank_probs = [1 / n_individuals * (2 * (i / (n_individuals - 1))) for i in range(n_individuals)]
    drawn = np.random.choice(n_individuals, n_survivors, p=rank_probs, replace=False)
    return [ranking[i] for i in drawn]
def tournament_selection(fit_vals: np.ndarray, K: int, *fit_vals_next: np.ndarray, p=0.8) -> int:
    """
    Run one K-way tournament and return the winner's population index.

    :param fit_vals: Fitness values of individuals
    :param K: Number of individuals to participate in the tournament
    :param fit_vals_next: Secondary metrics for deciding best individuals in case of ties
    :param p: Probability of selecting the tournament winner (best-ranked)
    :return: Index of the winner of the tournament
    """
    # Geometric selection probabilities over ranks: p, p(1-p), p(1-p)^2, ...
    # normalized to sum to 1 (rank 0 = fittest participant).
    prob_distribution_selection = np.array([p * pow((1 - p), position) for position in range(K)])
    prob_distribution_selection /= sum(prob_distribution_selection)

    # Draw K distinct participants from the whole population.
    tournament_individuals = np.random.choice(len(fit_vals), K, replace=False)
    if len(fit_vals_next):
        # Rank participants by primary fitness, breaking ties with the
        # secondary metrics via lexsort (negated for descending order).
        fitness_individuals = np.row_stack((fit_vals, np.array(fit_vals_next)))
        fitness_individuals_tournment = (fitness_individuals[:, tournament_individuals])[::-1]
        result_tournment = np.lexsort(-1 * fitness_individuals_tournment)
    else:
        result_tournment = np.argsort(-1 * np.array(fit_vals)[tournament_individuals])
    # Sample a rank position according to the geometric distribution, then
    # map it back to the participant's index in the population.
    position = np.random.choice(result_tournment, 1, p=prob_distribution_selection)
    winner = tournament_individuals[position[0]]
    return winner
if __name__ == "__main__":
# Testing tournment selection with multiple fitness values
f1 = np.ones(10)
f2 = np.arange(1, 0, -1/10)
w = tournament_selection(f1, 3, f2)
| StarcoderdataPython |
365297 | import math
from iso639 import languages
from langdetect import detect
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
class Utils(object):
    """
    Utilities class: sentence splitting, tokenization, language detection
    and stop-word removal helpers.
    """

    @staticmethod
    def get_sentences(text):
        """Split *text* into sentences, dropping the final character
        (the trailing punctuation) of each sentence."""
        sentences = []
        for sentence in sent_tokenize(text):
            sentences.append(sentence[:-1])  # Remove punctuation
        return sentences

    @staticmethod
    def get_bag_of_words(sentence):
        """Tokenize *sentence* into a list of word tokens."""
        return word_tokenize(sentence)

    @staticmethod
    def detect_lang(text):
        """Return the lowercase English name of *text*'s language
        (e.g. 'english'), via langdetect + an ISO-639 alpha2 lookup."""
        return languages.get(alpha2=detect(text)).name.lower()

    @staticmethod
    def get_output_length(n_sentences, percentage):
        """Return floor(n_sentences * percentage), but never less than 1."""
        length = math.floor(float(n_sentences) * percentage)
        if length < 1:
            return 1
        return int(length)

    @staticmethod
    def remove_stop_words(sentences, text_lang):
        """Remove *text_lang* stop words from every tokenized sentence,
        mutating *sentences* in place.

        Fix: the previous version called ``list.remove`` while iterating
        the same list, which skips the element following every removal and
        only drops the first occurrence of a word; it also re-read the
        stop-word corpus once per word. The corpus is now loaded once and
        each sentence is rebuilt with a filter.
        """
        try:
            stop_words = set(stopwords.words(text_lang))
        except IOError:
            # Corpus unavailable for this language: leave sentences
            # untouched (same best-effort behavior as before).
            return
        for i in range(len(sentences)):
            sentences[i] = [word for word in sentences[i] if word not in stop_words]
| StarcoderdataPython |
4884014 | import datetime
import os
from typing import Generator, List
from celery.utils.log import get_task_logger
from dynamicannotationdb.key_utils import build_segmentation_table_name
from dynamicannotationdb.models import SegmentationMetadata
from sqlalchemy import and_, func, text
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.orm.exc import NoResultFound
from materializationengine.celery_init import celery
from materializationengine.database import dynamic_annotation_cache, sqlalchemy_cache
from materializationengine.utils import (
create_annotation_model,
create_segmentation_model,
get_config_param,
)
celery_logger = get_task_logger(__name__)
def generate_chunked_model_ids(
    mat_metadata: dict, use_segmentation_model=False
) -> List[List]:
    """Creates list of chunks with start:end index for chunking queries for materialization.

    Parameters
    ----------
    mat_metadata : dict
        Materialization metadata
    use_segmentation_model : bool, optional
        If True, chunk ids of the segmentation model instead of the
        annotation model. Defaults to False.

    Returns
    -------
    List[List]
        list of list containing start and end indices
    """
    celery_logger.info("Chunking supervoxel ids")
    if use_segmentation_model:
        AnnotationModel = create_segmentation_model(mat_metadata)
    else:
        AnnotationModel = create_annotation_model(mat_metadata)
    chunk_size = mat_metadata.get("chunk_size")
    if not chunk_size:
        # Fall back to the globally configured chunk size when the metadata
        # does not provide one.
        ROW_CHUNK_SIZE = get_config_param("MATERIALIZATION_ROW_CHUNK_SIZE")
        chunk_size = ROW_CHUNK_SIZE
    chunked_ids = chunk_ids(mat_metadata, AnnotationModel.id, chunk_size)
    # Materialize the generator into a list of [start, end] pairs.
    return [chunk for chunk in chunked_ids]
def create_chunks(data_list: List, chunk_size: int) -> Generator:
    """Yield successive chunks of at most ``chunk_size`` items from *data_list*.

    Fix: for an empty *data_list* the old code set ``chunk_size`` to 0 and
    ``range(0, 0, 0)`` raised ``ValueError: range() arg 3 must not be zero``
    on the first ``next()``; an empty input now simply yields nothing.

    Args:
        data_list (List): list to chunk
        chunk_size (int): size of chunk

    Yields:
        List: generator of chunks (the last chunk may be shorter)
    """
    if not data_list:
        return
    # Cap the step at the list length so a single full-list chunk is yielded
    # when chunk_size exceeds it (same behavior as before).
    if chunk_size > len(data_list):
        chunk_size = len(data_list)
    for i in range(0, len(data_list), chunk_size):
        yield data_list[i : i + chunk_size]
@celery.task(name="process:fin", acks_late=True, bind=True)
def fin(self, *args, **kwargs):
return True
@celery.task(name="process:workflow_complete", acks_late=True, bind=True)
def workflow_complete(self, workflow_name):
return f"{workflow_name} completed successfully"
def get_materialization_info(
    datastack_info: dict,
    analysis_version: int = None,
    materialization_time_stamp: datetime.datetime = None,
    skip_table: bool = False,
    row_size: int = 1_000_000,
) -> List[dict]:
    """Initialize materialization by an aligned volume name. Iterates through all
    tables in an aligned volume database and gathers metadata for each table. The list
    of tables are passed to workers for materialization.

    Args:
        datastack_info (dict): Datastack info
        analysis_version (int, optional): Analysis version to use for frozen materialization. Defaults to None.
        materialization_time_stamp (datetime.datetime, optional): Timestamp to stamp the
            run with; defaults to ``datetime.datetime.utcnow()`` when omitted.
        skip_table (bool, optional): Triggers row count for skipping tables larger than row_size arg. Defaults to False.
        row_size (int, optional): Row size number to check. Defaults to 1_000_000.

    Returns:
        List[dict]: per-table materialization metadata dicts
    """
    aligned_volume_name = datastack_info["aligned_volume"]["name"]
    pcg_table_name = datastack_info["segmentation_source"].split("/")[-1]
    segmentation_source = datastack_info.get("segmentation_source")
    if not materialization_time_stamp:
        materialization_time_stamp = datetime.datetime.utcnow()
    db = dynamic_annotation_cache.get_db(aligned_volume_name)
    annotation_tables = db.get_valid_table_names()
    metadata = []
    celery_logger.debug(f"Annotation tables: {annotation_tables}")
    for annotation_table in annotation_tables:
        row_count = db._get_table_row_count(annotation_table, filter_valid=True)
        max_id = db.get_max_id_value(annotation_table)
        min_id = db.get_min_id_value(annotation_table)
        if row_count == 0:
            # Nothing to materialize for this table.
            continue
        if row_count >= row_size and skip_table:
            # Optionally skip very large tables.
            continue
        md = db.get_table_metadata(annotation_table)
        # Default any missing/zero voxel resolution component to 1.0.
        vx = md.get("voxel_resolution_x", None)
        vy = md.get("voxel_resolution_y", None)
        vz = md.get("voxel_resolution_z", None)
        vx = vx or 1.0
        vy = vy or 1.0
        vz = vz or 1.0
        voxel_resolution = [vx, vy, vz]
        reference_table = md.get("reference_table")
        if max_id and max_id > 0:
            table_metadata = {
                "annotation_table_name": annotation_table,
                "datastack": datastack_info["datastack"],
                "aligned_volume": str(aligned_volume_name),
                "schema": db.get_table_schema(annotation_table),
                "max_id": int(max_id),
                "min_id": int(min_id),
                "row_count": row_count,
                "add_indices": True,
                "coord_resolution": voxel_resolution,
                "reference_table": reference_table,
                "materialization_time_stamp": str(materialization_time_stamp),
                "table_count": len(annotation_tables),
            }
            if not reference_table:
                # Non-reference tables carry a companion segmentation table;
                # gather (or schedule creation of) its metadata.
                segmentation_table_name = build_segmentation_table_name(
                    annotation_table, pcg_table_name
                )
                try:
                    segmentation_metadata = db.get_segmentation_table_metadata(
                        annotation_table, pcg_table_name
                    )
                    create_segmentation_table = False
                except NoResultFound as e:
                    celery_logger.warning(f"SEGMENTATION TABLE DOES NOT EXIST: {e}")
                    segmentation_metadata = {"last_updated": None}
                    create_segmentation_table = True
                last_updated_time_stamp = segmentation_metadata.get("last_updated")
                if not last_updated_time_stamp:
                    last_updated_time_stamp = None
                else:
                    last_updated_time_stamp = str(last_updated_time_stamp)
                table_metadata.update(
                    {
                        "create_segmentation_table": create_segmentation_table,
                        "segmentation_table_name": segmentation_table_name,
                        "temp_mat_table_name": f"temp__{annotation_table}",
                        "pcg_table_name": pcg_table_name,
                        "segmentation_source": segmentation_source,
                        "last_updated_time_stamp": last_updated_time_stamp,
                        "chunk_size": get_config_param(
                            "MATERIALIZATION_ROW_CHUNK_SIZE"
                        ),
                        "find_all_expired_roots": datastack_info.get(
                            "find_all_expired_roots", False
                        ),
                    }
                )
            if analysis_version:
                # Frozen materialization: point at the versioned analysis db.
                table_metadata.update(
                    {
                        "analysis_version": analysis_version,
                        "analysis_database": f"{datastack_info['datastack']}__mat{analysis_version}",
                    }
                )
            metadata.append(table_metadata.copy())
    db.cached_session.close()
    return metadata
@celery.task(name="process:collect_data", acks_late=True)
def collect_data(*args, **kwargs):
return args, kwargs
def query_id_range(column, start_id: int, end_id: int):
    """Build a filter expression selecting ids in ``[start_id, end_id)``.

    When ``end_id`` is falsy (None/0) the range is open-ended and only the
    lower bound is applied.
    """
    lower_bound = column >= start_id
    if not end_id:
        return lower_bound
    return and_(lower_bound, column < end_id)
def chunk_ids(mat_metadata, model, chunk_size: int):
    """Yield ``[start, end]`` id ranges over *model*'s ids in steps of
    ``chunk_size``; the final range has ``end = None`` (open-ended).
    """
    aligned_volume = mat_metadata.get("aligned_volume")
    session = sqlalchemy_cache.get(aligned_volume)
    # Number every id in order, then keep every chunk_size-th one as a
    # chunk boundary.
    q = session.query(
        model, func.row_number().over(order_by=model).label("row_count")
    ).from_self(model)
    if chunk_size > 1:
        # '%%' escapes '%' for Python %-formatting, producing e.g.
        # "row_count % 500=1" (SQL modulo).
        q = q.filter(text("row_count %% %d=1" % chunk_size))
    chunks = [id for id, in q]
    while chunks:
        chunk_start = chunks.pop(0)
        chunk_end = chunks[0] if chunks else None
        yield [chunk_start, chunk_end]
@celery.task(
name="process:update_metadata",
bind=True,
acks_late=True,
autoretry_for=(Exception,),
max_retries=3,
)
def update_metadata(self, mat_metadata: dict):
"""Update 'last_updated' column in the segmentation
metadata table for a given segmentation table.
Args:
mat_metadata (dict): materialization metadata
Returns:
str: description of table that was updated
"""
aligned_volume = mat_metadata["aligned_volume"]
segmentation_table_name = mat_metadata["segmentation_table_name"]
session = sqlalchemy_cache.get(aligned_volume)
materialization_time_stamp = mat_metadata["materialization_time_stamp"]
try:
last_updated_time_stamp = datetime.datetime.strptime(
materialization_time_stamp, "%Y-%m-%d %H:%M:%S.%f"
)
except ValueError:
last_updated_time_stamp = datetime.datetime.strptime(
materialization_time_stamp, "%Y-%m-%dT%H:%M:%S.%f"
)
try:
seg_metadata = (
session.query(SegmentationMetadata)
.filter(SegmentationMetadata.table_name == segmentation_table_name)
.one()
)
seg_metadata.last_updated = last_updated_time_stamp
session.commit()
except Exception as e:
celery_logger.error(f"SQL ERROR: {e}")
session.rollback()
finally:
session.close()
return {
f"Table: {segmentation_table_name}": f"Time stamp {materialization_time_stamp}"
}
@celery.task(
name="process:add_index",
bind=True,
acks_late=True,
task_reject_on_worker_lost=True,
autoretry_for=(Exception,),
max_retries=3,
)
def add_index(self, database: dict, command: str):
"""Add an index or a contrainst to a table.
Args:
mat_metadata (dict): datastack info for the aligned_volume derived from the infoservice
command (str): sql command to create an index or constraint
Raises:
self.retry: retries task when an error creating an index occurs
Returns:
str: String of SQL command
"""
engine = sqlalchemy_cache.get_engine(database)
# increase maintenance memory to improve index creation speeds,
# reset to default after index is created
ADD_INDEX_SQL = f"""
SET maintenance_work_mem to '1GB';
{command}
SET maintenance_work_mem to '64MB';
"""
try:
with engine.begin() as conn:
celery_logger.info(f"Adding index: {command}")
result = conn.execute(ADD_INDEX_SQL)
except ProgrammingError as index_error:
celery_logger.error(index_error)
return "Index already exists"
except Exception as e:
celery_logger.error(f"Index creation failed: {e}")
raise self.retry(exc=e, countdown=3)
return f"Index {command} added to table"
| StarcoderdataPython |
3494439 | <reponame>paregorios/make-project
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Python 3 tests template (changeme)"""
import logging
from nose.tools import assert_equal, assert_false, assert_true, raises
from os.path import abspath, join, realpath
from unittest import TestCase
logger = logging.getLogger(__name__)
test_data_path = abspath(realpath(join('tests', 'data')))
def setup_module():
    """Module-level setup hook (template placeholder -- fill in as needed)."""
    pass
def teardown_module():
    """Module-level teardown hook (template placeholder -- fill in as needed)."""
    pass
class Test_This(TestCase):
    """Template test case (placeholder -- replace the stubs with real tests)."""

    def setUp(self):
        """Per-test setup hook (placeholder)."""
        pass

    def tearDown(self):
        """Per-test teardown hook (placeholder)."""
        pass

    def test_a(self):
        """Placeholder test; always passes."""
        pass
| StarcoderdataPython |
4894489 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import pprint
from typing import List
import numpy as np
import torch
from classy_vision.generic.distributed_util import (
all_reduce_max,
all_reduce_sum,
get_cuda_device_index,
get_rank,
get_world_size,
)
from classy_vision.losses import ClassyLoss, register_loss
from fvcore.common.file_io import PathManager
from torch import nn
from vissl.config import AttrDict
@register_loss("swav_distill_loss")
class SwAVDistillLoss(ClassyLoss):
    """
    SwAV loss (https://arxiv.org/abs/2006.09882, Caron et al.) combined with a
    similarity-distillation term computed against a teacher's output.

    Config params:
        embedding_dim (int): the projection head output dimension
        temperature (float): temperature to be applied to the logits
        use_double_precision (bool): whether to use double precision for the loss.
            This could be a good idea to avoid NaNs.
        normalize_last_layer (bool): whether to normalize the last layer
        num_iters (int): number of sinkhorn algorithm iterations to make
        epsilon (float): see the paper for details
        num_crops (int): number of crops used
        crops_for_assign (List[int]): what crops to use for assignment
        num_prototypes (List[int]): number of prototypes
        temp_hard_assignment_iters (int): whether to do hard assignment for the
            initial few iterations
        output_dir (str): for dumping the debugging info in case loss
            becomes NaN
        distill_alpha (float): weight of the similarity-distillation term
        queue:
            queue_length (int): number of features to store and used in the scores
            start_iter (int): when to start using the queue for the scores
            local_queue_length (int): length of queue per gpu
    """

    def __init__(self, loss_config: AttrDict):
        super().__init__()
        self.loss_config = loss_config
        cfg = self.loss_config
        self.queue_start_iter = cfg.queue.start_iter
        # The criterion object holds all mutable loss state (queues, counters).
        self.swav_criterion = SwAVDistillCriterion(
            cfg.temperature,
            cfg.crops_for_assign,
            cfg.num_crops,
            cfg.num_iters,
            cfg.epsilon,
            cfg.use_double_precision,
            cfg.num_prototypes,
            cfg.queue.local_queue_length,
            cfg.embedding_dim,
            cfg.temp_hard_assignment_iters,
            cfg.output_dir,
            cfg.distill_alpha,
        )

    @classmethod
    def from_config(cls, loss_config: AttrDict):
        """
        Instantiates SwAVDistillLoss from configuration.

        Args:
            loss_config: configuration for the loss

        Returns:
            SwAVDistillLoss instance.
        """
        return cls(loss_config)

    def forward(
        self,
        student_output: torch.Tensor,
        teacher_output: torch.Tensor,
        target: torch.Tensor,
    ):
        """Average the criterion over every prototype head and maintain the queue.

        ``student_output[0]`` is the embedding; the remaining entries are the
        per-head prototype scores. ``target`` is unused by this loss.
        """
        criterion = self.swav_criterion
        criterion.use_queue = (
            criterion.local_queue_length > 0
            and criterion.num_iteration >= self.queue_start_iter
        )
        head_losses = [
            criterion(head_scores, head_idx, teacher_output)
            for head_idx, head_scores in enumerate(student_output[1:])
        ]
        loss = sum(head_losses) / (len(student_output) - 1)
        criterion.num_iteration += 1
        if criterion.use_queue:
            criterion.update_emb_queue(student_output[0].detach())
        return loss

    def __repr__(self):
        cfg = self.loss_config
        repr_dict = dict(
            name=self._get_name(),
            epsilon=cfg.epsilon,
            use_double_precision=cfg.use_double_precision,
            local_queue_length=cfg.queue.local_queue_length,
            temperature=cfg.temperature,
            num_prototypes=cfg.num_prototypes,
            num_crops=cfg.num_crops,
            nmb_sinkhornknopp_iters=cfg.num_iters,
            embedding_dim=cfg.embedding_dim,
            temp_hard_assignment_iters=cfg.temp_hard_assignment_iters,
        )
        return pprint.pformat(repr_dict, indent=2)
class SwAVDistillCriterion(nn.Module):
    """
    This criterion is used by the SwAV paper https://arxiv.org/abs/2006.09882
    by Caron et al. See the paper for more details about the loss.

    On top of the standard SwAV swapped-prediction loss, ``forward`` adds a
    similarity-distillation term: the KL divergence between the student's and
    the teacher's row-softmaxed pairwise-similarity matrices, weighted by
    ``distill_alpha``.

    Config params:
        embedding_dim (int): the projection head output dimension
        temperature (float): temperature to be applied to the logits
        num_iters (int): number of sinkhorn algorithm iterations to make
        epsilon (float): see the paper for details
        num_crops (int): number of crops used
        crops_for_assign (List[int]): what crops to use for assignment
        num_prototypes (List[int]): number of prototypes
        temp_hard_assignment_iters (int): whether to do hard assignment for the
            initial few iterations
        output_dir (str): for dumping the debugging info in case loss
            becomes NaN
        local_queue_length (int): length of queue per gpu
        distill_alpha (float): weight of the similarity-distillation KL term
    """

    def __init__(
        self,
        temperature: float,
        crops_for_assign: List[int],
        num_crops: int,
        num_iters: int,
        epsilon: float,
        use_double_prec: bool,
        num_prototypes: List[int],
        local_queue_length: int,
        embedding_dim: int,
        temp_hard_assignment_iters: int,
        output_dir: str,
        distill_alpha: float,
    ):
        super(SwAVDistillCriterion, self).__init__()
        self.use_gpu = get_cuda_device_index() > -1
        self.temperature = temperature
        self.crops_for_assign = crops_for_assign
        self.num_crops = num_crops
        self.nmb_sinkhornknopp_iters = num_iters
        self.epsilon = epsilon
        self.use_double_prec = use_double_prec
        self.num_prototypes = num_prototypes
        self.nmb_heads = len(self.num_prototypes)
        self.embedding_dim = embedding_dim
        self.temp_hard_assignment_iters = temp_hard_assignment_iters
        self.local_queue_length = local_queue_length
        self.dist_rank = get_rank()
        self.world_size = get_world_size()
        self.log_softmax = nn.LogSoftmax(dim=1).cuda()
        self.softmax = nn.Softmax(dim=1).cuda()
        # Registered as a buffer so the counter is saved/restored with checkpoints.
        self.register_buffer("num_iteration", torch.zeros(1, dtype=int))
        self.use_queue = False
        if local_queue_length > 0:
            self.initialize_queue()
        self.output_dir = output_dir
        self.distill_alpha = distill_alpha

    def distributed_sinkhornknopp(self, Q: torch.Tensor):
        """
        Apply the distributed sinknorn optimization on the scores matrix to
        find the assignments
        """
        eps_num_stab = 1e-12
        with torch.no_grad():
            # remove potential infs in Q
            # replace the inf entries with the max of the finite entries in Q
            mask = torch.isinf(Q)
            ind = torch.nonzero(mask)
            if len(ind) > 0:
                for i in ind:
                    Q[i[0], i[1]] = 0
                m = torch.max(Q)
                for i in ind:
                    Q[i[0], i[1]] = m
            # normalize Q so it sums to 1 across the whole (distributed) batch
            sum_Q = torch.sum(Q, dtype=Q.dtype)
            all_reduce_sum(sum_Q)
            Q /= sum_Q
            k = Q.shape[0]
            n = Q.shape[1]
            N = self.world_size * Q.shape[1]
            # we follow the u, r, c and Q notations from
            # https://arxiv.org/abs/1911.05371
            r = torch.ones(k) / k
            c = torch.ones(n) / N
            if self.use_double_prec:
                r, c = r.double(), c.double()
            if self.use_gpu:
                r = r.cuda(non_blocking=True)
                c = c.cuda(non_blocking=True)
            for _ in range(self.nmb_sinkhornknopp_iters):
                u = torch.sum(Q, dim=1, dtype=Q.dtype)
                all_reduce_sum(u)
                # for numerical stability, add a small epsilon value
                # for non-zero Q values.
                if len(torch.nonzero(u == 0)) > 0:
                    Q += eps_num_stab
                    u = torch.sum(Q, dim=1, dtype=Q.dtype)
                    all_reduce_sum(u)
                u = r / u
                # remove potential infs in "u"
                # replace the inf entries with the max of the finite entries in "u"
                mask = torch.isinf(u)
                ind = torch.nonzero(mask)
                if len(ind) > 0:
                    for i in ind:
                        u[i[0]] = 0
                    m = torch.max(u)
                    for i in ind:
                        u[i[0]] = m
                Q *= u.unsqueeze(1)
                Q *= (c / torch.sum(Q, dim=0, dtype=Q.dtype)).unsqueeze(0)
            Q = (Q / torch.sum(Q, dim=0, keepdim=True, dtype=Q.dtype)).t().float()
            # hard assignment
            if self.num_iteration < self.temp_hard_assignment_iters:
                index_max = torch.max(Q, dim=1)[1]
                Q.zero_()
                Q.scatter_(1, index_max.unsqueeze(1), 1)
        return Q

    def forward(self, scores: torch.Tensor, head_id: int, teacher_embedding: torch.Tensor):
        """Compute the SwAV swapped-prediction loss plus the distillation term.

        Args:
            scores: prototype scores for all crops of head ``head_id``,
                concatenated along dim 0 (shape[0] == bs * num_crops).
            head_id: which prototype head the scores belong to (selects the
                matching score queue).
            teacher_embedding: teacher output used for similarity distillation.

        Returns:
            Scalar tensor: mean swapped-prediction loss over the assignment
            crops, plus ``distill_alpha`` times the similarity KL term.
        """
        assert scores.shape[0] % self.num_crops == 0
        bs = scores.shape[0] // self.num_crops
        # Fix: removed leftover debug print() calls that dumped tensor shapes
        # and the distillation loss to stdout on every training iteration.
        # Distillation term: match the student's pairwise-similarity softmax
        # (over rows) to the teacher's via KL divergence.
        student_similarity = torch.mm(scores, scores.t()) / self.temperature
        teacher_similarity = torch.mm(teacher_embedding, teacher_embedding.t()) / self.temperature
        soft_student_similarities = torch.nn.functional.log_softmax(student_similarity, dim=1)
        soft_teacher_similarities = torch.nn.functional.softmax(teacher_similarity, dim=1)
        distill_loss = torch.nn.functional.kl_div(soft_student_similarities, soft_teacher_similarities, reduction='batchmean')
        total_loss = 0
        n_term_loss = 0
        # 2 big crops are normally used for the assignment
        for i, crop_id in enumerate(self.crops_for_assign):
            with torch.no_grad():
                scores_this_crop = scores[bs * crop_id : bs * (crop_id + 1)]
                if self.use_queue:
                    queue = getattr(self, "local_queue" + str(head_id))[i].clone()
                    scores_this_crop = torch.cat((scores_this_crop, queue))
                if self.use_double_prec:
                    assignments = torch.exp(
                        scores_this_crop.double() / np.float64(self.epsilon)
                    ).t()
                    assignments = assignments.double()
                else:
                    assignments = scores_this_crop / self.epsilon
                    # use the log-sum-exp trick for numerical stability.
                    M = torch.max(assignments)
                    all_reduce_max(M)
                    assignments -= M
                    assignments = torch.exp(assignments).t()
                # Sinkhorn gives soft cluster assignments; keep only the rows
                # for the current batch (queue rows are dropped).
                assignments = self.distributed_sinkhornknopp(assignments)[:bs]
                idx_crop_pred = np.delete(np.arange(self.num_crops), crop_id)
            loss = 0
            # every other crop predicts this crop's assignments
            for p in idx_crop_pred:
                if self.use_double_prec:
                    loss -= torch.mean(
                        torch.sum(
                            assignments
                            * self.log_softmax(
                                scores[bs * p : bs * (p + 1)].double()
                                / np.float64(self.temperature)
                            ),
                            dim=1,
                            dtype=assignments.dtype,
                        )
                    )
                else:
                    loss -= torch.mean(
                        torch.sum(
                            assignments
                            * self.log_softmax(
                                scores[bs * p : bs * (p + 1)] / self.temperature
                            ),
                            dim=1,
                            dtype=assignments.dtype,
                        )
                    )
            loss /= len(idx_crop_pred)
            total_loss += loss
            n_term_loss += 1
            # stop training if NaN appears and log the output to help debugging
            # TODO (prigoyal): extract the logic to be common for all losses
            # debug_state() method that all losses can override
            if torch.isnan(loss):
                logging.info(
                    f"Infinite Loss or NaN. Loss value: {loss}, rank: {self.dist_rank}"
                )
                scores_output_file = os.path.join(
                    self.output_dir,
                    "rank" + str(self.dist_rank) + "_scores" + str(i) + ".pth",
                )
                assignments_out_file = os.path.join(
                    self.output_dir,
                    "rank" + str(self.dist_rank) + "_assignments" + str(i) + ".pth",
                )
                with PathManager.open(scores_output_file, "wb") as fwrite:
                    torch.save(scores, fwrite)
                with PathManager.open(assignments_out_file, "wb") as fwrite:
                    torch.save(assignments, fwrite)
                logging.info(f"Saved the scores matrix to: {scores_output_file}")
                logging.info(f"Saved the assignment matrix to: {assignments_out_file}")
        total_loss /= n_term_loss
        return total_loss + distill_loss * self.distill_alpha

    def update_emb_queue(self, emb):
        """Shift each assignment crop's embedding queue and insert the new batch."""
        with torch.no_grad():
            bs = len(emb) // self.num_crops
            for i, crop_id in enumerate(self.crops_for_assign):
                queue = self.local_emb_queue[i]
                queue[bs:] = queue[:-bs].clone()
                queue[:bs] = emb[crop_id * bs : (crop_id + 1) * bs]
                self.local_emb_queue[i] = queue

    def compute_queue_scores(self, head):
        """Refresh the per-head score queues by re-scoring the queued embeddings."""
        with torch.no_grad():
            for crop_id in range(len(self.crops_for_assign)):
                for i in range(head.nmb_heads):
                    scores = getattr(head, "prototypes" + str(i))(
                        self.local_emb_queue[crop_id]
                    )
                    getattr(self, "local_queue" + str(i))[crop_id] = scores

    def initialize_queue(self):
        """Create random score queues (one per head) and the embedding queue buffers."""
        for i in range(self.nmb_heads):
            # scores queue initialized uniformly in [-1, 1)
            init_queue = (
                torch.rand(
                    len(self.crops_for_assign),
                    self.local_queue_length,
                    self.num_prototypes[i],
                )
                * 2
                - 1
            )
            self.register_buffer("local_queue" + str(i), init_queue)
        stdv = 1.0 / math.sqrt(self.embedding_dim / 3)
        init_queue = (
            torch.rand(
                len(self.crops_for_assign), self.local_queue_length, self.embedding_dim
            )
            .mul_(2 * stdv)
            .add_(-stdv)
        )
        self.register_buffer("local_emb_queue", init_queue)

    def __repr__(self):
        repr_dict = {
            "name": self._get_name(),
            "use_queue": self.use_queue,
            "local_queue_length": self.local_queue_length,
            "distill_alpha": self.distill_alpha,
            "temperature": self.temperature,
            "num_prototypes": self.num_prototypes,
            "num_crops": self.num_crops,
            "nmb_sinkhornknopp_iters": self.nmb_sinkhornknopp_iters,
            "embedding_dim": self.embedding_dim,
            "temp_hard_assignment_iters": self.temp_hard_assignment_iters,
        }
        return pprint.pformat(repr_dict, indent=2)
| StarcoderdataPython |
3389755 | <filename>tests/test2.py
import bottle
import snifter
from gevent.wsgi import WSGIServer
# Bottle application instance that the route decorators below attach to.
app = bottle.Bottle()
@app.route('/')
def home():
    """Greet the visitor, seeding the session-stored name on the first visit."""
    # snifter's middleware exposes the session dict through the WSGI environ,
    # reachable via Bottle's dict-style request access.
    session = bottle.request['snifter.session']
    name = session.get('name')
    if name is None:
        session['name'] = 'World'
        return 'Please refresh!'
    return 'Hello, %s!' % name
# Serve the app through gevent on localhost:3030 with snifter's session
# middleware wrapped around it.  NOTE(review): WSGIServer comes from the
# legacy gevent.wsgi module; newer gevent releases expose it as
# gevent.pywsgi -- confirm the pinned gevent version.
server = WSGIServer(('localhost', 3030), snifter.session_middleware(app))
server.serve_forever()
| StarcoderdataPython |
6667094 | <reponame>oyvindbusk/fullFres<filename>tests/tests.py
import pytest
import pandas as pd
import datatest as dt
if __name__ == '__main__':
    # Bug fix: the original called unittest.main(), but this module never
    # imports unittest (only pytest/pandas/datatest), so running the file
    # directly raised NameError.  Delegate to pytest, which is imported above.
    pytest.main([__file__])
5101285 | <gh_stars>100-1000
from pxr import Tf
# Load the compiled _AL_USDTransaction bindings into this module's namespace:
# use the newer Tf.PreparePythonModule helper when the USD version provides
# it, otherwise fall back to the legacy two-step import + PrepareModule call.
if hasattr(Tf, 'PreparePythonModule'):
    Tf.PreparePythonModule('_AL_USDTransaction')
else:
    from . import _AL_USDTransaction
    Tf.PrepareModule(_AL_USDTransaction, locals())
del Tf
# Attach generated docstrings if they were built; both the release (__DOC)
# and the temporary (__tmpDoc) variants are optional, so failures are ignored.
try:
    import __DOC
    __DOC.Execute(locals())
    del __DOC
except Exception:
    try:
        import __tmpDoc
        __tmpDoc.Execute(locals())
        del __tmpDoc
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # narrowing to Exception would match the outer handler.
        pass
class ScopedTransaction(object):
    """Context manager that opens a USD Transaction on entry and closes it on exit.

    Usage:
        with ScopedTransaction(stage, layer) as opened:
            ...  # edits on `layer` are grouped into one transaction
    """

    def __init__(self, stage, layer):
        # Transaction is provided by the compiled _AL_USDTransaction bindings
        # loaded into this module's namespace above.
        self.transaction = Transaction(stage, layer)

    def __enter__(self):
        # Preserve the historical contract of yielding Open()'s return value.
        return self.transaction.Open()

    def __exit__(self, type, value, traceback):
        self.transaction.Close()
        # Bug fix: the previous code returned Close()'s result.  A truthy
        # return value from __exit__ tells Python to suppress any exception
        # raised inside the `with` block, so a successful Close() silently
        # swallowed errors.  Always let exceptions propagate.
        return False
| StarcoderdataPython |
6512592 | <filename>exercises/003_Variables/variables_001.py
## Python INTRO for TD Users
## <NAME>
## May, 2018
## Understanding python variables and dinamic typing.
# Demonstrates Python's dynamic typing: the same name may be rebound to
# values of different types over the program's lifetime.  `a` is bound in
# turn to an int, a str, a float, a bool and None, printing each value.
for a in (0, 'Hola Barcelona!', 3.14162812, True, None):
    print(a)
1891004 | """lightweight test for pysal metapckage that functions import."""
def test_imports():
    """Smoke-test that tobler's public entry points can be imported.

    An ImportError here indicates a broken install/packaging of the
    tobler subpackages; import order is preserved deliberately.
    """
    from tobler.dasymetric import masked_area_interpolate
    from tobler.area_weighted import area_interpolate
    from tobler.model import glm, glm_pixel_adjusted
| StarcoderdataPython |
8107090 | <gh_stars>0
from trame.internal import (
change, Controller, flush_state, get_cli_parser, get_state, get_version,
is_dirty, is_dirty_all, port, start, State, stop, trigger, update_state
)
from trame.layouts import update_layout
__version__ = get_version()
state = State()
"""This object provides pythonic access to the state
For instance, these getters are the same:
>>> field, = get_state("field")
>>> field = state.field
As are these setters:
>>> update_state("field", value)
>>> state.field = value
``get_state()`` should be used instead if more than one argument is to be
passed, and ``update_state()`` should be used instead to specify additional
arguments (e.g. ``force=True``).
The state may also be accessed and updated similar to dictionaries:
>>> value = state["field"]
>>> state["field"] = value
>>> state.update({"field": value})
This object may be imported via
>>> from trame import state
"""
controller = Controller()
"""The controller is a container for function proxies
The function proxies may be used as callbacks even though the function has
not yet been defined. The function may also be re-defined. For example:
>>> from trame import controller as ctrl
>>> layout = SinglePage("Controller test")
>>> with layout.toolbar:
... vuetify.VSpacer()
... vuetify.VBtn("Click Me", click=ctrl.on_click) # not yet defined
>>> ctrl.on_click = lambda: print("Hello, Trame!") # on_click is now defined
This can be very useful for large projects where the functions may be defined
in separate files after the UI has been constructed, or for re-defining
callbacks when conditions in the application change.
"""
__all__ = [
# Order these how we want them to show up in the docs
# Server-related
"start",
"stop",
"port",
# State-related
"state",
"update_state",
"get_state",
"flush_state",
"is_dirty",
"is_dirty_all",
"change",
# Trigger-related
"trigger",
"controller",
# Layout-related
"update_layout",
# CLI-related
"get_cli_parser",
# These are not exposed in the docs
"__version__",
]
| StarcoderdataPython |
6419843 | <reponame>eterevsky/pygui
import unittest
from unittest.mock import Mock, patch
from .layout import RootLayout, HStackLayout, VStackLayout, LayersLayout
from .observable import Attribute, Observable, make_observable
from .pane import Pane
from .view import View, HAlign, VAlign
class MyView(View):
    """View test double that records how many times on_draw is invoked."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Incremented by every on_draw call; inspected by the tests below.
        self.on_draw_calls = 0

    def on_draw(self):
        self.on_draw_calls = self.on_draw_calls + 1
class RootLayoutTest(unittest.TestCase):
def test_init_root(self):
window = Mock()
window.width = 200
window.height = 100
view = MyView(background_color=(1, 2, 3))
layout = RootLayout(window, view)
self.assertEqual(layout.child_pane.background_color, (1, 2, 3))
layout.on_draw()
self.assertEqual(view.on_draw_calls, 1)
other_view = View()
layout.child = other_view
self.assertEqual(layout.child_pane.background_color, None)
layout.on_draw()
self.assertEqual(view.on_draw_calls, 1)
def test_mouseover(self):
window = Mock()
window.width = 200
window.height = 100
layout = RootLayout(window)
callback = Mock()
layout.child_pane.mouse_pos_.observe(callback)
self.assertEqual(layout.child_pane.mouse_pos, None)
layout.on_mouse_leave(1, 2)
callback.assert_not_called()
self.assertEqual(layout.child_pane.mouse_pos, None)
layout.on_mouse_enter(50, 50)
callback.assert_called_once_with((50, 50))
callback.reset_mock()
self.assertEqual(layout.child_pane.mouse_pos, (50, 50))
layout.on_mouse_motion(51, 51, 1, 1)
callback.assert_called_once_with((51, 51))
callback.reset_mock()
self.assertEqual(layout.child_pane.mouse_pos, (51, 51))
class FakeView(View):
    """View test double counting on_draw and on_mouse_press invocations."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.on_draw_calls = 0
        self.on_mouse_press_calls = 0

    def on_draw(self, *args):
        self.on_draw_calls = self.on_draw_calls + 1

    def on_mouse_press(self, *args):
        self.on_mouse_press_calls = self.on_mouse_press_calls + 1
class HStackLayoutTest(unittest.TestCase):
def setUp(self):
self.child1 = FakeView(min_height=100,
flex_height=True,
min_width=200,
flex_width=True,
halign=HAlign.CENTER,
valign=VAlign.CENTER)
self.child2 = FakeView(min_height=150,
flex_height=False,
min_width=100,
flex_width=False,
valign=VAlign.TOP)
self.child3 = FakeView(min_height=200,
flex_height=False,
min_width=100,
flex_width=True,
halign=HAlign.FILL,
valign=VAlign.FILL,
hidden=True)
self.stack = HStackLayout(self.child1, self.child2, self.child3)
self.pane = Pane(100, 150, 600, 550)
self.stack.attach(self.pane)
def test_dims(self):
self.assertEqual(self.stack.min_width, None)
self.assertEqual(self.stack.min_height, None)
self.assertEqual(self.stack.derived_width, 300)
self.assertEqual(self.stack.derived_height, 150)
self.assertFalse(self.stack.hidden)
def test_child_alloc_coords(self):
self.assertEqual(self.child1.pane.alloc_coords, (100, 150, 500, 550))
self.assertEqual(self.child2.pane.alloc_coords, (500, 150, 600, 550))
def test_hide(self):
self.child2.hidden = True
self.assertEqual(self.child1.pane.alloc_coords, (100, 150, 600, 550))
def test_reveal(self):
self.child3.hidden = False
self.assertEqual(self.child1.pane.alloc_coords, (100, 150, 350, 550))
self.assertEqual(self.child2.pane.alloc_coords, (350, 150, 450, 550))
self.assertEqual(self.child3.pane.alloc_coords, (450, 150, 600, 550))
def test_child_coords(self):
self.child3.hidden = False
self.assertEqual(self.child1.pane.coords, (125, 300, 325, 400))
self.assertEqual(self.child2.pane.coords, (350, 400, 450, 550))
self.assertEqual(self.child3.pane.coords, (450, 150, 600, 550))
def test_draw(self):
self.pane.dispatch_event('on_draw')
self.assertEqual(self.child1.on_draw_calls, 1)
self.assertEqual(self.child2.on_draw_calls, 1)
self.assertEqual(self.child3.on_draw_calls, 0)
def test_mouse_press(self):
self.child3.hidden = False
self.pane.dispatch_event('on_mouse_press', 200, 350, None, None)
self.assertEqual(self.child1.on_mouse_press_calls, 1)
self.assertEqual(self.child2.on_mouse_press_calls, 0)
self.assertEqual(self.child3.on_mouse_press_calls, 0)
self.pane.dispatch_event('on_mouse_press', 500, 350, None, None)
self.assertEqual(self.child1.on_mouse_press_calls, 1)
self.assertEqual(self.child2.on_mouse_press_calls, 0)
self.assertEqual(self.child3.on_mouse_press_calls, 1)
# Outside of the active area of a child pane.
self.pane.dispatch_event('on_mouse_press', 110, 350, None, None)
self.assertEqual(self.child1.on_mouse_press_calls, 1)
self.assertEqual(self.child2.on_mouse_press_calls, 0)
self.assertEqual(self.child3.on_mouse_press_calls, 1)
self.pane.dispatch_event('on_mouse_press', 200, 200, None, None)
self.assertEqual(self.child1.on_mouse_press_calls, 1)
self.assertEqual(self.child2.on_mouse_press_calls, 0)
self.assertEqual(self.child3.on_mouse_press_calls, 1)
class VStackLayoutTest(unittest.TestCase):
def test_v2views(self):
child1 = FakeView(min_height=100,
flex_height=True,
min_width=200,
flex_width=True)
child2 = FakeView(min_height=150,
flex_height=False,
min_width=100,
flex_width=False)
stack = VStackLayout(child1, child2)
self.assertEqual(stack.min_width, None)
self.assertEqual(stack.min_height, None)
self.assertEqual(stack.derived_width, 200)
self.assertEqual(stack.derived_height, 250)
self.assertFalse(stack.hidden)
pane = Pane(100, 150, 500, 550)
stack.attach(pane)
self.assertEqual(child1.pane.alloc_coords, (100, 300, 500, 550))
self.assertEqual(child2.pane.alloc_coords, (100, 150, 500, 300))
def test_horizontal_overflow(self):
child1 = FakeView(min_height=100,
flex_height=True,
min_width=200,
flex_width=True)
child2 = FakeView(min_height=150,
flex_height=False,
min_width=100,
flex_width=False)
stack = HStackLayout(child1, child2)
pane = Pane(0, 0, 250, 100)
stack.attach(pane)
self.assertEqual(child1.pane.alloc_coords, (0, 0, 200, 100))
self.assertEqual(child2.pane.alloc_coords, (200, 0, 250, 100))
class LayersLayoutTest(unittest.TestCase):
def setUp(self):
self.child1 = FakeView(min_height=100,
flex_height=True,
min_width=200,
flex_width=True,
valign=VAlign.FILL,
halign=HAlign.FILL)
self.child2 = FakeView(min_height=150,
flex_height=False,
min_width=100,
flex_width=False,
valign=VAlign.CENTER,
halign=HAlign.CENTER)
self.child3 = FakeView(min_height=150,
flex_height=False,
min_width=100,
flex_width=False,
hidden=True)
self.layers = LayersLayout(self.child1, self.child2)
self.pane = Pane(100, 150, 500, 550)
self.layers.attach(self.pane)
def test_dims(self):
self.assertEqual(self.layers.min_width, None)
self.assertEqual(self.layers.min_height, None)
self.assertEqual(self.layers.derived_width, 200)
self.assertEqual(self.layers.derived_height, 150)
self.assertFalse(self.layers.hidden)
def test_coords(self):
self.assertEqual(self.child1.pane.coords, (100, 150, 500, 550))
self.assertEqual(self.child2.pane.coords, (250, 275, 350, 425))
def test_mouse_pos(self):
self.pane.mouse_pos = (300, 300)
self.assertEqual(self.child1.pane.mouse_pos, None)
self.assertEqual(self.child2.pane.mouse_pos, (300, 300))
self.pane.mouse_pos = (200, 200)
self.assertEqual(self.child1.pane.mouse_pos, (200, 200))
self.assertEqual(self.child2.pane.mouse_pos, None)
def test_on_draw(self):
self.pane.dispatch_event('on_draw')
self.assertEqual(self.child1.on_draw_calls, 1)
self.assertEqual(self.child2.on_draw_calls, 1)
self.assertEqual(self.child3.on_draw_calls, 0)
def test_on_mouse_press(self):
self.pane.dispatch_event('on_mouse_press', 300, 300, 1, 0)
self.assertEqual(self.child1.on_mouse_press_calls, 0)
self.assertEqual(self.child2.on_mouse_press_calls, 1)
self.assertEqual(self.child3.on_mouse_press_calls, 0)
# Not covered by child2
self.pane.dispatch_event('on_mouse_press', 200, 200, 1, 0)
self.assertEqual(self.child1.on_mouse_press_calls, 1)
self.assertEqual(self.child2.on_mouse_press_calls, 1)
self.assertEqual(self.child3.on_mouse_press_calls, 0)
# Allow running this test module directly (python <file>.py).
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6689901 | <reponame>Priyansh2/LeetCode-Solutions
# Time: O(nlogn)
# Space: O(n)
import collections
# hash, sort
class Solution(object):
def findWinners(self, matches):
"""
:type matches: List[List[int]]
:rtype: List[List[int]]
"""
lose = collections.defaultdict(int)
players_set = set()
for x, y in matches:
lose[y] += 1
players_set.add(x)
players_set.add(y)
return [[x for x in sorted(players_set) if lose[x] == i] for i in xrange(2)]
| StarcoderdataPython |
8159977 | <filename>pirates/util/PythonUtilPOD.py<gh_stars>1-10
from panda3d.direct import get_config_showbase
from panda3d.core import Camera
from direct.showbase.PythonUtil import getSetter, getSetterName
import sys
def mostDerivedLast(classList):
"""pass in list of classes. sorts list in-place, with derived classes
appearing after their bases"""
class ClassSortKey(object):
__slots__ = 'classobj',
def __init__(self, classobj):
self.classobj = classobj
def __lt__(self, other):
return issubclass(other.classobj, self.classobj)
classList.sort(key=ClassSortKey)
"""
ParamObj/ParamSet
=================
These two classes support you in the definition of a formal set of
parameters for an object type. The parameters may be safely queried/set on
an object instance at any time, and the object will react to newly-set
values immediately.
ParamSet & ParamObj also provide a mechanism for atomically setting
multiple parameter values before allowing the object to react to any of the
new values--useful when two or more parameters are interdependent and there
is risk of setting an illegal combination in the process of applying a new
set of values.
To make use of these classes, derive your object from ParamObj. Then define
a 'ParamSet' subclass that derives from the parent class' 'ParamSet' class,
and define the object's parameters within its ParamSet class. (see examples
below)
The ParamObj base class provides 'get' and 'set' functions for each
parameter if they are not defined. These default implementations
respectively set the parameter value directly on the object, and expect the
value to be available in that location for retrieval.
Classes that derive from ParamObj can optionally declare a 'get' and 'set'
function for each parameter. The setter should simply store the value in a
location where the getter can find it; it should not do any further
processing based on the new parameter value. Further processing should be
implemented in an 'apply' function. The applier function is optional, and
there is no default implementation.
NOTE: the previous value of a parameter is available inside an apply
function as 'self.getPriorValue()'
The ParamSet class declaration lists the parameters and defines a default
value for each. ParamSet instances represent a complete set of parameter
values. A ParamSet instance created with no constructor arguments will
contain the default values for each parameter. The defaults may be
overriden by passing keyword arguments to the ParamSet's constructor. If a
ParamObj instance is passed to the constructor, the ParamSet will extract
the object's current parameter values.
ParamSet.applyTo(obj) sets all of its parameter values on 'obj'.
SETTERS AND APPLIERS
====================
Under normal conditions, a call to a setter function, i.e.
cam.setFov(90)
will actually result in the following calls being made:
cam.setFov(90)
cam.applyFov()
Calls to several setter functions, i.e.
cam.setFov(90)
cam.setViewType('cutscene')
will result in this call sequence:
cam.setFov(90)
cam.applyFov()
cam.setViewType('cutscene')
cam.applyViewType()
Suppose that you desire the view type to already be set to 'cutscene' at
the time when applyFov() is called. You could reverse the order of the set
calls, but suppose that you also want the fov to be set properly at the
time when applyViewType() is called.
In this case, you can 'lock' the params, i.e.
cam.lockParams()
cam.setFov(90)
cam.setViewType('cutscene')
cam.unlockParams()
This will result in the following call sequence:
cam.setFov(90)
cam.setViewType('cutscene')
cam.applyFov()
cam.applyViewType()
NOTE: Currently the order of the apply calls following an unlock is not
guaranteed.
EXAMPLE CLASSES
===============
Here is an example of a class that uses ParamSet/ParamObj to manage its
parameters:
class Camera(ParamObj):
class ParamSet(ParamObj.ParamSet):
Params = {
'viewType': 'normal',
'fov': 60,
}
...
def getViewType(self):
return self.viewType
def setViewType(self, viewType):
self.viewType = viewType
def applyViewType(self):
if self.viewType == 'normal':
...
def getFov(self):
return self.fov
def setFov(self, fov):
self.fov = fov
def applyFov(self):
base.camera.setFov(self.fov)
...
EXAMPLE USAGE
=============
cam = Camera()
...
# set up for the cutscene
savedSettings = cam.ParamSet(cam)
cam.setViewType('closeup')
cam.setFov(90)
...
# cutscene is over, set the camera back
savedSettings.applyTo(cam)
del savedSettings
"""
class ParamObj:
# abstract base for classes that want to support a formal parameter
# set whose values may be queried, changed, 'bulk' changed (defer reaction
# to changes until multiple changes have been performed), and
# extracted/stored/applied all at once (see documentation above)
# ParamSet subclass: container of parameter values. Derived class must
# derive a new ParamSet class if they wish to define new params. See
# documentation above.
class ParamSet:
Params = {
# base class does not define any parameters, but they would
# appear here as 'name': defaultValue,
#
# WARNING: default values of mutable types that do not copy by
# value (dicts, lists etc.) will be shared by all class instances
# if default value is callable, it will be called to get actual
# default value
#
# for example:
#
# class MapArea(ParamObj):
# class ParamSet(ParamObj.ParamSet):
# Params = {
# 'spawnIndices': Functor(list, [1,5,22]),
# }
#
}
def __init__(self, *args, **kwArgs):
self.__class__._compileDefaultParams()
if len(args) == 1 and len(kwArgs) == 0:
# extract our params from an existing ParamObj instance
obj = args[0]
self.paramVals = {}
for param in self.getParams():
self.paramVals[param] = getSetter(obj, param, 'get')()
else:
assert len(args) == 0
if __debug__:
for arg in kwArgs.keys():
assert arg in self.getParams()
self.paramVals = dict(kwArgs)
def getValue(self, param):
if param in self.paramVals:
return self.paramVals[param]
return self._Params[param]
def applyTo(self, obj):
# Apply our entire set of params to a ParamObj
obj.lockParams()
for param in self.getParams():
getSetter(obj, param)(self.getValue(param))
obj.unlockParams()
def extractFrom(self, obj):
# Extract our entire set of params from a ParamObj
obj.lockParams()
for param in self.getParams():
self.paramVals[param] = getSetter(obj, param, 'get')()
obj.unlockParams()
@classmethod
def getParams(cls):
# returns safely-mutable list of param names
cls._compileDefaultParams()
return cls._Params.keys()
@classmethod
def getDefaultValue(cls, param):
cls._compileDefaultParams()
dv = cls._Params[param]
if callable(dv):
dv = dv()
return dv
@classmethod
def _compileDefaultParams(cls):
if '_Params' in cls.__dict__:
# we've already compiled the defaults for this class
return
bases = list(cls.__bases__)
if object in bases:
bases.remove(object)
# bring less-derived classes to the front
mostDerivedLast(bases)
cls._Params = {}
for c in (bases + [cls]):
# make sure this base has its dict of param defaults
c._compileDefaultParams()
if 'Params' in c.__dict__:
# apply this class' default param values to our dict
cls._Params.update(c.Params)
def __repr__(self):
argStr = ''
for param in self.getParams():
argStr += '%s=%s,' % (param,
repr(self.getValue(param)))
return '%s.%s(%s)' % (
self.__class__.__module__, self.__class__.__name__, argStr)
# END PARAMSET SUBCLASS
def __init__(self, *args, **kwArgs):
assert issubclass(self.ParamSet, ParamObj.ParamSet)
# If you pass in a ParamSet obj, its values will be applied to this
# object in the constructor.
params = None
if len(args) == 1 and len(kwArgs) == 0:
# if there's one argument, assume that it's a ParamSet
params = args[0]
elif len(kwArgs) > 0:
assert len(args) == 0
# if we've got keyword arguments, make a ParamSet out of them
params = self.ParamSet(**kwArgs)
self._paramLockRefCount = 0
# these hold the current value of parameters while they are being set to
# a new value, to support getPriorValue()
self._curParamStack = []
self._priorValuesStack = []
# insert stub funcs for param setters, to handle locked params
for param in self.ParamSet.getParams():
# set the default value on the object
setattr(self, param, self.ParamSet.getDefaultValue(param))
setterName = getSetterName(param)
getterName = getSetterName(param, 'get')
# is there a setter defined?
if not hasattr(self, setterName):
# no; provide the default
def defaultSetter(self, value, param=param):
#print '%s=%s for %s' % (param, value, id(self))
setattr(self, param, value)
self.__class__.__dict__[setterName] = defaultSetter
# is there a getter defined?
if not hasattr(self, getterName):
# no; provide the default. If there is no value set, return
# the default
def defaultGetter(self, param=param,
default=self.ParamSet.getDefaultValue(param)):
return getattr(self, param, default)
self.__class__.__dict__[getterName] = defaultGetter
# have we already installed a setter stub?
origSetterName = '%s_ORIG' % (setterName,)
if not hasattr(self, origSetterName):
# move the original setter aside
origSetterFunc = getattr(self.__class__, setterName)
setattr(self.__class__, origSetterName, origSetterFunc)
"""
# if the setter is a direct member of this instance, move the setter
# aside
if setterName in self.__dict__:
self.__dict__[setterName + '_MOVED'] = self.__dict__[setterName]
setterFunc = self.__dict__[setterName]
"""
# install a setter stub that will a) call the real setter and
# then the applier, or b) call the setter and queue the
# applier, depending on whether our params are locked
"""
setattr(self, setterName, types.MethodType(
Functor(setterStub, param, setterFunc), self, self.__class__))
"""
def setterStub(self, value, param=param, origSetterName=origSetterName):
# should we apply the value now or should we wait?
# if this obj's params are locked, we track which values have
# been set, and on unlock, we'll call the applyers for those
# values
if self._paramLockRefCount > 0:
priorValues = self._priorValuesStack[-1]
if param not in priorValues:
try:
priorValue = getSetter(self, param, 'get')()
except:
priorValue = None
priorValues[param] = priorValue
self._paramsSet[param] = None
getattr(self, origSetterName)(value)
else:
# prepare for call to getPriorValue
try:
priorValue = getSetter(self, param, 'get')()
except:
priorValue = None
self._priorValuesStack.append({
param: priorValue,
})
getattr(self, origSetterName)(value)
# call the applier, if there is one
applier = getattr(self, getSetterName(param, 'apply'), None)
if applier is not None:
self._curParamStack.append(param)
applier()
self._curParamStack.pop()
self._priorValuesStack.pop()
if hasattr(self, 'handleParamChange'):
self.handleParamChange((param,))
setattr(self.__class__, setterName, setterStub)
if params is not None:
params.applyTo(self)
def destroy(self):
    # Intentional no-op retained for interface compatibility: callers still
    # invoke destroy() during teardown.  The per-instance setter cleanup kept
    # below (inside the docstring) was disabled -- setter stubs are installed
    # on the class, not the instance, so there is nothing to remove here.
    """
    for param in self.ParamSet.getParams():
        setterName = getSetterName(param)
        self.__dict__[setterName].destroy()
        del self.__dict__[setterName]
    """
    pass
def setDefaultParams(self):
    """Reset every parameter on this object to its declared default value."""
    defaults = self.ParamSet()
    defaults.applyTo(self)
def getCurrentParams(self):
    """Snapshot this object's current parameter values into a new ParamSet."""
    snapshot = self.ParamSet()
    snapshot.extractFrom(self)
    return snapshot
def lockParams(self):
    """Defer param appliers until a matching unlockParams() call (re-entrant)."""
    wasUnlocked = (self._paramLockRefCount == 0)
    self._paramLockRefCount += 1
    if wasUnlocked:
        # first lock level: start tracking modified params
        self._handleLockParams()
def unlockParams(self):
    """Release one lock level; run deferred appliers once fully unlocked."""
    if self._paramLockRefCount <= 0:
        # not locked; nothing to release
        return
    self._paramLockRefCount -= 1
    if self._paramLockRefCount == 0:
        self._handleUnlockParams()
def _handleLockParams(self):
    """Begin tracking params modified while locked, plus their prior values."""
    # values from before the lock, consumed by getPriorValue()
    self._priorValuesStack.append({})
    # names of the parameters that get modified during the lock
    self._paramsSet = {}
def _handleUnlockParams(self):
    """Run the deferred applier for every param that was set while locked,
    then notify the optional handleParamChange hook and drop lock state."""
    for param in self._paramsSet:
        # call the applier, if there is one
        applier = getattr(self, getSetterName(param, 'apply'), None)
        if applier is not None:
            # expose the current param name so the applier can call
            # getPriorValue() while it runs
            self._curParamStack.append(param)
            applier()
            self._curParamStack.pop()
    # discard the pre-lock values pushed by _handleLockParams
    self._priorValuesStack.pop()
    if hasattr(self, 'handleParamChange'):
        # subclass hook: receives every param name changed during the lock
        self.handleParamChange(tuple(self._paramsSet.keys()))
    del self._paramsSet
def paramsLocked(self):
    """Return True while at least one lockParams() call is outstanding."""
    return bool(self._paramLockRefCount)
def getPriorValue(self):
    """From within an apply function, return the value the param being
    applied held before the current set operation."""
    currentParam = self._curParamStack[-1]
    priorValues = self._priorValuesStack[-1]
    return priorValues[currentParam]
def __repr__(self):
    """Render as ClassName(param=value,...); params whose getter raises are
    shown as '<unknown>'."""
    pieces = []
    for param in self.ParamSet.getParams():
        try:
            value = getSetter(self, param, 'get')()
        except:
            value = '<unknown>'
        pieces.append('%s=%s,' % (param, repr(value)))
    return '%s(%s)' % (self.__class__.__name__, ''.join(pieces))
# Inline smoke test: runs only when this module is executed directly in
# debug mode.  Exercises default values, appliers, getPriorValue, and the
# lock/unlock deferral protocol.
if __debug__ and __name__ == '__main__':
    class ParamObjTest(ParamObj):
        class ParamSet(ParamObj.ParamSet):
            Params = {
                'num': 0,
            }
        def applyNum(self):
            # record the pre-set value so the asserts below can inspect it
            self.priorValue = self.getPriorValue()
    pto = ParamObjTest()
    assert pto.getNum() == 0
    pto.setNum(1)
    assert pto.priorValue == 0
    assert pto.getNum() == 1
    pto.lockParams()
    pto.setNum(2)
    # make sure applyNum is not called until we call unlockParams
    assert pto.priorValue == 0
    assert pto.getNum() == 2
    pto.unlockParams()
    assert pto.priorValue == 1
    assert pto.getNum() == 2
"""
POD (Plain Ol' Data)
Like ParamObj/ParamSet, but without lock/unlock/getPriorValue and without
appliers. Similar to a C++ struct, but with auto-generated setters and
getters.
Use POD when you want the generated getters and setters of ParamObj, but
efficiency is a concern and you don't need the bells and whistles provided
by ParamObj.
POD.__init__ *MUST* be called. You should NOT define your own data getters
and setters. Data values may be read, set, and modified directly. You will
see no errors if you define your own getters/setters, but there is no
guarantee that they will be called--and they will certainly be bypassed by
POD internally.
EXAMPLE CLASSES
===============
Here is an example of a class hierarchy that uses POD to manage its data:
class Enemy(POD):
DataSet = {
'faction': 'navy',
}
class Sailor(Enemy):
DataSet = {
'build': HUSKY,
'weapon': Cutlass(scale=.9),
}
EXAMPLE USAGE
=============
s = Sailor(faction='undead', build=SKINNY)
# make two copies of s
s2 = s.makeCopy()
s3 = Sailor(s)
# example sets
s2.setWeapon(Musket())
s3.build = TALL
# example gets
faction2 = s2.getFaction()
faction3 = s3.faction
"""
class POD:
    # Plain Ol' Data (see module docstring above): aggregates DataSet
    # defaults across the inheritance chain and auto-generates a simple
    # setX/getX pair for any entry that does not already define one.
    DataSet = {
        # base class does not define any data items, but they would
        # appear here as 'name': defaultValue,
        #
        # WARNING: default values of mutable types that do not copy by
        # value (dicts, lists etc.) will be shared by all class instances.
        # if default value is callable, it will be called to get actual
        # default value
        #
        # for example:
        #
        # class MapData(POD):
        #     DataSet = {
        #         'spawnIndices': Functor(list, [1,5,22]),
        #         }
        }

    def __init__(self, **kwArgs):
        """Assign every declared data item on self, taking values from
        kwArgs where supplied and from the declared defaults otherwise.
        POD.__init__ MUST be called by subclasses."""
        self.__class__._compileDefaultDataSet()
        if __debug__:
            # make sure all of the keyword arguments passed in
            # are present in our data set
            for arg in kwArgs.keys():
                assert arg in self.getDataNames(), (
                    "unknown argument for %s: '%s'" % (
                    self.__class__, arg))
        # assign each of our data items directly to self
        for name in self.getDataNames():
            # if a value has been passed in for a data item, use
            # that value, otherwise use the default value
            if name in kwArgs:
                getSetter(self, name)(kwArgs[name])
            else:
                getSetter(self, name)(self.getDefaultValue(name))

    def setDefaultValues(self):
        """Set all the default data values on ourself via the setters."""
        for name in self.getDataNames():
            getSetter(self, name)(self.getDefaultValue(name))

    # this functionality used to be in the constructor, triggered by a single
    # positional argument; that was conflicting with POD subclasses that wanted
    # to define different behavior for themselves when given a positional
    # constructor argument
    def copyFrom(self, other, strict=False):
        """Copy all of our data values from 'other' onto self.

        If 'strict' is true, 'other' must provide a getter for every one
        of our data items; otherwise missing items fall back to our
        defaults.  Returns self, to support
        'p = POD.POD().copyFrom(other)' syntax."""
        for name in self.getDataNames():
            if hasattr(other, getSetterName(name, 'get')):
                setattr(self, name, getSetter(other, name, 'get')())
            else:
                if strict:
                    # BUG FIX: this was a bare string 'raise', which is not
                    # a legal exception object (it raises TypeError itself
                    # on Python 2.6+/3); raise a real exception carrying
                    # the intended message instead
                    raise ValueError(
                        "object '%s' doesn't have value '%s'" % (other, name))
                else:
                    setattr(self, name, self.getDefaultValue(name))
        # support 'p = POD.POD().copyFrom(other)' syntax
        return self

    def makeCopy(self):
        """Return a new instance of our class with our data values copied in."""
        return self.__class__().copyFrom(self)

    def applyTo(self, obj):
        """Apply our entire set of data to another POD via its setters."""
        for name in self.getDataNames():
            getSetter(obj, name)(getSetter(self, name, 'get')())

    def getValue(self, name):
        """Return the current value of the named data item via its getter."""
        return getSetter(self, name, 'get')()

    @classmethod
    def getDataNames(cls):
        # returns safely-mutable list of datum names
        cls._compileDefaultDataSet()
        return cls._DataSet.keys()

    @classmethod
    def getDefaultValue(cls, name):
        """Return the default for 'name'; callable defaults are invoked to
        build a fresh value (so mutable defaults are not shared)."""
        cls._compileDefaultDataSet()
        dv = cls._DataSet[name]
        # this allows us to create a new mutable object every time we ask
        # for its default value, i.e. if the default value is dict, this
        # method will return a new empty dictionary object every time. This
        # will cause problems if the intent is to store a callable object
        # as the default value itself; we need a way to specify that the
        # callable *is* the default value and not a default-value creation
        # function
        if hasattr(dv, '__call__'):
            dv = dv()
        return dv

    @classmethod
    def _compileDefaultDataSet(cls):
        """Aggregate DataSet defaults from base classes and generate any
        missing default getters/setters; runs at most once per class."""
        if '_DataSet' in cls.__dict__:
            # we've already compiled the defaults for this class
            return
        # create setters & getters for this class
        # NOTE(review): direct assignment into cls.__dict__ assumes Python 2
        # classic classes; on new-style/Python 3 classes the mappingproxy is
        # read-only -- confirm the target runtime before porting
        if 'DataSet' in cls.__dict__:
            for name in cls.DataSet:
                setterName = getSetterName(name)
                if not hasattr(cls, setterName):
                    def defaultSetter(self, value, name=name):
                        setattr(self, name, value)
                    cls.__dict__[setterName] = defaultSetter
                getterName = getSetterName(name, 'get')
                if not hasattr(cls, getterName):
                    def defaultGetter(self, name=name):
                        return getattr(self, name)
                    cls.__dict__[getterName] = defaultGetter
        # this dict will hold all of the aggregated default data values for
        # this particular class, including values from its base classes
        cls._DataSet = {}
        bases = list(cls.__bases__)
        # process in reverse of inheritance order, so that base classes listed first
        # will take precedence over later base classes
        bases.reverse()
        for curBase in bases:
            # skip multiple-inheritance base classes that do not derive from POD
            if issubclass(curBase, POD):
                # make sure this base has its dict of data defaults
                curBase._compileDefaultDataSet()
                # grab all inherited data default values
                cls._DataSet.update(curBase._DataSet)
        # pull in our own class' default values if any are specified
        if 'DataSet' in cls.__dict__:
            cls._DataSet.update(cls.DataSet)

    def __repr__(self):
        argStr = ''
        for name in self.getDataNames():
            argStr += '%s=%s,' % (name, repr(getSetter(self, name, 'get')()))
        return '%s(%s)' % (self.__class__.__name__, argStr)
# Inline smoke test for POD: runs only when the module is executed directly
# in debug mode.  Verifies generated getters/setters, per-instance copies of
# callable (mutable) defaults, and inheritance of DataSet entries.
if __debug__ and __name__ == '__main__':
    class PODtest(POD):
        DataSet = {
            'foo': dict,
        }
    p1 = PODtest()
    p2 = PODtest()
    assert hasattr(p1, 'foo')
    # make sure the getter is working
    assert p1.getFoo() is p1.foo
    p1.getFoo()[1] = 2
    assert p1.foo[1] == 2
    # make sure that each instance gets its own copy of a mutable
    # data item
    assert p1.foo is not p2.foo
    assert len(p1.foo) == 1
    assert len(p2.foo) == 0
    # make sure the setter is working
    p2.setFoo({10:20})
    assert p2.foo[10] == 20
    # make sure modifications to mutable data items don't affect other
    # instances
    assert p1.foo[1] == 2
    class DerivedPOD(PODtest):
        DataSet = {
            'bar': list,
        }
    d1 = DerivedPOD()
    # make sure that derived instances get their own copy of mutable
    # data items
    assert hasattr(d1, 'foo')
    assert len(d1.foo) == 0
    # make sure derived instances get their own items
    assert hasattr(d1, 'bar')
    assert len(d1.bar) == 0
def clampScalar(value, a, b):
    """Clamp 'value' into the closed interval spanned by a and b.

    The bounds may be given in either order.  Kept branch-based (no min/max
    calls) to stay cheap, per the original's intent.
    """
    # normalize the bounds without calling min()/max()
    if a < b:
        lo, hi = a, b
    else:
        lo, hi = b, a
    if value < lo:
        return lo
    if value > hi:
        return hi
    return value
def describeException(backTrace = 4):
    """When called in an exception handler, return a one-line string naming
    the innermost 'backTrace' stack frames (module:line) followed by the
    exception name and its message.

    NOTE(review): Python 2 only as written -- uses xrange and the pre-3.10
    co_lnotab line-number table format; confirm runtime before porting.
    """
    # When called in an exception handler, returns a string describing
    # the current exception.
    def byteOffsetToLineno(code, byte):
        # Returns the source line number corresponding to the given byte
        # offset into the indicated Python code module.
        # Walks the (byte-delta, line-delta) pairs of co_lnotab; needed
        # because tb_lineno/f_lineno are only correct in non-optimized mode.
        import array
        lnotab = array.array('B', code.co_lnotab)
        line = code.co_firstlineno
        for i in xrange(0, len(lnotab), 2):
            byte -= lnotab[i]
            if byte <= 0:
                return line
            line += lnotab[i+1]
        return line
    infoArr = sys.exc_info()
    exception = infoArr[0]
    exceptionName = getattr(exception, '__name__', None)
    extraInfo = infoArr[1]
    trace = infoArr[2]
    stack = []
    while trace.tb_next:
        # We need to call byteOffsetToLineno to determine the true
        # line number at which the exception occurred, even though we
        # have both trace.tb_lineno and frame.f_lineno, which return
        # the correct line number only in non-optimized mode.
        frame = trace.tb_frame
        module = frame.f_globals.get('__name__', None)
        lineno = byteOffsetToLineno(frame.f_code, frame.f_lasti)
        stack.append("%s:%s, " % (module, lineno))
        trace = trace.tb_next
    # include the innermost frame (the loop above stops one short)
    frame = trace.tb_frame
    module = frame.f_globals.get('__name__', None)
    lineno = byteOffsetToLineno(frame.f_code, frame.f_lasti)
    stack.append("%s:%s, " % (module, lineno))
    # emit at most 'backTrace' frames, innermost last
    description = ""
    for i in xrange(len(stack) - 1, max(len(stack) - backTrace, 0) - 1, -1):
        description += stack[i]
    description += "%s: %s" % (exceptionName, extraInfo)
    return description
import __builtin__
# Publish these helpers into the builtin namespace so every module can use
# them without importing this one (Python 2 idiom; __builtin__ has no Py3
# equivalent by this name).  get_config_showbase is defined elsewhere in
# this module.
__builtin__.describeException = describeException
__builtin__.config = get_config_showbase()
| StarcoderdataPython |
6493045 | <gh_stars>1-10
"""input validation
adapted in part from scikit-learn under license
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
"""
import numpy as np
def is_1d_or_row_or_column(y):
    """Validate that a vector is 1-dimensional, or is a row or column
    vector that is safe to ravel to make it 1-dimensional.

    Parameters
    ----------
    y : array-like

    Returns
    -------
    bool
        True for a 1-d array or a 2-d array with a size-1 axis, False
        otherwise.  (Previously the non-matching case fell off the end
        and returned None; an explicit False is returned now, which is
        truthiness-compatible with existing callers.)
    """
    shape = np.shape(y)
    if len(shape) == 1:
        return True
    # a 2-d array is safe to ravel when either axis has size 1
    return len(shape) == 2 and 1 in shape
| StarcoderdataPython |
3509007 | <filename>pusherable/example/models.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from django.db import models
class PusherableExample(models.Model):
    """Minimal model for the pusherable example app (presumably the payload
    pushed to subscribers in the demo -- confirm against the example views)."""
    # free-form text content of the example object
    text = models.TextField()
| StarcoderdataPython |
3594139 | <filename>ck/tweet/migrations/0004_response_priority.py
# Generated by Django 3.2.6 on 2021-09-06 08:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.6 (see header comment); do not hand-edit
    # beyond what makemigrations would regenerate.
    dependencies = [
        ('tweet', '0003_response'),
    ]
    operations = [
        # adds Response.priority: small integer with choices 0..4,
        # defaulting to 0 ('No Priority')
        migrations.AddField(
            model_name='response',
            name='priority',
            field=models.SmallIntegerField(choices=[(0, 'No Priority'), (1, 'Low'), (2, 'Moderate'), (3, 'High'), (4, 'Extreme')], default=0),
        ),
    ]
| StarcoderdataPython |
1675106 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import zeros_like
from paddle.fluid import core, Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestZerosLikeAPIError(unittest.TestCase):
    """zeros_like must reject unsupported dtype strings such as 'int8'."""

    def test_errors(self):
        main_prog, startup_prog = Program(), Program()
        with program_guard(main_prog, startup_prog):
            data = paddle.fluid.data('x', [3, 4])
            with self.assertRaises(TypeError):
                zeros_like(data, 'int8')

    def test_eager(self):
        # repeat the same check under eager (dygraph) mode
        with _test_eager_guard():
            self.test_errors()
class TestZerosLikeAPI(unittest.TestCase):
    """Static-graph coverage for paddle.zeros_like with the supported dtypes."""

    def test_api(self):
        shape = [3, 4]
        startup_program = Program()
        train_program = Program()
        with program_guard(train_program, startup_program):
            x = paddle.fluid.data('X', shape)
            # default dtype follows x (float32 here), then each explicit dtype
            out1 = zeros_like(x)
            out2 = zeros_like(x, np.bool_)
            out3 = zeros_like(x, 'float64')
            out4 = zeros_like(x, 'int32')
            out5 = zeros_like(x, 'int64')
            # run on GPU when this build was compiled with CUDA, else CPU
            place = (fluid.CUDAPlace(0)
                     if core.is_compiled_with_cuda() else fluid.CPUPlace())
            exe = fluid.Executor(place)
            outs = exe.run(train_program,
                           feed={'X': np.ones(shape).astype('float32')},
                           fetch_list=[out1, out2, out3, out4, out5])
            # expected dtypes, in the same order as the fetch list above
            for (i, dtype) in enumerate(
                    [np.float32, np.bool_, np.float64, np.int32, np.int64]):
                self.assertEqual(outs[i].dtype, dtype)
                self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True)

    def test_eager(self):
        # repeat the same checks under eager (dygraph) mode
        with _test_eager_guard():
            self.test_api()
class TestZerosLikeImpeartive(unittest.TestCase):
    """Imperative (dygraph) coverage for the zeros_like aliases."""

    def test_out(self):
        shape = [3, 4]
        place = (fluid.CUDAPlace(0)
                 if core.is_compiled_with_cuda() else fluid.CPUPlace())
        paddle.disable_static(place)
        x = paddle.to_tensor(np.ones(shape))
        for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]:
            out = zeros_like(x, dtype)
            self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
                             True)
        # NOTE(review): the checks below reuse 'dtype' leaked from the loop
        # (np.int64); the value comparison still holds since == ignores
        # dtype, but this looks accidental -- confirm intent
        out = paddle.tensor.zeros_like(x)
        self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
        out = paddle.tensor.creation.zeros_like(x)
        self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
        # restore static mode for any tests that run after this one
        paddle.enable_static()

    def test_eager(self):
        # repeat the same checks under eager mode
        with _test_eager_guard():
            self.test_out()
if (__name__ == '__main__'):
    # discover and run every TestCase class defined in this module
    unittest.main()
| StarcoderdataPython |
3483367 | <reponame>aless80/devilry-django
from django.contrib.contenttypes.models import ContentType
from devilry.devilry_import_v2database.models import ImportedModel
from django import test
from django.conf import settings
from django.utils.dateparse import parse_datetime
from model_mommy import mommy
from devilry.apps.core.models import RelatedStudent, PeriodTag, Period
from devilry.devilry_import_v2database.modelimporters.relateduser_importer import RelatedStudentImporter
from .importer_testcase_mixin import ImporterTestCaseMixin
class TestRelatedStudentImporter(ImporterTestCaseMixin, test.TestCase):
def _create_model_meta(self):
return {
'model_class_name': 'RelatedStudent',
'max_id': 19,
'app_label': 'core'
}
def _create_related_student_dict(self, period, user):
return {
'pk': 19,
'model': 'core.relatedstudent',
'fields': {
'user': user.id,
'period': period.id,
'candidate_id': None,
'tags': 'group1'
}
}
def test_importer(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
self.assertEqual(RelatedStudent.objects.count(), 1)
self.assertEqual(PeriodTag.objects.count(), 1)
def test_importer_related_examiner_pk(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
self.assertEqual(related_examiner.pk, 19)
self.assertEqual(related_examiner.id, 19)
def test_importer_period_tag_period(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
period_tag = PeriodTag.objects.first()
self.assertEqual(period_tag.period, test_period)
def test_importer_period_tag_single_tag_created(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
period_tag = PeriodTag.objects.first()
self.assertEqual(period_tag.tag, 'group1')
def test_importer_period_tag_multiple_tags_created(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
relatedexaminer_data_dict = self._create_related_student_dict(period=test_period, user=test_user)
relatedexaminer_data_dict['fields']['tags'] = 'group1,group2'
self.create_v2dump(model_name='core.relatedstudent',
data=relatedexaminer_data_dict)
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
period_tags_list = [period_tag.tag for period_tag in PeriodTag.objects.all()]
self.assertEqual(len(period_tags_list), 2)
self.assertIn('group1', period_tags_list)
self.assertIn('group2', period_tags_list)
def test_importer_single_period_tag_related_student_is_added(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
period_tag = PeriodTag.objects.first()
self.assertEqual(period_tag.relatedstudents.count(), 1)
self.assertIn(related_examiner, period_tag.relatedstudents.all())
def test_importer_multiple_period_tags_related_student_is_added(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
relatedexaminer_data_dict = self._create_related_student_dict(period=test_period, user=test_user)
relatedexaminer_data_dict['fields']['tags'] = 'group1,group2'
self.create_v2dump(model_name='core.relatedstudent',
data=relatedexaminer_data_dict)
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
period_tags = PeriodTag.objects.all()
self.assertEqual(period_tags.count(), 2)
for period_tag in period_tags:
self.assertIn(related_examiner, period_tag.relatedstudents.all())
def test_importer_related_student_is_added_to_existing_tags_and_new_tags(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
mommy.make('core.PeriodTag', period=test_period, tag='group1')
mommy.make('core.PeriodTag', period=test_period, tag='group4')
relatedexaminer_data_dict = self._create_related_student_dict(period=test_period, user=test_user)
relatedexaminer_data_dict['fields']['tags'] = 'group1,group2,group3,group4'
self.create_v2dump(model_name='core.relatedstudent',
data=relatedexaminer_data_dict)
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
period_tags = PeriodTag.objects.all()
self.assertEqual(period_tags.count(), 4)
for period_tag in period_tags:
self.assertIn(related_examiner, period_tag.relatedstudents.all())
# def test_importer_imported_model_created(self):
# test_user = mommy.make(settings.AUTH_USER_MODEL)
# test_period = mommy.make('core.Period')
# related_student_data_dict = self._create_related_student_dict(period=test_period, user=test_user)
# self.create_v2dump(model_name='core.relatedstudent',
# data=related_student_data_dict)
# relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
# relatedstudent_importer.import_models()
# related_student = RelatedStudent.objects.first()
# self.assertEquals(ImportedModel.objects.count(), 1)
# imported_model = ImportedModel.objects.get(
# content_object_id=related_student.id,
# content_type=ContentType.objects.get_for_model(model=related_student)
# )
# self.assertEquals(imported_model.content_object, related_student)
# self.assertEquals(imported_model.data, related_student_data_dict)
def test_auto_sequence_numbered_objects_uses_meta_max_id(self):
test_user = mommy.make(settings.AUTH_USER_MODEL)
test_period = mommy.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user),
model_meta=self._create_model_meta())
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
self.assertEqual(RelatedStudent.objects.count(), 1)
related_student = RelatedStudent.objects.first()
self.assertEqual(related_student.pk, 19)
self.assertEqual(related_student.id, 19)
related_student_with_auto_id = mommy.make('core.RelatedStudent')
self.assertEqual(related_student_with_auto_id.pk, self._create_model_meta()['max_id']+1)
self.assertEqual(related_student_with_auto_id.id, self._create_model_meta()['max_id']+1)
| StarcoderdataPython |
4889365 | WTF_CSRF_ENABLED = True
# Flask application configuration module.

# secret used to sign sessions and CSRF tokens -- replace in production
SECRET_KEY = 'you-will-never-guess'
OPENID_PROVIDERS = [
    {'name': 'Google', 'url': 'https://www.google.com/accounts/o8/id'},
    {'name': 'Yahoo', 'url': 'https://me.yahoo.com'},
    {'name': 'AOL', 'url': 'http://openid.aol.com/<username>'},
    {'name': 'Flickr', 'url': 'http://www.flickr.com/<username>'},
    {'name': 'MyOpenID', 'url': 'https://www.myopenid.com'}]
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = True
# mail server settings
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
MAIL_USERNAME = None
# BUG FIX: this line held a bare '<PASSWORD>' anonymization token, which is a
# syntax error; None keeps the module importable and matches MAIL_USERNAME
# (no SMTP authentication by default)
MAIL_PASSWORD = None
# administrator list
ADMINS = ['<EMAIL>']
# pagination
POSTS_PER_PAGE = 3
# Full text search:
WHOOSH_BASE = os.path.join(basedir, 'search.db')
MAX_SEARCH_RESULTS = 50 | StarcoderdataPython |
1963489 | <reponame>smnikolakaki/submodular-linear-cost-maximization
"""
This job processes guru dataset and exports the processed data object
"""
import logging
import pandas as pd
from data.data_provider import DataProvider
from data.data_exporter import DataExporter
from sklearn.preprocessing import MultiLabelBinarizer
from freelancer.freelancer_dataset import FreelancerData
class FreelancerDataProcessor(object):
    """
    This job processes the freelancer dataset and exports the processed data:
    a user dataframe (cost + skills), a skill dataframe, one-hot encoded
    skill arrays, and a pickled FreelancerData object for experiments.
    """
    def __init__(self, config):
        """
        Constructor
        :param config: configuration object shared by the data provider/exporter
        :return:
        """
        self.config = config
        self.logger = logging.getLogger("so_logger")

    def run(self):
        """
        Execute the job: read raw user/skill CSVs, normalize skills,
        one-hot encode them, and export csv + dill artifacts.
        :param:
        :return:
        """
        self.logger.info("Starting job: FreelancerDataProcessor\n")
        data_provider = DataProvider(self.config)
        data_exporter = DataExporter(self.config)
        # Read freelancer data
        df = data_provider.read_freelancer_user_data()
        # NOTE(review): the column positions below (1 = salary/hour, skills in
        # every second column from index 4) assume the raw CSV layout -- confirm
        # against read_freelancer_user_data
        df_cost = df[[1]]  # Salary/Hour
        df_skills = df[df.columns[4::2]]
        # drop the catch-all "Other Skills" label before joining
        df_skills.replace(to_replace=["Other Skills"], value="", inplace=True)
        # join the six skill columns into a single comma-separated string
        df_skills = (df_skills.iloc[:, 0].map(str)
                     + ',' + df_skills.iloc[:, 1].map(str)
                     + ',' + df_skills.iloc[:, 2].map(str)
                     + ',' + df_skills.iloc[:, 3].map(str)
                     + ',' + df_skills.iloc[:, 4].map(str)
                     + ',' + df_skills.iloc[:, 5].map(str)
                     )  # Skills
        user_df = pd.DataFrame()
        user_df['cost'] = df_cost.iloc[:, 0].tolist()
        # Converting all strings to lower case
        user_df['skills'] = df_skills.str.lower().tolist()
        user_df = user_df.reset_index(drop=True)
        user_df = user_df.assign(user_id=user_df.index.values)
        # split the joined string back into a list, dropping the trailing element
        user_df = user_df.assign(skills=user_df.apply(lambda x: x['skills'][:-1].split(','), axis=1))
        # Convert cost to integers
        user_df.cost = user_df.cost.astype(int)
        # Read skills data
        df = data_provider.read_freelancer_skill_data()
        df = df[[1]]
        df.columns = ['skill']
        skill_df = df.assign(skill_id=df.index.values)
        # Create multilabel binarizer over the full skill vocabulary
        mlb = MultiLabelBinarizer(classes=skill_df.skill.values)
        # One hot encoding of user skills
        skills = mlb.fit_transform(user_df['skills'])
        # Create dataset: one record per user, with its one-hot skill vector
        users = user_df.to_dict('records')
        for i in range(len(users)):
            users[i]['skills_array'] = skills[i]
        # Export csv files
        data_exporter.export_csv_file(user_df, "freelancer/freelancer_user_df.csv")
        data_exporter.export_csv_file(skill_df, "freelancer/freelancer_skill_df.csv")
        # Scaling factor for submodular function
        scaling_factor = 1
        # Create and export data object to be used in experiments
        # containing all methods related to freelancer data
        freelancer = FreelancerData(self.config, user_df, skill_df, users, scaling_factor)
        data_exporter.export_dill_file(freelancer, "freelancer/freelancer_data.dill")
        self.logger.info("Finished job: FreelancerDataProcessor")
| StarcoderdataPython |
1959585 | <reponame>pdghawk/deepQ-pong
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import numpy as np
import time
import gym
import DQN
import matplotlib
matplotlib.use('TkAgg') # TkAgg backend: brings the figure window into focus rather than the terminal
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
# ------------------------------------------------------------------------------
# global matplotlib styling for the result plots below
matplotlib.rcParams['lines.linewidth'] = 1.5
matplotlib.rcParams['axes.linewidth'] = 1.5
matplotlib.rcParams['font.weight'] = 'bold'
matplotlib.rcParams['axes.labelweight'] = 'bold'
matplotlib.rcParams['font.size'] = 10
matplotlib.rcParams['legend.frameon'] = False
matplotlib.rcParams['figure.figsize'] = (22/2.54, 15/2.54)  # cm converted to inches
#sns.set()
# ------------------------------------------------------------------------------
# choose the type of game to play
game = 'Pong-v0'
# will this be running on an aws instance (larger network + longer training)
aws_run = False # True
# plot the results to screen (tensorboard will run regardless)
plot_results=True
# ------------------------------------------------------------------------------
# We will set up dictionaries of Hyperparameters (HYPERPARAMS), and parameters
# (PARAMS) of the model to be passed to the DQN.
# See the deepQ-pong documentation, and in particular the DQN package docs, for
# further details.
# HYPERPARAMS should consist of:
# - 'ALPHA': learning rate
# - 'GAMMA': reward discount factor
# - 'EPSILON_H': initial probability of random actions in training
# - 'EPSILON_L': lowest probability of random actions in training
# - 'EPS_DECAY': decay rate (units of frames) of epsilon (exp(-frame/EPS_DECAY))
# - 'EPI_START': episode at which to begin training
# - 'N_FILTER': Number of filters for initial convolutional layer
# - 'N_FC': Number of hidden units in fully connected layer
# - 'N_memory': Number of transitions to store
# - 'N_batch': The mini-batch size
# - 'UPDATE_FREQ': how many frames to train on between updates of target network
# - 'TERMINAL_POINTS': count a single point loss as a terminal move (boolean)
# - 'LOSS_SCALE': scale on Huber loss, for testing, keep as 2.0
# PARAMS should consist of:
# - 'Nc': number of frames in a single game state
# - 'OUTPUT_STEP': How often (in episodes) to save output summaries
# - 'MAX_STEPS': max number of frames allowed per episode
# ------------------------------------------------------------------------------
#
# Full-size settings for an AWS run; a small, fast configuration otherwise
# (tiny network, few episodes) for local smoke testing.
if aws_run:
    N_episodes = 200
    HYPERPARAMS = {
        'ALPHA':3.0e-4,
        'GAMMA': 0.99,
        'EPSILON_H':1.00,
        'EPSILON_L':0.03,
        'EPS_DECAY':80000.0,
        'EPI_START':40,
        'N_FILTER':32,
        'N_FC':512,
        'N_memory':400000,
        'N_batch':32,
        'UPDATE_FREQ':5000,
        'TERMINAL_POINTS':True,
        'LOSS_SCALE':2.0
    }
    PARAMS = { 'Nc': 4,
               'OUTPUT_STEP': 10,
               'MAX_STEPS': 20000
             }
else:
    N_episodes = 10
    HYPERPARAMS = {
        'ALPHA':1.5e-4,
        'GAMMA': 0.99,
        'EPSILON_H':1.00,
        'EPSILON_L':0.02,
        'EPS_DECAY':60000.0,
        'EPI_START':4,
        'N_FILTER':1,
        'N_FC':2,
        'N_memory':80000,
        'N_batch':4,
        'UPDATE_FREQ':5000,
        'TERMINAL_POINTS':True,
        'LOSS_SCALE':2.0
    }
    PARAMS = { 'Nc': 4,
               'OUTPUT_STEP': 2,
               'MAX_STEPS': 20000
             }
# ------------------------------------------------------------------------------
# Set up a series of hyperparameter scans.  A grid search over all
# hyperparameters (or those suspected to be most important), or a random
# search -- which often outperforms a grid search -- would be more thorough.
results = []

alpha_vec = np.array([1.0e-6, 1.0e-4, 1.0e-2])
update_vec = np.array([1000, 5000, 10000])
batch_vec = np.array([32, 64, 128])
loss_scale_vec = np.array([1.0, 2.0, 4.0, 10.0])
decay_vec = np.array([5.0e3])  # ,1.0e4,2.0e4])
rate_inc_vec = np.array([2, 4, 6])

# select which scan you want to run
run_type = 'update_freq'

# map each scan type to (values swept, plot-legend prefix)
_scan_table = {
    'alpha': (alpha_vec, 'alpha = '),
    'update_freq': (update_vec, 'update freq = '),
    'batch': (batch_vec, 'batch size = '),
    'loss_scale': (loss_scale_vec, 'loss scale = '),
    'decay': (decay_vec, 'decay scale = '),
    'rate_increase': (rate_inc_vec, 'rate_increase = '),
}
if run_type in _scan_table:
    vals, label0 = _scan_table[run_type]
else:
    print('Unknown_run_type')
# for each value in the hyperparameter scan, reset the hyperparameter
# dictionary, train a fresh agent, and collect its training summaries
for i in np.arange(len(vals)):
    if run_type=='alpha':
        HYPERPARAMS['ALPHA'] = vals[i]
    elif run_type=='update_freq':
        print(' \n ---- running update option ----- \n')
        #HYPERPARAMS['ALPHA'] = 1.0e-4
        HYPERPARAMS['UPDATE_FREQ'] = vals[i]
    elif run_type=='batch':
        print(' \n ---- running batch option ----- \n')
        HYPERPARAMS['N_batch'] = vals[i]
    elif run_type=='loss_scale':
        print(' \n ---- running loss option ----- \n')
        HYPERPARAMS['LOSS_SCALE'] = vals[i]
    elif run_type=='decay':
        # BUG FIX: the " ----- \n" suffix of this print had migrated into the
        # next elif's comparison string (run_type=='rate_increase ----- \n'),
        # which made the rate_increase branch unreachable
        print(' \n ---- running decay option ----- \n')
        HYPERPARAMS['EPS_DECAY'] = vals[i]
    elif run_type=='rate_increase':
        print(' \n ---- running rate increase option ----- \n')
        HYPERPARAMS['RATE_INCREASE'] = vals[i]
    else:
        print('Unknown run_type')
    # create a deepQ object, i.e set up a deepQ-learning agent
    deepQ = DQN.deepQ(game, HYPERPARAMS, PARAMS)
    # train the model
    tmp_dict = deepQ.train(N_episodes)
    # append the results of the training to results
    results.append(tmp_dict)
# optionally plot the results of the scan.
if plot_results:
OUTPUT_STEP = PARAMS['OUTPUT_STEP']
ep_vec=OUTPUT_STEP*(1+np.arange(int(N_episodes/OUTPUT_STEP) ) )
cols = matplotlib.cm.jet(np.linspace(0,1,len(vals)))
fig,axes = plt.subplots(2,2)
for i in np.arange(len(vals)):
print(results[i]['steps'])
axes[0,0].plot(ep_vec,results[i]['rewards'],color=cols[i],label = label0+str(vals[i]))
axes[0,0].set_ylabel('avg reward')
axes[0,0].set_xlim([0,N_episodes])
axes[0,1].plot(ep_vec,0.5*(results[i]['maxQ']+results[i]['minQ']),color=cols[i],label = label0+str(vals[i]))
axes[0,1].set_ylabel('avg Q')
axes[0,1].set_xlim([0,N_episodes])
axes[1,0].plot(ep_vec,results[i]['actions'],color=cols[i],label = label0+str(vals[i]))
#axes[1,0].plot(ep_vec,results[i]['epsilon'],'k',label = label0+str(vals[i]))
axes[1,0].set_ylabel('avg action')
axes[1,0].set_xlim([0,N_episodes])
#axes[1,0].set_ylim([0,1])
axes[1,1].plot(ep_vec,results[i]['losses'],color=cols[i],label = label0+str(vals[i]))
axes[1,1].set_ylabel('avg loss')
axes[1,1].set_xlim([0,N_episodes])
plt.legend(frameon=False)
plt.tight_layout()
plt.show()
| StarcoderdataPython |
11362779 | <reponame>nicofff/baas<filename>solvers/random_solver.py<gh_stars>1-10
import base_solver as base
import game
from lib import helpers
STATE_MISS = 0
STATE_HIT = 1
STATE_UNKNOWN = 2
class RandomSolver(base.BaseSolver):
    """Solver that targets tiles in the order stored in ``self.tiles``."""

    def mark_tile_used(self, tile):
        # Remove the tile from the pool of not-yet-targeted tiles.
        self.remaining_tiles.remove(tile)

    def get_next_target(self):
        # Return the tile at the current cursor position, then advance.
        tile = self.tiles[self.turn]
        self.turn += 1
        return tile
def play_game(bs_game, solver):
    """Play one battleship game, asking `solver` for targets each turn.

    Prints the number of turns taken when the game reports completion
    (``ret["code"] == -1``) and returns None.

    Fix: replaced Python-2-only ``xrange`` with ``range`` (identical behavior
    on Python 2 for this small bound, and now runs on Python 3 as well);
    dropped commented-out debug code.
    """
    limit = 100  # safety cap on the number of turns
    for turn in range(limit):
        tile = solver.get_next_target()
        ret = bs_game.play_turn(tile)
        if ret["code"] == -1:
            # Game over: report how many turns it took (1-based).
            print(turn + 1)
            return
# Run many games with a single (reused) solver; each game prints its turn count.
# Fix: Python-2-only `xrange` -> `range` (same behavior, Python-3 compatible).
solver = RandomSolver()
rounds = 100000
for x in range(rounds):
    bs_game = game.BattleshipGame()
    solver.reset()  # reset the solver's tile order/cursor between games
    play_game(bs_game, solver)
| StarcoderdataPython |
8151664 | from uuid import uuid4
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from actor.models import Actor
def current_year() -> int:
    """Return the current year (usable as a callable default for model fields)."""
    now = timezone.now()
    return now.year  # pragma: no cover
class Movie(models.Model):
    """Movie/Film details."""

    # Primary key is a client-side generated UUID (default=uuid4).
    id = models.UUIDField(_("ID"), primary_key=True, db_index=True, default=uuid4)
    title = models.CharField(_("Title"), max_length=200)
    description = models.TextField(_("Description"), blank=True, null=True)
    director = models.CharField(_("Director"), max_length=200, blank=True, null=True)
    producer = models.CharField(_("Producer"), max_length=200, blank=True, null=True)
    release_year = models.IntegerField(_("Release year"), blank=True, null=True)
    # Rotten Tomatoes score; save() below clamps it into [0, 100].
    rt_score = models.PositiveIntegerField(_("Tomatometer score"), default=0)
    actors = models.ManyToManyField(Actor, related_name="movies")
    # timestamp set once on creation
    created_at = models.DateTimeField(_("Created"), auto_now_add=True)

    def save(self, *args, **kwargs):
        # Silently clamp the score into [0, 100] before persisting.
        if self.rt_score < 0:
            self.rt_score = 0
        elif self.rt_score > 100:
            self.rt_score = 100
        super().save(*args, **kwargs)

    @property
    def total_actors(self) -> int:
        # Issues one COUNT query per access.
        return self.actors.count()

    def __str__(self) -> str:
        return self.title

    class Meta:
        verbose_name = _("Movie")
        verbose_name_plural = _("Movies")
        # always provide latest added item first
        ordering = ("-created_at",)
| StarcoderdataPython |
3288376 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# Ansible module metadata: stability/support markers consumed by Ansible tooling.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_unmanaged_device_facts
short_description: Retrieve facts about one or more of the OneView Unmanaged Device.
description:
- Retrieve facts about one or more of the Unmanaged Device from OneView.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 2.0.1"
author: "<NAME> (@bsouza)"
options:
name:
description:
- Unmanaged Device name.
required: false
options:
description:
- "List with options to gather additional facts about the Unmanaged Device.
Options allowed:
C(environmental_configuration) gets a description of the environmental configuration for the Unmanaged Device."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Unmanaged Devices
oneview_unmanaged_device_facts:
config: "{{ config }}"
- debug: var=unmanaged_devices
- name: Gather paginated, filtered and sorted facts about Unmanaged Devices
oneview_unmanaged_device_facts:
config: "{{ config }}"
params:
start: 0
count: 2
sort: 'name:descending'
filter: "status='Disabled'"
- debug: var=unmanaged_devices
- name: Gather facts about an Unmanaged Device by name
oneview_unmanaged_device_facts:
config: "{{ config }}"
name: "{{ name }}"
- debug: var=unmanaged_devices
- name: Gather facts about an Unmanaged Device by name with environmental configuration
oneview_unmanaged_device_facts:
config: "{{ config }}"
name: "{{ name }}"
options:
- environmental_configuration
- debug: var=unmanaged_device_environmental_configuration
'''
RETURN = '''
unmanaged_devices:
description: The list of unmanaged devices.
returned: Always, but can be null.
type: list
unmanaged_device_environmental_configuration:
description: The description of the environmental configuration for the logical interconnect.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class UnmanagedDeviceFactsModule(OneViewModuleBase):
    """Gather facts about OneView Unmanaged Devices, optionally filtered by name."""

    argument_spec = dict(
        name=dict(required=False, type='str'),
        options=dict(required=False, type="list"),
        params=dict(required=False, type='dict'),
    )

    def __init__(self):
        super(UnmanagedDeviceFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
        self.resource_client = self.oneview_client.unmanaged_devices

    def execute_module(self):
        # Either look up a single device by name (plus optional extra facts),
        # or list all devices honoring the pagination/filter params.
        facts = {}
        name = self.module.params["name"]
        if name:
            devices = self.resource_client.get_by('name', name)
            env_config = self.__get_environmental_configuration(devices)
            if env_config is not None:
                facts["unmanaged_device_environmental_configuration"] = env_config
        else:
            devices = self.resource_client.get_all(**self.facts_params)
        facts["unmanaged_devices"] = devices
        return dict(ansible_facts=facts)

    def __get_environmental_configuration(self, devices):
        # Only fetched when requested via the 'environmental_configuration' option
        # and at least one device matched; uses the first match's URI.
        env_config = None
        if devices and "environmental_configuration" in self.options:
            env_config = self.resource_client.get_environmental_configuration(
                id_or_uri=devices[0]["uri"]
            )
        return env_config
return environmental_configuration
def main():
    """Entry point: build the facts module and hand control to its run loop."""
    module = UnmanagedDeviceFactsModule()
    module.run()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
395921 |
# Plugin metadata consumed by the bot's plugin loader.
NAME = 'diary.py'
ORIGINAL_AUTHORS = [
    '<NAME>'
]
# Short description shown in the plugin listing.
ABOUT = '''
Keeps a record of your diary
'''
# Usage/help text for the plugin's commands.
COMMANDS = '''
>>>.diary record <diary entry>
Adds to that day's diary entry
>>>.diary show <date>
Will show the diary entry of specified date (can also accept "today", "yesterday")
>>>.diary delete
Will delete today's entry
>>>.diary help
Gives help about how to use the plugin
'''
WEBSITE = ''
| StarcoderdataPython |
11305034 | <filename>Helpers/GlobalSettings.py
from typing import Type, Any, List, Dict, Set, Tuple, Union, Optional, Iterator, Iterable, Callable
from Helpers.Torches import *
class Gsv:
    """String constants used as option values by the global settings in Gs."""
    mean = 'mean'
    activation = 'activation'
    rnn = 'rnn'
    concat = 'concatenation'
    product = 'product'
    # Graph variants: which node types participate (u=user, q=query, i=item).
    graph_uqi = 'uqi'
    graph_only_uq = 'uq'
    graph_only_ui = 'ui'
    graph_only_qi = 'qi'
class Gs:
    """Global settings and hyperparameters.

    NOTE: several attributes below are assigned repeatedly in a row; only the
    LAST assignment takes effect — the earlier lines record tried alternatives.
    """
    use_valid_dataset: bool = True
    adjust_learning_rate: bool = True  # whether to lower the learning rate once the loss gets very small
    lambda_muq_for_hem = 0.5
    # Number of positive samples per batch; e.g. 100 positives with a 10x
    # negative-sampling ratio gives a batch of 1100 rows.
    batch_size = 100
    # DO NOT modify this
    batch_size_times = 1
    learning_rate = 0.001
    embedding_size = 32
    weight_decay = 0  # previously tried: 1e-5
    # Currently only the 2D graph is supported.
    graph_completeness: str
    # Per-user average-metric statistics output file; None disables collection.
    long_tail_stat_fn: Optional[str] = None
    random_negative_sample_size = 10
    non_random_negative_sample_size = 0
    negative_sample_size = random_negative_sample_size + non_random_negative_sample_size

    # Feature-interaction weights; only used by HyperGCN / HyperGAT.
    class HighOrderFeature:
        # Weights for combining 1st/2nd-order features; only the LAST alpha2 wins.
        alpha2 = [0, 1]
        alpha2 = [0.01, 0.99]
        alpha2 = [0.001, 0.999]
        alpha2 = [0.0001, 0.9999]
        alpha2 = [0.00001, 0.99999]
        alpha2 = [0.5, 0.5]
        alpha2 = [0.99, 0.01]
        alpha2 = [0.6, 0.4]
        alpha2 = [0.8, 0.2]
        alpha2 = [1, 1]
        # Uniform weights over three feature orders.
        alpha3 = [1 / 3] * 3

    class Gnn:
        # GAT multi-head combination mode; last assignment (concat) is effective.
        gat_head = Gsv.product
        gat_head = Gsv.concat
        # GAT activation; last assignment (LeakyReLU) is effective.
        gat_activation = (nn.Tanh, 'tanh')
        gat_activation = (nn.ReLU, 'relu')
        gat_activation = (nn.LeakyReLU, 'leaky_relu')

    class Query:
        # How to transform the input query features; last assignment (mean) wins.
        transform = Gsv.rnn
        transform = Gsv.activation
        transform = Gsv.mean
        # Activation used by the 'activation' transform; last assignment wins.
        transform_activation = nn.Tanh
        transform_activation = nn.ReLU

    class Prediction:
        # Use cosine similarity instead of dot product.
        # (Author's note: dot product worked better.)
        use_cosine_similarity: bool = False

    class Tem:
        encoder_count: int = 1

    class Srrl:
        KG_loss: bool = True
        uni_weight: bool = False
        regularization: float = 0  # previously tried: 0.00001

    class Dataset:
        # Cap on user history length; last assignment (500) is effective.
        user_history_limit: int = -1
        user_history_limit: int = 500

    class Debug:
        # Whether to log mean/std of absolute values of high-order features.
        show_highorder_embedding_info: bool = False
        # Per-GNN-layer list of (mean |feature|, mean |weight|) tuples,
        # ordered by feature order: 1st, 2nd, 3rd, ...
        highorder_info: List[List[Tuple[float, float]]]
        # Mean absolute weight values of the three Embedding objects.
        embedding_info: Tuple[float, float, float]
        # Do not modify this
        _calculate_embedding_info: bool = False
_calculate_highorder_info: bool = False | StarcoderdataPython |
1783703 | <filename>stupidfuckingbot.py
#i have no idea wtf i am doing
import os
import discord
import configparser
import random
from discord.ext import commands
# Load bot settings (e.g. the Discord token) from the INI file next to the script.
config = configparser.ConfigParser()
config.read('settings.ini')
client = commands.Bot(command_prefix = 'l.')
# Directory holding the frog images served by the `frog` command.
frogdir = "animals/frog"
@client.event
async def on_ready():
    # Log the bot's identity once the gateway connection is established.
    print(f'Logged in as\n{client.user.name}\n{client.user.id}\n------')
@client.command()
async def ping(ctx):
    # Reply with the current websocket latency in milliseconds.
    await ctx.send(f'Ping: {round(client.latency * 1000)} ms')
@client.command()
async def frog(ctx):
    """Send a random frog image from `frogdir`.

    Fix: the original called random.choice() twice — once for the file it sent
    and once for the path it printed — so the log (usually) named a different
    file than the one actually posted. Choose once and reuse the path.
    """
    frog_files = os.listdir(frogdir)
    chosen_path = os.path.join(frogdir, random.choice(frog_files))
    print(chosen_path)
    await ctx.send(file=discord.File(chosen_path))
@client.event
async def on_message(message):
    # Ignore the bot's own messages to avoid reply loops.
    if message.author != client.user:
        # Case-insensitive trigger word anywhere in the message.
        if "BERD" in message.content.upper():
            berdp = discord.File("animals/berd.png")
            await message.channel.send(file=berdp)
    # Overriding on_message suppresses default command handling, so commands
    # must be re-dispatched explicitly here.
    await client.process_commands(message)
# Start the bot with the token from settings.ini (blocks until shutdown).
client.run(config['Bot']['Token'])
396031 | import os
from flask import Flask
from .database import Database
def create_app(config=None, instance_path=None):
    """Application factory for the budgie Flask app.

    config: optional mapping overriding the on-disk instance config.
    instance_path: optional explicit instance folder (defaults to Flask's).
    """
    # create the app, forwarding instance_path only when the caller supplied one
    flask_kwargs = {"instance_relative_config": True}
    if instance_path is not None:
        flask_kwargs["instance_path"] = instance_path
    app = Flask(__name__, **flask_kwargs)

    # configure the app
    app.config.from_mapping(SECRET_KEY="this-is-completely-secret")
    if config is None:
        # load the default config from the instance folder, if present
        app.config.from_pyfile("config.py", silent=True)
    else:
        # load the config mapping passed in
        app.config.from_mapping(config)

    # ensure the instance folder exists
    os.makedirs(app.instance_path, exist_ok=True)

    # create the database
    db = Database(os.path.join(app.instance_path, "budgie.db"))

    # register all route modules with the app and shared database
    from . import account_routes, tag_routes, transaction_routes, report_routes

    for routes in (account_routes, tag_routes, transaction_routes, report_routes):
        routes.init_app(app, db)

    return app
| StarcoderdataPython |
332378 | from ctypes import Structure, c_ulong, POINTER, cast, addressof, py_object, c_long, c_void_p
from common import get_object_field_or_null, EMPTY, DUMMY
class PyDictKeyEntry(Structure):
    """ctypes overlay of one CPython dict key-table entry (hash, key, value)."""
    _fields_ = [
        ('me_hash', c_long),
        ('me_key', py_object),
        ('me_value', py_object),
    ]


class PyDictKeysObject(Structure):
    # NOTE(review): this mirrors the combined-table key object of the CPython
    # version this was written for; these internals change between releases
    # (e.g. field order/additions) — confirm against the target interpreter.
    _fields_ = [
        ('dk_refcnt', c_long),
        ('dk_size', c_long),
        ('dict_lookup_func', POINTER(c_void_p)),
        ('dk_usable', c_long),
        ('dk_entries', PyDictKeyEntry),  # first entry of the inline entry array
    ]


class PyDictObject(Structure):
    _fields_ = [
        ('ob_refcnt', c_ulong),
        ('ob_type', c_ulong),  # held as an integer address, never dereferenced
        ('ma_used', c_long),   # number of live items in the dict
        ('ma_keys', POINTER(PyDictKeysObject)),
        # Not actually a void*, split tables are not supported right now
        ('ma_values', POINTER(c_void_p))
    ]
def dictobject(d):
    """Reinterpret the live CPython dict `d` as a PyDictObject overlay (no copy)."""
    pointer = cast(id(d), POINTER(PyDictObject))
    return pointer.contents
# Recover CPython's private "dummy" key object: deleting the only entry leaves
# the dummy marker in the key table, which we read back through the overlay.
d = {0: 0}
del d[0]
dummy_internal = dictobject(d).ma_keys.contents.dk_entries.me_key
del d
def usable_fraction(size):
    """Number of usable slots in a key table of `size` (roughly two thirds)."""
    return (2 * size + 1) // 3
def dump_py_dict(d):
    """Dump the internal hash-table state of dict `d`.

    Returns (hashes, keys, values, fill, used) where the first three are
    per-slot lists over the whole key table (EMPTY/DUMMY markers included),
    `fill` is the number of consumed slots (usable capacity minus remaining
    usable), and `used` is the live item count.
    """
    do = dictobject(d)
    keys = []
    hashes = []
    values = []
    size = do.ma_keys.contents.dk_size
    # View the inline dk_entries field as a C array of entries.
    entries = cast(addressof(do.ma_keys.contents.dk_entries), POINTER(PyDictKeyEntry))
    # First pass: collect keys, mapping the internal dummy object to DUMMY.
    for i in range(size):
        key = get_object_field_or_null(entries[i], 'me_key')
        keys.append(key if key is not dummy_internal else DUMMY)
    # Second pass: hashes/values only exist for non-empty slots.
    for i, key in enumerate(keys):
        if key is EMPTY:
            hashes.append(EMPTY)
            values.append(EMPTY)
        else:
            hashes.append(entries[i].me_hash)
            values.append(get_object_field_or_null(entries[i], 'me_value'))
    return hashes, keys, values, usable_fraction(do.ma_keys.contents.dk_size) - do.ma_keys.contents.dk_usable, do.ma_used
| StarcoderdataPython |
3466954 | <filename>make_mesh.py
import sys
from glob import glob
import skimage
from skimage import measure
import nibabel
import numpy
import os
from math import floor
import argparse
#
def remove_inner_surface(img_data, mask, treshhold=0):
    """
    Replace inner data of the given volume with a smoothed, uniform masking to
    avoid generation of inner surface structures and staircase artifacts when
    using the marching cube algorithm.

    Note: `img_data` is modified in place (a pristine copy is kept internally).
    Fix: removed dead locals (x, y, z were computed and never used), which also
    lifts the implicit 3-D-only restriction that shape unpacking imposed.

    Parameters
    ----------
    img_data : array
        Input volume data to extract mesh from (mutated in place).
    mask : array
        Smoothed internal mask. NOTE(review): if the mask is all zeros,
        numpy.max(mask) is 0 and the division below raises — confirm callers
        always pass a non-empty mask.
    treshhold : int
        Determines isosurface and values for the inner mask.

    Returns
    -------
    fin : array
        Manipulated data matrix to be used for marching cube.
    iso_surface : float
        Corresponding iso surface value to use for marching cube.
    """
    # Keep the original values so the final pass can restore true intensities.
    origin = numpy.copy(img_data)
    # Fill in the holes within the boundary of the eroded mask.
    img_data[(img_data > 0) & (mask == 1)] = treshhold
    # Scale factor so the smoothed mask peaks at exactly `treshhold`.
    substitute_value = float(treshhold) / float(numpy.max(mask))
    # Replace all inner values with the smoothed mask times the substitute.
    img_data[numpy.nonzero(mask)] = numpy.multiply(mask[numpy.nonzero(mask)], substitute_value)
    # Iso level slightly below the substitute keeps a singular, smooth surface
    # even where the mask shows through.
    iso_surface = float(treshhold) / float(1.05)
    # Final matrix: per-voxel maximum of the smoothed interior and the original,
    # so low-intensity original voxels don't punch holes in the mesh.
    fin = numpy.copy(img_data)
    fin[numpy.nonzero(img_data)] = numpy.fmax(img_data[numpy.nonzero(img_data)], origin[numpy.nonzero(img_data)])
    return (fin, iso_surface)
#Either take boundary from supplied mask or if not specified, from image directly
def cut_img_mas(file_input, file_output, size, axis, trim_starting_from, mask=None):
    """
    Trim a NIfTI image before mesh creation and save the trimmed result.

    Fix: `mask != None` replaced with the idiomatic `mask is not None`
    (identity check against the None singleton).

    Parameters
    ----------
    file_input : str
        File name of image (relative to the CWD) to be loaded and cut.
    file_output : str
        Output file name.
    size : int
        Number of voxels to trim.
    axis : int
        Axis along which to trim (0,1,2).
    trim_starting_from : {'beginning','end'}
        Trim from the start of the axis inwards or from the end inwards.
    mask : str, optional
        Path to a mask NIfTI file; if given, the boundary is determined from
        the mask instead of the image (needed for images with non-zero borders).
    """
    path = os.path.abspath('.') + '/'
    img = nibabel.load(path + file_input)
    img_data = img.get_fdata()
    header = img.header.copy()
    if mask is not None:
        # Boundary from the supplied mask file (path used as given).
        mask_img = nibabel.load(mask)
        box = get_bounding_slices(mask_img.get_fdata())
    else:
        # Boundary from the image itself.
        box = get_bounding_slices(img_data)
    img_data = cut_img(img_data, box, size, axis, trim_starting_from)
    img_nifti = nibabel.Nifti1Image(img_data, None, header=header)
    nibabel.save(img_nifti, file_output)
    return
#Define the boundin:g box of the data matrix.
def get_bounding_slices(img):
    """
    Determine the bounding box of the non-zero region of `img`.

    Returns, for every axis, a two-element list [lo, hi] such that all
    non-zero entries lie within indices lo..hi-1 along that axis. If no
    clear boundary is found on an axis, the full data extent is returned
    for it (with a warning printed).
    """
    dims = numpy.shape(img)
    zero_mask = img == 0
    ndim = img.ndim
    bbox = []
    for axis in range(ndim):
        # Collapse every axis except the current one: True where the whole
        # perpendicular slab is zero.
        other_axes = tuple(a for a in range(ndim) if a != axis)
        slab_is_zero = zero_mask.all(axis=other_axes)
        # Indices where zero/non-zero flips (diff of booleans is XOR).
        transitions = numpy.nonzero(numpy.diff(slab_is_zero))[0]
        if len(transitions) != 2:
            # TODO: see if one boundary has been found, and check that
            print("No clear boundary found (no zero entries?) in dimension " + str(axis))
            print("Boundary of data matrix is returned instead")
            transitions = [0, dims[axis] - 2]
        bbox.append([transitions[0] + 1, transitions[1] + 1])
    return bbox
# Trim image along specified axis, size input = voxel
def cut_img(img, bbox, size, axis, trim_starting_from):
    """
    Trim image data matrix (zeroes out the trimmed region in place).

    Parameters
    ----------
    img : array
        Image data matrix to be trimmed (mutated and also returned).
    bbox : array
        Per-axis [lo, hi] index pairs as returned by get_bounding_slices().
    size : int
        Number of voxels to trim.
    axis : int
        Axis along which to trim.
    trim_starting_from : {'beginning','end'}
        Trim from the start of the axis inwards or from the end inwards.
        NOTE(review): any other value leaves `slc` unbound and raises
        UnboundLocalError at the assignment below — confirm callers validate.

    Returns
    -------
    img : array
        Trimmed data matrix (same object as the input).
    """
    dims = numpy.shape(img)
    # NOTE(review): bbox is indexed with axis-1 while slc uses axis directly;
    # this only lines up if `axis` is meant to be 1-based here (axis=0 wraps to
    # the last bbox entry). Looks inconsistent with get_bounding_slices' 0-based
    # output — confirm the intended convention.
    ind = bbox[axis-1]
    if (trim_starting_from == "beginning"):
        new_ind = ind[0] + size
        slc = [slice(None)] * len(img.shape)
        slc[axis] = slice(0, new_ind)
    elif (trim_starting_from == "end"):
        new_ind = ind[1] - size
        slc = [slice(None)] * len(img.shape)
        slc[axis] = slice(new_ind, dims[axis])
    # Zero the selected region rather than shrinking the array.
    img[tuple(slc)] = 0
    return img
def f(i, j, k, affine):
    """
    Apply the 4x4 affine matrix to voxel indices (i, j, k) -> (x, y, z).

    Used to place mesh vertices at correct world coordinates and scale.
    """
    rotation = affine[:3, :3]
    translation = affine[:3, 3]
    return rotation.dot([i, j, k]) + translation
#Writes an .obj file for the output of marching cube algorithm. Specify affine if needed in mesh. One = True for faces indexing starting at 1 as opposed to 0. Necessary for Blender/SurfIce
def write_obj(name, verts, faces, normals, values, affine=None, one=False):
    """
    Write a .obj file for the output of the marching cube algorithm.

    Fix: the output file is now opened with a context manager so it is closed
    even if a write fails (the original leaked the handle on exceptions).

    Parameters
    ----------
    name : str
        Output file name.
    verts : array
        Spatial coordinates for vertices.
    faces : array
        List of faces referencing indices of verts. When `one` is True the
        indices are shifted by +1 (array arithmetic — expects a numpy array).
    normals : array
        Normal direction of each vertex.
    values : array
        Accepted for API compatibility but unused.
    affine : array, optional
        If given, vertex coordinates are affine transformed so the mesh has
        correct origin and size.
    one : bool
        Start face indexing at 1 instead of 0 (needed by Blender/SurfIce).
    """
    if one:
        faces = faces + 1
    with open(name, 'w') as thefile:
        if affine is not None:
            for item in verts:
                transformed = f(item[0], item[1], item[2], affine)
                thefile.write("v {0} {1} {2}\n".format(transformed[0], transformed[1], transformed[2]))
        else:
            for item in verts:
                thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))
        print("File written 30%")
        for item in normals:
            thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))
        print("File written 60%")
        for item in faces:
            thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0], item[1], item[2]))
def main():
    """CLI entry point: load volume + mask, clean the interior, run marching
    cubes, and write the resulting surface mesh as <image>_mesh_1.obj."""
    parser = argparse.ArgumentParser(description="Create surface mesh form nifti-volume", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--treshhold', '-t', default=0, type=float)
    parser.add_argument('--image_name', '-i', type=str)
    parser.add_argument('--mask_name', '-m', type=str)
    # NOTE(review): --cut is parsed but never used in this function — confirm
    # whether cut_img_mas was meant to be wired in here.
    parser.add_argument('--cut', '-c', type=int, nargs='*')
    args = parser.parse_args()
    path = os.path.abspath('.')
    path = path + '/'
    # Load necessary nifti files: data volume and the internal smoothed mask.
    img = nibabel.load(path + args.image_name)
    img_data = img.get_fdata()
    img2 = nibabel.load(path + args.mask_name)
    mask = img2.get_fdata()
    # Replace inner values and run marching cube.
    img_data, iso_surface = remove_inner_surface(img_data, mask, args.treshhold)
    verts, faces, normals, values = measure.marching_cubes_lewiner(img_data, iso_surface)
    # Save mesh as .obj with world coordinates (affine) and 1-based faces.
    write_obj((path + (args.image_name).split(".")[0] + "_mesh_1.obj"), verts, faces, normals, values, affine=img.affine, one=True)


if __name__ == '__main__': main()
| StarcoderdataPython |
9670136 | import textwrap
import zipfile
import pytest
@pytest.fixture
def fancy_wheel(tmp_path):
    """Build a minimal but structurally valid wheel ("fancy" 1.0.0) on disk
    and return its path. Archive member names ending in "/" are directories;
    contents are dedented before being written."""
    path = tmp_path / "fancy-1.0.0-py2.py3-none-any.whl"
    files = {
        "fancy/": b"""""",
        "fancy/__init__.py": b"""\
        def main():
            print("I'm fancy.")
        """,
        "fancy/__main__.py": b"""\
        if __name__ == "__main__":
            from . import main
            main()
        """,
        "fancy-1.0.0.data/data/fancy/": b"""""",
        "fancy-1.0.0.data/data/fancy/data.py": b"""\
        # put me in data
        """,
        "fancy-1.0.0.dist-info/": b"""""",
        "fancy-1.0.0.dist-info/top_level.txt": b"""\
        fancy
        """,
        "fancy-1.0.0.dist-info/entry_points.txt": b"""\
        [console_scripts]
        fancy = fancy:main
        [gui_scripts]
        fancy-gui = fancy:main
        """,
        "fancy-1.0.0.dist-info/WHEEL": b"""\
        Wheel-Version: 1.0
        Generator: magic (1.0.0)
        Root-Is-Purelib: true
        Tag: py3-none-any
        """,
        "fancy-1.0.0.dist-info/METADATA": b"""\
        Metadata-Version: 2.1
        Name: fancy
        Version: 1.0.0
        Summary: A fancy package
        Author: <NAME>
        Author-email: <EMAIL>
        License: MIT
        Keywords: fancy amazing
        Platform: UNKNOWN
        Classifier: Intended Audience :: Developers
        """,
        # The RECORD file is indirectly validated by the WheelFile, since it only
        # provides the items that are a part of the wheel.
        "fancy-1.0.0.dist-info/RECORD": b"""\
        fancy/__init__.py,,
        fancy/__main__.py,,
        fancy-1.0.0.data/data/fancy/data.py,,
        fancy-1.0.0.dist-info/top_level.txt,,
        fancy-1.0.0.dist-info/entry_points.txt,,
        fancy-1.0.0.dist-info/WHEEL,,
        fancy-1.0.0.dist-info/METADATA,,
        fancy-1.0.0.dist-info/RECORD,,
        """,
    }

    with zipfile.ZipFile(path, "w") as archive:
        for name, indented_content in files.items():
            # Dedent each member's content so the files are written flush-left.
            archive.writestr(
                name,
                textwrap.dedent(indented_content.decode("utf-8")).encode("utf-8"),
            )

    return path
| StarcoderdataPython |
12836753 | # The MIT License (MIT).
# Copyright (c) 2015, <NAME> & contributors.
from imapfw.interface import implements, checkInterfaces
from imapfw.conf import Parser
from .interface import ActionInterface
# Annotations.
from imapfw.annotation import ExceptionClass
@checkInterfaces()
@implements(ActionInterface)
class UnitTests(object):
    """Run all the unit tests."""

    # Action flags read by the framework: no hooks, no rascal file required.
    honorHooks = False
    requireRascal = False

    def __init__(self):
        self._suite = None
        self._exitCode = 1  # pessimistic default until the suite succeeds

    def exception(self, e: ExceptionClass) -> None:
        # Re-raise whatever is currently being handled; no local recovery.
        raise

    def getExitCode(self) -> int:
        return self._exitCode

    def init(self, parser: Parser) -> None:
        """Build the test suite from all available unit-test modules."""
        import unittest
        self._suite = unittest.TestSuite()

        # Load all available unit tests.
        from imapfw.testing.concurrency import TestConcurrency
        from imapfw.testing.rascal import TestRascal
        from imapfw.testing.folder import TestFolder
        from imapfw.testing.message import TestMessage, TestMessages
        from imapfw.testing.maildir import TestMaildirDriver
        from imapfw.testing.edmp import TestEDMP
        from imapfw.testing.types import TestTypeAccount, TestTypeRepository
        from imapfw.testing.architect import TestArchitect, TestDriverArchitect
        from imapfw.testing.architect import TestDriversArchitect
        from imapfw.testing.architect import TestEngineArchitect

        self._suite.addTest(unittest.makeSuite(TestConcurrency))
        self._suite.addTest(unittest.makeSuite(TestRascal))
        self._suite.addTest(unittest.makeSuite(TestFolder))
        self._suite.addTest(unittest.makeSuite(TestMessage))
        self._suite.addTest(unittest.makeSuite(TestMessages))
        self._suite.addTest(unittest.makeSuite(TestMaildirDriver))
        self._suite.addTest(unittest.makeSuite(TestEDMP))
        self._suite.addTest(unittest.makeSuite(TestTypeAccount))
        self._suite.addTest(unittest.makeSuite(TestTypeRepository))
        self._suite.addTest(unittest.makeSuite(TestArchitect))
        self._suite.addTest(unittest.makeSuite(TestDriverArchitect))
        self._suite.addTest(unittest.makeSuite(TestDriversArchitect))
        self._suite.addTest(unittest.makeSuite(TestEngineArchitect))

    def run(self) -> None:
        import unittest
        runner = unittest.TextTestRunner(verbosity=2)
        testResult = runner.run(self._suite)
        # On success, failures is empty, so the exit code becomes 0;
        # on an unsuccessful run the pessimistic default of 1 is kept.
        if testResult.wasSuccessful():
            self._exitCode = len(testResult.failures)
# Register this action with the command-line parser.
Parser.addAction('unitTests', UnitTests, help="run the integrated unit tests")
| StarcoderdataPython |
8099690 | <filename>catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/conditions/interface.py
from abc import abstractmethod, ABCMeta
from collections import namedtuple
from duckietown_utils.exceptions import DTConfigException
from duckietown_utils.instantiate_utils import indent
from duckietown_utils.system_cmd_imp import contract
from easy_regression.conditions.result_db import ResultDB
class RTParseError(DTConfigException):
    """Raised when a regression-test condition string cannot be parsed."""
# Base record for a check outcome; CheckResult below adds pretty-printing.
CheckResult0 = namedtuple('CheckResult0',
                          ['status',   # One of RTCheck.CHECK_RESULTS
                           'summary',  # A short string
                           'details',  # A long description
                           ])
class CheckResult(CheckResult0):
    """CheckResult0 with a human-readable multi-line rendering."""

    def __str__(self):
        lines = [
            'CheckResult:',
            indent(self.status, ' status: '),
            indent(self.summary, ' summary: '),
            indent(self.details, '', ' details: '),
        ]
        return '\n'.join(lines)
class RTCheck():
    # NOTE(review): `__metaclass__` is the Python 2 way to attach a metaclass;
    # on Python 3 this line is a no-op and the class is NOT actually abstract.
    # Confirm which interpreter this codebase targets.
    __metaclass__ = ABCMeta

    # Possible check outcomes.
    FAIL = 'fail'
    OK = 'ok'
    NODATA = 'nodata'      # the historical data is not there yet
    ABNORMAL = 'abnormal'  # Other error in the evaluation
    CHECK_RESULTS = [OK, FAIL, NODATA, ABNORMAL]

    # NOTE(review): abstractmethod is recommended to be the innermost decorator
    # when combined with others — confirm this ordering works with @contract.
    @abstractmethod
    @contract(returns=CheckResult, result_db=ResultDB)
    def check(self, result_db):
        """
        Returns a CheckResult.
        """

    @staticmethod
    def from_string(line):
        """
        Returns a RTCheck object.

        Syntaxes allowed:

        Simple checks:

            v:analyzer/log/statistics == value
            v:analyzer/log/statistics >= value
            v:analyzer/log/statistics <= value
            v:analyzer/log/statistics < value
            v:analyzer/log/statistics > value

        Check that it is in 10% of the value:

            v:analyzer/log/statistics ==[10%] value

        Use `@date` to reference the last value:

            v:analyzer/log/statistics ==[10%] v:analyzer/log/statistic@date

        Use `~branch@date` to reference the value of a branch at a certain date

            v:analyzer/log/statistics ==[10%] v:analyzer/log/statistic~branch@date

        Use `?commit` to reference the value of a branch at a specific commit:

            v:analyzer/log/statistics ==[10%] v:analyzer/log/statistic?commit

        Other checks:

            v:analyzer/log/statistics contains ![log name]

        Raises DTConfigException if the syntax is not valid.
        """
        # Imported lazily to avoid a circular import with the implementation.
        from .implementation import _parse_regression_test_check
        return _parse_regression_test_check(line)
| StarcoderdataPython |
6410960 | <reponame>cnrpman/epickitchen-on-youcook2<filename>youcook2/clip_cutting.py
import os
import os.path
import sys
import shutil
import argparse
import math
from tqdm import tqdm
def opts():
    """Build the CLI argument parser for the clip cutter and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(description='Process some integers.')
    arg_parser.add_argument('--path', default='./',
                            help='path of decoded images')
    arg_parser.add_argument('--path_dst', default='../imgs_cliped',
                            help='path of decoded images')
    arg_parser.add_argument('--path_fr', default='../framerates.txt',
                            help='path of decoded images')
    arg_parser.add_argument('--path_manifest', default='../clip_manifest.txt',
                            help='path of cliped manifest')
    arg_parser.add_argument('--duration', default=10, type=int,
                            help='duration for each clip, by second')
    arg_parser.add_argument('--overlap', action='store_true',
                            help='overlapping between current and last clip')
    arg_parser.add_argument('--img_tmpl', default='img_{:05d}.jpg',
                            help="image name template")
    return arg_parser.parse_args()
def main():
    """Split each video's decoded frame folder into fixed-duration clips and
    write a manifest mapping clip name -> (video, start sec, end sec)."""
    args = opts()
    record = list()
    # framerates.txt: one "<filename> <fps>" pair per line.
    framerates = [line.strip().split(' ') for line in open(args.path_fr, 'r')]
    vid2framerate = {os.path.splitext(line[0])[0]: float(line[1]) for line in framerates}
    if not os.path.exists(args.path_dst):
        os.mkdir(args.path_dst)
    img_list = os.listdir(args.path)
    for img in tqdm(img_list):
        framerate = vid2framerate[os.path.splitext(img)[0]]
        tot_frame = len(os.listdir(os.path.join(args.path, img)))
        st_fr = 1   # first frame of the next clip (frames are 1-based)
        lst_fr = -1  # start frame of the previous clip (for overlap mode)
        if args.overlap:
            # Half-duration steps produce clips overlapping by 50%.
            duration = args.duration / 2
            cnt = 2
        else:
            duration = args.duration
            cnt = 1
        while(True):
            nxt_sec = cnt * duration
            # Clamp the clip end to the last available frame + 1.
            nxt_fr = min(math.floor(nxt_sec * framerate), tot_frame+1)
            if st_fr == nxt_fr:
                break  # no more frames to consume
            if args.overlap and lst_fr != -1:
                cut(args, img, cnt-1, lst_fr, nxt_fr, framerate, record)
            else:
                cut(args, img, cnt, st_fr, nxt_fr, framerate, record)
            cnt += 1
            lst_fr = st_fr
            st_fr = nxt_fr
    with open(args.path_manifest, 'w') as f:
        f.write('\n'.join(record))
def cut(args, img, cnt, start, end, framerate, record):
    """Move frames [start, end) of video `img` into clip folder <img>_<cnt>
    (renumbered from 1) and append a manifest line to `record`.

    NOTE: shutil.move is destructive — the frames are removed from the source
    folder. An existing destination with the expected frame count is skipped.
    """
    skip_flg = False
    src_img = os.path.join(args.path, img)
    dst_img = os.path.join(args.path_dst, img + ("_%04d" % cnt))
    if not os.path.exists(dst_img):
        os.mkdir(dst_img)
    else:
        # Destination already fully populated -> skip the move (resume support).
        if len(os.listdir(dst_img)) == end - start:
            skip_flg = True
    if not skip_flg:
        for idx, fr in enumerate(range(start, end)):
            src_path = os.path.join(src_img, args.img_tmpl.format(fr))
            dst_path = os.path.join(dst_img, args.img_tmpl.format(idx+1))
            shutil.move(src_path, dst_path)
    # Manifest line: "<clip_id> <video> <start_sec> <end_sec>".
    record.append(' '.join((img + ("_%04d" % cnt), img, ("%.4f" % (start / framerate)), ("%.4f" % (end / framerate)))))
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4935355 | <reponame>fengyiqi/screentl
import datetime
import random
import re
from moviepy.editor import *
TODAY = datetime.date.today().strftime('%Y-%m-%d')
def make_video(folder: str = TODAY,
               fps: int = 25,
               audio_loc: str = 'audio',
               text: str = TODAY
               ):
    """
    Make the video from the screenshots in `folder`, with random background music.

    Fix: the bare ``except:`` around the text-overlay step has been narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit still propagate; the
    no-text fallback behavior is unchanged.

    :param folder: folder where the screenshots are stored. By default the
        folder name is today's date.
    :param fps: frames per second of the output video.
    :param audio_loc: audio file folder location.
    :param text: what you want to display in the video.
    :return: None
    """
    files = os.listdir(folder)
    # Sort screenshots by their numeric suffix to ensure the time sequence.
    num_list = [int(re.findall(r"\d+", file)[0]) for file in files if file.startswith('screenshot')]
    num_list.sort()
    images_list = []
    for i in num_list:
        images_list.append(f'{folder}/screenshot_{i}.png')
    duration = len(num_list) / fps
    # Randomly select a track among those at least as long as the video.
    audio_candidates = []
    for audio_file in os.listdir(audio_loc):
        audio_clip = AudioFileClip(f'{audio_loc}/{audio_file}')
        if duration <= audio_clip.end:
            audio_candidates.append(f'{audio_loc}/{audio_file}')
    audio_file = random.choice(audio_candidates)
    video_clip = ImageSequenceClip(images_list, fps=fps)
    # To use the text video, you have to install "ImageMagick". One can refer to
    # "Other optional but useful dependencies" at
    # https://zulko.github.io/moviepy/install.html
    try:
        # NOTE(review): color='whi' looks like a typo for 'white'; if ImageMagick
        # rejects it, the fallback below silently drops the overlay — confirm.
        txt = TextClip(f"{text}", color='whi', fontsize=60)
        txt_mov = txt.set_pos('center').set_duration(3)
        final = CompositeVideoClip([video_clip, txt_mov])
    except Exception:
        # Best-effort: fall back to no text overlay (e.g. ImageMagick missing).
        final = CompositeVideoClip([video_clip])
    background_music = AudioFileClip(audio_file)
    final = final.set_audio(background_music)
    # To compress the video, one can give a string like '2000k' to bitrate.
    final.set_duration(duration).write_videofile(f'{folder}/video.mp4', bitrate=None)
| StarcoderdataPython |
6698327 | <filename>seafobj/backends/base.py
#coding: UTF-8
import zlib
from seafobj.exceptions import GetObjectError
class AbstractObjStore(object):
'''Base class of seafile object backend'''
def __init__(self, compressed, crypto=None):
self.compressed = compressed
self.crypto = crypto
def read_obj(self, repo_id, version, obj_id):
try:
data = self.read_obj_raw(repo_id, version, obj_id)
if self.crypto:
data = self.crypto.dec_data(data)
if self.compressed and version == 1:
data = zlib.decompress(data)
except Exception as e:
raise GetObjectError('Failed to read object %s/%s: %s' % (repo_id, obj_id, e))
return data
def read_obj_raw(self, repo_id, version, obj_id):
'''Read the raw content of the object from the backend. Each backend
subclass should have their own implementation.
'''
raise NotImplementedError
def get_name(self):
'''Get the backend name for display in the log'''
raise NotImplementedError
def list_objs(self, repo_id=None):
'''List all objects'''
raise NotImplementedError
def obj_exists(self, repo_id, obj_id):
raise NotImplementedError
def write_obj(self, data, repo_id, obj_id):
'''Write data to destination backend'''
raise NotImplementedError
def stat(self, repo_id, verison, obj_id):
if self.crypto or self.compressed:
try:
data = self.read_obj(repo_id, verison, obj_id)
return len(data)
except:
raise
return self.stat_raw(repo_id, obj_id)
def stat_raw(self, repo_id, obj_id):
raise NotImplementedError
| StarcoderdataPython |
5183553 | <gh_stars>0
"""
Provide an implementation of the optimal drop-off location searcher.
"""
from statprly.optimal_drop_off_location_searcher.interfaces import BaseOptimalDropOffLocationSearcher
class OptimalDropOffLocationSearcher(BaseOptimalDropOffLocationSearcher):
    """
    Implementation of the optimal drop-off location searcher.
    """
    def __init__(self, loss):
        # loss: loss function kept on the instance (interface requirement of
        # the base class; not used by get_optimal_location_index itself).
        self.loss = loss

    def get_optimal_location_index(self, heatmap: list) -> int:
        """
        Get optimal location for sending a help to a person.

        Returns the index of the bucket containing the "median" of the heat
        mass: the last index before the cumulative heat of preceding buckets
        reaches half of the total heat.

        :param heatmap: histogram of non-normalized probability location of the person.
        :return: optimal location index.
        """
        optimal_heat_condition = sum(heatmap) / 2
        optimal_heat = 0
        for i, heat in enumerate(heatmap):
            if optimal_heat >= optimal_heat_condition:
                return i - 1
            optimal_heat += heat
        # Bug fix: the loop previously fell off the end and implicitly
        # returned None whenever the half-heat point lay inside the last
        # bucket (e.g. heatmap=[1, 3]); the last index is the correct
        # answer in that case.
        return len(heatmap) - 1
| StarcoderdataPython |
4934042 | import tweepy
import os
from dotenv import load_dotenv
# Load .env from the working directory so os.getenv() below can see the
# credentials; none of them are hard-coded here.
load_dotenv()
TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")
BASILICA_API_KEY = os.getenv("BASILICA_API_KEY")
# OAuth 1.0a handshake against the Twitter API via tweepy.
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
#api.update_status('tweepy + oauth!')
# Fetch and print the text of the authenticated user's home timeline.
public_tweets = api.home_timeline()
for tweet in public_tweets:
    print(tweet.text) | StarcoderdataPython |
1816674 | """Define the URL patterns for learning_logs."""
from django.conf.urls import url
from . import views
urlpatterns = [
    # Home page
    url(r'^$', views.index, name='index'),
    # Topic listing and per-topic detail page
    url(r'^topics/$', views.topics, name='topics'),
    url(r'^topics/(?P<topic_id>\d+)/$', views.topic, name='topic'),
    # Forms for adding a topic and adding/editing entries
    url(r'^new_topic/$', views.new_topic, name='new_topic'),
    url(r'^new_entry/(?P<topic_id>\d+)/$', views.new_entry, name='new_entry'),
    url(r'^edit_entry/(?P<entry_id>\d+)/$', views.edit_entry, name='edit_entry'),
] | StarcoderdataPython |
297768 | # import pickle
from log_info import log_warning, log_info
def save_file(content, file_path, mode="w", encoding="utf-8"):
    """Write string or binary content to a file.

    :param content: data to write; ``str`` for text modes, ``bytes`` for
        binary modes ('b' in *mode*). ``None`` is logged and skipped.
    :param file_path: destination path.
    :param mode: mode passed to ``open()`` (e.g. "w", "a", "wb").
    :param encoding: text encoding; ignored for binary modes, since
        ``open()`` rejects an encoding argument there.
    """
    if content is None:
        log_warning(f"File content is empty: save_file({content}, {file_path}, {mode}, {encoding})")
        return
    # Binary modes must not receive an encoding argument (open() raises
    # ValueError otherwise) -- this previously broke the documented
    # "binary content" use case.
    effective_encoding = None if "b" in mode else encoding
    # "with" guarantees the handle is closed even if write() raises; the
    # previous code leaked the handle on failure.
    with open(file_path, mode=mode, encoding=effective_encoding) as saved_file:
        saved_file.write(content)
| StarcoderdataPython |
8140216 | # -*- coding: utf-8 -*-
from node.utils import UNSET
from pkg_resources import iter_entry_points
from yafowil.compat import BYTES_TYPE
from yafowil.compat import IS_PY2
from yafowil.compat import LONG_TYPE
from yafowil.compat import STR_TYPE
from yafowil.compat import UNICODE_TYPE
import inspect
import json
import logging
import re
import unicodedata
import uuid
class entry_point(object):
    """Decorator marking a callable as a yafowil entry point.

    The given ``order`` value is attached to the decorated object and is
    later used to sort registered plugin callbacks.
    """

    def __init__(self, order=0):
        self.order = order

    def __call__(self, ob):
        setattr(ob, 'order', self.order)
        return ob
# Lazily-populated, process-wide cache of (entry_point, callback) pairs.
_yafowil_plugins = None
def get_plugins(ns=None):
    """Yield ``(entry_point, callback)`` pairs for 'yafowil.plugin' entry points.

    The registry is loaded lazily on first call, sorted by each callback's
    ``order`` attribute (default 0, see the ``entry_point`` decorator) and
    cached in ``_yafowil_plugins``.

    :param ns: optional entry point name; when given, only plugins whose
        entry point name equals ``ns`` are yielded.
    """
    global _yafowil_plugins
    if _yafowil_plugins is None:
        _yafowil_plugins = list()
        for ep in iter_entry_points('yafowil.plugin'):
            cb = ep.load()
            _yafowil_plugins.append((ep, cb))
        # stable sort by plugin-declared order
        _yafowil_plugins = sorted(
            _yafowil_plugins,
            key=lambda x: getattr(x[1], 'order', 0)
        )
    for ep, cb in _yafowil_plugins:
        if ns is not None and ep.name != ns:
            continue
        yield (ep, cb)
# Per-namespace cache of distinct plugin project names.
_plugin_names = dict()
def get_plugin_names(ns=None):
    """Return the distinct project names providing plugins, cached per *ns*."""
    if ns not in _plugin_names:
        unique = set(ep.dist.project_name for ep, cb in get_plugins(ns=ns))
        _plugin_names[ns] = list(unique)
    return _plugin_names[ns]
def get_example(example_name):
    """Return the info provided by the 'example' entry point of the project
    named *example_name*, or None if no such entry point is registered.
    """
    for ep, factory in get_plugins(ns='example'):
        if ep.dist.project_name == example_name:
            return factory()
    return None
def get_example_names():
    """Return the project names of all registered 'example' entry points."""
    return [ep.dist.project_name for ep, _cb in get_plugins(ns='example')]
def vocabulary(definition):
    """Convert different kinds of input into a list of bi-tuples, both strings.

    Accepted inputs: a callable (called first), a plain string, a mapping,
    or an iterable of strings and/or sequences. Non-string, non-iterable
    entries inside an iterable are silently dropped, matching the previous
    behavior.
    """
    if callable(definition):
        definition = definition()
    if isinstance(definition, STR_TYPE):
        return [(definition, definition)]
    # mapping: pair each key with its value
    if hasattr(definition, '__getitem__') and hasattr(definition, 'keys'):
        return [(key, definition[key]) for key in definition.keys()]
    # generic iterable
    if hasattr(definition, '__iter__'):
        normalized = []
        for entry in definition:
            if isinstance(entry, STR_TYPE):
                normalized.append((entry, entry))
            elif hasattr(entry, '__iter__'):
                members = [member for member in entry]
                if len(members) > 1:
                    # take first two parts and skip the rest
                    normalized.append(entry[0:2])
                else:
                    # rare case, inner sequence has one value only
                    normalized.append((entry[0], entry[0]))
        return normalized
    return definition
class Tag(object):
    """Callable XML/HTML tag renderer.

    ``translate`` is a message translation callable applied to every
    attribute value and every inner child before rendering.
    """
    def __init__(self, translate):
        self.translate = translate
        # encoding used to decode byte strings to unicode before rendering
        self.encoding = 'utf-8'
    def __call__(self, tag_name, *inners, **attributes):
        """Generates some xml/html tag.
        ``tagname``
            name of a valid tag
        ``inners``
            inner content of the tag. If empty a closed tag is generated
        ``attributes``
            attributes of the tag, leading or trailing ``_`` underscores are
            omitted from keywords.
        Example::
            >>> tag('p', 'Lorem Ipsum.', u'Hello World!',
            ...     class_='fancy', id='2f5b8a234ff')
            <p class="fancy" id="2f5b8a234ff">Lorem Ipsum. Hello World.</p>
        """
        cl = list()
        for key, value in attributes.items():
            # attributes explicitly set to None/UNSET are skipped entirely
            if value is None or value is UNSET:
                continue
            value = self.translate(value)
            if not isinstance(value, UNICODE_TYPE):
                # XXX: value = str(value).decode(self.encoding)
                if isinstance(value, bytes):
                    value = value.decode(self.encoding)
                else:
                    value = str(value)
            # leading/trailing underscores dropped so e.g. class_ -> class
            cl.append((key.strip('_'), value))
        attributes = u''
        # NOTE: data attributes are enclosed in single quotes, since this makes
        # passing json lists possible. jQuery only recognizes JSON lists in
        # data attributes as such, if they are enclosed in single quotes,
        # because the JSON standard requires string values to be enclosed in
        # double quotes.
        if cl:
            attributes = list()
            for attr in cl:
                if 'data-' in attr[0]:
                    attributes.append(u"{0}='{1}'".format(*attr))
                else:
                    attributes.append(u'{0}="{1}"'.format(*attr))
            # sorted for deterministic output regardless of kwargs order
            attributes = u' {0}'.format(u' '.join(sorted(attributes)))
        cl = list()
        for inner in inners:
            inner = self.translate(inner)
            if not isinstance(inner, UNICODE_TYPE):
                # XXX: inner = str(inner).decode(self.encoding)
                if isinstance(inner, bytes):
                    inner = inner.decode(self.encoding)
                else:
                    inner = str(inner)
            cl.append(inner)
        # no inner content -> self-closing tag
        if not cl:
            return u'<{name}{attrs} />'.format(**{
                'name': tag_name,
                'attrs': attributes,
            })
        return u'<{name}{attrs}>{value}</{name}>'.format(**{
            'name': tag_name,
            'attrs': attributes,
            'value': u''.join(i for i in cl),
        })
# Deprecation message
def _deprecated_null_localization(msg):
    # No-op "translator" wired into the module-level ``tag`` below; it only
    # warns that the module-level tag instance is deprecated and returns the
    # message unchanged.
    logging.warning(
        "Deprecated usage of 'yafowil.utils.tag', please "
        "use the tag factory on RuntimeData instead."
    )
    return msg
tag = Tag(_deprecated_null_localization)  # B/C module-level tag renderer
class managedprops(object):
    """Decorator recording which widget properties a renderer manages.

    The property names are attached to the decorated callable via the
    ``__yafowil_managed_props__`` attribute.
    """

    def __init__(self, *args):
        self.__yafowil_managed_props__ = args

    def __call__(self, func):
        func.__yafowil_managed_props__ = self.__yafowil_managed_props__
        return func
def cssid(widget, prefix, postfix=None):
    """Compute a normalized ASCII css id for *widget*.

    Structural widgets get no id (None). The id is built from the prefix,
    the widget's dotted path (dots replaced by dashes) and an optional
    postfix; the result is NFKD-normalized, reduced to ASCII and spaces
    are replaced by underscores.
    """
    if widget.attrs.get('structural', False):
        return None
    ident = u'{0}-{1}'.format(prefix, widget.dottedpath.replace(u'.', u'-'))
    if postfix is not None:
        ident = u'{0}-{1}'.format(ident, postfix)
    ascii_id = unicodedata.normalize('NFKD', ident).encode('ASCII', 'ignore')
    return ascii_id.replace(b' ', b'_').decode()
def callable_value(value, widget, data):
    """Call value if callable with widget and data as arguments and return
    the callables return value. If value not callable, return as is.
    As B/C mode, if callable accepts no arguments, try to call without
    arguments.
    """
    if not callable(value):
        return value
    try:
        # assume property factory signature
        # XXX: use keyword arguments?
        # XXX: if callable raises TypeError we get non clear follow up
        # errors.
        return value(widget, data)
    except TypeError:
        try:
            # assume function or class
            spec = inspect.getargspec(value)
        except TypeError:
            # builtins and some callables have no inspectable signature
            spec = None
        if spec is not None:
            # assume B/C property factory signature if argument specs found
            if len(spec.args) <= 1 and not spec.keywords:
                try:
                    res = value()
                    logging.warning(
                        "Deprecated usage of callback attributes. Please "
                        "accept 'widget' and 'data' as arguments."
                    )
                    return res
                except TypeError:
                    # XXX: raise here?
                    return value
        # XXX: raise here?
        return value
def attr_value(key, widget, data, default=None):
    """Return widget attribute value by key or default. If value is callable,
    it's return value is used.
    """
    raw = widget.attrs.get(key, default)
    return callable_value(raw, widget, data)
def as_data_attrs(data):
    """Convert either dict or list of (key, value) pairs into dict containing
    HTML5 data attributes.

    Keys gets prefixed with ``data-``, ``CamelCase`` gets converted
    to ``caml-case``.

    Values are ignored if ``None`` or ``UNSET``. If value is string, it's
    taken as is, otherwise it's assumed that value is list or dict and gets
    dumped as JSON string.

    :param data: Either dict or list of (key, value) pairs.
    :return: Dict containing HTML5 data attributes
    """
    data_attrs = {}
    # no data passed, return empty dict
    if not data:
        return data_attrs
    # expect dict if no list
    if not isinstance(data, list):
        data = data.items()
    for key, val in data:
        # check against None and UNSET separately to please coverage tests
        # rnix, 2014-04-30
        if val is None:
            continue
        if val is UNSET:
            continue
        # convert value to JSON dump if no string.
        if not isinstance(val, STR_TYPE):
            # also remove leading and trailing double quotes,
            # they are not needed for data-attributes
            val = json.dumps(val).strip('"')
        # replace camelCase with camel-case; raw strings fix the invalid
        # '\g' escape sequence the original pattern relied on (a
        # DeprecationWarning today, a SyntaxError in future Pythons).
        key = re.sub(r'([a-z])([A-Z])', r'\g<1>-\g<2>', key).lower()
        data_attrs['data-{0}'.format(key)] = val
    return data_attrs


# B/C: deprecate as of yafowil 2.4, remove in yafowil 3.0
generic_html5_attrs = as_data_attrs
def data_attrs_helper(widget, data, attrs):
    """Create a dictionary of JSON-encoded data-attributes from a list of
    attribute keys, ready to inject to a tag-renderer as expanded keyword
    arguments.

    :param widget: The yafowil widget.
    :param data: The data object.
    :param attrs: Attribute keys used to build the data-attributes dict.
    :type attrs: list
    :returns: Dict mapping 'data-' prefixed attribute names to widget values.
    :rtype: dictionary

    Each value is looked up on the widget via attr_value(); as_data_attrs()
    then drops None/UNSET values, JSON-encodes non-strings and rewrites
    camelCase keys to camel-case (jQuery's .data() converts them back to
    camelCase on read).
    """
    return as_data_attrs([(key, attr_value(key, widget, data)) for key in attrs])
# Widget attribute names whose values feed into cssclasses(); widgets list
# them as "managed" css-related properties (see the managedprops decorator).
css_managed_props = [
    'class', 'class_add',
    'error_class', 'error_class_default',
    'required_class', 'required_class_default',
]
def cssclasses(widget, data, classattr='class', additional=[]):
    """Compute the css class string for *widget*.

    Collects error/required marker classes, the widget's own *classattr* /
    ``class_add`` values and any truthy *additional* names, then returns
    them space-joined in sorted order, or None when no class applies.
    """
    attrs = widget.attrs
    names = list()
    if attrs['error_class'] and data.errors:
        # a string error_class is used verbatim, any other truthy value
        # selects the configured default
        marker = attrs['error_class']
        names.append(marker if isinstance(marker, STR_TYPE)
                     else attrs['error_class_default'])
    if attrs['required_class'] and attrs['required']:
        marker = attrs['required_class']
        names.append(marker if isinstance(marker, STR_TYPE)
                     else attrs['required_class_default'])
    if attrs[classattr]:
        names += attr_value(classattr, widget, data).split()
    if attrs['class_add']:
        names += attr_value('class_add', widget, data).split()
    names += [extra for extra in additional if extra]
    return ' '.join(sorted(names)) if names else None
class EmptyValue(object):
    """Marker singleton identifying empty values in datatype conversion.

    The marker is falsy, renders as the empty string and reports zero
    length, so it behaves like '' in most contexts while still being
    distinguishable by identity.
    """

    def __repr__(self):
        return '<EMPTY_VALUE>'

    def __str__(self):
        return ''

    def __len__(self):
        return 0

    def __nonzero__(self):
        # Python 2 truth hook; Python 3 derives falsiness from __len__.
        return False


EMPTY_VALUE = EmptyValue()
# Pre-converters normalize raw input before the actual datatype conversion;
# for floats a comma decimal separator is turned into a dot.
DATATYPE_PRECONVERTERS = {
    float: lambda x: isinstance(x, STR_TYPE) and x.replace(',', '.') or x
}
# B/C
DATATYPE_PRECONVERTERS['float'] = DATATYPE_PRECONVERTERS[float]
# String datatype names accepted by convert_value_to_datatype(), mapped to
# their converter callables.
DATATYPE_CONVERTERS = {
    'str': BYTES_TYPE,
    'unicode': UNICODE_TYPE,
    'int': int,
    'integer': int,
    'long': LONG_TYPE,
    'float': float,
    'uuid': uuid.UUID
}
def convert_value_to_datatype(value, datatype, empty_value=EMPTY_VALUE):
    """Convert given value to datatype.

    Datatype is either a callable or a string out of ``'str'``, ``'unicode'``,
    ``'int'``, ``'integer'``, ``'long'``, ``'float'`` or ``'uuid'``

    If value is ``UNSET``, return ``UNSET``, regardless of given datatype.

    If value is ``EMPTY_VALUE``, return ``empty_value``, which defaults to
    ``EMPTY_VALUE`` marker.

    If value is ``None`` or ``''``, return ``empty_value``, which defaults to
    ``EMPTY_VALUE`` marker. Be aware that empty value marker is even returned
    if ``str`` datatype, to provide a consistent behavior.

    Converter callables must raise one out of the following exceptions if
    conversion fails:

    * ``ValueError``
    * ``UnicodeDecodeError``
    * ``UnicodeEncodeError``
    """
    if value is UNSET:
        return UNSET
    if value is EMPTY_VALUE:
        return empty_value
    if value in [None, '']:
        return empty_value
    # string datatype names resolve through the converter table, anything
    # else is used as the converter callable directly
    if isinstance(datatype, STR_TYPE):
        converter = DATATYPE_CONVERTERS[datatype]
    else:
        converter = datatype
    try:
        # value already of the requested type -- return unchanged
        if isinstance(value, converter):
            return value
    except TypeError:
        # converter is instance of class or function
        pass
    preconverter = DATATYPE_PRECONVERTERS.get(datatype)
    if preconverter:
        value = preconverter(value)
    # special case bytes or str buildin type in python 3
    # uses ascii codec to emulate same behavior as when converting with python2
    # this is supposed to change in future
    if not IS_PY2 and converter in (bytes, str):
        return converter(value, 'ascii')
    return converter(value)
def convert_values_to_datatype(value, datatype, empty_value=EMPTY_VALUE):
    """Convert a single value, or every item of a list, to *datatype*.

    Delegates to convert_value_to_datatype(); a list input yields a new
    list with each item converted individually.
    """
    if not isinstance(value, list):
        return convert_value_to_datatype(value, datatype, empty_value=empty_value)
    return [
        convert_value_to_datatype(item, datatype, empty_value=empty_value)
        for item in value
    ]
| StarcoderdataPython |
8051942 | <reponame>artexmg/resp-science<filename>respscience/process_file.py
import sys
import os
import convert_to_excel
if __name__ == "__main__":
    # CLI: process_file.py [input_dir [output_dir]]
    # defaults: input = current directory, output = <input>/output
    argv = sys.argv
    input_path = argv[1] if len(argv) > 1 else "."
    output_path = argv[2] if len(argv) > 2 else os.path.join(input_path, "output")
    print(f"input: {input_path}")
    print(f"output: {output_path}")
    # Hand every regular file in the input directory to the converter.
    with os.scandir(input_path) as entries:
        for entry in entries:
            if not entry.is_file():
                continue
            print(f"processing {entry.name}")
            filename = os.path.join(input_path, entry.name)
            convert_to_excel.process_file(filename, output_path)
| StarcoderdataPython |
4833128 | #@<OUT> mysqlx.Type
NAME
Type - Data type constants.
SYNTAX
mysqlx.Type
DESCRIPTION
The data type constants assigned to a Column object retrieved through
RowResult.get_columns().
PROPERTIES
BIGINT
A large integer.
BIT
A bit-value type.
BYTES
A binary string.
DATE
A date.
DATETIME
A date and time combination.
DECIMAL
A packed "exact" fixed-point number.
ENUM
An enumeration.
FLOAT
A floating-point number.
GEOMETRY
A geometry type.
INT
A normal-size integer.
JSON
A JSON-format string.
MEDIUMINT
A medium-sized integer.
SET
A set.
SMALLINT
A small integer.
STRING
A character string.
TIME
A time.
TINYINT
A very small integer.
FUNCTIONS
help([member])
Provides help about this class and it's members
#@<OUT> mysqlx.date_value
NAME
date_value - Creates a Date object which represents a date time.
SYNTAX
mysqlx.date_value(year, month, day[, hour, day, minute[, milliseconds]])
WHERE
year: The year to be used in the new Date object.
month: The month to be used in the new Date object.
day: The day to be used in the new Date object.
hour: Hour to be used in the new Date object.
minutes: Minutes to be used in the new Date object.
seconds: Seconds to be used in the new Date object.
milliseconds: Milliseconds to be used in the new Date object.
DESCRIPTION
This function creates a Date object containing:
- A date value.
- A date and time value.
- A date and time value with milliseconds.
#@<OUT> mysqlx.expr
NAME
expr - Creates an Expression object based on a string.
SYNTAX
mysqlx.expr(expressionStr)
WHERE
expressionStr: The expression to be represented by the object
DESCRIPTION
An expression object is required in many operations on the X DevAPI.
Some applications of the expression objects include:
- Creation of documents based on a JSON string
- Defining calculated fields when inserting data on the database
- Defining calculated fields when pulling data from the database
#@<OUT> mysqlx.get_session
NAME
get_session - Creates a Session instance using the provided connection
data.
SYNTAX
mysqlx.get_session(connectionData[, password])
WHERE
connectionData: The connection data for the session
password: The password to be used in the session.
RETURNS
A Session
DESCRIPTION
A Session object uses the X Protocol to allow executing operations on the
connected MySQL Server.
The connection data may be specified in the following formats:
- A URI string
- A dictionary with the connection options
A basic URI string has the following format:
[scheme://][user[:password]@]<host[:port]|socket>[/schema][?option=value&option=value...]
Connection Options
The following options are valid for use either in a URI or in a
dictionary:
- ssl-mode: The SSL mode to be used in the connection.
- ssl-ca: The path to the X509 certificate authority file in PEM format.
- ssl-capath: The path to the directory that contains the X509
certificate authority files in PEM format.
- ssl-cert: The path to the SSL public key certificate file in PEM
format.
- ssl-key: The path to the SSL private key file in PEM format.
- ssl-crl: The path to file that contains certificate revocation lists.
- ssl-crlpath: The path of directory that contains certificate revocation
list files.
- ssl-cipher: The list of permissible encryption ciphers for connections
that use TLS protocols up through TLSv1.2.
- tls-version: List of protocols permitted for secure connections.
- tls-ciphers: List of TLS v1.3 ciphers to use.
- auth-method: Authentication method.
- get-server-public-key: Request public key from the server required for
RSA key pair-based password exchange. Use when connecting to MySQL 8.0
servers with classic MySQL sessions with SSL mode DISABLED.
- server-public-key-path: The path name to a file containing a
client-side copy of the public key required by the server for RSA key
pair-based password exchange. Use when connecting to MySQL 8.0 servers
with classic MySQL sessions with SSL mode DISABLED.
- connect-timeout: The connection timeout in milliseconds. If not
provided a default timeout of 10 seconds will be used. Specifying a
value of 0 disables the connection timeout.
- compression: Enable compression in client/server protocol.
- compression-algorithms: Use compression algorithm in server/client
protocol.
- compression-level: Use this compression level in the client/server
protocol.
- connection-attributes: List of connection attributes to be registered
at the PERFORMANCE_SCHEMA connection attributes tables.
- local-infile: Enable/disable LOAD DATA LOCAL INFILE.
- net-buffer-length: The buffer size for TCP/IP and socket communication.
When these options are defined in a URI, their values must be URL
encoded.
The following options are also valid when a dictionary is used:
Base Connection Options
- scheme: the protocol to be used on the connection.
- user: the MySQL user name to be used on the connection.
- dbUser: alias for user.
- password: the password to be used on the connection.
- dbPassword: same as password.
- host: the hostname or IP address to be used on the connection.
- port: the port to be used in a TCP connection.
- socket: the socket file name to be used on a connection through unix
sockets.
- schema: the schema to be selected once the connection is done.
ATTENTION: The dbUser and dbPassword options will be removed in a
future release.
The connection options are case insensitive and can only be defined once.
If an option is defined more than once, an error will be generated.
For additional information on connection data use \? connection.
#@<OUT> mysqlx.help
NAME
help - Provides help about this module and it's members
SYNTAX
mysqlx.help([member])
WHERE
member: If specified, provides detailed information on the given member.
#@<OUT> mysqlx help
NAME
mysqlx - Encloses the functions and classes available to interact with an
X Protocol enabled MySQL Product.
DESCRIPTION
The objects contained on this module provide a full API to interact with
the different MySQL Products implementing the X Protocol.
In the case of a MySQL Server the API will enable doing operations on the
different database objects such as schema management operations and both
table and collection management and CRUD operations. (CRUD: Create, Read,
Update, Delete).
Intention of the module is to provide a full API for development through
scripting languages such as JavaScript and Python, this would be normally
achieved through a normal session.
To use the properties and functions available on this module you first
need to import it.
When running the shell in interactive mode, this module is automatically
imported.
CONSTANTS
- LockContention Row locking mode constants.
- Type Data type constants.
FUNCTIONS
date_value(year, month, day[, hour, day, minute[, milliseconds]])
Creates a Date object which represents a date time.
expr(expressionStr)
Creates an Expression object based on a string.
get_session(connectionData[, password])
Creates a Session instance using the provided connection data.
help([member])
Provides help about this module and it's members
CLASSES
- BaseResult Base class for the different types of results returned by
the server.
- Collection A Collection is a container that may be used to store
Documents in a MySQL database.
- CollectionAdd Operation to insert documents into a Collection.
- CollectionFind Operation to retrieve documents from a Collection.
- CollectionModify Operation to update documents on a Collection.
- CollectionRemove Operation to delete documents on a Collection.
- DatabaseObject Provides base functionality for database objects.
- DocResult Allows traversing the DbDoc objects returned by a
Collection.find operation.
- Result Allows retrieving information about non query operations
performed on the database.
- RowResult Allows traversing the Row objects returned by a
Table.select operation.
- Schema Represents a Schema as retrieved from a session created
using the X Protocol.
- Session Enables interaction with a MySQL Server using the X
Protocol.
- SqlExecute Handler for execution SQL statements, supports parameter
binding.
- SqlResult Allows browsing through the result information after
performing an operation on the database done through
Session.sql
- Table Represents a Table on an Schema, retrieved with a session
created using mysqlx module.
- TableDelete Operation to delete data from a table.
- TableInsert Operation to insert data into a table.
- TableSelect Operation to retrieve rows from a table.
- TableUpdate Operation to add update records in a Table.
#@<OUT> Help on LockContention
NAME
LockContention - Row locking mode constants.
SYNTAX
mysqlx.LockContention
DESCRIPTION
These constants are used to indicate the locking mode to be used at the
lock_shared and lock_exclusive functions of the TableSelect and
CollectionFind objects.
PROPERTIES
DEFAULT
A default locking mode.
NOWAIT
A locking read never waits to acquire a row lock. The query
executes immediately, failing with an error if a requested row is
locked.
SKIP_LOCKED
A locking read never waits to acquire a row lock. The query
executes immediately, removing locked rows from the result set.
FUNCTIONS
help([member])
Provides help about this class and it's members
#@<OUT> Help on BaseResult
NAME
BaseResult - Base class for the different types of results returned by
the server.
DESCRIPTION
Base class for the different types of results returned by the server.
PROPERTIES
affected_items_count
Same as get_affected_items_count
execution_time
Same as get_execution_time
warning_count
Same as get_warning_count
ATTENTION: This property will be removed in a future release, use
the warnings_count property instead.
warnings
Same as get_warnings
warnings_count
Same as get_warnings_count
FUNCTIONS
get_affected_items_count()
The number of affected items for the last operation.
get_execution_time()
Retrieves a string value indicating the execution time of the
executed operation.
get_warning_count()
The number of warnings produced by the last statement execution.
ATTENTION: This function will be removed in a future release, use
the get_warnings_count function instead.
get_warnings()
Retrieves the warnings generated by the executed operation.
get_warnings_count()
The number of warnings produced by the last statement execution.
help([member])
Provides help about this class and it's members
#@<OUT> Help on Collection
NAME
Collection - A Collection is a container that may be used to store
Documents in a MySQL database.
DESCRIPTION
A Document is a set of key and value pairs, as represented by a JSON
object.
A Document is represented internally using the MySQL binary JSON object,
through the JSON MySQL datatype.
The values of fields can contain other documents, arrays, and lists of
documents.
PROPERTIES
name
The name of this database object.
schema
The Schema object of this database object.
session
The Session object of this database object.
FUNCTIONS
add(...)
Creates a document addition handler.
add_or_replace_one(id, doc)
Replaces or adds a document in a collection.
count()
Returns the number of documents in the collection.
create_index(name, indexDefinition)
Creates an index on a collection.
drop_index()
Drops an index from a collection.
exists_in_database()
Verifies if this object exists in the database.
find([searchCondition])
Creates a handler which can be used to find documents.
get_name()
Returns the name of this database object.
get_one(id)
Fetches the document with the given _id from the collection.
get_schema()
Returns the Schema object of this database object.
get_session()
Returns the Session object of this database object.
help([member])
Provides help about this class and it's members
modify(searchCondition)
Creates a collection update handler.
remove(searchCondition)
Creates a document deletion handler.
remove_one(id)
Removes document with the given _id value.
replace_one(id, doc)
Replaces an existing document with a new document.
#@<OUT> Help on CollectionAdd
NAME
CollectionAdd - Operation to insert documents into a Collection.
DESCRIPTION
A CollectionAdd object represents an operation to add documents into a
Collection, it is created through the add function on the Collection
class.
FUNCTIONS
add(...)
Stores documents to be added into a collection.
execute()
Executes the add operation, the documents are added to the target
collection.
help([member])
Provides help about this class and it's members
#@<OUT> Help on CollectionFind
NAME
CollectionFind - Operation to retrieve documents from a Collection.
DESCRIPTION
A CollectionFind object represents an operation to retrieve documents
from a Collection, it is created through the find function on the
Collection class.
FUNCTIONS
bind(name, value)
Binds a value to a specific placeholder used on this CollectionFind
object.
execute()
Executes the find operation with all the configured options.
fields(...)
Sets the fields to be retrieved from each document matching the
criteria on this find operation.
find([searchCondition])
Sets the search condition to identify the Documents to be retrieved
from the owner Collection.
group_by(...)
Sets a grouping criteria for the resultset.
having(condition)
Sets a condition for records to be considered in aggregate function
operations.
help([member])
Provides help about this class and it's members
limit(numberOfDocs)
Sets the maximum number of documents to be returned by the
operation.
lock_exclusive([lockContention])
Instructs the server to acquire an exclusive lock on documents
matched by this find operation.
lock_shared([lockContention])
Instructs the server to acquire shared row locks in documents
matched by this find operation.
offset(quantity)
Sets number of documents to skip on the resultset when a limit has
been defined.
skip(numberOfDocs)
Sets number of documents to skip on the resultset when a limit has
been defined.
ATTENTION: This function will be removed in a future release, use
the offset() function instead.
sort(...)
Sets the sorting criteria to be used on the DocResult.
#@<OUT> Help on CollectionModify
NAME
CollectionModify - Operation to update documents on a Collection.
DESCRIPTION
A CollectionModify object represents an operation to update documents on
a Collection, it is created through the modify function on the Collection
class.
FUNCTIONS
array_append(docPath, value)
Appends a value into an array attribute in documents of a
collection.
array_delete(docPath)
Deletes the value at a specific position in an array attribute in
documents of a collection.
ATTENTION: This function will be removed in a future release, use
the unset() function instead.
array_insert(docPath, value)
Inserts a value into a specific position in an array attribute in
documents of a collection.
bind(name, value)
Binds a value to a specific placeholder used on this
CollectionModify object.
execute()
Executes the update operations added to the handler with the
configured filter and limit.
help([member])
Provides help about this class and it's members
limit(numberOfDocs)
Sets a limit for the documents to be updated by the operations
added to the handler.
merge(document)
Adds attributes taken from a document into the documents in a
collection.
ATTENTION: This function will be removed in a future release, use
the patch() function instead.
modify(searchCondition)
Sets the search condition to identify the Documents to be updated
on the owner Collection.
patch(document)
Performs modifications on a document based on a patch JSON object.
set(attribute, value)
Sets or updates attributes on documents in a collection.
sort(...)
Sets the document order in which the update operations added to the
handler should be done.
unset(...)
Removes attributes from documents in a collection.
#@<OUT> Help on CollectionRemove
NAME
CollectionRemove - Operation to delete documents on a Collection.
DESCRIPTION
A CollectionRemove object represents an operation to remove documents on
a Collection, it is created through the remove function on the Collection
class.
FUNCTIONS
bind(name, value)
Binds a value to a specific placeholder used on this
CollectionRemove object.
execute()
Executes the document deletion with the configured filter and
limit.
help([member])
Provides help about this class and it's members
limit(numberOfDocs)
Sets a limit for the documents to be deleted.
remove(searchCondition)
Sets the search condition to filter the documents to be deleted
from the owner Collection.
sort(...)
Sets the order in which the deletion should be done.
#@<OUT> Help on DatabaseObject
NAME
DatabaseObject - Provides base functionality for database objects.
DESCRIPTION
Provides base functionality for database objects.
PROPERTIES
name
The name of this database object.
schema
The Schema object of this database object.
session
The Session object of this database object.
FUNCTIONS
exists_in_database()
Verifies if this object exists in the database.
get_name()
Returns the name of this database object.
get_schema()
Returns the Schema object of this database object.
get_session()
Returns the Session object of this database object.
help([member])
Provides help about this class and it's members
#@<OUT> Help on DocResult
NAME
DocResult - Allows traversing the DbDoc objects returned by a
Collection.find operation.
DESCRIPTION
Allows traversing the DbDoc objects returned by a Collection.find
operation.
PROPERTIES
affected_items_count
Same as get_affected_items_count
execution_time
Same as get_execution_time
warning_count
Same as get_warning_count
ATTENTION: This property will be removed in a future release, use
the warnings_count property instead.
warnings
Same as get_warnings
warnings_count
Same as get_warnings_count
FUNCTIONS
fetch_all()
Returns a list of DbDoc objects which contains an element for every
unread document.
fetch_one()
Retrieves the next DbDoc on the DocResult.
get_affected_items_count()
The number of affected items for the last operation.
get_execution_time()
Retrieves a string value indicating the execution time of the
executed operation.
get_warning_count()
The number of warnings produced by the last statement execution.
ATTENTION: This function will be removed in a future release, use
the get_warnings_count function instead.
get_warnings()
Retrieves the warnings generated by the executed operation.
get_warnings_count()
The number of warnings produced by the last statement execution.
help([member])
Provides help about this class and it's members
#@<OUT> Help on Result
NAME
Result - Allows retrieving information about non query operations
performed on the database.
DESCRIPTION
An instance of this class will be returned on the CRUD operations that
change the content of the database:
- On Table: insert, update and delete
- On Collection: add, modify and remove
Other functions on the Session class also return an instance of this
class:
- Transaction handling functions
PROPERTIES
affected_item_count
Same as get_affected_item_count
ATTENTION: This property will be removed in a future release, use
the affected_items_count property instead.
affected_items_count
Same as get_affected_items_count
auto_increment_value
Same as get_auto_increment_value
execution_time
Same as get_execution_time
generated_ids
Same as get_generated_ids.
warning_count
Same as get_warning_count
ATTENTION: This property will be removed in a future release, use
the warnings_count property instead.
warnings
Same as get_warnings
warnings_count
Same as get_warnings_count
FUNCTIONS
get_affected_item_count()
The number of affected items for the last operation.
ATTENTION: This function will be removed in a future release, use
the get_affected_items_count function instead.
get_affected_items_count()
The number of affected items for the last operation.
get_auto_increment_value()
The last insert id auto generated (from an insert operation)
get_execution_time()
Retrieves a string value indicating the execution time of the
executed operation.
get_generated_ids()
Returns the list of document ids generated on the server.
get_warning_count()
The number of warnings produced by the last statement execution.
ATTENTION: This function will be removed in a future release, use
the get_warnings_count function instead.
get_warnings()
Retrieves the warnings generated by the executed operation.
get_warnings_count()
The number of warnings produced by the last statement execution.
help([member])
Provides help about this class and it's members
#@<OUT> Help on RowResult
NAME
RowResult - Allows traversing the Row objects returned by a Table.select
operation.
DESCRIPTION
Allows traversing the Row objects returned by a Table.select operation.
PROPERTIES
affected_items_count
Same as get_affected_items_count
column_count
Same as get_column_count
column_names
Same as get_column_names
columns
Same as get_columns
execution_time
Same as get_execution_time
warning_count
Same as get_warning_count
ATTENTION: This property will be removed in a future release, use
the warnings_count property instead.
warnings
Same as get_warnings
warnings_count
Same as get_warnings_count
FUNCTIONS
fetch_all()
Returns a list of DbDoc objects which contains an element for every
unread document.
fetch_one()
Retrieves the next Row on the RowResult.
fetch_one_object()
Retrieves the next Row on the result and returns it as an object.
get_affected_items_count()
The number of affected items for the last operation.
get_column_count()
Retrieves the number of columns on the current result.
get_column_names()
Gets the columns on the current result.
get_columns()
Gets the column metadata for the columns on the active result.
get_execution_time()
Retrieves a string value indicating the execution time of the
executed operation.
get_warning_count()
The number of warnings produced by the last statement execution.
ATTENTION: This function will be removed in a future release, use
the get_warnings_count function instead.
get_warnings()
Retrieves the warnings generated by the executed operation.
get_warnings_count()
The number of warnings produced by the last statement execution.
help([member])
Provides help about this class and it's members
#@<OUT> Help on Schema
NAME
Schema - Represents a Schema as retrieved from a session created using
the X Protocol.
DESCRIPTION
View Support
MySQL Views are stored queries that when executed produce a result set.
MySQL supports the concept of Updatable Views: if specific conditions are
met, Views can be used not only to retrieve data from them but also to
update, add and delete records.
For the purpose of this API, Views behave similar to a Table, and so they
are treated as Tables.
Tables and Collections as Properties
A Schema object may expose tables and collections as properties, this way
they can be accessed as:
- schema.<collection_name>
- schema.<table_name>
This handy way of accessing tables and collections is available if they
met the following conditions:
- They existed at the moment the Schema object was retrieved from the
session.
- The name is a valid identifier.
- The name is different from any other property or function on the Schema
object.
If any of the conditions is not met, the way to access the table or
collection is by using the standard DevAPI functions:
- schema.get_table(<name>)
- schema.get_collection(<name>)
PROPERTIES
name
The name of this database object.
schema
The Schema object of this database object.
session
The Session object of this database object.
Some tables and collections are also exposed as properties of the Schema
object. For details look at 'Tables and Collections as Properties' on the
DETAILS section.
FUNCTIONS
create_collection(name[, options])
Creates in the current schema a new collection with the specified
name and retrieves an object representing the new collection
created.
drop_collection()
Drops the specified collection.
exists_in_database()
Verifies if this object exists in the database.
get_collection(name)
Returns the Collection of the given name for this schema.
get_collection_as_table(name)
Returns a Table object representing a Collection on the database.
get_collections()
Returns a list of Collections for this Schema.
get_name()
Returns the name of this database object.
get_schema()
Returns the Schema object of this database object.
get_session()
Returns the Session object of this database object.
get_table(name)
Returns the Table of the given name for this schema.
get_tables()
Returns a list of Tables for this Schema.
help([member])
Provides help about this class and it's members
modify_collection(name, options)
Modifies the schema validation of a collection.
RELATED TOPICS
- Dynamic Properties
#@<OUT> Help on Session
NAME
Session - Enables interaction with a MySQL Server using the X Protocol.
DESCRIPTION
Document Store functionality can be used through this object, in addition
to SQL.
This class allows performing database operations such as:
- Schema management operations.
- Access to relational tables.
- Access to Document Store collections.
- Enabling/disabling warning generation.
- Retrieval of connection information.
PROPERTIES
current_schema
Retrieves the active schema on the session.
default_schema
Retrieves the Schema configured as default for the session.
uri
Retrieves the URI for the current session.
FUNCTIONS
close()
Closes the session.
commit()
Commits all the operations executed after a call to
start_transaction().
create_schema(name)
Creates a schema on the database and returns the corresponding
object.
drop_schema(name)
Drops the schema with the specified name.
get_current_schema()
Retrieves the active schema on the session.
get_default_schema()
Retrieves the Schema configured as default for the session.
get_schema(name)
Retrieves a Schema object from the current session through it's
name.
get_schemas()
Retrieves the Schemas available on the session.
get_uri()
Retrieves the URI for the current session.
help([member])
Provides help about this class and it's members
is_open()
Returns true if session is known to be open.
quote_name(id)
Escapes the passed identifier.
release_savepoint(name)
Removes a savepoint defined on a transaction.
rollback()
Discards all the operations executed after a call to
start_transaction().
rollback_to(name)
Rolls back the transaction to the named savepoint without
terminating the transaction.
run_sql(query[, args])
Executes a query and returns the corresponding SqlResult object.
set_current_schema(name)
Sets the current schema for this session, and returns the schema
object for it.
set_fetch_warnings(enable)
Enables or disables warning generation.
set_savepoint([name])
Creates or replaces a transaction savepoint with the given name.
sql(statement)
Creates a SqlExecute object to allow running the received SQL
statement on the target MySQL Server.
start_transaction()
Starts a transaction context on the server.
#@<OUT> Help on SqlExecute
NAME
SqlExecute - Handler for execution SQL statements, supports parameter
binding.
DESCRIPTION
This object should only be created by calling the sql function at a
Session instance.
FUNCTIONS
bind(data)
Registers a value or a list of values to be bound on the execution
of the SQL statement.
execute()
Executes the sql statement.
help([member])
Provides help about this class and it's members
sql(statement)
Sets the sql statement to be executed by this handler.
#@<OUT> Help on Table
NAME
Table - Represents a Table on an Schema, retrieved with a session created
using mysqlx module.
DESCRIPTION
Represents a Table on an Schema, retrieved with a session created using
mysqlx module.
PROPERTIES
name
The name of this database object.
schema
The Schema object of this database object.
session
The Session object of this database object.
FUNCTIONS
count()
Returns the number of records in the table.
delete()
Creates a record deletion handler.
exists_in_database()
Verifies if this object exists in the database.
get_name()
Returns the name of this database object.
get_schema()
Returns the Schema object of this database object.
get_session()
Returns the Session object of this database object.
help([member])
Provides help about this class and it's members
insert(...)
Creates TableInsert object to insert new records into the table.
is_view()
Indicates whether this Table object represents a View on the
database.
select(...)
Creates a TableSelect object to retrieve rows from the table.
update()
Creates a record update handler.
#@<OUT> Help on TableDelete
NAME
TableDelete - Operation to delete data from a table.
DESCRIPTION
A TableDelete represents an operation to remove records from a Table, it
is created through the delete function on the Table class.
FUNCTIONS
bind(name, value)
Binds a value to a specific placeholder used on this operation.
delete()
Initializes the deletion operation.
execute()
Executes the delete operation with all the configured options.
help([member])
Provides help about this class and it's members
limit(numberOfRows)
Sets the maximum number of rows to be deleted by the operation.
order_by(...)
Sets the order in which the records will be deleted.
where(expression)
Sets the search condition to filter the records to be deleted from
the Table.
#@<OUT> Help on TableInsert
NAME
TableInsert - Operation to insert data into a table.
DESCRIPTION
A TableInsert object is created through the insert function on the Table
class.
FUNCTIONS
execute()
Executes the insert operation.
help([member])
Provides help about this class and it's members
insert(...)
Initializes the insertion operation.
values(value[, value, ...])
Adds a new row to the insert operation with the given values.
#@<OUT> Help on TableSelect
NAME
TableSelect - Operation to retrieve rows from a table.
DESCRIPTION
A TableSelect represents a query to retrieve rows from a Table. It is
created through the select function on the Table class.
FUNCTIONS
bind(name, value)
Binds a value to a specific placeholder used on this operation.
execute()
Executes the select operation with all the configured options.
group_by(...)
Sets a grouping criteria for the retrieved rows.
having(condition)
Sets a condition for records to be considered in aggregate function
operations.
help([member])
Provides help about this class and it's members
limit(numberOfRows)
Sets the maximum number of rows to be returned on the select
operation.
lock_exclusive([lockContention])
Instructs the server to acquire an exclusive lock on rows matched
by this find operation.
lock_shared([lockContention])
Instructs the server to acquire shared row locks in documents
matched by this find operation.
offset(numberOfRows)
Sets number of rows to skip on the resultset when a limit has been
defined.
order_by(...)
Sets the order in which the records will be retrieved.
select(...)
Defines the columns to be retrieved from the table.
where(expression)
Sets the search condition to filter the records to be retrieved
from the Table.
#@<OUT> Help on TableUpdate
NAME
TableUpdate - Operation to update records in a Table.
DESCRIPTION
A TableUpdate object is used to update rows in a Table, is created
through the update function on the Table class.
FUNCTIONS
bind(name, value)
Binds a value to a specific placeholder used on this operation.
execute()
Executes the update operation with all the configured options.
help([member])
Provides help about this class and it's members
limit(numberOfRows)
Sets the maximum number of rows to be updated by the operation.
order_by(...)
Sets the order in which the records will be updated.
set(attribute, value)
Adds an update operation.
update()
Initializes the update operation.
where(expression)
Sets the search condition to filter the records to be updated.
| StarcoderdataPython |
1925604 | import os
# Minimal standalone Django settings module -- presumably used to run the
# airavata_django_portal_sdk test suite in isolation; confirm against the
# project documentation.

# Project root: two directory levels above this settings file.
BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# NOTE(review): a hard-coded secret key is only acceptable for local/test
# settings; it must never be reused in a deployed configuration.
SECRET_KEY = "abc123"

# Apps needed for the SDK's auth/contenttypes machinery plus the SDK itself.
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'airavata_django_portal_sdk',
]

# Filesystem directory backing the gateway data store.
GATEWAY_DATA_STORE_DIR = "/tmp"
# Storage resource identifier reported to the gateway -- placeholder value.
GATEWAY_DATA_STORE_RESOURCE_ID = "resourceId"

# Throwaway local SQLite database stored at the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASEDIR, 'db.sqlite3'),
    }
}
| StarcoderdataPython |
1843036 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript
from PyKDE4.kdecore import *
from PyKDE4.kdeui import *
# NOTE(review): the "<%= ... %>" markers are ERB template placeholders that a
# project generator expands; this file is not importable Python until rendered.
class <%= options[:project_name].capitalize %>(plasmascript.Applet):
    """Plasma scripted applet that embeds a single Plasma.WebView."""

    def __init__(self,parent,args=None):
        # Delegate to the base Applet; no construction-time state of our own.
        plasmascript.Applet.__init__(self,parent)

    def init(self):
        # Called by Plasma once the applet is attached to the workspace.
        self.setHasConfigurationInterface(False)
        self.resize(375, 525)
        self.setAspectRatioMode(Plasma.IgnoreAspectRatio)
        # Horizontal layout holding the single WebView child.
        self.layout = QGraphicsLinearLayout(Qt.Horizontal, self.applet)
        self.webView = Plasma.WebView(self.applet)
        self.webView.setUrl(KUrl("http://www.kde.org"))
        self.layout.addItem(self.webView)
        self.setLayout(self.layout)
def CreateApplet(parent):
    # Factory entry point invoked by the Plasma python scripting engine to
    # instantiate the applet (class name is an ERB placeholder, see above).
    # NOTE(review): the trailing "| StarcoderdataPython |" text below looks
    # like dataset/extraction residue, not code -- confirm and remove.
    return <%= options[:project_name].capitalize %>(parent) | StarcoderdataPython |
4842055 | """Python client for Sift Science's API.
See: https://siftscience.com/docs/references/events-api
"""
import json
import requests
import requests.auth
import sys
if sys.version_info[0] < 3:
import urllib
_UNICODE_STRING = basestring
else:
import urllib.parse as urllib
_UNICODE_STRING = str
import sift
import sift.version
# Default endpoint for the Events / Score / Labels APIs.
API_URL = 'https://api.siftscience.com'
# Secondary endpoint -- presumably backs the decisions/workflow (v3) API;
# confirm against the _url helper methods defined later in this module.
API3_URL = 'https://api3.siftscience.com'
# Accepted values for the 'source' field when applying a decision.
DECISION_SOURCES = ['MANUAL_REVIEW', 'AUTOMATED_RULE', 'CHARGEBACK']
def _quote_path(s):
# by default, urllib.quote doesn't escape forward slash; pass the
# optional arg to override this
return urllib.quote(s, '')
class Client(object):
def __init__(
        self,
        api_key=None,
        api_url=API_URL,
        timeout=2.0,
        account_id=None,
        version=sift.version.API_VERSION,
        session=None):
    """Initialize the client.

    Args:
        api_key: Your Sift Science API key associated with your customer
            account; falls back to the module-level ``sift.api_key``
            when omitted.  You can obtain this from
            https://siftscience.com/console/developer/api-keys .
        api_url: Base URL, including scheme and host, for sending events.
            Defaults to 'https://api.siftscience.com'.
        timeout: Number of seconds to wait before failing a request.
            Defaults to 2 seconds.
        account_id: The ID of your Sift Science account; falls back to
            the module-level ``sift.account_id`` when omitted.  You can
            obtain this from https://siftscience.com/console/account/profile .
        version: The version of the Sift Science API to call.  Defaults to
            the latest version supported by this package.
        session: Optional pre-configured ``requests.Session`` (e.g. for
            connection pooling); a fresh session is created when omitted.
    """
    # Validate the URL first, then resolve the key from the module-level
    # default *before* validating it, so a missing key raises the same
    # error whether it was passed explicitly or configured globally.
    _assert_non_empty_unicode(api_url, 'api_url')
    if api_key is None:
        api_key = sift.api_key
    _assert_non_empty_unicode(api_key, 'api_key')
    self.session = session or requests.Session()
    self.api_key = api_key
    self.url = api_url
    self.timeout = timeout
    self.account_id = account_id or sift.account_id
    self.version = version
def track(
        self,
        event,
        properties,
        path=None,
        return_score=False,
        return_action=False,
        return_workflow_status=False,
        force_workflow_run=False,
        abuse_types=None,
        timeout=None,
        version=None):
    """Track an event and associated properties via the Sift Science API.

    This call is blocking.  See
    https://siftscience.com/resources/references/events-api for the event
    types you can send and the fields accepted in ``properties``.

    Args:
        event: The name of the event to send.  Either a reserved event
            name such as "$transaction" or "$create_order", or a custom
            event name (which must not start with a $).
        properties: A dict of event-specific attributes to track.  The
            caller's dict is not modified by this call.
        path: Optional explicit endpoint URL; defaults to the events URL
            for the chosen API version.
        return_score: Whether the API response should include a score for
            this user (the score will be calculated using this event).
        return_action: Whether the API response should include actions.
            See https://siftscience.com/resources/tutorials/formulas .
        return_workflow_status: Whether the API response should include
            the status of any workflow run as a result of this event.
        force_workflow_run: Forces a workflow run for this event.
        abuse_types(optional): List of abuse types for which a score
            should be returned (if scores were requested).  Defaults to
            every abuse type you are subscribed to.
        timeout(optional): Use a custom timeout (in seconds) for this call.
        version(optional): Use a different version of the Sift Science API
            for this call.

    Returns:
        A sift.client.Response object if the track call succeeded,
        otherwise raises an ApiException.
    """
    _assert_non_empty_unicode(event, 'event')
    _assert_non_empty_dict(properties, 'properties')
    headers = {'Content-type': 'application/json',
               'Accept': '*/*',
               'User-Agent': self._user_agent()}
    if version is None:
        version = self.version
    if path is None:
        path = self._event_url(version)
    if timeout is None:
        timeout = self.timeout
    # Build the request payload on a shallow copy: the previous
    # implementation mutated the caller's dict in place, leaking
    # '$api_key' (a credential) and '$type' back into caller state.
    payload = dict(properties)
    payload.update({'$api_key': self.api_key, '$type': event})
    params = {}
    if return_score:
        params['return_score'] = 'true'
    if return_action:
        params['return_action'] = 'true'
    if abuse_types:
        params['abuse_types'] = ','.join(abuse_types)
    if return_workflow_status:
        params['return_workflow_status'] = 'true'
    if force_workflow_run:
        params['force_workflow_run'] = 'true'
    try:
        response = self.session.post(
            path,
            data=json.dumps(payload),
            headers=headers,
            timeout=timeout,
            params=params)
        return Response(response)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), path)
def score(self, user_id, timeout=None, abuse_types=None, version=None):
    """Computes and retrieves a user's fraud score from the Sift Science API.

    This call is blocking.  See
    https://siftscience.com/resources/references/score_api.html for the
    structure of the score response.

    Args:
        user_id: A user's id; must match the user_id used in event calls.
        timeout(optional): Use a custom timeout (in seconds) for this call.
        abuse_types(optional): List of abuse types for which scores should
            be returned; defaults to every abuse type you are subscribed to.
        version(optional): Use a different version of the Sift Science API
            for this call.

    Returns:
        A sift.client.Response object if the score call succeeded, or
        raises an ApiException.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    effective_timeout = self.timeout if timeout is None else timeout
    effective_version = self.version if version is None else version
    query = {'api_key': self.api_key}
    if abuse_types:
        query['abuse_types'] = ','.join(abuse_types)
    target = self._score_url(user_id, effective_version)
    try:
        return Response(self.session.get(
            target,
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout,
            params=query))
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), target)
def get_user_score(self, user_id, timeout=None, abuse_types=None):
    """Fetches the most recently computed score(s) for a user.

    Unlike score() and rescore_user(), this does *not* compute a new
    score; it returns the latest stored score for each requested abuse
    type, which may be arbitrarily old.  This call is blocking.  See
    https://siftscience.com/developers/docs/python/score-api/get-score .

    Args:
        user_id: A user's id; must match the user_id used in event calls.
        timeout(optional): Use a custom timeout (in seconds) for this call.
        abuse_types(optional): List of abuse types to fetch scores for;
            defaults to every abuse type you are subscribed to.

    Returns:
        A sift.client.Response object if the call succeeded, or raises
        an ApiException.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    effective_timeout = self.timeout if timeout is None else timeout
    target = self._user_score_url(user_id, self.version)
    query = {'api_key': self.api_key}
    if abuse_types:
        query['abuse_types'] = ','.join(abuse_types)
    try:
        return Response(self.session.get(
            target,
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout,
            params=query))
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), target)
def rescore_user(self, user_id, timeout=None, abuse_types=None):
    """Recomputes the user's score(s) for the given abuse types and returns them.

    This call is blocking.  See
    https://siftscience.com/developers/docs/python/score-api/rescore .

    Args:
        user_id: A user's id; must match the user_id used in event calls.
        timeout(optional): Use a custom timeout (in seconds) for this call.
        abuse_types(optional): List of abuse types to rescore; defaults to
            every abuse type you are subscribed to.

    Returns:
        A sift.client.Response object if the call succeeded, or raises
        an ApiException.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    effective_timeout = self.timeout if timeout is None else timeout
    target = self._user_score_url(user_id, self.version)
    query = {'api_key': self.api_key}
    if abuse_types:
        query['abuse_types'] = ','.join(abuse_types)
    try:
        # POST (rather than GET) triggers the recomputation server-side.
        return Response(self.session.post(
            target,
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout,
            params=query))
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), target)
def label(self, user_id, properties, timeout=None, version=None):
    """Labels a user as either good or bad through the Sift Science API.

    Implemented as a '$label' event sent to the user-specific labels
    endpoint; see track() for transport details and
    https://siftscience.com/resources/references/labels_api.html for the
    fields accepted in ``properties``.

    Args:
        user_id: A user's id; must match the user_id used in event calls.
        properties: A dict of label-specific attributes.
        timeout(optional): Use a custom timeout (in seconds) for this call.
        version(optional): Use a different version of the Sift Science API
            for this call.

    Returns:
        A sift.client.Response object if the label call succeeded,
        otherwise raises an ApiException.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    effective_version = self.version if version is None else version
    return self.track(
        '$label',
        properties,
        path=self._label_url(user_id, effective_version),
        timeout=timeout,
        version=effective_version)
def unlabel(self, user_id, timeout=None, abuse_type=None, version=None):
    """Removes a label from a user through the Sift Science API.

    This call is blocking.  See
    https://siftscience.com/resources/references/labels_api.html .

    Args:
        user_id: A user's id; must match the user_id used in event calls.
        timeout(optional): Use a custom timeout (in seconds) for this call.
        abuse_type(optional): The abuse type for which the user should be
            unlabeled.  If omitted, the user is unlabeled for all abuse
            types.
        version(optional): Use a different version of the Sift Science API
            for this call.

    Returns:
        A sift.client.Response object if the unlabel call succeeded,
        otherwise raises an ApiException.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    effective_timeout = self.timeout if timeout is None else timeout
    effective_version = self.version if version is None else version
    target = self._label_url(user_id, effective_version)
    query = {'api_key': self.api_key}
    if abuse_type:
        query['abuse_type'] = abuse_type
    try:
        return Response(self.session.delete(
            target,
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout,
            params=query))
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), target)
def get_workflow_status(self, run_id, timeout=None):
    """Gets the status of a workflow run.

    Args:
        run_id: The ID of a workflow run.
        timeout(optional): Use a custom timeout (in seconds) for this call.

    Returns:
        A sift.client.Response object if the call succeeded; otherwise,
        raises an ApiException.
    """
    _assert_non_empty_unicode(run_id, 'run_id')
    target = self._workflow_status_url(self.account_id, run_id)
    effective_timeout = self.timeout if timeout is None else timeout
    try:
        # Workflow endpoints authenticate with HTTP basic auth (key as
        # the user name, blank password) instead of an api_key parameter.
        raw = self.session.get(
            target,
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout)
        return Response(raw)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), target)
def get_decisions(self, entity_type, limit=None, start_from=None, abuse_types=None, timeout=None):
    """Get the decisions available to this customer account.

    Args:
        entity_type: only return decisions applicable to this entity type;
            one of {USER|ORDER|SESSION|CONTENT}, matched case-insensitively.
        limit: number of query results (decisions) to return
            [optional, default: 100]
        start_from: result-set offset for use in pagination
            [optional, default: 0]
        abuse_types: comma-separated string of abuse types used to filter
            the returned decisions (optional).
            NOTE(review): unlike the score/label methods, this value is
            forwarded as-is rather than joined from a list -- confirm that
            callers pass a pre-joined string.
        timeout: use a custom timeout (in seconds) for this call (optional).

    Returns:
        A sift.client.Response object containing an array of decisions if
        the call succeeded; otherwise raises an ApiException.
    """
    if timeout is None:
        timeout = self.timeout
    params = {}
    _assert_non_empty_unicode(entity_type, 'entity_type')
    # Validation is case-insensitive, but the value is sent to the server
    # in the caller's original case.
    if entity_type.lower() not in ['user', 'order', 'session', 'content']:
        raise ValueError("entity_type must be one of {user, order, session, content}")
    params['entity_type'] = entity_type
    if limit:
        params['limit'] = limit
    if start_from:
        params['from'] = start_from
    if abuse_types:
        params['abuse_types'] = abuse_types
    url = self._get_decisions_url(self.account_id)
    try:
        return Response(self.session.get(url, params=params,
                                         auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
                                         headers={'User-Agent': self._user_agent()}, timeout=timeout))
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
def apply_user_decision(self, user_id, properties, timeout=None):
    """Apply a decision to a user.

    Args:
        user_id: id of the user.
        properties: dict with keys:
            decision_id: decision to apply to the user.
            source: one of MANUAL_REVIEW | AUTOMATED_RULE | CHARGEBACK.
            analyst: id or email; required when source is MANUAL_REVIEW.
            time: millis timestamp of when the decision was applied.
        timeout: use a custom timeout (in seconds) for this call (optional).

    Returns:
        A sift.client.Response object if the call succeeded, else raises
        an ApiException.
    """
    if timeout is None:
        timeout = self.timeout
    # Raises TypeError/ValueError on malformed input; also normalizes
    # properties['source'] to upper case as a side effect.
    self._validate_apply_decision_request(properties, user_id)
    url = self._user_decisions_url(self.account_id, user_id)
    try:
        return Response(self.session.post(
            url,
            data=json.dumps(properties),
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers={'Content-type': 'application/json',
                     'Accept': '*/*',
                     'User-Agent': self._user_agent()},
            timeout=timeout))
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
def apply_order_decision(self, user_id, order_id, properties, timeout=None):
    """Apply a decision to an order.

    Args:
        user_id: id of the user who placed the order.
        order_id: id of the order.
        properties: dict with keys:
            decision_id: decision to apply to the order.
            source: one of MANUAL_REVIEW | AUTOMATED_RULE | CHARGEBACK.
            analyst: id or email; required when source is MANUAL_REVIEW.
            description: free-form text (optional).
            time: millis timestamp of when the decision was applied
                (optional).
        timeout: use a custom timeout (in seconds) for this call (optional).

    Returns:
        A sift.client.Response object if the call succeeded, else raises
        an ApiException.
    """
    if timeout is None:
        timeout = self.timeout
    _assert_non_empty_unicode(user_id, 'user_id')
    _assert_non_empty_unicode(order_id, 'order_id')
    # Re-validates user_id (harmless) and raises on malformed properties;
    # normalizes properties['source'] to upper case as a side effect.
    self._validate_apply_decision_request(properties, user_id)
    url = self._order_apply_decisions_url(self.account_id, user_id, order_id)
    try:
        return Response(self.session.post(
            url,
            data=json.dumps(properties),
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers={'Content-type': 'application/json',
                     'Accept': '*/*',
                     'User-Agent': self._user_agent()},
            timeout=timeout))
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
def _validate_apply_decision_request(self, properties, user_id):
    """Validates the payload of an apply-decision request.

    Args:
        properties: dict describing the decision; must contain a non-empty
            'source' naming one of DECISION_SOURCES (matched
            case-insensitively) and, when the source is MANUAL_REVIEW, a
            non-empty 'analyst' entry.
        user_id: id of the user the decision applies to.

    Raises:
        TypeError: if properties is not a dict.
        ValueError: if properties is empty, 'source' is missing/invalid,
            or 'analyst' is missing for a MANUAL_REVIEW decision.

    Side effect: normalizes properties['source'] to upper case in place.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    if not isinstance(properties, dict):
        raise TypeError("properties must be a dict")
    elif not properties:
        raise ValueError("properties dictionary may not be empty")
    source = properties.get('source')
    _assert_non_empty_unicode(source, 'source', error_cls=ValueError)
    # Normalize *before* validating so lower/mixed-case sources are
    # accepted.  The previous implementation upper-cased only after a
    # case-sensitive membership test, so its call to .upper() was a no-op
    # and otherwise-valid lower-case values were rejected.
    source = source.upper()
    if source not in DECISION_SOURCES:
        raise ValueError("decision 'source' must be one of [{0}]".format(", ".join(DECISION_SOURCES)))
    properties.update({'source': source})
    if source == 'MANUAL_REVIEW' and not properties.get('analyst', None):
        raise ValueError("must provide 'analyst' for decision 'source': 'MANUAL_REVIEW'")
def get_user_decisions(self, user_id, timeout=None):
    """Gets the decisions currently applied to a user.

    Args:
        user_id: The ID of a user.
        timeout(optional): Use a custom timeout (in seconds) for this call.

    Returns:
        A sift.client.Response object if the call succeeded; otherwise,
        raises an ApiException.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    effective_timeout = self.timeout if timeout is None else timeout
    target = self._user_decisions_url(self.account_id, user_id)
    try:
        raw = self.session.get(
            target,
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout)
        return Response(raw)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), target)
def get_order_decisions(self, order_id, timeout=None):
    """Fetch the decisions currently applied to an order.

    Args:
        order_id: The ID of the order.
        timeout: request timeout in seconds; defaults to the client timeout.

    Returns:
        A sift.client.Response object on success.

    Raises:
        ApiException: if the HTTP request fails.
    """
    _assert_non_empty_unicode(order_id, 'order_id')
    effective_timeout = self.timeout if timeout is None else timeout
    url = self._order_decisions_url(self.account_id, order_id)
    try:
        http_response = self.session.get(
            url,
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
    return Response(http_response)
def get_content_decisions(self, user_id, content_id, timeout=None):
    """Fetch the decisions applied to a piece of content.

    Args:
        user_id: The ID of the owner of the content.
        content_id: The ID of the content item.
        timeout: request timeout in seconds; defaults to the client timeout.

    Returns:
        A sift.client.Response object on success.

    Raises:
        ApiException: if the HTTP request fails.
    """
    _assert_non_empty_unicode(content_id, 'content_id')
    _assert_non_empty_unicode(user_id, 'user_id')
    effective_timeout = self.timeout if timeout is None else timeout
    url = self._content_decisions_url(self.account_id, user_id, content_id)
    try:
        http_response = self.session.get(
            url,
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
    return Response(http_response)
def get_session_decisions(self, user_id, session_id, timeout=None):
    """Fetch the decisions applied to a user's session.

    Args:
        user_id: The ID of the user.
        session_id: The ID of the session.
        timeout: request timeout in seconds; defaults to the client timeout.

    Returns:
        A sift.client.Response object on success.

    Raises:
        ApiException: if the HTTP request fails.
    """
    _assert_non_empty_unicode(user_id, 'user_id')
    _assert_non_empty_unicode(session_id, 'session_id')
    effective_timeout = self.timeout if timeout is None else timeout
    url = self._session_decisions_url(self.account_id, user_id, session_id)
    try:
        http_response = self.session.get(
            url,
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers={'User-Agent': self._user_agent()},
            timeout=effective_timeout)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
    return Response(http_response)
def apply_session_decision(self, user_id, session_id, properties, timeout=None):
    """Apply a decision to a user's session.

    Args:
        user_id: ID of the user who owns the session.
        session_id: ID of the session.
        properties: dict with keys:
            decision_id: decision to apply to the session.
            source: one of MANUAL_REVIEW | AUTOMATED_RULE | CHARGEBACK.
            analyst: id or email; required when source is MANUAL_REVIEW.
            description: free form text (optional).
            time: millis when the decision was applied (optional).

    Returns:
        A sift.client.Response object on success.

    Raises:
        ApiException: if the HTTP request fails.
    """
    effective_timeout = self.timeout if timeout is None else timeout
    _assert_non_empty_unicode(session_id, 'session_id')
    self._validate_apply_decision_request(properties, user_id)
    url = self._session_apply_decisions_url(self.account_id, user_id, session_id)
    post_headers = {'Content-type': 'application/json',
                    'Accept': '*/*',
                    'User-Agent': self._user_agent()}
    try:
        http_response = self.session.post(
            url,
            data=json.dumps(properties),
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers=post_headers,
            timeout=effective_timeout)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
    return Response(http_response)
def apply_content_decision(self, user_id, content_id, properties, timeout=None):
    """Apply a decision to a piece of content.

    Args:
        user_id: ID of the user who owns the content.
        content_id: ID of the content item.
        properties: dict with keys:
            decision_id: decision to apply to the content.
            source: one of MANUAL_REVIEW | AUTOMATED_RULE | CHARGEBACK.
            analyst: id or email; required when source is MANUAL_REVIEW.
            description: free form text (optional).
            time: millis when the decision was applied (optional).

    Returns:
        A sift.client.Response object on success.

    Raises:
        ApiException: if the HTTP request fails.
    """
    effective_timeout = self.timeout if timeout is None else timeout
    _assert_non_empty_unicode(content_id, 'content_id')
    self._validate_apply_decision_request(properties, user_id)
    url = self._content_apply_decisions_url(self.account_id, user_id, content_id)
    post_headers = {'Content-type': 'application/json',
                    'Accept': '*/*',
                    'User-Agent': self._user_agent()}
    try:
        http_response = self.session.post(
            url,
            data=json.dumps(properties),
            auth=requests.auth.HTTPBasicAuth(self.api_key, ''),
            headers=post_headers,
            timeout=effective_timeout)
    except requests.exceptions.RequestException as e:
        raise ApiException(str(e), url)
    return Response(http_response)
def _user_agent(self):
    """Return the User-Agent header value identifying this client build."""
    return 'SiftScience/v{0} sift-python/{1}'.format(
        sift.version.API_VERSION, sift.version.VERSION)
def _event_url(self, version):
    """Build the events endpoint URL for the given API version."""
    return '{0}/v{1}/events'.format(self.url, version)
def _score_url(self, user_id, version):
    """Build the score endpoint URL for a user."""
    return '{0}/v{1}/score/{2}'.format(self.url, version, _quote_path(user_id))
def _user_score_url(self, user_id, version):
    """Build the user score endpoint URL.

    Fix: quote the user id with the module helper ``_quote_path`` — as every
    other URL builder in this client does — instead of ``urllib.quote``,
    which does not exist on Python 3 (it moved to ``urllib.parse.quote``)
    and would raise AttributeError here. NOTE(review): assumes ``_quote_path``
    applies equivalent percent-encoding for path segments — confirm against
    its definition.
    """
    return self.url + '/v%s/users/%s/score' % (version, _quote_path(user_id))
def _label_url(self, user_id, version):
    """Build the labels endpoint URL for a user."""
    return '{0}/v{1}/users/{2}/labels'.format(
        self.url, version, _quote_path(user_id))
def _workflow_status_url(self, account_id, run_id):
    """Build the v3 URL reporting the status of a workflow run."""
    path = '/v3/accounts/{0}/workflows/runs/{1}'.format(
        _quote_path(account_id), _quote_path(run_id))
    return API3_URL + path
def _get_decisions_url(self, account_id):
    """Build the v3 URL listing the decisions configured for an account."""
    return API3_URL + '/v3/accounts/{0}/decisions'.format(_quote_path(account_id))
def _user_decisions_url(self, account_id, user_id):
    """Build the v3 URL for decisions applied to a user."""
    path = '/v3/accounts/{0}/users/{1}/decisions'.format(
        _quote_path(account_id), _quote_path(user_id))
    return API3_URL + path
def _order_decisions_url(self, account_id, order_id):
    """Build the v3 URL for decisions applied to an order."""
    path = '/v3/accounts/{0}/orders/{1}/decisions'.format(
        _quote_path(account_id), _quote_path(order_id))
    return API3_URL + path
def _session_decisions_url(self, account_id, user_id, session_id):
    """Build the v3 URL for decisions applied to a user's session."""
    path = '/v3/accounts/{0}/users/{1}/sessions/{2}/decisions'.format(
        _quote_path(account_id), _quote_path(user_id), _quote_path(session_id))
    return API3_URL + path
def _content_decisions_url(self, account_id, user_id, content_id):
    """Build the v3 URL for decisions applied to a piece of content."""
    path = '/v3/accounts/{0}/users/{1}/content/{2}/decisions'.format(
        _quote_path(account_id), _quote_path(user_id), _quote_path(content_id))
    return API3_URL + path
def _order_apply_decisions_url(self, account_id, user_id, order_id):
    """Build the v3 URL for applying a decision to a user's order."""
    path = '/v3/accounts/{0}/users/{1}/orders/{2}/decisions'.format(
        _quote_path(account_id), _quote_path(user_id), _quote_path(order_id))
    return API3_URL + path
def _session_apply_decisions_url(self, account_id, user_id, session_id):
    """Build the v3 URL for applying a decision to a user's session."""
    path = '/v3/accounts/{0}/users/{1}/sessions/{2}/decisions'.format(
        _quote_path(account_id), _quote_path(user_id), _quote_path(session_id))
    return API3_URL + path
def _content_apply_decisions_url(self, account_id, user_id, content_id):
    """Build the v3 URL for applying a decision to a piece of content."""
    path = '/v3/accounts/{0}/users/{1}/content/{2}/decisions'.format(
        _quote_path(account_id), _quote_path(user_id), _quote_path(content_id))
    return API3_URL + path
class Response(object):
    """Wrapper around an HTTP response from the Sift API.

    Parses the JSON body (when present) and surfaces the Sift-specific
    ``status`` / ``error_message`` fields. Construction raises ApiException
    for an unparsable JSON body or any non-2XX status code.
    """

    # Status codes whose responses legitimately carry no body.
    HTTP_CODES_WITHOUT_BODY = [204, 304]

    def __init__(self, http_response):
        """
        Raises ApiException on invalid JSON in Response body or non-2XX HTTP
        status code.
        """
        # Set defaults.
        self.body = None
        self.request = None
        self.api_status = None
        self.api_error_message = None
        self.http_status_code = http_response.status_code
        self.url = http_response.url

        if (self.http_status_code not in self.HTTP_CODES_WITHOUT_BODY) and http_response.text:
            try:
                self.body = http_response.json()
                if 'status' in self.body:
                    self.api_status = self.body['status']
                if 'error_message' in self.body:
                    self.api_error_message = self.body['error_message']
                # The API may echo the original request as a JSON string;
                # decode it when present.
                if 'request' in self.body.keys() and isinstance(self.body['request'], str):
                    self.request = json.loads(self.body['request'])
            except ValueError:
                raise ApiException(
                    'Failed to parse json response from {0}'.format(self.url),
                    url=self.url,
                    http_status_code=self.http_status_code,
                    body=self.body,
                    api_status=self.api_status,
                    api_error_message=self.api_error_message,
                    request=self.request)
            finally:
                # NOTE: raising inside ``finally`` replaces any exception
                # already in flight — for a non-2XX response whose body also
                # fails to parse, this status-code error wins over the
                # parse error above.
                if int(self.http_status_code) < 200 or int(self.http_status_code) >= 300:
                    raise ApiException(
                        '{0} returned non-2XX http status code {1}'.format(self.url, self.http_status_code),
                        url=self.url,
                        http_status_code=self.http_status_code,
                        body=self.body,
                        api_status=self.api_status,
                        api_error_message=self.api_error_message,
                        request=self.request)

    def __str__(self):
        # Compact JSON-ish summary; omits "body" when there was none.
        return ('{%s "http_status_code": %s}' %
                ('' if self.body is None else '"body": ' +
                 json.dumps(self.body) + ',', str(self.http_status_code)))

    def is_ok(self):
        # Bodyless codes: only 204 counts as success (304 does not).
        if self.http_status_code in self.HTTP_CODES_WITHOUT_BODY:
            return 204 == self.http_status_code
        # NOTE: Responses from /v3/... endpoints do not contain an API status.
        # (api_status == 0 is falsy, so a zero status also falls through to
        # the HTTP-200 check below.)
        if self.api_status:
            return self.api_status == 0
        return self.http_status_code == 200
class ApiException(Exception):
    """Raised for transport failures and non-2XX / unparsable API responses.

    Carries the request URL plus whatever response context was available at
    the time of the failure; every field other than ``url`` may be None.
    """

    def __init__(self, message, url, http_status_code=None, body=None, api_status=None,
                 api_error_message=None, request=None):
        super(ApiException, self).__init__(message)
        self.url = url
        self.http_status_code = http_status_code
        self.body = body
        self.api_status = api_status
        self.api_error_message = api_error_message
        self.request = request
def _assert_non_empty_unicode(val, name, error_cls=None):
    """Raise unless *val* is a non-empty unicode string.

    Raises *error_cls* when provided; otherwise TypeError for a wrong type
    and ValueError for an empty string.
    """
    if not isinstance(val, _UNICODE_STRING):
        raise (error_cls or TypeError)('{0} must be a non-empty string'.format(name))
    if not val:
        raise (error_cls or ValueError)('{0} must be a non-empty string'.format(name))
def _assert_non_empty_dict(val, name):
    """Raise TypeError unless *val* is a dict, ValueError when it is empty."""
    message = '{0} must be a non-empty dict'.format(name)
    if not isinstance(val, dict):
        raise TypeError(message)
    if not val:
        raise ValueError(message)
| StarcoderdataPython |
5184786 | <reponame>calind/zipa
import httpretty
import pytest
from requests.exceptions import HTTPError
from zipa import api_test_com as t
def pretty_api():
    """Register the canned api.test.com endpoints these tests run against."""
    # (uri, status, body, extra Link header or None)
    endpoints = [
        ('https://api.test.com/item/a', 200, u'{"name": "a"}', None),
        ('https://api.test.com/list', 200,
         u'[{"item1": "name1"},{"item2": "name2"}]',
         {'Link': '<https://api.test.com/list/2>; rel="next"'}),
        ('https://api.test.com/list/2', 200,
         u'[{"item3": "name3"},{"item4": "name4"}]',
         {'Link': '<https://api.test.com/list/3>; rel="next"'}),
        ('https://api.test.com/list/3', 200, u'[{"item5": "name5"}]', None),
        ('https://api.test.com/list/first', 200,
         u'[{"item1": "name1"},{"item2": "name2"}]',
         {'Link': '<https://api.test.com/list/error>; rel="next"'}),
        ('https://api.test.com/list/error', 400, u'{"detail":"error"}', None),
    ]
    for uri, status, body, headers in endpoints:
        extra = {} if headers is None else {'adding_headers': headers}
        httpretty.register_uri(httpretty.GET, uri,
                               status=status,
                               content_type='application/json',
                               body=body,
                               **extra)
class TestResourceIter:
    """Iteration behaviour of zipa resources, backed by httpretty mocks."""

    @httpretty.activate
    def test_iter_returns_single_object(self):
        # Iterating a single-item resource yields just that object.
        pretty_api()
        for entry in t.item['a']:
            assert entry.name == 'a'

    @httpretty.activate
    def test_iter_completes(self):
        # Pagination follows the Link: rel="next" headers through all pages.
        pretty_api()
        collected = [entry for entry in t.list]
        assert collected == [{u'item1': u'name1'}, {u'item2': u'name2'},
                             {u'item3': u'name3'}, {u'item4': u'name4'},
                             {u'item5': u'name5'}]

    @httpretty.activate
    def test_iter_next_link_is_error(self):
        # A failing "next" page surfaces as an HTTPError mid-iteration.
        pretty_api()
        collected = []
        with pytest.raises(HTTPError):
            for entry in t.list.first:
                collected.append(entry)
| StarcoderdataPython |
11293031 | # -*- coding: utf-8 -*-
"""
Basic HTTP access interface.
This module handles communication between the bot and the HTTP threads.
This module is responsible for
- Setting up a connection pool
- Providing a (blocking) interface for HTTP requests
- Translate site objects with query strings into urls
- Urlencoding all data
- Basic HTTP error handling
"""
#
# (C) Pywikipedia bot team, 2007
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: dcddc41b69b2c506894a7cbec8c1e94e7ed3ec5e $'
__docformat__ = 'epytext'
import Queue
import urllib
import urlparse
import logging
import atexit
from pywikibot import config
from pywikibot.exceptions import Server504Error
import pywikibot
import cookielib
import threadedhttp
import pywikibot.version
_logger = "comm.http"
# global variables

# the User-agent: header. The default is
# '<script>/<revision> Pywikipediabot/2.0', where '<script>' is the currently
# executing script and version is the SVN revision of Pywikipediabot.
USER_AGENT_FORMAT = '{script}/r{version[rev]} Pywikipediabot/2.0'
useragent = USER_AGENT_FORMAT.format(script=pywikibot.calledModuleName(),
                                     version=pywikibot.version.getversiondict())

# Size of the worker pool servicing the request queue.
numthreads = 1
threads = []

connection_pool = threadedhttp.ConnectionPool()
http_queue = Queue.Queue()

# Shared cookie jar, persisted to the user's data directory.
cookie_jar = threadedhttp.LockableCookieJar(
    config.datafilepath("pywikibot.lwp"))
try:
    cookie_jar.load()
except (IOError, cookielib.LoadError):
    pywikibot.debug(u"Loading cookies failed.", _logger)
else:
    pywikibot.debug(u"Loaded cookies from file.", _logger)

# Build up HttpProcessors: daemon worker threads consuming http_queue.
pywikibot.log('Starting %(numthreads)i threads...' % locals())
for i in range(numthreads):
    proc = threadedhttp.HttpProcessor(http_queue, cookie_jar, connection_pool)
    proc.setDaemon(True)
    threads.append(proc)
    proc.start()
# Prepare flush on quit
def _flush():
    """Shut down the HTTP worker threads, draining queued requests first."""
    # One None sentinel per worker tells each HttpProcessor to exit.
    for _ in threads:
        http_queue.put(None)
    pywikibot.log(u'Waiting for threads to finish... ')
    for worker in threads:
        worker.join()
    pywikibot.log(u"All threads finished.")
# Ensure worker threads are flushed and joined at interpreter shutdown.
atexit.register(_flush)

# export cookie_jar to global namespace so other modules can share the jar
pywikibot.cookie_jar = cookie_jar
def request(site, uri, ssl=False, *args, **kwargs):
    """Queue a request to be submitted to Site.

    All parameters not listed below are the same as
    L{httplib2.Http.request}, but the uri is relative

    @param site: The Site to connect to
    @param uri: the URI to retrieve (relative to the site's scriptpath)
    @param ssl: Use https connection
    @return: The received data (a unicode string).
    """
    if ssl:
        proto = "https"
        host = site.ssl_hostname()
        uri = site.ssl_pathprefix() + uri
    else:
        proto = site.protocol()
        host = site.hostname()
    baseuri = urlparse.urljoin("%(proto)s://%(host)s" % locals(), uri)

    # set default user-agent string
    kwargs.setdefault("headers", {})
    kwargs["headers"].setdefault("user-agent", useragent)

    # NOTE: local name shadows this function; it is the request object
    # handed to the worker threads via the shared queue.
    request = threadedhttp.HttpRequest(baseuri, *args, **kwargs)
    http_queue.put(request)
    # The worker thread releases request.lock when the response is ready,
    # so this acquire blocks the caller until completion.
    request.lock.acquire()

    #TODO: do some error correcting stuff

    #if all else fails
    if isinstance(request.data, Exception):
        raise request.data

    if request.data[0].status == 504:
        raise Server504Error("Server %s timed out" % site.hostname())

    if request.data[0].status != 200:
        pywikibot.warning(u"Http response status %(status)s"
                          % {'status': request.data[0].status})

    # request.data is (response, content); return the body text.
    return request.data[1]
| StarcoderdataPython |
1839624 | <filename>jade/basic/TKinter/ImageFrame.py
from Tkinter import *
class ImageFrame(Frame):
    """A Tkinter Frame that displays a single image loaded from disk.

    The PhotoImage is also stored on the Label (``self.Photo.image``) so a
    reference outlives this constructor; otherwise Tkinter would garbage
    collect the image object and render a blank widget.
    """

    def __init__(self, _tk_, file_path, **options):
        """
        :param _tk_: parent Tk widget.
        :param file_path: path to an image in a format PhotoImage supports
            (GIF/PGM/PPM; PNG on Tk 8.6+).
        :param options: extra keyword options forwarded to Frame.
        """
        Frame.__init__(self, _tk_, **options)
        DesignPhoto = PhotoImage(file=file_path)
        self.Photo = Label(master=self, image=DesignPhoto)
        self.Photo.image = DesignPhoto  # keep a reference to prevent GC
        self.Photo.grid(row=0, column=0, padx=3, pady=3, sticky=W + E + N + S)
| StarcoderdataPython |
1631315 | <reponame>LucasFidon/fetal-brain-segmentation-partial-supervision-miccai21<filename>src/network_architectures/custom_3dunet/unet.py
# -*- coding: utf-8 -*-
"""
Main module.
Based on https://github.com/fepegar/unet/tree/master/unet
"""
from typing import Optional
import torch.nn as nn
from src.network_architectures.custom_3dunet.encoding import Encoder, EncodingBlock
from src.network_architectures.custom_3dunet.decoding import Decoder
from src.network_architectures.custom_3dunet.conv import ConvolutionalBlock
MAX_CHANNELS = 320 # cap the number of features
class UNet(nn.Module):
    """Configurable 2-D / 3-D U-Net: Encoder, bottleneck EncodingBlock,
    Decoder, and a 1x1 convolutional classifier head.

    Channel widths start at ``out_channels_first_layer`` and double at each
    encoding level, capped at ``MAX_CHANNELS``.
    """

    def __init__(
            self,
            in_channels: int = 1,
            out_classes: int = 2,
            dimensions: int = 2,
            num_encoding_blocks: int = 5,
            out_channels_first_layer: int = 32,
            normalization: Optional[str] = None,
            pooling_type: str = 'max',
            upsampling_type: str = 'conv',
            preactivation: bool = False,
            residual: bool = False,
            padding: int = 0,
            padding_mode: str = 'zeros',
            activation: Optional[str] = 'ReLU',
            initial_dilation: Optional[int] = None,
            dropout: float = 0,
            gradient_checkpointing: bool = False,
            ):
        super().__init__()
        depth = num_encoding_blocks - 1

        # Channel width per encoding level: first_layer * 2**i, capped.
        out_channels_encoding_blocks = [
            min(MAX_CHANNELS, out_channels_first_layer * (2 ** i))
            for i in range(num_encoding_blocks)
        ]

        # Force padding if residual blocks
        if residual:
            padding = 1

        # Encoder
        self.encoder = Encoder(
            in_channels,
            out_channels_first_layer,
            # out_channels_encoding_blocks,
            dimensions,
            pooling_type,
            depth,
            normalization,
            preactivation=preactivation,
            residual=residual,
            padding=padding,
            padding_mode=padding_mode,
            activation=activation,
            initial_dilation=initial_dilation,
            dropout=dropout,
            gradient_checkpointing=gradient_checkpointing,
        )

        # Bottom (bottleneck) block: deepest level, no pooling; doubles the
        # channel count once more, still capped at MAX_CHANNELS.
        bottom_in_channel = min(
            MAX_CHANNELS,
            out_channels_first_layer * (2 ** (num_encoding_blocks - 2))
        )
        bottom_out_channels = min(
            MAX_CHANNELS,
            bottom_in_channel * 2,
        )
        self.bottom_block = EncodingBlock(
            in_channels=bottom_in_channel,
            out_channels_first=bottom_out_channels,
            out_channels_second=bottom_out_channels,
            dimensions=dimensions,
            normalization=normalization,
            pooling_type=None,
            preactivation=preactivation,
            residual=residual,
            padding=padding,
            padding_mode=padding_mode,
            activation=activation,
            dilation=self.encoder.dilation,
            dropout=dropout,
        )

        # Decoder
        num_decoding_blocks = depth
        self.decoder = Decoder(
            out_channels_encoding_blocks,
            dimensions,
            upsampling_type,
            num_decoding_blocks,
            normalization=normalization,
            preactivation=preactivation,
            residual=residual,
            padding=padding,
            padding_mode=padding_mode,
            activation=activation,
            initial_dilation=self.encoder.dilation,
            dropout=dropout,
        )

        # Classifier
        # NOTE(review): both branches assign the same value, so the 2-D and
        # 3-D heads are configured identically; confirm whether the 2-D case
        # was meant to use a different width.
        if dimensions == 2:
            in_channels = out_channels_first_layer
        elif dimensions == 3:
            in_channels = out_channels_first_layer
        self.classifier = ConvolutionalBlock(
            dimensions, in_channels, out_classes,
            kernel_size=1, activation=None,
        )

    def forward(self, x):
        # Encoder returns per-level skip tensors plus the deepest features.
        skip_connections, encoding = self.encoder(x)
        encoding = self.bottom_block(encoding)
        # Decoder consumes the skip connections alongside the bottleneck.
        x = self.decoder(skip_connections, encoding)
        return self.classifier(x)
class UNet3D(UNet):
    """UNet preconfigured for volumetric (3 spatial dimensions) inputs."""

    def __init__(self, *args, **user_kwargs):
        # Defaults for the 3-D variant; caller-supplied kwargs win.
        kwargs = dict(
            dimensions=3,
            num_encoding_blocks=4,
            out_channels_first_layer=30,
            normalization='instance',
        )
        kwargs.update(user_kwargs)
        super().__init__(*args, **kwargs)
| StarcoderdataPython |
47687 | <gh_stars>1-10
from Model import Model
from Config import Config
from math import ceil
import readfile
import customer_init
import numpy as np
import time
import datetime
import util
import os
import random
from tempfile import TemporaryFile
from customer_init import orthogonal_initializer
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
# import the inspect_checkpoint library
from tensorflow.python.tools import inspect_checkpoint as chkp
import math
class SegnetModel(Model):
def __init__(self):
    # Load hyper-parameters and data/log paths from the shared Config object.
    self.config = Config()
def add_placeholders(self):
    """Create the TF1 placeholders fed at train / eval time."""
    # Training inputs: NHWC image batches and single-channel label maps.
    self.train_data_node = tf.placeholder(tf.float32, shape=[self.config.BATCH_SIZE,
                                          self.config.IMAGE_HEIGHT, self.config.IMAGE_WIDTH, self.config.IMAGE_DEPTH])
    self.train_label_node = tf.placeholder(tf.int32, shape=[self.config.BATCH_SIZE, self.config.IMAGE_HEIGHT, self.config.IMAGE_WIDTH, 1])
    # Selects batch-norm train vs. inference mode at run time.
    self.phase_train = tf.placeholder(tf.bool, name="phase_train")
    # Scalars fed for summary-only ops (test loss / accuracy / mean IU).
    self.average_pl = tf.placeholder(tf.float32)
    self.acc_pl = tf.placeholder(tf.float32)
    self.iu_pl = tf.placeholder(tf.float32)
    # Test-time inputs, sized by TEST_BATCH_SIZE.
    self.test_data_node = tf.placeholder(
        tf.float32,
        shape=[self.config.TEST_BATCH_SIZE,
               self.config.IMAGE_HEIGHT, self.config.IMAGE_WIDTH, self.config.IMAGE_DEPTH])
    self.test_labels_node = tf.placeholder(tf.int64, shape=[self.config.TEST_BATCH_SIZE, self.config.IMAGE_HEIGHT, self.config.IMAGE_WIDTH, 1])
def add_loss_op(self, pred):
    # No-op hook from the Model base interface: in this model the loss is
    # built inside add_prediction_op via cal_loss instead.
    pass
def add_training_op(self, total_loss):
    """Build the Adam training op with a fixed learning rate.

    Also wires loss-average summaries, per-variable / per-gradient
    histograms, and an exponential moving average over the trainables.

    Args:
        total_loss: scalar total-loss tensor.
    Returns:
        A no-op which, when run, applies gradients and updates the EMA.
    """
    lr = self.config.INITIAL_LEARNING_RATE
    loss_averages_op = util._add_loss_summaries(total_loss)

    # Compute gradients (after the loss-average summaries have updated).
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.AdamOptimizer(lr)
        grads = opt.compute_gradients(total_loss)
    apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)

    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        self.config.MOVING_AVERAGE_DECAY, self.global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
def train_on_batch(self, sess, inputs_batch, labels_batch):
    # No-op hook from the Model base interface: the loop in `training`
    # runs the train/loss ops directly instead of going through this.
    pass
def add_prediction_op(self):
    """Build the SegNet-style encoder/decoder graph over train_data_node.

    Encoder: LRN then four (7x7 conv + BN + ReLU, 2x2 max-pool) stages.
    Decoder: four (2x2 deconv upsample, 7x7 conv + BN) stages. A final
    1x1 conv maps the 64 features to the 2 output classes.

    Returns:
        (loss, logit): scalar total loss and per-pixel class logits of
        shape (batch, H, W, 2).
    """
    # norm1
    norm1 = tf.nn.lrn(self.train_data_node, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75,
                      name='norm1')
    # conv1
    conv1 = self.conv_layer_with_bn(norm1, [7, 7, self.train_data_node.get_shape().as_list()[3], 64], self.phase_train, name="conv1")
    # pool1
    pool1, pool1_indices = tf.nn.max_pool_with_argmax(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                                      padding='SAME', name='pool1')
    # conv2
    conv2 = self.conv_layer_with_bn(pool1, [7, 7, 64, 64], self.phase_train, name="conv2")
    # pool2
    pool2, pool2_indices = tf.nn.max_pool_with_argmax(conv2, ksize=[1, 2, 2, 1],
                                                      strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    # conv3
    conv3 = self.conv_layer_with_bn(pool2, [7, 7, 64, 64], self.phase_train, name="conv3")
    # pool3
    pool3, pool3_indices = tf.nn.max_pool_with_argmax(conv3, ksize=[1, 2, 2, 1],
                                                      strides=[1, 2, 2, 1], padding='SAME', name='pool3')
    # conv4
    conv4 = self.conv_layer_with_bn(pool3, [7, 7, 64, 64], self.phase_train, name="conv4")
    """ End of encoder """

    """ start upsample """
    # pool4
    pool4, pool4_indices = tf.nn.max_pool_with_argmax(conv4, ksize=[1, 2, 2, 1],
                                                      strides=[1, 2, 2, 1], padding='SAME', name='pool4')
    # upsample4
    # Need to change when using different dataset out_w, out_h
    # NOTE: the pool indices are computed but unused — upsampling is done
    # with learned-shape bilinear deconvolutions, not unpooling.
    # upsample4 = upsample_with_pool_indices(pool4, pool4_indices, pool4.get_shape(), out_w=45, out_h=60, scale=2, name='upsample4')
    upsample4 = self.deconv_layer(pool4, [2, 2, 64, 64], [self.config.BATCH_SIZE, 64, 64, 64], 2, "up4")
    # decode 4
    conv_decode4 = self.conv_layer_with_bn(upsample4, [7, 7, 64, 64], self.phase_train, False, name="conv_decode4")
    # upsample 3
    # upsample3 = upsample_with_pool_indices(conv_decode4, pool3_indices, conv_decode4.get_shape(), scale=2, name='upsample3')
    upsample3 = self.deconv_layer(conv_decode4, [2, 2, 64, 64], [self.config.BATCH_SIZE, 128, 128, 64], 2, "up3")
    # decode 3
    conv_decode3 = self.conv_layer_with_bn(upsample3, [7, 7, 64, 64], self.phase_train, False, name="conv_decode3")
    # upsample2
    # upsample2 = upsample_with_pool_indices(conv_decode3, pool2_indices, conv_decode3.get_shape(), scale=2, name='upsample2')
    upsample2 = self.deconv_layer(conv_decode3, [2, 2, 64, 64], [self.config.BATCH_SIZE, 256, 256, 64], 2, "up2")
    # decode 2
    conv_decode2 = self.conv_layer_with_bn(upsample2, [7, 7, 64, 64], self.phase_train, False, name="conv_decode2")
    # upsample1
    # upsample1 = upsample_with_pool_indices(conv_decode2, pool1_indices, conv_decode2.get_shape(), scale=2, name='upsample1')
    upsample1 = self.deconv_layer(conv_decode2, [2, 2, 64, 64], [self.config.BATCH_SIZE, 512, 512, 64], 2, "up1")
    # decode4
    conv_decode1 = self.conv_layer_with_bn(upsample1, [7, 7, 64, 64], self.phase_train, False, name="conv_decode1")
    """ Start Classify """
    # output predicted class number (6)
    with tf.variable_scope('conv_classifier', reuse=tf.AUTO_REUSE) as scope:
        kernel = util._variable_with_weight_decay('weights',
                                                  shape=[1, 1, 64, 2],
                                                  initializer=customer_init.msra_initializer(1, 64),
                                                  wd=0.0005)
        conv = tf.nn.conv2d(conv_decode1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = util._variable('biases', [2], tf.constant_initializer(0.0))
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    logit = conv_classifier
    loss = self.cal_loss(conv_classifier, self.train_label_node)

    return loss, logit
def cal_loss(self, conv_classifier, labels):
    """Weighted softmax cross-entropy loss for the 2-class segmentation.

    Flattens logits/labels to (pixels, NUM_CLASSES), one-hot encodes the
    labels, and weights each pixel's cross-entropy term: class-1 terms get
    weight 1, while class-0 terms whose predicted class-0 probability
    exceeds 0.5 are down-weighted to max(_T, p0) (otherwise weight 1).
    Weight-decay terms from the 'losses' collection are added on top.

    Fix: removed the dead, never-used `T = tf.ones(softmax.shape[0], 1) * _T`
    line — it also passed `1` as tf.ones' dtype positional argument — and
    the commented-out exploratory leftovers.

    Args:
        conv_classifier: raw logits, shape (batch, H, W, NUM_CLASSES).
        labels: integer label map, shape (batch, H, W, 1).
    Returns:
        Scalar total-loss tensor (weighted cross entropy + regularization).
    """
    with tf.name_scope("loss"):
        logits = tf.reshape(conv_classifier, (-1, self.config.NUM_CLASSES))
        epsilon = tf.constant(value=1e-10)
        logits = logits + epsilon
        softmax = tf.nn.softmax(logits)

        # construct one-hot label array, shape (pixels, NUM_CLASSES)
        label_flat = tf.reshape(labels, (-1, 1))
        labels = tf.reshape(tf.one_hot(label_flat, depth=self.config.NUM_CLASSES),
                            (-1, self.config.NUM_CLASSES))

        _T = 0.3
        w1_n = tf.ones([softmax.shape[0], 1], tf.float32)
        # Predicted probability of class 0 for every pixel.
        w2_n = tf.slice(softmax, [0, 0], [-1, 1])
        condition = tf.greater(w2_n, 0.5)
        w2_n = tf.where(condition, tf.math.maximum(_T, w2_n), tf.ones(w2_n.shape))
        weight = tf.concat([w2_n, w1_n], 1)

        cross_entropy = -tf.reduce_sum(weight * labels * tf.log(softmax + epsilon), axis=[1])
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    return loss
def conv_layer_with_bn(self, inputT, shape, train_phase, activation=True, name=None):
    """SAME-padded conv + bias + batch norm, optionally followed by ReLU.

    Args:
        inputT: NHWC input tensor.
        shape: filter shape [k, k, in_channels, out_channels].
        train_phase: bool placeholder selecting BN train/inference mode.
        activation: apply ReLU after batch norm when True (encoder layers);
            decoder layers pass False.
        name: variable scope name.
    Returns:
        The convolved (and normalized) tensor.
    """
    # NOTE(review): in_channel and k_size are computed but unused.
    in_channel = shape[2]
    out_channel = shape[3]
    k_size = shape[0]
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
        kernel = util._variable_with_weight_decay('ort_weights', shape=shape, initializer=orthogonal_initializer(), wd=None)
        conv = tf.nn.conv2d(inputT, kernel, [1, 1, 1, 1], padding='SAME')
        biases = util._variable('biases', [out_channel], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        if activation is True:
            conv_out = tf.nn.relu(self.batch_norm_layer(bias, train_phase, scope.name))
        else:
            conv_out = self.batch_norm_layer(bias, train_phase, scope.name)
    return conv_out
def batch_norm_layer(self, inputT, is_training, scope):
    """Batch normalization whose mode is chosen at run time by `is_training`.

    The training branch creates the BN variables under `<scope>_bn`; the
    inference branch reuses them (reuse=True), so the training branch must
    be constructed first.
    """
    return tf.cond(is_training,
                   lambda: tf.contrib.layers.batch_norm(inputT, is_training=True,
                                                        center=False, updates_collections=None,
                                                        scope=scope + "_bn"),
                   lambda: tf.contrib.layers.batch_norm(inputT, is_training=False,
                                                        updates_collections=None, center=False, scope=scope + "_bn",
                                                        reuse=True))
def deconv_layer(self, inputT, f_shape, output_shape, stride=2, name=None):
    """Transposed convolution (upsampling) with a fixed bilinear kernel.

    Fix: removed the unused `sess_temp` locals — one commented-out
    InteractiveSession and one `tf.global_variables_initializer()` op that
    was built but never run.

    Args:
        inputT: input feature map, NHWC.
        f_shape: filter shape [k, k, out_channels, in_channels].
        output_shape: [batch, height, width, channels] of the result.
        stride: spatial upsampling factor.
        name: variable scope name.
    Returns:
        The upsampled tensor.
    """
    # output_shape = [b, w, h, c]
    strides = [1, stride, stride, 1]
    with tf.variable_scope(name):
        weights = self.get_deconv_filter(f_shape)
        deconv = tf.nn.conv2d_transpose(inputT, weights, output_shape,
                                        strides=strides, padding='SAME')
    return deconv
def get_deconv_filter(self, f_shape):
    """Create a bilinear-interpolation deconvolution filter variable.

    reference: https://github.com/MarvinTeichmann/tensorflow-fcn
    """
    k = f_shape[0]
    f = ceil(k / 2.0)
    center = (2 * f - 1 - f % 2) / (2.0 * f)
    # k x k bilinear kernel.
    bilinear = np.zeros([f_shape[0], f_shape[1]])
    for row in range(k):
        for col in range(k):
            bilinear[row, col] = ((1 - abs(row / f - center)) *
                                  (1 - abs(col / f - center)))
    # Same kernel on the diagonal of the (out, in) channel axes.
    weights = np.zeros(f_shape)
    for ch in range(f_shape[2]):
        weights[:, :, ch, ch] = bilinear
    init = tf.constant_initializer(value=weights,
                                   dtype=tf.float32)
    return tf.get_variable(name="up_filter", initializer=init,
                           shape=weights.shape)
def get_train_val(self, image_filenames, label_filenames):
    """Randomly split paired image/label filename lists into train and val.

    Moves ~6% of the pairs (int(len * 0.06)) into the validation lists,
    popping the same random index from both lists so pairs stay aligned.
    The input lists are mutated in place and returned as the training split.

    Fix: removed the stray `val_image_filenames.pop(0)` /
    `val_label_filenames.pop(0)` calls, which silently discarded one
    validation pair and raised IndexError whenever the computed validation
    size was zero (fewer than 17 files).

    Args:
        image_filenames: list of image paths (mutated: val entries removed).
        label_filenames: parallel list of label paths (mutated likewise).
    Returns:
        (train_images, train_labels, val_images, val_labels)
    """
    val_size = int(len(image_filenames) * 0.06)
    val_image_filenames = []
    val_label_filenames = []
    for _ in range(val_size):
        pop_index = random.randint(0, len(image_filenames) - 1)
        val_image_filenames.append(image_filenames.pop(pop_index))
        val_label_filenames.append(label_filenames.pop(pop_index))
    return image_filenames, label_filenames, val_image_filenames, val_label_filenames
def training(self, is_finetune=False):
    """Run the training loop, optionally resuming from a checkpoint.

    Builds the graph, streams (image, label) batches from the training
    dataset, logs loss / per-class accuracy every 50 steps, and saves a
    checkpoint under config.log_dir every 1000 steps and at the end.

    Args:
        is_finetune: when True, restore weights from config.finetune and
            continue counting steps from the checkpoint's step number.
    """
    batch_size = self.config.BATCH_SIZE
    train_dir = self.config.log_dir  # ../data/Logs
    image_dir = self.config.image_dir  # ../data/train
    val_dir = self.config.val_dir  # ../data/val
    finetune_ckpt = self.config.finetune
    image_w = self.config.IMAGE_WIDTH
    image_h = self.config.IMAGE_HEIGHT
    image_c = self.config.IMAGE_DEPTH

    image_filenames, label_filenames = readfile.get_filename_list(image_dir, prefix="../data/train")
    print "total file size {}".format(len(image_filenames))
    #val_image_filenames, val_label_filenames = readfile.get_filename_list(val_dir, prefix = "../data/val", is_train=False)
    # image_filenames, label_filenames, val_image_filenames, val_label_filenames = self.get_train_val(image_filenames, label_filenames)
    # print "train size {}".format(len(image_filenames))
    # print "test size {}".format(len(val_image_filenames))

    # should be changed if your model stored by different convention
    startstep = 0 if not is_finetune else int(self.config.finetune.split('-')[-1])

    #with tf.device('/device:GPU:0'):
    with tf.Graph().as_default():
        self.add_placeholders()
        self.global_step = tf.Variable(0, trainable=False)
        train_dataset = readfile.get_dataset(image_filenames, label_filenames, self.config.BATCH_SIZE, True)
        # val_dataset = readfile.get_dataset(val_image_filenames, val_label_filenames, self.config.EVAL_BATCH_SIZE)
        train_iterator = train_dataset.make_one_shot_iterator()
        next_train_element = train_iterator.get_next()
        # val_iterator = val_dataset.make_one_shot_iterator()
        # next_val_element = val_iterator.get_next()

        # Build a Graph that computes the logits predictions from the inference model.
        loss, eval_prediction = self.add_prediction_op()
        # Build a Graph that trains the model with one batch of examples and updates the model parameters.
        train_op = self.add_training_op(loss)
        saver = tf.train.Saver(tf.global_variables(), write_version=saver_pb2.SaverDef.V1)
        summary_op = tf.summary.merge_all()

        with tf.Session() as sess:
            # Restore from checkpoint when fine-tuning; otherwise init fresh.
            if (is_finetune == True):
                saver.restore(sess, finetune_ckpt)
            else:
                init = tf.global_variables_initializer()
                sess.run(init)

            # Summary placeholders (test metrics fed from Python side).
            summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
            average_pl = tf.placeholder(tf.float32)
            acc_pl = tf.placeholder(tf.float32)
            iu_pl = tf.placeholder(tf.float32)
            average_summary = tf.summary.scalar("test_average_loss", average_pl)
            acc_summary = tf.summary.scalar("test_accuracy", acc_pl)
            iu_summary = tf.summary.scalar("Mean_IU", iu_pl)

            for step in range(startstep, startstep + self.config.maxsteps):
                image_batch, label_batch = sess.run(next_train_element)
                # since we still use mini-batches in validation, still set bn-layer phase_train = True
                feed_dict = {
                    self.train_data_node: image_batch,
                    self.train_label_node: label_batch,
                    self.phase_train: True
                }
                start_time = time.time()
                _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
                duration = time.time() - start_time
                assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

                if step % 50 == 0:
                    num_examples_per_step = batch_size
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = float(duration)
                    format_str = ('%s: step %d, loss = %.4f (%.1f examples/sec; %.3f '
                                  'sec/batch)')
                    print (format_str % (datetime.datetime.now(), step, loss_value,
                                         examples_per_sec, sec_per_batch))
                    # eval current training batch pre-class accuracy
                    pred = sess.run(eval_prediction, feed_dict=feed_dict)
                    util.per_class_acc(pred, label_batch)

                # if step % 100 == 0:
                #     print("start validating.....")
                #     total_val_loss = 0.0
                #     hist = np.zeros((self.config.NUM_CLASSES, self.config.NUM_CLASSES))
                #     for test_step in range(int(self.config.TEST_ITER)):
                #         val_images_batch, val_labels_batch = sess.run(next_val_element)
                #
                #         _val_loss, _val_pred = sess.run([loss, eval_prediction], feed_dict={
                #             self.train_data_node: val_images_batch,
                #             self.train_label_node: val_labels_batch,
                #             self.phase_train: True
                #         })
                #         total_val_loss += _val_loss
                #         hist += util.get_hist(_val_pred, val_labels_batch)
                #     print("val loss: ", total_val_loss / self.config.TEST_ITER)
                #     acc_total = np.diag(hist).sum() / hist.sum()
                #     iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
                #     test_summary_str = sess.run(average_summary, feed_dict={average_pl: total_val_loss / self.config.TEST_ITER})
                #     acc_summary_str = sess.run(acc_summary, feed_dict={acc_pl: acc_total})
                #     iu_summary_str = sess.run(iu_summary, feed_dict={iu_pl: np.nanmean(iu)})
                #     util.print_hist_summery(hist)
                #     print(" end validating.... ")
                #
                #     summary_str = sess.run(summary_op, feed_dict=feed_dict)
                #     summary_writer.add_summary(summary_str, step)
                #     summary_writer.add_summary(test_summary_str, step)
                #     summary_writer.add_summary(acc_summary_str, step)
                #     summary_writer.add_summary(iu_summary_str, step)

                # Save the model checkpoint periodically.
                if step % 1000 == 0 or (step + 1) == self.config.maxsteps:
                    checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
    def visualize_prediction(self, meta_name = None, data_name = None):
        """Restore a trained checkpoint and write predicted segmentation masks
        for the images under ../data/test_prediction as PNG files.

        meta_name is accepted for interface symmetry but is unused here: the
        graph is rebuilt via add_placeholders/add_prediction_op rather than
        imported from the .meta file.
        data_name is the checkpoint file name inside self.config.test_ckpt.
        """
        with tf.Session() as sess:
            self.add_placeholders()
            # NOTE(review): `prediction` (and `loss` below) are computed but
            # never used in this method.
            prediction = np.random.randint(2, size=self.train_label_node.shape)
            prediction.astype(np.float32)
            loss, eval_prediction = self.add_prediction_op()
            saver = tf.train.Saver()
            # Restore the trained variables from the checkpoint data file.
            data_file_path = os.path.join(self.config.test_ckpt, data_name)
            if os.path.isfile(data_file_path):
                saver.restore(sess, data_file_path)
            else:
                raise Exception('restore variable data fail')
            # chkp.print_tensors_in_checkpoint_file(data_file_path, tensor_name = '', all_tensors = True)
            image_filenames, label_filenames = readfile.get_filename_list("../data/test_prediction", prefix="../data/test_prediction",
                                                             is_train=False)
            print "image length {}".format(len(image_filenames))
            # One-shot input pipeline over the test images only (no labels).
            image_paths = tf.convert_to_tensor(image_filenames, dtype=tf.string)
            dataset = tf.data.Dataset.from_tensor_slices(image_paths)
            dataset = dataset.map(readfile.map_fn_test, num_parallel_calls=8)
            dataset = dataset.batch(self.config.BATCH_SIZE)
            test_iterator = dataset.make_one_shot_iterator()
            test_next_element = test_iterator.get_next()
            # Only the first batch is evaluated and written out.
            image_batch = sess.run(test_next_element)
            # NOTE(review): phase_train=True at inference keeps batch-norm in
            # training mode — confirm this is intentional.
            feed_dict = {
                self.train_data_node: image_batch,
                self.phase_train: True
            }
            result = sess.run([eval_prediction], feed_dict)[0]
            print "begin to write the result as image back to folder..."
            for i in range(self.config.BATCH_SIZE):
                util.writemask(result[i],'mask_'+str(i)+".png")
    def get_submission_result(self, meta_name = None, data_name = None):
        """Restore a checkpoint, run prediction over the whole ../data/val set
        batch by batch, stack the per-batch outputs into one array and write a
        submission CSV via util.create_submission.

        meta_name is unused; the graph is rebuilt rather than imported.
        data_name is the checkpoint file name inside self.config.test_ckpt.
        """
        is_first = True
        with tf.Session() as sess:
            self.add_placeholders()
            # NOTE(review): `prediction` and `loss` are computed but unused.
            prediction = np.random.randint(2, size=self.train_label_node.shape)
            prediction.astype(np.float32)
            loss, eval_prediction = self.add_prediction_op()
            # meta_file_path = os.path.join(self.config.test_ckpt, meta_name)
            # if os.path.isfile(meta_file_path):
            #     saver = tf.train.import_meta_graph(meta_file_path,clear_devices=True)
            # else:
            #     raise Exception('restore graph meta data fail')
            saver = tf.train.Saver()
            data_file_path = os.path.join(self.config.test_ckpt, data_name)
            if os.path.isfile(data_file_path):
                saver.restore(sess, data_file_path)
            else:
                raise Exception('restore variable data fail')
            #chkp.print_tensors_in_checkpoint_file(data_file_path, tensor_name = '', all_tensors = True)
            image_filenames, label_filenames = readfile.get_filename_list("../data/val", prefix="../data/val", is_train=False)
            # the length of validation set; 2169
            print "image length {}".format(len(image_filenames))
            # construct the image dataset
            image_paths = tf.convert_to_tensor(image_filenames, dtype=tf.string)
            dataset = tf.data.Dataset.from_tensor_slices(image_paths)
            dataset = dataset.map(readfile.map_fn_test, num_parallel_calls=8)
            dataset = dataset.batch(self.config.BATCH_SIZE)
            test_iterator = dataset.make_one_shot_iterator()
            test_next_element = test_iterator.get_next()
            # NOTE(review): integer division drops the final partial batch when
            # the set size is not a multiple of BATCH_SIZE — confirm acceptable.
            for i in range(len(image_filenames)/self.config.BATCH_SIZE):
            #for i in range(2):
            # for i in range(len(image_filenames))
                image_batch = sess.run(test_next_element)
                #print image_batch.shape
                feed_dict = {
                    self.train_data_node: image_batch,
                    self.phase_train: True
                }
                if is_first:
                    # First batch seeds `result`; later batches are concatenated.
                    result = sess.run([eval_prediction],feed_dict)[0]
                    # prediction = tf.stack([prediction, result])
                    print "prediction shape : {}".format(result.shape)
                    is_first = False
                    continue
                # 5,512,512,2
                new_result = sess.run([eval_prediction],feed_dict)[0]
                #print "old result shape {}".format(np.asarray(result).shape)
                #print "new result shape {}".format(new_result.shape)
                result = np.concatenate([result, new_result],axis=0)
                #prediction = tf.stack([prediction, result])
                print "prediction shape : {}".format(result.shape)
        # preprocess the prediction and product submission, prediction is [numexample, 512, 512, 2]
        util.create_submission('../data/subid2_1.csv', result, image_filenames)
if __name__ == '__main__':
    # Build the model and render predicted masks from a saved checkpoint.
    segmodel = SegnetModel()
    # print all tensors in checkpoint file
    segmodel.visualize_prediction(meta_name="model.ckpt-38000.meta", data_name="model.ckpt-38000")
    #segmodel.get_submission_result()
| StarcoderdataPython |
11214364 |
from __future__ import unicode_literals
# import frappe
# from _future_ import unicode_literals
import frappe
import frappe.utils
import moneris_payment
import json
import datetime
from frappe import _
from frappe.utils import getdate,nowdate
from frappe.utils import cint, fmt_money
from datetime import date
from moneris_payment.moneris_payment.doctype.moneris_settings.moneris_settings import get_gateway_controller
from moneris_payment.MonerisPaymentGateway import *
# Disable Frappe page caching and sitemap generation for this checkout page.
no_cache = 1
no_sitemap = 1
# Query-string keys that must all be present before the page will render.
expected_keys = ('amount', 'title', 'description', 'reference_doctype', 'reference_docname',
    'payer_name', 'payer_email', 'order_id', 'currency')
def get_context(context):
    """Build the template context for the Moneris checkout page.

    Copies the expected query-string keys into the context, verifies the
    payer is the logged-in user, loads the user's stored cards, formats the
    amount, and appends the recurrence label for subscription payments.
    Redirects away (frappe.Redirect) when required information is missing.
    """
    context.no_cache = 1
    # all these keys exist in form_dict
    if not (set(expected_keys) - set(list(frappe.form_dict))):
        for key in expected_keys:
            context[key] = frappe.form_dict[key]
        if frappe.form_dict['payer_email']:
            # Only the payer themselves may open their own checkout page.
            if frappe.form_dict['payer_email']!=frappe.session.user:
                frappe.throw(_("Not permitted"), frappe.PermissionError)
    else:
        frappe.redirect_to_message(_('Some information is missing'),
            _('Looks like someone sent you to an incomplete URL. Please ask them to look into it.'))
        frappe.local.flags.redirect_location = frappe.local.response.location
        raise frappe.Redirect
    # The payment request document is keyed by order_id, not the docname key.
    context.reference_docname=frappe.form_dict['order_id']
    # Saved cards for the logged-in user, newest first.
    context.customercards=frappe.db.get_all("Moneris Vault",fields={'*'},filters={"user_id":frappe.session.user},order_by="creation desc")
    gateway_controller = get_gateway_controller(context.reference_doctype, context.reference_docname)
    context['amount'] = fmt_money(amount=context['amount'], currency=context['currency'])
    if frappe.db.get_value(context.reference_doctype, context.reference_docname, "is_a_subscription"):
        payment_plan = frappe.db.get_value(context.reference_doctype, context.reference_docname, "payment_plan")
        recurrence = frappe.db.get_value("Payment Plan", payment_plan, "recurrence")
        context['amount'] = context['amount'] + " " + _(recurrence)
    else:
        # NOTE(review): this branch redirects EVERY non-subscription payment
        # away with a "missing information" message — confirm it is intended.
        frappe.redirect_to_message(_('Some information is missing'),
            _('Looks like someone sent you to an incomplete URL. Please ask them to look into it.'))
        frappe.local.flags.redirect_location = frappe.local.response.location
        raise frappe.Redirect
# @frappe.whitelist(allow_guest=True)
# def make_payment(payment_request_id,payment_amount,card_number,card_expire,card_cvv,reference_doctype,reference_docname):
# try:
# # Order Info
# payment_request=frappe.get_doc("Payment Request", payment_request_id)
# sale_order=frappe.get_doc("Sales Order", payment_request.reference_name)
# billing_info=frappe.get_doc("Address", sale_order.customer_address)
# shipping_info=frappe.get_doc("Address", sale_order.shipping_address_name)
# # customer_info=frappe.get_doc("Customer", sale_order.customer)
# sale_order_items=frappe.db.get_all("Sales Order Item", fields=['item_code,item_name,rate,qty'], filters={'parent':sale_order.name},limit_page_length=1000)
# order_id = payment_request.reference_name+"-"+datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
# amount = payment_amount
# pan =card_number.replace(' ', '')
# expiry_date = card_expire
# crypt = card_cvv
# customer=CustInfo()
# #Customer Billing Info and Shipping Info
# first_name=sale_order.contact_display
# last_name=" "
# company_name=""
# # Billing Info
# billing_full_address=billing_info.address_line1
# if billing_info.address_line2:
# billing_full_address=billing_full_address+","+billing_info.address_line2
# if len(billing_full_address)>70:
# billing_full_address=billing_full_address[:70]
# billing_address=billing_full_address
# billing_city=billing_info.city
# billing_state_province=billing_info.state
# billing_postal_code=billing_info.pincode
# billing_country=billing_info.country
# billing_phone_number=billing_info.phone
# billing_fax=""
# billing_tax1=""
# if sale_order.total_taxes_and_charges:
# billing_tax1=str(sale_order.total_taxes_and_charges)
# billing_tax2=""
# billing_tax3=""
# shipping_cost="0.00"
# # # Shipping Info
# shipping_full_address=shipping_info.address_line1
# if shipping_info.address_line2:
# shipping_full_address=shipping_full_address+","+shipping_info.address_line2
# if len(shipping_full_address)>70:
# shipping_full_address=shipping_full_address[:70]
# shipping_address=shipping_full_address
# shipping_city=shipping_info.city
# shipping_state_province=shipping_info.state
# shipping_postal_code=shipping_info.pincode
# shipping_country=shipping_info.country
# shipping_phone_number=shipping_info.phone
# shipping_fax=""
# shipping_tax1=""
# if sale_order.total_taxes_and_charges:
# shipping_tax1=str(sale_order.total_taxes_and_charges)
# shipping_tax2=""
# shipping_tax3=""
# shipping_cost="0.00"
# billing_obj=BillingInfo(first_name, last_name, company_name, billing_address, billing_city, billing_state_province, billing_postal_code, billing_country, billing_phone_number, billing_fax, billing_tax1, billing_tax2, billing_tax3, shipping_cost)
# shpping_obj=ShippingInfo(first_name, last_name, company_name, shipping_address, shipping_city, shipping_state_province, shipping_postal_code, shipping_country, shipping_phone_number, shipping_fax, shipping_tax1, shipping_tax2, shipping_tax3, shipping_cost)
# customer.setBilling(billing_obj)
# customer.setShipping(shpping_obj)
# customer.setEmail(frappe.session.user)
# customer.setInstruction(" ")
# # Product Items
# for item in sale_order_items:
# customer.addItem(Item(item.item_name[:45],str(item.qty),item.item_code.split(':')[0],str(item.rate)))
# #Purchase
# purchase = Purchase(order_id, amount , pan, expiry_date, crypt)
# purchase.setCustInfo(customer)
# MSGObject=mpgHttpsPost(purchase)
# MSGObject.postRequest()
# #Response
# resp = MSGObject.getResponse()
# if resp.getComplete()=="true":
# gateway_controller = get_gateway_controller(reference_doctype,reference_docname)
# if frappe.db.get_value(reference_doctype, reference_docname, 'is_a_subscription'):
# reference = frappe.get_doc(reference_doctype, reference_docname)
# data = reference.create_subscription("Moneris", gateway_controller, data)
# else:
# data = frappe.get_doc("Moneris Settings", gateway_controller).create_request(data)
# frappe.db.commit()
# return data
# else:
# return {"ReceiptId" : resp.getReceiptId(),
# "ReferenceNum" :resp.getReferenceNum(),
# "ResponseCod" : resp.getResponseCode(),
# "AuthCode" : resp.getAuthCode(),
# "TransTime" : resp.getTransTime(),
# "TransDate" : resp.getTransDate(),
# "TransType" : resp.getTransType(),
# "Complete" : resp.getComplete(),
# "Message" : resp.getMessage(),
# "TransAmount" : resp.getTransAmount(),
# "CardType" : resp.getCardType(),
# "TransID" : resp.getTransID(),
# "TimedOut" : resp.getTimedOut(),
# "BankTotals" : resp.getBankTotals(),
# "Ticket" : resp.getTicket()}
# except Exception,e:
# print(e)
# return e
@frappe.whitelist(allow_guest=True)
def make_payment(data, reference_doctype=None, reference_docname=None):
    """Create a Moneris payment request for the given reference document.

    Looks up the gateway controller configured for the reference document,
    asks the Moneris Settings doc to create the request, commits, and
    returns the gateway response. On any failure the exception is printed
    and returned as-is (best-effort contract kept from the original).
    """
    try:
        controller = get_gateway_controller(reference_doctype, reference_docname)
        settings = frappe.get_doc("Moneris Settings", controller)
        response = settings.create_request(data)
        frappe.db.commit()
        return response
    except Exception as exc:
        print(exc)
        return exc
@frappe.whitelist(allow_guest=True)
def refund_payment():
    """Issue a hard-coded test refund and return the gateway response fields.

    Posts a Refund transaction through the Moneris HTTPS API and flattens
    every response getter into one dict keyed by field name.
    """
    refund = Refund("test_python-vls-2019-01-24-13-36-16","10.00","82990-0_12","7")
    poster = mpgHttpsPost(refund)
    poster.postRequest()
    resp = poster.getResponse()
    # Pull each response field through its getter, preserving key order.
    field_getters = (
        ("ReceiptId", resp.getReceiptId),
        ("ReferenceNum", resp.getReferenceNum),
        ("ResponseCode", resp.getResponseCode),
        ("AuthCode", resp.getAuthCode),
        ("TransTime", resp.getTransTime),
        ("TransDate", resp.getTransDate),
        ("TransType", resp.getTransType),
        ("Complete", resp.getComplete),
        ("Message", resp.getMessage),
        ("TransAmount", resp.getTransAmount),
        ("CardType", resp.getCardType),
        ("TransID", resp.getTransID),
        ("TimedOut", resp.getTimedOut),
        ("BankTotals", resp.getBankTotals),
        ("Ticket", resp.getTicket),
    )
    return {name: getter() for name, getter in field_getters}
| StarcoderdataPython |
6551416 | <reponame>Phillistan16/fastestimator
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from fastestimator.backend.abs import abs
from fastestimator.backend.argmax import argmax
from fastestimator.backend.binary_crossentropy import binary_crossentropy
from fastestimator.backend.cast import cast
from fastestimator.backend.categorical_crossentropy import categorical_crossentropy
from fastestimator.backend.check_nan import check_nan
from fastestimator.backend.clip_by_value import clip_by_value
from fastestimator.backend.concat import concat
from fastestimator.backend.exp import exp
from fastestimator.backend.expand_dims import expand_dims
from fastestimator.backend.feed_forward import feed_forward
from fastestimator.backend.gather import gather
from fastestimator.backend.gather_from_batch import gather_from_batch
from fastestimator.backend.get_gradient import get_gradient
from fastestimator.backend.get_image_dims import get_image_dims
from fastestimator.backend.get_lr import get_lr
from fastestimator.backend.hinge import hinge
from fastestimator.backend.iwd import iwd
from fastestimator.backend.lambertw import lambertw
from fastestimator.backend.load_model import load_model
from fastestimator.backend.matmul import matmul
from fastestimator.backend.maximum import maximum
from fastestimator.backend.mean_squared_error import mean_squared_error
from fastestimator.backend.ones_like import ones_like
from fastestimator.backend.percentile import percentile
from fastestimator.backend.permute import permute
from fastestimator.backend.pow import pow
from fastestimator.backend.random_normal_like import random_normal_like
from fastestimator.backend.random_uniform_like import random_uniform_like
from fastestimator.backend.reduce_max import reduce_max
from fastestimator.backend.reduce_mean import reduce_mean
from fastestimator.backend.reduce_min import reduce_min
from fastestimator.backend.reduce_sum import reduce_sum
from fastestimator.backend.reshape import reshape
from fastestimator.backend.roll import roll
from fastestimator.backend.save_model import save_model
from fastestimator.backend.set_lr import set_lr
from fastestimator.backend.sign import sign
from fastestimator.backend.sparse_categorical_crossentropy import sparse_categorical_crossentropy
from fastestimator.backend.squeeze import squeeze
from fastestimator.backend.tensor_pow import tensor_pow
from fastestimator.backend.tensor_round import tensor_round
from fastestimator.backend.tensor_sqrt import tensor_sqrt
from fastestimator.backend.to_shape import to_shape
from fastestimator.backend.to_tensor import to_tensor
from fastestimator.backend.to_type import to_type
from fastestimator.backend.transpose import transpose
from fastestimator.backend.update_model import update_model
from fastestimator.backend.watch import watch
from fastestimator.backend.zeros_like import zeros_like
from fastestimator.backend.zscore import zscore
| StarcoderdataPython |
3541911 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys
import logging
import types
import copy
import os
import os.path
import test_framework.util
from test_framework.util import NoConfigValue
def BU_setMay2018ForkTime(self, secondsSinceEpoch):
    """Arm the May 2018 fork on a Bitcoin Unlimited node at the given epoch time."""
    fork_option = "mining.forkMay2018Time=%d" % secondsSinceEpoch
    self.set(fork_option)
    return None
def ABC_setMay2018ForkTime(self, secondsSinceEpoch):
    """Stub: no implementation for setting the May 2018 fork time on an ABC node."""
    caller = sys._getframe().f_code.co_name
    logging.error("%s not implemented", caller)
    return None
def XT_setMay2018ForkTime(self, secondsSinceEpoch):
    """Stub: no implementation for setting the May 2018 fork time on an XT node."""
    caller = sys._getframe().f_code.co_name
    logging.error("%s not implemented", caller)
    return None
def HUB_setMay2018ForkTime(self, secondsSinceEpoch):
    """Stub: no implementation for setting the May 2018 fork time on a Hub node."""
    caller = sys._getframe().f_code.co_name
    logging.error("%s not implemented", caller)
    return None
def addInteropApis(node, bin):
    """Tag *node* with its client flavor and attach client-specific helpers.

    The binary path *bin* is matched against known client substrings in the
    same precedence order as before; only the Bitcoin Unlimited ("bucash")
    client currently gets a real setMay2018ForkTime implementation.
    Returns the (mutated) node.
    """
    if "bucash" in bin:
        node.clientName = "bucash"
        node.setMay2018ForkTime = types.MethodType(BU_setMay2018ForkTime, node)
        return node
    for flavor in ("abc", "xt", "hub"):
        if flavor in bin:
            node.clientName = flavor
            return node
    node.clientName = "unknown"
    return node
# Translation table from a generic option name to each client's spelling.
# An empty string means the client has no equivalent option (start() skips it).
configXlat = {
    "forkMay2018time" : { "bucash" : "mining.forkMay2018Time",
                          "xt" : "thirdhftime",
                          "hub" : "",
                          "abc" : "monolithactivationtime" }
}
def start(datadir, clientDirs, bins, conf):
    """Start one node per (client flavor, binary) pair and return the nodes.

    Each node gets its own datadir index and a copy of *conf* in which the
    generic option names from configXlat are translated to that client's
    spelling; an empty translation drops the option entirely.
    """
    nodes = []
    for index, (name, executable) in enumerate(zip(clientDirs, bins)):
        node_conf = copy.copy(conf)
        for generic_key, per_client in configXlat.items():
            if generic_key not in node_conf:
                continue
            value = node_conf.pop(generic_key)
            if per_client[name]:
                node_conf[per_client[name]] = value
        test_framework.util.initialize_datadir(datadir, index, node_conf)
        node = test_framework.util.start_node(index, datadir, binary=executable)
        addInteropApis(node, name)
        nodes.append(node)
    return nodes
| StarcoderdataPython |
6547126 | <gh_stars>1-10
import pandas as pd
import numpy as np
from ppandas.helper.bayes_net_helper import BayesNetHelper
class QueryHelper():
    """Evaluate queries on a Bayesian network whose evidence values may be
    expressed in "coarse" bins that each map to several underlying "fine"
    values.

    ``mapping`` has the shape {variable: {coarse_value: [fine_value, ...]}}.
    Coarse evidence is expanded into one sub-query per fine-value
    combination, each sub-result is weighted by the marginal probability of
    its evidence assignment, and the weighted rows are normalized and
    re-binned back to coarse values.
    """

    def __init__(self, mapping):
        # {variable: {coarse_value: [fine_value, ...]}}
        self.mapping = mapping

    def query(self, bayes_net, query_vars, evidence_vars):
        """Return P(query_vars | evidence_vars) as a DataFrame whose last
        column holds the probabilities, re-binned through ``self.mapping``.

        Evidence given in coarse values is expanded into every matching
        fine value before querying.
        """
        need_to_expand = []
        if evidence_vars is not None:
            new_evidence_vars, need_to_expand = self.mapEvidenceVars(
                evidence_vars)
        if need_to_expand:
            list_of_new_evidence_vars = self.expandQueries(
                new_evidence_vars, need_to_expand)
            df_res = self.performExpandedQueries(
                bayes_net, query_vars, list_of_new_evidence_vars)
        else:
            df_res = BayesNetHelper.query(bayes_net, query_vars, evidence_vars)
        return self.combine(df_res)

    def map_query(self, bayes_net, query_vars, evidence_vars):
        """MAP variant of :meth:`query`.

        NOTE(review): when coarse evidence forces an expansion this returns
        the weighted probability table (not a MAP assignment) and skips
        :meth:`combine` — confirm that asymmetry is intended.
        """
        need_to_expand = []
        if evidence_vars is not None:
            new_evidence_vars, need_to_expand = self.mapEvidenceVars(
                evidence_vars)
        if need_to_expand:
            list_of_new_evidence_vars = self.expandQueries(
                new_evidence_vars, need_to_expand)
            df_res = self.performExpandedQueries(
                bayes_net, query_vars, list_of_new_evidence_vars)
        else:
            df_res = BayesNetHelper.map_query(
                bayes_net, query_vars, evidence_vars)
        return df_res

    def mapEvidenceVars(self, evidence_vars):
        """Translate coarse evidence values into fine-value lists.

        Returns ``(new_evidence_vars, need_to_expand)`` where
        ``need_to_expand`` lists the variables whose single value was
        replaced by a list of fine values.
        """
        new_evidence_vars = {}
        need_to_expand = []
        for evidence, value in evidence_vars.items():
            if evidence in self.mapping:
                new_evidence_vars[evidence] = self.mapping[evidence][value]
                need_to_expand.append(evidence)
            else:
                new_evidence_vars[evidence] = value
        return new_evidence_vars, need_to_expand

    def combine(self, df):
        """Re-bin the value columns of ``df`` to coarse values and sum the
        probabilities of rows that fall into the same coarse bin.

        The last column is assumed to hold probabilities. Note: mapped
        columns of ``df`` are rewritten in place.
        """
        value_columns = list(df.columns)[:-1]
        for var in value_columns:
            if var in self.mapping:
                # Invert {coarse: [fine, ...]} into {fine: coarse}.
                inverted_map = {}
                for coarse, fine_values in self.mapping[var].items():
                    for fine in fine_values:
                        inverted_map[fine] = coarse
                df[var] = df[var].map(inverted_map)
        return df.groupby(value_columns).sum().reset_index()

    def expandQueries(self, new_evidence_vars, need_to_expand):
        """Expand list-valued evidence into one dict per combination.

        e.g. {ev1: [1, 2], ev2: 'a'} with need_to_expand=['ev1'] yields
        [{ev1: 1, ev2: 'a'}, {ev1: 2, ev2: 'a'}].

        Bug fix: the previous in-place update kept only the last value of
        earlier variables when several variables needed expansion; this now
        builds the full cartesian product.
        """
        if not need_to_expand:
            return []
        constant_evidence_vars = {
            ev: val for ev, val in new_evidence_vars.items()
            if ev not in need_to_expand}
        # Cartesian product across all expanded variables.
        expanded = [{}]
        for ev in need_to_expand:
            expanded = [dict(partial, **{ev: value})
                        for value in new_evidence_vars[ev]
                        for partial in expanded]
        for evidence_dict in expanded:
            evidence_dict.update(constant_evidence_vars)
        return expanded

    def performExpandedQueries(self, bayes_net,
                               query_vars, list_of_new_evidence_vars):
        """Run one query per expanded evidence combination, weight each
        result by the marginal probability of its evidence assignment, and
        return the normalized union of all result rows."""
        df_res = None
        # Marginal table over the evidence vars: ev1 | ev2 | ... | P()
        df_evidence_probability = BayesNetHelper.query(
            bayes_net, list_of_new_evidence_vars[0].keys(),
            evidence_vars=None)
        for evidence_vars in list_of_new_evidence_vars:
            # P(query | evidence...) * P(evidence...)
            df_new = BayesNetHelper.query(
                bayes_net, query_vars, evidence_vars)
            weight = self.get_probability_of_evidences(
                df_evidence_probability, evidence_vars)
            # np.float was removed from NumPy; the builtin float is the
            # same type np.float used to alias.
            df_new.iloc[:, -1] = \
                df_new.iloc[:, -1].values.astype(float) * weight
            if df_res is None:
                df_res = df_new
            else:
                # DataFrame.append was removed in pandas 2.0; concat is the
                # drop-in replacement.
                df_res = pd.concat([df_res, df_new], ignore_index=True)
        # Normalize the probability column so it sums to one.
        y = df_res.iloc[:, -1].values.astype(float)
        df_res.iloc[:, -1] = y / np.sum(y)
        return df_res

    def get_probability_of_evidences(self,
                                     df_evidence_probability, evidence_vars):
        """Look up P(evidence assignment) in the marginal table.

        e.g. evidence_vars={'Age': '[40,50)', 'Gender': 'female'} becomes
        the pandas query "Age == '[40,50)'& Gender == 'female'".
        """
        condition = None
        for evidence, value in evidence_vars.items():
            clause = '{} == \'{}\''.format(evidence, value)
            condition = clause if condition is None \
                else condition + '& ' + clause
        query_index = df_evidence_probability.query(condition).index
        p = df_evidence_probability.iloc[query_index, -1].values[0]
        return float(p)
| StarcoderdataPython |
12815893 | #!/usr/bin/env python
import numpy as np
import sys
import scipy.io as io_mat
from subprocess import call
import os
import matplotlib
#matplotlib.use('Svg')
import matplotlib.pyplot as plt
# Global plot font, then load the eight simulation .mat files named after
# the solution prefix given on the command line: four forcing periods
# (2/4/8/16 days), each with a plain and a "_dsp" (displacement) variant.
font = {'weight' : 'normal',
        'size'   : 12}
matplotlib.rc('font', **font)
name_sol = sys.argv[1]
matfile = [name_sol+ '_pp2.mat',\
           name_sol+ '_pp4.mat',\
           name_sol+ '_pp8.mat',\
           name_sol+ '_pp16.mat',\
           name_sol+ '_pp2_dsp.mat',\
           name_sol+ '_pp4_dsp.mat',\
           name_sol+ '_pp8_dsp.mat',\
           name_sol+'_pp16_dsp.mat']
# Per-file arrays, indexed 0-7 in the same order as `matfile`.
dat_slip=[]; fcoord=[]; ocoord=[]; dat_seis=[]; dt_dyn=[]; dt_slip=[]
dat_log=[]; dat_fqs=[]; dt=[]; dt_rsf=[]; dat_log_rsf=[]
# NOTE(review): each .mat file is re-read once per field; loading it once
# into a dict would avoid ~10 redundant disk reads per file.
for f in matfile:
    log_tmp=np.squeeze(io_mat.loadmat(f)['dat_log'])
    fcoord.append(np.squeeze(io_mat.loadmat(f)['crd_flt']))
    dat_slip.append(np.squeeze(io_mat.loadmat(f)['dat_slip']))
    dt_slip.append(np.squeeze(io_mat.loadmat(f)['dt_slip']))
    ocoord.append(np.squeeze(io_mat.loadmat(f)['crd_obs']))
    dat_seis.append(np.squeeze(io_mat.loadmat(f)['dat_seis']))
    dt_dyn.append(np.squeeze(io_mat.loadmat(f)['dt_dyn']))
    dat_fqs.append(np.squeeze(io_mat.loadmat(f)['dat_fqs']))
    dt.append(np.squeeze(io_mat.loadmat(f)['dt']))
    dt_rsf.append(np.squeeze(io_mat.loadmat(f)['dt_rsf']))
    dat_log_rsf.append(np.squeeze(io_mat.loadmat(f)['dat_log_rsf']))
    dat_log.append(log_tmp)
# Calculate event magnitude, location
# For each "_dsp" dataset (indices 4-7), find logged events, estimate a
# magnitude from slip * stress drop, locate the event from where slip rate
# exceeds vtol, and record the event time in hours.
mag=[]; xloc=[]; tevt=[]
vtol=1E-3  # slip-rate threshold used to pick the rupturing patch
for k in [4,5,6,7]: # Use absolute output
    flt=fcoord[k][:,1]
    idx=np.argsort(flt)        # sort fault nodes by depth coordinate
    xflt=fcoord[k][idx,:]
    if len(dat_log[k].shape)==1:
        # Single event: promote to a 2D (1, n) array for uniform indexing.
        event=np.array([dat_log[k][:]])
    else:
        event=dat_log[k]
    tmpmag=[]; tmploc=[]; tmpt=[]
    # j is the logged step index, i the event row number.
    for j,i in zip(event[:,0],range(len(event))):
        if event[i,1]==1:
            # Magnitude
            sdrp=dat_fqs[k][j+1,idx,0] - dat_fqs[k][j,idx,0]
            # NOTE(review): bare except also hides real errors; it is used
            # here to cope with object-array vs plain-array .mat layouts.
            try:
                slip=dat_slip[k][i][1500,idx,0]
            except:
                slip=dat_slip[k].item()[1500,idx,0]
            idslp=np.where(sdrp<0.)
            tmpmag.append(np.log10(-sum(slip[idslp]*sdrp[idslp])))
            # Location
            try:
                v=dat_slip[k-4][i][5,idx,0]
            except:
                v=dat_slip[k-4].item()[5,idx,0]
            idtmp=np.where(v>=vtol)
            # NOTE(review): integer division assumed (Python 2); under
            # Python 3 this index becomes a float and numpy indexing fails.
            z=xflt[sum(idtmp[0])/len(idtmp[0]),:]
            x=xflt[0,0]+(xflt[-1,0]-xflt[0,0])*(z-xflt[0,1])/(xflt[-1,1]-xflt[0,1])
            tmploc.append(np.array([x,z]))
            # Event time
            tmpt.append((dat_log_rsf[k][event[i,0]-2]*dt_rsf[k]+dt[k])/3600)
    mag.append(tmpmag)
    xloc.append(tmploc)
    tevt.append(tmpt)
# Scatter plot of event locations on the fault trace, one subplot per
# forcing period; marker size scales with event magnitude, color with
# event order, and the legend carries the event times in hours.
plt.figure()
color = ['b','c','m','k']
plt.plot(xflt[:,0],xflt[:,1])
for k in [0,1,2,3]:
    ax=plt.subplot(2,2, k+1)
    plt.plot(xflt[[0,-1],0],xflt[[0,-1],1])
    scatplt=[]; scatlab=[]
    for i in range(len(mag[k])):
        scatplt.append(plt.scatter(xloc[k][i][0],xloc[k][i][1],marker='o',s=mag[k][i]*8,c=color[i]))
        # Only the first subplot/event gets full axis labels and titles.
        if k==0 and i==0:
            lab = 't= %0.1f' %(tevt[k][i])+' hr'
            plt.ylabel('z [km]')
            tit = 'period %d' %(2**(k+1))+' days'
            plt.xlabel('x [km]')
        else:
            lab = '%0.1f' %(tevt[k][i])
            tit = '%d' %(2**(k+1))
        if k!=0:
            # Hide ticks on the secondary subplots.
            plt.tick_params(
                axis='both',
                which='both',
                bottom='off',
                left='off',
                labelbottom='off',
                labelleft='off')
        scatlab.append(lab)
    ax.set_xlim([-.05,.05])
    ax.set_ylim([-2.08,-2.0])
    plt.xticks(np.arange(-.05,.06, .05))
    plt.yticks(np.arange(-2.08,-1.99, .025))
    plt.gca().set_aspect('equal', adjustable='box')
    plt.legend(scatplt,
               scatlab,
               scatterpoints=1,
               loc='lower right',
               ncol=1,
               fontsize=12)
    plt.title(tit)
plt.tight_layout()
plt.savefig(name_sol+'_evt.png')
# Stress ratio plot
# Time history of tau/sigma_n at the mid-fault node for each forcing period.
plt.figure()
ax=plt.subplot()
for k in [0,1,2,3]:
    flt=fcoord[k][:,1]
    idx=np.argsort(flt)
    dat_plt=dat_fqs[k][:,idx,:]
    n_plt=len(dat_plt[:,0,0])
    id_plt=int(len(dat_plt[0,:,0])/2)  # middle fault node
    flt=flt[idx]
    depth=flt[id_plt]
    # Column 0 is shear traction, column 1 normal traction (sign flipped).
    yplt=-dat_plt[:,id_plt,0]/dat_plt[:,id_plt,1]
    xplt=dat_log_rsf[k]*dt_rsf[k]/3600.  # step index -> hours
    if k==0:
        lab='period '+'%d' %(2**(k+1))+' days'
    else:
        lab='%d' %(2**(k+1))
    plt.plot(xplt,yplt[1:-1],label=lab)
plt.legend(loc='lower left',prop={'size':12})
plt.title('depth = '+'%0.1f' %(depth) +' [km]')
ax.set_xlim([0,1728])
plt.xlabel('time [hr]')
plt.ylabel(r'$\tau/\sigma_n$')
plt.savefig(name_sol+'_mu.png')
# mu vs volume
# Plot tau/sigma_n at the mid-fault node against cumulative injected
# volume; injection is a square wave (250 units/day for the first half of
# each forcing period, zero otherwise), starting after a 24 h delay.
plt.figure()
ax=plt.subplot()
for k in [0,1,2,3]:
    flt=fcoord[k][:,1]
    idx=np.argsort(flt)
    dat_plt=dat_fqs[k][:,idx,:]
    n_plt=len(dat_plt[:,0,0])
    id_plt=int(len(dat_plt[0,:,0])/2)  # middle fault node
    flt=flt[idx]
    depth=flt[id_plt]
    yplt=-dat_plt[:,id_plt,0]/dat_plt[:,id_plt,1]
    xplt=dat_log_rsf[k]*dt_rsf[k]/3600.
    vol=np.zeros(len(xplt),dtype=float)
    vplt=np.zeros(len(xplt),dtype=float)
    T=2.**(k+1)*3600*24  # forcing period in seconds
    for i in range(len(xplt)):
        t=xplt[i]*3600-24*3600.  # seconds since injection start
        if t%T<T/2. and t>0.:
            flux=250.
        else:
            flux=0.
        vol[i]=flux/24./3600.  # per-second injection rate
    # Cumulative injected volume up to each sample.
    # NOTE(review): this is O(n^2); np.cumsum(vol)*dt[k] is equivalent.
    for i in range(len(xplt)):
        vplt[i]=sum(vol[:i+1])*dt[k]
    if k==0:
        lab='period '+'%d' %(2**(k+1))+' days'
    else:
        lab='%d' %(2**(k+1))
    #plt.plot(xplt,vplt,label=lab)
    plt.plot(vplt,yplt[2:],label=lab)
#plt.legend(loc=3,prop={'size':12})
#plt.title('depth = '+'%0.1f' %(depth) +' [km]')
plt.xlabel(r'volume [m$^3$/m]')
plt.ylabel(r'$\tau/\sigma_n$')
ax.set_xlim([0,9000])
plt.savefig(name_sol+'_muv.png')
| StarcoderdataPython |
12820258 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: urls.py
Description :
Author : Afa
date: 2017/4/13
-------------------------------------------------
Change Activity:
2017/4/13:
-------------------------------------------------
"""
__author__ = 'Afa'
from blog import views
from django.urls import path
# URL routes for the blog app; the `name` values are used for reverse()
# lookups in views and templates.
urlpatterns = [
    path('', views.index, name='index'),
    path('list/', views.blog_list, name='list'),
    path('tag/<str:name>/', views.tag, name='tag'),
    path('category/<str:name>/', views.category, name='category'),
    path('detail/<int:pk>/', views.detail, name='detail'),
    path('archive/', views.archive, name='archive'),
    path('search/', views.search, name='search'),
    path('message/', views.message, name='message'),
    path('getComment/', views.get_comment, name='get_comment'),
]
| StarcoderdataPython |
9616440 | """
Author: <NAME> (https://github.com/ktxlh)
"""
##############################################################################
# Configurations #
##############################################################################
# See Piazza URL for your IDs: https://piazza.com/class/<class_id>?cid=<cid>
class_id = "ky3a26n35u1bv"
cids = [54, 59, 62, 63]
output_fname = "Reading Responses 03-02 to 03-16.csv"
##############################################################################
from piazza_api import Piazza
import pandas as pd
# Log in (interactive credential prompt) and open the target Piazza class.
p = Piazza()
p.user_login()
class_p = p.network(class_id)
# Keep only named students; map user id -> display name.
users = class_p.get_all_users()
students = list(filter(lambda x: x['role'] == 'student', users))
students = list(filter(lambda x: len(x['name']) > 0, students))
student_uid_map = {x['id']: x['name'] for x in students}
# Deduplicate names, then sort by full name and (stably) by last name.
names = list(set(map(lambda x: x['name'], students))) # deduplicate
names.sort(key=lambda x: x) # sort by full name
names.sort(key=lambda x: x.split()[-1]) # sort by last name
name_idx_map = {x: i for i, x in enumerate(names)}
df = pd.DataFrame({'names': names})
sum_scores = [0 for _ in range(len(df))]
# One column per reading-response post: 1 point for answering,
# +1 per instructor (admin) endorsement of the answer.
for cid in cids:
    post = class_p.get_post(cid)
    subject = post['history'][-1]['subject']
    scores = [0 for _ in range(len(df))]
    for child in post["children"]:
        idx = name_idx_map[student_uid_map[child['uid']]]
        admin_endorses = list(filter(lambda x: x['admin'], child['tag_good']))
        scores[idx] = len(admin_endorses) + 1
        sum_scores[idx] += len(admin_endorses) + 1
    df[subject] = scores
df['sum'] = sum_scores
df.to_csv(output_fname, header=True, index=False)
| StarcoderdataPython |
4831016 | <filename>targeneintersect/targeneintersect.py
# -*- coding: utf-8 -*-
"""Main module."""
import pybedtools
def target_gene_intersect(genes, dataframe):
    """Intersect *dataframe* intervals with *genes* using pybedtools.

    Returns the intersection as a DataFrame keeping the gene-name column
    (column 3 of the gene BED) followed by every column echoed from
    *dataframe* (wb=True appends the original B-file fields).
    """
    gene_bed = pybedtools.BedTool(genes)
    interval_bed = pybedtools.BedTool.from_dataframe(dataframe)
    hits = gene_bed.intersect(interval_bed, wb=True).saveas().to_dataframe()
    # Column 3 is the gene name; columns 6.. are the echoed dataframe fields.
    selected_columns = [3] + list(range(6, 6 + dataframe.shape[1]))
    return hits.iloc[:, selected_columns]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.