content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# k_insertion.py
# An implementation of the k-insertion attack where the attacker adds k data
# points to the model
# Matthew Sedam
from adlib.adversaries.adversary import Adversary
from adlib.utils.common import fuzz_matrix
from data_reader.binary_input import Instance
from data_reader.real_input import RealFeatureVector
import math
import multiprocessing as mp
import numpy as np
import time
from copy import deepcopy
from typing import List, Dict
class KInsertion(Adversary):
"""
Performs a k-insertion attack where the attacked data is the original data
plus k feature vectors designed to induce the most error in poison_instance.
"""
def __init__(self, learner, poison_instance, alpha=1e-8, beta=0.1,
decay=-1, eta=0.9, max_iter=125, number_to_add=10,
verbose=False):
"""
:param learner: the trained learner
:param poison_instance: the instance in which to induce the most error
:param alpha: convergence condition (diff <= alpha)
:param beta: the learning rate
:param decay: the decay rate
:param eta: the momentum percentage
:param max_iter: the maximum number of iterations
:param number_to_add: the number of new instances to add
:param verbose: if True, print the feature vector and gradient for each
iteration
"""
Adversary.__init__(self)
self.learner = deepcopy(learner)
self.poison_instance = poison_instance
self.alpha = alpha
self.beta = beta
self.decay = self.beta / max_iter if decay < 0 else decay
self.eta = eta
self.max_iter = max_iter
self.orig_beta = beta
self.number_to_add = number_to_add
self.verbose = verbose
self.instances = None
self.orig_instances = None
self.fvs = None # feature vectors
self.labels = None # labels
self.x = None # The feature vector of the instance to be added
self.y = None # x's label
self.inst = None
self.kernel = self._get_kernel()
self.kernel_derivative = self._get_kernel_derivative()
self.z_c = None
self.matrix = None
self.poison_loss_before = None
self.poison_loss_after = None
np.set_printoptions(threshold=0)
def attack(self, instances) -> List[Instance]:
"""
Performs a k-insertion attack
:param instances: the input instances
:return: the attacked instances
"""
if len(instances) == 0:
raise ValueError('Need at least one instance.')
self.orig_instances = deepcopy(instances)
self.instances = self.orig_instances
self.learner.training_instances = self.instances
self._calculate_constants()
learner = self.learner.model.learner
learner.fit(self.fvs, self.labels)
self.poison_loss_before = self._calc_inst_loss(self.poison_instance)
for k in range(self.number_to_add):
print()
print('###################################################', end='')
print('################')
self._generate_x_y_and_inst()
self.beta = self.orig_beta
# Main learning loop for one insertion
old_x = deepcopy(self.x)
fv_dist = 0.0
grad_norm = 0.0
uv_norm = 0.0
iteration = 0
old_update_vector = 0.0
while (iteration == 0 or (fv_dist > self.alpha and
iteration < self.max_iter)):
print('Iteration: ', iteration, ' - FV distance: ', fv_dist,
' - gradient norm: ', grad_norm, ' - UV norm: ', uv_norm,
' - beta: ', self.beta, sep='')
begin = time.time()
# Train with newly generated instance
self.instances.append(self.inst)
self.learner.training_instances = self.instances
self.fvs = self.fvs.tolist()
self.fvs.append(self.x)
self.fvs = np.array(self.fvs)
self.labels = self.labels.tolist()
self.labels.append(self.y)
self.labels = np.array(self.labels)
learner.fit(self.fvs, self.labels)
# Gradient descent with momentum
gradient = self._calc_gradient()
# If gradient is too large, only move a very small amount in its
# direction.
old_grad_norm = grad_norm
grad_norm = np.linalg.norm(gradient)
if grad_norm >= 2 * old_grad_norm and iteration > 0:
reduce_val = np.max(abs(gradient)) / 2
reduce_val = 1.0 if reduce_val <= 0 else reduce_val
gradient /= reduce_val
if self.verbose:
print('\nGradient:\n', gradient, sep='')
update_vector = (self.eta * old_update_vector +
(1 - self.eta) * gradient)
old_uv_norm = uv_norm
uv_norm = np.linalg.norm(update_vector)
if uv_norm >= 2 * old_uv_norm and iteration > 0:
reduce_val = np.max(abs(update_vector)) / 2
reduce_val = 1.0 if reduce_val <= 0 else reduce_val
update_vector /= reduce_val
if self.verbose:
print('\nUpdate Vector:\n', update_vector, sep='')
self.x -= self.beta * update_vector
self.x = np.array(list(map(lambda x: 0.0 if x < 0.0 else x,
self.x)), dtype='float64')
if self.verbose:
print('\nFeature vector:\n', self.x, '\n', sep='')
print('Max gradient value:', np.max(gradient), '- Min',
'gradient value:', np.min(gradient))
print('Max UV value:', np.max(update_vector), '- Min',
'UV value:', np.min(update_vector))
print('Max FV value:', np.max(self.x), '- Min FV value:',
np.min(self.x))
print('Label:', self.y, '\n')
self._generate_inst()
self.instances = self.instances[:-1]
self.fvs = self.fvs[:-1]
self.labels = self.labels[:-1]
fv_dist = np.linalg.norm(self.x - old_x)
old_x = deepcopy(self.x)
self.beta *= 1 / (1 + self.decay * iteration)
old_update_vector = deepcopy(update_vector)
end = time.time()
print('TIME: ', end - begin, 's', sep='')
iteration += 1
print('Iteration: FINAL - FV distance: ', fv_dist, ' - alpha: ',
self.alpha, ' - beta: ', self.beta, sep='')
print('Number added so far: ', k + 1, '\n', sep='')
# Add the newly generated instance and retrain with that dataset
self.instances.append(self.inst)
self.learner.training_instances = self.instances
self.learner.train()
self._calculate_constants()
print('###################################################', end='')
print('################')
print()
self.poison_loss_after = self._calc_inst_loss(self.poison_instance)
return self.instances
def _calculate_constants(self):
"""
Calculates constants for the gradient descent loop
"""
# Calculate feature vectors
self.fvs = []
for i in range(len(self.instances)):
fv = self.instances[i].get_feature_vector().get_csr_matrix()
fv = np.array(fv.todense().tolist()).flatten()
self.fvs.append(fv)
self.fvs = np.array(self.fvs, dtype='float64')
# Calculate labels
self.labels = []
for inst in self.instances:
self.labels.append(inst.get_label())
self.labels = np.array(self.labels)
def _calc_inst_loss(self, inst: Instance):
"""
Calculates the logistic loss for one instance
:param inst: the instance
:return: the logistic loss
"""
fv = inst.get_feature_vector().get_csr_matrix()
fv = np.array(fv.todense().tolist()).flatten()
# reshape is for the decision function when inputting only one sample
loss = self.learner.model.learner.decision_function(fv.reshape(1, -1))
loss *= -1 * inst.get_label()
loss = math.log(1 + math.exp(loss))
return loss
def _generate_x_y_and_inst(self):
"""
Generates self.x, self.y, and self.inst
"""
self.x = self.poison_instance.get_feature_vector().get_csr_matrix()
self.x = np.array(self.x.todense().tolist(), dtype='float64').flatten()
self.x += abs(np.random.normal(0, 0.00001, len(self.x)))
self.y = -1 * self.poison_instance.get_label()
self._generate_inst()
def _generate_inst(self):
"""
:return: a properly generated Instance that has feature vector self.x
and label self.y
"""
indices = []
data = []
for i, val in enumerate(self.x):
if val != 0:
indices.append(i)
data.append(val)
# Generate new instance
fv = RealFeatureVector(len(self.x), indices, data)
self.inst = Instance(self.y, fv)
def _calc_gradient(self):
"""
:return: the calculated gradient, an np.ndarray
"""
result = self._solve_matrix()
self.z_c = result[0]
self.matrix = result[1]
size = self.instances[0].get_feature_count()
pool = mp.Pool(mp.cpu_count())
gradient = list(pool.map(self._calc_grad_helper, range(size)))
pool.close()
pool.join()
gradient = np.array(gradient, dtype='float64')
return gradient
def _calc_grad_helper(self, i):
"""
Helper function for gradient. Calculates one partial derivative.
:param i: determines which partial derivative
:return: the partial derivative
"""
current = 0 # current partial derivative
vector = [0]
for j in self.learner.model.learner.support_:
vector.append(
self._Q(self.instances[j], self.inst, derivative=True, k=i))
vector = np.array(vector)
solution = self.matrix.dot(vector)
partial_b_partial_x_k = solution[0]
partial_z_s_partial_x_k = solution[1:]
s_v_indices = self.learner.model.learner.support_.tolist()
for j in range(len(self.orig_instances)):
if j in self.learner.model.learner.support_:
q_i_t = self._Q(self.orig_instances[j], self.inst)
partial_z_i_partial_x_k = partial_z_s_partial_x_k[
s_v_indices.index(j)]
current += q_i_t * partial_z_i_partial_x_k
current += (self._Q(self.instances[-1], self.inst, True, i) *
self.z_c)
if len(self.instances) in self.learner.model.learner.support_:
current += (self._Q(self.instances[-1], self.inst) *
partial_z_s_partial_x_k[-1])
current += self.inst.get_label() * partial_b_partial_x_k
return current
def _solve_matrix(self):
"""
:return: z_c, matrix for derivative calculations
Note: I tried using multiprocessing Pools, but these were slower than
using the built-in map function.
"""
learner = self.learner.model.learner
size = learner.n_support_[0] + learner.n_support_[1] + 1 # binary
matrix = np.full((size, size), 0.0)
if len(self.instances) - 1 not in learner.support_: # not in S
if self.learner.predict(self.inst) != self.inst.get_label(): # in E
z_c = learner.C
else: # in R, z_c = 0, everything is 0
return 0.0, matrix
else: # in S
# Get index of coefficient
index = learner.support_.tolist().index(len(self.instances) - 1)
z_c = learner.dual_coef_.flatten()[index]
y_s = []
for i in learner.support_:
y_s.append(self.instances[i].get_label())
y_s = np.array(y_s)
q_s = []
for i in range(size - 1):
values = list(map(
lambda idx: self._Q(self.instances[learner.support_[i]],
self.instances[learner.support_[idx]]),
range(size - 1)))
q_s.append(values)
q_s = np.array(q_s)
for i in range(1, size):
matrix[0][i] = y_s[i - 1]
matrix[i][0] = y_s[i - 1]
for i in range(1, size):
for j in range(1, size):
matrix[i][j] = q_s[i - 1][j - 1]
try:
matrix = np.linalg.inv(matrix)
except np.linalg.linalg.LinAlgError:
print('SINGULAR MATRIX ERROR')
matrix = fuzz_matrix(matrix)
matrix = np.linalg.inv(matrix)
matrix = -1 * z_c * matrix
return z_c, matrix
def _Q(self, inst_1: Instance, inst_2: Instance, derivative=False, k=-1):
"""
Calculates Q_ij or partial Q_ij / partial x_k
:param inst_1: the first instance
:param inst_2: the second instance
:param derivative: True -> calculate derivative, False -> calculate Q
:param k: determines which derivative to calculate
:return: Q_ij or the derivative where i corresponds to inst_1 and j
corresponds to inst_2
"""
if inst_1.get_feature_count() != inst_2.get_feature_count():
raise ValueError('Feature vectors need to have same length.')
fvs = []
for i in range(2):
if i == 0:
inst = inst_1
else:
inst = inst_2
fvs.append(inst.get_feature_vector().get_csr_matrix())
fvs[i] = np.array(fvs[i].todense().tolist()).flatten()
if derivative:
ret_val = self.kernel_derivative(fvs[0], fvs[1], k)
else:
ret_val = self.kernel(fvs[0], fvs[1])
return inst_1.get_label() * inst_2.get_label() * ret_val
def _kernel_linear(self, fv_1: np.ndarray, fv_2: np.ndarray):
"""
Returns the value of the specified kernel function
:param fv_1: feature vector 1 (np.ndarray)
:param fv_2: feature vector 2 (np.ndarray)
:return: the value of the specified kernel function
"""
if len(fv_1) != len(fv_2):
raise ValueError('Feature vectors need to have same length.')
return fv_1.dot(fv_2)
def _kernel_derivative_linear(self, fv_1: np.ndarray,
fv_2: np.ndarray, k: int):
"""
Returns the value of the derivative of the specified kernel function
with fv_2 being the variable (i.e. K(x_i, x_c), finding gradient
evaluated at x_c
:param fv_1: fv_1: feature vector 1 (np.ndarray)
:param fv_2: fv_2: feature vector 2 (np.ndarray)
:param k: which partial derivative (0-based indexing, int)
:return: the value of the derivative of the specified kernel function
"""
if len(fv_1) != len(fv_2) or k < 0 or k >= len(fv_1):
raise ValueError('Feature vectors need to have same '
'length and k must be a valid index.')
return fv_1[k]
def _kernel_poly(self, fv_1: np.ndarray, fv_2: np.ndarray):
"""
Returns the value of the specified kernel function
:param fv_1: feature vector 1 (np.ndarray)
:param fv_2: feature vector 2 (np.ndarray)
:return: the value of the specified kernel function
"""
if len(fv_1) != len(fv_2):
raise ValueError('Feature vectors need to have same length.')
return ((self.learner.gamma * fv_1.dot(fv_2) + self.learner.coef0) **
self.learner.degree)
def _kernel_derivative_poly(self, fv_1: np.ndarray,
fv_2: np.ndarray, k: int):
"""
Returns the value of the derivative of the specified kernel function
with fv_2 being the variable (i.e. K(x_i, x_c), finding gradient
evaluated at x_c
:param fv_1: fv_1: feature vector 1 (np.ndarray)
:param fv_2: fv_2: feature vector 2 (np.ndarray)
:param k: which partial derivative (0-based indexing, int)
:return: the value of the derivative of the specified kernel function
"""
if len(fv_1) != len(fv_2) or k < 0 or k >= len(fv_1):
raise ValueError('Feature vectors need to have same '
'length and k must be a valid index.')
return (fv_1[k] * self.learner.degree *
self.learner.gamma *
((self.learner.gamma * fv_1.dot(fv_2) + self.learner.coef0) **
(self.learner.degree - 1)))
def _kernel_rbf(self, fv_1: np.ndarray, fv_2: np.ndarray):
"""
Returns the value of the specified kernel function
:param fv_1: feature vector 1 (np.ndarray)
:param fv_2: feature vector 2 (np.ndarray)
:return: the value of the specified kernel function
"""
if len(fv_1) != len(fv_2):
raise ValueError('Feature vectors need to have same length.')
norm = np.linalg.norm(fv_1 - fv_2) ** 2
return math.exp(-1 * self.learner.gamma * norm)
def _kernel_derivative_rbf(self, fv_1: np.ndarray,
fv_2: np.ndarray, k: int):
"""
Returns the value of the derivative of the specified kernel function
with fv_2 being the variable (i.e. K(x_i, x_c), finding gradient
evaluated at x_c
:param fv_1: fv_1: feature vector 1 (np.ndarray)
:param fv_2: fv_2: feature vector 2 (np.ndarray)
:param k: which partial derivative (0-based indexing, int)
:return: the value of the derivative of the specified kernel function
"""
if len(fv_1) != len(fv_2) or k < 0 or k >= len(fv_1):
raise ValueError('Feature vectors need to have same '
'length and k must be a valid index.')
return (self._kernel_rbf(fv_1, fv_2) * 2 *
self.learner.gamma * (fv_1[k] - fv_2[k]))
def _kernel_sigmoid(self, fv_1: np.ndarray, fv_2: np.ndarray):
"""
Returns the value of the specified kernel function
:param fv_1: feature vector 1 (np.ndarray)
:param fv_2: feature vector 2 (np.ndarray)
:return: the value of the specified kernel function
"""
if len(fv_1) != len(fv_2):
raise ValueError('Feature vectors need to have same length.')
inside = self.learner.gamma * fv_1.dot(fv_2) + self.learner.coef0
return math.tanh(inside)
def _kernel_derivative_sigmoid(self, fv_1: np.ndarray,
fv_2: np.ndarray, k: int):
"""
Returns the value of the derivative of the specified kernel function
with fv_2 being the variable (i.e. K(x_i, x_c), finding gradient
evaluated at x_c
:param fv_1: fv_1: feature vector 1 (np.ndarray)
:param fv_2: fv_2: feature vector 2 (np.ndarray)
:param k: which partial derivative (0-based indexing, int)
:return: the value of the derivative of the specified kernel function
"""
if len(fv_1) != len(fv_2) or k < 0 or k >= len(fv_1):
raise ValueError('Feature vectors need to have same '
'length and k must be a valid index.')
inside = self.learner.gamma * fv_1.dot(fv_2) + self.learner.coef0
return self.learner.gamma * fv_1[k] / (math.cosh(inside) ** 2)
def _get_kernel(self):
"""
:return: the appropriate kernel function
"""
if self.learner.model.learner.kernel == 'linear':
return self._kernel_linear
elif self.learner.model.learner.kernel == 'poly':
return self._kernel_poly
elif self.learner.model.learner.kernel == 'rbf':
return self._kernel_rbf
elif self.learner.model.learner.kernel == 'sigmoid':
return self._kernel_sigmoid
else:
raise ValueError('No matching kernel function found.')
def _get_kernel_derivative(self):
"""
:return: the appropriate kernel derivative function
"""
if self.learner.model.learner.kernel == 'linear':
return self._kernel_derivative_linear
elif self.learner.model.learner.kernel == 'poly':
return self._kernel_derivative_poly
elif self.learner.model.learner.kernel == 'rbf':
return self._kernel_derivative_rbf
elif self.learner.model.learner.kernel == 'sigmoid':
return self._kernel_derivative_sigmoid
else:
raise ValueError('No matching kernel function found.')
| [
2,
479,
62,
28463,
295,
13,
9078,
198,
2,
1052,
7822,
286,
262,
479,
12,
28463,
295,
1368,
810,
262,
15250,
6673,
479,
1366,
198,
2,
2173,
284,
262,
2746,
198,
2,
9308,
22710,
321,
198,
198,
6738,
512,
8019,
13,
324,
690,
3166,
... | 2.068555 | 10,342 |
pytest_plugins = [
'mypy.test.data',
'pytest_cov',
]
| [
9078,
9288,
62,
37390,
796,
685,
198,
220,
220,
220,
705,
1820,
9078,
13,
9288,
13,
7890,
3256,
198,
220,
220,
220,
705,
9078,
9288,
62,
66,
709,
3256,
198,
60,
198
] | 1.90625 | 32 |
from gtts import gTTS
import os
mytext = 'write here'
language = 'es'
myobj = gTTS(text=mytext, lang=language, slow=False)
myobj.save("audio.mp3")
os.system("mpg321 audio.mp3") | [
6738,
308,
83,
912,
1330,
308,
51,
4694,
201,
198,
201,
198,
11748,
28686,
201,
198,
201,
198,
1820,
5239,
796,
705,
13564,
994,
6,
201,
198,
201,
198,
16129,
796,
705,
274,
6,
201,
198,
201,
198,
1820,
26801,
796,
308,
51,
4694,
... | 2.204545 | 88 |
#!/usr/bin/env python
# Script for testing upload a file to Fedora to get an upload id for use as
# a datastream location.
# Example of using a callback method on the upload api call.
# Requires progressbar
import argparse
import base64
import progressbar
import pycurl
import tempfile
from eulfedora.server import Repository
from eulfedora.util import force_bytes, force_text
from test import testsettings
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Upload a file to fedora for use as datastream content')
parser.add_argument('pid', metavar='pid',
help='pid to download from')
parser.add_argument('ds', metavar='dsid',
help='id of datastream to download')
parser.add_argument('--curl', action='store_true',
help='upload with pycurl')
args = parser.parse_args()
if args.curl:
curl_download_file(args.pid, args.ds)
else:
download_file(args.pid, args.ds) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
12327,
329,
4856,
9516,
257,
2393,
284,
32245,
284,
651,
281,
9516,
4686,
329,
779,
355,
198,
2,
257,
4818,
459,
1476,
4067,
13,
198,
2,
17934,
286,
1262,
257,
23838,
2446,
... | 2.670213 | 376 |
import factory
from django.contrib.auth.models import User
| [
11748,
8860,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
628,
628
] | 3.647059 | 17 |
#!/usr/bin/env python
#
# Copyright (c) 2018-2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Classes to handle Carla pedestrians
"""
import rospy
from derived_object_msgs.msg import Object
from shape_msgs.msg import SolidPrimitive
from nav_msgs.msg import Odometry
from carla_ros_bridge.actor import Actor
from carla_msgs.msg import CarlaWalkerControl
from carla import WalkerControl
class Walker(Actor):
"""
Actor implementation details for pedestrians
"""
def __init__(self, carla_actor, parent, communication, prefix=None):
"""
Constructor
:param carla_actor: carla walker actor object
:type carla_actor: carla.Walker
:param parent: the parent of this
:type parent: carla_ros_bridge.Parent
:param communication: communication-handle
:type communication: carla_ros_bridge.communication
:param prefix: the topic prefix to be used for this actor
:type prefix: string
"""
if not prefix:
prefix = "walker/{:03}".format(carla_actor.id)
super(Walker, self).__init__(carla_actor=carla_actor,
parent=parent,
communication=communication,
prefix=prefix)
self.classification = Object.CLASSIFICATION_PEDESTRIAN
self.classification_age = 0
self.control_subscriber = rospy.Subscriber(
self.get_topic_prefix() + "/walker_control_cmd",
CarlaWalkerControl, self.control_command_updated)
def control_command_updated(self, ros_walker_control):
"""
Receive a CarlaWalkerControl msg and send to CARLA
This function gets called whenever a ROS message is received via
'/carla/<role name>/walker_control_cmd' topic.
The received ROS message is converted into carla.WalkerControl command and
sent to CARLA.
:param ros_walker_control: current walker control input received via ROS
:type self.info.output: carla_ros_bridge.msg.CarlaWalkerControl
:return:
"""
walker_control = WalkerControl()
walker_control.direction.x = ros_walker_control.direction.x
walker_control.direction.y = -ros_walker_control.direction.y
walker_control.direction.z = ros_walker_control.direction.z
walker_control.speed = ros_walker_control.speed
walker_control.jump = ros_walker_control.jump
self.carla_actor.apply_control(walker_control)
def send_walker_msgs(self):
"""
Sends messages related to walker status
:return:
"""
odometry = Odometry(header=self.get_msg_header("map"))
odometry.child_frame_id = self.get_prefix()
odometry.pose.pose = self.get_current_ros_pose()
odometry.twist.twist = self.get_current_ros_twist()
self.publish_message(self.get_topic_prefix() + "/odometry", odometry)
def update(self, frame, timestamp):
"""
Function (override) to update this object.
On update walkers send:
- tf global frame
- object message
- marker message
:return:
"""
self.classification_age += 1
self.publish_transform(self.get_ros_transform())
self.publish_marker()
self.send_walker_msgs()
super(Walker, self).update(frame, timestamp)
def get_object_info(self):
"""
Function to send object messages of this walker
A derived_object_msgs.msg.Object is prepared to be published via '/carla/objects'
:return:
"""
walker_object = Object(header=self.get_msg_header("map"))
# ID
walker_object.id = self.get_id()
# Pose
walker_object.pose = self.get_current_ros_pose()
# Twist
walker_object.twist = self.get_current_ros_twist()
# Acceleration
walker_object.accel = self.get_current_ros_accel()
# Shape
walker_object.shape.type = SolidPrimitive.BOX
walker_object.shape.dimensions.extend([
self.carla_actor.bounding_box.extent.x * 2.0,
self.carla_actor.bounding_box.extent.y * 2.0,
self.carla_actor.bounding_box.extent.z * 2.0])
# Classification if available in attributes
if self.classification != Object.CLASSIFICATION_UNKNOWN:
walker_object.object_classified = True
walker_object.classification = self.classification
walker_object.classification_certainty = 1.0
self.classification_age += 1
walker_object.classification_age = self.classification_age
return walker_object
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
198,
2,
15069,
357,
66,
8,
2864,
12,
23344,
8180,
10501,
198,
2,
198,
2,
770,
670,
318,
11971,
739,
262,
2846,
286,
262,
17168,
5964,
13,
198,
2,
1114,
257,
4866,
11,
766... | 2.378338 | 2,022 |
from django.shortcuts import render,HttpResponse,render_to_response,HttpResponseRedirect
from django.contrib import auth
from vendorprofile.models import Teacher
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
import sys
import os
import datetime
import time
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
loc = os.path.join(__location__, 'media/')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, 'media')
@isLogin
@isLogin
#-----------------------------------------------------------------------#
#-----Admin -------------#
@login_required(login_url='/admin/login/')
@login_required(login_url='/admin/login/')
@login_required(login_url='/admin/login/')
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
43481,
31077,
11,
13287,
62,
1462,
62,
26209,
11,
43481,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6284,
198,
6738,
18371,
13317,
13,
27530,
1330,
220,
32019,
... | 3.069182 | 318 |
import os
import sys
from concurrent.futures.thread import ThreadPoolExecutor
import threading
import requests
from multissh.connection import Connection
from multissh.utils import parse_credentials, print_error, print_str, get_input
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
24580,
13,
69,
315,
942,
13,
16663,
1330,
14122,
27201,
23002,
38409,
198,
11748,
4704,
278,
198,
198,
11748,
7007,
198,
198,
6738,
1963,
747,
71,
13,
38659,
1330,
26923,
198,
6738,
1963,
747,... | 3.71875 | 64 |
from django.test import TestCase, Client
from ...models import City, Country | [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
20985,
198,
6738,
2644,
27530,
1330,
2254,
11,
12946
] | 4.222222 | 18 |
# Generated by Django 3.0.6 on 2020-05-27 15:51
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
2713,
12,
1983,
1315,
25,
4349,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
498... | 3.089286 | 56 |
import joblib
import os
import io
import sys
import streamlit as st
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import plotly.figure_factory as ff
import cv2
import collections
from collections import defaultdict
import operator
from skimage.io import imread, imshow
from skimage import feature
from PIL import Image as IMG
from scipy.stats import itemfreq
# Import Functions for Image Features
from brightness_dullness import color_analysis
from brightness_dullness import perform_color_analysis
from uniformity import uniformity
from dominant_color_clustering import dominant_color
from average_color import average_color
from blurriness import blurriness
from img_size import compression_size
from img_size import get_dimensions
st.set_option('deprecation.showfileUploaderEncoding', False)
#B eginning of app text
st.markdown("<h1 style='text-align: center; color: black;'>Kick-Into-Traction</h1>", unsafe_allow_html=True)
st.write('### First impressions make a difference.')
st.write('')
st.write('### Crowdfunding campaigns can fail due to using poor images.')
st.write('')
st.write('Kick-Into-Traction is here to help your campaign get momentum quickly by focusing on what people see first: **The Cover Photo**')
st.write('')
st.write('### Have your cover photo analyzed for red flags.')
# Image uploader
uploaded_file = st.file_uploader("Submit Your Campaign Cover Image Here:")
# Read in saved logistic regression model
#load_logreg = pickle.load(open('kit_logreg.pkl', 'rb'))
# Code to execute once file is uploaded
if uploaded_file is not None:
image = IMG.open(uploaded_file)
saved_img = image.save('saved_img.jpeg')
st.image(image, caption='Uploading Successful.', use_column_width=True)
st.write("")
#st.write("Analyzing...")
# Do calculations
dullness = perform_color_analysis(uploaded_file, 'black')
brightness = perform_color_analysis(uploaded_file, 'white')
uniformity = uniformity(uploaded_file)
blurriness = blurriness('saved_img.jpeg')
compression_size = compression_size('saved_img.jpeg')
dc = dominant_color('saved_img.jpeg')
# Normalize RGB values from 256 to 1
dom_red = np.round(dc[0]/255,2)
dom_green = np.round(dc[1]/255,2)
dom_blue = np.round(dc[2]/255,2)
# Normalize RGB values from 256 to 1
ac = average_color('saved_img.jpeg')
ave_red = np.round(ac[0]/255,2)
ave_green = np.round(ac[1]/255,2)
ave_blue = np.round(ac[2]/255,2)
#print(x)
# Populate DataFrame
data1 = {
'compression_size': compression_size,
}
data2 = {
'dullness': dullness,
'brightness': brightness,
'uniformity': uniformity,
'blurriness': blurriness,
}
data3 = {
'dom red' : dom_red,
'dom green' : dom_green,
'dom blue' : dom_blue,
'ave red' : ave_red,
'ave green' : ave_green,
'ave blue' : ave_blue
}
# Display the image data in three dataframe charts
st.write('## Your Image Features')
st.write('**Compression (bytes)**')
features1 = pd.DataFrame(data1, index=[0])
st.dataframe(features1)
st.write('**Structural Features (a.u.)**')
features2 = pd.DataFrame(data2, index=[0])
st.dataframe(features2)
st.write('**Colors (0-1)**')
features3 = pd.DataFrame(data3, index=[0])
st.dataframe(features3)
#
keys = ['dullness','brightness','uniformity','average_red','average_green',
'average_blue','dominant_red','dominant_green','dominant_blue','blurriness',
'compression_size']
values = np.array([dullness, brightness, uniformity, ave_red, ave_green, ave_blue,
dom_red, dom_green, dom_blue, blurriness,
compression_size])
dictionary = dict(zip(keys,values))
# Import model
modelname = 'kit_RFM_10_20_20.pkl'
model = joblib.load(modelname)
inp_data = values.reshape(1,-1)
pred = model.predict(inp_data)
st.write('')
if pred == 0:
st.write('### Our machine learning models predict that your campaign cover image will **NOT** make a good impression.')
else:
st.write('### Our machine learning models predict that your campaign cover image is sufficiently high in quality to make a good impression, but there is always room for improvement.')
st.write('')
st.write('## What can you do to improve?')
st.write('1.) The **2** most significant features are: Compression Size and Uniformity.')
st.write('2.) Look at how past successful campgain images stack up in comparison to yours!')
st.write('3.) If your image scores differently, consider post-processing improvements to augment the key features.')
# Suggestions
st.write('## Suggesions for Improving your Image')
st.write('')
#log_reg = 'logreg_10_2_20.pkl'
#pipe = joblib.load(log_reg)
# Import training data for recommnedation figures
features_table = 'final_features_df1.pkl'
df = joblib.load(features_table)
@st.cache
df = load_data()
# Separate the funded and unfunded projects
funded_projects = df[df['state'] == 1]
#failed_projects = df[df['state'] == 0]
# Figures for suggestions
# Compression Size
st.write('# Compression Size')
fig, ax = plt.subplots()
ax.hist(df['compression_size']*1e-6,bins = 1000)
l2 = ax.axvline(x=compression_size*1e-6, color='red', label='Your Image: '+str(np.round(compression_size*1e-6, 4))+' MB')
ax.set_title('Successful Campaigns Histogram - Compression Size')
ax.set_xlim([0,2])
ax.axes.yaxis.set_ticks([])
ax.legend(handles=[l2])
ax.set_xlabel('Compression Size (MB)')
ax.set_ylabel('Relative Frequency')
st.pyplot(fig)
st.write('### **If compression is very low, your image is likely of poor quality.**')
st.write('')
# Uniformity
st.write('# Uniformity')
fig, ax = plt.subplots()
ax.hist(df['uniformity'],bins = 500)
l2 = ax.axvline(x=uniformity, color='red', label='Your Image: '+str(np.round(uniformity, 4))+' a.u.')
ax.set_title('Successful Campaigns Histogram - Uniformity')
ax.axes.yaxis.set_ticks([])
ax.legend(handles=[l2])
ax.set_xlabel('Uniformity (a.u.)')
ax.set_ylabel('Relative Frequency')
st.pyplot(fig)
st.write('### **If uniformity is very low, your image shows little pixel variation, has few edges and maybe uninteresting.**')
st.write('')
# Remove file created for openCV processing
os.remove('saved_img.jpeg')
| [
11748,
1693,
8019,
198,
11748,
28686,
198,
11748,
33245,
198,
11748,
25064,
198,
198,
11748,
4269,
18250,
355,
336,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
11748,
2603,
29487,
... | 2.531332 | 2,665 |
from typing import List
from itertools import product
if __name__ == '__main__':
seen_scales = []
diatonic_intervals = get_diatonic_intervals()
all_root_notes = Scale.get_all_tonic_notes()
print(f'Generating scales from {len(all_root_notes)} enharmonic root notes...')
for diatonic_interval in diatonic_intervals:
for note in all_root_notes:
# print(f'Producing scale with interval{diatonic_interval} @ {note}...')
try:
seen_scales.append(Scale(note, diatonic_interval))
except RuntimeError as e:
# Uncomment this to see scales that can't be produced with our enharmonically-equivalent pitch def.
# print(f'Scale is impossible: {str(e)}')
pass
print(f'Generated {len(seen_scales)} scales')
collisions = []
for scale1 in seen_scales:
for scale2 in seen_scales:
# Don't check a scale against itself
if scale1 == scale2:
continue
# Don't compare if we've already seen this scale combination before
if (scale1, scale2) in collisions or (scale2, scale1) in collisions:
break
if scale1.has_same_notes(scale2):
# print(f'Found collision!\nScale 1: {scale1}\nScale 2: {scale2}')
collisions.append((scale1, scale2))
print(f'Found {len(collisions)} collisions')
print(f'There exists {len(seen_scales) - len(collisions)} '
f'diatonic scales from the tonic notes: {", ".join(all_root_notes)}')
| [
6738,
19720,
1330,
7343,
198,
6738,
340,
861,
10141,
1330,
1720,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1775,
62,
1416,
2040,
796,
17635,
628,
220,
220,
220,
2566,
265,
9229,
62,
... | 2.304665 | 686 |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load(
"@rules_verilator//verilator/internal:versions.bzl",
_DEFAULT_VERSION = "DEFAULT_VERSION",
_version_info = "version_info",
)
verilator_repository = repository_rule(
_verilator_repository,
attrs = {
"version": attr.string(mandatory = True),
"_buildfile": attr.label(
default = Label("@rules_verilator//verilator/internal:verilator.BUILD"),
),
},
)
| [
2220,
7203,
31,
65,
41319,
62,
31391,
1003,
31391,
14,
11249,
62,
4299,
82,
14,
260,
7501,
25,
4023,
13,
65,
48274,
1600,
366,
4023,
62,
17474,
4943,
198,
2220,
7,
198,
220,
220,
220,
44212,
38785,
62,
332,
346,
1352,
1003,
332,
3... | 2.2891 | 211 |
from traffic.data import airports
airports.query(
'icao.str.match("[A-Z]{4}") and iata.notnull() '
'and type != "closed" and type != "small_airport" '
'and type != "heliport" and type != "seaplane_base" '
'and icao != "ZHZH"' # misleading identifier for this Canadian airport
).sort_values( # type: ignore
"icao"
).to_csv(
"airports.csv", index=False
)
| [
6738,
4979,
13,
7890,
1330,
22600,
198,
198,
958,
3742,
13,
22766,
7,
198,
220,
220,
220,
705,
3970,
78,
13,
2536,
13,
15699,
7203,
58,
32,
12,
57,
60,
90,
19,
92,
4943,
290,
1312,
1045,
13,
1662,
8423,
3419,
705,
198,
220,
220,... | 2.516556 | 151 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198
] | 1.833333 | 24 |
from .element import Element
class RadioButton(Element):
"""
Element class to represent HTML Radio Button
"""
| [
6738,
764,
30854,
1330,
11703,
628,
198,
4871,
8829,
21864,
7,
20180,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
11703,
1398,
284,
2380,
11532,
8829,
20969,
198,
220,
220,
220,
37227,
198
] | 3.542857 | 35 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from localflavor.stub import _
#: A list of Latvian municipalities and republican cities.
#: Identifiers based on ISO 3166-2:LV. https://en.wikipedia.org/wiki/ISO_3166-2:LV
MUNICIPALITY_CHOICES = (
# Republican cities (not part of any municipality)
('DGV', _('Daugavpils')),
('JEL', _('Jelgava')),
('JKB', _('Jēkabpils')),
('JUR', _('Jūrmala')),
('LPX', _('Liepāja')),
('REZ', _('Rēzekne')),
('RIX', _('Riga')),
('VMR', _('Valmiera')),
('VEN', _('Ventspils')),
# Municipalities
('001', _('Aglona municipality')),
('002', _('Aizkraukle municipality')),
('003', _('Aizpute municipality')),
('004', _('Aknīste municipality')),
('005', _('Aloja municipality')),
('006', _('Alsunga municipality')),
('007', _('Alūksne municipality')),
('008', _('Amata municipality')),
('009', _('Ape municipality')),
('010', _('Auce municipality')),
('011', _('Ādaži municipality')),
('012', _('Babīte municipality')),
('013', _('Baldone municipality')),
('014', _('Baltinava municipality')),
('015', _('Balvi municipality')),
('016', _('Bauska municipality')),
('017', _('Beverīna municipality')),
('018', _('Brocēni municipality')),
('019', _('Burtnieki municipality')),
('020', _('Carnikava municipality')),
('021', _('Cesvaine municipality')),
('022', _('Cēsis municipality')),
('023', _('Cibla municipality')),
('024', _('Dagda municipality')),
('025', _('Daugavpils municipality')),
('026', _('Dobele municipality')),
('027', _('Dundaga municipality')),
('028', _('Durbe municipality')),
('029', _('Engure municipality')),
('030', _('Ērgļi municipality')),
('031', _('Garkalne municipality')),
('032', _('Grobiņa municipality')),
('033', _('Gulbene municipality')),
('034', _('Iecava municipality')),
('035', _('Ikšķile municipality')),
('036', _('Ilūkste municipality')),
('037', _('Inčukalns municipality')),
('038', _('Jaunjelgava municipality')),
('039', _('Jaunpiebalga municipality')),
('040', _('Jaunpils municipality')),
('041', _('Jelgava municipality')),
('042', _('Jēkabpils municipality')),
('043', _('Kandava municipality')),
('044', _('Kārsava municipality')),
('045', _('Kocēni municipality')),
('046', _('Koknese municipality')),
('047', _('Krāslava municipality')),
('048', _('Krimulda municipality')),
('049', _('Krustpils municipality')),
('050', _('Kuldīga municipality')),
('051', _('Ķegums municipality')),
('052', _('Ķekava municipality')),
('053', _('Lielvārde municipality')),
('054', _('Limbaži municipality')),
('055', _('Līgatne municipality')),
('056', _('Līvāni municipality')),
('057', _('Lubāna municipality')),
('058', _('Ludza municipality')),
('059', _('Madona municipality')),
('060', _('Mazsalaca municipality')),
('061', _('Mālpils municipality')),
('062', _('Mārupe municipality')),
('063', _('Mērsrags municipality')),
('064', _('Naukšēni municipality')),
('065', _('Nereta municipality')),
('066', _('Nīca municipality')),
('067', _('Ogre municipality')),
('068', _('Olaine municipality')),
('069', _('Ozolnieki municipality')),
('070', _('Pārgauja municipality')),
('071', _('Pāvilosta municipality')),
('072', _('Pļaviņas municipality')),
('073', _('Preiļi municipality')),
('074', _('Priekule municipality')),
('075', _('Priekuļi municipality')),
('076', _('Rauna municipality')),
('077', _('Rēzekne municipality')),
('078', _('Riebiņi municipality')),
('079', _('Roja municipality')),
('080', _('Ropaži municipality')),
('081', _('Rucava municipality')),
('082', _('Rugāji municipality')),
('083', _('Rundāle municipality')),
('084', _('Rūjiena municipality')),
('085', _('Sala municipality')),
('086', _('Salacgrīva municipality')),
('087', _('Salaspils municipality')),
('088', _('Saldus municipality')),
('089', _('Saulkrasti municipality')),
('090', _('Sēja municipality')),
('091', _('Sigulda municipality')),
('092', _('Skrīveri municipality')),
('093', _('Skrunda municipality')),
('094', _('Smiltene municipality')),
('095', _('Stopiņi municipality')),
('096', _('Strenči municipality')),
('097', _('Talsi municipality')),
('098', _('Tērvete municipality')),
('099', _('Tukums municipality')),
('100', _('Vaiņode municipality')),
('101', _('Valka municipality')),
('102', _('Varakļāni municipality')),
('103', _('Vārkava municipality')),
('104', _('Vecpiebalga municipality')),
('105', _('Vecumnieki municipality')),
('106', _('Ventspils municipality')),
('107', _('Viesīte municipality')),
('108', _('Viļaka municipality')),
('109', _('Viļāni municipality')),
('110', _('Zilupe municipality')),
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
1179,
1604,
75,
5570,
13,
301,
549,
1330,
4808,
198,
198,
2,
25,
317,
1351,
286,
5476,
85,
... | 2.416869 | 2,063 |
from django.forms import ModelForm
from .models import Order, Customer
from django.contrib.auth.models import User
from .models import Commit
| [
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
764,
27530,
1330,
8284,
11,
22092,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
764,
27530,
1330,
35910,
628
] | 3.972222 | 36 |
"""
height_handler.py
=================
Manages height based on these rules:
* define height as new column: "height_above_surface"
* For land data:
- Use the mapping in Simon's table below from "observed_variable".
- If there are any records with an "observed_variable" not mentioned: set to 2 metres.
* For marine data:
- use value of "observation.observation_height_above_station_surface"
- This is done inside: restructure-marine.py
"""
import pandas as pd
# Land rules:
# CDM_code Variable_ name Observation_height_above_station_surface metres (m)
_land_rules = """
85 Temperature 2
36 Dew point temperature 2
58 Sea Level Pressure 2
57 Surface level Pressure 2
106 Wind Speed 10
107 Wind Direction 10
55 snow water equivalent 1
53 Snow depth 0
45 Fresh Snow 1
44 Precipitation 1
""".strip().split('\n')
land_rules = dict([(int(_.split('\t')[0]), int(_.split('\t')[2])) for _ in _land_rules])
if __name__ == '__main__':
test()
| [
37811,
198,
17015,
62,
30281,
13,
9078,
198,
4770,
28,
198,
198,
5124,
1095,
6001,
1912,
319,
777,
3173,
25,
628,
1635,
8160,
6001,
355,
649,
5721,
25,
366,
17015,
62,
29370,
62,
42029,
1,
628,
1635,
1114,
1956,
1366,
25,
198,
220,
... | 2.887906 | 339 |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import time
from nvflare.fuel.hci.client.fl_admin_api import FLAdminAPI
from nvflare.fuel.hci.client.fl_admin_api_constants import FLDetailKey
from nvflare.fuel.hci.client.fl_admin_api_spec import TargetType
if __name__ == "__main__":
main()
| [
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 3.32567 | 261 |
# -*- coding: utf-8 -*-
import fire
if __name__ == '__main__':
fire.Fire()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2046,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2046,
13,
13543,
3419,
198
] | 2.184211 | 38 |
import os
import sys
import argparse
from fairseq.models.roberta import RobertaModel
GLUE_DATA_DIR = '/ssd2/GLUE'
RAW_DATA_DIR = os.path.join(GLUE_DATA_DIR, 'glue_data')
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default="", type=str)
parser.add_argument("--checkpoint_name", default="checkpoint_best.pt", type=str)
parser.add_argument("--task_name", default="", type=str)
cmd = '--model_path /ssd2/GLUE_results/RTE/roberta_medium_32-16-0.0007/2e-05_32.round.2541887044 --task_name RTE'
args = parser.parse_args(cmd.split())
data_dir = os.path.join(GLUE_DATA_DIR, args.task_name + '-bin')
data_dir_raw = os.path.join(RAW_DATA_DIR, args.task_name)
roberta = RobertaModel.from_pretrained(
args.model_path,
checkpoint_file=args.checkpoint_name,
# data_name_or_path=f'{args.task_name}-bin'
data_name_or_path=data_dir
)
label_fn = lambda label: roberta.task.label_dictionary.string(
[label + roberta.task.label_dictionary.nspecial]
)
ncorrect, nsamples = 0, 0
roberta.cuda()
roberta.eval()
with open(os.path.join(data_dir_raw, 'dev.tsv')) as fin:
fin.readline()
for index, line in enumerate(fin):
tokens = line.strip().split('\t')
sent1, sent2, target = tokens[1], tokens[2], tokens[3]
tokens = roberta.encode(sent1, sent2)
prediction = roberta.predict('sentence_classification_head', tokens).argmax()
prediction_label = label_fn(prediction)
ncorrect += int(prediction_label == target)
nsamples += 1
print('| Accuracy: ', float(ncorrect)/float(nsamples)) | [
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
6738,
3148,
41068,
13,
27530,
13,
305,
4835,
64,
1330,
5199,
64,
17633,
628,
198,
8763,
8924,
62,
26947,
62,
34720,
796,
31051,
824,
67,
17,
14,
8763,
8924,
6,
198,
20530,... | 2.474763 | 634 |
import numpy as np
import xgboost as xgb
from imxgboost.weighted_loss import Weight_Binary_Cross_Entropy
from imxgboost.focal_loss import Focal_Binary_Loss
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, matthews_corrcoef
class imbalance_xgboost(BaseEstimator, ClassifierMixin):
"""Data in the form of [nData * nDim], where nDim stands for the number of features.
This wrapper would provide a Xgboost interface with sklearn estimiator structure, which could be stacked in other Sk pipelines
"""
def __init__(self, num_round=10, max_depth=10, eta=0.3, verbosity=1, objective_func='binary:logitraw',
eval_metric='logloss', booster='gbtree', special_objective=None, early_stopping_rounds=None, imbalance_alpha=None,
focal_gamma=None):
"""
Parameters to initialize a Xgboost estimator
:param num_round. The rounds we would like to iterate to train the model
:param max_depth. The maximum depth of the classification boosting, need to be specified
:param num_class. The number of classes for the classifier
:param eta Step. Size shrinkage used in update to prevents overfitting
:param verbosity. Set to '1' or '0' to determine if print the information during training. True is higly recommended
:param objective_func. The objective function we would like to optimize
:param eval_metric. The loss metrix. Note this is partially correlated to the objective function, and unfit loss function would lead to problematic loss
:param booster. The booster to be usde, can be 'gbtree', 'gblinear' or 'dart'.
:param imbalance_alpha. The \alpha value for imbalanced loss. Will make impact on '1' classes. Must have when special_objective 'weighted'
:param focal_gamma. The \gamma value for focal loss. Must have when special_objective 'focal'
"""
self.num_round = num_round
self.max_depth = max_depth
self.eta = eta
self.verbosity = verbosity
self.objective_func = objective_func
self.eval_metric = eval_metric
self.booster = booster
self.eval_list = []
self.boosting_model = 0
self.special_objective = special_objective
self.early_stopping_rounds = early_stopping_rounds
self.imbalance_alpha = imbalance_alpha
self.focal_gamma = focal_gamma
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2124,
70,
39521,
355,
2124,
22296,
198,
6738,
545,
87,
70,
39521,
13,
6551,
276,
62,
22462,
1330,
14331,
62,
33,
3219,
62,
21544,
62,
14539,
28338,
198,
6738,
545,
87,
70,
39521,
13,
69,
43... | 2.820658 | 881 |
import click
from agent.cli.prompt.pipeline.schemaless import SchemalessPrompter
| [
11748,
3904,
198,
198,
6738,
5797,
13,
44506,
13,
16963,
457,
13,
79,
541,
4470,
13,
1416,
4411,
282,
408,
1330,
1446,
4411,
282,
408,
24129,
42104,
628
] | 2.964286 | 28 |
from django.contrib import admin
from timing.models import *
admin.site.register(Experiment)
#admin.site.register(Gender)
#admin.site.register(Race)
#admin.site.register(Ethnicity)
#admin.site.register(CountryOfResidence)
#admin.site.register(EducationLevel)
#admin.site.register(Spirituality)
#admin.site.register(ReligiousAffiliation)
#admin.site.register(PoliticalIdentity)
#admin.site.register(Occupation)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
10576,
13,
27530,
1330,
1635,
198,
198,
28482,
13,
15654,
13,
30238,
7,
20468,
3681,
8,
198,
2,
28482,
13,
15654,
13,
30238,
7,
41394,
8,
198,
2,
28482,
13,
15654,
13,
... | 3.145038 | 131 |
# Generated by Django 2.1.4 on 2019-03-17 16:29
import app.core.helpers
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
19,
319,
13130,
12,
3070,
12,
1558,
1467,
25,
1959,
198,
198,
11748,
598,
13,
7295,
13,
16794,
364,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 40 |
import os
import bpy
import sys
import traceback
from pathlib import Path
from . communication import send_dict_as_json
from . environment import user_addon_directory, addon_directories
| [
11748,
28686,
198,
11748,
275,
9078,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
764,
6946,
1330,
3758,
62,
11600,
62,
292,
62,
17752,
198,
6738,
764,
2858,
1330,
2836,
62,
48078,
62,
34945... | 3.957447 | 47 |
import pytest
from pyupgrade import _fix_tokens
from pyupgrade import _imports_unicode_literals
@pytest.mark.parametrize(
('s', 'expected'),
(
('', False),
('import x', False),
('from foo import bar', False),
('x = 5', False),
('from __future__ import unicode_literals', True),
(
'"""docstring"""\n'
'from __future__ import unicode_literals',
True,
),
(
'from __future__ import absolute_import\n'
'from __future__ import unicode_literals\n',
True,
),
('from .__future__ import unicode_literals\n', False),
),
)
@pytest.mark.parametrize(
('s', 'min_version'),
(
# Syntax errors are unchanged
('(', (2, 7)),
# Without py3-plus, no replacements
("u''", (2, 7)),
# Regression: string containing newline
('"""with newline\n"""', (3,)),
pytest.param(
'def f():\n'
' return"foo"\n',
(3,),
id='Regression: no space between return and string',
),
),
)
@pytest.mark.parametrize(
('s', 'min_version', 'expected'),
(
# With py3-plus, it removes u prefix
("u''", (3,), "''"),
# Importing unicode_literals also cause it to remove it
(
'from __future__ import unicode_literals\n'
'u""\n',
(2, 7),
'from __future__ import unicode_literals\n'
'""\n',
),
),
)
| [
11748,
12972,
9288,
198,
198,
6738,
12972,
929,
9526,
1330,
4808,
13049,
62,
83,
482,
641,
198,
6738,
12972,
929,
9526,
1330,
4808,
320,
3742,
62,
46903,
1098,
62,
17201,
874,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
... | 1.957125 | 793 |
from .generation_dataset import GenDataset
from .recognition_dataset import ImageList, SeparateBatchSampler, SeparateImageList
| [
201,
198,
6738,
764,
20158,
62,
19608,
292,
316,
1330,
5215,
27354,
292,
316,
201,
198,
6738,
764,
26243,
653,
62,
19608,
292,
316,
1330,
7412,
8053,
11,
8621,
30748,
33,
963,
16305,
20053,
11,
8621,
30748,
5159,
8053,
201,
198
] | 3.195122 | 41 |
# Generated by Django 3.0.10 on 2021-01-19 12:14
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
940,
319,
33448,
12,
486,
12,
1129,
1105,
25,
1415,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
from _dmtdef import *
| [
6738,
4808,
67,
16762,
4299,
1330,
1635,
198
] | 2.75 | 8 |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
import re
from django.db.models import Q
from django.shortcuts import get_object_or_404
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
11748,
302,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13... | 3.362069 | 58 |
from mock import patch
from six.moves import xrange
import unittest
from django.conf import settings
from django.db import connections
from django.db.utils import DataError, IntegrityError
from django.db.models import CASCADE
from django.test import TestCase
import time
from datetime import datetime
from django_sharding_library.constants import Backends
from django_sharding_library.fields import (
ShardedIDFieldMixin,
ShardLocalStorageFieldMixin,
ShardStorageFieldMixin,
ShardForeignKeyStorageFieldMixin,
ShardForeignKeyStorageField,
)
from django_sharding_library.id_generation_strategies import BaseIDGenerationStrategy
from tests.models import (
ShardedModelIDs,
ShardedTestModelIDs,
TestModel,
ShardStorageTable,
PostgresCustomAutoIDModel,
PostgresCustomIDModel,
PostgresShardUser
)
| [
6738,
15290,
1330,
8529,
198,
6738,
2237,
13,
76,
5241,
1330,
2124,
9521,
198,
11748,
555,
715,
395,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
8787,
198,
6738,
42625,
14208,
13,
9945,
... | 3.175373 | 268 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Alexey Anisenkov, anisyonk@cern.ch, 2018
"""
Job specific Info Service
It could customize/overwrite settings provided by the main Info Service
:author: Alexey Anisenkov
:contact: anisyonk@cern.ch
:date: January 2018
"""
from .infoservice import InfoService
from .jobinfo import JobInfoProvider
import logging
logger = logging.getLogger(__name__)
class JobInfoService(InfoService): ## TO BE DEPRECATED/REMOVED
"""
Info service: Job specific
Job could overwrite settings provided by Info Service
*** KEPT for a while in repo .. most probably will be deprecated and removed soon **
"""
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.233083 | 266 |
import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
## Training parameters
model_name = 'distilbert-base-uncased'
batch_size = 16
pos_neg_ratio = 8 # batch_size must be devisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = 'output/train_stsb_ct-{}-{}'.format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = 'data/wiki1m_for_simcse.txt'
if not os.path.exists(wikipedia_dataset_path):
util.http_get('https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt', wikipedia_dataset_path)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, 'r', encoding='utf8') as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = 'data/stsbenchmark'
sts_dataset_path = f'{data_folder}/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
################# Intialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio)
# As loss, we losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={'lr': 1e-5},
output_path=model_save_path,
use_amp=False #Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
| [
11748,
28034,
198,
6738,
6827,
62,
35636,
364,
13,
18206,
2288,
1330,
13302,
6048,
278,
18925,
414,
36,
2100,
84,
1352,
198,
6738,
6827,
62,
35636,
364,
1330,
11352,
594,
8291,
16354,
11,
5972,
2667,
25060,
11,
4981,
11,
7736,
11,
234... | 2.652616 | 1,376 |
import torch
from torch import nn
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198
] | 3.777778 | 9 |
# -*- coding: utf-8 -*-
# Copyright CERN since 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
SQLAlchemy utilities
'''
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Executable, ClauseElement
@compiles(InsertFromSelect)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
327,
28778,
1201,
1853,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
42... | 3.576744 | 215 |
from __future__ import print_function, absolute_import
from paths import Paths as paths
from . import bash_utils
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
6738,
13532,
1330,
10644,
82,
355,
13532,
198,
6738,
764,
1330,
27334,
62,
26791,
628
] | 4.071429 | 28 |
import cv2
import pickle
import glob
import os
undistorted_dir_name = 'output_images/undistorted'
chess_undistor_dir_name ="output_images/chessundistorted"
# Read in the saved objpoints and imgpoints
dist_pickle = pickle.load( open( "caliberation_dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
images = glob.glob('test_images/*.jpg')
undistort(images,undistorted_dir_name)
images = glob.glob('camera_cal/*.jpg')
undistort(images,chess_undistor_dir_name)
#print(images)
| [
11748,
269,
85,
17,
198,
11748,
2298,
293,
198,
11748,
15095,
198,
11748,
28686,
198,
198,
917,
396,
9741,
62,
15908,
62,
3672,
796,
705,
22915,
62,
17566,
14,
917,
396,
9741,
6,
198,
2395,
824,
62,
917,
32380,
62,
15908,
62,
3672,
... | 2.661376 | 189 |
"""Module Description
Copyright (c) 2009 H. Gene Shin <shin@jimmy.harvard.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: H. Gene Shin
@contact: shin@jimmy.harvard.edu
This module contains functions that produce R scripts corresponding to certain R functions.
For example, R.plot(x,y) produces a R script of plot(x,y).
For the details on the parameters of the below functions, refer to R documents.
"""
# ------------------------------------
# Python modules
# ------------------------------------
import sys
from array import *
import CEAS.graphics as graphics
# ------------------------------------
# function - statistics
# ------------------------------------
def pbinom(q,size,p,lower_tail=True,log_p=False):
"""produce a script of pbinom in R"""
rscript='pbinom('
if type(q).__name__=='list' or 'tuple':
rscript+='c(%s),' %str(q)[1:-1]
else:
rscript+='%s,' %(str(q))
if type(size).__name__=='list' or 'tuple':
rscript+='c(%s),' %str(q)[1:-1]
else:
rscript+='%s,' %(str(q))
if type(p).__name__=='list' or 'tuple':
rscript+='c(%s),' %str(q)[1:-1]
else:
rscript+='%s,' %(str(q))
if not lower_tail:
rscript+='lower.tail=FALSE,'
if log_p:
rscript+='log.p=TRUE,'
rscript.rstrip(',')
rscript+=')\n'
return rscript
# ------------------------------------
# function - general
# ------------------------------------
def vector(x, varname='x'):
"""Return an one-line R script of a vector
Parameters:
1. x: a list or an array of numbers or string
2. type: the type of elements of x
3. varname: the name of the vector
"""
return "%s <- c(%s)\n" %(varname, str(x)[1:-1])
def matrix(mat,nrow=1,ncol=1,byrow=False):
"""Given a two dimensional array, write the array in a matrix form"""
nr=len(mat)
rscript='m<-matrix(data=c('
try:
nc=len(mat[0])
for m in mat:
rscript+=str(m)[1:-1]+ ', '
rscript=rscript[:-2]+'), nrow=%d, ncol=%d, byrow=TRUE,' %(nr,nc)
except TypeError:
rscript+=str(mat)[1:-1]+','
rscript=rscript[:-1]+'), nrow=%d, ncol=%d,' %(nrow,ncol)
if byrow: rscript+='byrow=TRUE,'
rscript=rscript[:-1]+')\n'
return rscript
# ------------------------------------
# function - graphhics
# ------------------------------------
def polygon(x, y = None, density=None, angle=45, border = None, col = None):
"""polygon function"""
if type(x)==list or type(x)==tuple:
rscript='x<-c('
for i in x:
if i==i: rscript+='%f,' %i
else: rscript+='NaN,'
rscript=rscript[:-1]+')\n'
elif type(x)==int or type(x)==float:
if x==x: rscript='x<-c(%f)\n' %x
else: rscript='x<-c(NaN)\n'
if type(y)==list or type(y)==tuple:
rscript+='y<-c('
for i in y:
if i==i:rscript+='%f,' %i
else: rscript+='NaN,'
rscript=rscript[:-1]+')\n'
elif type(y)==int or type(y)==float:
if y==y: rscript='y<-c(%f)\n' %x
else: rscript='y<-c(NaN)\n'
rscript+='polygon(x,y,'
if density:
if type(density)==tuple or type(density)==list:
rscript+='density=c('
for d in density:
rscript+='%d,' %d
rscript+='),'
if angle != 45:
rscript+='angle=%d,' %angle
if border:
rscript+='border="%s",' %border
if col:
if type(col) ==list or type(col)==tuple:
rscript += 'col=c('
for c in col:
rscript+= '"%s",' %str(c)
rscript = rscript[:-1]+'),'
elif type(col)==str:
rscript += 'col=%s,' %str(col)
rscript=rscript[:-1]+')\n'
return rscript
def hist(x, breaks, freq=False, right = True, density=None, main='', xlim=None, ylim=None, xlab='', ylab='', axes=True, col=None, border=None, returnClass=False):
"""Histogram"""
if type(x)==list or type(x)==tuple or type(x)==array:
rscript = 'x <- c(%s)\n' %str(list(x))[1:-1]
elif type(x)==str and x:
rscript = 'x <- %s\n' %x
if type(breaks)==list or type(breaks)==tuple or type(x)==array:
rscript += 'breaks <- c(%s)\n' %str(list(breaks))[1:-1]
elif type(breaks)==int:
rscript += 'breaks <- %d\n' %breaks
elif type(breaks)==str and breaks:
rscript += 'breaks <- "%s"\n' %breaks
# draw or just return histogram class?
if returnClass:
rscript +='hs <- hist(x, breaks, '
else:
rscript +='hist(x, breaks, '
if freq:
rscript += 'freq=TRUE, probability=FALSE, '
else:
rscript += 'freq=FALSE, probability=TRUE, '
if not right:
rscript += 'right=FALSE, '
if density:
rscript += 'density=%f, ' %density
if main:
rscript += 'main="%s", ' %main
if xlim:
rscript += 'xlim=c(%s), ' %str(list(xlim))[1:-1]
if ylim:
rscript += 'ylim=c(%s), ' %str(list(ylim))[1:-1]
if xlab:
rscript += 'xlab="%s", ' %xlab
if ylab:
rscript += 'ylab="%s", ' %ylab
if not axes:
rscript += 'axes=FALSE, '
if col:
rscript += 'col=%s, ' %col
if border:
rscript += 'border=%s, ' %border
rscript =rscript[:-2] + ')\n'
return rscript
def seq(fr, to, by=1, rval='s'):
    """Emit an R seq() call; assign it to *rval* when a name is given."""
    # build the argument list once; `by` is added only when non-default
    args = 'from=%s, to=%s' % (str(fr), str(to))
    if by != 1:
        args += ', by=%s' % str(by)
    if rval:
        return '%s <- seq(%s)\n' % (rval, args)
    return 'seq(%s)\n' % args
###
# par is underconstruction. It just adjusts margins and multiple plots in one figure currently.
def layout(mat, widths=None, heights=None):
    """Emit an R layout() call from the nested row-major matrix *mat*.

    Optional *widths*/*heights* are forwarded as relative column widths
    and row heights.
    """
    nrow = len(mat)
    ncol = len(mat[0])
    # flatten row by row; R rebuilds the shape with byrow=TRUE
    flat = []
    for row in mat:
        flat.extend(row)
    pieces = ['layout(matrix(c(%s), %d, %d, byrow = TRUE),' % (str(flat)[1:-1], nrow, ncol)]
    if widths:
        pieces.append('widths=c(%s),' % (str(widths)[1:-1]))
    if heights:
        pieces.append('heights=c(%s),' % (str(heights)[1:-1]))
    # replace the trailing comma with the closing parenthesis
    return ''.join(pieces)[:-1] + ')\n'
def rainbow(n, s=1, v=1, start=1, rval='cols'):
    """Assign *n* rainbow colors to the R vector named *rval*.

    Returns '' when *n* is zero/falsy.  Non-default saturation (*s*),
    value (*v*) and *start* hue are forwarded to R's rainbow().
    """
    if not n:
        return ''
    opts = ''
    if s != 1:
        opts += 's=%s,' % str(s)
    if v != 1:
        opts += 'v=%s,' % str(v)
    if start != 1:
        opts += 'start=%s,' % str(start)
    call = '%s <- rainbow(%d,%s' % (rval, n, opts)
    # strip the trailing comma and close the call
    return call[:-1] + ')\n'
def colorRampPalette(colors, bias=1, space='rgb', interpolate='linear', rval='colorPalette'):
    """Assign an R colorRampPalette() function to the variable *rval*.

    Parameters:
    1. colors: interpolation endpoints -- a list/tuple of color names, or
       an R expression given as a string
    2. bias: bias > 1 puts more colors at high values (R default 1)
    3. space: interpolation color space, "rgb" or "Lab" (R default "rgb")
    4. interpolate: "linear" or "spline" (R default "linear")
    5. rval: name of the R variable receiving the palette function

    Returns '' when no colors are given.
    """
    rscript = ''
    if colors:
        if type(colors) == list or type(colors) == tuple:
            rscript += '%s <- colorRampPalette(c(%s),' % (rval, str(colors)[1:-1])
        elif type(colors) == str:
            rscript += '%s <- colorRampPalette(%s,' % (rval, colors)
    # only append options once a palette call was actually started; the
    # original emitted dangling "bias = ..." fragments when colors was
    # empty but a non-default option was set
    if rscript:
        if bias != 1:
            rscript += 'bias = %s,' % str(bias)
        if space != 'rgb':
            # previously this parameter was accepted but silently ignored
            rscript += 'space = "%s",' % str(space)
        if interpolate != 'linear':
            rscript += 'interpolate = "%s",' % str(interpolate)
        rscript = rscript[:-1] + ')\n'
    return rscript
def _heatmap_cells(values, ylim, cmap):
    """Emit the shared R code that clips *values* to *ylim*, maps them to
    1-based indexes of the colormap vector *cmap* (per-cell colors land
    in `cols`), and opens the per-cell drawing loop (caller closes it)."""
    rscript = 'vals <- c(%s)\n' % str(list(values))[1:-1]
    # saturate the values at the y limits
    rscript += 'vals[vals > %s] <- %s\n' % (str(ylim[1]), str(ylim[1]))
    rscript += 'vals[vals < %s] <- %s\n' % (str(ylim[0]), str(ylim[0]))
    # quantize into 1..length(cmap) indexes of the colormap vector
    rscript += 'vals <- round((length(%s)-1) * (vals - %s)/(%s - %s)) + 1\n' % (cmap, str(ylim[0]), str(ylim[1]), str(ylim[0]))
    rscript += 'cols <- %s[vals]\n' % cmap
    rscript += 'for (i in 1:length(cols)) {\n'
    return rscript


def heatmap_bar(x, y, fromto, ylim, bot=0, top=2, vertical=False, cmap='cmap'):
    """Draw a bar of heatmap.
    Parameters;
    1. x: x values
    2. y: y values
    3. fromto: the first and last points of the bar. If the first point is smaller than x's smallest, between it and the first of x there is no color
    4. ylim: ylimits
    5. bot: bottom of the bar
    6. top: top of the bar
    7. vertical: the bar stands vertical or lies horizontal?
    8. cmap: the colormap variable (vector) name in the R script. WARNING: to use heatmap_bar, a colormap must be given prior to this function and correctly referred to.

    Note: the original hard-coded the literal name "cmap" in the emitted
    R, so any other *cmap* argument was silently ignored; the parameter
    is now honored (default output is unchanged).
    """
    rscript = ''
    if vertical:
        # empty outer frame for the stretch before the first data point
        if x[0] > fromto[0]:
            rscript += 'polygon(x=c(%s, %s, %s, %s),y=c(%s, %s, %s, %s))\n' % (str(bot), str(top), str(top), str(bot), str(fromto[0]), str(fromto[0]), str(x[0]), str(x[0]))
        # in the R script, y holds our x data because the bar is vertical;
        # extend with fromto[1] so the last cell reaches the end of the bar
        if x[-1] < fromto[1]:
            rscript += 'y<-c(%s, %s)\n' % (str(list(x))[1:-1], str(fromto[1]))
        else:
            rscript += 'y<-c(%s)\n' % str(list(x))[1:-1]
        rscript += _heatmap_cells(y, ylim, cmap)
        rscript += '\tpolygon(x=c(%s, %s, %s, %s), y=c(y[i], y[i], y[i+1], y[i+1]), col=cols[i], border=cols[i])\n' % (str(bot), str(top), str(top), str(bot))
        rscript += '}\n'
    else:
        # empty outer frame for the stretch before the first data point
        if x[0] > fromto[0]:
            rscript += 'polygon(x=c(%s, %s, %s, %s), y=c(%s, %s, %s, %s))\n' % (str(fromto[0]), str(x[0]), str(x[0]), str(fromto[0]), str(bot), str(bot), str(top), str(top))
        if x[-1] < fromto[1]:
            rscript += 'x<-c(%s, %s)\n' % (str(list(x))[1:-1], str(fromto[1]))
        else:
            rscript += 'x<-c(%s)\n' % str(list(x))[1:-1]
        rscript += _heatmap_cells(y, ylim, cmap)
        # (the stray space in "x [i+1]" is preserved; R ignores it)
        rscript += '\tpolygon(x=c(x[i], x[i+1],x [i+1], x[i]),y=c(%s, %s, %s, %s), col=cols[i], border=cols[i])\n' % (str(bot), str(bot), str(top), str(top))
        rscript += '}\n'
    return rscript
def heatmap_rectangles(xstart, xend, y, xlim, ylim, bot=0, top=1, xaxt="n", cmap="cmap"):
    """Draw heatmap-colored rectangles on an empty plot.
    Parameters;
    1. xstart: the start positions of rectangles
    2. xend: the end positions of rectangles
    3. y: the y values (scores) of rectangles
    4. xlim: x limits
    5. ylim: y limits
    6. bot: bottom of the bar
    7. top: top of the bar
    8. xaxt: "n" suppresses drawing of the x axis
    9. cmap: the colormap variable (vector) name in the R script. WARNING: a colormap must be defined earlier in the R script and correctly referred to.
    Returns '' when xstart, xend or y is not a list/tuple/array.
    """
    # error checking: check whether xstart, xend and y are iterable types
    rscript = ''
    type_xstart = type(xstart)
    type_xend = type(xend)
    type_y = type(y)
    if type_xstart != list and type_xstart != tuple and type_xstart != array:
        return rscript
    if type_xend != list and type_xend != tuple and type_xend != array:
        return rscript
    if type_y != list and type_y != tuple and type_y != array:
        return rscript
    # empty frame sized to the limits; rectangles are drawn on top of it
    rscript += plot(xlim, [bot, top], xlim=xlim, ylim=[bot, top], tp="n", frame=False, yaxt="n", xaxt=xaxt, xlab="", ylab="", main="")
    # copy the rectangle coordinates and their values. The values are saturated wrt ylim
    if type_xstart == array:
        rscript += 'start <- c(%s)\n' % str(list(xstart))[1:-1]
    else:
        rscript += 'start <- c(%s)\n' % str(xstart)[1:-1]
    if type_xend == array:
        rscript += 'end <- c(%s)\n' % str(list(xend))[1:-1]
    else:
        rscript += 'end <- c(%s)\n' % str(xend)[1:-1]
    if type_y == array:
        rscript += 'vals <- c(%s)\n' % str(list(y))[1:-1]
    else:
        rscript += 'vals <- c(%s)\n' % str(y)[1:-1]
    rscript += 'vals[vals > %s] <- %s\n' % (str(ylim[1]), str(ylim[1]))
    rscript += 'vals[vals < %s] <- %s\n' % (str(ylim[0]), str(ylim[0]))
    # quantize against the *named* colormap; the original hard-coded
    # "length(cmap)" here while using %cmap on the next line, breaking
    # any colormap name other than the default
    rscript += 'vals <- round((length(%s)-1) * (vals - %s)/(%s - %s)) + 1\n' % (cmap, str(ylim[0]), str(ylim[1]), str(ylim[0]))
    rscript += 'cols <- %s[vals]\n' % cmap
    rscript += 'for (i in 1:length(cols)) {\n'
    rscript += '\tpolygon(x=c(start[i], end[i], end[i], start[i]), y=c(%d, %d, %d, %d), col=cols[i], border=cols[i])\n' % (bot, bot, top, top)
    rscript += '}\n'
    return rscript
def rectangles_with_heights(xstart, xend, y, ylim, bot, top, xaxt = "n", col = ["red"]):
    """Emit R code drawing rectangles whose heights encode the y values.
    Parameters;
    1. xstart: the start positions of rectangles
    2. xend: the end positions of rectangles
    3. y: the y values (scores) of rectangles
    4. ylim: y limits used to saturate and normalize the values
    5. bot: bottom of the drawing area
    6. top: top of the drawing area
    7. xaxt: kept for interface compatibility (not used here)
    8. col: rectangle color -- a list/tuple of names, or an R variable name
    Returns '' unless xstart, xend and y are all list/tuple/array.
    """
    # reject non-sequence inputs up front
    for member_type in (type(xstart), type(xend), type(y)):
        if member_type != list and member_type != tuple and member_type != array:
            return ''
    # distinguish a literal color list from an R variable name
    if type(col) == tuple or type(col) == list:
        colstr = 'c(' + ','.join(['"%s"' % str(s) for s in col]) + ')'
    elif type(col) == str:
        colstr = col
    # vertical midline and half-height of the drawing area
    mid = 1.0 * (bot + top) / 2
    half = 1.0 * (top - bot) / 2
    lines = []
    # inline the coordinates; arrays go through list() to format cleanly
    for rname, values in (('start', xstart), ('end', xend), ('vals', y)):
        if type(values) == array:
            lines.append('%s <- c(%s)\n' % (rname, str(list(values))[1:-1]))
        else:
            lines.append('%s <- c(%s)\n' % (rname, str(values)[1:-1]))
    # saturate the values at the y limits
    lines.append('vals[vals > %s] <- %s\n' % (str(ylim[1]), str(ylim[1])))
    lines.append('vals[vals < %s] <- %s\n' % (str(ylim[0]), str(ylim[0])))
    # bipolar data is drawn outward from the midline, unipolar from the bottom
    if ylim[0] < 0:
        lines.append('heights <- %f * ((vals - %s)/(%s - %s) -0.5) + %s\n' % (half / 0.5, str(ylim[0]), str(ylim[1]), str(ylim[0]), str(mid)))
        lines.append('for (i in 1:length(heights)) {\n')
        lines.append('\tpolygon(x=c(start[i], end[i], end[i], start[i]), y=c(%s, %s, heights[i], heights[i]), col=%s, border=%s)\n' % (str(mid), str(mid), colstr, colstr))
    else:
        lines.append('heights <- %f * ((vals - %s)/(%s - %s)) + %s\n' % (2 * half, str(ylim[0]), str(ylim[1]), str(ylim[0]), str(bot)))
        lines.append('for (i in 1:length(heights)) {\n')
        lines.append('\tpolygon(x=c(start[i], end[i], end[i], start[i]), y=c(%s, %s, heights[i], heights[i]), col=%s, border=%s)\n' % (str(bot), str(bot), colstr, colstr))
    lines.append('}\n')
    return ''.join(lines)
def rectangles_with_heights_and_colors(xstart, xend, y, ylim, bot, top, xaxt = "n", cmap='cmap'):
    """Draw bars with heights and colors defined by cmap.
    Parameters;
    1. xstart: the start positions of rectangles
    2. xend: the end positions of rectangles
    3. y: the y values (scores) of rectangles
    4. ylim: y limits
    5. bot: bottom of the bar
    6. top: top of the bar
    7. cmap: the colormap variable (vector) name in the R script. WARNING: to use heatmap_bar, a colormap must be given prior to this function and correctly referred to.
    8. xaxt: accepted for interface compatibility; not used by this function
    Returns the R script as a string, or '' when xstart, xend or y is not
    a list/tuple/array.
    """
    # error checking: check whether xstart, xend and y are iterable types
    rscript = ''
    type_xstart = type(xstart)
    type_xend = type(xend)
    type_y = type(y)
    if type_xstart != list and type_xstart != tuple and type_xstart != array:
        return rscript
    if type_xend != list and type_xend != tuple and type_xend != array:
        return rscript
    if type_y != list and type_y != tuple and type_y != array:
        return rscript
    # med point
    med = 1.0*(bot + top)/2
    hdelta = 1.0*(top-bot)/2
    # copy the rectangle coordinates and their values. The values are saturated wrt ylim
    if type_xstart == array:
        rscript += 'start <- c(%s)\n' %str(list(xstart))[1:-1]
    else:
        rscript += 'start <- c(%s)\n' %str(xstart)[1:-1]
    if type_xend == array:
        rscript += 'end <- c(%s)\n' %str(list(xend))[1:-1]
    else:
        rscript += 'end <- c(%s)\n' %str(xend)[1:-1]
    if type_y == array:
        rscript += 'vals <- c(%s)\n' %str(list(y))[1:-1]
    else:
        rscript += 'vals <- c(%s)\n' %str(y)[1:-1]
    # saturate the values
    rscript += 'vals[vals > %s] <- %s\n' %(str(ylim[1]), str(ylim[1]))
    rscript += 'vals[vals < %s] <- %s\n' %(str(ylim[0]), str(ylim[0]))
    # one representative value per colormap entry, evenly spaced over ylim;
    # each bar is then painted as a stack of thin colored segments
    rscript += 'quantized.vals <- seq(%s, %s, by=(%s-%s)/(length(%s)-1))\n' %(str(ylim[0]), str(ylim[1]), str(ylim[1]), str(ylim[0]), cmap)
    # normalize the values
    rscript += 'ixs <- round((length(%s)-1) * (vals - %s)/(%s - %s)) + 1\n' %(cmap, str(ylim[0]), str(ylim[1]), str(ylim[0]))
    rscript += 'for (i in 1:length(vals)) {\n'
    if ylim[0] < 0:
        # bipolar data: segments are stacked outward from the midline.
        # NOTE(review): length(quantized.vals)/2 yields a fractional
        # sequence start in R when the length is odd -- confirm intended.
        rscript += '\tjs <- (length(quantized.vals)/2):ixs[i]\n'
        rscript += '\tprev <- %f * ((quantized.vals[js[1]] - %s)/(%s - %s) -0.5) + %s\n' %(hdelta/0.5, str(ylim[0]), str(ylim[1]), str(ylim[0]), str(med))
        rscript += '\tfor(j in js) {\n'
        rscript += '\t\theight <- %f * ((quantized.vals[j] - %s)/(%s - %s) -0.5) + %s\n' %(hdelta/0.5, str(ylim[0]), str(ylim[1]), str(ylim[0]), str(med))
        rscript += '\t\tpolygon(x=c(start[i], end[i], end[i], start[i]), y=c(prev, prev, height, height), col=%s[j], border=%s[j])\n' %(cmap, cmap)
        rscript += '\t\tprev <- height\n'
    else:
        # unipolar data: segments are stacked upward from the bottom
        rscript += '\tjs <- 1:ixs[i]\n'
        rscript += '\tprev <- %f * ((quantized.vals[js[1]] - %s)/(%s - %s)) + %s\n' %(2*hdelta, str(ylim[0]), str(ylim[1]), str(ylim[0]), str(bot))
        rscript += '\tfor (j in js) {\n'
        rscript += '\t\theight <- %f * ((quantized.vals[j] - %s)/(%s - %s)) + %s\n' %(2*hdelta, str(ylim[0]), str(ylim[1]), str(ylim[0]), str(bot))
        rscript += '\t\tpolygon(x=c(start[i], end[i], end[i], start[i]), y=c(prev, prev, height, height), col=%s[j], border=%s[j])\n' %(cmap, cmap)
        rscript += '\t\tprev <- height\n'
    # close the inner (per-segment) and outer (per-rectangle) R loops
    rscript += '\t}\n'
    rscript += '}\n'
    return rscript
def write_func_quantize():
    """Return the R source of a quantize() helper function.

    The generated R function clips x to the closed interval lim and maps
    it onto the integer levels 0..n.levels.  Use use_func_quantize to
    invoke it from a script.
    """
    # NOTE: R vectors are 1-based, so the limits are lim[1]/lim[2].  The
    # original emitted lim[0]/lim[1]; in R, lim[0] selects an empty
    # vector, which silently disabled the lower clip.  It also wrote
    # '\t\}' (an invalid Python escape) producing a literal backslash in
    # the R source; the closing brace is now emitted correctly.
    rscript = 'quantize <- function(x, lim, n.levels)\n'
    rscript += '\t{\n'
    rscript += '\t\t' + 'x[x < lim[1]] <- lim[1]' + '\n'
    rscript += '\t\t' + 'x[x > lim[2]] <- lim[2]' + '\n'
    rscript += '\t\t' + 'q <- round(n.levels * (x - lim[1])/(lim[2]-lim[1]))' + '\n'
    rscript += '\t\t' + 'return(q)' + '\n'
    rscript += '\t}\n'
    return rscript
def use_func_quantize(x, lim, n_levels='128', return_varname='qx'):
    """Emit an R call to the quantize() helper (see write_func_quantize).
    parameters:
    1. x: the name of vector or string of R vector
    2. lim: the name of a vector of lower and upper limits or R vector
    3. n_levels: the name of variable of the number of levels or a string number
    4. return_varname: the return argument name in the R script
    """
    # all arguments are spliced in verbatim, so either R variable names
    # or literal R expressions may be supplied
    return '%s <- quantize(%s, lim=%s, n.levels=%s)\n' % (
        return_varname, x, lim, n_levels)
| [
198,
198,
37811,
26796,
12489,
198,
198,
15269,
357,
66,
8,
3717,
367,
13,
13005,
11466,
1279,
1477,
259,
31,
73,
320,
1820,
13,
9869,
10187,
13,
15532,
29,
198,
198,
1212,
2438,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,... | 2.019257 | 11,061 |
# Generated by Django 3.2.4 on 2021-07-17 09:46
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
19,
319,
33448,
12,
2998,
12,
1558,
7769,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import torch
import torch.nn as nn
from torch.optim import Adam
in_d = 5
hidn = 2
out_d = 3
net = NN_Network(in_d, hidn, out_d)
print(list(net.parameters()))
print("test") | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
40085,
1330,
7244,
198,
198,
259,
62,
67,
796,
642,
198,
49675,
77,
796,
362,
198,
448,
62,
67,
796,
513,
198,
3262,
796,
399,
45,
62,
26245,
7,
259,
... | 2.457143 | 70 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: george wang
@datetime: 2019-06-12
@file: state.py
@contact: georgewang1994@163.com
@desc: 状态机
"""
from enum import Enum, unique
@unique
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
31,
9800,
25,
220,
220,
220,
4903,
3643,
266,
648,
198,
31,
19608,
8079,
25,
220,
13130,
12,
3312,
12,
... | 2.038095 | 105 |
from md2cf.confluence_renderer import ConfluenceTag, ConfluenceRenderer
| [
6738,
45243,
17,
12993,
13,
10414,
23079,
62,
10920,
11882,
1330,
7326,
23079,
24835,
11,
7326,
23079,
49,
437,
11882,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628
] | 3.033333 | 30 |
manylinux1_compatible = False
| [
805,
2645,
259,
2821,
16,
62,
38532,
796,
10352,
198
] | 3 | 10 |
from . import base
import audioread
class TrackReadValidator(base.Validator):
"""
Check if the track can be opened and read.
"""
def validate(self, corpus):
"""
Perform the validation on the given corpus.
Args:
corpus (Corpus): The corpus to test/validate.
Returns:
InvalidItemsResult: Validation result.
"""
invalid_tracks = {}
for track in corpus.tracks.values():
try:
track.duration
except EOFError:
invalid_tracks[track.idx] = 'EOFError'
except audioread.NoBackendError:
invalid_tracks[track.idx] = 'NoBackendError'
passed = len(invalid_tracks) <= 0
return base.InvalidItemsResult(
passed,
invalid_tracks,
item_name='Tracks',
name=self.name()
)
| [
6738,
764,
1330,
2779,
198,
198,
11748,
40504,
382,
324,
628,
198,
4871,
17762,
5569,
47139,
1352,
7,
8692,
13,
47139,
1352,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
6822,
611,
262,
2610,
460,
307,
4721,
290,
1100,
13,
1... | 2.096774 | 434 |
n = int(input())
temps = list(map(int, input().split(" ")))
i = 1
maxTemp = 10000000
theDay = 0
while i < n -1:
if temps[i - 1] > temps[i + 1]:
if maxTemp > temps[i - 1]:
maxTemp = temps[i - 1]
theDay = i - 1
else:
if maxTemp > temps[i +1]:
maxTemp = temps[i + 1]
theDay = i - 1
i += 1
print(theDay + 1, maxTemp)
| [
77,
796,
493,
7,
15414,
28955,
201,
198,
201,
198,
11498,
862,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
7203,
366,
22305,
201,
198,
201,
198,
72,
796,
352,
201,
198,
9806,
30782,
796,
1802,
20483,
201,
198,
1169,
12393,
... | 1.945055 | 182 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from sklearn.decomposition import PCA
colors = ['#AAAAAA', '#7777CC', '#FFFF99', '#99FFFF', '#FF5555', '#FF99FF',
'#5555FF', '#55FF55', '#555555', '#12569A', '#A96521', '#12A965']
shapes = ['s', 's', 'o', '^', 'v', 's', 'o', '^', 'v', 's', 'o', '^']
sizes = [40, 20, 17, 15, 13, 20, 17, 15, 13, 20, 17, 15]
edgecolors = ['#555555', 'k', 'k', 'k', 'k', 'k', 'k', 'k', 'k', 'k', 'k', 'k']
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
2164,
2340,
43106,
1330,
24846,
22882,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
628,
198... | 2.054167 | 240 |
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""This is the library to hold all api's related to list operations """
def get_list_by_separating_strings(list_to_be_processed, char_to_be_replaced=",", str_to_replace_with_if_empty=None):
""" This function converts a list of type:
['str1, str2, str3', 'str4, str5, str6, str7', None, 'str8'] to:
[['str1', 'str2', 'str3'], ['str4', 'str5', 'str6', 'str7'], [], ['str8']]
"""
final_list =[]
if list_to_be_processed is not None and list_to_be_processed is not False and list_to_be_processed != "":
for i in range(0, len(list_to_be_processed)):
if list_to_be_processed[i] is None or list_to_be_processed[i] is False or list_to_be_processed[i] == "":
temp_list = []
else:
if list_to_be_processed[i] == "":
list_to_be_processed[i] = str_to_replace_with_if_empty
temp_list = list_to_be_processed[i].split(char_to_be_replaced)
for j in range(0, len(temp_list)):
temp_list[j] = temp_list[j].strip()
final_list.append(temp_list)
return final_list
def get_list_comma_sep_string(input_string):
"""
This function converts a comma separated string in to a list
Eg: "a, b, c, d, e, f" would become ['a', 'b', 'c', 'd', 'e', 'f']
"""
final_list = input_string.split(',')
for i in range(0, len(final_list)):
final_list[i] = final_list[i].strip()
return final_list
| [
7061,
6,
198,
15269,
2177,
11,
32671,
19831,
7311,
14620,
11,
3457,
13,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 2.460984 | 833 |
# Generated by Django 3.2.3 on 2021-07-17 19:59
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
18,
319,
33448,
12,
2998,
12,
1558,
678,
25,
3270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from abc import ABC, abstractmethod
from dataclasses import dataclass
import torch
class BanditsDataset(ABC):
"""
Base class for logged, aka behavior, dataset
"""
@abstractmethod
def __len__(self) -> int:
"""
Returns:
length of the dataset
"""
pass
@abstractmethod
def __getitem__(self, idx) -> dataclass:
"""
Args:
idx: index of the sample
Returns:
tuple of features, action, and reward at idx
"""
pass
@property
@abstractmethod
def num_features(self) -> int:
"""
Returns:
number of features
"""
pass
@property
@abstractmethod
def num_actions(self) -> int:
"""
Returns:
number of total possible actions
"""
pass
@property
@abstractmethod
def features(self) -> torch.Tensor:
"""
Returns:
all features in the dataset as numpy array
"""
pass
@property
@abstractmethod
def actions(self) -> torch.Tensor:
"""
Returns:
all actions in the dataset as numpy array
"""
pass
@property
@abstractmethod
def rewards(self) -> torch.Tensor:
"""
Returns:
all rewards in the dataset as numpy array
"""
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
2489,
10395,
13,
198,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
4818,
330,
28958,
1330,
... | 2.158345 | 701 |
from autoflow.workflow.components.data_process_base import AutoFlowDataProcessAlgorithm
__all__ = ["ADASYN"]
| [
6738,
1960,
1659,
9319,
13,
1818,
11125,
13,
5589,
3906,
13,
7890,
62,
14681,
62,
8692,
1330,
11160,
37535,
6601,
18709,
2348,
42289,
198,
198,
834,
439,
834,
796,
14631,
2885,
26483,
45,
8973,
628
] | 3.171429 | 35 |
"""
Dicts for Activation Function
"""
import sys
import numpy
"""
Add New Activation Functions Here
"""
FUNC_LIST = [method for method in dir() if method.startswith('__') is False]
CUR_MODULE = sys.modules[__name__]
| [
37811,
198,
35,
14137,
329,
13144,
341,
15553,
198,
37811,
198,
11748,
25064,
198,
11748,
299,
32152,
198,
198,
37811,
198,
4550,
968,
13144,
341,
40480,
3423,
198,
37811,
198,
198,
42296,
34,
62,
45849,
796,
685,
24396,
329,
2446,
287,... | 3.070423 | 71 |
# -*- coding: utf-8 -*-
"""Command Line Interface."""
import argparse
import logging
import os
import sys
from pic2map.db import (
LocationDB,
transform_metadata_to_row,
)
from pic2map.fs import TreeExplorer
from pic2map.gps import filter_gps_metadata
from pic2map.server.app import app
logger = logging.getLogger(__name__)
def main(argv=None):
"""Entry point for the pic2map.py script."""
if argv is None:
argv = sys.argv[1:]
args = parse_arguments(argv)
configure_logging(args.log_level)
args.func(args)
def add(args):
"""Add location information for pictures under directory."""
logger.info('Adding image files from %r...', args.directory)
tree_explorer = TreeExplorer(args.directory)
paths = tree_explorer.paths()
gps_metadata_records = filter_gps_metadata(paths)
logger.info(
'%d picture files with GPS metadata found under %s',
len(gps_metadata_records),
args.directory)
location_rows = [
transform_metadata_to_row(metadata)
for metadata in gps_metadata_records
]
if location_rows:
with LocationDB() as database:
database.insert(location_rows)
def remove(args):
"""Remove location information for pictures under directory."""
logger.info('Removing image files from %r...', args.directory)
with LocationDB() as database:
database.delete(args.directory)
def count(_args):
"""Get number picture files in the database."""
logger.info('Getting image files in the database...')
with LocationDB() as database:
file_count = database.count()
print file_count
def serve(_args):
"""Run web server."""
app.run(debug=True)
def valid_directory(path):
"""Directory validation."""
if not os.path.isdir(path):
raise argparse.ArgumentTypeError(
'{!r} is not a valid directory'.format(path))
if not os.access(path, os.R_OK | os.X_OK):
raise argparse.ArgumentTypeError(
'not enough permissions to explore {!r}'.format(path))
return path
def configure_logging(log_level):
"""Configure logging based on command line argument.
:param log_level: Log level passed form the command line
:type log_level: int
"""
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
# Log to sys.stderr using log level
# passed through command line
log_handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s: %(message)s')
log_handler.setFormatter(formatter)
log_handler.setLevel(log_level)
root_logger.addHandler(log_handler)
def parse_arguments(argv):
"""Parse command line arguments.
:returns: Parsed arguments
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser(
description='Display pictures location in a map')
log_levels = ['debug', 'info', 'warning', 'error', 'critical']
parser.add_argument(
'-l', '--log-level',
dest='log_level',
choices=log_levels,
default='info',
help=('Log level. One of {0} or {1} '
'(%(default)s by default)'
.format(', '.join(log_levels[:-1]), log_levels[-1])))
subparsers = parser.add_subparsers(help='Subcommands')
add_parser = subparsers.add_parser('add', help=add.__doc__)
add_parser.add_argument(
'directory', type=valid_directory, help='Base directory')
add_parser.set_defaults(func=add)
remove_parser = subparsers.add_parser('remove', help=remove.__doc__)
remove_parser.add_argument(
'directory', type=valid_directory, help='Base directory')
remove_parser.set_defaults(func=remove)
count_parser = subparsers.add_parser('count', help=count.__doc__)
count_parser.set_defaults(func=count)
serve_parser = subparsers.add_parser('serve', help=serve.__doc__)
serve_parser.set_defaults(func=serve)
args = parser.parse_args(argv)
args.log_level = getattr(logging, args.log_level.upper())
return args
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
21575,
6910,
26491,
526,
15931,
198,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
8301,
17,
8899,
13,
9945,
133... | 2.617402 | 1,563 |
from abc import ABCMeta
from cloudmesh.management.configuration.config import Config
import warnings
import sys
warnings.warn("ObjectStorageABC: THIS METHOD IS DEPRECATED AND StorageABC MUST BE USED")
sys.exit(1)
# noinspection PyUnusedLocal,PyPep8 | [
6738,
450,
66,
1330,
9738,
48526,
198,
198,
6738,
6279,
76,
5069,
13,
27604,
13,
11250,
3924,
13,
11250,
1330,
17056,
198,
11748,
14601,
198,
11748,
25064,
198,
198,
40539,
654,
13,
40539,
7203,
10267,
31425,
24694,
25,
12680,
337,
3625... | 3.272727 | 77 |
#!/usr/bin/env python
#
# Setup script
# $Id: //modules/xmlrpclib/setup.py#4 $
#
# Usage: python setup.py install
#
from distutils.core import setup, Extension
setup(
name="xmlrpclib",
version="1.0.1",
author="Fredrik Lundh",
author_email="fredrik@pythonware.com",
maintainer="PythonWare",
maintainer_email="info@pythonware.com",
description="xmlrpclib -- an XML-RPC library for Python",
py_modules = ["xmlrpclib", "SimpleXMLRPCServer"],
scripts = ["xmlrpc_handler.py", "xmlrpcserver.py", "echotest.py"],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
201,
198,
2,
31122,
4226,
201,
198,
2,
720,
7390,
25,
3373,
18170,
14,
19875,
81,
79,
565,
571,
14,
40406,
13,
9078,
2,
19,
720,
201,
198,
2,
201,
198,
2,
29566,
25,
21... | 2.375 | 240 |
import tkinter
from src.setup import *
Starter() | [
11748,
256,
74,
3849,
198,
6738,
12351,
13,
40406,
1330,
1635,
198,
198,
1273,
2571,
3419
] | 3.0625 | 16 |
class Node(object):
"""
The nodes are the verticies of the graph.
"""
| [
4871,
19081,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
383,
13760,
389,
262,
9421,
291,
444,
286,
262,
4823,
13,
198,
220,
220,
220,
37227,
198
] | 2.645161 | 31 |
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas
import arctern
# rename to arctern.plot later
# pylint: disable=reimported
import arctern as ap
# copied from source, since _func can not be called here
# pylint: disable=redefined-outer-name
bounding_box = [-73.998427, 40.730309, -73.954348, 40.780816]
| [
2,
15069,
357,
34,
8,
13130,
12,
42334,
1168,
359,
528,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.371648 | 261 |
from itertools import cycle, repeat
from operator import mul
import tensorflow as tf
import numpy as np
from config import DATA_SIZE, SUBSAMPLING_SIZE, INPUT_SIZE, SHIFT_SIZE
if __name__ == '__main__':
preprocess_data()
| [
6738,
340,
861,
10141,
1330,
6772,
11,
9585,
198,
6738,
10088,
1330,
35971,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
4566,
1330,
42865,
62,
33489,
11,
13558,
4462,
2390,
6489,
2... | 3.013158 | 76 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
helps['keyvault'] = """
type: group
short-summary: Safeguard and maintain control of keys, secrets, and certificates.
"""
helps['keyvault create'] = """
type: command
short-summary: Create a key vault.
long-summary: Default permissions are created for the current user or service principal unless the `--no-self-perms` flag is specified.
"""
helps['keyvault delete'] = """
type: command
short-summary: Delete a key vault.
"""
helps['keyvault list'] = """
type: command
short-summary: List key vaults.
"""
helps['keyvault show'] = """
type: command
short-summary: Show details of a key vault.
"""
helps['keyvault update'] = """
type: command
short-summary: Update the properties of a key vault.
"""
helps['keyvault recover'] = """
type: command
short-summary: Recover a key vault.
long-summary: Recovers a previously deleted key vault for which soft delete was enabled.
"""
helps['keyvault key'] = """
type: group
short-summary: Manage keys.
"""
helps['keyvault secret'] = """
type: group
short-summary: Manage secrets.
"""
helps['keyvault certificate'] = """
type: group
short-summary: Manage certificates.
"""
helps['keyvault certificate download'] = """
type: command
short-summary: Download the public portion of a Key Vault certificate.
long-summary: The certificate formatted as either PEM or DER. PEM is the default.
examples:
- name: Download a certificate as PEM and check its fingerprint in openssl.
text: >
az keyvault certificate download --vault-name vault -n cert-name -f cert.pem && \\
openssl x509 -in cert.pem -inform PEM -noout -sha1 -fingerprint
- name: Download a certificate as DER and check its fingerprint in openssl.
text: >
az keyvault certificate download --vault-name vault -n cert-name -f cert.crt -e DER && \\
openssl x509 -in cert.crt -inform DER -noout -sha1 -fingerprint
"""
helps['keyvault certificate get-default-policy'] = """
type: command
short-summary: Get the default policy for self-signed certificates.
long-summary: >
This default policy can be used in conjunction with `az keyvault create` to create a self-signed certificate.
The default policy can also be used as a starting point to create derivative policies.\n
For more details, see: https://docs.microsoft.com/en-us/rest/api/keyvault/certificates-and-policies
examples:
- name: Create a self-signed certificate with the default policy
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
"""
helps['keyvault certificate create'] = """
type: command
short-summary: Create a Key Vault certificate.
long-summary: Certificates can be used as a secrets for provisioned virtual machines.
examples:
- name: Create a self-signed certificate with the default policy and add it to a virtual machine.
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm format-secret -s "$secrets") \n
az vm create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
"""
helps['keyvault certificate import'] = """
type: command
short-summary: Import a certificate into KeyVault.
long-summary: Certificates can also be used as a secrets in provisioned virtual machines.
examples:
- name: Create a service principal with a certificate, add the certificate to Key Vault and provision a VM with that certificate.
text: >
service_principal=$(az ad sp create-for-rbac --create-cert) \n
cert_file=$(echo $service_principal | jq .fileWithCertAndPrivateKey -r) \n
az keyvault create -g my-group -n vaultname \n
az keyvault certificate import --vault-name vaultname -n cert_file \n
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm format-secret -s "$secrets") \n
az vm create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
"""
helps['keyvault certificate pending'] = """
type: group
short-summary: Manage pending certificate creation operations.
"""
helps['keyvault certificate contact'] = """
type: group
short-summary: Manage contacts for certificate management.
"""
helps['keyvault certificate issuer'] = """
type: group
short-summary: Manage certificate issuer information.
"""
helps['keyvault certificate issuer admin'] = """
type: group
short-summary: Manage admin information for certificate issuers.
"""
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 2.959249 | 1,865 |
# -*- coding: utf-8 -*-
import blinker
signals = blinker.Namespace()
load = signals.signal('load')
before_save = signals.signal('before_save')
save = signals.signal('save')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
21019,
263,
628,
198,
12683,
874,
796,
21019,
263,
13,
36690,
10223,
3419,
198,
198,
2220,
796,
10425,
13,
12683,
282,
10786,
2220,
11537,
198,
198,
19052,
6... | 2.69697 | 66 |
from typing import Tuple, Dict, Optional, List
import numpy as np
import re
import json
from functools import lru_cache
from geotext import GeoText
from shapely.geometry import Point, Polygon, shape
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
| [
6738,
19720,
1330,
309,
29291,
11,
360,
713,
11,
32233,
11,
7343,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
302,
198,
11748,
33918,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
198,
6738,
4903,
1258,
742,
13... | 3.706667 | 75 |
from ff_espn_api import League
if __name__ == "__main__":
league = League(league_id=1360211, year=2019, debug=True)
for team in league.teams:
print(team)
print(league) | [
6738,
31246,
62,
9774,
77,
62,
15042,
1330,
4041,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
4652,
796,
4041,
7,
19316,
62,
312,
28,
1485,
1899,
21895,
11,
614,
28,
23344,
11,
14257,
... | 2.461538 | 78 |
# CaosMX
# Dic 2020
# Ex Python Practica
'''
Listas
Podemos hacer las mismas operaciones que con las cadenas de caracteres y con las tuplas,
pero estas si podemos editarlas.
Agregar, Eliminar, Cambiar
Siguiendo Curso de Python de Manuel Gonzalez:
https://www.youtube.com/channel/UCQLWbxZbgftDIhw21i6q_OA/featured
https://programarescomounjuego.blogspot.com
'''
mascotas = ["gato", "perro", "canario", "cocodrilo"]
print (mascotas)
#Modificando el índice 3 (4o elemento)
mascotas[3] = "tortuga"
print (mascotas)
# Hay que elegir correctamente Listas o Tuplas:
# Las tuplas son mas rápidamente accesibles
# Las listas son mas versátiles
# Crear secuencias entre elementos como tuplas o listas según convenga:
dias_semana = ("Lunes", "Martes", "Miercoles", "Jueves", "Viernes", "Sabado", "Domingo")
dias_entrenamiento = ["Lunes", "Miercoles", "Viernes"]
print (dias_semana)
print (dias_entrenamiento)
'''
('Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes', 'Sabado', 'Domingo')
['Lunes', 'Miercoles', 'Viernes']
'''
# Creando una lista del 1 al 100 usando WHILE a partor de una lista vacía:
lista = []
n = 1
while n <= 100:
lista = lista +[n]
n += 1
print (lista) | [
2,
6488,
418,
43243,
198,
2,
360,
291,
12131,
198,
2,
1475,
11361,
13672,
3970,
198,
198,
7061,
6,
198,
8053,
292,
198,
41565,
368,
418,
289,
11736,
39990,
32691,
292,
1515,
49443,
274,
8358,
369,
39990,
20603,
268,
292,
390,
1097,
... | 2.355865 | 503 |
# coding: utf-8
"""
Swagger Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
OpenAPI spec version: 1.0.0
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class FakeApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def fake_outer_boolean_serialize(self, **kwargs): # noqa: E501
"""fake_outer_boolean_serialize # noqa: E501
Test serialization of outer boolean types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_boolean_serialize(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool body: Input boolean as post body
:return: OuterBoolean
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.fake_outer_boolean_serialize_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.fake_outer_boolean_serialize_with_http_info(**kwargs) # noqa: E501
return data
def fake_outer_boolean_serialize_with_http_info(self, **kwargs): # noqa: E501
"""fake_outer_boolean_serialize # noqa: E501
Test serialization of outer boolean types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_boolean_serialize_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool body: Input boolean as post body
:return: OuterBoolean
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method fake_outer_boolean_serialize" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake/outer/boolean', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OuterBoolean', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def fake_outer_composite_serialize(self, **kwargs): # noqa: E501
"""fake_outer_composite_serialize # noqa: E501
Test serialization of object with outer number type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_composite_serialize(async_req=True)
>>> result = thread.get()
:param async_req bool
:param OuterComposite body: Input composite as post body
:return: OuterComposite
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.fake_outer_composite_serialize_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.fake_outer_composite_serialize_with_http_info(**kwargs) # noqa: E501
return data
def fake_outer_composite_serialize_with_http_info(self, **kwargs): # noqa: E501
"""fake_outer_composite_serialize # noqa: E501
Test serialization of object with outer number type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_composite_serialize_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param OuterComposite body: Input composite as post body
:return: OuterComposite
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method fake_outer_composite_serialize" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake/outer/composite', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OuterComposite', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def fake_outer_number_serialize(self, **kwargs): # noqa: E501
"""fake_outer_number_serialize # noqa: E501
Test serialization of outer number types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_number_serialize(async_req=True)
>>> result = thread.get()
:param async_req bool
:param float body: Input number as post body
:return: OuterNumber
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.fake_outer_number_serialize_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.fake_outer_number_serialize_with_http_info(**kwargs) # noqa: E501
return data
def fake_outer_number_serialize_with_http_info(self, **kwargs): # noqa: E501
"""fake_outer_number_serialize # noqa: E501
Test serialization of outer number types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_number_serialize_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param float body: Input number as post body
:return: OuterNumber
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method fake_outer_number_serialize" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake/outer/number', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OuterNumber', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def fake_outer_string_serialize(self, **kwargs): # noqa: E501
"""fake_outer_string_serialize # noqa: E501
Test serialization of outer string types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_string_serialize(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: Input string as post body
:return: OuterString
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.fake_outer_string_serialize_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.fake_outer_string_serialize_with_http_info(**kwargs) # noqa: E501
return data
def fake_outer_string_serialize_with_http_info(self, **kwargs): # noqa: E501
"""fake_outer_string_serialize # noqa: E501
Test serialization of outer string types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fake_outer_string_serialize_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: Input string as post body
:return: OuterString
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method fake_outer_string_serialize" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake/outer/string', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OuterString', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def test_client_model(self, body, **kwargs): # noqa: E501
"""To test \"client\" model # noqa: E501
To test \"client\" model # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_client_model(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Client body: client model (required)
:return: Client
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_client_model_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.test_client_model_with_http_info(body, **kwargs) # noqa: E501
return data
def test_client_model_with_http_info(self, body, **kwargs): # noqa: E501
"""To test \"client\" model # noqa: E501
To test \"client\" model # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_client_model_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Client body: client model (required)
:return: Client
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_client_model" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `test_client_model`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Client', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def test_endpoint_parameters(self, body, **kwargs): # noqa: E501
"""Fake endpoint for testing various parameters 假端點 偽のエンドポイント 가짜 엔드 포인트 # noqa: E501
Fake endpoint for testing various parameters 假端點 偽のエンドポイント 가짜 엔드 포인트 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_endpoint_parameters(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body2 body: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_endpoint_parameters_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.test_endpoint_parameters_with_http_info(body, **kwargs) # noqa: E501
return data
def test_endpoint_parameters_with_http_info(self, body, **kwargs): # noqa: E501
"""Fake endpoint for testing various parameters 假端點 偽のエンドポイント 가짜 엔드 포인트 # noqa: E501
Fake endpoint for testing various parameters 假端點 偽のエンドポイント 가짜 엔드 포인트 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_endpoint_parameters_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body2 body: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_endpoint_parameters" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `test_endpoint_parameters`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml; charset=utf-8', 'application/json; charset=utf-8']) # noqa: E501
# Authentication setting
auth_settings = ['http_basic_test'] # noqa: E501
return self.api_client.call_api(
'/fake', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def test_enum_parameters(self, **kwargs): # noqa: E501
"""To test enum parameters # noqa: E501
To test enum parameters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_enum_parameters(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] enum_header_string_array: Header parameter enum test (string array)
:param str enum_header_string: Header parameter enum test (string)
:param list[str] enum_query_string_array: Query parameter enum test (string array)
:param str enum_query_string: Query parameter enum test (string)
:param int enum_query_integer: Query parameter enum test (double)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_enum_parameters_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.test_enum_parameters_with_http_info(**kwargs) # noqa: E501
return data
def test_enum_parameters_with_http_info(self, **kwargs): # noqa: E501
"""To test enum parameters # noqa: E501
To test enum parameters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_enum_parameters_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] enum_header_string_array: Header parameter enum test (string array)
:param str enum_header_string: Header parameter enum test (string)
:param list[str] enum_query_string_array: Query parameter enum test (string array)
:param str enum_query_string: Query parameter enum test (string)
:param int enum_query_integer: Query parameter enum test (double)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['enum_header_string_array', 'enum_header_string', 'enum_query_string_array', 'enum_query_string', 'enum_query_integer'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_enum_parameters" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'enum_query_string_array' in params:
query_params.append(('enum_query_string_array', params['enum_query_string_array'])) # noqa: E501
collection_formats['enum_query_string_array'] = 'csv' # noqa: E501
if 'enum_query_string' in params:
query_params.append(('enum_query_string', params['enum_query_string'])) # noqa: E501
if 'enum_query_integer' in params:
query_params.append(('enum_query_integer', params['enum_query_integer'])) # noqa: E501
header_params = {}
if 'enum_header_string_array' in params:
header_params['enum_header_string_array'] = params['enum_header_string_array'] # noqa: E501
collection_formats['enum_header_string_array'] = '' # noqa: E501
if 'enum_header_string' in params:
header_params['enum_header_string'] = params['enum_header_string'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def test_enum_request_body(self, **kwargs): # noqa: E501
"""To test enum parameters # noqa: E501
To test enum parameters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_enum_request_body(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body4 body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_enum_request_body_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.test_enum_request_body_with_http_info(**kwargs) # noqa: E501
return data
def test_enum_request_body_with_http_info(self, **kwargs): # noqa: E501
"""To test enum parameters # noqa: E501
To test enum parameters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_enum_request_body_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body4 body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_enum_request_body" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake/enum/form', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def test_inline_additional_properties(self, body, **kwargs): # noqa: E501
"""test inline additionalProperties # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_inline_additional_properties(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param dict(str, str) body: request body (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_inline_additional_properties_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.test_inline_additional_properties_with_http_info(body, **kwargs) # noqa: E501
return data
def test_inline_additional_properties_with_http_info(self, body, **kwargs):  # noqa: E501
    """test inline additionalProperties  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.test_inline_additional_properties_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param dict(str, str) body: request body (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Reject any keyword argument this endpoint does not understand.
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method test_inline_additional_properties" % key
            )
        params[key] = val

    # verify the required parameter 'body' is set
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `test_inline_additional_properties`")  # noqa: E501

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/fake/inline-additionalProperties', 'POST',
        {},    # path params
        [],    # query params
        header_params,
        body=params['body'],
        post_params=[],    # form params
        files={},
        response_type=None,  # noqa: E501
        auth_settings=[],    # no authentication required
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def test_json_form_data(self, body, **kwargs):  # noqa: E501
    """test json serialization of form data  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.test_json_form_data(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param Body5 body: (required)
    :return: None, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async calls are both served by the *_with_http_info
    # variant; with _return_http_data_only set it already returns just
    # the data (or the request thread when async_req is truthy).
    return self.test_json_form_data_with_http_info(body, **kwargs)  # noqa: E501
def test_json_form_data_with_http_info(self, body, **kwargs):  # noqa: E501
    """test json serialization of form data  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.test_json_form_data_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param Body5 body: (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Reject any keyword argument this endpoint does not understand.
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method test_json_form_data" % key
            )
        params[key] = val

    # verify the required parameter 'body' is set
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `test_json_form_data`")  # noqa: E501

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/fake/jsonFormData', 'POST',
        {},    # path params
        [],    # query params
        header_params,
        body=params['body'],
        post_params=[],    # form params
        files={},
        response_type=None,  # noqa: E501
        auth_settings=[],    # no authentication required
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
2451,
7928,
4767,
8095,
628,
220,
220,
220,
770,
1020,
318,
8384,
329,
4856,
4767,
8095,
4382,
290,
4909,
8390,
886,
13033,
11,
4981,
13,
4222,
466,
407,
779,
42... | 2.21416 | 17,006 |
"""
GOCAD data tools.
paulbourke.net/dataformats/gocad/gocad.pdf
"""
import os
import numpy
def header(buff, counter=0, casters=None):
    """
    GOCAD header reader

    Parses a ``HEADER { ... }`` block from ``buff`` (a list of lines),
    starting at index ``counter`` (the line after the opening brace).

    :param buff: list of str, the lines of the GOCAD file.
    :param counter: int, index of the first line inside the header block.
    :param casters: optional dict mapping a cast callable to the header
        keys whose values it should convert; defaults cover common keys.
    :return: tuple (header dict, index of the line after the closing '}').
    :raises Exception: if the closing '}' is never found.
    """
    if casters is None:
        casters = {
            int: ('pclip', 'field'),
            bool: ('imap', 'ivolmap', 'parts', 'transparency'),
            float: ('contrast', 'low_clip', 'high_clip', 'transparency_min'),
        }
    cast = {}
    for c in casters:
        for k in casters[c]:
            cast[k] = c
    header = {}
    while counter < len(buff):
        line = buff[counter]
        counter += 1
        if '}' in line:
            return header, counter
        # Split on the first ':' only, so values may themselves contain
        # colons (the original split(':') raised ValueError on such lines).
        k, v = line.split(':', 1)
        k = k.split('*')[-1]
        header[k] = v
        if k in cast:
            f = v.split()
            if len(f) > 1:
                try:
                    header[k] = tuple(cast[k](x) for x in f)
                except ValueError:
                    print('Could not cast %s %s to %s' % (k, v, cast[k]))
            else:
                try:
                    header[k] = cast[k](v)
                except ValueError:
                    print('Could not cast %s %s to %s' % (k, v, cast[k]))
    # No closing '}' found. (The original also had an unreachable `return`
    # after this raise; it has been removed.)
    raise Exception('Error in header')
def voxet(path, load_props=None, alternate='', no_data_value=None, buff=None):
    """
    GOCAD voxet reader

    :param path: path of the .vo file; also used to locate property data
        files referenced by the voxet.
    :param load_props: optional list of property ids whose binary data
        files should be loaded into ``prop[id]['DATA']``.
    :param alternate: suffix appended to property file names.
    :param no_data_value: value substituted for the voxet's NO_DATA_VALUE
        samples; the strings 'nan'/'NaN'/'NAN' select float NaN.
    :param buff: optional file contents; when given, ``path`` is only used
        to locate property data files.
    :return: dict mapping voxet id to {'HEADER', 'AXIS', 'PROP'} dicts.
    """
    # Fixed: `load_props=[]` was a mutable default argument.
    if load_props is None:
        load_props = []
    if buff is None:
        # Fixed: the original `open(path).read()` leaked the file handle.
        with open(path) as fh:
            buff = fh.read()
    if no_data_value in ('nan', 'NaN', 'NAN'):
        no_data_value = float('nan')
    buff = buff.strip().split('\n')
    cast = {}
    casters = {
        int: ('N', 'ESIZE', 'OFFSET', 'SIGNED', 'PAINTED_FLAG_BIT_POS'),
        str: (
            'NAME', 'FILE', 'TYPE', 'ETYPE', 'FORMAT', 'UNIT',
            'ORIGINAL_UNIT'
        ),
        float: (
            'O', 'D', 'U', 'V', 'W', 'MIN', 'MAX', 'NO_DATA_VALUE',
            'SAMPLE_STATS'
        ),
    }
    for c in casters:
        for k in casters[c]:
            cast[k] = c
    voxet = {}
    counter = 0
    while counter < len(buff):
        line = buff[counter].strip()
        counter += 1
        f = line.replace('"', '').split()
        if len(f) == 0 or line.startswith('#'):
            continue
        elif line.startswith('GOCAD Voxet'):
            id_ = f[2]
            axis, prop = {}, {}
            # Fixed: tolerate voxets without a HEADER block (previously a
            # NameError on `hdr` at END).
            hdr = None
        elif f[0] == 'HEADER':
            hdr, counter = header(buff, counter)
        elif len(f) > 1:
            k = f[0].split('_', 1)
            if k[0] == 'AXIS':
                axis[k[1]] = tuple(cast[k[1]](x) for x in f[1:])
            elif f[0] == 'PROPERTY':
                prop[f[1]] = {'PROPERTY': f[2]}
            elif k[0] == 'PROP':
                if len(f) > 3:
                    prop[f[1]][k[1]] = tuple(cast[k[1]](x) for x in f[2:])
                else:
                    prop[f[1]][k[1]] = cast[k[1]](f[2])
        elif f[0] == 'END':
            # Load the requested property data files (big-endian floats,
            # Fortran-style axis order, hence the reshape(...).T).
            for p in load_props:
                p = prop[p]
                n = axis['N']
                f = os.path.join(os.path.dirname(path), p['FILE'] + alternate)
                if os.path.exists(f):
                    dtype = '>f%s' % p['ESIZE']
                    data = numpy.fromfile(f, dtype)
                    if no_data_value is not None:
                        data[data == p['NO_DATA_VALUE']] = no_data_value
                    p['DATA'] = data.reshape(n[::-1]).T
            voxet[id_] = {'HEADER': hdr, 'AXIS': axis, 'PROP': prop}
    return voxet
def tsurf(buff):
    """
    GOCAD triangulated surface reader

    :param buff: str, contents of a .ts file (may hold several surfaces).
    :return: list of [meta, data] pairs, one per TSurf, where data maps
        'vtx' -> (n, 3) float array, 'tri' -> list of (m, 3) index arrays
        (one per TFACE), 'border' -> (k, 2) int array, 'bstone' -> int array.
    """
    buff = buff.strip().split('\n')
    tsurf = []
    counter = 0
    while counter < len(buff):
        line = buff[counter].strip()
        counter += 1
        f = line.split()
        if len(f) == 0 or line.startswith('#'):
            continue
        elif line.startswith('GOCAD TSurf'):
            # Start of a new surface: reset all accumulators.
            meta0, meta, tri, x, t, b, s, a = None, {}, [], [], [], [], [], []
        elif f[0] in ('VRTX', 'PVRTX'):
            x.append([float(f[2]), float(f[3]), float(f[4])])
        elif f[0] in ('ATOM', 'PATOM'):
            # ATOM duplicates an existing vertex; record the index pair so
            # the duplicate can be remapped at END, and pad with NaNs.
            i = int(f[2]) - 1
            a.append([len(x), i])
            # x.append(x[i])
            x.append([float('nan'), float('nan'), float('nan')])
        elif f[0] == 'TRGL':
            t.append([int(f[1]) - 1, int(f[2]) - 1, int(f[3]) - 1])
        elif f[0] == 'BORDER':
            b.append([int(f[2]) - 1, int(f[3]) - 1])
        elif f[0] == 'BSTONE':
            s.append(int(f[1]) - 1)
        elif f[0] == 'TFACE':
            if t != []:
                tri.append(numpy.array(t, 'i'))
                t = []
        elif f[0] == 'END':
            tri.append(numpy.array(t, 'i'))
            x = numpy.array(x, 'f')
            b = numpy.array(b, 'i')
            s = numpy.array(s, 'i')
            for i, j in a:
                # Fixed: `tri` is a *list* of face arrays, so the original
                # `tri[tri == i] = j` evaluated to `tri[False] = j` and
                # clobbered tri[0]; remap inside every face array instead.
                for faces in tri:
                    faces[faces == i] = j
                b[b == i] = j
                s[s == i] = j
            data = {'vtx': x, 'tri': tri, 'border': b, 'bstone': s}
            # Fixed: surfaces without a HEADER block left meta0 as None,
            # and meta.update(None) raised TypeError.
            if meta0 is not None:
                meta.update(meta0)
            tsurf.append([meta, data])
        elif f[0] == 'PROPERTY_CLASS_HEADER':
            meta[f[1]], counter = header(buff, counter)
        elif f[0] == 'HEADER':
            meta0, counter = header(buff, counter)
    return tsurf
| [
37811,
198,
38,
4503,
2885,
1366,
4899,
13,
198,
198,
79,
2518,
6084,
365,
13,
3262,
14,
7890,
687,
1381,
14,
70,
420,
324,
14,
70,
420,
324,
13,
12315,
198,
37811,
198,
11748,
28686,
198,
11748,
299,
32152,
628,
198,
4299,
13639,
... | 1.726593 | 3,076 |
import logging
from zentral.contrib.inventory.models import MachineSnapshot
from zentral.contrib.jamf.models import JamfInstance
from zentral.core.events import event_cls_from_type, register_event_type
from zentral.core.events.base import BaseEvent
from zentral.core.queues import queues
logger = logging.getLogger('zentral.contrib.jamf.events')


ALL_EVENTS_SEARCH_DICT = {"tag": "jamf"}

# Jamf webhook event name -> (zentral event subtype,
#                             is a machine heartbeat?,
#                             heartbeat timeout attribute or None)
JAMF_EVENTS = {
    "ComputerAdded": ("computer_added", False, None),
    "ComputerCheckIn": ("computer_checkin", True, "checkin_heartbeat_timeout"),
    "ComputerInventoryCompleted": ("computer_inventory_completed", True,
                                   "inventory_completed_heartbeat_timeout"),
    "ComputerPatchPolicyCompleted": ("computer_patch_policy_completed", True, None),
    "ComputerPolicyFinished": ("computer_policy_finished", True, None),
    "ComputerPushCapabilityChanged": ("computer_push_capability_changed", False, None),
    "DeviceAddedToDEP": ("device_added_to_dep", False, None),
    "JSSShutdown": ("shutdown", False, None),
    "JSSStartup": ("startup", False, None),
    "MobileDeviceCheckIn": ("mobile_device_checkin", True, None),
    "MobileDeviceCommandCompleted": ("mobile_device_command_completed", True, None),
    "MobileDeviceEnrolled": ("mobile_device_enrolled", True, None),
    "MobileDevicePushSent": ("mobile_device_push_sent", False, None),
    "MobileDeviceUnEnrolled": ("mobile_device_unenrolled", False, None),
    "PatchSoftwareTitleUpdated": ("patch_software_title_updated", False, None),
    "PushSent": ("push_sent", False, None),
    "RestAPIOperation": ("rest_api_operation", False, None),
    "SCEPChallenge": ("scep_challenge", False, None),
    "SmartGroupComputerMembershipChange": ("smart_group_computer_membership_change", False, None),
    "SmartGroupMobileDeviceMembershipChange": ("smart_group_mobile_device_membership_change", False, None),
}

# Dynamically build and register one BaseEvent subclass per Jamf webhook event.
for jamf_event, (event_subtype, is_heartbeat, timeout_attr) in JAMF_EVENTS.items():
    event_type = 'jamf_{}'.format(event_subtype)
    event_class_name = "".join(part.title() for part in event_type.split('_'))
    tags = ['jamf', 'jamf_webhook'] + (['heartbeat'] if is_heartbeat else [])
    event_class_attrs = {'event_type': event_type, 'tags': tags}
    if timeout_attr:
        event_class_attrs["get_machine_heartbeat_timeout"] = make_get_machine_heartbeat_timeout(timeout_attr)
    event_class = type(event_class_name, (BaseEvent,), event_class_attrs)
    register_event_type(event_class)


register_event_type(JAMFChangeManagementEvent)
register_event_type(JAMFSoftwareServerEvent)
register_event_type(JAMFAccessEvent)
register_event_type(JAMFClientEvent)
| [
11748,
18931,
198,
6738,
1976,
298,
1373,
13,
3642,
822,
13,
24807,
13,
27530,
1330,
10850,
43826,
9442,
198,
6738,
1976,
298,
1373,
13,
3642,
822,
13,
39159,
69,
13,
27530,
1330,
9986,
69,
33384,
198,
6738,
1976,
298,
1373,
13,
7295,... | 2.459732 | 1,192 |
from numpy import ones, arange, meshgrid, sum, int8, sqrt, roll, zeros
from param import Param
from fortran_operators import celltocorner
| [
6738,
299,
32152,
1330,
3392,
11,
610,
858,
11,
19609,
25928,
11,
2160,
11,
493,
23,
11,
19862,
17034,
11,
4836,
11,
1976,
27498,
198,
6738,
5772,
1330,
25139,
198,
6738,
329,
2213,
272,
62,
3575,
2024,
1330,
2685,
40301,
273,
1008,
... | 3.232558 | 43 |
import dash_bootstrap_components as dbc
# A three-slide bootstrap carousel with prev/next controls and slide
# position indicators enabled.
carousel = dbc.Carousel(
    controls=True,
    indicators=True,
    items=[
        {"key": str(slide), "src": "/static/images/slide%d.svg" % slide}
        for slide in (1, 2, 3)
    ],
)
| [
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
198,
7718,
48355,
796,
288,
15630,
13,
9914,
48355,
7,
198,
220,
220,
220,
3709,
41888,
198,
220,
220,
220,
220,
220,
220,
220,
19779,
2539,
1298,
366,
16,
1600,
... | 2.165468 | 139 |
########
# GROWbox Supervisor System (GROWSS)
# Version: 2019-03-27V1A (This is an alpha version & not yet complete)
# Todd Moore
# 3.27.19
#
# This project is released under The MIT License (MIT)
# Copyright 2019 Todd Moore
########
########
# # Code is compatible with Python 2.7 and Python 3.5.
#!/usr/bin/env python
# coding=utf-8
########
########
# The GROWbox Supervisor System (GROWSS) is a monitor & control system for small to medium grow boxes
# & grow cabinets. GROWSS uses a Raspberry Pi SBC, a GrovePi+ Hat, & Grove Sensors to monitor and
# control the growing environment.
#
# Features Include:
# - Monitor Temperature, Humidity, Density (smoke alarm) & soil moisture. Measurements are taken
# about every 25 seconds and updated to the displays (below).
# - Controls growing lights, exhaust fan, & humidifier.
# - Sets alarms for hi/low temperature, humidity & soil moisture. Sets alarm if there is smoke.
# - Monitoring & Alarm information is provided many ways:
# - All measured values are saved to local storage every 15 min & if there is an alarm happening.
# - LEDs on the front indicate if there is a temperature, humidity, density, or soil moisture.
# - LEDs on the front also indicate when the exhaust fan is on & when the humidifier is running.
# - An RGB LCD on the case displays the growing information in an easy-to-read format.
# - A mobile app is also available to monitor the growing environment with alarms & hi/low values.
########
########
# Features maybe included in the future:
# - Automatically water plant when soil moisture is too low.
# - send email or text if an alarm is present.
# - Ability to change alarm thresholds easily (i.e. switch, etc.)
########
########
# RPI/Grove+ Pinout Definitions
# Port # Pins on Port # Type Sensor Pin Sensor/Module
# ------------------------------------------------------------------------
# SERIAL D0 & D1 DIGITAL & SERIAL n/a
# D2 D2 & D3 DIGITAL D2 Grove Buzzer
# D3 D3 & D4 DIGITAL D3 Humid Alarm LED
# D4 Temp Alarm LED
# D4 D4 & D5 DIGITAL n/a
# D5 D5 & D6 DIGITAL D5 Water Atomizer LED
# D6 D6 & D7 DIGITAL D6 Grove - Temperature&Humidity Sensor Pro
# D7 D7 & D8 DIGITAL D7 Grove - Water Atomization
# D8 D8 & D9 DIGITAL D8 Smoke Alarm LED
# D9 Moisture Alarm LED
#
# A0 A0 & A1 ANALOG A0 Grove - Moisture Sensor
# A1 A1 & A2 ANALOG A1 Grove MQ2 Air Sensor
# A2 A2 & A3 ANALOG D16 Grove - 2-Channel SPDT Switch 1,
# LED Lights
# D17 Grove - 2-Channel SPDT Switch 2,
# Exhaust Fan
#
# I2C-1 I2C Free
# I2C-2 I2C Free
# I2C-3 I2C Grove - LCD RGB Backlight
# RPRISER RPI SERIAL
########
import datetime
import BlynkLib
import config
import setup_rpi
import welcome
import get
import hi_lo_values
import check_alarms
import control
import send_values
import email_handler
import sms_handler
#__________________________________________________________________________________
########
# Enable flags - Enable/Disable debugging, email, & other features
########
config.DEBUG = False  # debug enable - True prints debugging values during execution
config.email_enable = True  # email enable - True turns on email alerts
config.text_enable = True  # text enable - True turns on SMS text alerts
config.control_fan = True  # enable controlling the fan - True allows RPI to control fan
config.control_moist = True  # control the humidifier - allow RPI to control the water
# atomizer/humidifier
config.control_light = True  # enable controlling the light - True allows RPI to control the lights
config.blynk_app_enable = True  # enable sending info to the blynk GROWSS Mobile app
#__________________________________________________________________________________
# Set up the RPi/GrovePi hardware (sensor, LED & relay ports listed above).
setup_rpi.hardware()
#__________________________________________________________________________________
# welcome screen on stdio
# welcome.startup()
# Welcome Screen on LCD
# send_values.version_to_lcd()
#__________________________________________________________________________________
# Initialize Blynk with the auth token from config.BLYNK_AUTH.
blynk = BlynkLib.Blynk(config.BLYNK_AUTH)
#__________________________________________________________________________________
# register virtual pins for the blynk app
# use the new command, '@blynk.ON()', instead of '@blynk.VIRTUAL_READ()'
@blynk.VIRTUAL_READ(0) # time value
#@blynk.ON(0)
# @blynk.ON(1)
# @blynk.ON(2)
# @blynk.ON(7)
# @blynk.ON(16)
#__________________________________________________________________________________
while True:
v0_read_handler()
blynk.run()
| [
7804,
198,
2,
10863,
3913,
3524,
45673,
4482,
357,
10761,
3913,
5432,
8,
198,
2,
10628,
25,
13130,
12,
3070,
12,
1983,
53,
16,
32,
357,
1212,
318,
281,
17130,
2196,
1222,
407,
1865,
1844,
8,
198,
2,
14377,
8877,
198,
2,
513,
13,
... | 2.419813 | 2,251 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: bilibili/account/fission/v1/fission.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor: embeds the compiled fission.proto (serialized
# FileDescriptorProto bytes). Generated by protoc - do not hand-edit.
DESCRIPTOR = _descriptor.FileDescriptor(
name='bilibili/account/fission/v1/fission.proto',
package='bilibili.account.fission.v1',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n)bilibili/account/fission/v1/fission.proto\x12\x1b\x62ilibili.account.fission.v1\"\r\n\x0b\x45ntranceReq\"w\n\rEntranceReply\x12\x0c\n\x04icon\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0b\n\x03url\x18\x03 \x01(\t\x12=\n\x0b\x61nimateIcon\x18\x04 \x01(\x0b\x32(.bilibili.account.fission.v1.AnimateIcon\"\x0b\n\tWindowReq\"<\n\x0bWindowReply\x12\x0c\n\x04type\x18\x01 \x01(\x05\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x12\n\nreportData\x18\x03 \x01(\t\")\n\x0b\x41nimateIcon\x12\x0c\n\x04icon\x18\x01 \x01(\t\x12\x0c\n\x04json\x18\x02 \x01(\t2\xc7\x01\n\x07\x46ission\x12`\n\x08\x45ntrance\x12(.bilibili.account.fission.v1.EntranceReq\x1a*.bilibili.account.fission.v1.EntranceReply\x12Z\n\x06Window\x12&.bilibili.account.fission.v1.WindowReq\x1a(.bilibili.account.fission.v1.WindowReplyb\x06proto3'
)


# Descriptor for the EntranceReq message (no fields).
_ENTRANCEREQ = _descriptor.Descriptor(
name='EntranceReq',
full_name='bilibili.account.fission.v1.EntranceReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=87,
)


# Descriptor for the EntranceReply message
# (fields: icon, name, url, animateIcon).
_ENTRANCEREPLY = _descriptor.Descriptor(
name='EntranceReply',
full_name='bilibili.account.fission.v1.EntranceReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='icon', full_name='bilibili.account.fission.v1.EntranceReply.icon', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='bilibili.account.fission.v1.EntranceReply.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url', full_name='bilibili.account.fission.v1.EntranceReply.url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='animateIcon', full_name='bilibili.account.fission.v1.EntranceReply.animateIcon', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=208,
)


# Descriptor for the WindowReq message (no fields).
_WINDOWREQ = _descriptor.Descriptor(
name='WindowReq',
full_name='bilibili.account.fission.v1.WindowReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=210,
serialized_end=221,
)


# Descriptor for the WindowReply message (fields: type, url, reportData).
_WINDOWREPLY = _descriptor.Descriptor(
name='WindowReply',
full_name='bilibili.account.fission.v1.WindowReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='bilibili.account.fission.v1.WindowReply.type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url', full_name='bilibili.account.fission.v1.WindowReply.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reportData', full_name='bilibili.account.fission.v1.WindowReply.reportData', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=223,
serialized_end=283,
)


# Descriptor for the AnimateIcon message (fields: icon, json).
_ANIMATEICON = _descriptor.Descriptor(
name='AnimateIcon',
full_name='bilibili.account.fission.v1.AnimateIcon',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='icon', full_name='bilibili.account.fission.v1.AnimateIcon.icon', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='json', full_name='bilibili.account.fission.v1.AnimateIcon.json', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=285,
serialized_end=326,
)

# Link cross-message field types and register every message descriptor.
_ENTRANCEREPLY.fields_by_name['animateIcon'].message_type = _ANIMATEICON
DESCRIPTOR.message_types_by_name['EntranceReq'] = _ENTRANCEREQ
DESCRIPTOR.message_types_by_name['EntranceReply'] = _ENTRANCEREPLY
DESCRIPTOR.message_types_by_name['WindowReq'] = _WINDOWREQ
DESCRIPTOR.message_types_by_name['WindowReply'] = _WINDOWREPLY
DESCRIPTOR.message_types_by_name['AnimateIcon'] = _ANIMATEICON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes, generated from the descriptors above.
EntranceReq = _reflection.GeneratedProtocolMessageType('EntranceReq', (_message.Message,), {
'DESCRIPTOR' : _ENTRANCEREQ,
'__module__' : 'bilibili.account.fission.v1.fission_pb2'
# @@protoc_insertion_point(class_scope:bilibili.account.fission.v1.EntranceReq)
})
_sym_db.RegisterMessage(EntranceReq)

EntranceReply = _reflection.GeneratedProtocolMessageType('EntranceReply', (_message.Message,), {
'DESCRIPTOR' : _ENTRANCEREPLY,
'__module__' : 'bilibili.account.fission.v1.fission_pb2'
# @@protoc_insertion_point(class_scope:bilibili.account.fission.v1.EntranceReply)
})
_sym_db.RegisterMessage(EntranceReply)

WindowReq = _reflection.GeneratedProtocolMessageType('WindowReq', (_message.Message,), {
'DESCRIPTOR' : _WINDOWREQ,
'__module__' : 'bilibili.account.fission.v1.fission_pb2'
# @@protoc_insertion_point(class_scope:bilibili.account.fission.v1.WindowReq)
})
_sym_db.RegisterMessage(WindowReq)

WindowReply = _reflection.GeneratedProtocolMessageType('WindowReply', (_message.Message,), {
'DESCRIPTOR' : _WINDOWREPLY,
'__module__' : 'bilibili.account.fission.v1.fission_pb2'
# @@protoc_insertion_point(class_scope:bilibili.account.fission.v1.WindowReply)
})
_sym_db.RegisterMessage(WindowReply)

AnimateIcon = _reflection.GeneratedProtocolMessageType('AnimateIcon', (_message.Message,), {
'DESCRIPTOR' : _ANIMATEICON,
'__module__' : 'bilibili.account.fission.v1.fission_pb2'
# @@protoc_insertion_point(class_scope:bilibili.account.fission.v1.AnimateIcon)
})
_sym_db.RegisterMessage(AnimateIcon)


# Service descriptor for the Fission RPC service (methods: Entrance, Window).
_FISSION = _descriptor.ServiceDescriptor(
name='Fission',
full_name='bilibili.account.fission.v1.Fission',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=329,
serialized_end=528,
methods=[
_descriptor.MethodDescriptor(
name='Entrance',
full_name='bilibili.account.fission.v1.Fission.Entrance',
index=0,
containing_service=None,
input_type=_ENTRANCEREQ,
output_type=_ENTRANCEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Window',
full_name='bilibili.account.fission.v1.Fission.Window',
index=1,
containing_service=None,
input_type=_WINDOWREQ,
output_type=_WINDOWREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_FISSION)

DESCRIPTOR.services_by_name['Fission'] = _FISSION

# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
275,
22282,
2403,
14,
23317,
14,
69,
1480,
14,
85,
16,
14,
69,
1480,
1... | 2.432107 | 4,404 |
import logging
from typing import Optional
log = logging.getLogger(__name__)

# Prefer commentjson (tolerates comments in .json files) and degrade
# gracefully to the standard-library parser when it is not installed.
try:
    from commentjson import loads
except ImportError:
    from json import loads
    log.warning("'commentjson' not found, falling back to built-in 'json'. "
                "Be warned, comments in .json files will cause errors.")

# Load all bot strings once at import time.
with open("./data/strings.json", "r") as strings_file:
    STRINGS = loads(strings_file.read())
# Keys that must be present in strings.json for the bot to run.
REQUIRED_STRING_LIST = [
    "BOT_ABOUT",
    "ON_MEMBER_JOIN",
    "ON_VERIFICATION_BEGIN",
    "VERIFICATION_HOW",
    "VERIFY_RANDOM_EMOJI_LIST",
    "VERIFY_FAILED_TIMEOUT",
    "VERIFY_SUCCESS",
    "VERIFYALL_CONFIRMATION",
    "VERIFYALL_TIMEOUT",
    "VERIFYALL_STARTING",
    "VERIFYALL_PROGRESS",
    "VERIFYALL_DONE",
    "CMD_NOT_ALLOWED_FOR_USER",
    "MANUAL_VERIFICATION",
    "MANUAL_VERIFICATION_NO_NEED",
]

# Fail fast at import time if any required string is absent.
for required_key in REQUIRED_STRING_LIST:
    if STRINGS.get(required_key) is None:
        raise Exception(f"String {required_key} is required, but missing!")
class String:
    """
    Registry of all known string keys; each attribute equals its own name
    so call sites can reference keys without bare literals.
    """

    BOT_ABOUT = "BOT_ABOUT"
    ON_MEMBER_JOIN = "ON_MEMBER_JOIN"
    ON_VERIFICATION_BEGIN = "ON_VERIFICATION_BEGIN"
    VERIFICATION_HOW = "VERIFICATION_HOW"
    VERIFY_RANDOM_EMOJI_LIST = "VERIFY_RANDOM_EMOJI_LIST"
    VERIFY_FAILED_TIMEOUT = "VERIFY_FAILED_TIMEOUT"
    VERIFY_SUCCESS = "VERIFY_SUCCESS"
    VERIFYALL_CONFIRMATION = "VERIFYALL_CONFIRMATION"
    VERIFYALL_TIMEOUT = "VERIFYALL_TIMEOUT"
    VERIFYALL_STARTING = "VERIFYALL_STARTING"
    VERIFYALL_PROGRESS = "VERIFYALL_PROGRESS"
    VERIFYALL_DONE = "VERIFYALL_DONE"
    CMD_NOT_ALLOWED_FOR_USER = "CMD_NOT_ALLOWED_FOR_USER"
    MANUAL_VERIFICATION = "MANUAL_VERIFICATION"
    MANUAL_VERIFICATION_NO_NEED = "MANUAL_VERIFICATION_NO_NEED"
| [
11748,
18931,
198,
6738,
19720,
1330,
32233,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
2912,
17752,
1330,
15989,
198,
16341,
17267,
12331,
25,
198,
220,
220,... | 2.214197 | 803 |
# pylint: disable=R0903,W1202,R0801
"""Language detection module"""
import os
import argparse
import wget
import pandas as pd
import fasttext
from pycountry import languages
from azureml.studio.core.io.data_frame_directory \
import load_data_frame_from_directory, save_data_frame_to_directory
from azureml.studio.core.data_frame_schema import DataFrameSchema
from azureml.studio.core.logger import module_logger as logger
class LanguagesDetector():
    ''' Language detection class '''

    def detect_languages(self, input_df, cols):
        '''
        detect language in dataset

        args:
            input_df: Pandas Dataframe input dataset
            cols: list, list of text columns to run detection on
        return input_df with detection output appended
        '''
        for col_name in cols:
            # Newlines confuse the detector, so flatten each text first.
            input_df[col_name] = input_df[col_name].apply(
                lambda text: text.replace('\n', ' '))
            result_columns = [f'{col_name}_language',
                              f'{col_name}_ISO_639_code',
                              f'{col_name}_likelihood']
            # NOTE(review): `detect` is expected to be defined at module
            # level and to return one value per result column.
            input_df[result_columns] = input_df[col_name].apply(detect)
        return input_df
def main(args=None):
    '''
    Module entry function

    Loads the input DataFrame, runs language detection over the
    configured text columns, then writes the augmented DataFrame out.
    '''
    logger.debug(f'input-dir {args.input_dir}')
    logger.debug(f'output-dir {args.output_dir}')
    input_df = load_data_frame_from_directory(args.input_dir).data
    detector = LanguagesDetector()
    out_df = detector.detect_languages(input_df, args.target_columns.split(','))
    logger.debug(f'output dataset {out_df.describe()}')
    save_data_frame_to_directory(save_to=args.output_dir,
                                 data=out_df,
                                 schema=DataFrameSchema.data_frame_to_dict(out_df))
if __name__ == '__main__':
    # Build the CLI; parse_known_args tolerates extra arguments injected
    # by the hosting environment.
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('--input-dir', help='Dataset to train')
    PARSER.add_argument('--target-columns', type=str, help='target column')
    # Fixed: help text misspelled "langauges".
    PARSER.add_argument('--output-dir', type=str, help='dataframe containing detected languages')
    ARGS, _ = PARSER.parse_known_args()
    main(ARGS)
| [
2,
279,
2645,
600,
25,
15560,
28,
49,
2931,
3070,
11,
54,
1065,
2999,
11,
49,
2919,
486,
198,
198,
37811,
32065,
13326,
8265,
37811,
198,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
266,
1136,
198,
11748,
19798,
292,
355,
... | 2.392045 | 880 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#####################################################################################################
# Data ingestion script for the TBFY Knowledge Graph (http://data.tbfy.eu/)
#
# This file contains a script that enriches the JSON documents with TBFY-specific properties.
#
# Copyright: SINTEF 2018-2021
# Author : Brian Elvesæter (brian.elvesater@sintef.no)
# License : Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Project : Developed as part of the TheyBuyForYou project (https://theybuyforyou.eu/)
# Funding : TheyBuyForYou has received funding from the European Union's Horizon 2020
# research and innovation programme under grant agreement No 780247
#####################################################################################################
import config
import tbfy.json_utils
import tbfy.openopps_enrich
import tbfy.opencorporates_enrich
import tbfy.statistics
import logging
import json
import os
import sys
import getopt
import time
import datetime
from datetime import datetime
from datetime import timedelta
# **********
# Statistics
# **********
stats_files = tbfy.statistics.files_statistics_count.copy()
# *************
# Main function
# *************
# *****************
# Run main function
# *****************
if __name__ == "__main__": main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
29113,
29113,
29113,
4242,
2,
198,
2,
6060,
38382,
4226,
329,
262,
23799,
43833,
20414,
29681,
357,
4023,
1378,
7890,
13,... | 3.481108 | 397 |
from output.models.ms_data.regex.re_di11_xsd.re_di11 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| [
6738,
5072,
13,
27530,
13,
907,
62,
7890,
13,
260,
25636,
13,
260,
62,
10989,
1157,
62,
87,
21282,
13,
260,
62,
10989,
1157,
1330,
357,
198,
220,
220,
220,
797,
25636,
11,
198,
220,
220,
220,
14432,
11,
198,
8,
198,
198,
834,
43... | 1.863636 | 66 |
import numpy as np
class IDIMethod:
"""Common functions for all methods.
"""
def __init__(self, video, *args, **kwargs):
"""
The image displacement identification method constructor.
For more configuration options, see `method.configure()`
"""
self.video = video
self.configure(*args, **kwargs)
def configure(self, *args, **kwargs):
"""
Configure the displacement identification method here.
"""
pass
def calculate_displacements(self, video, *args, **kwargs):
"""
Calculate the displacements of set points here.
The result should be saved into the `self.displacements` attribute.
"""
pass | [
11748,
299,
32152,
355,
45941,
628,
198,
4871,
4522,
3955,
316,
2065,
25,
198,
220,
220,
220,
37227,
17227,
5499,
329,
477,
5050,
13,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,... | 2.465574 | 305 |
# version 0.0.2
import generator
qtd_cnpj = input('Digite aqui a quantidade de cnpjs que gostaria de gerar. Entre 1 e 100. ')
verify_generate(qtd_cnpj)
| [
2,
2196,
657,
13,
15,
13,
17,
198,
11748,
17301,
628,
198,
198,
80,
8671,
62,
31522,
79,
73,
796,
5128,
10786,
19511,
578,
14839,
72,
257,
5554,
312,
671,
390,
269,
37659,
8457,
8358,
308,
455,
10312,
390,
27602,
283,
13,
7232,
26... | 2.384615 | 65 |
from collections import OrderedDict
from ..util import create_element, set_xml_value, TNS
from ..version import EXCHANGE_2010
from .common import EWSFolderService, PagingEWSMixIn, create_shape_element
class FindFolder(EWSFolderService, PagingEWSMixIn):
"""
MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/findfolder
"""
SERVICE_NAME = 'FindFolder'
element_container_name = '{%s}Folders' % TNS
def call(self, additional_fields, restriction, shape, depth, max_items, offset):
"""
Find subfolders of a folder.
:param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
:param shape: The set of attributes to return
:param depth: How deep in the folder structure to search for folders
:param max_items: The maximum number of items to return
:param offset: the offset relative to the first item in the item collection. Usually 0.
:return: XML elements for the matching folders
"""
from ..folders import Folder
roots = {f.root for f in self.folders}
if len(roots) != 1:
raise ValueError('FindFolder must be called with folders in the same root hierarchy (%r)' % roots)
root = roots.pop()
for elem in self._paged_call(payload_func=self.get_payload, max_items=max_items, **dict(
additional_fields=additional_fields,
restriction=restriction,
shape=shape,
depth=depth,
page_size=self.chunk_size,
offset=offset,
)):
if isinstance(elem, Exception):
yield elem
continue
yield Folder.from_xml_with_root(elem=elem, root=root)
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
11485,
22602,
1330,
2251,
62,
30854,
11,
900,
62,
19875,
62,
8367,
11,
309,
8035,
198,
6738,
11485,
9641,
1330,
7788,
3398,
27746,
62,
10333,
198,
6738,
764,
11321,
1330,
412,
1... | 2.553648 | 699 |
# -*- coding: utf-8; py-indent-offset: 2 -*-
from __future__ import division
from collections import defaultdict
from math import cos, pi, sin, sqrt
import sys
from cctbx import crystal
from cctbx.eltbx import sasaki, chemical_elements
from iotbx.pdb import common_residue_names_water as WATER_RES_NAMES
from libtbx.utils import null_out, xfrange
from mmtbx.ions import server
from scitbx.array_family import flex
from scitbx.math import gaussian_fit_1d_analytical
def anonymize_ions (pdb_hierarchy, log=sys.stdout):
"""
Convert any elemental ions in the PDB hierarchy to water, resetting the
occupancy and scaling the B-factor. The atom segids will be set to the old
resname. NOTE: this does not change the corresponding scatterer in the xray
structure, but a new xray structure can be obtained by calling
hierarchy.extract_xray_structure(crystal_symmetry).
Parameters
----------
pdb_hierarchy : iotbx.pdb.hierarchy.root
log : file, optional
Returns
-------
iotbx.pdb.hierarchy.root
New pdb hierarchy with its ions anonymized
int
Number of atoms that were anonymized.
"""
ion_resnames = set(chemical_elements.proper_upper_list())
for resname in server.params["_lib_charge.resname"]:
if resname not in WATER_RES_NAMES:
ion_resnames.add(resname)
n_converted = 0
pdb_hierarchy = pdb_hierarchy.deep_copy()
for model in pdb_hierarchy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
for atom_group in residue_group.atom_groups():
if atom_group.resname.strip() in ion_resnames:
atoms = atom_group.atoms()
id_strs = []
for atom in atoms:
elem = atom.element.strip()
if elem in ["H", "D"]:
atomic_number = 1
elif elem in ["HE"]:
atomic_number = 2
else:
atomic_number = sasaki.table(elem).atomic_number()
id_strs.append(atom.id_str())
atom.segid = atom_group.resname
atom.name = " O "
atom.element = "O"
atom.charge = ""
atom.occupancy = 1.0
atom.b = atom.b * (10 / atomic_number)
atom_group.resname = "HOH"
for atom, id_str in zip(atoms, id_strs):
print >> log, "%s --> %s, B-iso = %.2f" % (id_str, atom.id_str(),
atom.b)
n_converted += 1
return pdb_hierarchy, n_converted
def sort_atoms_permutation(pdb_atoms, xray_structure):
"""
Creates a list of atoms in pdb_atoms, sorted first by their atomic number,
then by occupancy, and finally, by isotropic b-factor.
Parameters
----------
pdb_atoms : iotbx.pdb.hierarchy.af_shared_atom
xray_structure : cctbx.xray.structure.structure
Returns
-------
flex.size_t of int
i_seqs of sorted atoms
"""
assert pdb_atoms.size() == xray_structure.scatterers().size()
pdb_atoms.reset_i_seq()
atoms_sorted = sorted(
pdb_atoms,
key=lambda x:
(sasaki.table(x.element.strip().upper()).atomic_number(), x.occ, x.b),
reverse=True,
)
sele = flex.size_t([atom.i_seq for atom in atoms_sorted])
return sele
def collect_ions(pdb_hierarchy):
"""
Collects a list of all ions in pdb_hierarchy.
Parameters
----------
pdb_hierarchy : iotbx.pdb.hierarchy.root
Returns
-------
list of iotbx.pdb.hierarchy.atom
"""
elements = chemical_elements.proper_upper_list()
ions = []
for model in pdb_hierarchy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
for atom_group in residue_group.atom_groups():
if atom_group.resname.strip() in elements:
atoms = atom_group.atoms()
assert len(atoms) == 1
for atom in atoms:
ions.append(atom)
return ions
# TODO add test
def compare_ions (hierarchy, reference_hierarchy, reference_xrs,
distance_cutoff=2.0, log=None, ignore_elements=(), only_elements=(),
sel_str_base="segid ION"):
"""
Compares two pdb structures to determine the number of ions that appear in the
reference structure and are either matched or missing in the other structure.
Parameters
----------
hierarchy : iotbx.pdb.hierarchy.root
reference_hierarchy : iotbx.pdb.hierarchy.root
reference_xrs : ...
distance_cutoff : float, optional
log : file, optional
ignore_element : iterable, optional
only_elements : iterable, optional
sel_str_base : str, optional
Returns
-------
int
Number of ions in reference_hierarchy that were also found in hierarchy.
int
Number of ions in reference_hierarchy that were not found in hierarchy.
"""
if log is None:
log = null_out()
sel_cache = hierarchy.atom_selection_cache()
sel_str = sel_str_base
if len(only_elements) > 0:
sel_str += " and (%s)" % " or ".join(
["element %s" % e for e in only_elements])
elif len(ignore_elements) > 0:
sel_str += " and not (%s)" % " or ".join(
["element %s" % e for e in ignore_elements])
ion_isel = sel_cache.selection(sel_str).iselection()
if len(ion_isel) == 0:
return [], []
pdb_atoms = hierarchy.atoms()
pdb_atoms.reset_i_seq()
ions = pdb_atoms.select(ion_isel)
asu_mappings = reference_xrs.asu_mappings(
buffer_thickness=distance_cutoff+0.1)
unit_cell = reference_xrs.unit_cell()
sites_cart = ions.extract_xyz()
sites_frac = unit_cell.fractionalize(sites_cart=sites_cart)
asu_mappings.process_sites_frac(sites_frac,
min_distance_sym_equiv=reference_xrs.min_distance_sym_equiv())
pair_generator = crystal.neighbors_fast_pair_generator(
asu_mappings=asu_mappings,
distance_cutoff=distance_cutoff)
reference_atoms = reference_hierarchy.atoms()
n_xray = reference_xrs.scatterers().size()
ion_ref_i_seqs = []
for k in range(len(ions)):
ion_ref_i_seqs.append([])
for pair in pair_generator:
if ((pair.i_seq < n_xray and pair.j_seq < n_xray) or
(pair.i_seq >= n_xray and pair.j_seq >= n_xray)):
continue
if pair.i_seq < n_xray:
ion_seq, ref_seq = pair.j_seq, pair.i_seq
else:
ion_seq, ref_seq = pair.i_seq, pair.j_seq
site_frac = sites_frac[ion_seq - n_xray]
dxyz = sqrt(pair.dist_sq)
j_seq = ion_seq - n_xray
ion_ref_i_seqs[j_seq].append(ref_seq)
# FIXME better filtering needed - right now we risk double-counting ions in
# the reference model, although I haven't found a case of this yet
matched = []
missing = []
for i_seq, ref_i_seqs in enumerate(ion_ref_i_seqs):
ion = ions[i_seq]
if len(ref_i_seqs) == 0:
print >> log, "No match for %s" % ion.id_str()
missing.append(ion.id_str())
else:
ref_ions = []
for i_seq in ref_i_seqs:
ref_atom = reference_atoms[i_seq]
if ion.element.upper() == ref_atom.element.upper():
ref_ions.append(ref_atom.id_str())
if len(ref_ions) >= 1:
matched.append(ion.id_str())
if len(ref_ions) > 1:
print >> log, "Multiple matches for %s:" % ion.id_str()
for ref_ion in ref_ions:
print >> log, " %s" % ref_ion
else:
print >> log, "Ion %s matches %s" % (ion.id_str(),
ref_ions[0])
else:
print >> log, "No match for %s" % ion.id_str()
missing.append(ion.id_str())
return matched, missing
def _get_points_within_radius(point, radius, radius_step=0.2,
angle_step=pi / 5):
"""
Generates a list of points and their associated radius in steps around a
sphere.
Parameters
----------
point : tuple of float, float, float
X, Y, Z, coordinates to center the sampling around.
radius : float
Max radius around the center to sample.
radius_step : float, optional
Steps along the radius to use when sampling.
angle_step : float, optional
Steps around each radii distance to use when sampling. Amount is in
radians.
Returns
-------
list of tuple of float, float, float
List of points to be sampled.
list of float
List of radii corresponding to each point.
"""
points = [point]
radiuses = [0]
for r in xfrange(radius_step, radius, radius_step):
for theta in xfrange(-pi, pi, angle_step):
for phi in xfrange(-pi, pi, angle_step):
x = r * cos(theta) * sin(phi) + point[0]
y = r * sin(theta) * sin(phi) + point[1]
z = r * cos(phi) + point[2]
points.append((x, y, z))
radiuses.append(r)
return points, radiuses
def fit_gaussian(unit_cell, site_cart, real_map, radius=1.6):
"""
Fit a gaussian function to the map around a site. Samples points in concentric
spheres up to radius away from the site.
f(x) = a * exp(-b * x ** 2)
Parameters
----------
unit_cell : uctbx.unit_cell
site_cart : tuple of float, float, float
The site's cartesian coordinates to sample the density around.
real_map : scitbx.array_family.flex
Real space map of the electron density in the unit cell.
radius : float, optional
The max radius to use for sampling.
Returns
-------
float
Height of gaussian curve.
float
Spread of guassian curve.
See Also
--------
scitbx.math.gaussian_fit_1d_analytical
"""
points, radiuses = _get_points_within_radius(site_cart, radius)
map_heights = \
[real_map.tricubic_interpolation(unit_cell.fractionalize(i))
for i in points]
# Gaussian functions can't have negative values, filter sampled points below
# zero to allow us to find the analytical solution (radius = 2.0 is too big
# for most atoms anyways)
x, y = flex.double(), flex.double()
for rad, height in zip(radiuses, map_heights):
if height > 0:
x.append(rad)
y.append(height)
try:
fit = gaussian_fit_1d_analytical(x=x, y=y)
except RuntimeError as err:
print err
return 0., 0.
else:
return fit.a, fit.b
def count_coordinating_residues (nearby_atoms, distance_cutoff=3.0):
"""
Count the number of residues of each type involved in the coordination
sphere. This may yield additional clues to the identity of ions, e.g. only
Zn will have 4 Cys residues.
Parameters
----------
nearby_atoms : list of mmtbx.ions.environment.atom_contact
distance_cutoff : float, optional
Returns
-------
dict of str, int
"""
unique_residues = []
residue_counts = defaultdict(int)
for contact in nearby_atoms:
if contact.distance() <= distance_cutoff:
parent = contact.atom.parent()
for residue in unique_residues:
if residue == parent:
break
else:
resname = parent.resname
residue_counts[resname] += 1
unique_residues.append(parent)
return residue_counts
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
26,
12972,
12,
521,
298,
12,
28968,
25,
362,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
10688,
1330,
8615,
11,
31028,
11,
... | 2.463415 | 4,387 |
# Copyright (c) 2013-2015, Brice Arnould <unbrice@vleu.net>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following condition are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Helps running user-provided code and getting readable backtraces."""
import sys
import traceback
class Error(Exception):
"""Base class for errors from this module."""
class RunFailed(Error):
"""The user-provided code failed."""
def build_globals():
"""Returns a dictionary that can be used as globals for exec."""
return {
'__name__': '__p1__',
'__doc__': None,
'__builtins__': __builtins__
}
def _find_user_traceback_depth(tb):
"""Returns the depth of user-specific code in a traceback.
This is the depth from wich we find a frame where __name__ is '__p1__'.
Args:
tb: A traceback object.
"""
depth = 0
while tb:
# Find the topmost frame
frame = tb.tb_frame
while frame.f_back:
frame = frame.f_back
if frame.f_globals.get('__name__', None) != '__p1__':
return depth + 1
# If it does not contain '__p1__' go down the stack.
depth += 1
tb = tb.tb_next
# We could not find it, assume everything was user-specified
return 0
def _format_user_traceback(tb):
"""Returns the user-specific part of a traceback."""
tb_depth = _find_user_traceback_depth(tb)
# The limit keyword is counted from the top, we went the bottom.
tb_list = traceback.extract_tb(tb, limit=None)
return traceback.format_list(tb_list[tb_depth:])
def run(code, code_globals):
"""Runs the code with provided globals, improving backtraces readability.
Args:
code: The string or code object to run.
code_globals: A dictionary that defines global and local variables.
Raises:
RunFailed: The user-provided code failed, the message will contain a clean backtrace.
"""
try:
exec(code, code_globals)
except Exception as e:
errors = []
exc_type, exc_value, exc_tb = sys.exc_info()
try:
errors += _format_user_traceback(exc_tb)
finally:
del exc_tb # Break the circular references early
errors += traceback.format_exception_only(exc_type, exc_value)
raise RunFailed(''.join(errors)) from e
return None
| [
2,
15069,
357,
66,
8,
2211,
12,
4626,
11,
1709,
501,
16644,
426,
1279,
403,
1671,
501,
31,
85,
293,
84,
13,
3262,
29,
198,
2,
1439,
2489,
10395,
13,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
... | 2.77917 | 1,277 |
UNKNOWN = "-"
| [
4944,
44706,
796,
366,
21215,
198
] | 2.333333 | 6 |
from app.align_reader import AlignReader
import codecs
import os
from app.utils import LOG
from app import utils
from multiprocessing import Manager
import pickle
import collections
m = Manager()
index_lock = m.Lock()
alignments_lock = m.Lock() | [
6738,
598,
13,
31494,
62,
46862,
1330,
978,
570,
33634,
198,
11748,
40481,
82,
198,
11748,
28686,
198,
6738,
598,
13,
26791,
1330,
41605,
198,
6738,
598,
1330,
3384,
4487,
198,
6738,
18540,
305,
919,
278,
1330,
9142,
198,
11748,
2298,
... | 3.5 | 70 |
from pathlib import Path
for module in Path(__file__).parent.glob('[!_]*.py'):
__import__(f'{__name__}.{module.stem}', locals(), globals())
del module # we don't want to expose the module due to this import
del Path
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
1640,
8265,
287,
10644,
7,
834,
7753,
834,
737,
8000,
13,
4743,
672,
10786,
58,
0,
62,
60,
24620,
9078,
6,
2599,
198,
220,
220,
220,
11593,
11748,
834,
7,
69,
6,
90,
834,
3672,
834,
27422,
... | 2.8375 | 80 |
import json
import random
import sys
import time
import traceback
import requests
import utils
from conf import global_variable
from orm import SubscribeVmss, or_
from task.node import V2ray
from task.proxy_server import new_proxy
from utils import logger
_v2ray_server = new_proxy()
| [
11748,
33918,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
12854,
1891,
198,
198,
11748,
7007,
198,
198,
11748,
3384,
4487,
198,
6738,
1013,
1330,
3298,
62,
45286,
198,
6738,
393,
76,
1330,
19808,
53,
76,
824,
11,
... | 3.418605 | 86 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SizeAvailability(Model):
"""Represents the size information.
:param size_category: The category of the size (Basic, Standard,
Performance). Possible values include: 'Basic', 'Standard', 'Performance'
:type size_category: str or
~azure.mgmt.labservices.models.ManagedLabVmSize
:param is_available: Whether or not this size category is available
:type is_available: bool
"""
_attribute_map = {
'size_category': {'key': 'sizeCategory', 'type': 'str'},
'is_available': {'key': 'isAvailable', 'type': 'bool'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 3.688136 | 295 |
#
# PySNMP MIB module HUAWEI-PIM-BSR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-PIM-BSR-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:35:59 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
InetAddressPrefixLength, InetAddressType, InetZoneIndex, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressPrefixLength", "InetAddressType", "InetZoneIndex", "InetAddress")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ModuleIdentity, Bits, Integer32, Unsigned32, Counter64, iso, ObjectIdentity, MibIdentifier, Counter32, mib_2, NotificationType, Gauge32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Bits", "Integer32", "Unsigned32", "Counter64", "iso", "ObjectIdentity", "MibIdentifier", "Counter32", "mib-2", "NotificationType", "Gauge32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks")
RowStatus, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention", "TruthValue")
hwMcast = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149))
hwPimBsrMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2))
hwPimBsrMib.setRevisions(('2007-04-16 00:00',))
if mibBuilder.loadTexts: hwPimBsrMib.setLastUpdated('200704160000Z')
if mibBuilder.loadTexts: hwPimBsrMib.setOrganization('Huawei Technologies co.,Ltd.')
hwPimBsrObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1))
hwPimBsrConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 2))
hwPimBsrCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 2, 1))
hwPimBsrGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 2, 2))
hwPimBsrElectedBsrRpSetTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2), )
if mibBuilder.loadTexts: hwPimBsrElectedBsrRpSetTable.setStatus('current')
hwPimBsrElectedBsrRpSetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1), ).setIndexNames((0, "HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrGrpMappingAddrType"), (0, "HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrGrpMappingGrpAddr"), (0, "HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrGrpMappingGrpPrefixLen"), (0, "HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrGrpMappingRPAddr"))
if mibBuilder.loadTexts: hwPimBsrElectedBsrRpSetEntry.setStatus('current')
hwPimBsrElectedBsrGrpMappingAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 2), InetAddressType())
if mibBuilder.loadTexts: hwPimBsrElectedBsrGrpMappingAddrType.setStatus('current')
hwPimBsrElectedBsrGrpMappingGrpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 3), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(8, 8), ValueSizeConstraint(16, 16), ValueSizeConstraint(20, 20), )))
if mibBuilder.loadTexts: hwPimBsrElectedBsrGrpMappingGrpAddr.setStatus('current')
hwPimBsrElectedBsrGrpMappingGrpPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 4), InetAddressPrefixLength().subtype(subtypeSpec=ValueRangeConstraint(4, 128)))
if mibBuilder.loadTexts: hwPimBsrElectedBsrGrpMappingGrpPrefixLen.setStatus('current')
hwPimBsrElectedBsrGrpMappingRPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 5), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(8, 8), ValueSizeConstraint(16, 16), ValueSizeConstraint(20, 20), )))
if mibBuilder.loadTexts: hwPimBsrElectedBsrGrpMappingRPAddr.setStatus('current')
hwPimBsrElectedBsrRpSetPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwPimBsrElectedBsrRpSetPriority.setStatus('current')
hwPimBsrElectedBsrRpSetHoldtime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwPimBsrElectedBsrRpSetHoldtime.setStatus('current')
hwPimBsrElectedBsrRpSetExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwPimBsrElectedBsrRpSetExpiryTime.setStatus('current')
hwPimBsrElectedBsrRpSetGrpBidir = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 1, 2, 1, 9), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwPimBsrElectedBsrRpSetGrpBidir.setStatus('current')
hwPimBsrCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 2, 1, 1)).setObjects(("HUAWEI-PIM-BSR-MIB", "hwPimBsrObjectGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwPimBsrCompliance = hwPimBsrCompliance.setStatus('current')
hwPimBsrObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 149, 2, 2, 2, 1)).setObjects(("HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrRpSetPriority"), ("HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrRpSetHoldtime"), ("HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrRpSetExpiryTime"), ("HUAWEI-PIM-BSR-MIB", "hwPimBsrElectedBsrRpSetGrpBidir"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwPimBsrObjectGroup = hwPimBsrObjectGroup.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-PIM-BSR-MIB", hwPimBsrElectedBsrGrpMappingAddrType=hwPimBsrElectedBsrGrpMappingAddrType, hwPimBsrElectedBsrGrpMappingRPAddr=hwPimBsrElectedBsrGrpMappingRPAddr, PYSNMP_MODULE_ID=hwPimBsrMib, hwPimBsrElectedBsrRpSetGrpBidir=hwPimBsrElectedBsrRpSetGrpBidir, hwPimBsrCompliances=hwPimBsrCompliances, hwPimBsrCompliance=hwPimBsrCompliance, hwMcast=hwMcast, hwPimBsrGroups=hwPimBsrGroups, hwPimBsrElectedBsrRpSetExpiryTime=hwPimBsrElectedBsrRpSetExpiryTime, hwPimBsrElectedBsrRpSetHoldtime=hwPimBsrElectedBsrRpSetHoldtime, hwPimBsrConformance=hwPimBsrConformance, hwPimBsrObjectGroup=hwPimBsrObjectGroup, hwPimBsrElectedBsrRpSetEntry=hwPimBsrElectedBsrRpSetEntry, hwPimBsrElectedBsrRpSetPriority=hwPimBsrElectedBsrRpSetPriority, hwPimBsrElectedBsrRpSetTable=hwPimBsrElectedBsrRpSetTable, hwPimBsrElectedBsrGrpMappingGrpAddr=hwPimBsrElectedBsrGrpMappingGrpAddr, hwPimBsrMib=hwPimBsrMib, hwPimBsrElectedBsrGrpMappingGrpPrefixLen=hwPimBsrElectedBsrGrpMappingGrpPrefixLen, hwPimBsrObjects=hwPimBsrObjects)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
367,
34970,
8845,
40,
12,
47,
3955,
12,
4462,
49,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
... | 2.311828 | 3,069 |
import discord
from redbot.core import commands, Config, checks
from redbot.core.utils.embed import randomize_colour
from redbot.core.utils.menus import menu, prev_page, next_page
from discord.ext import tasks
from discord_components import DiscordComponents, Button, ButtonStyle
from .utils import *
from . import player_stats, game_stats
from random import choice
import asyncio
import brawlstats
from typing import Union
from re import sub
import datetime
import aiohttp
from cachetools import TTLCache
from fuzzywuzzy import process
from operator import itemgetter, attrgetter
import pycountry
import math
| [
11748,
36446,
198,
6738,
2266,
13645,
13,
7295,
1330,
9729,
11,
17056,
11,
8794,
198,
6738,
2266,
13645,
13,
7295,
13,
26791,
13,
20521,
1330,
4738,
1096,
62,
49903,
198,
6738,
2266,
13645,
13,
7295,
13,
26791,
13,
3653,
385,
1330,
68... | 3.83125 | 160 |
# original by Xinbo Wu
# annotated by Haoyu Huang
# Personalized PageRank for APVPA
from Utility import *
from Personalized_PageRank import *
main()
| [
2,
2656,
416,
25426,
2127,
18027,
198,
2,
24708,
515,
416,
9398,
726,
84,
31663,
198,
2,
15644,
1143,
7873,
27520,
329,
3486,
53,
4537,
198,
198,
6738,
34030,
1330,
1635,
198,
6738,
15644,
1143,
62,
9876,
27520,
1330,
1635,
628,
198,
... | 3.326087 | 46 |
# coding=utf-8
import factory
import faker
from django.core.files.uploadedfile import SimpleUploadedFile
from website.misc.factories import UserFactory
from .. import models
fake = faker.Faker()
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
8860,
198,
11748,
277,
3110,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
25850,
276,
7753,
1330,
17427,
41592,
276,
8979,
198,
198,
6738,
3052,
13,
44374,
13,
22584,
1749,
1330,
11787,
22... | 3.389831 | 59 |
import time | [
11748,
640
] | 5.5 | 2 |
# -*- coding: utf-8 -*-
import munch
import unittest
import json
import exceptions
from mock import patch, call, MagicMock
from munch import munchify
from datetime import datetime
try: # compatibility with requests-based or restkit-based op.client.python
from openprocurement_client.exceptions import ResourceGone
except ImportError:
from restkit.errors import ResourceGone
# from time import sleep
from openprocurement_client.client import ResourceNotFound
from openprocurement.bridge.contracting.databridge import ContractingDataBridge
from openprocurement.bridge.contracting.journal_msg_ids import (
DATABRIDGE_INFO, DATABRIDGE_START
)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
285,
3316,
198,
11748,
555,
715,
395,
198,
11748,
33918,
198,
198,
11748,
13269,
198,
6738,
15290,
1330,
8529,
11,
869,
11,
6139,
44,
735,
198,
6738,
285,
3316,
... | 3.227679 | 224 |
# USAGE
# python detect_age.py --image images/adrian.png --face face_detector --age age_detector
# import the necessary packages
import numpy as np
import argparse
import cv2
import os
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
ap.add_argument("-f", "--face", required=True,
help="path to face detector model directory")
ap.add_argument("-a", "--age", required=True,
help="path to age detector model directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# define the list of age buckets our age detector will predict
AGE_BUCKETS = ["(0-2)", "(4-6)", "(8-12)", "(15-20)", "(25-32)",
"(38-43)", "(48-53)", "(60-100)"]
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
weightsPath = os.path.sep.join([args["face"],
"res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load our serialized age detector model from disk
print("[INFO] loading age detector model...")
prototxtPath = os.path.sep.join([args["age"], "age_deploy.prototxt"])
weightsPath = os.path.sep.join([args["age"], "age_net.caffemodel"])
ageNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the input image and construct an input blob for the image
image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
print("[INFO] computing face detections...")
faceNet.setInput(blob)
detections = faceNet.forward()
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for the
# object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# extract the ROI of the face and then construct a blob from
# *only* the face ROI
face = image[startY:endY, startX:endX]
faceBlob = cv2.dnn.blobFromImage(face, 1.0, (227, 227),
(78.4263377603, 87.7689143744, 114.895847746),
swapRB=False)
# make predictions on the age and find the age bucket with
# the largest corresponding probability
ageNet.setInput(faceBlob)
preds = ageNet.forward()
i = preds[0].argmax()
age = AGE_BUCKETS[i]
ageConfidence = preds[0][i]
# display the predicted age to our terminal
text = "{}: {:.2f}%".format(age, ageConfidence * 100)
print("[INFO] {}".format(text))
# draw the bounding box of the face along with the associated
# predicted age
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY),
(0, 0, 255), 2)
cv2.putText(image, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# display the output image
cv2.imshow("Image", image)
cv2.waitKey(0) | [
2,
1294,
11879,
198,
2,
21015,
4886,
62,
496,
13,
9078,
1377,
9060,
4263,
14,
324,
4484,
13,
11134,
1377,
2550,
1986,
62,
15255,
9250,
1377,
496,
2479,
62,
15255,
9250,
198,
198,
2,
1330,
262,
3306,
10392,
198,
11748,
299,
32152,
35... | 2.746071 | 1,209 |
import matplotlib
from matplotlib import pyplot as plt
import csv
plot_together()
#plot_increasing_size_exp()
#plot_increasing_alphabeth_exp() | [
11748,
2603,
29487,
8019,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
269,
21370,
628,
198,
198,
29487,
62,
45525,
3419,
198,
2,
29487,
62,
42647,
62,
7857,
62,
11201,
3419,
198,
2,
29487,
62,
42647,
62... | 3.085106 | 47 |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subject_store
from subject.api import authorization
from subject.api import policy
from subject.api import property_protections
from subject.common import property_utils
from subject.common import store_utils
import subject.db
import subject.domain
import subject.location
import subject.notifier
import subject.quota
| [
2,
15069,
2321,
4946,
25896,
5693,
198,
2,
15069,
2211,
19764,
11421,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
... | 3.732075 | 265 |
#!/usr/bin/python
from pipeline import map_representation, strip_dimensions, call_graphviz, call_rank,\
call_similarity, call_filter, function_help, TermExtraction, extract_terms
from database import ManagedSession
from mocs_database import create_query, filter_query
import write_dot
debug = False
def map_args(args):
"""used to filter arguments passed in on the command line that should also
be passed as keyword args to make_map"""
arg_set = set(['starting_year', 'ending_year', 'ranking_algorithm',
'similarity_algorithm', 'filtering_algorithm',
'number_of_terms', 'include_svg_dimensions', 'file_format',
'only_terms', 'sample_size', 'evaluation_output_path', 'n_layers'])
graphattr_set = set(['layerselect'])
pass_args = {}
for arg in arg_set:
if arg in args:
pass_args[arg] = args[arg]
graph_attrs = { key: args[key] for key in graphattr_set if key in args and args[key]}
pass_args['graph_attrs'] = graph_attrs
return pass_args
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="make basemap with the given parameters, render it, and save to the provided filename. Saves as pdf by default, can be changed (to bmp, jpg, png, svg, ps) with file_format flag")
parser.add_argument('output_file')
parser.add_argument('--starting_year', type=int, help='starting year for query (inclusive)')
parser.add_argument('--ending_year', type=int, help='ending year for query (inclusive)')
parser.add_argument('--sample_size', default=30000, type=int, help='number of rows to sample')
parser.add_argument('-r', '--ranking_algorithm', default=call_rank.default, type=int, help=function_help(call_rank))
parser.add_argument('-s', '--similarity_algorithm', default=call_similarity.default, type=int, help=function_help(call_similarity))
parser.add_argument('-f', '--filtering_algorithm', default=call_filter.default, type=int, help=function_help(call_filter))
parser.add_argument('-n', '--number_of_terms', default=1000, type=int, help='number of terms to rank')
parser.add_argument('--include_svg_dimensions', default=False, action="store_true", help='include width and height attributes in svg file')
parser.add_argument('--dirty', default=False, action="store_true", help='include documents not marked as clean (no title or not in English)')
parser.add_argument('--file_format', default='pdf', type=str, help="file format of map. Options include 'pdf', 'bmp', 'jpg', 'png', 'ps', 'svg', or 'raw' for graphviz schematic")
parser.add_argument('--author', default=None, help="string to match author using SQL's like (can use %%)")
parser.add_argument('--conference', default=None, help="string to match conference using SQL's like (can use %%)")
parser.add_argument('--journal', default=None, help="string to match journal using SQL's like (can use %%)")
parser.add_argument('--only_terms', default=False, action="store_true", help="return a list of terms in the map")
parser.add_argument('--term_type_name', type=str, default=TermExtraction.names[TermExtraction.Phrases], help="type of term to extract. Options: %s" % (TermExtraction.names))
parser.add_argument('--debug', default=False, action="store_true", help="print status to stdout")
parser.add_argument('--evaluation_output_path', help="run evaluation metrics, and dump files to this directory")
parser.add_argument('--data_dump_path', type=str, help="dump some pickle files to this path")
parser.add_argument('--n_layers', type=int, default=0)
parser.add_argument('--layerselect')
args = vars(parser.parse_args())
global debug
debug = args['debug']
with ManagedSession() as session:
query = create_query(session, author=args['author'], journal=args['journal'], conference=args['conference'])
map_output = make_map(query,
term_type=TermExtraction.names.index(args['term_type_name']),
data_dump_path=args['data_dump_path'],
**map_args(args))
print args['output_file']
print map_output[:10]
with open(args['output_file'], 'w') as f:
f.write(map_output)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
6738,
11523,
1330,
3975,
62,
15603,
341,
11,
10283,
62,
27740,
5736,
11,
869,
62,
34960,
85,
528,
11,
869,
62,
43027,
11,
59,
198,
220,
220,
220,
869,
62,
38610,
414,
11,
869,
62,
24455,
... | 2.824885 | 1,519 |
"""Tests for segmentFiltering sub-module."""
| [
37811,
51,
3558,
329,
10618,
11928,
20212,
850,
12,
21412,
526,
15931,
198
] | 3.461538 | 13 |
import sys
sys.path.append('../')
import model.labeled_lda as llda
# initialize data
labeled_documents = [("example example example example example"*10, ["example"]),
("test llda model test llda model test llda model"*10, ["test", "llda_model"]),
("example test example test example test example test"*10, ["example", "test"]),
("good perfect good good perfect good good perfect good "*10, ["positive"]),
("bad bad down down bad bad down"*10, ["negative"])]
# new a Labeled LDA model
# llda_model = llda.LldaModel(labeled_documents=labeled_documents, alpha_vector="50_div_K", eta_vector=0.001)
# llda_model = llda.LldaModel(labeled_documents=labeled_documents, alpha_vector=0.02, eta_vector=0.002)
llda_model = llda.LldaModel(labeled_documents=labeled_documents, alpha_vector=0.01)
print(llda_model)
# training
# llda_model.training(iteration=10, log=True)
while True:
print("iteration %s sampling..." % (llda_model.iteration + 1))
llda_model.fit(1)
print("after iteration: %s, perplexity: %s" % (llda_model.iteration, llda_model.perplexity()))
print("delta beta: %s" % llda_model.delta_beta)
if llda_model.is_convergent(method="beta", delta=0.01):
break
# update
print("before updating: ", llda_model)
update_labeled_documents = [("new example test example test example test example test", ["example", "test"])]
llda_model.update(labeled_documents=update_labeled_documents)
print("after updating: ", llda_model)
# train again
# llda_model.training(iteration=10, log=True)
while True:
print("iteration %s sampling..." % (llda_model.iteration + 1))
llda_model.fit(1)
print("after iteration: %s, perplexity: %s" % (llda_model.iteration, llda_model.perplexity()))
print("delta beta: %s" % llda_model.delta_beta)
if llda_model.is_convergent(method="beta", delta=0.01):
break
# inference
# note: the result topics may be different for difference training, because gibbs sampling is a random algorithm
document = "example llda model example example good perfect good perfect good perfect" * 100
topics = llda_model.inference(document=document, iteration=100, times=10)
print(topics)
# perplexity
# calculate perplexity on test data
perplexity = llda_model.perplexity(documents=["example example example example example",
"test llda model test llda model test llda model",
"example test example test example test example test",
"good perfect good good perfect good good perfect good",
"bad bad down down bad bad down"],
iteration=30,
times=10)
print("perplexity on test data: %s" % perplexity)
# calculate perplexity on training data
print("perplexity on training data: %s" % llda_model.perplexity())
# save to disk
save_model_dir = "../data/model"
# llda_model.save_model_to_dir(save_model_dir, save_derivative_properties=True)
llda_model.save_model_to_dir(save_model_dir)
# load from disk
llda_model_new = llda.LldaModel()
llda_model_new.load_model_from_dir(save_model_dir, load_derivative_properties=False)
print("llda_model_new", llda_model_new)
print("llda_model", llda_model)
print("Top-5 terms of topic 'negative': ", llda_model.top_terms_of_topic("negative", 5, False))
print("Doc-Topic Matrix: \n", llda_model.theta)
print("Topic-Term Matrix: \n", llda_model.beta)
| [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
11537,
198,
11748,
2746,
13,
18242,
276,
62,
18986,
355,
32660,
6814,
198,
198,
2,
41216,
1366,
198,
18242,
276,
62,
15390,
2886,
796,
685,
7203,
20688,
1672,
1672,
1672,
1672... | 2.476422 | 1,442 |
from pandas import DataFrame
import europy
from europy.lifecycle.result import TestResult, TestLabel
from europy.lifecycle.report import Report
# TODO: Explicitly import each decorator in release
from europy.decorators import *
EXAMPLE_LABEL_NAME = "my-custom-label"
# This is how you make your own decorator for tests with a custom label or with a provided label
df = DataFrame([[1, 2], [3, 4]], columns=['odds', 'evens'])
# This is how you can create your own labels on the fly
@test(EXAMPLE_LABEL_NAME, "My custom label test")
@custom_decorator("Test with custom decorator")
# This is an example on using raw decorators
@bias("Testing it out")
@data_bias("example data bias test")
# This is an example on using raw decorators
@fairness("Example Fairness Test")
@transparency("Example Transparency Test")
@accountability("Example Accountability Test")
# This is an example on using raw decorators
@unit("Example Unit Test")
# This is an example on using raw decorators
@integration("Example Integration Test")
# This is an example on using raw decorators
@minimum_functionality("Example Minimum Functionality Test")
# This is an example on using raw decorators
@unit("Example with multiple labels")
@fairness()
@minimum_functionality()
@integration()
@bias()
@test(EXAMPLE_LABEL_NAME)
@model_details('tests/model_details_example.json') # this will override the current details in the report
@model_details('tests/model_details_example.yml')
# this must run in order to pass
@model_details() # this will load the latest in the report
@using_params('tests/param_example.yml')
@report_plt("example_figure") | [
6738,
19798,
292,
1330,
6060,
19778,
198,
198,
11748,
11063,
9078,
198,
6738,
11063,
9078,
13,
36195,
47510,
13,
20274,
1330,
6208,
23004,
11,
6208,
33986,
198,
6738,
11063,
9078,
13,
36195,
47510,
13,
13116,
1330,
6358,
198,
198,
2,
16... | 3.451681 | 476 |
# coding=utf-8
# Copyright 2019 SK T-Brain Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kobert.utils.utils import download
def get_onnx_kobert_model(cachedir=".cache"):
"""Get KoBERT ONNX file path after downloading"""
onnx_kobert = {
"url": "s3://skt-lsl-nlp-model/KoBERT/models/onnx_kobert_44529811f0.onnx",
"chksum": "44529811f0",
}
model_info = onnx_kobert
model_path, is_cached = download(
model_info["url"], model_info["chksum"], cachedir=cachedir
)
return model_path
if __name__ == "__main__":
import onnxruntime
import numpy as np
from kobert import get_onnx_kobert_model
onnx_path = get_onnx_kobert_model()
sess = onnxruntime.InferenceSession(onnx_path)
input_ids = [[31, 51, 99], [15, 5, 0]]
input_mask = [[1, 1, 1], [1, 1, 0]]
token_type_ids = [[0, 0, 1], [0, 1, 0]]
len_seq = len(input_ids[0])
pred_onnx = sess.run(
None,
{
"input_ids": np.array(input_ids),
"token_type_ids": np.array(token_type_ids),
"input_mask": np.array(input_mask),
"position_ids": np.array(range(len_seq)),
},
)
print(pred_onnx[-2][0])
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
13130,
14277,
309,
12,
44687,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,... | 2.385257 | 719 |