id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
179584 | <gh_stars>1-10
from scipy import special as sfunc
from scipy.optimize import fsolve
import numpy as np
from tqdm import tqdm
def update_statistics_parallel(statistic_L_i):
    """Worker function for updating one component's sufficient statistics.

    Designed for use with ``multiprocessing.Pool.map``, which passes a single
    argument, hence the packed tuple.

    Parameters
    ----------
    statistic_L_i : tuple
        ``(statistics, L, i)`` — the component's previous statistics vector,
        its factor vector "L", and the component index "i".

    Returns
    -------
    tuple
        ``(i, solution)`` where ``solution`` is the root of
        :func:`discrete_optimised_function` found by ``fsolve``, started
        from the previous statistics.
    """
    statistics, L, i = statistic_L_i
    return i, fsolve(discrete_optimised_function, statistics, args=(L, ))
def discrete_optimised_function(stat_vec, L_component):
    """Evaluate the projection residual for one mixture component.

    Computes ``digamma(stat_vec) - digamma(sum(stat_vec)) - L_component``;
    the root of this expression (in ``stat_vec``) is the statistics vector
    whose expected log-parameters match the target factors ``L_component``.

    Parameters
    ----------
    stat_vec : array-like, shape (n_params of the component,)
        Sufficient statistics for the specific component.
    L_component : array-like, shape (n_params of the component,)
        Factors needed for the projection for the specific component.

    Returns
    -------
    array, shape (stat_vec.shape[0],)
        The residual; zero when ``stat_vec`` solves the projection.
    """
    total = np.sum(stat_vec)
    residual = sfunc.digamma(stat_vec) - sfunc.digamma(total)
    return residual - L_component
class MixtureRatio:
    """
    Mixture Ratio model

    Parameters
    ----------
    variables_domain : array-like, shape (X.shape[1]+1,)
        Domain of all variables, i.e. number of possible values.
        Index 0 is the target variable; the rest are features.
    variables_connection : list of array-likes with shape (n_variables in i-th component, )
        Defining of the structure of the Mixture Ratio. If None, then standard connection
        [[0, 1], [0, 2], ... [0, X.shape[1]]
    init_statistics : list of arrays, shape (n_components + 1, n_parameters for each component)
        Sufficient statistics defining the prior distribution on parameters of the Mixture Ratio model.
        Entry 0 holds the component-weight statistics; entry i+1 holds component i's statistics.
    pool : Pool() from the multiprocessing package
        Pool used for parallel updating of the Mixture components. If None, the components are updated sequentially.

    Attributes
    ----------
    statistics : list of arrays, shape (n_components + 1, n_parameters for each component)
        Sufficient statistics defining the learnt distribution on parameters of the Mixture Ratio model

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[0, 0], [1, 0], [1, 1], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([0, 0, 0, 1, 1, 1])
    >>> from mixture_ratio import MixtureRatio
    >>> mix = MixtureRatio(variables_domain=[2, 4, 3])
    >>> mix.fit(X, Y)
    >>> print(mix.predict_proba([[0, 1]]))
    [1]

    .. note:: The example output above is illustrative only; ``predict_proba``
       actually returns an array of per-class probabilities (see its docstring).
    """
    def __init__(self, variables_domain, variables_connection=None, init_statistics=None, pool=None):
        # Store domains as integer counts of possible values per variable.
        self.variables_domain = np.array(variables_domain).astype(int)
        if variables_connection is None:
            # Default structure pairs every feature with the target variable (index 0).
            self.variables_connection = self._get_standard_variables_connection()
        else:
            self.variables_connection = variables_connection
        self.number_of_components = len(self.variables_connection)
        # Most recent observation (set during partial_fit); starts empty.
        self.current_data = []
        self.normalizing_constants = self._get_normalizing_constants()
        if init_statistics is None:
            # No prior given: start from statistics of a uniform parameter distribution.
            self.statistics = self._get_uniform_statistics()
        else:
            self.statistics = init_statistics
        self.expected_parameters = []
        self._update_expected_parameters()
        # The pool is intentionally excluded from pickling (see __getstate__),
        # because multiprocessing pools are not picklable.
        self.pool = pool
    @staticmethod
    def normalize_proba(P):
        """Normalize conditional probability P, so that it sums to one for each condition.

        Parameters
        ----------
        P : array-like
            Probability to be normalized.

        Returns
        -------
        P : array-like
            Normalized probability (each row divided by its row sum).
        """
        row_sums = P.sum(axis=1)
        P = P / row_sums[:, np.newaxis]
        return P
    @staticmethod
    def k_delta(x, y):
        """Kronecker delta function of x and y.

        Parameters
        ----------
        x : number
        y : number

        Returns
        -------
        f : number
            Returns 1 if x=y, 0 otherwise
        """
        f = 0
        if x == y:
            f = 1
        return f
    @staticmethod
    def k_delta_vec(dimension, position):
        """Create an array with 1 on the given position, 0 on other positions.

        Parameters
        ----------
        dimension : integer
            Dimension of the created array.
        position : integer
            Position of 1 in the created array

        Returns
        -------
        delta_vec : array, shape (dimension, )
            Array with 1 on the given position, 0 on other positions
        """
        delta_vec = np.zeros(dimension)
        delta_vec[position] = 1
        return delta_vec
    def _get_standard_variables_connection(self):
        """Create a default mixture structure with component connections [[0, 1], [0, 2], ..., [0, n_features]]

        Returns
        -------
        standard_connection : list of arrays with shape (2, )
            List of arrays with default variable connections.
        """
        standard_connection = []
        for i in range(len(self.variables_domain)-1):
            # Each component links the target (variable 0) with one feature.
            standard_connection.append(np.array([0, i+1]).astype(int))
        return standard_connection
    def _get_uniform_statistics(self):
        """Create initial sufficient statistics describing a uniform parameter distribution.

        Returns
        -------
        uniform_statistics : list of arrays with shape (product of variables domain in particular connection, )
            List of arrays with initial statistics defining uniform parameter distribution.
            Entry 0 covers the component weights; entry i+1 covers component i.
        """
        uniform_statistics = []
        uniform_statistics.append(np.ones(shape=self.number_of_components,))
        for i in range(self.number_of_components):
            # One parameter per joint configuration of the connected variables.
            num_of_values = np.prod(self.variables_domain[self.variables_connection[i]])
            uniform_statistics.append(np.ones(shape=num_of_values,))
        return uniform_statistics
    def _get_normalizing_constants(self):
        """Compute the normalizing constants for each connection depending on the mixture structure.

        Returns
        -------
        normalizing_constants: list of arrays with shape (product of variables domain in particular connection, )
            List of arrays with normalizing constants.
        """
        normalizing_constants = []
        for i in range(self.number_of_components):
            # Reciprocal of the number of joint configurations for the connection,
            # i.e. the uniform probability of one configuration.
            normalizing_constants.append(1 / np.prod(self.variables_domain[self.variables_connection[i]]))
        return normalizing_constants
    def fit(self, X, y):
        """Fit Mixture Ratio according to X, y

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
        """
        # Fitting is fully online: fit is simply one batch of partial_fit.
        return self.partial_fit(X, y)
    def partial_fit(self, X, y):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks (even on single observations) of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once or when the task requires online learning.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
        """
        # Pack y as column 0 and the features after it, matching variables_domain ordering.
        data = np.zeros((X.shape[0], X.shape[1] + 1))
        data[:, 0] = y
        data[:, 1:] = X
        data = data.astype(int)
        # Observations are processed strictly one at a time (online Bayes updates).
        for i in tqdm(range(data.shape[0])):
            self._update_current_data(data[i, :])
            self._update_statistics()
        return self
    def _update_current_data(self, data):
        """Update attribute current_data with the newest data array

        Parameters
        ----------
        data : array-like, shape (X.shape[1]+1,)
            One observation vector of y and X
        """
        self.current_data = data
    def _update_statistics(self):
        """Update the sufficient statistics with the current data - self.current_data
        """
        if len(self.variables_connection) > 1:
            # Updating for a mixture with at least 2 components
            L = self._get_L()
            if self.pool is not None:
                # If multiprocessing pool was provided, the update is parallelized
                statistics_tuples = [(self.statistics[i], L[i], i) for i in range(len(self.statistics))]
                results = self.pool.map(update_statistics_parallel, statistics_tuples)
                for result in results:
                    # Results may arrive in any order; each carries its own index.
                    self.statistics[result[0]] = result[1]
            else:
                # Non-paralelized update of statistics
                for i in range(len(self.statistics)):
                    # NOTE(review): args=(L[i]) is not a tuple (no trailing comma);
                    # scipy's fsolve appears to tolerate a non-tuple args value,
                    # but (L[i],) would match the parallel branch — confirm.
                    self.statistics[i] = fsolve(self._discrete_optimised_function, self.statistics[i], args=(L[i]))
        else:
            # Updating for a mixture with only 1 component
            self._update_statistics_full_table()
        # Updating of the expected values of parameters
        self._update_expected_parameters()
    def _discrete_optimised_function(self, stat_vec, L_component):
        """Compute value of the optimised function for the given statistics vector and the L vector for
        the specific mixture component

        Parameters
        ----------
        stat_vec : array-like, shape (n_params of the component)
            Sufficient statistics for the specific component
        L_component: array-like shape (n_params of the component)
            Factors needed for the projection for the specific component

        Returns
        -------
        array, shape(stat_vec.shape[0], )
        """
        # Same residual as the module-level discrete_optimised_function
        # (kept as a method for the sequential code path).
        return sfunc.digamma(stat_vec) - sfunc.digamma(np.sum(stat_vec)) - L_component
    def _update_statistics_full_table(self):
        """Update the sufficient statistics when the mixture has only one component.
        """
        # With one component, the update is a simple count increment of the
        # observed joint configuration (flattened via ravel_multi_index).
        ind = np.ravel_multi_index(self.current_data[self.variables_connection[0]],
                                   self.variables_domain[self.variables_connection[0]])
        self.statistics[1][ind] += 1
    def _get_L1(self, gamma, H):
        """Compute the approximated normalizing factor used in Bayes rule for the current learning step.

        Parameters
        ----------
        gamma : array-like, shape (n_components,)
            A part of the normalizing factor specific for each component.
        H: array-like, shape (n_component, )
            The denominator of the Mixture Ratio computed for the expected parameters specific for each component.

        Returns
        -------
        L1: number
            The approximated normalizing factor for Bayes rule.
        """
        # NOTE: the comprehension variable deliberately reuses the name H;
        # each term is gamma[c] * H[c].
        L1 = np.sum([g * H for g, H in zip(gamma, H)])
        return L1
    def _get_L(self):
        """Compute the approximated normalizing factor for Bayes rule.

        Returns
        -------
        L: list of arrays with shapes (product of variables domain in particular connection, )
            The approximated factors needed for the Kullback-Leibler projections.
            L[0] corresponds to the component weights, L[c+1] to component c.
        """
        parameters = self._get_next_expected_parameters()
        H, grad_H = self._get_H_with_grad(parameters)
        gamma = self._get_gamma()
        L1 = self._get_L1(gamma, H)
        L = []
        L.append(np.zeros([self.number_of_components]))
        # +1 accounts for the single new observation being absorbed.
        stat_sum_0 = np.sum(self.statistics[0])+1
        for c in range(self.number_of_components):
            # Factor for the weight of component c: a gamma/H-weighted digamma
            # term plus a gradient correction of the mixture denominator.
            L[0][c] = np.sum(np.multiply(gamma,
                                         np.multiply(H, sfunc.digamma(self.statistics[0][c]
                                                                      + self.k_delta_vec(self.number_of_components, c))
                                                     - sfunc.digamma(stat_sum_0))
                                         + 1/stat_sum_0*np.sum(
                                             np.multiply(parameters[0][:, :].transpose(),
                                                         (grad_H[c, :].reshape(-1, 1) - grad_H.transpose())), 1)))
            # Flat index of the observed configuration within component c's table.
            ind = np.ravel_multi_index(self.current_data[self.variables_connection[c]],
                                       self.variables_domain[self.variables_connection[c]])
            stat_sum = np.sum(self.statistics[c + 1])
            L.append(np.zeros(np.prod(self.variables_domain[self.variables_connection[c]])))
            for d in range(np.prod(self.variables_domain[self.variables_connection[c]])):
                # NOTE(review): the inner delta uses k_delta_vec(number_of_components, c);
                # verify this dimension is intended rather than the component's own
                # parameter dimension.
                L[c + 1][d] = np.sum(
                    np.multiply(
                        gamma, np.multiply(H,
                                           sfunc.digamma(self.statistics[c+1][d]
                                                         + self.k_delta_vec(self.number_of_components, c)
                                                         *self.k_delta(d, ind))
                                           - sfunc.digamma(stat_sum + self.k_delta_vec(
                                               self.number_of_components, c)))))
        # NOTE(review): L is a ragged Python list of arrays; dividing it by the
        # scalar L1 relies on legacy NumPy auto-creating an object array, which
        # raises on NumPy >= 1.24 — confirm against the project's pinned NumPy.
        L = L/L1
        return L
    def _update_expected_parameters(self):
        """Update expected values of all parameters. Typically called after the update of statistics.
        """
        self.expected_parameters = []
        for i in range(self.number_of_components+1):
            # Expected Dirichlet-style parameters: statistics normalized to sum to 1.
            self.expected_parameters.append(np.true_divide(self.statistics[i], np.sum(self.statistics[i])))
    def _get_next_expected_parameters(self):
        """Compute the expected parameters needed for the projection of the distribution obtained by Bayes rule.

        Returns
        -------
        next_expected_parameters: list of arrays with shapes (product of variables domain in particular connection, )
            The expected values of parameters specific for each mixture component according to the distribution obtained
            by Bayes rule
        """
        next_expected_parameters = []
        statistic_sum = np.sum(self.statistics[0])
        # Column c: expected weights if the new observation is attributed to component c
        # (a convex blend of the current expectation and a one-hot increment).
        param = np.array([statistic_sum / (statistic_sum + 1)*self.expected_parameters[0][:]
                          + 1/(statistic_sum+1)*self.k_delta_vec(self.number_of_components, c)
                          for c in range(self.number_of_components)]).transpose()
        next_expected_parameters.append(param)
        for i in range(self.number_of_components):
            statistic_sum = np.sum(self.statistics[i+1])
            ind = np.ravel_multi_index(self.current_data[self.variables_connection[i]],
                                       self.variables_domain[self.variables_connection[i]])
            # Start from identical copies of component i's current expectation ...
            param = np.array([self.expected_parameters[i+1][:]
                              for _ in range(self.number_of_components)]).transpose()
            # ... and only the column attributing the observation to component i
            # is shifted towards the observed configuration.
            param[:, i] = statistic_sum / (statistic_sum + 1)*param[:, i] + 1/(statistic_sum+1) \
                * self.k_delta_vec(np.prod(self.variables_domain[self.variables_connection[i]]), ind)
            next_expected_parameters.append(param)
        return next_expected_parameters
    def _get_H_with_grad(self, params):
        """Compute the denominator and its gradient of the Mixture Ratio for the given parameters.

        Parameters
        ----------
        params : list of arrays
            Candidate expected parameters, as produced by _get_next_expected_parameters.

        Returns
        -------
        H: array, shape (n_components, )
        grad_H: array, shape(n_components, n_components)
        """
        H = np.zeros(self.number_of_components)
        grad_H = np.zeros((self.number_of_components, self.number_of_components))
        for c in range(self.number_of_components):
            for d in range(self.number_of_components):
                # Sum the mixture value over all possible target values (variable 0),
                # keeping the observed features fixed.
                for i in range(self.variables_domain[0]):
                    data = np.copy(self.current_data)
                    data[0] = i
                    ind = np.ravel_multi_index(data[self.variables_connection[d]],
                                               self.variables_domain[self.variables_connection[d]])
                    H[c] = H[c] + params[0][d, c] * params[d+1][ind, c] / self.normalizing_constants[d]
                    grad_H[d, c] = grad_H[d, c] + params[d+1][ind, c]/self.normalizing_constants[d]
            # H stores the reciprocal of the accumulated sum; the gradient is
            # adjusted accordingly (d(1/s) = -s^-2 ds).
            H[c] = 1/H[c]
            grad_H[:, c] = -np.power(H[c], 2)*grad_H[:, c]
        return (H, grad_H)
    def _get_gamma(self):
        """Compute the part of the normalizing factor, gamma, specific for each component.

        Returns
        -------
        gamma: array, shape (n_components, )
        """
        gamma = []
        for i in range(self.number_of_components):
            ind = np.ravel_multi_index(self.current_data[self.variables_connection[i]],
                                       self.variables_domain[self.variables_connection[i]])
            # Component weight times the component's probability of the observed
            # configuration, scaled by the connection's normalizing constant.
            gamma.append(self.expected_parameters[0][i]
                         * self.expected_parameters[i+1][ind] / self.normalizing_constants[i])
        return gamma
    def get_predictor(self):
        """Compute the probability for the whole variables domain.

        Returns
        -------
        P: array, shape (tuple(self.variables_domain))
            Array representing the conditional probability function.
        """
        P = np.zeros(tuple(self.variables_domain, ))
        # Enumerate every joint configuration of all variables.
        for i in range(np.prod(self.variables_domain)):
            data = np.array(np.unravel_index(i, self.variables_domain))
            for c in range(self.number_of_components):
                ind = np.ravel_multi_index(data[self.variables_connection[c]],
                                           self.variables_domain[self.variables_connection[c]])
                P[tuple(data)] = P[tuple(data)] + self.expected_parameters[0][c]\
                    * self.expected_parameters[c+1][ind]/self.normalizing_constants[c]
        P = self.normalize_proba(P)
        return P
    def predict_proba(self, X):
        """Compute the predicted probability for all of the classes for the given X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Feature vectors to classify.

        Returns
        -------
        proba: array, shape (n_samples, n_classes)
            Array containing probabilities of each class for each sample.
        """
        proba = np.zeros((len(X), self.variables_domain[0]))
        for i, d in enumerate(X):
            P = np.zeros(self.variables_domain[0])
            # Evaluate the (unnormalized) mixture for every candidate class o.
            for o in range(self.variables_domain[0]):
                data_vec = np.concatenate((np.array([o]), d))
                for c in range(self.number_of_components):
                    ind = np.ravel_multi_index(data_vec[self.variables_connection[c]],
                                               self.variables_domain[self.variables_connection[c]])
                    P[o] = P[o] + self.expected_parameters[0][c]\
                        * self.expected_parameters[c + 1][ind] / self.normalizing_constants[c]
            # Normalize over classes for this sample.
            proba[i, :] = P[:]/np.sum(P)
        return proba
    def __getstate__(self):
        # Exclude the (unpicklable) multiprocessing pool from the pickled state.
        self_dict = self.__dict__.copy()
        del self_dict['pool']
        return self_dict
    def __setstate__(self, state):
        # Restore everything except the pool, which must be re-supplied by the caller.
        self.__dict__.update(state)
187319 | from .storage import Storage | StarcoderdataPython |
3359069 | <reponame>huangenyan/Lattish
# -*- coding: utf-8 -*-
import unittest
from mahjong.hand import FinishedHand
from utils.tests import TestMixin
class YakumanCalculationTestCase(unittest.TestCase, TestMixin):
    """Unit tests for yakuman (limit-hand) detection and scoring in FinishedHand.

    Each test builds a hand from tile strings (via TestMixin helpers), checks
    the dedicated is_* predicate where one exists, and asserts the han/fu
    values produced by estimate_hand_value (13 han for a single yakuman,
    26 for a double yakuman).
    """
    def test_is_tenhou(self):
        """Tenhou (dealer's heavenly hand): 13 han, 40 fu, exactly one yaku."""
        hand = FinishedHand()
        tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
        win_tile = self._string_to_136_tile(sou='4')
        result = hand.estimate_hand_value(tiles, win_tile, is_tenhou=True)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 40)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_chiihou(self):
        """Chiihou (non-dealer's earthly hand): 13 han, 40 fu, one yaku."""
        hand = FinishedHand()
        tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
        win_tile = self._string_to_136_tile(sou='4')
        result = hand.estimate_hand_value(tiles, win_tile, is_chiihou=True)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 40)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_daisangen(self):
        """Daisangen (big three dragons): predicate plus 13 han / 50 fu scoring."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='123', man='22', honors='555666777')
        self.assertTrue(hand.is_daisangen(self._hand(tiles, 0)))
        tiles = self._string_to_136_array(sou='123', man='22', honors='555666777')
        win_tile = self._string_to_136_tile(honors='7')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 50)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_shosuushi(self):
        """Shousuushii (small four winds): predicate plus 13 han / 60 fu scoring."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='123', honors='11122233344')
        self.assertTrue(hand.is_shosuushi(self._hand(tiles, 0)))
        tiles = self._string_to_136_array(sou='123', honors='11122233344')
        win_tile = self._string_to_136_tile(honors='4')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 60)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_daisuushi(self):
        """Daisuushii (big four winds): double yakuman — 26 han expected."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='22', honors='111222333444')
        self.assertTrue(hand.is_daisuushi(self._hand(tiles, 0)))
        tiles = self._string_to_136_array(sou='22', honors='111222333444')
        win_tile = self._string_to_136_tile(honors='4')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 26)
        self.assertEqual(result['fu'], 60)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_tsuisou(self):
        """Tsuuiisou (all honors): positive and negative predicate cases, then scoring."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(honors='11122233366677')
        self.assertTrue(hand.is_tsuisou(self._hand(tiles, 0)))
        tiles = self._string_to_34_array(honors='11223344556677')
        self.assertTrue(hand.is_tsuisou(self._hand(tiles, 0)))
        # Mixed honors + number tiles must NOT qualify.
        tiles = self._string_to_34_array(honors='1133445577', pin='88', sou='11')
        self.assertFalse(hand.is_tsuisou(self._hand(tiles, 0)))
        # Seven-pairs all-honors form: 25 fu (chiitoitsu fu).
        tiles = self._string_to_136_array(honors='11223344556677')
        win_tile = self._string_to_136_tile(honors='7')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 25)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_chinroto(self):
        """Chinroutou (all terminals): predicate, then a 26-han hand valuation."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='111999', man='111999', pin='99')
        self.assertTrue(hand.is_chinroto(self._hand(tiles, 0)))
        # NOTE(review): this second hand contains sou='222' (non-terminal), so
        # the 26 han presumably comes from another double yakuman (e.g.
        # suuankou tanki), not chinroutou — verify the fixture's intent.
        tiles = self._string_to_136_array(sou='111222', man='111999', pin='99')
        win_tile = self._string_to_136_tile(pin='9')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 26)
        self.assertEqual(result['fu'], 60)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_kokushi(self):
        """Kokushi musou (thirteen orphans): single wait = 13 han, 13-sided wait = 26 han."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='119', man='19', pin='19', honors='1234567')
        self.assertTrue(hand.is_kokushi(tiles))
        tiles = self._string_to_136_array(sou='119', man='19', pin='19', honors='1234567')
        win_tile = self._string_to_136_tile(sou='9')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 0)
        self.assertEqual(len(result['hand_yaku']), 1)
        # Winning on the pair tile (sou 1) upgrades to the double-yakuman form.
        tiles = self._string_to_136_array(sou='119', man='19', pin='19', honors='1234567')
        win_tile = self._string_to_136_tile(sou='1')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 26)
        self.assertEqual(result['fu'], 0)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_ryuisou(self):
        """Ryuuiisou (all green): predicate plus 13 han / 40 fu scoring."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='22334466888', honors='666')
        self.assertTrue(hand.is_ryuisou(self._hand(tiles, 0)))
        tiles = self._string_to_136_array(sou='22334466888', honors='666')
        win_tile = self._string_to_136_tile(honors='6')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 40)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_suuankou(self):
        """Suuankou (four concealed triplets): tsumo-only predicate, 13 vs 26 han forms."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='111444', man='333', pin='44555')
        win_tile = self._string_to_136_tile(sou='4')
        # The predicate depends on whether the win was by tsumo (self-draw).
        self.assertTrue(hand.is_suuankou(win_tile, self._hand(tiles, 0), True))
        self.assertFalse(hand.is_suuankou(win_tile, self._hand(tiles, 0), False))
        tiles = self._string_to_136_array(sou='111444', man='333', pin='44555')
        win_tile = self._string_to_136_tile(pin='5')
        result = hand.estimate_hand_value(tiles, win_tile, is_tsumo=True)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 50)
        self.assertEqual(len(result['hand_yaku']), 1)
        # The same hand won by ron must not score as suuankou.
        result = hand.estimate_hand_value(tiles, win_tile, is_tsumo=False)
        self.assertNotEqual(result['han'], 13)
        # Tanki (pair) wait version is a double yakuman.
        tiles = self._string_to_136_array(sou='111444', man='333', pin='44455')
        win_tile = self._string_to_136_tile(pin='5')
        result = hand.estimate_hand_value(tiles, win_tile, is_tsumo=True)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 26)
        self.assertEqual(result['fu'], 50)
        self.assertEqual(len(result['hand_yaku']), 1)
        tiles = self._string_to_136_array(man='33344455577799')
        win_tile = self._string_to_136_tile(man='9')
        result = hand.estimate_hand_value(tiles, win_tile, is_tsumo=False)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 26)
        self.assertEqual(result['fu'], 50)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_chuuren_poutou(self):
        """Chuuren poutou (nine gates): predicate variants, 13 han normal / 26 han pure wait."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(man='11122345678999')
        self.assertTrue(hand.is_chuuren_poutou(self._hand(tiles, 0)))
        tiles = self._string_to_34_array(pin='11123345678999')
        self.assertTrue(hand.is_chuuren_poutou(self._hand(tiles, 0)))
        tiles = self._string_to_34_array(sou='11123456678999')
        self.assertTrue(hand.is_chuuren_poutou(self._hand(tiles, 0)))
        tiles = self._string_to_34_array(sou='11123456678999')
        self.assertTrue(hand.is_chuuren_poutou(self._hand(tiles, 0)))
        tiles = self._string_to_34_array(sou='11123456678999')
        self.assertTrue(hand.is_chuuren_poutou(self._hand(tiles, 0)))
        tiles = self._string_to_34_array(sou='11123456789999')
        self.assertTrue(hand.is_chuuren_poutou(self._hand(tiles, 0)))
        tiles = self._string_to_136_array(man='11123456789999')
        win_tile = self._string_to_136_tile(man='1')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 40)
        self.assertEqual(len(result['hand_yaku']), 1)
        # Junsei (pure nine-sided wait) form is a double yakuman.
        tiles = self._string_to_136_array(man='11122345678999')
        win_tile = self._string_to_136_tile(man='2')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 26)
        self.assertEqual(result['fu'], 50)
        self.assertEqual(len(result['hand_yaku']), 1)
    def test_is_suukantsu(self):
        """Suukantsu (four kans): predicate with called kans, then 13 han / 80 fu scoring."""
        hand = FinishedHand()
        tiles = self._string_to_34_array(sou='111333', man='222', pin='44555')
        called_kan_indices = [self._string_to_34_tile(sou='1'), self._string_to_34_tile(sou='3'),
                              self._string_to_34_tile(pin='5'), self._string_to_34_tile(man='2')]
        self.assertTrue(hand.is_suukantsu(self._hand(tiles, 0), called_kan_indices))
        tiles = self._string_to_136_array(sou='111333', man='222', pin='44555')
        win_tile = self._string_to_136_tile(pin='4')
        open_sets = [self._string_to_open_34_set(sou='111'), self._string_to_open_34_set(sou='333')]
        called_kan_indices = [self._string_to_136_tile(sou='1'), self._string_to_136_tile(sou='3'),
                              self._string_to_136_tile(pin='5'), self._string_to_136_tile(man='2')]
        result = hand.estimate_hand_value(tiles, win_tile, open_sets=open_sets, called_kan_indices=called_kan_indices)
        self.assertEqual(result['error'], None)
        self.assertEqual(result['han'], 13)
        self.assertEqual(result['fu'], 80)
        self.assertEqual(len(result['hand_yaku']), 1)
| StarcoderdataPython |
76375 | #!/usr/bin/env python2
import sys
import hyperdex.client
import json
import os
from testlib import *
from hyperdex.client import *
# Connect to the HyperDex coordinator given on the command line: argv[1] is
# the host, argv[2] the port.
c = hyperdex.client.Client(sys.argv[1], int(sys.argv[2]))
def to_objectset(xs):
    """Convert an iterable of dicts into a set of frozensets of their items,
    so unordered collections of dicts can be compared for equality."""
    result = set()
    for mapping in xs:
        result.add(frozenset(mapping.items()))
    return result
# Empty Document
assertTrue(c.put('kv', 'k', {}))
assertEquals(c.get('kv', 'k')['v'], Document({}))
# Basic Stuff
assertTrue(c.put('kv', 'k', {'v': Document({})}))
assertEquals(c.get('kv', 'k')['v'], Document({}))
assertTrue(c.put('kv', 'k', {'v': Document({'a': 'b', 'c': {'d' : 1, 'e': 'f', 'g': -2 }})}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 1, 'e': 'f', 'g': -2 }}))
assertFalse(c.atomic_add('kv', 'k', {'v.a': 1}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 1, 'e': 'f', 'g': -2 }}))
assertTrue(c.atomic_add('kv', 'k', {'v.c.d' : 5}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 6, 'e': 'f', 'g': -2 }}))
assertTrue(c.atomic_add('kv', 'k', {'v.c.d' : 5, 'v.c.g': 5}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 11, 'e': 'f' , 'g': 3}}))
assertTrue(c.string_prepend('kv', 'k', {'v.a' : 'x', 'v.c.e': 'z'}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xb', 'c': {'d' : 11, 'e': 'zf', 'g': 3}}))
assertTrue(c.string_append('kv', 'k', {'v.a' : 'x', 'v.c.e': 'z'}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}}))
assertTrue(c.string_append('kv', 'k', {'v.k.l': 'm'}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}, 'k' : {'l' : 'm'}}))
assertTrue(c.atomic_add('kv', 'k', {'v.k.a.b.c.d' : 1}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}, 'k' : {'a': {'b' : {'c' : {'d' : 1}}}, 'l' : 'm'}}))
assertTrue(c.atomic_sub('kv', 'k', {'v.k.a.b.c.d' : 5}))
assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}, 'k' : {'a': {'b' : {'c' : {'d' : -4}}}, 'l' : 'm'}}))
# Bit operations
assertTrue(c.put('kv', 'k3', {'v': Document({'a': 'b', 'c': {'d' : 100, 'e': 'f', 'g': 5 }})}))
assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 100, 'e': 'f', 'g': 5 }}))
assertTrue(c.atomic_mod('kv', 'k3', {'v.c.d' : 10000}))
assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 100, 'e': 'f', 'g': 5 }}))
assertTrue(c.atomic_mod('kv', 'k3', {'v.c.d' : 22}))
assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 12, 'e': 'f', 'g': 5 }}))
assertTrue(c.atomic_xor('kv', 'k3', {'v.c.g' : 4}))
assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 12, 'e': 'f', 'g': 1 }}))
assertTrue(c.atomic_or('kv', 'k3', {'v.c.g' : 4}))
assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 12, 'e': 'f', 'g': 5 }}))
# Multiply and divide
assertTrue(c.put('kv', 'k4', {'v': Document({'a': 200})}))
assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 200 }))
assertTrue(c.atomic_div('kv', 'k4', {'v.a' : 1}))
assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 200 }))
assertTrue(c.atomic_div('kv', 'k4', {'v.a' : 2}))
assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 100 }))
assertTrue(c.atomic_mul('kv', 'k4', {'v.a' : 4}))
assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 400 }))
assertTrue(c.atomic_mul('kv', 'k4', {'v.a' : 1}))
assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 400 }))
assertTrue(c.atomic_mul('kv', 'k4', {'v.a' : 0}))
assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 0 }))
# Floating point numbers
assertTrue(c.put('kv', 'k10', {'v': Document({'a': 200})}))
assertTrue(c.atomic_add('kv', 'k10', {'v.a' : 100.0}))
assertEquals(c.get('kv', 'k10')['v'], Document({ 'a': 300.0 }))
assertTrue(c.atomic_mul('kv', 'k10', {'v.a' : 1.5}))
assertEquals(c.get('kv', 'k10')['v'], Document({ 'a': 450.0 }))
# Build a new subdocument
assertTrue(c.put('kv', 'k6', {'v' : Document({'a' : 100})}))
assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 }))
assertFalse(c.atomic_add('kv', 'k6', {'v.a.b' :1}))
assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 }))
assertTrue(c.atomic_add('kv', 'k6', {'v.c.b' :1}))
assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}}))
assertTrue(c.string_prepend('kv', 'k6', {'v.i.j.k' : 'xyz'}))
assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}, 'i' : {'j' : {'k' : 'xyz'}}}))
assertFalse(c.string_prepend('kv', 'k6', {'v.i.j' : 'xyz'}))
assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}, 'i' : {'j' : {'k' : 'xyz'}}}))
assertTrue(c.put('kv', 'k6', {'v.d' : Document({'q' : 1})}))
assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}, 'i' : {'j' : {'k' : 'xyz'}}, 'd' : {'q' : 1}}))
# Remove a property
assertTrue(c.put('kv', 'k7', {'v' : Document({'a' : {'b' : 3}})}))
assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {'b' : 3}}))
assertTrue(c.document_unset('kv', 'k7', {'v.a.b' : 1}))
assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {}}))
assertFalse(c.document_unset('kv', 'k7', {'v.a.b' : 1}))
# Rename a property
assertTrue(c.put('kv', 'k7', {'v' : Document({'a' : {'b' : 3}})}))
assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {'b' : 3}}))
assertTrue(c.document_rename('kv', 'k7', {'v.a.b' : 'a.c'}))
assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {'c' : 3}}))
assertFalse(c.document_rename('kv', 'k7', {'v.a.b' : 'c'}))
assertFalse(c.document_rename('kv', 'k7', {'v.a.b' : 'b'}))
# Set new values (returns false if they already exist)
assertTrue(c.put('kv', 'k8', {'v' : Document({'a' : { 'b' : 'c'}})}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c'}}))
assertTrue(c.put('kv', 'k8', {'v.a.b' : 'c'}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c'}}))
assertTrue(c.put('kv', 'k8', {'v.a.b' : 'c'}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c'}}))
assertTrue(c.put('kv', 'k8', {'v.a.c' : 1}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 1}}))
assertTrue(c.put('kv', 'k8', {'v.a.c' : 2}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 2}}))
assertTrue(c.put('kv', 'k8', {'v.a.c' : 'c'}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}}))
assertTrue(c.put('kv', 'k8', {'v.b.a' : 1, 'v.b.b' : 1, 'v.b.c' : 'xyz'}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}, 'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}}))
assertTrue(c.put('kv', 'k8', {'v.c' : Document({'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}})}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}, 'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}, 'c' : {'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}}}))
assertTrue(c.put('kv', 'k8', {'v.d' : 2.5}))
assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}, 'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}, 'c' : {'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}}, 'd' : 2.5}))
# Arrays
assertTrue(c.put('kv', 'k11', {'v' : Document({'a' : [1,2,3,0]})}))
assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,0]}))
assertTrue(c.put('kv', 'k11', {'v.a[3]' : 4}))
assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4]}))
assertFalse(c.put('kv', 'k11', {'v.a[3].b' : 4}))
assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4]}))
assertTrue(c.list_rpush('kv', 'k11', {'v.a' : "5"}))
assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4,"5"]}))
assertTrue(c.list_rpush('kv', 'k11', {'v.a' : Document({'x':'y'})}))
assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4,"5",{'x':'y'}]}))
assertTrue(c.list_lpush('kv', 'k11', {'v.a' : 0}))
assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [0,1,2,3,4,"5",{'x':'y'}]}))
# Search on Documents
assertTrue(c.put('kv', 'k9', {'v' : Document({'x' : {'b' : 'c'}})}))
res1 = c.search('kv', {'v.x.b' : 'c'})
res2 = c.search('kv', {'v.x' : Document({'b' : 'c'})})
res3 = c.search('kv', {'v' : Document({'x' : {'b' : 'c'}})})
assertEquals(res1.next(), {'k' : 'k9', 'v' : Document({'x' : {'b' : 'c'}})})
assertFalse(res1.hasNext())
assertEquals(res2.next(), {'k' : 'k9', 'v' : Document({'x' : {'b' : 'c'}})})
assertFalse(res2.hasNext())
assertEquals(res3.next(), {'k' : 'k9', 'v' : Document({'x' : {'b' : 'c'}})})
assertFalse(res3.hasNext())
| StarcoderdataPython |
1690993 | import numpy as np
from discretize.utils.matrix_utils import mkvc
from discretize.utils.code_utils import deprecate_function
def cylindrical_to_cartesian(grid, vec=None):
    r"""Transform points (or vectors at points) from cylindrical to cartesian coordinates.

    Parameters
    ----------
    grid : array_like
        Locations in cylindrical coordinates :math:`(r, \theta, z)`, one
        point per row (a single point may be given as a 1D array).
    vec : array_like, optional
        Vectors defined at the ``grid`` locations, in cylindrical components.
        If ``None``, the grid points themselves are transformed.

    Returns
    -------
    numpy.ndarray
        The points (or vectors) expressed in cartesian ``(x, y, z)``.
    """
    # NOTE: the docstring is a raw string; the original non-raw string turned
    # ``\theta`` into a literal tab followed by "heta".
    grid = np.atleast_2d(grid)
    if vec is None:
        return np.hstack(
            [
                mkvc(grid[:, 0] * np.cos(grid[:, 1]), 2),
                mkvc(grid[:, 0] * np.sin(grid[:, 1]), 2),
                mkvc(grid[:, 2], 2),
            ]
        )
    # Accept any array_like for vec (the original required an ndarray).
    vec = np.asarray(vec)
    if len(vec.shape) == 1 or vec.shape[1] == 1:
        vec = vec.reshape(grid.shape, order="F")
    # Rotate each vector by the angular coordinate of its location.
    x = vec[:, 0] * np.cos(grid[:, 1]) - vec[:, 1] * np.sin(grid[:, 1])
    y = vec[:, 0] * np.sin(grid[:, 1]) + vec[:, 1] * np.cos(grid[:, 1])
    newvec = [x, y]
    if grid.shape[1] == 3:
        z = vec[:, 2]
        newvec += [z]
    return np.vstack(newvec).T
def cyl2cart(grid, vec=None):
    """Shorthand alias for :func:`cylindrical_to_cartesian`."""
    return cylindrical_to_cartesian(grid, vec=vec)
def cartesian_to_cylindrical(grid, vec=None):
    r"""Transform points (or vectors at points) from cartesian to cylindrical coordinates.

    Parameters
    ----------
    grid : array_like
        Locations in cartesian coordinates ``(x, y, z)``, one point per row.
    vec : array_like, optional
        Vectors defined at the ``grid`` locations, in cartesian components.
        If ``None``, the grid points themselves are transformed.

    Returns
    -------
    numpy.ndarray
        Points as :math:`(r, \theta, z)` when ``vec`` is ``None``, otherwise
        the vectors expressed in cylindrical components.
    """
    grid = np.atleast_2d(grid)
    theta = np.arctan2(grid[:, 1], grid[:, 0])
    if vec is None:
        # BUG FIX: the grid itself was previously fed through the vector
        # rotation below, which always produced 0 for the angular coordinate.
        # The cylindrical coordinates of a point are (r, theta, z).
        r = np.sqrt(grid[:, 0] ** 2 + grid[:, 1] ** 2)
        return np.column_stack([r, theta, grid[:, 2]])
    vec = np.atleast_2d(vec)
    # Project the cartesian components onto the local (r, theta) directions.
    return np.column_stack(
        [
            np.cos(theta) * vec[:, 0] + np.sin(theta) * vec[:, 1],
            -np.sin(theta) * vec[:, 0] + np.cos(theta) * vec[:, 1],
            vec[:, 2],
        ]
    )
def cart2cyl(grid, vec=None):
    """An alias for cartesian_to_cylindrical"""
    # BUG FIX: this alias previously delegated to cylindrical_to_cartesian,
    # i.e. it performed the inverse of the advertised transformation.
    return cartesian_to_cylindrical(grid, vec)
def rotation_matrix_from_normals(v0, v1, tol=1e-20):
    """Compute the 3x3 matrix rotating direction ``v0`` onto direction ``v1``.

    Uses Rodrigues' rotation formula about the axis ``v0 x v1``:
    https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula

    :param numpy.array v0: vector of length 3
    :param numpy.array v1: vector of length 3
    :param tol = 1e-20: tolerance. If the norm of the cross product between the two vectors is below this, no rotation is performed
    :rtype: numpy.array, 3x3
    :return: rotation matrix which rotates the frame so that v0 is aligned with v1
    """
    if len(v0) != 3:
        raise ValueError("Length of n0 should be 3")
    if len(v1) != 3:
        raise ValueError("Length of n1 should be 3")
    # Normalize both vectors; the rotation only depends on their directions.
    n0 = v0 * 1.0 / np.linalg.norm(v0)
    n1 = v1 * 1.0 / np.linalg.norm(v1)
    # BUG FIX: clip the dot product into [-1, 1]. Floating-point round-off on
    # (anti-)parallel unit vectors can push it slightly outside, which made
    # sqrt(1 - dot**2) below return NaN. Since n0 and n1 are unit vectors the
    # old extra division by their norms was redundant.
    cosT = np.clip(n0.dot(n1), -1.0, 1.0)
    # Rotation axis: cross product of the two unit vectors.
    rotAx = np.cross(n0, n1)
    if np.linalg.norm(rotAx) < tol:
        # (Anti-)parallel vectors: keep the original behavior of returning
        # the identity when no unique rotation axis exists.
        return np.eye(3, dtype=float)
    rotAx *= 1.0 / np.linalg.norm(rotAx)
    sinT = np.sqrt(1.0 - cosT ** 2)
    # Rodrigues' formula: R = I + sin(T) K + (1 - cos(T)) K^2
    ux = np.array(
        [
            [0.0, -rotAx[2], rotAx[1]],
            [rotAx[2], 0.0, -rotAx[0]],
            [-rotAx[1], rotAx[0], 0.0],
        ],
        dtype=float,
    )
    return np.eye(3, dtype=float) + sinT * ux + (1.0 - cosT) * (ux.dot(ux))
def rotate_points_from_normals(XYZ, n0, n1, x0=np.r_[0.0, 0.0, 0.0]):
    """Rotate a grid of points so that the direction ``n0`` aligns with ``n1``.

    The rotation is performed about the point ``x0``.

    :param numpy.array XYZ: points to rotate, shape (n, 3)
    :param numpy.array n0: vector of length 3, should have norm 1
    :param numpy.array n1: vector of length 3, should have norm 1
    :param numpy.array x0: vector of length 3, point about which we perform the rotation
    :rtype: numpy.array, shape (n, 3)
    :return: the rotated points
    """
    # Validate inputs before doing any work (the old code computed the
    # rotation matrix first; the docstring also wrongly advertised a 3x3
    # matrix return instead of the rotated points).
    if XYZ.shape[1] != 3:
        raise ValueError("Grid XYZ should be 3 wide")
    if len(x0) != 3:
        raise ValueError("x0 should have length 3")
    R = rotation_matrix_from_normals(n0, n1)
    # Shift to the rotation center, rotate, then shift back.
    X0 = np.ones([XYZ.shape[0], 1]) * mkvc(x0)
    return (XYZ - X0).dot(R.T) + X0  # equivalent to (R*(XYZ - X0)).T + X0
# Deprecated camelCase aliases kept for backwards compatibility; scheduled
# for removal in version 1.0.0.
rotationMatrixFromNormals = deprecate_function(
    rotation_matrix_from_normals, "rotationMatrixFromNormals", removal_version="1.0.0"
)
rotatePointsFromNormals = deprecate_function(
    rotate_points_from_normals, "rotatePointsFromNormals", removal_version="1.0.0"
)
| StarcoderdataPython |
163052 | import mock
from nose.tools import assert_equal, assert_in, raises, assert_is, assert_is_instance, assert_false, assert_true
from .. import metrics as mm, exceptions, histogram, simple_metrics as simple, meter
class TestMetricsModule(object):
    """Unit tests for the ``appmetrics.metrics`` module (imported as ``mm``).

    Uses nose-style assertions and ``mock``. ``setUp``/``tearDown`` snapshot
    and restore the module-level ``REGISTRY`` and ``TAGS`` dictionaries so
    every test starts from a clean, empty state.
    """
    def setUp(self):
        # Snapshot module-level state, then start the test with it empty.
        self.original_registy = mm.REGISTRY.copy()
        self.original_tags = mm.TAGS.copy()
        mm.REGISTRY.clear()
        mm.TAGS.clear()
    def tearDown(self):
        # Restore the snapshots taken in setUp.
        mm.REGISTRY.clear()
        mm.REGISTRY.update(self.original_registy)
        mm.TAGS.clear()
        mm.TAGS.update(self.original_tags)
    # --- registry management -------------------------------------------
    def test_new_metric(self):
        Cls = mock.Mock()
        args = [mock.Mock(), mock.Mock()]
        kwargs = dict(other=mock.Mock())
        res = mm.new_metric("test", Cls, *args, **kwargs)
        assert_in("test", mm.REGISTRY)
        item = mm.REGISTRY["test"]
        assert_equal(
            Cls.call_args_list,
            [mock.call(*args, **kwargs)]
        )
        assert_equal(item, Cls())
        assert_equal(item, res)
    @raises(exceptions.DuplicateMetricError)
    def test_new_metric_duplicated(self):
        Cls = mock.Mock()
        mm.new_metric("test", Cls)
        mm.new_metric("test", Cls)
    @raises(exceptions.InvalidMetricError)
    def test_metric_not_found(self):
        mm.metric("test")
    def test_metric(self):
        expected = mm.REGISTRY["test"] = mock.Mock()
        assert_equal(mm.metric("test"), expected)
    def test_metrics(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        expected = ["test1", "test2"]
        assert_equal(mm.metrics(), expected)
    def test_get(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        assert_equal(mm.get("test1"), mm.REGISTRY["test1"].get.return_value)
    @raises(exceptions.InvalidMetricError)
    def test_get_not_existing(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        mm.get("test3")
    def test_notify(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        mm.notify("test1", 123)
        assert_equal(
            mm.REGISTRY["test1"].notify.call_args_list,
            [mock.call(123)]
        )
    @raises(exceptions.InvalidMetricError)
    def test_notify_not_existing(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        mm.notify("test3", 123)
    def test_delete_metric(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        mm.REGISTRY = dict(test1=m1, test2=m2)
        assert_equal(mm.delete_metric("test1"), m1)
        assert_equal(mm.REGISTRY, dict(test2=m2))
    def test_delete_metric_not_found(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        mm.REGISTRY = dict(test1=m1, test2=m2)
        assert_equal(mm.delete_metric("test3"), None)
        assert_equal(mm.REGISTRY, dict(test1=m1, test2=m2))
    def test_delete_metric_with_tags(self):
        mm.TAGS = {"test": {"test1", "test3"}}
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        mm.REGISTRY = dict(test1=m1, test2=m2, test3=m3)
        assert_equal(mm.delete_metric("test1"), m1)
        assert_equal(mm.REGISTRY, dict(test2=m2, test3=m3))
        assert_equal(mm.TAGS["test"], {"test3"})
    # --- metric factories ----------------------------------------------
    def test_new_histogram_default(self):
        metric = mm.new_histogram("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, histogram.Histogram)
        assert_is_instance(metric.reservoir, histogram.UniformReservoir)
        assert_equal(metric.reservoir.size, histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE)
    def test_new_histogram(self):
        metric = mm.new_histogram("test", histogram.UniformReservoir(10))
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, histogram.Histogram)
        assert_is_instance(metric.reservoir, histogram.UniformReservoir)
        assert_equal(metric.reservoir.size, 10)
    def test_new_counter(self):
        metric = mm.new_counter("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, simple.Counter)
    def test_new_gauge(self):
        metric = mm.new_gauge("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, simple.Gauge)
    def test_new_meter(self):
        metric = mm.new_meter("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, meter.Meter)
    @raises(exceptions.InvalidMetricError)
    def test_new_reservoir_bad_type(self):
        mm.new_reservoir('xxx')
    @raises(TypeError)
    def test_new_reservoir_bad_args(self):
        mm.new_reservoir('uniform', xxx='yyy')
    def test_new_reservoir_with_defaults(self):
        reservoir = mm.new_reservoir()
        assert_is_instance(reservoir, histogram.UniformReservoir)
        assert_equal(reservoir.size, histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE)
    def test_new_reservoir(self):
        reservoir = mm.new_reservoir('sliding_window', 5)
        assert_is_instance(reservoir, histogram.SlidingWindowReservoir)
        assert_equal(reservoir.size, 5)
    def test_new_histogram_with_implicit_reservoir(self):
        metric = mm.new_histogram_with_implicit_reservoir('test', 'sliding_window', 5)
        assert_is_instance(metric, histogram.Histogram)
        assert_is_instance(metric.reservoir, histogram.SlidingWindowReservoir)
        assert_equal(metric.reservoir.size, 5)
    # --- decorators ----------------------------------------------------
    @mock.patch('appmetrics.metrics.time')
    def test_with_histogram(self, time):
        # emulate the time spent in the function by patching time.time() and returning
        # two known values.
        times = [5, 3.4]
        time.time.side_effect = times.pop
        # decorated function
        @mm.with_histogram("test")
        def fun(v1, v2):
            """a docstring"""
            return v1+v2
        assert_equal(fun.__doc__, "a docstring")
        res = fun(1, 2)
        assert_equal(res, 3)
        assert_equal(mm.metric("test").raw_data(), [1.6])
    @mock.patch('appmetrics.metrics.time')
    def test_with_histogram_with_method(self, time):
        # emulate the time spent in the function by patching time.time() and returning
        # two known values.
        times = [5, 3.4]
        time.time.side_effect = times.pop
        # decorated method
        class MyClass(object):
            def __init__(self, v1):
                self.v1 = v1
            @mm.with_histogram("test")
            def method(self, v2):
                """a docstring"""
                return self.v1+v2
        assert_equal(MyClass.method.__doc__, "a docstring")
        obj = MyClass(1)
        assert_equal(obj.method.__doc__, "a docstring")
        res = obj.method(2)
        assert_equal(res, 3)
        assert_equal(mm.metric("test").raw_data(), [1.6])
    def test_with_histogram_multiple(self):
        @mm.with_histogram("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_histogram("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
        assert_equal(f1.__doc__, "a docstring")
        assert_equal(f2.__doc__, "another docstring")
        res = f1(1, 2)
        assert_equal(res, 3)
        res = f2(2, 3)
        assert_equal(res, 6)
        assert_equal(len(mm.metric("test").raw_data()), 2)
    @raises(exceptions.InvalidMetricError)
    def test_with_histogram_bad_reservoir_type(self):
        # decorated function
        @mm.with_histogram("test", "xxx")
        def fun(v1, v2):
            """a docstring"""
            return v1+v2
    @raises(exceptions.DuplicateMetricError)
    def test_with_histogram_multiple_and_arguments(self):
        @mm.with_histogram("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_histogram("test", size=100)
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    @raises(exceptions.DuplicateMetricError)
    def test_with_histogram_multiple_different_type(self):
        mm.new_gauge("test")
        @mm.with_histogram("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    def test_with_meter(self):
        @mm.with_meter("test")
        def fun(v):
            """a docstring"""
            return v*2
        assert_equal(fun.__doc__, "a docstring")
        res = [fun(i) for i in range(6)]
        assert_equal(res, [0, 2, 4, 6, 8, 10])
        assert_equal(mm.metric("test").raw_data(), 6)
    def test_with_meter_with_method(self):
        class MyClass(object):
            def __init__(self, v):
                self.v = v
            @mm.with_meter("test")
            def m1(self, v):
                """a docstring"""
                return v*self.v
            @mm.with_meter("test")
            def m2(self, v):
                """another docstring"""
                return v+self.v
        assert_equal(MyClass.m1.__doc__, "a docstring")
        assert_equal(MyClass.m2.__doc__, "another docstring")
        obj = MyClass(2)
        res = [obj.m1(i) for i in range(3)]
        assert_equal(res, [0, 2, 4])
        res = [obj.m2(i) for i in range(3)]
        assert_equal(res, [2, 3, 4])
        assert_equal(mm.metric("test").raw_data(), 6)
    def test_with_meter_multiple(self):
        @mm.with_meter("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_meter("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
        assert_equal(f1.__doc__, "a docstring")
        assert_equal(f2.__doc__, "another docstring")
        res = f1(1, 2)
        assert_equal(res, 3)
        res = f2(2, 3)
        assert_equal(res, 6)
        assert_equal(mm.metric("test").raw_data(), 2)
    @raises(exceptions.DuplicateMetricError)
    def test_with_meter_multiple_and_arguments(self):
        @mm.with_meter("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_meter("test", tick_interval=100)
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    @raises(exceptions.DuplicateMetricError)
    def test_with_meter_multiple_different_type(self):
        mm.new_gauge("test")
        @mm.with_meter("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    # --- timer context manager -----------------------------------------
    @mock.patch('appmetrics.histogram.Histogram.notify')
    def test_timer(self, notify):
        with mm.timer("test"):
            pass
        assert_equal(notify.call_count, 1)
    @mock.patch('appmetrics.histogram.Histogram.notify')
    def test_timer_multiple(self, notify):
        with mm.timer("test"):
            pass
        with mm.timer("test"):
            pass
        assert_equal(notify.call_count, 2)
    @raises(exceptions.DuplicateMetricError)
    def test_timer_multiple_different_reservoir(self):
        with mm.timer("test", reservoir_type="sliding_window"):
            pass
        with mm.timer("test"):
            pass
    @raises(exceptions.DuplicateMetricError)
    def test_timer_multiple_different_type(self):
        mm.new_gauge("test")
        with mm.timer("test"):
            pass
    # --- tagging -------------------------------------------------------
    @raises(exceptions.InvalidMetricError)
    def test_tag_invalid_name(self):
        mm.tag("test", "test")
    def test_tag(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        mm.REGISTRY = {"test1": m1, "test2": m2, "test3": m3}
        mm.tag("test1", "1")
        mm.tag("test3", "1")
        mm.tag("test2", "2")
        assert_equal(mm.TAGS, {"1": {"test1", "test3"}, "2": {"test2"}})
    def test_tags(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_equal(mm.tags(), mm.TAGS)
        assert_false(mm.tags() is mm.TAGS)
    def test_untag_bad_tag(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_false(mm.untag("test1", "xxx"))
    def test_untag_bad_metric(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_false(mm.untag("xxx", "1"))
    def test_untag(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_true(mm.untag("test1", "1"))
        assert_equal(mm.TAGS, {"1": {"test3"}, "2": {"test2"}})
    def test_untag_last_group(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_true(mm.untag("test1", "1"))
        assert_true(mm.untag("test3", "1"))
        assert_equal(mm.TAGS, {"2": {"test2"}})
    def test_metrics_by_tag_invalid_tag(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_equal(mm.metrics_by_tag("test"), {})
    def test_metrics_by_tag(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        mm.REGISTRY = {"test1": m1, "test2": m2, "test3": m3}
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test3"}}
        assert_equal(mm.metrics_by_tag("1"), {"test1": m1.get(), "test3": m3.get()})
    def test_metrics_by_tag_deletion_while_looping(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        m2.get.side_effect = exceptions.InvalidMetricError
        mm.REGISTRY = {"test1": m1, "test2": m2, "test3": m3}
        mm.TAGS = {"1": {"test1", "test2", "test3"}, "2": {"test2"}}
        assert_equal(mm.metrics_by_tag("1"), {"test1": m1.get(), "test3": m3.get()})
    def test_metrics_by_name_list(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock(), test3=mock.Mock())
        out = mm.metrics_by_name_list(["test1", "test3"])
        expected = {'test1': mm.REGISTRY["test1"].get.return_value,
                    'test3': mm.REGISTRY["test3"].get.return_value}
        assert_equal(out, expected)
| StarcoderdataPython |
1777009 | <gh_stars>0
import sys
import math
import datetime as dt
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Default matplotlib figure size, in inches.
DEFAULT_WIDTH = 20
DEFAULT_HEIGHT = 10
def iterdate(first, last, step=dt.timedelta(days=1)):
    """Yield each date from ``first`` to ``last`` inclusive, advancing by ``step``."""
    current = first
    while current <= last:
        yield current
        current = current + step
def itermonth(first, last):
    """Yield ``"YYYY-MM"`` strings from ``first``'s month through ``last``'s month, inclusive."""
    # Work with a single linear month index to avoid year-rollover logic.
    current = first.year * 12 + (first.month - 1)
    end = last.year * 12 + (last.month - 1)
    while current <= end:
        year, month_zero_based = divmod(current, 12)
        yield f"{year}-{month_zero_based + 1:02}"
        current += 1
def count_by(df, by, fill=True):
    """Count messages grouped by the ``by`` column.

    Parameters
    ----------
    df : pandas.DataFrame
        The messages; must contain a ``date`` column plus the ``by`` column.
    by : str
        Column to group by: "year-month", "year", "day", "date", "month" or "hour".
    fill : bool
        When True, values missing from the natural range of ``by`` are added
        with a count of 0 so that plots show gaps explicitly.

    Returns
    -------
    pandas.Series
        Counts indexed by the values of ``by`` (sorted when ``fill`` is True).
    """
    grouped = df.groupby([by])[by].count()
    if fill:
        # Generate range of all values
        first = min(df.date)
        last = max(df.date)
        allvalues = None
        if by == "year-month":
            # BUG FIX: the generator was previously created but never assigned,
            # so "year-month" gaps were silently never filled.
            allvalues = itermonth(first, last)
        elif by == "year":
            allvalues = range(min(df.year), max(df.year) + 1)
        elif by == "day":
            if first.year == last.year and first.month == last.month:
                allvalues = range(min(df.day), max(df.day))
            else:
                allvalues = range(1, 32)
        elif by == "date":
            allvalues = iterdate(first, last)
        elif by == "month":
            if first.year == last.year:
                allvalues = range(min(df.month), max(df.month))
            else:
                allvalues = range(1, 13)
        elif by == "hour":
            allvalues = range(24)
        # Fill values missing from the data with a count of 0.
        if allvalues is not None:
            # Sort the keys: modern pandas rejects a raw set as an index, and
            # use pd.concat since Series.append was removed in pandas 2.0.
            missing_keys = sorted(set(allvalues).difference(grouped.index))
            missing = pd.Series(0, index=missing_keys, dtype=grouped.dtype)
            grouped = pd.concat([grouped, missing])
        grouped.sort_index(inplace=True)
    return grouped
def get_value(df, day, month):
    """Return the count for the given day/"YYYY-MM" pair, or NaN when absent."""
    matches = df.loc[(df["year-month"] == month) & (df["day"] == day), "count"]
    if matches.empty:
        return float("nan")
    return matches.iloc[0]
def display_values_per_day(df,
                           width=DEFAULT_WIDTH,
                           height=DEFAULT_HEIGHT,
                           labelrotate=False):
    """Show a heatmap of message counts: one row per day of month (1-31),
    one column per year-month, with the integer count printed on each tile.

    :param df: DataFrame of messages with a ``date`` column
    :param width: figure width in inches
    :param height: figure height in inches
    :param labelrotate: when True, rotate the per-tile count labels by 90 degrees
    """
    # Re-derive a counts-per-date frame, then split the index into the
    # year-month column and day-of-month needed for the 2D layout.
    df = pd.DataFrame(count_by(df, "date"), columns=["count"])
    df["year-month"] = df.index.to_series().map(
        lambda date: f"{date.year}-{date.month:02}")
    df["day"] = df.index.to_series().map(lambda date: date.day)
    rotation = 0
    if labelrotate:
        rotation = 90
    months = sorted(df["year-month"].drop_duplicates())
    days = list(range(1, 32))
    # Generate a bidimensional array, the first dimension is of size 31 (one per day), the second is dimension is year-month
    # There is probably a way to do that more efficiently (with groupby and such) but it works like this
    counts = [[get_value(df, day, month) for month in months] for day in days]
    counts = np.array(counts)
    plt.figure(figsize=(width, height))
    # Use the log to determine the color of the tile
    plt.imshow([[math.log(1 + x) for x in y] for y in counts],
               interpolation='none',
               aspect="auto",
               vmax=6.5)
    # Add the int values as labels on each tile
    for (j, i), _ in np.ndenumerate(counts):
        if not np.isnan(counts[j][i]):
            plt.text(i,
                     j,
                     str(int(counts[j][i])),
                     ha='center',
                     va='center',
                     size=6,
                     rotation=rotation)
    # Better way ! But not visualy satisfying... TODO check that
    # import seaborn as sns
    # df2 = pd.crosstab(df['day'], df['year-month'])
    # sns.heatmap(df2, annot=False)
    # Explicitly set x ticks as the list of year-month
    plt.xticks(ticks=range(len(months)), labels=months, rotation=90)
    # Explicitly set y ticks as the list of ints from 1 to 31
    plt.yticks(ticks=range(len(days)), labels=days)
    plt.show()
def display_count_hour(df, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):
    """Show a histogram of the number of messages for each hour of the day."""
    hours = range(24)
    plt.figure(figsize=(width, height))
    # 25 bin edges -> one bin per hour, 0 through 23.
    plt.hist(df["hour"], bins=range(25))
    plt.xticks(ticks=hours, labels=hours)
    plt.show()
def display_distribution(df, by, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):
    """Show a boxplot of the daily message-count distribution per ``by`` group
    (min, 1st quartile, median, 3rd quartile, max + outliers)."""
    # Capture the category labels from the raw data before re-deriving the
    # daily counts frame below.
    categories = sorted(df[by].unique())
    df = pd.DataFrame(count_by(df, "date"), columns=["count"])
    if by == "year":
        df["year"] = df.index.to_series().map(lambda date: date.year)
    elif by == "year-month":
        df["year-month"] = df.index.to_series().map(
            lambda date: f"{date.year}-{date.month:02}")
    else:
        print("Invalid column to display distribution")
        sys.exit(1)
    plt.figure(figsize=(width, height))
    per_category = [df[df[by] == category]["count"] for category in categories]
    plt.boxplot(per_category, labels=categories)
    plt.xticks(ticks=range(1, len(categories) + 1), labels=categories, rotation=90)
    plt.show()
# Registry of available displays: (CLI flag name, callable, help text).
# Each callable is invoked as fn(df, width, height, rotate_labels).
displays = [("display-ydistrib",
             lambda df, w, h, _: display_distribution(df, "year", w, h),
             "display the yearly distribution of messages"),
            ("display-mdistrib",
             lambda df, w, h, _: display_distribution(df, "year-month", w, h),
             "display the monthly distribution of messages"),
            ("display-counter", display_values_per_day,
             "display the number of messages for each day"),
            ("hour-count", lambda df, w, h, _: display_count_hour(df, w, h),
             "display the number of messages sent each hour of the day")]
def init(parser):
    """Register the display-selection and figure-size options on ``parser``."""
    group = parser.add_argument_group(
        "displays",
        "the various possible displays. If none is selected, all are displayed"
    )
    # One on/off flag per entry of the `displays` registry.
    for flag_name, _callable, help_text in displays:
        group.add_argument(f"--{flag_name}", action='store_true', help=help_text)
    group.add_argument(
        "--figsize-w",
        help=f"the width of the generated plots in inch. Defaults to {DEFAULT_WIDTH}",
        type=int,
        action="store",
        default=DEFAULT_WIDTH)
    group.add_argument(
        "--figsize-h",
        help=f"the height of the generated plots in inch. Defaults to {DEFAULT_HEIGHT}",
        type=int,
        action="store",
        default=DEFAULT_HEIGHT)
    group.add_argument(
        "--rotate-labels",
        help="rotate the labels of the heatmap. Convenient for big values or big timelapse",
        action="store_true")
def display(df, values):
    """Run each selected display; when none was selected, run them all."""
    selected = [values[name.replace('-', '_')] for name, _, _ in displays]
    run_all = not any(selected)
    for (name, fn, _), is_selected in zip(displays, selected):
        if run_all or is_selected:
            fn(df, values["figsize_w"], values["figsize_h"],
               values["rotate_labels"])
| StarcoderdataPython |
1796814 | import _nx
import warnings
from .utils import bit, cached_property
# HID id passed to _nx for player 1's controller; other players use
# ``player.number - 1`` (see any_pressed / Stick.x below).
AUTO_PLAYER_1_ID = 10
def refresh_inputs():
    """Refreshes inputs.

    Should normally be called at least once
    within every iteration of your main loop.
    """
    # Polls the HID subsystem so subsequent key queries see fresh state.
    _nx.hid_scan_input()
def _determine_controller_type(player):
    """Return the Controller subclass to use for ``player``.

    Currently a stub: always assumes two attached Joy-Cons.
    """
    # TODO determine the type of the controller for this player via _nx
    return DualJoyconController
class Controller:
    """
    Represents an abstract controller.
    :attribute: player
    :type: Player
        The player to whom the Controller belongs to.
    :attribute: a_button
    :type: Button
        The A button of the controller.
    :attribute: b_button
    :type: Button
        The B button of the controller.
    :attribute: x_button
    :type: Button
        The X button of the controller.
    :attribute: y_button
    :type: Button
        The Y button of the controller.
    """
    def __init__(self, player):
        self.player = player
        # Face buttons occupy bits 0-3 regardless of controller type.
        self.a_button = Button(self.player, bit(0))
        self.b_button = Button(self.player, bit(1))
        self.x_button = Button(self.player, bit(2))
        self.y_button = Button(self.player, bit(3))
    @staticmethod
    def from_player(player):
        """
        Build the concrete controller instance for the given player.
        :param player: the player whose controller type should be detected
        :returns: an instance of the detected Controller subclass
        :rtype: Controller
        """
        controller_class = _determine_controller_type(player)
        return controller_class(player)
class JoyconController(Controller):
    """
    Represents a single Joycon controller.
    :attribute: is_left
    :type: bool
        Whether the JoyconController is the left or right Joy-Con.
    :attribute: parent
    :type: Controller or None
        The parent Controller (e.g. a DualJoyconController) of the Joy-Con,
        or None when it is used on its own.
    :attribute: stick_button
    :type: Button
        The button located in the analogue stick, when it is pressed.
    :attribute: l_or_r_button
    :type: Button
        Either the L or R button on the controller, dependent on which Joy-Con.
    :attribute: zl_or_zr_button
    :type: Button
        Either the ZL or ZR button on the controller, dependent on which Joy-Con.
    :attribute: plus_or_minus_button
    :type: Button
        Either the + or - button on the controller, dependent on which Joy-Con.
    :attribute: stick
    :type: Stick
        The analogue stick of the controller.
    :attribute: left
    :type: Button
        The analogue stick in the left position.
    :attribute: right
    :type: Button
        The analogue stick in the right position.
    :attribute: up
    :type: Button
        The analogue stick in the up position.
    :attribute: down
    :type: Button
        The analogue stick in the down position.
    """
    def __init__(self, player, is_left, parent=None):
        super().__init__(player)
        self.is_left = is_left
        self.parent = parent
        # The bit layout differs between the left and right Joy-Con.
        if is_left:
            self.stick_button = Button(self.player, bit(4))
            self.l_or_r_button = Button(self.player, bit(6))
            self.zl_or_zr_button = Button(self.player, bit(8))
            self.plus_or_minus_button = Button(self.player, bit(11))
            self.stick = Stick(self.player, is_left=True)
        else:
            self.stick_button = Button(self.player, bit(5))
            self.l_or_r_button = Button(self.player, bit(7))
            self.zl_or_zr_button = Button(self.player, bit(9))
            self.plus_or_minus_button = Button(self.player, bit(10))
            self.stick = Stick(self.player, is_left=False)
        # Stick directions exposed as Buttons (digital thresholds).
        self.left = Button(player, self.stick.left_key_bit)
        self.right = Button(player, self.stick.right_key_bit)
        self.up = Button(player, self.stick.up_key_bit)
        self.down = Button(player, self.stick.down_key_bit)
    @cached_property
    def sl_button(self):
        # SL exists only when the Joy-Con is detached from its parent pair.
        if self.parent is not None and self.parent.is_attached:
            return None
        return Button(self.player, bit(24))
    @cached_property
    def sr_button(self):
        # SR exists only when the Joy-Con is detached from its parent pair.
        if self.parent is not None and self.parent.is_attached:
            return None
        return Button(self.player, bit(25))
class StandardController(Controller):
    """A controller exposing the full standard button layout: two sticks,
    shoulder/trigger buttons, +/-, and a directional pad.

    Base class for :class:`SwitchProController` and
    :class:`DualJoyconController`.
    """
    def __init__(self, player):
        super().__init__(player)
        self.left_stick_button = Button(self.player, bit(4))
        self.right_stick_button = Button(self.player, bit(5))
        self.l_button = Button(self.player, bit(6))
        self.r_button = Button(self.player, bit(7))
        self.zl_button = Button(self.player, bit(8))
        self.zr_button = Button(self.player, bit(9))
        self.plus_button = Button(self.player, bit(10))
        self.minus_button = Button(self.player, bit(11))
        # D-pad buttons.
        self.left_button = Button(self.player, bit(12))
        self.up_button = Button(self.player, bit(13))
        self.right_button = Button(self.player, bit(14))
        self.down_button = Button(self.player, bit(15))
        self.left_stick = Stick(self.player, is_left=True)
        self.right_stick = Stick(self.player, is_left=False)
        # `stick` defaults to the left stick.
        self.stick = self.left_stick
        # Directional helpers: pressed via either the left stick or the d-pad.
        self.left = Button(player, self.stick.left_key_bit, self.left_button.key_bits[0])
        self.right = Button(player, self.stick.right_key_bit, self.right_button.key_bits[0])
        self.up = Button(player, self.stick.up_key_bit, self.up_button.key_bits[0])
        self.down = Button(player, self.stick.down_key_bit, self.down_button.key_bits[0])
class SwitchProController(StandardController):
    """Represents a Switch Pro Controller.
    Can also be a similar controller with the same buttons.
    """
    # No extra behavior: the standard layout already covers this hardware.
    pass
class DualJoyconController(StandardController):
    """Represents two Joy-Cons in combination, attached to rails"""
    # When attached, the individual Joy-Cons hide their SL/SR buttons
    # (see JoyconController.sl_button / sr_button).
    is_attached = True
    def __init__(self, player):
        super().__init__(player)
        self.left_joycon = JoyconController(player, is_left=True, parent=self)
        self.right_joycon = JoyconController(player, is_left=False, parent=self)
class FreeDualJoyconController(DualJoyconController):
    """Represents two Joy-Cons in combination, detached from rails"""
    # Detached Joy-Cons expose SL/SR (see JoyconController.sl_button/sr_button).
    is_attached = False
class Button:
    """Represents a button or button-like object."""
    def __init__(self, player, *key_bits):
        self.player = player
        # One bit mask per physical key that should count as this Button.
        self.key_bits = key_bits
    @property
    def is_pressed(self):
        """Indicates whether the Button is pressed."""
        return any_pressed(self.player, self)
    def __eq__(self, other):
        # NOTE(review): raising on non-Button operands (instead of returning
        # NotImplemented) makes e.g. `button == None` raise, and defining
        # __eq__ without __hash__ leaves Buttons unhashable — confirm intended.
        if not isinstance(other, Button):
            raise TypeError("Can only compare a Button to another Button")
        return self.key_bits == other.key_bits
class ButtonGroup(Button):
    """A Button that aggregates several Buttons into one logical input."""
    def __init__(self, *buttons):
        if not buttons:
            raise TypeError("At least one Button must be passed")
        # Flatten the member buttons' key bits into a single list.
        combined_bits = []
        for button in buttons:
            combined_bits.extend(button.key_bits)
        super().__init__(buttons[0].player, *combined_bits)
        self.buttons = buttons
    @property
    def pressed(self):
        """The member Buttons that are currently pressed."""
        return which_pressed(self.player, *self.buttons)
class Stick:
    """Represents the analogue stick on the controller."""
    def __init__(self, player, is_left):
        self.player = player
        self.is_left = is_left
        # Directional key bits: 16-19 for the left stick, 20-23 for the right.
        if is_left:
            self.left_key_bit = bit(16)
            self.right_key_bit = bit(18)
            self.up_key_bit = bit(17)
            self.down_key_bit = bit(19)
        else:
            self.left_key_bit = bit(20)
            self.right_key_bit = bit(22)
            self.up_key_bit = bit(21)
            self.down_key_bit = bit(23)
    @property
    def left(self):
        """
        :return: A value indicating whether or not the stick is in the left position
        :rtype: bool
        """
        return self.x < 0.0
    @property
    def right(self):
        """
        :return: A value indicating whether or not the stick is in the right position
        :rtype: bool
        """
        return self.x > 0.0
    @property
    def up(self):
        """
        :return: A value indicating whether or not the stick is in the up position
        :rtype: bool
        """
        return self.y > 0.0
    @property
    def down(self):
        """
        :return: A value indicating whether or not the stick is in the down position
        :rtype: bool
        """
        return self.y < 0.0
    @property
    def x(self):
        """
        The current x value of the analogue stick
        :return: The float value of the stick's x location.
        :rtype: float
        """
        # NOTE: only -1.0, 0.0 or 1.0 are ever returned — the position is
        # derived from digital direction key bits, not a true analogue axis.
        keys_pressed = _nx.hid_keys_down(self.player.number - 1 if self.player.number != 1 else AUTO_PLAYER_1_ID)
        if keys_pressed & self.left_key_bit:
            return -1.0
        if keys_pressed & self.right_key_bit:
            return 1.0
        return 0.0
    @property
    def y(self):
        """
        The current y value of the analogue stick
        :return: The float value of the stick's y location.
        :rtype: float
        """
        # Same digital emulation as `x`; up is positive y.
        keys_pressed = _nx.hid_keys_down(self.player.number - 1 if self.player.number != 1 else AUTO_PLAYER_1_ID)
        if keys_pressed & self.up_key_bit:
            return 1.0
        if keys_pressed & self.down_key_bit:
            return -1.0
        return 0.0
def any_pressed(player, *buttons: Button, refresh_input=False):
    """Return True if any of the given buttons is pressed, or — when no
    buttons are given — if any key at all is pressed.

    Parameters
    ----------
    player: :class:`Player`
        The player to check with.
    buttons: Optional[one or more :class:`Button` objects OR Tuple[Button]]
        Buttons to check for. Checks if no Button is pressed if none given.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.
    """
    if refresh_input:
        refresh_inputs()
    hid_id = player.number - 1 if player.number != 1 else AUTO_PLAYER_1_ID
    keys_pressed = _nx.hid_keys_down(hid_id)
    if not buttons:
        return keys_pressed != 0
    return any(
        keys_pressed & key_bit
        for button in buttons
        for key_bit in button.key_bits
    )
def is_pressed(player, button: Button, refresh_input=False):
    """Checks whether the given button is pressed.
    Parameters
    ----------
    player: :class:`Player`
        The player to check with.
    button: :class:`Button`
        Button to check for.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.
    """
    # Thin convenience wrapper around any_pressed for a single button.
    return any_pressed(player, button, refresh_input=refresh_input)
def which_pressed(player, *buttons: Button, refresh_input=False):
    """Checks which of the given buttons are pressed.
    Parameters
    ----------
    player: :class:`Player`
        The player to check with.
    buttons: one or more :class:`Button` objects OR Tuple[Button]
        Buttons to check for.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.
    Returns
    -------
    A list of :class:`Button` objects.
    """
    # Fail fast before touching the HID state.
    if not buttons:
        raise TypeError("At least one Button must be passed")
    # BUG FIX: the scan and the key query were previously duplicated (once
    # via _nx.hid_scan_input directly, once via refresh_inputs), scanning
    # the inputs twice per call.
    if refresh_input:
        refresh_inputs()
    keys_pressed = _nx.hid_keys_down(player.number - 1 if player.number != 1 else AUTO_PLAYER_1_ID)
    buttons_pressed = []
    for button in buttons:
        # BUG FIX: append each button at most once, even when several of its
        # key bits are down (the old inner loop could append duplicates).
        if any(keys_pressed & key_bit for key_bit in button.key_bits):
            buttons_pressed.append(button)
    return buttons_pressed
| StarcoderdataPython |
165891 | # Copyright (c) 2021 Massachusetts Institute of Technology
# SPDX-License-Identifier: MIT
from pathlib import Path
from typing import Any, Callable, List, Mapping, Optional, Union
from hydra._internal.callbacks import Callbacks
from hydra._internal.hydra import Hydra
from hydra._internal.utils import create_config_search_path
from hydra.core.config_store import ConfigStore
from hydra.core.global_hydra import GlobalHydra
from hydra.core.utils import JobReturn
from hydra.plugins.sweeper import Sweeper
from hydra.types import HydraContext, RunMode
from omegaconf import DictConfig, OmegaConf
from .._hydra_overloads import instantiate
from ..typing import DataClass
def _store_config(
    cfg: Union[DataClass, DictConfig, Mapping], config_name: str = "hydra_launch"
) -> str:
    """Stores configuration object in Hydra's ConfigStore.
    Parameters
    ----------
    cfg: Union[DataClass, DictConfig, Mapping]
        A configuration as a dataclass, configuration object, or a dictionary.
    config_name: str (default: hydra_launch)
        The configuration name used to store the configuration.
    Returns
    -------
    config_name: str
        The configuration name used to store the default configuration.
    Notes
    -----
    The input configuration is registered in the Hydra ConfigStore [1]_ using a
    user-provided config name.
    References
    ----------
    .. [1] https://hydra.cc/docs/tutorials/structured_config/config_store
    """
    # ConfigStore is a singleton; use the class-level accessor directly
    # instead of constructing a throwaway instance first.
    cs = ConfigStore.instance()
    cs.store(name=config_name, node=cfg)
    return config_name
def hydra_run(
    config: Union[DataClass, DictConfig, Mapping],
    task_function: Callable[[DictConfig], Any],
    overrides: Optional[List[str]] = None,
    config_dir: Optional[Union[str, Path]] = None,
    config_name: str = "hydra_run",
    job_name: str = "hydra_run",
    with_log_configuration: bool = True,
) -> JobReturn:
    """Launch a Hydra job defined by `task_function` using the configuration
    provided in `config`.

    Similar to how Hydra CLI works, `overrides` are a string list of configuration
    values to use for a given experiment run. For example, the Hydra CLI provided by::

       $ python -m job.task_function job/group=group_name job.group.param=1

    would be::

       >>> job = hydra_run(config, task_function, overrides=["job/group=group_name", "job.group.param=1"])

    This function executes Hydra and therefore creates its own working directory. See Configuring Hydra [2]_ for more
    details on customizing Hydra.

    Parameters
    ----------
    config: Union[DataClass, DictConfig, Mapping]
        A configuration as a dataclass, configuration object, or a dictionary.

    task_function: Callable[[DictConfig], Any]
        The function Hydra will execute with the given configuration.

    overrides: Optional[List[str]] (default: None)
        If provided, overrides default configurations, see [1]_ and [2]_.

    config_dir: Optional[Union[str, Path]] (default: None)
        Add configuration directories if needed.

    config_name: str (default: "hydra_run")
        Name of the stored configuration in Hydra's ConfigStore API.

    job_name: str (default: "hydra_run")
        Name of the Hydra job.

    with_log_configuration: bool (default: True)
        Flag to configure logging subsystem from the loaded config

    Returns
    -------
    result: JobReturn
        The object storing the results of the Hydra experiment.
            - overrides: From `overrides` and `multirun_overrides`
            - return_value: The return value of the task function
            - cfg: The configuration object sent to the task function
            - hydra_cfg: The hydra configuration object
            - working_dir: The experiment working directory
            - task_name: The task name of the Hydra job

    References
    ----------
    .. [1] https://hydra.cc/docs/next/advanced/override_grammar/basic
    .. [2] https://hydra.cc/docs/next/configure_hydra/intro

    Examples
    --------
    Simple Hydra run:

    >>> from hydra_zen import instantiate, builds
    >>> from hydra_zen.experimental import hydra_run
    >>> job = hydra_run(builds(dict, a=1, b=1), task_function=instantiate)
    >>> job.return_value
    {'a': 1, 'b': 1}

    Using a more complex task function:

    >>> from hydra_zen.experimental import hydra_run
    >>> from hydra_zen import builds, instantiate
    >>> cfg = dict(f=builds(pow, exp=2, hydra_partial=True), x=10)
    >>> def task_function(cfg):
    ...    return instantiate(cfg.f)(cfg.x)

    Launch a job to evaluate the function using the given configuration:

    >>> job = hydra_run(cfg, task_function)
    >>> job.return_value
    100

    An example using PyTorch:

    >>> from torch.optim import Adam
    >>> from torch.nn import Linear
    >>> AdamConfig = builds(Adam, lr=0.001, hydra_partial=True)
    >>> ModelConfig = builds(Linear, in_features=1, out_features=1)
    >>> cfg = dict(optim=AdamConfig(), model=ModelConfig())
    >>> def task_function(cfg):
    ...    cfg = instantiate(cfg)
    ...    optim = cfg.optim(cfg.model.parameters())
    ...    loss = cfg.model(torch.ones(1)).mean()
    ...    optim.zero_grad()
    ...    loss.backward()
    ...    optim.step()
    ...    return loss.item()

    >>> job = hydra_run(cfg, task_function, overrides=["optim.lr=0.1"])
    >>> job.return_value
    0.3054758310317993
    """
    # Register `config` in the ConfigStore so Hydra can resolve it by name.
    config_name = _store_config(config, config_name)
    if config_dir is not None:
        config_dir = str(Path(config_dir).absolute())
    search_path = create_config_search_path(config_dir)
    hydra = Hydra.create_main_hydra2(task_name=job_name, config_search_path=search_path)
    try:
        job = hydra.run(
            config_name=config_name,
            task_function=task_function,
            overrides=overrides if overrides is not None else [],
            with_log_configuration=with_log_configuration,
        )
    finally:
        # Always reset the global Hydra state so repeated calls in the same
        # process (tests, notebooks) start from a clean slate.
        GlobalHydra.instance().clear()
    return job
def hydra_multirun(
    config: Union[DataClass, DictConfig, Mapping],
    task_function: Callable[[DictConfig], Any],
    overrides: Optional[List[str]] = None,
    config_dir: Optional[Union[str, Path]] = None,
    config_name: str = "hydra_multirun",
    job_name: str = "hydra_multirun",
    with_log_configuration: bool = True,
) -> Any:
    """Launch a Hydra multi-run ([1]_) job defined by `task_function` using the configuration
    provided in `config`.

    Similar to how Hydra CLI works, `overrides` are a string list of configuration
    values to use for a given experiment run. For example, the Hydra CLI provided by::

       $ python -m job.task_function job/group=group_name job.group.param=1 --multirun

    would be::

       >>> job = hydra_multirun(config, task_function, overrides=["job/group=group_name", "job.group.param=1"])

    To sweep over parameters the Hydra CLI provided by::

       $ python -m job.task_function job/group=group_name job.group.param=1,2,3 --multirun

    would be::

       >>> job = hydra_multirun(config, task_function, overrides=["job/group=group_name", "job.group.param=1,2,3"])

    This function executes Hydra and therefore creates its own working directory. See Configuring Hydra [3]_ for more
    details on customizing Hydra.

    Parameters
    ----------
    config: Union[DataClass, DictConfig, Mapping]
        A configuration as a dataclass, configuration object, or a dictionary.

    task_function: Callable[[DictConfig], Any]
        The function Hydra will execute with the given configuration.

    overrides: Optional[List[str]] (default: None)
        If provided, overrides default configurations, see [2]_ and [3]_.

    config_dir: Optional[Union[str, Path]] (default: None)
        Add configuration directories if needed.

    config_name: str (default: "hydra_multirun")
        Name of the stored configuration in Hydra's ConfigStore API.

    job_name: str (default: "hydra_multirun")
        Name of the Hydra job.

    with_log_configuration: bool (default: True)
        Flag to configure logging subsystem from the loaded config

    Returns
    -------
    result: Any
        The return values of all launched jobs (depends on the Sweeper implementation).

    References
    ----------
    .. [1] https://hydra.cc/docs/tutorials/basic/running_your_app/multi-run
    .. [2] https://hydra.cc/docs/next/advanced/override_grammar/basic
    .. [3] https://hydra.cc/docs/next/configure_hydra/intro

    Examples
    --------
    Simple Hydra multirun:

    >>> job = hydra_multirun(
    ...     builds(dict, a=1, b=1),
    ...     task_function=instantiate,
    ...     overrides=["a=1,2"],
    ... )
    >>> [j.return_value for j in job[0]]
    [{'a': 1, 'b': 1}, {'a': 2, 'b': 1}]

    Using a more complex `task_function`

    >>> from hydra_zen import builds, instantiate
    >>> cfg = dict(f=builds(pow, exp=2, hydra_partial=True), x=1)
    >>> def task_function(cfg):
    ...    return instantiate(cfg.f)(cfg.x)

    Launch a multi-run over a list of different `x` values using Hydra's override syntax `range`:

    >>> jobs = hydra_multirun(cfg, task_function, overrides=["x=range(-2,3)"])
    >>> [j.return_value for j in jobs[0]]
    [4, 1, 0, 1, 4]

    An example using PyTorch

    >>> from torch.optim import Adam
    >>> from torch.nn import Linear
    >>> AdamConfig = builds(Adam, lr=0.001, hydra_partial=True)
    >>> ModelConfig = builds(Linear, in_features=1, out_features=1)
    >>> cfg = dict(optim=AdamConfig(), model=ModelConfig())
    >>> def task_function(cfg):
    ...    cfg = instantiate(cfg)
    ...    optim = cfg.optim(cfg.model.parameters())
    ...    loss = cfg.model(torch.ones(1)).mean()
    ...    optim.zero_grad()
    ...    loss.backward()
    ...    optim.step()
    ...    return loss.item()

    Evaluate the function for different learning rates

    >>> jobs = hydra_multirun(cfg, task_function, overrides=["optim.lr=0.1,1.0"])
    >>> [j.return_value for j in jobs[0]]
    [0.3054758310317993, 0.28910207748413086]
    """
    # Register `config` in the ConfigStore so Hydra can resolve it by name.
    config_name = _store_config(config, config_name)
    if config_dir is not None:
        config_dir = str(Path(config_dir).absolute())
    search_path = create_config_search_path(config_dir)
    hydra = Hydra.create_main_hydra2(task_name=job_name, config_search_path=search_path)
    try:
        # Compose the final config in MULTIRUN mode so sweep overrides
        # (e.g. "x=1,2,3") are accepted.
        cfg = hydra.compose_config(
            config_name=config_name,
            overrides=overrides if overrides is not None else [],
            with_log_configuration=with_log_configuration,
            run_mode=RunMode.MULTIRUN,
        )
        callbacks = Callbacks(cfg)
        callbacks.on_multirun_start(config=cfg, config_name=config_name)
        # Instantiate sweeper without using Hydra's Plugin discovery (Zen!)
        sweeper = instantiate(cfg.hydra.sweeper)
        assert isinstance(sweeper, Sweeper)
        sweeper.setup(
            config=cfg,
            hydra_context=HydraContext(
                config_loader=hydra.config_loader, callbacks=callbacks
            ),
            task_function=task_function,
        )
        # The sweeper consumes the task-level overrides (sweep syntax included).
        task_overrides = OmegaConf.to_container(cfg.hydra.overrides.task, resolve=False)
        assert isinstance(task_overrides, list)
        job = sweeper.sweep(arguments=task_overrides)
        callbacks.on_multirun_end(config=cfg, config_name=config_name)
    finally:
        # Always reset the global Hydra state for subsequent in-process calls.
        GlobalHydra.instance().clear()
    return job
| StarcoderdataPython |
1659882 | <filename>src/lesson_runtime_features/site_addsitedir.py<gh_stars>1-10
import site
import os
import sys
# Demo: the module directory (given as the first CLI argument, relative to
# this script) is not importable until it is added via site.addsitedir().
script_directory = os.path.dirname(__file__)
module_directory = os.path.join(script_directory, sys.argv[1])
# First, show that the import fails before the directory is on sys.path.
try:
    import mymodule
except ImportError as err:
    print('Could not import mymodule:', err)
print()
before_len = len(sys.path)
# addsitedir() appends the directory to sys.path and also processes any
# .pth files found inside it.
site.addsitedir(module_directory)
print('New paths:')
for p in sys.path[before_len:]:
    print(p.replace(os.getcwd(), '.')) # shorten dirname
print()
# Now the import succeeds because the directory is on sys.path.
import mymodule
| StarcoderdataPython |
1664969 | <reponame>mrityunjaykumar911/gmailMailerPy
#!/usr/local/bin/python
"""
Filename: main.py
Author: mrityunjaykumar
Date: 02/02/19
author_email: <EMAIL>
"""
from __future__ import absolute_import
# from email_all import main_1
from fetch_sheet import main_1
from mailer import main_2
if __name__ == '__main__':
    # Fetch data from the Google Sheet; the Sheet ID is defined in config.py.
    main_1()
    # Then send the mail to the recipients collected above.
main_2() | StarcoderdataPython |
3257563 | <reponame>ALFA-group/adv-malware-viz
# coding=utf-8
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from os import system
from utils.utils import load_parameters, set_parameter
import shutil
import time
if __name__ == "__main__":
    # Name of the trained-model experiment to generate loss landscapes for.
    trained_experiment_model = sys.argv[1]
    original_parameters_filepath = "figure_generation_parameters.ini"
    exp_time = time.strftime("%m_%d_%Hh_%Mm", time.localtime())
    new_params_directory = "experiment_parameters"
    if not os.path.exists(new_params_directory):
        os.mkdir(new_params_directory)
    # Work on a timestamped copy of the .ini so the original stays untouched.
    new_params_name = "loss_landscape_parameters_{exp}_{time}.ini".format(
        exp=trained_experiment_model, time=exp_time)
    new_params_filepath = os.path.join(new_params_directory, new_params_name)
    shutil.copy(original_parameters_filepath, new_params_filepath)
    new_params = load_parameters(new_params_filepath)
    trained_model_directory = "../trained_models"
    model_filepath_base_string = os.path.join(trained_model_directory, "[training:{train_meth}|evasion:{train_meth}]_{exp_name}-model.pt")
    train_methods = ['natural', 'dfgsm_k', 'rfgsm_k', 'bga_k', 'bca_k']
    evasion_methods = ['rfgsm_k', 'dfgsm_k', 'bga_k', 'bca_k']
    set_parameter(new_params_filepath, "general", "generate_histogram", "False")
    # Naturally-trained models are evaluated against every evasion method;
    # adversarially-trained models are evaluated only against their own method.
    for train_method in train_methods:
        model_filepath = model_filepath_base_string.format(train_meth=train_method, exp_name=trained_experiment_model)
        set_parameter(new_params_filepath, "general", "training_method", train_method)
        set_parameter(new_params_filepath, "general", "model_weights_path", model_filepath)
        if train_method == "natural":
            for evasion_method in evasion_methods:
                set_parameter(new_params_filepath, "general", "evasion_method", evasion_method)
                start_time = time.time()
                # NOTE(review): shells out and relies on a conda env named
                # "nn_mal" being available -- confirm on the target machine.
                system("source activate nn_mal;python generate_loss_figures.py {params} {time}".format(
                    params=new_params_filepath, time=exp_time))
                print("Time to run loss landscape train/evasion pair:", time.time() - start_time)
        else:
            set_parameter(new_params_filepath, "general", "evasion_method", train_method)
            start_time = time.time()
            system("source activate nn_mal;python generate_loss_figures.py {params} {time}".format(
                params=new_params_filepath, time=exp_time))
            print("Time to run loss landscape train/evasion pair:", time.time() - start_time)
| StarcoderdataPython |
86714 | <filename>app/redidropper/utils.py<gh_stars>1-10
"""
Goal: Store helper functions not tied to a specific module
@authors:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
import os
import ast
import json
from datetime import datetime, timedelta
from itsdangerous import URLSafeTimedSerializer
from flask import flash, request, jsonify
from hashlib import sha512, sha256
import hmac
import base64
from subprocess import Popen
from subprocess import PIPE
import pytz as tz
FORMAT_US_DATE = "%x"
FORMAT_US_DATE_TIME = '%x %X'
FORMAT_US_DATE_TIME_ZONE = '%x %X %Z%z'
FORMAT_DATABASE_DATE_TIME = "%Y-%m-%d %H:%M:%S"
# @TODO: move to the configs
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'tiff',
'zip', 'tar', 'tgz', 'bz2'])
FLASH_CATEGORY_ERROR = 'error'
FLASH_CATEGORY_INFO = 'info'
def _get_remote_addr():
    """ Return the utf-8 encoded request address """
    addr = request.headers.get('X-Forwarded-For', request.remote_addr)
    return addr.encode('utf-8') if addr is not None else None
def _get_user_agent():
    """ Return the utf-8 encoded request user agent """
    agent = request.headers.get('User-Agent')
    return agent.encode('utf-8') if agent is not None else None
def _create_salt():
    """ Return the first 16 hex chars of sha256(rand:user_ip:user_agent) """
    rand = base64.b64encode(os.urandom(24))
    seed = '{0}:{1}:{2}'.format(rand, _get_remote_addr(), _get_user_agent())
    if str is bytes:
        # Python 2 only: promote the byte string to unicode before hashing.
        seed = unicode(seed, 'utf-8', errors='replace')  # pragma: no cover
    digest = sha256()
    digest.update(seed.encode('utf8'))
    return digest.hexdigest()[0:16]
def _generate_sha512_hmac(pepper, salt, data):
""" Generate the SHA512 HMAC -- for compatibility with Flask-Security
h = HMAC(pepper, salt+data)
Where
pepper: the global application key
salt: the 128bit (16bytes) obtained from sha256(rand:ip:agent)
data: the data to be protected
from passlib.context import CryptContext
self.password_crypt_context = CryptContext(schemes='bcrypt')
"""
payload = '{}:{}'.format(salt.encode('utf-8'), data.encode('utf-8'))
return base64.b64encode(hmac.new(pepper, payload, sha512).digest())
def generate_auth(pepper, password):
    """
    Return the (salt, password_hash) pair to be stored in the database.

    Execute once when the user account is created.
    Note: requires a request context.
    """
    new_salt = _create_salt()
    return (new_salt, _generate_sha512_hmac(pepper, new_salt, password))
def is_valid_auth(pepper, salt, candidate_password, correct_hash):
    """
    Return ``True`` if the candidate_password hashes to the same
    value stored in the database as correct_hash.

    :param pepper: the global application security key
    :param salt: the user-specific salt
    :param candidate_password: the password supplied by the user
    :param correct_hash: the stored hash (bytes, as produced by generate_auth)
    :rtype Boolean
    :return password validity status
    """
    assert pepper is not None
    assert salt is not None
    assert candidate_password is not None
    candidate_hash = _generate_sha512_hmac(pepper, salt, candidate_password)
    # SECURITY FIX: use a constant-time comparison so an attacker cannot
    # learn how many leading bytes of the hash match (timing side-channel).
    return hmac.compare_digest(correct_hash, candidate_hash)
def clean_str(dangerous):
    """ Return the stringified, whitespace-trimmed value (None stays None) """
    return None if dangerous is None else str(dangerous).strip()
def clean_int(dangerous):
    """
    Return the input as an int, or None for empty/non-numeric input.

    Note: only non-negative decimal digits are accepted ("-5" yields None).
    """
    if dangerous is None:
        return None
    text = str(dangerous).strip()
    if not text.isdigit():
        return None
    return int(text)
def get_safe_int(unsafe, default=1, min_allowed=1, max_allowed=None):
    """ Coerce user input to an int clamped to [min_allowed, max_allowed].

    :param unsafe: the user input to be interpreted as int
    :param default: the fallback when the input is not a valid integer
    :param min_allowed: the minimum value allowed
    :param max_allowed: the maximum value allowed (ignored when None)
    """
    value = clean_int(unsafe)
    if value is None:
        return default
    if value < min_allowed:
        return min_allowed
    if max_allowed is not None and value > max_allowed:
        return max_allowed
    return value
def flash_error(msg):
    """ Queue ``msg`` for display under the "error" flash category """
    flash(msg, category=FLASH_CATEGORY_ERROR)
def flash_info(msg):
    """ Queue ``msg`` for display under the "info" flash category """
    flash(msg, category=FLASH_CATEGORY_INFO)
def pack(data):
    """
    Serialize ``data`` to a stable, human-readable JSON string.

    :param data -- dictionary
    """
    return json.dumps(data, indent=2, sort_keys=True)
def pack_error(msg):
    """ Wrap ``msg`` in a json-friendly error payload """
    return pack({'message': msg, 'status': 'error'})
def jsonify_error(data):
    """ Build a json error response around ``data`` """
    return jsonify(status='error', data=data)
def jsonify_success(data):
    """ Build a json success response around ``data`` """
    return jsonify(status='success', data=data)
def get_db_friendly_date_time():
    """
    :rtype: string
    :return current time in format: "2014-06-24 01:23:24"
    """
    # BUG FIX: the module imports `from datetime import datetime`, so the
    # original `datetime.datetime.now()` raised AttributeError at runtime.
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def localize_datetime(value, zone_name='US/Eastern'):
    """ Attach the named zone's timezone info to a naive datetime.

    Returns the empty string when ``value`` is None (note the mixed
    return type; callers must handle both).
    """
    if value is None:
        return ''
    return tz.timezone(zone_name).localize(value, is_dst=None)
def localize_est_date(value):
    """ Format the datetime value as `FORMAT_US_DATE` ('' for None) """
    # BUG FIX: guard before formatting -- localize_datetime(None) returns ''
    # and the original then crashed on ''.strftime(...).
    if value is None:
        return ''
    return localize_datetime(value).strftime(FORMAT_US_DATE)
def localize_est_datetime(value):
    """ Format the datetime value as `FORMAT_US_DATE_TIME` ('' for None/'') """
    # BUG FIX: check for empty input *before* localizing; the original
    # localized first, which raises for non-datetime values such as ''.
    if value is None or '' == value:
        return ''
    return localize_datetime(value).strftime(FORMAT_US_DATE_TIME)
def get_expiration_date(offset_days):
    """
    :param offset_days: how many days to shift versus today
    :rtype datetime
    :return today's timestamp shifted by ``offset_days`` days
    """
    return timedelta(days=offset_days) + datetime.now()
def compute_text_md5(text):
    """ Compute md5sum as hexdigest

    :param text: the input (str or bytes); str is encoded as utf-8
    :rtype string
    """
    import hashlib
    if isinstance(text, str):
        # BUG FIX: hashlib requires bytes on Python 3; encode text first.
        text = text.encode('utf-8')
    return hashlib.md5(text).hexdigest()
def get_email_token(email, salt, secret):
    """
    Generate a timestamped token encoding the specified email
    """
    return URLSafeTimedSerializer(secret).dumps(email, salt=salt)
def get_email_from_token(token, salt, secret, max_age=86400):
    """
    Decode an email from a timestamped token.

    Raises an exception if the token is older than ``max_age`` seconds
    (24 hours by default) or otherwise invalid.
    """
    serializer = URLSafeTimedSerializer(secret)
    return serializer.loads(token, salt=salt, max_age=max_age)
def redcap_api_call(url, token, content, fields, max_time):
    """
    Send an API request to the REDCap server.

    Notes:
        - when no fields are specified all fields are retrieved
        - the underlying cURL process is limited to complete
        within `max_time` seconds

    :param url: the REDCap API endpoint
    :param token: the REDCap API token
    :param content: which entity to fetch ('event' or 'record')
    :param fields: the fields to retrieve (empty for all)
    :param max_time: cURL timeout in seconds
    :rtype: dict
    :return: the requested content if it is of valid type (event, record),
        or [] on failure
    """
    assert content in ['event', 'record']
    # @TODO: add config flag for enabling/disabling the ssl
    # certificate validation: curl -k
    # SECURITY FIX: pass an argument list (shell=False) instead of
    # interpolating url/token/fields into a shell string, which allowed
    # command injection.
    # BUG FIX: parse with json.loads -- ast.literal_eval cannot parse JSON
    # literals such as true/false/null, so the json.tool pipe is unnecessary.
    cmd = ['curl', '-m', str(max_time), '-ksX', 'POST', url,
           '-d', 'token={}'.format(token),
           '-d', 'format=json',
           '-d', 'content={}'.format(content),
           '-d', 'fields={}'.format(fields),
           '-d', 'returnFormat=json']
    proc = Popen(cmd, stdout=PIPE)
    (out, err) = proc.communicate()
    if err:
        print("redcap_api_call error: \n{}".format(err))
    data = []
    try:
        data = json.loads(out)
    except Exception as exc:
        print("redcap_api_call error parsing curl response:\n{}".format(exc))
    return data
def retrieve_redcap_subjects(url, token, fields, max_time=30):
    """Fetch the list of subject records from the REDCap instance via the API"""
    return redcap_api_call(url, token, content='record',
                           fields=fields, max_time=max_time)
def retrieve_redcap_events(url, token, max_time=30):
    """Fetch the list of events from the REDCap instance via the API"""
    return redcap_api_call(url, token, content='event',
                           fields={}, max_time=max_time)
| StarcoderdataPython |
# Tell Django which AppConfig class to load for this app (pre-3.2 convention).
default_app_config = (
    'wshop.apps.dashboard.vouchers.config.VouchersDashboardConfig')
| StarcoderdataPython |
3205694 | from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from dashboard.forms import ExcuseResponseForm
from dashboard.models import Excuse, Position
from dashboard.utils import verify_position
from dashboard.views._positions._attendance_utils import event_type_from_position
@verify_position([Position.PositionChoices.RECRUITMENT_CHAIR, Position.PositionChoices.SECRETARY, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
def excuse(request, position_slug, excuse_id):
    """ Renders Excuse response form """
    # This view is reachable from several pages; `position_slug` identifies
    # the originating position so we can redirect back to its page afterwards.
    excuse = get_object_or_404(Excuse, pk=excuse_id)
    form = ExcuseResponseForm(request.POST or None, excuse=excuse)
    if request.method == 'POST' and form.is_valid():
        # Copy the officer's decision onto the stored excuse and persist it.
        response = form.save(commit=False)
        excuse.status = response.status
        excuse.response_message = response.response_message
        excuse.save()
        return HttpResponseRedirect('/' + position_slug)
    return render(request, "excuse.html", {
        'type': 'response',
        'excuse': excuse,
        'form': form,
    })
# Accepts the excuse then immediately redirects back to the caller's page.
@verify_position([Position.PositionChoices.RECRUITMENT_CHAIR, Position.PositionChoices.SECRETARY, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
def excuse_quick_accept(request, position_slug, excuse_id):
    """ Mark the excuse as accepted and redirect to the position's page """
    # Redirecting on `position_slug` returns to the page the officer came from.
    accepted = Excuse.objects.get(pk=excuse_id)
    accepted.status = '1'  # '1' == accepted
    accepted.save()
    return HttpResponseRedirect('/' + position_slug)
@verify_position([Position.PositionChoices.RECRUITMENT_CHAIR, Position.PositionChoices.SECRETARY, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
def all_excuses(request, position_slug):
    """ Renders Excuse archive for the given position, currently only works for Recruitment Chair and Secretary """
    event_type = event_type_from_position(position_slug)
    # Status '0' means "pending"; the archive shows only answered excuses.
    excuses = (Excuse.objects
               .exclude(status='0')
               .filter(event__in=event_type.objects.all())
               .order_by('brother__last_name', 'event__date'))
    return render(request, 'excuses-archive.html', {
        'excuses': excuses,
        'position': Position.objects.get(title=position_slug),
    })
| StarcoderdataPython |
3240040 | # Tables.py
# @author <NAME>
# Module to drop and add tables for AdviseMe db
# Just a few things I need to make this work
from mysql.connector import Error
""" ----------------------------------------------------------------------------------------------------------
Drops capstone db tables
"""
def dropTables(cursor):
    """Drop every AdviseMe table using the supplied database cursor."""
    # Drop statements, keyed by table name (issued in insertion order).
    drop = {
        'curriculum': "DROP TABLE curriculum;",
        'degree': "DROP TABLE degree;",
        'enrolled': "DROP TABLE enrolled;",
        'college': "DROP TABLE college;",
        'classes': "DROP TABLE classes;",
        'students': "DROP TABLE students;",
    }
    print ("Dropping AdviseMe Tables:")
    for table_name, query in drop.items():
        try:
            cursor.execute(query)
        except Error as e:
            print (e.msg)
        else:
            print ("Query: %s successful" %(query))
""" ----------------------------------------------------------------------------------------------------------
Create capstone tables for MySQL db
"""
def createTables(cursor):
    """Create the AdviseMe schema tables via the supplied MySQL cursor."""
    # Dictionary of create table statements
    tables = {}
    # students: one row per student; `sid` is the school-issued student id.
    tables['students'] = (
        "CREATE TABLE `students` ("
        " _id integer NOT NULL auto_increment,"
        " sid varchar(9),"
        " first varchar(30),"
        " last varchar(30),"
        " dob date,"
        " status char(3),"
        " hours smallint,"
        " primary key (_id),"
        " unique (sid)"
        ");"
    )
    # classes: course catalog; a course is identified by (prefix, co_num).
    tables['classes'] = (
        "CREATE TABLE `classes` ("
        " _id integer NOT NULL auto_increment,"
        " prefix varchar(4),"
        " co_num varchar(4),"
        " title varchar(50),"
        " hours tinyint,"
        " primary key (_id),"
        " unique (prefix, co_num)"
        ");"
    )
    # college: maps a college name to a 3-letter major code.
    tables['college'] = (
        "CREATE TABLE `college` ("
        " _id integer NOT NULL auto_increment,"
        " college varchar(30),"
        " major char(3),"
        " primary key (_id),"
        " unique (college, major)"
        ");"
    )
    # enrolled: per-student course enrollment and the grade earned.
    tables['enrolled'] = (
        "CREATE TABLE `enrolled` ("
        " _id integer NOT NULL auto_increment,"
        " sid varchar(9),"
        " prefix varchar(4),"
        " co_num varchar(4),"
        " grade char(1),"
        " primary key (_id),"
        " unique (sid, prefix, co_num)"
        ");"
    )
    # degree: which major(s) each student is pursuing.
    tables['degree'] = (
        "CREATE TABLE `degree` ("
        " _id integer NOT NULL auto_increment,"
        " sid varchar(9),"
        " major char(3),"
        " primary key (_id),"
        " unique (sid, major)"
        ");"
    )
    # curriculum: courses required per major, with recommended semester
    # and an optional minimum grade.
    tables['curriculum'] = (
        "CREATE TABLE `curriculum` ("
        " _id integer NOT NULL auto_increment,"
        " prefix varchar(4),"
        " co_num varchar(4),"
        " major char(3),"
        " semester char(1),"
        " min_grade char(1) default NULL,"
        " primary key (_id),"
        " unique (prefix, co_num, major)"
        ");"
    )
    print ("Creating AdviseMe tables:")
    # Execute create statements
    for index, query in tables.items():
        try:
            cursor.execute(query)
        except Error as e:
            print (e.msg)
        else:
            print ("Query: %s \nsuccessful" %(query))
| StarcoderdataPython |
121891 | import torch
from .num_nodes import maybe_num_nodes
def contains_self_loops(edge_index):
    """Return True if any edge in ``edge_index`` connects a node to itself."""
    source, target = edge_index
    return (source == target).any().item()
def remove_self_loops(edge_index, edge_attr=None):
    """Drop every (i, i) edge from ``edge_index`` (and ``edge_attr`` if given)."""
    source, target = edge_index
    keep = source != target
    if edge_attr is not None:
        edge_attr = edge_attr[keep]
    edge_index = edge_index[keep.unsqueeze(0).expand_as(edge_index)].view(2, -1)
    return edge_index, edge_attr
def add_self_loops(edge_index, num_nodes=None):
    """Append one (i, i) edge for every node i to ``edge_index``."""
    num_nodes = maybe_num_nodes(edge_index, num_nodes)
    loops = torch.arange(0, num_nodes,
                         dtype=edge_index.dtype, device=edge_index.device)
    loops = loops.unsqueeze(0).repeat(2, 1)
    return torch.cat([edge_index, loops], dim=1)
| StarcoderdataPython |
1616618 | <gh_stars>10-100
import numpy as np
import sys, os, pdb, pickle, time
from .utils import *
from .losses import *
from .keras_models import *
from .aux_dict import *
from scipy.stats import norm
import matplotlib.pyplot as plt
from matplotlib import animation
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
from collections import OrderedDict
from IPython.display import HTML
from keras.utils.generic_utils import get_custom_objects
# Register the custom CRPS loss with Keras (keyed by function __name__) so
# models saved with it can be reloaded by name.
metrics_dict = dict([(f.__name__, f) for f in [crps_cost_function]])
get_custom_objects().update(metrics_dict)
from timeit import default_timer
from keras.callbacks import EarlyStopping
import tensorflow as tf
def limit_mem():
    """Enable TensorFlow GPU memory growth instead of pre-allocating all VRAM."""
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    keras.backend.tensorflow_backend.set_session(tf.Session(config=session_config))
limit_mem() | StarcoderdataPython |
1718521 | # Generated by Django 2.1.7 on 2019-05-22 09:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add optional FK columns `artefactid` and `resourceid` to Sentence."""
    # NOTE: auto-generated by Django 2.1.7 (makemigrations); only comments
    # were added here -- do not hand-edit the operations.
    dependencies = [
        ('Activities', '0006_auto_20190415_1456'),
    ]
    operations = [
        migrations.AddField(
            model_name='sentence',
            name='artefactid',
            field=models.ForeignKey(blank=True, db_column='ArtefactID', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='Activities.Artefact', verbose_name='Artefacto'),
        ),
        migrations.AddField(
            model_name='sentence',
            name='resourceid',
            field=models.ForeignKey(blank=True, db_column='ResourceID', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='Activities.Resource', verbose_name='Recurso'),
        ),
    ]
1698889 | """Copy bumped workflows to the template folder, appending the `.jinja` suffix."""
from pathlib import Path
from shutil import copy

COMMON_PATH = ".github/workflows"
source_folder = Path("./dependabot") / COMMON_PATH
destination_folder = Path("./template/") / COMMON_PATH

# Copy each bumped workflow into the template tree, appending ".jinja"
# to the original file name.
for workflow in source_folder.iterdir():
    copy(workflow, destination_folder / (workflow.name + ".jinja"))
| StarcoderdataPython |
25087 | from tests.common.devices.base import AnsibleHostBase
class VMHost(AnsibleHostBase):
    """
    @summary: Class for VM server

    For running ansible module on VM server
    """

    def __init__(self, ansible_adhoc, hostname):
        AnsibleHostBase.__init__(self, ansible_adhoc, hostname)

    @property
    def external_port(self):
        # Lazily resolve the host's `external_port` inventory variable and
        # cache it on the instance for subsequent accesses.
        if not hasattr(self, "_external_port"):
            variable_manager = self.host.options["variable_manager"]
            inventory_manager = self.host.options["inventory_manager"]
            hostvars = variable_manager.get_vars(
                host=inventory_manager.get_host(self.hostname),
                include_delegate_to=False)
            self._external_port = hostvars["external_port"]
        return self._external_port
| StarcoderdataPython |
1781613 | from django.contrib import admin
from users.models import CustomUser
class CustomUserAdmin(admin.ModelAdmin):
    """Admin configuration for the CustomUser model."""
    # Columns shown in the admin changelist view.
    list_display = ("email", "first_name", "last_name", "date_joined", "is_superuser", "is_staff")
    # Sidebar filters; note filtering on `email` yields one entry per user.
    list_filter = ("email", "date_joined", "is_superuser")
admin.site.register(CustomUser, CustomUserAdmin)
| StarcoderdataPython |
1680963 | import geometry
import math
import OpenGL.GL as gl
import numpy as np
import ctypes
import json
class signalgenerator(geometry.base):
vertex_code = """
uniform mat4 modelview;
uniform mat4 projection;
in vec2 position;
in vec2 texcoor;
out vec2 v_texcoor;
void main()
{
gl_Position = projection * modelview * vec4(position,0,1);
v_texcoor = texcoor;
} """
fragment_code = """
uniform sampler2D tex;
uniform sampler2D lamptex;
uniform int columns;
uniform highp float supersample;
out highp vec4 f_color;
in highp vec2 v_texcoor;
in highp float v_id;
const int BIT_A = 2;
const int BIT_B = 6;
const int BIT_C = 7;
const int BIT_D = 0;
const int BIT_E = 4;
const int BIT_OE = 0;
const int BIT_LAT = 1;
const int BIT_CLK = 5;
const int BIT_R1 = 1;
const int BIT_G1 = 1;
const int BIT_B1 = 2;
const int BIT_R2 = 0;
const int BIT_G2 = 4;
const int BIT_B2 = 3;
const int MASK_A = (1 << BIT_A);
const int MASK_B = (1 << BIT_B);
const int MASK_C = (1 << BIT_C);
const int MASK_D = (1 << BIT_D);
const int MASK_E = (1 << BIT_E);
const int MASK_OE = (1 << BIT_OE);
const int MASK_LAT = (1 << BIT_LAT);
const int MASK_CLK = (1 << BIT_CLK);
const int MASK_R1 = (1 << BIT_R1);
const int MASK_G1 = (1 << BIT_G1);
const int MASK_B1 = (1 << BIT_B1);
const int MASK_R2 = (1 << BIT_R2);
const int MASK_G2 = (1 << BIT_G2);
const int MASK_B2 = (1 << BIT_B2);
const int depth = 12;
const int height = 16;
void setBits(out ivec3 p, lowp int D, lowp int LAT, lowp int A, lowp int B2, lowp int E, lowp int B, lowp int C, lowp int R2, lowp int G1, lowp int G2, lowp int CLK, lowp int OE, lowp int R1, lowp int B1) {
p.r = (D > 0 ? MASK_D : 0) +
(LAT > 0 ? MASK_LAT : 0) +
(A > 0 ? MASK_A : 0) +
(B2 > 0 ? MASK_B2 : 0) +
(E > 0 ? MASK_E : 0) +
(B > 0 ? MASK_B : 0) +
(C > 0 ? MASK_C : 0);
p.g = (R2 > 0 ? MASK_R2 : 0) +
(G1 > 0 ? MASK_G1 : 0) +
(G2 > 0 ? MASK_G2 : 0) +
(CLK > 0 ? MASK_CLK : 0);
p.b = (OE > 0 ? MASK_OE : 0) +
(R1 > 0 ? MASK_R1 : 0) +
(B1 > 0 ? MASK_B1 : 0);
}
ORDER_FUNC;
OUTPUT_ENABLE_FUNC;
EXTRACT_FUNC;
void main()
{
highp int physx = int(v_texcoor.x * 4096.0);
highp int physy = int(v_texcoor.y * 194.0);
highp int physend = 192;
highp int dfield;
highp int dy;
getLineParams(physy, dy, dfield);
highp int field;
highp int y;
getLineParams(physy-1, y, field);
highp int nextfield;
highp int nexty;
getLineParams(physy+1, nexty, nextfield);
if (physy == 0)
y = 0;
if (nexty != y && physx > 4000)
y++;
highp int t = physx;
lowp ivec3 data;
lowp int LAT = t >= 3850 && t < 3860 ? 1 : 0;
lowp int A = ((y & 0x1) > 0) ? 1 : 0;
lowp int B = ((y & 0x2) > 0) ? 1 : 0;
lowp int C = ((y & 0x4) > 0) ? 1 : 0;
lowp int D = ((y & 0x8) > 0) ? 1 : 0;
lowp int E = ((y & 0x10) > 0) ? 1 : 0;
int dx = (1919 - (t / 2)) % columns;
highp vec2 ttexpos = vec2(float(dx) / float(columns), 1.0 - (float(dy) / 31.0));
highp vec3 top = texture(tex, ttexpos).rgb;
highp vec2 btexpos = vec2(float(dx) / float(columns), 1.0 - (float(dy+16) / 31.0));
highp vec3 bottom = texture(tex, btexpos).rgb;
lowp int OE = getOE(t, field);
if (t > 3840)
OE = 0;
if (physy == 0)
OE = 0;
lowp int CLK;
if (t < 3840)
CLK = ((t & 1) == 0) ? 0 : 1;
else
CLK = 0;
lowp int R1 = 0, G1 = 0, B1 = 0, R2 = 0, G2 = 0, B2 = 0;
top = pow(top, vec3(2.2));
bottom = pow(bottom, vec3(2.2));
extract_bitplane(R1, G1, B1, top, dfield);
extract_bitplane(R2, G2, B2, bottom, dfield);
if (physy >= physend) {
R1 = G1 = B1 = R2 = G2 = B2 = 0;
OE = 0;
}
OE = OE == 0 ? 1 : 0;
setBits(data, D, LAT, A, B2, E, B, C, R2, G1, G2, CLK, OE, R1, B1);
f_color = vec4(float(data.r) / 255.0, float(data.g) / 255.0, float(data.b) / 255.0, 1.0);
} """
# Scan-order variants: a GLSL helper mapping a physical scanline index
# (physy) to a logical panel row `y` and a bitplane index `field`.  The
# selected snippet replaces the ORDER_FUNC placeholder in fragment_code.
order = {
    # All bitplanes of one logical row are emitted before the next row.
    'line-first': """
void getLineParams(int physy, out int y, out int field) {
y = physy / depth;
field = physy % depth;
}""",
    # One full bitplane of all rows is emitted before the next bitplane.
    'field-first': """
void getLineParams(int physy, out int y, out int field) {
y = physy % height;
field = physy / height;
}"""
}
# Output-enable (OE) strategies as GLSL: getOE(t, field) returns 1 while the
# panel output should be enabled at horizontal position t for bitplane
# `field`.  Replaces the OUTPUT_ENABLE_FUNC placeholder in fragment_code.
output_enable = {
    # Binary-coded modulation: the enable window halves per successive field.
    'normal': """
int getOE(int t, int field) {
return (t < ((4096 >> field))) ? 1 : 0;
}
""",
    # "Enhanced-spectrum" PWM: enable pulses are spread across the line
    # (one pulse every `rep` clocks) instead of one contiguous burst.
    'es-pwm': """
int getOE(int t, int field) {
int rep;
switch(field) {
case 0: rep = 1; break;
default:
rep = (1 << field) + field;
}
return (t % rep) == 0 ? 1 : 0;
}
""",
    # Always on; used when brightness comes from PWM extraction instead.
    'enable': """
int getOE(int t, int field) {
return 1;
}
"""
}
# Bitplane extraction strategies as GLSL: convert an RGB pixel into the
# per-channel on/off bits for display field `dfield`.  Replaces the
# EXTRACT_FUNC placeholder in fragment_code.
extract = {
    # Binary-coded modulation: select bit (15 - dfield) of the 16-bit value.
    'bcm':"""
void extract_bitplane(out lowp int R, out lowp int G, out lowp int B, highp vec3 pixel, lowp int dfield) {
lowp int dbitplane = 15 - dfield;
R = (int(pixel.r * 65535.0 ) & (1 << dbitplane)) > 0 ? 1 : 0;
G = (int(pixel.g * 65535.0 ) & (1 << dbitplane)) > 0 ? 1 : 0;
B = (int(pixel.b * 65535.0 ) & (1 << dbitplane)) > 0 ? 1 : 0;
}""",
    # PWM: a channel stays on while dfield < int(value * 11), i.e. brighter
    # pixels are enabled for more of the 12 fields.
    'pwm':"""
void extract_bitplane(out lowp int R, out lowp int G, out lowp int B, highp vec3 pixel, lowp int dfield) {
R = (int(pixel.r * 11.0) > dfield) ? 1 : 0;
G = (int(pixel.g * 11.0) > dfield) ? 1 : 0;
B = (int(pixel.b * 11.0) > dfield) ? 1 : 0;
}"""
}
# Vertex attribute layout (name -> component count) and the primitive used
# to render the full-screen quad returned by getVertices().
attributes = { 'position' : 2, 'texcoor' : 2 }
primitive = gl.GL_QUADS
def __init__(self, columns, rows, supersample, order='line-first', oe='normal', extract='bcm'):
    """Assemble the fragment shader for a columns x rows panel signal.

    order:   key into the class-level ``order`` dict (scan order)
    oe:      key into ``output_enable`` (output-enable strategy)
    extract: key into ``extract`` (bitplane extraction strategy)
    """
    self.columns = columns
    self.rows = rows
    self.supersample = supersample
    if extract == 'pwm':
        oe = 'enable' # This implied
    # Splice the selected GLSL snippets into the shader template in place
    # of their placeholders (note: creates an instance attribute shadowing
    # the class-level fragment_code, so the template is reusable).
    self.fragment_code = self.fragment_code.replace('ORDER_FUNC;', self.order[order])
    self.fragment_code = self.fragment_code.replace('OUTPUT_ENABLE_FUNC;', self.output_enable[oe])
    self.fragment_code = self.fragment_code.replace('EXTRACT_FUNC;', self.extract[extract])
    super(signalgenerator, self).__init__()
def getVertices(self):
    """Return the full-screen quad geometry.

    'position' holds the four corner positions in clip space (CCW from
    bottom-left); 'texcoor' holds the matching texture coordinates with
    the t axis flipped.
    """
    corners = [(-1, -1), (+1, -1), (+1, +1), (-1, +1)]
    tex_coords = [(0, 1), (1, 1), (1, 0), (0, 0)]
    return {'position': corners, 'texcoor': tex_coords}
def draw(self):
    """Bind the input texture and per-frame uniforms, then delegate the
    actual quad draw to the superclass."""
    # Sampler uniform "tex" reads from texture unit 0.
    loc = gl.glGetUniformLocation(self.program, "tex")
    gl.glUniform1i(loc, 0)
    gl.glActiveTexture(gl.GL_TEXTURE0)
    gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
    # NOTE(review): mipmaps are regenerated every draw — presumably the
    # source texture changes per frame; confirm, else hoist this out.
    gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
    loc = gl.glGetUniformLocation(self.program, "columns")
    gl.glUniform1i(loc, self.columns)
    loc = gl.glGetUniformLocation(self.program, "supersample")
    gl.glUniform1f(loc, self.supersample)
    super(signalgenerator, self).draw()
def setTexture(self, tex):
    # Store the GL texture handle that draw() binds to texture unit 0.
    self.tex = tex
| StarcoderdataPython |
3335014 | <gh_stars>1-10
import os
import torch
import argparse
import numpy as np
from backend.quant_metric_inputs import ValidationLoader
from metric_learning_main import plot_nearest_neighbours
def compute_dist_naive(emb_train, emb_val):
    """
    Pairwise Euclidean distances between flattened embeddings.

    emb_train: NTrain, nlocs, emb_dim
    emb_val: NVal, nlocs, emb_dim

    returns:
    -----------
    dist_mat: distance of each emb_val from every emb_train,
        shape (NVal, NTrain); dist_mat[i, j] is the L2 norm of
        (emb_val[i] - emb_train[j]) after flattening each embedding.
    """
    # Flatten each embedding to one vector (reshape copies only when the
    # tensor is non-contiguous, matching the old .contiguous().view(-1)).
    flat_train = emb_train.reshape(emb_train.shape[0], -1)
    flat_val = emb_val.reshape(emb_val.shape[0], -1)
    # torch.cdist computes all NVal x NTrain L2 distances in one vectorized
    # call, replacing the original O(NVal * NTrain) Python double loop with
    # per-pair torch.norm calls, unused locals, and per-pair progress prints.
    return torch.cdist(flat_val, flat_train, p=2)
def compute_nearest_neighbours(train_data, val_data, save_path):
    """
    Compute Euclidean distances between every validation embedding and every
    training embedding, then plot each validation sample's 5 nearest
    training samples.

    parameters
    -------------
    train_data: (quant_metric_inputs.ValidationLoader)
    val_data: (quant_metric_inputs.ValidationLoader)
    save_path: where the plot will be stored
    """
    # gather just the tensors (plus names/labels/files for the plot legend)
    train_embeddings = list()
    train_objs = list()
    train_labels = list()
    train_files = list()
    for t in train_data.records:
        train_embeddings.append(t['ob_tensor'].squeeze(0))
        train_objs.append(t['ob_name'])
        train_labels.append(t['label'])
        train_files.append(t['file_name'])
    train_embeddings = np.stack(train_embeddings, axis=0)
    val_embeddings = list()
    val_objs = list()
    val_labels = list()
    val_files = list()
    for v in val_data.records:
        val_embeddings.append(v['ob_tensor'].squeeze(0))
        val_objs.append(v['ob_name'])
        val_labels.append(v['label'])
        val_files.append(v['file_name'])
    val_embeddings = np.stack(val_embeddings, axis=0)
    # some checks
    assert len(val_files) == len(val_embeddings), "should be equal bro"
    assert len(train_files) == len(train_embeddings), "should be equal brother"
    emb_train = torch.from_numpy(train_embeddings).float()
    emb_val = torch.from_numpy(val_embeddings).float()
    # Rearrange (N, C, D, H, W) -> (N, D*H*W, C) so each sample is a set of
    # per-location feature vectors before flattening for the distance.
    trainN, C, D, H, W = list(emb_train.shape)
    valN, _, _, _, _ = list(emb_val.shape)
    emb_train = emb_train.permute(0, 2, 3, 4, 1).reshape(trainN, D*H*W, C)
    emb_val = emb_val.permute(0, 2, 3, 4, 1).reshape(valN, D*H*W, C)
    dist_mat = compute_dist_naive(emb_train, emb_val)
    # now compute the top_k along axis = 1 and then plot it, I have computed the
    # euclidean distance remember that, so the smallest is required so largest should
    # be false, can easily verify using values returned to me if they are ascending or
    # descending, since the values themselves are sorted
    dist_mat_topk = torch.topk(dist_mat, k=5, dim=1, largest=False, sorted=True)
    plot_nearest_neighbours(dist_mat_topk, train_files, val_files, train_labels, val_labels,
                            log_path=save_path, step=0, dist_fn='eucl', plot_method='matplotlib')
def main():
    """CLI entry point: load stored train/val 3D tensor files and write a
    nearest-neighbour plot to --save_path."""
    parser = argparse.ArgumentParser('Parser for rgb-viewpred nn')
    parser.add_argument('--dataset_listdir', type=str,
                        default='/home/ubuntu/pytorch_disco/backend/quant_train_files')
    parser.add_argument('--train_file', type=str, required=True)
    parser.add_argument('--val_file', type=str, required=True)
    parser.add_argument('--save_path', type=str, required=True)
    args = parser.parse_args()
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    train3d_tensors_path = os.path.join(args.dataset_listdir, args.train_file)
    val3d_tensors_path = os.path.join(args.dataset_listdir, args.val_file)
    # Fail fast with a clear message before handing paths to the loaders.
    if not os.path.exists(train3d_tensors_path):
        raise FileNotFoundError('could not find training file')
    if not os.path.exists(val3d_tensors_path):
        raise FileNotFoundError('could not find validation file')
    # get the tensors out from both of them.
    train_data = ValidationLoader(train3d_tensors_path)
    val_data = ValidationLoader(val3d_tensors_path)
    compute_nearest_neighbours(train_data, val_data, args.save_path)
if __name__ == '__main__':
    main()
141094 | <filename>components/studio/studio/settings.py
"""
Django settings for studio project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
]
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
REPO_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DOMAIN = 'platform.local'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The committed key had been redacted to an unterminated '<KEY literal,
# which is a SyntaxError.  Read the real key from the environment, with a
# placeholder default so development setups still start.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '<KEY>')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
if DEBUG:
ALLOWED_HOSTS = ['*']
else:
ALLOWED_HOSTS = ['platform.local']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'oauth2_provider',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'ingress',
'api',
'monitor',
'projects',
'labs',
'models',
'reports',
'files',
'datasets',
'workflows',
'experiments',
'deployments',
'bootstrap_modal_forms'
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
ROOT_URLCONF = 'studio.urls'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(REPO_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'studio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'postgres',
# 'USER': 'postgres',
# 'PASSWORD': 'postgres',
# 'HOST': 'stack-studio-db',
# 'PORT': 5432,
# }
# }
# Dummy backend here to allow for creating migrations locally.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(REPO_DIR, 'static/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(REPO_DIR, 'media/')
LOGIN_REDIRECT_URL = '/'
import socket
# TODO remove after refactor
API_HOSTNAME = 'localhost'
API_PORT = 8080
GIT_REPOS_ROOT = os.path.join(REPO_DIR, 'repos')
GIT_REPOS_URL = '/repos/'
REGISTRY_SVC = 'stack-docker-registry'
CHART_CONTROLLER_URL = 'http://stack-chart-controller'
STUDIO_URL = 'http://stack-studio:8080'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR', 'platform-redis')
CELERY_BROKER_URL = 'amqp://admin:LJqEG9RE4FdZbVWoJzZIOQEI@platform-rabbit:5672//'
CELERY_RESULT_BACKEND = 'redis://%s:%d/%d' % (REDIS_HOST, REDIS_PORT, REDIS_DB)
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = "UTC"
CELERY_ENABLE_UTC = True
EXTERNAL_KUBECONF = True
NAMESPACE = 'default'
STORAGECLASS = 'aws-efs'
try:
from .settings_local import *
except ImportError as e:
pass
import os

# Optionally extend INSTALLED_APPS with a space-separated list of extra app
# names taken from the APPS environment variable.  The original code wrapped
# the split result in another list (`apps = [....split(" ")]`), so it
# appended one *list object* instead of the individual app names, and the
# broad `except Exception: pass` silently swallowed that bug.
apps_env = os.environ.get("APPS")
if apps_env:
    for app in apps_env.split():
        if app not in INSTALLED_APPS:
            INSTALLED_APPS.append(app)
| StarcoderdataPython |
1625762 |
def main(max_weight, weights, values):
    """Solve the 0/1 knapsack problem.

    :param max_weight: knapsack capacity (non-negative int)
    :param weights: item weights, parallel to ``values``
    :param values: item values
    :return: maximum total value achievable without exceeding
        ``max_weight``, using each item at most once.

    The original body was a stub that always returned 1.
    """
    # dp[c] = best value achievable with capacity c using the items seen so
    # far.  Capacities are visited downwards so each item is used once.
    dp = [0] * (max_weight + 1)
    for w, v in zip(weights, values):
        for cap in range(max_weight, w - 1, -1):
            dp[cap] = max(dp[cap], dp[cap - w] + v)
    return dp[max_weight]
if __name__ == "__main__":
# n , max_w = map(int,input().split())
# weights = []
# values = []
# for _ in range(n):
# w,v=map(int,input().split())
# weights.append(w)
# values.append(v)
# print(weights , values)
weights = [1, 1, 1]
values = [3, 2, 1]
max_weight = 6
print(main(max_weight,weights,values)) | StarcoderdataPython |
1769460 | <filename>exp_distech.py
import numpy as np
from pandas import DataFrame
import utils
import eval_utils
import os
from typing import Dict, List, Tuple
import SpacePair
import exp_unsup
def read_3clusters(filenme:str) -> (List[str], List[str]):
    """Parse a six-line cluster file.

    Lines 1 and 3 (0-indexed) hold whitespace-separated disease words,
    line 5 holds technology words; the other lines are headers.
    Returns (disease_words, technology_words).
    """
    with open(filenme, "r") as f:
        lines = f.readlines()
    # Concatenating the two disease clusters preserves their file order.
    dis_words = lines[1].rstrip().split() + lines[3].rstrip().split()
    tech_words = lines[5].rstrip().split()
    return dis_words, tech_words
def read_wordlist(filename:str, column:int=0, uniquify=True) -> List[str]:
    """
    Read a word list in one- or multi-column format.

    :param filename: path of the word list file
    :param column: choose the column containing the words if it's a multicolumn file
    :param uniquify: delete duplicate words (keeps the first occurrence)
    :return: the words, in file order
    :raises IndexError: if a non-empty line has fewer than ``column + 1`` fields
    """
    woi = []
    with open(filename, "r") as f:
        for line in f:
            fields = line.split()
            if fields:  # skip blank lines
                woi.append(fields[column])
    if uniquify:
        # dict.fromkeys deduplicates while preserving first-seen order; the
        # original list(set(...)) returned a nondeterministic order, which
        # made downstream output files unstable across runs.
        return list(dict.fromkeys(woi))
    return woi
def output_dists(sp:SpacePair, PX:np.ndarray, woi:List[str],
                 neighbors:int=10, use_csls:bool=False,
                 out_dir:str=None, list_name:str=""):
    """
    For all given words u, compute the cosine distance between
    Px_u and y_u, and find the nearest neighbors of x_u in X and y_u in Y.
    Writes results to a file if out_dir is specified.
    Prefixes output files with list_name if specified.
    :param sp: SpacePair object holding X and Y as well as the word pairs T
    :param PX: X, projected onto Y (i.e. aligned)
    :param neighbors: number of nearest neighbors to be reported
    :param use_csls: rank nearest neighbors not by cosine, but by CSLS instead
    :param out_dir: needs to end with "/"
    :param list_name: used to distinguish different lists of words of interest
    """
    # Cosine distance between each word's projected source vector and its
    # target vector (assumes every w in woi is in both vocabularies).
    dists = {}
    for w in woi:
        dists[w] = utils.pairwise_cos_dist(np.array([PX[sp.voc_x[w]]]),
                                           np.array([sp.Y[sp.voc_y[w]]]),
                                           no_zero_dists=False)[0]
    # Rank words by distance, largest (most shifted) first.
    dist_ranked_words = sorted(dists, key=dists.get, reverse=True)
    # just some printouts
    top = 10
    print(f"\nDistances (top {top} {list_name} words):\n"
          f"{'dcos(Px_w,y_w)'} {'word':<12}")
    for w in dist_ranked_words[:top]:
        print(f"{dists[w]:<13.5f} {w:<12}")
    print(f"\nFinding the {neighbors} nearest neighbors in each space")
    src_nbs = exp_unsup.find_closest_concepts(sp.X[[sp.voc_x[w] for w in woi]],
                                              sp.X, sp.voc_x,
                                              k=neighbors, csls=use_csls)
    trg_nbs = exp_unsup.find_closest_concepts(sp.Y[[sp.voc_y[w] for w in woi]],
                                              sp.Y, sp.voc_y,
                                              k=neighbors, csls=use_csls)
    # BUGFIX: the neighbor lists are parallel to woi, but the "distance" and
    # "word" columns below are sorted by distance.  The original code wrote
    # src_nbs/trg_nbs in woi order, so the neighbor columns did not belong
    # to the word in the same row.  Re-align them per word before writing.
    src_by_word = dict(zip(woi, src_nbs))
    trg_by_word = dict(zip(woi, trg_nbs))
    if out_dir is not None:
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        filepath = out_dir + list_name + "_dists.tsv"
        print(f"writing pair distances to {filepath}...\n")
        df = DataFrame({"distance": [dists[w] for w in dist_ranked_words],
                        "word": dist_ranked_words,
                        "src_neighbors": [src_by_word[w] for w in dist_ranked_words],
                        "trg_neighbors": [trg_by_word[w] for w in dist_ranked_words]})
        df.to_csv(filepath, sep='\t')
def run(sp:SpacePair, PX:np.ndarray, woi:List[str], list_name:str, ap_source:bool=False,
        out_dir:str=None,
        min_count:int=1, spaces_mincount:int=0,
        dist_nbs:int=10, dir_k:int=1, pairdist_csls:bool=False,
        options:utils.ConfigReader=None):
    """
    This is similar to exp_unsup.py, but it works on the basis of words of interest,
    which might not appear in the space pair's vocabularies.
    :param sp: SpacePair (holds X, U, Y, V, and T)
    :param PX: projected space (using SpacePair.P to project X onto Y)
    :param woi: list of words of interest.
    :param ap_source: either cluster source vectors (True) or cluster difference vectors (False)
    :param out_dir: output destination (creates multiple files)
    :param min_count: usually 5, 10, 15 -- require min. occurrence from pairs' words
    :param spaces_mincount: usually 1, 5, 10 -- require min. occ. of embeddings' words
    :param dir_k: number of NNs to a centroid
    :param pairdist_csls: use csls for nearest neighbors search (in the pairdists part)
    :param options: use this for more convenient parameter passing. expects to
        hold min_count, spaces_mincount, dist_nbs, dir_k, and pairdist_csls.
    """
    # Config values (when present) override the keyword defaults.
    if options is not None:
        if options("exp_distech_min_wordcount") is not None:
            min_count = options("exp_distech_min_wordcount")
        if options("exp_distech_spaces_mincount") is not None:
            spaces_mincount = options("exp_distech_spaces_mincount")
        if options("exp_distech_neighbors") is not None:
            dist_nbs = options("exp_distech_neighbors")
        if options("exp_distech_clusterlabels") is not None:
            dir_k = options("exp_distech_clusterlabels")
        if options("exp_distech_use_csls") is not None:
            pairdist_csls = options("exp_distech_use_csls")
    # 1. 'prune' the word pairs to throw out unreliable embeddings.
    # Words of interest are turned into identity pairs (w,w) because the
    # helper expects bilingual pairs.
    woi, sp = exp_unsup.reduce_bilingual_signal([(w,w) for w in woi], sp,
                                                min_count=min_count,
                                                spaces_mincount=spaces_mincount) # this is to save memory
    woi = [p[0] for p in woi]
    print(f"Reduced vocabulary to words with min. {min_count} corpus occurrences. "
          f"Continuing with {len(woi)} pairs.")
    if spaces_mincount>0:
        print(f"Also reduced spaces to concepts with {spaces_mincount} corpus "
              f"occuccences in order to save memory. new sizes (X/Y): "
              f"{sp.X.shape[0]}/{sp.Y.shape[0]}.")
    # 2. Calculate difference vectors (shift of each word from Px to Y)
    from_vecs = np.array([ PX[sp.voc_x[u]] for u in woi])
    to_vecs = np.array([sp.Y[sp.voc_y[v]] for v in woi])
    D = utils.shift_directions(from_vecs, to_vecs, norm=False) # norm the vectors, just to be safe.
    D = exp_unsup.normalize_shifts_by_frequency(D, [(w,w) for w in woi], sp.freq_x, sp.freq_y)
    voc_D = {w:i for i,w in enumerate(woi)}
    cluster_timer = utils.Timer()
    print(f"\nStarting shift detection on '{list_name}' words.")
    # Distances between Px and Y (no shift directions involved)
    output_dists(sp, PX, woi,
                 neighbors=dist_nbs, use_csls=pairdist_csls,
                 out_dir=out_dir, list_name=list_name)
    cluster_timer("distances")
    # 3.2 Clustering of difference vectors
    selected_D = D[[voc_D[w] for w in woi]]
    ind_sD = {i: w for i, w in enumerate(woi)}
    # value of labels = index in center_ids
    # index of labels = key in ind_sD
    # value of center_ids = key in ind_sD
    # index of center_ids = cluster label (= values of labels)
    if ap_source is True:
        print("Clustering source vectors ...")
        labels, center_ids, convergence_it = utils.affprop_clusters(np.array(sp.X[[sp.voc_x[u] for u in woi]]))
    else:
        print("Clustering shift vectors ...")
        labels, center_ids, convergence_it = utils.affprop_clusters(selected_D)
    cluster_timer("AP_clustering")
    # make arrays of vectors and lists of word pairs which belong to the same cluster
    clusters, cluster_words = exp_unsup.reorganize(labels, selected_D, ind_sD)
    cluster_timer("re-organization")
    # 3.3 Cluster sizes, cluster lengths, inner distances
    cluster_sizes = [len(c) for c in clusters]
    lengths_max = [exp_unsup.cluster_length(cluster, "max") for cluster in clusters]
    lengths_mean = [exp_unsup.cluster_length(cluster, "mean") for cluster in clusters]
    lengths_median = [exp_unsup.cluster_length(cluster, "median") for cluster in clusters]
    lengths_std = [exp_unsup.cluster_length(cluster, "std") for cluster in clusters]
    inner_dists = [exp_unsup.inner_distance(cluster) for cluster in clusters]
    # z-scores normalize each statistic across all clusters
    lengths_max_normed = eval_utils.z_scores(lengths_max)
    lengths_mean_normed = eval_utils.z_scores(lengths_mean)
    lengths_median_normed = eval_utils.z_scores(lengths_median)
    lengths_std_normed = eval_utils.z_scores(lengths_std)
    inner_dists_normed = eval_utils.z_scores(inner_dists)
    cluster_timer("lengths_and_inner_dist")
    del clusters # to save memory
    # Nearest Neighbors: find the vector(s) in Y most similar to a cluster's centroid
    direction_labels = exp_unsup.find_closest_concepts(selected_D[center_ids], sp.Y, sp.voc_y, k=dir_k)
    cluster_timer("closest_concepts")
    # report everything
    df = DataFrame({
        "max_length" : lengths_max,
        "mean_length": lengths_mean,
        "median_length": lengths_median,
        "std_length": lengths_std,
        "max_length_zscore": lengths_max_normed,
        "mean_length_zscore": lengths_mean_normed,
        "median_length_zscore": lengths_median_normed,
        "std_length_zscore": lengths_std_normed,
        "inner_distance" : inner_dists,
        "inner_dist_zscore": inner_dists_normed, # normalized among all clusters
        "cluster_size" : cluster_sizes,
        "centroid" : [ind_sD[center] for center in center_ids], # these are word pairs
        "direction_label" : direction_labels, # these are tuples (word, distance)
        "cluster_words" : cluster_words
    })
    # rank by shift size
    df = df.sort_values("inner_distance", ascending=False, ignore_index=True)
    df.to_csv(out_dir+list_name+"_shift_clusters.tsv", sep='\t')
    cluster_timer.total()
    # additional information
    with open(out_dir+list_name+"_clustering_stats", "w") as f:
        f.write(f"number_of_clusters\t{len(center_ids)}\n"
                f"convergence_criterion\t{convergence_it}\n"
                f"param_min_count\t{min_count}\n"
                f"param_dir_k\t{dir_k}\n"
                f"param_reduce_spaces\t{spaces_mincount}\n"
                f"size_X\t{sp.X.shape[0]}\n"
                f"size_Y\t{sp.Y.shape[0]}\n"
                f"words_of_interest\t{len(woi)}\n"
                f"\ntime_taken:\n{cluster_timer}")
| StarcoderdataPython |
75326 | """
Response selection methods determines which response should be used in
the event that multiple responses are generated within a logic adapter.
"""
import logging
def get_most_frequent_response(input_statement, response_list):
    """
    :param input_statement: A statement, that closely matches an input to the chat bot.
    :type input_statement: Statement
    :param response_list: A list of statement options to choose a response from.
    :type response_list: list

    :return: The response statement with the greatest number of occurrences.
    :rtype: Statement
    """
    logger = logging.getLogger(__name__)
    logger.info(u'Selecting response with greatest number of occurrences.')

    best_response = None
    best_count = -1

    for candidate in response_list:
        candidate_count = candidate.get_response_count(input_statement)
        # On ties the later statement wins, matching the original behavior.
        if candidate_count >= best_count:
            best_response, best_count = candidate, candidate_count

    return best_response
def get_first_response(input_statement, response_list):
    """
    :param input_statement: A statement, that closely matches an input to the chat bot.
    :type input_statement: Statement
    :param response_list: A list of statement options to choose a response from.
    :type response_list: list

    :return: Return the first statement in the response list.
    :rtype: Statement
    """
    log = logging.getLogger(__name__)
    log.info(
        u'Selecting first response from list of {} options.'.format(len(response_list))
    )
    return response_list[0]
def get_random_response(input_statement, response_list):
    """
    :param input_statement: A statement, that closely matches an input to the chat bot.
    :type input_statement: Statement
    :param response_list: A list of statement options to choose a response from.
    :type response_list: list

    :return: Choose a random response from the selection.
    :rtype: Statement
    """
    from random import choice

    log = logging.getLogger(__name__)
    log.info(
        u'Selecting a response from list of {} options.'.format(len(response_list))
    )
    return choice(response_list)
| StarcoderdataPython |
1752954 | <gh_stars>0
import collections
import dataclasses
import functools
import inspect
import re
import types
from typing import Any
import numpy as np
import torch
import torchdynamo
from .. import mutation_guard
from .. import skipfiles
from ..allowed_functions import is_allowed
from ..allowed_functions import is_builtin
from ..allowed_functions import is_numpy
from ..exc import unimplemented
from ..guards import GuardBuilder
from ..side_effects import SideEffects
from ..source import AttrSource
from ..source import GetItemSource
from ..source import Source
from ..source import TupleIteratorGetItemSource
from ..utils import getfile
from ..utils import is_namedtuple
from ..utils import istensor
from ..utils import istype
from ..utils import tuple_iterator
from ..utils import tuple_iterator_getitem
from ..utils import tuple_iterator_len
from .base import MutableLocal
from .builtin import BuiltinVariable
from .constant import ConstantVariable
from .dicts import ConstDictVariable
from .dicts import DataClassVariable
from .functions import UserFunctionVariable
from .lists import ListIteratorVariable
from .lists import ListVariable
from .lists import NamedTupleVariable
from .lists import RangeVariable
from .lists import TupleVariable
from .misc import AutogradFunctionVariable
from .misc import InspectSignatureVariable
from .misc import LambdaVariable
from .misc import NumpyVariable
from .misc import PythonModuleVariable
from .misc import SkipFilesVariable
from .nn_module import UnspecializedNNModuleVariable
from .tensor import TensorVariable
from .torch import TorchVariable
from .user_defined import UserDefinedClassVariable
from .user_defined import UserDefinedObjectVariable
@dataclasses.dataclass
class GraphArg:
    """One placeholder input of the traced graph: where the value came from
    (``source``) plus a concrete example value."""
    source: Source
    example: Any

    def load(self, tx):
        # Emit the instructions that re-produce this value at runtime.
        return self.source.reconstruct(tx)

    def get_examples(self):
        return [self.example]

    def __len__(self):
        # A single GraphArg occupies exactly one input slot.
        return 1
class VariableBuilder:
    """Wrap a python value in a VariableTracker() instance"""

    def __init__(
        self,
        tx: "torchdynamo.symbolic_convert.InstructionTranslatorBase",
        source: Source,
    ):
        super(VariableBuilder, self).__init__()
        self.tx = tx
        # ``source`` records where the value came from (local, attribute,
        # item, ...) so guards and reconstruction code can refer back to it.
        self.source = source
        self.name = source.name()

    def __call__(self, value):
        # Reuse the existing tracker if this object was already wrapped.
        if value in self.tx.output.side_effects:
            # TODO(jansel): add guard for alias relationship
            return self.tx.output.side_effects[value]
        return self._wrap(value).clone(**self.options())

    @staticmethod
    def list_type(value):
        """Map a concrete sequence type to its VariableTracker class."""
        if is_namedtuple(value):
            return functools.partial(NamedTupleVariable, tuple_cls=type(value))
        return {
            tuple: TupleVariable,
            list: ListVariable,
            torch.nn.ParameterList: ListVariable,
            torch.nn.ModuleList: ListVariable,
        }[type(value)]

    def get_source(self):
        return self.source

    def options(self):
        return {"source": self.get_source()}

    def make_guards(self, *guards):
        # Instantiate each guard kind against this value's source.
        source = self.get_source()
        return {source.create_guard(guard) for guard in guards}

    def _wrap(self, value):
        """Dispatch on the runtime type of ``value`` and build the matching
        VariableTracker with the guards that keep the trace valid."""
        make_guards = self.make_guards
        if istensor(value):
            return self.wrap_tensor(value)
        elif istype(value, (tuple, list)) or is_namedtuple(value):
            # Wrap each element recursively, tracking its item source.
            guards = self.make_guards(GuardBuilder.LIST_LENGTH)
            output = [
                VariableBuilder(self.tx, GetItemSource(self.get_source(), i))(
                    item
                ).add_guards(guards)
                for i, item in enumerate(value)
            ]
            result = self.list_type(value)(output, guards=guards)
            if istype(value, list):
                # Plain lists are mutable: register for side-effect tracking.
                return self.tx.output.side_effects.track_list(
                    self.source, value, result
                )
            return result
        elif istype(value, tuple_iterator):
            guards = self.make_guards(GuardBuilder.TUPLE_ITERATOR_LEN)
            output = [
                VariableBuilder(
                    self.tx, TupleIteratorGetItemSource(self.get_source(), i)
                )(tuple_iterator_getitem(value, i)).add_guards(guards)
                for i in range(tuple_iterator_len(value))
            ]
            return ListIteratorVariable(
                output, mutable_local=MutableLocal(), guards=guards
            )
        elif istype(value, range):
            guards = self.make_guards(GuardBuilder.EQUALS_MATCH)
            return RangeVariable(value=value, guards=guards)
        elif istype(value, (dict, collections.OrderedDict)) and all(
            map(ConstantVariable.is_literal, value.keys())
        ):
            guards = self.make_guards(GuardBuilder.DICT_KEYS)
            # Sort plain-dict keys for determinism; OrderedDict keeps order.
            keys = (
                value.keys()
                if istype(value, collections.OrderedDict)
                else sorted(value.keys())
            )
            result = collections.OrderedDict(
                (
                    k,
                    VariableBuilder(self.tx, GetItemSource(self.get_source(), k))(
                        value[k]
                    ).add_guards(guards),
                )
                for k in keys
            )
            result = ConstDictVariable(result, guards=guards)
            if istype(value, dict):
                return self.tx.output.side_effects.track_dict(
                    self.source, value, result
                )
            return result
        elif isinstance(value, torch.nn.Module):
            if mutation_guard.is_dynamic_nn_module(value):
                # created dynamically, don't specialize on it
                result = UnspecializedNNModuleVariable(
                    value, guards=make_guards(GuardBuilder.TYPE_MATCH)
                )
                if not SideEffects.cls_supports_mutation_side_effects(type(value)):
                    # don't allow STORE_ATTR mutation with custom __setattr__
                    return result
                return self.tx.output.side_effects.track_object_existing(
                    self.source, value, result
                )
            else:
                return self.tx.output.add_submodule(
                    value,
                    self.name,
                    source=self.get_source(),
                    # Guards are added inside add_submodule
                )
        elif ConstantVariable.is_literal(value) or istype(
            value, (torch.Size, torch.device, torch.dtype)
        ):
            # For these, just specialize on exact value
            return ConstantVariable(
                value=value,
                guards=make_guards(GuardBuilder.CONSTANT_MATCH),
            )
        elif is_builtin(value):
            return BuiltinVariable(
                value,
                guards=make_guards(GuardBuilder.BUILTIN_MATCH),
            )
        elif is_allowed(value):
            return TorchVariable(
                value,
                guards=make_guards(GuardBuilder.FUNCTION_MATCH),
            )
        elif value is inspect.signature:
            return LambdaVariable(
                InspectSignatureVariable.create,
                guards=make_guards(GuardBuilder.FUNCTION_MATCH),
            )
        elif value is dataclasses.fields:
            return LambdaVariable(
                _dataclasses_fields_lambda,
                guards=make_guards(GuardBuilder.FUNCTION_MATCH),
            )
        elif is_numpy(value):
            return NumpyVariable(
                value,
                guards=make_guards(
                    GuardBuilder.FUNCTION_MATCH
                    if callable(value)
                    else GuardBuilder.TYPE_MATCH
                ),
            )
        elif (
            istype(value, (type, types.FunctionType))
            and skipfiles.check(getfile(value), allow_torch=True)
            and not inspect.getattr_static(value, "_torchdynamo_inline", False)
        ):
            return SkipFilesVariable(
                value, guards=make_guards(GuardBuilder.FUNCTION_MATCH)
            )
        elif istype(value, type):
            return UserDefinedClassVariable(
                value, guards=make_guards(GuardBuilder.FUNCTION_MATCH)
            )
        elif istype(value, types.FunctionType):
            return UserFunctionVariable(
                value,
                guards=make_guards(GuardBuilder.FUNCTION_MATCH),
            )
        elif istype(value, types.ModuleType):
            return PythonModuleVariable(
                value,
                guards=make_guards(GuardBuilder.PYMODULE_MATCH),
            )
        elif type(value) is torch.autograd.function.FunctionMeta:
            return AutogradFunctionVariable(
                value, guards=make_guards(GuardBuilder.FUNCTION_MATCH)
            )
        # Numpy scalar ints are specialized as if they were Python ints.
        if istype(
            value,
            (
                np.int8,
                np.int16,
                np.int32,
                np.int64,
                np.uint8,
                np.uint16,
                np.uint32,
                np.uint64,
            ),
        ):
            return self._wrap(int(value))
        elif DataClassVariable.is_matching_object(value):
            return DataClassVariable.wrap(self, value).add_guards(
                make_guards(GuardBuilder.TYPE_MATCH)
            )
        else:
            # Fallback: opaque user object, guarded only on its type.
            result = UserDefinedObjectVariable(
                value,
                guards=self.make_guards(GuardBuilder.TYPE_MATCH),
            )
            if not SideEffects.cls_supports_mutation_side_effects(type(value)):
                # don't allow STORE_ATTR mutation with custom __setattr__
                return result
            return self.tx.output.side_effects.track_object_existing(
                self.source, value, result
            )

    def wrap_tensor(self, value: torch.Tensor):
        """Wrap a tensor either as an nn.Module member (specialized) or as a
        new placeholder input of the traced graph."""
        if self.get_source().guard_source().is_nn_module():
            return self.tx.output.add_submodule(
                value,
                self.name,
                source=self.get_source(),
                # Guards are done inside add_submodule
                # guards=self.make_guards(GuardBuilder.TENSOR_MATCH),
            )
        else:
            # Record the tensor as a graph input placeholder.
            self.tx.output.graphargs.append(GraphArg(self.get_source(), value))
            return TensorVariable.create(
                tx=self.tx,
                proxy=self.tx.output.create_graph_input(
                    re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(value)
                ),
                example_value=value,
                guards=self.make_guards(GuardBuilder.TENSOR_MATCH),
            )
def _dataclasses_fields_lambda(obj):
    """Symbolic stand-in for ``dataclasses.fields(obj)``: returns a tuple
    variable wrapping the Field objects of the traced dataclass."""
    if isinstance(obj, UserDefinedObjectVariable):
        value = obj.value
    elif isinstance(obj, DataClassVariable):
        value = obj.user_cls
    else:
        unimplemented(f"Dataclass fields handling fails for type {obj}")
    items = []
    for field in dataclasses.fields(value):
        source = None
        if obj.source:
            # Track each field through __dataclass_fields__ so guards and
            # reconstruction can reference it.
            source = GetItemSource(
                AttrSource(obj.source, "__dataclass_fields__"), field.name
            )
        items.append(UserDefinedObjectVariable(field, source=source).add_options(obj))
    return TupleVariable(items).add_options(obj)
| StarcoderdataPython |
3239413 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from net.utils.graph import Graph
class Model(nn.Module):
    """ST-GCN style network with a classification head and a reconstruction
    head that predicts future poses.

    Args:
        in_channels: coordinate channels per joint (e.g. 3 for x, y, z).
        num_class: number of action classes.
        graph_args: keyword arguments forwarded to ``Graph``.
        edge_importance_weighting: when True, learn a per-layer importance
            mask over the adjacency tensor.
    """
    def __init__(self, in_channels, num_class, graph_args,
                 edge_importance_weighting, **kwargs):
        super().__init__()
        # Skeleton graph; the adjacency stack A is a fixed (non-trainable) buffer.
        self.graph = Graph(**graph_args)
        A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
        self.register_buffer('A', A)
        self.edge_type = 2
        temporal_kernel_size = 9
        # Spatial kernel covers the fixed partitions plus the actional edges.
        spatial_kernel_size = A.size(0) + self.edge_type
        st_kernel_size = (temporal_kernel_size, spatial_kernel_size)
        self.data_bn = nn.BatchNorm1d(in_channels * A.size(1))
        # Classification backbone: 9 ST-GCN blocks.
        self.class_layer_0 = StgcnBlock(in_channels, 64, st_kernel_size, self.edge_type, stride=1, residual=False, **kwargs)
        self.class_layer_1 = StgcnBlock(64, 64, st_kernel_size, self.edge_type, stride=1, **kwargs)
        self.class_layer_2 = StgcnBlock(64, 64, st_kernel_size, self.edge_type, stride=1, **kwargs)
        self.class_layer_3 = StgcnBlock(64, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
        self.class_layer_4 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=1, **kwargs)
        self.class_layer_5 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=1, **kwargs)
        self.class_layer_6 = StgcnBlock(128, 256, st_kernel_size, self.edge_type, stride=2, **kwargs)
        self.class_layer_7 = StgcnBlock(256, 256, st_kernel_size, self.edge_type, stride=1, **kwargs)
        self.class_layer_8 = StgcnBlock(256, 256, st_kernel_size, self.edge_type, stride=1, **kwargs)
        # Reconstruction branch: progressively shrinks time down to one frame.
        self.recon_layer_0 = StgcnBlock(256, 128, st_kernel_size, self.edge_type, stride=1, **kwargs)
        self.recon_layer_1 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
        self.recon_layer_2 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
        self.recon_layer_3 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
        self.recon_layer_4 = StgcnBlock(128, 128, (3, spatial_kernel_size), self.edge_type, stride=2, **kwargs)
        self.recon_layer_5 = StgcnBlock(128, 128, (5, spatial_kernel_size), self.edge_type, stride=1, padding=False, residual=False, **kwargs)
        self.recon_layer_6 = StgcnReconBlock(128+3, 30, (1, spatial_kernel_size), self.edge_type, stride=1, padding=False, residual=False, activation=None, **kwargs)
        if edge_importance_weighting:
            self.edge_importance = nn.ParameterList([nn.Parameter(torch.ones(self.A.size())) for i in range(9)])
            self.edge_importance_recon = nn.ParameterList([nn.Parameter(torch.ones(self.A.size())) for i in range(9)])
        else:
            # Bug fix: the original referenced self.st_gcn_networks /
            # self.st_gcn_recon (never defined) and did not set
            # edge_importance_recon at all, so disabling the weighting
            # crashed with AttributeError. forward() indexes
            # edge_importance[0..8] and edge_importance_recon[0..6].
            self.edge_importance = [1] * 9
            self.edge_importance_recon = [1] * 9
        self.fcn = nn.Conv2d(256, num_class, kernel_size=1)

    def forward(self, x, x_target, x_last, A_act, lamda_act):
        """Classify the sequence and predict 10 future frames.

        x: (N, C, T, V, M) poses; x_target: ground-truth future poses;
        x_last: last observed frame; A_act / lamda_act: actional adjacency
        and its mixing weight.
        """
        N, C, T, V, M = x.size()
        # Fold the person axis M into the batch, then normalize per (V, C).
        # (An unused `x_recon = x[:,:,:,:,0]` slice was removed here.)
        x = x.permute(0, 4, 3, 1, 2).contiguous()      # (N, M, V, C, T)
        x = x.view(N * M, V * C, T)
        x_last = x_last.permute(0, 4, 1, 2, 3).contiguous().view(-1, 3, 1, 25)  # (N*M, 3, 1, V)
        x_bn = self.data_bn(x)
        x_bn = x_bn.view(N, M, V, C, T)
        x_bn = x_bn.permute(0, 1, 3, 4, 2).contiguous()
        x_bn = x_bn.view(N * M, C, T, V)
        # Classification backbone. Bug fix: the original invoked
        # class_layer_1 twice in a row, recomputing h1 and double-updating
        # its BatchNorm running statistics during training.
        h0, _ = self.class_layer_0(x_bn, self.A * self.edge_importance[0], A_act, lamda_act)
        h1, _ = self.class_layer_1(h0, self.A * self.edge_importance[1], A_act, lamda_act)
        h2, _ = self.class_layer_2(h1, self.A * self.edge_importance[2], A_act, lamda_act)
        h3, _ = self.class_layer_3(h2, self.A * self.edge_importance[3], A_act, lamda_act)
        h4, _ = self.class_layer_4(h3, self.A * self.edge_importance[4], A_act, lamda_act)
        h5, _ = self.class_layer_5(h4, self.A * self.edge_importance[5], A_act, lamda_act)
        h6, _ = self.class_layer_6(h5, self.A * self.edge_importance[6], A_act, lamda_act)
        h7, _ = self.class_layer_7(h6, self.A * self.edge_importance[7], A_act, lamda_act)
        h8, _ = self.class_layer_8(h7, self.A * self.edge_importance[8], A_act, lamda_act)
        # Global pooling over (T, V), then average the person axis.
        x_class = F.avg_pool2d(h8, h8.size()[2:])
        x_class = x_class.view(N, M, -1, 1, 1).mean(dim=1)
        x_class = self.fcn(x_class)
        x_class = x_class.view(x_class.size(0), -1)
        # Reconstruction branch.
        r0, _ = self.recon_layer_0(h8, self.A*self.edge_importance_recon[0], A_act, lamda_act)
        r1, _ = self.recon_layer_1(r0, self.A*self.edge_importance_recon[1], A_act, lamda_act)
        r2, _ = self.recon_layer_2(r1, self.A*self.edge_importance_recon[2], A_act, lamda_act)
        r3, _ = self.recon_layer_3(r2, self.A*self.edge_importance_recon[3], A_act, lamda_act)
        r4, _ = self.recon_layer_4(r3, self.A*self.edge_importance_recon[4], A_act, lamda_act)
        r5, _ = self.recon_layer_5(r4, self.A*self.edge_importance_recon[5], A_act, lamda_act)
        r6, _ = self.recon_layer_6(torch.cat((r5, x_last), 1), self.A*self.edge_importance_recon[6], A_act, lamda_act)
        # Predict 10 future frames as offsets from the last observed frame.
        pred = x_last.squeeze().repeat(1, 10, 1) + r6.squeeze()
        pred = pred.contiguous().view(-1, 3, 10, 25)
        x_target = x_target.permute(0, 4, 1, 2, 3).contiguous().view(-1, 3, 10, 25)
        # NOTE(review): [::2] presumably keeps one person per sequence
        # (assumes M == 2) — confirm against the data loader.
        return x_class, pred[::2], x_target[::2]

    def extract_feature(self, x):
        """Return per-node class scores and backbone features.

        NOTE(review): this method iterates ``self.st_gcn_networks``, which is
        not defined anywhere in this class — it appears stale and would raise
        AttributeError if called. Kept for interface compatibility; confirm
        intended layer list before use.
        """
        N, C, T, V, M = x.size()
        x = x.permute(0, 4, 3, 1, 2).contiguous()
        x = x.view(N * M, V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T)
        x = x.permute(0, 1, 3, 4, 2).contiguous()
        x = x.view(N * M, C, T, V)
        for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
            x, _ = gcn(x, self.A * importance)
        _, c, t, v = x.size()
        feature = x.view(N, M, c, t, v).permute(0, 2, 3, 4, 1)
        x = self.fcn(x)
        output = x.view(N, M, -1, t, v).permute(0, 2, 3, 4, 1)
        return output, feature
class StgcnBlock(nn.Module):
    """One spatial-temporal GCN unit: spatial graph convolution followed by a
    temporal convolution, with an optional residual connection."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 edge_type=2,
                 t_kernel_size=1,
                 stride=1,
                 padding=True,
                 dropout=0,
                 residual=True):
        super().__init__()
        assert len(kernel_size) == 2
        assert kernel_size[0] % 2 == 1
        # Temporal padding preserves the sequence length when enabled.
        pad = ((kernel_size[0] - 1) // 2, 0) if padding == True else (0, 0)
        self.gcn = SpatialGcn(in_channels=in_channels,
                              out_channels=out_channels,
                              k_num=kernel_size[1],
                              edge_type=edge_type,
                              t_kernel_size=t_kernel_size)
        self.tcn = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels,
                      out_channels,
                      (kernel_size[0], 1),
                      (stride, 1),
                      pad),
            nn.BatchNorm2d(out_channels),
            nn.Dropout(dropout, inplace=True),
        )
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            self.residual = lambda x: x
        else:
            # Project the shortcut when channels or temporal stride differ.
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size=1,
                          stride=(stride, 1)),
                nn.BatchNorm2d(out_channels),
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A, B, lamda_act):
        shortcut = self.residual(x)
        out, A = self.gcn(x, A, B, lamda_act)
        out = self.tcn(out) + shortcut
        return self.relu(out), A
class StgcnReconBlock(nn.Module):
    """Reconstruction variant of StgcnBlock: spatial graph conv (transposed)
    followed by a transposed temporal conv, with optional residual and a
    selectable output activation ('relu' or None)."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 edge_type=2,
                 t_kernel_size=1,
                 stride=1,
                 padding=True,
                 dropout=0,
                 residual=True,
                 activation='relu'):
        super().__init__()
        # kernel_size = (temporal_kernel, spatial_kernel); temporal must be odd
        # so the symmetric padding below is well-defined.
        assert len(kernel_size) == 2
        assert kernel_size[0] % 2 == 1
        if padding == True:
            padding = ((kernel_size[0] - 1) // 2, 0)
        else:
            padding = (0,0)
        self.gcn_recon = SpatialGcnRecon(in_channels=in_channels,
                                         out_channels=out_channels,
                                         k_num=kernel_size[1],
                                         edge_type=edge_type,
                                         t_kernel_size=t_kernel_size)
        # Transposed temporal conv; output_padding=(stride-1, 0) restores the
        # exact upsampled length for the given stride.
        self.tcn_recon = nn.Sequential(nn.BatchNorm2d(out_channels),
                                       nn.ReLU(inplace=True),
                                       nn.ConvTranspose2d(in_channels=out_channels,
                                                          out_channels=out_channels,
                                                          kernel_size=(kernel_size[0], 1),
                                                          stride=(stride, 1),
                                                          padding=padding,
                                                          output_padding=(stride-1,0)),
                                       nn.BatchNorm2d(out_channels),
                                       nn.Dropout(dropout, inplace=True))
        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            # Shortcut projection via a 1x1 transposed conv to match shape.
            self.residual = nn.Sequential(nn.ConvTranspose2d(in_channels=in_channels,
                                                             out_channels=out_channels,
                                                             kernel_size=1,
                                                             stride=(stride, 1),
                                                             output_padding=(stride-1,0)),
                                          nn.BatchNorm2d(out_channels))
        self.relu = nn.ReLU(inplace=True)
        self.activation = activation
    def forward(self, x, A, B, lamda_act):
        res = self.residual(x)
        x, A = self.gcn_recon(x, A, B, lamda_act)
        x = self.tcn_recon(x) + res
        # Final activation is optional so the last layer can emit raw offsets.
        if self.activation == 'relu':
            x = self.relu(x)
        else:
            x = x
        return x, A
class SpatialGcn(nn.Module):
    """Spatial graph convolution with two adjacency branches: a fixed stack A
    (first k_num - edge_type kernels) and a data-driven stack B (last
    edge_type kernels), mixed by ``lamda_act``."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 k_num,
                 edge_type=2,
                 t_kernel_size=1,
                 t_stride=1,
                 t_padding=0,
                 t_dilation=1,
                 bias=True):
        super().__init__()
        self.k_num = k_num
        self.edge_type = edge_type
        # Pointwise (in space) conv expanding channels by a factor of k_num.
        self.conv = nn.Conv2d(in_channels,
                              out_channels * k_num,
                              kernel_size=(t_kernel_size, 1),
                              padding=(t_padding, 0),
                              stride=(t_stride, 1),
                              dilation=(t_dilation, 1),
                              bias=bias)

    def forward(self, x, A, B, lamda_act):
        feat = self.conv(x)
        n, kc, t, v = feat.size()
        feat = feat.view(n, self.k_num, kc // self.k_num, t, v)
        fixed_k = self.k_num - self.edge_type
        # Fixed-graph branch contracts over the shared adjacency stack A;
        # the actional branch uses the per-sample adjacency B.
        out_fixed = torch.einsum('nkctv,kvw->nctw', (feat[:, :fixed_k], A))
        out_act = torch.einsum('nkctv,nkvw->nctw', (feat[:, -self.edge_type:], B))
        combined = out_fixed + out_act * lamda_act
        return combined.contiguous(), A
class SpatialGcnRecon(nn.Module):
    """Spatial graph convolution built on a transposed conv, used by the
    reconstruction branch; mirrors SpatialGcn's two-branch mixing."""

    def __init__(self, in_channels, out_channels, k_num, edge_type=3,
                 t_kernel_size=1, t_stride=1, t_padding=0, t_outpadding=0, t_dilation=1,
                 bias=True):
        super().__init__()
        self.k_num = k_num
        self.edge_type = edge_type
        # Transposed conv expands channels by k_num while (optionally)
        # upsampling along time.
        self.deconv = nn.ConvTranspose2d(in_channels,
                                         out_channels * k_num,
                                         kernel_size=(t_kernel_size, 1),
                                         padding=(t_padding, 0),
                                         output_padding=(t_outpadding, 0),
                                         stride=(t_stride, 1),
                                         dilation=(t_dilation, 1),
                                         bias=bias)

    def forward(self, x, A, B, lamda_act):
        feat = self.deconv(x)
        n, kc, t, v = feat.size()
        feat = feat.view(n, self.k_num, kc // self.k_num, t, v)
        fixed_k = self.k_num - self.edge_type
        out_fixed = torch.einsum('nkctv,kvw->nctw', (feat[:, :fixed_k], A))
        out_act = torch.einsum('nkctv,nkvw->nctw', (feat[:, -self.edge_type:], B))
        combined = out_fixed + out_act * lamda_act
        return combined.contiguous(), A
| StarcoderdataPython |
3385983 | <filename>fluentogram/misc/__init__.py<gh_stars>0
# coding=utf-8
from .timezones import timezones
__all__ = ["timezones"]
| StarcoderdataPython |
1633056 | <gh_stars>1-10
import random
import numpy as np
from sinkhorn_knopp import sinkhorn_knopp
from simulated.Packet import Packet
class Traffic_Generator(object):
    """Generates synthetic switch traffic from a doubly stochastic demand
    matrix scaled by the offered load.

    NOTE: packet generation consumes the global numpy RNG stream in a fixed
    order; reproducibility depends on calling generate_doubly_stochastic_traffic
    (which seeds numpy) before generate_packets.
    """
    def __init__(self, size, seed, load):
        super(Traffic_Generator, self).__init__()
        # size: number of ports; seed: numpy RNG seed; load: offered load in [0, 1]
        self._size = size
        self._seed = seed
        self._load = load
    def generate_doubly_stochastic_traffic(self):
        """Build a size x size doubly stochastic matrix (Sinkhorn-Knopp on a
        random matrix), scaled by the load, and cache it on the instance."""
        np.random.seed(self._seed)
        sk = sinkhorn_knopp.SinkhornKnopp()
        self._traffic_matrix = sk.fit(np.random.rand(self._size, self._size))
        self._traffic_matrix = self._traffic_matrix * self._load
        return self._traffic_matrix
    def generate_packets(self, timestep):
        """Draw at most one packet per input port for this timestep.

        Requires generate_doubly_stochastic_traffic to have been called first
        (reads self._traffic_matrix).
        """
        packets = []
        for input in range(0, self._size):
            output_probas = self._traffic_matrix[input]
            # check if we can use the probabilities
            if all(i == 0 for i in output_probas):
                output = -1
            else:
                # flip a coin according to load if we want to generate a packet
                generate_packet = np.random.choice([True, False], p = [self._load, 1-self._load])
                if generate_packet:
                    # Renormalize the row so it is a valid distribution, then
                    # sample the destination port.
                    proba_sum = sum(output_probas)
                    scaled_probas = [i * (1/proba_sum) for i in output_probas]
                    output_indices = np.arange(self._size)
                    output = np.random.choice(output_indices, p=scaled_probas)
                else:
                    output=-1
            if output != -1:
                packets.append(Packet(input, output, timestep))
        return packets
| StarcoderdataPython |
3321813 | <reponame>beli302/Pitches
from flask import render_template, request, redirect, url_for, abort, flash
from flask_login import login_required, current_user
from . forms import PitchForm, CommentForm, CategoryForm, UpdateProfile
from .import main
from .. import db
from ..models import User, Pitch, Comments, PitchCategory, Votes
# @main.route("/")
# def welcome():
# return render_template('welcome.html', title = 'Welcome')
@main.route('/')
def index():
    """Render the landing page listing every pitch.

    Fix: removed a leftover debug ``print`` of the full query result.
    """
    all_pitches = Pitch.query.all()
    title = 'Home- Welcome'
    return render_template('index.html', title=title, all_pitches=all_pitches)
# Route for adding a new pitch
@main.route('/pitch', methods=['GET', 'POST'])
@login_required
def new_pitch():
    """Render the pitch form and save a submitted pitch for the current user."""
    pitch_form = PitchForm()
    if not pitch_form.validate_on_submit():
        return render_template('new_pitch.html', pitch_form=pitch_form)
    pitch = Pitch(content=pitch_form.content.data, user=current_user)
    pitch.save_pitch()
    return redirect(url_for('main.index'))
@main.route('/categories/<int:id>')
def category(id):
    """Show all pitches belonging to one category; 404 if it does not exist."""
    found = PitchCategory.query.get(id)
    if found is None:
        abort(404)
    return render_template('category.html',
                           pitches=Pitch.get_pitches(id),
                           category=found)
@main.route('/add/category', methods=['GET', 'POST'])
@login_required
def new_category():
    """Render and process the form used to create a pitch category."""
    form = CategoryForm()
    if form.validate_on_submit():
        category = PitchCategory(name=form.name.data)
        category.save_category()
        return redirect(url_for('.index'))
    return render_template('new_category.html',
                           category_form=form,
                           title='New category')
# view single pitch alongside its comments
@main.route('/view-pitch/<int:id>', methods=['GET', 'POST'])
@login_required
def view_pitch(id):
    """Render a single pitch with its comments and like/dislike tallies."""
    pitches=Pitch.query.get(id)
    # pitches = Pitch.query.filter_by(id=id).all()
    if pitches is None:
        abort(404)
    #
    comment=Comments.get_comments(id)
    # vote == 1 is a like, vote == 2 is a dislike (see upvote()); the lists
    # are fetched only to count them.
    count_likes=Votes.query.filter_by(pitches_id=id, vote=1).all()
    count_dislikes=Votes.query.filter_by(pitches_id=id, vote=2).all()
    return render_template('view_pitch.html', pitches=pitches, comment=comment, count_likes=len(count_likes), count_dislikes=len(count_dislikes), category_id=id)
# adding a comment
@main.route('/write_comment/<int:id>', methods=['GET', 'POST'])
@login_required
def post_comment(id):
    """Attach a new comment to the pitch with the given id; 404 if missing."""
    form = CommentForm()
    pitch = Pitch.query.filter_by(id=id).first()
    if pitch is None:
        abort(404)
    if form.validate_on_submit():
        comment = Comments(
            opinion=form.opinion.data, user_id=current_user.id, pitches_id=pitch.id)
        comment.save_comment()
        return redirect(url_for('.view_pitch', id=pitch.id))
    return render_template('post_comment.html', comment_form=form, title='post comment')
# Routes upvoting/downvoting pitches
@main.route('/pitch/upvote/<int:id>&<int:vote_type>')
@login_required
def upvote(id, vote_type):
    """Record a like (vote_type=1) or dislike (vote_type=2) on a pitch.

    NOTE(review): the dedup logic below compares ``f'{vote}'`` against a
    "type:user:pitch" string, which relies on Votes.__repr__ having exactly
    that format — confirm in the model. Also, the for-loop saves a new vote
    on the FIRST non-matching row and then breaks, so a user with any other
    existing vote can still add duplicates; this looks like a latent bug.
    """
    # Query for user
    votes=Votes.query.filter_by(user_id=current_user.id).all()
    print(f'The new vote is {votes}')
    to_str=f'{vote_type}:{current_user.id}:{id}'
    print(f'The current vote is {to_str}')
    # First vote ever for this user: always record it.
    if not votes:
        new_vote=Votes(vote=vote_type, user_id=current_user.id, pitches_id=id)
        new_vote.save_vote()
        # print(len(count_likes))
        print('YOU HAVE new VOTED')
    for vote in votes:
        if f'{vote}' == to_str:
            # Identical vote already exists: do nothing.
            print('Y')
            break
        else:
            new_vote=Votes(
                vote=vote_type, user_id=current_user.id, pitches_id=id)
            new_vote.save_vote()
            print('YOU HAVE VOTED')
            break
    return redirect(url_for('.view_pitch', id=id))
@main.route('/user/<uname>')
def profile(uname):
    """Display a user's public profile page; 404 for unknown usernames."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    return render_template("profile/profile.html", user=account)
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
    """Let a signed-in user edit the bio on a profile; 404 for unknown users."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    form = UpdateProfile()
    if not form.validate_on_submit():
        return render_template('profile/update.html', form=form)
    account.bio = form.bio.data
    db.session.add(account)
    db.session.commit()
    flash('Your account has been updated!', 'success')
    return redirect(url_for('.profile', uname=account.username))
1766974 | <filename>tests/test_compiler.py
from .context import lux
import pytest
import pandas as pd
def test_underspecifiedNoVis(test_showMore):
    """An empty context, or a filter-only context, should compile no views."""
    noViewActions = ["Correlation", "Distribution", "Category"]
    df = pd.read_csv("lux/data/car.csv")
    test_showMore(df,noViewActions)
    assert len(df.viewCollection)==0
    # test only one filter context case.
    df.setContext([lux.Spec(attribute = "Origin", filterOp="=",value="USA")])
    test_showMore(df,noViewActions)
    assert len(df.viewCollection)==0
def test_underspecifiedSingleVis(test_showMore):
    """Two quantitative measures should compile into a single scatter view."""
    oneViewActions = ["Enhance", "Filter", "Generalize"]
    df = pd.read_csv("lux/data/car.csv")
    df.setContext([lux.Spec(attribute = "MilesPerGal"),lux.Spec(attribute = "Weight")])
    assert len(df.viewCollection)==1
    assert df.viewCollection[0].mark == "scatter"
    for attr in df.viewCollection[0].specLst: assert attr.dataModel=="measure"
    for attr in df.viewCollection[0].specLst: assert attr.dataType=="quantitative"
    test_showMore(df,oneViewActions)
def test_underspecifiedVisCollection(test_showMore):
    """Wildcard / multi-attribute contexts should expand into view collections."""
    multipleViewActions = ["View Collection"]
    df = pd.read_csv("lux/data/car.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext([lux.Spec(attribute = ["Horsepower","Weight","Acceleration"]),lux.Spec(attribute = "Year",channel="x")])
    assert len(df.viewCollection)==3
    assert df.viewCollection[0].mark == "line"
    for vc in df.viewCollection:
        assert (vc.getAttrByChannel("x")[0].attribute == "Year")
    test_showMore(df,multipleViewActions)
    df.setContext([lux.Spec(attribute = "?"),lux.Spec(attribute = "Year",channel="x")])
    assert len(df.viewCollection) == len(list(df.columns))-1 # we remove year by year so its 8 vis instead of 9
    for vc in df.viewCollection:
        assert (vc.getAttrByChannel("x")[0].attribute == "Year")
    test_showMore(df,multipleViewActions)
    df.setContext([lux.Spec(attribute = "?",dataType="quantitative"),lux.Spec(attribute = "Year")])
    assert len(df.viewCollection) == len([view.getAttrByDataType("quantitative") for view in df.viewCollection]) # should be 5
    test_showMore(df,multipleViewActions)
    df.setContext([lux.Spec(attribute = "?", dataModel="measure"),lux.Spec(attribute="MilesPerGal",channel="y")])
    # NOTE(review): this loop prints instead of asserting — it was likely
    # intended to be an assert on the y-channel attribute.
    for vc in df.viewCollection:
        print (vc.getAttrByChannel("y")[0].attribute == "MilesPerGal")
    test_showMore(df,multipleViewActions)
    df.setContext([lux.Spec(attribute = "?", dataModel="measure"),lux.Spec(attribute = "?", dataModel="measure")])
    assert len(df.viewCollection) == len([view.getAttrByDataModel("measure") for view in df.viewCollection]) #should be 25
    test_showMore(df,multipleViewActions)
@pytest.fixture
def test_showMore():
    """Fixture: returns a checker that runs showMore() and verifies every
    produced recommendation belongs to the expected action set."""
    def _check(df, actions):
        df.showMore()
        assert len(df._recInfo) > 0
        for rec in df._recInfo:
            assert rec["action"] in actions
    return _check
def test_parse():
    """String shorthand specs ("Origin=?", "MilesPerGal") should parse the
    same as the explicit keyword form."""
    df = pd.read_csv("lux/data/car.csv")
    df.setContext([lux.Spec("Origin=?"),lux.Spec(attribute = "MilesPerGal")])
    assert len(df.viewCollection)==3
    df = pd.read_csv("lux/data/car.csv")
    df.setContext([lux.Spec("Origin=?"),lux.Spec("MilesPerGal")])
    assert len(df.viewCollection)==3
def test_underspecifiedVisCollection_Zval():
    """A wildcard filter value should expand into one view per filter value."""
    # check if the number of charts is correct
    df = pd.read_csv("lux/data/car.csv")
    df.setContext([lux.Spec(attribute = "Origin", filterOp="=",value="?"),lux.Spec(attribute = "MilesPerGal")])
    assert len(df.viewCollection)==3
    #does not work
    # df = pd.read_csv("lux/data/cars.csv")
    # df.setContext([lux.Spec(attribute = ["Origin","Cylinders"], filterOp="=",value="?"),lux.Spec(attribute = ["Horsepower"]),lux.Spec(attribute = "Weight")])
    # assert len(df.viewCollection) == 8
def test_sortBar():
    """Bar charts should be sorted ascending only for high-cardinality
    dimensions (Name) and left unsorted for low-cardinality ones (Origin)."""
    from lux.compiler.Compiler import Compiler
    from lux.view.View import View
    df = pd.read_csv("lux/data/car.csv")
    view = View([lux.Spec(attribute="Acceleration",dataModel="measure",dataType="quantitative"),
                 lux.Spec(attribute="Origin",dataModel="dimension",dataType="nominal")])
    Compiler.determineEncoding(df,view)
    assert view.mark == "bar"
    assert view.specLst[1].sort == ''
    df = pd.read_csv("lux/data/car.csv")
    view = View([lux.Spec(attribute="Acceleration",dataModel="measure",dataType="quantitative"),
                 lux.Spec(attribute="Name",dataModel="dimension",dataType="nominal")])
    Compiler.determineEncoding(df,view)
    assert view.mark == "bar"
    assert view.specLst[1].sort == 'ascending'
def test_specifiedVisCollection():
    """Explicit filter values should restrict the generated view collection."""
    df = pd.read_csv("lux/data/cars.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext(
        [lux.Spec(attribute="Horsepower"),lux.Spec(attribute="Brand"), lux.Spec(attribute = "Origin",value=["Japan","USA"])])
    assert len(df.viewCollection) == 2
    df.setContext(
        [lux.Spec(attribute=["Horsepower","Weight"]),lux.Spec(attribute="Brand"), lux.Spec(attribute = "Origin",value=["Japan","USA"])])
    assert len(df.viewCollection) == 4
    # # test if z axis has been filtered correctly
    chartTitles = [view.title for view in df.viewCollection.collection]
    assert "Origin = USA" and "Origin = Japan" in chartTitles
    assert "Origin = Europe" not in chartTitles
def test_specifiedChannelEnforcedVisCollection():
    """A channel pinned in the context must be honored in every expanded view."""
    df = pd.read_csv("lux/data/cars.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext(
        [lux.Spec(attribute="?"),lux.Spec(attribute="MilesPerGal",channel="x")])
    for view in df.viewCollection:
        checkAttributeOnChannel(view, "MilesPerGal", "x")
def test_autoencodingScatter():
    """Channel auto-assignment for scatter: unspecified, partial, full, and
    conflicting channel specifications."""
    # No channel specified
    df = pd.read_csv("lux/data/cars.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext([lux.Spec(attribute="MilesPerGal"),lux.Spec(attribute="Weight")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view, "MilesPerGal", "x")
    checkAttributeOnChannel(view, "Weight", "y")
    # Partial channel specified
    df.setContext([lux.Spec(attribute="MilesPerGal", channel="y"),lux.Spec(attribute="Weight")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view, "MilesPerGal", "y")
    checkAttributeOnChannel(view, "Weight", "x")
    # Full channel specified
    df.setContext([lux.Spec(attribute="MilesPerGal", channel="y"),lux.Spec(attribute="Weight",channel="x")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view, "MilesPerGal", "y")
    checkAttributeOnChannel(view, "Weight", "x")
    # Duplicate channel specified
    with pytest.raises(ValueError):
        # Should throw error because there should not be columns with the same channel specified
        df.setContext([lux.Spec(attribute="MilesPerGal", channel="x"), lux.Spec(attribute="Weight", channel="x")])
def test_autoencodingHistogram():
    """A single measure pinned to y should stay on y for the histogram."""
    # No channel specified
    df = pd.read_csv("lux/data/cars.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext([lux.Spec(attribute="MilesPerGal",channel="y")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view, "MilesPerGal", "y")
    # Record instead of count
    # df.setContext([lux.Spec(attribute="MilesPerGal",channel="x")])
    # assert df.viewCollection[0].getAttrByChannel("x")[0].attribute == "MilesPerGal"
    # assert df.viewCollection[0].getAttrByChannel("y")[0].attribute == "count()"
def test_autoencodingLineChart():
    """Channel auto-assignment for line charts: temporal attribute defaults
    to x unless explicitly pinned elsewhere; duplicate channels must raise."""
    df = pd.read_csv("lux/data/cars.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext([lux.Spec(attribute="Year"),lux.Spec(attribute="Acceleration")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view, "Year", "x")
    checkAttributeOnChannel(view, "Acceleration", "y")
    # Partial channel specified
    df.setContext([lux.Spec(attribute="Year", channel="y"),lux.Spec(attribute="Acceleration")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view, "Year", "y")
    checkAttributeOnChannel(view, "Acceleration", "x")
    # Full channel specified
    df.setContext([lux.Spec(attribute="Year", channel="y"),lux.Spec(attribute="Acceleration", channel="x")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view, "Year", "y")
    checkAttributeOnChannel(view, "Acceleration", "x")
    with pytest.raises(ValueError):
        # Should throw error because there should not be columns with the same channel specified
        df.setContext([lux.Spec(attribute="Year", channel="x"), lux.Spec(attribute="Acceleration", channel="x")])
def test_autoencodingColorLineChart():
    """Temporal + measure + dimension should map the dimension to color."""
    df = pd.read_csv("lux/data/cars.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext([lux.Spec(attribute="Year"),lux.Spec(attribute="Acceleration"),lux.Spec(attribute="Origin")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view,"Year","x")
    checkAttributeOnChannel(view,"Acceleration","y")
    checkAttributeOnChannel(view,"Origin","color")
def test_autoencodingColorScatterChart():
    """Two measures + dimension: the dimension (or an explicitly pinned
    measure) should land on the color channel."""
    df = pd.read_csv("lux/data/cars.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y') # change pandas dtype for the column "Year" to datetype
    df.setContext([lux.Spec(attribute="Horsepower"),lux.Spec(attribute="Acceleration"),lux.Spec(attribute="Origin")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view,"Origin","color")
    df.setContext([lux.Spec(attribute="Horsepower"),lux.Spec(attribute="Acceleration",channel="color"),lux.Spec(attribute="Origin")])
    view = df.viewCollection[0]
    checkAttributeOnChannel(view,"Acceleration","color")
def test_populateOptions():
    """Wildcard specs should expand to all columns, or to measures only when
    dataModel='measure' is given."""
    from lux.compiler.Compiler import Compiler
    df = pd.read_csv("lux/data/cars.csv")
    df.setContext([lux.Spec(attribute="?"), lux.Spec(attribute="MilesPerGal")])
    colSet = set()
    for specOptions in Compiler.populateWildcardOptions(df.context,df)["attributes"]:
        for spec in specOptions:
            colSet.add(spec.attribute)
    assert listEqual(list(colSet), list(df.columns))
    df.setContext([lux.Spec(attribute="?",dataModel="measure"), lux.Spec(attribute="MilesPerGal")])
    colSet = set()
    for specOptions in Compiler.populateWildcardOptions(df.context,df)["attributes"]:
        for spec in specOptions:
            colSet.add(spec.attribute)
    assert listEqual(list(colSet), ['Acceleration', 'Weight', 'Horsepower', 'MilesPerGal', 'Displacement'])
def listEqual(l1, l2):
    """Return True when the two lists contain the same elements, ignoring order.

    Fix: the original called ``l1.sort()`` / ``l2.sort()``, silently mutating
    the caller's lists; ``sorted()`` compares copies instead.
    """
    return sorted(l1) == sorted(l2)
def checkAttributeOnChannel(view,attrName,channelName):
    # Helper: assert that the first attribute mapped to `channelName`
    # in the view is exactly `attrName`.
    assert view.getAttrByChannel(channelName)[0].attribute == attrName
| StarcoderdataPython |
120702 | <reponame>Zepyhrus/tf2<filename>src/4-4.py
import tensorflow as tf
import tensorflow_datasets as tfds
tf.compat.v1.disable_v2_behavior()
def get_iris_data():
    """Download the TFDS iris training split and dump it to ``iris.csv``.

    Each row is: index, the four features formatted to one decimal place,
    then the integer label.
    """
    ds_train, *_ = tfds.load(name='iris', split=['train'])
    with open('iris.csv', 'w') as f:
        for i, ds in enumerate(ds_train):
            features = ds['features'].numpy()
            label = ds['label'].numpy()
            cells = [str(i)] + [f'{value:.1f}' for value in features] + [str(label)]
            f.write(','.join(cells) + '\n')
def read_data(file_queue):
    """Read one CSV line from the TF1 file queue and decode it.

    Returns (features, label) where features is a stacked tensor of the four
    middle columns; the first column (row index) is dropped.
    """
    reader = tf.compat.v1.TextLineReader(skip_header_lines=1)
    key, value = reader.read(file_queue)
    # Column defaults also fix the column dtypes: int, 4 floats, int label.
    defaults = [[0], [0.], [0.], [0.], [0.], [0]]
    csv_column = tf.io.decode_csv(records=value, record_defaults=defaults)
    feature_column = [i for i in csv_column[1:-1]]
    label_column = csv_column[-1]
    return tf.stack(feature_column), label_column
def create_pipeline(filename, batch_size, num_epochs=None):
    """Build a TF1 shuffled-batch input pipeline over a CSV file.

    num_epochs=None repeats the file forever; otherwise the queue raises
    OutOfRangeError after that many passes.
    """
    file_queue = tf.compat.v1.train.string_input_producer([filename], num_epochs=num_epochs)
    feature, label = read_data(file_queue)
    min_after_dequeue = 1000 # keep at least 1000 data
    capacity = min_after_dequeue + batch_size
    feature_batch, label_batch = tf.compat.v1.train.shuffle_batch(
        [feature, label],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue
    )
    return feature_batch, label_batch
if __name__ == "__main__":
x_train_batch, y_train_batch = create_pipeline('iris.csv', 32, num_epochs=100)
x_test, y_test = create_pipeline('iris.csv', 32)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.local_variables_initializer()) # this is necessary
coord = tf.train.Coordinator()
threads = tf.compat.v1.train.start_queue_runners(coord=coord)
try:
while True:
if coord.should_stop():
break
example, label = sess.run([x_train_batch, y_train_batch])
print('training data: ', example)
print('training label: ', label)
except tf.errors.OutOfRangeError:
print('Done reading')
example, label = sess.run([x_test, y_test])
print('test data: ', example)
print('test label: ', label)
except KeyboardInterrupt:
print('Terminated by keyboard')
finally:
coord.request_stop()
coord.join(threads)
sess.close()
| StarcoderdataPython |
93349 | print('Gathering psychic powers...')
import re
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
# word_vectors.save('wvsubset')
# word_vectors = KeyedVectors.load("wvsubset", mmap='r')
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r"\w+")
from nltk import pos_tag
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
read_WVM_from_file = False
def read_words():
    """Read one emotion word per line from ``emotional_words2.txt``.

    Fixes: use a context manager and direct file iteration instead of a
    ``while True``/``readline`` loop, and skip lines that tokenize to
    nothing — a blank line used to raise IndexError on ``word[0]`` because
    the old ``len(line) > 0`` guard is always true for lines read from a
    file (they still contain the newline).
    """
    words = []
    with open('emotional_words2.txt', 'r') as fid:
        for line in fid:
            tokens = tokenizer.tokenize(line)
            if tokens:
                words.append(tokens[0])
    return words
def get_WVM():
    """Build the (emotion words, embedding matrix) pair.

    Fix: the original deduplicated by round-tripping through a ``set``,
    which randomized word order between runs — dangerous because the word
    list and matrix are cached to .npy files and must stay aligned.
    ``dict.fromkeys`` deduplicates while preserving file order.
    """
    words = [lemmatizer.lemmatize(word) for word in read_words()]
    # Keep only words present in the embedding vocabulary, deduplicated
    # in their original order.
    emowords = list(dict.fromkeys(w for w in words if w in word_vectors.vocab))
    WVM = np.array([word_vectors[w] for w in emowords])
    return emowords, WVM
if read_WVM_from_file:
    # Load a previously cached vocabulary/matrix pair from disk.
    emowords = np.load('emowords.npy')
    WVM = np.load('WVM.npy')
else:
    # Rebuild from the word list and cache both arrays for the next run.
    emowords, WVM = get_WVM()
    np.save('emowords', emowords)
    np.save('WVM', WVM)
def emodet(text_all):
    """Guess the strongest emotion words expressed in free text.

    Splits the text into clause-like chunks, scores each token against the
    emotion-word embedding matrix WVM (with a crude negation heuristic), and
    prints/returns a sentence naming the top matches.
    """
    sentences = re.split(r'[,;.-]', text_all)
    sims_all = []
    for text in sentences:
        if len(text) == 0:
            continue
        tokens = tokenizer.tokenize(text)
        tokens = [lemmatizer.lemmatize(token) for token in tokens]
        tokens = [token0 for token0 in tokens if token0 in word_vectors.vocab]
        # Test for negation-tokens (for adjectives)
        # NOTE(review): heuristic — projects each token onto the
        # ('not' - 'very') direction; an odd count of strong hits flips the
        # sign of every similarity in this clause.
        neg_v = [np.dot(word_vectors['not'] - word_vectors['very'], word_vectors[token]) for token in tokens]
        neg_v = np.array(neg_v)
        # For nouns
        #neg_v2 = [np.dot(word_vectors['lose'] - word_vectors['gain'], word_vectors[token]) for token in tokens]
        #neg_v = neg_v + np.array(neg_v2)
        nonnegation = 1 - 2 * np.mod(len(neg_v[neg_v > 1]), 2)
        # Get nouns and adjectives after preprocessing
        pt = pos_tag(tokens);
        tokens2 = [x[0] for x in pt if x[1] in ['JJ', 'NN', 'RB', 'VB']]
        if len(tokens2) > 0:
            tokens = tokens2
        # Find strongest match to an emotion
        token_sims = []
        for token0 in tokens:
            sims0 = [nonnegation * np.dot(word_vectors[token0], WVMv) for WVMv in WVM]
            token_sims.append(sims0)
        sims_all.append(token_sims)
    # Get emotional meaning per sentences and average the vector
    nEmos_per_token = 3
    nEmos_total = 3
    emo_indices = []
    emo_sims = []
    for sentence_level in sims_all:
        for token_level in sentence_level:
            # Keep the top nEmos_per_token emotion indices/scores per token.
            token_level = np.array(token_level)
            indices = np.argsort(token_level)
            emo_indices.append(indices[-nEmos_per_token:])
            token_level_s = token_level[indices]
            emo_sims.append(token_level_s[-nEmos_per_token:])
    # mswv = word_vectors.most_similar(positive=[sims])
    emo_indices = np.array(emo_indices).flatten()
    emo_sims = np.array(emo_sims).flatten()
    # return sims_all, emo_indices, emo_sims
    # Rank all candidate emotions globally by similarity, best last.
    indices = np.argsort(emo_sims)
    indices = emo_indices[indices]
    output = 'I sense you are feeling... '
    iEmo = 1
    nEmo = 0
    used_indices = []
    # Walk the ranking from strongest down, skipping duplicate emotions,
    # until nEmos_total distinct emotions have been named.
    # NOTE(review): raises IndexError if the text yields fewer than
    # nEmos_total distinct candidates — confirm inputs are long enough.
    while nEmo < nEmos_total:
        this_index = indices[-iEmo]
        if not this_index in used_indices:
            output = output + emowords[this_index] + "... "
            used_indices.append(this_index)
            nEmo = nEmo + 1
        iEmo = iEmo + 1
    print(output)
    return output
| StarcoderdataPython |
3386902 | <gh_stars>0
# -*- coding: utf-8 -*-
import json
import struct
import threading
from io import BytesIO
from collections import OrderedDict
from tempfile import TemporaryFile
from configparser import RawConfigParser
class Result(dict):
    """Dict-based operation result with a status code, message and payload.

    Truthiness reflects success: a Result is truthy iff ``code == 0``.
    The optional ``data``/``extra`` keys are present only when supplied.
    """

    def __init__(self, code=0, msg=r'', data=None, extra=None):
        super().__init__(code=code, msg=msg)
        # Only record payload keys that were actually provided.
        for key, value in ((r'data', data), (r'extra', extra)):
            if value is not None:
                self[key] = value

    def __bool__(self):
        # Success is encoded as a zero status code.
        return self.code == 0

    @property
    def code(self):
        return self.get(r'code')

    @property
    def msg(self):
        return self.get(r'msg')

    @property
    def data(self):
        return self.get(r'data', None)

    @property
    def extra(self):
        return self.get(r'extra', None)
class NullData:
    """Null-object placeholder: acts like an empty/zero/false value."""

    def __int__(self):
        return 0

    def __float__(self):
        return 0.0

    def __bool__(self):
        return False

    def __len__(self):
        return 0

    def __repr__(self):
        return r''

    def __eq__(self, obj):
        # Compares equal to anything falsy (None, 0, '', empty containers).
        return not bool(obj)

    def __nonzero__(self):
        # Python 2 spelling of __bool__, kept for parity with the original.
        return False

    def __cmp__(self, val):
        # Python 2 ordering hook: equal to None, "greater" than anything else.
        return 0 if val is None else 1
class ErrorData(NullData):
    """Null-object that still carries a human-readable error description."""
    __slots__ = [r'data']

    def __init__(self, data=None):
        # Normalize any input to its string representation.
        if isinstance(data, str):
            self.data = data
        else:
            self.data = str(data)

    def __repr__(self):
        return self.data
class ThreadList(threading.local):
    # Thread-local list holder: threading.local re-runs __init__ for each
    # thread that touches the object, so every thread sees its own `data`.
    __slots__ = [r'data']
    def __init__(self):
        self.data = []
class ThreadDict(threading.local):
    # Thread-local dict holder: each thread gets an independent `data` dict
    # (threading.local re-runs __init__ per thread).
    __slots__ = [r'data']
    def __init__(self):
        self.data = {}
class Const(OrderedDict):
    """Write-once attribute/key store.

    Keys behave like constants: a key may be assigned once (or pre-declared
    with the ``_Predefine`` placeholder and then set once) but never
    reassigned or deleted.  Names starting with ``_`` are ordinary instance
    attributes and bypass the constant machinery.
    """

    class _Predefine(NullData):
        # Placeholder marking a key as declared-but-not-yet-set; such keys
        # may still be overwritten once with a real value.
        pass

    class _ConstError(TypeError):
        # Raised on any attempt to rebind or delete an existing constant.
        pass

    def __init__(self):
        super().__init__()

    def __getattr__(self, key):
        # Only invoked when normal attribute lookup fails.
        if key[:1] == r'_':
            # No base class defines __getattr__, so the original
            # ``super().__getattr__(key)`` could only raise a confusing
            # "'super' object has no attribute '__getattr__'"; raise the
            # conventional AttributeError for the missing name instead.
            raise AttributeError(key)
        else:
            return self.__getitem__(key)

    def __setattr__(self, key, val):
        if key[:1] == r'_':
            super().__setattr__(key, val)
        else:
            self.__setitem__(key, val)

    def __delattr__(self, key):
        if key[:1] == r'_':
            super().__delattr__(key)
        else:
            self.__delitem__(key)

    def __setitem__(self, key, val):
        # Reject a second assignment unless the stored value is a
        # _Predefine placeholder.
        if key in self and not isinstance(self.__getitem__(key), Const._Predefine):
            raise Const._ConstError()
        else:
            super().__setitem__(key, val)

    def __delitem__(self, key):
        # Constants can never be removed.
        raise Const._ConstError()

    def exist(self, val):
        """Return True if *val* is already stored as one of the values."""
        return val in self.values()
class ByteArray(BytesIO):
    """In-memory binary buffer with endian-aware typed read/write helpers.

    Thin wrapper around io.BytesIO that (de)serializes scalars through the
    struct module.  The byte-order prefix defaults to network order ('!')
    and can be switched via set_endian() to one of the class constants.
    """
    NETWORK = r'!'
    NATIVE = r'='
    NATIVE_ALIGNMENT = r'@'
    LITTLE_ENDIAN = r'<'
    BIG_ENDIAN = r'>'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._endian = self.NETWORK

    def _fmt_str(self, val):
        # Prefix a struct format code with the current byte-order character.
        return r'{0:s}{1:s}'.format(self._endian, val)

    def _read_value(self, code):
        # Read exactly struct.calcsize(fmt) bytes for the current byte order.
        # The previous implementation hard-coded byte counts, which broke
        # 'l'/'L': with a standard-size prefix such as '!' those formats are
        # 4 bytes, not 8, so read_long()/read_unsigned_long() always raised
        # struct.error.
        fmt = self._fmt_str(code)
        return struct.unpack(fmt, self.read(struct.calcsize(fmt)))[0]

    def _write_value(self, code, val):
        self.write(struct.pack(self._fmt_str(code), val))

    def get_endian(self):
        return self._endian

    def set_endian(self, val):
        self._endian = val

    def read_pad_byte(self, _len):
        struct.unpack(self._fmt_str(r'{0:d}x'.format(_len)), self.read(_len))

    def write_pad_byte(self, _len):
        self.write(struct.pack(self._fmt_str(r'{0:d}x'.format(_len))))

    def read_char(self):
        return self._read_value(r'c')

    def write_char(self, val):
        self._write_value(r'c', val)

    def read_signed_char(self):
        return self._read_value(r'b')

    def write_signed_char(self, val):
        self._write_value(r'b', val)

    def read_unsigned_char(self):
        return self._read_value(r'B')

    def write_unsigned_char(self, val):
        self._write_value(r'B', val)

    def read_bool(self):
        return self._read_value(r'?')

    def write_bool(self, val):
        self._write_value(r'?', val)

    def read_short(self):
        return self._read_value(r'h')

    def write_short(self, val):
        self._write_value(r'h', val)

    def read_unsigned_short(self):
        return self._read_value(r'H')

    def write_unsigned_short(self, val):
        self._write_value(r'H', val)

    def read_int(self):
        return self._read_value(r'i')

    def write_int(self, val):
        self._write_value(r'i', val)

    def read_unsigned_int(self):
        return self._read_value(r'I')

    def write_unsigned_int(self, val):
        self._write_value(r'I', val)

    def read_long(self):
        return self._read_value(r'l')

    def write_long(self, val):
        self._write_value(r'l', val)

    def read_unsigned_long(self):
        return self._read_value(r'L')

    def write_unsigned_long(self, val):
        self._write_value(r'L', val)

    def read_long_long(self):
        return self._read_value(r'q')

    def write_long_long(self, val):
        self._write_value(r'q', val)

    def read_unsigned_long_long(self):
        return self._read_value(r'Q')

    def write_unsigned_long_long(self, val):
        self._write_value(r'Q', val)

    def read_float(self):
        return self._read_value(r'f')

    def write_float(self, val):
        self._write_value(r'f', val)

    def read_double(self):
        return self._read_value(r'd')

    def write_double(self, val):
        self._write_value(r'd', val)

    def read_bytes(self, _len):
        return struct.unpack(self._fmt_str(r'{0:d}s'.format(_len)), self.read(_len))[0]

    def write_bytes(self, val):
        self.write(struct.pack(self._fmt_str(r'{0:d}s'.format(len(val))), val))

    def read_string(self, _len):
        return self.read_bytes(_len).decode()

    def write_string(self, val):
        self.write_bytes(val.encode())

    def read_pascal_bytes(self, _len):
        # 'p' fields carry a one-byte length prefix inside the field width,
        # so _len must include that extra byte.
        return struct.unpack(self._fmt_str(r'{0:d}p'.format(_len)), self.read(_len))[0]

    def write_pascal_bytes(self, val):
        # The field width must include the length byte; the original used
        # len(val) alone, which silently truncated the last payload byte.
        self.write(struct.pack(self._fmt_str(r'{0:d}p'.format(len(val) + 1)), val))

    def read_pascal_string(self, _len):
        return self.read_pascal_bytes(_len).decode()

    def write_pascal_string(self, val):
        self.write_pascal_bytes(val.encode())

    def read_python_int(self, _len):
        # NOTE: 'P' (void pointer) is only valid with native ('@') byte
        # order.  _len is kept for backward compatibility, but the byte
        # count comes from struct.calcsize: '{n}P' means n pointers, not
        # n bytes, so the original call was malformed.
        fmt = self._fmt_str(r'P')
        return struct.unpack(fmt, self.read(struct.calcsize(fmt)))[0]

    def write_python_int(self, val):
        # The original called len(val) on an int, which raised TypeError.
        self._write_value(r'P', val)
class ConfigParser(RawConfigParser):
    """RawConfigParser with typed and list-splitting convenience getters."""

    def getstr(self, section, option, default=None, **kwargs):
        """Return the option value, or *default* when it is empty/falsy."""
        val = self.get(section, option, **kwargs)
        return val if val else default

    def getjson(self, section, option, **kwargs):
        """Parse the option value as a JSON document."""
        val = self.get(section, option, **kwargs)
        return json.loads(val)

    def _split_host(self, val):
        # Split on the LAST colon so values containing extra colons (e.g.
        # an IPv6-ish 'a:b:8080') still separate host from port; the
        # original split(':', 2) produced three parts for such values and
        # raised ValueError on unpacking.
        if val.find(r':') > 0:
            host, port = val.rsplit(r':', 1)
            return host.strip(), int(port.strip())
        else:
            return None

    def get_split_host(self, section, option, **kwargs):
        """Return (host, port) parsed from a 'host:port' option, else None."""
        val = self.get(section, option, **kwargs)
        return self._split_host(val)

    def _split_str(self, val, sep=r'|'):
        return tuple(temp.strip() for temp in val.split(sep))

    def get_split_str(self, section, option, sep=r'|', **kwargs):
        """Return the option split on *sep* as a tuple of stripped strings."""
        val = self.get(section, option, **kwargs)
        return self._split_str(val, sep)

    def _split_int(self, val, sep=r','):
        return tuple(int(temp.strip()) for temp in val.split(sep))

    def get_split_int(self, section, option, sep=r',', **kwargs):
        """Return the option split on *sep* as a tuple of ints."""
        val = self.get(section, option, **kwargs)
        return self._split_int(val, sep)

    def split_float(self, val, sep=r','):
        return tuple(float(item.strip()) for item in val.split(sep))

    def get_split_float(self, section, option, sep=r',', **kwargs):
        """Return the option split on *sep* as a tuple of floats."""
        val = self.get(section, option, **kwargs)
        return self.split_float(val, sep)
class Configure(Const):
    """Const-backed configuration container fed by a ConfigParser."""

    def __init__(self):
        super().__init__()
        self._parser = ConfigParser()

    def _init_options(self):
        # Hook for subclasses: rebuild the constant set after (re)loading.
        self.clear()

    def get_option(self, section, option):
        return self._parser.get(section, option)

    def get_options(self, section):
        """Return every option of *section* as a plain dict."""
        return {name: self._parser.get(section, name)
                for name in self._parser.options(section)}

    def set_options(self, section, **options):
        """Set the given options on *section*, creating it if needed."""
        if not self._parser.has_section(section):
            self._parser.add_section(section)
        for name, value in options.items():
            self._parser.set(section, name, value)
        self._init_options()

    def read(self, files):
        """Load configuration from file path(s), replacing prior content."""
        self._parser.clear()
        self._parser.read(files, r'utf-8')
        self._init_options()

    def read_str(self, val):
        """Load configuration from an INI-formatted string."""
        self._parser.clear()
        self._parser.read_string(val)
        self._init_options()

    def read_dict(self, val):
        """Load configuration from a mapping of sections to options."""
        self._parser.clear()
        self._parser.read_dict(val)
        self._init_options()
class FileBuffer:
    """FIFO byte buffer spilled to disk across fixed-size temporary files.

    write() appends to the newest slice and starts a fresh slice once the
    current one reaches ``slice_size``; read() consumes from the oldest
    slice, closing and discarding slices as they drain.
    """

    def __init__(self, slice_size=0x20000):
        self._buffers = [TemporaryFile()]
        self._slice_size = slice_size
        self._read_offset = 0  # read position within the oldest slice

    def write(self, data):
        buffer = self._buffers[-1]
        buffer.seek(0, 2)  # always append at the end of the newest slice
        buffer.write(data)
        buffer.flush()
        if buffer.tell() >= self._slice_size:
            # Current slice is full: subsequent writes go to a new file.
            self._buffers.append(TemporaryFile())

    def read(self, size=None):
        """Read up to *size* bytes (all pending data when size is None).

        Advances to the next slice when the current one is exhausted
        instead of returning b'' while data is still pending: the original
        returned one empty read per drained slice, which callers could
        mistake for end-of-data.
        """
        while True:
            buffer = self._buffers[0]
            buffer.seek(self._read_offset, 0)
            result = buffer.read(size)
            if len(result) == 0 and len(self._buffers) > 1:
                # Oldest slice fully consumed: discard it and retry on the
                # next one.
                self._buffers.pop(0).close()
                self._read_offset = 0
                continue
            self._read_offset = buffer.tell()
            return result
| StarcoderdataPython |
3228956 | """
agents
Created by: <NAME>
On: 21-11-19, 12:04
"""
from abc import abstractmethod
from tqdm import trange
from drugex.api.agent.callbacks import AgentMonitor
from drugex.api.agent.policy import PolicyGradient
from drugex.api.environ.models import Environ
from drugex.api.pretrain.generators import Generator
from drugex.api.pretrain.serialization import StateProvider
class Agent(StateProvider):
    """Abstract RL agent tying together an environment, generators and a policy."""

    class UntrainedException(Exception):
        # Raised when sampling is requested before training produced a state.
        pass

    def __init__(self, monitor : AgentMonitor, environ : Environ, exploit : Generator, policy : PolicyGradient, explore = None, train_params=None):
        self.monitor = monitor
        self.environ = environ
        self.exploit = exploit
        self.explore = explore
        self.policy = policy
        # Fall back to defaults when no training parameters are given.
        self.train_params = train_params or dict()
        self.train_params.setdefault("n_epochs", 1000)
        self.n_epochs = self.train_params["n_epochs"]

    @abstractmethod
    def train(self):
        pass

    @abstractmethod
    def sample(self, n_samples):
        pass

    @abstractmethod
    def getAgent(self):
        pass
class DrugExAgent(Agent):
    """DrugEx RL agent: policy-gradient training of the exploitation generator.

    Tracks the generator state that produced the best fraction of desired
    SMILES and exposes it via the StateProvider interface (getState).
    """
    def __init__(self, monitor : AgentMonitor, environ: Environ, exploit: Generator, policy: PolicyGradient, explore=None, train_params=None):
        super().__init__(monitor, environ, exploit, policy, explore, train_params=train_params)
        # Best generator state seen so far; None until the first epoch runs.
        self.best_state = None
    def getState(self):
        """Return the best generator state recorded during training."""
        return self.best_state
    def getAgent(self):
        """Restore the best recorded state into the exploit generator and return it."""
        self.exploit.setState(self.best_state)
        return self.exploit
    def train(self):
        """Run policy-gradient training for n_epochs, keeping the best state."""
        best_score = 0
        for epoch in trange(self.n_epochs, desc="Epoch"):
            # One policy-gradient update of the exploit generator.
            self.policy(self.environ, self.exploit, explore=self.explore)
            self.monitor.model(self.exploit.model)
            # choosing the best model
            smiles, valids = self.exploit.sample(1000)
            scores = self.environ.predictSMILES(smiles)
            # Zero out scores of invalid SMILES (scores/valids appear to be
            # numpy arrays given the boolean-mask indexing — TODO confirm).
            scores[valids == False] = 0
            # Fraction of the 1000 samples scoring at least 0.5.
            unique = (scores >= 0.5).sum() / 1000
            # The model with best percentage of unique desired SMILES will be persisted on the hard drive.
            is_best = False
            if best_score < unique or self.best_state is None:
                is_best = True
                self.best_state = self.exploit.getState()
                best_score = unique
            # monitor performance information
            self.monitor.performance(scores, valids, unique, best_score)
            for i, smile in enumerate(smiles):
                self.monitor.smiles(smile, scores[i])
            # monitor state
            self.monitor.state(self.exploit.getState(), is_best)
            # Learning rate exponential decay
            for param_group in self.exploit.model.optim.param_groups:
                param_group['lr'] *= (1 - 0.01)
            # finalize epoch monitoring
            self.monitor.finalizeEpoch(epoch, self.n_epochs)
        self.monitor.close()
        # NOTE(review): passes the agent itself (a StateProvider) rather than
        # self.best_state — verify that Generator.setState accepts a
        # StateProvider and resolves it via getState().
        self.exploit.setState(self)
    def sample(self, n_samples):
        """Sample from the trained generator; raises UntrainedException before train()."""
        if self.best_state:
            return self.exploit.sample(n_samples=n_samples)
        else:
            raise self.UntrainedException("You have to train the agent first!")
| StarcoderdataPython |
# Shared configuration / logging bootstrap, executed at import time.
# NOTE: Python 2 module names are used ('import ConfigParser').
import os
import json
import ConfigParser
import logging.config
# Project root: two directories above this file.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# load the shared settings file
settings_file_path = os.path.join(base_dir, 'config', 'settings.config')
settings = ConfigParser.ConfigParser()
settings.read(settings_file_path)
# set up logging
with open(os.path.join(base_dir, 'config', 'logging.json'), 'r') as f:
    logging_config = json.load(f)
logging.config.dictConfig(logging_config)
log = logging.getLogger(__name__)
# Visual separator in the log marking a fresh process start.
log.info("---------------------------------------------------------------------------")
# Quiet the chatty 'requests' library by raising its log level to INFO.
requests_logger = logging.getLogger('requests')
requests_logger.setLevel(logging.INFO)
1603221 | <gh_stars>1-10
import unittest
from typing import Optional
from ravendb.documents.indexes.index_creation import AbstractIndexCreationTask
from ravendb.documents.session.loaders.include import QueryIncludeBuilder
from ravendb.documents.session.misc import TransactionMode, SessionOptions
from ravendb.documents.session.query import QueryStatistics
from ravendb.infrastructure.orders import Company, Address, Employee
from ravendb.tests.test_base import TestBase
from ravendb.util.util import StartingWithOptions
class Companies_ByName(AbstractIndexCreationTask):
    """Static RavenDB index mapping Companies documents by their `name` field."""
    def __init__(self):
        super().__init__()
        self.map = "from c in docs.Companies select new { name = c.name }"
class TestRavenDB14006(TestBase):
    """Tests for compare-exchange value tracking and includes (RavenDB-14006).

    Covers session-level caching of compare-exchange values (request counts
    are asserted to prove values are served from the session cache) and
    compare-exchange includes in dynamic/static queries, both via the
    include builder and via JavaScript projections.
    """
    def setUp(self):
        super().setUp()
    def test_compare_exchange_value_tracking_in_session_starts_with(self):
        """Prefix and bulk fetches of cmpxchg values hit the server only once."""
        all_companies = []
        session_options = SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
        with self.store.open_session(session_options=session_options) as session:
            for i in range(10):
                company = Company(f"companies/{i}", "companies/hr", "HR")
                all_companies.append(company.Id)
                session.advanced.cluster_transaction.create_compare_exchange_value(company.Id, company)
            session.save_changes()
        with self.store.open_session(session_options=session_options) as session:
            results = session.advanced.cluster_transaction.get_compare_exchange_values(
                StartingWithOptions("comp"), Company
            )
            self.assertEqual(10, len(results))
            self.assertTrue(all(map(lambda x: x is not None, results)))
            self.assertEqual(1, session.number_of_requests)
            # Subsequent lookups are served from the session cache.
            results = session.advanced.cluster_transaction.get_compare_exchange_values(all_companies, Company)
            self.assertEqual(10, len(results))
            self.assertTrue(all(map(lambda x: x is not None, results)))
            self.assertEqual(1, session.number_of_requests)
            for company_id in all_companies:
                result = session.advanced.cluster_transaction.get_compare_exchange_value(company_id, Company)
                self.assertIsNotNone(result.value)
            self.assertEqual(1, session.number_of_requests)
    def test_compare_exchange_value_tracking_in_session(self):
        """Created/fetched cmpxchg values are tracked and cached per session."""
        session_options = SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
        with self.store.open_session(session_options=session_options) as session:
            company = Company("companies/1", "companies/cf", "CF")
            session.store(company)
            number_of_requests = session.number_of_requests
            address = Address(city="Torun")
            session.advanced.cluster_transaction.create_compare_exchange_value(company.external_id, address)
            self.assertEqual(number_of_requests, session.number_of_requests)
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(company.external_id, Address)
            self.assertEqual(number_of_requests, session.number_of_requests)
            self.assertEqual(address, value1.value)
            self.assertEqual(company.external_id, value1.key)
            self.assertEqual(0, value1.index)
            session.save_changes()
            self.assertEqual(number_of_requests + 1, session.number_of_requests)
            self.assertEqual(address, value1.value)
            self.assertEqual(value1.key, company.external_id)
            # save_changes assigns the server-side index to the tracked value.
            self.assertGreater(value1.index, 0)
            value2 = session.advanced.cluster_transaction.get_compare_exchange_value(company.external_id, Address)
            self.assertEqual(number_of_requests + 1, session.number_of_requests)
            self.assertEqual(value1, value2)
            session.save_changes()
            self.assertEqual(number_of_requests + 1, session.number_of_requests)
            session.clear()
            # After clear() the session cache is gone, so a fresh value object
            # is returned.
            value3 = session.advanced.cluster_transaction.get_compare_exchange_value(company.external_id, Address)
            self.assertNotEqual(value2, value3)
        with self.store.open_session(session_options=session_options) as session:
            address = Address(city="Hadera")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/hr", address)
            session.save_changes()
        with self.store.open_session(session_options=session_options) as session:
            number_of_requests = session.number_of_requests
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value("companies/cf", Address)
            self.assertEqual(number_of_requests + 1, session.number_of_requests)
            value2 = session.advanced.cluster_transaction.get_compare_exchange_value("companies/hr", Address)
            self.assertEqual(number_of_requests + 2, session.number_of_requests)
            values = session.advanced.cluster_transaction.get_compare_exchange_values(
                ["companies/cf", "companies/hr"], Address
            )
            self.assertEqual(number_of_requests + 2, session.number_of_requests)
            self.assertEqual(2, len(values))
            self.assertEqual(value1, values.get(value1.key))
            self.assertEqual(value2, values.get(value2.key))
            values = session.advanced.cluster_transaction.get_compare_exchange_values(
                ["companies/cf", "companies/hr", "companies/hx"], Address
            )
            self.assertEqual(number_of_requests + 3, session.number_of_requests)
            self.assertEqual(3, len(values))
            self.assertEqual(value1, values.get(value1.key))
            self.assertEqual(value2, values.get(value2.key))
            # Missing keys are tracked as None and do not trigger new requests.
            value3 = session.advanced.cluster_transaction.get_compare_exchange_value("companies/hx", Address)
            self.assertEqual(number_of_requests + 3, session.number_of_requests)
            self.assertIsNone(value3)
            self.assertIsNone(values.get("companies/hx", None))
            session.save_changes()
            self.assertEqual(number_of_requests + 3, session.number_of_requests)
            address = Address(city="Bydgoszcz")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/hx", address)
            session.save_changes()
            self.assertEqual(number_of_requests + 4, session.number_of_requests)
    def test_can_use_compare_exchange_value_includes_in_queries_static_javascript(self):
        """cmpxchg includes via a JS projection over a static index."""
        Companies_ByName().execute(self.store)
        session_options = SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
        with self.store.open_session(session_options=session_options) as session:
            employee = Employee("employees/1", notes=["companies/cf", "companies/hr"])
            session.store(employee)
            company = Company("companies/1", "companies/cf", "CF")
            session.store(company)
            address1 = Address(city="Torun")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/cf", address1)
            address2 = Address(city="Hadera")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/hr", address2)
            session.save_changes()
        self.wait_for_indexing(self.store)
        with self.store.open_session(session_options=session_options) as session:
            result_etag = None
            statistics: Optional[QueryStatistics] = None
            def __statistics_callback(stats: QueryStatistics):
                nonlocal statistics
                statistics = stats  # plug-in the reference, value will be changed
            companies = list(
                session.advanced.raw_query(
                    "declare function incl(c) {\n"
                    + "    includes.cmpxchg(c.external_id);\n"
                    + "    return c;\n"
                    + "}\n"
                    + "from index 'Companies/ByName' as c\n"
                    + "select incl(c)",
                ).statistics(__statistics_callback)
            )
            self.assertEqual(1, len(companies))
            self.assertGreaterEqual(statistics.duration_in_ms, 0)
            number_of_requests = session.number_of_requests
            result_etag = statistics.result_etag
            # The include made the cmpxchg value available without a request.
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Torun", value1.value.city)
            self.assertEqual(number_of_requests, session.number_of_requests)
            companies = list(
                session.advanced.raw_query(
                    "declare function incl(c) {\n"
                    + "    includes.cmpxchg(c.external_id);\n"
                    + "    return c;\n"
                    + "}\n"
                    + "from index 'Companies/ByName' as c\n"
                    + "select incl(c)",
                ).statistics(__statistics_callback)
            )
            self.assertEqual(1, len(companies))
            # duration == -1 indicates the result came from the query cache.
            self.assertEqual(-1, statistics.duration_in_ms)
            self.assertEqual(result_etag, statistics.result_etag)
            result_etag = statistics.result_etag
            with self.store.open_session(session_options=session_options) as inner_session:
                value = inner_session.advanced.cluster_transaction.get_compare_exchange_value(
                    companies[0].external_id, Address
                )
                value.value.city = "Bydgoszcz"
                inner_session.save_changes()
                self.wait_for_indexing(self.store)
            companies = list(
                session.advanced.raw_query(
                    "declare function incl(c) {\n"
                    + "    includes.cmpxchg(c.external_id);\n"
                    + "    return c;\n"
                    + "}\n"
                    + "from index 'Companies/ByName' as c\n"
                    + "select incl(c)",
                ).statistics(__statistics_callback)
            )
            self.assertEqual(1, len(companies))
            self.assertGreaterEqual(statistics.duration_in_ms, 0)
            self.assertNotEqual(result_etag, statistics.result_etag)
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Bydgoszcz", value1.value.city)
    def test_can_use_compare_exchange_value_includes_in_queries_dynamic_javascript(self):
        """cmpxchg includes via a JS projection over a dynamic query."""
        session_options = SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
        with self.store.open_session(session_options=session_options) as session:
            employee = Employee("employees/1", notes=["companies/cf", "companies/hr"])
            session.store(employee)
            company = Company("companies/1", "companies/cf", "CF")
            session.store(company)
            address1 = Address(city="Torun")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/cf", address1)
            address2 = Address(city="Hadera")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/hr", address2)
            session.save_changes()
        self.wait_for_indexing(self.store)
        with self.store.open_session(session_options=session_options) as session:
            result_etag = None
            statistics: Optional[QueryStatistics] = None
            def __statistics_callback(stats: QueryStatistics):
                nonlocal statistics
                statistics = stats  # plug-in the reference, value will be changed
            companies = list(
                session.advanced.raw_query(
                    "declare function incl(c) {\n"
                    + "    includes.cmpxchg(c.external_id);\n"
                    + "    return c;\n"
                    + "}\n"
                    + "from Companies as c\n"
                    + "select incl(c)",
                    Company,
                ).statistics(__statistics_callback)
            )
            self.assertEqual(1, len(companies))
            self.assertGreater(statistics.duration_in_ms, 0)
            number_of_requests = session.number_of_requests
            result_etag = statistics.result_etag
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Torun", value1.value.city)
            self.assertEqual(number_of_requests, session.number_of_requests)
            companies = list(
                session.advanced.raw_query(
                    "declare function incl(c) {\n"
                    + "    includes.cmpxchg(c.external_id);\n"
                    + "    return c;\n"
                    + "}\n"
                    + "from Companies as c\n"
                    + "select incl(c)",
                    Company,
                ).statistics(__statistics_callback)
            )
            self.assertEqual(1, len(companies))
            self.assertEqual(-1, statistics.duration_in_ms)
            self.assertEqual(result_etag, statistics.result_etag)
            with self.store.open_session(session_options=session_options) as inner_session:
                value = inner_session.advanced.cluster_transaction.get_compare_exchange_value(
                    companies[0].external_id, Address
                )
                value.value.city = "Bydgoszcz"
                inner_session.save_changes()
            companies = list(
                session.advanced.raw_query(
                    "declare function incl(c) {\n"
                    + "    includes.cmpxchg(c.external_id);\n"
                    + "    return c;\n"
                    + "}\n"
                    + "from Companies as c\n"
                    + "select incl(c)",
                    Company,
                ).statistics(__statistics_callback)
            )
            self.assertEqual(1, len(companies))
            self.assertGreaterEqual(statistics.duration_in_ms, 0)
            self.assertNotEqual(result_etag, statistics.result_etag)
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Bydgoszcz", value1.value.city)
    def test_compare_exchange_value_tracking_in_session_no_tracking(self):
        """With no_tracking, every cmpxchg fetch goes to the server."""
        company = Company("companies/1", "companies/cf", "CF")
        session_options = SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
        with self.store.open_session(session_options=session_options) as session:
            session.store(company)
            address = Address()
            address.city = "Torun"
            session.advanced.cluster_transaction.create_compare_exchange_value(company.external_id, address)
            session.save_changes()
        session_options_no_tracking = SessionOptions()
        session_options_no_tracking.no_tracking = True
        session_options_no_tracking.transaction_mode = TransactionMode.CLUSTER_WIDE
        with self.store.open_session(session_options=session_options_no_tracking) as session:
            number_of_requests = session.number_of_requests
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(company.external_id, Address)
            self.assertEqual(number_of_requests + 1, session.number_of_requests)
            value2 = session.advanced.cluster_transaction.get_compare_exchange_value(company.external_id, Address)
            self.assertEqual(number_of_requests + 2, session.number_of_requests)
            self.assertNotEqual(value1, value2)
            value3 = session.advanced.cluster_transaction.get_compare_exchange_value(company.external_id, Address)
            self.assertEqual(number_of_requests + 3, session.number_of_requests)
            self.assertNotEqual(value2, value3)
        with self.store.open_session(session_options=session_options_no_tracking) as session:
            number_of_requests = session.number_of_requests
            value1 = session.advanced.cluster_transaction.get_compare_exchange_values_starting_with(
                company.external_id, object_type=Address
            )
            self.assertEqual(number_of_requests + 1, session.number_of_requests)
            value2 = session.advanced.cluster_transaction.get_compare_exchange_values_starting_with(
                company.external_id, object_type=Address
            )
            self.assertEqual(number_of_requests + 2, session.number_of_requests)
            self.assertNotEqual(value1, value2)
            value3 = session.advanced.cluster_transaction.get_compare_exchange_values_starting_with(
                company.external_id, object_type=Address
            )
            self.assertEqual(number_of_requests + 3, session.number_of_requests)
            self.assertNotEqual(value2, value3)
        with self.store.open_session(session_options=session_options_no_tracking) as session:
            number_of_requests = session.number_of_requests
            value1 = session.advanced.cluster_transaction.get_compare_exchange_values([company.external_id], Address)
            self.assertEqual(number_of_requests + 1, session.number_of_requests)
            value2 = session.advanced.cluster_transaction.get_compare_exchange_values([company.external_id], Address)
            self.assertEqual(number_of_requests + 2, session.number_of_requests)
            self.assertNotEqual(value1.get(company.external_id), value2.get(company.external_id))
            value3 = session.advanced.cluster_transaction.get_compare_exchange_values([company.external_id], Address)
            self.assertEqual(number_of_requests + 3, session.number_of_requests)
            self.assertNotEqual(value2.get(company.external_id), value3.get(company.external_id))
    def test_can_use_compare_exchange_value_includes_in_queries_dynamic(self):
        """cmpxchg includes via the include builder on a dynamic query."""
        session_options = SessionOptions()
        session_options.transaction_mode = TransactionMode.CLUSTER_WIDE
        with self.store.open_session(session_options=session_options) as session:
            employee = Employee("employees/1", notes=["companies/cf", "companies/hr"])
            session.store(employee)
            company = Company("companies/1", "companies/cf", "CF")
            session.store(company)
            address1 = Address(city="Torun")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/cf", address1)
            address2 = Address(city="Hadera")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/hr", address2)
            session.save_changes()
        with self.store.open_session(session_options=session_options) as session:
            statistics: Optional[QueryStatistics] = None
            def __statistics_callback(stats: QueryStatistics):
                nonlocal statistics
                statistics = stats
            def __include_cmpxch(builder: QueryIncludeBuilder):
                builder.include_compare_exchange_value("external_id")
            companies = list(
                session.query(object_type=Company).statistics(__statistics_callback).include(__include_cmpxch)
            )
            self.assertEqual(1, len(companies))
            self.assertGreaterEqual(statistics.duration_in_ms, 0)
            result_etag = statistics.result_etag
            number_of_requests = session.number_of_requests
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Torun", value1.value.city)
            self.assertEqual(number_of_requests, session.number_of_requests)
            companies = list(
                session.query(object_type=Company).statistics(__statistics_callback).include(__include_cmpxch)
            )
            self.assertEqual(1, len(companies))
            self.assertEqual(-1, statistics.duration_in_ms)
            self.assertEqual(result_etag, statistics.result_etag)
            with self.store.open_session(session_options=session_options) as inner_session:
                value = inner_session.advanced.cluster_transaction.get_compare_exchange_value(
                    companies[0].external_id, Address
                )
                value.value.city = "Bydgoszcz"
                inner_session.save_changes()
            companies = list(
                session.query(object_type=Company).statistics(__statistics_callback).include(__include_cmpxch)
            )
            self.assertEqual(1, len(companies))
            self.assertGreaterEqual(statistics.duration_in_ms, 0)
            self.assertNotEqual(result_etag, statistics.result_etag)
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Bydgoszcz", value1.value.city)
    def test_can_use_compare_exchange_value_includes_in_queries_static(self):
        """cmpxchg includes via the include builder on a static index query."""
        Companies_ByName().execute(self.store)
        session_options = SessionOptions()
        session_options.transaction_mode = TransactionMode.CLUSTER_WIDE
        with self.store.open_session(session_options=session_options) as session:
            employee = Employee("employees/1", notes=["companies/cf", "companies/hr"])
            session.store(employee)
            company = Company("companies/1", "companies/cf", "CF")
            session.store(company)
            address1 = Address(city="Torun")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/cf", address1)
            address2 = Address(city="Hadera")
            session.advanced.cluster_transaction.create_compare_exchange_value("companies/hr", address2)
            session.save_changes()
        self.wait_for_indexing(self.store)
        def __statistics_callback(stats: QueryStatistics):
            nonlocal statistics
            statistics = stats
        def __include_cmpxch(builder: QueryIncludeBuilder):
            builder.include_compare_exchange_value("external_id")
        with self.store.open_session(session_options=session_options) as session:
            statistics: Optional[QueryStatistics] = None
            companies = list(
                session.query_index_type(Companies_ByName, Company)
                .statistics(__statistics_callback)
                .include(__include_cmpxch)
            )
            self.assertEqual(1, len(companies))
            self.assertGreaterEqual(statistics.duration_in_ms, 0)
            result_etag = statistics.result_etag
            number_of_requests = session.number_of_requests
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Torun", value1.value.city)
            self.assertEqual(number_of_requests, session.number_of_requests)
            companies = list(
                session.query_index_type(Companies_ByName, Company)
                .statistics(__statistics_callback)
                .include(__include_cmpxch)
            )
            self.assertEqual(1, len(companies))
            self.assertEqual(-1, statistics.duration_in_ms)
            self.assertEqual(result_etag, statistics.result_etag)
            with self.store.open_session(session_options=session_options) as inner_session:
                value = inner_session.advanced.cluster_transaction.get_compare_exchange_value(
                    companies[0].external_id, Address
                )
                value.value.city = "Bydgoszcz"
                inner_session.save_changes()
                self.wait_for_indexing(self.store)
            companies = list(
                session.query_index_type(Companies_ByName, Company)
                .statistics(__statistics_callback)
                .include(__include_cmpxch)
            )
            self.assertEqual(1, len(companies))
            self.assertGreaterEqual(statistics.duration_in_ms, 0)
            self.assertNotEqual(result_etag, statistics.result_etag)
            value1 = session.advanced.cluster_transaction.get_compare_exchange_value(companies[0].external_id, Address)
            self.assertEqual("Bydgoszcz", value1.value.city)
| StarcoderdataPython |
1709880 | <filename>txt2svg/txt2svg.py<gh_stars>1-10
import sys
from re import compile
import networkx as nx
import numpy as np
import math
R = 30
def parse_input():
regex = compile('(\w+)\s*->\s*((?:[^\s]+)?)\s+(\w+)')
fname = sys.argv[1]
with open(fname, 'r') as f:
lines = f.readlines()
edges = []
for line in lines:
try:
from_node, label, to_node = regex.search(line).groups()
edges.append((from_node, to_node, label))
except:
print('Error parsing line: {}'.format(line))
raise
return edges
def make_graph(edges):
    """Build an undirected graph with one edge per parsed triple (labels dropped)."""
    graph = nx.Graph()
    graph.add_edges_from((src, dst) for src, dst, _label in edges)
    return graph
def get_layout(G, width, height):
    """Compute node positions for G scaled into a width x height pixel box.

    Returns a dict mapping each node to an integer numpy array [x, y].
    """
    #layout = nx.spring_layout(G, k=12, iterations=1000)
    #layout = nx.kamada_kawai_layout(G)
    layout = nx.spectral_layout(G)
    # Bounding box of the raw layout coordinates.
    min_x = min([v[0] for v in layout.values()])
    max_x = max([v[0] for v in layout.values()])
    min_y = min([v[1] for v in layout.values()])
    max_y = max([v[1] for v in layout.values()])
    MARGIN = 0.3
    # Grow each bound by 30% of its own magnitude so nodes are not drawn on
    # the border (NOTE(review): a bound exactly at 0 gets no margin).
    min_x -= MARGIN * abs(min_x)
    max_x += MARGIN * abs(max_x)
    min_y -= MARGIN * abs(min_y)
    max_y += MARGIN * abs(max_y)
    x_diff = max_x - min_x
    y_diff = max_y - min_y
    # Normalize into [0, width] x [0, height], then truncate to int pixels.
    layout = {k: ((v[0] - min_x) / x_diff * width, (v[1] - min_y) / y_diff * height) for k, v in layout.items()}
    layout = {k: np.array(v).astype(int) for k, v in layout.items()}
    return layout
def render(W, H, edges, layout):
    """Return a complete HTML document embedding the graph as inline SVG.

    The <defs> block declares the arrow-head marker referenced by every
    edge line via marker-end="url(#arrow)".
    """
    template = '''
<div>
<svg width="%dpx" height="%dpx">
<defs>
<marker id="arrow" markerWidth="10" markerHeight="10" refX="0" refY="3" orient="auto" markerUnits="strokeWidth">
<path d="M0,0 L0,6 L6,3 z" fill="#000" />
</marker>
</defs>
%s
</svg>
</div>'''
    return template % (W, H, render_svg(edges, layout))
def render_svg(edges, layout):
    """Concatenate the SVG fragments for every edge, then every node."""
    fragments = [render_edge(layout, *edge) for edge in edges]
    fragments.extend(render_node(*position, node) for node, position in layout.items())
    return '\n'.join(fragments)
def render_node(x, y, label):
    """Render one node: a filled circle of radius R with its label centred.

    Bug fix: the circle's outline attribute was misspelled ``stoke``, so SVG
    renderers silently dropped the black border around every node.
    """
    # The first % fills the circle (R, x, y); the remaining %d/%d/%s
    # placeholders in the <text> part are filled by the second % below.
    return ('<circle stroke="black" fill="#AAEEBB" r="%d" cx="%d" cy="%d"></circle>' % (R, x, y) +
            '<text x="%d" y="%d" text-anchor="middle" stroke="black">%s</text>') % (x, y, label)
def render_edge(layout, from_node, to_node, label):
    """Render one edge as an arrow-terminated line plus its midpoint label.

    The endpoint is pulled back by shorten_by_r() so the arrow head touches
    the destination node's circle instead of being hidden under it.
    """
    return '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="black" stroke-width="1" marker-end="url(#arrow)"></line>' % (
        layout[from_node][0],
        layout[from_node][1],
        *shorten_by_r(layout[from_node][0], layout[from_node][1], layout[to_node][0], layout[to_node][1])
    ) + '<text x="%d" y="%d" text-anchor="middle" stroke="black">%s</text>' % (
        (layout[from_node][0] + layout[to_node][0]) / 2.,
        (layout[from_node][1] + layout[to_node][1]) / 2.,
        label
    )
def shorten_by_r(from_x, from_y, to_x, to_y):
    """Pull the edge's end point back toward the source by R plus the
    arrow-head length, so the marker lands on the node circle's border."""
    angle = math.atan2(from_y - to_y, from_x - to_x)
    D = R + 6 # Arrow length
    return (to_x + D * math.cos(angle), to_y + D * math.sin(angle))
def main():
    """Parse the edge file named on argv[1] and write the rendered graph
    to mypage.html in the current directory."""
    W = 500
    H = 500
    edges = parse_input()
    G = make_graph(edges)
    layout = get_layout(G, W, H)
    html = render(W, H, edges, layout)
    with open('mypage.html', 'w') as f:
        f.write(html)

if __name__ == '__main__':
    main()
| StarcoderdataPython |
92325 | <reponame>TacPhoto/Learning<gh_stars>0
import numpy
# Learning script: demonstrates basic numpy array creation, indexing,
# slicing and a few constructors, printing each result.
arr = numpy.array([
    [1, 2, 3, 4],
    [10, 20, 30, 40],
    [100, 200, 300, 400],
    [1000, 2000, 3000, 4000]
])
print(arr)
print('Arr shape: ' + str(arr.shape))
print(arr[0,])
print(arr[0,3])
print('Extracting a submatrix')
subarr = arr[1:3,1:] #rows, columns (first:last, first:last)
print(subarr)
print('------------------------------------------')
# Common constructors: identity, zeros, ones, constant fill, random.
I = numpy.eye(3)
print(I)
O = numpy.zeros((4,4))
print(O)
ones = numpy.ones((3,3))
print(ones)
full = numpy.full((2,4), 3)
print(full)
X = numpy.random.random((2,3))  # uniform [0, 1) samples — varies per run
print(X)
print('------------------------------------------')
print('X Mean: ' + str(numpy.mean(X)))
| StarcoderdataPython |
67954 | from ..li.api import LIReader
class LIViolationReader(LIReader):
    """Read violation-detail records from the L&I API, filtered by
    violation code and an optional datetime window."""

    endpoint = 'violationdetails'

    def get(self, code, since=None, until=None, params=None):
        """Fetch violations whose violation_code equals ``code``.

        :param since: exclusive lower bound on violation_datetime.
        :param until: exclusive upper bound on violation_datetime.
        :param params: extra query parameters; updated in place when given.
        """
        # Bug fix: `params={}` was a mutable default argument. Because the
        # dict is updated below, filters from one call leaked into every
        # later call that relied on the default.
        if params is None:
            params = {}
        filters = [ "violation_code eq '%s'" % code, ]
        if since:
            filters.append("violation_datetime gt %s" %
                           self.format_datetime(since))
        if until:
            filters.append("violation_datetime lt %s" %
                           self.format_datetime(until))
        params.update({
            '$expand': 'locations',
            '$filter': ' and '.join(filters),
            # NOTE(review): OData usually spells this '$orderby' — confirm
            # the service honours the un-prefixed key.
            'orderby': 'violation_datetime desc',
        })
        return super(LIViolationReader, self).get(self.endpoint, params)
| StarcoderdataPython |
1656912 | import json
class DataManager:
    """Manage persistence for an Environment instance.

    Exposes the environment's folders and data files through read-only
    properties and handles reading the page list / writing downloaded data.
    """

    def __init__(self, environment):
        # The wrapped Environment supplies all paths used below.
        self.this_environment = environment

    def return_env_dir(self):
        """Getter method for the environment folder of this environment."""
        return self.this_environment.env_folder

    def return_images_dir(self):
        """Getter method for the images folder of this environment."""
        return self.this_environment.image_folder

    def return_data_file(self):
        """Getter method for the data file of this environment."""
        return self.this_environment.data

    def return_list_data(self):
        """Getter method for the list file of this environment."""
        return self.this_environment.list_data

    def save_pages_in_data(self, pages):
        """Saves the downloaded images into the data file. The downloaded images
        are a list of strings."""
        # NOTE(review): append mode means repeated saves produce concatenated
        # JSON documents in one file, which json.load cannot read back as a
        # single document — confirm readers expect that format.
        with open(self.datafile, 'a') as data:
            json.dump(pages, data, ensure_ascii=False)

    def get_pages_list(self):
        """Reads the websites stored on listdata.txt to tell the program where
        to look and download from."""
        with open(self.list, 'r') as pages:
            return pages.readlines()

    # Read-only property aliases for the getters above. Note that `list`
    # shadows the builtin name at class scope.
    environment = property(return_env_dir)
    folder = property(return_images_dir)
    datafile = property(return_data_file)
    list = property(return_list_data)
| StarcoderdataPython |
4828407 | from uuid import uuid4
from moto.core import BaseBackend, BaseModel
from moto.wafv2 import utils
from .utils import make_arn_for_wacl, pascal_to_underscores_dict
from .exceptions import WAFV2DuplicateItemException
from moto.core.utils import iso_8601_datetime_with_milliseconds, BackendDict
import datetime
from collections import OrderedDict
US_EAST_1_REGION = "us-east-1"
GLOBAL_REGION = "global"  # pseudo-region key for the CloudFront (global-scope) backend below
class VisibilityConfig(BaseModel):
    """
    https://docs.aws.amazon.com/waf/latest/APIReference/API_VisibilityConfig.html

    Value object holding a web ACL's CloudWatch visibility settings.
    """

    def __init__(
        self, metric_name, sampled_requests_enabled, cloud_watch_metrics_enabled
    ):
        # Attribute names mirror the API's PascalCase fields in snake_case.
        self.metric_name = metric_name
        self.sampled_requests_enabled = sampled_requests_enabled
        self.cloud_watch_metrics_enabled = cloud_watch_metrics_enabled
class DefaultAction(BaseModel):
    """
    https://docs.aws.amazon.com/waf/latest/APIReference/API_DefaultAction.html
    """

    def __init__(self, allow=None, block=None):
        # Bug fix: `allow={}` / `block={}` were mutable default arguments, so
        # every instance built with the defaults shared the same two dicts —
        # mutating one ACL's action would silently change all the others.
        self.allow = allow if allow is not None else {}
        self.block = block if block is not None else {}
# TODO: Add remaining properties
class FakeWebACL(BaseModel):
    """
    https://docs.aws.amazon.com/waf/latest/APIReference/API_WebACL.html
    """

    def __init__(self, name, arn, id, visibility_config, default_action):
        # Fall back to a generated test name so callers may pass a falsy name.
        self.name = name if name else utils.create_test_name("Mock-WebACL-name")
        self.created_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
        self.id = id
        self.arn = arn
        self.description = "Mock WebACL named {0}".format(self.name)
        self.capacity = 3  # fixed mock value; real WAF derives capacity from the rules
        # Incoming dicts carry the API's PascalCase keys; convert them to
        # snake_case keyword arguments for the value objects.
        self.visibility_config = VisibilityConfig(
            **pascal_to_underscores_dict(visibility_config)
        )
        self.default_action = DefaultAction(
            **pascal_to_underscores_dict(default_action)
        )

    def to_dict(self):
        # Format for summary https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateWebACL.html (response syntax section)
        return {
            "ARN": self.arn,
            "Description": self.description,
            "Id": self.id,
            "LockToken": "Not Implemented",
            "Name": self.name,
        }
class WAFV2Backend(BaseBackend):
    """
    https://docs.aws.amazon.com/waf/latest/APIReference/API_Operations_AWS_WAFV2.html

    In-memory mock of the WAFV2 service for a single region.
    """

    def __init__(self, region_name=None):
        super(WAFV2Backend, self).__init__()
        self.region_name = region_name
        self.wacls = OrderedDict()  # self.wacls[ARN] = FakeWacl
        # TODO: self.load_balancers = OrderedDict()

    def reset(self):
        # Standard moto reset idiom: wipe every attribute, then re-run
        # __init__ so only the region binding survives.
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_web_acl(self, name, visibility_config, default_action, scope):
        """Create and store a web ACL; raises on a duplicate ARN or name."""
        wacl_id = str(uuid4())
        arn = make_arn_for_wacl(
            name=name, region_name=self.region_name, id=wacl_id, scope=scope
        )
        if arn in self.wacls or self._is_duplicate_name(name):
            raise WAFV2DuplicateItemException()
        new_wacl = FakeWebACL(name, arn, wacl_id, visibility_config, default_action)
        self.wacls[arn] = new_wacl
        return new_wacl

    def list_web_acls(self):
        """Return the summary dict of every stored web ACL."""
        return [wacl.to_dict() for wacl in self.wacls.values()]

    def _is_duplicate_name(self, name):
        # Names must be unique across this backend regardless of scope.
        allWaclNames = set(wacl.name for wacl in self.wacls.values())
        return name in allWaclNames

    # TODO: This is how you link wacl to ALB
    # @property
    # def elbv2_backend(self):
    #     """
    #     EC2 backend
    #     :return: EC2 Backend
    #     :rtype: moto.ec2.models.EC2Backend
    #     """
    #     return ec2_backends[self.region_name]
# One backend per region, registered under the "waf-regional" service name.
wafv2_backends = BackendDict(WAFV2Backend, "waf-regional")
wafv2_backends[GLOBAL_REGION] = WAFV2Backend(
    GLOBAL_REGION
)  # never used? cloudfront is global and uses us-east-1
| StarcoderdataPython |
54170 | <filename>srdk/cy/lang_tools/get_stressed_phones_for_htk.py
import sys, re, traceback
from llef.llef import get_stressed_phones
def get_stressed_phones_for_htk(word):
    """Return ``(lexicon_word, word, phones)`` for an HTK lexicon entry.

    Returns three empty strings when the word cannot be transcribed or
    contains characters/phones this HTK setup does not support.
    """
    try:
        stressed_phones = get_stressed_phones(word)
    except (ValueError, TypeError):
        return '', '', ''
    lexiconword = word
    # Strip a leading apostrophe from the lexicon form.
    if lexiconword.startswith("'"): lexiconword = lexiconword[1:]
    if '/' in lexiconword: return '', '', ''
    if '\\' in lexiconword: return '', '', ''
    if 'tsh' in stressed_phones:
        #print 'Ignored because of unsupported phone: %s' % lexiconword
        return '', '', ''
    # Bug fix: the original did ' '.join(...).encode('UTF-8'), producing
    # bytes, and then called .replace() with str arguments — a TypeError on
    # Python 3. Keep the phone string as str throughout.
    phones = ' '.join(stressed_phones)
    # Removes the stress marker '1' (note: this two-step replace also strips
    # any pre-existing 'X' from the phone string, as the original did).
    phones = phones.replace('1', 'X')
    phones = phones.replace('X', '')
    phones = phones.replace('i', 'I')
    phones = phones.replace('o', 'O')
    return lexiconword, word, phones
| StarcoderdataPython |
1608610 | """
This module to call hyperlink_preview and display the result in a webbrowser.
Provided as sample html and how to call hyperlink_preview.
"""
from pathlib import Path
import shutil
import tempfile
import time
import webbrowser
import argparse
from . import hyperlink_preview as HLP
import html
if __name__ == "__main__":
    # Inline HTML/CSS template for the preview card. The PLACEHOLDER_*
    # tokens are substituted below with the scraped link-preview data.
    template_html = """
<!DOCTYPE html>
<html lang="en">
<meta charset="UTF-8">
<title>Hyperlink "demo"</title>
<meta name="viewport" content="width=device-width,initial-scale=1">
<script src="https://code.jquery.com/jquery-3.6.0.min.js"
integrity="sha256-/xUj+3OJU5yExlq6GSYGSHk7tPXikynS7ogEvDej/m4=" crossorigin="anonymous"></script>
<style>
<!--
.hlp {
display: flex;
background-color: #ff44110D;
border: solid 1px #f41;
border-radius: 4px;
}
.hlp-img {
}
.hlp-img img{
height: 0;
border-top-left-radius: 4px;
border-bottom-left-radius: 4px;
}
.hlp-informations {
display: flex;
flex-direction: column;
padding: 16px;
}
.hlp-info-header {
display: flex;
margin-bottom: 1em;
}
.hlp-info-type {
background-color: #f41;
color: #E3E3E3;
border-radius: 3px;
padding: 0 10px;
text-transform: uppercase;
margin-right: 10px;
font-size: 0.7em;
display: flex;
align-items: center;
}
.hlp-info-title {
text-decoration: none;
color: #f41;
font-weight: bold;
}
.hlp-info-desc {
text-align: justify;
color: #333;
overflow: hidden;
text-overflow: ellipsis;
display: -webkit-box;
-webkit-line-clamp: 4; /* number of lines to show */
line-clamp: 4;
-webkit-box-orient: vertical;
}
.hlp-info-link-ico {
height: 24px;
width: 24px;
margin-right: 10px;
}
.hlp-info-link-ico path {
fill: #ffa288;
}
.hlp-info-domain {
margin-top: 1em;
display: flex;
align-items: center;
color: #ffa288;
}
-->
</style>
<body onload="adapt_hlp_img();">
<div class="hlp">
<div class="hlp-img">
<img src="PLACEHOLDER_IMG">
</div>
<div class="hlp-informations">
<div class="hlp-info-header">
<span class="hlp-info-type">PLACEHOLDER_TYPE</span>
<a href="PLACEHOLDER_URL" class="hlp-info-title">PLACEHOLDER_TITLE</a>
</div>
<div class="hlp-info-desc">
PLACEHOLDER_DESC
</div>
<div class="hlp-info-domain">
<svg class="hlp-info-link-ico" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px"
y="0px" width="512px" height="512px" viewBox="0 0 512 512" enable-background="new 0 0 512 512" xml:space="preserve">
<script xmlns="" id="__gaOptOutExtension" />
<path fill="#010101"
d="M459.654,233.373l-90.531,90.5c-49.969,50-131.031,50-181,0c-7.875-7.844-14.031-16.688-19.438-25.813 l42.063-42.063c2-2.016,4.469-3.172,6.828-4.531c2.906,9.938,7.984,19.344,15.797,27.156c24.953,24.969,65.563,24.938,90.5,0 l90.5-90.5c24.969-24.969,24.969-65.563,0-90.516c-24.938-24.953-65.531-24.953-90.5,0l-32.188,32.219 c-26.109-10.172-54.25-12.906-81.641-8.891l68.578-68.578c50-49.984,131.031-49.984,181.031,0 C509.623,102.342,509.623,183.389,459.654,233.373z M220.326,382.186l-32.203,32.219c-24.953,24.938-65.563,24.938-90.516,0 c-24.953-24.969-24.953-65.563,0-90.531l90.516-90.5c24.969-24.969,65.547-24.969,90.5,0c7.797,7.797,12.875,17.203,15.813,27.125 c2.375-1.375,4.813-2.5,6.813-4.5l42.063-42.047c-5.375-9.156-11.563-17.969-19.438-25.828c-49.969-49.984-131.031-49.984-181.016,0 l-90.5,90.5c-49.984,50-49.984,131.031,0,181.031c49.984,49.969,131.031,49.969,181.016,0l68.594-68.594 C274.561,395.092,246.42,392.342,220.326,382.186z" />
</svg>
<span>PLACEHOLDER_DOMAIN</span>
</div>
</div>
</div>
<script>
$(".hlp-img").height($(".hlp-informations").outerHeight());
$(".hlp-img img").height("100%");
</script>
</body>
</html>"""

    parser = argparse.ArgumentParser(description='Build an hyperlink preview and open it in a webbrowser')
    parser.add_argument('url', type=str, help='url of the link')
    args = parser.parse_args()

    hlp = HLP.HyperLinkPreview(url=args.url)
    if not hlp.is_valid:
        print(f"error while parsing preview of [{args.url}]")
    else:
        preview_data = hlp.get_data()
        print("Data for preview:")
        for key, value in preview_data.items():
            print(f"  {key}: {value}")
        # Fix: the original used bare `except:` clauses here. Narrow them to
        # what these lookups can actually raise: a missing key (KeyError) or
        # str.replace() being handed a non-str value such as None (TypeError).
        try:
            template_html = template_html.replace("PLACEHOLDER_IMG", preview_data["image"])
        except (KeyError, TypeError):
            template_html = template_html.replace("PLACEHOLDER_IMG", "https://upload.wikimedia.org/wikipedia/commons/8/85/Media_Viewer_Icon_-_Link_Hover.svg")
        try:
            template_html = template_html.replace("PLACEHOLDER_TYPE", preview_data["type"])
        except (KeyError, TypeError):
            template_html = template_html.replace("PLACEHOLDER_TYPE", "link")
        template_html = template_html.replace("PLACEHOLDER_URL", args.url)
        template_html = template_html.replace("PLACEHOLDER_TITLE", preview_data["title"])
        template_html = template_html.replace("PLACEHOLDER_DESC", html.escape(preview_data["description"]))
        template_html = template_html.replace("PLACEHOLDER_DOMAIN", preview_data["domain"])
        try:
            temp_folder = Path(tempfile.mkdtemp())
            temp_file = temp_folder / "hyperlink_preview_demo.html"
            print(temp_file)
            # Fix: the file handle was previously named `html`, shadowing the
            # imported `html` module used for escaping just above.
            with open(temp_file, "w", encoding="utf-8") as out_file:
                out_file.write(template_html)
            webbrowser.open(str(temp_file))
            time.sleep(10)  # give the browser time to load before cleanup
        finally:
            try:
                print(f"let's delete {temp_file}")
                shutil.rmtree(temp_folder)
            except Exception:
                pass
| StarcoderdataPython |
3236002 | import os
import numpy as np
from flask import Flask, request, jsonify, render_template, send_from_directory
import tensorflow as tf
from tensorflow import keras
import cv2
import matplotlib.image as mpimg
IMAGE_UPLOADS = 'static/uploads/'  # upload folder (saving is currently commented out below)

app = Flask(__name__)
app.config['IMAGE_UPLOADS'] = IMAGE_UPLOADS

# Loaded once at import time; startup blocks until the model is read from disk.
model = keras.models.load_model('models/fine_tuned_model')

@app.route('/')
def home():
    """Serve the upload form."""
    return render_template('index.html')

@app.route('/predict/', methods=['POST'])
def predict():
    """Classify an uploaded image with the fine-tuned binary model.

    NOTE(review): when the request carries no files this falls through and
    returns None, which Flask turns into a 500 — consider an error response.
    """
    if request.files:
        img = request.files["image"]
        # img.save(os.path.join(app.config["IMAGE_UPLOADS"], img.filename))
        img = mpimg.imread(img) # uses mpimg.imread() instead of cv2.imread() because of type error
        img = cv2.resize(img, (256, 256))
        img = np.expand_dims(img, axis=0)  # add batch dimension for model.predict
        prediction = model.predict(img)[0][0]
        print(prediction)
        # Sigmoid output: < 0.5 means class "fac"; report the winner's probability.
        if prediction < 0.5:
            output = "fac"
            prediction = 1 - prediction
        else:
            output = "not fac"
        return render_template('index.html', prediction_text='Image is {}, with probability {}'.format(output, prediction))

if __name__ == "__main__":
    app.run(debug=True)
| StarcoderdataPython |
1760580 | # Generated by Django 3.1.4 on 2020-12-17 08:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: enforce unique tag names within a hunt."""

    dependencies = [
        ("puzzles", "0019_auto_20201217_0708"),
    ]
    operations = [
        # DB-level guarantee that two PuzzleTags in the same hunt never
        # share a name.
        migrations.AddConstraint(
            model_name="puzzletag",
            constraint=models.UniqueConstraint(
                fields=("name", "hunt"), name="unique_tag_names_per_hunt"
            ),
        ),
    ]
| StarcoderdataPython |
129582 | <reponame>motraor3/py-Goldsberry<filename>goldsberry/sportvu/__init__.py
from goldsberry.sportvu._SportVu2 import * | StarcoderdataPython |
1667716 | <reponame>niranjanreddy891/pclpy<gh_stars>1-10
import pclpy_dependencies
from pclpy import io
from pclpy import view | StarcoderdataPython |
6332 | # nuScenes dev-kit.
# Code written by <NAME> & <NAME>, 2018.
# Licensed under the Creative Commons [see licence.txt]
import argparse
import json
import os
import random
import time
from typing import Tuple, Dict, Any
import numpy as np
from nuscenes import NuScenes
from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.constants import TP_METRICS
from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes
from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample
class NuScenesEval:
    """
    This is the official nuScenes detection evaluation code.
    Results are written to the provided output_dir.

    nuScenes uses the following metrics:
    - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.
    - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors.
    - nuScenes Detection Score (NDS): The weighted sum of the above.

    Here is an overview of the functions in this method:
    - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.
    - run: Performs evaluation and dumps the metric data to disk.
    - render: Renders various plots and dumps to disk.

    We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.

    Please see https://github.com/nutonomy/nuscenes-devkit for more details.
    """
    def __init__(self,
                 nusc: NuScenes,
                 config: DetectionConfig,
                 result_path: str,
                 eval_set: str,
                 output_dir: str = None,
                 verbose: bool = True):
        """
        Initialize a NuScenesEval object.
        :param nusc: A NuScenes object.
        :param config: A DetectionConfig object.
        :param result_path: Path of the nuScenes JSON result file.
        :param eval_set: The dataset split to evaluate on, e.g. train or val.
        :param output_dir: Folder to save plots and results to.
        :param verbose: Whether to print to stdout.
        """
        self.nusc = nusc
        self.result_path = result_path
        self.eval_set = eval_set
        self.output_dir = output_dir
        self.verbose = verbose
        self.cfg = config

        # Make dirs.
        self.plot_dir = os.path.join(self.output_dir, 'plots')
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        if not os.path.isdir(self.plot_dir):
            os.makedirs(self.plot_dir)

        # Load data.
        self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, verbose=verbose)
        self.gt_boxes = load_gt(self.nusc, self.eval_set, verbose=verbose)

        # Predictions and GT must cover exactly the same set of samples.
        assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
            "Samples in split doesn't match samples in predictions."

        # Add center distances.
        self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
        self.gt_boxes = add_center_dist(nusc, self.gt_boxes)

        # Filter boxes (distance, points per box, etc.).
        if verbose:
            print('Filtering predictions')
        self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
        if verbose:
            print('Filtering ground truth annotations')
        self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)

        self.sample_tokens = self.gt_boxes.sample_tokens

    def evaluate(self) -> Tuple[DetectionMetrics, MetricDataList]:
        """
        Performs the actual evaluation.
        :return: A tuple of high-level and the raw metric data.
        """
        start_time = time.time()

        # -----------------------------------
        # Step 1: Accumulate metric data for all classes and distance thresholds.
        # -----------------------------------
        if self.verbose:
            print('Accumulating metric data')
        metric_data_list = MetricDataList()
        for class_name in self.cfg.class_names:
            for dist_th in self.cfg.dist_ths:
                md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn, dist_th)
                metric_data_list.set(class_name, dist_th, md)

        # -----------------------------------
        # Step 2: Calculate metrics from the data.
        # -----------------------------------
        if self.verbose:
            print('Calculating metrics')
        metrics = DetectionMetrics(self.cfg)
        for class_name in self.cfg.class_names:
            # AP is averaged over all matching-distance thresholds.
            for dist_th in self.cfg.dist_ths:
                metric_data = metric_data_list[(class_name, dist_th)]
                ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
                metrics.add_label_ap(class_name, dist_th, ap)
            # TP metrics are computed at the single dist_th_tp threshold.
            for metric_name in TP_METRICS:
                metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)]
                # Some TP metrics are undefined for static classes
                # (no velocity / orientation / attributes), hence NaN.
                if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']:
                    tp = np.nan
                elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
                    tp = np.nan
                else:
                    tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
                metrics.add_label_tp(class_name, metric_name, tp)

        metrics.add_runtime(time.time() - start_time)

        return metrics, metric_data_list

    def render(self, metrics: DetectionMetrics, md_list: MetricDataList) -> None:
        """
        Renders various PR and TP curves.
        :param metrics: DetectionMetrics instance.
        :param md_list: MetricDataList instance.
        """
        def savepath(name):
            # All plots are written to <output_dir>/plots/<name>.pdf
            return os.path.join(self.plot_dir, name + '.pdf')

        summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall,
                     dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary'))

        for detection_name in self.cfg.class_names:
            class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall,
                           savepath=savepath(detection_name + '_pr'))

            class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp,
                           savepath=savepath(detection_name + '_tp'))

        for dist_th in self.cfg.dist_ths:
            dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall,
                          savepath=savepath('dist_pr_' + str(dist_th)))

    def main(self,
             plot_examples: int = 0,
             render_curves: bool = True) -> Dict[str, Any]:
        """
        Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
        :param plot_examples: How many example visualizations to write to disk.
        :param render_curves: Whether to render PR and TP curves to disk.
        :return: A dict that stores the high-level metrics and meta data.
        """
        if plot_examples > 0:
            # Select a random but fixed subset to plot.
            random.seed(43)
            sample_tokens = list(self.sample_tokens)
            random.shuffle(sample_tokens)
            sample_tokens = sample_tokens[:plot_examples]

            # Visualize samples.
            example_dir = os.path.join(self.output_dir, 'examples')
            if not os.path.isdir(example_dir):
                os.mkdir(example_dir)
            for sample_token in sample_tokens:
                visualize_sample(self.nusc,
                                 sample_token,
                                 self.gt_boxes if self.eval_set != 'test' else EvalBoxes(),
                                 # Don't render test GT.
                                 self.pred_boxes,
                                 eval_range=max(self.cfg.class_range.values()),
                                 savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))

        # Run evaluation.
        metrics, metric_data_list = self.evaluate()

        # Render PR and TP curves.
        if render_curves:
            self.render(metrics, metric_data_list)

        # Dump the metric data, meta and metrics to disk.
        if self.verbose:
            print('Saving metrics to: %s' % self.output_dir)
        metrics_summary = metrics.serialize()
        metrics_summary['meta'] = self.meta.copy()
        with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
            json.dump(metrics_summary, f, indent=2)
        with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
            json.dump(metric_data_list.serialize(), f, indent=2)

        # Print high-level metrics.
        print('mAP: %.4f' % (metrics_summary['mean_ap']))
        err_name_mapping = {
            'trans_err': 'mATE',
            'scale_err': 'mASE',
            'orient_err': 'mAOE',
            'vel_err': 'mAVE',
            'attr_err': 'mAAE'
        }
        for tp_name, tp_val in metrics_summary['tp_errors'].items():
            print('%s: %.4f' % (err_name_mapping[tp_name], tp_val))
        print('NDS: %.4f' % (metrics_summary['nd_score']))
        print('Eval time: %.1fs' % metrics_summary['eval_time'])

        return metrics_summary
if __name__ == "__main__":

    # Settings.
    parser = argparse.ArgumentParser(description='Evaluate nuScenes result submission.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('result_path', type=str, help='The submission as a JSON file.')
    parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',
                        help='Folder to store result metrics, graphs and example visualizations.')
    parser.add_argument('--eval_set', type=str, default='val',
                        help='Which dataset split to evaluate on, train, val or test.')
    parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
                        help='Default nuScenes data directory.')
    parser.add_argument('--version', type=str, default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--config_name', type=str, default='cvpr_2019',
                        help='Name of the configuration to use for evaluation, e.g. cvpr_2019.')
    parser.add_argument('--plot_examples', type=int, default=10,
                        help='How many example visualizations to write to disk.')
    parser.add_argument('--render_curves', type=int, default=1,
                        help='Whether to render PR and TP curves to disk.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    args = parser.parse_args()

    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_name_ = args.config_name
    plot_examples_ = args.plot_examples
    # Flags arrive as ints (0/1) so bool() converts them explicitly.
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)

    cfg_ = config_factory(config_name_)
    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
    nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
                             output_dir=output_dir_, verbose=verbose_)
    nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
| StarcoderdataPython |
1776815 | """
10-6. Addition: One common problem when prompting for numerical input
occurs when people provide text instead of numbers. When you try to convert
the input to an int, you’ll get a ValueError. Write a program that prompts for
two numbers. Add them together and print the result. Catch the ValueError if
either input value is not a number, and print a friendly error message. Test your
program by entering two numbers and then by entering some text instead of a
number.
"""
try:
    given_number_a = input("Please give me a number: ")
    given_number_a = int(given_number_a)
    given_number_b = input("Please give me another number: ")
    # Bug fix: the original called input(given_number_b) here instead of
    # int(given_number_b), so the second value stayed a string, the prompt
    # was shown again, and the addition below crashed with an uncaught
    # TypeError (it runs in the else block, outside the try).
    given_number_b = int(given_number_b)
except ValueError:
    print("Please enter numbers!")
else:
    result = given_number_a + given_number_b
    print(f"the answer is {result}")
1722299 | import os
from xbrr.base.reader.base_element import BaseElement
from xbrr.edinet.reader.element_value import ElementValue
class Element(BaseElement):
    """A single XBRL element: resolves its XSD definition and label text
    through the taxonomy/linkbase files reachable from ``reference``."""

    def __init__(self, name, element, reference, reader):
        super().__init__(name, element, reference, reader)
        self.name = name
        self.element = element
        self.reference = reference
        self.reader = reader

    @property
    def xsd(self):
        """Locate this element's definition node in its schema (.xsd) file."""
        name = self.reference_name
        path = self.reference_path
        xml = None
        if path.endswith(".xsd"):
            xml = self.reader._read_from_cache(path)
        else:
            # NOTE(review): this branch rebinds `path` but never loads `xml`,
            # so the find() below would run on None — confirm whether this
            # branch is ever taken in practice.
            path = self._find_file(os.path.dirname(path), ".xsd")

        # PublicDoc schemas use unprefixed tags; taxonomy ones are namespaced.
        if os.path.dirname(path).endswith("PublicDoc"):
            element = xml.find("element", {"id": name})
        else:
            element = xml.find("xsd:element", {"id": name})
        return element

    def label(self, kind="lab", verbose=False):
        """Return the element's label text.

        ``kind`` selects the linkbase: "en" (English), "gla" (guide) or the
        default standard label file; ``verbose`` prefers the verbose label
        role when several labels exist.
        """
        label = ""
        if kind == "en":
            label = self._get_label("_lab_en.xml", verbose)
        elif kind == "gla":
            label = self._get_label("_lab_gla.xml", verbose)
        else:
            label = self._get_label("_lab.xml", verbose)

        if label is None:
            return ""
        elif isinstance(label, str):
            return label
        else:
            return label.text

    def value(self, label_kind="", label_verbose=False):
        """Build an ElementValue for this element (resolving its label)."""
        return ElementValue.create_from_element(
            reader=self.reader, element=self,
            label_kind=label_kind, label_verbose=label_verbose)

    def _get_label(self, extention, verbose):
        """Find the label node for this element in the given label file."""
        name = self.reference_name
        path = self.reference_path
        xml = None
        # PublicDoc keeps labels next to the document; taxonomies keep them
        # in a label/ subfolder and reference the schema relatively.
        if os.path.dirname(path).endswith("PublicDoc"):
            label_path = self._find_file(os.path.dirname(path), extention)
            href = self.reference
        else:
            _dir = os.path.join(os.path.dirname(path), "label")
            label_path = self._find_file(_dir, extention)
            href = f"../{os.path.basename(path)}#{name}"

        xml = self.reader._read_from_cache(label_path)
        targets = self._read_link(
            xml=xml, arc_name="link:labelArc", reference=href,
            target_name="link:label", target_attribute="id")

        # Prefer the verboseLabel role when requested; otherwise keep the
        # last candidate found.
        if len(targets) > 1:
            for lb in targets:
                if lb["xlink:role"].endswith("verboseLabel") and verbose:
                    label = lb
                    break
                else:
                    label = lb
        elif len(targets) > 0:
            label = targets[0]
        else:
            label = None

        return label

    def _read_link(self, xml, arc_name, reference="",
                   target_name="", target_attribute=""):
        # link: href: absolute path to element definition by url format.
        # name: underscore separated name. when used by tag, it is splited by ":"
        # name is solved by namespace so
        # name => link is good approach.
        reference = reference if reference else self.reference
        label = xml.find("link:loc", {"xlink:href": reference})
        arc = None
        if label is not None:
            arc = xml.find(arc_name, {"xlink:from": label["xlink:label"]})
        else:
            # Fallback: match the arc directly by this element's name.
            arc = xml.find(arc_name, {"xlink:label": self.name})
            if arc is None:
                return []

        target_name = target_name if target_name else "link:loc"
        target_attribute = target_attribute if target_attribute else "xlink:label"
        targets = []
        if arc is not None:
            targets = xml.find_all(target_name, {target_attribute: arc["xlink:to"]})
            # Retry on xlink:label when the requested attribute matched nothing.
            if len(targets) == 0 and target_attribute != "xlink:label":
                targets = xml.find_all(target_name, {"xlink:label": arc["xlink:to"]})

        return targets
| StarcoderdataPython |
3249958 | from shop import app
app.run(debug=True, port=8001)  # dev server on port 8001; debug=True must not be used in production
| StarcoderdataPython |
40543 | # -*- coding: utf-8 -*-
# @Time : 2019-09-01 17:49
# @Author : EchoShoot
# @Email : <EMAIL>
# @URL : https://github.com/EchoShoot
# @File : test_others.py
# @Explain :
from sheen import Str
import pytest
class TestOthers(object):
    """Check that sheen's colourised Str mirrors plain str behaviour for
    case-changing methods, affix tests, zfill and encode.

    ``obj`` is built once at class-definition time: the nested slice
    assignments layer green/blue/magenta colour spans inside the red string,
    so case-changed results compare equal to str but not to a fresh Str.
    """
    raw = 'xxooAß西xoox'
    obj = Str.red(raw)
    obj[2:-2] = Str.green
    obj[4:-4] = Str.blue
    obj[5:-5] = Str.magenta

    def test_upper(self):
        assert self.obj.upper() == self.raw.upper()
        assert self.obj.upper() != Str(self.raw).upper()

    def test_lower(self):
        assert self.obj.lower() == self.raw.lower()
        assert self.obj.lower() != Str(self.raw).lower()

    def test_swapcase(self):
        assert self.obj.swapcase() == self.raw.swapcase()
        assert self.obj.swapcase() != Str(self.raw).swapcase()

    def test_title(self):
        assert self.obj.title() == self.raw.title()
        assert self.obj.title() != Str(self.raw).title()

    def test_capitalize(self):
        assert self.obj.capitalize() == self.raw.capitalize()
        assert self.obj.capitalize() != Str(self.raw).capitalize()

    def test_casefold(self):
        assert self.obj.casefold() == self.raw.casefold()
        assert self.obj.casefold() != Str(self.raw).casefold()

    def test_startswith(self):
        # Prefix checks are colour-sensitive when given a Str argument.
        assert self.obj.startswith('xxoo') is True
        assert self.obj.startswith('xoox') is False
        assert self.obj.startswith(Str.green('xx') + Str.red('oo')) is False
        assert self.obj.startswith(Str.red('xx') + Str.green('oo')) is True

    def test_endswith(self):
        assert self.obj.endswith('xxoo') is False
        assert self.obj.endswith('xoox') is True
        assert self.obj.endswith(Str.green('xo') + Str.red('ox')) is True
        assert self.obj.endswith(Str.red('xo') + Str.green('ox')) is False

    def test_zfill(self):
        assert self.obj.zfill(-1) == self.raw.zfill(-1)
        assert self.obj.zfill(0) == self.raw.zfill(0)
        assert self.obj.zfill(30) == self.raw.zfill(30)

    def test_encode(self):
        assert self.obj.encode() == self.raw.encode()
        # 'ß' / '西' are unencodable in gbk only partially; ignore/replace
        # must behave exactly like the plain-str equivalents.
        assert self.obj.encode(encoding='gbk', errors='ignore') == self.raw.encode(encoding='gbk', errors='ignore')
        assert self.obj.encode(encoding='gbk', errors='replace') == self.raw.encode(encoding='gbk', errors='replace')
        with pytest.raises(UnicodeEncodeError):
            self.obj.encode(encoding='gbk', errors='strict')
        with pytest.raises(UnicodeEncodeError):
            self.raw.encode(encoding='gbk', errors='strict')
| StarcoderdataPython |
3306634 | <filename>udemy/spiders/udemy_course.py<gh_stars>0
#!/usr/bin/env python
"""ETL process for gathering Udemy courses metadata.
"""
__author__ = "<NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
# standard libraries
import re
# third parties libraries
import scrapy
class UdemyCourseSpider(scrapy.Spider):
    """Spider that scrapes metadata for a single Udemy course landing page.

    The course URL is supplied at construction time. Fields whose CSS
    selectors are not implemented yet yield the placeholder string
    "Not implemented yet".

    NOTE(review): the ``.get().strip()`` chains below raise AttributeError
    when a selector matches nothing — confirm the page layout is stable.
    """
    name = 'udemy-course'

    def __init__(self, course_url, **kwargs):
        super().__init__(**kwargs)
        # Crawl starts (and ends) on this single course page.
        self.start_urls = [
            f'{course_url}',
        ]

    def parse(self, response):
        """Yield one dict with all metadata extracted from the course page."""
        yield {
            'url': self.__get_url(response),
            'title': self.__get_title(response),
            'subtitle': self.__get_subtitle(response),
            'avg_rating': self.__get_avg_rating(response),
            'rating': self.__get_rating(response),
            'students_enrolled': self.__get_enrollment(response),
            'last_update': self.__get_last_update(response),
            'author': self.__get_author(response),
            'language': self.__get_language(response),
            'price': self.__get_price(response),
            'learning_goals': self.__get_learning_goals(response),
            'requirements': self.__get_requirements(response),
            'duration': self.__get_duration(response),
            # Fixed typo: method renamed from __get_aticles_amount to match
            # the 'articles_amount' item key.
            'articles_amount': self.__get_articles_amount(response),
            'downloadable': self.__get_downloadable(response)
        }

    def __get_url(self, response):
        # TODO("Not implemented yet")
        return "Not implemented yet"

    def __get_title(self, response):
        """Course title from the lead header."""
        pattern = "div.clp-lead__title::text"
        return response.css(pattern).get(). \
            strip()

    def __get_subtitle(self, response):
        """Course headline/subtitle from the lead section."""
        pattern = "div.clp-lead__headline::text"
        return response.css(pattern).get(). \
            strip()

    def __get_avg_rating(self, response):
        """Average star rating as a float; the value lives in a span whose
        id is referenced from the rating tooltip."""
        id_pattern = 'span.tooltip-container span::attr(id)'
        id = response.css(id_pattern).get()
        avg_rating_pattern = '[id="%s"]::text' % id
        avg_rating = response.css(avg_rating_pattern).get(). \
            strip()
        return float(avg_rating)

    def __get_rating(self, response):
        # TODO("Not implemented yet")
        return "Not implemented yet"

    def __get_enrollment(self, response):
        """Number of enrolled students, with thousands separators removed."""
        data_purpose = 'enrollment'
        pattern = '[data-purpose="%s"]::text' % data_purpose
        enrollment = response.css(pattern).get(). \
            strip()
        # Keep only digit runs, then join them (drops separators like ',').
        enrollment = re.findall(r'[0-9]+', enrollment)
        return int(''.join(enrollment))

    def __get_last_update(self, response):
        """Last-update marker; keeps only the final whitespace-separated token
        (the date) of the label text."""
        return response.css('div.last-update-date span::text').get(). \
            strip(). \
            split(' ')[-1]

    def __get_author(self, response):
        """Name of the (top) instructor."""
        data_purpose = 'instructor-name-top'
        pattern = 'span[data-purpose="%s"] a::text' % data_purpose
        return response.css(pattern).get(). \
            strip()

    def __get_language(self, response):
        # TODO("Not implemented yet")
        return "Not implemented yet"

    def __get_price(self, response):
        # TODO("Not implemented yet")
        return "Not implemented yet"

    def __get_learning_goals(self, response):
        """All 'what you will learn' bullet points joined into one string."""
        pattern = 'ul.what-you-get__items span.what-you-get__text::text'
        learning_goals = response.css(pattern).getall()
        return '. '.join(learning_goals)

    def __get_requirements(self, response):
        """All course requirements joined into one string."""
        pattern = 'ul.requirements__list li.requirements__item::text'
        requirements = response.css(pattern).getall()
        return '. '.join(requirements)

    def __get_duration(self, response):
        """Video content length; first token of the label (e.g. '12' of '12 hours')."""
        data_purpose = 'video-content-length'
        pattern = 'span[data-purpose="%s"]::text' % data_purpose
        duration_string = response.css(pattern).get(). \
            strip()
        return duration_string.split(" ")[0]

    def __get_articles_amount(self, response):
        # TODO("Not implemented yet")
        return "Not implemented yet"

    def __get_downloadable(self, response):
        # TODO("Not implemented yet")
        return "Not implemented yet"
1795945 | <filename>source/segment/nnmf.py
import torch
import torch.nn as nn
import numpy as np
from utils import softminus
import math
import numbers
from torch.nn import functional as F
class SubNet(nn.ModuleList):
    """A callable ModuleList: forward() pipes the input through each
    contained module in insertion order."""

    def __init__(self, list):
        super(SubNet, self).__init__(list)

    def forward(self, input):
        # Feed the running result through every stored module, in order.
        result = input
        for module in self:
            result = module(result)
        return result
class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a
    1d, 2d or 3d tensor. Filtering is performed seperately for each channel
    in the input using a depthwise convolution.

    Note: no padding is applied here, so each spatial dimension of the
    output shrinks by (kernel_size - 1); callers in this file pad the
    result themselves.

    Arguments:
        channels (int, sequence): Number of channels of the input tensors. Output will
            have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): The number of dimensions of the data.
            Default value is 2 (spatial).
    """
    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Scalars are broadcast to one value per spatial dimension.
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # The gaussian kernel is the product of the
        # gaussian function of each dimension.
        kernel = 1
        meshgrids = torch.meshgrid(
            [
                torch.arange(size, dtype=torch.float32)
                for size in kernel_size
            ]
        )
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            # Gaussian centred on the middle of the kernel window.
            mean = (size - 1) / 2
            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
                      torch.exp(-((mgrid - mean) / std) ** 2 / 2)
        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise convolutional weight
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        # register_buffer: moves with .to(device) but is not a trainable parameter.
        self.register_buffer('weight', kernel)
        self.groups = channels
        # groups == channels makes the convolution depthwise (per-channel).
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError(
                'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
            )
    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        return self.conv(input, weight=self.weight, groups=self.groups)
class NNMF(nn.Module):
    """Neural-network matrix factorisation (NeuMF-style) over a pixels x frames
    matrix.

    Two branches over positive pixel/frame embeddings — a generalised matrix
    factorisation (GMF, elementwise product) branch and an MLP branch on the
    concatenated embeddings — are merged by the `neu_mf` head.  A small
    `threshold_mlp` is applied to the residual (target - reconstruction).
    Either branch can be disabled by passing size 0.
    """
    def __init__(self, gmf_size, mlp_size, mlp_layers, threshold_layers):
        super(NNMF, self).__init__()
        # Embedding widths of the two branches; 0 disables a branch.
        self.gmf_size = gmf_size
        self.mlp_size = mlp_size
        self.threshold_layers = threshold_layers
        self.mlp_layers = mlp_layers
        # softplus keeps embedding values positive (non-negative factorisation).
        self.embedding_activation = nn.functional.softplus
        self.mlp_activation = nn.LeakyReLU
        self.threshold_activation = nn.ReLU
        self.threshold_activation_output = nn.ReLU
        self.output_activation = nn.Sigmoid
        # The MLP branch contributes mlp_layers[-1] features only when enabled.
        self.neu_mf_input_size = self.mlp_layers[-1] * (self.mlp_size > 0) + self.gmf_size
        # MLP input is the concatenation of a pixel and a frame embedding.
        self.mlp_input_size = 2 * self.mlp_size
        # Sub-networks are built in define_nn(); embeddings in set_matrix().
        self.threshold_mlp = None
        self.mlp = None
        self.neu_mf = None
        self.num_pixels = None
        self.num_frames = None
        self.gmf_u = None
        self.gmf_v = None
        self.mlp_u = None
        self.mlp_v = None
        self.define_nn()
    def define_nn(self):
        """Build the threshold MLP, the embedding MLP and the NeuMF head."""
        self.threshold_mlp = SubNet([nn.Linear(1, self.threshold_layers[0]), self.threshold_activation()] +
                                    [item for t in [(nn.Linear(self.threshold_layers[j],
                                                               self.threshold_layers[j + 1]),
                                                     self.threshold_activation())
                                                    for j in range(len(self.threshold_layers) - 1)] for item in t])
        # Swap the final activation for the dedicated output activation.
        self.threshold_mlp[-1] = self.threshold_activation_output()
        self.mlp = SubNet([nn.Linear(self.mlp_input_size, self.mlp_layers[0]), self.mlp_activation()] +
                          [item for t in [(nn.Linear(self.mlp_layers[j], self.mlp_layers[j + 1]),
                                           self.mlp_activation())
                                          for j in range(len(self.mlp_layers) - 1)] for item in t])
        self.neu_mf = SubNet([nn.Linear(self.neu_mf_input_size, 1), self.output_activation()])
    def set_matrix(self, matrix2d, embedding_nmf_init=None):
        """Create the pixel/frame embeddings sized to *matrix2d* (pixels x frames).

        embedding_nmf_init, when given, seeds the GMF embeddings; presumably an
        (U, V) pair passed through softminus so that the softplus activation
        recovers the original factors — confirm with the `softminus` helper.
        """
        self.num_pixels = matrix2d.shape[0]
        self.num_frames = matrix2d.shape[1]
        initialize_embedding = lambda x: nn.Embedding.from_pretrained(torch.from_numpy(x).float(), freeze=False)
        get_random_init = lambda size: softminus(np.random.normal(loc=0.5, scale=0.01, size=size))
        # NOTE(review): truthiness test — assumes a tuple/list (not an ndarray).
        if embedding_nmf_init:
            self.gmf_u = initialize_embedding(softminus(embedding_nmf_init[0]))
            self.gmf_v = initialize_embedding(softminus(embedding_nmf_init[1]))
        else:
            self.gmf_u = initialize_embedding(get_random_init((self.num_pixels, self.gmf_size)))
            self.gmf_v = initialize_embedding(get_random_init((self.num_frames, self.gmf_size)))
        self.mlp_u = initialize_embedding(get_random_init((self.num_pixels, self.mlp_size)))
        self.mlp_v = initialize_embedding(get_random_init((self.num_frames, self.mlp_size)))
    def init_params(self, gmf_net_init=False):
        """(Re)initialise network weights.

        With gmf_net_init=True the MLP and NeuMF heads are zeroed/identity-set
        so the model initially behaves like pure GMF.
        """
        def init_weights(m):
            # NOTE(review): sub-networks here are SubNet (an nn.ModuleList
            # subclass), so `type(m) == nn.Sequential` never matches and the
            # xavier initialisation below is effectively a no-op — confirm intent.
            if type(m) == nn.Sequential:
                try:
                    nn.init.xavier_normal_(m.weight.data, gain=1)
                    nn.init.normal_(m.bias, mean=0.0, std=0.01)
                except:
                    pass
        self.apply(init_weights)
        if gmf_net_init:
            with torch.no_grad():
                # Zero the MLP branch so it contributes nothing at the start.
                for l in self.mlp:
                    try:
                        l.weight.fill_(0.)
                        l.bias.fill_(0.)
                    except:
                        pass
                # Make the NeuMF head a plain sum of its inputs.
                for l in self.neu_mf:
                    try:
                        l.weight.fill_(1.)
                        l.bias.fill_(0.)
                    except:
                        pass
        with torch.no_grad():
            # Start the threshold MLP as (approximately) the identity map.
            for l in self.threshold_mlp:
                try:
                    nn.init.eye_(l.weight)
                    l.bias.fill_(0.)
                except:
                    pass
    def forward(self, pixel, frame, target):
        """Return (reconstruction, thresholded residual) for the given
        pixel/frame index batches and observed target values."""
        neu_mf_input = []
        if self.mlp_size != 0:
            # MLP branch: concatenated positive pixel and frame embeddings.
            mlp_input = torch.cat([self.embedding_activation(self.mlp_u(pixel)),
                                   self.embedding_activation(self.mlp_v(frame))], dim=1)
            mlp_output = self.mlp(mlp_input)
            neu_mf_input += [mlp_output]
        if self.gmf_size != 0:
            # GMF branch: elementwise product of positive embeddings.
            neu_mf_input += [torch.mul(self.embedding_activation(self.gmf_u(pixel)),
                                       self.embedding_activation(self.gmf_v(frame)))]
        neu_mf_input = torch.cat(neu_mf_input, dim=1)
        neu_mf_output = self.neu_mf(neu_mf_input)
        # Residual between observation and reconstruction, passed through
        # the learned thresholding network.
        s_input = target - neu_mf_output
        s_output = self.threshold_mlp(s_input)
        return neu_mf_output, s_output
    def embedding_parameters(self):
        """Parameters of all active embedding tables (for a separate optimiser group)."""
        embedding_params = []
        if self.mlp_size != 0:
            embedding_params += list(self.mlp_u.parameters()) + list(self.mlp_v.parameters())
        if self.gmf_size != 0:
            embedding_params += list(self.gmf_u.parameters()) + list(self.gmf_v.parameters())
        return embedding_params
    def embedding_regularization(self, pixel, frame):
        """L2 norm of the activated embeddings for the given batch, averaged
        over the batch size."""
        loss = 0
        if self.gmf_size != 0:
            loss += torch.norm(self.embedding_activation((self.gmf_u(pixel)))) + \
                    torch.norm(self.embedding_activation((self.gmf_v(frame))))
        if self.mlp_size != 0:
            loss += torch.norm(self.embedding_activation((self.mlp_u(pixel)))) + \
                    torch.norm(self.embedding_activation((self.mlp_v(frame))))
        return loss / pixel.shape[0]
    def spatial_regularization(self, device):
        """Smoothness penalty on the pixel embeddings, viewed as square images.

        Assumes num_pixels is a perfect square — TODO confirm upstream.
        """
        loss = 0
        def refactor_embedding(emb):
            # (num_pixels, k) -> (1, k, sqrt(P), sqrt(P)) image layout.
            emb_r = self.embedding_activation(emb)
            emb_r = emb_r.view([1, int(np.sqrt(self.num_pixels)), int(np.sqrt(self.num_pixels)), -1])
            emb_r = emb_r.permute([0, 3, 1, 2])
            return emb_r
        def add_loss(embedding_weight, size):
            kernel_size = 15
            # Pad back the (kernel_size-1)/2 border the valid convolution removed.
            pad = list(int((kernel_size-1)/2)*np.array([1, 1, 1, 1, 0, 0, 0, 0]))
            gaussian_sm = GaussianSmoothing(channels=size, kernel_size=kernel_size, sigma=1, dim=2).to(device)
            gmf_u = refactor_embedding(embedding_weight)
            gmf_u_sq = torch.mul(gmf_u, gmf_u)
            conv_gmf = torch.nn.functional.pad(gaussian_sm(gmf_u),
                                               pad=pad, mode='constant', value=0.)
            conv_gmf_sq = torch.nn.functional.pad(gaussian_sm(gmf_u_sq), pad=pad, mode='constant', value=0.)
            # Expanded form of sum_w ||e_i - e_j||^2-style smoothness term.
            return (torch.sum(gmf_u_sq.flatten()) + torch.sum(conv_gmf_sq) -
                    2 * torch.dot(gmf_u.flatten(), conv_gmf.flatten()))
        if self.gmf_size != 0: loss += add_loss(self.gmf_u.weight, self.gmf_size) / self.num_pixels
        if self.mlp_size != 0: loss += add_loss(self.mlp_u.weight, self.mlp_size) / self.num_pixels
        return loss
    def temporal_regularization(self, device):
        """Smoothness penalty on the frame embeddings along the time axis
        (1-D analogue of spatial_regularization)."""
        loss = 0
        def refactor_embedding(emb):
            # (num_frames, k) -> (1, k, num_frames) sequence layout.
            emb_r = self.embedding_activation(emb)
            emb_r = emb_r.view([1, self.num_frames, -1])
            emb_r = emb_r.permute([0, 2, 1])
            return emb_r
        def add_loss(embedding_weight, size):
            kernel_size = 15
            pad = list(int((kernel_size - 1) / 2) * np.array([1, 1, 0, 0, 0, 0]))
            gaussian_sm = GaussianSmoothing(channels=size, kernel_size=kernel_size, sigma=1, dim=1).to(device)
            gmf_u = refactor_embedding(embedding_weight)
            gmf_u_sq = torch.mul(gmf_u, gmf_u)
            conv_gmf = torch.nn.functional.pad(gaussian_sm(gmf_u), pad=pad, mode='constant', value=0.)
            conv_gmf_sq = torch.nn.functional.pad(gaussian_sm(gmf_u_sq), pad=pad, mode='constant', value=0.)
            return (torch.sum(gmf_u_sq.flatten()) + torch.sum(conv_gmf_sq) -
                    2 * torch.dot(gmf_u.flatten(), conv_gmf.flatten()))
        if self.gmf_size != 0: loss += add_loss(self.gmf_v.weight, self.gmf_size) / self.num_frames
        if self.mlp_size != 0: loss += add_loss(self.mlp_v.weight, self.mlp_size) / self.num_frames
        return loss
| StarcoderdataPython |
1707612 | <gh_stars>0
import pluggy
from scenario_player.constants import HOST_NAMESPACE
from scenario_player.services.rpc.blueprints.instances import instances_blueprint
from scenario_player.services.rpc.blueprints.tokens import tokens_blueprint
from scenario_player.services.rpc.blueprints.transactions import transactions_blueprint
from scenario_player.services.rpc.utils import RPCRegistry
# Public re-exports. tokens_blueprint is imported and registered below, so it
# belongs in the public API as well (it was previously missing from __all__).
__all__ = ["transactions_blueprint", "instances_blueprint", "tokens_blueprint"]
# Marker used to declare pluggy hook implementations for the host namespace.
HOOK_IMPL = pluggy.HookimplMarker(HOST_NAMESPACE)
@HOOK_IMPL
def register_blueprints(app):
    """Pluggy hook implementation: attach the RPC service blueprints to *app*.

    Also installs a fresh RPCRegistry under app.config["rpc-client"] so all
    blueprints share a single RPC client registry.
    """
    app.config["rpc-client"] = RPCRegistry()
    for bp in (transactions_blueprint, instances_blueprint, tokens_blueprint):
        app.register_blueprint(bp)
| StarcoderdataPython |
1743333 | <gh_stars>1-10
from .buttons import create_toggle_button
from numpy import ceil
from ipywidgets import GridspecLayout
import pandas as pd
from typing import List
# A simple class for creating a grid of toggle buttons
# It has some additional utilities such as
# get_values or load_values methods
class ToggleGrid:
    """
    A grid of toggle buttons that facilitates getting
    information from them.

    :param categories: list of category labels, one button per category
    :param n_cols: number of grid columns
    """
    def __init__(self, categories: List[str], n_cols: int = 3) -> None:
        self.cats = categories
        self.n_cols = n_cols
        # Enough rows to hold every category at n_cols per row.
        self.n_rows = int(ceil(len(self.cats) / n_cols))
        self.grid = GridspecLayout(self.n_rows, self.n_cols)
    # The grid gets created on calling the class
    def __call__(self) -> GridspecLayout:
        """Populate the grid with one toggle button per category and return it."""
        for i in range(self.n_rows):
            for j in range(self.n_cols):
                # Guard: the last row may be only partially filled.
                if self.n_cols * i + j <= len(self.cats) - 1:
                    self.grid[i, j] = create_toggle_button(
                        self.cats[self.n_cols * i + j]
                    )
        return self.grid
    # get_values method takes the current buttons' values and resets the buttons
    def get_values(self) -> List[bool]:
        """Return the current button values (row-major) and reset all buttons to False."""
        values = list()
        for i in range(self.n_rows):
            for j in range(self.n_cols):
                if self.n_cols * i + j <= len(self.cats) - 1:
                    values.append(self.grid[i, j].value)
                    self.grid[i, j].value = False
        return values
    # load_values method sets the buttons' loads buttons'
    # previous values, which are stored in the dataframe df
    def load_values(self, iterator: int, df: pd.DataFrame) -> None:
        """Restore button states from row *iterator* of *df* (one column per button)."""
        for i in range(self.n_rows):
            for j in range(self.n_cols):
                if self.n_cols * i + j <= len(self.cats) - 1:
                    self.grid[i, j].value = df.iloc[iterator, self.n_cols * i + j]
| StarcoderdataPython |
153212 | <reponame>voximplant/apiclient-python
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
    # Credentials are read from a local JSON file.
    voxapi = VoximplantAPI("credentials.json")
    # Update the description of an existing API key.
    KEY_ID = "ab98c70e-573e-4446-9af9-105269dfafca"
    DESCRIPTION = "test_desc"
    try:
        res = voxapi.update_key(KEY_ID,
            DESCRIPTION)
        print(res)
    except VoximplantException as e:
        print("Error: {}".format(e.message))
| StarcoderdataPython |
1753933 | import sqlite3
import logging
# create logger
module_logger = logging.getLogger(__name__)
class DBConnection:
    """Thin wrapper around a sqlite3 connection tuned for a write-heavy bot.

    Rows are returned as sqlite3.Row objects (mapping-style access).
    """
    def __init__(self, filename="bot.db"):
        self.filename = filename
        self.connection = sqlite3.connect(filename, timeout=20)
        # don't wait for the disk to finish writing
        self.connection.execute("PRAGMA synchronous = OFF")
        # WAL journal: readers do not block the writer
        self.connection.execute("PRAGMA journal_mode = %s" % 'WAL')
        # 32 MiB of page cache (negative cache_size is interpreted as KiB)
        self.connection.execute("PRAGMA cache_size=-%s" % (32 * 1024))
        self.connection.row_factory = sqlite3.Row

    def action(self, query, args=None):
        """Execute *query* (with optional bound *args*) inside a transaction.

        Returns the cursor, or None when *query* is None. Database errors are
        logged; operational lock errors are swallowed, others re-raised.
        """
        if query is None:
            return
        sql_result = None
        try:
            with self.connection as c:
                if args is None:
                    sql_result = c.execute(query)
                else:
                    sql_result = c.execute(query, args)
        except sqlite3.OperationalError as e:
            if "unable to open database file" in str(e) or "database is locked" in str(e):
                module_logger.debug('Database Error: %s', e)
            else:
                module_logger.debug('Database error: %s', e)
                raise
        except sqlite3.DatabaseError as e:
            module_logger.debug('Fatal Error executing %s :: %s', query, e)
            raise
        return sql_result

    def select(self, query, args=None):
        """Run a SELECT and return all rows (empty list when nothing matched)."""
        sql_results = self.action(query, args).fetchall()
        if sql_results is None or sql_results == [None]:
            return []
        return sql_results

    def insert_gossip(self, name, message):
        """Insert one gossip row.

        Values are bound as parameters (previously they were concatenated into
        the SQL string, which was an injection vector and broke on quotes).
        """
        insert_query = "INSERT INTO gossip (name, message) VALUES (?, ?)"
        try:
            self.action(insert_query, (name, message))
        except sqlite3.IntegrityError:
            module_logger.debug('Queries failed: %s', insert_query)

    def upsert(self, tableName, valueDict, keyDict):
        """UPDATE the row matching *keyDict* with *valueDict*; INSERT when no
        row was changed.

        dict views are materialised with list() before concatenation — the old
        `dict.values() + dict.values()` only worked on Python 2.
        """
        def genParams(myDict):
            return [x + " = ?" for x in myDict.keys()]
        values = list(valueDict.values())
        keys = list(keyDict.values())
        columns = list(valueDict.keys()) + list(keyDict.keys())
        changesBefore = self.connection.total_changes
        update_query = "UPDATE " + tableName + " SET " + ", ".join(
            genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict))
        self.action(update_query, values + keys)
        if self.connection.total_changes == changesBefore:
            insert_query = (
                "INSERT INTO " + tableName + " (" + ", ".join(columns) + ")" +
                " VALUES (" + ", ".join(["?"] * len(columns)) + ")"
            )
            try:
                self.action(insert_query, values + keys)
            except sqlite3.IntegrityError:
                module_logger.debug('Queries failed: %s and %s', update_query, insert_query)
3314973 | <filename>test/echo_server.py
import asyncio
import json
import logging
import sys
import aiohttp
from aiohttp import web
async def async_main():
    """Run an aiohttp echo server on 0.0.0.0:8888 (until cancelled).

    /http echoes method/body/headers back as JSON with status 405;
    /websockets first sends the request headers as JSON, then echoes every
    text/binary frame.
    """
    # Mirror aiohttp's server loggers to stdout for visibility in tests.
    stdout_handler = logging.StreamHandler(sys.stdout)
    for logger_name in ["aiohttp.server", "aiohttp.web", "aiohttp.access"]:
        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(stdout_handler)
    async def handle_http(request):
        # Echo back everything relevant about the request so callers can
        # assert what reached the upstream.
        data = {
            "method": request.method,
            "content": (await request.read()).decode(),
            "headers": dict(request.headers),
        }
        # Deliberately non-2xx status and a marker header for proxy tests.
        return web.json_response(
            data, status=405, headers={"from-upstream": "upstream-header-value"}
        )
    async def handle_websockets(request):
        wsock = web.WebSocketResponse()
        await wsock.prepare(request)
        # First frame: the request headers, JSON-encoded.
        await wsock.send_str(json.dumps(dict(request.headers)))
        async for msg in wsock:
            if msg.type == aiohttp.WSMsgType.CLOSE:
                await wsock.close()
            if msg.type == aiohttp.WSMsgType.TEXT:
                await wsock.send_str(msg.data)
            if msg.type == aiohttp.WSMsgType.BINARY:
                await wsock.send_bytes(msg.data)
        return wsock
    upstream = web.Application()
    upstream.add_routes(
        [
            web.get("/http", handle_http),
            web.patch("/http", handle_http),
            web.get("/websockets", handle_websockets),
        ]
    )
    upstream_runner = web.AppRunner(upstream)
    await upstream_runner.setup()
    upstream_site = web.TCPSite(upstream_runner, "0.0.0.0", 8888)
    await upstream_site.start()
    # Keep the server alive forever (Future never resolves).
    await asyncio.Future()
def main():
    """Entry point: run the echo server's event loop until it is stopped."""
    asyncio.get_event_loop().run_until_complete(async_main())


if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Base hourly wage in dollars.
cash = float(17.50)
# Hours worked over the two-week pay period, prompted from the user (Python 2).
hours = float(raw_input("Hours worked in the past two weeks? "))
def payment():
    """Regular (base-rate) pay for every hour worked, overtime hours included."""
    return cash * hours
def over():
    """Overtime premium: hours beyond 80 earn an extra $8.75 each.

    NOTE(review): 8.75 is presumably half the $17.50 base rate
    (time-and-a-half on top of payment()) -- confirm.
    """
    if hours <= 80:
        return 0
    return (hours - 80) * 8.75
def final():
    # Total pay: base pay for all hours plus the overtime premium.
    return payment() + over()
print "Congrats! You've earned", final() , " dollars this pay period."
| StarcoderdataPython |
1759336 | <reponame>impedimentToProgress/ratchet<gh_stars>1-10
import sys
import re
# Checkpoint lines look like "<8-hex-addr>: CP: <counter>..."; capture the counter.
cp_re = re.compile("[0-9A-F]{8}: CP: ([0-9]*).*$")
# Memory-trace lines; capture the accessed address (0x........) for reads/writes.
read_re = re.compile("[0-9A-F]{8}: (?:Flash|Ram) read at (0x[0-9A-F]{8})=0x[0-9A-F]{8}$")
write_re = re.compile("[0-9A-F]{8}: (?:Flash|Ram) write at (0x[0-9A-F]{8})=0x[0-9A-F]{8}$")
def print_violation(violators, lines, output):
addr_re = re.compile("[0-9A-F]{8}: (?:Flash|Ram) (?:read|write) at (0x[0-9A-F]{8})=0x[0-9A-F]{8}$")
s = "Found idempotency violation!\n"
s += str(violators) + '\n'
s += "----- BEGIN -----\n"
for l in lines:
addr = addr_re.match(l)
if addr == None:
continue
for v in violators:
if v == addr.group(1):
s += str(l[:-1]) + '\n'
s += "------ END ------\n"
if s not in output:
output.append(s)
print s
def check_idempotency(reads, line, ignore):
    """Return True when the write in *line* aliases an earlier read of the
    same address made from a different instruction address.

    *reads* holds (address, trace_line) pairs; *ignore* is an address that
    is never reported.
    """
    addr_re = re.compile("([0-9A-F]{8}):")
    instr_addr = addr_re.match(line).group(1)
    write_addr = write_re.match(line).group(1)
    if write_addr == ignore:
        return False
    return any(write_addr == read_addr and instr_addr != addr_re.match(read_line).group(1)
               for read_addr, read_line in reads)
def parse_file(fname, ignore):
    """Scan a memory trace (file path, or stdin when fname == '-') for
    idempotency violations between checkpoints, printing each violation
    report and a final list of all aliasing addresses.

    *ignore* is an address excluded from the alias check.
    """
    if fname != '-':
        f = open(fname, 'r')
    else:
        f = sys.stdin
    # Per-checkpoint state; reset at every "CP:" marker.
    writes = []
    reads = []
    lines = []
    violators = []
    output = []
    all_violators = []
    for line in f:
        # Check to see if we're at a checkpoint
        #cp = cp_re.match(line)
        #if cp != None:
        if "CP: " in line:
            # Flush any pending violations for the section just ended.
            if len(violators):
                print_violation(violators, lines, output)
            writes = []
            reads = []
            lines = []
            violators = []
            continue
        lines.append(line)
        if "read at" in line:
            r = read_re.match(line)
            addr = r.group(1)
            # NOTE(review): `writes` holds (addr, line) tuples, so this
            # string-vs-tuple membership test never matches and every read
            # is recorded; probably meant to test against write[0] — confirm.
            if addr not in writes:
                reads.append((addr, line))
        if "write at" in line:
            w = write_re.match(line)
            addr = w.group(1)
            # Only the first write to an address in a section can violate.
            flag = False
            for write in writes:
                if addr == write[0]:
                    flag = True
                    break
            if flag == False and check_idempotency(reads, line, ignore):
                if addr not in violators:
                    violators.append(addr)
                if addr not in all_violators:
                    all_violators.append(addr)
            writes.append((addr, line))
        if "Program exit" in line:
            if len(violators):
                print_violation(violators, lines, output)
            break
    # Flush the final (possibly unterminated) section.
    if len(violators):
        print_violation(violators, lines, output)
    print "All aliasing addresses:"
    print all_violators
def idem_sect_length(fname):
    """Return the list of idempotent-section lengths (cycle deltas between
    consecutive "CP:" checkpoint counters) found in the trace file *fname*.

    The file is now opened with a `with` block; the previous version leaked
    the file handle.
    """
    lengths = []
    last_cp = 0
    with open(fname, 'r') as f:
        for line in f:
            m = cp_re.match(line)
            if m != None:
                lengths.append(int(m.group(1)) - last_cp)
                last_cp = int(m.group(1))
    return lengths
if __name__=="__main__":
    # Usage: a trace file path ('-' for stdin) plus one address to exclude.
    if len(sys.argv) < 3:
        print "Usage: python {} <filename> <ignore>".format(sys.argv[0])
        sys.exit()
    parse_file(sys.argv[1], sys.argv[2])
    #print idem_sect_length(sys.argv[1])
| StarcoderdataPython |
38141 | <reponame>vrautela/hail
# Every state a batch job can be in.
states = {'Pending', 'Ready', 'Creating', 'Running', 'Cancelled', 'Error', 'Failed', 'Success'}
# Terminal states: a job in one of these never transitions again.
complete_states = ('Cancelled', 'Error', 'Failed', 'Success')
# Allowed edges of the job state machine; terminal states have no outgoing edges.
valid_state_transitions = {
    'Pending': {'Ready'},
    'Ready': {'Creating', 'Running', 'Cancelled', 'Error'},
    'Creating': {'Ready', 'Running'},
    'Running': {'Ready', 'Cancelled', 'Error', 'Failed', 'Success'},
    'Cancelled': set(),
    'Error': set(),
    'Failed': set(),
    'Success': set(),
}
# Task phases of a job (presumably run in this order -- confirm with scheduler).
tasks = ('input', 'main', 'output')
# Supported memory tiers.
memory_types = ('lowmem', 'standard', 'highmem')
# Maximum accepted HTTP request body size: 8 MiB.
HTTP_CLIENT_MAX_SIZE = 8 * 1024 * 1024
# Serialization/compatibility version numbers.
BATCH_FORMAT_VERSION = 6
STATUS_FORMAT_VERSION = 5
INSTANCE_VERSION = 22
# Storage sizing limits: 64 TiB max persistent SSD, 5 GB reserved per core.
MAX_PERSISTENT_SSD_SIZE_GIB = 64 * 1024
RESERVED_STORAGE_GB_PER_CORE = 5
| StarcoderdataPython |
195287 | from eth.vm.forks.byzantium import ByzantiumVM
from .blocks import StretchBlock
from eth.vm.forks.byzantium.state import ByzantiumState
from .xmessage import StretchXMessage
from typing import (
Tuple,
)
from .headers import StretchBlockHeader
from eth.db.trie import make_trie_root_and_nodes
class StretchVM(ByzantiumVM):
    """Byzantium-derived VM for the 'stretch' fork that additionally tracks
    cross-chain messages (received and sent) on its blocks."""
    # fork name
    fork = 'stretch'
    # classes
    block_class = StretchBlock
    _state_class = ByzantiumState

    def _persist_xmessage_trie(self, xmessages: Tuple[StretchXMessage, ...]):
        """Build a trie over *xmessages*, persist its nodes and return the root hash.

        Shared by the two setters below (previously duplicated in each).
        """
        root_hash, kv_nodes = make_trie_root_and_nodes(xmessages)
        self.chaindb.persist_trie_data_dict(kv_nodes)
        return root_hash

    def set_block_xmessages_received(self,
                                     base_block: StretchBlock,
                                     new_header: StretchBlockHeader,
                                     xmessages_received: Tuple[StretchXMessage, ...]) -> StretchBlock:
        """Return a copy of *base_block* carrying *xmessages_received*, with
        *new_header* updated to hold their trie root."""
        tx_root_hash = self._persist_xmessage_trie(xmessages_received)
        return base_block.copy(
            xmessages_received=xmessages_received,
            header=new_header.copy(
                xmessage_received_root=tx_root_hash,
            ),
        )

    def set_block_xmessages_sent(self,
                                 base_block: StretchBlock,
                                 new_header: StretchBlockHeader,
                                 xmessages_sent: Tuple[StretchXMessage, ...]) -> StretchBlock:
        """Return a copy of *base_block* carrying *xmessages_sent*, with
        *new_header* updated to hold their trie root."""
        tx_root_hash = self._persist_xmessage_trie(xmessages_sent)
        return base_block.copy(
            xmessages_sent=xmessages_sent,
            header=new_header.copy(
                xmessage_sent_root=tx_root_hash,
            ),
        )
| StarcoderdataPython |
4809464 | # -*- coding: utf-8 -*-
#Plotting is on python since this will make it much easier to debug and adjsut
#no need to recompile everytime i change graph color....
#needs a serious refactor
from matplotlib import pyplot as plt
import numpy as np
from .nputil import mid, minmax, vector_apply
from util import parse_arg, describe
from math import sqrt, ceil, floor
from warnings import warn
def draw_simultaneous(self, minuit=None, args=None, errors=None, **kwds):
    """Draw every component pdf of a simultaneous fit, one subplot each,
    laid out on a near-square grid. Returns the list of per-component
    draw results."""
    numf = len(self.allf)
    ret = []
    # Pick rows x cols just large enough to hold numf subplots.
    numraw = sqrt(numf)
    numcol = ceil(numraw)
    numrow = floor(numraw) if floor(numraw)*numcol>=numf else ceil(numraw)
    for i in range(numf):
        plt.subplot(numrow, numcol, i+1)
        # Each component gets its own slice of the shared argument vector.
        part_args, part_errors = self.args_and_error_for(i, minuit, args, errors)
        ret.append(self.allf[i].draw(args=part_args, errors=part_errors, **kwds))
    return ret
def _get_args_and_errors(self, minuit=None, args=None, errors=None):
    """Resolve fit arguments and errors with a fixed priority.

    1) take both from *minuit* when it is given;
    2) otherwise take *args* (parsed first when it is a dict);
    3) otherwise fall back to ``self.last_arg``.
    *errors*, when given, is returned as-is; otherwise None.
    """
    if minuit is not None:
        return minuit.args, minuit.errors
    if args is None:
        resolved_args = self.last_arg
    elif isinstance(args, dict):
        resolved_args = parse_arg(self, args)
    else:
        resolved_args = args
    resolved_errors = errors if errors is not None else None
    return resolved_args, resolved_errors
def _param_text(parameters, arg, error):
    """Build the multi-line 'name = value±error' text shown on plots.

    *error* is either None (no uncertainties shown) or a mapping from
    parameter name to its error.
    """
    txt = u''
    # zip, not enumerate: the index was never used.
    for k, v in zip(parameters, arg):
        txt += u'%s = %5.4g'%(k, v)
        if error is not None:
            txt += u'±%5.4g'%error[k]
        txt += u'\n'
    return txt
#from UML
def draw_ulh(self, minuit=None, bins=100, ax=None, bound=None,
             parmloc=(0.05, 0.95), nfbins=200, print_par=True, grid=True,
             args=None, errors=None, parts=False, show_errbars='normal'):
    """Draw the data histogram and fitted pdf of an unbinned likelihood.

    Returns ``(data_ret, error_ret, total_ret, part_ret)``:
    data_ret = (bin_edges, contents), error_ret = (err_low, err_high),
    total_ret = total pdf draw result, part_ret = component draw results.
    """
    data_ret = None
    error_ret = None
    total_ret = None
    part_ret = []
    ax = plt.gca() if ax is None else ax
    arg, error = _get_args_and_errors(self, minuit, args, errors)
    n, e = np.histogram(self.data, bins=bins, range=bound, weights=self.weights)
    dataint = (n * np.diff(e)).sum()  # integral of the data histogram
    data_ret = (e, n)
    if not show_errbars:
        pp = ax.hist(mid(e), bins=e, weights=n, histtype='step')
        error_ret = (np.sqrt(n), np.sqrt(n))
    else:
        w2 = None
        if show_errbars == 'normal':
            w2 = n
            error_ret = (np.sqrt(n), np.sqrt(n))
        elif show_errbars == 'sumw2':
            weights = None
            # BUGFIX: was `self.weights != None`, which is an elementwise
            # comparison for ndarrays and raises inside `if`.
            if self.weights is not None:
                weights = self.weights ** 2
            w2, e = np.histogram(self.data, bins=e, weights=weights)
            error_ret = (np.sqrt(w2), np.sqrt(w2))
        else:
            raise ValueError('show_errbars must be \'normal\' or \'sumw2\'')
        pp = ax.errorbar(mid(e), n, np.sqrt(w2), fmt='b+', capsize=0)
    draw_arg = [('lw', 2)]
    if not parts:
        draw_arg.append(('color', 'r'))
    # Draw pdf with finer bins
    ef = np.linspace(e[0], e[-1], nfbins + 1)
    scale = dataint if not self.extended else nfbins / float(bins)
    total_ret = draw_pdf_with_edges(self.f, arg, ef, ax=ax, density=not self.extended,
                                    scale=scale, **dict(draw_arg))
    if parts:
        f_parts = getattr(self.f, 'parts', None)
        if f_parts is not None:
            for p in f_parts():
                ret = draw_pdf_with_edges(p, arg, ef, ax=ax, scale=scale,
                                          density=not self.extended)
                part_ret.append(ret)
    ax.grid(grid)
    txt = _param_text(describe(self), arg, error)
    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',
                transform=ax.transAxes)
    return (data_ret, error_ret, total_ret, part_ret)
def draw_residual_ulh(self, minuit=None, bins=100, ax=None, bound=None,
                      parmloc=(0.05, 0.95), print_par=False, grid=True,
                      args=None, errors=None, show_errbars=True,
                      errbar_algo='normal', norm=False):
    """Draw (data - fitted pdf) residuals for an unbinned likelihood.

    With norm=True residuals are divided by their errors (pulls).
    Returns ``(bin_midpoints, residuals, errors)``.
    """
    ax = plt.gca() if ax is None else ax
    arg, error = _get_args_and_errors(self, minuit, args, errors)
    n, e = np.histogram(self.data, bins=bins, range=bound, weights=self.weights)
    dataint = (n * np.diff(e)).sum()
    scale = dataint if not self.extended else 1.0
    w2 = None
    if errbar_algo == 'normal':
        w2 = n
    elif errbar_algo == 'sumw2':
        weights = None
        # BUGFIX: was `self.weights != None`, which is an elementwise
        # comparison for ndarrays and raises inside `if`.
        if self.weights is not None:
            weights = self.weights ** 2
        w2, e = np.histogram(self.data, bins=e, weights=weights)
    else:
        raise ValueError('errbar_algo must be \'normal\' or \'sumw2\'')
    yerr = np.sqrt(w2)
    arg = parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg
    # Expected content per bin from the fitted pdf.
    yf = vector_apply(self.f, mid(e), *arg)
    yf *= (scale * np.diff(e) if self.extended else scale)
    n = n - yf
    if norm:
        # Pulls: divide by the error where it is non-zero.
        sel = yerr > 0
        n[sel] /= yerr[sel]
        yerr = np.ones(len(yerr))
    if show_errbars:
        pp = ax.errorbar(mid(e), n, yerr, fmt='b+', capsize=0)
    else:  # No errorbars
        pp = ax.bar(e[:-1], n, width=np.diff(e))
    # Zero line as visual reference.
    ax.plot([e[0], e[-1]], [0., 0.], 'r-')
    ax.grid(grid)
    txt = _param_text(describe(self), arg, error)
    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',
                transform=ax.transAxes)
    return mid(e), n, yerr
#from chi2 regression
def draw_x2(self, minuit=None, ax=None, parmloc=(0.05, 0.95), print_par=True,
            args=None, errors=None, grid=True, parts=False, nbins=None):
    """Draw the data points and fitted curve of a chi^2 regression.

    Returns ``(data_ret, error_ret, total_ret, part_ret)``.
    """
    data_ret = None
    error_ret = None
    total_ret = None
    part_ret = []
    ax = plt.gca() if ax is None else ax
    arg, error = _get_args_and_errors(self, minuit, args, errors)
    x = self.x
    y = self.y
    data_err = self.error
    # Draw data points
    data_ret = x, y
    if data_err is None:
        ax.plot(x, y, '+')
        # BUGFIX: both branches previously assigned to a stray `err_ret`
        # local, so the returned `error_ret` was always None.
        error_ret = (np.ones(len(self.x)), np.ones(len(self.x)))
    else:
        ax.errorbar(x, y, data_err, fmt='+', capsize=0)
        error_ret = (data_err, data_err)
    draw_arg = [('lw', 2)]
    draw_arg.append(('color', 'r'))
    # Draw PDF curve(s), optionally on a finer grid of nbins points.
    if nbins is not None:
        x = np.linspace(x[0], x[-1], nbins)
    total_ret = draw_pdf_with_midpoints(self.f, arg, x, ax=ax, **dict(draw_arg))
    if parts:
        f_parts = getattr(self.f, 'parts', None)
        if f_parts is not None:
            for p in f_parts():
                tmp = draw_pdf_with_midpoints(p, arg, x, ax=ax, **dict(draw_arg))
                part_ret.append(tmp)
    # Print text
    txt = _param_text(describe(self), arg, error)
    chi2 = self(*arg)
    if self.ndof > 0:
        txt += u'chi2/ndof = %5.4g(%5.4g/%d)' % (chi2 / self.ndof, chi2, self.ndof)
    else:
        txt += u'chi2/ndof = (%5.4g/%d)' % (chi2, self.ndof)
    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',
                transform=ax.transAxes)
    return (data_ret, error_ret, total_ret, part_ret)
def draw_x2_residual(self, minuit=None, ax=None, args=None, errors=None, grid=True,
                     norm=False):
    """Draw (data - fitted curve) residuals of a chi^2 regression.

    With norm=True residuals are divided by the data errors (pulls).
    Returns ``(x, residuals, plotted_errors)``.
    """
    ax = plt.gca() if ax is None else ax
    arg, error = _get_args_and_errors(self, minuit, args, errors)
    x=self.x
    y=self.y
    data_err = self.error
    f=self.f
    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(f, x, *arg)
    yplot= y-yf
    eplot= data_err if data_err is not None else np.zeros(len(x))
    if norm:
        if data_err is None:
            warn(RuntimeWarning('No error on data points; cannot normalize to error'))
        else:
            yplot= yplot/data_err
            # NOTE(review): data_err/data_err yields 1s, but NaN where an
            # error is exactly 0 -- confirm zero errors cannot occur here.
            eplot= data_err/data_err
    ax.errorbar(x, yplot, eplot, fmt='b+', capsize=0)
    ax.grid(grid)
    return x, yplot, eplot
#from binned chi2
def draw_bx2(self, minuit=None, parmloc=(0.05, 0.95), nfbins=500, ax=None,
             print_par=True, args=None, errors=None, parts=False, grid=True):
    """Draw data and fitted pdf for a binned chi^2 cost function.

    Returns ``(data_ret, error_ret, total_ret, part_ret)``.
    """
    data_ret = None
    error_ret = None
    total_ret = None
    part_ret = []
    ax = plt.gca() if ax is None else ax
    arg, error = _get_args_and_errors(self, minuit, args, errors)
    m = mid(self.edges)
    ax.errorbar(m, self.h, self.err, fmt='+', capsize=0)
    data_ret = (self.edges, self.h)
    error_ret = (self.err, self.err)
    bound = (self.edges[0], self.edges[-1])
    scale = nfbins/float(self.bins) #scale back to bins
    draw_arg = [('lw', 2)]
    if not parts:
        draw_arg.append(('color', 'r'))
    # Total pdf drawn on a finer grid of nfbins bins.
    total_ret = draw_pdf(self.f, arg, bins=nfbins, bound=bound, ax=ax, density=False,
                         scale=scale, **dict(draw_arg))
    if parts:
        f_parts = getattr(self.f, 'parts', None)
        if f_parts is not None:
            for p in f_parts():
                tmp = draw_pdf(p, arg, bound=bound, bins=nfbins, ax=ax, density=False,
                               scale=scale)
                part_ret.append(tmp)
    ax.grid(grid)
    txt = _param_text(describe(self), arg, error)
    chi2 = self(*arg)
    # Append the goodness-of-fit summary to the parameter text.
    if self.ndof > 0:
        txt+=u'chi2/ndof = %5.4g(%5.4g/%d)'%(chi2/self.ndof, chi2, self.ndof)
    else:
        txt+=u'chi2/ndof = (%5.4g/%d)'%(chi2, self.ndof)
    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',
                transform=ax.transAxes)
    return (data_ret, error_ret, total_ret, part_ret)
#from binnedLH
def draw_blh(self, minuit=None, parmloc=(0.05, 0.95),
             nfbins=1000, ax=None, print_par=True, grid=True,
             args=None, errors=None, parts=False):
    """Draw data and fitted pdf for a binned likelihood.

    Returns ``(data_ret, error_ret, total_ret, part_ret)``.
    """
    data_ret = None
    error_ret = None
    total_ret = None
    part_ret = []
    ax = plt.gca() if ax is None else ax
    arg, error = _get_args_and_errors(self, minuit, args, errors)
    m = mid(self.edges)
    # Poisson errors, or sum-of-weights-squared errors for weighted data.
    if self.use_w2:
        err = np.sqrt(self.w2)
    else:
        err = np.sqrt(self.h)
    n= np.copy(self.h)
    dataint= (n*np.diff(self.edges)).sum()
    scale= dataint if not self.extended else 1.0
    ax.errorbar(m, n, err, fmt='+', capsize=0)
    data_ret = (self.edges, n)
    error_ret = (err, err)
    draw_arg = [('lw', 2)]
    if not parts:
        draw_arg.append(('color', 'r'))
    bound = (self.edges[0], self.edges[-1])
    #scale back to bins
    if self.extended:
        scale= nfbins/float(self.bins)
    total_ret = draw_pdf(self.f, arg, bins=nfbins, bound=bound, ax=ax, density=not self.extended,
                         scale=scale, **dict(draw_arg))
    if parts:
        f_parts = getattr(self.f, 'parts', None)
        if f_parts is not None:
            for p in f_parts():
                tmp = draw_pdf(p, arg, bins=nfbins, bound=bound, ax=ax,
                               density=not self.extended, scale=scale)
                part_ret.append(tmp)
    ax.grid(grid)
    txt = _param_text(describe(self), arg, error)
    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',
                transform=ax.transAxes)
    return (data_ret, error_ret, total_ret, part_ret)
def draw_residual_blh(self, minuit=None, parmloc=(0.05, 0.95),
                      ax=None, print_par=False, args=None, errors=None,
                      norm=False, grid=True):
    """Draw the residuals (data - fitted pdf) of a binned likelihood fit.

    When *norm* is True the residuals are divided by their errors (pulls)
    and the error bars are set to one. A red zero line is drawn for
    reference.

    Returns ``(m, n, err)``: the bin midpoints, residual values and errors.
    """
    ax = plt.gca() if ax is None else ax
    arg, error = _get_args_and_errors(self, minuit, args, errors)
    m = mid(self.edges)
    if self.use_w2:
        err = np.sqrt(self.w2)
    else:
        err = np.sqrt(self.h)
    n= np.copy(self.h)
    dataint= (n*np.diff(self.edges)).sum()
    scale= dataint if not self.extended else 1.0
    arg = parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg
    # Evaluate the pdf at the midpoints and convert to per-bin expectation.
    yf = vector_apply(self.f, m, *arg)
    yf*= (scale*np.diff(self.edges) if self.extended else scale)
    n = n- yf  # residual: observed minus expected
    if norm:
        # Pull distribution: residual / error; avoid dividing where err == 0.
        sel= err>0
        n[sel]/= err[sel]
        err= np.ones(len(err))
    ax.errorbar(m, n, err, fmt='+', capsize=0)
    ax.plot([self.edges[0],self.edges[-1]],[0.,0.], 'r-')
    ax.grid(grid)
    txt = _param_text(describe(self), arg, error)
    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',
                transform=ax.transAxes)
    return m, n, err
def draw_compare(f, arg, edges, data, errors=None, ax=None, grid=True, normed=False, parts=False):
    """Draw binned *data* (with *errors*) against pdf *f* evaluated at the
    bin midpoints of *edges*.

    When *normed* is True the data are shown as a density (divided by bin
    width and total count) and the pdf is plotted directly; otherwise the
    pdf values are multiplied by the bin widths to compare against counts.

    Returns ``(x, yf, data)``: midpoints, pdf values at midpoints, and the
    original data.

    TODO: this needs to be rewritten
    """
    #arg is either map or tuple
    ax = plt.gca() if ax is None else ax
    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    x = (edges[:-1]+edges[1:])/2.0  # bin midpoints
    bw = np.diff(edges)  # bin widths
    yf = vector_apply(f, x, *arg)
    total = np.sum(data)
    if normed:
        ax.errorbar(x, data/bw/total, errors/bw/total, fmt='b+', capsize=0)
        ax.plot(x, yf, 'r', lw=2)
    else:
        ax.errorbar(x, data, errors, fmt='b+', capsize=0)
        ax.plot(x, yf*bw, 'r', lw=2)
    #now draw the parts
    if parts:
        if not hasattr(f, 'eval_parts'):
            warn(RuntimeWarning('parts is set to True but function does '
                                'not have eval_parts method'))
        else:
            # Components are evaluated pointwise and transposed so each
            # component becomes one curve across all midpoints.
            scale = bw if not normed else 1.
            parts_val = list()
            for tx in x:
                val = f.eval_parts(tx, *arg)
                parts_val.append(val)
            py = zip(*parts_val)
            for y in py:
                tmpy = np.array(y)
                ax.plot(x, tmpy*scale, lw=2, alpha=0.5)
    plt.grid(grid)
    return x, yf, data
def draw_normed_pdf(f, arg, bound, bins=100, scale=1.0, density=True, ax=None, **kwds):
    """Draw pdf *f* normalized over *bound* (see :func:`draw_pdf`).

    Bug fix: the ``bins``, ``scale`` and ``density`` arguments were
    previously ignored — hard-coded ``bins=100, scale=1.0, density=True``
    were forwarded regardless of what the caller passed. They are now
    forwarded faithfully, with ``normed_pdf=True`` forced.
    """
    return draw_pdf(f, arg, bound, bins=bins, scale=scale, density=density,
                    normed_pdf=True, ax=ax, **kwds)
def draw_pdf(f, arg, bound, bins=100, scale=1.0, density=True,
             normed_pdf=False, ax=None, **kwds):
    """
    draw pdf with given argument and bounds.
    **Arguments**
        * **f** your pdf. The first argument is assumed to be independent
          variable
        * **arg** argument can be tuple or list
        * **bound** tuple(xmin,xmax)
        * **bins** number of bins to plot pdf. Default 100.
        * **scale** multiply pdf by given number. Default 1.0.
        * **density** plot density instead of expected count in each bin
          (pdf*bin width). Default True.
        * **normed_pdf** Normalize pdf in given bound. Default False
        * The rest of keyword argument will be pass to pyplot.plot
    **Returns**
        x, y of what's being plot
    """
    # NOTE(review): linspace(bound[0], bound[1], bins) yields `bins` edge
    # points, i.e. bins-1 intervals — confirm this off-by-one is intended.
    edges = np.linspace(bound[0], bound[1], bins)
    return draw_pdf_with_edges(f, arg, edges, ax=ax, scale=scale, density=density,
                               normed_pdf=normed_pdf, **kwds)
def draw_pdf_with_edges(f, arg, edges, ax=None, scale=1.0, density=True,
                        normed_pdf=False, **kwds):
    """Draw pdf *f* evaluated at the midpoints of *edges*.

    When *density* is False the scale is multiplied by the per-bin widths so
    the curve represents expected counts rather than a density.
    """
    midpoints = (edges[1:] + edges[:-1]) / 2.0
    if density:
        effective_scale = scale * 1.
    else:
        effective_scale = scale * np.diff(edges)
    return draw_pdf_with_midpoints(f, arg, midpoints, ax=ax,
                                   scale=effective_scale,
                                   normed_pdf=normed_pdf, **kwds)
def draw_pdf_with_midpoints(f, arg, x, ax=None, scale=1.0, normed_pdf=False, **kwds):
    """Evaluate pdf *f* at points *x*, optionally normalize over those points,
    apply *scale*, plot the curve and return ``(x, yf)``."""
    ax = plt.gca() if ax is None else ax
    # arg may be a dict of parameter-name -> value; convert to positional.
    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(f, x, *arg)
    if normed_pdf:
        normed_factor = sum(yf) # assume equal binwidth
        yf /= normed_factor
    yf *= scale
    ax.plot(x, yf, **kwds)
    return x, yf
#draw comparison between function given args and data
def draw_compare_hist(f, arg, data, bins=100, bound=None, ax=None, weights=None,
                      normed=False, use_w2=False, parts=False, grid=True):
    """
    draw histogram of data with poisson error bar and f(x,*arg).
    ::
        data = np.random.rand(10000)
        f = gaussian
        draw_compare_hist(f, {'mean':0,'sigma':1}, data, normed=True)
    **Arguments**
        - **f**
        - **arg** argument pass to f. Can be dictionary or list.
        - **data** data array
        - **bins** number of bins. Default 100.
        - **bound** optional boundary of plot in tuple form. If `None` is
          given, the bound is determined from min and max of the data. Default
          `None`
        - **weights** weights array. Default None.
        - **normed** optional normalized data flag. Default False.
        - **use_w2** scaled error down to the original statistics instead of
          weighted statistics.
        - **parts** draw parts of pdf. (Works with AddPdf and Add2PdfNorm).
          Default False.
    **Returns**
        ``(x, yf, data)`` as returned by :func:`draw_compare`.
    """
    ax = plt.gca() if ax is None else ax
    bound = minmax(data) if bound is None else bound
    h, e = np.histogram(data, bins=bins, range=bound, weights=weights)
    err = None
    if weights is not None and use_w2:
        # Weighted fills: per-bin error is sqrt(sum of squared weights).
        err, _ = np.histogram(data, bins=bins, range=bound,
                              weights=weights*weights)
        err = np.sqrt(err)
    else:
        # Plain Poisson error.
        err = np.sqrt(h)
    return draw_compare(f, arg, e, h, err, ax=ax, grid=grid, normed=normed, parts=parts)
| StarcoderdataPython |
71432 | <reponame>wy1157497582/arcpy
"""-----------------------------------------------------------------------------
 Script Name: Clip Multiple Feature Classes
 Description: Clips one or more shapefiles
              from a folder and places the clipped
              feature classes into a geodatabase.
 Created By:  Insert name here.
 Date:        Insert date here.
-----------------------------------------------------------------------------"""
# NOTE(review): 'cc' import looks stray/unused in this script — confirm it
# is actually needed before removing.
import cc
# Import ArcPy site-package and os modules
import arcpy
import os
# Set the input workspace (folder containing the feature classes to clip)
arcpy.env.workspace = arcpy.GetParameterAsText(0)
# Set the clip feature class (the clipping boundary)
clipFeatures = arcpy.GetParameterAsText(1)
# Set the output workspace (geodatabase receiving the clipped results)
outWorkspace = arcpy.GetParameterAsText(2)
# Set the XY tolerance used by the Clip operation
clusterTolerance = arcpy.GetParameterAsText(3)
try:
    # Get a list of the featureclasses in the input folder
    fcs = arcpy.ListFeatureClasses()
    for fc in fcs:
        # Validate the new feature class name for the output workspace.
        featureClassName = arcpy.ValidateTableName(fc, outWorkspace)
        outFeatureClass = os.path.join(outWorkspace, featureClassName)
        # Clip each feature class in the list with the clip feature class.
        # Do not clip the clipFeatures, it may be in the same workspace.
        if fc != os.path.basename(clipFeatures):
            arcpy.Clip_analysis(fc, clipFeatures, outFeatureClass,
                                clusterTolerance)
except Exception as err:
    # Surface the failure both to the geoprocessing log and stdout.
    arcpy.AddError(err)
    print err
| StarcoderdataPython |
3277883 | <reponame>Whosemario/stackless-python
from stacklessness import *
# Demo of cooperative tasklet scheduling (Python 2): each tasklet prints,
# yields control via schedule(), then prints again when resumed.
def f():
    print 'f1'
    schedule()  # yield to the next runnable tasklet
    print 'f2'
def g():
    print 'g1'
    schedule()
    print 'g2'
def h():
    print 'h1'
    schedule()
    print 'h2'
# Create three tasklets; calling the tasklet wrapper binds and readies it.
t1 = tasklet(f)()
t2 = tasklet(g)()
t3 = tasklet(h)()
# Running the first tasklet drives the round-robin through all of them.
t1.run()
| StarcoderdataPython |
197403 | <reponame>FatliTalk/learnenglish
# Generated by Django 3.2.12 on 2022-03-10 05:01
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: removes two model fields.

    # Must run after the migration that introduced the Review model.
    dependencies = [
        ('words_in_sentences', '0005_review'),
    ]

    operations = [
        # Drop Review.version.
        migrations.RemoveField(
            model_name='review',
            name='version',
        ),
        # Drop Sentence.is_understand.
        migrations.RemoveField(
            model_name='sentence',
            name='is_understand',
        ),
    ]
| StarcoderdataPython |
4808657 | """ Characteristic matrices """
from larlib import *
print "\n>>> brc2Csr"
V = [[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]]
FV = [[0, 1, 3], [1, 2, 4], [1, 3, 4], [2, 4, 5]]
EV = [[0,1],[0,3],[1,2],[1,3],[1,4],[2,4],[2,5],[3,4],[4,5]]
csrFV = csrCreate(FV)
csrEV = csrCreate(EV)
print "\ncsrCreate(FV) =\n", csrFV
VIEW(STRUCT(MKPOLS((V,FV))))
VIEW(STRUCT(MKPOLS((V,EV))))
print "\n>>> csr2DenseMatrix"
print "\nFV =\n", csr2DenseMatrix(csrFV)
print "\nEV =\n", csr2DenseMatrix(csrEV)
| StarcoderdataPython |
3347700 | <filename>labs/stacktrain/core/node_builder.py
import stacktrain.config.general as conf
import stacktrain.core.autostart as autostart
import stacktrain.batch_for_windows as wbatch
def build_nodes(cluster_cfg):
    """Run the autostart scripts that build the nodes for *cluster_cfg*.

    When Windows-batch output is enabled (conf.wbatch), the node build is
    bracketed by a batch-file header and footer.
    """
    node_config = "{}_{}".format(conf.distro, cluster_cfg)
    if conf.wbatch:
        wbatch.wbatch_begin_node(node_config)
    autostart.autostart_reset()
    autostart.autostart_from_config("scripts." + node_config)
    if conf.wbatch:
        wbatch.wbatch_end_file()
| StarcoderdataPython |
3298478 | <reponame>vishalbelsare/event-registry-python<gh_stars>100-1000
from eventregistry._version import __version__
from eventregistry.Base import *
from eventregistry.EventForText import *
from eventregistry.ReturnInfo import *
from eventregistry.Query import *
from eventregistry.QueryEvents import *
from eventregistry.QueryEvent import *
from eventregistry.QueryArticles import *
from eventregistry.QueryArticle import *
from eventregistry.QueryMentions import *
from eventregistry.QueryStory import *
from eventregistry.Counts import *
from eventregistry.DailyShares import *
from eventregistry.Info import *
from eventregistry.Recent import *
from eventregistry.Trends import *
from eventregistry.Analytics import *
from eventregistry.TopicPage import *
from eventregistry.EventRegistry import * | StarcoderdataPython |
3394263 | """
extract within- and between-module correlation for each module/session
"""
import os,sys
import numpy
import ctypes
basedir=os.environ['MYCONNECTOME_DIR']
def r_to_z(r):
    """Fisher r-to-z transform of a correlation array.

    Entries that transform to inf (|r| == 1) or nan (invalid r) are zeroed
    so that downstream averaging is not poisoned.
    """
    fisher = numpy.log((1.0 + r) / (1.0 - r)) * 0.5
    fisher[numpy.isinf(fisher)] = 0
    fisher[numpy.isnan(fisher)] = 0
    return fisher
def z_to_r(z):
    """Inverse Fisher transform: map z values back to correlations r."""
    e2z = numpy.exp(2.0 * z)
    return (e2z - 1) / (e2z + 1)
def extract_module_summary():
    """Compute within- and between-module mean correlations per session.

    Reads module names and parcel-to-module assignments from ``basedir``,
    then for every subject/session data file computes (a) the mean pairwise
    correlation within each module and (b) the mean correlation between every
    module pair, averaging in Fisher-z space. Results are written per session
    under ``rsfmri/winmod_corr_data`` and ``rsfmri/bwmod_corr_data``, plus a
    label file for the between-module upper-triangle ordering.
    """
    # Read the human-readable module names; the first two lines are headers.
    f=open(os.path.join(basedir,'parcellation/module_names.txt'))
    network_names=[]
    for l in f.readlines():
        l_s=l.strip().split('\t')
        network_names.append(' '.join(l_s))
    f.close()
    network_names=network_names[2:]
    nmods=len(network_names)
    datadir=os.path.join(basedir,'combined_data_scrubbed')
    outdir=os.path.join(basedir,'rsfmri')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    outdir_bwmod=os.path.join(outdir,'bwmod_corr_data')
    if not os.path.exists(outdir_bwmod):
        os.makedirs(outdir_bwmod)
    outdir_winmod=os.path.join(outdir,'winmod_corr_data')
    if not os.path.exists(outdir_winmod):
        os.mkdir(outdir_winmod)
    # One session code per line.
    subcodes=[i.strip() for i in open(os.path.join(basedir,'subcodes.txt')).readlines()]
    # Parcel-to-module assignment table: roi, hemisphere, parcel, module.
    f=open(os.path.join(basedir,'rsfmri/module_assignments.txt'))
    roinum=[]
    hemis=[]
    parcelnum=[]
    modulenum=[]
    for l in f.readlines():
        l_s=l.strip().split()
        roinum.append(int(l_s[0]))
        hemis.append(l_s[1])
        parcelnum.append(int(l_s[2]))
        modulenum.append(float(l_s[3]))
    f.close()
    modulenum=numpy.array(modulenum)
    modules=numpy.unique(modulenum)
    # Non-positive module codes denote unassigned parcels; exclude them.
    modules=modules[modules>0]
    nparcels=616
    #subcodes=[subcodes[0]]
    for subcode in subcodes:
        datafile=os.path.join(datadir,subcode+'.txt')
        print datafile
        assert os.path.exists(datafile)
        data=numpy.loadtxt(datafile)
        data=data[:,:nparcels]
        modctr=0
        modrois={}
        modmeancorr_within=numpy.zeros(len(modules))
        modmeancorr_between=numpy.zeros((len(modules),len(modules)))
        moddata={}
        # NOTE(review): moddata_unscrubbed is never populated or used below.
        moddata_unscrubbed={}
        # Slice the timeseries columns belonging to each module.
        for m in modules:
            modrois[m]=numpy.where(modulenum==m)[0]
            moddata[m]=data[:,modrois[m]]
        for m in modules:
            # Within-module: mean of the upper-triangle correlations,
            # averaged in Fisher-z space and transformed back to r.
            modcorr=numpy.corrcoef(moddata[m].T)
            modutr=numpy.triu_indices(modcorr.shape[0],1)
            modmeancorr_within[modctr]=z_to_r(numpy.mean(r_to_z(modcorr[modutr])))
            mbctr=-1
            for mb in modules:
                mbctr+=1
                if mb==m:
                    continue
                # Between-module: off-diagonal block of the joint
                # correlation matrix of the two modules' parcels.
                mc=numpy.corrcoef(moddata[m].T,moddata[mb].T)
                mcbw=mc[moddata[m].shape[1]:,:moddata[m].shape[1]]
                modmeancorr_between[modctr,mbctr]=z_to_r(numpy.mean(r_to_z(mcbw)))
            modctr+=1
        # Keep only the upper triangle (module pairs) for output.
        mcbw_utr=modmeancorr_between[numpy.triu_indices(nmods,1)]
        numpy.savetxt(os.path.join(outdir_winmod,subcode+'.txt'),modmeancorr_within)
        numpy.savetxt(os.path.join(outdir_bwmod,subcode+'.txt'),mcbw_utr)
    # Write the module-pair labels matching the upper-triangle ordering.
    f=open(os.path.join(outdir,'bwmod_corr_labels.txt'),'w')
    utr=numpy.triu_indices(nmods,1)
    for i in range(utr[0].shape[0]):
        f.write('%s\t%s\n'%(network_names[utr[0][i]].replace(' ','_'),network_names[utr[1][i]].replace(' ','_')))
    f.close()
if __name__ == "__main__":
    extract_module_summary()
3351068 | #!/usr/bin/env python
import argparse
import bluetooth
# Bluetooth device names reported by the left and right Joy-Con controllers.
joy_con_names = ['Joy-Con (L)', 'Joy-Con (R)']


def parse_cmd_line_args():
    """Parse the command line; no options are defined yet (-h/--help only)."""
    description = 'Interface with Nintendo switch joy con over bluetooth'
    arg_parser = argparse.ArgumentParser(description=description)
    arg_parser.parse_args()
def main():
    """Discover Joy-Con bluetooth services, connect to them and dump raw data.

    Steps: enumerate all advertised bluetooth services, keep those whose host
    device name matches a Joy-Con, open a socket per service (RFCOMM or
    L2CAP), then loop printing whatever each socket receives.
    """
    # Find services
    print 'Looking for services'
    services = bluetooth.find_service()
    joy_cons = []
    for service in services:
        print 'Found service: \n\t%s' % (service,)
        # Lookup the host name to see if its a Joy-Con
        name = bluetooth.lookup_name(service['host'])
        print 'The device providing the service is named: %s' % (name)
        if name in joy_con_names:
            print 'Found a Joy-Con'
            joy_cons.append(service)
    # Connect to all Found Joy-Cons
    print 'Connecting to all Joy-Cons'
    sockets = []
    for service in joy_cons:
        # Get the protocol and start a socket
        socket = None
        protocol = service['protocol']
        if protocol == 'RFCOMM':
            socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
        elif protocol == 'L2CAP':
            socket = bluetooth.BluetoothSocket(bluetooth.L2CAP)
        else:
            print "Unkown protocol!"
            continue
        # Connect to the socket on the correct port
        socket.connect((service['host'], service['port']))
        sockets.append(socket)
        print 'Connected'
    # Print all raw data (loops forever; terminate with Ctrl-C)
    while True:
        for i, socket in enumerate(sockets):
            print '-----------------------\nSocket %d' % (i,)
            print socket.recv(1024)
    # Close all sockets
    # NOTE(review): unreachable — the while True loop above never exits.
    for socket in sockets:
        socket.close()
if __name__ == '__main__':
    parse_cmd_line_args()
    main()
| StarcoderdataPython |
106223 | <reponame>atanna/bm
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from functools import partial
from matplotlib import pyplot as plt
def magic_benchpy(line='', cell=None):
    """
    Run benchpy.run
    %benchpy [[-i] [-g] [-n <N>] [-m <M>] [-p] [-r <R>] [-t <T>] -s<S>] statement
    where statement is Bench or Group or list with benches
    %%benchpy [[-i] -g<G> -m<M> -n<N> [-p] -s<S>]
    long description of statement
    Options:
    -i: return full information about benchmark results.
    -g: use information from garbage collector (with_gc=True).
    Default: 'False'.
    -n<N>: set maximum of batch size <N> (max_batch=<N>).
    Default: 10.
    -m<M>: set number of batches for fitting regression <M> (n_batches=<M>).
    Default: 10.
    batch_sizes = [1, ...,M-2<M>/<N>, M-<M>/<N>, <M>]
    -p: show plots with regression.
    -r<R>: repeat the loop iteration <R> (n_samples=<R>).
    Default 5.
    -t<T>: choose columns <T> to represent result.
    <T> = [t][c][f][s][m][M][r][g][i]
    where
    t='Time'
    c='CI' - confidence interval
    f='Features_time' - time for each regression parameter
    s='Std' - standard deviation for regression parameter (which means time)
    m='Min' - minimum of the time values
    M='Max' - maximum
    r="R2" - r2 regression score
    g='gc_time' - time for gc collections (useful only with python version >= 3.3)
    i='fit_info' - fitting information
    Default - default in repr.
    Examples
    --------
    ::
    In [1]: import benchpy as bp
    In [2]: %benchpy 10**10000
    +--------------+-------------------------------+-------------------------------+
    | Time (µs) | CI_tquant[0.95] | Features: ['batch' 'const'] |
    +--------------+-------------------------------+-------------------------------+
    | 225.33965124 | [ 210.72239262 239.54741751] | [ 177.29140495 48.04824629] |
    +--------------+-------------------------------+-------------------------------+
    In [3]: %benchpy -t tcsrmM 10**10000
    +---------------+-------------------------------+---------------+----------------+---------------+---------------+
    | Time (µs) | CI_tquant[0.95] | Std | R2 | Min | Max |
    +---------------+-------------------------------+---------------+----------------+---------------+---------------+
    | 226.600298929 | [ 213.60009798 240.16961693] | 7.00210625405 | 0.999999184569 | 179.693800055 | 226.248999752 |
    +---------------+-------------------------------+---------------+----------------+---------------+---------------+
    In [4]: def cycles(n):
    ...: for i in range(n):
    ...: arr = []
    ...: arr.append(arr)
    ...:
    In [9]: %benchpy -n 1000 cycles(100)
    +---------------+-----------------------------+-----------------------------+
    | Time (µs) | CI_tquant[0.95] | Features: ['batch' 'const'] |
    +---------------+-----------------------------+-----------------------------+
    | 23.3943861198 | [ 0. 25.96065552] | [ 20.87035101 2.52403511] |
    +---------------+-----------------------------+-----------------------------+
    In [10]: %benchpy -n 1000 -g cycles(100)
    +--------------+-----------------------------+-----------------------------+---------------+---------------------------+
    | Time (µs) | CI_tquant[0.95] | Features: ['batch' 'const'] | gc_time | predicted time without gc |
    +--------------+-----------------------------+-----------------------------+---------------+---------------------------+
    | 64.256959342 | [ 0. 99.92966164] | [ 28.80691753 35.45004181] | 7.67428691294 | 56.582672429 |
    +--------------+-----------------------------+-----------------------------+---------------+---------------------------+
    """
    # IPython imported lazily so the module can load outside IPython.
    from IPython import get_ipython
    from IPython.core.magics import UserMagics
    ip = get_ipython()
    # Parse the magic's option string; remaining text is the statement.
    opts, arg_str = UserMagics(ip).parse_options(
        line, 'igm:n:pr:t:', list_all=True, posix=False)
    if cell is not None:
        arg_str += '\n' + cell
        # NOTE(review): this assignment discards the '+= cell' result above —
        # only the transformed cell survives; confirm that is intended.
        arg_str = ip.input_transformer_manager.transform_cell(cell)
    with_gc = 'g' in opts
    n_samples = int(opts.get('r', [5])[0])
    max_batch = int(opts.get('n', [10])[0])
    # n_batches may never exceed the maximum batch size.
    n_batches = min(int(max_batch), int(opts.get('m', [10])[0]))
    table_keys = None
    table_labels = opts.get('t', [None])[0]
    if table_labels is not None:
        table_keys = table_labels
    # Compile the statement into a zero-arg callable run in the user namespace.
    f = partial(exec, arg_str, ip.user_ns)
    from . import run, bench
    res = run(bench("<magic>", f), with_gc=with_gc,
              n_samples=n_samples,
              n_batches=n_batches,
              max_batch=max_batch)
    if 'i' in opts:
        print(res._repr("Full"))
    else:
        print(res._repr(table_keys, with_empty=False))
    if 'p' in opts:
        res.plot()
        res.plot_features()
        plt.show()
def load_ipython_extension(ip):
    """Entry point invoked by IPython's %load_ext: register the magic."""
    ip.register_magic_function(magic_benchpy,
                               "line_cell",
                               magic_name="benchpy")
| StarcoderdataPython |
164150 | """This module contains functions that visualise solar agent control."""
from __future__ import annotations
from typing import Tuple, Dict, List
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from solara.plot.constants import COLORS, LABELS, MARKERS
def default_setup(figsize=None) -> None:
    """Create a new figure and apply the default matplotlib/seaborn styling.

    *figsize* defaults to (6, 3) inches when not supplied.
    """
    chosen_size = (6, 3) if figsize is None else figsize
    plt.figure(figsize=chosen_size, dpi=100, tight_layout=True)
    sns.set_style("ticks", {"dashes": False})
    sns.set_context("paper")
def plot_episode(
    data: Dict[str, np.array],
    colors: Dict[str, str] = None,
    labels: Dict[str, str] = None,
    markers: Dict[str, str] = None,
    selected_keys: List[str] = None,
    num_timesteps: int = 25,
    iteration: int = None,
    title: str = "Episode Trajectory",
    y_max: float = 4,
    y_min: float = -2.5,
    show_grid: bool = True,
    figsize: Tuple = (4.62, 3),
    rewards_key: str = "rewards",
    dpi: int = 100,
    include_episode_stats: bool = True,
):
    """Plot a single episode of battery control problem.

    *data* maps series names (e.g. "rewards", "cost", "power_diff") to
    per-timestep arrays; colors/labels/markers override the package defaults
    per series; *selected_keys* restricts which series are drawn.
    """
    # default_setup()
    # Use LaTeX rendering so the legend's bold/escaped text displays properly.
    matplotlib.rc("text", usetex=True)
    if colors is None:
        colors = COLORS
    if labels is None:
        labels = LABELS
    if markers is None:
        markers = MARKERS
    x = np.arange(0, num_timesteps)
    if rewards_key in data.keys():
        episode_reward = sum(data[rewards_key])
    else:
        episode_reward = None
    # Setting up the figure: major ticks at selected hours, minor per step.
    _, ax = plt.subplots(figsize=figsize, dpi=dpi)
    ax.set_xticks([0, 5, 10, 15, 20, 23], minor=False)
    ax.set_xticks(x, minor=True)
    ax.set_xticklabels([0, 5, 10, 15, 20, 23], minor=False)
    if show_grid:
        ax.yaxis.grid(True, which="major")
        ax.xaxis.grid(True, which="major")
        ax.xaxis.grid(True, which="minor")
    # ax.set_prop_cycle("color", colors)
    # Plotting the data: one line per (selected) series, with per-series
    # color/label/marker falling back to sensible defaults.
    for name, values in data.items():
        if selected_keys is None or name in selected_keys:
            if name in colors.keys():
                color = colors[name]
            else:
                color = None
            if name in labels.keys():
                label = labels[name]
            else:
                label = name
            if name in markers.keys():
                marker = markers[name]
            else:
                marker = "."
            # Escape '$' so LaTeX rendering does not treat it as math mode.
            label = label.replace("$", "\\$")
            ax.plot(values, label=label, marker=marker, color=color)
    if title is not None:
        if iteration is not None:
            iteration_str = "Iteration {:2.0f}, ".format(iteration)
        else:
            iteration_str = ""
        if episode_reward is not None:
            title += " ({}Overall reward: {:.3f})".format(
                iteration_str, episode_reward
            )
        plt.title(title)
    plt.ylabel("kW / kWh / other")
    plt.xlabel("Time step")
    # Adding overall data to the legend as a stats panel.
    if "power_diff" in data:
        power_diff_sum = float(sum(data["power_diff"]))
    else:
        power_diff_sum = 0
    handles, _ = ax.get_legend_handles_labels()
    if include_episode_stats:
        ep_summary_stats = (
            # "\\rule{{67pt}}{{0.25pt}}"
            "\n \\textbf{{Episode statistics}}"
            "\n Sum of rewards: {:>8.3f} \\\\"
            "\n Sum of costs: {:>15.3f} \\\\"
            "\n Sum of penalties: {:>11.3f}"
        ).format(
            float(sum(data["rewards"])),
            float(sum(data["cost"])),
            power_diff_sum,
        )
        # Invisible patch trick: adds a text-only entry to the legend.
        handles.append(matplotlib.patches.Patch(color="none", label=ep_summary_stats))
    plt.legend(
        bbox_to_anchor=(1.02, 1.025),
        loc="upper left",
        edgecolor="grey",
        handles=handles,
        # title="\\textbf{{Legend}}",
    )
    plt.ylim(ymin=y_min, ymax=y_max)
    # plt.show()
| StarcoderdataPython |
146567 | <filename>dcorch/common/endpoint_cache.py<gh_stars>0
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from keystoneauth1 import loading
from keystoneauth1 import session
from keystoneclient.v3 import client as keystone_client
from oslo_config import cfg
from oslo_log import log as logging
from dcorch.common import consts
LOG = logging.getLogger(__name__)
class EndpointCache(object):
    """Cache of Keystone internal service endpoints, keyed by region then
    service name. Populated once at construction and refreshable on demand.
    """

    def __init__(self):
        # region -> {service name -> internal endpoint URL}
        self.endpoint_map = collections.defaultdict(dict)
        self.admin_session = None
        self.keystone_client = None
        self._update_endpoints()

    # NOTE(review): declared @staticmethod but takes and uses 'self'; it only
    # works because callers invoke it as
    # EndpointCache._get_endpoint_from_keystone(self) — confirm before
    # converting it to a regular method.
    @staticmethod
    def _get_endpoint_from_keystone(self):
        """Authenticate as admin and build {region: {service: internal URL}}
        from the Keystone service and endpoint catalogs. Also caches the
        admin session and keystone client on *self* as a side effect."""
        loader = loading.get_plugin_loader(
            cfg.CONF.keystone_authtoken.auth_type)
        # TODO(John): figure out the domain stuff...
        auth = loader.load_from_options(
            auth_url=cfg.CONF.cache.auth_uri,
            username=cfg.CONF.cache.admin_username,
            user_domain_name="Default",
            password=cfg.CONF.cache.admin_password,
            project_name=cfg.CONF.cache.admin_tenant,
            project_domain_name="Default",
        )
        self.admin_session = session.Session(
            auth=auth, additional_headers=consts.USER_HEADER)
        cli = keystone_client.Client(session=self.admin_session)
        self.keystone_client = cli
        # Map service IDs to names so endpoints can be keyed by service name.
        service_id_name_map = {}
        for service in cli.services.list():
            service_dict = service.to_dict()
            service_id_name_map[service_dict['id']] = service_dict['name']
        region_service_endpoint_map = {}
        for endpoint in cli.endpoints.list():
            endpoint_dict = endpoint.to_dict()
            # Only the internal interface endpoints are cached.
            if endpoint_dict['interface'] != consts.KS_ENDPOINT_INTERNAL:
                continue
            region_id = endpoint_dict['region']
            service_id = endpoint_dict['service_id']
            url = endpoint_dict['url']
            service_name = service_id_name_map[service_id]
            if region_id not in region_service_endpoint_map:
                region_service_endpoint_map[region_id] = {}
            region_service_endpoint_map[region_id][service_name] = url
        return region_service_endpoint_map

    def _get_endpoint(self, region, service, retry):
        # On a cache miss, refresh from Keystone once (retry=True), then
        # give up and return '' if still missing.
        if service not in self.endpoint_map[region]:
            if retry:
                self.update_endpoints()
                return self._get_endpoint(region, service, False)
            else:
                return ''
        else:
            return self.endpoint_map[region][service]

    def _update_endpoints(self):
        # Merge the freshly fetched catalog into the cached map.
        endpoint_map = EndpointCache._get_endpoint_from_keystone(self)
        for region in endpoint_map:
            for service in endpoint_map[region]:
                self.endpoint_map[region][
                    service] = endpoint_map[region][service]

    def get_endpoint(self, region, service):
        """Get service endpoint url.
        :param region: region the service belongs to
        :param service: service type
        :return: url of the service ('' if unknown after one refresh)
        """
        return self._get_endpoint(region, service, True)

    def update_endpoints(self):
        """Update endpoint cache from Keystone.
        :return: None
        """
        self._update_endpoints()

    def get_all_regions(self):
        """Get region list.
        return: List of regions
        """
        return self.endpoint_map.keys()

    def get_session_from_token(self, token, project_id):
        """Get session based on token to communicate with openstack services.
        :param token: token with which the request is triggered.
        :param project_id: UUID of the project.
        :return: session object.
        """
        loader = loading.get_plugin_loader('token')
        auth = loader.load_from_options(auth_url=cfg.CONF.cache.auth_uri,
                                        token=token, project_id=project_id)
        sess = session.Session(auth=auth)
        return sess
| StarcoderdataPython |
4802010 | <filename>testing/build/gen_fixtures_location_symbol.py
#!/usr/bin/env python
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import sys
import os
def main():
    """Write a C++ snippet defining flutter::testing::GetFixturesPath()
    that returns the fixtures location given on the command line."""
    arg_parser = argparse.ArgumentParser(
        description='Create the symbol specifying the location of test fixtures.')
    arg_parser.add_argument('--fixtures_location_file', type=str, required=True)
    arg_parser.add_argument('--fixtures_location', type=str, required=True)
    options = arg_parser.parse_args()
    snippet = ('namespace flutter {namespace testing {const char* GetFixturesPath() {return "%s";}}}'
               % options.fixtures_location)
    with open(options.fixtures_location_file, 'w') as out_file:
        out_file.write(snippet)
sys.exit(main())
| StarcoderdataPython |
def fbx_definitions_elements(root, scene_data):
    """
    Templates definitions. Only used by Objects data afaik (apart from dummy GlobalSettings one).

    Builds the FBX "Definitions" element under *root*: version, total
    template user count, then the generated per-type templates.
    """
    definitions = elem_empty(root, b"Definitions")
    elem_data_single_int32(definitions, b"Version", FBX_TEMPLATES_VERSION)
    elem_data_single_int32(definitions, b"Count", scene_data.templates_users)
    fbx_templates_generate(definitions, scene_data.templates)
| StarcoderdataPython |
155677 | <reponame>rohansaini886/Peer-Programming-Hub-CP-Winter_Camp<filename>Sergeant-RANK/PRACTICE/1420A.py
# Codeforces 1420A: the array can be sorted by the allowed operation iff it
# contains at least one adjacent non-decreasing pair (or is already sorted).
for _ in range(int(input())):
    n = int(input())
    values = list(map(int, input().split(" ")))
    has_non_decreasing_pair = any(
        values[i] >= values[i - 1] for i in range(1, n)
    )
    print("YES" if has_non_decreasing_pair else "NO")
| StarcoderdataPython |
3203611 | """Base class for MailComposer objects."""
import os
import textwrap
from .exceptions import MailComposerError
class BaseMailComposer(object):
"""Base class for MailComposer objects.
Your subclass should implement the display() method to open the
message in its corresponding external application.
"""
__slots__ = ["_to", "_cc", "_bcc", "_subject",
"_body", "_body_format", "_attachments"]
def __init__(self, **kw):
"""Return a new MailComposer object."""
if "to" in kw and kw["to"]:
self._to = self._parse_recipients(kw["to"])
else:
self._to = []
if "cc" in kw and kw["cc"]:
self._cc = self._parse_recipients(kw["cc"])
else:
self._cc = []
if "bcc" in kw and kw["bcc"]:
self._bcc = self._parse_recipients(kw["bcc"])
else:
self._bcc = []
if "subject" in kw and kw["subject"]:
self._subject = str(kw["subject"])
else:
self._subject = ""
if "body" in kw and kw["body"]:
self._body = str(kw["body"])
else:
self._body = ""
# Note: self._parse_body_format() will check the formatting
# for this argument and raise an exception if there's a problem.
if "body_format" in kw:
self._body_format = self._parse_body_format(kw["body_format"])
else:
self._body_format = "hybrid"
# Attachments are not accepted as a keyword argument
self._attachments = []
def __str__(self):
"""Return the message as a string.
The format approximates RFC 2822.
"""
headers = []
lines = []
# Process the message headers
if self._to:
headers.append("To: {0}".format(", ".join(self._to)))
if self._cc:
headers.append("CC: {0}".format(", ".join(self._cc)))
if self._bcc:
headers.append("BCC: {0}".format(", ".join(self._bcc)))
if self._subject:
headers.append("Subject: {0}".format(self._subject))
# Format the message headers
for header in headers:
for line in textwrap.wrap(header, width=78,
subsequent_indent=" "):
lines.append(line)
# Add a blank line separating the headers from the body text
lines.append("")
# Format the body text
for body_line in self._body.splitlines():
if body_line:
for line in textwrap.wrap(body_line, width=78):
lines.append(line)
else:
# This is necessary to keep empty lines in the body text
lines.append("")
return "\n".join(lines)
# ------------------------------------------------------------------------
def attach_file(self, path):
"""Attach the specified file to this message."""
if os.path.exists(path):
# Always give the file's absolute path, since the email
# application might not share our working directory
self._attachments.append(os.path.abspath(path))
else:
message = "No such file or directory: '{0}'".format(path)
raise MailComposerError(message)
def display(self, blocking=True):
"""Display this message in your email application."""
raise NotImplementedError
# ------------------------------------------------------------------------
def _parse_body_format(self, body_format):
"""Parse the "body_format" property."""
if body_format in ("text", "html", "hybrid"):
return body_format
else:
message = "body_format must be one of 'text', 'html', or 'hybrid'"
raise ValueError(message)
def _parse_recipients(self, recipients):
"""Parse the "to", "cc", or "bcc" property."""
if isinstance(recipients, str):
return [recipients]
else:
return list(recipients)
# ------------------------------------------------------------------------
@property
def to(self):
"""List of recipients in the "To:" field."""
return self._to
@to.setter
def to(self, value):
if value:
self._to = self._parse_recipients(value)
else:
self._to = []
@to.deleter
def to(self):
del self._to
self._to = []
# ------------------------------------------------------------------------
@property
def cc(self):
"""List of recipients in the "CC:" field."""
return self._cc
@cc.setter
def cc(self, value):
if value:
self._cc = self._parse_recipients(value)
else:
self._cc = []
@cc.deleter
def cc(self):
del self._cc
self._cc = []
# ------------------------------------------------------------------------
@property
def bcc(self):
"""List of recipients in the "BCC:" field."""
return self._bcc
@bcc.setter
def bcc(self, value):
if value:
self._bcc = self._parse_recipients(value)
else:
self._bcc = []
@bcc.deleter
def bcc(self):
del self._bcc
self._to = []
# ------------------------------------------------------------------------
@property
def subject(self):
"""The subject line of the email."""
return self._subject
@subject.setter
def subject(self, value):
if value:
self._subject = str(value)
else:
self._subject = ""
@subject.deleter
def subject(self):
del self._subject
self._subject = ""
# ------------------------------------------------------------------------
@property
def body(self):
"""The body of the email."""
return self._body
@body.setter
def body(self, value):
if value:
self._body = str(value)
else:
self._body = ""
@body.deleter
def body(self):
del self._body
self._body = ""
    # ------------------------------------------------------------------------
    @property
    def body_format(self):
        """The format of the message body.

        Possible values are:

        'text'
            Indicates the message body is in plain-text format.
        'html'
            Indicates the message body is in HTML format.
        'hybrid' (default)
            Indicates the message body is in plain-text format, but
            the message should be sent using HTML formatting if your
            email application supports it.
        """
        return self._body_format
    @body_format.setter
    def body_format(self, value):
        # Validation/normalization is delegated to _parse_body_format.
        self._body_format = self._parse_body_format(value)
    # ------------------------------------------------------------------------
    @property
    def attachments(self):
        """List of files to attach to this email."""
        return self._attachments
| StarcoderdataPython |
4810257 | <filename>atividade2/util.py
# -*- coding: utf-8 -*-
'''
Metodos a serem usados em mais de uma questao serao colocados aqui
'''
# Definicao de metodos
def truncar(valor):
    """Clamp ``valor`` to the valid 8-bit channel range [0.0, 255.0]."""
    return min(max(valor, 0.0), 255.0)
| StarcoderdataPython |
133673 | <gh_stars>0
# Import Vancouver's lost animals into Elasticsearch
import urllib
import json
from pprint import pprint
from datetime import datetime
from elasticsearch import Elasticsearch
# One-shot importer (Python 2): download the City of Vancouver open-data
# feed of lost animals and index every record into local Elasticsearch.
vancouverLostAnimalsFtp = 'ftp://webftp.vancouver.ca/OpenData/json/LostAnimals.json'
print "Importing Vancouver lost & found animals from " + vancouverLostAnimalsFtp
# urllib.urlopen returns a file-like object that json.load consumes directly.
lostAnimalsJson = urllib.urlopen(vancouverLostAnimalsFtp)
lostAnimalsJsonArray = json.load(lostAnimalsJson)
es = Elasticsearch("http://localhost")
animalCount = 0
# Index each record under the "animals" index with sequential string ids
# (1-based), doc type "lost".
for i in lostAnimalsJsonArray:
    animalCount = animalCount + 1
    res = es.index(index="animals", id=str(animalCount), doc_type="lost", body=i)
print
print "Imported " + str(animalCount)
| StarcoderdataPython |
1679812 | <reponame>Mesitis/community
'''
- login and get token
- process 2FA if 2FA is setup for this account
- Returns whether or not a given security exists using either name or ticker.
'''
import requests
import json
# Canopy API endpoints (production).
get_token_url = "https://api.canopy.cloud:443/api/v1/sessions/"
validate_otp_url = "https://api.canopy.cloud:443/api/v1/sessions/otp/validate.json" #calling the production server for OTP authentication
get_partner_users_url = "https://api.canopy.cloud:443/api/v1/admin/users.json"
get_security_url = "https://api.canopy.cloud:443/api/v1/securities.json"
#please replace below with your username and password over here
username = 'userxxx'
password = '<PASSWORD>'
#please enter the OTP token in case it is enabled
otp_code = '123456'
#first call for a fresh token
# Form-urlencoded body: user[username]=... & user[password]=... (%5B/%5D
# are the url-encoded brackets).
payload = "user%5Busername%5D=" + username + "&user%5Bpassword%5D=" + password
headers = {
    'accept': "application/json",
    'content-type':"application/x-www-form-urlencoded"
}
response = requests.request("POST", get_token_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True)
token = response.json()['token']
login_flow = response.json()['login_flow']
#in case 2FA is enabled use the OTP code to get the second level of authentication
if login_flow == '2fa_verification':
    # The first-stage token authorizes the OTP validation call; the
    # response replaces it with a fully-authenticated token.
    headers['Authorization'] = token
    payload = 'otp_code=' + otp_code
    response = requests.request("POST", validate_otp_url, data=payload, headers=headers)
    print json.dumps(response.json(), indent=4, sort_keys = True) #print response.text
    token = response.json()['token']
#replace ticker and keyword with search terms appropriately
ticker = "AAPL_US"
keyword = ""
querystring = {"ticker":ticker,"keyword":keyword,"page":"1","per_page":"20"}
headers = {
    'authorization': token,
    'content-type': "application/x-www-form-urlencoded; charset=UTF-8"
}
# Search securities by ticker and/or keyword using the session token.
response = requests.request("GET", get_security_url, headers=headers, params=querystring)
print json.dumps(response.json(), indent=4, sort_keys = True)
| StarcoderdataPython |
3338239 | <filename>tests/test_main.py<gh_stars>1-10
import csv
import os
from tempfile import TemporaryDirectory
import pytest
from click.testing import CliRunner
from qdc_converter import main as converter_main
from qdc_converter.utils import get_files_recursively
@pytest.fixture(scope='module')
def runner():
    """Module-scoped Click CLI runner shared by all tests in this module."""
    return CliRunner()
def compare_two_csv(csv_one, csv_two):
    """Assert that two CSV files contain exactly the same rows.

    Args:
        csv_one (str): Path to 1st csv.
        csv_two (str): Path to 2nd csv.
    """
    with open(csv_one, 'r') as first_file:
        rows_one = list(csv.reader(first_file))
    with open(csv_two, 'r') as second_file:
        rows_two = list(csv.reader(second_file))
    # Compare row counts first, then each row pairwise.
    assert len(rows_one) == len(rows_two)
    for row_a, row_b in zip(rows_one, rows_two):
        assert row_a == row_b
def test_main(runner):
    """Convert test qdc and compare csv output with validated sample."""
    with TemporaryDirectory() as tmpdir:
        result_csv = os.path.join(tmpdir, 'output.csv')
        # Test fixtures live next to this file under data/main: one
        # validated sample CSV plus one or more input QDC files.
        here = os.path.dirname(os.path.abspath(__file__))
        test_path = os.path.join(here, 'data', 'main')
        csv_files = get_files_recursively(test_path, '.csv')
        qdc_files = get_files_recursively(test_path, '.qdc')
        assert len(csv_files) == 1
        csv_sample_file = csv_files[0]
        assert qdc_files
        qdc_path = os.path.dirname(qdc_files[0])
        # NOTE(review): '--quite' is presumably the converter's own flag
        # spelling (sic) — confirm against the CLI definition.
        runner.invoke(converter_main, [
            '--qdc-folder-path', qdc_path,
            '--output-path', result_csv,
            '--layer', '1',
            '--quite',
        ])
        # Compare output with sample
        compare_two_csv(csv_sample_file, result_csv)
| StarcoderdataPython |
3334223 | <filename>bikeshares/programs/boston.py<gh_stars>1-10
import bikeshares
import pandas as pd
import numpy as np
def convert_rider_gender(x):
    """Map a raw Hubway gender value to a single-letter code.

    Args:
        x: "Male", "Female", or a missing value (NaN or None).

    Returns:
        "M", "F", or np.nan when the input is missing.

    Raises:
        ValueError: If x is a non-missing, unrecognized value.
    """
    # pd.isna handles both float NaN and None; the previous
    # `type(x) != str and np.isnan(x)` check raised TypeError on None.
    if not isinstance(x, str) and pd.isna(x):
        return np.nan
    if x == "Male":
        return "M"
    if x == "Female":
        return "F"
    raise ValueError("Unrecognized gender variable: {0}".format(x))
def convert_rider_type(x):
    """Map a raw Hubway subscription type to "member"/"non-member"."""
    mapping = {"Registered": "member", "Casual": "non-member"}
    if x in mapping:
        return mapping[x]
    raise Exception("Unrecognized rider type: {0}".format(x))
class Hubway(bikeshares.program.BikeShareProgram):
    """Boston Hubway program: parsers mapping raw CSV exports onto the
    shared bike-share trip/station schema."""
    def parse_trips(self, data_path):
        """Read a Hubway trips CSV and return it remapped to the common
        trip columns (start/end time & station, duration, bike id, rider
        type/gender/birthyear)."""
        # zip_code is read as object to preserve leading zeros.
        parsed = pd.read_csv(data_path,
            dtype=dict(zip_code="O"),
            na_values=["NA"],
            usecols=[ "start_date", "end_date", "duration",
                "start_station", "end_station", "bike_nr",
                "subscription_type", "zip_code", "birth_date", "gender" ],
            parse_dates=["start_date", "end_date"])
        mapped = pd.DataFrame({
            "start_time": parsed["start_date"],
            "start_station": parsed["start_station"],
            "end_time": parsed["end_date"],
            "end_station": parsed["end_station"],
            "duration": parsed["duration"],
            "bike_id": parsed["bike_nr"],
            "rider_type": parsed["subscription_type"].apply(convert_rider_type),
            "rider_gender": parsed["gender"].apply(convert_rider_gender),
            "rider_birthyear": parsed["birth_date"]
        })
        return mapped
    def parse_stations(self, data_path):
        """Read a Hubway stations CSV and return one row per station id
        with the common station columns."""
        parsed = pd.read_csv(data_path,
            usecols=[ "id", "station",
                "install_date", "last_day", "nb_docks",
                "lat", "lng" ],
            parse_dates=["install_date", "last_day"])
        # groupby(...).first() deduplicates stations that appear more than
        # once in the export, keeping the first occurrence per id.
        mapped = pd.DataFrame({
            "id": parsed["id"],
            "name": parsed["station"],
            "lat": parsed["lat"],
            "lng": parsed["lng"],
            "capacity": parsed["nb_docks"],
            "install_date": parsed["install_date"],
            "removal_date": parsed["last_day"]
        }).groupby("id").first().reset_index()
        return mapped
| StarcoderdataPython |
1788881 | <reponame>cyperior7/MBEDataMechanics<filename>mergedList.py
import json
import dml
import prov.model
import datetime
import pandas as pd
import uuid
class mergedList(dml.Algorithm):
    """dml algorithm that merges the MBE and non-MBE master lists into one
    collection, tagging each business with an MBE status flag and a
    standardized IndustryID derived from keyword matching."""
    contributor = 'ashwini_gdukuray_justini_utdesai'
    reads = ['ashwini_gdukuray_justini_utdesai.masterList', 'ashwini_gdukuray_justini_utdesai.nonMBEmasterList']
    writes = ['ashwini_gdukuray_justini_utdesai.mergedList']
    @staticmethod
    def execute(trial=False):
        '''Retrieve some data sets (not using the API here for the sake of simplicity).'''
        startTime = datetime.datetime.now()
        # Set up the database connection.
        client = dml.pymongo.MongoClient()
        repo = client.repo
        repo.authenticate('ashwini_gdukuray_justini_utdesai', 'ashwini_gdukuray_justini_utdesai')
        MBElist = repo['ashwini_gdukuray_justini_utdesai.masterList']
        NMBElist = repo['ashwini_gdukuray_justini_utdesai.nonMBEmasterList']
        # Trial mode implementation
        # (trial mode only processes the first 100 rows of each list)
        if (trial):
            MBElistDF = pd.DataFrame(list(MBElist.find()))[:100]
            NMBElistDF = pd.DataFrame(list(NMBElist.find()))[:100]
        else:
            MBElistDF = pd.DataFrame(list(MBElist.find()))
            NMBElistDF = pd.DataFrame(list(NMBElist.find()))
        # Add back a column for MBE status
        MBElistDF['MBE Status'] = 'Y'
        NMBElistDF['MBE Status'] = 'N'
        # Merge two lists
        combinedDF = pd.concat([MBElistDF, NMBElistDF])
        # Standardize industry column
        # Keyword -> category table; the first category whose keyword is a
        # substring of the raw Industry string wins.
        categories = {
            'Finance': ['financial', 'finance', 'accounting', 'bank', 'money', 'equity', 'asset', 'payroll', 'economic', 'stock', 'fine art', 'novelties'],
            'Technology': ['information', 'technology', 'tech', 'computer', 'software', 'data', 'it professional'],
            'Architecture': ['architecture', 'architectural', 'engineering', 'architect', 'interior design'],
            'Carpentry': ['carpentry', 'wood', 'drywall'],
            'HVAC': ['hvac', 'air conditioning', 'heating', 'insulation'],
            'Landscaping': ['landscaping', 'landscape', 'snow'],
            'Janitorial Services': ['janitorial'],
            'Music': ['music', 'acoustic', 'guitar'],
            'Answering Services': ['answering', 'telephone', 'telecommunications'],
            'Home': ['cabinet', 'appliance', 'kitchen', 'bath', 'door', 'home', 'window', 'remodel', 'flag'],
            'Hospitality': ['catering', 'wedding', 'party', 'event planner', 'cater', 'planning', 'hospitality'],
            'Construction': ['concrete', 'construction', 'operat', 'glass', 'scaffold'],
            'Environment': ['environment', 'sustainability', 'sustainable'],
            'Electrical': ['electric', 'electronic'],
            'Marketing/Advertising': ['marketing', 'advertising', 'leadership', 'strategic planning', 'relations', 'community', 'brand', 'presentation'],
            'Maintenance/Repairs': ['elevator', 'hardware', 'maintenance'],
            'Pest Control': ['pest', 'exterminator', 'exterminat'],
            'Fencing': ['fence', 'fencing', 'barrier'],
            'Flooring/Roofing': ['flooring', 'floor', 'carpet', 'roofing', 'tile'],
            'Fuel': ['oil', 'gasoline', 'gas', 'energy'],
            'Contractor': ['contract'],
            'Legal': ['legal', 'lawyer'],
            'Cleanup': ['lead abatement', 'asbestos', 'demolition', 'trash', 'garbage', 'cleaning', 'compost'],
            'Management/Real Estate': ['management', 'leadership', 'project cont', 'real estate'],
            'Masonry': ['masonry', 'stone'],
            'Metals': ['metal', 'steel', 'iron', 'gold', 'jewell'],
            'Office Supplies': ['office', 'stationery'],
            'Painting': ['paint'],
            'Media': ['photo', 'video', 'media', 'printing', 'graphic', 'news', 'logo', 'arts'],
            'Plumbing': ['plumbing', 'plumber'],
            'Insurance': ['insurance', 'claims'],
            'Security/Safety': ['security', 'watch guard', 'safety', 'fire'],
            'Site Improvements': ['road', 'paving', 'sidewalk', 'site util', 'sign procure'],
            'Transportation': ['bus', 'car', 'taxi', 'transport', 'airplane', 'travel', 'delivery', 'livery', 'trucking', 'moving', 'shuttle'],
            'Services': ['translation', 'resident', 'language', 'supplier'],
            'Floral': ['flower', 'floral'],
            'Fitness/Health': ['fitness', 'health', 'recreational', 'medical', 'yoga', 'barre'],
            'Food': ['food', 'pantry', 'beverage', 'produce'],
            'Municipality': ['poverty', 'permit', 'parking', 'state', 'court'],
            'Counseling': ['counseling', 'therapy'],
            'Research': ['research', 'testing', 'laboratory'],
            'Non Profit/Community': ['community', 'housing', 'treatment', 'shelter', 'mental', 'domestic', 'habitat'],
            'Consumer Services': ['hair', 'salon', 'nail'],
            'Automobile': ['vehicle', 'automobile', 'body shop', 'auto damage'],
            'Apparel': ['clothes', 'uniform', 'apparel'],
            'Employment': ['employment', 'recruit', 'interview', 'skills', 'employee', 'workforce', 'training', 'staffing'],
            'Education': ['education', 'school', 'children', 'college']
        }
        combinedDF['IndustryID'] = 'Temp'
        # NOTE(review): iterrows() yields row *copies*; assigning to
        # row['IndustryID'] may not write back into combinedDF, leaving
        # every IndustryID as 'Temp' — verify against the stored output.
        for index, row in combinedDF.iterrows():
            industry = row['Industry'].lower()
            for key in categories:
                for cat in categories[key]:
                    if (cat in industry):
                        row['IndustryID'] = key
                        break
                # check if company was sorted into a category already
                if (row['IndustryID'] != 'Temp'):
                    break
        combinedDF = combinedDF.reset_index(drop=True)
        combinedDF = combinedDF.drop(columns=['Industry'])
        #records = json.loads(industryDF.T.to_json()).values()
        # Replace the output collection wholesale and mark it complete.
        repo.dropCollection("mergedList")
        repo.createCollection("mergedList")
        repo['ashwini_gdukuray_justini_utdesai.mergedList'].insert_many(combinedDF.to_dict('records'))
        repo['ashwini_gdukuray_justini_utdesai.mergedList'].metadata({'complete': True})
        print(repo['ashwini_gdukuray_justini_utdesai.mergedList'].metadata())
        repo.logout()
        endTime = datetime.datetime.now()
        return {"start": startTime, "end": endTime}
    @staticmethod
    def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):
        '''
        Create the provenance document describing everything happening
        in this script. Each run of the script will generate a new
        document describing that invocation event.
        '''
        # NOTE(review): the mutable default doc=ProvDocument() is shared
        # across calls — repeated calls without an explicit doc accumulate
        # records in the same document. Confirm whether that is intended.
        # Set up the database connection.
        client = dml.pymongo.MongoClient()
        repo = client.repo
        repo.authenticate('ashwini_gdukuray_justini_utdesai', 'ashwini_gdukuray_justini_utdesai')
        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.
        doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.
        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.
        doc.add_namespace('bdp', 'http://datamechanics.io/?prefix=ashwini_gdukuray_justini_utdesai/')
        this_script = doc.agent('alg:ashwini_gdukuray_justini_utdesai#mergedList()',
                                {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})
        masterList = doc.entity('dat:ashwini_gdukuray_justini_utdesai#masterList',
                                {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataSet',
                                 'ont:Extension': 'json'})
        nonMBEmasterList = doc.entity('dat:ashwini_gdukuray_justini_utdesai#nonMBEmasterList',
                                      {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataSet',
                                       'ont:Extension': 'json'})
        mergedList = doc.entity('dat:ashwini_gdukuray_justini_utdesai#mergedList',
                                {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataSet',
                                 'ont:Extension': 'json'})
        act = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)
        doc.wasAssociatedWith(act, this_script)
        doc.usage(act, masterList, startTime, None,
                  {prov.model.PROV_TYPE: 'ont:Retrieval',
                   'ont:Query': '?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'
                   }
                  )
        doc.usage(act, nonMBEmasterList, startTime, None,
                  {prov.model.PROV_TYPE: 'ont:Retrieval',
                   'ont:Query': '?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'
                   }
                  )
        doc.wasAttributedTo(mergedList, this_script)
        doc.wasGeneratedBy(mergedList, act, endTime)
        doc.wasDerivedFrom(mergedList, masterList, act, act, act)
        doc.wasDerivedFrom(mergedList, nonMBEmasterList, act, act, act)
        repo.logout()
        return doc
'''
# This is example code you might use for debugging this module.
# Please remove all top-level function calls before submitting.
example.execute()
doc = example.provenance()
print(doc.get_provn())
print(json.dumps(json.loads(doc.serialize()), indent=4))
'''
## eof
| StarcoderdataPython |
3262822 | <reponame>birds-on-mars/birdsonearth<filename>VGGish_model.py
import torch
import torch.nn as nn
from torch.nn.functional import relu, softmax
from torch.utils.data import DataLoader
import h5py as h5
import os
import params as p
class VGGish(nn.Module):
    """VGGish-style audio CNN with a softmax classification head.

    Input is expected as (batch, 1, n_bins, n_frames); the four 2x2
    max-pools shrink each spatial dimension by a factor of 16, so
    out_dims = (n_bins/16) * (n_frames/16).
    """
    def __init__(self, params):
        """Build the network from a params object carrying n_bins,
        n_frames, n_classes, weights, model_zoo and name."""
        super(VGGish, self).__init__()
        self.n_bins = params.n_bins
        self.n_frames = params.n_frames
        # Spatial cells remaining after four 2x2 poolings.
        self.out_dims = int(params.n_bins / 2**4 * params.n_frames / 2**4)
        self.n_classes = params.n_classes
        self.weights = params.weights
        self.model_zoo = params.model_zoo
        self.name = params.name
        # convolutional bottom part
        self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), padding=(1, 1))
        self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(64, 128, kernel_size=(3, 3), padding=(1, 1))
        self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=(3, 3), padding=(1, 1))
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=(3, 3), padding=(1, 1))
        self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=(3, 3), padding=(1, 1))
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=(3, 3), padding=(1, 1))
        self.pool4 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        # fully connected top part
        self.classifier = nn.Sequential(
            nn.Linear(self.out_dims*512, 1028),
            nn.ReLU(),
            nn.Linear(1028, 1028),
            nn.ReLU(),
            nn.Linear(1028, self.n_classes)
        )
    def forward(self, X):
        """Return class probabilities of shape (batch, n_classes)."""
        a = self.pool1(relu(self.conv1(X)))
        a = self.pool2(relu(self.conv2(a)))
        a = relu(self.conv3_1(a))
        a = relu(self.conv3_2(a))
        a = self.pool3(a)
        a = relu(self.conv4_1(a))
        a = relu(self.conv4_2(a))
        a = self.pool4(a)
        a = a.reshape((a.size(0), -1))
        a = self.classifier(a)
        # FIX: specify the class axis explicitly — calling softmax without
        # `dim` relies on deprecated implicit-dim inference and warns.
        a = softmax(a, dim=1)
        return a
    def init_weights(self, file=None):
        '''
        laods pretrained weights from an .hdf5 file. File structure must match exactly.
        Args:
            file (string): path to .hdf5 file containing VGGish weights
        '''
        if file is not None:
            file = file
        else:
            file = self.weights
        # loading weights from file; kernels are stored (H, W, in, out) and
        # transposed to PyTorch's (out, in, W, H) layout.
        with h5.File(file, 'r') as f:
            conv1 = f['conv1']['conv1']
            kernels1 = torch.from_numpy(conv1['kernel:0'][()].transpose(3, 2, 1, 0))
            biases1 = torch.from_numpy(conv1['bias:0'][()])
            conv2 = f['conv2']['conv2']
            kernels2 = torch.from_numpy(conv2['kernel:0'][()].transpose(3, 2, 1, 0))
            biases2 = torch.from_numpy(conv2['bias:0'][()])
            conv3_1 = f['conv3']['conv3_1']['conv3']['conv3_1']
            kernels3_1 = torch.from_numpy(conv3_1['kernel:0'][()].transpose(3, 2, 1, 0))
            biases3_1 = torch.from_numpy(conv3_1['bias:0'][()])
            conv3_2 = f['conv3']['conv3_2']['conv3']['conv3_2']
            kernels3_2 = torch.from_numpy(conv3_2['kernel:0'][()].transpose(3, 2, 1, 0))
            biases3_2 = torch.from_numpy(conv3_2['bias:0'][()])
            conv4_1 = f['conv4']['conv4_1']['conv4']['conv4_1']
            kernels4_1 = torch.from_numpy(conv4_1['kernel:0'][()].transpose(3, 2, 1, 0))
            biases4_1 = torch.from_numpy(conv4_1['bias:0'][()])
            conv4_2 = f['conv4']['conv4_2']['conv4']['conv4_2']
            kernels4_2 = torch.from_numpy(conv4_2['kernel:0'][()].transpose(3, 2, 1, 0))
            biases4_2 = torch.from_numpy(conv4_2['bias:0'][()])
        # assigning weights to layers
        self.conv1.weight.data = kernels1
        self.conv1.bias.data = biases1
        self.conv2.weight.data = kernels2
        self.conv2.bias.data = biases2
        self.conv3_1.weight.data = kernels3_1
        self.conv3_1.bias.data = biases3_1
        self.conv3_2.weight.data = kernels3_2
        self.conv3_2.bias.data = biases3_2
        self.conv4_1.weight.data = kernels4_1
        self.conv4_1.bias.data = biases4_1
        self.conv4_2.weight.data = kernels4_2
        self.conv4_2.bias.data = biases4_2
    def freeze_bottom(self):
        '''
        freezes the convolutional bottom part of the model.
        '''
        for layer in self.children():
            if isinstance(layer, nn.Conv2d):
                layer.weight.requires_grad = False
                layer.bias.requires_grad = False
    def save_weights(self):
        """Save the full state dict to <model_zoo>/<name>.pt."""
        torch.save(self.state_dict(), \
                   os.path.join(self.model_zoo, self.name+'.pt'))
        return
class VGGish_w_top(nn.Module):
    """VGGish convolutional feature extractor with an optional (unused in
    forward) fully-connected embedding top.

    forward() returns the raw conv feature map of shape
    (batch, 512, n_bins/16, n_frames/16); the fc1-fc3 layers are only
    constructed when top=True and are not applied by forward().
    """
    def __init__(self, params, top=False):
        """Build the network from a params object carrying n_bins,
        n_frames, n_classes, weights, weights_to and name."""
        # FIX: was `super(VGGish, self).__init__()`, which names the wrong
        # class and raises TypeError (self is not a VGGish instance).
        super().__init__()
        self.n_bins = params.n_bins
        self.n_frames = params.n_frames
        # Spatial cells remaining after four 2x2 poolings.
        self.out_dims = int(params.n_bins / 2**4 * params.n_frames / 2**4)
        self.n_classes = params.n_classes
        self.weights = params.weights
        self.weights_to = params.weights_to
        self.name = params.name
        self.top = top
        # convolutional bottom part
        self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), padding=(1, 1))
        self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(64, 128, kernel_size=(3, 3), padding=(1, 1))
        self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=(3, 3), padding=(1, 1))
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=(3, 3), padding=(1, 1))
        self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=(3, 3), padding=(1, 1))
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=(3, 3), padding=(1, 1))
        self.pool4 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        if self.top:
            # Original VGGish embedding head (128-d output).
            self.fc1 = nn.Linear(self.out_dims*512, 4096)
            self.fc2 = nn.Linear(4096, 4096)
            self.fc3 = nn.Linear(4096, 128)
    def forward(self, X):
        """Return conv features (batch, 512, n_bins/16, n_frames/16)."""
        a = self.pool1(relu(self.conv1(X)))
        a = self.pool2(relu(self.conv2(a)))
        a = relu(self.conv3_1(a))
        a = relu(self.conv3_2(a))
        a = self.pool3(a)
        a = relu(self.conv4_1(a))
        a = relu(self.conv4_2(a))
        a = self.pool4(a)
        return a
    def init_weights(self, file=None):
        '''
        laods pretrained weights from an .hdf5 file. File structure must match exactly.
        Args:
            file (string): path to .hdf5 file containing VGGish weights
        '''
        if file is not None:
            file = file
        else:
            file = self.weights
        # loading weights from file; kernels are stored (H, W, in, out) and
        # transposed to PyTorch's (out, in, W, H) layout.
        with h5.File(file, 'r') as f:
            conv1 = f['conv1']['conv1']
            kernels1 = torch.from_numpy(conv1['kernel:0'][()].transpose(3, 2, 1, 0))
            biases1 = torch.from_numpy(conv1['bias:0'][()])
            conv2 = f['conv2']['conv2']
            kernels2 = torch.from_numpy(conv2['kernel:0'][()].transpose(3, 2, 1, 0))
            biases2 = torch.from_numpy(conv2['bias:0'][()])
            conv3_1 = f['conv3']['conv3_1']['conv3']['conv3_1']
            kernels3_1 = torch.from_numpy(conv3_1['kernel:0'][()].transpose(3, 2, 1, 0))
            biases3_1 = torch.from_numpy(conv3_1['bias:0'][()])
            conv3_2 = f['conv3']['conv3_2']['conv3']['conv3_2']
            kernels3_2 = torch.from_numpy(conv3_2['kernel:0'][()].transpose(3, 2, 1, 0))
            biases3_2 = torch.from_numpy(conv3_2['bias:0'][()])
            conv4_1 = f['conv4']['conv4_1']['conv4']['conv4_1']
            kernels4_1 = torch.from_numpy(conv4_1['kernel:0'][()].transpose(3, 2, 1, 0))
            biases4_1 = torch.from_numpy(conv4_1['bias:0'][()])
            conv4_2 = f['conv4']['conv4_2']['conv4']['conv4_2']
            kernels4_2 = torch.from_numpy(conv4_2['kernel:0'][()].transpose(3, 2, 1, 0))
            biases4_2 = torch.from_numpy(conv4_2['bias:0'][()])
        # assigning weights to layers
        self.conv1.weight.data = kernels1
        self.conv1.bias.data = biases1
        self.conv2.weight.data = kernels2
        self.conv2.bias.data = biases2
        self.conv3_1.weight.data = kernels3_1
        self.conv3_1.bias.data = biases3_1
        self.conv3_2.weight.data = kernels3_2
        self.conv3_2.bias.data = biases3_2
        self.conv4_1.weight.data = kernels4_1
        self.conv4_1.bias.data = biases4_1
        self.conv4_2.weight.data = kernels4_2
        self.conv4_2.bias.data = biases4_2
    def freeze_bottom(self):
        '''
        freezes the convolutional bottom part of the model.
        '''
        for layer in self.children():
            if isinstance(layer, nn.Conv2d):
                layer.weight.requires_grad = False
                layer.bias.requires_grad = False
    def save_weights(self, path=None):
        """Save the state dict to `path`, defaulting to
        <weights_to>/<name>.pt."""
        if path is None:
            path = os.path.join(self.weights_to, self.name+'.pt')
        torch.save(self.state_dict(), path)
if __name__ == '__main__':
    # Smoke test: load params, pick a device, build the network, load the
    # pretrained conv weights, and check tensor/parameter placement.
    params = p.Params()
    if torch.cuda.is_available():
        device = torch.device('cuda:2')
        print('GPU available, working on device', device)
    else:
        device = torch.device('cpu')
        print('No GPU available, working on CPU.')
    print('loading model')
    net = VGGish(params)
    #print('net on gpu?:', net.is_cuda)
    net.init_weights('vggish_audioset_weights_without_fc2.h5')
    net.to(device)
    t = torch.randn((10, 10), device=device)
    print(t.device)
    # NOTE(review): t2.to(device) is not in-place and its result is
    # discarded, so t2 stays on CPU — likely meant t2 = t2.to(device).
    t2 = torch.randn((10, 10))
    t2.to(device)
    print(t.device)
    for name, param in net.named_parameters():
        print(name, param.device)
    # ONNX export
    #net.cuda()
    # dummy_in = torch.randn(size=(10, 1, 64, 96)).cuda()
    # input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(12)]
    # output_names = ["output1"]
    # torch.onnx.export(net, dummy_in, "modelsVGGish_conv.onnx", verbose=True, \
    #                   input_names=input_names, output_names=output_names)
| StarcoderdataPython |
1661156 | <filename>Objects/Background.py
from Objects.Object import Object
class Background(Object):
    """Solid-color background covering every pixel of the strip."""

    def __init__(self, pPixellength):
        """Create a black background spanning ``pPixellength`` pixels."""
        black = [0, 0, 0]
        self.pixellength = pPixellength
        self.color = black
        super().__init__(True, pPixellength - 1, [black] * pPixellength)

    def setColor(self, color):
        """Switch the background color and rebuild the pixel content."""
        self.color = color
        self.content = [color] * self.pixellength
| StarcoderdataPython |
33941 | import numpy as np
import time
import pytest
import jax.numpy as jnp
import jax.config as config
import torch
import tensorflow as tf
from tensornetwork.linalg import linalg
from tensornetwork import backends
from tensornetwork.backends.numpy import numpy_backend
from tensornetwork.backends.jax import jax_backend
#pylint: disable=no-member
# Enable 64-bit types in JAX so float64/complex128 cases are exercised.
config.update("jax_enable_x64", True)
# Per-backend dtype groups used to parameterize the initializer tests:
# "real" = real floats, "float" = real + complex, "rand" = dtypes valid
# for random initializers, "int" = signed + unsigned ints, "all" =
# everything plus None (backend default).
np_real = [np.float32, np.float16, np.float64]
np_float = np_real + [np.complex64, np.complex128]
np_int = [np.int8, np.int16, np.int32, np.int64]
np_uint = [np.uint8, np.uint16, np.uint32, np.uint64]
np_dtypes = {"real": np_real, "float": np_float,
             "rand": np_float,
             "int": np_int + np_uint,
             "all": np_real+ np_int + np_uint + [None, ]}
tf_real = [tf.float32, tf.float16, tf.float64]
tf_float = tf_real + [tf.complex64, tf.complex128]
tf_int = [tf.int8, tf.int16, tf.int32, tf.int64]
tf_uint = [tf.uint8, tf.uint16, tf.uint32, tf.uint64]
tf_dtypes = {"real": tf_real, "float": tf_float,
             "rand": tf_real + [None, ],
             "int": tf_int + tf_uint,
             "all": tf_real + tf_int + tf_uint + [None, ]}
torch_float = [torch.float32, torch.float16, torch.float64]
torch_int = [torch.int8, torch.int16, torch.int32, torch.int64]
torch_uint = [torch.uint8]
torch_dtypes = {"real": torch_float, "float": torch_float,
                "rand": [torch.float32, torch.float64, None],
                "int": torch_int + torch_uint,
                "all": torch_float + torch_int + torch_uint + [None, ]}
# Top-level lookup keyed by backend name.
dtypes = {"pytorch": torch_dtypes,
          "jax": np_dtypes, "numpy": np_dtypes, "tensorflow": tf_dtypes}
def test_eye(backend):
    """
    Tests linalg.eye against np.eye.
    """
    N = 4
    M = 6
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["all"]:
        # Compare values against the backend's own eye, then check the
        # tensor name, dangling-edge names, and backend assignment.
        tnI = linalg.eye(N, dtype=dtype, M=M, name=name, axis_names=axis_names,
                         backend=backend)
        npI = backend_obj.eye(N, dtype=dtype, M=M)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_zeros(backend):
    """
    Tests linalg.zeros against np.zeros.
    """
    shape = (5, 10, 3)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["all"]:
        # Compare values against the backend's own zeros, then check the
        # tensor name, dangling-edge names, and backend assignment.
        tnI = linalg.zeros(shape, dtype=dtype, name=name, axis_names=axis_names,
                           backend=backend)
        npI = backend_obj.zeros(shape, dtype=dtype)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_ones(backend):
    """
    Tests linalg.ones against np.ones.
    """
    shape = (5, 10, 3)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["all"]:
        # Compare values against the backend's own ones, then check the
        # tensor name, dangling-edge names, and backend assignment.
        tnI = linalg.ones(shape, dtype=dtype, name=name, axis_names=axis_names,
                          backend=backend)
        npI = backend_obj.ones(shape, dtype=dtype)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_randn(backend):
    """
    Tests linalg.randn against the backend code.
    """
    shape = (5, 10, 3, 2)
    # Seed both paths identically so the two draws must match.
    seed = int(time.time())
    np.random.seed(seed=seed)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["rand"]:
        tnI = linalg.randn(shape, dtype=dtype, name=name, axis_names=axis_names,
                           backend=backend, seed=seed)
        npI = backend_obj.randn(shape, dtype=dtype, seed=seed)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_random_uniform(backend):
    """
    Tests linalg.random_uniform against the backend code.
    """
    shape = (5, 10, 3, 2)
    # Seed both paths identically so the two draws must match.
    seed = int(time.time())
    np.random.seed(seed=seed)
    boundaries = (-0.3, 10.5)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["rand"]:
        tnI = linalg.random_uniform(shape, dtype=dtype, name=name,
                                    axis_names=axis_names, backend=backend,
                                    seed=seed, boundaries=boundaries)
        npI = backend_obj.random_uniform(shape, dtype=dtype, seed=seed,
                                         boundaries=boundaries)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
| StarcoderdataPython |
1769536 | <gh_stars>0
#!/usr/bin/python
import sys
import time
def creat_newpost(post_name):
    """Create a Jekyll post stub ./<YYYY-MM-DD>-<slug>.md for `post_name`.

    The slug is the title with whitespace runs replaced by hyphens; the
    file is filled with YAML front matter carrying the title and an
    ISO-8601 creation timestamp.
    """
    slug = '-'.join(post_name.split())
    today = time.strftime('%Y-%m-%d')
    timestamp = time.strftime('%Y-%m-%dT%H:%M:%S')
    target = './{}-{}.md'.format(today, slug)
    front_matter = \
"""---
layout: post
title: {}
modified:
categories:
description:
tags:
image:
feature:
credit:
creditlink:
comments:
share:
date: {}
---
""".format(post_name, timestamp)
    with open(target, 'w') as stub:
        stub.write(front_matter)
if __name__ == '__main__':
    # Usage: python newpost.py "Post title"
    post_name = sys.argv[1]
    creat_newpost(post_name)
3307132 | from shapes import PolygonInterpolator, normals_offset, midpoints
import matplotlib.pyplot as plt
import shapely.geometry as geom
import numpy as np
import matplotlib.animation as animation
import scipy.spatial as spatial
from copy import copy
# Source and target polygons for the interpolation experiments below.
# The convex hulls of two hand-picked point sets; the commented-out
# alternatives (boxes, earlier polygons) were previous test cases.
#p1s = geom.box(0, 0, 1, 1)
#p2s = geom.box(-1, -1, 2, 2)
p1s = geom.Polygon([(-0.5, 0.5), (0.5, -0.5), (1, 1.), (0, 1.5), (1, 0)]).convex_hull
#p2s = geom.Polygon([(0, 0), (0, 1), (0.5, 1.5), (1, 1.5),
#                    (1.5, 1.5), (1.5, 1.), (1, 0)])
p2s = geom.Polygon([(0, 0), (0, 1), (0.5, 1.5),
                    (1.5, 1.), (1.4, 0.5), (1, 0)]).convex_hull
#p2s = geom.Polygon([(0, 0), (0, 1), (0.5, 1.5), (1, 1), (1, 0)])
#
#lrpoly = geom.Polygon(zip([-0.07050964684556796, -0.07050964825832298, 0.10949034957097684, 0.10949034894607837, -0.07050964684556796], [-0.15499999638048872, 0.15499999203700351, 0.1549999968283623, -0.15499999600629497, -0.15499999638048872]))
#
#rpoly = geom.Polygon(zip([0.10949026646949628, -0.0705096371815371, -0.07050963781963734, 0.1094903420613558, 0.10949026646949628], [0.05500000293727045, 0.05500001448651814, 0.15499998281677235, 0.15499999497076253, 0.05500000293727045]))
#
#lpoly = geom.Polygon(zip([0.10949026646949628, -0.0705096371815371, -0.07050963781963734, 0.1094903420613558, 0.10949026646949628], [0.05500000293727045, 0.05500001448651814, 0.15499998281677235, 0.15499999497076253, 0.05500000293727045]))
#
#p1s = lpoly
#p2s = lrpoly
def save_polys(epsilons):
    """Interpolate between p1s and p2s at each epsilon and dump results.

    For each epsilon, writes 'polygon_<i>' (exterior coordinates) and
    'polygon_normals_<i>' (edge midpoints, normals, and midpoint speeds)
    as whitespace-separated text; finally writes one 'trajectory_<i>'
    file per edge midpoint tracked across the intermediate epsilons.
    (Python 2 code: zip() returns a list, hence the [:-1] slice.)
    """
    inter = PolygonInterpolator(p1s, p2s)
    all_points = None
    for i, eps in enumerate(epsilons):
        poly = inter.fast_interpolate(eps)
        arr = np.array(poly.exterior.coords)
        np.savetxt('polygon_{}'.format(i), arr, header='x y', comments='')
        normals, offsets = normals_offset(poly)
        # Midpoint speeds only make sense strictly between the endpoints;
        # at eps in {0, 1} use zero vectors.
        if eps > 0 and eps < 1:
            speeds = inter.midpoint_derivative(1.)
        else:
            speeds = [(0, 0)]*len(normals)
        #    if prev_offsets is None:
        #        speeds = [0]*len(normals)
        #    else:
        #        speeds = [n-p for n, p in zip(offsets, prev_offsets)]
        #    prev_offsets = copy(offsets)
        #dots = [n[0]*s[0]+n[1]*s[1] for n, s in zip(normals, speeds)]
        #dots = [d if abs(d) > 0.0001 else 1 for d in dots]
        #reg_dots = [d/(10*(max(dots)-min(dots))) for d in dots]
        #if eps > 0 and eps < 1:
        #    #normals = [(d*n[0]/abs(d), d*n[1]/abs(d)) for d, n in zip(dots, normals)]
        #    normals = [(s*n[0], s*n[1]) for s, n in zip(speeds, normals)]
        #normals = [(s[0]*n[0], s[1]*n[1]) for s, n in zip(speeds, normals)]
        # Drop the closing (repeated) exterior vertex before midpoints.
        mid = midpoints([geom.Point(pt) for pt in zip(*poly.exterior.coords.xy)[:-1]])
        x, y = zip(*[(pt.xy[0][0], pt.xy[1][0]) for pt in mid])
        u, v = zip(*normals)
        dx, dy = zip(*speeds)
        # Accumulate each midpoint's position over the intermediate steps
        # to build per-point trajectories.
        if eps > 0 and eps < 1:
            if all_points is None:
                all_points = [[np.array([[xi, yi]])] for xi, yi in zip(x, y)]
            else:
                for l, xi, yi in zip(all_points, x, y):
                    l.append(np.array([[xi, yi]]))
        np.savetxt('polygon_normals_{}'.format(i), np.vstack([x, y, u, v, dx, dy]).T,
                   header='x y u v dx dy', comments='')
    for i, l in enumerate(all_points):
        np.savetxt('trajectory_{}'.format(i), np.vstack(l), header='x y', comments='')
def check_speeds():
    # Visual sanity check: plot the interpolated polygon at `perc` between
    # p1s and p2s, per-pair speed vectors, and for each probe point its
    # projections onto the polygon with their velocity and normal vectors.
    perc = 0.0
    points = [(0.5, 0.5)]#, (0.5, 0), (0.5, 1.)]
    interp = PolygonInterpolator(p1s, p2s)
    poly = interp.fast_interpolate(perc)
    pairs = interp.fast_interpolate_pairs(perc)
    fig = plt.figure()
    # NOTE(review): '111' is passed as a string here while other plots use the
    # int 111 -- confirm both are accepted by this matplotlib version.
    ax = fig.add_subplot('111', aspect='equal')
    # Interpolated polygon in red, the two endpoints dashed blue/green.
    for p, c in zip([poly, p1s, p2s], ['r', 'b--', 'g--']):
        x, y = p.exterior.xy
        plt.plot(x, y, c)
    pair_x, pair_y = zip(*pairs)
    spd_x, spd_y = zip(*interp.point_derivative(1.))
    ax.quiver(pair_x, pair_y, spd_x, spd_y, color='r')
    # Label each interpolation pair with its index for debugging.
    for i, xy in enumerate(zip(pair_x, pair_y)):
        ax.annotate(str(i), xy)
    normals, offsets = interp.normals_offset(perc)
    # Compare the interpolator's normals/offsets against the standalone
    # normals_offset() helper (Python 2 print statements).
    print normals, offsets
    print normals_offset(poly)
    nx, ny = zip(*normals)
    for point in points:
        # Projections of the probe point onto the polygon edges, their
        # velocities (blue) and the edge normals (black).
        proj = interp.projections(point, perc)
        speeds = interp.point_dist_derivative(point, perc, 1.)
        x, y = zip(*proj)
        spd_x, spd_y = zip(*speeds)
        px, py = point
        plt.plot(px, py, 'or')
        plt.plot(x, y, 'ob')
        ax.quiver(x, y, spd_x, spd_y, color="b")
        ax.quiver(x, y, nx, ny)
    plt.show()
def launch_animation():
    # Animate the interpolation from p1s to p2s with matplotlib FuncAnimation.
    # Three quiver overlays on the interpolated polygon: edge normals (green),
    # raw speeds (red), and speeds projected onto the normals (blue).
    global interp, quiv, quiv_spd, quiv_dot
    ##-- for animation purposes
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
                         #xlim=(-0.2, 0.2), ylim=(-0.2, 0.2))
                         xlim=(-1.2, 2.2), ylim=(-1.2, 2.2))
    ax.grid()
    # line1/line2: the static endpoint polygons; line3: current interpolation.
    line1, = ax.plot([], [], 'r')
    line2, = ax.plot([], [], 'b')
    line3, = ax.plot([], [], 'g--')
    quiv = ax.quiver([], [], [], [], color='g')
    quiv_spd = ax.quiver([], [], [], [], color='r')
    quiv_dot = ax.quiver([], [], [], [], color='b')
    nr_steps = 500
    interp = PolygonInterpolator(p1s, p2s)
    def init():
        # Draw the endpoint polygons and seed the quivers from p2s.
        global quiv, quiv_spd
        x, y = p1s.exterior.coords.xy
        line1.set_data(x, y)
        x, y = p2s.exterior.coords.xy
        line2.set_data(x, y)
        line3.set_data([], [])
        normals, _ = normals_offset(p2s)
        # Edge midpoints; drop the last coordinate, which closes the ring.
        mid = midpoints([geom.Point(pt) for pt in zip(*p2s.exterior.coords.xy)[:-1]])
        x, y = zip(*[(pt.xy[0], pt.xy[1]) for pt in mid])
        u, v = zip(*normals)
        # NOTE(review): set_offsets expects (N, 2) pairs; np.hstack([x, y])
        # concatenates x and y instead of pairing them -- verify rendering.
        quiv.set_offsets(np.hstack([x, y]))
        quiv.set_UVC(u, v)
        speeds = interp.midpoint_derivative(1.)
        u, v = zip(*speeds)
        quiv_spd.set_offsets(np.hstack([x, y]))
        quiv_spd.set_UVC(u, v)
        # Project each speed onto its edge normal, then scale the normal by
        # that magnitude for the blue quiver.
        dots = [n[0]*s[0]+n[1]*s[1] for n, s in zip(normals, speeds)]
        spd = [(d*n[0], d*n[1]) for n, d in zip(normals, dots)]
        u, v = zip(*spd)
        quiv_dot.set_offsets(np.hstack([x, y]))
        quiv_dot.set_UVC(u, v)
        return line1, line2, line3, quiv, quiv_spd, quiv_dot
    def animate(i):
        # Frame i: interpolate at fraction i/nr_steps and refresh all artists.
        global interp, quiv
        perc = float(i)/float(nr_steps)
        p = interp.interpolate(perc)
        normals, offset = normals_offset(p)
        #mid = midpoints(geom.Point(map(as_tuple, p.exterior.coords)))
        line3.set_data(*p.exterior.coords.xy)
        #print p.exterior.coords.xy
        #normals = interp.midpoint_derivative(1)
        #normals = interp.normal_derivative(0.5)
        #interp.normal_derivative(1/float(nr_steps))
        #x = [point.xy[0][0] for point in mid]
        #y = [point.xy[1][0] for point in mid]
        mid = midpoints([geom.Point(pt) for pt in zip(*p.exterior.coords.xy)[:-1]])
        x, y = zip(*[(pt.xy[0], pt.xy[1]) for pt in mid])
        u, v = zip(*normals)
        quiv.set_offsets(np.hstack([x, y]))
        quiv.set_UVC(u, v)
        # Speeds of the projections of a fixed probe point (1., 0.).
        speeds = interp.point_dist_derivative((1., 0.), perc, 1.)
        u, v = zip(*speeds)
        quiv_spd.set_offsets(np.hstack([x, y]))
        quiv_spd.set_UVC(u, v)
        dots = [n[0]*s[0]+n[1]*s[1] for n, s in zip(normals, speeds)]
        spd = [(d*n[0], d*n[1]) for n, d in zip(normals, dots)]
        u, v = zip(*spd)
        quiv_dot.set_offsets(np.hstack([x, y]))
        quiv_dot.set_UVC(u, v)
        # NOTE(review): quiv_dot is returned from init() but omitted here --
        # with blit=True it may not be redrawn each frame; confirm intended.
        return line1, line2, line3, quiv, quiv_spd
    ani = animation.FuncAnimation(fig, animate, frames=nr_steps,
                                  interval=0.2, blit=True, init_func=init,
                                  repeat=True)
    plt.show()
def record_animation():
    """Render every interpolation step between p1s and p2s to a numbered
    PNG frame (figure_000.png .. figure_500.png) with edge-normal quivers."""
    nr_steps = 500
    interp = PolygonInterpolator(p1s, p2s)
    for frame in range(nr_steps + 1):
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
                             xlim=(-1.2, 2.2), ylim=(-1.2, 2.2))
        fraction = float(frame)/nr_steps
        poly = interp.fast_interpolate(fraction)
        normals, _ = normals_offset(poly)
        # Edge midpoints (closing coordinate dropped) anchor the normals.
        mid = midpoints([geom.Point(pt) for pt in zip(*poly.exterior.coords.xy)[:-1]])
        mx, my = zip(*[(pt.xy[0], pt.xy[1]) for pt in mid])
        u, v = zip(*normals)
        # Current polygon in green, the two endpoints in red/blue.
        gx, gy = poly.exterior.coords.xy
        ax.plot(gx, gy, 'g-')
        for source, style in ((p1s, 'r'), (p2s, 'b')):
            sx, sy = source.exterior.coords.xy
            ax.plot(sx, sy, style)
        ax.quiver(mx, my, u, v)
        plt.savefig('figure_{0:03}.png'.format(frame))
        plt.close()
if __name__ == '__main__':
    # Entry point: show the interactive speed-check plot. record_animation()
    # writes PNG frames to disk instead and is disabled by default.
    #record_animation()
    check_speeds()
| StarcoderdataPython |
3314718 | <filename>common/mysqldatabasecur.py
"""
@author:
@file: mysqldatabasecur.py
@time: 2018/3/9 15:46
"""
"""
Database helpers for interface test cases: query the test database and
compare expected vs. actual results. Currently only MySQL is supported.
"""
from pymysql import *
'''Connect to the database: code 1 means success, error holds the error message, conne is the returned connection instance.'''
def cursemsql(host, port, user, password, database):
    """Open a MySQL connection.

    Returns a dict: ``{'code': 1, 'conne': connection}`` on success, or
    ``{'code': 0, 'error': exception}`` when the connection fails.
    """
    try:
        link = connect(host=host, port=port, user=user, password=password, db=database)
        return {'code': 1, 'conne': link}
    except Exception as err:
        return {'code': 0, 'error': err}
'''Execute SQL on the database: code 1 means the statement succeeded, result holds the fetched rows.'''
def excemysql(conne, Sqlmy):
    """Run *Sqlmy* on an open connection and fetch every row.

    Returns ``{'code': 1, 'result': rows}`` on success, otherwise
    ``{'code': 0, 'error': exception}``.
    """
    try:
        with conne.cursor() as cursor:
            cursor.execute(Sqlmy)
            return {'code': 1, 'result': cursor.fetchall()}
    except Exception as err:
        return {'code': 0, 'error': err}
| StarcoderdataPython |
1605820 | import os
import collections
import gym
import numpy as np
import joblib
import tensorflow.compat.v1 as tf
from baselines.common.policies import build_policy
from gfootball.env import football_action_set
from gfootball.env import player_base
from gfootball.env.wrappers import Simple115StateWrapper
class Player(player_base.PlayerBase):
  """An agent loaded from PPO2 cnn model checkpoint.

  Builds an MLP policy (5 layers x 128 hidden units) in its own TF session,
  restores weights from a baselines-format checkpoint, and re-saves them via
  tf.train.Saver.
  """
  def __init__(self, checkpoint_path):
    player_base.PlayerBase.__init__(self)
    self._action_set = 'default'
    # Variables are created under this prefix so they can be matched against
    # checkpoint names in _load_variables below.
    self._player_prefix = 'player_0'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self._sess = tf.Session(config=config)
    with tf.variable_scope(self._player_prefix):
      with tf.variable_scope('ppo2_model'):
        policy_fn = build_policy(DummyEnv(self._action_set), 'mlp', num_layers=5, num_hidden=128)
        self._policy = policy_fn(nbatch=1, sess=self._sess)
    _load_variables(checkpoint_path, self._sess, prefix=self._player_prefix + '/')
    saver = tf.train.Saver()
    # NOTE(review): constructor side effect -- re-saves the restored model to
    # a hard-coded local path; consider parameterizing before reuse.
    saver.save(self._sess, "/home/alex/Dropbox/projects/python/kaggle/football/saved_models/simple_ppo2/simple_ppo2")
  def __del__(self):
    # Release the TF session when the player is garbage collected.
    self._sess.close()
  def take_action(self, observation):
    # Single-agent only: convert the raw observation to the simple115 float
    # vector and run one policy step; returns a one-element action list.
    assert len(observation) == 1, 'Multiple players control is not supported'
    observation = Simple115StateWrapper.convert_observation(observation, True, True)
    action = self._policy.step(observation)[0][0]
    actions = [action] #[football_action_set.action_set_dict[self._action_set][action]]
    return actions
def _load_variables(load_path, sess, prefix='', remove_prefix=True):
  """Loads variables from checkpoint of policy trained by baselines."""
  # Forked from address below since we needed loading from different var names:
  # https://github.com/openai/baselines/blob/master/baselines/common/tf_util.py
  loaded_params = joblib.load(load_path)
  assign_ops = []
  for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
    if not var.name.startswith(prefix):
      continue
    # Checkpoint keys may or may not carry the scope prefix.
    key = var.name[len(prefix):] if remove_prefix else var.name
    assign_ops.append(var.assign(loaded_params[key]))
  sess.run(assign_ops)
class DummyEnv(object):
  """Stand-in environment for build_policy.

  Only the action/observation spaces are needed at policy-construction time;
  the real environment does not exist yet.
  """
  def __init__(self, action_set):
    n_actions = len(football_action_set.action_set_dict[action_set])
    self.action_space = gym.spaces.Discrete(n_actions)
    # 115-dim simple115 representation plus 46 extra features.
    self.observation_space = gym.spaces.Box(
        low=-np.inf, high=np.inf, shape=[115 + 46], dtype=np.float32)
# NOTE(review): module-level side effect -- instantiating the agent (and a TF
# session) at import time from a hard-coded local checkpoint path.
player = Player("/home/alex/Dropbox/projects/python/kaggle/football/checkpoints/openai-2020-11-26-12-35-02-877222/checkpoints/03600")
3347129 | from django.urls import path
from pastry_shop.blog.views import (
PostListView,
PostDetailView,
PostCreateView,
PostEditView,
PostDeleteView,
CommentEditView,
CommentDeleteView,
)
app_name = "blog"
# URL routes for the blog app. All names are reversed under the "blog:"
# namespace, e.g. reverse("blog:post-detail", kwargs={"pk": post.pk}).
urlpatterns = [
    path("posts/", PostListView.as_view(), name="post-list"),
    path("posts/<int:pk>/", PostDetailView.as_view(), name="post-detail"),
    path("posts/add/", PostCreateView.as_view(), name="post-create"),
    path("posts/<int:pk>/edit/", PostEditView.as_view(), name="post-edit"),
    path("posts/<int:pk>/delete/", PostDeleteView.as_view(), name="post-delete"),
    # Comments are addressed by their own pk, independent of the parent post.
    path("comments/<int:pk>/edit/", CommentEditView.as_view(), name="comment-edit"),
    path(
        "comments/<int:pk>/delete/", CommentDeleteView.as_view(), name="comment-delete"
    ),
]
| StarcoderdataPython |
3202764 | <gh_stars>1-10
import logging
import sys
from calculator_handler import CalculatorHandler
if __name__ == '__main__':
    # Route all DEBUG-and-above records to stdout with a detailed format.
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s - [%(filename)s:%(lineno)s]')
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(console_handler)
    # Start the calculator as an RPC server.
    CalculatorHandler(use_rpc=True, server=True)
| StarcoderdataPython |
38462 | import numpy as np
import random
import numexpr as ne
def gen_layer(rin, rout, nsize):
    """Sample ``nsize`` points uniformly from the spherical shell rin <= r <= rout.

    Uniformity in volume is achieved by drawing the cubed radius uniformly in
    [rin**3, rout**3] and the direction uniformly on the unit sphere (uniform
    azimuth, uniform cosine of the polar angle).

    Returns the Cartesian coordinate arrays (x, y, z), each of length nsize.
    """
    # Draw order matches the original: azimuth, cos(polar), cubed radius.
    azimuth = np.random.uniform(0, 2*np.pi, size=nsize)
    cos_polar = np.random.uniform(-1, 1, size=nsize)
    u = np.random.uniform(rin**3, rout**3, size=nsize)
    polar = np.arccos(cos_polar)
    radius = np.cbrt(u)  # overall scale R = 1.0 of the original folded in
    sin_polar = np.sin(polar)
    x = radius * sin_polar * np.cos(azimuth)
    y = radius * sin_polar * np.sin(azimuth)
    z = radius * np.cos(polar)
    return (x, y, z)
def LPFbead(qrange, sigmabead):
'''
Compute the spherical form factor given a range of q values.
Parameters
----------
qrange: numpy.array
array of values in q-space to compute form factor for.
sigmabead: float
diameter of the sphere.
Return
-------
Fqb: numpy.array
array of values of the spherical form factors (F(q)) computed at q-points listed in qrange.
'''
R=np.true_divide(sigmabead,2)
QR=np.multiply(qrange,R)
Fqb=np.multiply(np.true_divide(np.sin(QR)-np.multiply(QR,np.cos(QR)),np.power(QR,3)),3)
return Fqb
def LPOmega(qrange, nAin, nAout, nB, r): # q values, # inner-A, # outer-A, # B scatterers, coordinates
    """Average intra-particle interference term omega(q).

    Sums sin(q*r_ij)/(q*r_ij) over all distinct scatterer pairs in the
    configuration ``r`` (shape (1, 3, Ntot)) via numexpr, normalizes by the
    total number of scatterers, and adds 1 for the self (i == j) terms.
    Returns an array of shape (1, len(qrange)).
    """
    Ntot=nAin+nB+nAout # Total number of scatterers to loop through
    omegaarrt=np.zeros((1,len(qrange))) # initiating array
    omegaarr=np.zeros((1,len(qrange))) # initiating array
    rur=r[0,:,:]# selects the single stored replicate
    rur=rur.transpose()
    for i in range(Ntot-1): # loops through index and all further indexes to prevent double counting
        # Pairwise distances from scatterer i to every later scatterer j > i.
        all_disp = rur[i,:]-rur[(i+1):,:]
        rij = np.sqrt(np.sum(np.square(all_disp),axis=1))
        rij = rij.transpose()
        rs = rij[:,np.newaxis] # reshapes array for consistency
        Q = qrange[np.newaxis,:] # reshapes array for consistency
        vals = ne.evaluate("sin(Q*rs)/(Q*rs)") # ne is efficient at calculations
        inds=np.argwhere(np.isnan(vals)) # error catching in case there are NaN values
        if len(inds)>0:
            # NaNs arise from 0/0 at rij == 0; sinc(0) is 1, so patch with 1.
            for val in inds:
                vals[val[0],val[1]]=1
            inds_double_check=np.argwhere(np.isnan(vals))
            if len(inds_double_check)>0:
                print('nan error!')
        vals = ne.evaluate("sum((vals), axis=0)") # adds together scatterer contributions for each q value
        omegaarr+=vals
    omegaarr=np.true_divide(2*omegaarr,Ntot)+1 # 1 accounts for the guarenteed overlap of same bead # 2* accounts for double counting avoided to reduce computational expense by looping for all other pairs
    omegaarrt+=omegaarr # stores values between loops
    return omegaarrt
def visualize(r, Rcore, dR_Ain, dR_B, dR_Aout, sigmabead):
    """Render the scatterer configuration with py3Dmol.

    Beads inside the inner-A shell or beyond the B shell (i.e. A-type) are
    drawn blue; beads in the B layer are drawn red. Returns the view.
    """
    import py3Dmol
    view = py3Dmol.view()
    inner_edge = Rcore + dR_Ain
    outer_edge = Rcore + dR_Ain + dR_B
    for ri in r[0, :, :].transpose():
        dist = np.linalg.norm(ri)
        col = 'blue' if (dist < inner_edge or dist > outer_edge) else 'red'
        view.addSphere(
            {
                'center': {'x': ri[0], 'y': ri[1], 'z': ri[2]},
                'radius': sigmabead/2,
                'color': col,
                'alpha': 0.9,
            }
        )
    #view.zoomTo()
    view.show()
    return view
def genLP(Rcore, dR_Ain, dR_B, dR_Aout, sigmabead, nAin, nAout, nB):
    """Generate one scatterer configuration for a three-layer vesicle.

    Beads are drawn uniformly in volume (via gen_layer) within each shell,
    working outwards: inner A layer, B layer, outer A layer.

    Parameters
    ----------
    Rcore : float
        Core (solvent cavity) radius.
    dR_Ain, dR_B, dR_Aout : float
        Thicknesses of the inner-A, B and outer-A layers.
    sigmabead : float
        Bead diameter (unused here; kept for interface compatibility).
    nAin, nAout, nB : int
        Bead counts for the inner-A, outer-A and B layers.

    Returns
    -------
    numpy.ndarray, shape (1, 3, nAin+nB+nAout)
        Cartesian coordinates, ordered inner-A, then B, then outer-A.
    """
    ntot = nAin + nB + nAout
    r = np.zeros((1, 3, ntot))
    # Shell boundaries, working outwards from the core.
    r_in = Rcore + dR_Ain
    r_mid = r_in + dR_B
    r_out = r_mid + dR_Aout
    # (lower radius, upper radius, bead count, start index) per layer.
    layers = ((Rcore, r_in, nAin, 0),
              (r_in, r_mid, nB, nAin),
              (r_mid, r_out, nAout, nAin + nB))
    for lo, hi, count, start in layers:
        x, y, z = gen_layer(lo, hi, count)
        r[0, 0, start:start + count] = x
        r[0, 1, start:start + count] = y
        r[0, 2, start:start + count] = z
    return r
class scatterer_generator:
    '''
    The wrapper class for vesicle shape. Default length unit: Angstrom.
    Notes
    -----
    **The following 7 shape-specific descriptors are to be specified by user (see
    *Attributes*) as
    a list, in the precise order as listed, while calling `Model.load_shape`
    to load this shape:**
    num_scatterers:
        Number of scatterers used to represent a chain. Default: 24
    N:
        Number of monomers in a chain. Default: 54
    eta_B:
        Packing fraction of scatterers in B layer. Default: 0.5
    lmono_b:
        Diameter of a monomer of chemistry B. Default: 50.4 A
    lmono_a:
        Diameter of a monomer of chemistry A. Default: 50.4 A
    fb:
        Fraction of monomers in chain that are of B type. fa = 1-fb. Default: 0.55
    nLP:
        Number of replicates for each individual. Default: 7
    **The following 7 parameters are to be predicted, in the precise order
    as listed, by GA:**
    R_core:
        Core radius. Default [min,max]: [50 A, 400 A]
    t_Ain:
        Thickness of inner A layer. Default [min,max]: [30 A, 200 A]
    t_B:
        Thickness of B layer. Default [min,max]: [30 A, 200 A]
    t_Aout:
        Thickness of outer A layer. Default [min,max]: [30 A, 200 A]
    sigma_Ain:
        Split of solvophilic scatterers between inner and outer layers.
        Default [min,max]: [0.1, 0.45]
    sigma_R:
        Dispersity in vesicle size as implemented in the core radius.
        Default [min,max]: [0.0, 0.45]
    log10(bg):
        Negative log10 of background intensity.
        E.g. an background intensity of 0.001 leads to this value being 3.
        Default [min,max]: [0.1,4]
    See also
    --------
    crease_ga.Model.load_shape
    '''
    # NOTE(review): shape_params uses a mutable (list) default argument; safe
    # only because it is read and never mutated -- do not append to it.
    def __init__(self,
                 shape_params = [24,54,0.5,50.4,50.4,0.55,7],
                 minvalu = (50, 30, 30, 30, 0.1, 0.0, 0.1),
                 maxvalu = (400, 200, 200, 200, 0.45, 0.45, 4)):
        num_scatterers = shape_params[0]
        N = shape_params[1]
        rho_B = shape_params[2]
        lmono_a = shape_params[3]
        lmono_b= shape_params[4]
        fb = shape_params[5]
        nLP = shape_params[6]
        self._numvars = 7
        self.minvalu = minvalu
        self.maxvalu = maxvalu
        self.num_scatterers=num_scatterers ## number of scatterers per chain
        self.N=N ## Number of beads on chain
        self.rho_B=rho_B ## density/volume fraction of beads in B layer
        self.lmono_a=lmono_a ## Angstrom 'monomer contour length'
        self.lmono_b=lmono_b ## Angstrom 'monomer contour length'
        self.MB=np.pi/6*(self.lmono_b)**3 ## volume of B monomer
        self.sigmabead=np.true_divide(self.N*self.lmono_b,self.num_scatterers) ## scatterer bead diameter
        self.fb=fb ## fraction of B type monomers in chain
        self.nLP=nLP ## number of replicates
    @property
    def numvars(self):
        # Number of GA-predicted parameters (see class docstring).
        return self._numvars
    def converttoIQ(self, qrange, param):
        '''
        Calculate computed scattering intensity profile.
        Parameters
        ----------
        qrange: numpy.array
            q values.
        param: numpy.array
            Decoded input parameters. See *Notes* section of the class
            documentation.
        Returns
        -------
        IQid: A numpy array holding I(q).
        '''
        # q values, decoded parameters,
        # number of repeat units per chain, fraction of B beads per chain, core density,
        # scatterer diameter, molar mass of B chemistry,
        # length of A chemistry bond, length of B chemistry bond,
        # number of scatterers per chain, # of replicates, stdev in Rcore size
        sigmabead = self.sigmabead
        N = self.N
        fb = self.fb
        rho_B = self.rho_B
        MB = self.MB
        lmono_a = self.lmono_a
        lmono_b = self.lmono_b
        num_scatterers = self.num_scatterers
        nLP = self.nLP
        IQid=np.zeros((len(qrange))) #initiates array for output IQ
        ### Parameters used to generate scatterer placements ###
        Rcore=param[0]
        dR_Ain=param[1]
        dR_B=param[2]
        dR_Aout=param[3]
        sAin=param[4] # split of type A scatterer
        sigmaR=param[5] # variation in Rcore, dispersity
        #print(Rcore, dR_Ain, dR_B, dR_Aout, sAin)
        Background=10**(-param[6])
        varR = Rcore*sigmaR # variation in Rcore
        disper = np.array([-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]) # fixed intervals of sigma
        sum_omegaarr=np.zeros((1,len(qrange)))
        # Average omega(q) over nLP dispersity replicates, centred on the
        # middle of the `disper` grid, with 3 random configurations each.
        for step in range(0, nLP):
            Rcore = param[0] + varR*disper[step + int((9-nLP)/2.)] ## add displacement to Rcore
            # print("disper = ", disper[step + int((9-nLP)/2.)])
            # print("Rcore = ", Rcore)
            vol_B = (4/3.0)*np.pi*(np.power(Rcore + dR_Ain + dR_B, 3)
                                   - np.power(Rcore + dR_Ain, 3)) ## volume of solvophobic layer B
            nagg = int(np.true_divide( rho_B*vol_B, N*fb*MB )) ## number of chains in vesicle
            ntot = nagg*num_scatterers ## total number of scatterers
            nB = int(ntot*fb) ## number of scatterers in B
            nAin = int(ntot*(1-fb)*sAin) ## number of scatterers in A_in
            nAout = int(ntot*(1-fb)*(1-sAin)) ## number of scatterers in A_out
            for reps in range(0, 3):
                ### Generates scatterer positions in structure ###
                r = genLP(Rcore, dR_Ain, dR_B, dR_Aout, sigmabead, nAin, nAout, nB)
                ### Calculates omega from scatterers in shape ###
                sum_omegaarr += LPOmega(qrange, nAin, nAout, nB, r)
        omegaarr=np.true_divide(sum_omegaarr,nLP*3) # average omega
        omegaarr=omegaarr.reshape(len(qrange),)
        Fqb=LPFbead(qrange,sigmabead) # calcualtes sphere shape factor
        F2qb=np.multiply(Fqb,Fqb) # Sphere shape factor square
        sqmm=np.ones((np.shape(Fqb))) # assuming dilute mixture the micelle-micelle structure factor = 1
        F2qb_sqmm=np.multiply(F2qb,sqmm) # determines the micelle form factor
        IQid=np.multiply(omegaarr,F2qb_sqmm) # calculates Icomp
        maxIQ=np.max(IQid)
        IQid=np.true_divide(IQid,maxIQ) # normalizes the I(q) to have its maximum = 1
        IQid+=Background # add background
        return IQid
| StarcoderdataPython |
76293 | # Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to run PyNodes in Docker containers using XManager."""
import atexit
import copy
import dataclasses
from distutils import dir_util
import os
import pathlib
import shutil
import sys
import tempfile
from typing import Any, List, Optional, Sequence, Tuple
import cloudpickle
try:
from xmanager import xm
except ModuleNotFoundError:
raise Exception('Launchpad requires `xmanager` for XM-based runtimes.'
'Please run `pip install dm-launchpad[xmanager]`.')
# Name of the pickled payload (list of node functions) inside the build dir.
_DATA_FILE_NAME = 'job.pkl'
@dataclasses.dataclass
class DockerConfig:
  """Local docker launch configuration.
  Attributes:
    code_directory: Path to directory containing any user code that may be
      required inside the Docker image. The user code from this directory is
      copied over into the Docker containers, as the user code may be needed
      during program execution. If needed, modify docker_instructions in
      xm.PythonContainer construction below if user code needs installation.
    docker_requirements: Path to requirements.txt specifying Python packages to
      install inside the Docker image.
    hw_requirements: Hardware requirements. Do not set `replicas`; it is
      computed from the number of nodes by `to_docker_executables`.
  """
  code_directory: Optional[str] = None
  docker_requirements: Optional[str] = None
  hw_requirements: Optional[xm.JobRequirements] = None
def to_docker_executables(
    nodes: Sequence[Any],
    docker_config: DockerConfig,
) -> List[Tuple[xm.PythonContainer, xm.JobRequirements]]:
  """Returns a list of `PythonContainer`s objects for the given `PyNode`s.

  Builds a temporary Docker build context containing the pickled node
  functions, the process entry script, the user code directory and the
  requirements file, then wraps it in a single container spec whose job
  requirements request one replica per node.

  Args:
    nodes: PyNodes whose `function` attributes are pickled and executed
      inside the container.
    docker_config: Code directory, requirements path and optional hardware
      requirements; see `DockerConfig`.

  Returns:
    A one-element list of (container spec, job requirements) tuples.

  Raises:
    ValueError: if mandatory `DockerConfig` fields are missing, or the
      hardware requirements already specify a replica count.
    FileNotFoundError: if `docker_requirements` does not point to a file.
  """
  if docker_config.code_directory is None or docker_config.docker_requirements is None:
    raise ValueError(
        'code_directory and docker_requirements must be specified through'
        'DockerConfig via local_resources when using "xm_docker" launch type.')
  # Fail fast: the requirements file is copied below, so validate it before
  # creating and populating the temporary build context. (Previously this
  # check ran only after shutil.copy, which would raise first.)
  if not os.path.exists(docker_config.docker_requirements):
    raise FileNotFoundError('Please specify a path to a file with Python'
                            'package requirements through'
                            'docker_config.docker_requirements.')

  # Generate tmp dir without '_' in the name, Vertex AI fails otherwise.
  while True:
    tmp_dir = tempfile.mkdtemp()
    if '_' not in tmp_dir:
      break
    # Remove rejected directories instead of leaking them.
    shutil.rmtree(tmp_dir, ignore_errors=True)
  atexit.register(shutil.rmtree, tmp_dir, ignore_errors=True)

  # Pickle the node functions into the build context.
  data_file_path = pathlib.Path(tmp_dir, _DATA_FILE_NAME)
  with open(data_file_path, 'wb') as f:
    cloudpickle.dump([node.function for node in nodes], f)

  # Copy the entry point, the user code and the requirements file.
  file_path = pathlib.Path(__file__).absolute()
  shutil.copy(pathlib.Path(file_path.parent, 'process_entry.py'), tmp_dir)
  dir_util.copy_tree(docker_config.code_directory, tmp_dir)
  shutil.copy(docker_config.docker_requirements,
              pathlib.Path(tmp_dir, 'requirements.txt'))
  workdir_path = pathlib.Path(tmp_dir).name

  job_requirements = docker_config.hw_requirements
  if not job_requirements:
    job_requirements = xm.JobRequirements()

  # Make a copy of requirements since they are being mutated below.
  job_requirements = copy.deepcopy(job_requirements)

  if job_requirements.replicas != 1:
    raise ValueError(
        'Number of replicas is computed by the runtime. '
        'Please do not set it explicitly in the requirements.'
    )
  job_requirements.replicas = len(nodes)

  # Match the container's Python version to the launching interpreter.
  base_image = f'python:{sys.version_info.major}.{sys.version_info.minor}'
  return [(xm.PythonContainer(
      path=tmp_dir,
      base_image=base_image,
      entrypoint=xm.CommandList(
          [f'python -m process_entry --data_file={_DATA_FILE_NAME}']),
      docker_instructions=[
          'RUN apt-get update && apt-get install -y git',
          'RUN python -m pip install --upgrade pip',
          'RUN apt-get -y install libpython3.9',
          f'COPY {workdir_path}/requirements.txt requirements.txt',
          'RUN python -m pip install xmanager',
          'RUN python -m pip install -r requirements.txt',
          f'COPY {workdir_path}/ {workdir_path}',
          f'WORKDIR {workdir_path}',
      ]), job_requirements)]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.