id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3371075 | """
This quiz will generate chord symbols for you to play.
It's designed for when you're with your instrument, to practice playing
different chords.
"""
import sys
from datetime import datetime as dt
from time import sleep
from src.chords import generate_random_chord
def countdown(seconds):
    """Print a once-per-second countdown from *seconds* down to 1 on one line."""
    remaining = seconds
    while remaining > 0:
        sys.stdout.write(' ' + str(remaining))
        sys.stdout.flush()
        sleep(1)
        remaining -= 1
def run_chord_quiz_manually_proceed():
    """Show random chords one at a time; ENTER advances, Q/q quits.

    After each answer the elapsed time for that chord (whole seconds,
    including the time spent waiting at the prompt) is printed.
    """
    keep_going = True
    while keep_going:
        shown_at = dt.now()
        print('\n', generate_random_chord(), '\n')
        answer = input('press enter to continue or Q then enter to quit')
        if answer in ('q', 'Q'):
            keep_going = False
        elapsed = (dt.now() - shown_at).total_seconds()
        print(int(round(elapsed, 0)), ' seconds')
def run_chord_quiz_autoproceed():
    """Endlessly show random chords, counting down to the hint and then to
    the next chord.  Runs until interrupted (CTRL + C).
    """
    guess_seconds = 20
    hint_seconds = 20
    # One-off intro prompt before the endless loop starts.
    input("This quiz is designed for you to play along with when you are with your instrument.\n\n"
          "At any time you can press CTRL + C to interrupt it.\n"
          "To start, press ENTER.")
    while True:
        chord = generate_random_chord()
        print('\n', chord, '\n\n Hint will be shown in')
        countdown(guess_seconds)
        print('\n\n', chord.hint, 'Next chord will be shown in')
        countdown(hint_seconds)
        print('\n\n\n\n')
if __name__ == '__main__':
run_chord_quiz_autoproceed()
| StarcoderdataPython |
3213137 | import numpy as np
import xarray as xr
grav = 9.81
cp = 1004
Lc = 2.5104e6
rho0 = 1.19
def metpy_wrapper(fun):
    """Given a metpy function return an xarray compatible version
    """
    from metpy.units import units as u

    def func(*args):
        # Re-attach each input's declared units as pint Quantities before
        # delegating to the metpy function.
        # NOTE(review): assumes each arg exposes `.units` (xarray falls back
        # to attrs for attribute access) -- confirm for all call sites.
        def f(*largs):
            new_args = [u.Quantity(larg, arg.units)
                        for larg, arg in zip(largs, args)]
            return fun(*new_args)

        # Probe with scalar 1s only to discover the output units of `fun`.
        output_units = f(*[1 for arg in args]).units
        ds = xr.apply_ufunc(f, *args)
        ds.attrs['units'] = str(output_units)
        return ds

    return func
def omega_from_w(w, rho):
    """Pressure velocity in the anelastic framework.

    omega = dp_0/dt = (dp_0/dz)(dz/dt) = -rho_0 g w
    """
    pressure_velocity = - w * rho * grav
    return pressure_velocity
def liquid_water_temperature(t, qn, qp):
    """Approximate liquid-water temperature, neglecting ice and snow.

    sl = T + (g/cp) z - (L/cp) (qp + qn)/1000
    (the /1000 presumably converts g/kg to kg/kg -- see total_water's units).
    """
    height_term = grav / cp * t.z
    condensate_term = Lc / cp * (qp + qn) / 1000.0
    sl = t + height_term - condensate_term
    sl.attrs['units'] = 'K'
    return sl
def total_water(qv, qn):
    """Total water mixing ratio (vapour + condensate), labelled g/kg."""
    total = qv + qn
    total.attrs['units'] = 'g/kg'
    return total
def get_dz(z):
    """Cell thicknesses for the cell-centre coordinate *z*.

    The grid is extended with a mirror point below the surface (-z[0]) and a
    linearly extrapolated point above the top; interface heights are the
    midpoints, and dz is the spacing between consecutive interfaces.
    """
    below = -z[0]
    above = 2.0 * z[-1] - 1.0 * z[-2]
    extended = np.hstack((below, z, above))
    interfaces = .5 * (extended[1:] + extended[:-1])
    return xr.DataArray(interfaces[1:] - interfaces[:-1], z.coords)
def layer_mass(rho):
    """Mass per unit area (kg/m2) of each layer, from density and thickness."""
    thickness = get_dz(rho.z)
    return (rho * thickness).assign_attrs(units='kg/m2')
def layer_mass_from_p(p, ps=None):
    """Layer mass (kg/m2) from a cell-centre pressure profile *p*.

    The *100 factor suggests p is in hPa (converted to Pa before dp/g).
    ps is the surface pressure; when omitted it is linearly extrapolated
    from the two lowest levels.
    """
    if ps is None:
        ps = 2 * p[0] - p[1]
    # Extrapolate a pressure above the model top the same way.
    p_top = p[-1] * 2 - p[-2]
    edges = np.hstack((ps, p, p_top))
    interfaces = (edges[1:] + edges[:-1]) / 2
    dp = - np.diff(interfaces * 100) / grav
    return xr.DataArray(dp, p.coords)
def mass_integrate(p, x, average=False):
    """Mass-weighted vertical integral of *x* over pressure levels *p*
    (or the mass-weighted mean when *average* is True)."""
    dp = layer_mass_from_p(p)
    total = (x * dp).sum(p.dims)
    if not average:
        return total
    return total / dp.sum()
def column_rh(QV, TABS, p):
    # Mass-weighted column-average relative humidity.
    from metpy.calc import relative_humidity_from_mixing_ratio
    rh = metpy_wrapper(relative_humidity_from_mixing_ratio)(QV, TABS, p)
    # NOTE(review): rh/1000 looks like a unit conversion (QV presumably in
    # g/kg) -- confirm against the units metpy attaches to the result.
    return mass_integrate(p, rh/1000, average=True)
| StarcoderdataPython |
3239453 |
import ast
from gemini.utils import *
from gemini.code_tree.code_node_leaf import CodeNodeLeaf
from ..transformer.import_module_transformer import ImportModuleTransformer
from .pass_base import PassBase
__all__ = [
'ImportModulePass',
]
class ImportModulePass(PassBase):
    """Pass that rewrites import statements in a code node's AST and attaches
    each imported module's source as a child CodeNodeLeaf.
    """

    __slots__ = [
        '_solvers',
        '_import_vector',
    ]

    @property
    def import_vector(self):
        # Module names collected by the most recent run_pass() call.
        return self._import_vector

    def __init__(self):
        # type: (None) -> None
        super(ImportModulePass, self).__init__()
        self._solvers = []
        self._solvers.append(ImportModuleTransformer)
        # Bug fix: the slot was declared but never initialised, so reading
        # ``import_vector`` before run_pass() raised AttributeError.
        self._import_vector = []

    def run_pass(self, _cnode):
        """Rewrite imports in *_cnode*'s AST and add each imported module's
        source as a leaf child node.  Returns the (mutated) node.
        """
        # TODO(albert) support recursive importing, not Done
        solver1 = self._solvers[0]()
        _cnode.ast = solver1.visit(_cnode.ast)
        ast.fix_missing_locations(_cnode.ast)
        _modules = solver1.modules
        # Record which modules were pulled in on this run.
        self._import_vector = list(_modules.keys())
        for _module_name, _module_src in _modules.items():
            _cleaf = CodeNodeLeaf(_cnode)
            _cleaf.src = _module_src
            _cleaf.src_file = _module_name
            _cnode.add_code_node(_cleaf)
        return _cnode
| StarcoderdataPython |
1704666 | <filename>Collect/SRTM/DEM.py
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
Module: Collect/SRTM
"""
import os
from pyWAPOR.Collect.SRTM.DataAccess import DownloadData
import sys
def main(Dir, latlim, lonlim, Waitbar = 1):
    """
    Downloads HydroSHED data from http://srtm.csi.cgiar.org/download

    this data includes a Digital Elevation Model (DEM)
    The spatial resolution is 90m (3s)

    The following keyword arguments are needed:
    Dir -- 'C:/file/to/path/'
    latlim -- [ymin, ymax]
    lonlim -- [xmin, xmax]
    Waitbar -- '1' if you want a waitbar (Default = 1)
    """
    # Create directory if not exists for the output
    output_folder = os.path.join(Dir, 'SRTM', 'DEM')
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # Define the output map and create this if not exists
    nameEnd = os.path.join(output_folder, 'DEM_SRTM_m_3s.tif')
    # Download only when the target GeoTIFF is not already present.
    if not os.path.exists(nameEnd):

        # Create Waitbar
        if Waitbar == 1:
            print('\nDownload SRTM altitude map with a resolution of 3s')
            # Imported lazily so the waitbar module is only a soft dependency.
            import pyWAPOR.Functions.WaitbarConsole as WaitbarConsole
            total_amount = 1
            amount = 0
            WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)

        # Download and process the data
        DownloadData(output_folder, latlim, lonlim)

        if Waitbar == 1:
            amount = 1
            WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
    else:
        if Waitbar == 1:
            print("\nSRTM altitude map (3s) already exists in output folder")
if __name__ == '__main__':
    # NOTE(review): main() requires (Dir, latlim, lonlim) but is handed the
    # whole argv list as a single positional argument -- as written this call
    # raises TypeError.  Confirm the intended CLI and unpack the arguments.
    main(sys.argv)
| StarcoderdataPython |
1649095 | # Python code for calculating communication efficiency of a network.
# The reference articles for the computed measure:
# <NAME>., and <NAME>. (2001). Efficient behavior of small-world networks. Physical Review Letters 87.
# <NAME>., and <NAME>. (2003). Economic small-world behavior in weighted networks. Eur Phys J B 32, 249-263.
# Input 1: Node_file
# Input 2: Edge_file with weights
"""
=================================================================================================
If you are using this code, kindly cite the following articles:
(1) <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, Network approach towards understanding the crazing in glassy amorphous polymers, Journal of Statistical Mechanics: Theory and Experiment 043305 (2018).
(2) <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, Network geometry and market instability, R. Soc. Open Sci. 8: 201734 (2021).
(3) <NAME>, <NAME>, <NAME> & <NAME>, Network-centric indicators for fragility in global financial indices, Front. Phys. 8: 624373 (2021).
=================================================================================================
"""
import igraph as ig
import sys

# CLI: argv[1] = node list file, argv[2] = weighted edge list (NCOL format).
#edge files folder path
nodefile=sys.argv[1]
infile=sys.argv[2]

# Weighted, undirected graph with named vertices from the edge list.
G=ig.Graph.Read_Ncol(infile,weights=True,directed=False,names=True)
N=G.vcount()
# NOTE(review): isinstance(int(...), int) is always True, so this filter is a
# no-op -- and int() raises ValueError on non-integer tokens instead of
# skipping them.  Confirm whether non-numeric node ids should be tolerated.
nodes=[i.strip().split()[0] for i in open(nodefile) if isinstance(int(i.strip().split()[0]),int)]
n_nodes=len(nodes)
# Nodes listed in the node file but absent from the edge list are isolated;
# add them so the efficiency denominator uses the full node count.
n_isolated_nodes=n_nodes-N
if n_isolated_nodes != 0:
    G.add_vertices(n_isolated_nodes)
    N=G.vcount()
# Global efficiency: E = (1 / (N (N - 1))) * sum over pairs of 1 / d_ij,
# with unreachable/self pairs (d == 0 entries) skipped.
length=G.shortest_paths_dijkstra(weights='weight')
temp=0
for i in length:
    temp+=sum([1.0/x for x in i if x!=0])
ce=float(temp)/float(N*(N-1))
print(ce)
| StarcoderdataPython |
47629 | # Generated by Django 3.0.11 on 2020-12-09 06:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional profile-picture ImageField to the Contacts model.
    # Auto-generated migration: keep the structure as Django emitted it.

    dependencies = [
        ('telephone_directory', '0002_auto_20201208_1801'),
    ]

    operations = [
        migrations.AddField(
            model_name='contacts',
            name='profile_pic',
            field=models.ImageField(blank=True, null=True, upload_to='profile_pic/'),
        ),
    ]
| StarcoderdataPython |
1621113 | <filename>scripts/VCF/QC/plot_variants_density.py<gh_stars>1-10
from VcfQC import VcfQC
import argparse
import os
#get command line arguments
parser = argparse.ArgumentParser(description='Script to get the variant density for a certain VCF file')

parser.add_argument('--bedtools_folder', type=str, required=True, help='Folder containing the bedtools binary' )
parser.add_argument('--outprefix', type=str, required=True, help='Prefix for output' )
parser.add_argument('--filename', type=str, required=True, help='Path to the VCF file that will be analysed' )
parser.add_argument('--length', type=str, required=True, help='Window length (in bp) that will be used for calculating the density' )
parser.add_argument('--genome_file', type=str, required=True, help='File with genome sizes' )
parser.add_argument('--r_folder', type=str, required=False, help='Path to R binary' )
parser.add_argument('--r_scripts', type=str, required=True, help='Path to folder containing the R scripts required for constructing some of the plots used by this class' )

# NOTE(review): arguments are parsed at import time (outside the __main__
# guard), so importing this module requires all mandatory flags -- confirm
# this is intended before reusing the module elsewhere.
args = parser.parse_args()

if __name__ == '__main__':
    # Delegate to VcfQC: compute per-window variant density and plot it.
    vcfQC = VcfQC(vcf=args.filename,bedtools_folder=args.bedtools_folder,r_folder=args.r_folder,r_scripts=args.r_scripts)
    vcfQC.plot_variant_density(length=args.length,genome=args.genome_file,outprefix=args.outprefix)
| StarcoderdataPython |
20215 | <gh_stars>0
from .utils import *
class Filter(object):####TODO add logging
    # NOTE(review): this class body looks like a mangled merge -- __init__
    # contains orphaned F-ratio code (``y_data`` and ``row`` are undefined
    # here, and ``return`` from __init__ is invalid Python).  A complete
    # Filter class is defined again later in this file.
    def __init__(self, measure, cutting_rule):
        """
        Basic univariate filter class with chosen(even custom) measure and cutting rule
        :param measure:

        Examples
        --------
        >>> f=Filter("PearsonCorr", GLOB_CR["K best"](6))
        """
        inter_class = 0.0
        intra_class = 0.0
        for value in np.unique(y_data):
            index_for_this_value = np.where(y_data == value)[0]
            n = np.sum(row[index_for_this_value])
            mu = np.mean(row[index_for_this_value])
            var = np.var(row[index_for_this_value])
            # NOTE(review): (mu - mu) is always zero, so the between-class
            # term never accumulates -- the overall mean was presumably meant
            # as one operand.  Confirm against the original F-ratio code.
            inter_class += n * np.power((mu - mu), 2)
            intra_class += (n - 1) * var
        f_ratio = inter_class / intra_class
        return f_ratio
@classmethod
def __f_ratio_measure(cls, X, y, n):
    # Indices of the n features with the largest F-ratio
    # (between-/within-class variance ratio).
    X, y = _DefaultMeasures.__check_input(X, y)
    # Guard: the assert fires when 1 < n_features < n, i.e. when fewer
    # features exist than were requested.
    assert not 1 < X.shape[1] < n, 'incorrect number of features'
    f_ratios = []
    for feature in X.T:
        f_ratio = _DefaultMeasures.__calculate_F_ratio(feature, y.T)
        f_ratios.append(f_ratio)
    f_ratios = np.array(f_ratios)
    # argpartition: indices of the n largest scores, in arbitrary order.
    return np.argpartition(f_ratios, -n)[-n:]
@staticmethod
def f_ratio_measure(n):
    # Factory: a measure(X, y) ranking features by F-ratio, keeping n of them.
    return partial(_DefaultMeasures.__f_ratio_measure, n=n)
@staticmethod
def gini_index(X, y):
    # Gini-index-style score per feature, built from cumulative sums of the
    # L1-normalised feature columns against the cumulative normalised target.
    X, y = _DefaultMeasures.__check_input(X, y)
    cum_x = np.cumsum(X / np.linalg.norm(X, 1, axis=0), axis=0)
    cum_y = np.cumsum(y / np.linalg.norm(y, 1))
    # Trapezoid-style pairing of consecutive cumulative values.
    diff_x = (cum_x[1:] - cum_x[:-1])
    diff_y = (cum_y[1:] + cum_y[:-1])
    return np.abs(1 - np.sum(np.multiply(diff_x.T, diff_y).T, axis=0))
# Calculate the entropy of y.
@staticmethod
def __calc_entropy(y):
    # Shannon entropy (base 2) of the label sequence y.
    counts = dict()
    for label in y:
        counts[label] = counts.get(label, 0) + 1
    total = len(y)
    result = 0.0
    for count in counts.values():
        result += -count / total * log(count / total, 2)
    return result
@staticmethod
def __calc_conditional_entropy(x_j, y):
    # H(y | x_j): label entropy conditioned on one feature column, treating
    # each distinct feature value as a partition of the samples.
    dict_i = dict()  # feature value -> row indices holding that value
    for i in range(x_j.shape[0]):
        if x_j[i] not in dict_i:
            dict_i.update({x_j[i]: [i]})
        else:
            dict_i[x_j[i]].append(i)
    # Conditional entropy of a feature.
    con_entropy = 0.0
    # get corresponding values in y.
    for f in dict_i.values():
        # Probability of each class in a feature.
        p = len(f) / len(x_j)
        # Dictionary of corresponding probability in labels.
        dict_y = dict()
        for i in f:
            if y[i] not in dict_y:
                dict_y.update({y[i]: 1})
            else:
                dict_y[y[i]] += 1
        # calculate the probability of corresponding label.
        sub_entropy = 0.0
        for l in dict_y.values():
            sub_entropy += -l / sum(dict_y.values()) * log(l / sum(dict_y.values()), 2)
        con_entropy += sub_entropy * p
    return con_entropy
# IGFilter = filters.IGFilter() # TODO: unexpected .run() interface; .run() feature_names; no default constructor
@staticmethod
def ig_measure(X, y):
    # Information gain of each feature: H(y) - H(y | x_j).
    X, y = _DefaultMeasures.__check_input(X, y)
    entropy = _DefaultMeasures.__calc_entropy(y)
    f_ratios = np.empty(X.shape[1])
    for index in range(X.shape[1]):
        f_ratios[index] = entropy - _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
    return f_ratios
@staticmethod
def __contingency_matrix(labels_true, labels_pred):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    Returns
    -------
    contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps`` is
        given, the dtype will be float.
    """
    # return_inverse maps each label to its index in the unique-class array.
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers
    # Currently, coo_matrix is faster than histogram2d for simple cases
    # TODO redo it with numpy
    contingency = sp.csr_matrix((np.ones(class_idx.shape[0]),
                                 (class_idx, cluster_idx)),
                                shape=(n_classes, n_clusters),
                                dtype=np.int)
    # Collapse repeated (class, cluster) pairs into counts.
    contingency.sum_duplicates()
    return contingency
@staticmethod
def __mi(U, V):
    # Mutual information of two labelings via their contingency matrix,
    # computed over the non-zero entries only (natural-log units).
    contingency = _DefaultMeasures.__contingency_matrix(U, V)
    nzx, nzy, nz_val = sp.find(contingency)
    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))  # marginal counts of U
    pj = np.ravel(contingency.sum(axis=0))  # marginal counts of V
    log_contingency_nm = np.log(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    outer = (pi.take(nzx).astype(np.int64, copy=False)
             * pj.take(nzy).astype(np.int64, copy=False))
    log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
    mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
@classmethod
def __mrmr_measure(cls, X, y, n):
    # Mutual information of each feature with the target (mRMR relevance term).
    # NOTE(review): `MI` is not defined in the visible scope of this file; the
    # commented line below suggests __mi was intended -- confirm its origin.
    assert not 1 < X.shape[1] < n, 'incorrect number of features'
    x, y = _DefaultMeasures.__check_input(X, y)
    # print([_DefaultMeasures.__mi(X[:, j].reshape(-1, 1), y) for j in range(X.shape[1])])
    return [MI(x[:, j].reshape(-1, 1), y) for j in range(x.shape[1])]
@staticmethod
def mrmr_measure(n):
    # Factory: mRMR-style measure fixed to n features.
    return partial(_DefaultMeasures.__mrmr_measure, n=n)
# RandomFilter = filters.RandomFilter() # TODO: bad .run() interface; .run() feature_names; no default constructor
@staticmethod
def su_measure(X, y):
    # Symmetric uncertainty per feature: 2 * IG(x_j, y) / (H(x_j) + H(y)).
    X, y = _DefaultMeasures.__check_input(X, y)
    entropy = _DefaultMeasures.__calc_entropy(y)
    f_ratios = np.empty(X.shape[1])
    for index in range(X.shape[1]):
        entropy_x = _DefaultMeasures.__calc_entropy(X[:, index])
        con_entropy = _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
        f_ratios[index] = 2 * (entropy - con_entropy) / (entropy_x + entropy)
    return f_ratios
@staticmethod
def spearman_corr(X, y):
    """Spearman rank correlation of each feature column of X with y.

    Uses rho = 1 - 6 * sum(d^2) / (n (n^2 - 1)) on rank-transformed data
    (ties are not averaged).
    """
    X, y = _DefaultMeasures.__check_input(X, y)
    n = X.shape[0]
    # Bug fix: np.sort returns a copy, so the original in-place "sorts" were
    # no-ops -- and Spearman needs rank transforms, not sorted values anyway.
    x_ranks = np.argsort(np.argsort(X, axis=0), axis=0)
    y_ranks = np.argsort(np.argsort(y))
    c = 6 / (n * (n - 1) * (n + 1))
    dif = x_ranks - np.repeat(y_ranks, X.shape[1]).reshape(X.shape)
    return 1 - c * np.sum(dif * dif, axis=0)
@staticmethod
def pearson_corr(X, y):
    """Pearson correlation coefficient of each feature column of X with y."""
    X, y = _DefaultMeasures.__check_input(X, y)
    x_dev = X - np.mean(X, axis=0)
    y_dev = y - np.mean(y)
    sum_dev = y_dev.T.dot(x_dev)
    sq_dev_x = x_dev * x_dev
    sq_dev_y = y_dev * y_dev
    # Bug fix: the x variance must be summed per column (axis=0); summing the
    # whole matrix normalised every feature by the pooled variance, so the
    # results were not per-feature correlation coefficients.
    denom = np.sqrt(np.sum(sq_dev_y) * np.sum(sq_dev_x, axis=0))
    return (sum_dev / denom).reshape((-1,))
# TODO concordation coef
@staticmethod
def fechner_corr(X, y):
    """
    Sample sign correlation (also known as Fechner correlation): for each
    feature, (#samples whose deviation from the mean agrees in sign with
    y's deviation - #samples that disagree) / n.
    """
    X, y = _DefaultMeasures.__check_input(X, y)
    y_mean = np.mean(y)
    n = X.shape[0]
    f_ratios = np.zeros(X.shape[1])
    for j in range(X.shape[1]):
        x_j_mean = np.mean(X[:, j])
        for i in range(n):
            x_dev = X[i, j] - x_j_mean
            # Bug fix: the y deviation belongs to sample i (the original used
            # y[j], the feature index, computed outside this loop).
            y_dev = y[i] - y_mean
            # Bug fix: `x_dev >= 0 & y_dev >= 0` parsed as a chained
            # comparison with `0 & y_dev` (== 0) because `&` binds tighter
            # than `>=`, so y's sign was ignored.  Count sign agreement.
            if (x_dev >= 0) == (y_dev >= 0):
                f_ratios[j] += 1
            else:
                f_ratios[j] -= 1
        f_ratios[j] /= n
    return f_ratios
@staticmethod
def __label_binarize(y):
    """
    Binarize labels in a one-vs-all fashion
    This function makes it possible to compute this transformation for a
    fixed set of class labels known ahead of time.
    """
    classes = np.unique(y)
    n_samples = len(y)
    n_classes = len(classes)
    # One indicator entry per sample: row = sample index, col = class index.
    row = np.arange(n_samples)
    col = [np.where(classes == el)[0][0] for el in y]
    data = np.repeat(1, n_samples)
    # TODO redo it with numpy
    return sp.csr_matrix((data, (row, col)), shape=(n_samples, n_classes)).toarray()
@staticmethod
def __chisquare(f_obs, f_exp):
    """Fast replacement for scipy.stats.chisquare.

    Version from https://github.com/scipy/scipy/pull/2525 with additional
    optimizations.
    """
    f_obs = np.asarray(f_obs, dtype=np.float64)
    # Reuse f_obs for chi-squared statistics
    # (the statistic is accumulated in place to avoid temporaries).
    chisq = f_obs
    chisq -= f_exp
    chisq **= 2
    # Zero expected counts yield inf/nan; the warning is suppressed as in
    # the upstream implementation.
    with np.errstate(invalid="ignore"):
        chisq /= f_exp
    chisq = chisq.sum(axis=0)
    return chisq
@staticmethod
def chi2_measure(X, y):
    """
    This score can be used to select the n_features features with the highest values
    for the test chi-squared statistic from X,
    which must contain only non-negative features such as booleans or frequencies
    (e.g., term counts in document classification), relative to the classes.
    """
    X, y = _DefaultMeasures.__check_input(X, y)
    if np.any(X < 0):
        raise ValueError("Input X must be non-negative.")
    # One-hot class indicator matrix, n_samples x n_classes.
    Y = _DefaultMeasures.__label_binarize(y)
    # If you use sparse input
    # you can use sklearn.utils.extmath.safe_sparse_dot instead
    observed = np.dot(Y.T, X)  # n_classes * n_features
    feature_count = X.sum(axis=0).reshape(1, -1)
    class_prob = Y.mean(axis=0).reshape(1, -1)
    # Expected counts under independence of feature and class.
    expected = np.dot(class_prob.T, feature_count)
    return _DefaultMeasures.__chisquare(observed, expected)
@staticmethod
def __distance_matrix(X, y, n_samples):
    # Symmetric matrix of tuples (distance, other_index, other_label) for
    # every pair of samples; consumed by __take_k / reliefF_measure.
    dm = np.zeros((n_samples, n_samples), dtype=tuple)
    for i in range(n_samples):
        for j in range(i, n_samples):
            # using the Manhattan (L1) norm rather than
            # the Euclidean (L2) norm,
            # although the rationale is not specified
            value = np.linalg.norm(X[i, :] - X[j, :], 1)
            dm[i, j] = (value, j, y[j])
            dm[j, i] = (value, i, y[i])
    # sort_indices = dm.argsort(1)
    # dm.sort(1)
    # indices = np.arange(n_samples) #[sort_indices]
    # dm = np.dstack((dm, indices))
    return dm
# TODO redo with np.where
@staticmethod
def __take_k(dm_i, k, r_index, choice_func):
    # Nearest k rows (sorted by distance) of one distance-matrix row,
    # excluding r_index itself and keeping only rows whose label satisfies
    # choice_func.
    hits = []
    dm_i = sorted(dm_i, key=lambda x: x[0])
    # NOTE(review): `&` on bools behaves like non-short-circuit `and` here;
    # note k keeps decrementing only for accepted rows, so the loop scans the
    # whole row even after k hits -- confirm if early exit was intended.
    for samp in dm_i:
        if (samp[1] != r_index) & (k > 0) & (choice_func(samp[2])):
            hits.append(samp)
            k -= 1
    return np.array(hits, int)
@staticmethod
def reliefF_measure(X, y, k_neighbors=1):
    """
    Based on the ReliefF algorithm as introduced in:
    <NAME> al. Relief-based feature selection: Introduction and review
    Journal of Biomedical Informatics 85 (2018) 189-203
    Differs with skrebate.ReliefF
    Only for complete X
    Rather than repeating the algorithm m(TODO Ask Nikita about user defined) times,
    implement it exhaustively (i.e. n times, once for each instance)
    for relatively small n (up to one thousand).
    :param X: array-like {n_samples, n_features}
        Training instances to compute the feature importance scores from
    :param y: array-like {n_samples}
        Training labels
    :param k_neighbors: int (default: 1)
        The number of neighbors to consider when assigning feature importance scores.
        More neighbors results in more accurate scores, but takes longer.
        Selection of k hits and misses is the basic difference to Relief
        and ensures greater robustness of the algorithm concerning noise.
    :return: array-like {n_features}
        Feature importances
    """
    X, y = _DefaultMeasures.__check_input(X, y)
    f_ratios = np.zeros(X.shape[1])
    classes, counts = np.unique(y, return_counts=True)
    # Empirical class priors, used to weight the miss terms.
    prior_prob = dict(zip(classes, np.array(counts) / len(y)))
    n_samples = X.shape[0]
    n_features = X.shape[1]
    dm = _DefaultMeasures.__distance_matrix(X, y, n_samples)
    for i in range(n_samples):
        r = X[i]
        dm_i = dm[i]
        # k nearest same-class neighbours ("hits") of sample i.
        hits = _DefaultMeasures.__take_k(dm_i, k_neighbors, i, lambda x: x == y[i])
        if len(hits) != 0:
            ind_hits = hits[:, 1]
        else:
            ind_hits = []
        value_hits = X.take(ind_hits, axis=0)
        # m_c[j]: k nearest neighbours of sample i within class j ("misses").
        m_c = np.empty(len(classes), np.ndarray)
        for j in range(len(classes)):
            if classes[j] != y[i]:
                misses = _DefaultMeasures.__take_k(dm_i, k_neighbors, i, lambda x: x == classes[j])
                ind_misses = misses[:, 1]
                m_c[j] = X.take(ind_misses, axis=0)
        for A in range(n_features):
            weight_hit = np.sum(np.abs(r[A] - value_hits[:, A]))
            weight_miss = 0
            for j in range(len(classes)):
                if classes[j] != y[i]:
                    # NOTE(review): indexing prior_prob with y[j] (the label
                    # of *sample* j) looks wrong -- prior_prob[classes[j]]
                    # appears to be the intent per the cited algorithm; confirm.
                    weight_miss += prior_prob[y[j]] * np.sum(np.abs(r[A] - m_c[j][:, A]))
            f_ratios[A] += weight_miss / (1 - prior_prob[y[i]]) - weight_hit
    # dividing by m * k guarantees that all final weights
    # will be normalized within the interval [ - 1, 1].
    f_ratios /= n_samples * k_neighbors
    # The maximum and minimum values of A are determined over the entire
    # set of instances.
    # This normalization ensures that weight updates fall
    # between 0 and 1 for both discrete and continuous features.
    with np.errstate(divide='ignore', invalid="ignore"):  # todo
        return f_ratios / (np.amax(X, axis=0) - np.amin(X, axis=0))
# NOTE(review): `filters` and `fit_criterion_measure` are not defined in the
# visible part of this file -- confirm their origin (likely the utils star
# import or a missing chunk of _DefaultMeasures).
VDM = filters.VDM() # TODO: probably not a filter

# Registry of measure functions addressable by name (used by Filter below).
GLOB_MEASURE = {"FitCriterion": _DefaultMeasures.fit_criterion_measure,
                "FRatio": _DefaultMeasures.f_ratio_measure,
                "GiniIndex": _DefaultMeasures.gini_index,
                "InformationGain": _DefaultMeasures.ig_measure,
                "MrmrDiscrete": _DefaultMeasures.mrmr_measure,
                "SymmetricUncertainty": _DefaultMeasures.su_measure,
                "SpearmanCorr": _DefaultMeasures.spearman_corr,
                "PearsonCorr": _DefaultMeasures.pearson_corr,
                "FechnerCorr": _DefaultMeasures.fechner_corr,
                "ReliefF": _DefaultMeasures.reliefF_measure,
                "Chi2": _DefaultMeasures.chi2_measure}
class _DefaultCuttingRules:
    """Factories producing cutting rules: callables that take a
    {feature: score} dict and return the selected feature keys."""

    @staticmethod
    def select_best_by_value(value):
        # Keep features scoring at least `value`.
        return partial(_DefaultCuttingRules.__select_by_value, value=value, more=True)

    @staticmethod
    def select_worst_by_value(value):
        # Keep features scoring at most `value`.
        return partial(_DefaultCuttingRules.__select_by_value, value=value, more=False)

    @staticmethod
    def __select_by_value(scores, value, more=True):
        if more:
            return [key for key, score in scores.items() if score >= value]
        return [key for key, score in scores.items() if score <= value]

    @staticmethod
    def select_k_best(k):
        # Keep the k highest-scoring features.
        return partial(_DefaultCuttingRules.__select_k, k=k, reverse=True)

    @staticmethod
    def select_k_worst(k):
        # Keep the k lowest-scoring features.
        return partial(_DefaultCuttingRules.__select_k, k=k)

    @classmethod
    def __select_k(cls, scores, k, reverse=False):
        if type(k) != int:
            raise TypeError("Number of features should be integer")
        ranked = sorted(scores.items(), key=lambda item: item[1], reverse=reverse)
        return [key for key, _ in ranked[:k]]
# Registry of cutting-rule factories addressable by name (used by Filter).
GLOB_CR = {"Best by value": _DefaultCuttingRules.select_best_by_value,
           "Worst by value": _DefaultCuttingRules.select_worst_by_value,
           "K best": _DefaultCuttingRules.select_k_best,
           "K worst": _DefaultCuttingRules.select_k_worst}
class Filter(object):
    """Univariate filter feature selector.

    Combines a *measure* (scores each feature against the target) with a
    *cutting_rule* (selects features from a {name: score} dict).  Both may be
    given by registry name (GLOB_MEASURE / GLOB_CR) or as callables.
    """

    def __init__(self, measure, cutting_rule):
        """
        :param measure: str key into GLOB_MEASURE, or callable(X, y) -> scores.
        :param cutting_rule: str key into GLOB_CR, or
            callable({name: score}) -> selected feature names.
        """
        if type(measure) is str:
            try:
                self.measure = GLOB_MEASURE[measure]
            except KeyError:
                raise KeyError("No %r measure yet" % measure)
        else:
            self.measure = measure
        if type(cutting_rule) is str:
            try:
                self.cutting_rule = GLOB_CR[cutting_rule]
            except KeyError:
                # Bug fix: the message previously interpolated `measure`
                # instead of the unknown cutting rule name.
                raise KeyError("No %r cutting rule yet" % cutting_rule)
        else:
            self.cutting_rule = cutting_rule
        self.feature_scores = None
        self.hash = None

    def run(self, x, y, feature_names=None, store_scores=False, verbose=0):
        """Score the features of *x* against *y* and return *x* restricted
        to the selected feature columns.

        :param x: 2-D array-like (or pandas DataFrame), samples x features.
        :param y: 1-D array-like (or pandas Series) target.
        :param feature_names: optional explicit feature names; defaults to
            DataFrame column names or integer positions.
        :param store_scores: keep the score dict on ``self.feature_scores``.
        :param verbose: unused; kept for interface compatibility.
        """
        # Capture names *before* unwrapping: previously ``x.columns`` was
        # read after x had been replaced by ``x.values``, so DataFrame
        # column names were never actually used.
        if feature_names is None:
            feature_names = getattr(x, 'columns', None)
        # Bug fix: unwrap x and y independently; the original single
        # try-block left y wrapped whenever x had no ``.values``.
        x = getattr(x, 'values', x)
        y = getattr(y, 'values', y)
        if feature_names is None:
            feature_names = list(range(x.shape[1]))
        else:
            feature_names = list(feature_names)
        # Bug fix: when the cached measure hash matched, feature_scores
        # stayed None and the cutting rule crashed -- always compute scores.
        feature_scores = dict(zip(feature_names, self.measure(x, y)))
        self.hash = hash(self.measure)
        self.feature_scores = feature_scores if store_scores else None
        selected = self.cutting_rule(feature_scores)
        # Map selected names back to column positions so both integer and
        # string feature names work for the final slice.
        indices = [feature_names.index(f) for f in selected]
        return x[:, indices]
| StarcoderdataPython |
3324905 | <gh_stars>1-10
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from std_msgs.msg import Float32
from std_msgs.msg import Float64
speed = 0.3
steering_angle = 0.5
def servo_commands():
    """Publish constant wheel-velocity and steering-hinge commands at 10 Hz.

    Uses the module-level ``speed`` and ``steering_angle`` constants; the
    rear-wheel publishers are intentionally commented out.
    """
    rospy.init_node('servo_commands', anonymous=True)

    pub_vel_left_front_wheel = rospy.Publisher('/jetsoncar/front_left_wheel_velocity_controller/command', Float64, queue_size=1)
    pub_vel_right_front_wheel = rospy.Publisher('/jetsoncar/front_right_wheel_velocity_controller/command', Float64, queue_size=1)
    #pub_vel_left_rear_wheel = rospy.Publisher('/jetsoncar/rear_left_wheel_velocity_controller/command', Float64, queue_size=1)
    #pub_vel_right_rear_wheel = rospy.Publisher('/jetsoncar/rear_right_wheel_velocity_controller/command', Float64, queue_size=1)
    pub_pos_left_steering_hinge = rospy.Publisher('/jetsoncar/front_left_hinge_position_controller/command', Float64, queue_size=1)
    pub_pos_right_steering_hinge = rospy.Publisher('/jetsoncar/front_right_hinge_position_controller/command', Float64, queue_size=1)

    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        # Re-publish the same setpoints every cycle so the controllers keep them.
        pub_vel_left_front_wheel.publish(speed)
        pub_vel_right_front_wheel.publish(speed)
        #pub_vel_left_rear_wheel.publish(speed)
        #pub_vel_right_rear_wheel.publish(speed)
        pub_pos_left_steering_hinge.publish(steering_angle)
        pub_pos_right_steering_hinge.publish(steering_angle)
        rate.sleep()

if __name__ == '__main__':
    try:
        servo_commands()
    except rospy.ROSInterruptException:
        # Normal shutdown path (e.g. CTRL+C) -- exit quietly.
        pass
| StarcoderdataPython |
122548 | from django.apps import AppConfig
class MemberConfig(AppConfig):
    # App configuration for the member app; wires up signal handlers at startup.
    default_auto_field = "django.db.models.BigAutoField"
    name = 'apps.member'
    verbose_name = '회원'  # "Member" -- user-facing label, left untranslated
    swagger_tag = dict(name='사용자 API 목록', description='')  # "User API list"

    def ready(self):
        # Importing the module registers this app's signal receivers.
        from . import signals
| StarcoderdataPython |
4824227 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2client.commands import resource
from st2client.models import TriggerType
from st2client.formatters import table
class TriggerTypeBranch(resource.ResourceBranch):
    # CLI branch grouping the trigger-type subcommands (list/get/update/delete
    # plus the extended "getspecs" command below).
    def __init__(self, description, app, subparsers, parent_parser=None):
        super(TriggerTypeBranch, self).__init__(
            TriggerType, description, app, subparsers,
            parent_parser=parent_parser,
            commands={
                'list': TriggerTypeListCommand,
                'get': TriggerTypeGetCommand,
                'update': TriggerTypeUpdateCommand,
                'delete': TriggerTypeDeleteCommand
            })

        # Registers extended commands
        self.commands['getspecs'] = TriggerTypeSubTriggerCommand(
            self.resource, self.app, self.subparsers,
            add_help=False)
class TriggerTypeListCommand(resource.ContentPackResourceListCommand):
    # Columns shown by the list subcommand.
    display_attributes = ['ref', 'pack', 'description']
class TriggerTypeGetCommand(resource.ContentPackResourceGetCommand):
    # Show every attribute, in this preferred display order.
    display_attributes = ['all']
    attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',
                               'parameters_schema', 'payload_schema']
class TriggerTypeUpdateCommand(resource.ContentPackResourceUpdateCommand):
    # Inherits the generic content-pack update behaviour unchanged.
    pass
class TriggerTypeDeleteCommand(resource.ContentPackResourceDeleteCommand):
    # Inherits the generic content-pack delete behaviour unchanged.
    pass
class TriggerTypeSubTriggerCommand(resource.ResourceCommand):
    # "getspecs": list Trigger instances belonging to a given TriggerType ref.
    attribute_display_order = ['id', 'ref', 'context', 'parameters', 'status',
                               'start_timestamp', 'result']

    def __init__(self, resource, *args, **kwargs):
        super(TriggerTypeSubTriggerCommand, self).__init__(
            resource, kwargs.pop('name', 'getspecs'),
            'Return Trigger Specifications of a Trigger.',
            *args, **kwargs)

        self.parser.add_argument('ref', nargs='?',
                                 metavar='ref',
                                 help='Fully qualified name (pack.trigger_name) ' +
                                      'of the trigger.')
        self.parser.add_argument('-h', '--help',
                                 action='store_true', dest='help',
                                 help='Print usage for the given action.')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        # Query Trigger objects whose `type` matches the given trigger-type ref.
        trigger_mgr = self.app.client.managers['Trigger']
        return trigger_mgr.query(**{'type': args.ref})

    @resource.add_auth_token_to_kwargs_from_cli
    def run_and_print(self, args, **kwargs):
        # -h/--help is handled manually because add_help=False upstream.
        if args.help:
            self.parser.print_help()
            return
        instances = self.run(args, **kwargs)
        self.print_output(instances, table.MultiColumnTable,
                          json=args.json, yaml=args.yaml)
| StarcoderdataPython |
143652 | <reponame>moeyensj/atm
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
from ..config import Config
from ..constants import Constants
from ..helpers import __handleParameters
from .hg import calcHG
from .hg import calcQ
from .temperature import calcTss
from .blackbody import calcPlanckLambda
__all__ = ["calcFluxLambdaSun",
"calcFluxLambdaAtObs",
"calcFluxLambdaAtObsWithSunlight",
"calcFluxLambdaObs",
"calcFluxLambdaObsWithSunlight",
"calcFluxLambdaSED"]
R_Sun = Constants.SOLAR_RADIUS
T_Sun = Constants.SOLAR_TEMPERATURE
AU = Constants.ASTRONOMICAL_UNIT
def calcFluxLambdaSun(lambd, r, T=T_Sun):
    """
    Solar flux at wavelength lambd seen at heliocentric distance r,
    modelling the Sun as a blackbody of temperature T.

    Parameters
    ----------
    lambd : float or `~numpy.ndarray` (N)
        Wavelength in m.
    r : float or `~numpy.ndarray` (N)
        Heliocentric distance in AU.
    T : float, optional
        Solar temperature in K.
        [Default = `~atm.Constants.SOLAR_TEMPERATURE`]

    Returns
    -------
    float or `~numpy.ndarray` (N)
        Solar flux at wavelength lambd.
    """
    # Solid-angle dilution of the solar disk at distance r.
    dilution = (np.pi * R_Sun**2) / (r**2 * AU**2)
    return dilution * calcPlanckLambda(lambd, T)
def calcFluxLambdaAtObs(model, r, delta, lambd, T_ss, D, alpha, eps, threads=Config.threads):
    """
    Thermal flux received at the observer from an asteroid, excluding
    reflected sunlight.

    Parameters
    ----------
    model : `~atm.models.Model`
        Flux model object.
    r : float or `~numpy.ndarray` (N)
        Heliocentric distance in AU (unused here; kept so the signature
        matches the sunlight-aware variant).
    delta : float or `~numpy.ndarray` (N)
        Observer-asteroid distance in AU.
    lambd : float or `~numpy.ndarray` (N)
        Wavelength in m.
    T_ss : float or `~numpy.ndarray` (N)
        Subsolar temperature in K.
    D : float or `~numpy.ndarray` (N)
        Asteroid diameter in m.
    alpha : float or `~numpy.ndarray` (N)
        Phase angle in radians.
    eps : float or `~numpy.ndarray` (N)
        Emissivity.
    threads : int, optional
        Number of processors to use.
        [Default = `atm.Config.threads`]

    Returns
    -------
    float or `~numpy.ndarray` (N)
        Flux at the observer.
    """
    # Projected-area / inverse-square geometric factor.
    scale = D**2 / (4 * delta**2 * AU**2)
    emitted = eps * model.calcTotalFluxLambdaEmittedToObsMany(lambd, T_ss, alpha)
    return scale * emitted
def calcFluxLambdaAtObsWithSunlight(model, r, delta, lambd, T_ss, D, alpha, eps, p, G, threads=Config.threads):
    """
    Flux received at the observer from an asteroid, including reflected
    sunlight (HG photometric model).

    Parameters
    ----------
    model : `~atm.models.Model`
        Flux model object.
    r : float or `~numpy.ndarray` (N)
        Heliocentric distance in AU.
    delta : float or `~numpy.ndarray` (N)
        Observer-asteroid distance in AU.
    lambd : float or `~numpy.ndarray` (N)
        Wavelength in m.
    T_ss : float or `~numpy.ndarray` (N)
        Subsolar temperature in K.
    D : float or `~numpy.ndarray` (N)
        Asteroid diameter in m.
    alpha : float or `~numpy.ndarray` (N)
        Phase angle in radians.
    eps : float or `~numpy.ndarray` (N)
        Emissivity.
    p : float or `~numpy.ndarray` (N)
        Albedo.
    G : float or `~numpy.ndarray` (N)
        HG slope parameter.
    threads : int, optional
        Number of processors to use.
        [Default = `atm.Config.threads`]

    Returns
    -------
    float or `~numpy.ndarray` (N)
        Flux at the observer including reflected sunlight.
    """
    thermal = calcFluxLambdaAtObs(model, r, delta, lambd, T_ss, D, alpha, eps, threads=threads)
    # Reflected component: geometric factor x albedo x HG phase function x solar flux.
    reflected = (D**2 / (4 * delta**2 * AU**2)) * p * calcHG(alpha, G) * calcFluxLambdaSun(lambd, r)
    return thermal + reflected
def calcFluxLambdaObs(model, obs, r, delta, T_ss, D, alpha, eps, threads=Config.threads):
    """
    Calculate the observed (band-integrated) flux from an asteroid without
    reflected sunlight.

    If you are looking to do any sort of fitting it is recommended to use the
    interpolated version of this function, since the many integrations here
    can take a while.

    Parameters
    ----------
    model : `~atm.models.Model`
        Flux model object.
    obs : `~atm.obs.Observatory`
        Observatory object containing filter bandpass information.
    r : float or `~numpy.ndarray` (N)
        Distance between asteroid and the Sun in AU.
    delta : float or `~numpy.ndarray` (N)
        Distance between asteroid and the observatory in AU.
    T_ss : float or `~numpy.ndarray` (N)
        Subsolar temperature in K.
    D : float or `~numpy.ndarray` (N)
        Asteroid diameter in meters.
    alpha : float or `~numpy.ndarray` (N)
        Phase angle in radians.
    eps : float or `~numpy.ndarray` (N)
        Emissivity.
    threads : int, optional
        Number of processors to use.
        [Default = `atm.Config.threads`]

    Returns
    -------
    float or `~numpy.ndarray` (N, M)
        Array of fluxes with shape N observations by M filters.
    """
    # Geometric dilution factor: D**2 / (4 * (delta * AU)**2).
    dilution = D**2 / (4 * delta**2 * AU**2)
    # Band-integrate the emitted flux through each of the observatory's
    # filters. NOTE: `r` and `threads` are accepted for interface symmetry
    # with the related flux functions but are not used in this computation.
    emitted = obs.bandpassLambda(model.calcTotalFluxLambdaEmittedToObsMany, args=[T_ss, alpha])
    return dilution * (eps * emitted)
def calcFluxLambdaObsWithSunlight(model, obs, r, delta, T_ss, D, alpha, eps, p, G, threads=Config.threads):
    """
    Calculate the observed flux from an asteroid with
    reflected sunlight.

    This function is multi-processed to make calculations faster since
    doing many integrations can take a while. If you are looking to do any
    sort of fitting it is recommended to use the interpolated version of this
    function.

    Parameters
    ----------
    model : `~atm.models.Model`
        Flux model object.
    obs : `~atm.obs.Observatory`
        Observatory object containing filter bandpass information.
    r : float or `~numpy.ndarray` (N)
        Distance between asteroid and the Sun in AU.
    delta : float or `~numpy.ndarray` (N)
        Distance between asteroid and the observatory in AU.
    T_ss : float or `~numpy.ndarray` (N)
        Subsolar temperature in K.
    D : float or `~numpy.ndarray` (N)
        Asteroid diameter in meters.
    alpha : float or `~numpy.ndarray` (N)
        Phase angle in radians.
    eps : float or `~numpy.ndarray` (N)
        Emissivity.
    p : float or `~numpy.ndarray` (N)
        Albedo.
    G : float or `~numpy.ndarray` (N)
        HG slope parameter.
    threads : int, optional
        Number of processors to use.
        [Default = `atm.Config.threads`]

    Returns
    -------
    float or `~numpy.ndarray` (N, M)
        Returns an array of fluxes with shape N observations by
        M filters.
    """
    # Thermal (emitted) term plus reflected sunlight: the reflected term
    # scales the band-integrated solar flux by the geometric cross-section
    # D**2 / (4 * (delta*AU)**2), the albedo p, and the HG phase function.
    return (calcFluxLambdaObs(model, obs, r, delta, T_ss, D, alpha, eps, threads=threads)
            + (D**2 / (4 * delta**2 * AU**2)) * p * calcHG(alpha, G)
            * obs.bandpassLambda(calcFluxLambdaSun, args=[r]))
def calcFluxLambdaSED(model, obs, data,
                      summary=None,
                      lambdaRange=[1.5e-6, 30e-6],
                      lambdaNum=200,
                      lambdaEdges=[3.9e-6, 6.5e-6, 18.5e-6],
                      linearInterpolation=True,
                      fitParameters=[],
                      emissivitySpecification=None,
                      albedoSpecification="auto",
                      threads=4,
                      columnMapping=Config.columnMapping):
    # NOTE(review): the list defaults above are mutable default arguments.
    # Benign here since they are never mutated in this function, but a
    # None-sentinel would be the safer idiom.
    """
    Calculate flux between lambdaRange[0] and lambdaRange[1] for a single asteroid thermal model. If fitParameters is an
    empty list, this function will look for all the required parameters in data. If fitParameters is not an empty
    list, then this function will look for the fitParameters in the summary DataFrame.

    Uses the median observation geometry from the data to plot the calculated best-fit SED.

    If the fitting scenario allowed for emissivity or albedo to change as a function of wavelength or bandpass, then the user
    has the option to linearly interpolate albedo and emissivity between calculated or assumed values.

    Parameters
    ----------
    model : `~atm.models.Model`
        Flux model object with the appropriate tables loaded into memory.
    obs : `~atm.obs.Observatory`
        Observatory object containing filter bandpass information.
    data : `~pandas.DataFrame`
        DataFrame containing the relevant data to fit. The user should define
        the columnMapping dictionary which maps internally used variables to the
        variables used in the user's DataFrame.
    summary : `~pandas.DataFrame`, optional
        Summary DataFrame returned from fit function for a single object.
    lambdaRange : list, optional
        Minimum and maximum wavelength in meters at which to calculate model flux.
        [Default = [1.5e-6, 30e-6]]
    lambdaNum : int, optional
        Number of wavelength points between (and including) lambdaRange[0] and lambdaRange[1].
        [Default = 200]
    lambdaEdges : {None, list}, optional
        If emissivity or albedo are not constant over the wavelength range, then set the lambdaEdges to a list
        with the interior boundaries where emissivity and/or albedo should change. For example, set lambda edges to
        the wavelengths between two filters. Do not include lambdaRange[0] or lambdaRange[1]. If linearInterpolation
        is set to True, these edges are ignored in favor of a more robust linear interpolation.
        [Default = [3.9e-6, 6.5e-6, 18.5e-6]]
    linearInterpolation : bool, optional
        Linearly interpolate emissivity and albedo values between calculated best fit values or values from the data.
        [Default = True]
    fitParameters : list, optional
        The parameters that should be fit for. If a fit parameter is found to
        exist inside the data, that column will be ignored for fitting purposes.
        [Default = `~atm.Config.fitParameters`]
    emissivitySpecification : {None, "perBand", dict, "auto"}, optional
        There are four different emissivity scenarios supported for fitting:
        1) Setting the emissivity specification to None forces the fitter to
        use a single epsilon across all bands.
        2) Setting the emissivity specification to "perBand" forces the fitter
        to use an epsilon per band. In the case of WISE, this would mean having
        four epsilons: eps_W1, eps_W2, eps_W3, eps_W4
        3) Setting the emissivity specification to a dictionary allows the fitter
        to use combination emissivities. For example, passing
            {"eps_W1W2": ["W1", "W2"],
             "eps_W3W4": ["W3", "W4"]}
        tells the fitter that there should be two epsilon parameters. One that constrains
        emissivity in filters W1 and W2, and one that constrains emissivity
        in filters W3 and W4. The data format should be a dictionary with the combination
        parameter(s) as key(s) and lists of the filter names as values. The combination
        parameter name should be formatted as in the example: "eps_{filter names}".
        (Again, using WISE as the example.)
        4) Setting the emissivity specification to "auto" forces fitting to use the albedo
        specification definition and creates emissivity parameters that obey Kirchhoff's
        law. Both emissivity specification and albedo specification cannot be "auto".
        [Default = None]
    albedoSpecification : {None, "perBand", dict, "auto"} optional
        There are three different albedo scenarios supported for fitting:
        1) Setting the albedo specification to None forces the fitter to
        use a single p across all bands.
        2) Setting the albedo specification to "perBand" forces the fitter
        to use an epsilon per band. In the case of WISE, this would mean having
        four epsilons: p_W1, p_W2, p_W3, p_W4
        3) Setting the albedo specification to a dictionary allows the fitter
        to use combination albedos. For example, passing
            {"p_W1W2": ["W1", "W2"],
             "p_W3W4": ["W3", "W4"]}
        tells the fitter that there should be two albedo parameters. One that constrains
        reflectivity in filters W1 and W2, and one that constrains reflectivity
        in filters W3 and W4. The data format should be a dictionary with the combination
        parameter(s) as key(s) and lists of the filter names as values. The combination
        parameter name should be formatted as in the example: "p_{filter names}".
        (Again, using WISE as the example.)
        4) Setting the albedo specification to "auto" forces fitting to use the emissivity
        specification definition and creates albedo parameters that obey Kirchhoff's
        law. Both emissivity specification and albedo specification cannot be "auto".
        [Default = "auto"]
    columnMapping : dict, optional
        This dictionary should define the column names of the user's data relative to the
        internally used names.
        [Default = `~atm.Config.columnMapping`]

    Returns
    -------
    model_observations : `~pandas.DataFrame`
        A pandas DataFrame containing the predicted fluxes and magnitudes for a best fit
        model.
    """
    # Handle parameters: resolve which quantities were fit versus supplied in
    # the data, and expand the emissivity/albedo specifications into concrete
    # parameter names.
    fitParametersSet, parametersSet, emissivityParameters, albedoParameters, dataParametersToIgnoreSet = __handleParameters(
        obs,
        fitParameters,
        data.columns.tolist(),
        emissivitySpecification=emissivitySpecification,
        albedoSpecification=albedoSpecification,
        columnMapping=columnMapping)

    # Wavelength grid on which the SED is evaluated.
    lambd = np.linspace(lambdaRange[0], lambdaRange[-1], num=lambdaNum)

    # Observation geometry and physical parameters: take each quantity from
    # the data (median over observations) when it was not fit, otherwise from
    # the fit-summary medians.
    # NOTE(review): in the summary branches for r/delta/alpha the extracted
    # array has one entry per observation and is multiplied by a
    # length-lambdaNum array of ones -- this presumably relies on the two
    # lengths matching (or a single observation); verify against the fitter's
    # summary output shape.
    if "r_au" in parametersSet and "r_au" not in fitParametersSet:
        r = np.median(data[columnMapping["r_au"]].values) * np.ones(len(lambd))
    else:
        r = summary[summary["parameter"].isin(["r_au__{}".format(i) for i in range(0, len(data))])]["median"].values * np.ones(len(lambd))
    if "delta_au" in parametersSet and "delta_au" not in fitParametersSet:
        delta = np.median(data[columnMapping["delta_au"]].values) * np.ones(len(lambd))
    else:
        delta = summary[summary["parameter"].isin(["delta_au__{}".format(i) for i in range(0, len(data))])]["median"].values * np.ones(len(lambd))
    if "alpha_rad" in parametersSet and "alpha_rad" not in fitParametersSet:
        alpha = np.median(data[columnMapping["alpha_rad"]].values) * np.ones(len(lambd))
    else:
        alpha = summary[summary["parameter"].isin(["alpha_rad__{}".format(i) for i in range(0, len(data))])]["median"].values * np.ones(len(lambd))
    if "G" in parametersSet and "G" not in fitParametersSet:
        G = data[columnMapping["G"]].values[0] * np.ones(len(lambd))
    else:
        G = summary[summary["parameter"] == "G"]["median"].values[0] * np.ones(len(lambd))
    if "logD" in parametersSet and "logD" not in fitParametersSet:
        logD = data[columnMapping["logD"]].values[0] * np.ones(len(lambd))
    else:
        logD = summary[summary["parameter"] == "logD"]["median"].values[0] * np.ones(len(lambd))
    if "logT1" in parametersSet and "logT1" not in fitParametersSet:
        logT1 = data[columnMapping["logT1"]].values[0] * np.ones(len(lambd))
    else:
        logT1 = summary[summary["parameter"] == "logT1"]["median"].values[0] * np.ones(len(lambd))
    # Subsolar temperature scaled to heliocentric distance.
    T_ss = 10**logT1 / np.sqrt(r)

    # Emissivity over the wavelength grid. Single-epsilon case first; with
    # albedoSpecification "auto" the albedo follows via calcQ(G)
    # (Kirchhoff's-law style relation, as described in the docstring).
    if emissivityParameters == "eps" and emissivitySpecification != "auto":
        if "eps" in parametersSet and "eps" not in fitParametersSet:
            eps = data[columnMapping["eps"]].values[0] * np.ones(len(lambd))
        else:
            eps = summary[summary["parameter"] == "eps"]["median"].values[0] * np.ones(len(lambd))
        if albedoSpecification == "auto":
            p = (1 - eps) / calcQ(G)
    # Multiple emissivity parameters: spread the per-band values over the
    # wavelength grid, either by linear interpolation between the filters'
    # effective wavelengths or piecewise-constant between lambdaEdges.
    if type(emissivityParameters) is list and emissivitySpecification != "auto":
        eps_values = np.zeros_like(emissivityParameters, dtype=float)
        for i, parameter in enumerate(emissivityParameters):
            if parameter in parametersSet and parameter not in fitParametersSet:
                eps_values[i] = data[parameter].values[0]
            else:
                eps_values[i] = summary[summary["parameter"] == parameter]["median"].values[0]
        eps = np.zeros_like(lambd)
        if linearInterpolation is True:
            eps = np.interp(lambd, obs.filterEffectiveLambdas, eps_values)
        else:
            for i, (edge_start, edge_end) in enumerate(zip([lambdaRange[0]] + lambdaEdges,
                                                           lambdaEdges + [lambdaRange[-1]])):
                eps = np.where((lambd >= edge_start) & (lambd <= edge_end), eps_values[i], eps)
        if albedoSpecification == "auto":
            p = (1 - eps) / calcQ(G)

    # Albedo over the wavelength grid: mirrors the emissivity handling above,
    # with emissivity derived from albedo when emissivitySpecification is
    # "auto".
    if albedoParameters == "p" and albedoSpecification != "auto":
        if "p" in parametersSet and "p" not in fitParametersSet:
            p = data[columnMapping["p"]].values[0] * np.ones(len(lambd))
        else:
            p = summary[summary["parameter"] == "p"]["median"].values[0] * np.ones(len(lambd))
        if emissivitySpecification == "auto":
            eps = 1 - p * calcQ(G)
    if type(albedoParameters) is list and albedoSpecification != "auto":
        p_values = np.zeros_like(albedoParameters, dtype=float)
        for i, parameter in enumerate(albedoParameters):
            if parameter in parametersSet and parameter not in fitParametersSet:
                p_values[i] = data[parameter].values[0]
            else:
                p_values[i] = summary[summary["parameter"] == parameter]["median"].values[0]
        p = np.zeros_like(lambd)
        if linearInterpolation is True:
            p = np.interp(lambd, obs.filterEffectiveLambdas, p_values)
        else:
            for i, (edge_start, edge_end) in enumerate(zip([lambdaRange[0]] + lambdaEdges,
                                                           lambdaEdges + [lambdaRange[-1]])):
                p = np.where((lambd >= edge_start) & (lambd <= edge_end), p_values[i], p)
        if emissivitySpecification == "auto":
            eps = 1 - p * calcQ(G)

    # Evaluate the best-fit SED (thermal emission plus reflected sunlight)
    # at the median geometry, and package it with the resolved eps/p curves.
    fitted_flux = calcFluxLambdaAtObsWithSunlight(model, r, delta, lambd, T_ss, 10**logD, alpha, eps, p, G, threads=threads)
    df = pd.DataFrame(data={
        "lambda": lambd,
        "eps": eps,
        "p" : p,
        "flux": fitted_flux})
    return df
| StarcoderdataPython |
1749413 | <filename>torrent_client/network/tracker_clients/__init__.py
from urllib.parse import urlparse
from torrent_client.models import DownloadInfo
from torrent_client.network.tracker_clients.base import *
from torrent_client.network.tracker_clients.http import *
from torrent_client.network.tracker_clients.udp import *
from torrent_client.network.tracker_clients.dht import *
def create_tracker_client(announce_url: str, download_info: DownloadInfo, our_peer_id: bytes) -> BaseTrackerClient:
    """Instantiate the tracker client matching the announce URL's scheme.

    Raises ValueError for schemes other than http(s), udp, or dht.
    """
    parsed = urlparse(announce_url)
    scheme_to_client = {
        'http': HTTPTrackerClient,
        'https': HTTPTrackerClient,
        'udp': UDPTrackerClient,
        'dht': DHTTrackerClient,
    }
    if parsed.scheme not in scheme_to_client:
        raise ValueError('announce_url uses unknown protocol "{}"'.format(parsed.scheme))
    return scheme_to_client[parsed.scheme](parsed, download_info, our_peer_id)
| StarcoderdataPython |
3271828 | #!/usr/bin/env python
from __future__ import division
import argparse
import numpy as np
import os
import GPy
import matplotlib.pyplot as plt
from fipy import *
from scipy.interpolate import griddata
from pdb import set_trace as keyboard
import time
import random
# Fix all random-number sources so the generated dataset is reproducible.
seed=19
os.environ['PYTHONHASHSEED'] = '0'
# Setting the seed for numpy-generated random numbers
np.random.seed(seed=seed)
# Setting the seed for python random numbers
random.seed(seed)
num_samples=100000 # Number of MC samples.
# Grid resolution: number of cells per side of the 2-D domain.
nx1=32
nx2=32
# First GP layer: RBF kernel with long length scales, smaller variance.
kern_1=GPy.kern.RBF
ellx1_1=2
ellx2_1=2
variance_1=0.25
# Second GP layer (applied to the first layer's output -- a warped/deep GP):
# RBF kernel with short length scales, larger variance.
kern_2=GPy.kern.RBF
ellx1_2=0.1
ellx2_2=0.1
variance_2=0.75
# Mean function for the first GP layer: the identity map.
def mean_1(x):
    """Return the input unchanged (identity mean function)."""
    return x
# Mean function for the second GP layer: zero everywhere.
def mean_2(x):
    """Return an (n, 1) column of zeros, where n is the number of rows of x."""
    return np.zeros((x.shape[0], 1))
#GPy kernel
# Covariance for the first (spatial) layer, ARD over the two coordinates.
k_1=kern_1(input_dim = 2,
           lengthscale = [ellx1_1, ellx2_1],
           variance = variance_1,
           ARD = True)
# GPy kernel
# Covariance for the second layer; evaluated on the first layer's output.
k_2=kern_2(input_dim = 2,
           lengthscale = [ellx1_2, ellx2_2],
           variance = variance_2,
           ARD = True)
#defining mesh to get cellcenters
Lx1 = 1. # always put . after 1
Lx2 = 1. # always put . after 1
mesh = Grid2D(nx=nx1, ny=nx2, dx=Lx1/nx1, dy=Lx2/nx2) # with nx1*nx2 number of cells/cellcenters/pixels/pixelcenters
cellcenters = mesh.cellCenters.value.T # (nx1*nx2,2) matrix
np.save('cellcenters_nx1='+str(nx1)+'_nx2='+str(nx2)+'.npy', cellcenters)
# NOTE: Python 2 print statement -- this script targets Python 2
# (it also uses xrange below).
print cellcenters
#define matrices to save results
# One row per MC sample: flattened conductivity field and flattened solution.
inputs = np.zeros((num_samples, nx1*nx2))
outputs = np.zeros((num_samples, nx1*nx2))
start = time.time()
# Monte Carlo loop: draw a two-layer (warped) GP sample as the log
# conductivity field, clamp it, then solve the steady diffusion problem
# with FiPy and record both field and solution. (Python 2: xrange/print.)
for i in xrange(num_samples):
    #display
    if (i+1)%10000 == 0:
        print "Generating sample "+str(i+1)
    #get covariance matrix and compute its Cholesky decomposition
    m_1 = mean_1(cellcenters)
    nugget = 1e-6 # This is a small number required for stability
    Cov_1 = k_1.K(cellcenters) + nugget * np.eye(cellcenters.shape[0])
    L_1 = np.linalg.cholesky(Cov_1)
    #generate a sample
    # Draw f_1 ~ N(m_1, Cov_1) via the Cholesky factor.
    z_1 = np.random.randn(cellcenters.shape[0], 1)
    f_1 = m_1 + np.dot(L_1, z_1)
    # print f_1
    # print np.shape(f_1)
    #get covariance matrix and compute its Cholesky decomposition
    # Second layer: the kernel is evaluated on f_1 itself (warping).
    m_2 = mean_2(f_1)
    nugget = 1e-6 # This is a small number required for stability
    Cov_2 = k_2.K(f_1) + nugget * np.eye(f_1.shape[0])
    L_2 = np.linalg.cholesky(Cov_2)
    #generate a sample
    z_2 = np.random.randn(f_1.shape[0], 1)
    f_2 = m_2 + np.dot(L_2, z_2)
    # print f_2
    # print np.shape(f_2)
    sample = np.exp(f_2)# 'sample' is one image of input field: conductivity image
    # bounding input fields from below and above
    lower_bound = np.exp(-5.298317366548036) # 0.005000000000000002
    upper_bound = np.exp(3.5) # 33.11545195869231
    sample = np.where(sample < lower_bound, lower_bound, sample)
    sample = np.where(sample > upper_bound, upper_bound, sample)
    # FIPY solution
    # Dirichlet values on left/right; zero-gradient on top/bottom.
    value_left=1.
    value_right=0.
    value_top=0.
    value_bottom=0.
    # define cell and face variables
    phi = CellVariable(name='$T(x)$', mesh=mesh, value=0.)
    D = CellVariable(name='$D(x)$', mesh=mesh, value=1.0) ## coefficient in diffusion equation
    # D = FaceVariable(name='$D(x)$', mesh=mesh, value=1.0) ## coefficient in diffusion equation
    source = CellVariable(name='$f(x)$', mesh=mesh, value=1.0)
    C = CellVariable(name='$C(x)$', mesh=mesh, value=1.0)
    # apply boundary conditions
    # dirichet
    phi.constrain(value_left, mesh.facesLeft)
    phi.constrain(value_right, mesh.facesRight)
    # homogeneous Neumann
    phi.faceGrad.constrain(value_top, mesh.facesTop)
    phi.faceGrad.constrain(value_bottom, mesh.facesBottom)
    # setup the diffusion problem
    eq = -DiffusionTerm(coeff=D)+ImplicitSourceTerm(coeff=C) == source
    # Zero source and zero reaction coefficient for this run.
    c = 0.
    f = 0. # source
    source.setValue(f)
    C.setValue(c)
    D.setValue(sample.ravel())
    eq.solve(var=phi)
    x_fipy = mesh.cellCenters.value.T ## fipy solution (nx1*nx2,2) matrix # same as cellcenters defined above
    u_fipy = phi.value[:][:, None] ## fipy solution (nx1*nx2,1) matrix
    #save data
    inputs[i] = sample.ravel()
    outputs[i] = u_fipy.flatten()
#end timer
finish = time.time() - start
# Python 2 print statements (diagnostics only).
print "Time (sec) to generate "+str(num_samples)+" MC samples : " +str(finish)
print np.shape(inputs)
print np.shape(outputs)
print inputs
print outputs
#save data
# Persist the full dataset; filenames encode grid size and sample count.
np.save("MC_samples_inputfield_warped_double_rbf"+\
        "_nx1="+str(nx1)+\
        "_nx2="+str(nx2)+\
        "_num_samples="+str(num_samples)+".npy", inputs)
np.save("MC_samples_u_fipy_warped_double_rbf"+\
        "_nx1="+str(nx1)+\
        "_nx2="+str(nx2)+\
        "_num_samples="+str(num_samples)+".npy", outputs)
# END | StarcoderdataPython |
94431 | <reponame>MesoSim/chase<gh_stars>0
#!/usr/bin/env python
"""
Main API Control
================
Using Flask-RESTful, this script hosts the resources for the full frontend API
"""
#########
# Setup #
#########
# Imports
from datetime import datetime, timedelta
import os
import pytz
from sqlite3 import dbapi2 as sql
import traceback
import warnings
from flask import Flask, request, make_response
from flask_restful import Resource, Api
from mesosim.chase.actions import create_hazard_registry, shuffle_new_hazard
from mesosim.chase.team import Team
from mesosim.chase.vehicle import Vehicle
from mesosim.core.config import Config
from mesosim.core.timing import arc_time_from_cur, std_fmt
from mesosim.core.utils import direction_angle_to_str, money_format, move_lat_lon
from mesosim.lsr import scale_raw_lsr_to_cur_time, gr_lsr_placefile_entry_from_tuple, type_to_icon
import numpy as np
import requests
# Constants
app = Flask(__name__)  # WSGI application
api = Api(app)  # Flask-RESTful router
# Absolute paths to the game's SQLite databases and per-team DB directory.
lsr_db_file = "/home/jthielen/lsr.db"
main_db_file = "/home/jthielen/main.db"
team_db_dir = '/home/jthielen/teams/'
# Base URL for placefile icon assets.
lsr_asset_url = 'https://chase.iawx.info/assets/'
# Global game configuration backed by the main DB.
config = Config(main_db_file)
# Shared
def get_team_and_hazard_registry(team_id):
    """Load a team's per-team SQLite DB and return (Team, hazard registry)."""
    registry = create_hazard_registry(config)
    team = Team(team_db_dir + team_id + '.db', hazard_registry=registry, config=config)
    return team, registry
def get_team(team_id):
    """Load a team's state DB and return just the Team object."""
    return get_team_and_hazard_registry(team_id)[0]
def get_vehicle(vehicle_id):
    """Return the Vehicle definition for *vehicle_id* using the app config."""
    return Vehicle(vehicle_id, config)
def vehicle_stats(vehicle):
    """Serialize a vehicle's display stats to a dict (all None if no vehicle)."""
    fields = ("vehicle_type", "print_name", "top_speed", "mpg",
              "fuel_cap", "traction_rating")
    if vehicle is None:
        return {name: None for name in fields}
    return {name: getattr(vehicle, name) for name in fields}
# Recreate the file header and footer texts for use in placefiles.
def file_headertext(team_name_str, preface=""):
    """Build the GRLevelX placefile header for a team-location placefile.

    The header declares refresh rate, title, font, and the SpotterNetwork
    icon sheets, and ends with a blank line before the first object.
    """
    header_lines = [
        'RefreshSeconds: 10',
        'Threshold: 999',
        f'Title: {preface}Location of {team_name_str}',
        'Font: 1, 11, 0, "Courier New"',
        'IconFile: 1, 22, 22, 11, 11, "http://www.spotternetwork.org/icon/spotternet.png"',
        'IconFile: 2, 15, 25, 8, 25, "http://www.spotternetwork.org/icon/arrows.png"',
        'IconFile: 3, 22, 22, 11, 11, "http://www.spotternetwork.org/icon/sn_reports.png"',
        'IconFile: 4, 22, 22, 11, 11, "http://www.spotternetwork.org/icon/sn_reports_30.png"',
        'IconFile: 5, 22, 22, 11, 11, "http://www.spotternetwork.org/icon/sn_reports_60.png"',
        'IconFile: 6, 22, 22, 11, 11, "http://www.spotternetwork.org/icon/spotternet_new.png"',
    ]
    return "\n".join(header_lines) + "\n\n"
def file_footertext(team_name):
    """Return the placefile text label plus object terminator for a team marker."""
    return f'\nText: 15, 10, 1, "{team_name}"\nEnd:\n'
def file_footerend():
    """Return the bare placefile object terminator (no text label)."""
    return "\nEnd:\n"
def list_current_teams():
    """Return the ids of all registered teams (one '<id>.db' SQLite file each)."""
    return [fname[:-3]
            for fname in os.listdir(team_db_dir)
            if fname.endswith(".db")]
#########
# Teams #
#########
class TeamList(Resource):
    """Collection endpoint: list existing teams and register new ones."""

    def get(self):
        # All team ids, derived from the per-team SQLite files on disk.
        return {
            "teams": list_current_teams()
        }

    def post(self):
        # Register a new team from form fields: team_id, team_name, pin.
        team_id = request.form['team_id']
        team_name = request.form['team_name']
        pin = request.form['pin']
        if team_id in list_current_teams():
            # ERROR: Team exists already
            return {
                "error": True,
                "error_message": f"Team {team_id} already exists!"
            }, 409
        try:
            # New Team
            info_insert = 'INSERT INTO team_info (team_setting, team_value) VALUES (?,?)'
            # search for team name in the database for easter egg
            config.cur.execute(
                'SELECT * FROM name_easter_eggs WHERE input_name = ?',
                [team_name]
            )
            search_result = config.cur.fetchall()
            if len(search_result) > 0:
                # EASTER EGG FOUND!!
                # Row layout by position (from usage below): [0] presumably
                # the matched input name, [1] replacement team name,
                # [2] message, [3] optional vehicle type, [4] budget bonus.
                team_name = search_result[0][1]
                message = search_result[0][2]
                if search_result[0][3] is not None and len(search_result[0][3]) > 0:
                    vehicle_type = search_result[0][3]
                    # Handle vehicle-specific setup
                    vehicle = Vehicle(vehicle_type, config)
                    # Random starting fuel between 50% and 100% of capacity.
                    fuel_level = (1 + np.random.random()) * 0.5 * vehicle.fuel_cap
                else:
                    vehicle = None
                if search_result[0][4] is not None and float(search_result[0][4]) > 0:
                    budget_bonus = float(search_result[0][4])
                else:
                    budget_bonus = 0
            else:
                message = None
                budget_bonus = 0
                vehicle = None
            # Budget
            budget = budget_bonus + config.starting_budget
            # create the team db
            con = sql.connect(team_db_dir + team_id + '.db')
            cur = con.cursor()
            cur.execute('CREATE TABLE team_info (team_setting varchar, team_value varchar)')
            cur.execute('CREATE TABLE team_history (cur_timestamp varchar, '
                        'arc_timestamp varchar, latitude decimal, '
                        'longitude decimal, speed decimal, direction decimal, '
                        'status_color varchar, status_text varchar, balance decimal, '
                        'points decimal, fuel_level decimal, active_hazard varchar)')
            cur.execute('CREATE TABLE action_queue (action_id varchar, message varchar, '
                        'activation_type varchar, activation_amount varchar, '
                        'action_taken varchar)')
            cur.execute('CREATE TABLE hazard_queue (hazard_type varchar, '
                        'expiry_time varchar, message varchar, message_end varchar, '
                        'overridden_by varchar, speed_limit decimal, direction_lock varchar, '
                        'speed_lock varchar, status varchar)')
            # Seed the key/value settings table with the team's initial state.
            cur.execute(info_insert, ['name', team_name])
            cur.execute(info_insert, ['id', team_id])
            cur.execute(info_insert, ['pin', pin])
            cur.execute(info_insert, ['balance', budget])
            cur.execute(info_insert, ['points', 0])
            cur.execute(info_insert, ['hazard_exp_time', None])
            cur.execute(info_insert, ['active_hazard', None])
            if vehicle is not None:
                # Easter-egg vehicle: record it and the rolled fuel level.
                cur.execute(info_insert, ['vehicle', vehicle_type])
                cur.execute(info_insert, ['fuel_level', fuel_level])
            con.commit()
            # Build the output
            output = {
                'team_id': team_id,
                'team_name': team_name,
                'easter_egg': False
            }
            if message is not None:
                output['easter_egg'] = True
                output['message'] = message
            output['vehicle'] = ''
            if vehicle is not None:
                output['vehicle'] = vehicle_type
            return output
        except Exception as exc:
            # Any setup failure (SQL error, bad numeric field, ...) is
            # reported as a 400 with the exception text.
            return {
                "error": True,
                "error_message": str(exc)
            }, 400


api.add_resource(TeamList, '/team')
class TeamListSimple(Resource):
    """Plain-text variant of the team list: one team id per line."""

    def get(self):
        body = "\n".join(list_current_teams())
        resp = make_response(body)
        resp.headers['content-type'] = 'text/plain'
        return resp


api.add_resource(TeamListSimple, '/team/list')
class TeamLeaderboard(Resource):
    """Render a simple HTML leaderboard of all teams ordered by points."""

    def get(self):
        # Static page header/styling: transparent background so the page can
        # be overlaid (e.g. in a stream), Bootstrap from the CDN.
        output = (
            '<!doctype html><html lang="en"><head><meta charset="utf-8">'
            '<meta name="viewport" content="width=device-width, initial-scale=1">'
            '<link rel="stylesheet" '
            'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css" '
            'integrity="<KEY>" '
            'crossorigin="anonymous">'
            '<title>ISU AMS Chase Simulation Leaderboard</title>'
            r'<style>body {color: white; background: rgba(0, 0, 0, 0);}</style>'
            '<body>'
            '<div class="container-fluid"><div class="row justify-content-center"><div class="col-12">'
            '<h4>Leaderboard</h4>'
        )
        # Collect (name, points) for every team, skipping any team whose DB
        # cannot be read rather than failing the whole page.
        team_tuples = []
        for team_id in list_current_teams():
            try:
                team = get_team(team_id)
                team_tuples.append((team.name, team.points))
            except Exception:
                # Best-effort: a corrupt or incomplete team DB should not
                # take down the leaderboard. (Previously a bare `except:`,
                # which also swallowed SystemExit/KeyboardInterrupt.)
                continue
        # Rank descending by points.
        for i, (team_name, team_points) in enumerate(sorted(team_tuples, key=lambda t: -t[1])):
            output += f'<p class="mb-1">{i + 1}) {team_name} ({team_points} pts)</p>'
        output += '</div></div></div></body></html>'
        response = make_response(output)
        response.headers['content-type'] = 'text/html'
        return response


api.add_resource(TeamLeaderboard, '/team/leaderboard.html')
class TeamResource(Resource):
    """Per-team endpoint: read status, or advance the simulation one tick."""

    def get(self, team_id):
        # Current team state (position, balance, points, fuel, hazards, ...).
        return get_team(team_id).output_status_dict()

    def put(self, team_id):
        # (this is the chase.py replacement)
        # OUTPUT: team.output_status_dict() combined with messages
        # One simulation tick: validate the PIN, move the team, burn or add
        # fuel, expire/apply hazards, then persist and report the new status.
        try:
            pin = request.form['pin']
            speed = float(request.form['speed'])
            try:
                direction = float(request.form['direction'])
            except:
                # NOTE(review): bare except; missing/malformed direction
                # silently defaults to 0.
                direction = 0
            try:
                refuel = (request.form['refuel'] == "true")
            except:
                refuel = False
            team, hazard_registry = get_team_and_hazard_registry(team_id)
            message_list = []
            if team.status['pin'] != pin:
                return {"error": True, "error_message": "invalid pin"}, 403
            # Sanitize input values
            if team.cannot_refuel:
                refuel = False
            if refuel or speed <= 0 or team.stopped or team.fuel_level <= 0:
                speed = 0
            if speed > team.current_max_speed:
                speed = team.current_max_speed
            # Movement Updates
            current_time = datetime.now(tz=pytz.UTC)
            try:
                diff_time = (current_time - team.last_update).total_seconds()
            except:
                # If this gets messed up, default to usual ping
                diff_time = 10
            # Distance traveled this tick: speed scaled by the simulation
            # speed factor over the elapsed fraction of an hour.
            distance = speed * config.speed_factor * diff_time / 3600
            team.latitude, team.longitude = move_lat_lon(team.latitude, team.longitude, distance, direction)
            team.speed = speed
            team.direction = direction
            # Gas management
            if refuel:
                if team.fuel_level <= 0:
                    # Ran completely dry: charge the roadside-assistance fee.
                    team.balance -= config.aaa_fee
                    message_list.append(
                        "You have been charged " + money_format(config.aaa_fee) + " to get someone "
                        "to fill your vehicle up."
                    )
                # Fill at a fixed rate, capped at tank capacity; charge for
                # the fuel added.
                fuel_amt = min(diff_time * config.fill_rate,
                               team.vehicle.fuel_cap - team.fuel_level)
                team.fuel_level += fuel_amt
                team.balance -= fuel_amt * config.gas_price
                # NOTE(review): done_refueling is computed but never used.
                done_refueling = (team.fuel_level >= team.vehicle.fuel_cap - .01)
            else:
                # Burn fuel according to the vehicle's speed-dependent mileage.
                fuel_amt = distance / team.vehicle.calculate_mpg(speed)
                team.fuel_level -= fuel_amt * float(config.get_config_value("fuel_factor"))
                if team.fuel_level < 0:
                    team.fuel_level = 0
                    message_list.append(datetime.now(tz=pytz.UTC).strftime('%H%MZ') +
                                        ': You are running on fumes! Better call for help.')
            # Current hazard/hazard expiry
            # Need to partition into ongoing and expired
            ongoing_hazards = [haz for haz in team.active_hazards if haz.expiry_time > datetime.now(tz=pytz.UTC)]
            expired_hazards = [haz for haz in team.active_hazards if haz.expiry_time <= datetime.now(tz=pytz.UTC)]
            for haz in expired_hazards:
                message_list.append(haz.generate_expiry_message())
            if len(ongoing_hazards) > 0:
                team.active_hazards = ongoing_hazards
            else:
                team.clear_active_hazards()
            # Check queue for action items (either instant action or a hazard to queue)
            queued_hazard = None
            if team.has_action_queue_item:
                for action in team.get_action_queue(hazard_registry):
                    if not action.is_hazard:
                        if action.is_adjustment:
                            team.apply_action(action)
                            message_list.append(action.generate_message())
                        team.dismiss_action(action)
                    elif action.is_hazard and queued_hazard is None:
                        # Only the first queued hazard is considered per tick.
                        queued_hazard = action
            # If no hazard queued, shuffle in a chance of a random hazard
            if queued_hazard is None:
                queued_hazard = shuffle_new_hazard(team, diff_time, hazard_registry, config)
            # Apply the queued hazard if it overrides a current hazard (otherwise ignore)
            if (
                queued_hazard is not None
                and all(haz.overridden_by(queued_hazard) for haz in team.active_hazards)
            ):
                team.apply_hazard(queued_hazard)  # actually make it take effect
                message_list.append(queued_hazard.generate_message())
                team.dismiss_action(queued_hazard)  # in case it was from DB
            # Persist and report the updated state plus any tick messages.
            team.write_status()
            output = team.output_status_dict()
            output['messages'] = message_list
            output['debug'] = {key: request.form[key] for key in ("pin", "speed", "direction", "refuel")}
            return output
        except Exception as exc:
            # Surface unexpected failures with their traceback for debugging.
            return {"error": True, "error_message": str(exc), "traceback": traceback.format_exc()}, 500


api.add_resource(TeamResource, '/team/<team_id>')
class TeamLocation(Resource):
    """Read or overwrite a team's position (writes reset speed/direction)."""

    def get(self, team_id):
        team = get_team(team_id)
        return {
            "lat": team.latitude,
            "lon": team.longitude
        }

    def put(self, team_id):
        team = get_team(team_id)
        # Authorization: the team's own PIN, or the global admin token.
        if 'pin' in request.form and team.status['pin'] != request.form['pin']:
            return {"error": True, "error_message": "invalid pin"}, 403
        elif 'auth' in request.form and config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        elif 'pin' not in request.form and 'auth' not in request.form:
            return {"error": True, "error_message": "need authorization"}, 403
        team.latitude = float(request.form['lat'])
        team.longitude = float(request.form['lon'])
        # Teleporting a team zeroes its motion state.
        team.speed = 0
        team.direction = 0
        team.write_status()
        return {
            "success": True,
            "lat": team.latitude,
            "lon": team.longitude
        }


api.add_resource(TeamLocation, '/team/<team_id>/location')
class TeamVehicle(Resource):
    """Read or set a team's vehicle (setting also rolls a random fuel level)."""

    def get(self, team_id):
        team = get_team(team_id)
        return vehicle_stats(team.vehicle)

    def put(self, team_id):
        team = get_team(team_id)
        # Authorization: the team's own PIN, or the global admin token.
        if 'pin' in request.form and team.status['pin'] != request.form['pin']:
            return {"error": True, "error_message": "invalid pin"}, 403
        elif 'auth' in request.form and config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        elif 'pin' not in request.form and 'auth' not in request.form:
            return {"error": True, "error_message": "need authorization"}, 403
        team.status["vehicle"] = request.form['vehicle_type']
        # Handle vehicle-specific setup
        vehicle = Vehicle(team.status["vehicle"], config)
        # Random starting fuel level between 50% and 100% of the tank.
        fuel_level = (1 + np.random.random()) * 0.5 * vehicle.fuel_cap
        team.fuel_level = fuel_level
        team.write_status()
        return {
            "success": True,
            "vehicle": vehicle_stats(get_vehicle(request.form['vehicle_type']))
        }


api.add_resource(TeamVehicle, '/team/<team_id>/vehicle')
class TeamPoints(Resource):
    """Read or add to a team's point total."""

    def get(self, team_id):
        # Current score for the team.
        team = get_team(team_id)
        return {
            "points": team.points
        }

    def put(self, team_id):
        # Add (possibly negative) points; requires the team PIN or the
        # global admin token.
        team = get_team(team_id)
        if 'pin' in request.form and team.status['pin'] != request.form['pin']:
            return {"error": True, "error_message": "invalid pin"}, 403
        elif 'auth' in request.form and config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        elif 'pin' not in request.form and 'auth' not in request.form:
            return {"error": True, "error_message": "need authorization"}, 403
        # Parse once, outside the try, so a malformed 'points' field raises
        # ValueError instead of being masked by the increment fallback.
        delta_points = int(float(request.form['points']))
        try:
            team.points += delta_points
        except TypeError:
            # team.points is unset/None (or non-numeric) for a fresh team:
            # initialize instead of incrementing. (Previously a bare
            # `except:`, which hid unrelated errors too.)
            team.points = delta_points
        team.write_status()
        return {
            "success": True,
            "points": team.points
        }


api.add_resource(TeamPoints, '/team/<team_id>/points')
class TeamBalance(Resource):
    """Read or adjust a team's account balance."""

    def get(self, team_id):
        team = get_team(team_id)
        return {
            "balance": team.balance,
            "money_formatted": money_format(team.balance)
        }

    def put(self, team_id):
        # Add a (possibly negative) amount to the team's balance.
        team = get_team(team_id)
        # Authorization: the team's own PIN, or the global admin token.
        if 'pin' in request.form and team.status['pin'] != request.form['pin']:
            return {"error": True, "error_message": "invalid pin"}, 403
        elif 'auth' in request.form and config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        elif 'pin' not in request.form and 'auth' not in request.form:
            return {"error": True, "error_message": "need authorization"}, 403
        team.balance += float(request.form['balance'])
        team.write_status()
        return {
            "success": True,
            "balance": team.balance,
            "money_formatted": money_format(team.balance)
        }


api.add_resource(TeamBalance, '/team/<team_id>/balance')
class TeamVerify(Resource):
    """Checks a team's pin and reports which setup step, if any, is pending."""
    def put(self, team_id):
        """Validate the pin, then report setup state.

        A team without a vehicle must pick one first; a team without a
        location must pick that next; otherwise setup is complete.
        """
        team = get_team(team_id)
        if team.status['pin'] != request.form['pin']:
            return {"error": True, "error_message": "invalid pin"}, 403
        if team.vehicle is None:
            needs_setup, step = True, "vehicle-selection"
        elif team.latitude is None:
            needs_setup, step = True, "location-selection"
        else:
            needs_setup, step = False, ""
        return {
            "team_name": team.name,
            "needs_setup": needs_setup,
            "setup_step": step,
        }
api.add_resource(TeamVerify, '/team/<team_id>/verify')
##############
# Placefiles #
##############
class PlacefileLsrContent(Resource):
    """Serves the GR placefile listing recent Local Storm Reports (LSRs)."""
    def get(self):
        """Build and return the LSR placefile as text/plain.
        The header matter (refresh, title, icon sheets) is always emitted;
        the LSR entries are best-effort -- any failure while reading or
        scaling database rows is logged as a warning and only the header
        is served.
        """
        url = lsr_asset_url
        output = "\n\n"
        output += "RefreshSeconds: 5\n"
        output += "Threshold: 999\n"
        output += "Title: Live Storm Reports (LSRs)\n"
        output += 'Font: 1, 11, 0, "Courier New"\n'
        # Icon sheets 1-4: funnel cloud, hail, tornado, t-storm wind damage.
        output += f'IconFile: 1, 25, 25, 11, 11, "{url}Lsr_FunnelCloud_Icon.png"\n'
        output += f'IconFile: 2, 25, 32, 11, 11, "{url}Lsr_Hail_Icons.png"\n'
        output += f'IconFile: 3, 25, 25, 11, 11, "{url}Lsr_Tornado_Icon.png"\n'
        output += f'IconFile: 4, 25, 25, 11, 11, "{url}Lsr_TstmWndDmg_Icon.png"\n\n'
        hours_valid = config.lsr_hours_valid # LSR validity window (archive time)
        remark_wrap_length = int(config.get_config_value("lsr_remark_wrap_length")) # remark text wrapping
        try:
            lsr_con = sql.connect(lsr_db_file)
            lsr_cur = lsr_con.cursor()
            # Prep the time interval in archive time: [now - hours_valid, now].
            t1 = arc_time_from_cur(datetime.now(tz=pytz.UTC), timings=config.timings)
            t0 = t1 - timedelta(hours=hours_valid)
            t0, t1 = (t.strftime(std_fmt) for t in [t0, t1])
            # Get the raw reports falling inside the window.
            lsr_cur.execute("SELECT * FROM lsrs_raw WHERE valid BETWEEN ? AND ?", [t0, t1])
            lsrs_raw = lsr_cur.fetchall()
            # Scale the archive-time data onto the current (simulation) clock.
            lsrs_scaled = scale_raw_lsr_to_cur_time(lsrs_raw, timings=config.timings)
            # Emit one placefile entry per report.
            for lsr_tuple in lsrs_scaled:
                output += gr_lsr_placefile_entry_from_tuple(
                    lsr_tuple,
                    wrap_length=remark_wrap_length,
                    tz=pytz.timezone(config.get_config_value("pytz_timezone"))
                ) + '\n'
        except:
            # Deliberate best-effort: serve just the header matter on error.
            warnings.warn(traceback.format_exc())
            pass
        response = make_response(output)
        response.headers['content-type'] = 'text/plain'
        return response
api.add_resource(PlacefileLsrContent, '/placefile/lsr/content')
class PlacefileLsrLoad(Resource):
    """Admin endpoint that imports Local Storm Reports into the local DB."""
    def post(self):
        """Fetch LSRs from the IEM geojson service and store them in SQLite.
        Requires the global auth token.  Form fields: ``start``, ``end``,
        ``wfos``.  Returns the number of features fetched; note that only
        report types with an icon mapping are actually stored.
        """
        if config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        endpoint_args = {
            'start': request.form['start'],
            'end': request.form['end'],
            'wfos': request.form['wfos']
        }
        # Iowa Environmental Mesonet LSR archive service.
        lsr_endpoint = (
            "http://mesonet.agron.iastate.edu/geojson/lsr.php"
            + "?sts={start}&ets={end}&wfos={wfos}".format(**endpoint_args)
        )
        lsr_request = requests.get(lsr_endpoint)
        lsrs = lsr_request.json()["features"]
        lsr_con = sql.connect(lsr_db_file)
        lsr_cur = lsr_con.cursor()
        lsr_cur.execute(
            "CREATE TABLE IF NOT EXISTS lsrs_raw (city char, county char, lat decimal, "
            + "lon decimal,magnitude char, remark char, source char, st char, "
            + "type char, typetext char, valid datetime, wfo char)"
        )
        lsr_con.commit()
        # Save the data
        print("Loading into local database...")
        query = (
            "INSERT INTO lsrs_raw (city, county, lat, lon, magnitude, remark, "
            + "source, st, type, typetext, valid, wfo) VALUES "
            + "(?,?,?,?,?,?,?,?,?,?,?,?)"
        )
        for lsr_row in lsrs:
            lsr = lsr_row["properties"]
            # Only keep report types that map to a placefile icon.
            if type_to_icon(lsr["type"]):
                lsr_cur.execute(
                    query,
                    [
                        lsr["city"],
                        lsr["county"],
                        lsr["lat"],
                        lsr["lon"],
                        lsr["magnitude"],
                        lsr["remark"],
                        lsr["source"],
                        lsr["st"],
                        lsr["type"],
                        lsr["typetext"],
                        lsr["valid"],
                        lsr["wfo"],
                    ],
                )
        # Single commit after the loop. NOTE(review): the connection is
        # never closed here -- presumably relies on GC; confirm.
        lsr_con.commit()
        return {"count": len(lsrs)}
api.add_resource(PlacefileLsrLoad, '/placefile/lsr/load')
class PlacefileAllTeamsCurrentContent(Resource):
    """GR placefile showing the latest known position of every team."""
    def get(self):
        """Return one placefile object per team with a current position.
        Teams that fail to load, or that have no latitude/speed yet, are
        skipped silently.
        """
        output = file_headertext("All Teams", preface="Current ")
        for team_id in list_current_teams():
            try:
                team = get_team(team_id)
            except:
                continue
            if team.latitude is not None and team.speed is not None:
                output += f"Object: {team.latitude:.4f},{team.longitude:.4f}\n"
                if team.speed > 0:
                    # Moving: rotated direction arrow plus a heading line in
                    # the hover text.
                    output += f"Icon: 0,0,{team.direction:03d},2,15,\n"
                    direction = team.direction
                    heading_row = f"Heading: {direction_angle_to_str(team.direction)}\\n"
                else:
                    direction = 0
                    heading_row = ""
                # Marker color: 2=green (default), 6=yellow, 10=red.
                if team.status_color is None:
                    color_code = 2
                else:
                    color_code = {"green": 2, "yellow": 6, "red": 10}[team.status_color]
                # Hover text; literal "\n" sequences are GR line breaks.
                output += (
                    f'Icon: 0,0,{direction:03d},6,{color_code}, "Team: {team.name}\\n'
                    f'{team.last_update.strftime("%Y-%m-%d %H:%M:%S")} UTC\\n'
                    f'Car type: {team.vehicle.print_name}\\n'
                    f'Speed: {team.speed:.1f} mph\\n{heading_row}'
                    f'Fuel Remaining: {team.fuel_level:.2f} gallons\\n'
                    f'{team.status_text}"'
                )
                output += file_footerend()
        output += "\n\n"
        response = make_response(output)
        response.headers['content-type'] = 'text/plain'
        return response
api.add_resource(PlacefileAllTeamsCurrentContent, '/placefile/team/current/content')
class PlacefileAllTeamsTracksContent(Resource):
    """GR placefile showing the last 10 track points of every team."""
    def get(self):
        """Return up to ten recent positions per team.
        Only the most recent point (i == 0) gets the direction arrow and
        full hover text; older points are plain colored markers.  Every
        per-row failure is swallowed so one bad row cannot break the file.
        """
        output = file_headertext("All Teams", preface="Tracked ")
        for team_id in list_current_teams():
            try:
                team = get_team(team_id)
            except:
                continue
            try:
                team.cur.execute(
                    "SELECT cur_timestamp, latitude, longitude, speed, direction, status_color, "
                    "status_text, fuel_level "
                    "FROM team_history ORDER BY cur_timestamp DESC LIMIT 10"
                )
                history_rows = team.cur.fetchall()
            except:
                history_rows = []
            # Row layout: 0=timestamp, 1=lat, 2=lon, 3=speed, 4=direction,
            # 5=status_color, 6=status_text, 7=fuel_level.
            for i, row in enumerate(history_rows):
                try:
                    start_time = row[0]
                    if i == 0:
                        # NOTE(review): end_time is computed but never used
                        # in this class (no TimeRange line) -- confirm intent.
                        end_time = (datetime.now(tz=pytz.UTC) + timedelta(seconds=30)).strftime(std_fmt)
                        arrow_icon = f"Icon: 0,0,{row[4]:03d},2,15,\n"
                    else:
                        end_time = history_rows[i - 1][0]
                        arrow_icon = ""
                    this_output = f"Object: {row[1]:.4f},{row[2]:.4f}\n"
                    if row[3] > 0:
                        this_output += arrow_icon
                        direction = row[4]
                        heading_row = f"Heading: {direction_angle_to_str(row[4])}\\n"
                    else:
                        direction = 0
                        heading_row = ""
                    # Marker color: 2=green (default), 6=yellow, 10=red.
                    if row[5] is None:
                        color_code = 2
                    else:
                        color_code = {"green": 2, "yellow": 6, "red": 10}[row[5]]
                    if arrow_icon:
                        # Latest point: full hover text.
                        this_output += (
                            f'Icon: 0,0,{direction:03d},6,{color_code}, "Team: {team.name}\\n'
                            f'{start_time}\\n'
                            f'Car type: {team.vehicle.print_name}\\n'
                            f'Speed: {row[3]:.1f} mph\\n{heading_row}'
                            f'Fuel Remaining: {row[7]:.2f} gallons\\n'
                            f'{row[6]}"'
                        )
                        this_output += file_footertext(team.name)
                    else:
                        this_output += f'Icon: 0,0,{direction:03d},6,{color_code},\n'
                    this_output += file_footerend()
                    this_output += '\n\n'
                    output += this_output
                except:
                    pass
        response = make_response(output)
        response.headers['content-type'] = 'text/plain'
        return response
api.add_resource(PlacefileAllTeamsTracksContent, '/placefile/team/tracks/content')
class PlacefileAllTeamsHistoryContent(Resource):
    """GR placefile with the complete time-ranged history of every team."""
    def get(self):
        """Return every historical position per team, wrapped in TimeRange.
        Each point is valid from its own timestamp until the next newer
        point (the newest extends 30 s past "now").  Per-row failures are
        swallowed so one bad row cannot break the whole file.
        """
        output = file_headertext("All Teams", preface="History of ")
        for team_id in list_current_teams():
            try:
                team = get_team(team_id)
            except:
                continue
            try:
                team.cur.execute(
                    "SELECT cur_timestamp, latitude, longitude, speed, direction, status_color, "
                    "status_text, fuel_level "
                    "FROM team_history ORDER BY cur_timestamp DESC"
                )
                history_rows = team.cur.fetchall()
            except:
                history_rows = []
            # Row layout: 0=timestamp, 1=lat, 2=lon, 3=speed, 4=direction,
            # 5=status_color, 6=status_text, 7=fuel_level.
            for i, row in enumerate(history_rows):
                try:
                    start_time = row[0]
                    if i == 0:
                        end_time = (datetime.now(tz=pytz.UTC) + timedelta(seconds=30)).strftime(std_fmt)
                    else:
                        end_time = history_rows[i - 1][0]
                    this_output = f"TimeRange: {start_time} {end_time}\n"
                    this_output += f"Object: {row[1]:.4f},{row[2]:.4f}\n"
                    if row[3] > 0:
                        this_output += f"Icon: 0,0,{row[4]:03d},2,15,\n"
                        direction = row[4]
                        heading_row = f"Heading: {direction_angle_to_str(row[4])}\\n"
                    else:
                        direction = 0
                        heading_row = ""
                    # Marker color: 2=green (default), 6=yellow, 10=red.
                    # BUGFIX: guard against status_color being None, matching
                    # every sibling placefile class.  Previously the lookup
                    # raised KeyError, the bare except swallowed it, and the
                    # row was silently dropped from the history file.
                    if row[5] is None:
                        color_code = 2
                    else:
                        color_code = {"green": 2, "yellow": 6, "red": 10}[row[5]]
                    this_output += (
                        f'Icon: 0,0,{direction:03d},6,{color_code}, "Team: {team.name}\\n'
                        f'{start_time}\\n'
                        f'Car type: {team.vehicle.print_name}\\n'
                        f'Speed: {row[3]:.1f} mph\\n{heading_row}'
                        f'Fuel Remaining: {row[7]:.2f} gallons\\n'
                        f'{row[6]}"'
                    )
                    this_output += file_footertext(team.name)
                    this_output += '\n\n'
                    output += this_output
                except:
                    pass
        response = make_response(output)
        response.headers['content-type'] = 'text/plain'
        return response
api.add_resource(PlacefileAllTeamsHistoryContent, '/placefile/team/history/content')
class PlacefileSingleTeamCurrentContent(Resource):
    """GR placefile showing the latest known position of one team."""
    def get(self, team_id):
        """Return the team's current position placefile, or 404 if unknown."""
        if team_id in list_current_teams():
            team = get_team(team_id)
        else:
            return "", 404
        output = file_headertext(team.name, preface="Current ")
        if team.latitude is not None and team.speed is not None:
            output += f"Object: {team.latitude:.4f},{team.longitude:.4f}\n"
            if team.speed > 0:
                # Moving: rotated direction arrow plus a heading line.
                output += f"Icon: 0,0,{team.direction:03d},2,15,\n"
                direction = team.direction
                heading_row = f"Heading: {direction_angle_to_str(team.direction)}\\n"
            else:
                direction = 0
                heading_row = ""
            # Marker color: 2=green (default), 6=yellow, 10=red.
            if team.status_color is None:
                color_code = 2
            else:
                color_code = {"green": 2, "yellow": 6, "red": 10}[team.status_color]
            # Hover text; literal "\n" sequences are GR line breaks.
            output += (
                f'Icon: 0,0,{direction:03d},6,{color_code}, "Team: {team.name}\\n'
                f'{team.last_update.strftime("%Y-%m-%d %H:%M:%S")} UTC\\n'
                f'Car type: {team.vehicle.print_name}\\n'
                f'Speed: {team.speed:.1f} mph\\n{heading_row}'
                f'Fuel Remaining: {team.fuel_level:.2f} gallons\\n'
                f'{team.status_text}"'
            )
        output += file_footertext(team.name)
        response = make_response(output)
        response.headers['content-type'] = 'text/plain'
        return response
api.add_resource(PlacefileSingleTeamCurrentContent, '/placefile/team/<team_id>/current/content')
class PlacefileSingleTeamTracksContent(Resource):
    """GR placefile showing the last 10 track points of one team."""
    def get(self, team_id):
        """Return up to ten recent positions for the team, or 404 if unknown.
        Only the newest point (i == 0) gets the direction arrow and full
        hover text; older points are plain colored markers.  Per-row
        failures are swallowed.
        """
        if team_id in list_current_teams():
            team = get_team(team_id)
        else:
            return "", 404
        output = file_headertext(team.name, preface="Tracked ")
        try:
            team.cur.execute(
                "SELECT cur_timestamp, latitude, longitude, speed, direction, status_color, "
                "status_text, fuel_level "
                "FROM team_history ORDER BY cur_timestamp DESC LIMIT 10"
            )
            history_rows = team.cur.fetchall()
        except:
            history_rows = []
        # Row layout: 0=timestamp, 1=lat, 2=lon, 3=speed, 4=direction,
        # 5=status_color, 6=status_text, 7=fuel_level.
        for i, row in enumerate(history_rows):
            try:
                start_time = row[0]
                if i == 0:
                    # NOTE(review): end_time is computed but never used in
                    # this class (no TimeRange line) -- confirm intent.
                    end_time = (datetime.now(tz=pytz.UTC) + timedelta(seconds=30)).strftime(std_fmt)
                    arrow_icon = f"Icon: 0,0,{row[4]:03d},2,15,\n"
                else:
                    end_time = history_rows[i - 1][0]
                    arrow_icon = ""
                this_output = f"Object: {row[1]:.4f},{row[2]:.4f}\n"
                if row[3] > 0:
                    this_output += arrow_icon
                    direction = row[4]
                    heading_row = f"Heading: {direction_angle_to_str(row[4])}\\n"
                else:
                    direction = 0
                    heading_row = ""
                # Marker color: 2=green (default), 6=yellow, 10=red.
                if row[5] is None:
                    color_code = 2
                else:
                    color_code = {"green": 2, "yellow": 6, "red": 10}[row[5]]
                if arrow_icon:
                    # Latest point: full hover text.
                    this_output += (
                        f'Icon: 0,0,{direction:03d},6,{color_code}, "Team: {team.name}\\n'
                        f'{start_time}\\n'
                        f'Car type: {team.vehicle.print_name}\\n'
                        f'Speed: {row[3]:.1f} mph\\n{heading_row}'
                        f'Fuel Remaining: {row[7]:.2f} gallons\\n'
                        f'{row[6]}"'
                    )
                    this_output += file_footertext(team.name)
                else:
                    this_output += f'Icon: 0,0,{direction:03d},6,{color_code},\n'
                this_output += file_footerend()
                this_output += '\n\n'
                output += this_output
            except:
                pass
        response = make_response(output)
        response.headers['content-type'] = 'text/plain'
        return response
api.add_resource(PlacefileSingleTeamTracksContent, '/placefile/team/<team_id>/tracks/content')
class PlacefileSingleTeamHistoryContent(Resource):
    """GR placefile with the complete time-ranged history of one team."""
    def get(self, team_id):
        """Return every historical position for the team, or 404 if unknown.
        Each point is wrapped in a TimeRange from its own timestamp until
        the next newer point (the newest extends 30 s past "now").
        Per-row failures are swallowed.
        """
        if team_id in list_current_teams():
            team = get_team(team_id)
        else:
            return "", 404
        output = file_headertext(team.name, preface="History of ")
        try:
            team.cur.execute(
                "SELECT cur_timestamp, latitude, longitude, speed, direction, status_color, "
                "status_text, fuel_level "
                "FROM team_history ORDER BY cur_timestamp DESC"
            )
            history_rows = team.cur.fetchall()
        except:
            history_rows = []
        # Row layout: 0=timestamp, 1=lat, 2=lon, 3=speed, 4=direction,
        # 5=status_color, 6=status_text, 7=fuel_level.
        for i, row in enumerate(history_rows):
            try:
                start_time = row[0]
                if i == 0:
                    end_time = (datetime.now(tz=pytz.UTC) + timedelta(seconds=30)).strftime(std_fmt)
                else:
                    end_time = history_rows[i - 1][0]
                this_output = f"TimeRange: {start_time} {end_time}\n"
                this_output += f"Object: {row[1]:.4f},{row[2]:.4f}\n"
                if row[3] > 0:
                    this_output += f"Icon: 0,0,{row[4]:03d},2,15,\n"
                    direction = row[4]
                    heading_row = f"Heading: {direction_angle_to_str(row[4])}\\n"
                else:
                    direction = 0
                    heading_row = ""
                # Marker color: 2=green (default), 6=yellow, 10=red.
                if row[5] is None:
                    color_code = 2
                else:
                    color_code = {"green": 2, "yellow": 6, "red": 10}[row[5]]
                this_output += (
                    f'Icon: 0,0,{direction:03d},6,{color_code}, "Team: {team.name}\\n'
                    f'{start_time}\\n'
                    f'Car type: {team.vehicle.print_name}\\n'
                    f'Speed: {row[3]:.1f} mph\\n{heading_row}'
                    f'Fuel Remaining: {row[7]:.2f} gallons\\n'
                    f'{row[6]}"'
                )
                this_output += file_footertext(team.name)
                this_output += '\n\n'
                output += this_output
            except:
                pass
        response = make_response(output)
        response.headers['content-type'] = 'text/plain'
        return response
api.add_resource(PlacefileSingleTeamHistoryContent, '/placefile/team/<team_id>/history/content')
############
# Vehicles #
############
class VehicleList(Resource):
    """Lists stats for every vehicle type flagged as publicly selectable."""
    def get(self):
        config.cur.execute("SELECT vehicle_type FROM vehicles WHERE shown_in_list = 1")
        stats = []
        for (vehicle_type,) in config.cur.fetchall():
            stats.append(vehicle_stats(get_vehicle(vehicle_type)))
        return {'vehicles': stats}
api.add_resource(VehicleList, '/vehicle')
class VehicleResource(Resource):
    """Returns the stat block for a single vehicle type."""
    def get(self, vehicle_id):
        vehicle = get_vehicle(vehicle_id)
        return vehicle_stats(vehicle)
api.add_resource(VehicleResource, '/vehicle/<vehicle_id>')
#########
# Admin #
#########
class SimTimings(Resource):
    """Read/write access to the simulation clock configuration."""
    # Config settings exposed through this endpoint.
    _FIELDS = (
        "simulation_running",
        "arc_start_time",
        "cur_start_time",
        "speed_factor",
    )
    def get(self):
        """Return the current value of each timing field."""
        result = {}
        for field in self._FIELDS:
            result[field] = config.get_config_value(field)
        return result
    def put(self):
        """Update any subset of the timing fields; requires the auth token."""
        if config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        for field in self._FIELDS:
            if field in request.form:
                config.cur.execute(
                    "UPDATE config SET config_value = ? WHERE config_setting = ?",
                    [request.form[field], field]
                )
                config.con.commit()
        return {"success": True}
api.add_resource(SimTimings, '/simulation/timings')
class SimRunning(Resource):
    """Reports whether the simulation clock is currently running."""
    def get(self):
        if int(config.get_config_value("simulation_running")):
            state = 1
        else:
            state = 0
        return {"running": state}
api.add_resource(SimRunning, '/simulation/running')
class SimDisplay(Resource):
    """Lists queued display items, newest first, with absolute URLs."""
    def get(self):
        try:
            config.cur.execute("SELECT arc_time, type, filename FROM display_queue ORDER BY arc_time DESC")
            display_items = []
            for arc_time, item_type, filename in config.cur.fetchall():
                display_items.append({
                    'arc_time': arc_time,
                    'type': item_type,
                    'url': str(config.get_config_value("display_base_url")) + filename
                })
            return {
                'success': True,
                'items': display_items
            }
        except:
            # Surface the traceback to the caller as a 500.
            return {'error': True, 'error_message': traceback.format_exc()}, 500
api.add_resource(SimDisplay, '/simulation/display')
class SimConfig(Resource):
    """Bulk update of general simulation settings; requires the auth token."""
    # Whitelisted config settings that may be changed via this endpoint.
    _ALLOWED = (
        'simulation_running',
        'gas_price',
        'fill_rate',
        'min_town_distance_search',
        'min_town_distance_refuel',
        'min_town_population',
        'speed_limit',
        'lsr_hours_valid',
        'aaa_fee',
    )
    def put(self):
        if config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        updated = []
        for setting in self._ALLOWED:
            if setting not in request.form:
                continue
            config.cur.execute(
                "UPDATE config SET config_value = ? WHERE config_setting = ?",
                [request.form[setting], setting]
            )
            config.con.commit()
            updated.append(setting)
        return {"success": True, "updated": updated}
api.add_resource(SimConfig, '/simulation/config')
class SimHazardConfig(Resource):
    """Bulk update of hazard tuning parameters; requires the auth token."""
    # Whitelisted hazard settings that may be changed via this endpoint.
    _ALLOWED = (
        'active_hazards',
        'speeding_max_chance',
        'speeding_ticket_amt',
        'dirt_road_prob',
        'cc_prob',
        'flat_tire_prob',
        'pay_for_flat_prob',
        'pay_for_flat_amt',
        'dead_end_prob',
        'flooded_road_prob',
    )
    def put(self):
        if config.get_config_value('auth') != request.form['auth']:
            return {"error": True, "error_message": "invalid auth"}, 403
        updated = []
        for setting in self._ALLOWED:
            if setting not in request.form:
                continue
            config.cur.execute(
                "UPDATE hazard_config SET hazard_value = ? WHERE hazard_setting = ?",
                [request.form[setting], setting]
            )
            config.con.commit()
            updated.append(setting)
        return {"success": True, "updated": updated}
api.add_resource(SimHazardConfig, '/simulation/hazard_config')
class TestResource(Resource):
    """Liveness check: returns the current server time."""
    def get(self):
        now = datetime.now()
        return {"test": True, "time": now.strftime(std_fmt)}
api.add_resource(TestResource, '/test')
##########################
# Run the Flask development server when executed directly (not under WSGI).
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
1683624 | <reponame>mmaaz60/DCL<filename>models/Asoftmax_linear.py
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn import Parameter
import math
def myphi(x, m):
    """Taylor-series approximation of cos(m * x).

    Used by AngleLinear when ``phiflag`` is False to compute the angular
    margin function without the Chebyshev-polynomial table.

    BUGFIX: the final term was ``- x**9/9!``; the cosine series contains
    only even powers, so the series truncated at the 10th order is
    1 - x^2/2! + x^4/4! - x^6/6! + x^8/8! - x^10/10!.
    """
    x = x * m
    return (1 - x**2/math.factorial(2) + x**4/math.factorial(4)
            - x**6/math.factorial(6) + x**8/math.factorial(8)
            - x**10/math.factorial(10))
class AngleLinear(nn.Module):
    """Angular-margin linear layer (A-Softmax / SphereFace style).

    Computes cosine similarities between L2-normalized inputs and
    L2-normalized class weight columns, plus the margin-modified score
    phi(theta) for the multiplicative angular margin ``m``.
    """
    def __init__(self, in_features, out_features, m = 4, phiflag=True):
        # m: integer angular margin multiplier (must be <= 5 when
        # phiflag is True, since mlambda only covers cos(m*theta) up to m=5).
        # phiflag: True -> use the exact Chebyshev expansion of cos(m*theta);
        # False -> use the myphi() Taylor approximation instead.
        super(AngleLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features,out_features))
        # Initialize uniformly, then renormalize each column to unit L2 norm
        # (renorm to 1e-5 then scale by 1e5).
        self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
        self.phiflag = phiflag
        self.m = m
        # mlambda[m](cos(theta)) == cos(m*theta) (Chebyshev polynomials).
        self.mlambda = [
            lambda x: x**0,
            lambda x: x**1,
            lambda x: 2*x**2-1,
            lambda x: 4*x**3-3*x,
            lambda x: 8*x**4-8*x**2+1,
            lambda x: 16*x**5-20*x**3+5*x
        ]
    def forward(self, input):
        """Return (cos_theta, phi_theta), each scaled by the input norms.

        Both outputs have size (B, Classnum); the loss layer is expected
        to combine them.
        """
        x = input   # size=(B,F)    F is feature len
        w = self.weight # size=(F,Classnum) F=in_features Classnum=out_features
        # Normalize weight columns to unit norm (renorm trick as in __init__).
        ww = w.renorm(2,1,1e-5).mul(1e5)
        xlen = x.pow(2).sum(1).pow(0.5) # size=B
        wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
        cos_theta = x.mm(ww) # size=(B,Classnum)
        cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
        cos_theta = cos_theta.clamp(-1,1)
        if self.phiflag:
            # Exact cos(m*theta) via Chebyshev polynomial.
            cos_m_theta = self.mlambda[self.m](cos_theta)
            # Variable() detaches theta's graph (legacy PyTorch idiom);
            # k indexes which monotonic segment of cos(m*theta) we are in.
            theta = Variable(cos_theta.data.acos())
            k = (self.m*theta/3.14159265).floor()
            n_one = k*0.0 - 1
            # phi(theta) = (-1)^k * cos(m*theta) - 2k : monotonically
            # decreasing version of cos(m*theta) over [0, pi].
            phi_theta = (n_one**k) * cos_m_theta - 2*k
        else:
            # Approximate path: Taylor series, then clamp to keep the
            # margin score bounded.
            theta = cos_theta.acos()
            phi_theta = myphi(theta,self.m)
            phi_theta = phi_theta.clamp(-1*self.m,1)
        # Re-scale by the input feature norms (weights stay normalized).
        cos_theta = cos_theta * xlen.view(-1,1)
        phi_theta = phi_theta * xlen.view(-1,1)
        output = (cos_theta,phi_theta)
        return output # size=(B,Classnum,2)
| StarcoderdataPython |
57288 | from .models import ToDoList
from rest_framework.generics import ListAPIView
from rest_framework import permissions
from api.serializers import ToDoSerializerList
from django.db import connection
from rest_framework.response import Response
class ToDoListView(ListAPIView):
    """Read-only listing of every ToDoList entry; no authentication required."""
    permission_classes = (permissions.AllowAny,)
    authentication_classes = ()
    queryset = ToDoList.objects.all()
    serializer_class = ToDoSerializerList

    def post(self, request, pk):
        """Disabled placeholder for a future 'like'-style endpoint.

        A per-client-IP like-counting implementation previously lived here
        but was commented out; the dead code has been removed.  POST
        currently does nothing and returns no response body.
        """
        pass
# Ask for the distance driven (km) and the number of days a rented car was
# kept, then print the total price: R$60 per day plus R$0.15 per km driven.
distance_km = float(input("Quantos km percorreu?: "))
rental_days = int(input("Quantos dias ele foi alugado?: "))
total_price = distance_km * 0.15 + rental_days * 60
print("O valor a ser pago é: R${:.2f}".format(total_price))
4816039 | <filename>pythonv4/ScharfSandhiTest.py<gh_stars>1-10
"""
ScharfSandhiTest.py May 22, 2015
Jul 20, 2015
May 11, 2020. Revise for python3
"""
from scharfsandhi import ScharfSandhi
def simple_sandhioptions(code, sandhi):
    """Apply one of the canned sandhi option presets to *sandhi*.

    Recognized codes: 'C', 'E', 'E1', 'E2' ('E2' additionally enables
    lopah_v).  Any other code sets sandhi.Error to 5.  Returns
    sandhi.Error (0 on success).
    """
    presets = {
        'C': ("C", "N", "S", ""),
        'E': ("E", "N", "S", "Y"),
        'E1': ("E", "N", "S", ""),
        'E2': ("E", "N", "S", "Y"),
    }
    if code in presets:
        sandhi.sandhioptions(*presets[code])
        if code == 'E2':
            sandhi.lopah_v = True
    else:
        sandhi.Error = 5
    return sandhi.Error
def testfile(filein, fileknown, sopt):
    """Run the sandhi engine over *filein* and diff against *fileknown*.

    Each line of the input file is transformed with the option preset
    *sopt* and compared with the corresponding line of the known-good
    file; mismatches are printed, followed by a summary count.
    """
    with open(filein, "r") as f:
        lines = [l.rstrip('\r\n ') for l in f]
    with open(fileknown, "r") as f:
        correct = [l.rstrip('\r\n ') for l in f]
    sandhi = ScharfSandhi()
    err = simple_sandhioptions(sopt, sandhi)
    if err != 0:
        print("ERROR", err, " sopt must be E, E1, or C, not", sopt)
        exit(1)
    nok = 0
    for i, line in enumerate(lines):
        out = sandhi.sandhi(line)
        known = correct[i]
        if out == known:
            nok += 1
        else:
            print("Problem at line", i)
            print("   input:", line)
            print(" options:", sopt)
            print("computed:", out)
            print("standard:", known)
            print("========================================")
    print("Test Results:")
    print("Input:", filein)
    print("Standard:", fileknown)
    print(nok, "input lines were ok out of", len(lines), "total lines.")
if __name__ == '__main__':
    # Command line: <options-code> <input-file> <known-good-file>
    import sys
    sopt = sys.argv[1]
    filein = sys.argv[2]
    fileknown = sys.argv[3]
    testfile(filein,fileknown,sopt)
| StarcoderdataPython |
172368 | from .enums import *
from .structs import *
from .api import *
| StarcoderdataPython |
1656736 | <gh_stars>0
import os
import boto3
import json
import csv
# Scan SES event-tracking logs stored in S3 and extract click events into a
# CSV report of (recipient, 'Clicked', link) rows.
s3 = boto3.resource('s3')
bucket = s3.Bucket('tracking-metrics')
# Clicks on the unsubscribe page are not real engagement; exclude them.
UNSUB_LINK = 'http://email-unsub.s3-website-us-east-1.amazonaws.com'
with open('florence_sept_links.csv', 'w', newline='') as csv_out:
    csv_writer = csv.writer(csv_out)
    # Date prefixes (YYYY/MM/DD) to scan within the bucket.  (Earlier runs
    # used other date ranges; the dead commented-out lists were removed.)
    files = ['2018/09/23', '2018/09/24', '2018/09/25', '2018/09/26', '2018/09/27', '2018/09/28', '2018/09/29', '2018/09/30', '2018/10/01', '2018/10/02', '2018/10/03', '2018/10/04', '2018/10/05', '2018/10/06', '2018/10/07', '2018/10/08', '2018/10/09', '2018/10/10']
    for prefix in files:
        # bucket.objects.filter handles S3 pagination transparently; each
        # obj is an ObjectSummary, so .get() is needed to fetch the body.
        for obj in bucket.objects.filter(Prefix=prefix):
            key = obj.key
            body = obj.get()['Body'].read().decode('utf-8')
            print(key)
            # Each object holds one JSON event per line.
            for line in body.splitlines():
                json_data = json.loads(line)
                print(json_data['eventType'])
                print(json_data['mail']['destination'][0])
                if json_data['eventType'] == 'Click' and json_data['click']['link'] != UNSUB_LINK:
                    csv_writer.writerow([json_data['mail']['destination'][0], 'Clicked', json_data['click']['link']])
97668 | #!/usr/bin/env python
#
# :History:
#
# 10 Aug 2018: Created.
#
# @author: <NAME> (UKATC)
#
"""
Script `cdp_correct_wildcard` corrects wildcard references within CDP metadata.
Prior to the CDP-7 release, MIRI CDPs would set a metadata keywords to 'ANY'
to indicate that the CDP was valid for any variant of that CDP (e.g. FILTER='ANY').
From CDP-7 onwards, the naming convention is changed so the string 'N/A' is
used instead, which is more compatible with the JWST CRDS searching mechanism.
This script checks the keywords contained in a CDP file and changes all
occurrences of THING='ANY' to THING='N/A'.
The following command arguments are defined by position::
inputfile[0]
The path+name of the file to be read. Compulsory.
outputfile[1]
The path+name of the file to be written.
Optional. Defaults to the same name as inputfile with "_out" appended.
The command also takes the following options::
--verbose or -v
Generate more verbose output.
--overwrite or -o
Overwrite wildcard existing FITS file.
"""
# Python logging facility.
import logging
# Set the default logging level.
logging.basicConfig(level=logging.INFO)
# Get a default parent logger
logger = logging.getLogger("cdp_correct_wildcard")
import optparse
import sys, time
import miri.datamodels
def correct_wildcard_metadata( datamodel ):
    """
    Replace the pre-CDP-7 wildcard string 'ANY' with the CRDS-compatible
    'N/A' in the MIRI metadata keywords of the given data model.

    :Parameters:

    datamodel: MiriDataModel
        The calibration data model whose metadata is to be updated.

    :Returns:

    nchanges: int
        The number of keywords that were changed.

    :Raises:

    TypeError
        If the data model lacks the expected MIRI instrument, exposure
        and subarray metadata attributes.
    """
    nchanges = 0
    if hasattr(datamodel, 'meta') and hasattr(datamodel.meta, 'instrument') and \
       hasattr(datamodel.meta, 'exposure') and hasattr(datamodel.meta, 'subarray'):
        # Each (object, attribute) pair names one keyword that may hold the
        # old-style wildcard.  This replaces eight copy-pasted if-blocks.
        targets = [
            (datamodel.meta.instrument, 'model'),
            (datamodel.meta.instrument, 'detector'),
            (datamodel.meta.instrument, 'detector_settings'),
            (datamodel.meta.instrument, 'filter'),
            (datamodel.meta.instrument, 'channel'),
            (datamodel.meta.instrument, 'band'),
            (datamodel.meta.exposure, 'readpatt'),
            (datamodel.meta.subarray, 'name'),
        ]
        for obj, attr in targets:
            value = getattr(obj, attr)
            if value is not None and str(value).strip() == 'ANY':
                setattr(obj, attr, 'N/A')
                nchanges += 1
    else:
        strg = "MIRI instrument, exposure and subarray metadata attributes missing from data model %s" % \
            datamodel.__class__.__name__
        raise TypeError(strg)
    return nchanges
if __name__ == "__main__":
    # Parse arguments
    help_text = __doc__
    usage = "%prog [opt] inputfile outputfile\n"
    usage += "Corrects the wildcard usage (\'ANY\'-->\'N/A\') within a "
    usage += "MIRI calibration data product."
    parser = optparse.OptionParser(usage)
    parser.add_option("-v", "--verbose", dest="verb", action="store_true",
                      help="Verbose mode"
                     )
    parser.add_option("-o", "--overwrite", dest="overwrite", action="store_true",
                      help="Overwrite the copy of the file if it already exists"
                     )
    (options, args) = parser.parse_args()
    # First positional argument is the input file; the output file name is
    # optional and defaults to "<inputfile>_out.fits".
    try:
        inputfile = args[0]
        if len(args) > 1:
            outputfile = args[1]
        else:
            outputfile = inputfile + "_out.fits"
    except IndexError:
        print(help_text)
        time.sleep(1) # Ensure help text appears before error messages.
        parser.error("Not enough arguments provided")
        sys.exit(1)
    verb = options.verb
    overwrite = options.overwrite
    # Open the data model using the class derived from the data type.
    with miri.datamodels.open( init=inputfile ) as datamodel:
        # Attempt to correct the wildcards in the metadata keywords; only
        # write an output file if something actually changed.
        logger.info("Analysing %s..." % inputfile)
        nchanges = correct_wildcard_metadata( datamodel )
        if verb:
            print(datamodel)
            print(datamodel.get_history_str())
        if nchanges > 0:
            datamodel.save( outputfile, overwrite=overwrite)
            logger.info("%d changes made. Data saved to new file, %s\n" % (nchanges, outputfile))
        else:
            logger.info("Data not changed. No output file written.\n")
        del datamodel
| StarcoderdataPython |
46530 | <filename>paws/lib/python2.7/site-packages/requestbuilder-0.7.1-py2.7.egg/requestbuilder/mixins/formatting.py
# Copyright (c) 2012-2016 Hewlett Packard Enterprise Development LP
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import operator
import sys
try:
import prettytable
except ImportError:
pass
from requestbuilder import Arg
class TabifyingMixin(object):
    """
    Command mixin supplying tabify() plus the --show-empty-fields command
    line argument that controls how empty values are rendered.
    """
    ARGS = [Arg('--show-empty-fields', action='store_true', route_to=None,
                help='show empty values as "(nil)"')]
    def tabify(self, fields, include=None):
        """
        Join a list of values with tabs.  Nonzero values that Python
        considers false are shown as-is when they appear in *include*,
        rendered as '(nil)' when the user passed --show-empty-fields at
        the command line, and blanked out otherwise.
        """
        fstr = '(nil)' if self.args['show_empty_fields'] else ''
        filtered = _filter_row_values(fields, fstr, include=include)
        return '\t'.join(str(field) for field in filtered)
class TableOutputMixin(object):
    """Command mixin that builds pre-configured _FilteredTable instances."""
    ARGS = [Arg('--show-headers', action='store_true', route_to=None,
                help='show column headers'),
            Arg('--show-empty-fields', action='store_true', route_to=None,
                help='show empty field values as "(nil)"')]
    def get_table(self, field_names):
        """Return a borderless, left-aligned table for *field_names*."""
        table = _FilteredTable(field_names=field_names,
                               show_empty=self.args.get('show_empty_fields'))
        settings = {
            'border': False,
            'header': self.args.get('show_headers') or False,
            'header_style': 'upper',
            'align': 'l',  # left-justify every column
            'left_padding_width': 0,
            'right_padding_width': 2,
        }
        for attr, value in settings.items():
            setattr(table, attr, value)
        return table
# Define _FilteredTable against prettytable when it imported successfully,
# otherwise fall back to a minimal tab-separated stand-in with the same
# interface subset (add_row, sortby, get_string, header, reversesort).
if 'prettytable' in sys.modules:
    class _FilteredTable(prettytable.PrettyTable):
        # PrettyTable subclass that substitutes empty values on add_row.
        def __init__(self, show_empty=False, **kwargs):
            if show_empty:
                self.__empty = '(nil)'
            else:
                self.__empty = ''
            prettytable.PrettyTable.__init__(self, **kwargs)
        def add_row(self, row):
            # Filter empties before delegating to PrettyTable.
            prettytable.PrettyTable.add_row(
                self, _filter_row_values(row, self.__empty))
else:
    # "UglyTable": plain tab-separated fallback when prettytable is absent.
    class _FilteredTable(object):
        def __init__(self, field_names, show_empty=False):
            self.field_names = field_names
            self.header = False
            self.reversesort = False
            self._rows = []
            self._sortindex = 0  # column index used for sorting
            if show_empty:
                self.__empty = '(nil)'
            else:
                self.__empty = ''
        def add_row(self, row):
            # Mirror PrettyTable's length validation.
            if len(row) != len(self.field_names):
                raise ValueError('row has incorrect number of values '
                                 '({0} given, {1} expected)'
                                 .format(len(row), len(self.field_names)))
            self._rows.append(_filter_row_values(row, self.__empty))
        @property
        def sortby(self):
            # Name of the column currently used for sorting.
            return self.field_names[self._sortindex]
        @sortby.setter
        def sortby(self, field):
            self._sortindex = self.field_names.index(field)
        def get_string(self):
            # Render header (upper-cased) plus sorted rows, tab-separated.
            lines = []
            if self.header:
                lines.append('\t'.join(name.upper() for name in
                                       self.field_names))
            for row in sorted(self._rows, reverse=self.reversesort,
                              key=operator.itemgetter(self._sortindex)):
                lines.append('\t'.join(map(str, row)))
            return '\n'.join(lines)
        def __str__(self):
            return self.get_string()
def _filter_row_values(row, empty_str, include=None):
filtered = []
for field in row:
# pylint: disable=superfluous-parens
if (field or field is 0 or (isinstance(field, float) and field == 0)
or field in (include or [])):
filtered.append(field)
else:
filtered.append(empty_str)
# pylint: enable=superfluous-parens
return filtered
| StarcoderdataPython |
3252473 | import threading
from modules.const import Const
from modules.lib.agent_utils import get_mac
from modules.lib.report_queue import ReportQueue
from modules.lib.reporter_manager import ReporterManager
from modules.things_cloud.device import ThingsCloudDevice
from modules.things_cloud.operation_handler import OperationDispatcher
from modules.things_cloud.report_repository import ThingsCloudReportRepository
from modules.things_cloud.alarm_repository import ThingsCloudAlarmRepository
class ThingsCloud:
    """Agent-side facade for the ThingsCloud service.

    Wires reporters, report/alarm queues and the cloud device together,
    applies interval/alarm parameters, and starts one background thread
    per loopable component (report repo, alarm repo, manager, dispatcher).
    """
    def set_cloud_interval(self, interval):
        # None means "use the default cloud reporting interval".
        if interval is None:
            interval = Const.DEFAULT_CLOUD_REPORT_INTERVAL_SEC
        self.__report_repo.interval = interval
        self.__dispatcher.interval = interval
    def set_reporter_interval(self, key, interval):
        # Apply the interval to every reporter whose data type matches key.
        reporters = (x for x in self.__reporters if x.data_type() == key)
        for reporter in reporters:
            reporter.interval = interval
    def __listen_to_reporters(self):
        # Interval 0 means the reporter is disabled (registered as a no-op).
        for reporter in self.__reporters:
            if reporter.interval == 0:
                self.__manager.add_nop(reporter)
            else:
                self.__manager.listen_to(reporter)
    def __apply_params(self, params):
        # Push the cloud interval plus per-reporter interval and alarm
        # conditions from the params object onto the live components.
        self.set_cloud_interval(params.interval('cloud'))
        for reporter in self.__reporters:
            reporter_name = reporter.data_type()
            reporter.interval = params.interval(reporter_name)
            reporter.set_alarm_condition(params.alarms(reporter_name))
    def upate_config(self, userame, password):
        # NOTE(review): "upate"/"userame" are typos, but this method is
        # passed to ThingsCloudDevice as a callback and may be part of the
        # public interface, so it is not renamed here.
        self.__config.update(userame, password)
    def update_params_with_json(self, params):
        # Callback invoked by the device with new parameters as JSON.
        self.__params.update_with_json(params)
        self.__apply_params(self.__params)
    def __init__(self, reporters, config, params, led):
        print('Hello ThingsCloud')
        # Device id derived from the MAC address, e.g. "armadillo-aabbcc...".
        unique_id = 'armadillo-' + get_mac()
        if not config.is_valid():
            raise Exception("The config is invalid: some fields are not set!")
        self.__config = config
        self.__device = ThingsCloudDevice(config, self.upate_config,
                                          params, self.update_params_with_json,
                                          unique_id, led)
        self.__report_queue = ReportQueue()
        self.__alarm_queue = ReportQueue()
        report_interval = Const.DEFAULT_CLOUD_REPORT_INTERVAL_SEC
        self.__report_repo = ThingsCloudReportRepository(self.__report_queue,
                                                         self.__device,
                                                         report_interval)
        # Alarms are flushed on a fixed 1-second cadence.
        self.__alarm_repo = ThingsCloudAlarmRepository(self.__alarm_queue,
                                                       self.__device, 1)
        self.__manager = ReporterManager(
            self.__report_queue, self.__alarm_queue)
        self.__params = params
        self.__dispatcher = OperationDispatcher(self.__device,
                                                params.interval('cloud'))
        self.__reporters = reporters
        self.__apply_params(params)
        self.__listen_to_reporters()
        # Start one daemonless thread per loopable component.
        loopables = [
            self.__report_repo,
            self.__alarm_repo,
            self.__manager,
            self.__dispatcher
        ]
        for loopable in loopables:
            # NOTE(review): self.thread is overwritten each iteration, so
            # only the last Thread object remains referenced -- the earlier
            # threads keep running but cannot be joined later.
            self.thread = threading.Thread(target=loopable.start_loop)
            self.thread.start()
| StarcoderdataPython |
3320615 | <gh_stars>0
import cv2
import numpy as np
from keras.models import load_model
from img_processing import scale_and_centre
# Cache for the digit-recognition model so it is read from disk only once.
_MODEL = None


def _get_model():
    """Load the Keras digit model lazily and reuse it on later calls."""
    global _MODEL
    if _MODEL is None:
        _MODEL = load_model('Model/model.h5')
    return _MODEL


def predict(img_grid):
    """Return the digit (0-9) predicted for a single sudoku cell image.

    img_grid: grayscale cell image of any size; it is resized to 28x28 and
    scaled to [0, 1] before being fed to the model.
    """
    image = img_grid.copy()
    image = cv2.resize(image, (28, 28))
    image = image.astype('float32')
    image = image.reshape(1, 28, 28, 1)
    image /= 255
    # The original reloaded the model from disk on every call, i.e. 81 times
    # per sudoku; the cached model gives identical predictions much faster.
    model = _get_model()
    pred = model.predict(image, batch_size=1)
    return pred.argmax()
def extract_number_from_image(img_grid):
    """Build a 9x9 grid of recognised digits from per-cell images.

    img_grid: 9x9 nested sequence of grayscale cell images.
    Returns a 9x9 list of ints where 0 marks a cell with no detected digit.
    """
    tmp_sudoku = [[0 for i in range(9)] for j in range(9)]
    # 128 is the midpoint between black and white in 8-bit grayscale.
    thresh = 128
    for i in range(9):
        for j in range(9):
            image = cv2.resize(img_grid[i][j], (28, 28))
            gray = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
            # findContours returns (contours, hierarchy) or
            # (image, contours, hierarchy) depending on the OpenCV version.
            cnts = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0] if len(cnts) == 2 else cnts[1]
            for c in cnts:
                x, y, w, h = cv2.boundingRect(c)
                if x < 3 or y < 3 or h < 3 or w < 3:
                    # The digit is always centred in the 28x28 cell, so a
                    # contour touching the border (x/y < 3) or thinner than
                    # 3 px is grid-line residue, not a digit.
                    continue
                roi = scale_and_centre(gray[y:y + h, x:x + w], 120)
                tmp_sudoku[i][j] = predict(roi)
    return tmp_sudoku
| StarcoderdataPython |
from datetime import datetime
import json
import os
import shutil
import sys
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils.dateparse import parse_datetime
from django.utils.timezone import utc
from django.conf import settings
from container.models import ContainerFamily, ContainerArgument, Container
from file_access_utils import compute_md5
from librarian.models import Dataset
from portal.management.commands.reset import Command as ResetCommand
class FixtureBuilder:
    """Base class for scripts that rebuild a JSON test fixture.

    Subclasses implement get_name() and build(); run() resets the database,
    dumps its state before and after build(), and writes only the difference
    (with normalised timestamps and relocated dataset files) to
    portal/fixtures/<name>.
    """
    def __init__(self):
        self.next_output_num = self.next_step_num = 1

    def get_name(self):
        """ Return the fixture file's name. """
        raise NotImplementedError()

    def build(self):
        """ Build all the records that should be in the fixture. """
        raise NotImplementedError()

    def run(self):
        """Reset the database, build the fixture, and write the diff dump."""
        print("--------")
        print(self.get_name())
        print("--------")
        call_command('reset')
        before_filename = 'test_fixture_before.json'
        self.dump_all_data(before_filename)
        self.build()
        after_filename = 'test_fixture_after.json'
        self.dump_all_data(after_filename)
        # 'rU' (universal-newline) mode was deprecated and removed in
        # Python 3.11; plain 'r' behaves identically for this JSON data.
        with open(before_filename, 'r') as jsonfile:
            before_objects = json.load(jsonfile)
        with open(after_filename, 'r') as jsonfile:
            after_objects = json.load(jsonfile)
        # Keep only the objects that build() added: both dumps are emitted
        # in the same order, so walk them in lockstep.
        dump_objects = []
        before_index = 0
        for after_object in after_objects:
            before_object = (before_index < len(before_objects) and
                             before_objects[before_index])
            if after_object == before_object:
                before_index += 1
            else:
                dump_objects.append(after_object)
        self.replace_timestamps(dump_objects)
        self.rename_dataset_files(os.path.join(settings.MEDIA_ROOT,
                                               Dataset.UPLOAD_DIR),
                                  dump_objects)
        dump_filename = os.path.join('portal', 'fixtures', self.get_name())
        with open(dump_filename, 'w') as dump_file:
            json.dump(dump_objects, dump_file, indent=4, sort_keys=True)
        os.remove(before_filename)
        os.remove(after_filename)
        # Snapshot every reset target directory into FixtureFiles/<fixture>/.
        for target in ResetCommand.TARGETS:
            target_path = os.path.join(settings.MEDIA_ROOT, target)
            fixture_name, _extension = os.path.splitext(self.get_name())
            fixture_files_path = os.path.join("FixtureFiles", fixture_name, target)
            # Out with the old...
            if os.path.isdir(fixture_files_path):
                shutil.rmtree(fixture_files_path)
            # ... in with the new.
            if os.path.isdir(target_path):
                shutil.copytree(target_path, fixture_files_path)
                self.remove_empty_folders(fixture_files_path)

    def remove_empty_folders(self, path):
        """Recursively delete *path* and its subfolders when they hold no files."""
        is_empty = True
        for name in os.listdir(path):
            child_path = os.path.join(path, name)
            if os.path.isdir(child_path):
                self.remove_empty_folders(child_path)
            if os.path.exists(child_path):
                is_empty = False
        if is_empty:
            os.rmdir(path)

    def fillpathset(self, orgset):
        """ Given a set of directory name strings, create a new set of strings that contains
        the intermediate directory names as well as the original strings.
        E.g.
        input: ( 'micall/core', micall/bla/goo', 'micall/utils' )
        output:
        ( 'micall/core', micall/bla/goo', 'micall/utils', 'micall', 'micall/bla' )
        """
        newset = set()
        for pathname in orgset:
            clst = pathname.split(os.sep)
            for n in range(len(clst)):
                newset.add(os.path.join(*clst[:n+1]))
        return newset

    def _rdfilelst(self, fnamelst):
        """Given a list of file names, return a list of strings, where
        each string is the contents of that file.
        """
        rlst = []
        for fn in fnamelst:
            with open(fn, "r") as f:
                rlst.append(f.read())
        return rlst

    def dump_all_data(self, filename):
        """Dump the entire database as JSON to *filename* via `dumpdata`."""
        with open(filename, "w") as fixture_file:
            # call_command writes to stdout; temporarily redirect it to the file.
            old_stdout = sys.stdout
            sys.stdout = fixture_file
            try:
                call_command("dumpdata", indent=4)
            finally:
                sys.stdout = old_stdout

    def rename_dataset_files(self, dataset_path, dump_objects):
        """Move dumped dataset files under .../fixtures and rewrite their paths."""
        source_root, source_folder = os.path.split(dataset_path)
        fixtures_path = os.path.join(dataset_path, 'fixtures')
        if not os.path.isdir(fixtures_path):
            os.makedirs(fixtures_path)
        for dump_object in dump_objects:
            if dump_object['model'] == 'archive.dataset':
                file_path = dump_object['fields']['dataset_file']
                source_path = os.path.join(source_root, file_path)
                file_name = os.path.basename(file_path)
                target_path = os.path.join(fixtures_path, file_name)
                os.rename(source_path, target_path)
                new_file_path = os.path.join(source_folder, 'fixtures', file_name)
                dump_object['fields']['dataset_file'] = new_file_path

    def replace_timestamps(self, dump_objects):
        """Rewrite timestamp-like fields to stable values anchored at 2000-01-01.

        The relative ordering of the original timestamps is preserved so
        fixtures stay deterministic across rebuilds.
        """
        date_map = {}  # {old_date: new_date}
        field_names = set()
        for dump_object in dump_objects:
            for field, value in dump_object['fields'].items():
                if value is not None and (field.endswith('time') or
                                          field.startswith('date') or
                                          field.endswith('DateTime') or
                                          field == 'last_login'):
                    field_names.add(field)
                    date_map[value] = None
        # The original did `date_map.keys()` followed by `.sort()`, which
        # only works on Python 2 (dict views have no sort()); sorted() is
        # correct on both Python 2 and 3.
        old_dates = sorted(date_map)
        offset = None
        for old_date in old_dates:
            old_datetime = parse_datetime(old_date)
            if offset is None:
                offset = datetime(2000, 1, 1, tzinfo=utc) - old_datetime
            rounded = (old_datetime + offset).replace(microsecond=0, tzinfo=None)
            date_map[old_date] = rounded.isoformat() + 'Z'
        for dump_object in dump_objects:
            for field, value in dump_object['fields'].items():
                if value is not None and field in field_names:
                    dump_object['fields'][field] = date_map[value]

    def create_cable(self, source, dest):
        """ Create a cable between two pipeline objects.
        @param source: either a PipelineStep or one of the pipeline's
            TransformationInput objects for the cable to use as a source.
        @param dest: either a PipelineStep or the Pipeline for the cable to use
            as a destination.
        """
        try:
            source_output = source.transformation.outputs.first()
            source_step_num = source.step_num
        except AttributeError:
            # must be a pipeline input
            source_output = source
            source_step_num = 0
        try:
            cable = dest.cables_in.create(dest=dest.transformation.inputs.first(),
                                          source=source_output,
                                          source_step=source_step_num)
        except AttributeError:
            # must be a pipeline output
            cable = dest.create_raw_outcable(source.name,
                                             self.next_output_num,
                                             source.step_num,
                                             source_output)
            self.next_output_num += 1
        return cable

    def create_step(self, pipeline, method, input_source):
        """ Create a pipeline step.
        @param pipeline: the pipeline that will contain the step
        @param method: the method for the step to run
        @param input_source: either a pipeline input or another step that this
            step will use for its input.
        """
        step = pipeline.steps.create(transformation=method,
                                     name=method.family.name,
                                     step_num=self.next_step_num)
        self.create_cable(input_source, step)
        step.clean()
        self.next_step_num += 1
        return step

    def set_position(self, objlst):
        """Set the x, y screen coordinates of the objects in the list
        along a diagonal line (top left to bottom right)
        """
        n = len(objlst)
        for i, obj in enumerate(objlst, 1):
            obj.x = obj.y = float(i)/(n+1)
            obj.save()
class ContainerRunBuilder(FixtureBuilder):
    """For testing the tools that find datasets in a sandbox."""
    def get_name(self):
        # File name of the fixture written under portal/fixtures/.
        return "container_run.json"
    def build(self):
        # Requires at least one user from the freshly reset database.
        user = User.objects.first()
        assert user is not None
        input_path = os.path.abspath(os.path.join(
            __file__,
            '../../../../../samplecode/singularity/host_input/example_names.csv'))
        # Create a container family plus one container record whose md5 is
        # taken from the sample singularity image shipped with the repo.
        family = ContainerFamily.objects.create(name='fixture family', user=user)
        container_path = os.path.abspath(os.path.join(
            __file__,
            '../../../../../samplecode/singularity/python2-alpine-trimmed.simg'))
        with open(container_path, "rb") as f:
            container_md5 = compute_md5(f)
        container = family.containers.create(
            tag='vFixture',
            user=user,
            file='Containers/kive-default.simg',
            md5=container_md5
        )
        # One app with a single input argument and a single output argument.
        app = container.apps.create()
        arg1 = app.arguments.create(type=ContainerArgument.INPUT,
                                    name='names_csv',
                                    position=1)
        app.arguments.create(type=ContainerArgument.OUTPUT,
                             name='greetings_csv',
                             position=2)
        dataset = Dataset.create_dataset(input_path,
                                         name='names.csv',
                                         user=user)
        run = app.runs.create(name='fixture run', user=user)
        run.sandbox_path = ""  # blank this out as it won't be accessible in testing anyway
        run.slurm_job_id = None  # this also would cause tests to fail on a fresh system
        run.save(schedule=False)  # scheduling would overwrite sandbox_path
        run.datasets.create(argument=arg1, dataset=dataset)
        upload_path = os.path.join(settings.MEDIA_ROOT, Container.UPLOAD_DIR)
        readme_path = os.path.join(upload_path, 'README.md')
        # NOTE(review): makedirs raises if the folder already exists -
        # presumably the preceding reset() guarantees a clean MEDIA_ROOT.
        os.makedirs(upload_path)
        with open(readme_path, 'w') as f:
            f.write('Just a placeholder to create the folder for containers.')
class Command(BaseCommand):
    """Management command that rebuilds the test fixtures from scratch."""
    help = "Update test fixtures by running scripts and dumping test data."

    def handle(self, *args, **options):
        # Run the single fixture builder, then report completion.
        builder = ContainerRunBuilder()
        builder.run()
        self.stdout.write('Done.')
| StarcoderdataPython |
192302 | <reponame>tamahassam/farmer
from .plot_history import plot_history
from .history import *
# galleries/sql/queries/data_retriever.py
import abc
import numpy as np
from typing import List, Any, Dict, Optional
from galleries.annotations_filtering.filter import FilterStatement
class SqlDataRetriever(abc.ABC):
    """Interface for objects that fetch gallery images and annotations via SQL.

    Inheriting from abc.ABC makes the @abc.abstractmethod decorators
    effective: the original plain class could still be instantiated despite
    its abstract methods, because abstractmethod only takes effect on
    ABCMeta-based classes.
    """
    @abc.abstractmethod
    def get_indices(self, cursor, filters: List[List[FilterStatement]] = None):
        """Return the indices matching *filters* (all indices when None)."""
    @abc.abstractmethod
    def get_annotations_by_index(self, cursor, index: Any) -> dict:
        """Return the annotation mapping stored for *index*."""
    @abc.abstractmethod
    def get_image_by_index(self, cursor, index: Any) -> np.ndarray:
        """Return the image stored for *index* as a numpy array."""
    @abc.abstractmethod
    def get_annotations_types(self) -> Optional[Dict[str, type]]:
        """Return the type of each annotation field, or None if unknown."""
    @abc.abstractmethod
    def get_discrete_annotations_values(self) -> Dict[str, list]:
        """Return the admissible values for each discrete annotation field."""
1685463 | <gh_stars>1-10
#!/usr/bin/env python3
# This script is used to publish Cargo to crates.io.
import os
import re
import subprocess
import time
import urllib.request
from urllib.error import HTTPError
# Paths (relative to the repo root) of the packages to publish, in dependency
# order: helper crates first, the top-level cargo crate ('.') last.
TO_PUBLISH = [
    'crates/cargo-platform',
    'crates/crates-io',
    '.',
]
def already_published(name, version):
    """Return True when *name* at *version* already exists on crates.io.

    A 404 from the download endpoint means the version is absent; any other
    HTTP error is re-raised.
    """
    url = 'https://crates.io/api/v1/crates/%s/%s/download' % (name, version)
    try:
        # Use the response as a context manager so the connection is closed;
        # the original bare urlopen() call leaked it.
        with urllib.request.urlopen(url):
            pass
    except HTTPError as e:
        if e.code == 404:
            return False
        raise
    return True
def maybe_publish(path):
    """Publish the crate in *path* unless its current version is on crates.io.

    Returns True when a publish was performed, False when it was skipped.
    """
    # Close the manifest promptly; the original open(...).read() leaked the
    # file handle until garbage collection.
    with open(os.path.join(path, 'Cargo.toml')) as manifest:
        content = manifest.read()
    name = re.search('^name = "([^"]+)"', content, re.M).group(1)
    version = re.search('^version = "([^"]+)"', content, re.M).group(1)
    if already_published(name, version):
        print('%s %s is already published, skipping' % (name, version))
        return False
    subprocess.check_call(['cargo', 'publish', '--no-verify'], cwd=path)
    return True
def main():
    """Publish every crate in TO_PUBLISH, pausing between uploads."""
    print('Starting publish...')
    last = len(TO_PUBLISH) - 1
    for i, path in enumerate(TO_PUBLISH):
        published = maybe_publish(path)
        # Sleep to allow the index to update. This should probably
        # check that the index is updated, or use a retry loop
        # instead.
        if published and i < last:
            time.sleep(5)
    print('Publish complete!')
# Entry point: only run the publish sequence when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
import sys as _sys
if _sys.version_info < (3,5):
raise RuntimeError('expectorant requires Python 3.5 or higher but the current vesion is: {}.{}'.format(_sys.version_info.major, _sys.version_info.minor))
from . import spec
from . import singletons
from .expector import * # all the matchers
from .runner import load_specs, run_specs, main, Scope
#
# Use these global functions for syntactic sugar
#
# Module-level aliases bound to the shared global suite, so user code can
# call describe/it/before/after directly instead of managing a Suite object.
context = singletons.global_suite.context
describe = singletons.global_suite.describe
it = singletons.global_suite.it
before = singletons.global_suite.before
after = singletons.global_suite.after
# Shared Expector wired to the global outcome collector used by the runner.
expect = Expector(singletons.global_outcomes)
| StarcoderdataPython |
# examples/Cluster-based_Input_Weight_Initialization_for_Echo_State_Networks.py
import time
import glob
import os
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, ParameterGrid, cross_val_score
from sklearn.utils import shuffle
from sklearn.utils.fixes import loguniform
from scipy.stats import uniform
from sklearn.cluster import MiniBatchKMeans
from joblib import dump, load
from pyrcn.echo_state_network import SeqToLabelESNClassifier
from pyrcn.base import PredefinedWeightsInputToNode, NodeToNode
from pyrcn.metrics import accuracy_score
from pyrcn.model_selection import SequentialSearchCV
import matplotlib
import seaborn as sns
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
#Options
# Global matplotlib styling for the figures below: LaTeX text rendering with
# a serif (Times) font and uniform 8 pt tick/axis labels.
plt.rc('image', cmap='RdBu')
plt.rc('font', family='serif', serif='Times')
plt.rc('text', usetex=True)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('axes', labelsize=8)
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import ticker
# Load the spoken-Arabic-digit dataset and build object arrays of sequences.
arab = np.load(r"E:\multivariate_time_series_dataset\numpy\ARAB.npz")
X_train = np.empty(shape=(6600, ), dtype=object)
y_train = np.empty(shape=(6600, ), dtype=object)
X_test = np.empty(shape=(2200, ), dtype=object)
y_test = np.empty(shape=(2200, ), dtype=object)
for k, (X, y) in enumerate(zip(arab['X'], arab['Y'])):
    X_train[k] = X[X.sum(axis=1)!=0, :]  # Sequences are zeropadded -> should we remove zeros? if not, X_train[k] = X
    y_train[k] = np.tile(y, (X_train[k].shape[0], 1))
# Standardise using statistics from the (unpadded) training sequences only.
scaler = StandardScaler().fit(np.concatenate(X_train))
for k, X in enumerate(X_train):
    X_train[k] = scaler.transform(X=X)  # Sequences are zeropadded -> should we remove zeros? if not, X_train[k] = X
X_train, y_train = shuffle(X_train, y_train, random_state=0)
for k, (X, y) in enumerate(zip(arab['Xte'], arab['Yte'])):
    X_test[k] = scaler.transform(X=X[X.sum(axis=1)!=0, :])  # Sequences are zeropadded -> should we remove zeros? if not, X_train[k] = X
    y_test[k] = np.tile(y, (X_test[k].shape[0], 1))
# ESN hyperparameters that stay fixed during the sequential search.
initially_fixed_params = {'hidden_layer_size': 50,
                          'k_in': 10,
                          'input_scaling': 0.4,
                          'input_activation': 'identity',
                          'bias_scaling': 0.0,
                          'spectral_radius': 0.0,
                          'leakage': 0.1,
                          'k_rec': 10,
                          'reservoir_activation': 'tanh',
                          'bi_directional': False,
                          'wash_out': 0,
                          'continuation': False,
                          'alpha': 1e-3,
                          'random_state': 42}
# Search spaces for the four sequential optimisation steps.
step1_esn_params = {'input_scaling': uniform(loc=1e-2, scale=1),
                    'spectral_radius': uniform(loc=0, scale=2)}
step2_esn_params = {'leakage': loguniform(1e-5, 1e0)}
step3_esn_params = {'bias_scaling': np.linspace(0.0, 1.0, 11)}
step4_esn_params = {'alpha': loguniform(1e-5, 1e1)}
kwargs_step1 = {'n_iter': 200, 'random_state': 42, 'verbose': 1, 'n_jobs': 1, 'scoring': make_scorer(mean_squared_error, greater_is_better=False, needs_proba=True)}
kwargs_step2 = {'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 'scoring': make_scorer(mean_squared_error, greater_is_better=False, needs_proba=True)}
kwargs_step3 = {'verbose': 1, 'n_jobs': -1, 'scoring': make_scorer(mean_squared_error, greater_is_better=False, needs_proba=True)}
kwargs_step4 = {'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 'scoring': make_scorer(mean_squared_error, greater_is_better=False, needs_proba=True)}
# The searches are defined similarly to the steps of a sklearn.pipeline.Pipeline:
searches = [('step1', RandomizedSearchCV, step1_esn_params, kwargs_step1),
            ('step2', RandomizedSearchCV, step2_esn_params, kwargs_step2),
            ('step3', GridSearchCV, step3_esn_params, kwargs_step3),
            ('step4', RandomizedSearchCV, step4_esn_params, kwargs_step4)]
# Fixed: the file imports SeqToLabelESNClassifier, but this line referenced
# the undefined name SeqToSeqESNClassifier (NameError at runtime).
base_esn = SeqToLabelESNClassifier(**initially_fixed_params)
base_esn.fit(X_train, y_train)
try:
    # Reuse a previous search result if one was saved to disk.
    sequential_search = load("../sequential_search_arab.joblib")
except FileNotFoundError:
    sequential_search = SequentialSearchCV(base_esn, searches=searches).fit(X_train, y_train)
    dump(sequential_search, "../sequential_search_arab.joblib")
import unittest
from datetime import datetime
from botocore.stub import Stubber
from freezegun import freeze_time
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.source.glue import GlueSource, GlueSourceConfig, get_column_type
from datahub.ingestion.source.metadata_common import MetadataWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.common import AuditStamp, Status
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import DatasetSnapshot
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.com.linkedin.pegasus2avro.schema import (
ArrayTypeClass,
MapTypeClass,
MySqlDDL,
NumberTypeClass,
SchemaField,
SchemaFieldDataType,
SchemaMetadata,
StringTypeClass,
)
from datahub.metadata.schema_classes import (
AuditStampClass,
DatasetPropertiesClass,
OwnerClass,
OwnershipClass,
OwnershipTypeClass,
)
# Fixed wall-clock time (applied via freezegun) so generated audit stamps are stable.
FROZEN_TIME = "2020-04-14 07:00:00"
class GlueSourceTest(unittest.TestCase):
    """Tests for GlueSource column-type mapping and metadata-event generation."""
    # Shared source instance, built once at class definition time.
    glue_source = GlueSource(
        ctx=PipelineContext(run_id="glue-source-test"),
        config=GlueSourceConfig(aws_region="us-east-1"),
    )
    def test_get_column_type_contains_key(self):
        field_type = "char"
        data_type = get_column_type(self.glue_source, field_type, "a_table", "a_field")
        self.assertEqual(
            data_type.to_obj(), SchemaFieldDataType(type=StringTypeClass()).to_obj()
        )
    def test_get_column_type_contains_array(self):
        field_type = "array_lol"
        data_type = get_column_type(self.glue_source, field_type, "a_table", "a_field")
        self.assertEqual(
            data_type.to_obj(), SchemaFieldDataType(type=ArrayTypeClass()).to_obj()
        )
    def test_get_column_type_contains_map(self):
        field_type = "map_hehe"
        data_type = get_column_type(self.glue_source, field_type, "a_table", "a_field")
        self.assertEqual(
            data_type.to_obj(), SchemaFieldDataType(type=MapTypeClass()).to_obj()
        )
    def test_get_column_type_contains_set(self):
        # "set_..." native types are mapped to arrays, not a dedicated set class.
        field_type = "set_yolo"
        data_type = get_column_type(self.glue_source, field_type, "a_table", "a_field")
        self.assertEqual(
            data_type.to_obj(), SchemaFieldDataType(type=ArrayTypeClass()).to_obj()
        )
    def test_get_column_type_not_contained(self):
        # Unknown native types fall back to string and emit a report warning.
        field_type = "bad_column_type"
        data_type = get_column_type(self.glue_source, field_type, "a_table", "a_field")
        self.assertEqual(
            data_type.to_obj(), SchemaFieldDataType(type=StringTypeClass()).to_obj()
        )
        self.assertEqual(
            self.glue_source.report.warnings["bad_column_type"],
            [
                "The type 'bad_column_type' is not recognised for field 'a_field' in table 'a_table', "
                "setting as StringTypeClass."
            ],
        )
    @freeze_time(FROZEN_TIME)
    def test_turn_boto_glue_data_to_metadata_event(self):
        # Millisecond epoch matching the frozen wall-clock time above.
        stringy_timestamp = datetime.strptime(FROZEN_TIME, "%Y-%m-%d %H:%M:%S")
        timestamp = int(datetime.timestamp(stringy_timestamp) * 1000)
        # Minimal Glue SearchTables response containing a single table.
        response = {
            "TableList": [
                {
                    "Name": "Barbeque",
                    "Owner": "Susan",
                    "DatabaseName": "datalake_grilled",
                    "Description": "Grilled Food",
                    "StorageDescriptor": {
                        "Columns": [
                            {
                                "Name": "Size",
                                "Type": "int",
                                "Comment": "Maximum attendees permitted",
                            }
                        ]
                    },
                }
            ]
        }
        def flatten(d):
            # Flatten nested dicts/lists into "key_subkey" -> value pairs so
            # the two workunits can be compared order-insensitively below.
            out = {}
            for key, val in d.items():
                if isinstance(val, dict):
                    val = [val]
                if isinstance(val, list):
                    for subdict in val:
                        deeper = flatten(subdict).items()
                        out.update({key + "_" + key2: val2 for key2, val2 in deeper})
                else:
                    out[key] = val
            return out
        with Stubber(self.glue_source.glue_client) as stubber:
            # Intercept the boto3 call so no real AWS request is made.
            stubber.add_response("search_tables", response, {})
            actual_work_unit = list(self.glue_source.get_workunits())[0]
        expected_metadata_work_unit = create_metadata_work_unit(timestamp)
        self.assertTrue(
            sorted(flatten(vars(expected_metadata_work_unit)))
            == sorted(flatten(vars(actual_work_unit)))
        )
def create_metadata_work_unit(timestamp):
    """Build the MetadataWorkUnit expected for the stubbed Glue response.

    timestamp: epoch milliseconds used for every audit stamp.
    """
    mce = MetadataChangeEvent()
    dataset_snapshot = DatasetSnapshot(
        urn="urn:li:dataset:(urn:li:dataPlatform:glue,datalake_grilled.Barbeque,PROD)",
        aspects=[],
    )
    # Ownership aspect derived from the Glue table's "Owner" field.
    dataset_snapshot.aspects.append(
        OwnershipClass(
            owners=[
                OwnerClass(
                    owner="urn:li:corpuser:Susan", type=OwnershipTypeClass.DATAOWNER
                )
            ],
            lastModified=AuditStampClass(
                time=timestamp, actor="urn:li:corpuser:datahub"
            ),
        )
    )
    dataset_snapshot.aspects.append(
        DatasetPropertiesClass(
            description="Grilled Food",
            customProperties={},
            uri=None,
            tags=[],
        )
    )
    dataset_snapshot.aspects.append(Status(removed=False))
    mce.proposedSnapshot = dataset_snapshot
    # Schema aspect: the single "Size" int column from the stubbed response.
    fields = [
        SchemaField(
            fieldPath="Size",
            nativeDataType="int",
            type=SchemaFieldDataType(type=NumberTypeClass()),
            description="Maximum attendees permitted",
            nullable=True,
            recursive=False,
        )
    ]
    schema_metadata = SchemaMetadata(
        schemaName="datalake_grilled.Barbeque",
        version=0,
        fields=fields,
        platform="urn:li:dataPlatform:glue",
        created=AuditStamp(time=timestamp, actor="urn:li:corpuser:etl"),
        lastModified=AuditStamp(time=timestamp, actor="urn:li:corpuser:etl"),
        hash="",
        platformSchema=MySqlDDL(tableSchema=""),
    )
    dataset_snapshot.aspects.append(schema_metadata)
    return MetadataWorkUnit(id="glue-datalake_grilled.Barbeque", mce=mce)
| StarcoderdataPython |
6973 | <gh_stars>0
import logging
# Midpoint between DEBUG (10) and INFO (20): a custom trace-ish level of 15.
# Floor division avoids the original's needless float round-trip int(x / 2).
TRACE_LVL = (logging.DEBUG + logging.INFO) // 2
| StarcoderdataPython |
# -*- coding: utf-8 -*-
import django.contrib.admin.helpers
from ajaximage.utils import format_image
from django.contrib.admin.utils import display_for_field
from django.core.files.storage import default_storage
from django.db.models import Field
from django.db.models.fields.files import FileDescriptor, ImageFieldFile
from django.utils.safestring import mark_safe
from .widgets import AjaxImageWidget
class AjaxImageField(Field):
    """Model field storing an uploaded image's path as text, edited via
    an AJAX upload widget with optional server-side resize/crop."""
    storage = default_storage
    attr_class = ImageFieldFile
    descriptor_class = FileDescriptor
    def __init__(self, *args, **kwargs):
        upload_to = kwargs.pop('upload_to', '')
        max_height = kwargs.pop('max_height', 0)
        max_width = kwargs.pop('max_width', 0)
        crop = kwargs.pop('crop', False)
        # Only the literal True enables cropping (matches historic behavior).
        crop = 1 if crop is True else 0
        # Fixed: the original used identity comparisons with int literals
        # (`crop is 1`, `max_height is 0`), which only work by accident via
        # CPython's small-int cache and warn on Python 3.8+.
        if crop == 1 and (max_height == 0 or max_width == 0):
            raise Exception('Both max_width and max_height are needed if cropping')
        self.widget = AjaxImageWidget(
            upload_to=upload_to,
            max_width=max_width,
            max_height=max_height,
            crop=crop
        )
        super(AjaxImageField, self).__init__(*args, **kwargs)
    def contribute_to_class(self, cls, name, virtual_only=False):
        # Install the file descriptor on the model class, like FileField does.
        super(AjaxImageField, self).contribute_to_class(cls, name, virtual_only)
        setattr(cls, self.name, self.descriptor_class(self))
    def get_prep_value(self, value):
        """Returns field's value prepared for saving into a database."""
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return str(value)
    def get_internal_type(self):
        # Stored as plain text (the file path), not a real FileField column.
        return "TextField"
    def formfield(self, **kwargs):
        # Always render with the AJAX upload widget unless overridden.
        defaults = {'widget': self.widget}
        defaults.update(kwargs)
        return super(AjaxImageField, self).formfield(**defaults)
# Monkey path to rightly display readonly field.
def display_for_field_patch(value, field, empty_value_display):
    """Admin display hook: render AjaxImageField values as an image tag.

    Falls back to Django's stock display_for_field for every other field.
    """
    if isinstance(field, AjaxImageField) and value:
        # The original computed an unused `width` here; format_image already
        # handles sizing, so the dead code is removed.
        return format_image(value)
    return display_for_field(value, field, empty_value_display)
# Install the patch so Django admin readonly fields render AjaxImageField
# values as images instead of their raw path string.
django.contrib.admin.helpers.display_for_field = display_for_field_patch
| StarcoderdataPython |
# coding: utf-8
"""
Subscriptions
Subscriptions allow contacts to control what forms of communications they receive. Contacts can decide whether they want to receive communication pertaining to a specific topic, brand, or an entire HubSpot account. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.communication_preferences.configuration import Configuration
class PublicUpdateSubscriptionStatusRequest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> OpenAPI type string (used for serialization).
    openapi_types = {"email_address": "str", "subscription_id": "str", "legal_basis": "str", "legal_basis_explanation": "str"}
    # Maps attribute name -> JSON key in the wire-format payload.
    attribute_map = {"email_address": "emailAddress", "subscription_id": "subscriptionId", "legal_basis": "legalBasis", "legal_basis_explanation": "legalBasisExplanation"}
    def __init__(self, email_address=None, subscription_id=None, legal_basis=None, legal_basis_explanation=None, local_vars_configuration=None):  # noqa: E501
        """PublicUpdateSubscriptionStatusRequest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._email_address = None
        self._subscription_id = None
        self._legal_basis = None
        self._legal_basis_explanation = None
        self.discriminator = None
        # email_address and subscription_id are required; their setters
        # validate non-None. The legal-basis fields are optional.
        self.email_address = email_address
        self.subscription_id = subscription_id
        if legal_basis is not None:
            self.legal_basis = legal_basis
        if legal_basis_explanation is not None:
            self.legal_basis_explanation = legal_basis_explanation
    @property
    def email_address(self):
        """Gets the email_address of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501

        Contact's email address.  # noqa: E501

        :return: The email_address of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :rtype: str
        """
        return self._email_address
    @email_address.setter
    def email_address(self, email_address):
        """Sets the email_address of this PublicUpdateSubscriptionStatusRequest.

        Contact's email address.  # noqa: E501

        :param email_address: The email_address of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and email_address is None:  # noqa: E501
            raise ValueError("Invalid value for `email_address`, must not be `None`")  # noqa: E501
        self._email_address = email_address
    @property
    def subscription_id(self):
        """Gets the subscription_id of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501

        ID of the subscription the contact is being resubscribed to.  # noqa: E501

        :return: The subscription_id of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :rtype: str
        """
        return self._subscription_id
    @subscription_id.setter
    def subscription_id(self, subscription_id):
        """Sets the subscription_id of this PublicUpdateSubscriptionStatusRequest.

        ID of the subscription the contact is being resubscribed to.  # noqa: E501

        :param subscription_id: The subscription_id of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and subscription_id is None:  # noqa: E501
            raise ValueError("Invalid value for `subscription_id`, must not be `None`")  # noqa: E501
        self._subscription_id = subscription_id
    @property
    def legal_basis(self):
        """Gets the legal_basis of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501

        Legal basis for resubscribing the contact (required for GDPR enabled portals).  # noqa: E501

        :return: The legal_basis of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :rtype: str
        """
        return self._legal_basis
    @legal_basis.setter
    def legal_basis(self, legal_basis):
        """Sets the legal_basis of this PublicUpdateSubscriptionStatusRequest.

        Legal basis for resubscribing the contact (required for GDPR enabled portals).  # noqa: E501

        :param legal_basis: The legal_basis of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :type: str
        """
        # Closed enumeration defined by the HubSpot API specification.
        allowed_values = [
            "LEGITIMATE_INTEREST_PQL",
            "LEGITIMATE_INTEREST_CLIENT",
            "PERFORMANCE_OF_CONTRACT",
            "CONSENT_WITH_NOTICE",
            "NON_GDPR",
            "PROCESS_AND_STORE",
            "LEGITIMATE_INTEREST_OTHER",
        ]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and legal_basis not in allowed_values:  # noqa: E501
            raise ValueError("Invalid value for `legal_basis` ({0}), must be one of {1}".format(legal_basis, allowed_values))  # noqa: E501
        self._legal_basis = legal_basis
    @property
    def legal_basis_explanation(self):
        """Gets the legal_basis_explanation of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501

        A more detailed explanation to go with the legal basis (required for GDPR enabled portals).  # noqa: E501

        :return: The legal_basis_explanation of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :rtype: str
        """
        return self._legal_basis_explanation
    @legal_basis_explanation.setter
    def legal_basis_explanation(self, legal_basis_explanation):
        """Sets the legal_basis_explanation of this PublicUpdateSubscriptionStatusRequest.

        A more detailed explanation to go with the legal basis (required for GDPR enabled portals).  # noqa: E501

        :param legal_basis_explanation: The legal_basis_explanation of this PublicUpdateSubscriptionStatusRequest.  # noqa: E501
        :type: str
        """
        self._legal_basis_explanation = legal_basis_explanation
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models/lists/dicts via their to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PublicUpdateSubscriptionStatusRequest):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PublicUpdateSubscriptionStatusRequest):
            return True
        return self.to_dict() != other.to_dict()
| StarcoderdataPython |
94032 | <reponame>zazaho/SimImg
''' The basic object that represents one file '''
import os
import hashlib
from datetime import datetime
from PIL import Image, ExifTags
from ..utils import pillowplus as PP
class FileObject():
    ''' File object that contains all information relating to one file on disk.

    Expensive properties (checksum, EXIF tags, thumbnail, size, ...) are
    evaluated lazily: a private ``_name`` attribute caches the result and the
    corresponding public ``name()`` method computes it on first access only.
    '''
    def __init__(self, parent, FullPath=None, checksumFilenameDict=None):
        self._Ctrl = parent
        self._Cfg = parent.Cfg
        self.fullPath = FullPath
        self.dirName = os.path.dirname(self.fullPath)
        self.fileName = os.path.basename(self.fullPath)
        _, self.fileExtension = os.path.splitext(self.fileName)
        self.hashDict = {}
        # Private lazy caches: None means "not yet computed"; the matching
        # method returns the cached value once it is set.
        self._isImage = None
        # Reuse a previously calculated checksum when one was supplied;
        # tolerate a missing mapping (defaults to None -> compute on demand).
        self._checksum = (checksumFilenameDict or {}).get(FullPath)
        self._exifTags = None
        self._thumbnail = None
        self._dateTime = None
        self._size = None
        # Is this file active
        self.active = True

    def isImage(self):
        ' Set IsImage to True if the file can be read by PIL '
        if self._isImage is None:
            try:
                # Context manager so the underlying file handle is closed
                # (a plain Image.open leaks the handle until GC).
                with Image.open(self.fullPath) as img:
                    # do this here to save time
                    self._size = img.size
                self._isImage = True
            except Exception:
                # PIL could not read the file: treat it as a non-image.
                self._isImage = False
        return self._isImage

    def checksum(self):
        ' Return the sha1 hex digest of the file contents (computed once). '
        if self._checksum is None:
            hasher = hashlib.sha1()
            with open(self.fullPath, 'rb') as afile:
                hasher.update(afile.read())
            self._checksum = hasher.hexdigest()
        return self._checksum

    def exifTags(self):
        ' Return a dict of EXIF tags; unknown tags default to empty strings. '
        if self._exifTags is None:
            # default to empty basic values
            self._exifTags = {
                'Make': '',
                'Model': '',
                'DateTimeOriginal': '',
                'DateTime': '',
                'DateTimeDigitized': ''
            }
            with Image.open(self.fullPath) as image:
                # image does not have method to get tags
                if not hasattr(image, '_getexif'):
                    return self._exifTags
                exif = image._getexif()
                # image does not have tags
                if not exif:
                    return self._exifTags
                for key, value in exif.items():
                    if key in ExifTags.TAGS:
                        self._exifTags[ExifTags.TAGS[key]] = value
        return self._exifTags

    def cameraMake(self):
        ' Camera manufacturer from EXIF (empty string when unknown). '
        return self.exifTags()['Make']

    def cameraModel(self):
        ' Camera model from EXIF (empty string when unknown). '
        return self.exifTags()['Model']

    def date(self):
        ' Return the most specific EXIF date string available, or "". '
        if self.exifTags()['DateTimeOriginal']:
            return self.exifTags()['DateTimeOriginal']
        if self.exifTags()['DateTime']:
            return self.exifTags()['DateTime']
        if self.exifTags()['DateTimeDigitized']:
            return self.exifTags()['DateTimeDigitized']
        return ''

    def dateTime(self):
        ' Return the EXIF date as a datetime, or the string "Missing". '
        if self._dateTime is None:
            thisDateString = self.date()
            if thisDateString == '':
                self._dateTime = 'Missing'
                return self._dateTime
            try:
                self._dateTime = datetime.strptime(
                    thisDateString,
                    '%Y:%m:%d %H:%M:%S'
                )
            except ValueError:
                # The EXIF date did not match the expected format.
                self._dateTime = 'Missing'
        return self._dateTime

    def size(self):
        ' Return the image dimensions (width, height); (0, 0) on failure. '
        if self._size is None:
            self._size = (0, 0)
            with Image.open(self.fullPath) as image:
                self._size = image.size
        return self._size

    def shapeParameter(self):
        ' Aspect measure: positive for landscape, negative for portrait. '
        w, h = self.size()
        # (width-height)/(width+height)*100
        return (w-h)/(w+h)*100

    def thumbnail(self):
        ' Return (and cache) a thumbnail fitting the configured size. '
        if self._thumbnail is None:
            ThumbSize = self._Cfg.get('thumbnailsize')
            self._thumbnail = PP.photoImageOpenAndResizeToFit(
                self.fullPath,
                ThumbSize,
                ThumbSize
            )
        return self._thumbnail
| StarcoderdataPython |
1750208 | <filename>run/runPS_Recycle.py
import os
import numpy as np
import platform_paths as pp
# Executable to run ('stat_stokes' is the steady-state alternative).
EXE = 'peri_stokes'

# Grid resolution: nx = ny = 2**i + 1.
i = 6
n = str(2**i + 1)

# Angular frequencies (omega) to scan, logarithmically spaced in [1e-2, 1e3].
# (Replaces an earlier hand-picked list of frequencies.)
woms = 10**np.linspace(-2, 3, 5)
oms = woms

case_consts = ' --domain=2 --flow=5 --nx=' + n + ' --ny=' + n + ' '

# Build the executable before running any cases.
os.chdir(pp.EXE_PATH)
os.system('make -j2')

for sol in ["GMRES", "GCRODR"]:
    # One output directory per solver.
    CASE_PATH0 = sol
    if not os.path.exists(pp.DATA_PATH + CASE_PATH0):
        os.mkdir(pp.DATA_PATH + CASE_PATH0)
    print(pp.DATA_PATH + CASE_PATH0)

    # One sub-directory per frequency; run the solver inside it.
    for case_i, om in enumerate(oms):
        CASE_PATH1 = '/case' + str(case_i)
        print(pp.DATA_PATH + CASE_PATH0 + CASE_PATH1)
        if not os.path.exists(pp.DATA_PATH + CASE_PATH0 + CASE_PATH1):
            os.mkdir(pp.DATA_PATH + CASE_PATH0 + CASE_PATH1)
        os.chdir(pp.DATA_PATH + CASE_PATH0 + CASE_PATH1)
        case_para = ' --omega=' + str(om) + ' --solver1=' + sol + ' '
        print(case_consts + case_para)
        os.system(pp.exe_pre + pp.EXE_PATH + EXE + case_para + case_consts)
147783 | """Summary info about tickets."""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI.command import SLCommand as SLCommand
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SLCommand)
@environment.pass_env
def cli(env):
    """Summary info about tickets."""
    mask = ('openTicketCount, closedTicketCount, '
            'openBillingTicketCount, openOtherTicketCount, '
            'openSalesTicketCount, openSupportTicketCount, '
            'openAccountingTicketCount')
    account = env.client['Account'].getObject(mask=mask)

    table = formatting.Table(['Status', 'count'])
    nested = formatting.Table(['Type', 'count'])

    # Open tickets broken down by queue, in display order; 'Total' last.
    open_rows = (
        ('Accounting', 'openAccountingTicketCount'),
        ('Billing', 'openBillingTicketCount'),
        ('Sales', 'openSalesTicketCount'),
        ('Support', 'openSupportTicketCount'),
        ('Other', 'openOtherTicketCount'),
        ('Total', 'openTicketCount'),
    )
    for label, key in open_rows:
        nested.add_row([label, account[key]])

    table.add_row(['Open', nested])
    table.add_row(['Closed', account['closedTicketCount']])
    env.fout(table)
| StarcoderdataPython |
def comparar(num1, num2):
    """Return a message saying which value is larger, or that they are equal."""
    if num1 > num2:
        return 'O primeiro valor é maior.'
    if num2 > num1:
        return 'O segundo valor é maior.'
    return 'Os dois valores são iguais.'


if __name__ == '__main__':
    # Read the two integers interactively and print the comparison result.
    num1 = int(input('Digite o primeiro número: '))
    num2 = int(input('Digite o segundo número: '))
    print(comparar(num1, num2))
4812909 | <gh_stars>0
"""Example using Advanced Array Filters with Statistics, Math and Logic"""
# Dependencies
import numpy as np
# Website analytics data:
# (row = day), (col = users, bounce, duration)
a = np.array([[815, 70, 115],
[767, 80, 50],
[912, 74, 77],
[554, 88, 70],
[1008, 65, 128]])
mean, stdev = np.mean(a, axis=0), np.std(a, axis=0)
# One-liner
outliers = ((np.abs(a[:,0] - mean[0]) > stdev[0])
* (np.abs(a[:,1] - mean[1]) > stdev[1])
* (np.abs(a[:,2] - mean[2]) > stdev[2]))
# Result
print(a[outliers]) | StarcoderdataPython |
152469 | #!/usr/bin/env python
"""Unique Crater Distribution Functions
Functions for extracting craters from model target predictions and filtering
out duplicates.
"""
from __future__ import absolute_import, division, print_function
from PIL import Image
import matplotlib
import cv2
import matplotlib.pyplot as plt
import numpy as np
import h5py
import sys
import utils.template_match_target as tmt
import utils.processing as proc
import utils.transform as trf
from keras.models import load_model
import os
import pandas as pd
from input_data_gen import ringmaker,circlemaker,get_merge_indices
#########################
def get_model_preds(CP):
    """Generates (and saves) model predictions for the input images.

    Parameters
    ----------
    CP : dict
        Contains directory locations for loading data/model and storing
        predictions.

    Returns
    -------
    preds : list
        Model predictions (one predicted target mask per input image).
    """
    n_imgs, dtype = CP['n_imgs'], CP['datatype']

    data = h5py.File(CP['dir_data'], 'r')
    Data = {
        dtype: [data['input_images'][:n_imgs].astype('float32'),
                data['target_masks'][:n_imgs].astype('float32')]
    }
    data.close()
    proc.preprocess(Data)

    model = load_model(CP['dir_model'])
    preds = []
    # Predict in small batches of 2 to bound memory usage.
    for i in range(0, n_imgs, 2):
        pred = model.predict(Data[dtype][0][i:i + 2])
        for mask in pred:
            preds.append(mask)

    # Save predictions and close the file so the data is flushed to disk
    # (callers re-open this file for reading).
    h5f = h5py.File(CP['dir_preds'], 'w')
    h5f.create_dataset(dtype, data=preds)
    h5f.close()
    print("Successfully generated and saved model predictions.")
    return preds
def get_data(CP):
    """Loads input images/target masks and the ground-truth crater tables.

    Parameters
    ----------
    CP : dict
        Contains directory locations of the image data and crater tables.

    Returns
    -------
    Data : dict
        Maps CP['datatype'] to [input_images, target_masks].
    csvs : list
        Per-image arrays of ground-truth crater (x, y, radius) tuples;
        images left with no usable craters get the placeholder [-1].
    """
    n_imgs, dtype = CP['n_imgs'], CP['datatype']

    data = h5py.File(CP['dir_data'], 'r')
    Data = {
        dtype: [data['input_images'][:n_imgs].astype('float32'),
                data['target_masks'][:n_imgs].astype('float32')]
    }
    data.close()

    craters = pd.HDFStore(CP['crater_data'], 'r')
    csvs = []
    # Filtering limits: crater radius bounds (pix), edge cut factor, image dim.
    minrad, maxrad, cutrad, n_csvs, dim = 3, 50, 0.8, len(craters), 256
    diam = 'Diameter (pix)'
    for i in range(n_csvs):
        csv = craters[proc.get_id(i, 2)]
        # remove small/large/half craters (those cut off by the image edge)
        csv = csv[(csv[diam] < 2 * maxrad) & (csv[diam] > 2 * minrad)]
        csv = csv[(csv['x'] + cutrad * csv[diam] / 2 <= dim)]
        csv = csv[(csv['y'] + cutrad * csv[diam] / 2 <= dim)]
        csv = csv[(csv['x'] - cutrad * csv[diam] / 2 > 0)]
        csv = csv[(csv['y'] - cutrad * csv[diam] / 2 > 0)]
        if len(csv) < 1:    # Exclude csvs with few craters
            csvs.append([-1])
        else:
            csv_coords = np.asarray((csv['x'], csv['y'], csv[diam] / 2)).T
            csvs.append(csv_coords)
    # Close the store now that all tables have been read.
    craters.close()
    return Data, csvs
def get_coords_classification(detect_coords, note_coords,
                              longlat_thresh2=1.8, rad_thresh=1.0):
    """Classifies detected craters against annotated ground-truth craters.

    A detection matches an annotation when the squared center distance,
    scaled by the smaller of the two radii, is below ``longlat_thresh2``
    and the relative radius difference is below ``rad_thresh``.  Each
    detection is paired with the first annotation it matches.

    Parameters
    ----------
    detect_coords : iterable of (x, y, r)
        Detected crater positions and radii (pixels).
    note_coords : iterable of (x, y, r)
        Annotated (ground-truth) crater positions and radii (pixels).
    longlat_thresh2 : float, optional
        Maximum squared, radius-scaled center distance for a match.
    rad_thresh : float, optional
        Maximum relative radius difference for a match.

    Returns
    -------
    true_crater : list
        Detections that matched an annotation (true positives).
    detect_crater : list
        Detections with no matching annotation (false positives).
    undetected_crater : list
        Annotations never matched by any detection (false negatives).
    """
    true_crater = []
    matched_detect = set()  # indices of matched detections (O(1) membership)
    matched_note = set()    # indices of matched annotations
    for i, (lo, la, r) in enumerate(detect_coords):
        for j, (Long, Lat, Rad) in enumerate(note_coords):
            minr = np.minimum(r, Rad)
            dL = ((Long - lo)**2 + (Lat - la)**2) / minr**2
            dR = abs(Rad - r) / minr
            if (dR < rad_thresh) and (dL < longlat_thresh2):
                matched_detect.add(i)
                matched_note.add(j)
                true_crater.append(detect_coords[i])
                break
    undetected_crater = [note_coords[k] for k in range(len(note_coords))
                         if k not in matched_note]
    detect_crater = [detect_coords[k] for k in range(len(detect_coords))
                     if k not in matched_detect]
    return true_crater, detect_crater, undetected_crater
def draw_pic(img, detect_coords, note_coords, save_path):
    """Draws classified crater rings onto *img* and writes it to *save_path*.

    Blue = correctly detected, green = false detection, red = missed
    (colors are BGR, as used by OpenCV).
    """
    classified = get_coords_classification(detect_coords, note_coords)
    # Colors matched to (true, false-detect, undetected), drawn in that order.
    colors = ((255, 0, 0), (0, 255, 0), (0, 0, 255))
    ring_width = 2
    for craters, color in zip(classified, colors):
        for x, y, r in craters:
            cv2.circle(img, (int(x), int(y)), int(r), color, ring_width)
    cv2.imwrite(save_path, img)
#########################
def add_unique_craters(craters, craters_unique, thresh_longlat2, thresh_rad):
    """Generates unique crater distribution by filtering out duplicates.

    Parameters
    ----------
    craters : array
        Crater tuples from a single image in the form (long, lat, radius).
    craters_unique : array
        Master array of unique crater tuples in the form (long, lat, radius).
    thresh_longlat2 : float
        Hyperparameter that controls the minimum squared longitude/latitude
        difference between craters to be considered unique entries.
    thresh_rad : float
        Hyperparameter that controls the minimum squared radius difference
        between craters to be considered unique entries.

    Returns
    -------
    craters_unique : array
        Modified master array of unique crater tuples with new crater entries.
    """
    k2d = 180. / (np.pi * 1737.4)  # km to deg (lunar radius = 1737.4 km)
    Long, Lat, Rad = craters_unique.T
    for j in range(len(craters)):
        lo, la, r = craters[j].T
        la_m = (la + Lat) / 2.
        minr = np.minimum(r, Rad)  # be liberal when filtering dupes

        # Duplicate filtering criteria: radius-scaled degree distance and
        # relative radius difference against every known unique crater.
        dL = (((Long - lo) / (minr * k2d / np.cos(np.pi * la_m / 180.)))**2
              + ((Lat - la) / (minr * k2d))**2)
        dR = np.abs(Rad - r) / minr
        duplicates = (dR < thresh_rad) & (dL < thresh_longlat2)
        if not np.any(duplicates):
            craters_unique = np.vstack((craters_unique, craters[j]))
    return craters_unique
#########################
def estimate_longlatdiamkm(dim, llbd, distcoeff, coords):
    """First-order estimation of long/lat (deg) and radius (km) from
    (Orthographic) x/y position and radius (pix).

    For images transformed from ~6000 pixel crops of the 30,000 pixel
    LROC-Kaguya DEM, this results in < ~0.4 degree latitude, <~0.2
    longitude offsets (~2% and ~1% of the image, respectively) and ~2% error
    in radius.  Larger images thus may require an exact inverse transform,
    depending on the accuracy demanded by the user.

    Parameters
    ----------
    dim : tuple or list
        (width, height) of input images.
    llbd : tuple or list
        Long/lat limits (long_min, long_max, lat_min, lat_max) of image.
    distcoeff : float
        Ratio between the central heights of the transformed image and
        original image.
    coords : numpy.ndarray
        Array of crater x coordinates, y coordinates, and pixel radii.

    Returns
    -------
    numpy.ndarray
        Array of crater longitude, latitude (deg) and radii (km).
    """
    x_pix, y_pix, radii_pix = coords.T

    # Pixel-to-km scale derived from the latitude extent of the image.
    km_per_pix = 1. / trf.km2pix(dim[1], llbd[3] - llbd[2], dc=distcoeff)
    radii_km = radii_pix * km_per_pix

    # Pixel-to-degree scale (lunar radius = 1737.4 km) and image center.
    deg_per_pix = km_per_pix * 180. / (np.pi * 1737.4)
    long_central = 0.5 * (llbd[0] + llbd[1])
    lat_central = 0.5 * (llbd[2] + llbd[3])

    # First-order latitude estimate, refined by a sin(x)/x correction.
    lat_first = lat_central - deg_per_pix * (y_pix - dim[1] / 2.)
    latdiff = abs(lat_central - lat_first)
    # Protect against division by zero when the crater sits on the center row.
    latdiff[latdiff < 1e-7] = 1e-7
    lat_deg = lat_central - (deg_per_pix * (y_pix - dim[1] / 2.) *
                             (np.pi * latdiff / 180.) /
                             np.sin(np.pi * latdiff / 180.))

    # Longitude from the determined latitude (meridian convergence factor).
    long_deg = long_central + (deg_per_pix * (x_pix - dim[0] / 2.) /
                               np.cos(np.pi * lat_deg / 180.))

    # Combined long/lat/radius array.
    return np.column_stack((long_deg, lat_deg, radii_km))
def extract_unique_craters(CP, craters_unique):
    """Top level function that extracts craters from model predictions,
    converts craters from pixel to real (degree, km) coordinates, and filters
    out duplicate detections across images.

    Parameters
    ----------
    CP : dict
        Crater Parameters needed to run the code.
    craters_unique : array
        Empty master array of unique crater tuples in the form
        (long, lat, radius).

    Returns
    -------
    craters_unique : array
        Filled master array of unique crater tuples.
    """
    # Load model preds if they were saved previously, otherwise generate them.
    try:
        preds = h5py.File(CP['dir_preds'], 'r')[CP['datatype']]
        print("Loaded model predictions successfully")
    except (OSError, KeyError):
        # File missing/unreadable or dataset absent: regenerate.
        print("Couldnt load model predictions, generating")
        preds = get_model_preds(CP)

    Data, Carters = get_data(CP)

    # need for long/lat bounds
    P = h5py.File(CP['dir_data'], 'r')
    llbd, pbd, distcoeff = ('longlat_bounds', 'pix_bounds',
                            'pix_distortion_coefficient')
    dim = (float(CP['dim']), float(CP['dim']))

    N_matches_tot = 0
    if not os.path.exists(CP['result_img']):
        os.mkdir(CP['result_img'])

    # Per-image classification counts (string logs and running totals).
    lenstr = ""
    lenstr1 = "true_carter"
    lenstr2 = "detect_carter"
    lenstr3 = "undetected_carter"
    num = 0
    num1 = 0
    num2 = 0
    num3 = 0
    for i in range(CP['n_imgs']):
        id = proc.get_id(i, 2)
        print("Drawing picture:%d" % i)
        input_images = Data[CP['datatype']][0][i]
        imgs = Image.fromarray(input_images.astype('uint8')).convert('RGB')
        img = cv2.cvtColor(np.asarray(imgs), cv2.COLOR_RGB2BGR)

        # Extract (x, y, radius) crater candidates from the predicted mask.
        coords = tmt.template_match_t(preds[i])
        num = num + len(coords)
        lenstr = lenstr + " " + str(len(coords))
        matplotlib.image.imsave(CP['result_img'] + "/" + str(i) + '_mask.jpg',
                                preds[i])

        # Classify detections against the annotated ground truth.
        true_carter, detect_carter, Undetected_carter = \
            get_coords_classification(coords, Carters[i])
        lenstr1 = lenstr1 + " " + str(len(true_carter))
        num1 = num1 + len(true_carter)
        lenstr2 = lenstr2 + " " + str(len(detect_carter))
        num2 = num2 + len(detect_carter)
        lenstr3 = lenstr3 + " " + str(len(Undetected_carter))
        num3 = num3 + len(Undetected_carter)
        draw_pic(img, coords, Carters[i], CP['result_img'] + "/" + str(i) + '.jpg')

        if len(coords) > 0:
            # Convert pixel coordinates to (longitude, latitude, radius-km).
            new_craters_unique = estimate_longlatdiamkm(
                dim, P[llbd][id], P[distcoeff][id][0], coords)
            N_matches_tot += len(coords)

            # Only add unique (non-duplicate) craters
            if len(craters_unique) > 0:
                craters_unique = add_unique_craters(new_craters_unique,
                                                    craters_unique,
                                                    CP['llt2'], CP['rt2'])
            else:
                craters_unique = np.concatenate((craters_unique,
                                                 new_craters_unique))

    # Done with the bounds file; close it before saving results.
    P.close()

    print(lenstr)
    print("total num:%d" % num)
    print(lenstr1)
    print(num1)
    print(lenstr2)
    print(num2)
    print(lenstr3)
    print(num3)
    np.save(CP['dir_result'], craters_unique)
    return craters_unique
| StarcoderdataPython |
3342669 | <reponame>marketredesign/pricecypher_python_api<filename>src/pricecypher/collections/scope_value_collection.py
from pricecypher.collections.base_collection import BaseCollection
from pricecypher.models import ScopeValue
class ScopeValueCollection(BaseCollection):
_type = ScopeValue
def __repr__(self):
return "<{0} {1}>".format(self.__class__.__name__, self._list)
def __len__(self):
"""List length"""
return len(self._list)
def __getitem__(self, ii):
"""Get a list item"""
return self._list[ii]
def __str__(self):
return str(self._list)
def where_in(self, values):
"""
Filter collection on the given values.
:param list or float or str values: Value or values to filter the collection on.
:return: Collection of filtered scope values.
:rtype: ScopeValueCollection
"""
# Turn values into a list if it is not a list already.
if type(values) is not list:
values = [values]
# Make sure all values are strings.
values = list(map(str, values))
# Filter and create new collection
scope_values = [sv for sv in self._list if sv.value in values]
return ScopeValueCollection(scope_values)
| StarcoderdataPython |
3218741 | import contextlib
import os
import threading
from textwrap import dedent
import unittest
import time
from test import support
from test.support import import_helper
_interpreters = import_helper.import_module('_xxsubinterpreters')
from test.support import interpreters
def _captured_script(script):
r, w = os.pipe()
indented = script.replace('\n', '\n ')
wrapped = dedent(f"""
import contextlib
with open({w}, 'w', encoding='utf-8') as spipe:
with contextlib.redirect_stdout(spipe):
{indented}
""")
return wrapped, open(r, encoding='utf-8')
def clean_up_interpreters():
    """Destroy every subinterpreter except the main one (id 0)."""
    for interp in interpreters.list_all():
        if interp.id != 0:  # never close the main interpreter
            try:
                interp.close()
            except RuntimeError:
                # Already destroyed; nothing left to clean up.
                pass
def _run_output(interp, request, channels=None):
    """Run *request* in *interp* and return everything it printed."""
    wrapped, reader = _captured_script(request)
    with reader:
        interp.run(wrapped, channels=channels)
        return reader.read()
@contextlib.contextmanager
def _running(interp):
    # Keep *interp* busy for the duration of the with-block: a background
    # thread runs a script in the interpreter that blocks reading from a
    # pipe; on exit we write "done" to the pipe so the script finishes,
    # then join the thread.
    r, w = os.pipe()
    def run():
        interp.run(dedent(f"""
            # wait for "signal"
            with open({r}) as rpipe:
                rpipe.read()
            """))

    t = threading.Thread(target=run)
    t.start()

    yield

    with open(w, 'w') as spipe:
        spipe.write('done')
    t.join()
class TestBase(unittest.TestCase):
    """Common base: destroy any subinterpreters a test left behind."""

    def tearDown(self):
        clean_up_interpreters()
class CreateTests(TestBase):
    """Tests for interpreters.create()."""

    def test_in_main(self):
        interp = interpreters.create()
        self.assertIsInstance(interp, interpreters.Interpreter)
        self.assertIn(interp, interpreters.list_all())

    def test_in_thread(self):
        # Creation from a non-main thread must still register globally.
        lock = threading.Lock()
        interp = None
        def f():
            nonlocal interp
            interp = interpreters.create()
            lock.acquire()
            lock.release()

        t = threading.Thread(target=f)
        with lock:
            t.start()
        t.join()
        self.assertIn(interp, interpreters.list_all())

    def test_in_subinterpreter(self):
        # An interpreter created inside a subinterpreter is visible here.
        main, = interpreters.list_all()
        interp = interpreters.create()
        out = _run_output(interp, dedent("""
            from test.support import interpreters
            interp = interpreters.create()
            print(interp.id)
            """))
        interp2 = interpreters.Interpreter(int(out))
        self.assertEqual(interpreters.list_all(), [main, interp, interp2])

    def test_after_destroy_all(self):
        before = set(interpreters.list_all())
        # Create 3 subinterpreters.
        interp_lst = []
        for _ in range(3):
            interps = interpreters.create()
            interp_lst.append(interps)
        # Now destroy them.
        for interp in interp_lst:
            interp.close()
        # Finally, create another.
        interp = interpreters.create()
        self.assertEqual(set(interpreters.list_all()), before | {interp})

    def test_after_destroy_some(self):
        before = set(interpreters.list_all())
        # Create 3 subinterpreters.
        interp1 = interpreters.create()
        interp2 = interpreters.create()
        interp3 = interpreters.create()
        # Now destroy 2 of them.
        interp1.close()
        interp2.close()
        # Finally, create another.
        interp = interpreters.create()
        self.assertEqual(set(interpreters.list_all()), before | {interp3, interp})
class GetCurrentTests(TestBase):
    """Tests for interpreters.get_current() / get_main()."""

    def test_main(self):
        main = interpreters.get_main()
        current = interpreters.get_current()
        self.assertEqual(current, main)

    def test_subinterpreter(self):
        # Inside a subinterpreter, get_current() is not the main interpreter.
        main = _interpreters.get_main()
        interp = interpreters.create()
        out = _run_output(interp, dedent("""
            from test.support import interpreters
            cur = interpreters.get_current()
            print(cur.id)
            """))
        current = interpreters.Interpreter(int(out))
        self.assertNotEqual(current, main)
class ListAllTests(TestBase):
    """Tests for interpreters.list_all()."""

    def test_initial(self):
        # Only the main interpreter exists at first.
        interps = interpreters.list_all()
        self.assertEqual(1, len(interps))

    def test_after_creating(self):
        main = interpreters.get_current()
        first = interpreters.create()
        second = interpreters.create()

        ids = []
        for interp in interpreters.list_all():
            ids.append(interp.id)

        self.assertEqual(ids, [main.id, first.id, second.id])

    def test_after_destroying(self):
        main = interpreters.get_current()
        first = interpreters.create()
        second = interpreters.create()
        first.close()

        ids = []
        for interp in interpreters.list_all():
            ids.append(interp.id)

        self.assertEqual(ids, [main.id, second.id])
class TestInterpreterAttrs(TestBase):
    """Tests for the id/isolated attributes and equality of Interpreter."""

    def test_id_type(self):
        main = interpreters.get_main()
        current = interpreters.get_current()
        interp = interpreters.create()
        self.assertIsInstance(main.id, _interpreters.InterpreterID)
        self.assertIsInstance(current.id, _interpreters.InterpreterID)
        self.assertIsInstance(interp.id, _interpreters.InterpreterID)

    def test_main_id(self):
        # The main interpreter always has id 0.
        main = interpreters.get_main()
        self.assertEqual(main.id, 0)

    def test_custom_id(self):
        interp = interpreters.Interpreter(1)
        self.assertEqual(interp.id, 1)

        with self.assertRaises(TypeError):
            interpreters.Interpreter('1')

    def test_id_readonly(self):
        interp = interpreters.Interpreter(1)
        with self.assertRaises(AttributeError):
            interp.id = 2

    @unittest.skip('not ready yet (see bpo-32604)')
    def test_main_isolated(self):
        main = interpreters.get_main()
        self.assertFalse(main.isolated)

    @unittest.skip('not ready yet (see bpo-32604)')
    def test_subinterpreter_isolated_default(self):
        interp = interpreters.create()
        self.assertFalse(interp.isolated)

    def test_subinterpreter_isolated_explicit(self):
        interp1 = interpreters.create(isolated=True)
        interp2 = interpreters.create(isolated=False)
        self.assertTrue(interp1.isolated)
        self.assertFalse(interp2.isolated)

    @unittest.skip('not ready yet (see bpo-32604)')
    def test_custom_isolated_default(self):
        interp = interpreters.Interpreter(1)
        self.assertFalse(interp.isolated)

    def test_custom_isolated_explicit(self):
        interp1 = interpreters.Interpreter(1, isolated=True)
        interp2 = interpreters.Interpreter(1, isolated=False)
        self.assertTrue(interp1.isolated)
        self.assertFalse(interp2.isolated)

    def test_isolated_readonly(self):
        interp = interpreters.Interpreter(1)
        with self.assertRaises(AttributeError):
            interp.isolated = True

    def test_equality(self):
        interp1 = interpreters.create()
        interp2 = interpreters.create()
        self.assertEqual(interp1, interp1)
        self.assertNotEqual(interp1, interp2)
class TestInterpreterIsRunning(TestBase):
    """Tests for Interpreter.is_running()."""

    def test_main(self):
        # The main interpreter is always running.
        main = interpreters.get_main()
        self.assertTrue(main.is_running())

    @unittest.skip('Fails on FreeBSD')
    def test_subinterpreter(self):
        interp = interpreters.create()
        self.assertFalse(interp.is_running())

        with _running(interp):
            self.assertTrue(interp.is_running())
        self.assertFalse(interp.is_running())

    def test_from_subinterpreter(self):
        # Query the running state via the low-level module from inside.
        interp = interpreters.create()
        out = _run_output(interp, dedent(f"""
            import _xxsubinterpreters as _interpreters
            if _interpreters.is_running({interp.id}):
                print(True)
            else:
                print(False)
            """))
        self.assertEqual(out.strip(), 'True')

    def test_already_destroyed(self):
        interp = interpreters.create()
        interp.close()
        with self.assertRaises(RuntimeError):
            interp.is_running()

    def test_does_not_exist(self):
        interp = interpreters.Interpreter(1_000_000)
        with self.assertRaises(RuntimeError):
            interp.is_running()

    def test_bad_id(self):
        interp = interpreters.Interpreter(-1)
        with self.assertRaises(ValueError):
            interp.is_running()
class TestInterpreterClose(TestBase):
    """Tests for Interpreter.close()."""

    def test_basic(self):
        main = interpreters.get_main()
        interp1 = interpreters.create()
        interp2 = interpreters.create()
        interp3 = interpreters.create()
        self.assertEqual(set(interpreters.list_all()),
                         {main, interp1, interp2, interp3})
        interp2.close()
        self.assertEqual(set(interpreters.list_all()),
                         {main, interp1, interp3})

    def test_all(self):
        before = set(interpreters.list_all())
        interps = set()
        for _ in range(3):
            interp = interpreters.create()
            interps.add(interp)
        self.assertEqual(set(interpreters.list_all()), before | interps)
        for interp in interps:
            interp.close()
        self.assertEqual(set(interpreters.list_all()), before)

    def test_main(self):
        # The main interpreter cannot be closed, from any thread.
        main, = interpreters.list_all()
        with self.assertRaises(RuntimeError):
            main.close()

        def f():
            with self.assertRaises(RuntimeError):
                main.close()

        t = threading.Thread(target=f)
        t.start()
        t.join()

    def test_already_destroyed(self):
        interp = interpreters.create()
        interp.close()
        with self.assertRaises(RuntimeError):
            interp.close()

    def test_does_not_exist(self):
        interp = interpreters.Interpreter(1_000_000)
        with self.assertRaises(RuntimeError):
            interp.close()

    def test_bad_id(self):
        interp = interpreters.Interpreter(-1)
        with self.assertRaises(ValueError):
            interp.close()

    def test_from_current(self):
        # An interpreter cannot close itself.
        main, = interpreters.list_all()
        interp = interpreters.create()
        out = _run_output(interp, dedent(f"""
            from test.support import interpreters
            interp = interpreters.Interpreter({int(interp.id)})
            try:
                interp.close()
            except RuntimeError:
                print('failed')
            """))
        self.assertEqual(out.strip(), 'failed')
        self.assertEqual(set(interpreters.list_all()), {main, interp})

    def test_from_sibling(self):
        # A subinterpreter may close its siblings.
        main, = interpreters.list_all()
        interp1 = interpreters.create()
        interp2 = interpreters.create()
        self.assertEqual(set(interpreters.list_all()),
                         {main, interp1, interp2})
        interp1.run(dedent(f"""
            from test.support import interpreters
            interp2 = interpreters.Interpreter(int({interp2.id}))
            interp2.close()
            interp3 = interpreters.create()
            interp3.close()
            """))
        self.assertEqual(set(interpreters.list_all()), {main, interp1})

    def test_from_other_thread(self):
        interp = interpreters.create()
        def f():
            interp.close()

        t = threading.Thread(target=f)
        t.start()
        t.join()

    @unittest.skip('Fails on FreeBSD')
    def test_still_running(self):
        # Closing a running interpreter fails and leaves it running.
        main, = interpreters.list_all()
        interp = interpreters.create()
        with _running(interp):
            with self.assertRaises(RuntimeError):
                interp.close()
            self.assertTrue(interp.is_running())
class TestInterpreterRun(TestBase):
    """Tests for Interpreter.run()."""

    def test_success(self):
        interp = interpreters.create()
        script, file = _captured_script('print("it worked!", end="")')
        with file:
            interp.run(script)
            out = file.read()

        self.assertEqual(out, 'it worked!')

    def test_in_thread(self):
        interp = interpreters.create()
        script, file = _captured_script('print("it worked!", end="")')
        with file:
            def f():
                interp.run(script)

            t = threading.Thread(target=f)
            t.start()
            t.join()
            out = file.read()

        self.assertEqual(out, 'it worked!')

    @support.requires_fork()
    def test_fork(self):
        # fork() is disallowed inside a subinterpreter; the RuntimeError
        # branch writes the marker string to prove the script ran.
        interp = interpreters.create()
        import tempfile
        with tempfile.NamedTemporaryFile('w+', encoding='utf-8') as file:
            file.write('')
            file.flush()

            expected = 'spam spam spam spam spam'
            script = dedent(f"""
                import os
                try:
                    os.fork()
                except RuntimeError:
                    with open('{file.name}', 'w', encoding='utf-8') as out:
                        out.write('{expected}')
                """)
            interp.run(script)

            file.seek(0)
            content = file.read()
            self.assertEqual(content, expected)

    @unittest.skip('Fails on FreeBSD')
    def test_already_running(self):
        interp = interpreters.create()
        with _running(interp):
            with self.assertRaises(RuntimeError):
                interp.run('print("spam")')

    def test_does_not_exist(self):
        interp = interpreters.Interpreter(1_000_000)
        with self.assertRaises(RuntimeError):
            interp.run('print("spam")')

    def test_bad_id(self):
        interp = interpreters.Interpreter(-1)
        with self.assertRaises(ValueError):
            interp.run('print("spam")')

    def test_bad_script(self):
        interp = interpreters.create()
        with self.assertRaises(TypeError):
            interp.run(10)

    def test_bytes_for_script(self):
        # Only str scripts are accepted.
        interp = interpreters.create()
        with self.assertRaises(TypeError):
            interp.run(b'print("spam")')
# test_xxsubinterpreters covers the remaining Interpreter.run() behavior.
class TestIsShareable(TestBase):
    """Tests for interpreters.is_shareable()."""

    def test_default_shareables(self):
        shareables = [
                # singletons
                None,
                # builtin objects
                b'spam',
                'spam',
                10,
                -10,
                ]
        for obj in shareables:
            with self.subTest(obj):
                shareable = interpreters.is_shareable(obj)
                self.assertTrue(shareable)

    def test_not_shareable(self):
        class Cheese:
            def __init__(self, name):
                self.name = name

            def __str__(self):
                return self.name

        class SubBytes(bytes):
            """A subclass of a shareable type."""

        not_shareables = [
                # singletons
                True,
                False,
                NotImplemented,
                ...,
                # builtin types and objects
                type,
                object,
                object(),
                Exception(),
                100.0,
                # user-defined types and objects
                Cheese,
                Cheese('Wensleydale'),
                SubBytes(b'spam'),
                ]
        for obj in not_shareables:
            with self.subTest(repr(obj)):
                self.assertFalse(
                    interpreters.is_shareable(obj))
class TestChannels(TestBase):
    """Tests for channel creation and enumeration."""

    def test_create(self):
        r, s = interpreters.create_channel()
        self.assertIsInstance(r, interpreters.RecvChannel)
        self.assertIsInstance(s, interpreters.SendChannel)

    def test_list_all(self):
        # No channels exist initially; each create adds exactly one.
        self.assertEqual(interpreters.list_all_channels(), [])
        created = set()
        for _ in range(3):
            ch = interpreters.create_channel()
            created.add(ch)
        after = set(interpreters.list_all_channels())
        self.assertEqual(after, created)
class TestRecvChannelAttrs(TestBase):
    """Tests for the id attribute and equality of RecvChannel objects."""

    def test_id_type(self):
        rch, _ = interpreters.create_channel()
        self.assertIsInstance(rch.id, _interpreters.ChannelID)

    def test_custom_id(self):
        rch = interpreters.RecvChannel(1)
        self.assertEqual(rch.id, 1)

        with self.assertRaises(TypeError):
            interpreters.RecvChannel('1')

    def test_id_readonly(self):
        rch = interpreters.RecvChannel(1)
        with self.assertRaises(AttributeError):
            rch.id = 2

    def test_equality(self):
        ch1, _ = interpreters.create_channel()
        ch2, _ = interpreters.create_channel()
        self.assertEqual(ch1, ch1)
        self.assertNotEqual(ch1, ch2)
class TestSendChannelAttrs(TestBase):
    """Tests for the id attribute and equality of SendChannel objects."""

    def test_id_type(self):
        _, sch = interpreters.create_channel()
        self.assertIsInstance(sch.id, _interpreters.ChannelID)

    def test_custom_id(self):
        sch = interpreters.SendChannel(1)
        self.assertEqual(sch.id, 1)

        with self.assertRaises(TypeError):
            interpreters.SendChannel('1')

    def test_id_readonly(self):
        sch = interpreters.SendChannel(1)
        with self.assertRaises(AttributeError):
            sch.id = 2

    def test_equality(self):
        _, ch1 = interpreters.create_channel()
        _, ch2 = interpreters.create_channel()
        self.assertEqual(ch1, ch1)
        self.assertNotEqual(ch1, ch2)
class TestSendRecv(TestBase):
    """End-to-end send/recv behavior across interpreters and threads."""

    def test_send_recv_main(self):
        # A round trip through a channel yields an equal but distinct
        # object (the payload is copied, never shared).
        r, s = interpreters.create_channel()
        orig = b'spam'
        s.send_nowait(orig)
        obj = r.recv()
        self.assertEqual(obj, orig)
        self.assertIsNot(obj, orig)

    def test_send_recv_same_interpreter(self):
        # Copy semantics hold even when sender and receiver live in the
        # same subinterpreter.
        interp = interpreters.create()
        interp.run(dedent("""
            from test.support import interpreters
            r, s = interpreters.create_channel()
            orig = b'spam'
            s.send_nowait(orig)
            obj = r.recv()
            assert obj == orig, 'expected: obj == orig'
            assert obj is not orig, 'expected: obj is not orig'
            """))

    @unittest.skip('broken (see BPO-...)')
    def test_send_recv_different_interpreters(self):
        # Cross-interpreter transfer in both directions; identity is
        # compared via id() since objects cannot be shared directly.
        r1, s1 = interpreters.create_channel()
        r2, s2 = interpreters.create_channel()
        orig1 = b'spam'
        s1.send_nowait(orig1)
        out = _run_output(
            interpreters.create(),
            dedent(f"""
                obj1 = r.recv()
                assert obj1 == b'spam', 'expected: obj1 == orig1'
                # When going to another interpreter we get a copy.
                assert id(obj1) != {id(orig1)}, 'expected: obj1 is not orig1'
                orig2 = b'eggs'
                print(id(orig2))
                s.send_nowait(orig2)
                """),
            channels=dict(r=r1, s=s2),
        )
        obj2 = r2.recv()
        self.assertEqual(obj2, b'eggs')
        self.assertNotEqual(id(obj2), int(out))

    def test_send_recv_different_threads(self):
        # A background thread echoes the first object it receives back
        # onto the same channel; polling covers the not-yet-sent window.
        r, s = interpreters.create_channel()

        def f():
            while True:
                try:
                    obj = r.recv()
                    break
                except interpreters.ChannelEmptyError:
                    time.sleep(0.1)
            s.send(obj)
        t = threading.Thread(target=f)
        t.start()

        orig = b'spam'
        s.send(orig)
        t.join()
        obj = r.recv()

        self.assertEqual(obj, orig)
        self.assertIsNot(obj, orig)

    def test_send_recv_nowait_main(self):
        # Non-blocking receive of an already-queued object.
        r, s = interpreters.create_channel()
        orig = b'spam'
        s.send_nowait(orig)
        obj = r.recv_nowait()
        self.assertEqual(obj, orig)
        self.assertIsNot(obj, orig)

    def test_send_recv_nowait_main_with_default(self):
        # recv_nowait on an empty channel returns the supplied default.
        r, _ = interpreters.create_channel()
        obj = r.recv_nowait(None)
        self.assertIsNone(obj)

    def test_send_recv_nowait_same_interpreter(self):
        interp = interpreters.create()
        interp.run(dedent("""
            from test.support import interpreters
            r, s = interpreters.create_channel()
            orig = b'spam'
            s.send_nowait(orig)
            obj = r.recv_nowait()
            assert obj == orig, 'expected: obj == orig'
            # When going back to the same interpreter we get the same object.
            assert obj is not orig, 'expected: obj is not orig'
            """))

    @unittest.skip('broken (see BPO-...)')
    def test_send_recv_nowait_different_interpreters(self):
        # Same as the blocking cross-interpreter test, but non-blocking.
        r1, s1 = interpreters.create_channel()
        r2, s2 = interpreters.create_channel()
        orig1 = b'spam'
        s1.send_nowait(orig1)
        out = _run_output(
            interpreters.create(),
            dedent(f"""
                obj1 = r.recv_nowait()
                assert obj1 == b'spam', 'expected: obj1 == orig1'
                # When going to another interpreter we get a copy.
                assert id(obj1) != {id(orig1)}, 'expected: obj1 is not orig1'
                orig2 = b'eggs'
                print(id(orig2))
                s.send_nowait(orig2)
                """),
            channels=dict(r=r1, s=s2),
        )
        obj2 = r2.recv_nowait()
        self.assertEqual(obj2, b'eggs')
        self.assertNotEqual(id(obj2), int(out))

    def test_recv_channel_does_not_exist(self):
        # Operations on never-created channel ids raise ChannelNotFoundError.
        ch = interpreters.RecvChannel(1_000_000)
        with self.assertRaises(interpreters.ChannelNotFoundError):
            ch.recv()

    def test_send_channel_does_not_exist(self):
        ch = interpreters.SendChannel(1_000_000)
        with self.assertRaises(interpreters.ChannelNotFoundError):
            ch.send(b'spam')

    def test_recv_nowait_channel_does_not_exist(self):
        ch = interpreters.RecvChannel(1_000_000)
        with self.assertRaises(interpreters.ChannelNotFoundError):
            ch.recv_nowait()

    def test_send_nowait_channel_does_not_exist(self):
        ch = interpreters.SendChannel(1_000_000)
        with self.assertRaises(interpreters.ChannelNotFoundError):
            ch.send_nowait(b'spam')

    def test_recv_nowait_empty(self):
        # Without a default, recv_nowait on an empty channel raises.
        ch, _ = interpreters.create_channel()
        with self.assertRaises(interpreters.ChannelEmptyError):
            ch.recv_nowait()

    def test_recv_nowait_default(self):
        # The default is returned only when the queue is empty; queued
        # values (including None) are delivered in FIFO order first.
        default = object()
        rch, sch = interpreters.create_channel()
        obj1 = rch.recv_nowait(default)
        sch.send_nowait(None)
        sch.send_nowait(1)
        sch.send_nowait(b'spam')
        sch.send_nowait(b'eggs')
        obj2 = rch.recv_nowait(default)
        obj3 = rch.recv_nowait(default)
        obj4 = rch.recv_nowait()
        obj5 = rch.recv_nowait(default)
        obj6 = rch.recv_nowait(default)

        self.assertIs(obj1, default)
        self.assertIs(obj2, None)
        self.assertEqual(obj3, 1)
        self.assertEqual(obj4, b'spam')
        self.assertEqual(obj5, b'eggs')
        self.assertIs(obj6, default)
| StarcoderdataPython |
1713116 | <gh_stars>1-10
"""
Instagram插件: 有搜索接口可用
"""
import json
import traceback
import requests
from commonbaby.httpaccess.httpaccess import HttpAccess
from datacontract import IscoutTask
from idownclient.clientdatafeedback.scoutdatafeedback import NetworkProfile
from idownclient.scout.plugin.scoutplugbase import ScoutPlugBase
class Instagram(ScoutPlugBase):
    """Instagram info search plugin.

    Resolves Instagram identities from a profile URL, an exact username,
    or a fuzzy keyword, and yields NetworkProfile results.
    """

    def __init__(self, task: IscoutTask):
        ScoutPlugBase.__init__(self)
        self.task = task
        self._ha: HttpAccess = HttpAccess()
        self.basic_url = "https://www.instagram.com/"
        # NOTE(review): this is a multi-line *string*, not a dict. It is
        # later passed to requests.get as the second positional argument,
        # which is `params`, not `headers` -- so these headers are never
        # actually sent. Confirm and fix (use headers={...}).
        self.headers = """
        accept: */*,
        accept-encoding: gzip, deflate, br,
        accept-language: zh-CN,zh;q=0.9,
        user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36,
        """
        # Search-box API: top-search suggestions for a query.
        self.searchBox_api = 'https://www.instagram.com/web/search/topsearch/?context=blended&query={}&include_reel=true'
        # User-data API: JSON profile for a given username.
        self.userData_api = 'https://www.instagram.com/{}/?__a=1'
        self.source = "instagram"
        self.reason = "instagram身份落地"

    # #######################################
    # Query-type dispatch: route by query length (URL vs userid vs keyword).
    def judgment_url(self, query: str, level: int, reason: str):
        """Treat *query* as a profile URL if its length fits; otherwise
        fall through to the userid check."""
        if query is None or query == "":
            self._logger.error(f'Invalid query for search_by_url, error: {query}')
            return
        # Heuristic: a full profile URL is 27..52 chars long.
        if 27 <= len(query) <= 52:
            return self.search_by_url(query, level, reason)
        else:
            return self.judgment_userid(query, level, reason)

    def judgment_userid(self, query: str, level: int, reason: str):
        """Treat *query* as an exact username if 5..26 chars long."""
        if query is None or query == "":
            self._logger.error(f'Invalid query for search_by_userid, error: {query}')
            return
        if 5 <= len(query) <= 26:
            return self.search_by_userid(query, level, reason)
        else:
            return

    def judgment_keyword(self, query: str, level: int, reason: str):
        """Treat very short queries (1..4 chars) as fuzzy keywords."""
        if query is None or query == "":
            self._logger.error(f'Invalid query for search_keyword, error: {query}')
            return
        if 1 <= len(query) < 5:
            return self.search_keyword(query, level, reason)

    # #######################################
    # Entry point: exact username known.
    def search_by_userid(self, userid: str, level: int, reason: str):
        """Search a user by exact username (userid), e.g. "jaychou",
        "jjlin"."""
        # Provisional length bounds for a plausible username.
        if userid is None or userid == "" or not 5 <= len(userid) <= 26:
            self._logger.error(f'Invalid userid for instagram search, error: {userid}')
            return
        if not userid.startswith('https://'):
            self._logger.info("userid matches search rules√")
            self._logger.info(f"Get a userid to start search...: {userid}")
            return self._search_userid(userid, level, reason)

    # Entry point: profile home-page URL known.
    def search_by_url(self, url: str, level: int, reason: str):
        """Search a user by profile URL,
        e.g. https://www.instagram.com/jaychou/"""
        # Provisional length bounds for a URL (len(self.basic_url) is 26).
        if url is None or url == "" or not 27 <= len(url) <= 52:
            self._logger.error(f'Invalid url for instagram search: {url}')
            return
        if url.startswith("https://www."):
            self._logger.info("url matches search rules√")
            self._logger.info(f"Get a userUrl and start search...: {url}")
            if url.endswith('/'):
                keyword = url.split('/')[-2]
            else:
                keyword = url.split('/')[-1]
            # Extract the trailing userid and build the JSON data URL.
            data_url = self.basic_url + f"{keyword}" + "/?__a=1"
            return self._search_url(data_url, level, reason)

    # Entry point: fuzzy search.
    def search_keyword(self, keyword: str, level: int, reason: str):
        """Fuzzy search: find users related to *keyword* (first 10 by
        default)."""
        # Provisional length bounds for a fuzzy keyword.
        if keyword is None or keyword == "" or not 1 <= len(keyword) < 5:
            self._logger.error(f'Invalid keyword for instagram search, error: {keyword}')
            return
        # NOTE(review): operator precedence — this reads
        # (not A) or B instead of not (A or B); the `or startswith('http://')`
        # branch makes the condition True for http:// keywords too.
        # Likely intended: not (keyword.startswith('https://') or
        # keyword.startswith('http://')). Confirm.
        if not keyword.startswith('https://') or keyword.startswith('http://'):
            self._logger.info("keyword matches search rules√")
            self._logger.info("Get a keyword, Start searching for related users")
            return self._search_keyword(keyword, level, reason)

    # #######################################
    # Intermediate layer: type-checked delegation to the fetchers.
    def _search_userid(self, userid: str, level: int, reason) -> iter:
        """Search by userid; returns a NetworkProfile."""
        if not isinstance(userid, str):
            self._logger.error("Invalid userid")
            return
        res: NetworkProfile = self.__get_user_by_userid(userid, reason)
        return res

    def _search_url(self, userurl: str, level: int, reason) -> iter:
        """Search by URL; returns a NetworkProfile."""
        if not isinstance(userurl, str):
            self._logger.error("Invalid userurl")
            return
        res: NetworkProfile = self.__get_user_by_url(userurl, reason)
        return res

    def _search_keyword(self, keyword: str, level: int, reason) -> iter:
        """Search by keyword; returns a generator of NetworkProfile."""
        if not isinstance(keyword, str):
            self._logger.error("Invalid keyword")
            return
        return self.__get_user_by_keyword(keyword, reason)

    # #######################################
    # Fetch layer: resolve a concrete user.
    def __get_user_by_userid(self, userid: str, reason: str) -> NetworkProfile:
        """Build the user-data API URL from the username and fetch it."""
        try:
            # User-data API endpoint: username -> profile-page JSON URL.
            user_home_url = self.userData_api.format(userid)
            return self.__get_user_by_url(user_home_url, reason)
        except Exception:
            self._logger.error(f"Get user by userid error: {traceback.format_exc()}")

    def __get_user_by_url(self, userurl: str, reason: str) -> NetworkProfile:
        """Fetch the user-data URL and parse the profile."""
        try:
            # Request the user-data API to obtain the profile JSON.
            self._logger.info(f"Start requesting user data: {userurl}")
            # NOTE(review): self.headers lands in the `params` argument of
            # requests.get (second positional); headers are not sent.
            html = requests.get(userurl, self.headers)
            # NOTE(review): `html` is a requests.Response, so the == ""
            # comparison is always False; only the None check can trigger.
            if html is None or html == "":
                return
            # __parse_user_profile is a generator; return its first item.
            for p_info in self.__parse_user_profile(html.text):
                return p_info
        except Exception:
            self._logger.error(f"Get user by url error: {traceback.format_exc()}")

    def __get_user_by_keyword(self, keyword: str, reason: str) -> NetworkProfile:
        """Query the search-box API and yield profiles for the related
        users (first 10 by default)."""
        try:
            # Search-box API: returns a batch of users related to the keyword.
            url = self.searchBox_api.format(keyword)
            users_li_info = requests.get(url, self.headers)
            if users_li_info is None or users_li_info == "":
                return
            start = 0
            stop = 10
            js_user_li_data = json.loads(users_li_info.text)
            if js_user_li_data:
                # Take the first 10 users from the result list.
                users = js_user_li_data["users"][start:stop]
                for user in users:
                    # Username is the unique key; build its data URL.
                    username = user['user']['username']
                    user_home_url = self.userData_api.format(username)
                    yield self.__get_user_by_url(user_home_url, reason)
        except Exception:
            self._logger.error(f"Get user by keyword error: {traceback.format_exc()}")

    # #######################################
    # Parse layer: profile JSON -> NetworkProfile.
    def __parse_user_profile(self, html: str):
        """
        Parse the profile JSON (graphql.user) into a NetworkProfile.
        :param html: raw JSON text from the user-data API
        :return: generator yielding a single NetworkProfile
        """
        js_html = json.loads(html)
        if js_html['graphql']:
            user = js_html['graphql']['user']
            username = user['username']
            profile = NetworkProfile(username, username, 'instagram')
            # This endpoint exposes no other linked accounts.
            profile.networkid = None
            profile.userid = username  # unique
            # profile.user_id = user['id']  # numeric id
            profile.url = self.basic_url + f'{username}'  # profile home page
            profile.source = self.source
            profile.reason = self.reason
            profile.emails = None
            profile.phones = None
            profile.nickname = username
            profile.gender = None
            profile.birthday = None
            profile.address = None
            profile.profile_pic = self.__handling_user_avatar(user['profile_pic_url_hd'])
            # Additional details collected below.
            detail = dict()
            detail['biography'] = user['biography']  # self-introduction
            detail['posts'] = user['edge_owner_to_timeline_media']['count']  # post count
            detail['fans'] = user['edge_followed_by']['count']  # follower count
            detail['follow'] = user['edge_follow']['count']  # following count
            detail['full_name'] = user['full_name']  # full name
            detail['is_verified'] = user['is_verified']  # verified flag
            detail['external_url'] = user['external_url']  # external link (e.g. YouTube); may be absent
            detail['profile_pic_url'] = user['profile_pic_url_hd']  # HD avatar URL
            profile._details = f'{detail}'.strip()
            yield profile

    # Download the user avatar (raw bytes).
    def __handling_user_avatar(self, profile_pic_url_hd):
        """Download the avatar image and return its raw bytes."""
        self._logger.info("Get a user avatar and start processing")
        try:
            res = requests.get(profile_pic_url_hd)
            res_data = res.content
            return res_data
            # str_data = helper_str.base64bytes(res_data)
            # return '?utf-8?b?' + str_data
        except Exception:
            self._logger.error("Download avatar error: {}".format(traceback.format_exc()))
            # return pic_url
| StarcoderdataPython |
3230008 | <gh_stars>100-1000
import torch
import torch.nn as nn
from torch.nn import init
def weights_init_cpm(m):
    """CPM-style in-place initialization for a single module.

    Conv* layers get weights ~ N(0, 0.01) and zero biases (when present);
    BatchNorm2d layers get unit weights and zero biases. Any other module
    type is left untouched. Intended for use with ``model.apply(...)``.
    """
    module_name = m.__class__.__name__
    if 'Conv' in module_name:
        m.weight.data.normal_(0, 0.01)
        if m.bias is not None:
            m.bias.data.zero_()
    elif 'BatchNorm2d' in module_name:
        m.weight.data.fill_(1)
        m.bias.data.zero_()
def weights_init_normal(m):
    """Initialize *m* in place: Conv/Linear weights ~ U(0, 0.02);
    BatchNorm2d weight from the bounds below, bias 0.

    Modules matching none of the patterns are untouched. Uses the
    in-place ``init.*_`` API: the non-underscore variants used before
    (``init.uniform``, ``init.constant``) were deprecated and later
    removed from ``torch.nn.init``.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.uniform_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.uniform_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        # NOTE(review): bounds (1.0, 0.02) are reversed (low > high);
        # upstream GAN inits use normal_(1.0, 0.02) here — confirm intent.
        init.uniform_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """Xavier (Glorot) normal in-place initialization.

    Conv/Linear weights get xavier_normal_ with gain=1; BatchNorm2d
    weights are drawn from the bounds below with zero bias. Other
    modules are untouched. Uses the in-place ``init.*_`` API (the
    non-underscore variants were removed from ``torch.nn.init``).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        # NOTE(review): bounds (1.0, 0.02) are reversed (low > high);
        # upstream inits use normal_(1.0, 0.02) here — confirm intent.
        init.uniform_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
    """Kaiming (He) normal in-place initialization.

    Conv/Linear weights get kaiming_normal_(a=0, mode='fan_in');
    BatchNorm2d weights are drawn from the bounds below with zero bias.
    Other modules are untouched. Uses the in-place ``init.*_`` API
    (``init.kaiming_normal``/``init.uniform``/``init.constant`` were
    deprecated and later removed from ``torch.nn.init``).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        # NOTE(review): bounds (1.0, 0.02) are reversed (low > high);
        # upstream inits use normal_(1.0, 0.02) here — confirm intent.
        init.uniform_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
    """Orthogonal in-place initialization.

    Conv/Linear weights get orthogonal_ with gain=1; BatchNorm2d weights
    are drawn from the bounds below with zero bias. Other modules are
    untouched. Uses the in-place ``init.*_`` API (``init.orthogonal``
    et al. were removed from ``torch.nn.init``). The unconditional
    debug ``print(classname)`` that the sibling initializers had
    commented out is removed here as well.
    """
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        # NOTE(review): bounds (1.0, 0.02) are reversed (low > high);
        # upstream inits use normal_(1.0, 0.02) here — confirm intent.
        init.uniform_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
| StarcoderdataPython |
15794 | import napari
import time
from napari._qt.qthreading import thread_worker
import numpy as np
# Create the napari viewer window (opens the Qt GUI).
viewer = napari.Viewer()
# Background-worker pattern: https://napari.org/guides/stable/threading.html
@thread_worker
def loop_run():
    """Background worker: print a heartbeat and yield a fresh random
    2x2 image every 0.5 s, forever (until the worker is quit)."""
    while True:  # endless loop; each yield hands a frame to the GUI thread
        print("Hello world", time.time())
        time.sleep(0.5)
        yield np.random.random((2, 2))
def update_layer(image):
    """
    Update the image shown in the viewer layer named 'result',
    creating the layer on the first call.
    """
    try:
        viewer.layers['result'].data = image
    except KeyError:
        # Layer does not exist yet (first frame): create it.
        viewer.add_image(image, name='result')
# Start the background worker and route every yielded frame to the viewer.
worker = loop_run()
worker.yielded.connect(update_layer)
worker.start()

# Start the napari event loop (blocks until the window is closed).
napari.run()
| StarcoderdataPython |
160260 | <reponame>zywkloo/Insomnia-Dungeon
import pygame
from helper_functions import *
##################################################################################################
class Item:
    """A game item (sprite + metadata) that can lie in a room or a bag."""

    # Class-level registry mapping item id -> metadata, populated once at
    # class-definition time from item.csv (side effect at import!).
    # CSV columns: id, function, name, sprite path, description.
    item_data = {}
    for item in csv_loader('item.csv'):
        item_data[item[0]] = {'function':item[1],'name': item[2],'sprite':item[3],'Desc':item[4]}

    def __init__(self, item_id,location):
        """Create one item instance of type *item_id* placed at *location*.

        :param item_id: key into Item.item_data (looked up as str)
        :param location: (x, y) top-left pixel position in the room
        """
        self.id = item_id
        self.name = Item.item_data[str(item_id)]['name']
        # Full-size sprite drawn when the item lies in the room.
        self.sprite = pygame.image.load(Item.item_data[str(item_id)]['sprite']).convert_alpha()
        # 50x50 icon drawn when the item sits in the bag grid.
        self.icon = pygame.transform.scale(pygame.image.load(Item.item_data[str(item_id)]['sprite']).convert_alpha(),(50,50))
        # (width, height) of the sprite, used for mouse hit-testing.
        self.size = pygame.image.load(Item.item_data[str(item_id)]['sprite']).convert_alpha().get_size()
        self.function = Item.item_data[str(item_id)]['function']
        self.Desc = Item.item_data[str(item_id)]['Desc']
        self.Brief_Desc = None
        self.location = location
##################################################################################################
def take_item(room, bag):
    """Move the item under the mouse cursor (if any) from *room* to *bag*.

    Scans room.Items for the first item whose sprite rectangle contains
    the current mouse position; that item is appended to *bag* and
    removed from the room. Does nothing when no item is under the cursor.

    Fixes: the original loop variable was named ``Item``, shadowing the
    ``Item`` class; it also compared with ``!= None`` instead of
    ``is not None``.
    """
    x, y = pygame.mouse.get_pos()
    picked = None
    # First item whose bounding box contains the cursor wins.
    for candidate in room.Items:
        if (candidate.location[0] <= x <= candidate.location[0] + candidate.size[0]
                and candidate.location[1] <= y <= candidate.location[1] + candidate.size[1]):
            picked = candidate
            break
    if picked is not None:
        bag.append(picked)
        room.Items.remove(picked)
##################################################################################################
def render_bag(bag, sur_bag, screen):
    """Draw the bag contents as a 3-column grid of icons on *sur_bag*,
    then blit the bag surface onto *screen* at (620, 420).

    Icons are placed on a 60-pixel grid, left-to-right, top-to-bottom.
    """
    sur_bag.fill((51, 42, 31))
    for slot, itm in enumerate(bag):
        grid_col = slot % 3
        grid_row = slot // 3
        sur_bag.blit(itm.icon, (grid_col * 60, 60 * grid_row))
    screen.blit(sur_bag, (620, 420))
def render_item(room, screen):
    """Blit every item in *room* at its world location onto *screen*."""
    for itm in room.Items:
        screen.blit(itm.sprite, itm.location)
| StarcoderdataPython |
3352546 |
# Consider a string as follows.
s = "Harry and Hermione along with Ron went to Hogwarts to learn magic and also win battle against Lord Voldemort"
# Basic slicing format is s[startindex:stopindex:stepvalue(optional)];
# the stop index is NOT inclusive.
# A space is also counted as a character.
print(s[0:7]) # prints characters at indices 0 through 6
print(s[0:10:2]) # prints characters at indices 0 through 9 with a step of 2
print(s[-1]) # prints the last character of the string s
print(s[::-1]) # prints the string in reverse order
print(s[:]) # prints the whole string
k="Welcome"
print(k*3) # prints the string 3 times back-to-back
# String methods and functions
a="Hope <NAME>"
a=a.split(" ") # the name is split into parts on the delimiter " "
print(a) # prints the list of words separated by spaces
print(a[0].upper()) # converts each character in the string to uppercase
print(a[0].lower()) # converts each character in the string to lowercase
print(a[0].capitalize()) # converts the first character of the string to uppercase
print(a[0].startswith("an")) # True if the string starts with "an", else False
print(a[1].endswith("klaus")) # True if the string ends with "klaus", else False
a=' '.join(a) # join combines the words into one string with the given delimiter
print(a)
print(a.find('a')) # position of the first 'a' from the left, or -1 if absent
try:
    print(a.index('e')) # if the character is found its position is returned
    print(a.index('c')) # if the character is not found, index raises ValueError
except Exception as e:
    print("Element is not found")
m='Dark,Matter,is,found,everywhere'
m=m.replace(","," ") # replace swaps every "," for " " in the whole string
print(m)
k='hello1'
print(k.isalpha()) # True only if every character is alphabetic
print(k.isalnum()) # True only if every character is a letter or a digit
print(k.isdigit()) # True only if every character is a digit
# string library
import string
print(string.ascii_letters) # prints lower- and uppercase alphabets
print(string.ascii_lowercase) # prints lowercase alphabets
print(string.ascii_uppercase) # prints uppercase alphabets
print(string.digits) # prints digits 0-9
print("\n")
# Custom printing using the format method
s=str(input("Please Enter your name: "))
print("Dear {}, \nWelcome to Hacktober fest".format(s))
| StarcoderdataPython |
3290129 | import argparse
import os
import sys
import pyfmt
DEFAULT_PATH = os.getenv("BASE_CODE_DIR", ".")
DEFAULT_LINE_LENGTH = int(os.getenv("MAX_LINE_LENGTH", "100"))
def main():
    """Command-line entry point for pyfmt.

    Parses CLI options (falling back to environment-variable defaults),
    runs the combined isort + black formatting pass, and exits with the
    formatter's status code.
    """
    parser = argparse.ArgumentParser(prog="pyfmt")
    parser.add_argument(
        "path",
        nargs="?",
        default=DEFAULT_PATH,
        metavar="PATH",
        help="path to base directory where pyfmt will be run;"
        " defaults to $BASE_CODE_DIR or the current directory",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="don't write changes, just print the files that would be formatted",
    )
    parser.add_argument(
        "--line-length",
        type=int,
        default=DEFAULT_LINE_LENGTH,
        help="max characters per line; defaults to $MAX_LINE_LENGTH or 100",
    )
    parser.add_argument("--extra-isort-args", default="", help="additional args to pass to isort")
    parser.add_argument("--extra-black-args", default="", help="additional args to pass to black")
    opts = parser.parse_args()
    # pyfmt.pyfmt returns a process-style exit code (0 == no changes needed).
    exitcode = pyfmt.pyfmt(
        opts.path,
        check=opts.check,
        line_length=opts.line_length,
        extra_isort_args=opts.extra_isort_args,
        extra_black_args=opts.extra_black_args,
    )
    sys.exit(exitcode)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
156782 | <filename>examples/python/partner_data.py<gh_stars>0
# For demonstration this dict serves as the database of partners.
# In a real implementation this data would come from a database.
# Both the partner id (the dict key, an SSO id) and shared_key must be
# shared between services so signed requests can be verified.
partners = {
    'abcd123' : { # partner ssoId (abcd123)
        'name': 'Partner 1 inc.',
        'shared_key' : '<KEY>',
        'is_active' : True
    },
    'abcd1234':{ # partner ssoId (abcd1234)
        'name' : 'Partner 2 inc.',
        'shared_key' : '<KEY>',
        'is_active' : False
    }
}
| StarcoderdataPython |
3356625 | <gh_stars>1-10
#
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
from bl_operators.presets import AddPresetBase
from bpy.types import Operator
class AddPresetIntegrator(AddPresetBase, Operator):
    '''Add an Integrator Preset'''
    bl_idname = "render.cycles_integrator_preset_add"
    bl_label = "Add Integrator Preset"
    preset_menu = "CYCLES_MT_integrator_presets"

    # Preamble executed when the preset is applied: binds `cycles` to the
    # scene's Cycles settings container.
    preset_defines = [
        "cycles = bpy.context.scene.cycles"
    ]

    # Properties captured into the preset file (written in this order).
    preset_values = [
        "cycles.max_bounces",
        "cycles.min_bounces",
        "cycles.diffuse_bounces",
        "cycles.glossy_bounces",
        "cycles.transmission_bounces",
        "cycles.volume_bounces",
        "cycles.transparent_min_bounces",
        "cycles.transparent_max_bounces",
        "cycles.use_transparent_shadows",
        "cycles.caustics_reflective",
        "cycles.caustics_refractive",
        "cycles.blur_glossy"
    ]

    # Presets are stored under scripts/presets/cycles/integrator.
    preset_subdir = "cycles/integrator"
class AddPresetSampling(AddPresetBase, Operator):
    '''Add a Sampling Preset'''
    bl_idname = "render.cycles_sampling_preset_add"
    bl_label = "Add Sampling Preset"
    preset_menu = "CYCLES_MT_sampling_presets"

    # Preamble executed when the preset is applied: binds `cycles` to the
    # scene's Cycles settings container.
    preset_defines = [
        "cycles = bpy.context.scene.cycles"
    ]

    # Properties captured into the preset file (written in this order).
    preset_values = [
        "cycles.samples",
        "cycles.preview_samples",
        "cycles.aa_samples",
        "cycles.preview_aa_samples",
        "cycles.diffuse_samples",
        "cycles.glossy_samples",
        "cycles.transmission_samples",
        "cycles.ao_samples",
        "cycles.mesh_light_samples",
        "cycles.subsurface_samples",
        "cycles.volume_samples",
        "cycles.use_square_samples",
        "cycles.progressive",
        "cycles.seed",
        "cycles.sample_clamp_direct",
        "cycles.sample_clamp_indirect",
        "cycles.sample_all_lights_direct",
        "cycles.sample_all_lights_indirect",
    ]

    # Presets are stored under scripts/presets/cycles/sampling.
    preset_subdir = "cycles/sampling"
# All operator classes this module registers/unregisters with Blender.
classes = (
    AddPresetIntegrator,
    AddPresetSampling,
)
def register():
    """Register every preset operator class of this module with Blender."""
    from bpy.utils import register_class
    for operator_cls in classes:
        register_class(operator_cls)
def unregister():
    """Remove every preset operator class of this module from Blender."""
    from bpy.utils import unregister_class
    for operator_cls in classes:
        unregister_class(operator_cls)
# Allow running this file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| StarcoderdataPython |
3344922 | # coding: utf-8
"""
Collection of builder functions
"""
from typing import Callable, Optional, Generator
import torch
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau, \
StepLR, ExponentialLR
from torch.optim import Optimizer
from joeynmt.helpers import ConfigurationError
def build_gradient_clipper(config: dict) -> Optional[Callable]:
"""
Define the function for gradient clipping as specified in configuration.
If not specified, returns None.
Current options:
- "clip_grad_val": clip the gradients if they exceed this value,
see `torch.nn.utils.clip_grad_value_`
- "clip_grad_norm": clip the gradients if their norm exceeds this value,
see `torch.nn.utils.clip_grad_norm_`
:param config: dictionary with training configurations
:return: clipping function (in-place) or None if no gradient clipping
"""
clip_grad_fun = None
if "clip_grad_val" in config.keys():
clip_value = config["clip_grad_val"]
clip_grad_fun = lambda params: \
nn.utils.clip_grad_value_(parameters=params,
clip_value=clip_value)
elif "clip_grad_norm" in config.keys():
max_norm = config["clip_grad_norm"]
clip_grad_fun = lambda params: \
nn.utils.clip_grad_norm_(parameters=params, max_norm=max_norm)
if "clip_grad_val" in config.keys() and "clip_grad_norm" in config.keys():
raise ConfigurationError(
"You can only specify either clip_grad_val or clip_grad_norm.")
return clip_grad_fun
def build_optimizer(config: dict, parameters: Generator) -> Optimizer:
    """
    Create an optimizer for the given parameters as specified in config.

    Except for the weight decay and initial learning rate, default
    optimizer settings are used.

    Supported values for "optimizer": "sgd" (default), "adam",
    "adagrad", "adadelta", "rmsprop".

    The initial learning rate comes from "learning_rate" (default
    3.0e-4) and the weight decay from "weight_decay" (default 0).
    For "adam", "adam_betas" (default (0.9, 0.999)) is honored as well.

    Note that the scheduler state is saved in the checkpoint, so if you
    load a model for further training you have to use the same type of
    scheduler.

    :param config: configuration dictionary
    :param parameters: parameters to optimize
    :return: optimizer
    :raises ConfigurationError: on an unknown optimizer name
    """
    name = config.get("optimizer", "sgd").lower()
    lr = config.get("learning_rate", 3.0e-4)
    decay = config.get("weight_decay", 0)

    # Adam is the only optimizer with an extra (betas) hyperparameter.
    if name == "adam":
        return torch.optim.Adam(parameters,
                                weight_decay=decay,
                                lr=lr,
                                betas=config.get("adam_betas", (0.9, 0.999)))

    # The remaining optimizers all share the same constructor signature.
    plain_optimizers = {
        "adagrad": torch.optim.Adagrad,
        "adadelta": torch.optim.Adadelta,
        "rmsprop": torch.optim.RMSprop,
        "sgd": torch.optim.SGD,
    }
    if name not in plain_optimizers:
        raise ConfigurationError("Invalid optimizer. Valid options: 'adam', "
                                 "'adagrad', 'adadelta', 'rmsprop', 'sgd'.")
    return plain_optimizers[name](parameters, weight_decay=decay, lr=lr)
def build_scheduler(config: dict, optimizer: Optimizer, scheduler_mode: str,
                    hidden_size: int = 0) \
        -> (Optional[_LRScheduler], Optional[str]):
    """
    Create a learning rate scheduler if specified in config and
    determine when a scheduler step should be executed.

    Current options: "plateau", "decaying", "exponential", "noam",
    "warmupexponentialdecay". If no (or an unknown) scheduler is
    specified, (None, None) is returned, which results in a constant
    learning rate.

    :param config: training configuration
    :param optimizer: optimizer whose parameter groups the scheduler
        sets the learning rate for
    :param scheduler_mode: "min" or "max", depending on whether the
        validation score should be minimized or maximized
        (only relevant for "plateau")
    :param hidden_size: encoder hidden size (required for NoamScheduler)
    :return:
        - scheduler: scheduler object or None,
        - scheduler_step_at: "validation", "epoch", "step" or None
    """
    if not config.get("scheduling"):
        return None, None

    name = config["scheduling"].lower()

    if name == "plateau":
        # Stepped after every validation run.
        plateau = ReduceLROnPlateau(optimizer=optimizer,
                                    mode=scheduler_mode,
                                    verbose=False,
                                    threshold_mode='abs',
                                    factor=config.get("decrease_factor", 0.1),
                                    patience=config.get("patience", 10))
        return plateau, "validation"

    if name == "decaying":
        # Stepped after every epoch.
        return StepLR(optimizer=optimizer,
                      step_size=config.get("decaying_step_size", 1)), "epoch"

    if name == "exponential":
        # Stepped after every epoch.
        return ExponentialLR(optimizer=optimizer,
                             gamma=config.get("decrease_factor", 0.99)), "epoch"

    if name == "noam":
        # Stepped after every parameter update.
        noam = NoamScheduler(hidden_size=hidden_size,
                             factor=config.get("learning_rate_factor", 1),
                             warmup=config.get("learning_rate_warmup", 4000),
                             optimizer=optimizer)
        return noam, "step"

    if name == "warmupexponentialdecay":
        # Stepped after every parameter update.
        warmup_exp = WarmupExponentialDecayScheduler(
            min_rate=config.get("learning_rate_min", 1.0e-5),
            decay_rate=config.get("learning_rate_decay", 0.1),
            warmup=config.get("learning_rate_warmup", 4000),
            optimizer=optimizer,
            peak_rate=config.get("learning_rate_peak", 1.0e-3),
            decay_length=config.get("learning_rate_decay_length", 10000))
        return warmup_exp, "step"

    # Unknown scheduler name: fall back to a constant learning rate.
    return None, None
class NoamScheduler:
    """
    Inverse-square-root ("Noam") learning rate schedule from
    "Attention is All You Need" (Vaswani et al., 2017), Eq. 3:
    ramp up linearly during warm-up, then decay with step**-0.5.
    """

    def __init__(self,
                 hidden_size: int,
                 optimizer: torch.optim.Optimizer,
                 factor: float = 1,
                 warmup: int = 4000):
        """
        :param hidden_size: model/encoder hidden size (scales the rate)
        :param optimizer: wrapped optimizer
        :param factor: multiplicative decay factor
        :param warmup: number of warm-up steps
        """
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.hidden_size = hidden_size
        self._rate = 0

    def step(self):
        """Advance one step and push the new rate into all param groups."""
        self._step += 1
        new_rate = self._compute_rate()
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate

    def _compute_rate(self):
        """Learning rate for the current step (the `lrate` formula)."""
        step = self._step
        scale = self.hidden_size ** (-0.5)
        warmup_term = step * self.warmup ** (-1.5)
        decay_term = step ** (-0.5)
        return self.factor * (scale * min(decay_term, warmup_term))

    def state_dict(self):
        """Return a serializable snapshot of the scheduler state."""
        return {
            "step": self._step,
            "warmup": self.warmup,
            "factor": self.factor,
            "hidden_size": self.hidden_size,
            "rate": self._rate,
        }

    def load_state_dict(self, state_dict):
        """Restore state produced by ``state_dict``."""
        self._step = state_dict["step"]
        self.warmup = state_dict["warmup"]
        self.factor = state_dict["factor"]
        self.hidden_size = state_dict["hidden_size"]
        self._rate = state_dict["rate"]
class WarmupExponentialDecayScheduler:
    """
    Noam-like schedule with a tunable decay: ramp the learning rate up
    linearly to a peak during warm-up, then decay it exponentially,
    floored at a minimum rate.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 peak_rate: float = 1.0e-3,
                 decay_length: int = 10000,
                 warmup: int = 4000,
                 decay_rate: float = 0.5,
                 min_rate: float = 1.0e-5):
        """
        :param optimizer: wrapped optimizer
        :param peak_rate: learning rate reached at the end of warm-up
        :param decay_length: steps per full factor of decay_rate
        :param warmup: number of warm-up steps
        :param decay_rate: multiplicative decay per decay_length steps
        :param min_rate: lower bound on the learning rate
        """
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.decay_length = decay_length
        self.peak_rate = peak_rate
        self._rate = 0
        self.decay_rate = decay_rate
        self.min_rate = min_rate

    def step(self):
        """Advance one step and apply the new rate to all param groups."""
        self._step += 1
        new_rate = self._compute_rate()
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate

    def _compute_rate(self):
        """Learning rate for the current step."""
        step = self._step
        if step < self.warmup:
            # Linear ramp from 0 up toward peak_rate.
            rate = step * self.peak_rate / self.warmup
        else:
            # One factor of decay_rate every decay_length steps.
            exponent = (step - self.warmup) / self.decay_length
            rate = self.peak_rate * (self.decay_rate ** exponent)
        return max(rate, self.min_rate)

    def state_dict(self):
        """Return a serializable snapshot of the scheduler state."""
        return {
            "warmup": self.warmup,
            "step": self._step,
            "decay_length": self.decay_length,
            "peak_rate": self.peak_rate,
            "rate": self._rate,
            "decay_rate": self.decay_rate,
            "min_rate": self.min_rate,
        }

    def load_state_dict(self, state_dict):
        """Restore state produced by ``state_dict``."""
        self.warmup = state_dict['warmup']
        self._step = state_dict['step']
        self.decay_length = state_dict['decay_length']
        self.peak_rate = state_dict['peak_rate']
        self._rate = state_dict['rate']
        self.decay_rate = state_dict['decay_rate']
        self.min_rate = state_dict['min_rate']
| StarcoderdataPython |
1731686 | <filename>LuoguCodes/AT1476.py
from math import *
def isp(x):
    """Trial-division primality test: True iff x is a prime integer."""
    if x <= 1:
        return False
    if x == 2:
        return True
    # No divisor up to floor(sqrt(x)) means x is prime.
    return all(x % d != 0 for d in range(2, int(x ** 0.5) + 1))
def sim(x):
    """True iff x is divisible by none of 2, 3 and 5."""
    return all(x % d != 0 for d in (2, 3, 5))
# Python 2 entry point: read n and report "Prime" when n is prime, or when
# n is merely coprime to 2/3/5 (and not 1) — intentionally loose per AT1476.
# NOTE(review): the quoting below looks corrupted (';Not Prime';); it was
# most likely ['Not Prime', 'Prime'] originally — confirm against the source.
n = int(raw_input())
print [';Not Prime';, ';Prime';][isp(n) or sim(n) and n != 1]
| StarcoderdataPython |
3259494 | <gh_stars>0
import pymysql
import pandas as pd
import joblib
from sklearn.metrics import mean_squared_error
import numpy as np
# Guname = AA,BB,CC..... factor: light_num,schoolnum.... value 1,2,3,10,20....
def return_graph_data(GuName,factor):
    """Predict accident counts for one district and for perturbed values of
    one factor, returning the results as a JSON 'table' string.

    GuName: district code used in the SQL WHERE clause (e.g. "AA").
    factor: column to perturb (e.g. "trafficlight_num"); only the factors
    listed in the if/elif ladder below get a non-empty perturbation range.
    """
    # NOTE(review): pymysql.connect is usually called with keyword arguments
    # (host=, user=, password=, db=) -- confirm this positional order works.
    connection = pymysql.connect('localhost' ,'root','123123','dev')
    cursor = connection.cursor()
    get_Gu = "select * from traffic where gu_code=%s"
    # NOTE(review): (GuName) is not a tuple (missing trailing comma); pymysql
    # accepts a scalar here, but (GuName,) is the conventional form.
    cursor.execute(get_Gu,(GuName))
    datas = cursor.fetchall()
    df = pd.DataFrame(datas, columns = ["id", "gu_code", "Longitude", "Latitude", "trafficlight_num", "crosswalk_num", "station_num", "school_num", "avg_landprice", "house_1", "house_2", "house_3", "house_4", "commerce_1", "commerce_2", "commerce_3", "commerce_4", "green_1", "green_2", "green_3", "industry_1","industry_2", "industry_3", "limit_num", "mediansep_num", "island_num", "mean_lane", "mean_maxspeed", "mean_roadwth", "mean_roadlen", "busstop_num", "acc_count"])
    print("------------------pick selected gu from sql ------------------")
    print(df)
    # Feature columns only: drop id/gu_code/coords and the acc_count target.
    df_process = df.iloc[:,4:-1]
    print("------------------only factor columns(into xgboost)------------------")
    print(df_process)
    df_process[df_process.columns] = df_process[df_process.columns].apply(pd.to_numeric, downcast='float', errors='coerce')
    # Pre-trained XGBoost regressor serialized with joblib.
    xgboost_001 = joblib.load('xg_reg_002')
    pred_ori = xgboost_001.predict(df_process)
    df["pred_ori"] = pred_ori
    print("------------------pre_ori column add at df------------------")
    print(df)
    # Perturbation offsets depend on which factor is being varied.
    insert_value = []
    if(factor == "trafficlight_num" or factor == "crosswalk_num" or factor == "mean_lane"):
        insert_value = [-2,-1,0,1,2]
    elif(factor == "mean_roadwth"):
        insert_value = [-10,-5,0,5,10]
    elif (factor=="mediansep_num" or factor=="island_num" or factor=="school_num"):
        insert_value = [-1,0,1]
    # Keep a pristine copy so each perturbation starts from the original values.
    df_process_ori = df_process[factor].copy() #notion
    print(df_process_ori)
    for value in insert_value:
        df_process[factor] = df_process[factor] + value
        # Clamp negative counts to zero after the offset is applied.
        for j in range(len(df["gu_code"])):
            if(df_process[factor][j] < 0):
                df_process[factor][j] = 0
        print("------------------checkout changed factor ------------------")
        print(df[factor])
        print(df_process[factor])
        pred_factor = xgboost_001.predict(df_process)
        # Store each perturbed prediction in a column named after the offset.
        df[str(value)] = pred_factor
        df_process[factor] =df_process_ori
    # df["pred-decline"] = df["pred_factor"]-df["pred_ori"]
    # df["pred-decline"] = round(df["pred-decline"],2)
    print("------------------pred-decline = pred_factor - pred_ori------------------")
    print(df)
    # Sort descending.  (The original Korean comment said "by acc_num", but
    # the code sorts by pred_ori -- NOTE(review): confirm which is intended.)
    df = df.sort_values(["pred_ori"],ascending=[False])
    row_num =[]
    for i in range(len(df["gu_code"])):
        row_num.append(i+1)
    df["rank"]= row_num
    print("------------------rank add at df column------------------")
    print(df)
    # Keep only the prediction columns onwards for the web payload.
    x = df.loc[:,"pred_ori":]
    print("------------------finally data (send to web)------------------")
    print(x)
    json_x = x.to_json(orient='table')
    print(json_x)
    return json_x
return_graph_data("AA","trafficlight_num") #test data
3211157 | from app.infrastructure.smtp import Mail, create_message
from app.pkgs.token import TokenFactory
from flask import render_template
class EmailService(object):
    """Sends transactional mail (confirmation / password reset) over SMTP."""

    token_factory: TokenFactory

    def __init__(self, mail: Mail, default_mail_sender: str, token: TokenFactory):
        self.token_factory = token
        self.mail_sender = default_mail_sender
        self.mail = mail

    def send_email(self, to: str, subject: str, template, sender=None):
        """Build a message from ``template`` (HTML body) and send it."""
        effective_sender = sender or self.mail_sender
        message = create_message(
            sender_email=effective_sender,
            receiver_email=to,
            subject=subject,
            html_body=template,
        )
        self.mail.send(message)

    def send_confirm_email(self, email: str, confirm_url: str, template):
        """Mail a confirmation link: ``confirm_url`` + token bound to ``email``."""
        link = confirm_url + str(self.token_factory.generate_confirmation_token(email))
        body = render_template(template, confirm_url=link)  # 'user/activate.html'
        self.send_email(email, "Please confirm your email", body)

    def confirm_email(self, token: str) -> str:
        """Validate a confirmation token and return the email it encodes."""
        return self.token_factory.confirm_token(token)

    def send_reset_password(self, email: str, reset_password: str, template='user/new_password.html'):
        """Mail the user a freshly generated password."""
        # NOTE(review): the token is generated but never used -- kept for
        # behavioral parity; confirm whether it is intentional.
        token = self.token_factory.generate_confirmation_token(email)
        body = render_template(template, password=reset_password)
        self.send_email(email, "Reset password", body)
| StarcoderdataPython |
165917 | import os
def prepare_videos(
    videos, extension, start, duration, kinect_mask=True, width=1920, height=1080
):
    """Dump, segment and background-extract frames for each input video.

    For every entry in ``videos`` (path without extension) this shells out to
    ffmpeg to dump scaled PNG frames into a directory named after the video,
    runs a segmentation step (KinectMaskGenerator.exe or a DeepLab script),
    and grabs one background frame.  Any non-zero exit code aborts the whole
    process via exit(code).

    start: offset in seconds into each video; duration: length as a string,
    "-1" meaning "to the end".
    NOTE(review): paths are interpolated unquoted into shell commands --
    paths with spaces will break; confirm inputs are space-free.
    """
    video_start_secs = start % 60
    video_start_mins = start // 60
    print(f"Dumping frames and segmenting {len(videos)} input videos")
    for i, video in enumerate(videos):
        try:
            os.makedirs(video)
        except FileExistsError:
            # Directory already there: treat this video as already processed.
            continue
        print(f"Dumping frames from {video} ({i+1}/{len(videos)})...")
        ffmpeg_duration = ""
        if duration != "-1":
            ffmpeg_duration = f"-t {duration}"
        # 1) Dump frames, scaled to width x height, starting at the offset.
        code = os.system(
            f"ffmpeg -y -ss 00:{video_start_mins:02}:{video_start_secs:02}.000 "
            f"-vsync 0 "
            f"-i {video}{extension} -vf scale={width}:{height} "
            f"-map 0:0 {ffmpeg_duration} {video}/%04d_img.png -hide_banner"
            f" > bg_matting_logs.txt 2>&1"
        )
        if code != 0:
            exit(code)
        print(f"Segmenting frames...")
        # 2) Segment the dumped frames with one of two external tools.
        if kinect_mask:
            code = os.system(
                f"KinectMaskGenerator.exe {video}{extension} {video} {start} {duration}"
                f" > segmentation_logs_{i}.txt 2>&1"
            )
            if code != 0:
                exit(code)
        else:
            code = os.system(
                f"python segmentation_deeplab.py -i {video}"
                f" > segmentation_logs_{i}.txt 2>&1"
            )
            if code != 0:
                exit(code)
        print(f"Extracting background...")
        # 3) Grab a single frame at t=2s as the background plate.
        code = os.system(
            f"ffmpeg -y -i {video}{extension} -vf scale={width}:{height} "
            f"-map 0:0 -ss 00:00:02.000 -vframes 1 {video}.png -hide_banner"
            " > bg_matting_logs.txt 2>&1"
        )
        if code != 0:
            exit(code)
3322202 | """Module with main parts of NSGA-II algorithm.
Contains main loop"""
from nsga2.utils import NSGA2Utils
from nsga2.population import Population
class Evolution(object):
    """NSGA-II main loop: evolve a population and return the Pareto front."""

    def __init__(self, problem, num_of_generations, num_of_individuals):
        self.utils = NSGA2Utils(problem, num_of_individuals)
        self.population = None
        self.num_of_generations = num_of_generations
        self.on_generation_finished = []
        self.num_of_individuals = num_of_individuals

    def register_on_new_generation(self, fun):
        """Register a callback invoked as fun(population, generation_index)
        at the end of every generation."""
        self.on_generation_finished.append(fun)

    def evolve(self):
        """Run the evolution and return the first (non-dominated) front."""
        self.population = self.utils.create_initial_population()
        self.utils.fast_nondominated_sort(self.population)
        for front in self.population.fronts:
            self.utils.calculate_crowding_distance(front)
        children = self.utils.create_children(self.population)
        returned_population = None
        for i in range(self.num_of_generations):
            self.population.extend(children)
            self.utils.fast_nondominated_sort(self.population)
            new_population = Population()
            front_num = 0
            # Copy whole fronts while they still fit into the new population.
            while len(new_population) + len(self.population.fronts[front_num]) <= self.num_of_individuals:
                self.utils.calculate_crowding_distance(self.population.fronts[front_num])
                new_population.extend(self.population.fronts[front_num])
                front_num += 1
            # The next front only partially fits: rank it by crowding distance
            # and take the best remainder.
            # Bug fix 1: crowding distances must be computed for this front too,
            # otherwise the comparator ranks on stale/unset values.
            self.utils.calculate_crowding_distance(self.population.fronts[front_num])
            # Bug fix 2: the original called sorted(...) and discarded the
            # result, leaving the front unsorted; sort in place instead.
            self.population.fronts[front_num].sort(cmp=self.utils.crowding_operator)
            new_population.extend(self.population.fronts[front_num][0:self.num_of_individuals-len(new_population)])
            returned_population = self.population
            self.population = new_population
            children = self.utils.create_children(self.population)
            for fun in self.on_generation_finished:
                fun(returned_population, i)
        return returned_population.fronts[0]
3352795 | <reponame>Frikallo/YAKbot
import torch
import wandb
from argparse import ArgumentParser
import model
import sys
class dotdict(dict):
    """dot.notation access to dictionary attributes"""
    # NOTE(review): dict.get means a missing attribute yields None instead of
    # raising AttributeError -- confirm callers rely on that behavior.
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
def load_ckpt(args):
    """Loads a trained checkpoint.

    Restores ``model.Model`` from ``args.ckpt``, switches it to eval mode,
    freezes all parameters and moves it to ``args.device``.
    """
    net = model.Model.load_from_checkpoint(args.ckpt)
    net = net.eval().requires_grad_(False).to(args.device)
    return net
def save_params(args, net):
    """Persist only the fine-tuned parameters of ``net``.

    Every parameter whose name contains the substring ``args.ft`` is
    collected into a name -> tensor dict and saved to ``args.adapter``.
    """
    selected = {
        name: param
        for name, param in net.named_parameters()
        if args.ft in name
    }
    torch.save(selected, args.adapter)
def load_params(args):
    """Rebuild a model and splice in the adapter weights saved by save_params.

    Loads the name -> tensor dict from ``args.adapter``, overwrites the
    matching entries of a fresh ``model.Model(args)`` state dict, and returns
    the frozen model on ``args.device``.
    """
    params = torch.load(args.adapter)
    keys = list(params.keys())
    rnet = model.Model(args).eval().requires_grad_(False)
    rnet_dict = rnet.state_dict()
    # Only the saved (fine-tuned) entries are replaced; all others keep the
    # freshly constructed model's values.
    for pname in keys:
        rnet_dict[pname] = params[pname]
    rnet.load_state_dict(rnet_dict)
    return rnet.to(args.device)
def main():
    """Download a wandb model checkpoint and export its adapter weights.

    Fetches the run config and model artifact identified by --run, loads the
    checkpoint, then writes the config and the fine-tuned parameter subset
    to --savedir (named after the run id).
    """
    # Args
    parser = ArgumentParser()
    parser.add_argument("--run", type=str)
    parser.add_argument("--tmpdir", type=str)
    parser.add_argument("--savedir", type=str)
    parser.add_argument("--ckpt", type=str, default="v0")
    parser.add_argument("--device", type=str, default="cpu")
    args = parser.parse_args()
    print("Loading config...")
    api = wandb.Api()
    settings = api.run(args.run)
    # Wrap the config so downstream code can use attribute access.
    settings.config = dotdict(settings.config)
    # --run is expected as "user/project/run_id".
    (user_id, project_id, run_id) = args.run.split("/")
    settings.config.ckpt = f"{args.tmpdir}/model.ckpt"
    settings.config.adapter = f"{args.savedir}/{run_id}.ckpt"
    settings.config.device = args.device
    print("Downloading ckpt...")
    run = wandb.init()
    command = f"{user_id}/{project_id}/model-{run_id}:{args.ckpt}"
    artifact = run.use_artifact(command, type="model")
    artifact.download(root=args.tmpdir)
    print("Loading ckpt...")
    net = load_ckpt(settings.config)
    print("Saving adapter and config...")
    torch.save(dict(settings.config), f"{args.savedir}/{run_id}-config")
    save_params(settings.config, net)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3356806 | <reponame>no1xsyzy/bgmtinygrail
import queue
import threading
from bgmtinygrail.model_link.accounts import all_accounts
from bgmtinygrail.tinygrail.api import *
xsb_player = all_accounts['xsb_player']
NUM_F_WORKERS = 4
call = queue.Queue()
back = queue.Queue()
cid_set = []
def main():
    """Scan character ids 1..99999 and print those whose auction is partial.

    Fills the ``call`` queue, starts the daemon fetch/process workers, and
    blocks until both queues have been fully drained.
    """
    for cid in range(1, 100000):
        call.put(cid)
    workers = [threading.Thread(target=f_worker, daemon=True) for _ in range(NUM_F_WORKERS)]
    workers.append(threading.Thread(target=p_worker, daemon=True))
    for worker in workers:
        worker.start()
    # join() on the queues (not the threads): wait for every task_done().
    call.join()
    back.join()
    print(cid_set)
def f_worker():
    """Fetch worker: pull a character id from ``call``, query its auction via
    the tinygrail API, and push (cid, result) onto ``back``."""
    while True:
        cid = call.get()
        j = character_auction(xsb_player.tinygrail, cid)
        back.put((cid, j))
        call.task_done()
def p_worker():
    """Processing worker: record ids whose auction ``amount`` differs from
    ``total`` (i.e. partially filled auctions) into the shared ``cid_set``."""
    while True:
        cid, j = back.get()
        if j.amount != j.total:
            cid_set.append(cid)
            print(cid, j)
        back.task_done()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3323339 |
from math import sqrt
def vdot(u, v):
    """Dot product of two equal-length vectors (Python 2: uses xrange).

    Vectors may be sequences or dicts; dicts are assumed to share keys.
    """
    assert len(u) == len(v)
    tot = 0.0
    if hasattr(u, "keys"):
        for i in u:
            tot += u[i] * v[i]
    else:
        for i in xrange(len(u)):
            tot += u[i] * v[i]
    return tot
def vproj(u, v):
    """Projection of vector u onto vector v."""
    return vmuls(v, vdot(u, v) / vmag(v)**2)
def vadd(u, v):
    """Element-wise sum; returns a list (Python 2 map semantics).

    NOTE(review): for dicts this pairs u's keys with v's keys in iteration
    order instead of matching equal keys -- verify this is intended.
    """
    assert len(u) == len(v)
    if hasattr(u, "keys"):
        return map(lambda i, j: u[i] + v[j], u, v)
    else:
        return map(lambda a, b: a + b, u, v)
def vsub(u, v):
    """Element-wise difference u - v; returns a list.

    NOTE(review): dict branch pairs keys by iteration order (see vadd).
    """
    assert len(u) == len(v)
    if hasattr(u, "keys"):
        return map(lambda i, j: u[i] - v[j], u, v)
    else:
        return map(lambda a, b: a - b, u, v)
def vmul(u, v):
    """Element-wise product; returns a list.

    NOTE(review): dict branch pairs keys by iteration order (see vadd).
    """
    assert len(v) == len(u)
    if hasattr(u, "keys"):
        return map(lambda i, j: u[i] * v[j], u, v)
    else:
        return map(lambda a, b: a * b, u, v)
def vdiv(u, v):
    """Element-wise true division u / v (denominators coerced to float).

    NOTE(review): dict branch pairs keys by iteration order (see vadd).
    """
    assert len(v) == len(u)
    if hasattr(u, "keys"):
        return map(lambda i, j: u[i] / float(v[j]), u, v)
    else:
        return map(lambda a, b: a / float(b), u, v)
def vidiv(u, v):
    """Element-wise division WITHOUT float coercion (integer division for
    ints under Python 2).

    NOTE(review): dict branch pairs keys by iteration order (see vadd).
    """
    assert len(v) == len(u)
    if hasattr(u, "keys"):
        return map(lambda i, j: u[i] / v[j], u, v)
    else:
        return map(lambda a, b: a / b, u, v)
def vmag(v):
    """Euclidean magnitude (L2 norm) of v.

    NOTE(review): for a dict this iterates the KEYS, not the values --
    probably only meant for sequences; verify.
    """
    tot = 0.0
    for i in v:
        tot += i*i
    return sqrt(tot)
def vdist(u, v):
    """Euclidean distance between points u and v (Python 2: xrange)."""
    tot = 0.0
    for i in xrange(len(v)):
        tot += (u[i] - v[i])**2
    return sqrt(tot)
#=============================================================================
# vector and scalar
def vadds(u, s):
    """Add scalar s to every component; always returns a list
    (dict inputs lose their keys)."""
    if hasattr(u, "keys"):
        return [u[i] + s for i in u]
    else:
        return [a + s for a in u]
def vsubs(u, s):
    """Subtract scalar s from every component; always returns a list."""
    if hasattr(u, "keys"):
        return [u[i] - s for i in u]
    else:
        return [a - s for a in u]
def vmuls(u, s):
    """Multiply every component by scalar s; always returns a list."""
    if hasattr(u, "keys"):
        return [u[i] * s for i in u]
    else:
        return [a * s for a in u]
def vdivs(u, s):
    """Divide every component by scalar s (true division: s is coerced
    to float first); always returns a list."""
    s = float(s)
    if hasattr(u, "keys"):
        return [u[i] / s for i in u]
    else:
        return [a / s for a in u]
def vidivs(u, s):
    """Divide every component by scalar s WITHOUT float coercion
    (integer division for ints under Python 2); always returns a list."""
    if hasattr(u, "keys"):
        return [u[i] / s for i in u]
    else:
        return [a / s for a in u]
def in_left_halfspace2(a, b, p):
    """Returns True is point p is to the left of line a<->b.
    where left is defined as standing at a and facing towards b"""
    # Sign of the 2D cross product (b - a) x (p - a); <= 0 also includes
    # points exactly on the line.
    return (b[0]-a[0]) * (p[1]-a[1]) - (b[1]-a[1]) * (p[0]-a[0]) <= 0
def in_triangle2(a, b, c, pos):
    """Returns True is pos in triangle a,b,c"""
    # The winding order of the triangle decides on which side of each edge
    # an interior point must lie.
    clockwise = in_left_halfspace2(b, a, c)
    if clockwise:
        return (in_left_halfspace2(b, a, pos) and
                in_left_halfspace2(c, b, pos) and
                in_left_halfspace2(a, c, pos))
    else:
        return (in_left_halfspace2(a, b, pos) and
                in_left_halfspace2(b, c, pos) and
                in_left_halfspace2(c, a, pos))
def in_polygon2(pts, pos):
    """Returns True if point 'pos' in convex polygon with points 'pts'

    Only valid for CONVEX polygons.  Python 2 code (xrange).
    """
    assert len(pts) >= 3, Exception("polygon must have at least 3 points")
    # Determine winding order from the first three vertices, then require
    # pos to be on the interior side of every edge (including the closing one).
    clockwise = in_left_halfspace2(pts[1], pts[0], pts[2])
    if clockwise:
        for i in xrange(1, len(pts)):
            if not in_left_halfspace2(pts[i], pts[i-1], pos):
                return False
        return in_left_halfspace2(pts[0], pts[-1], pos)
    else:
        for i in xrange(0, len(pts)-1):
            if not in_left_halfspace2(pts[i], pts[i+1], pos):
                return False
        return in_left_halfspace2(pts[-1], pts[0], pos)
| StarcoderdataPython |
3368554 | <reponame>benhoyt/pythondotorg<filename>jobs/forms.py
from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django_comments_xtd.conf import settings as comments_settings
from django_comments_xtd.forms import CommentForm
from django_comments_xtd.models import TmpXtdComment
from .models import Job
from cms.forms import ContentManageableModelForm
class JobForm(ContentManageableModelForm):
    """Submission form for a Job posting (python.org jobs board)."""
    class Meta:
        model = Job
        fields = (
            'category',
            'job_types',
            'company',
            'city',
            'region',
            'country',
            'description',
            'requirements',
            'contact',
            'email',
            'url',
            'telecommuting',
            'agencies',
        )
        widgets = {
            'job_types': CheckboxSelectMultiple(),
        }
    def __init__(self, *args, **kwargs):
        """Tweak auto-generated field labels/help text for nicer display."""
        super().__init__(*args, **kwargs)
        # Checkbox list needs no extra help text.
        self.fields['job_types'].help_text = None
        self.fields['telecommuting'].label = 'Is telecommuting allowed?'
        self.fields['agencies'].label = 'Is job on behalf of an agency?'
class JobCommentForm(CommentForm):
    """django-comments-xtd form for commenting on a Job, with threaded
    replies via the hidden ``reply_to`` field."""
    reply_to = forms.IntegerField(required=True, initial=0, widget=forms.HiddenInput())
    def __init__(self, *args, **kwargs):
        """Accept an optional ``comment`` kwarg (the comment being replied
        to) and re-style the default fields with placeholders."""
        comment = kwargs.pop("comment", None)
        if comment:
            # Pre-fill reply_to so the new comment threads under its parent.
            initial = kwargs.pop("initial", {})
            initial.update({"reply_to": comment.pk})
            kwargs["initial"] = initial
        super(JobCommentForm, self).__init__(*args, **kwargs)
        self.fields['name'] = forms.CharField(
            widget=forms.TextInput(attrs={'placeholder':_('name')}))
        self.fields['email'] = forms.EmailField(
            label=_("Email"), help_text=_("Required for comment verification"),
            widget=forms.TextInput(attrs={'placeholder':_('email')})
        )
        self.fields['url'] = forms.URLField(
            required=False,
            widget=forms.TextInput(attrs={'placeholder':_('website')}))
        self.fields['comment'] = forms.CharField(
            widget=forms.Textarea(attrs={'placeholder':_('comment')}),
            max_length=comments_settings.COMMENT_MAX_LENGTH)
    def get_comment_model(self):
        """Use the temporary comment model (pending email confirmation)."""
        return TmpXtdComment
    def get_comment_create_data(self):
        """Extend the base payload with threading fields and followup flag."""
        data = super(JobCommentForm, self).get_comment_create_data()
        data.update({'thread_id': 0, 'level': 0, 'order': 1,
                     'parent_id': self.cleaned_data['reply_to'],
                     'followup': True})
        if comments_settings.COMMENTS_XTD_CONFIRM_EMAIL:
            # comment must be verified before getting approved
            data['is_public'] = False
        return data
| StarcoderdataPython |
94817 | <reponame>nyquist-h/premock<filename>reggaefile.py
from reggae import object_files, link, Build, user_vars, optional
san_opts = ""
if 'production' in user_vars:
san_opts = '-fsanitize=address'
includes = [".", "example/cpp/test", "example/src",
"example/deps", "example/cpp/mocks"]
common_flags = san_opts + " -Wall -Werror -Wextra -g"
c_flags = common_flags
prod_flags = c_flags + " -include mocks.h"
cpp_flags = common_flags + " -std=c++14"
linker_flags = san_opts
# production code we want to test
prod_objs = object_files(src_dirs=["example/src"],
includes=includes,
flags=prod_flags)
# C dependencies of the production code
dep_objs = object_files(src_dirs=["example/deps"],
flags=c_flags)
# Test code where the mock implementations live
mock_objs = object_files(src_dirs=["example/cpp/mocks"],
includes=includes,
flags=cpp_flags)
# The unit tests themselves
test_objs = object_files(src_dirs=["example/cpp/test"],
includes=includes,
flags=cpp_flags)
# The example_cpp binary
example_cpp = link(exe_name="example_cpp",
dependencies=[test_objs, prod_objs,
dep_objs, mock_objs],
flags=linker_flags)
# Unit tests for premock itself
ut_cpp_objs = object_files(src_dirs=["tests"],
flags=cpp_flags,
includes=[".", "tests"])
ut_cpp = link(exe_name="ut_cpp", dependencies=ut_cpp_objs, flags=linker_flags)
d_objs = object_files(src_dirs=["example/d"],
src_files=["premock.d"],
flags='-g -unittest',
includes=[".", "example/d"])
ut_d = link(exe_name="example_d",
dependencies=[d_objs, prod_objs, dep_objs],
flags="-L-lstdc++")
build = Build(example_cpp, ut_cpp, optional(ut_d))
| StarcoderdataPython |
46648 | <filename>paleomix/nodes/bedtools.py<gh_stars>0
#!/usr/bin/python
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from paleomix.node import Node, NodeError
from paleomix.common.bedtools import read_bed_file, pad_bed_records, merge_bed_records
from paleomix.common.fileutils import move_file, reroot_path
class PaddedBedNode(Node):
    """Simple node for padding BED records a fixed amount and merging
    overlapping records. Columns beyond the 3rd column are dropped.
    """
    def __init__(self, infile, outfile, fai_file, amount=0, dependencies=()):
        """infile/outfile: BED paths; fai_file: FASTA index supplying contig
        lengths (used to clamp padded records); amount: bases of padding."""
        self._amount = int(amount)
        self._infile = infile
        self._outfile = outfile
        self._fai_file = fai_file
        Node.__init__(
            self,
            description="<PaddedBed (%i): %r -> %r>" % (amount, infile, outfile),
            input_files=(infile, fai_file),
            output_files=(outfile,),
            dependencies=dependencies,
        )
    def _run(self, config, temp):
        """Read contig sizes, then pad, merge and write the BED records to a
        temporary location (moved into place by _teardown)."""
        contigs = {}
        with open(self._fai_file) as handle:
            for line in handle:
                # .fai format: name, length, ... (tab separated).
                name, length, _ = line.split("\t", 2)
                if name in contigs:
                    raise NodeError(
                        "Reference genome contains multiple "
                        "identically named contigs (%r)!" % (name,)
                    )
                contigs[name] = int(length)
        with open(reroot_path(temp, self._outfile), "w") as handle:
            records = list(read_bed_file(self._infile, contigs=contigs))
            # pad_bed_records mutates `records` in place, clamped to contig sizes.
            pad_bed_records(records=records, padding=self._amount, max_sizes=contigs)
            for record in merge_bed_records(records):
                handle.write("%s\n" % (record,))
    def _teardown(self, config, temp):
        """Atomically move the finished file from temp to its destination."""
        source = reroot_path(temp, self._outfile)
        move_file(source, self._outfile)
| StarcoderdataPython |
1621705 | <filename>osx/devkit/plug-ins/scripted/pyVertexBufferGenerator.py<gh_stars>1-10
# Copyright 2015 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk
# license agreement provided at the time of installation or download,
# or which otherwise accompanies this software in either electronic
# or hard copy form.
from ctypes import *
import maya.api.OpenMayaRender as omr
import maya.api.OpenMaya as om
# Example plugin: vertexBufferGenerator.py
#
# This plug-in is an example of a custom MPxVertexBufferGenerator.
# It provides custom vertex streams based on shader requirements coming from
# an MPxShaderOverride. The semanticName() in the MVertexBufferDescriptor is used
# to signify a unique identifier for a custom stream.
# This plugin is meant to be used in conjunction with the d3d11Shader or cgShader plugins.
# The vertexBufferGeneratorGL.cgfx and vertexBufferGeneratorDX11.fx files accompanying this sample
# can be loaded using the appropriate shader plugin.
# The Names of the streams and the stream data generated by this plugin match what is
# expected from the included effects files.
# This sample use the MyCustomBufferGenerator to create a custom made streams.
# The vertexBufferGenerator2GL.cgfx and vertexBufferGenerator2DX11.fx files accompanying this sample
# can be loaded using the appropriate shader plugin.
# The Names of the streams and the stream data generated by this plugin match what is
# expected from the included effects files.
# This sample use the MyCustomBufferGenerator2 to create a custom made streams
# by combining the Position and Normal streams in a single one.
def maya_useNewAPI():
    """
    The presence of this function tells Maya that the plugin produces, and
    expects to be passed, objects created using the Maya Python API 2.0.
    """
    # Intentionally empty: Maya only checks that this symbol exists.
    pass
class MyCustomBufferGenerator(omr.MPxVertexBufferGenerator):
    """Generates a float2 'texture' stream encoding a per-face color index
    (face_index % 3), consumed by the sample cgfx/fx effects."""
    def __init__(self):
        omr.MPxVertexBufferGenerator.__init__(self)
    def getSourceIndexing(self, object, sourceIndexing):
        """Assign each face-vertex an index of face % 3 (the color id)."""
        # get the mesh from the object
        mesh = om.MFnMesh(object)
        # if it is an empty mesh we do nothing.
        numPolys = mesh.numPolygons
        if numPolys == 0:
            return False
        vertToFaceVertIDs = sourceIndexing.indices()
        faceNum = 0
        # for each face
        for i in range(0, numPolys):
            # assign a color ID to all vertices in this face.
            faceColorID = faceNum % 3
            vertexCount = mesh.polygonVertexCount(i)
            for x in range(0, vertexCount):
                # set each face vertex to the face color
                vertToFaceVertIDs.append(faceColorID)
            faceNum += 1
        # assign the source indexing
        sourceIndexing.setComponentType(omr.MComponentDataIndexing.kFaceVertex)
        return False
    def getSourceStreams(self, object, sourceStreams):
        """No source streams required: the data is synthesized from indices."""
        #No source stream needed
        return False
    def createVertexStream(self, object, vertexBuffer, targetIndexing, sharedIndexing, sourceStreams):
        """Fill the requested buffer with (1.0, colorIndex) per vertex.

        Writes through raw ctypes addresses; the buffer layout must be the
        float2 texture format checked below.
        """
        # get the descriptor from the vertex buffer.
        # It describes the format and layout of the stream.
        descriptor = vertexBuffer.descriptor()
        # we are expecting a float stream.
        if descriptor.dataType != omr.MGeometry.kFloat:
            return
        # we are expecting a float2
        if descriptor.dimension != 2:
            return
        # we are expecting a texture channel
        if descriptor.semantic != omr.MGeometry.kTexture:
            return
        # get the mesh from the current path
        # if it is not a mesh we do nothing.
        mesh = om.MFnMesh(object)
        indices = targetIndexing.indices()
        vertexCount = len(indices)
        if vertexCount <= 0:
            return
        # fill the data.
        buffer = vertexBuffer.acquire(vertexCount, True) # writeOnly = True - we don't need the current buffer values
        inc = sizeof(c_float)
        address = buffer
        for i in range(0, vertexCount):
            # Here we are embedding some custom data into the stream.
            # The included effects (vertexBufferGeneratorGL.cgfx and
            # vertexBufferGeneratorDX11.fx) will alternate
            # red, green, and blue vertex colored triangles based on this input.
            c_float.from_address(address).value = 1.0
            address += inc
            c_float.from_address(address).value = indices[i] # color index
            address += inc
        # commit the buffer to signal completion.
        vertexBuffer.commit(buffer)
class MyCustomBufferGenerator2(omr.MPxVertexBufferGenerator):
    """Builds a combined stream from the Position and Normal source streams:
    (position.y, position.z, normal.x[, normal.z]) per vertex, as float or
    int32 (scaled by 255) depending on the requested buffer format."""
    def __init__(self):
        omr.MPxVertexBufferGenerator.__init__(self)
    def getSourceIndexing(self, object, sourceIndexing):
        """Use the mesh's per-polygon vertex ids as the source indexing."""
        # get the mesh from the object
        mesh = om.MFnMesh(object)
        (vertexCount, vertexList) = mesh.getVertices()
        vertCount = len(vertexList)
        vertices = sourceIndexing.indices()
        for i in range(0, vertCount):
            vertices.append( vertexList[i] )
        return True
    def getSourceStreams(self, object, sourceStreams):
        """Declare the two source streams this generator combines."""
        sourceStreams.append( "Position" )
        sourceStreams.append( "Normal" )
        return True
    def createVertexStream(self, object, vertexBuffer, targetIndexing, sharedIndexing, sourceStreams):
        """Combine mapped Position/Normal buffers into the composite stream.

        Raw ctypes pointer arithmetic: the per-vertex strides are derived
        from each stream's own dimension so 3- and 4-component sources both
        work.  Buffers are unmapped in reverse order of mapping.
        """
        # get the descriptor from the vertex buffer.
        # It describes the format and layout of the stream.
        descriptor = vertexBuffer.descriptor()
        # we are expecting a float or int stream.
        dataType = descriptor.dataType
        if dataType != omr.MGeometry.kFloat and dataType != omr.MGeometry.kInt32:
            return
        # we are expecting a dimension of 3 or 4
        dimension = descriptor.dimension
        if dimension != 4 and dimension != 3:
            return
        # we are expecting a texture channel
        if descriptor.semantic != omr.MGeometry.kTexture:
            return
        # get the mesh from the current path
        # if it is not a mesh we do nothing.
        mesh = om.MFnMesh(object)
        indices = targetIndexing.indices()
        vertexCount = len(indices)
        if vertexCount <= 0:
            return
        positionStream = sourceStreams.getBuffer( "Position" )
        if positionStream == None or positionStream.descriptor().dataType != omr.MGeometry.kFloat:
            return
        positionDimension = positionStream.descriptor().dimension
        if positionDimension != 3 and positionDimension != 4:
            return
        normalStream = sourceStreams.getBuffer( "Normal" )
        if normalStream == None or normalStream.descriptor().dataType != omr.MGeometry.kFloat:
            return
        normalDimension = normalStream.descriptor().dimension
        if normalDimension != 3 and normalDimension != 4:
            return
        positionBuffer = positionStream.map()
        if positionBuffer != 0:
            normalBuffer = normalStream.map()
            if normalBuffer != 0:
                compositeBuffer = vertexBuffer.acquire(vertexCount, True) # writeOnly = True - we don't need the current buffer values
                if compositeBuffer != 0:
                    compaddress = compositeBuffer
                    posaddress = positionBuffer
                    normaddress = normalBuffer
                    floatinc = sizeof(c_float)
                    intinc = sizeof(c_int)
                    if dataType == omr.MGeometry.kFloat:
                        for i in range(0, vertexCount):
                            xcompaddr = compaddress
                            ycompaddr = compaddress+floatinc
                            zcompaddr = compaddress+2*floatinc
                            wcompaddr = compaddress+3*floatinc
                            #xposaddr = posaddress
                            yposaddr = posaddress+floatinc
                            zposaddr = posaddress+2*floatinc
                            xnormaddr = normaddress
                            #ynormaddr = normaddress+floatinc
                            znormaddr = normaddress+2*floatinc
                            c_float.from_address(xcompaddr).value = c_float.from_address(yposaddr).value # store position.y
                            c_float.from_address(ycompaddr).value = c_float.from_address(zposaddr).value # store position.z
                            c_float.from_address(zcompaddr).value = c_float.from_address(xnormaddr).value # store normal.x
                            if dimension == 4:
                                c_float.from_address(wcompaddr).value = c_float.from_address(znormaddr).value # store normal.z
                            compaddress += dimension*floatinc
                            posaddress += positionDimension*floatinc
                            normaddress += normalDimension*floatinc
                    elif dataType == omr.MGeometry.kInt32:
                        # Same layout, but values scaled by 255 and truncated to ints.
                        for i in range(0, vertexCount):
                            xcompaddr = compaddress
                            ycompaddr = compaddress+intinc
                            zcompaddr = compaddress+2*intinc
                            wcompaddr = compaddress+3*intinc
                            #xposaddr = posaddress
                            yposaddr = posaddress+floatinc
                            zposaddr = posaddress+2*floatinc
                            xnormaddr = normaddress
                            #ynormaddr = normaddress+floatinc
                            znormaddr = normaddress+2*floatinc
                            c_int.from_address(xcompaddr).value = c_float.from_address(yposaddr).value * 255 # store position.y
                            c_int.from_address(ycompaddr).value = c_float.from_address(zposaddr).value * 255 # store position.z
                            c_int.from_address(zcompaddr).value = c_float.from_address(xnormaddr).value * 255 # store normal.x
                            if dimension == 4:
                                c_int.from_address(wcompaddr).value = c_float.from_address(znormaddr).value * 255 # store normal.z
                            compaddress += dimension*intinc
                            posaddress += positionDimension*floatinc
                            normaddress += normalDimension*floatinc
                    vertexBuffer.commit(compositeBuffer)
                normalStream.unmap()
            positionStream.unmap()
# This is the buffer generator creation function registered with the DrawRegistry.
# Used to initialize the generator.
def createMyCustomBufferGenerator():
    """Factory registered with MDrawRegistry for the first generator."""
    return MyCustomBufferGenerator()
def createMyCustomBufferGenerator2():
    """Factory registered with MDrawRegistry for the second generator."""
    return MyCustomBufferGenerator2()
# The following routines are used to register/unregister
# the vertex generators with Maya
def initializePlugin(obj):
    """Maya plugin entry point: register both vertex-buffer generators,
    once under a DX11 custom semantic name and once under a cg ATTR slot."""
    # register a generator based on a custom semantic for DX11. You can use any name in DX11.
    omr.MDrawRegistry.registerVertexBufferGenerator("myCustomStream", createMyCustomBufferGenerator)
    # register a generator based on a custom semantic for cg.
    # Pretty limited in cg so we hook onto the "ATTR" semantics.
    omr.MDrawRegistry.registerVertexBufferGenerator("ATTR8", createMyCustomBufferGenerator)
    # register a generator based on a custom semantic for DX11. You can use any name in DX11.
    omr.MDrawRegistry.registerVertexBufferGenerator("myCustomStreamB", createMyCustomBufferGenerator2)
    # register a generator based on a custom semantic for cg.
    # Pretty limited in cg so we hook onto the "ATTR" semantics.
    omr.MDrawRegistry.registerVertexBufferGenerator("ATTR7", createMyCustomBufferGenerator2)
def uninitializePlugin(obj):
    """Maya plugin exit point: deregister every semantic registered above."""
    omr.MDrawRegistry.deregisterVertexBufferGenerator("myCustomStream")
    omr.MDrawRegistry.deregisterVertexBufferGenerator("ATTR8")
    omr.MDrawRegistry.deregisterVertexBufferGenerator("myCustomStreamB")
    omr.MDrawRegistry.deregisterVertexBufferGenerator("ATTR7")
| StarcoderdataPython |
1602044 | <filename>pac_maker.py
import sys
import shutil
import os
from typing import Iterable
pacjsminified = """\
al=JSON.parse('allowlist')
bl=JSON.parse('blocklist')
proxy="__PROXY__",direct="DIRECT;",proxy=="__PRO"+"XY__"&&(proxy=eval("__PRO"+"XY__")),hop=Object.hasOwnProperty;function FindProxyForURL(i,r){if(hop.call(al,r))return direct;for(var e,l=r.lastIndexOf(".");;){if(l<=0)return hop.call(bl,r)?proxy:direct;if(e=r.slice(l+1),hop.call(bl,e))return proxy;l=r.lastIndexOf(".",l-1)}}"""
def readfile(filename: str):
    """Yield the lines of a UTF-8 text file one at a time (newlines kept)."""
    with open(filename, encoding='u8') as handle:
        yield from handle
def striplines(lines: Iterable[str]):
    """Yield non-empty, whitespace-stripped lines with '#' comments removed."""
    for raw in lines:
        # Everything after the first '#' is a comment; partition leaves the
        # line untouched when no '#' is present.
        code, _, _ = raw.partition('#')
        cleaned = code.strip()
        if cleaned:
            yield cleaned
def domainlist_to_jsonobjstr(lst: Iterable[str]):
    """Render domains as a JSON object literal: ['a','b'] -> '{"a":null,"b":null}'."""
    entries = ','.join('"%s":null' % domain for domain in lst)
    return '{' + entries + '}'
def make_pac_content(altxt: Iterable[str], bltxt: Iterable[str]):
    """Embed allow/block domain lists into the minified PAC template.

    Replaces the 'allowlist' and 'blocklist' placeholders inside
    ``pacjsminified`` with JSON object literals built from the two domain
    iterables, and returns the finished PAC script text.
    """
    al = domainlist_to_jsonobjstr(altxt)
    bl = domainlist_to_jsonobjstr(bltxt)
    return pacjsminified.replace('allowlist', al).replace('blocklist', bl)
def _main():
    """Regenerate pac.txt from allowlist.txt / blocklist.txt.

    Takes no command-line arguments; any existing pac.txt is backed up to
    pac.txt.bak before being overwritten.
    """
    if len(sys.argv) > 1:
        # Bug fix: the old message ('There is no argument.') was printed
        # precisely when arguments WERE supplied; say what we mean.
        print('This script takes no arguments.')
        sys.exit(1)
    altxt = striplines(readfile('allowlist.txt'))
    bltxt = striplines(readfile('blocklist.txt'))
    pac_content = make_pac_content(altxt, bltxt)
    if os.path.exists('pac.txt'):
        # Keep a backup of the previous output (metadata preserved).
        shutil.copy2('pac.txt', 'pac.txt.bak')
    # 'w' (not 'w+') is sufficient -- we only write; use UTF-8 to match
    # the encoding readfile() uses for the inputs.
    with open('pac.txt', 'w', encoding='u8') as f:
        f.write(pac_content)
if __name__ == "__main__":
    _main()
| StarcoderdataPython |
1740727 | <filename>app/src/app_factory.py
from fastapi import FastAPI
from settings import settings # noqa
from logger import configure_logger
def app_factory():
    """Build and return the configured FastAPI application.

    Configures logging first, then mounts the v1 API router.  The router
    import is deferred to function scope so logging/settings are initialised
    before the API modules load.
    """
    configure_logger()
    app = FastAPI(title="VideoStreamer",)
    from api.api_v1.api import api_router
    app.include_router(api_router)
    return app
| StarcoderdataPython |
34581 | import cv2
import urllib
import numpy as np
import multiprocessing as mp
stream = 'http://192.168.53.114:8000/streamLow.mjpg'
stream2 = 'http://192.168.53.114:8001/streamLow.mjpg'
def procImg(str, wind, stop):
    """Read an MJPEG HTTP stream and display decoded frames in window ``wind``.

    Runs until ``stop`` (a multiprocessing.Event) is set; pressing 'q' in the
    window sets it so sibling processes stop too.  Python 2 code:
    urllib.urlopen, byte data handled as str.
    NOTE(review): the parameter ``str`` and local ``bytes`` shadow builtins.
    """
    bytes = ''
    stream = urllib.urlopen(str)
    while not stop.is_set():
        try:
            bytes += stream.read(4096)
            # JPEG frames are delimited by SOI (ff d8) / EOI (ff d9) markers.
            a = bytes.find('\xff\xd8')
            b = bytes.find('\xff\xd9')
            if wind == 'Low':
                # The low-res stream carries extra text after a custom marker.
                c = bytes.find('\xff\xaa\xee')
            if a != -1 and b != -1:
                jpg = bytes[a:b+2]
                if wind == 'Low':
                    if c != -1:
                        str = bytes[b+2:c]
                        print(str)
                        bytes = bytes[c+3:]
                    else:
                        bytes = bytes[b+2:]
                else:
                    bytes = bytes[b+2:]
                i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
                cv2.imshow(wind, i)
                cv2.waitKey(1)
                # NOTE(review): waitKey is called twice per frame; only the
                # second result is checked for 'q' -- confirm intent.
                if cv2.waitKey(1) == ord('q'):
                    stop.set()
                    break
        except:
            # NOTE(review): bare except silently swallows every error
            # (decode failures, network errors, KeyboardInterrupt on py2) --
            # consider narrowing and logging.
            pass
if __name__ == '__main__':
    # One shared stop flag: pressing 'q' in either window stops both readers.
    st = mp.Event()
    lowProc = mp.Process(target = procImg, args=(stream, 'Low', st))
    HighProc = mp.Process(target = procImg, args=(stream2, 'High', st))
    lowProc.start()
    HighProc.start()
    lowProc.join()
    HighProc.join()
    exit(0)
| StarcoderdataPython |
3333763 | sentence_file_path = '/home/tim/Documents/NLP/electronics/electronics_large.csv'
sentence_remapped_file_path = '/home/tim/Documents/NLP/electronics/electronics_balanced_large.csv'
label_cnt = {}
def fiveToThreeClasses(label):
    """Map a 5-star rating string ('1'..'5') to -1 (negative), 0 (neutral) or 1 (positive)."""
    if label in ('1', '2'):
        return -1
    if label == '3':
        return 0
    return 1
def balance_electronics_dataset():
    """
    Balance the electronics data set by determining the class (pos, neut, neg) with the smallest number of reviews
    and then only keeping this number of reviews from the three categories.

    Reads module-level sentence_file_path and appends the balanced rows
    (interleaved pos, neg, neut) to sentence_remapped_file_path.
    """
    with open(sentence_file_path) as in_file, open(sentence_remapped_file_path, 'a') as out_file:
        next(in_file)  # skip the CSV header row
        buckets = {-1: [], 0: [], 1: []}  # review lines grouped by 3-class label
        for line in in_file:
            fields = line.split(',', maxsplit=2)  # label lives in the second column
            buckets[fiveToThreeClasses(fields[1])].append(line)
        n_per_class = min(len(lines) for lines in buckets.values())
        print("Set the number of samples per category to: %s" % n_per_class)
        for pos_line, neg_line, neu_line in zip(buckets[1], buckets[-1], buckets[0]):
            out_file.write(pos_line)
            out_file.write(neg_line)
            out_file.write(neu_line)
balance_electronics_dataset() | StarcoderdataPython |
4836999 | from cleo.testers import CommandTester
from tests.helpers import get_package
def test_show_basic_with_installed_packages(app, poetry, installed):
command = app.find("show")
tester = CommandTester(command)
cachy_010 = get_package("cachy", "0.1.0")
cachy_010.description = "Cachy package"
pendulum_200 = get_package("pendulum", "2.0.0")
pendulum_200.description = "Pendulum package"
installed.add_package(cachy_010)
installed.add_package(pendulum_200)
poetry.locker.mock_lock_data(
{
"package": [
{
"name": "cachy",
"version": "0.1.0",
"description": "Cachy package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
{
"name": "pendulum",
"version": "2.0.0",
"description": "Pendulum package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
],
"metadata": {
"python-versions": "*",
"platform": "*",
"content-hash": "123456789",
"hashes": {"cachy": [], "pendulum": []},
},
}
)
tester.execute([("command", command.get_name())])
expected = """\
cachy 0.1.0 Cachy package
pendulum 2.0.0 Pendulum package
"""
assert tester.get_display(True) == expected
def test_show_basic_with_not_installed_packages_non_decorated(app, poetry, installed):
command = app.find("show")
tester = CommandTester(command)
cachy_010 = get_package("cachy", "0.1.0")
cachy_010.description = "Cachy package"
pendulum_200 = get_package("pendulum", "2.0.0")
pendulum_200.description = "Pendulum package"
installed.add_package(cachy_010)
poetry.locker.mock_lock_data(
{
"package": [
{
"name": "cachy",
"version": "0.1.0",
"description": "Cachy package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
{
"name": "pendulum",
"version": "2.0.0",
"description": "Pendulum package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
],
"metadata": {
"python-versions": "*",
"platform": "*",
"content-hash": "123456789",
"hashes": {"cachy": [], "pendulum": []},
},
}
)
tester.execute([("command", command.get_name())])
expected = """\
cachy 0.1.0 Cachy package
pendulum (!) 2.0.0 Pendulum package
"""
assert tester.get_display(True) == expected
def test_show_basic_with_not_installed_packages_decorated(app, poetry, installed):
command = app.find("show")
tester = CommandTester(command)
cachy_010 = get_package("cachy", "0.1.0")
cachy_010.description = "Cachy package"
pendulum_200 = get_package("pendulum", "2.0.0")
pendulum_200.description = "Pendulum package"
installed.add_package(cachy_010)
poetry.locker.mock_lock_data(
{
"package": [
{
"name": "cachy",
"version": "0.1.0",
"description": "Cachy package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
{
"name": "pendulum",
"version": "2.0.0",
"description": "Pendulum package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
],
"metadata": {
"python-versions": "*",
"platform": "*",
"content-hash": "123456789",
"hashes": {"cachy": [], "pendulum": []},
},
}
)
tester.execute([("command", command.get_name())], {"decorated": True})
expected = """\
\033[32mcachy \033[0m \033[36m0.1.0\033[0m Cachy package
\033[31mpendulum\033[0m \033[36m2.0.0\033[0m Pendulum package
"""
assert tester.get_display(True) == expected
def test_show_latest_non_decorated(app, poetry, installed, repo):
command = app.find("show")
tester = CommandTester(command)
cachy_010 = get_package("cachy", "0.1.0")
cachy_010.description = "Cachy package"
cachy_020 = get_package("cachy", "0.2.0")
cachy_020.description = "Cachy package"
pendulum_200 = get_package("pendulum", "2.0.0")
pendulum_200.description = "Pendulum package"
pendulum_201 = get_package("pendulum", "2.0.1")
pendulum_201.description = "Pendulum package"
installed.add_package(cachy_010)
installed.add_package(pendulum_200)
repo.add_package(cachy_010)
repo.add_package(cachy_020)
repo.add_package(pendulum_200)
repo.add_package(pendulum_201)
poetry.locker.mock_lock_data(
{
"package": [
{
"name": "cachy",
"version": "0.1.0",
"description": "Cachy package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
{
"name": "pendulum",
"version": "2.0.0",
"description": "Pendulum package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
],
"metadata": {
"python-versions": "*",
"platform": "*",
"content-hash": "123456789",
"hashes": {"cachy": [], "pendulum": []},
},
}
)
tester.execute([("command", command.get_name()), ("--latest", True)])
expected = """\
cachy 0.1.0 0.2.0 Cachy package
pendulum 2.0.0 2.0.1 Pendulum package
"""
assert tester.get_display(True) == expected
def test_show_latest_decorated(app, poetry, installed, repo):
command = app.find("show")
tester = CommandTester(command)
cachy_010 = get_package("cachy", "0.1.0")
cachy_010.description = "Cachy package"
cachy_020 = get_package("cachy", "0.2.0")
cachy_020.description = "Cachy package"
pendulum_200 = get_package("pendulum", "2.0.0")
pendulum_200.description = "Pendulum package"
pendulum_201 = get_package("pendulum", "2.0.1")
pendulum_201.description = "Pendulum package"
installed.add_package(cachy_010)
installed.add_package(pendulum_200)
repo.add_package(cachy_010)
repo.add_package(cachy_020)
repo.add_package(pendulum_200)
repo.add_package(pendulum_201)
poetry.locker.mock_lock_data(
{
"package": [
{
"name": "cachy",
"version": "0.1.0",
"description": "Cachy package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
{
"name": "pendulum",
"version": "2.0.0",
"description": "Pendulum package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
],
"metadata": {
"python-versions": "*",
"platform": "*",
"content-hash": "123456789",
"hashes": {"cachy": [], "pendulum": []},
},
}
)
tester.execute(
[("command", command.get_name()), ("--latest", True)], {"decorated": True}
)
expected = """\
\033[32mcachy \033[0m \033[36m0.1.0\033[0m \033[33m0.2.0\033[0m Cachy package
\033[32mpendulum\033[0m \033[36m2.0.0\033[0m \033[31m2.0.1\033[0m Pendulum package
"""
assert tester.get_display(True) == expected
def test_show_outdated(app, poetry, installed, repo):
    """`show --outdated` lists only installed packages with a newer repo version."""
    command = app.find("show")
    tester = CommandTester(command)
    # Two cachy versions exist in the repo but only 0.1.0 is installed, so
    # cachy is outdated; pendulum is up to date and must not be listed.
    cachy_010 = get_package("cachy", "0.1.0")
    cachy_010.description = "Cachy package"
    cachy_020 = get_package("cachy", "0.2.0")
    cachy_020.description = "Cachy package"
    pendulum_200 = get_package("pendulum", "2.0.0")
    pendulum_200.description = "Pendulum package"
    installed.add_package(cachy_010)
    installed.add_package(pendulum_200)
    repo.add_package(cachy_010)
    repo.add_package(cachy_020)
    repo.add_package(pendulum_200)
    # Lock data mirrors the installed state (cachy 0.1.0 + pendulum 2.0.0).
    poetry.locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "cachy",
                    "version": "0.1.0",
                    "description": "Cachy package",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "pendulum",
                    "version": "2.0.0",
                    "description": "Pendulum package",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"cachy": [], "pendulum": []},
            },
        }
    )
    tester.execute([("command", command.get_name()), ("--outdated", True)])
    expected = """\
cachy 0.1.0 0.2.0 Cachy package
"""
    assert tester.get_display(True) == expected
def test_show_hides_incompatible_package(app, poetry, installed, repo):
command = app.find("show")
tester = CommandTester(command)
cachy_010 = get_package("cachy", "0.1.0")
cachy_010.description = "Cachy package"
pendulum_200 = get_package("pendulum", "2.0.0")
pendulum_200.description = "Pendulum package"
installed.add_package(pendulum_200)
poetry.locker.mock_lock_data(
{
"package": [
{
"name": "cachy",
"version": "0.1.0",
"description": "Cachy package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
"requirements": {"python": "1.0"},
},
{
"name": "pendulum",
"version": "2.0.0",
"description": "Pendulum package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
],
"metadata": {
"python-versions": "*",
"platform": "*",
"content-hash": "123456789",
"hashes": {"cachy": [], "pendulum": []},
},
}
)
tester.execute([("command", command.get_name())])
expected = """\
pendulum 2.0.0 Pendulum package
"""
assert tester.get_display(True) == expected
def test_show_all_shows_incompatible_package(app, poetry, installed, repo):
command = app.find("show")
tester = CommandTester(command)
cachy_010 = get_package("cachy", "0.1.0")
cachy_010.description = "Cachy package"
pendulum_200 = get_package("pendulum", "2.0.0")
pendulum_200.description = "Pendulum package"
installed.add_package(pendulum_200)
poetry.locker.mock_lock_data(
{
"package": [
{
"name": "cachy",
"version": "0.1.0",
"description": "Cachy package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
"requirements": {"python": "1.0"},
},
{
"name": "pendulum",
"version": "2.0.0",
"description": "Pendulum package",
"category": "main",
"optional": False,
"platform": "*",
"python-versions": "*",
"checksum": [],
},
],
"metadata": {
"python-versions": "*",
"platform": "*",
"content-hash": "123456789",
"hashes": {"cachy": [], "pendulum": []},
},
}
)
tester.execute([("command", command.get_name()), ("--all", True)])
expected = """\
cachy 0.1.0 Cachy package
pendulum 2.0.0 Pendulum package
"""
assert tester.get_display(True) == expected
| StarcoderdataPython |
3305874 | <filename>utils.py
import copy
import os
import sys
from collections import Counter
import numpy as np
from PIL import Image
from tifffile import tifffile
from skimage.segmentation import slic
def openImage(img_path):
    """Open *img_path* and return it as a PIL RGB image (png/jpg) or numpy array (tif/tiff).

    :param img_path: path to the image file
    :raises TypeError: for any other file extension
    """
    lower_path = img_path.lower()
    # Check the file *extension*, not an arbitrary substring of the path
    # (the old `"jpg" in img_path` misclassified names like 'jpg_data.tif').
    if lower_path.endswith(('.jpg', '.jpeg', '.png')):
        image = Image.open(img_path).convert('RGB')  # PIL Image object
    elif lower_path.endswith(('.tif', '.tiff')):
        image = tifffile.imread(img_path)  # numpy array
    else:
        raise TypeError("Unsupported image format: only png, jpg and tif/tiff are supported")
    return image
def SP_fusion(image1, image2, n_segments, compactness, merge, merge_regions=50, img_names=('', '')):
    """Segment two co-registered images into SLIC superpixels and fuse the label maps.

    :param image1: image of time1
    :param image2: image of time2
    :param n_segments: target number of SLIC superpixels per image
    :param compactness: SLIC compactness parameter
    :param merge: if True, post-process each label map with the external
        SuperPixelMerge.exe region-merging tool
    :param merge_regions: number of regions to keep after region merging
    :param img_names: image file names handed to the merge tool, one per image
    :return: tuple (fused labels = labels1 + labels2 * 100, labels1, labels2)
    """
    # SLIC Superpixel and save
    labels1 = slic(np.array(image1), n_segments, compactness)
    labels2 = slic(np.array(image2), n_segments, compactness)
    # result_nomerge = [labels1, labels2]
    if merge:
        import skimage.external.tifffile as tifffile
        from os.path import abspath, dirname
        Maindirc = abspath(dirname(__file__))
        # set constant for superpixels merge
        sys.path.insert(0, 'MergeTool')
        MergeTool = 'SuperPixelMerge.exe'
        SP_label = Maindirc + '/Superpixel.tif'
        SPMG_label = Maindirc + '/Merge.tif'
        MG_Criterion = 3  # 0, 1, 2, 3
        Num_of_Region = merge_regions  # the number of regions after region merging
        MG_Shape = 0.7
        MG_Compact = 0.7
        result = []
        for img_name, labels in zip(img_names, [labels1, labels2]):
            # SLIC Superpixel and save
            labels = labels.astype('int32')
            tifffile.imsave('Superpixel.tif', labels, photometric='minisblack')
            # Call the Superpixel Merge tool, format the command line input
            # NOTE(review): hard-coded absolute path + os.chdir -- breaks on
            # any other machine and is not thread-safe; confirm before reuse.
            os.chdir('/data/Project_prep/superpixel-cosegmentation/MergeTool/')
            cmd_line = '{} {} {} {} {} {} {} {} {}'.format(MergeTool, img_name, SP_label, SPMG_label,
                                                           MG_Criterion, Num_of_Region, MG_Shape, ' ',
                                                           MG_Compact)
            # print('cmd_line', cmd_line)
            os.system(cmd_line)  # call the Superpixel Merge Tool
            os.chdir('..')
            # save merged slic labels
            MG_labels = tifffile.imread(SPMG_label)
            result.append(MG_labels)
        labels1, labels2 = result
    # Encode the label pair into one id so equal fused values identify
    # pixels sharing both segmentations.
    fusion_labels_after = labels1 + labels2 * 100
    return fusion_labels_after, labels1, labels2
def sp_accuracy(sp, label):
    """Fraction of pixels whose superpixel-majority prediction matches *label*.

    :param sp: superpixel label map (2-D ndarray)
    :param label: ground truth, either an index map (2-D) or an RGB image (3-D)
    :return: accuracy rounded to 3 decimal places
    """
    if len(label.shape) == 3:
        # RGB ground truth -> class-index map first.
        label = RGB2Index(label)
    majority_pred = classOfSP(sp, label)
    n_correct = len(majority_pred[majority_pred == label])
    return round(n_correct / majority_pred.size, 3)
def classOfSP(sp, prediction):
    """
    :param sp: super pixel label of a image | type: <numpy.ndarray>
    :param prediction: the probability of segmented result | type: list 200*200
    :return: the segmented result with each superpixel replaced by its
        majority predicted class | type: same as prediction
    """
    fused = copy.deepcopy(prediction)  # leave the caller's prediction untouched
    for segment_id in np.unique(sp.flatten()):
        mask = sp == segment_id
        majority_class, _count = Counter(prediction[mask]).most_common(1)[0]
        fused[mask] = majority_class
    return fused
def colorize_mask(mask, palette):
    """Return *mask* as a palettized ('P' mode) PIL image.

    :param mask: 2-D ndarray of palette indices
    :param palette: flat [r, g, b, ...] list; zero-padded to the 768 entries
        PIL requires. Padding happens on a copy -- the original code appended
        to the caller's list in place, permanently growing it.
    """
    full_palette = list(palette) + [0] * (256 * 3 - len(palette))
    new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
    new_mask.putpalette(full_palette)
    return new_mask
def RGB2Index(label, mode='palatte'):
    """
    :param label: the ndarray with RGB values to be transferred into an array of index values
    :param mode: 'palatte' maps each RGB triple to its palette index (255 when
        unknown); 'NonZero' maps black pixels to 0 and everything else to 1
    :return: the numpy int labels (2-D input is returned unchanged)
    """
    if len(label.shape) == 2:
        # Already an index map -- nothing to convert.
        return label
    palette = [[0, 0, 0], [150, 250, 0], [0, 250, 0], [0, 100, 0],
               [200, 0, 0], [255, 255, 255], [0, 0, 200], [0, 150, 250]]
    h, w, _c = label.shape
    label = label.tolist()
    label_int = copy.deepcopy(label)
    for i in range(h):
        for j in range(w):
            if mode == 'palatte':
                try:
                    idx = palette.index(label[i][j])
                except ValueError:
                    # was: print('... {} ...', value) -- the placeholder was never formatted
                    print('the value {} is not in palette'.format(label[i][j]))
                    idx = 255
            elif mode == 'NonZero':
                idx = int(label[i][j] != [0, 0, 0])
            else:
                # was: unknown mode fell through with `idx` unbound (NameError)
                raise ValueError('unknown mode: {}'.format(mode))
            label_int[i][j] = idx
    return np.array(label_int)
| StarcoderdataPython |
1627059 | import os
import autofit as af
from test_autolens.integration import integration_util
from test_autolens.simulate.interferometer import simulate_util
from autofit.optimize.non_linear.mock_nlo import MockNLO
def run(
    module,
    test_name=None,
    non_linear_class=af.MultiNest,
    config_folder="config",
    positions=None,
):
    """Run the module's no-lens-light pipeline on its simulated interferometer data.

    :param module: integration-test module exposing test_name, test_type,
        data_type, data_resolution and make_pipeline_no_lens_light()
    :param test_name: override for module.test_name (defaults to it)
    :param non_linear_class: optimizer class handed to the pipeline
    :param config_folder: config directory name relative to the test root
    :param positions: optional positions forwarded to pipeline.run()
    """
    test_name = test_name or module.test_name
    # Test root is two directories above this file; all paths derive from it.
    test_path = "{}/../../".format(os.path.dirname(os.path.realpath(__file__)))
    output_path = test_path + "output/interferometer/"
    config_path = test_path + config_folder
    # Repoints autofit's *global* config singleton -- affects everything that
    # runs afterwards in this process.
    af.conf.instance = af.conf.Config(config_path=config_path, output_path=output_path)
    integration_util.reset_paths(test_name=test_name, output_path=output_path)
    interferometer = simulate_util.load_test_interferometer(
        data_type=module.data_type, data_resolution=module.data_resolution
    )
    module.make_pipeline_no_lens_light(
        name=test_name,
        phase_folders=[module.test_type, test_name],
        non_linear_class=non_linear_class,
    ).run(dataset=interferometer, positions=positions)
def run_a_mock(module):
    """Smoke-run the module's pipeline with the MockNLO optimizer."""
    # noinspection PyTypeChecker
    run(
        module,
        test_name=module.test_name + "_mock",
        non_linear_class=MockNLO,
        config_folder="config_mock",
    )
def run_with_multi_nest(module):
    """Run the module's pipeline with the real MultiNest optimizer."""
    # noinspection PyTypeChecker
    run(
        module,
        test_name=module.test_name + "_nest",
        non_linear_class=af.MultiNest,
        config_folder="config_mock",
    )
| StarcoderdataPython |
3227795 | <reponame>alekseydemidov/gcp_snap<filename>gcp_snap.py
#!/usr/bin/python3
#from __future__ import print_function
import argparse
from datetime import datetime,timedelta,timezone
import time
from google.oauth2 import service_account
import googleapiclient.discovery
def parse_args():
    """Define and parse the command-line interface for the snapshot tool."""
    arg_parser = argparse.ArgumentParser(description='Taking snapshort of GCP instances')
    arg_parser.add_argument('--project', required=True, help='Project name')
    arg_parser.add_argument('--region', type=str, required=True, help='Region name')
    arg_parser.add_argument('instances', type=str, help='List of instances comma separated')
    arg_parser.add_argument('action', type=str, choices=['create', 'delete'],
                            help='Action for snapshort: create or delete')
    arg_parser.add_argument('--key-file', required=True, help='Path to service account json file')
    arg_parser.add_argument('-d', '--days', type=int, default=7, required=False,
                            help='How old days snapshot should be deleted for instances, 0 - for every snapshots; default 7')
    arg_parser.add_argument('--ignore', type=str, required=False, default='',
                            help='List of snapshots name, what should not be deleted')
    arg_parser.add_argument('--multiregion', required=False, action="store_true",
                            help='Location store for snapshot will be multiregion')
    arg_parser.add_argument('--debug', required=False, action="store_true",
                            help='Debug information to stdout')
    return arg_parser.parse_args()
def debug(msg):
    # Print *msg* only when the module-level debug_status flag (set in main()) is on.
    if debug_status:
        print(msg)
def google_compute_auth(key_file):
SCOPES = ['https://www.googleapis.com/auth/compute']
SERVICE_ACCOUNT_FILE = key_file
credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
compute = googleapiclient.discovery.build('compute', 'beta',credentials=credentials)
return compute
def list_zones_name(compute, project, region):
zones = []
result = compute.zones().list(project=project,filter="name:"+region+"*").execute()
debug("Found zones: ")
for i in result['items']:
debug(i['name'])
zones.append(i['name'])
return zones
### Return list of dictionaries [ { name:'', zone:'', disks:[disk_names] } ]
def instances_get_fact(compute, project, zone):
instances_fact = []
result = compute.instances().list(project=project, zone=zone).execute()
if 'items' in result:
for i in result['items']:
disks = []
for j in i['disks']: disks.append(j['source'].split('/')[-1])
result = {'name':i['name'],'zone':zone,'disks':disks}
instances_fact.append(result)
return instances_fact
def snapshot_instance_create(compute, project, zone, instance_name, disk, multiregion):
    """Create a snapshot of *disk* and return the generated snapshot name."""
    snap_name = '{}-{}-{}'.format(instance_name, disk, datetime.now().date())
    # Multi-region snapshots are stored in 'us'; otherwise use the zone's
    # region (zone name minus its trailing two-character '-x' suffix).
    location = 'us' if multiregion else zone[:-2]
    body = {"name": snap_name, "storageLocations": [location]}
    compute.disks().createSnapshot(project=project, zone=zone, disk=disk, body=body).execute()
    return snap_name
def snapshot_get_status(compute, project, snapshot):
result = compute.snapshots().get(project=project, snapshot=snapshot).execute()
return result['status']
def snapshot_create(compute, project, instances, multiregion):
debug("Affected instance: ")
snap_list = []
for i in instances:
debug(i)
for d in i['disks']:
result = snapshot_instance_create(compute=compute, project=project, zone=i['zone'], instance_name=i['name'], disk=d, multiregion=multiregion)
debug ("Taken snapshot with name: "+result)
snap_list.append(result)
return snap_list
def snapshot_list(compute, project):
debug ("Snapshots found in project: "+project)
snap_facts = []
result = compute.snapshots().list(project=project).execute()
if 'items' in result:
for i in result['items']:
temp = {'name':i['name'], 'zone':i['sourceDisk'].split('/')[-3], 'disk':i['sourceDisk'].split('/')[-1], 'created':i['creationTimestamp']}
debug (temp)
snap_facts.append(temp)
return snap_facts
def snapshot_delete(compute,project,name):
debug ("Snapshot "+name+" to be deleted")
result = compute.snapshots().delete(project=project, snapshot=name).execute()
debug (result)
def main():
args = parse_args()
global debug_status
debug_status = args.debug
time_now = datetime.now(timezone(timedelta(-1, 57600)))
debug ("Current time: "+str(time_now))
list_target_instances = args.instances.split(',')
list_snap_ingnored = args.ignore.split(',')
debug("Snapshots will be ignored: "+str(list_snap_ingnored) )
### Authenticate for google compute engine
compute = google_compute_auth(args.key_file)
### Get of zones for region
zones = list_zones_name(compute, args.project, args.region)
### Get of target instances list like a dict {'disks': [u''], 'name': u'', 'zone': u''}
instances = []
for i in zones:
result = instances_get_fact(compute, args.project, i)
for j in result:
if j['name'] in list_target_instances: instances.append(j)
### Create snapshots
if args.action == 'create':
snap_list = snapshot_create(compute, args.project, instances, args.multiregion)
debug(snap_list)
### Waiting for snapshots are ready
debug ("Waiting for snapshots are completed")
for i in snap_list:
for x in range(10):
try:
status = snapshot_get_status(compute, args.project, i)
break
except:
debug ("Snapshot is not started")
time.sleep(1)
while status != ('READY' or 'FAILED'):
time.sleep(1)
status = snapshot_get_status(compute, args.project, i)
debug ('Status of '+i+' = '+status)
### Delete old snapshots
if args.action == 'delete':
snap_list = snapshot_list(compute, args.project)
for i in instances:
for disk in i['disks']:
for snap in snap_list:
if disk == snap['disk'] and i['zone'] == snap['zone'] and (snap['name'] not in list_snap_ingnored):
snap_time = datetime.strptime(snap['created'][:-3]+snap['created'][-2:], "%Y-%m-%dT%H:%M:%S.%f%z")
if (time_now - snap_time).days >= args.days : snapshot_delete (compute, args.project, snap['name'])
if __name__ == "__main__":
main()
| StarcoderdataPython |
1755829 | <gh_stars>0
import time
import json
class TTLeague:
    """Configuration for a table-tennis league."""

    def __init__(self, setCount=3):
        # Number of sets that make up one match.
        self.setCount = setCount

    def __str__(self):
        return str(vars(self))
class Match:
    """A table-tennis match between two players, composed of individual games."""

    def __init__(self, player1, player2):
        # type: (Player, Player) -> None
        # Millisecond timestamp taken at creation time; advanced by one per
        # game in get_match_data() so every game record gets a unique 'date'.
        self.timestamp = int(time.time() * 1000)
        self.games = []
        self.player1 = player1
        self.player2 = player2

    def add_game(self, game):
        # type: (Game) -> None
        self.games.append(game)

    # Per-game record format: {'w': winner, 'l': loser, 'diff': margin, 'date': ms-timestamp}
    def match_data_as_json(self):
        # type: (None) -> str
        """Serialise the per-game result records as a JSON array."""
        return json.dumps(self.get_match_data())

    def match_data_for_log(self):
        # type: (None) -> str
        """One-line human-readable summary, e.g. 'A - B; 3:1,0:2'."""
        self.get_match_data()  # kept for parity with the original: also advances self.timestamp
        header = '{} - {}; '.format(self.player1.name, self.player2.name)
        scores = ''.join('{:d}:{:d},'.format(g.home, g.guest) for g in self.games)
        return (header + scores)[:-1]  # drop trailing comma (or space when there are no games)

    def get_match_data(self):
        # type: (None) -> list
        """Build one winner/loser/margin record per game, each with a unique date."""
        records = []
        for g in self.games:
            home_won = g.home > g.guest
            winner = self.player1.name if home_won else self.player2.name
            loser = self.player2.name if home_won else self.player1.name
            margin = abs(g.home - g.guest)
            self.timestamp += 1
            records.append({'w': winner, 'l': loser, 'diff': margin, 'date': self.timestamp})
        return records

    def __str__(self):
        sets_json = json.dumps([vars(g) for g in self.games])
        return "Match: {} vs {}; Sets: {}".format(self.player1, self.player2, sets_json)
class Game:
    """Score of a single game: points for the home and guest player."""

    def __init__(self, home=0, guest=0):
        self.home = home
        self.guest = guest

    def __str__(self):
        return str(vars(self))
class Player:
    """League participant: NFC tag id, display name and Elo rating."""

    def __init__(self, nfcTag="", name="", elo=0):
        self.nfcTag = nfcTag
        self.name = name
        self.elo = elo

    def __str__(self):
        return str(vars(self))
| StarcoderdataPython |
3378709 | <reponame>pmatigakis/vedette
from uuid import uuid4
from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.urls import reverse
from events.tests.factories import EventFactory
class EventDetailsTests(TestCase):
def setUp(self):
super(EventDetailsTests, self).setUp()
self.username = "admin"
self.password = "<PASSWORD>"
user = User.objects.create_user(
username=self.username, password=self.password
)
user.save()
self.client = Client()
response = self.client.post(
reverse("login"),
{"username": self.username, "password": self.password},
follow=True,
)
self.assertEqual(response.status_code, 200)
def test_event_details(self):
event = EventFactory()
response = self.client.get(
reverse("event-details", kwargs={"pk": event.id})
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["object"], event)
self.assertEqual(response.context["event"], event)
self.assertTemplateUsed("web/events/details.html")
def test_event_details_when_event_does_not_exist(self):
response = self.client.get(
reverse("event-details", kwargs={"pk": uuid4()})
)
self.assertEqual(response.status_code, 404)
self.assertTemplateNotUsed("web/events/details.html")
| StarcoderdataPython |
36575 | import discord
import subprocess
import os, random, re, requests, json
import asyncio
from datetime import datetime
from discord.ext import commands
class Economy(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print('[+] Trashmoney Code ACTIVE!')
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.command(aliases=['tm'])
async def trashmoney(self,ctx,amount:int):
await open_account(ctx.author)
user = ctx.author
users = await get_bank_data()
balancee = await update_bank(ctx.author)
time = 10
if amount > balancee[0]:
await ctx.send('You poor lmao! what money u want to TRASH! NOOB!!!')
return
if amount < 0:
await ctx.send('You are poor or wrong put amount?')
return
await update_bank(ctx.author,-1*amount, 'wallet')
await ctx.send(f"{user} Trash he money! type [!!claim] to get the money!")
msg = await ctx.send(f'Member had {time}s to claim!')
with open('trash_money.txt','w') as f:
f.write(str(amount))
f.close()
while True:
time -= 1
if time == 0:
f = open('trash_money.txt','r')
if f.read() == '0':
await ctx.send('Someone claimed the trash money!')
else:
await ctx.send('No one claimed the trash money!')
break
await msg.edit(content=f'Member had {time}s to claim!')
await asyncio.sleep(1)
async def open_account(user):
    """Ensure *user* has a record in ./bank.json.

    Returns True when a new account was created, False when one already existed.

    Fix: the original called `await get_bank_data()` and then immediately
    overwrote the result with a second direct read of the same file -- the
    redundant read is removed.
    """
    with open('./bank.json', 'r') as f:
        users = json.load(f)
    user_key = str(user.id)
    if user_key in users:
        return False
    users[user_key] = {"wallet": 0, "bank": 0}
    with open('./bank.json', 'w') as f:
        json.dump(users, f)
    return True
async def get_bank_data():
    """Load and return the entire bank database from ./bank.json."""
    with open('./bank.json', 'r') as f:
        return json.load(f)
async def update_bank(user, change=0, mode='wallet'):
    """Apply *change* to the user's 'wallet' or 'bank' balance and persist it.

    Returns the updated balances as [wallet, bank].
    """
    users = await get_bank_data()
    account = users[str(user.id)]
    account[mode] += change
    with open('./bank.json', 'w') as f:
        json.dump(users, f)
    return [account['wallet'], account['bank']]
def setup(bot):
bot.add_cog(Economy(bot)) | StarcoderdataPython |
187058 | from collections import defaultdict
from datetime import datetime, timedelta, time
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from jsonobject.properties import DateTimeProperty
from corehq.apps.app_manager.models import ApplicationBase
from corehq.apps.users.util import WEIRD_USER_IDS
from dimagi.utils.couch.database import get_db
from corehq.apps.domain.models import Domain
from corehq.apps.reminders.models import CaseReminderHandler
from corehq.apps.reports.util import make_form_couch_key
from corehq.apps.users.models import CouchUser
from corehq.elastic import es_query, ADD_TO_ES_FILTER, ES_URLS
from corehq.pillows.mappings.case_mapping import CASE_INDEX
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX
def num_web_users(domain, *args):
    """Count active WebUsers in *domain* via the users/by_domain couch view."""
    view_key = ["active", domain, 'WebUser']
    row = get_db().view('users/by_domain', startkey=view_key, endkey=view_key + [{}]).one()
    return row["value"] if row else 0
def num_mobile_users(domain, *args):
row = get_db().view('users/by_domain', startkey=[domain], endkey=[domain, {}]).one()
return row["value"] if row else 0
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
DISPLAY_DATE_FORMAT = '%Y/%m/%d %H:%M:%S'
def active_mobile_users(domain, *args):
"""
Returns the number of mobile users who have submitted a form in the last 30 days
"""
now = datetime.now()
then = (now - timedelta(days=30)).strftime(DATE_FORMAT)
now = now.strftime(DATE_FORMAT)
q = {"query": {
"range": {
"form.meta.timeEnd": {
"from": then,
"to": now}}},
"filter": {"and": ADD_TO_ES_FILTER["forms"][:]}}
facets = ['form.meta.userID']
data = es_query(params={"domain.exact": domain}, q=q, facets=facets, es_url=XFORM_INDEX + '/xform/_search', size=1)
terms = [t.get('term') for t in data["facets"]["form.meta.userID"]["terms"]]
user_ids = CouchUser.ids_by_domain(domain)
return len(filter(lambda t: t and t in user_ids, terms))
def cases(domain, *args):
row = get_db().view("hqcase/types_by_domain", startkey=[domain], endkey=[domain, {}]).one()
return row["value"] if row else 0
def cases_in_last(domain, days):
"""
Returns the number of open cases that have been modified in the last <days> days
"""
now = datetime.now()
then = (now - timedelta(days=int(days))).strftime(DATE_FORMAT)
now = now.strftime(DATE_FORMAT)
q = {"query": {
"range": {
"modified_on": {
"from": then,
"to": now}}}}
data = es_query(params={"domain.exact": domain, 'closed': False}, q=q, es_url=CASE_INDEX + '/case/_search', size=1)
return data['hits']['total'] if data.get('hits') else 0
def inactive_cases_in_last(domain, days):
"""
Returns the number of open cases that have been modified in the last <days> days
"""
now = datetime.now()
then = (now - timedelta(days=int(days))).strftime(DATE_FORMAT)
now = now.strftime(DATE_FORMAT)
q = {"query":
{"bool": {
"must_not": {
"range": {
"modified_on": {
"from": then,
"to": now }}}}}}
data = es_query(params={"domain.exact": domain, 'closed': False}, q=q, es_url=CASE_INDEX + '/case/_search', size=1)
return data['hits']['total'] if data.get('hits') else 0
def forms(domain, *args):
key = make_form_couch_key(domain)
row = get_db().view("reports_forms/all_forms", startkey=key, endkey=key+[{}]).one()
return row["value"] if row else 0
def active(domain, *args):
now = datetime.now()
then = (now - timedelta(days=30)).strftime(DATE_FORMAT)
now = now.strftime(DATE_FORMAT)
key = ['submission', domain]
row = get_db().view(
"reports_forms/all_forms",
startkey=key+[then],
endkey=key+[now],
limit=1
).all()
return True if row else False
def display_time(row, display=True):
submission_time = row["key"][2]
if display:
return DateTimeProperty().wrap(submission_time).strftime(DISPLAY_DATE_FORMAT)
else:
return submission_time
def first_form_submission(domain, display=True):
key = make_form_couch_key(domain)
row = get_db().view(
"reports_forms/all_forms",
reduce=False,
startkey=key,
endkey=key+[{}],
limit=1
).first()
return display_time(row, display) if row else "No forms"
def last_form_submission(domain, display=True):
key = make_form_couch_key(domain)
row = get_db().view(
"reports_forms/all_forms",
reduce=False,
endkey=key,
startkey=key+[{}],
descending=True,
limit=1
).first()
return display_time(row, display) if row else "No forms"
def has_app(domain, *args):
return bool(ApplicationBase.get_db().view(
'app_manager/applications_brief',
startkey=[domain],
endkey=[domain, {}],
limit=1
).first())
def app_list(domain, *args):
domain = Domain.get_by_name(domain)
apps = domain.applications()
return render_to_string("domain/partials/app_list.html", {"apps": apps, "domain": domain.name})
def uses_reminders(domain, *args):
handlers = CaseReminderHandler.get_handlers(domain=domain).all()
return len(handlers) > 0
def not_implemented(domain, *args):
    """Placeholder calculator: renders a 'not implemented' marker instead of a value."""
    return '<p class="text-error">not implemented</p>'
# Display order of calculator tags.  Entries containing '--' presumably encode
# an extra argument for the base calculator (e.g. 'cases_in_last--30' ->
# cases_in_last(domain, '30')) -- confirm against the tag-parsing caller.
CALC_ORDER = [
    'num_web_users', 'num_mobile_users', 'forms', 'cases', 'mobile_users--active', 'mobile_users--inactive', 'active_cases', 'cases_in_last--30',
    'cases_in_last--60', 'cases_in_last--90', 'cases_in_last--120', 'active', 'first_form_submission',
    'last_form_submission', 'has_app', 'web_users', 'active_apps', 'uses_reminders'
]
# Human-readable labels keyed by full calculator tag (including any '--N'
# suffix); used as column headers / display names for the stats in CALC_ORDER.
CALCS = {
    'num_web_users': "# web users",
    'num_mobile_users': "# mobile users",
    'forms': "# forms",
    'cases': "# cases",
    'mobile_users--active': "# active mobile users",
    'mobile_users--inactive': "# inactive mobile users",
    'active_cases': "# active cases",
    'cases_in_last--30': "# cases seen last 30 days",
    'cases_in_last--60': "# cases seen last 60 days",
    'cases_in_last--90': "# cases seen last 90 days",
    'cases_in_last--120': "# cases seen last 120 days",
    'active': "Active",
    'first_form_submission': "Date of first form submission",
    'last_form_submission': "Date of last form submission",
    'has_app': "Has App",
    'web_users': "list of web users",
    'active_apps': "list of active apps",
    'uses_reminders': "uses reminders",
}
# Maps *base* calculator tag (no '--' suffix) to its implementation.  Each
# function takes (domain, *args); dom_calc() dispatches through this table.
CALC_FNS = {
    'num_web_users': num_web_users,
    "num_mobile_users": num_mobile_users,
    "forms": forms,
    "cases": cases,
    "mobile_users": active_mobile_users,
    "active_cases": not_implemented,
    "cases_in_last": cases_in_last,
    "inactive_cases_in_last": inactive_cases_in_last,
    "active": active,
    "first_form_submission": first_form_submission,
    "last_form_submission": last_form_submission,
    "has_app": has_app,
    "web_users": not_implemented,
    "active_apps": app_list,
    'uses_reminders': uses_reminders,
}
def dom_calc(calc_tag, dom, extra_arg=''):
    """Run the calculator registered under *calc_tag* for domain *dom*.

    Boolean results are localized to 'yes'/'no'; everything else is returned
    unchanged.
    """
    fn = CALC_FNS[calc_tag]
    ans = fn(dom, extra_arg) if extra_arg else fn(dom)
    if ans is True:
        return _('yes')
    if ans is False:
        return _('no')
    return ans
def _all_domain_stats():
    """
    Compute per-domain counts of web users, CommCare users, forms and cases
    across all domains in one pass over the relevant couch views.

    Returns a dict with keys "web_users", "commcare_users", "forms", "cases",
    each mapping domain name -> count (defaulting to 0 for unseen domains).
    """
    webuser_counts = defaultdict(lambda: 0)
    commcare_counts = defaultdict(lambda: 0)
    form_counts = defaultdict(lambda: 0)
    case_counts = defaultdict(lambda: 0)
    # Active users grouped by (status, domain, doc_type); route each count
    # into the dict matching the user document type.
    for row in get_db().view('users/by_domain', startkey=["active"],
                             endkey=["active", {}], group_level=3).all():
        _, domain, doc_type = row['key']
        value = row['value']
        {
            'WebUser': webuser_counts,
            'CommCareUser': commcare_counts
        }[doc_type][domain] = value
    # Form submissions grouped at level 2; row key[1] is the domain name.
    key = make_form_couch_key(None)
    form_counts.update(dict([(row["key"][1], row["value"]) for row in \
        get_db().view("reports_forms/all_forms",
            group=True,
            group_level=2,
            startkey=key,
            endkey=key+[{}]
        ).all()]))
    # Case counts grouped at level 1; row key[0] is the domain name.
    case_counts.update(dict([(row["key"][0], row["value"]) for row in \
        get_db().view("hqcase/types_by_domain",
            group=True,group_level=1).all()]))
    return {"web_users": webuser_counts,
            "commcare_users": commcare_counts,
            "forms": form_counts,
            "cases": case_counts}
# Names of the precomputed ("calced") per-domain properties; presumably these
# are the fields indexed in ElasticSearch for domain documents -- confirm
# against the indexing pipeline.
ES_CALCED_PROPS = ["cp_n_web_users", "cp_n_active_cc_users", "cp_n_cc_users", "cp_n_active_cases" , "cp_n_cases",
                   "cp_n_forms", "cp_first_form", "cp_last_form", "cp_is_active", 'cp_has_app']
def total_distinct_users(domains=None):
    """
    Get total number of users who've ever submitted a form.

    Counts distinct userIDs seen on form submissions, excluding known junk
    IDs.  When *domains* is given, only users registered in those domains are
    counted; when it is None, all submitting users are counted.
    """
    query = {"in": {"domain.exact": domains}} if domains is not None else {"match_all": {}}
    q = {
        "query": query,
        "filter": {"and": ADD_TO_ES_FILTER["forms"][:]},
    }
    res = es_query(q=q, facets=["form.meta.userID"], es_url=ES_URLS["forms"], size=0)
    terms = [t.get('term') for t in res["facets"]["form.meta.userID"]["terms"]]
    if domains is None:
        # Bug fix: the old code iterated `domains` unconditionally and raised
        # TypeError when it was None, even though the query supports that case.
        return len(filter(lambda t: t and t not in WEIRD_USER_IDS, terms))
    # A set gives O(1) membership tests instead of scanning a list per term.
    user_ids = set(reduce(list.__add__, [CouchUser.ids_by_domain(d) for d in domains], []))
    return len(filter(lambda t: t and t not in WEIRD_USER_IDS and t in user_ids, terms))
| StarcoderdataPython |
99271 | <reponame>vis7/django_pytest_fixture_tutorial
from django.test import TestCase
from django.contrib.auth.models import Group, User
# # simple test
def test_foo():
    """Sanity check that the pytest setup collects and runs tests at all."""
    assert True
# accessing database
def test_should_create_user_with_username(db):
    """The created user's username matches the name passed to create_user."""
    # Bug fix: the user was created as "Haki" but asserted to equal "vis",
    # so the test could never pass.  Create and assert the same name.
    user = User.objects.create_user("vis")
    assert user.username == "vis"
| StarcoderdataPython |
108498 | from setuptools import setup
# Packaging metadata for wiggum.  The `wiggum-app` console script launches the
# bundled Flask front-end via wiggum_app.command_line:main.
setup(name='wiggum',
      version='0.2',
      description='utilities to detect simpson\'s paradox',
      url='http://github.com/brownsarahm/DetectSimpsonParadox',
      author='<NAME>, <NAME>, <NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['wiggum', 'wiggum_app','wiggum.trend_components'],
      zip_safe=False,
      include_package_data = True,
      install_requires=['matplotlib', 'Numpy', 'Scipy', 'seaborn', 'pandas','flask'],
      entry_points = {
          'console_scripts': ['wiggum-app=wiggum_app.command_line:main'],
      })
| StarcoderdataPython |
1657481 | <reponame>Tenchi2xh/DNAP
import scrapy
import time
class thinkgeek(scrapy.Spider):
    """Spider that scrapes ThinkGeek's vinyl-records collection pages."""

    # NOTE(review): __name__ is the module's name at class-definition time;
    # presumably the module is called "thinkgeek" so this doubles as the
    # spider name -- an explicit string literal would be safer; confirm.
    name = __name__
    start_urls = ["https://www.thinkgeek.com/collectibles/vinyl-records/"]

    def parse(self, response):
        """Yield one item dict per product card, then follow pagination."""
        # One timestamp per page, so every item on it shares first_seen.
        scrape_time = time.time()
        for div in response.css(".product"):
            yield {
                "title": div.css("::attr(data-name)").extract_first(),
                "price": div.css("::attr(data-price)").extract_first(),
                "source": "thinkgeek",
                "link": response.urljoin(div.css("a::attr(href)").extract_first()),
                "picture": response.urljoin(div.css("img::attr(data-original)").extract_first()),
                "first_seen": scrape_time,
                "release_date": ""
            }
        # Follow the "next page" link until pagination runs out.
        next_page = response.css(".pagenav-item.pagenav-next a::attr(href)").extract_first()
        if next_page:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)
| StarcoderdataPython |
1664618 | <reponame>praiseG/DABS-Django-Backend<gh_stars>1-10
from django.contrib import admin
from .models import Patient
# Register your models here.
class PatientModelAdmin(admin.ModelAdmin):
    """Admin configuration for Patient records."""

    # Columns shown on the patient change-list page.
    list_display = (
        'name',
        'email',
        'mobile',
        'age',
        'disability',
        'registered_by',
        'registered_on',
    )
    # Fields matched by the admin search box.
    search_fields = ('name', 'email', 'mobile', )
    # Default ordering of the change list.
    ordering = ('registered_on', 'name', 'email', )
admin.site.register(Patient, PatientModelAdmin)
| StarcoderdataPython |
3240582 | <gh_stars>10-100
# cec2017.functions
# Author: <NAME>
# Combines simple, hybrid and composition functions (f1 - f30) into a single
# module
from .simple import *
from .hybrid import *
from .composition import *
# Convenience list of all 30 CEC-2017 benchmark functions, in order f1..f30,
# re-exported from the simple/hybrid/composition submodules above.
all_functions = [
    f1, f2, f3, f4, f5, f6, f7, f8, f9, f10,
    f11, f12, f13, f14, f15, f16, f17, f18, f19, f20,
    f21, f22, f23, f24, f25, f26, f27, f28, f29, f30
]
| StarcoderdataPython |
4812273 | from setuptools import setup, find_packages
# Packaging metadata for testbuilder, a Selenium-based frontend test framework.
# CSV/YAML data files are shipped inside every package via package_data.
setup(
    name="testbuilder",
    version="0.2.32",
    packages=find_packages(),
    description="A python testing framework for frontend testing",
    package_data = {
        '': ['*.csv', '*.yaml'],
    },
    install_requires=[
        "click==6.7",
        "PyYAML==5.1",
        "pandas==0.23.3",
        "selenium==3.13.0"
    ],
    project_urls={
        "Source": "https://github.com/AsafSilman/testbuilder",
    },
    classifiers=[
        "Topic :: Software Development :: Testing",
        "Programming Language :: Python :: 3.7"
    ],
    author="<NAME>",
    author_email="<EMAIL>"
)
| StarcoderdataPython |
78948 | <gh_stars>0
import requests
# Base URL of the locally-running REST service.
BASE = "http://127.0.0.1:5000/"
# Earlier PUT requests (training samples for /classify/), kept for reference:
# putResponse = requests.put(BASE + "classify/", {'beacon1': 1, 'beacon2': 2, 'beacon3':3, 'location': 0})
# putResponse = requests.put(BASE + "classify/", {'beacon1': 2, 'beacon2': 2, 'beacon3':3, 'location': 0})
# putResponse = requests.put(BASE + "classify/", {'beacon1': 1, 'beacon2': 2, 'beacon3':3, 'location': 0})
# putResponse = requests.put(BASE + "classify/", {'beacon1': 3, 'beacon2': 2, 'beacon3':3, 'location': 0})
# putResponse = requests.put(BASE + "classify/", {'beacon1': 99, 'beacon2': 99, 'beacon3':99, 'location': 1})
# Query the classifier with one set of beacon readings and print its answer.
getResponse = requests.get(BASE + "classify/", {'beacon1': 8, 'beacon2': 8, 'beacon3': 8})
# response = requests.patch(BASE + "/video/2", {"views": 99, "likes": 9999})
print(getResponse.json())
158196 | # Checks mouse position on windows
import pyautogui as pa
import time
# Move the mouse through a fixed on-screen path forever (e.g. to keep a
# session alive); Ctrl+C breaks the loop cleanly.
while True:
    try:
        # NOTE(review): coordinates are hard-coded for a specific screen
        # layout (x=2563 implies a second monitor or very wide display) --
        # confirm before reuse.
        pa.moveTo(2563, 171, duration=.25)
        pa.click()
        pa.moveRel(10, 0, duration=.25)
        # Step downward 12 times, pausing 3s between steps.
        for i in range(12):
            pa.moveRel(0, 50, duration=0.5)
            time.sleep(3)
        # Idle before restarting the whole path.
        time.sleep(8)
    except KeyboardInterrupt:
        print('Stopping...\n')
        break
1765683 | <reponame>kgaughan/sterechrome_v2<filename>komorebi/html.py
"""
HTML parsing and serialisation support.
"""
import dataclasses
from html import escape
from html.parser import HTMLParser
import io
import logging
logger = logging.getLogger(__name__)
__all__ = [
"Element",
"escape",
"Parser",
]
# See: https://html.spec.whatwg.org/multipage/syntax.html#void-elements
# Void elements: rendered/parsed without a closing tag by make(), by
# Element.serialize(), and by Parser's tag handlers below.
SELF_CLOSING = {
    "area",
    "base",
    "br",
    "col",
    "command",
    "embed",
    "hr",
    "img",
    "input",
    "keygen",
    "link",
    "meta",
    "param",
    "source",
    "track",
    "wbr",
}
def make(tag, attrs, close=None):
    """
    Helper for quickly constructing a HTML tag.

    *attrs* maps attribute names to values; a value of None renders as a
    bare (valueless) attribute.  When *close* is None, a closing tag is
    emitted unless *tag* is a void element.
    """
    pieces = []
    for name, value in attrs.items():
        if value is None:
            pieces.append(" " + name)
        else:
            pieces.append(' {}="{}"'.format(name, escape(value, quote=True)))
    html = "<" + tag + "".join(pieces) + ">"
    if close is None:
        close = tag not in SELF_CLOSING
    if close:
        html += "</" + tag + ">"
    return html
@dataclasses.dataclass
class Element:
    """
    A node in a parsed HTML tree.

    A tag of None marks the synthetic document root: it emits no markup of
    its own, only its children.  Children are either text strings or nested
    Element instances.
    """

    tag: str
    attrs: dict = dataclasses.field(default_factory=dict)
    children: list = dataclasses.field(default_factory=list)

    def __getitem__(self, i):
        return self.children[i]

    def __len__(self):
        return len(self.children)

    def __iter__(self):
        return iter(self.children)

    def serialize(self, dest=None) -> io.TextIOBase:
        """Write this subtree as HTML to *dest* (a fresh StringIO if omitted)."""
        out = io.StringIO() if dest is None else dest
        if self.tag is not None:
            out.write(f"<{self.tag}")
            for name, value in self.attrs.items():
                if value is None:
                    out.write(f" {name}")
                else:
                    out.write(f' {name}="{escape(value, quote=True)}"')
            out.write(">")
        for node in self.children:
            if isinstance(node, str):
                out.write(escape(node, quote=False))
            elif isinstance(node, Element):
                node.serialize(out)
        # Void elements never get a closing tag.
        if self.tag is not None and self.tag not in SELF_CLOSING:
            out.write(f"</{self.tag}>")
        return out
class Parser(HTMLParser):
    """
    Parses a HTML document into a tree of Element nodes.

    The tree is rooted at a synthetic tag-less Element (``root``); ``stack``
    holds the chain of currently-open elements, innermost last.
    """

    def __init__(self):
        super().__init__()
        self.root = Element(tag=None)   # synthetic document root
        self.stack = [self.root]        # open-element stack; never empty

    @property
    def top(self):
        """The innermost open element; new nodes attach here."""
        return self.stack[-1]

    def handle_starttag(self, tag, attrs):
        elem = Element(tag=tag, attrs=dict(attrs))
        self.top.children.append(elem)
        # Void elements cannot contain children, so don't descend into them.
        if tag not in SELF_CLOSING:
            self.stack.append(elem)

    def handle_startendtag(self, tag, attrs):
        # XHTML-style <tag/>: attach the element but never descend.
        elem = Element(tag=tag, attrs=dict(attrs))
        self.top.children.append(elem)

    def handle_endtag(self, tag):
        if tag not in SELF_CLOSING:
            # Bug fix: pop until the element we just *popped* matches the
            # closing tag.  The old code checked the tag of the element
            # remaining on top after each pop, which closed the parent of any
            # well-formed nested element (e.g. "<div><span>x</span>y</div>"
            # attached "y" to the root instead of the div).
            while len(self.stack) > 1:
                closed = self.stack.pop()
                if closed.tag == tag:
                    break

    def handle_data(self, data):
        if data != "":
            self.top.children.append(data)

    def error(self, message):
        # This method is undocumented in HTMLParser, but pylint is moaning
        # about it, so...
        logger.error("Error in Parser: %s", message)  # pragma: no cover
def parse(markup) -> Element:
    """Parse *markup* and return the root Element of the resulting tree."""
    p = Parser()
    p.feed(markup)
    p.close()
    return p.root
| StarcoderdataPython |
26435 | NAMES = [
'IL13stimulation',
'Rec',
'Rec_i',
'IL13_Rec',
'p_IL13_Rec',
'p_IL13_Rec_i',
'JAK2',
'pJAK2',
'SHP1',
'STAT5',
'pSTAT5',
'SOCS3mRNA',
'DecoyR',
'IL13_DecoyR',
'SOCS3',
'CD274mRNA',
]
for idx, name in enumerate(NAMES):
exec(
'{} = {:d}'.format(
name, idx
)
)
NUM = len(NAMES) | StarcoderdataPython |
3354930 | """
This file handles question related HTTP request.
"""
from flask import request
from flask_restplus import Resource
from flask_jwt_extended import jwt_required
from flask_jwt_extended.exceptions import NoAuthorizationError,InvalidHeaderError,RevokedTokenError
from jwt import ExpiredSignatureError, InvalidTokenError, InvalidAudienceError
# local imports
from api.v1.main.util.question_dto import QuestionDto
from api.v1.main.service.question_service.question_service import save_new_question,get_all_questions, specific_question, upvote_question, downvote_question
api = QuestionDto.api
quiz = QuestionDto.question
@api.route('/<int:meetup_id>/create')
@api.param('meetup_id', 'Meetup Identification')
@api.errorhandler(NoAuthorizationError)
@api.errorhandler(ExpiredSignatureError)
@api.errorhandler(RevokedTokenError)
@api.errorhandler(InvalidTokenError)
@api.errorhandler(InvalidHeaderError)
class CreateQuestion(Resource):
@api.response(201, 'Question has been created successfully')
@api.doc('Create a Question')
@api.expect(quiz, validate=True)
@api.doc(security='Bearer Auth')
@jwt_required
def post(self, meetup_id):
"""
Create a Question
"""
quiz_data = request.json
return save_new_question(question_data=quiz_data, meetup_id=meetup_id)
@api.route('/questions')
@api.response(401, 'You need to login first')
@api.errorhandler(NoAuthorizationError)
@api.errorhandler(ExpiredSignatureError)
@api.errorhandler(RevokedTokenError)
@api.errorhandler(InvalidTokenError)
@api.errorhandler(InvalidHeaderError)
class GetQuestions(Resource):
@api.doc('List of all available questions')
@api.doc(security='Bearer Auth')
@api.marshal_list_with(quiz)
@jwt_required
def get(self):
"""Get a list of all available questionss"""
return get_all_questions()
@api.route('/<int:question_id>')
@api.param('question_id', 'Question Identification.')
@api.response(404, 'Question not found in the database')
@api.errorhandler(NoAuthorizationError)
@api.errorhandler(ExpiredSignatureError)
@api.errorhandler(RevokedTokenError)
@api.errorhandler(InvalidTokenError)
@api.errorhandler(InvalidHeaderError)
class SpecificQuestion(Resource):
@api.doc('Get a specific question using the question id')
@api.doc(security='Bearer Auth')
@api.marshal_list_with(quiz)
@jwt_required
def get(self, question_id):
"""Get a specific question
"""
return specific_question(question_id)
@api.route('/<int:question_id>/upvote')
@api.param('question_id', 'Question Identification')
@api.errorhandler(NoAuthorizationError)
@api.errorhandler(ExpiredSignatureError)
@api.errorhandler(RevokedTokenError)
@api.errorhandler(InvalidTokenError)
@api.errorhandler(InvalidHeaderError)
class CreateQuestion(Resource):
@api.response(201, 'You have successfully upvoted')
@api.doc('Upvote a Question')
@api.doc(security='Bearer Auth')
@jwt_required
def patch(self, question_id):
"""
Upvote a Question
"""
return upvote_question(question_id)
@api.route('/<int:question_id>/downvote')
@api.param('question_id', 'Question Identification')
@api.errorhandler(NoAuthorizationError)
@api.errorhandler(RevokedTokenError)
@api.errorhandler(ExpiredSignatureError)
@api.errorhandler(InvalidTokenError)
@api.errorhandler(InvalidHeaderError)
class CreateQuestion(Resource):
@api.response(201, 'You have successfully downvoted')
@api.doc('Downvote a Question')
@api.doc(security='Bearer Auth')
@jwt_required
def patch(self, question_id):
"""
Downvote a Question
"""
return downvote_question(question_id)
| StarcoderdataPython |
4802814 | import math
num = int(input("Digite um número: "))
print(f'O dobro de {num} é {num*2}')
print(f'O triplo de {num} é {num*3}')
print(f'A raiz quadrada de {num} é {math.sqrt(num):.2f}')
'''
Outras opções de raiz quadrada
pow(num,0.5)
num ** 0.5
''' | StarcoderdataPython |
99571 | <gh_stars>10-100
# coding: utf-8
# # R转Python
# ## 6.2 统计分析
# ### (1)数据读入
# In[7]:
# 导入Python做数据处理的模块pandas,并取别名为pd
# 导入numpy模块,并取别名为np
# 从pandas模块中导入DataFrame和Series类
import pandas as pd
import numpy as np
# In[10]:
#设置当前工作目录
#【注】“当前工作目录”的含义为文件和文件夹的读写路径
os.chdir('H:\PythonProjects')
print(os.getcwd())
# In[13]:
# 调用pandas的read_csv()函数,读取一个csv文件,并创建一个DataFrame
# 注:women.csv源自R数据集
women = pd.read_csv('women.csv', index_col=0)
print(women.head())
# ### (2)数据理解
# In[15]:
# 查看描述性统计分析
women.describe()
# In[17]:
# 查看列名
print(women.columns)
# In[19]:
# 查看形状
print('行数:', women.shape[0])
print('列数:', women.shape[1])
# ### (3)数据建模
# In[65]:
# 从机器学习模块中导入线性回归类LinearRegression
from sklearn.linear_model import LinearRegression
# 构建模型训练集,由于数据较少,全部数据用于训练
# 并设置训练集中的自变量与因变量
# 选取特征变量值为women.height,构造特征矩阵
# 当特征变量为一个时,因调用reshape(-1, 1)方法用于构造特征矩阵
X_train = women.height.values.reshape(-1, 1)
X_train
# In[66]:
# 选取响应变量
y_train = women.weight
# In[69]:
# 实例化模型
# fit_intercept参数用于设置是否训练截距
model = LinearRegression(fit_intercept=True)
# In[74]:
# 训练模型
model.fit(X_train, y_train)
# ### (4)查看模型
# In[77]:
# 查看模型的斜率
# 训练模型的斜率为一个列表对象,依次为各自变量的斜率
print("训练模型斜率为:", model.coef_[0])
# In[78]:
# 查看模型的截距
print("训练模型截距为:", model.intercept_)
# ### (5)模型预测
#
# In[79]:
# 用训练的模型预测对原体重数据进行预测
# 返回结果为numpy的数组类型
predicted_weight = model.predict(women.height.values.reshape(-1, 1))
print(predicted_weight)
# In[80]:
# 将原体重数据转换为数组,并查看其值
print(np.array(women.weight))
# #### (6)分析结果的可视化
# In[82]:
# 导入可视化包matplotlib.pyplot
import matplotlib.pyplot as plt
# 绘制原women数据的散点图
plt.scatter(women.height, women.weight)
# 绘制用训练模型根据women.height预测的predicted_weight
plt.plot(women.height, predicted_weight)
plt.rcParams['font.family']="SimHei" #显示汉字的方法
# 添加标题
plt.title('女性体重与身高的线性回归分析')
# 添加X轴名称
plt.xlabel('身高')
# 添加Y轴名称
plt.ylabel('体重')
# 显示绘图
plt.show()
# #### (7)生成报告
# In[84]:
# 重新绘制一遍图形,并将结果保存为PDF文件
# 若之前为调用show()方法,则可直接保存
# 可在调用show()方法之前绘制结果
# 绘制原women数据的散点图
plt.scatter(women.height, women.weight)
# 绘制用训练模型根据women.height预测的predicted_weight
plt.plot(women.height, predicted_weight)
# 添加标题
plt.title('女性体重与身高的线性回归分析')
# 添加X轴名称
plt.xlabel('身高')
# 添加Y轴名称
plt.ylabel('体重')
# 调用savefig()函数,保存会绘制结果
# 也可保存为其他格式,如png, jpg, svg等
plt.savefig('线性回归结果1.pdf')
# #### 6.3 机器学习
#
# 【例1】KNN算法
# #### (1)数据读入
# In[85]:
bc_data = pd.read_csv('bc_data.csv', header=0)
# 由于数据没有列名信息,header设置为None
bc_data.head()
# #### (2)数据理解
# In[86]:
# 查看描述性统计分析
print(bc_data.describe())
# In[87]:
# 查看列名
print(bc_data.columns)
# In[88]:
# 查看形状
print(bc_data.shape)
# #### (4)数据准备
# In[89]:
# 导入train_test_split()函数用于构建训练集和测试集
# 导入KNeighborsClassifier分类器
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# 删除没有实际意义的ID项数据
data = bc_data.drop(['id'], axis=1)
# 查看删除后的数据项
print(data.head())
# In[92]:
# 获取特征矩阵
X_data = data.drop(['diagnosis'], axis=1)
X_data.head()
# In[94]:
# 获取结果数组
y_data = np.ravel(data[['diagnosis']])
# np.ravel()用于降维处理
y_data[0:6]
# In[98]:
# 拆分测试数据与训练数据
# 用train_test_split()随机拆分训练集合测试集
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, random_state=1)
get_ipython().magic('pinfo train_test_split')
# In[99]:
# 查看训练数据与测试数据的数量
print(X_train.shape)
print(X_test.shape)
# #### (5)数据建模
# In[100]:
# 实例化KNN分类模型
model = KNeighborsClassifier(algorithm='kd_tree')
# In[101]:
# 用训练集训练模型
model.fit(X_train, y_train)
# In[103]:
# 用训练模型预测测试集数据
y_model = model.predict(X_test)
# In[104]:
# 查看预测结果和测试集的结果
print(y_model)
# In[105]:
print(y_test)
# In[106]:
# 计算预测准确率
np.mean(y_model == y_test)
# #### (6)模型准确率
# In[108]:
# 导入accaccuracy_score()函数用于计算模型的准确率
from sklearn.metrics import accuracy_score
# 查看模型的准确率
print(accuracy_score(y_test, y_model))
# 【例2】K-Means算法
# #### (1)数据导入
# In[109]:
# 读入数据
protein = pd.read_table('protein.txt', sep='\t')
# 查看前5条数据
protein.head()
# #### (2)数据理解
# In[110]:
# 查看描述性统计分析
print(protein.describe())
# In[111]:
# 查看列名
print(protein.columns)
# In[112]:
# 查看行数和列数
print(protein.shape)
# #### (3)数据转换
#
# In[114]:
from sklearn import preprocessing
# 由于Country不是一个特征值,故舍去
sprotein = protein.drop(['Country'], axis=1)
# 对数据进行标准化处理
sprotein_scaled = preprocessing.scale(sprotein)
# 查看处理结果
print(sprotein_scaled)
# #### (4)数据建模
# In[117]:
# 导入KMeans类型
from sklearn.cluster import KMeans
# 实例化一个KMeans聚类器
kmeans = KMeans(n_clusters=5)
# n_cluster为聚类中心
# In[130]:
# 训练模型
kmeans.fit(sprotein_scaled)
# #### (5)查看模型
# In[124]:
# 查看模型
print(kmeans)
# #### (6)模型预测
# In[125]:
# 预测聚类结果
y_kmeans = kmeans.predict(sprotein)
print(y_kmeans)
# #### (7)结果输出
# In[127]:
def print_kmcluster(k):
'''用于聚类结果的输出
k:为聚类中心个数
'''
for i in range(k):
print('聚类', i)
ls = []
for index, value in enumerate(y_kmeans):
if i == value:
ls.append(index)
print(protein.loc[ls, ['Country', 'RedMeat', 'Fish', 'Fr&Veg']])
print_kmcluster(5)
# #### 6.4 数据可视化
# #### (1)数据准备
# In[128]:
# 读取数据
salaries = pd.read_csv('salaries.csv', index_col=0)
# In[61]:
# 查看数据
salaries.head()
# #### (2)导入Python包
# In[129]:
# 导入matplotlib.pyplot模块,并取别名为plt
import matplotlib.pyplot as plt
import seaborn as sns
# 设置行内显示图片
get_ipython().magic('matplotlib inline')
# #### (3)可视化绘图
# In[64]:
# 设置图片样式
sns.set_style('darkgrid')
# 绘制散点图
sns.stripplot(data=salaries, x='rank', y='salary', jitter=True, alpha=0.5)
# 绘制箱线图
sns.boxplot(data=salaries, x='rank', y='salary')
| StarcoderdataPython |
3344804 | from pprint import pprint
from configparser import ConfigParser
from powerbi.client import PowerBiClient
# Initialize the Parser.
config = ConfigParser()
# Read the file.
config.read('config/config.ini')
# Get the specified credentials.
client_id = config.get('power_bi_api', 'client_id')
redirect_uri = config.get('power_bi_api', 'redirect_uri')
client_secret = config.get('power_bi_api', 'client_secret')
# Initialize the Client.
power_bi_client = PowerBiClient(
client_id=client_id,
client_secret=client_secret,
scope=['https://analysis.windows.net/powerbi/api/.default'],
redirect_uri=redirect_uri,
credentials='config/power_bi_state.jsonc'
)
# Initialize the `Dashboards` service.
dashboard_service = power_bi_client.dashboards()
# Add a dashboard to our Workspace.
dashboard_service.add_dashboard(name='tradingRobot')
# Get all the dashboards in our Org.
pprint(dashboard_service.get_dashboards())
# Grab all the dashboards for a specific workspace.
pprint(
dashboard_service.get_dashboard(
dashboard_id='bf2c7d16-ec7b-40a2-ab56-f8797fdc5fb8'
)
)
# Add a dashboard to a specific workspace.
pprint(
dashboard_service.add_dashboard_in_group(
name='my_new_dashboard',
group_id='f78705a2-bead-4a5c-ba57-166794b05c78'
)
)
# Grab all the dashboards for a specific workspace.
pprint(
dashboard_service.get_group_dashboards(
group_id='f78705a2-bead-4a5c-ba57-166794b05c78'
)
)
# Grab a specific dashboard from a specific workspace.
pprint(
dashboard_service.get_group_dashboard(
group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358'
)
)
# Grab all the tiles from a dashboard.
pprint(
dashboard_service.get_tiles(
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358'
)
)
# Grab all the tiles from a specific dashboard from a specific workspace.
pprint(
dashboard_service.get_group_tiles(
group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358'
)
)
# Grab a specific tile from a specific dashboard.
pprint(
dashboard_service.get_tile(
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70'
)
)
# Grab a specific tile from a specific workspace and a specific workspace..
pprint(
dashboard_service.get_group_tile(
group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70'
)
)
# Clone a specific tile.
pprint(
dashboard_service.clone_tile(
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70',
target_dashboard_id='86cb0a0e-612d-4822-9a29-d83478e21199'
)
)
# Clone a specific tile from a specific workspace.
pprint(
dashboard_service.clone_group_tile(
group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70',
target_dashboard_id='86cb0a0e-612d-4822-9a29-d83478e21199'
)
) | StarcoderdataPython |
149456 | ###############################################
##<NAME>, 2021##
##Topo-Seq data analysis##
# Classify TCSs by localization in IGRs or if in TU than by strand orientation which is cleaved.
###############################################
#######
#Packages to be imported.
#######
import random as rd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from scipy.stats import binom
#######
#Import data.
#######
#TCSs input.
PWD_peaks="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\TopoI-ChIP-Seq\\TopA_ChIP-Seq\EcTopoI_G116S_M320V_Topo-Seq\TCS_motifs\\TopoI_Ara_TCSs_called_15.BroadPeak"
#TUs annotation.
TUs_groups_path="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\E_coli_RNA-Seq\Expression_data\DY330_transcripts\Representative_transcripts\DY330_RNA-Seq_transcripts_representative_EP_del_cor.txt"
#######
#Reads annotation of particular set of genes .tab BroadPeak-like (determined on a basis of expression level).
#######
def parse_expression_annotation(annot_inpath):
genes_annotation={}
filein=open(annot_inpath, 'r')
for line in filein:
line=line.rstrip().split('\t')
if line[0] not in ['GeneID', 'OperonID', 'TU_ID']:
TU_name=line[1].lstrip('"').rstrip(';"')
TU_start=int(line[2])
TU_end=int(line[3])
TU_strand=line[4]
TU_expression=float(line[5].replace(',','.'))
genes_annotation[TU_name]=[TU_start, TU_end, TU_strand, TU_expression]
filein.close()
return genes_annotation
#######
#Opens and reads BED or narrowPeak files.
#######
def deletions_info(del_path):
del_ar=[]
filein=open(del_path, 'r')
for line in filein:
line=line.rstrip().split('\t')
del_ar.append([int(line[1]), int(line[2]), line[3].split('_')[2]])
filein.close()
return del_ar
#######
#Localize TCSs.
#######
def loc_TCSs(tcss_inpath, tus_inpath):
#Read TUs data.
TUs_annot=parse_expression_annotation(tus_inpath)
#Read TCSs data.
TCSs_data=deletions_info(tcss_inpath)
#TCSs classification.
TCSs_classif={'IGR' : 0, 'Coding' : 0, 'Template' : 0}
for TCS in TCSs_data:
if_IGR=0
TUs_ar=[]
for TU_name, TU_data in TUs_annot.items():
if (TCS[0]>=TU_data[0]) and (TCS[0]<=TU_data[1]):
TUs_ar.append(TU_data)
if_IGR=1
TCS_strand=TCS[2]
if TCS_strand=='F':
TCS_strand='+'
elif TCS_strand=='R':
TCS_strand='-'
else:
print(TCS_strand)
TU_strand=TU_data[2]
if if_IGR==0:
TCSs_classif['IGR']+=1
if len(TUs_ar)==1:
if (TU_strand=="+") and (TCS_strand=="+"):
TCSs_classif['Coding']+=1
elif (TU_strand=="-") and (TCS_strand=="-"):
TCSs_classif['Coding']+=1
elif (TU_strand=="+") and (TCS_strand=="-"):
TCSs_classif['Template']+=1
elif (TU_strand=="-") and (TCS_strand=="+"):
TCSs_classif['Template']+=1
elif len(TUs_ar)>1:
TU_strand_ar=[]
for TU_data in TUs_ar:
TU_strand_ar.append(TU_data[2])
if len(list(set(TU_strand_ar)))>1:
print(TUs_ar)
continue
elif len(list(set(TU_strand_ar)))==1:
TU_strand=list(set(TU_strand_ar))[0]
if (TU_strand=="+") and (TCS_strand=="+"):
TCSs_classif['Coding']+=1
elif (TU_strand=="-") and (TCS_strand=="-"):
TCSs_classif['Coding']+=1
elif (TU_strand=="+") and (TCS_strand=="-"):
TCSs_classif['Template']+=1
elif (TU_strand=="-") and (TCS_strand=="+"):
TCSs_classif['Template']+=1
Genome_len=4647454
Intergenic_length=617293
Deletions_length=126348
Genes_ratio=(Genome_len-Intergenic_length)/float(Genome_len)
Igenes_ratio=1-Genes_ratio
Genes_length_cor=int((Genome_len-Deletions_length)*Genes_ratio)
Intergenic_length_cor=int((Genome_len-Deletions_length)*Igenes_ratio)
Genes_ratio_cor=float(Genes_length_cor)/(Genes_length_cor+Intergenic_length_cor)
Igenes_ratio_cor=float(Intergenic_length_cor)/(Genes_length_cor+Intergenic_length_cor)
print(f'Binom test p-value for the number of TCSs in intergenic regions: {1-binom.cdf(TCSs_classif["IGR"], len(TCSs_data), Igenes_ratio_cor)}')
print(f'Enrichment of TCSs in intergenic regions: {TCSs_classif["IGR"]/float(len(TCSs_data)*Igenes_ratio_cor)}')
print(f'Binom test p-value for the number of TCSs on coding strand: {binom.cdf(TCSs_classif["Coding"], len(TCSs_data), Genes_ratio_cor/2)}')
print(f'Enrichment of TCSs on coding strand: {TCSs_classif["Coding"]/float(len(TCSs_data)*Genes_ratio_cor/2)}')
print(f'Binom test p-value for the number of TCSs on template strand: {binom.cdf(TCSs_classif["Template"], len(TCSs_data), Genes_ratio_cor/2)}')
print(f'Enrichment of TCSs on template strand: {TCSs_classif["Template"]/float(len(TCSs_data)*Genes_ratio_cor/2)}')
print(TCSs_classif)
return
loc_TCSs(PWD_peaks, TUs_groups_path) | StarcoderdataPython |
3202193 | <gh_stars>0
nombreFichero = "ejemplo1.txt";
fichero = open(nombreFichero,"w");
fichero.write("Este es un ejemplo de escritura\n");
fichero.write("Este es otro ejemplo de escritura\n");
fichero.write("\n");
for item in range(1,11):
fichero.write("%d\n" % item);
fichero.close();
fichero = open(nombreFichero,"r");
result = fichero.readlines();
fichero.close();
print(result);
fichero = open(nombreFichero,"a");
valores = [1,2,3,4,5,6,7,8,9,10];
fichero.writelines(str(valores));
fichero.close(); | StarcoderdataPython |
151063 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-09 14:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0003_auto_20170821_1735'),
]
operations = [
migrations.AlterField(
model_name='submission',
name='status',
field=models.CharField(choices=[('S', 'Submitted'), ('T', 'Submitted late'), ('F', 'Finished'), ('G', 'Being peer-reviewed/graded'), ('N', 'Nothing submitted yet'), ('A', 'Automated (internal)'), ('X', 'File has been deleted from webserver')], default='N', max_length=2),
),
]
| StarcoderdataPython |
153995 | <reponame>yangjiahao106/LeetCode<gh_stars>1-10
#! python3
# __author__ = "YangJiaHao"
# date: 2018/3/2
class Solution:
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
时间复杂度 O(m + n)
"""
if not matrix:
return False
row = 0
col = len(matrix[0]) - 1
while row < len(matrix) and col >= 0:
if matrix[row][col] == target:
return True
elif matrix[row][col] < target:
row += 1
else:
col -= 1
return False
if __name__ == '__main__':
so = Solution()
res = so.searchMatrix([[-10,-8,-6,-4,-3],[0,2,3,4,5],[8,9,10,10,12]], 0)
print(res) | StarcoderdataPython |
1666478 | <reponame>filvarga/vpp-tests
#!/usr/bin/env python
from sys import stderr
from subprocess import Popen, PIPE
from os import walk, listdir
from os.path import join
from argparse import ArgumentParser
def check_output(args, stderr=None):
return Popen(args, stdout=PIPE, stderr=stderr).communicate()[0]
class Device(object):
def __init__(self, slot, link):
self.link = link
self.slot = slot
def unbind(self):
filename = "/sys/bus/pci/devices/{}/driver/unbind".format(self.slot)
try:
with open(filename, "a") as fo:
fo.write(self.slot)
except:
stderr.write("error unbinding {}\n".format(self))
def bind(self, driver):
filename = "/sys/bus/pci/drivers/{}/bind".format(driver)
try:
with open(filename, "a") as fo:
fo.write(self.slot)
except:
stderr.write("error binding {}\n".format(self))
class Devices(dict):
def filter_by_slots(self, slots):
return filter(None, map(self.get, slots))
def get_all():
devices = Devices()
dev = dict()
dev_lines = check_output(["lspci", "-Dvmmnnk"]).splitlines()
for dev_line in dev_lines:
# check if type matches ?
if len(dev_line):
name, value = dev_line.decode().split("\t", 1)
value_list = value.rsplit(' ', 1)
if len(value_list) > 1:
value = value_list[-1].rstrip("]").lstrip("[")
dev[name.rstrip(":")] = value
continue
tmp = dev
dev = dict()
if tmp['Class'][0:2] != '02':
continue
for base, dirs, _ in walk("/sys/bus/pci/devices/{}/".format(tmp['Slot'])):
if "net" in dirs:
tmp['Interface'] = ",".join(listdir(join(base, "net")))
break
# store the device
devices[tmp['Slot']]= Device(tmp['Slot'], dev.get('Interface'))
return devices
def create_vpp_config(args, devs):
dev0, dev1 = devs.filter_by_slots((args.slot0, args.slot1))
config = """
unix {{
interactive
full-coredump
}}
api-trace {{ on }}
nat {{ endpoint-dependent }}
dpdk {{
dev {0} {{ name lan0 }}
dev {1} {{ name lan1 }}
}}
"""
print(config.format(dev0.slot, dev1.slot))
def create_trex_config(args, devs):
dev0, dev1 = devs.filter_by_slots((args.slot0, args.slot1))
config = """
- port_limit: 2
version: 2
interfaces: ['{0}', '{1}']
port_info:
- ip : 10.0.0.1
default_gw : 10.0.0.2
src_mac : 00:00:00:02:00:00
dst_mac : 00:00:00:03:00:00
- ip : 192.168.3.11
default_gw : 172.16.17.32
src_mac : 00:00:00:04:00:00
dst_mac : 00:00:00:05:00:00
c: 4
platform:
master_thread_id: 0
latency_thread_id: 8
dual_if:
- socket: 0
threads: [1,2,3,4,5,6,7]
- socket: 1
threads: [9,10,11,12,13,14,15]
"""
print(config.format(dev0.slot, dev1.slot))
def rebind_devices(args, devs):
for dev in devs.filter_by_slots(args.slots):
dev.unbind()
dev.bind(args.driver)
def print_devices(args, devs):
for dev in devs.values():
print("slot: {0} link: {1}".format(
dev.slot, dev.link if dev.link else ''))
def slot_to_name(args, devs):
dev = devs.filter_by_slots([args.slot])
if dev:
print(dev.link)
def get_args():
    """Build the CLI (vpp-config / trex-config / rebind / slot-to-name / print)
    and parse sys.argv; each subcommand stores its handler in ``clb``."""
    parser = ArgumentParser()
    subparsers = parser.add_subparsers()

    # The two config generators share an identical pair of slot arguments.
    for command, handler in (('vpp-config', create_vpp_config),
                             ('trex-config', create_trex_config)):
        sub = subparsers.add_parser(command)
        sub.set_defaults(clb=handler)
        sub.add_argument('slot0', help='pci slot')
        sub.add_argument('slot1', help='pci slot')

    # Rebind one or more devices to a given driver.
    sub = subparsers.add_parser('rebind')
    sub.set_defaults(clb=rebind_devices)
    sub.add_argument('driver', help='driver to rebind to [lspci]')
    sub.add_argument('slots', metavar='N', type=str, nargs='+',
                     help='pci slots')

    # Resolve a single slot to its interface name.
    sub = subparsers.add_parser('slot-to-name')
    sub.set_defaults(clb=slot_to_name)
    sub.add_argument('slot', help='pci slot')

    # List every detected device.
    sub = subparsers.add_parser('print')
    sub.set_defaults(clb=print_devices)

    return parser.parse_args()
def main():
    """CLI entry point: parse arguments, enumerate PCI NICs, dispatch."""
    arguments = get_args()
    arguments.clb(arguments, get_all())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3295381 | <reponame>ruixuantan/FourParts
from fourparts.structures.notes.Notes import Notes
from fourparts.structures.Scales import Scales
class Key:
    """Represents all 24 major/minor keys.

    Attributes
    ----------
    key : str
        As represented in KEYS.
    pitchcenter : str
        One of the 12 notes in Notes.NOTES.
    scale : Scales
        Either Scales.Major or Scales.Minor.
    """

    # Maps each of the 12 pitch centres to its major and minor key names.
    KEYS = {
        Notes.NOTES[0]: {Scales.Major: "C_MAJOR", Scales.Minor: "C_MINOR"},
        Notes.NOTES[1]: {Scales.Major: "C#/Db_MAJOR", Scales.Minor: "C#/Db_MINOR"},
        Notes.NOTES[2]: {Scales.Major: "D_MAJOR", Scales.Minor: "D_MINOR"},
        Notes.NOTES[3]: {Scales.Major: "D#/Eb_MAJOR", Scales.Minor: "D#/Eb_MINOR"},
        Notes.NOTES[4]: {Scales.Major: "E_MAJOR", Scales.Minor: "E_MINOR"},
        Notes.NOTES[5]: {Scales.Major: "F_MAJOR", Scales.Minor: "F_MINOR"},
        Notes.NOTES[6]: {Scales.Major: "F#/Gb_MAJOR", Scales.Minor: "F#/Gb_MINOR"},
        Notes.NOTES[7]: {Scales.Major: "G_MAJOR", Scales.Minor: "G_MINOR"},
        # BUG FIX: major name read "G#/Gb_MAJOR"; the pitch centre at
        # index 8 is G#/Ab (cf. the minor entry "G#/Ab_MINOR").
        Notes.NOTES[8]: {Scales.Major: "G#/Ab_MAJOR", Scales.Minor: "G#/Ab_MINOR"},
        Notes.NOTES[9]: {Scales.Major: "A_MAJOR", Scales.Minor: "A_MINOR"},
        Notes.NOTES[10]: {Scales.Major: "A#/Bb_MAJOR", Scales.Minor: "A#/Bb_MINOR"},
        Notes.NOTES[11]: {Scales.Major: "B_MAJOR", Scales.Minor: "B_MINOR"},
    }

    def __init__(self, pitchcenter, scale):
        """Constructor method of Key.

        Parameters
        ----------
        pitchcenter : str
            One of the 12 notes in Notes.NOTES.
        scale : Scales
            Either Scales.Major or Scales.Minor.
        """
        self.key = Key.KEYS[pitchcenter][scale]
        self.pitchcenter = pitchcenter
        self.scale = scale

    def __str__(self):
        return self.key

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.key == other.key

    def __hash__(self):
        # Defining __eq__ alone would make Key unhashable; hash on the
        # same field used for equality so equal keys hash equally.
        return hash(self.key)

    def get_key_index(self):
        """Maps the key to its associated index.

        Returns
        -------
        int
            An int from 0 to 23.
        """
        # Two keys (major, minor) per pitch centre, so centre index * 2
        # plus 0 for major / 1 for minor.
        index = Notes.get_note_index(self.pitchcenter) * 2
        index += Scales.get_scale_index(self.scale)
        return index
| StarcoderdataPython |
3363605 | <gh_stars>1-10
# import the pandas, os, sys, and pprint libraries
import pandas as pd
import os
import sys
import pprint
# import the respondent class
sys.path.append(os.getcwd() + "/helperfunctions")
import respondent as rp
import importlib
importlib.reload(rp)
pd.set_option('display.width', 150)
pd.set_option('display.max_columns', 15)
pd.set_option('display.max_rows', 100)
# Load the NLS data and flatten it into a list of per-respondent dicts.
# (Bare expressions below are notebook-style inspection; they are no-ops
# when this file runs as a plain script.)
nls97 = pd.read_pickle("data/nls97f.pkl")
nls97.shape
nls97list = nls97.reset_index().to_dict('records')
len(nls97list)
pprint.pprint(nls97list[0:1])
# Build one summary dict per respondent via the Respondent helper class.
analysislist = []
for record in nls97list:
    person = rp.Respondent(record)
    summary = {
        'originalid': record['originalid'],
        'childnum': person.childnum(),
        'avgweeksworked': person.avgweeksworked(),
        'age': person.ageby('20201015'),
        'baenrollment': person.baenrollment(),
    }
    analysislist.append(summary)
# Inspect the results and assemble them into a pandas data frame.
len(analysislist)
person.respondentcnt
pprint.pprint(analysislist[0:2])
analysis = pd.DataFrame(analysislist)
analysis.head(2)
| StarcoderdataPython |
170908 | from . import invocation_support
from . import invocation_trace_support
| StarcoderdataPython |
127167 | na, nb = map(int, input().split())
# Read two whitespace-separated integer collections from stdin and print
# their Jaccard similarity: |A intersect B| / |A union B|.
set_a = set(map(int, input().split()))
set_b = set(map(int, input().split()))
print(len(set_a & set_b) / len(set_a | set_b))
| StarcoderdataPython |
1626573 | <filename>hikka/__init__.py
from flask_limiter.util import get_remote_address
from flask import Flask, render_template
# from hikka.modules import descriptors
# from hikka.modules import comments
# from hikka.modules import statuses
# from hikka.modules import episodes
from hikka.modules import account
from hikka.modules import errors
# from hikka.modules import upload
# from hikka.modules import system
# from hikka.modules import anime
# from hikka.modules import teams
from hikka.modules import auth
from flask_cors import CORS
import flask_limiter
import mongoengine
import config
# Application factory-style module-level setup: Flask app, CORS, rate
# limiting, MongoDB connection, then blueprint registration.
app = Flask(__name__)
# Session/signing key comes from the deployment config module.
app.config["SECRET_KEY"] = config.secret
# Preserve insertion order of keys in JSON responses.
app.config["JSON_SORT_KEYS"] = False
CORS(app)
# Rate limiting keyed on the client address; config.limits applies to
# every route by default.
limiter = flask_limiter.Limiter(
    app=app,
    key_func=get_remote_address,
    default_limits=config.limits
)
# MongoDB credentials from the deployment config.
db_settings = dict(
    username=config.db["username"],
    password=config.db["password"],
    port=config.db["port"]
)
# Register the "default" mongoengine connection used by all documents.
mongoengine.register_connection(
    alias="default",
    name=config.db["name"],
    **db_settings
)
# App blueprints (commented entries are modules currently disabled; their
# imports at the top of the file are commented out to match).
app.register_blueprint(account.blueprint, url_prefix="/account")
app.register_blueprint(auth.blueprint, url_prefix="/auth")
# app.register_blueprint(descriptors.blueprint)
# app.register_blueprint(comments.blueprint)
# app.register_blueprint(episodes.blueprint)
# app.register_blueprint(statuses.blueprint)
app.register_blueprint(errors.blueprint)
# app.register_blueprint(system.blueprint)
# app.register_blueprint(upload.blueprint)
# app.register_blueprint(teams.blueprint)
# app.register_blueprint(anime.blueprint)
# Limiter exemptions
# limiter.exempt(upload.blueprint)
# limiter.exempt(upload.blueprint)
@app.route("/")
def docs():
    """Serve the rendered API documentation page at the site root."""
    return render_template("docs.html")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.