id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4987476 | from gfpgan import GFPGANer
from realesrgan import RealESRGANer
import torch
# bg_upsampler = RealESRGANer(
# scale=2,
# model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
# tile=400,
# tile_pad=10,
# pre_pad=0,
# half=True)
# Build the face restorer.  GFPGANer's constructor loads the checkpoint at
# ``model_path`` into its internal network and switches it to eval mode, so
# no manual state-dict loading is needed afterwards.
model = GFPGANer(
    model_path='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth',
    upscale=2,              # 2x overall upscale of the restored output
    arch='clean',           # the "clean" GFPGAN v1 architecture
    channel_multiplier=2,
    bg_upsampler=None)      # no background upsampler (RealESRGAN disabled above)
# NOTE(review): the original code chained ``model.load_state_dict(...).eval()``.
# That was broken twice over: GFPGANer is a wrapper object, not an nn.Module
# (no ``load_state_dict``), and even on a real module ``load_state_dict``
# returns a NamedTuple of missing/unexpected keys, so calling ``.eval()`` on
# its result raises AttributeError.  The redundant re-load has been removed.
| StarcoderdataPython |
# Demonstration of basic NumPy array editing: insert, append, delete and
# boolean-mask selection.
import numpy as np

np_arr = np.array([[1,2], [6,8]])
print(np_arr)

# Insert the row [3, 4] before index 1 (axis=0 -> operates on rows).
np_arr_insert = np.insert(arr = np_arr , obj = 1 , values = [3,4] , axis = 0 )
print(np_arr_insert)

# Without an axis, np.append flattens both inputs into one 1-D array.
np_arr_append = np.append(arr = np_arr , values = [[3,4], [1,2]])
print(np_arr_append)

# Delete column index 1 (axis=1 -> operates on columns).
np_arr_delete = np.delete(arr = np_arr , obj = 1 , axis = 1)
print(np_arr_delete)

# Boolean-mask demo: select all elements greater than 2 (result is 1-D).
import numpy as np
np_arr= np.array( [[1,2,3], [2,3,5], [1,3,5]] )
np_condarr = np_arr [ np_arr>2 ]
print( np_condarr )
# A comparison on an array yields a boolean array.
print((np_arr>2).dtype )
| StarcoderdataPython |
4903505 | <reponame>neotje/PyBluetoothctl<filename>src/bluetoothctl/__init__.py
from bluetoothctl.classes import BluetoothCtl, BLdevice | StarcoderdataPython |
72914 | import argparse
from .shared import glob_paths, print_utf8, has_magic
import glob
import os
import sys
from . import chunks
import math
from itertools import count
def tabulate(lens, rows, columns):
    """Compute column widths for laying ``lens`` items out in ``rows`` rows.

    ``lens`` is the list of item lengths; ``chunks(lens, rows)`` yields one
    chunk per display column.  Returns ``(True, sizes)`` with one width per
    column when the layout fits in ``columns`` terminal columns, otherwise
    ``(False, [])``.
    """
    sizes = []
    for chunk in chunks(lens, rows):
        # Column width is the longest entry plus one separating space.
        sizes.append(max(chunk) + 1)
        # Bail out as soon as the accumulated width overflows the terminal.
        # (The original repeated this exact check a second time after the
        # loop; since ``sum(sizes)`` only grows, that copy was dead code and
        # has been removed, along with the unused ``data`` accumulator.)
        if sum(sizes) > columns:
            return False, []
    return True, sizes
def print_group(items, isatty):
    """Print ``items`` like ``ls``: one per line when stdout is not a tty,
    otherwise packed into the fewest rows that fit the terminal width.
    """
    if not items:
        # Nothing to print.  Previously an empty directory or glob crashed
        # here with ``max()`` of an empty sequence in the tty branch.
        return
    if isatty:
        lens = [len(item) for item in items]
        area = sum(lens)
        columns = os.get_terminal_size().columns
        # Lower bound on the number of rows needed for this much text.
        rows = int(math.ceil(area / columns))
        if max(lens) + 1 >= columns:
            # At least one item is wider than the terminal: columnar layout
            # is impossible, fall back to one item per line.
            for line in items:
                print_utf8(line)
            return
        # Grow the row count until a column layout fits the terminal.
        while True:
            ok, sizes = tabulate(lens, rows, columns)
            if ok:
                lines = ["" for _ in range(rows)]
                # Items are laid out column-major: each chunk is one column.
                for chunk, size in zip(chunks(items, rows), sizes):
                    for j, item in enumerate(chunk):
                        lines[j] = lines[j] + str.ljust(item, size)
                for line in lines:
                    print_utf8(line)
                return
            rows += 1
    else:
        for item in items:
            print_utf8(item)
def main():
    """Entry point: list each given path (default ``.``) like a minimal ls."""
    parser = argparse.ArgumentParser(description='lists directory')
    parser.add_argument('path', nargs='*')
    args = parser.parse_args()
    # No positional arguments means "list the current directory".
    paths = args.path if args.path else ['.']
    isatty = sys.stdout.isatty()
    for path in paths:
        if has_magic(path):
            # Glob patterns are expanded and printed as a single group.
            print_group(list(glob.glob(path)), isatty)
        else:
            # Mimic ls: label each directory when listing more than one.
            if len(paths) > 1:
                print_utf8(path + ":")
            for root, dirs, files in os.walk(path):
                print_group([d + "/" for d in dirs] + files, isatty)
                # Emptying ``dirs`` in place prunes every subdirectory so
                # os.walk only visits the top level.  ``clear()`` replaces
                # the original ``while len(dirs) > 0: dirs.pop()`` loop.
                dirs.clear()
if __name__ == "__main__":
main() | StarcoderdataPython |
11387389 | <reponame>Mikuana/oops_fhir
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.encounter_status import (
EncounterStatus as EncounterStatus_,
)
__all__ = ["EncounterStatus"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class EncounterStatus(EncounterStatus_):
    """
    EncounterStatus
    Current state of the encounter.
    Status: draft - Version: 4.0.1
    http://hl7.org/fhir/ValueSet/encounter-status
    """

    class Meta:
        # Binds the ValueSet resource parsed from the sibling ``.json`` file
        # (module-level ``_resource``) to this code-system class.
        resource = _resource
| StarcoderdataPython |
8056683 | # Get Two Integers from the user and print the greater value among them.
| StarcoderdataPython |
1657183 | '''
EvolvePy's integration with Unity's ML Agents (https://github.com/Unity-Technologies/ml-agents).
'''
from .unity import UnityFitnessFunction | StarcoderdataPython |
3463572 | <filename>service_info_cms/migrations/0002_pagerating_ratingextension.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the PageRating aggregate model and the RatingExtension page
    extension (draft/public pair) for CMS pages."""

    dependencies = [
        ('cms', '0012_auto_20150607_2207'),
        ('service_info_cms', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='PageRating',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
                # Denormalized aggregates: the running count and total are
                # kept alongside the derived average.
                ('average_rating', models.IntegerField(default=0)),
                ('num_ratings', models.IntegerField(default=0)),
                ('rating_total', models.IntegerField(default=0)),
                ('page_obj', models.ForeignKey(to='cms.Page')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RatingExtension',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
                ('include_rating', models.BooleanField(default=False)),
                ('extended_object', models.OneToOneField(to='cms.Page', editable=False)),
                # Links the draft extension to its published counterpart.
                ('public_extension', models.OneToOneField(to='service_info_cms.RatingExtension', editable=False, related_name='draft_extension', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
3367158 | <gh_stars>1-10
from paramiko.client import SSHClient, AutoAddPolicy
from core.logger.Logger import Logger
class SSHTarget:
    """Thin wrapper around paramiko's SSHClient for a single remote host."""

    def __init__(self, host, port=22, timeout=10):
        self.host = host
        self.port = int(port)
        self.timeout = timeout
        self.ssh_client = SSHClient()
        # Accept unknown host keys automatically.  Convenient for lab use,
        # but it trusts the network -- not suitable for hostile environments.
        self.ssh_client.set_missing_host_key_policy(AutoAddPolicy())

    def connect(self, username, password):
        """Open the connection using password authentication."""
        self.ssh_client.connect(self.host, self.port, username, password,
                                timeout=self.timeout)

    def connect_with_key(self, username, key):
        """Open the connection using the private-key file at ``key``."""
        self.ssh_client.connect(self.host, self.port, username,
                                key_filename=key, timeout=self.timeout)

    def close(self):
        """Close the underlying SSH connection."""
        self.ssh_client.close()

    def send(self, data):
        """Run ``data`` as a remote command and return its stdout as text.

        Logs a warning containing the full stderr output if the command
        wrote anything to stderr.
        """
        # Fix: the first element was misspelled ``tdin`` in the original.
        stdin, stdout, stderr = self.ssh_client.exec_command(data)
        # Read stderr exactly once.  The original called ``readline()`` to
        # test for output and then logged only ``read()`` -- which dropped
        # the first line from the warning message.
        err = stderr.read().decode("utf-8")
        if err != "":
            Logger.warning("STDERR was not null! (" + err + ")")
        return stdout.read().decode("utf-8")
| StarcoderdataPython |
3311587 | 15 uid=2057284
20 ctime=1290656304
20 atime=1292623534
24 SCHILY.dev=234881026
23 SCHILY.ino=25871146
18 SCHILY.nlink=1
| StarcoderdataPython |
3549012 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os

# Make the project root (the parent of this script's directory) importable
# so ``project_name`` resolves no matter what the current working directory
# is when the script is launched.
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(here, os.pardir))

from project_name.main import run_server

run_server()
| StarcoderdataPython |
127331 | <reponame>inuradz/Advent-of-code-2018
import sys
# Sum every integer read from standard input (one per line) and print
# the total.
num = sum(int(line.rstrip()) for line in sys.stdin)
print(num)
6529505 | #!/usr/bin/python
import sqlite3

# Create (or reuse) the local database file and ensure the SENSOR table
# exists.  ``IF NOT EXISTS`` makes the script idempotent; the original
# plain CREATE TABLE raised sqlite3.OperationalError on every re-run.
conn = sqlite3.connect('esp32.db')
print ("Opened database successfully")

conn.execute('''CREATE TABLE IF NOT EXISTS SENSOR
             (ID INTEGER PRIMARY KEY AUTOINCREMENT,
              NAME TEXT NOT NULL,
              DATA INT NOT NULL);''')
print ("Table created successfully")

conn.close()
| StarcoderdataPython |
11247737 | <gh_stars>1-10
from IQNewsClipThread import IQNewsClipThread as Scraper

if __name__ == '__main__':
    # ``utf-8-sig`` strips the BOM that Excel-exported CSV files often carry,
    # so the first source/symbol is not polluted with a stray \ufeff.
    with open('config/sources.csv', 'r', encoding='utf-8-sig') as f:
        sources = [source.strip() for source in f.readlines()]
    with open('config/symbols.csv', 'r', encoding='utf-8-sig') as f:
        keys = [symbol.strip() for symbol in f.readlines()]
    # Scrape news for every symbol from every source using 5 worker threads.
    scraper = Scraper(keys, sources, n_thread=5)
    scraper.start()
    # Alternative invocations kept for reference:
    # scraper.create_newscount_file('NewsCount')
    # scraper.start(whole_file=True)
    # scraper.create_newscount_file(d_dup=False)
8167966 | """
Test the pipeline module.
"""
from distutils.version import LooseVersion
from tempfile import mkdtemp
import shutil
import time
import pytest
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.dummy import DummyRegressor
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils import Memory
from sklearn.utils._joblib import __version__ as joblib_version
# Tiny text corpus used by the FeatureUnion / CountVectorizer tests below.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
class NoFit(object):
    """Minimal estimator-like fixture that has no ``fit`` method.

    Used to test parameter dispatching and Pipeline's validation error for
    steps that cannot be fitted.
    """

    def __init__(self, a=None, b=None):
        # Record hyper-parameters verbatim, scikit-learn style.
        self.a, self.b = a, b
class NoTrans(NoFit):
    """Fixture that fits but does not transform.

    Exercises the "all intermediate steps should be transformers" check.
    """

    def fit(self, X, y):
        return self

    def get_params(self, deep=False):
        return {'a': self.a, 'b': self.b}

    def set_params(self, **params):
        # Deliberately only honours 'a'; the tests rely on this minimal form.
        self.a = params['a']
        return self
class NoInvTransf(NoTrans):
    """Identity transformer that provides no ``inverse_transform``."""

    def transform(self, X):
        return X
class Transf(NoInvTransf):
    """Identity transformer with both ``transform`` and
    ``inverse_transform``."""

    def transform(self, X):
        return X

    def inverse_transform(self, X):
        return X
class TransfFitParams(Transf):
    """Identity transformer that records the keyword args passed to fit."""

    def fit(self, X, y, **fit_params):
        # Stored so tests can assert which fit params were routed here.
        self.fit_params = fit_params
        return self
class Mult(BaseEstimator):
    """Mock estimator that multiplies its input by ``mult``."""

    def __init__(self, mult=1):
        self.mult = mult

    def fit(self, X, y):
        return self

    def transform(self, X):
        return np.asarray(X) * self.mult

    def inverse_transform(self, X):
        return np.asarray(X) / self.mult

    def predict(self, X):
        # Row-wise sum of the scaled input gives a 1-d "prediction".
        return (np.asarray(X) * self.mult).sum(axis=1)

    # All prediction-like methods share one implementation so the same mock
    # can stand in for classifiers, regressors and decision functions.
    predict_proba = predict_log_proba = decision_function = predict

    def score(self, X, y=None):
        return np.sum(X)
class FitParamT(BaseEstimator):
    """Mock classifier whose ``fit`` accepts an extra keyword.

    Used to verify that fit parameters are routed through Pipeline to the
    right step.
    """

    def __init__(self):
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        self.successful = should_succeed

    def predict(self, X):
        # "Prediction" simply reports whether fit received the keyword.
        return self.successful

    def fit_predict(self, X, y, should_succeed=False):
        self.fit(X, y, should_succeed=should_succeed)
        return self.predict(X)

    def score(self, X, y=None, sample_weight=None):
        if sample_weight is not None:
            X = X * sample_weight
        return np.sum(X)
class DummyTransf(Transf):
    """Transformer which stores the column means of its training data."""

    def fit(self, X, y):
        self.means_ = np.mean(X, axis=0)
        # store timestamp to figure out whether the result of 'fit' has been
        # cached or not
        self.timestamp_ = time.time()
        return self
class DummyEstimatorParams(BaseEstimator):
    """Mock classifier that takes params on predict."""

    def fit(self, X, y):
        return self

    def predict(self, X, got_attribute=False):
        # Record the keyword so tests can check predict-param routing.
        self.got_attribute = got_attribute
        return self
def test_pipeline_init():
    """Exercise Pipeline construction, param get/set, validation and clone."""
    # Test the various init parameters of the pipeline.
    assert_raises(TypeError, Pipeline)
    # Check that we can't instantiate pipelines with objects without fit
    # method
    assert_raises_regex(TypeError,
                        'Last step of Pipeline should implement fit. '
                        '.*NoFit.*',
                        Pipeline, [('clf', NoFit())])
    # Smoke test with only an estimator
    clf = NoTrans()
    pipe = Pipeline([('svc', clf)])
    assert_equal(pipe.get_params(deep=True),
                 dict(svc__a=None, svc__b=None, svc=clf,
                      **pipe.get_params(deep=False)))

    # Check that params are set
    pipe.set_params(svc__a=0.1)
    assert_equal(clf.a, 0.1)
    assert_equal(clf.b, None)
    # Smoke test the repr:
    repr(pipe)

    # Test with two objects
    clf = SVC()
    filter1 = SelectKBest(f_classif)
    pipe = Pipeline([('anova', filter1), ('svc', clf)])

    # Check that we can't instantiate with non-transformers on the way
    # Note that NoTrans implements fit, but not transform
    assert_raises_regex(TypeError,
                        'All intermediate steps should be transformers'
                        '.*\\bNoTrans\\b.*',
                        Pipeline, [('t', NoTrans()), ('svc', clf)])

    # Check that params are set
    pipe.set_params(svc__C=0.1)
    assert_equal(clf.C, 0.1)
    # Smoke test the repr:
    repr(pipe)

    # Check that params are not set when naming them wrong
    assert_raises(ValueError, pipe.set_params, anova__C=0.1)

    # Test clone
    pipe2 = assert_no_warnings(clone, pipe)
    assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])

    # Check that apart from estimators, the parameters are the same
    params = pipe.get_params(deep=True)
    params2 = pipe2.get_params(deep=True)

    for x in pipe.get_params(deep=False):
        params.pop(x)

    for x in pipe2.get_params(deep=False):
        params2.pop(x)

    # Remove estimators that where copied
    params.pop('svc')
    params.pop('anova')
    params2.pop('svc')
    params2.pop('anova')
    assert_equal(params, params2)


def test_pipeline_init_tuple():
    """Pipeline accepts its steps as a tuple, not only a list."""
    # Pipeline accepts steps as tuple
    X = np.array([[1, 2]])
    pipe = Pipeline((('transf', Transf()), ('clf', FitParamT())))
    pipe.fit(X, y=None)
    pipe.score(X)

    pipe.set_params(transf=None)
    pipe.fit(X, y=None)
    pipe.score(X)
@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
def test_pipeline_methods_anova():
    """Smoke-test predict/proba/score on an anova + logistic pipeline."""
    # Test the various methods of the pipeline (anova).
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with Anova + LogisticRegression
    clf = LogisticRegression()
    filter1 = SelectKBest(f_classif, k=2)
    pipe = Pipeline([('anova', filter1), ('logistic', clf)])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)


def test_pipeline_fit_params():
    """fit parameters are routed to the named step only."""
    # Test that the pipeline can take fit parameters
    pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
    pipe.fit(X=None, y=None, clf__should_succeed=True)
    # classifier should return True
    assert_true(pipe.predict(None))
    # and transformer params should not be changed
    assert_true(pipe.named_steps['transf'].a is None)
    assert_true(pipe.named_steps['transf'].b is None)
    # invalid parameters should raise an error message
    assert_raise_message(
        TypeError,
        "fit() got an unexpected keyword argument 'bad'",
        pipe.fit, None, None, clf__bad=True
    )


def test_pipeline_sample_weight_supported():
    """score() forwards sample_weight when the final step supports it."""
    # Pipeline should pass sample_weight
    X = np.array([[1, 2]])
    pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
    pipe.fit(X, y=None)
    assert_equal(pipe.score(X), 3)
    assert_equal(pipe.score(X, y=None), 3)
    assert_equal(pipe.score(X, y=None, sample_weight=None), 3)
    assert_equal(pipe.score(X, sample_weight=np.array([2, 3])), 8)


def test_pipeline_sample_weight_unsupported():
    """Passing sample_weight errors when the final step cannot accept it."""
    # When sample_weight is None it shouldn't be passed
    X = np.array([[1, 2]])
    pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
    pipe.fit(X, y=None)
    assert_equal(pipe.score(X), 3)
    assert_equal(pipe.score(X, sample_weight=None), 3)
    assert_raise_message(
        TypeError,
        "score() got an unexpected keyword argument 'sample_weight'",
        pipe.score, X, sample_weight=np.array([2, 3])
    )
def test_pipeline_raise_set_params_error():
    """set_params with an unknown (possibly nested) name raises ValueError."""
    # Test pipeline raises set params error message for nested models.
    pipe = Pipeline([('cls', LinearRegression())])

    # expected error message
    error_msg = ('Invalid parameter %s for estimator %s. '
                 'Check the list of available parameters '
                 'with `estimator.get_params().keys()`.')

    assert_raise_message(ValueError,
                         error_msg % ('fake', pipe),
                         pipe.set_params,
                         fake='nope')

    # nested model check
    assert_raise_message(ValueError,
                         error_msg % ("fake", pipe),
                         pipe.set_params,
                         fake__estimator='nope')


def test_pipeline_methods_pca_svm():
    """Smoke-test predict/proba/score on a PCA + SVC pipeline."""
    # Test the various methods of the pipeline (pca + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with PCA + SVC
    clf = SVC(gamma='scale', probability=True, random_state=0)
    pca = PCA(svd_solver='full', n_components='mle', whiten=True)
    pipe = Pipeline([('pca', pca), ('svc', clf)])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)


def test_pipeline_methods_preprocessing_svm():
    """Check prediction-output shapes for scaler/PCA + SVC pipelines."""
    # Test the various methods of the pipeline (preprocessing + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    scaler = StandardScaler()
    pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
    clf = SVC(gamma='scale', probability=True, random_state=0,
              decision_function_shape='ovr')

    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
        pipe.fit(X, y)

        # check shapes of various prediction functions
        predict = pipe.predict(X)
        assert_equal(predict.shape, (n_samples,))

        proba = pipe.predict_proba(X)
        assert_equal(proba.shape, (n_samples, n_classes))

        log_proba = pipe.predict_log_proba(X)
        assert_equal(log_proba.shape, (n_samples, n_classes))

        decision_function = pipe.decision_function(X)
        assert_equal(decision_function.shape, (n_samples, n_classes))

        pipe.score(X, y)
def test_fit_predict_on_pipeline():
    """fit_predict on a pipeline matches applying the steps separately."""
    # test that the fit_predict method is implemented on a pipeline
    # test that the fit_predict on pipeline yields same results as applying
    # transform and clustering steps separately
    iris = load_iris()
    scaler = StandardScaler()
    km = KMeans(random_state=0)
    # As pipeline doesn't clone estimators on construction,
    # it must have its own estimators
    scaler_for_pipeline = StandardScaler()
    km_for_pipeline = KMeans(random_state=0)

    # first compute the transform and clustering step separately
    scaled = scaler.fit_transform(iris.data)
    separate_pred = km.fit_predict(scaled)

    # use a pipeline to do the transform and clustering in one step
    pipe = Pipeline([
        ('scaler', scaler_for_pipeline),
        ('Kmeans', km_for_pipeline)
    ])
    pipeline_pred = pipe.fit_predict(iris.data)

    assert_array_almost_equal(pipeline_pred, separate_pred)


def test_fit_predict_on_pipeline_without_fit_predict():
    """Pipeline exposes fit_predict only if its last step has it."""
    # tests that a pipeline does not have fit_predict method when final
    # step of pipeline does not have fit_predict defined
    scaler = StandardScaler()
    pca = PCA(svd_solver='full')
    pipe = Pipeline([('scaler', scaler), ('pca', pca)])
    assert_raises_regex(AttributeError,
                        "'PCA' object has no attribute 'fit_predict'",
                        getattr, pipe, 'fit_predict')


def test_fit_predict_with_intermediate_fit_params():
    """fit_predict routes fit_params to intermediate steps as well."""
    # tests that Pipeline passes fit_params to intermediate steps
    # when fit_predict is invoked
    pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
    pipe.fit_predict(X=None,
                     y=None,
                     transf__should_get_this=True,
                     clf__should_succeed=True)
    assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
    assert_true(pipe.named_steps['clf'].successful)
    assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)


def test_predict_with_predict_params():
    """predict forwards its keyword params to the final estimator."""
    # tests that Pipeline passes predict_params to the final estimator
    # when predict is invoked
    pipe = Pipeline([('transf', Transf()), ('clf', DummyEstimatorParams())])
    pipe.fit(None, None)
    pipe.predict(X=None, got_attribute=True)
    assert_true(pipe.named_steps['clf'].got_attribute)
def test_feature_union():
    """FeatureUnion basics: dense/sparse input, clone, set_params, errors."""
    # basic sanity check for feature union
    iris = load_iris()
    X = iris.data
    X -= X.mean(axis=0)
    y = iris.target
    svd = TruncatedSVD(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    fs = FeatureUnion([("svd", svd), ("select", select)])
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 3))

    # check if it does the expected thing
    assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())

    # test if it also works for sparse input
    # We use a different svd object to control the random_state stream
    fs = FeatureUnion([("svd", svd), ("select", select)])
    X_sp = sparse.csr_matrix(X)
    X_sp_transformed = fs.fit_transform(X_sp, y)
    assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())

    # Test clone
    fs2 = assert_no_warnings(clone, fs)
    assert_false(fs.transformer_list[0][1] is fs2.transformer_list[0][1])

    # test setting parameters
    fs.set_params(select__k=2)
    assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))

    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
    X_transformed = fs.fit_transform(X, y)
    assert_equal(X_transformed.shape, (X.shape[0], 8))

    # test error if some elements do not support transform
    assert_raises_regex(TypeError,
                        'All estimators should implement fit and '
                        'transform.*\\bNoTrans\\b',
                        FeatureUnion,
                        [("transform", Transf()), ("no_transform", NoTrans())])

    # test that init accepts tuples
    fs = FeatureUnion((("svd", svd), ("select", select)))
    fs.fit(X, y)


def test_make_union():
    """make_union names the transformers after their lower-cased types."""
    pca = PCA(svd_solver='full')
    mock = Transf()
    fu = make_union(pca, mock)
    names, transformers = zip(*fu.transformer_list)
    assert_equal(names, ("pca", "transf"))
    assert_equal(transformers, (pca, mock))


def test_make_union_kwargs():
    """make_union accepts n_jobs but rejects unknown keyword arguments."""
    pca = PCA(svd_solver='full')
    mock = Transf()
    fu = make_union(pca, mock, n_jobs=3)
    assert_equal(fu.transformer_list, make_union(pca, mock).transformer_list)
    assert_equal(3, fu.n_jobs)
    # invalid keyword parameters should raise an error message
    assert_raise_message(
        TypeError,
        'Unknown keyword arguments: "transformer_weights"',
        make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1}
    )
def test_pipeline_transform():
    """transform/fit_transform/inverse_transform on a transformer pipeline."""
    # Test whether pipeline works with a transformer at the end.
    # Also test pipeline.transform and pipeline.inverse_transform
    iris = load_iris()
    X = iris.data
    pca = PCA(n_components=2, svd_solver='full')
    pipeline = Pipeline([('pca', pca)])

    # test transform and fit_transform:
    X_trans = pipeline.fit(X).transform(X)
    X_trans2 = pipeline.fit_transform(X)
    X_trans3 = pca.fit_transform(X)
    assert_array_almost_equal(X_trans, X_trans2)
    assert_array_almost_equal(X_trans, X_trans3)

    X_back = pipeline.inverse_transform(X_trans)
    X_back2 = pca.inverse_transform(X_trans)
    assert_array_almost_equal(X_back, X_back2)


def test_pipeline_fit_transform():
    """fit_transform works when a step lacks a fit_transform method."""
    # Test whether pipeline works with a transformer missing fit_transform
    iris = load_iris()
    X = iris.data
    y = iris.target
    transf = Transf()
    pipeline = Pipeline([('mock', transf)])

    # test fit_transform:
    X_trans = pipeline.fit_transform(X, y)
    X_trans2 = transf.fit(X, y).transform(X)
    assert_array_almost_equal(X_trans, X_trans2)


def test_set_pipeline_steps():
    """steps can be replaced via attribute assignment or set_params."""
    transf1 = Transf()
    transf2 = Transf()
    pipeline = Pipeline([('mock', transf1)])
    assert_true(pipeline.named_steps['mock'] is transf1)

    # Directly setting attr
    pipeline.steps = [('mock2', transf2)]
    assert_true('mock' not in pipeline.named_steps)
    assert_true(pipeline.named_steps['mock2'] is transf2)
    assert_equal([('mock2', transf2)], pipeline.steps)

    # Using set_params
    pipeline.set_params(steps=[('mock', transf1)])
    assert_equal([('mock', transf1)], pipeline.steps)

    # Using set_params to replace single step
    pipeline.set_params(mock=transf2)
    assert_equal([('mock', transf2)], pipeline.steps)

    # With invalid data
    pipeline.set_params(steps=[('junk', ())])
    assert_raises(TypeError, pipeline.fit, [[1]], [1])
    assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])


def test_pipeline_named_steps():
    """named_steps supports attribute access without shadowing dict attrs."""
    transf = Transf()
    mult2 = Mult(mult=2)
    pipeline = Pipeline([('mock', transf), ("mult", mult2)])

    # Test access via named_steps bunch object
    assert_true('mock' in pipeline.named_steps)
    assert_true('mock2' not in pipeline.named_steps)
    assert_true(pipeline.named_steps.mock is transf)
    assert_true(pipeline.named_steps.mult is mult2)

    # Test bunch with conflict attribute of dict
    pipeline = Pipeline([('values', transf), ("mult", mult2)])
    assert_true(pipeline.named_steps.values is not transf)
    assert_true(pipeline.named_steps.mult is mult2)
def test_set_pipeline_step_none():
    """Steps set to None act as passthroughs and can be restored."""
    # Test setting Pipeline steps to None
    X = np.array([[1]])
    y = np.array([1])
    mult2 = Mult(mult=2)
    mult3 = Mult(mult=3)
    mult5 = Mult(mult=5)

    def make():
        # Fresh pipeline over the shared Mult instances.
        return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])

    pipeline = make()

    exp = 2 * 3 * 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))

    pipeline.set_params(m3=None)
    exp = 2 * 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    assert_dict_equal(pipeline.get_params(deep=True),
                      {'steps': pipeline.steps,
                       'm2': mult2,
                       'm3': None,
                       'last': mult5,
                       'memory': None,
                       'm2__mult': 2,
                       'last__mult': 5,
                       })

    pipeline.set_params(m2=None)
    exp = 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))

    # for other methods, ensure no AttributeErrors on None:
    other_methods = ['predict_proba', 'predict_log_proba',
                     'decision_function', 'transform', 'score']
    for method in other_methods:
        getattr(pipeline, method)(X)

    pipeline.set_params(m2=mult2)
    exp = 2 * 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))

    pipeline = make()
    pipeline.set_params(last=None)
    # mult2 and mult3 are active
    exp = 6
    assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    assert_raise_message(AttributeError,
                         "'NoneType' object has no attribute 'predict'",
                         getattr, pipeline, 'predict')

    # Check None step at construction time
    exp = 2 * 5
    pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))


def test_pipeline_ducktyping():
    """A pipeline exposes only the methods its steps actually support."""
    pipeline = make_pipeline(Mult(5))
    pipeline.predict
    pipeline.transform
    pipeline.inverse_transform

    pipeline = make_pipeline(Transf())
    assert_false(hasattr(pipeline, 'predict'))
    pipeline.transform
    pipeline.inverse_transform

    pipeline = make_pipeline(None)
    assert_false(hasattr(pipeline, 'predict'))
    pipeline.transform
    pipeline.inverse_transform

    pipeline = make_pipeline(Transf(), NoInvTransf())
    assert_false(hasattr(pipeline, 'predict'))
    pipeline.transform
    assert_false(hasattr(pipeline, 'inverse_transform'))

    pipeline = make_pipeline(NoInvTransf(), Transf())
    assert_false(hasattr(pipeline, 'predict'))
    pipeline.transform
    assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
    """make_pipeline auto-names steps and rejects unknown keywords."""
    t1 = Transf()
    t2 = Transf()
    pipe = make_pipeline(t1, t2)
    assert_true(isinstance(pipe, Pipeline))
    assert_equal(pipe.steps[0][0], "transf-1")
    assert_equal(pipe.steps[1][0], "transf-2")

    pipe = make_pipeline(t1, t2, FitParamT())
    assert_true(isinstance(pipe, Pipeline))
    assert_equal(pipe.steps[0][0], "transf-1")
    assert_equal(pipe.steps[1][0], "transf-2")
    assert_equal(pipe.steps[2][0], "fitparamt")

    assert_raise_message(
        TypeError,
        'Unknown keyword arguments: "random_parameter"',
        make_pipeline, t1, t2, random_parameter='rnd'
    )


def test_feature_union_weights():
    """transformer_weights scale each transformer's output block."""
    # test feature union with transformer weights
    iris = load_iris()
    X = iris.data
    y = iris.target
    pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
    select = SelectKBest(k=1)
    # test using fit followed by transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # test using fit_transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    X_fit_transformed = fs.fit_transform(X, y)
    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
                      transformer_weights={"mock": 10})
    X_fit_transformed_wo_method = fs.fit_transform(X, y)
    # check against expected result

    # We use a different pca object to control the random_state stream
    assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    assert_array_almost_equal(X_fit_transformed[:, :-1],
                              10 * pca.fit_transform(X))
    assert_array_equal(X_fit_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))


def test_feature_union_parallel():
    """n_jobs > 1 gives the same output as the serial FeatureUnion."""
    # test that n_jobs work for FeatureUnion
    X = JUNK_FOOD_DOCS

    fs = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ])

    fs_parallel = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)

    fs_parallel2 = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)

    fs.fit(X)
    X_transformed = fs.transform(X)
    assert_equal(X_transformed.shape[0], len(X))

    fs_parallel.fit(X)
    X_transformed_parallel = fs_parallel.transform(X)
    assert_equal(X_transformed.shape, X_transformed_parallel.shape)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel.toarray()
    )

    # fit_transform should behave the same
    X_transformed_parallel2 = fs_parallel2.fit_transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )

    # transformers should stay fit after fit_transform
    X_transformed_parallel2 = fs_parallel2.transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )


def test_feature_union_feature_names():
    """get_feature_names prefixes names and errors on unsupporting steps."""
    word_vect = CountVectorizer(analyzer="word")
    char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
    ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
    ft.fit(JUNK_FOOD_DOCS)
    feature_names = ft.get_feature_names()
    for feat in feature_names:
        assert_true("chars__" in feat or "words__" in feat)
    assert_equal(len(feature_names), 35)

    ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
    assert_raise_message(AttributeError,
                         'Transformer tr1 (type Transf) does not provide '
                         'get_feature_names', ft.get_feature_names)
@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
def test_classes_property():
    """classes_ is exposed for classifiers only, and only after fit."""
    iris = load_iris()
    X = iris.data
    y = iris.target

    reg = make_pipeline(SelectKBest(k=1), LinearRegression())
    reg.fit(X, y)
    assert_raises(AttributeError, getattr, reg, "classes_")

    clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
    assert_raises(AttributeError, getattr, clf, "classes_")
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))


def test_set_feature_union_steps():
    """transformer_list can be replaced directly or through set_params."""
    mult2 = Mult(2)
    mult2.get_feature_names = lambda: ['x2']
    mult3 = Mult(3)
    mult3.get_feature_names = lambda: ['x3']
    mult5 = Mult(5)
    mult5.get_feature_names = lambda: ['x5']

    ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
    assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
    assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())

    # Directly setting attr
    ft.transformer_list = [('m5', mult5)]
    assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
    assert_equal(['m5__x5'], ft.get_feature_names())

    # Using set_params
    ft.set_params(transformer_list=[('mock', mult3)])
    assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
    assert_equal(['mock__x3'], ft.get_feature_names())

    # Using set_params to replace single step
    ft.set_params(mock=mult5)
    assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
    assert_equal(['mock__x5'], ft.get_feature_names())


@pytest.mark.parametrize('drop', ['drop', None])
def test_set_feature_union_step_drop(drop):
    """A step set to 'drop'/None is skipped and can be re-enabled later."""
    mult2 = Mult(2)
    mult2.get_feature_names = lambda: ['x2']
    mult3 = Mult(3)
    mult3.get_feature_names = lambda: ['x3']
    X = np.asarray([[1]])

    ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
    assert_array_equal([[2, 3]], ft.fit(X).transform(X))
    assert_array_equal([[2, 3]], ft.fit_transform(X))
    assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())

    ft.set_params(m2=drop)
    assert_array_equal([[3]], ft.fit(X).transform(X))
    assert_array_equal([[3]], ft.fit_transform(X))
    assert_equal(['m3__x3'], ft.get_feature_names())

    ft.set_params(m3=drop)
    assert_array_equal([[]], ft.fit(X).transform(X))
    assert_array_equal([[]], ft.fit_transform(X))
    assert_equal([], ft.get_feature_names())

    # check we can change back
    ft.set_params(m3=mult3)
    assert_array_equal([[3]], ft.fit(X).transform(X))

    # Check 'drop' step at construction time
    ft = FeatureUnion([('m2', drop), ('m3', mult3)])
    assert_array_equal([[3]], ft.fit(X).transform(X))
    assert_array_equal([[3]], ft.fit_transform(X))
    assert_equal(['m3__x3'], ft.get_feature_names())
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Estimator names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Estimator names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_set_params_nested_pipeline():
    """set_params on a nested Pipeline must allow replacing an inner step
    and tuning the replacement's parameters within a single call."""
    estimator = Pipeline([
        ('a', Pipeline([
            ('b', DummyRegressor())
        ]))
    ])
    # Replace inner step 'b' and set a param on the *new* estimator at once.
    estimator.set_params(a__b__alpha=0.001, a__b=Lasso())
    # Same idea via the inner pipeline's 'steps' parameter.
    estimator.set_params(a__steps=[('b', LogisticRegression())], a__b__C=5)
def test_pipeline_wrong_memory():
    """Pipeline must reject a `memory` argument that is neither None, a
    string, nor a joblib.Memory-like object."""
    # Test that an error is raised when memory is not a string or a Memory
    # instance
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Define memory as an integer
    memory = 1
    cached_pipe = Pipeline([('transf', DummyTransf()),
                            ('svc', SVC())], memory=memory)
    assert_raises_regex(ValueError, "'memory' should be None, a string or"
                        " have the same interface as "
                        "sklearn.utils.Memory."
                        " Got memory='1' instead.", cached_pipe.fit, X, y)
class DummyMemory(object):
    """Memory stand-in exposing the minimal caching interface Pipeline accepts."""
    def cache(self, func):
        # No-op memoisation: hand the callable straight back.
        return func
class WrongDummyMemory(object):
    """Lacks a cache() method, so Pipeline must reject it as a memory argument."""
    pass
def test_pipeline_with_cache_attribute():
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())],
memory=DummyMemory())
pipe.fit(X, y=None)
dummy = WrongDummyMemory()
pipe = Pipeline([('transf', Transf()), ('clf', Mult())],
memory=dummy)
assert_raises_regex(ValueError, "'memory' should be None, a string or"
" have the same interface as "
"sklearn.utils.Memory."
" Got memory='{}' instead.".format(dummy), pipe.fit, X)
def test_pipeline_memory():
iris = load_iris()
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
if LooseVersion(joblib_version) < LooseVersion('0.12'):
# Deal with change of API in joblib
memory = Memory(cachedir=cachedir, verbose=10)
else:
memory = Memory(location=cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(gamma='scale', probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
# Get the time stamp of the transformer in the cached pipeline
ts = cached_pipe.named_steps['transf'].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_false(hasattr(transf, 'means_'))
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_equal(ts, cached_pipe.named_steps['transf'].timestamp_)
# Create a new pipeline with cloned estimators
# Check that even changing the name step does not affect the cache hit
clf_2 = SVC(gamma='scale', probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
memory=memory)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X),
cached_pipe_2.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe_2.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe_2.named_steps['transf_2'].means_)
assert_equal(ts, cached_pipe_2.named_steps['transf_2'].timestamp_)
finally:
shutil.rmtree(cachedir)
def test_make_pipeline_memory():
cachedir = mkdtemp()
if LooseVersion(joblib_version) < LooseVersion('0.12'):
# Deal with change of API in joblib
memory = Memory(cachedir=cachedir, verbose=10)
else:
memory = Memory(location=cachedir, verbose=10)
pipeline = make_pipeline(DummyTransf(), SVC(), memory=memory)
assert_true(pipeline.memory is memory)
pipeline = make_pipeline(DummyTransf(), SVC())
assert_true(pipeline.memory is None)
shutil.rmtree(cachedir)
| StarcoderdataPython |
6610546 | from PyQt5 import QtWidgets, QtCore, QtGui
import numpy as np
import cv2
from PIL import ImageGrab
import platform
platform_name = platform.system()
isMac = platform_name == 'Darwin'
if isMac:
from MacCapture import cartesian_capture
else:
import tkinter as tk
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 400
IMG_FILE_NAME = 'temp.png'
# Refer to https://github.com/harupy/snipping-tool
class SnippingWidget(QtWidgets.QWidget):
    """Full-screen translucent overlay for selecting a screen region.

    The user drags a rectangle; on mouse release the region is captured,
    saved to IMG_FILE_NAME and handed to ``onSnippingCompleted`` (a callable
    taking a BGR numpy array), if one has been assigned by the caller.

    Adapted from https://github.com/harupy/snipping-tool.
    """

    is_snipping = False
    background = True

    def __init__(self, parent=None, app=None):
        super().__init__()
        # Size the overlay to the primary screen. On macOS the Qt app object
        # provides the geometry; elsewhere fall back to tkinter.
        if isMac:
            screen_width = app.primaryScreen().size().width()
            screen_height = app.primaryScreen().size().height()
        else:
            root = tk.Tk()
            screen_width = root.winfo_screenwidth()
            screen_height = root.winfo_screenheight()
        print("screen_width", screen_width)
        print("screen_height", screen_height)
        self.setGeometry(0, 0, screen_width, screen_height)
        self.setWindowTitle(' ')
        self.begin = QtCore.QPoint()
        self.end = QtCore.QPoint()
        # Callback invoked with the captured image; previously this attribute
        # was never initialised, so mouseReleaseEvent raised AttributeError
        # when no callback had been assigned.
        self.onSnippingCompleted = None
        self.setWindowFlags(QtCore.Qt.WindowType.FramelessWindowHint)

    def start(self):
        """Show the overlay in snipping mode with a cross-hair cursor."""
        self.setWindowFlags(QtCore.Qt.WindowType.WindowStaysOnTopHint)
        SnippingWidget.background = False
        SnippingWidget.is_snipping = True
        self.setWindowOpacity(0.3)
        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))
        self.show()

    def paintEvent(self, event):
        """Draw the selection rectangle while snipping; otherwise reset it."""
        if SnippingWidget.is_snipping:
            brush_color = (128, 128, 255, 100)
            lw = 3
            opacity = 0.3
        else:
            # Reset points, so the rectangle won't show up again.
            self.begin = QtCore.QPoint()
            self.end = QtCore.QPoint()
            brush_color = (0, 0, 0, 0)
            lw = 0
            opacity = 0
        self.setWindowOpacity(opacity)
        qp = QtGui.QPainter(self)
        qp.setPen(QtGui.QPen(QtGui.QColor('black'), lw))
        qp.setBrush(QtGui.QColor(*brush_color))
        qp.drawRect(QtCore.QRect(self.begin, self.end))

    def mousePressEvent(self, event):
        self.begin = event.pos()
        self.end = self.begin
        self.update()

    def mouseMoveEvent(self, event):
        self.end = event.pos()
        self.update()

    def mouseReleaseEvent(self, event):
        """Finish the snip: capture the selected region and fire the callback."""
        SnippingWidget.is_snipping = False
        QtWidgets.QApplication.restoreOverrideCursor()
        # Normalise the drag rectangle first. These were previously computed
        # only on the non-mac path, so the macOS branch raised NameError.
        x1 = min(self.begin.x(), self.end.x())
        y1 = min(self.begin.y(), self.end.y())
        x2 = max(self.begin.x(), self.end.x())
        y2 = max(self.begin.y(), self.end.y())
        if isMac:
            screenShape = QtGui.QGuiApplication.primaryScreen().availableGeometry()
            cartesian_capture(x=x1,
                              y=y1,
                              width=abs(x1 - x2),
                              height=abs(y1 - y2),
                              total_width=screenShape.width(),
                              total_height=screenShape.height(),
                              path=IMG_FILE_NAME)
            # cartesian_capture presumably writes the grab to IMG_FILE_NAME
            # (TODO confirm); read it back (already BGR) so the callback gets
            # a real array instead of crashing on img=None as the old code did.
            img = cv2.imread(IMG_FILE_NAME)
        else:
            self.repaint()
            QtWidgets.QApplication.processEvents()
            pil_img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
            pil_img.save(IMG_FILE_NAME)
            # PIL delivers RGB; swap channels so downstream cv2 code sees BGR.
            img = cv2.cvtColor(np.array(pil_img), cv2.COLOR_BGR2RGB)
        QtWidgets.QApplication.processEvents()
        if self.onSnippingCompleted is not None:
            self.onSnippingCompleted(img)
        self.close()

    def convert_numpy_img_to_qpixmap(self, np_img):
        """Convert an HxWx3 BGR numpy image into a QPixmap."""
        height, width, channel = np_img.shape
        bytesPerLine = 3 * width
        return QtGui.QPixmap(QtGui.QImage(np_img.data, width, height, bytesPerLine, QtGui.QImage.Format.Format_RGB888).rgbSwapped())
8160363 | <filename>tornado_server/router.py
#!/usr/bin/python3
from tornado.web import Application
from handlers import *
settings = {
"cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__"
}
import defaults
from tinydb import TinyDB, Query
cache = TinyDB(defaults.CACHE_FILE)
import database
db = database.Database()
# router for http server
def router():
return Application([
(r"/", MainHandler),
(r'/api/v1/device', CreateDeviceHandler, dict(cache=cache, database=db)),
(r'/api/v1/device/(?P<device_id>[^\/]+)/waypoint', InsertDeviceWaypointHandler, dict(cache=cache, database=db)),
(r'/api/v1/device/(?P<device_id>[^\/]+)/trip', TripHandler, dict(cache=cache, database=db)),
], **settings)
| StarcoderdataPython |
200086 | <reponame>FullFact/python-batchmailchimp<filename>batch_mailchimp/batch_operations.py<gh_stars>0
from mailchimp3.entities.batchoperations import \
BatchOperations as OriginalBatchOperations
class BatchOperations(OriginalBatchOperations):
    """Extends mailchimp3's BatchOperations to also accept batch wrapper
    objects that track their own run state (``_run``, ``operations``,
    ``id``), while remaining backward-compatible with the raw types the
    parent class expects (a dict for ``create``, a string id otherwise)."""

    def create(self, data):
        """Submit a batch.

        Accepts either a raw operations dict (delegated to the parent) or a
        batch object, whose operations are submitted and whose ``id`` and
        ``_run`` flag are updated from the response.
        """
        if isinstance(data, dict):
            # Raw payload: defer entirely to the parent implementation.
            return super(BatchOperations, self).create(data)
        batch = data
        if batch._run:
            raise Exception('Batch has already been run.')
        data = {
            'operations': batch.operations,
        }
        resp = super(BatchOperations, self).create(data)
        batch.id = resp.get('id')
        batch._run = True
        return batch

    def get(self, batch_id, **queryparams):
        """Fetch a batch's status by string id or batch object.

        An un-run batch object short-circuits to ``{'status': 'not started'}``
        without hitting the API.
        """
        if not isinstance(batch_id, str):
            batch = batch_id
            if not batch._run:
                return {'status': 'not started'}
            batch_id = batch.id
        return super(BatchOperations, self).get(batch_id, **queryparams)

    def delete(self, batch_id):
        """Delete a batch by string id or (already-run) batch object."""
        if not isinstance(batch_id, str):
            batch = batch_id
            if not batch._run:
                raise Exception('Batch hasn\'t been run yet.')
            batch_id = batch.id
        return super(BatchOperations, self).delete(batch_id)
| StarcoderdataPython |
6556968 | <gh_stars>0
import os
from PIL import Image
import torch
import torchvision.transforms as tf
from torchvision.datasets import CIFAR10
import torch
from torchvision.transforms import transforms
def cifar10n(transform=None):
    """Download CIFAR-10 and return it channel-normalised.

    Returns a tuple ``(dataset, mean, std)`` where ``dataset`` yields bare
    image tensors (labels discarded) normalised per channel, and ``mean`` /
    ``std`` are the per-channel statistics used for the normalisation.
    """
    if transform is None:
        transform = tf.ToTensor()
    raw = CIFAR10('./data/cifar10', transform=transform, download=True)
    # Stack every image into one (N, C, H, W) tensor.
    images = torch.cat([sample[0].unsqueeze(0) for sample in raw], dim=0)
    mean, std = compute_mean(raw)
    normalised = (images - mean[..., None, None]) / std[..., None, None]
    dataset = DiscardLabels(torch.utils.data.TensorDataset(normalised))
    return dataset, mean, std
def compute_mean(dataset):
    """Per-channel mean and std over an (image, label) dataset.

    Each sample's image is first reduced over its spatial dimensions; the
    std is then derived via sqrt(E[x^2] - E[x]^2).
    """
    per_image_means = torch.stack([img.mean((1, 2)) for img, *_ in dataset])
    per_image_sq_means = torch.stack([(img ** 2).mean((1, 2)) for img, *_ in dataset])
    first_moment = per_image_means.mean(dim=0)
    second_moment = per_image_sq_means.mean(dim=0)
    return first_moment, torch.sqrt(second_moment - first_moment ** 2)
class DiscardLabels():
    """Dataset view that yields only the data part of each (data, label) sample."""

    def __init__(self, dataset) -> None:
        super().__init__()
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        sample = self.dataset[idx]
        return sample[0]
class SingleImageDataset():
    """Dataset that serves the same image `iterations` times.

    Useful for single-image experiments: each __getitem__ re-applies
    `transform`, so augmented samples can differ per draw.
    """
    def __init__(self, image_path, iterations,
                 PIL_image_mode='RGB', transform=None):
        self.image_path = os.path.abspath(image_path)
        # Decode once up front, converted to the requested PIL mode.
        self.image = Image.open(self.image_path).convert(PIL_image_mode)
        self.transform = transform
        self.iterations = iterations
    def __len__(self):
        # The dataset "length" is just how many draws the caller wants.
        return self.iterations
    def __getitem__(self, idx):
        # idx is ignored: every index maps to the same underlying image.
        return self.transform(self.image) if self.transform is not None \
            else self.image
| StarcoderdataPython |
1949936 | <reponame>tuming1990/tf-pdnn
# Copyright 2013 <NAME> Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from layers.logistic_sgd import LogisticRegression
from layers.mlp import HiddenLayer
from layers.da import dA, dA_maxout
class SdA(object):
def __init__(self, numpy_rng, theano_rng=None, cfg = None, dnn = None):
""" Stacked Denoising Autoencoders for DNN Pre-training """
self.cfg = cfg
self.hidden_layers_sizes = cfg.hidden_layers_sizes
self.n_ins = cfg.n_ins
self.hidden_layers_number = len(self.hidden_layers_sizes)
self.dA_layers = []
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = dnn.x
for i in xrange(self.hidden_layers_number):
# the size of the input is either the number of hidden units of
# the layer below, or the input size if we are on the first layer
if i == 0:
input_size = self.n_ins
layer_input = self.x
else:
input_size = self.hidden_layers_sizes[i - 1]
layer_input = dnn.layers[i-1].output
# Construct a denoising autoencoder that shared weights with this layer
if i == 0:
reconstruct_activation = cfg.firstlayer_reconstruct_activation
else:
reconstruct_activation = cfg.hidden_activation
dA_layer = dA(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=self.hidden_layers_sizes[i],
W=dnn.layers[i].W,
bhid=dnn.layers[i].b,
sparsity = cfg.sparsity,
sparsity_weight = cfg.sparsity_weight,
hidden_activation = cfg.hidden_activation,
reconstruct_activation = reconstruct_activation)
self.dA_layers.append(dA_layer)
def pretraining_functions(self, train_set_x, batch_size):
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
corruption_level = T.scalar('corruption') # % of corruption to use
learning_rate = T.scalar('lr') # learning rate to use
momentum = T.scalar('momentum')
# number of batches
n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# begining of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for dA in self.dA_layers:
# get the cost and the updates list
cost, updates = dA.get_cost_updates(corruption_level, learning_rate, momentum)
# compile the theano function
fn = theano.function(inputs=[index,
theano.Param(corruption_level, default=0.2),
theano.Param(learning_rate, default=0.1),
theano.Param(momentum, default=0.5)],
outputs=cost,
updates=updates,
givens={self.x: train_set_x[batch_begin:batch_end]})
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
# an outdated class for SdA with maxout hidden activation. pre-training has been expirically found to be
# NOT helpful for maxout networks, so we don't update this class
class SdA_maxout(object):
def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
hidden_layers_sizes=[500, 500], n_outs=10,
corruption_levels=[0.1, 0.1], pool_size = 3,
sparsity = None, sparsity_weight = None,
first_reconstruct_activation = T.tanh):
self.sigmoid_layers = []
self.dA_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x')
self.y = T.ivector('y')
for i in xrange(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden units of
# the layer below or the input size if we are on the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i] * pool_size,
activation=(lambda x: 1.0*x),
do_maxout = True, pool_size = pool_size)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
self.params.extend(sigmoid_layer.params)
# Construct a denoising autoencoder that shared weights with this layer
if i == 0:
reconstruct_activation = first_reconstruct_activation
else:
reconstruct_activation = (lambda x: 1.0*x)
# reconstruct_activation = first_reconstruct_activation
dA_layer = dA_maxout(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i] * pool_size,
W=sigmoid_layer.W,
bhid=sigmoid_layer.b,
sparsity = sparsity,
sparsity_weight = sparsity_weight,
pool_size = pool_size,
reconstruct_activation = reconstruct_activation)
self.dA_layers.append(dA_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1], n_out=n_outs)
self.sigmoid_layers.append(self.logLayer)
self.params.extend(self.logLayer.params)
# construct a function that implements one step of finetunining
def pretraining_functions(self, train_set_x, batch_size):
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
corruption_level = T.scalar('corruption') # % of corruption to use
learning_rate = T.scalar('lr') # learning rate to use
# number of batches
n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# begining of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for dA in self.dA_layers:
# get the cost and the updates list
cost, updates = dA.get_cost_updates(corruption_level,
learning_rate)
# compile the theano function
fn = theano.function(inputs=[index,
theano.Param(corruption_level, default=0.2),
theano.Param(learning_rate, default=0.1)],
outputs=cost,
updates=updates,
givens={self.x: train_set_x[batch_begin:
batch_end]})
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
| StarcoderdataPython |
11232234 | <filename>gask/utils/gcommands.py
from .gobjects import GGask, GTimeEntry, GUser, GProject, GThread, GIssue
import json
def build_me_profile(session, url):
    """Fetch the current user's profile and annotate it with the title of
    the gask whose time entry is still running, or 'noting..' if none."""
    req = session.get(url + '/me/')
    profile_ = json.loads(req.text)
    profile = GUser(data=profile_)
    # Ask the API for the user's most recent time entry.
    req = session.get(url + '/me/?last_entry=true&')
    last_entry_ = json.loads(req.text)
    try:
        last_entry_ = GTimeEntry(data=last_entry_)
    except ValueError:
        # Payload did not parse as a time entry -> treat as "no entry".
        last_entry_ = None
    if last_entry_ is not None and last_entry_.end_time is None:
        # Entry is still open: resolve its parent gask to show its title.
        req = session.get(url + '/gasks/' + str(last_entry_.parent))
        last_gask = json.loads(req.text)
        last_entry = last_gask['title']
        profile.last_time_entry = last_entry
        return profile
    profile.last_time_entry = 'noting..'
    return profile
def build_projects(session, url):
req = session.get(url + '/projects/')
projects_ = json.loads(req.text)
projects = []
for project_ in projects_:
project = GProject(data=project_)
for gask in project.gasks:
req = session.get(url + '/gasks/' + str(gask))
data = json.loads(req.text)
gask_object = GGask(data=data)
build_rgask_object(
root_gask_object=gask_object, url=url, session=session)
project.gask_objects.append(gask_object)
for issue in project.issues:
req = session.get(url + '/issues/' + str(issue))
data = json.loads(req.text)
issue_object = GIssue(data=data)
build_rgask_object(
root_gask_object=issue_object, url=url, session=session)
project.issue_objects.append(issue_object)
for thread in project.threads:
req = session.get(url + '/threads/' + str(thread))
data = json.loads(req.text)
thread_object = GThread(data=data)
build_rgask_object(
root_gask_object=thread_object, url=url, session=session)
project.thread_objects.append(thread_object)
projects.append(project)
return projects
# Root gask objects are issues, threads and gasks themselves
def build_rgask_object(root_gask_object, url, session):
    """Recursively populate a root object's child gasks, issues and threads.

    For each child id listed on `root_gask_object`, fetch its JSON from the
    API, wrap it in the matching G* class, recurse into it, and append it to
    the corresponding *_objects list.
    """
    # (id-list attribute, URL prefix, wrapper class, destination list) —
    # replaces three copy-pasted loops that differed only in these values.
    child_specs = (
        ('gasks', '/gasks/', GGask, root_gask_object.gask_objects),
        ('issues', '/issues/', GIssue, root_gask_object.issue_objects),
        ('threads', '/threads/', GThread, root_gask_object.thread_objects),
    )
    for attr, prefix, wrapper, destination in child_specs:
        for child_id in getattr(root_gask_object, attr):
            req = session.get(url + prefix + str(child_id))
            child = wrapper(data=json.loads(req.text))
            build_rgask_object(root_gask_object=child, url=url, session=session)
            destination.append(child)
| StarcoderdataPython |
11367694 | <gh_stars>1-10
import glob
import argparse
#import lsst.eotest.sensor as sensorTest
from ultraflatstask import UltraFlatsTask
from os.path import join
# --- First pass: stack the raw ultra-flat frames for sensor ITL-3800c-090 ---
sensor_id = 'ITL-3800c-090'
in_file_path = '/nfs/slac/g/ki/ki19/lsst/elp25/S11/ultratask/'
# Raw flat exposures share the '00*.fits' naming pattern.
infiles = glob.glob(join(in_file_path, '00*.fits'))
#bias = glob.glob(join(in_file_path, '*bias*.fits'))[0]
# Hard-coded superflat bias frame (the glob above is kept for reference).
bias = 'ITL-3800C-090_sflat_bias_000_4663_20170621212349.fits'
mask_files = ()
gains = 1
ultratask = UltraFlatsTask()
ultratask.config.output_dir = '/nfs/slac/g/ki/ki19/lsst/elp25/S11/ultratask/'
ultratask.stack(sensor_id=sensor_id, infiles=infiles, mask_files=mask_files, gains=gains, binsize=1, bias_frame = bias)
# --- Second pass: single-pixel PTC from the mean/variance images that the
# stacking step above wrote into the same directory ---
in_file_path = '/nfs/slac/g/ki/ki19/lsst/elp25/S11/ultratask/'
ultratask = UltraFlatsTask()
infiles = glob.glob(join('/nfs/slac/g/ki/ki19/lsst/elp25/S11/ultratask/', '00*.fits'))
meanimages = glob.glob(join(in_file_path, 'mean*.fits'))
varimages = glob.glob(join(in_file_path, 'var*.fits'))
ultratask.single_pixel_ptc(meanimages, varimages,infiles)
| StarcoderdataPython |
5058559 | import os
# Traffic-light labels for the three dataset access levels.
CONFIDENTIALITY_MAP = {
    "public": "green",
    "restricted": "yellow",
    "non-public": "red",
}


def get_confidentiality(dataset):
    """Return the traffic-light colour for *dataset*'s ``accessRights`` level.

    Raises KeyError for unknown access levels.
    """
    access_level = dataset["accessRights"]
    return CONFIDENTIALITY_MAP[access_level]
def getenv(name):
    """Look up environment variable *name*.

    Returns its value, or raises OSError when it is unset or empty.
    """
    value = os.getenv(name)
    if value:
        return value
    raise OSError(f"Environment variable {name} is not set")
| StarcoderdataPython |
6429767 | '''
Created on 30.01.2020
@author: JM
'''
from PyTrinamic.ic.TMC5161.TMC5161_register import TMC5161_register
from PyTrinamic.ic.TMC5161.TMC5161_register_variant import TMC5161_register_variant
from PyTrinamic.ic.TMC5161.TMC5161_fields import TMC5161_fields
from PyTrinamic.helpers import TMC_helpers
class TMC5161():
    """
    Class for the TMC5161 IC.

    Thin register-access facade: writeRegister/readRegister are abstract and
    must be supplied by a connection-specific subclass; the field helpers
    build on them using (register address, mask, shift) field tuples.
    """
    def __init__(self, channel):
        # Communication channel handle passed through to register accessors.
        self.__channel = channel
        self.registers = TMC5161_register
        self.fields = TMC5161_fields
        self.variants = TMC5161_register_variant
        # The TMC5161 drives up to two motors.
        self.MOTORS = 2
    def showChipInfo(self):
        print("TMC5161 chip info: The TMC5161 is a high-power two phase stepper motor controller and driver IC with serial communication interfaces. Voltage supply: 8 - 55V")
    def writeRegister(self, registerAddress, value, channel):
        # Abstract: implemented by the concrete interface (e.g. SPI/UART) subclass.
        raise NotImplementedError
    def readRegister(self, registerAddress, channel):
        # Abstract: implemented by the concrete interface subclass.
        # NOTE(review): moveBy() below calls this with a 'signed' keyword that
        # this signature does not declare — subclasses must accept it; confirm.
        raise NotImplementedError
    def writeRegisterField(self, field, value):
        # Read-modify-write of one field within a register.
        return self.writeRegister(field[0], TMC_helpers.field_set(self.readRegister(field[0], self.__channel), field[1], field[2], value), self.__channel)
    def readRegisterField(self, field):
        # Extract one field's value from its register.
        return TMC_helpers.field_get(self.readRegister(field[0], self.__channel), field[1], field[2])
    def moveBy(self, motor, distance, velocity):
        # Relative move: read the current position, then issue an absolute move.
        if not(0 <= motor < self.MOTORS):
            raise ValueError
        position = self.readRegister(self.registers.XACTUAL, self.__channel, signed=True)
        # NOTE(review): moveTo() is not defined in this class — presumably
        # provided by a subclass or mixin; verify before use.
        self.moveTo(motor, position + distance, velocity)
        return position + distance
    def get_pin_state(self):
        # Placeholder; no pin-state readout implemented yet.
        pass
| StarcoderdataPython |
233051 | <filename>847. Shortest Path Visiting All Nodes/847. Shortest Path Visiting All Nodes.py
class Solution:
def shortestPathLength(self, graph):
def dp(node, mask):
state = (node, mask)
if state in cache:
return cache[state]
if mask & (mask - 1) == 0:
# Base case - mask only has a single "1", which means
# that only one node has been visited (the current node)
return 0
cache[state] = float("inf") # Avoid infinite loop in recursion
for neighbor in graph[node]:
if mask & (1 << neighbor):
already_visited = 1 + dp(neighbor, mask)
not_visited = 1 + dp(neighbor, mask ^ (1 << node))
cache[state] = min(cache[state], already_visited, not_visited)
return cache[state]
n = len(graph)
ending_mask = (1 << n) - 1
cache = {}
return min(dp(node, ending_mask) for node in range(n))
| StarcoderdataPython |
4956453 | <filename>14_Day_Higher_order_functions/exercises/7.py
# Exercise: upper-case every country name via a higher-order function.
countries = ['Estonia', 'Finland', 'Sweden', 'Denmark', 'Norway', 'Iceland']


def upper_countries(country):
    """Return the country name in upper case."""
    return country.upper()


print(list(map(upper_countries, countries)))
| StarcoderdataPython |
6436116 | """
Block for testing variously scoped XBlock fields.
"""
import json
from webob import Response
from xblock.core import XBlock, Scope
from xblock import fields
class UserStateTestBlock(XBlock):
"""
Block for testing variously scoped XBlock fields.
"""
BLOCK_TYPE = "user-state-test"
has_score = False
display_name = fields.String(scope=Scope.content, name='User State Test Block')
# User-specific fields:
user_str = fields.String(scope=Scope.user_state, default='default value') # This usage, one user
uss_str = fields.String(scope=Scope.user_state_summary, default='default value') # This usage, all users
pref_str = fields.String(scope=Scope.preferences, default='default value') # Block type, one user
user_info_str = fields.String(scope=Scope.user_info, default='default value') # All blocks, one user
@XBlock.json_handler
def set_user_state(self, data, suffix): # pylint: disable=unused-argument
"""
Set the user-scoped fields
"""
self.user_str = data["user_str"]
self.uss_str = data["uss_str"]
self.pref_str = data["pref_str"]
self.user_info_str = data["user_info_str"]
return {}
@XBlock.handler
def get_user_state(self, request, suffix=None): # pylint: disable=unused-argument
"""
Get the various user-scoped fields of this XBlock.
"""
return Response(
json.dumps({
"user_str": self.user_str,
"uss_str": self.uss_str,
"pref_str": self.pref_str,
"user_info_str": self.user_info_str,
}),
content_type='application/json',
charset='UTF-8',
)
| StarcoderdataPython |
59338 | <gh_stars>0
from base_models.densenet import DenseNet
from base_models.resnet import ResNet
from base_models.vgg import VGG
from base_models.xception import Xception
from base_models.mobilenet import MobileNet
from base_models.vovnet import VovNet
from base_models.vovnet_shortcut import VovNet_shortcut
from base_models.osa import OSA
| StarcoderdataPython |
1784620 | import click
from Bio import SeqIO
from Bio.Seq import Seq
from .CAI import CAI
@click.command()
@click.option(
    "-s",
    "--sequence",
    type=click.Path(exists=True, dir_okay=False),
    help="The sequence to calculate the CAI for.",
    required=True,
)
@click.option(
    "-r",
    "--reference",
    type=click.Path(exists=True, dir_okay=False),
    help="The reference sequences to calculate CAI against.",
    required=True,
)
@click.option(
    "-g",
    "--genetic-code",
    type=int,
    default=11,
    help="The genetic code to use. Defaults to 11.",
)
def cli(reference, sequence, genetic_code):
    """Print the Codon Adaptation Index of a FASTA sequence, computed
    against a multi-record FASTA reference set."""
    # The query file is expected to hold a single FASTA record.
    sequence = SeqIO.read(sequence, "fasta").seq
    reference = [str(x.seq) for x in SeqIO.parse(reference, "fasta")]
    print(CAI(sequence, reference=reference, genetic_code=genetic_code))


if __name__ == "__main__":
    cli()
| StarcoderdataPython |
9687548 | #!/usr/bin/env python
import os
import unittest
import uuid
import manifest
import plow.client
def launch_test_job(name):
"""
struct JobSpecT {
1:string name,
2:string project,
3:bool paused,
4:string username,
5:i32 uid,
6:string logPath
7:list<LayerSpecT> layers,
8:list<DependSpecT> depends
1:string name,
2:list<string> command,
3:set<string> tags,
4:optional string range,
5:i32 chunk = 1,
6:i32 minCores = 1,
7:i32 maxCores = 1,
8:i32 minRam = 1024,
9:bool threadable = false,
10:list<DependSpecT> depends,
11:list<TaskSpecT> tasks
"""
spec = plow.client.JobSpecT()
spec.name = name
spec.project = "test"
spec.paused = True
spec.username = os.environ["USER"]
spec.uid = os.geteuid()
spec.logPath = "/tmp"
spec.layers = []
spec.depends = []
layer = plow.client.LayerSpecT()
layer.name = "test_layer"
layer.command = ["/bin/ls"]
layer.tags = ["unassigned"]
layer.range = "1-1"
spec.layers.append(layer)
return plow.client.launch_job(spec)
def clear_job(name):
    """Best-effort cleanup: kill the active job called *name*, if any.

    Failures (job missing, client errors) are deliberately ignored so each
    test can start from a clean slate.
    """
    try:
        job = plow.client.get_active_job(name)
        plow.client.kill_job(job)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; a failed lookup just means nothing to clear.
        pass
class ApiModuleTests(unittest.TestCase):
def test_get_jobs(self):
clear_job("test_job_1")
job = None
try:
job = launch_test_job("test_job_1")
self.assertTrue(job.id in {j.id for j in plow.client.get_jobs()})
finally:
if job:
plow.client.kill_job(job)
def test_get_job(self):
clear_job("test_job_2")
job1 = None
try:
job1 = launch_test_job("test_job_2")
job2 = plow.client.get_job(job1.id)
self.assertEquals(job1.id, job2.id)
finally:
if job1:
plow.client.kill_job(job1)
def test_get_active_job(self):
clear_job("test_job_3")
job1 = None
try:
job1 = launch_test_job("test_job_3")
job2 = plow.client.get_active_job(job1.name)
self.assertEquals(job1.id, job2.id)
finally:
if job1:
plow.client.kill_job(job1)
def test_get_clusters(self):
clusters = plow.client.get_clusters()
self.assertTrue(len(clusters) > 0)
def test_get_cluster(self):
c1 = plow.client.get_cluster("unassigned")
c2 = plow.client.get_cluster(c1.id)
self.assertEquals(c1, c2)
def test_create_cluster(self):
name = str(uuid.uuid4())
c = plow.client.create_cluster(name, ["linux", "himem"])
self.assertEquals(name, c.name)
def test_delete_cluster(self):
name = str(uuid.uuid4())
c = plow.client.create_cluster(name, ["linux", "himem"])
plow.client.delete_cluster(c)
try:
plow.client.get_cluster(name)
except Exception, e:
pass
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(ApiModuleTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
5185862 | <filename>Python/Polyval/Polyval.py<gh_stars>0
import numpy as np

# Read the polynomial coefficients (highest degree first, space-separated)
# from the first input line and the integer evaluation point from the
# second, then print the polynomial evaluated at that point.
print(np.polyval(list(map(float,input().split())), int(input())))
| StarcoderdataPython |
1932559 | <reponame>LittleBai0606/TeachingSecretarySystem<filename>TSSystem/apps/srtp_project/migrations/0018_auto_20180608_0921.py
# Generated by Django 2.0.5 on 2018-06-08 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine notification.notifi_id as a unique integer."""

    dependencies = [
        ('srtp_project', '0017_auto_20180607_2143'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notification',
            name='notifi_id',
            # NOTE(review): the default looks like a Unix timestamp frozen at
            # generation time — confirm new rows should really share it.
            field=models.IntegerField(default=1528420882, unique=True, verbose_name='通知id'),
        ),
    ]
| StarcoderdataPython |
8097331 | <reponame>uzairAK/serverom-panel<gh_stars>0
# -*- coding: utf-8 -*-
from .cloudManager import CloudManager
import json
from loginSystem.models import Administrator
from plogical.CyberCPLogFileWriter import CyberCPLogFileWriter as logging
from django.views.decorators.csrf import csrf_exempt
# Controllers invoked as cm.<name>() -- these read their inputs from the JSON
# body that CloudManager was constructed with, not from the Django request.
_NO_ARG_CONTROLLERS = frozenset([
    'fetchWebsites', 'fetchWebsiteDataJSON', 'fetchWebsiteData',
    'submitWebsiteCreation', 'fetchModifyData', 'saveModifications',
    'submitDBCreation', 'fetchDatabases', 'submitDatabaseDeletion',
    'changePassword', 'getCurrentRecordsForDomain', 'deleteDNSRecord',
    'addDNSRecord', 'statusFunc', 'submitDomainCreation', 'fetchDomains',
    'submitDomainDeletion', 'changeOpenBasedir', 'changePHP',
    'backupStatusFunc', 'submitBackupCreation', 'getCurrentBackups',
    'deleteBackup', 'fetchACLs', 'fetchUsers', 'userWithResellerPriv',
    'getSSHConfigs', 'saveSSHConfigs', 'deleteSSHKey', 'addSSHKey',
    'getCurrentRules', 'addRule', 'deleteRule',
])

# Controllers invoked as cm.<name>(request).
_REQUEST_ARG_CONTROLLERS = frozenset([
    'submitEmailCreation', 'getEmailsForDomain', 'submitEmailDeletion',
    'submitPasswordChange', 'fetchCurrentForwardings', 'submitForwardDeletion',
    'submitEmailForwardingCreation', 'fetchDKIMKeys', 'generateDKIMKeys',
    'submitFTPCreation', 'getAllFTPAccounts', 'submitFTPDelete',
    'changeFTPPassword', 'issueSSL', 'submitWebsiteDeletion',
    'submitUserCreation', 'submitUserDeletion', 'saveModificationsUser',
    'saveResellerChanges', 'changeACLFunc', 'createACLFunc', 'findAllACLs',
    'deleteACLFunc', 'fetchACLDetails', 'submitACLModifications',
    'submitPackage', 'fetchPackages', 'submitPackageDelete',
    'submitPackageModify', 'getDataFromLogFile', 'fetchErrorLogs',
    'submitApplicationInstall', 'obtainServer', 'getLogsFromFile',
    'serverSSL', 'fetchManagerTokens', 'addWorker', 'fetchSSHKey',
    'putSSHkeyFunc', 'leaveSwarm', 'setUpDataNode', 'submitEditCluster',
    'connectAccount', 'fetchBuckets', 'createPlan', 'fetchBackupPlans',
    'deletePlan', 'fetchWebsitesInPlan', 'deleteDomainFromPlan',
    'savePlanChanges', 'fetchBackupLogs', 'forceRunAWSBackup',
    'systemStatus', 'killProcess', 'connectAccountDO', 'fetchBucketsDO',
    'createPlanDO', 'fetchBackupPlansDO', 'deletePlanDO',
    'fetchWebsitesInPlanDO', 'fetchBackupLogsDO', 'deleteDomainFromPlanDO',
    'savePlanChangesDO', 'forceRunAWSBackupDO', 'showStatus', 'fetchRam',
    'applyMySQLChanges', 'restartMySQL', 'fetchDatabasesMYSQL',
    'fetchTables', 'deleteTable', 'fetchTableData', 'fetchStructure',
    'addMINIONode', 'fetchMINIONodes', 'deleteMINIONode', 'createPlanMINIO',
    'fetchBackupPlansMINIO', 'deletePlanMINIO', 'savePlanChangesMINIO',
    'forceRunAWSBackupMINIO', 'fetchWebsitesInPlanMINIO',
    'fetchBackupLogsMINIO', 'deleteDomainFromPlanMINIO',
    'submitWebsiteStatus', 'submitChangePHP', 'getSwitchStatus',
    'switchServer', 'tuneSettings', 'getCurrentPHPConfig',
    'savePHPConfigBasic', 'fetchPHPSettingsAdvance', 'savePHPConfigAdvance',
    'fetchPHPExtensions', 'submitExtensionRequest', 'getRequestStatus',
    'getContainerizationStatus', 'submitContainerInstall',
    'switchTOLSWSStatus', 'fetchWebsiteLimits', 'saveWebsiteLimits',
    'getUsageData',
])


@csrf_exempt
def router(request):
    """Single JSON entry point for the CyberPanel cloud API.

    Expects a JSON body with at least 'controller' (the action to run) and
    'serverUserName' (the admin account to act as).  After authenticating
    the admin and verifying the login token, the request is dispatched to
    the CloudManager method named by the controller.

    Returns the CloudManager method's HTTP response, or an ajaxPre error
    response on authentication failure, unknown controller, or exception.
    """
    try:
        data = json.loads(request.body)
        controller = data['controller']
        serverUserName = data['serverUserName']
        admin = Administrator.objects.get(userName=serverUserName)
        cm = CloudManager(data, admin)
        if admin.api == 0:
            return cm.ajaxPre(0, 'API Access Disabled.')
        # 'statusFunc' must stay reachable without a login token; every
        # other controller first passes token verification.  (The original
        # code called verifyLogin twice on failure; once is sufficient.)
        if controller != 'statusFunc':
            verify = cm.verifyLogin(request)
            if verify[0] != 1:
                return verify[1]
        if controller == 'verifyLogin':
            return cm.verifyLogin(request)[1]
        if controller == 'setupNode':
            # Historical naming mismatch: the 'setupNode' action maps to
            # CloudManager.setupManager.
            return cm.setupManager(request)
        if controller in _NO_ARG_CONTROLLERS:
            return getattr(cm, controller)()
        if controller in _REQUEST_ARG_CONTROLLERS:
            return getattr(cm, controller)(request)
        return cm.ajaxPre(0, 'This function is not available in your version of CyberPanel.')
    except BaseException as msg:
        # NOTE(review): CloudManager is built with (data, admin) above but
        # with a single argument here -- confirm the constructor accepts a
        # one-argument form, otherwise this fallback itself raises.
        cm = CloudManager(None)
        return cm.ajaxPre(0, str(msg))
| StarcoderdataPython |
3443393 | import re
# Sample corpus for regex exercises.  Not referenced below yet -- presumably
# kept for later search examples.
text_to_search = '''
abcdefghijklmnopqurtuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
Ha HaHa
MetaCharacters (Need to be escaped):
. ^ $ * + ? { } [ ] \ | ( )
dazeofthewolf.com
321-555-4321
123.555.1234
123*555*1234
800-555-1234
900-555-1234
Mr. Darklord
Mr Smith
Ms Dana
Mrs. Robinson
Mr. T
'''
sentence = 'Start a sentence and then bring it to an end'
# re.I makes the pattern case-insensitive, so r'start' matches 'Start'.
pattern = re.compile(r'start', re.I)
matches = pattern.search(sentence)  # first match object, or None if absent
print(matches)
| StarcoderdataPython |
3351665 | import os
import sys
sys.path.insert(0, os.path.abspath(".."))
project = "python-chi"
copyright = "2021, University of Chicago"
author = "<NAME>"
version = "0.1"
release = "0.1"
extensions = [
"nbsphinx",
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
templates_path = ["_templates"]
source_suffix = [".rst"]
master_doc = "index"
exclude_patterns = ["build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_extra_path = ["_extra"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html',
# 'searchbox.html',
# ],
# }
# Output file base name for HTML help builder.
htmlhelp_basename = "ChameleonCloudPythonAPI"
description = "Chameleon Cloud Python API"
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"ChameleonCloudPythonAPI.tex",
description,
"<NAME>",
"manual",
),
]
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "chameleoncloudapi", description, [author], 1)]
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"ChameleonCloudPythonAPI",
description,
author,
"ChameleonCloudPythonAPI",
"A set of Python abstractions for interfacing with the Chameleon testbed",
"Miscellaneous",
),
]
intersphinx_mapping = {
"python3": ("https://docs.python.org/3/", None),
"python37": ("https://docs.python.org/3.7/", None),
"MySQLdb": ("https://mysqlclient.readthedocs.io/", None),
"novaclient": ("https://docs.openstack.org/python-novaclient/latest/", None),
}
notebook_examples = [
(
"Making a reservation",
"notebooks/reservations.ipynb",
[
"tests/test_lease.py:example_reserve_node",
"tests/test_lease.py:example_reserve_floating_ip",
"tests/test_lease.py:example_reserve_network",
"tests/test_lease.py:example_reserve_multiple_resources",
],
),
(
"Launching a bare metal instance",
"notebooks/baremetal.ipynb",
[
"tests/test_server.py:example_create_server",
"tests/test_server.py:example_wait_for_connectivity",
],
),
(
"Launching a container",
"notebooks/container.ipynb",
[
"tests/test_container.py:example_create_container",
],
),
]
nbsphinx_execute = "never"
# This is processed by Jinja2 and inserted before each notebook
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base=None) %}
.. figure:: https://img.shields.io/badge/Chameleon-Open%20Notebook-brightgreen
:target: https://jupyter.chameleoncloud.org/hub/import?deposition_repo=http&deposition_id=https://python-chi.readthedocs.io/en/latest/{{ docname|e }}&ephemeral=true
"""
import generate_notebook

# Regenerate each example notebook from its tagged test sources at build time.
for title, file, examples in notebook_examples:
    generate_notebook.generate(examples, output_file=file, title=title)
    # Also copy into the extras folder so the raw notebook ships with the
    # HTML output.  Bug fix: this previously wrote to "_extras/", but
    # html_extra_path above publishes "_extra" -- the copies were never
    # included in the built docs.
    generate_notebook.generate(examples, output_file=f"_extra/{file}", title=title)
| StarcoderdataPython |
4808201 | import cv2, time
# Stream frames from the default webcam, showing a grayscale preview until
# the user presses 'q' or the camera stops delivering frames.
video = cv2.VideoCapture(0)

a = 1  # frame counter (kept from the original; incremented before each read)
while True:
    a = a + 1
    check, frame = video.read()
    print(check)
    print(frame)
    # Bug fix: the success flag was printed but never checked, so a failed
    # read (frame is None) crashed cv2.cvtColor.  Stop capturing instead.
    if not check:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Capturing", gray)
    key = cv2.waitKey(1)  # poll the keyboard once per millisecond
    # Break the loop with the "q" key (for quit).
    if key == ord('q'):
        break

print(a)
video.release()
cv2.destroyAllWindows()
1664282 | <reponame>havenmoney/platform-clients<gh_stars>0
import haven
from haven.authed_api_client import AuthedApiClient
from datetime import datetime
from dateutil.tz import tzutc
def main():
    """Upload one sample transaction to Haven, then print recent transactions.

    Demonstrates the authenticated client flow: build a configuration,
    wrap it in an AuthedApiClient, upsert a single raw transaction for
    user-0, and read back up to 10 of that user's transactions.
    """
    config = haven.Configuration(host="https://haven.dev/api")
    api = haven.DefaultApi(AuthedApiClient(
        id="YOUR_ID_HERE",
        secret="YOUR_SECRET_HERE",
        configuration=config))
    # upload a transaction
    api.upsert_one_transaction(haven.CleanSingleTransactionRequest(
        transaction=haven.RawTransaction(
            id="txn-1",
            account="account-a",
            user="user-0",
            raw_memo="Uber ***232**123",
            merchant=None,
            store=None,
            network="VISA",
            # Bug fix: datetime.now().replace(tzinfo=tzutc()) labels the
            # *local* wall-clock time as UTC; datetime.now(tzutc()) yields
            # the actual current UTC time.
            user_transacted_at=datetime.now(tzutc()),
            amount="-12.34",
            categories=[],
            kind=haven.TransactionKind.CREDIT_CARD,
            state=haven.TransactionState.COMPLETED)))
    transactions = api.get_transactions("user-0", limit=10)
    print(transactions)
if __name__ == "__main__":
main()
| StarcoderdataPython |
318235 | # DESCRIPTION
# Given a non-negative integer num represented as a string,
# remove k digits from the number so that the new number is the smallest possible.
# Note:
# The length of num is less than 10002 and will be ≥ k.
# The given num does not contain any leading zero.
# EXAMPLE 1:
# Input: num = "1432219", k = 3
# Output: "1219"
# Explanation: Remove the three digits 4, 3, and 2 to form the new number 1219 which is the smallest.
# EXAMPLE 2:
# Input: num = "10200", k = 1
# Output: "200"
# Explanation: Remove the leading 1 and the number is 200. Note that the output must not contain leading zeroes.
# EXAMPLE 3:
# Input: num = "10", k = 2
# Output: "0"
# Explanation: Remove all the digits from the number and it is left with nothing which is 0.
class Solution:
    """LeetCode 402 -- Remove K Digits.

    Greedy monotonic-stack approach: scan the digits left to right and pop
    any kept digit that is larger than the incoming one while deletions
    remain, so the most significant positions end up as small as possible.

    Time:  O(N) -- each digit is pushed and popped at most once.
    Space: O(N) -- the stack of kept digits grows with the input.
    """

    def removeKdigits(self, num: str, k: int) -> str:
        keep = len(num) - k
        # Deleting every digit leaves the number zero.
        if keep == 0:
            return '0'

        kept = []
        remaining = k
        for digit in num:
            # Pop larger preceding digits while deletions are still allowed.
            while remaining > 0 and kept and digit < kept[-1]:
                kept.pop()
                remaining -= 1
            kept.append(digit)

        # Monotonically non-decreasing tail: drop leftover deletions from the end.
        if remaining > 0:
            kept = kept[:len(kept) - remaining]

        # Strip leading zeros; fall back to '0' if nothing is left.
        return "".join(kept).lstrip("0") or '0'
| StarcoderdataPython |
8121060 | <gh_stars>0
from typing import Dict, Optional, List #, String# helps enforce typing
import random
import numpy as np
from fastapi import APIRouter
import joblib
import pandas as pd
from pydantic import BaseModel, Field, validator, Json
# import spacy
# from sklearn.feature_extraction.text import TfidfVectorizer
# import en_core_web_sm
# from spacy import load
# nlp= en_core_web_sm.load()
# # tokenizer function
# def tokenizer(text):
# doc=nlp(text)
# return [token.lemma_ for token in doc if ((token.is_stop == False) and
# (token.is_punct == False)) and (token.pos_ != 'PRON')]
nlp_working = False  # feature flag: the TF-IDF/NLP pipeline below is commented out
# nlp_preprocessing = TfidfVectorizer(stop_words = 'english',
#                                     ngram_range = (1, 2),
#                                     max_df = .95,
#                                     min_df = 3,
#                                     tokenizer = tokenizer)
# df = pd.read_csv('https://raw.githubusercontent.com/bw-med-cabinet-1/DS/master/data/cannabis_strain')
# df = df.drop('Unnamed: 0', axis= 1)
# nlp_preprocessing.fit_transform(df['effects'])
#print(f'preprocessing')
# dtm = pd.DataFrame(dtm.todense(), columns = nlp_preprocessing.get_feature_names())
# Strain catalogue served back to clients in prediction responses.
dataframe = pd.read_csv('https://raw.githubusercontent.com/bw-med-cabinet-1/DS/master/data/Cannabis_Strains_Features.csv')
#pd.to_numeric(dataframe['strain_id'])
# for id in dataframe['strain_id']:
#     dataframe['strain_id'][id] = id.__int__()
# print(dataframe.strain_id.dtypes)
#print(len(dtm))
router = APIRouter()
nn_model = joblib.load("app/api/cat_model.joblib")  # nearest-neighbor model over the categorical features
#nlp_model = joblib.load("app/api/nlp_model.joblib")
#nlp_preprocessing = joblib.load("app/api/nlp_preprocessing.joblib")
print("Serialized Model Loaded")
nlp_cats = ['strain_id', 'strain', 'type', 'Rating', 'effects', 'flavor',
            'description']
# cats = ['hybrid', 'sativa', 'indica', 'Aroused', 'Creative', 'Euphoric',
#         'Energetic', 'Euphoric', 'Focused', 'Giggly', 'Happy', 'Hungry',
#         'Relaxed', 'Sleepy', 'Talkative', 'Tingly', 'Uplifted', 'anxiety',
#         'depression', 'pain', 'fatigue', 'insomnia', 'brain fog',
#         'loss of appetite', 'nausea', 'low libido']
# Feature columns in the order the nearest-neighbor model was trained on.
# NOTE(review): the list contains both 'energentic' (typo) and 'energetic';
# do NOT "fix" the spelling without retraining -- the column order and names
# must match the serialized model exactly.
nn_cats = ['anxiety', 'depression', 'pain', 'fatigue', 'insomnia', 'brain fog',
           'loss of appetite', 'nausea', 'low libido', 'hybrid', 'sativa',
           'indica', 'happy', 'energentic', 'hungry', 'aroused', 'creative',
           'euphoric', 'relaxed', 'tingly', 'energetic', 'sleepy', 'giggly',
           'uplifted', 'focused', 'talkative']
#nn_cats = [feature.lower() for feature in features]
class UserInputData(BaseModel):
    """Request body for the /predict endpoint.

    Hiding the raw Dict behind a pydantic model gives validation and more
    meaningful errors: fastapi parses the payload into this object and
    guarantees a valid state before our handlers run.
    """
    include: Optional[List[str]]        # traits/ailments the user wants matched
    exclude: Optional[Dict[str, bool]]  # traits to avoid (not yet used in formatting)
    text: Optional[str]                 # free-text description for the NLP path

    def categorical_formatting(self):
        """Build a single-row DataFrame shaped for the nearest-neighbor model.

        Every feature column defaults to 0.5 (the model's null / "no
        preference" value); features the user explicitly includes are set
        to 1.
        """
        df = pd.DataFrame(columns=nn_cats)
        df.loc[0] = [0.5] * len(nn_cats)  # number of training dimensions; 0.5 is null
        # Bug fix: `include` is None when the caller supplied only `exclude`
        # (the route enters this path on `include or exclude`), which made
        # this loop raise TypeError.  Iterate an empty list in that case.
        for trait in (self.include or []):
            df[trait.lower()] = 1
        return df

    def nlp_formatting(self):
        """Vectorize the free-text field with the fitted TF-IDF preprocessor.

        NOTE(review): relies on the module-level `nlp_preprocessing` object,
        which is currently commented out; this path is only reachable while
        the `nlp_working` flag guards it in the route.
        """
        vec = nlp_preprocessing.transform([fR"{self.text}"])
        return vec
@router.post("/predict")
def predict_strain(user: UserInputData):
"""Predict the ideal strain based on user input"""
nn_return_values = [] # initializing to empty for valid return
nlp_return_value = []
if user.include or user.exclude:
X_new = user.categorical_formatting()
neighbors = nn_model.kneighbors(X_new) # vid @ 56:02
neighbor_ids = [int(id_) for id_ in neighbors[1][0]]
nn_return_values = [dataframe.iloc[id] for id in neighbor_ids]
elif user.text and nlp_working:
#print(f'user.text = True')
X_new = user.nlp_formatting()
#vec = nlp_preprocessing.transform(X_new)
dense = X_new.todense()
#print(f'dense/input shape : {dense.shape}')
similar = nlp_model.kneighbors(dense, return_distance=False)
similar.T
output = []
for i in range(5):
elem = similar[0][i]
output.append(elem)
nlp_return_value = output[0]
#print(user.text)
#print(nlp_return_value)
else: # if neither are given
return {
"error": "insufficient inputs"
}
return {
"Nearest Neighbors": nn_return_values,
"Text-based Prediction": nlp_return_value
}
# @router.get('/random') # What is this route going to be?
# def random_penguin():
# """Return a random penguin species"""
# return random.choice(["Adelie", "ChinStrap", "Gentoo"])
# class Item(BaseModel):
# """Use this data model to parse the request body JSON."""
# x1: float = Field(..., example=3.14)
# x2: int = Field(..., example=-42)
# x3: str = Field(..., example='banjo')
# def to_df(self):
# """Convert pydantic object to pandas dataframe with 1 row."""
# return pd.DataFrame([dict(self)])
# @validator('x1')
# def x1_must_be_positive(cls, value):
# """Validate that x1 is a positive number."""
# assert value > 0, f'x1 == {value}, must be > 0'
# return value
# @router.post('/predict')
# async def predict(item: Item):
# """
# Make random baseline predictions for classification problem 🔮
# ### Request Body
# - `x1`: positive float
# - `x2`: integer
# - `x3`: string
# ### Response
# - `prediction`: boolean, at random
# - `predict_proba`: float between 0.5 and 1.0,
# representing the predicted class's probability
# Replace the placeholder docstring and fake predictions with your own model.
# """
# X_new = item.to_df()
# log.info(X_new)
# y_pred = random.choice([True, False])
# y_pred_proba = random.random() / 2 + 0.5
# return {
# 'prediction': y_pred,
# 'probability': y_pred_proba
# }
| StarcoderdataPython |
1967157 | import json
import math
import mmh3
import os
import sys
import time
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import toposort
from pyspark.sql import Row as SparkRow
from pyspark.sql import SparkSession, Window
from pyspark.sql.types import *
from objects.resource import Resource
from objects.task import Task
from objects.task_state import TaskState
from objects.workflow import Workflow
from objects.workload import Workload
USAGE = "Usage: python(3) ./alibaba2018_to_parquet.py path_to_dir"  # CLI help string
NAME = "Alibaba 2018"  # trace name; doubles as the output subdirectory name
TARGET_DIR = os.path.join(os.path.dirname(os.getcwd()), "output_parquet", NAME)  # parquet output root
def parse(path_to_dir):
global TARGET_DIR
TARGET_DIR = os.path.join(TARGET_DIR, os.path.split(path_to_dir)[-1])
if "DAS5" in os.environ: # If we want to execute it on the DAS-5 super computer
print("We are on DAS5, {0} is master.".format(os.environ["HOSTNAME"] + ".ib.cluster"))
spark = SparkSession.builder \
.master("spark://" + os.environ['HOSTNAME'] + ".ib.cluster:7077") \
.appName("WTA parser") \
.config("spark.executor.memory", "28G") \
.config("spark.executor.cores", "8") \
.config("spark.executor.instances", "10") \
.config("spark.driver.memory", "256G") \
.config("spark.driver.maxResultSize", "40G") \
.config("spark.network.timeout", "100000s") \
.config("spark.rpc.askTimeout", "100000s") \
.config("spark.default.parallelism", "2000") \
.config("spark.sql.execution.arrow.enabled", "true") \
.config("spark.cleaner.periodicGC.interval", "5s") \
.getOrCreate()
else:
import findspark
findspark.init("<path_to_spark>")
spark = SparkSession.builder \
.master("local[4]") \
.appName("WTA parser") \
.config("spark.executor.memory", "2G") \
.config("spark.driver.memory", "2G") \
.getOrCreate()
machine_meta = spark.read.csv(os.path.join(path_to_dir, "machine_meta.csv"), schema=StructType([
StructField("machine_id", StringType(), True),
StructField("time_stamp", LongType(), True),
StructField("failure_domain_1", LongType(), True),
StructField("failure_domain_2", StringType(), True),
StructField("cpu_num", LongType(), True),
StructField("mem_size", LongType(), True),
StructField("status", StringType(), True)
]))
machine_usage = spark.read.csv(os.path.join(path_to_dir, "machine_usage.csv"), schema=StructType([
StructField("machine_id", StringType(), True),
StructField("time_stamp", DoubleType(), True),
StructField("cpu_util_percent", LongType(), True),
StructField("mem_util_percent", LongType(), True),
StructField("mem_gps", DoubleType(), True),
StructField("mkpi", LongType(), True),
StructField("net_in", DoubleType(), True),
StructField("net_out", DoubleType(), True),
StructField("disk_io_percent", DoubleType(), True)
]))
container_meta = spark.read.csv(os.path.join(path_to_dir, "container_meta.csv"), schema=StructType([
StructField("container_id", StringType(), True),
StructField("machine_id", StringType(), True),
StructField("time_stamp", LongType(), True),
StructField("app_du", StringType(), True),
StructField("status", StringType(), True),
StructField("cpu_request", LongType(), True),
StructField("cpu_limit", LongType(), True),
StructField("mem_size", DoubleType(), True)
]))
container_usage = spark.read.csv(os.path.join(path_to_dir, "container_usage.csv"), schema=StructType([
StructField("container_id", StringType(), True),
StructField("machine_id", StringType(), True),
StructField("time_stamp", DoubleType(), True),
StructField("cpu_util_percent", LongType(), True),
StructField("mem_util_percent", LongType(), True),
StructField("cpi", DoubleType(), True),
StructField("mem_gps", DoubleType(), True),
StructField("mpki", LongType(), True),
StructField("net_in", DoubleType(), True),
StructField("net_out", DoubleType(), True),
StructField("disk_io_percent", DoubleType(), True)
]))
batch_task = spark.read.csv(os.path.join(path_to_dir, "batch_task.csv"), schema=StructType([
StructField("task_name", StringType(), True),
StructField("instance_num", LongType(), True),
StructField("job_name", StringType(), True),
StructField("task_type", StringType(), True),
StructField("status", StringType(), True),
StructField("start_time", LongType(), True),
StructField("end_time", LongType(), True),
StructField("plan_cpu", DoubleType(), True),
StructField("plan_mem", DoubleType(), True)
]))
batch_instance = spark.read.csv(os.path.join(path_to_dir, "batch_instance.csv"), schema=StructType([
StructField("instance_name", StringType(), True),
StructField("task_name", StringType(), True),
StructField("job_name", StringType(), True),
StructField("task_type", StringType(), True),
StructField("status", StringType(), True),
StructField("start_time", LongType(), True),
StructField("end_time", LongType(), True),
StructField("machine_id", StringType(), True),
StructField("seq_no", LongType(), True),
StructField("total_seq_no", LongType(), True),
StructField("cpu_avg", DoubleType(), True),
StructField("cpu_max", DoubleType(), True),
StructField("mem_avg", DoubleType(), True),
StructField("mem_max", DoubleType(), True)
]))
@F.pandas_udf(returnType=Task.get_spark_type(), functionType=F.PandasUDFType.GROUPED_MAP)
def clean_tasks_of_workflow(df):
tasks = dict()
raw_id_to_instances = dict()
job_name = df.loc[0, "job_name"]
workflow_id = mmh3.hash64(job_name)[1]
invalid_task_raw_ids = set()
# group by task name
# - count number of instances
# - compare with row.instance_num
# Check to inspect if the data is noisy
# def check(pdf):
# a = pdf["instance_name"].nunique()
# b = pdf["instance_name"].astype(np.int64).min()
# c = pdf["instance_name"].astype(np.int64).max()
# d = pdf["instance_num"].min()
# e = pdf["instance_num"].max()
# f = pdf["instance_name"].count()
# if d != e or b < 0 or c >= e or a != d or a != f:
# print("Noisy data! {}, {}, {}, {}, {}, {}".format(a, b, c, d, e, f))
#
# df.groupby("task_name").apply(check)
for row in df.itertuples(index=False):
if None in row:
print(row, flush=True)
task_name = row.task_name
instance_name = str(row.instance_name)
memory_requested = row.plan_mem
resources_requested = row.plan_cpu
resource_id = row.machine_id
splits = task_name.split("_")
if splits[0] == "task":
cleaned_task_name = splits[1]
task_type = "bag"
raw_parents = []
else:
cleaned_task_name = splits[0][1:]
task_type = str(splits[0][0])
raw_parents = [x for x in splits[1:] if x.isdigit()]
if resource_id is None:
resource_id = -1
else:
resource_id = mmh3.hash64(row.machine_id)[1]
if row.end_time is None or math.isnan(row.end_time):
invalid_task_raw_ids.add(cleaned_task_name)
continue
if row.start_time is None or math.isnan(row.start_time):
invalid_task_raw_ids.add(cleaned_task_name)
continue
if memory_requested is None or math.isnan(memory_requested):
memory_requested = -1
if resources_requested is None or math.isnan(resources_requested):
avg_cpu = row.cpu_avg
if avg_cpu is None or math.isnan(avg_cpu):
invalid_task_raw_ids.add(cleaned_task_name)
continue
else:
resources_requested = avg_cpu
this_task_id = mmh3.hash64(job_name + "@" + cleaned_task_name + "@" + instance_name)[1]
if cleaned_task_name not in raw_id_to_instances:
raw_id_to_instances[cleaned_task_name] = row.instance_num
if row.instance_num > 10:
# Create parent and child tasks
raw_parent_id = cleaned_task_name + "_p"
parent_task_id = mmh3.hash64(job_name + "@" + raw_parent_id + "@" + "0")[1]
if parent_task_id not in tasks:
tasks[parent_task_id] = Task(id=parent_task_id, type="dummy", submission_site=0,
runtime=0,
ts_submit=row.start_time * 1000,
# We convert time from seconds to milliseconds.
resource_amount_requested=1, parents=raw_parents,
workflow_id=workflow_id,
wait_time=0, resource_type='core', resource=-1, memory_requested=-1)
raw_id_to_instances[raw_parent_id] = 1
raw_child_id = cleaned_task_name + "_c"
child_task_id = mmh3.hash64(job_name + "@" + raw_child_id + "@" + "0")[1]
if child_task_id not in tasks:
tasks[child_task_id] = Task(id=child_task_id, type="dummy", submission_site=0,
runtime=0,
ts_submit=row.start_time * 1000,
# We convert time from seconds to milliseconds.
resource_amount_requested=1, parents=[cleaned_task_name],
workflow_id=workflow_id,
wait_time=0, resource_type='core', resource=-1, memory_requested=-1,
params="child")
raw_id_to_instances[raw_child_id] = 1
raw_parents = [raw_parent_id]
this_task = Task(id=this_task_id, type=task_type, submission_site=0,
runtime=(row.end_time - row.start_time) * 1000,
ts_submit=row.start_time * 1000, # We convert time from seconds to milliseconds.
resource_amount_requested=resources_requested, parents=raw_parents,
workflow_id=workflow_id, params=task_name + " $ " + instance_name + " $ " + str(
row.instance_num) + " $ " + job_name,
wait_time=0, resource_type='core', resource=resource_id, memory_requested=memory_requested)
tasks[this_task_id] = this_task
for task_id, task in tasks.items():
task.parents = [p for p in task.parents if p not in invalid_task_raw_ids]
parents = []
for raw_parent_id in task.parents:
# If previous wave has a child and this task is not that child.
# refer to the child instead of the wave.
if raw_parent_id + "_c" in raw_id_to_instances and task.params is not "child":
raw_parent_id = raw_parent_id + "_c"
# We might hit an edge case where a parent was not recorded by the system of Alibaba
# (e.g. bug or the tracing stopped)
if raw_parent_id not in raw_id_to_instances:
continue
parent_instances = raw_id_to_instances[raw_parent_id]
proper_parent_ids = []
for x in range(parent_instances):
# Alibaba tasks specify instance_nums, however these tasks may not necesarrily be in the data
# So we need to check if they are actually encountered.
hash = mmh3.hash64(job_name + "@" + raw_parent_id + "@" + str(x))[1]
if hash in tasks:
proper_parent_ids.append(hash)
parents.extend(proper_parent_ids)
for proper_id in proper_parent_ids:
tasks[proper_id].children.add(task_id)
# task.params = None
task.parents = parents
# ze_best = pd.concat(pandas_dataframes)
parquet_dicts = [task.get_parquet_dict() for task in tasks.values()]
if len(tasks) > 0:
ret = pd.DataFrame(parquet_dicts)
else: # If no task was valid, return an empty DF with the columns set. Otherwise Spark goes boom.
ret = pd.DataFrame(columns=Task.get_parquet_meta_dict().keys())
return ret
@F.pandas_udf(returnType=Task.get_spark_type(), functionType=F.PandasUDFType.GROUPED_MAP)
def container_to_task(df):
row = df.iloc[0, :]
start_time = df["time_stamp"].min() * 1000
stop_time = df["time_stamp"].max() * 1000
task_id = mmh3.hash64(row["container_id"])[1]
workflow_id = mmh3.hash64(row["app_du"])[1]
task = Task(id=task_id, type="long running",
parents=[],
ts_submit=start_time, # We convert time from seconds to milliseconds.
submission_site=0, runtime=(start_time - stop_time), resource_amount_requested=row["cpu_request"],
memory_requested=row["mem_size"], workflow_id=workflow_id, wait_time=0,
resource=mmh3.hash64(row["machine_id"])[1])
return pd.DataFrame([task.get_parquet_dict()])
    if not os.path.exists(os.path.join(TARGET_DIR, Task.output_path())):
        # Rename instances.
        # This allows instance names to be derived using just the task name
        # and the number of instances of the task.
        task_window = Window.partitionBy("job_name", "task_name").orderBy("start_time")

        # Subtracting 1 because row_number() starts at 1; the instance name is
        # then a 0-based index within a particular job and task.
        instances_renamed = batch_instance.withColumn("instance_name",
                                                      (F.row_number().over(task_window) - F.lit(1)).cast(StringType()))

        # Attach the per-task resource plans to every instance row.
        tasks_unconverted = instances_renamed.join(
            batch_task.select("job_name", "task_name", "instance_num", "plan_cpu", "plan_mem"),
            on=["job_name", "task_name"], how="inner")

        # NOTE(review): flagged by the original author as the suspected problem line.
        tasks_converted = tasks_unconverted.groupby("job_name").apply(clean_tasks_of_workflow)

        # if not os.path.exists(os.path.join(TARGET_DIR, Task.output_path())):
        #     tasks_converted.write.parquet(os.path.join(TARGET_DIR, Task.output_path()), mode="overwrite")

        # Long-running containers become tasks too; merge both sets before writing.
        long_running_tasks = container_meta.groupBy("container_id").apply(container_to_task)
        all_tasks = tasks_converted.union(long_running_tasks).dropna()

        try:
            all_tasks.printSchema()
            all_tasks.write.parquet(os.path.join(TARGET_DIR, Task.output_path()), mode="overwrite")
        except Exception as e:
            print(e, flush=True)
            raise e
@F.pandas_udf(returnType=TaskState.get_spark_type(), functionType=F.PandasUDFType.GROUPED_MAP)
def task_states_from_instances(df):
task_states = []
workflow_id = mmh3.hash64(df.loc[0, "job_name"])[1]
for index, row in df.iterrows():
job_name = row["job_name"]
task_name = row["task_name"]
instance_name = row["instance_name"]
splits = task_name.split("_")
just_task_name = splits[0][
1:] # The first letter is irrelevant as it corresponds to nature of task (map or reduce)
# and has nothing to do with the structure of the workflow.
this_task_id = mmh3.hash64(job_name + "@" + just_task_name + "@" + instance_name)[1]
this_task_state = TaskState(ts_start=row["start_time"] * 1000, ts_end=row["end_time"] * 1000,
workflow_id=workflow_id, task_id=this_task_id,
resource_id=mmh3.hash64(row["machine_id"])[1], cpu_rate=row["cpu_avg"],
canonical_memory_usage=row["mem_avg"], maximum_cpu_rate=row["cpu_max"],
maximum_memory_usage=row["mem_max"])
if None in this_task_state.get_parquet_dict().values() or np.isnan(
this_task_state.get_parquet_dict().values()):
print(this_task_state.get_parquet_dict())
raise RuntimeError(this_task_state.get_parquet_dict())
task_states.append(this_task_state.get_parquet_dict())
return pd.DataFrame(task_states)
@F.pandas_udf(returnType=TaskState.get_spark_type(), functionType=F.PandasUDFType.GROUPED_MAP)
def task_states_from_container_usage(df):
machine_id = mmh3.hash64(df.loc[0, "machine_id"])[1]
def convert(cont_df):
task_states = []
prev_end_time = cont_df.loc[0, "start_time"] * 1000
container_id = mmh3.hash64(cont_df.loc[0, "container_id"])[1]
app_id = mmh3.hash64(cont_df.loc[0, "app_du"])[1]
sorted_df = df.sort_values("time_stamp")
for index, row in sorted_df.iterrows():
this_end_time = row["time_stamp"] * 1000
this_task_state = TaskState(ts_start=prev_end_time, ts_end=this_end_time,
workflow_id=app_id, task_id=container_id,
resource_id=machine_id, cpu_rate=row["cpu_util_percent"],
canonical_memory_usage=row["mem_util_percent"],
maximum_disk_bandwidth=row["disk_io_percent"],
network_in=row["net_in"],
network_out=row["net_out"])
prev_end_time = this_end_time
task_states.append(this_task_state.get_parquet_dict())
if None in this_task_state.get_parquet_dict().values() or np.isnan(
this_task_state.get_parquet_dict().values()):
print(this_task_state.get_parquet_dict())
raise ArithmeticError(this_task_state.get_parquet_dict())
return pd.DataFrame(task_states)
return df.groupby("container_id").apply(convert).reset_index(drop=True).fillna(-1)
    # Now, derive workflows from tasks
    @F.pandas_udf(returnType=Workflow.get_spark_type(), functionType=F.PandasUDFType.GROUPED_MAP)
    def compute_workflow_stats(df):
        """Grouped-map UDF: build one Workflow row from all tasks of a workflow_id."""
        tasks = []
        for index, row in df.iterrows():
            this_task = Task(id=row["id"], type=row["type"], ts_submit=row["ts_submit"],
                             # We convert time from seconds to milliseconds.
                             submission_site=0, runtime=row["runtime"],
                             resource_amount_requested=row["resource_amount_requested"],
                             memory_requested=row["memory_requested"],
                             parents=row["parents"], workflow_id=row["workflow_id"], wait_time=row["wait_time"],
                             resource=row["resource_used"])
            # print(this_task.get_parquet_dict())
            tasks.append(this_task)

        workflow = Workflow(id=df.loc[0, "workflow_id"],
                            ts_submit=df["ts_submit"].min(),
                            tasks=tasks,
                            scheduler_description="Fuxi",
                            workflow_domain="Industrial",
                            workflow_application_name="MapReduce",
                            # NOTE(review): "appliation" looks like a typo, but it must
                            # match the Workflow constructor's keyword -- confirm before fixing.
                            workflow_appliation_field="Internet Services"
                            )

        try:
            workflow.compute_critical_path()
        except toposort.CircularDependencyError:  # TODO: Some have cyclic dependencies. Check if this is us, or the data (again)
            pass

        return pd.DataFrame([workflow.get_parquet_dict()])
    if not os.path.exists(os.path.join(TARGET_DIR, Workflow.output_path())):
        # Spark doesn't realise the task parquet files now exist on disk, so read them back in.
        tasks_df = spark.read.parquet(os.path.join(TARGET_DIR, Task.output_path()))
        workflow_df = tasks_df.groupBy("workflow_id").apply(compute_workflow_stats)

        workflow_df.write.parquet(os.path.join(TARGET_DIR, Workflow.output_path()), mode="overwrite",
                                  compression="snappy")
    def machine_meta_to_resources(row):
        """Map one machine_meta row to a Spark Row describing a Resource."""
        resource = Resource(id=mmh3.hash64(row["machine_id"])[1],
                            type="cpu",
                            num_resources=float(row["cpu_num"]),
                            memory=row["mem_size"],
                            )
        resource_dict = resource.get_json_dict()
        # Dropped before building the Row -- presumably the events list is not
        # representable in Resource.get_spark_type(); confirm against that schema.
        del resource_dict["events"]
        return SparkRow(**resource_dict)
if not os.path.exists(os.path.join(TARGET_DIR, Resource.output_path())):
print("######\n Start parsing Resource DF\n ######")
resource_df = machine_meta.rdd.map(machine_meta_to_resources).toDF(Resource.get_spark_type())
resource_df.write.parquet(os.path.join(TARGET_DIR, Resource.output_path()), mode="overwrite",
compression="snappy")
print("######\n Start parsing Workload\n ######")
if "tasks_df" not in locals():
tasks_df = spark.read.parquet(os.path.join(TARGET_DIR, Task.output_path())) # Spark doesn't understand it can now read from parquet files, so tell him
json_dict = Workload.get_json_dict_from_spark_task_dataframe(tasks_df,
domain="Industrial",
authors=["Alibaba 2018"])
os.makedirs(os.path.join(TARGET_DIR, Workload.output_path()), exist_ok=True)
with open(os.path.join(TARGET_DIR, Workload.output_path(), "generic_information.json"), "w") as file:
# Need this on 32-bit python.
def default(o):
if isinstance(o, np.int64):
return int(o)
file.write(json.dumps(json_dict, default=default))
print("######\n Done parsing Workload\n ######")
if __name__ == "__main__":
    if len(sys.argv) < 2:
        # A path to the trace directory is required.
        print(USAGE)
        sys.exit(1)

    # Optional second argument overrides the output directory.
    if len(sys.argv) == 3:
        TARGET_DIR = sys.argv[2]

    start = time.perf_counter()
    parse(sys.argv[1])
    # Report total wall-clock parse time in seconds.
    print(time.perf_counter() - start)
| StarcoderdataPython |
1870945 | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from convgru import ConvGRUCell
from gen_models import make_conv_net, make_fc_net, make_upconv_net
import numpy as np
from numpy import pi
from numpy import log as np_log
# log(2*pi), used by Gaussian log-likelihood terms.
log_2pi = np_log(2*pi)

# Clamp range applied to all predicted log-covariances (numerical stability).
LOG_COV_MAX = 0
LOG_COV_MIN = -1
class VRNN(nn.Module):
    """Variational RNN over image observations and 4-d actions.

    NOTE(review): this class has several defects (flagged inline below with
    NOTE(review) comments); as written it can neither be constructed nor run.
    """

    def __init__(
        self,
        maze_dims,
        action_proc_dim,
        z_dim,
        x_encoder_specs,
        pre_lstm_dim,
        lstm_dim,
        prior_part_specs,
        inference_part_specs,
        decoder_part_specs,
    ):
        super().__init__()

        # Convolutional encoder for observations; maze_dims is (channels, height, ...).
        in_ch = maze_dims[0]
        in_h = maze_dims[1]
        self.x_encoder, out_ch, out_h = make_conv_net(in_ch, in_h, x_encoder_specs)
        x_enc_channels = out_ch
        x_enc_h = out_h

        # Separate linear projections of the 4-d action, one per sub-network.
        self.prior_action_fc = nn.Linear(4, action_proc_dim, bias=True)
        self.post_action_fc = nn.Linear(4, action_proc_dim, bias=True)
        self.recon_action_fc = nn.Linear(4, action_proc_dim, bias=True)
        self.pre_lstm_action_fc = nn.Linear(4, action_proc_dim, bias=True)

        self.lstm = nn.LSTMCell(
            pre_lstm_dim, lstm_dim, bias=True
        )

        # NOTE(review): attention_seq is built but never used in forward().
        self.attention_seq = nn.Sequential(
            nn.Linear(lstm_dim + action_proc_dim, lstm_dim, bias=False),
            nn.BatchNorm1d(lstm_dim),
            nn.ReLU(),
            nn.Linear(lstm_dim, lstm_dim),
            # nn.Sigmoid()
            # nn.Softmax()
        )

        # models for the prior
        self.prior_fc_seq, hidden_dim = make_fc_net(lstm_dim + action_proc_dim, prior_part_specs)
        self.prior_mean_fc = nn.Linear(hidden_dim, z_dim, bias=True)
        self.prior_log_cov_fc = nn.Linear(hidden_dim, z_dim, bias=True)

        # NOTE(review): gru_specs is undefined in this scope (leftover from a
        # GRU-based variant?) -- this line raises NameError at construction.
        out_ch = gru_specs['num_channels']

        # models for the posterior
        self.posterior_fc_seq, hidden_dim = make_fc_net(lstm_dim + x_enc_channels*x_enc_h*x_enc_h + action_proc_dim, inference_part_specs)
        self.posterior_mean_fc = nn.Linear(hidden_dim, z_dim, bias=True)
        self.posterior_log_cov_fc = nn.Linear(hidden_dim, z_dim, bias=True)

        # models for the decoding/generation
        self.recon_fc_seq, out_h = make_fc_net(z_dim + lstm_dim + action_proc_dim, decoder_part_specs['fc_part_specs'])
        # NOTE(review): gru_specs and self.h_dim are both undefined here -- the
        # intended upconv input dimensions need to be confirmed.
        self.recon_upconv_seq, out_ch, out_h = make_upconv_net(gru_specs['num_channels'] + z_dim, self.h_dim[1], decoder_part_specs['upconv_part_specs'])
        self.recon_mean_conv = nn.Conv2d(out_ch, 3, 3, stride=1, padding=1, bias=True)
        self.recon_log_cov_conv = nn.Conv2d(out_ch, 3, 3, stride=1, padding=1, bias=True)
        assert out_h == maze_dims[1]

    def get_obs_recon_dist(self, z_batch, prev_h_batch, proc_act_batch):
        """Decode (z, h, action) into per-pixel mean and clamped log-covariance."""
        hidden = torch.cat([z_batch, prev_h_batch, proc_act_batch], 1)
        hidden = self.recon_fc_seq(hidden)
        # NOTE(review): hidden is 2-D here, but recon_upconv_seq presumably
        # expects a 4-D (N, C, H, W) tensor -- a reshape seems to be missing.
        hidden = self.recon_upconv_seq(hidden)
        recon_mean = self.recon_mean_conv(hidden)
        recon_mean = F.sigmoid(recon_mean)
        recon_log_cov = self.recon_log_cov_conv(hidden)
        recon_log_cov = torch.clamp(recon_log_cov, LOG_COV_MIN, LOG_COV_MAX)
        return recon_mean, recon_log_cov

    def forward(self, obs_batch, act_batch, prev_h_batch, prev_c_batch):
        # Per-branch action embeddings.
        prior_act_batch = self.prior_action_fc(act_batch)
        post_act_batch = self.post_action_fc(act_batch)
        recon_act_batch = self.recon_action_fc(act_batch)
        pre_lstm_act_batch = self.pre_lstm_action_fc(act_batch)

        # compute the prior
        hidden = self.prior_fc_seq(torch.cat([prev_h_batch, prior_act_batch], 1))
        prior_mean = self.prior_mean_fc(hidden)
        prior_log_cov = self.prior_log_cov_fc(hidden)
        prior_log_cov = torch.clamp(prior_log_cov, LOG_COV_MIN, LOG_COV_MAX)

        # compute posterior
        # NOTE(review): x_enc is referenced before assignment inside its own
        # initializer; likely obs_batch.size(0) was intended for the flatten.
        x_enc = self.x_encoder(obs_batch).view(x_enc.size(0), -1)
        hidden = torch.cat([x_enc, post_act_batch, prev_h_batch], 1)
        # NOTE(review): syntax error -- stray ", 1))"; likely meant
        # self.posterior_fc_seq(hidden).
        hidden = self.posterior_fc_seq(hidden, 1))
        # NOTE(review): __init__ defines posterior_mean_fc / posterior_log_cov_fc,
        # not *_conv -- these attribute names do not exist.
        post_mean = self.posterior_mean_conv(hidden)
        post_log_cov = self.posterior_log_cov_conv(hidden)
        post_log_cov = torch.clamp(post_log_cov, LOG_COV_MIN, LOG_COV_MAX)

        # sample from the posterior (reparameterised sampling is commented out;
        # the posterior mean is used deterministically).
        post_z_sample = post_mean
        # eps = Variable(torch.randn(post_mean.size()))
        # if post_mean.is_cuda: eps = eps.cuda()
        # cur_z_sample = post_mean + eps*torch.exp(0.5 * post_log_cov)

        # compute generation
        recon_mean, recon_log_cov = self.get_obs_recon_dist(post_z_sample, prev_h_batch, recon_act_batch)

        # compute recurence
        hidden = torch.cat([x_enc, prev_h_batch, post_z_sample, pre_lstm_act_batch], 1)
        prev_h_batch, prev_c_batch = self.lstm(hidden, (prev_h_batch, prev_c_batch))

        # NOTE(review): cur_z_sample is only defined in the commented-out code
        # above -- returning it raises NameError; post_z_sample is likely meant.
        return prior_mean, prior_log_cov, post_mean, post_log_cov, cur_z_sample, recon_mean, recon_log_cov, prev_h_batch, prev_c_batch

    def compute_KL(self, prior_mean, prior_log_cov, post_mean, post_log_cov):
        """Diagonal-Gaussian KL(posterior || prior) -- currently disabled (returns 0)."""
        # assert False, 'Check this KL'
        bs = prior_mean.size(0)
        m1, lc1, m2, lc2 = post_mean.view(bs, -1), post_log_cov.view(bs, -1), prior_mean.view(bs, -1), prior_log_cov.view(bs, -1)
        KL = 0.5 * (
            torch.sum(lc2, 1) - torch.sum(lc1, 1) - m1.size(1) +
            torch.sum(torch.exp(lc1 - lc2), 1) + torch.sum((m2 - m1)**2 / torch.exp(lc2), 1)
        )
        KL = torch.sum(KL)
        # return 0.001 * KL
        # NOTE(review): the computed KL is discarded and 0. returned, silently
        # disabling the KL term of the ELBO.
        return 0.

    def compute_ELBO(
        self,
        prior_mean, prior_log_cov,
        post_mean, post_log_cov,
        recon_mean, recon_log_cov,
        obs_batch,
        average_over_batch=True
    ):
        """ELBO = reconstruction log-prob - KL (KL currently returns 0)."""
        KL = self.compute_KL(prior_mean, prior_log_cov, post_mean, post_log_cov)

        recon_mean = recon_mean.view(recon_mean.size(0), -1)
        recon_log_cov = recon_log_cov.view(recon_log_cov.size(0), -1)
        obs_batch = obs_batch.view(obs_batch.size(0), -1)

        # NOTE(review): recon_cov is computed but unused -- the full Gaussian
        # log-likelihood below is commented out in favour of plain squared error.
        recon_cov = torch.exp(recon_log_cov)
        # log_prob = -0.5 * torch.sum(
        #     (recon_mean - obs_batch)**2 / recon_cov
        # )
        # log_det_temp = torch.sum(recon_log_cov, 1) + log_2pi
        # log_prob += -0.5 * torch.sum(log_det_temp)
        log_prob = -0.5 * torch.sum((recon_mean - obs_batch)**2)

        elbo = log_prob - KL
        if average_over_batch: elbo = elbo / float(obs_batch.size(0))

        return elbo, KL
| StarcoderdataPython |
11267869 | from sources.experiments.experiment_metadata_provider_utils import ExperimentMetadataProvider
from sources.flwr.flwr_servers.early_stopping_server import DEFAULT_NUM_ROUNDS_ABOVE_TARGET
# Fixed hyper-parameters for the GB-Norm federated experiments on CIFAR-10
# style splits ("C10"): 100 clients, 10 sampled per round, accuracy target 0.6.
GB_NORM_EXPERIMENTS_FIXED_METADATA_C10 = {
    "num_clients": 100,
    "num_rounds": 2500,
    "clients_per_round": 10,
    "batch_size": 20,
    "local_epochs": 1,
    "val_steps": 2,
    "target_accuracy": 0.6,
    "num_rounds_above_target": DEFAULT_NUM_ROUNDS_ABOVE_TARGET
}

GB_NORM_EXPERIMENTS_BASE_METADATA_C10 = ExperimentMetadataProvider(
    GB_NORM_EXPERIMENTS_FIXED_METADATA_C10)

# LeNet variant: fewer rounds (500) and no fixed validation steps.
LENET_GB_NORM_EXPERIMENTS_FIXED_METADATA_C10 = {
    "num_clients": 100,
    "num_rounds": 500,
    "clients_per_round": 10,
    "batch_size": 20,
    "local_epochs": 1,
    "val_steps": None,
    "target_accuracy": 0.6,
    "num_rounds_above_target": DEFAULT_NUM_ROUNDS_ABOVE_TARGET
}

LENET_GB_NORM_EXPERIMENTS_BASE_METADATA_C10 = ExperimentMetadataProvider(
    LENET_GB_NORM_EXPERIMENTS_FIXED_METADATA_C10)
11371358 | <filename>main.py
from flask import Flask,request,render_template,jsonify
from flask_restful import Resource,Api
# Flask application with a Flask-RESTful API wrapper for the JSON endpoints.
app = Flask(__name__)
api = Api(app)
@app.route('/')
def index():
    """Serve the landing page from templates/index.html."""
    return render_template("index.html")
class Student(Resource):
    """REST resource returning a hard-coded list of students (demo data)."""

    def get(self):
        """Return all students as a JSON array of {rollNo, name} objects."""
        return [{"rollNo":101,"name":"<NAME>"},{"rollNo":123,"name":"<NAME>"}]
class Subjects(Resource):
    """REST resource returning a hard-coded list of subjects (demo data)."""

    def get(self):
        """Return all subjects as a JSON array of {subjectId, subjectName} objects."""
        return [{"subjectId":90210,"subjectName":"Martial Arts"}]
# Register the REST resources on their API routes.
api.add_resource(Student,'/api/student/getStudent')
api.add_resource(Subjects,'/api/subject/getSubject')

# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
3298189 | <gh_stars>1-10
import os
import webapp2
import jinja2
from google.appengine.ext import ndb
from google.appengine.api import users
import logging,json
from contact import Contact
class APIRouterHandler(webapp2.RequestHandler):
    """Dispatches /api/* requests by inspecting the request path segments."""

    def get(self):
        # Path segments are computed but no GET endpoints are implemented yet.
        url_route = self.request.uri
        url_routes = url_route.split("/")
        pass

    def post(self):
        """Handle POSTs; /api/.../contact persists a Contact entity from JSON."""
        url = self.request.uri
        route = url.split('/')
        if 'contact' in route:
            data = self.request.get('data')
            json_data = json.loads(data)
            logging.info(data)
            # BUG FIX: the original mutated attributes on the Contact *class*
            # and then called Contact.put() (an instance method) on the class,
            # which fails and never stores an entity. Build an instance instead.
            # NOTE(review): assumes Contact is an ndb.Model with these
            # properties -- confirm against the contact module.
            contact = Contact(
                contact_id='',
                names=json_data['names'],
                cell=json_data['cell'],
                email=json_data['email'],
                subject=json_data['subject'],
                message=json_data['message'])
            contact.put()
# WSGI application: every path under /api/ is routed to APIRouterHandler.
app = webapp2.WSGIApplication([
    ('/api/.*', APIRouterHandler)
], debug=True)
| StarcoderdataPython |
# Optional Sentry integration: fall back to a no-op when sentry_sdk is not
# installed, so callers can invoke capture_exception() unconditionally.
try:
    from sentry_sdk import capture_exception
except ImportError:
    def capture_exception() -> None:
        """No-op stand-in used when sentry_sdk is unavailable."""
        # NOTE(review): sentry_sdk.capture_exception accepts an optional
        # exception argument; this stub takes none -- confirm call sites
        # always call it with no arguments.
        pass
| StarcoderdataPython |
3401767 | from ast import literal_eval as make_tuple
from django.db import models
from django.core.exceptions import ValidationError
from distributed.protocol.serialize import serialize, deserialize
class DaskSerializedField(models.Field):
    """Model field that transparently (de)serializes values with Dask.

    Values are serialized with ``distributed.protocol.serialize`` into a
    ``(header, frames)`` tuple whose ``str()`` representation is stored in a
    text column, and reconstructed with ``deserialize`` on the way back out.
    """

    description = 'A field the automatically serializes and deserializes using Dask serialization.'

    def get_internal_type(self):
        # Backed by a plain text column.
        return 'TextField'

    def from_db_value(self, value, *args, **kwargs):
        """
        Called in all circumstances when data is loaded from the database, including in aggregates and values() calls.
        """
        if value is None:
            return value
        try:
            # The stored text is the repr of a (header, frames) tuple.
            return deserialize(*make_tuple(value))
        except Exception:
            raise ValidationError('Unable to deserialize value: {}'.format(str(value)))

    def to_python(self, value):
        """
        Called by deserialization and during the clean() method used on forms.
        """
        if value is None or not isinstance(value, str):
            # Already a Python object (or NULL); nothing to do.
            return value
        try:
            return deserialize(*make_tuple(value))
        except Exception:
            raise ValidationError('Unable to deserialize value: {}'.format(value))

    def get_prep_value(self, value):
        """
        Called to convert Python objects back to query values.
        """
        # BUG FIX: pass NULL through unchanged, consistent with from_db_value()
        # and to_python(); previously None was serialized and stored as a
        # non-NULL string.
        if value is None:
            return value
        try:
            serialize_tuple = serialize(value)
            return str(serialize_tuple)
        except Exception:
            raise ValidationError('Unable to serialize value: {}'.format(value))

    def value_to_string(self, obj):
        """
        Called by serialization.
        """
        value = self.value_from_object(obj)
        return self.get_prep_value(value)
| StarcoderdataPython |
191317 | """
Default feature encoding functions and pipelines
for automated feature transformation.
"""
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import VarianceThreshold
from sklearn.impute import SimpleImputer
from ..preprocessing import (
ImputeNull,
SelectField,
FeatureCast,
LabelEncoderPipe,
HashingVectorizerChunked,
MultihotEncoder,
)
def tokenizer(x):
    """Identity tokenizer: the input is assumed to already be a token sequence.

    A named module-level function (unlike a lambda) keeps fitted vectorizers
    that reference it picklable.
    """
    return x
def dict_encoder(c):
    """Return the (name, Pipeline) step list that dict-vectorizes column ``c``.

    The pipeline selects the column, replaces missing values with an empty
    dict, then applies sklearn's DictVectorizer.  (Converted from a lambda
    assignment to a ``def`` per PEP 8 / E731; signature and return unchanged.)
    """
    return [
        (
            "{0}_dict_encoder".format(c),
            Pipeline(
                steps=[
                    ("var", SelectField(cols=[c], single_dimension=True)),
                    ("fillna", ImputeNull({})),
                    ("vec", DictVectorizer()),
                ]
            ),
        )
    ]
def onehot_encoder(c):
    """Return the (name, Pipeline) step list that one-hot encodes column ``c``.

    Values are cast to str, missing entries become "", and a binary
    CountVectorizer with the identity tokenizer produces the indicator
    columns.  (Converted from a lambda assignment to a ``def`` per PEP 8.)
    """
    return [
        (
            "{0}_onehot".format(c),
            Pipeline(
                steps=[
                    ("var", SelectField(cols=[c], single_dimension=True)),
                    ("cast", FeatureCast(cast_type=str)),
                    ("fillna", ImputeNull("")),
                    (
                        "vec",
                        CountVectorizer(
                            token_pattern=None,
                            tokenizer=tokenizer,
                            binary=True,
                            decode_error="ignore",
                        ),
                    ),
                ]
            ),
        )
    ]
def multihot_encoder(c):
    """Return the (name, Pipeline) step list that multi-hot encodes column ``c``.

    Missing values are replaced with an empty list before MultihotEncoder.
    (Converted from a lambda assignment to a ``def`` per PEP 8.)
    """
    return [
        (
            "{0}_multihot".format(c),
            Pipeline(
                steps=[
                    ("var", SelectField(cols=[c], single_dimension=True)),
                    ("fillna", ImputeNull([])),
                    ("vec", MultihotEncoder()),
                ]
            ),
        )
    ]
def numeric_encoder(c):
    """Return the (name, Pipeline) step list that scales numeric column ``c``.

    Missing values are imputed with the median before standard scaling.
    (Converted from a lambda assignment to a ``def`` per PEP 8.)
    """
    return [
        (
            "{0}_scaler".format(c),
            Pipeline(
                steps=[
                    ("var", SelectField(cols=[c])),
                    ("imputer", SimpleImputer(strategy="median")),
                    ("scaler", StandardScaler(copy=False)),
                ]
            ),
        )
    ]
def _string_vectorizer_steps(c, word_ngram_range, char_ngram_range=None):
    """Build the hashing-vectorizer pipeline steps for text column ``c``.

    Always includes a word-level vectorizer using ``word_ngram_range``; when
    ``char_ngram_range`` is given, a character (char_wb) vectorizer is added.
    Each pipeline selects the column, fills missing values with a space,
    hashes n-grams and drops zero-variance features.
    """
    def _vectorizer(name_suffix, analyzer, ngram_range):
        # One (step-name, Pipeline) pair for a single analyzer configuration.
        return (
            "{0}_{1}".format(c, name_suffix),
            Pipeline(
                steps=[
                    ("var", SelectField(cols=[c], single_dimension=True)),
                    ("fillna", ImputeNull(" ")),
                    (
                        "vec",
                        HashingVectorizerChunked(
                            ngram_range=ngram_range,
                            analyzer=analyzer,
                            decode_error="ignore",
                        ),
                    ),
                    ("var_thresh", VarianceThreshold()),
                ]
            ),
        )

    steps = [_vectorizer("word_vec", "word", word_ngram_range)]
    if char_ngram_range is not None:
        steps.append(_vectorizer("char_vec", "char_wb", char_ngram_range))
    return steps


# Encoder registries by capacity tier.  The tiers differ only in the n-gram
# ranges used by the string vectorizers (and whether a char-level vectorizer
# is included); all other encoders are shared.
_default_encoders = {
    "small": {
        "string_vectorizer": lambda c: _string_vectorizer_steps(c, (1, 2)),
        "onehotencoder": onehot_encoder,
        "multihotencoder": multihot_encoder,
        "numeric": numeric_encoder,
        "dict": dict_encoder,
    },
    "medium": {
        "string_vectorizer": lambda c: _string_vectorizer_steps(c, (1, 3), (3, 4)),
        "onehotencoder": onehot_encoder,
        "multihotencoder": multihot_encoder,
        "numeric": numeric_encoder,
        "dict": dict_encoder,
    },
    "large": {
        "string_vectorizer": lambda c: _string_vectorizer_steps(c, (1, 3), (2, 5)),
        "onehotencoder": onehot_encoder,
        "multihotencoder": multihot_encoder,
        "numeric": numeric_encoder,
        "dict": dict_encoder,
    },
}
| StarcoderdataPython |
11259645 | <reponame>AurelienLourot/charm-helpers
import unittest
from mock import call, patch
import yaml
import tools.charm_helpers_sync.charm_helpers_sync as sync
import six
# mock.patch target for the builtin open() differs between Python 2 and 3.
if not six.PY3:
    builtin_open = '__builtin__.open'
else:
    builtin_open = 'builtins.open'
INCLUDE = """
include:
- core
- contrib.openstack
- contrib.storage
- contrib.hahelpers:
- utils
- ceph_utils
- cluster_utils
- haproxy_utils
"""
class HelperSyncTests(unittest.TestCase):
    """Unit tests for the charm_helpers_sync tool.

    Modernized: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced with ``assertEqual``, and side-effecting list comprehensions
    are replaced with proper list construction.
    """

    def test_clone_helpers(self):
        '''It properly branches the correct helpers branch'''
        with patch('subprocess.check_call') as check_call:
            sync.clone_helpers(work_dir='/tmp/foo', repo='git:charm-helpers')
            check_call.assert_called_with(['git',
                                           'clone', '--depth=1',
                                           'git:charm-helpers',
                                           '/tmp/foo/charm-helpers'])

    def test_module_path(self):
        '''It converts a python module path to a filesystem path'''
        self.assertEqual(sync._module_path('some.test.module'),
                         'some/test/module')

    def test_src_path(self):
        '''It renders the correct path to module within charm-helpers tree'''
        path = sync._src_path(src='/tmp/charm-helpers',
                              module='contrib.openstack')
        self.assertEqual('/tmp/charm-helpers/charmhelpers/contrib/openstack',
                         path)

    def test_dest_path(self):
        '''It correctly finds the correct install path within a charm'''
        path = sync._dest_path(dest='/tmp/mycharm/hooks/charmhelpers',
                               module='contrib.openstack')
        self.assertEqual('/tmp/mycharm/hooks/charmhelpers/contrib/openstack',
                         path)

    @patch(builtin_open)
    @patch('os.path.exists')
    @patch('os.walk')
    def test_ensure_init(self, walk, exists, _open):
        '''It ensures all subdirectories of a parent are python importable'''
        # Mocked pieces: os.walk, os.path.exists and open.
        def _walk(path):
            yield ('/tmp/hooks/', ['helpers'], [])
            yield ('/tmp/hooks/helpers', ['foo'], [])
            yield ('/tmp/hooks/helpers/foo', [], [])
        walk.side_effect = _walk
        exists.return_value = False

        sync.ensure_init('hooks/helpers/foo/')

        ex = [call('/tmp/hooks/__init__.py', 'wb'),
              call('/tmp/hooks/helpers/__init__.py', 'wb'),
              call('/tmp/hooks/helpers/foo/__init__.py', 'wb')]
        for c in ex:
            self.assertIn(c, _open.call_args_list)

    @patch('tools.charm_helpers_sync.charm_helpers_sync.ensure_init')
    @patch('os.path.isfile')
    @patch('shutil.copy')
    @patch('os.makedirs')
    @patch('os.path.exists')
    def test_sync_pyfile(self, exists, mkdirs, copy, isfile, ensure_init):
        '''It correctly syncs a py src file from src to dest'''
        exists.return_value = False
        isfile.return_value = True
        sync.sync_pyfile('/tmp/charm-helpers/core/host',
                         'hooks/charmhelpers/core')
        mkdirs.assert_called_with('hooks/charmhelpers/core')
        copy_f = call('/tmp/charm-helpers/core/host.py',
                      'hooks/charmhelpers/core')
        copy_i = call('/tmp/charm-helpers/core/__init__.py',
                      'hooks/charmhelpers/core')
        self.assertIn(copy_f, copy.call_args_list)
        self.assertIn(copy_i, copy.call_args_list)
        ensure_init.assert_called_with('hooks/charmhelpers/core')

    def _test_filter_dir(self, opts, isfile, isdir):
        '''It filters non-python files and non-module dirs from source'''
        # Fake directory listing: 'f' entries are files, 'd' entries are dirs.
        files = {
            'bad_file.bin': 'f',
            'some_dir': 'd',
            'good_helper.py': 'f',
            'good_helper2.py': 'f',
            'good_helper3.py': 'f',
            'bad_file.img': 'f',
        }

        def _isfile(f):
            try:
                return files[f.split('/').pop()] == 'f'
            except KeyError:
                return False

        def _isdir(f):
            try:
                return files[f.split('/').pop()] == 'd'
            except KeyError:
                return False

        isfile.side_effect = _isfile
        isdir.side_effect = _isdir
        result = sync.get_filter(opts)(dir='/tmp/charm-helpers/core',
                                       ls=six.iterkeys(files))
        return result

    @patch('os.path.isdir')
    @patch('os.path.isfile')
    def test_filter_dir_no_opts(self, isfile, isdir):
        '''It filters out all non-py files by default'''
        result = self._test_filter_dir(opts=None, isfile=isfile, isdir=isdir)
        ex = ['bad_file.bin', 'bad_file.img', 'some_dir']
        self.assertEqual(sorted(ex), sorted(result))

    @patch('os.path.isdir')
    @patch('os.path.isfile')
    def test_filter_dir_with_include(self, isfile, isdir):
        '''It includes non-py files if specified as an include opt'''
        result = sorted(self._test_filter_dir(opts=['inc=*.img'],
                                              isfile=isfile, isdir=isdir))
        ex = sorted(['bad_file.bin', 'some_dir'])
        self.assertEqual(ex, result)

    @patch('os.path.isdir')
    @patch('os.path.isfile')
    def test_filter_dir_include_all(self, isfile, isdir):
        '''It does not filter anything if option specified to include all'''
        self.assertEqual(sync.get_filter(opts=['inc=*']), None)

    @patch('tools.charm_helpers_sync.charm_helpers_sync.get_filter')
    @patch('tools.charm_helpers_sync.charm_helpers_sync.ensure_init')
    @patch('shutil.copytree')
    @patch('shutil.rmtree')
    @patch('os.path.exists')
    def test_sync_directory(self, exists, rmtree, copytree, ensure_init,
                            _filter):
        '''It correctly syncs src directory to dest directory'''
        _filter.return_value = None
        sync.sync_directory('/tmp/charm-helpers/charmhelpers/core',
                            'hooks/charmhelpers/core')
        exists.return_value = True
        rmtree.assert_called_with('hooks/charmhelpers/core')
        copytree.assert_called_with('/tmp/charm-helpers/charmhelpers/core',
                                    'hooks/charmhelpers/core', ignore=None)
        ensure_init.assert_called_with('hooks/charmhelpers/core')

    @patch('os.path.isfile')
    def test_is_pyfile(self, isfile):
        '''It correctly identifies incomplete path to a py src file as such'''
        sync._is_pyfile('/tmp/charm-helpers/charmhelpers/core/host')
        isfile.assert_called_with(
            '/tmp/charm-helpers/charmhelpers/core/host.py'
        )

    @patch('tools.charm_helpers_sync.charm_helpers_sync.sync_pyfile')
    @patch('tools.charm_helpers_sync.charm_helpers_sync.sync_directory')
    @patch('os.path.isdir')
    def test_syncs_directory(self, is_dir, sync_dir, sync_pyfile):
        '''It correctly syncs a module directory'''
        is_dir.return_value = True
        sync.sync(src='/tmp/charm-helpers',
                  dest='hooks/charmhelpers',
                  module='contrib.openstack')

        sync_dir.assert_called_with(
            '/tmp/charm-helpers/charmhelpers/contrib/openstack',
            'hooks/charmhelpers/contrib/openstack', None)

        # __init__.py files leading to the directory were also synced.
        sync_pyfile.assert_has_calls([
            call('/tmp/charm-helpers/charmhelpers/__init__',
                 'hooks/charmhelpers'),
            call('/tmp/charm-helpers/charmhelpers/contrib/__init__',
                 'hooks/charmhelpers/contrib')])

    @patch('tools.charm_helpers_sync.charm_helpers_sync.sync_pyfile')
    @patch('tools.charm_helpers_sync.charm_helpers_sync._is_pyfile')
    @patch('os.path.isdir')
    def test_syncs_file(self, is_dir, is_pyfile, sync_pyfile):
        '''It correctly syncs a module file'''
        is_dir.return_value = False
        is_pyfile.return_value = True
        sync.sync(src='/tmp/charm-helpers',
                  dest='hooks/charmhelpers',
                  module='contrib.openstack.utils')

        sync_pyfile.assert_has_calls([
            call('/tmp/charm-helpers/charmhelpers/__init__',
                 'hooks/charmhelpers'),
            call('/tmp/charm-helpers/charmhelpers/contrib/__init__',
                 'hooks/charmhelpers/contrib'),
            call('/tmp/charm-helpers/charmhelpers/contrib/openstack/__init__',
                 'hooks/charmhelpers/contrib/openstack'),
            call('/tmp/charm-helpers/charmhelpers/contrib/openstack/utils',
                 'hooks/charmhelpers/contrib/openstack')])

    @patch('tools.charm_helpers_sync.charm_helpers_sync.sync')
    @patch('os.path.isdir')
    @patch('os.path.exists')
    def test_sync_helpers_from_config(self, exists, isdir, _sync):
        '''It correctly syncs a list of included helpers'''
        include = yaml.safe_load(INCLUDE)['include']
        isdir.return_value = True
        exists.return_value = False
        sync.sync_helpers(include=include,
                          src='/tmp/charm-helpers',
                          dest='hooks/charmhelpers')
        mods = [
            'core',
            'contrib.openstack',
            'contrib.storage',
            'contrib.hahelpers.utils',
            'contrib.hahelpers.ceph_utils',
            'contrib.hahelpers.cluster_utils',
            'contrib.hahelpers.haproxy_utils'
        ]

        # Build the expected call list directly instead of abusing a list
        # comprehension for its append side effects.
        ex_calls = [call('/tmp/charm-helpers', 'hooks/charmhelpers', c, [])
                    for c in mods]
        self.assertEqual(ex_calls, _sync.call_args_list)

    @patch('tools.charm_helpers_sync.charm_helpers_sync.sync')
    @patch('os.path.isdir')
    @patch('os.path.exists')
    @patch('shutil.rmtree')
    def test_sync_helpers_from_config_cleanup(self, _rmtree, _exists,
                                              isdir, _sync):
        '''It correctly syncs a list of included helpers'''
        include = yaml.safe_load(INCLUDE)['include']
        isdir.return_value = True
        _exists.return_value = True

        sync.sync_helpers(include=include,
                          src='/tmp/charm-helpers',
                          dest='hooks/charmhelpers')
        _rmtree.assert_called_with('hooks/charmhelpers')
        mods = [
            'core',
            'contrib.openstack',
            'contrib.storage',
            'contrib.hahelpers.utils',
            'contrib.hahelpers.ceph_utils',
            'contrib.hahelpers.cluster_utils',
            'contrib.hahelpers.haproxy_utils'
        ]

        ex_calls = [call('/tmp/charm-helpers', 'hooks/charmhelpers', c, [])
                    for c in mods]
        self.assertEqual(ex_calls, _sync.call_args_list)

    def test_extract_option_no_globals(self):
        '''It extracts option from an included item with no global options'''
        inc = 'contrib.openstack.templates|inc=*.template'
        result = sync.extract_options(inc)
        ex = ('contrib.openstack.templates', ['inc=*.template'])
        self.assertEqual(ex, result)

    def test_extract_option_with_global_as_string(self):
        '''It extracts option for include with global options as str'''
        inc = 'contrib.openstack.templates|inc=*.template'
        result = sync.extract_options(inc, global_options='inc=foo.*')
        ex = ('contrib.openstack.templates',
              ['inc=*.template', 'inc=foo.*'])
        self.assertEqual(ex, result)

    def test_extract_option_with_globals(self):
        '''It extracts option from an included item with global options'''
        inc = 'contrib.openstack.templates|inc=*.template'
        result = sync.extract_options(inc, global_options=['inc=*.cfg'])
        ex = ('contrib.openstack.templates', ['inc=*.template', 'inc=*.cfg'])
        self.assertEqual(ex, result)

    def test_extract_multiple_options_with_globals(self):
        '''It extracts multiple options from an included item'''
        inc = 'contrib.openstack.templates|inc=*.template,inc=foo.*'
        result = sync.extract_options(inc, global_options=['inc=*.cfg'])
        ex = ('contrib.openstack.templates',
              ['inc=*.template', 'inc=foo.*', 'inc=*.cfg'])
        self.assertEqual(ex, result)
| StarcoderdataPython |
4914872 | from library.telegram.base import RequestContext
from telethon import events
from .base import BaseHandler
class StopHandler(BaseHandler):
    """Handles incoming "/stop" Telegram messages."""

    # Only incoming messages whose whole text is "/stop" trigger this handler.
    filter = events.NewMessage(incoming=True, pattern='^/stop$')

    async def handler(self, event: events.ChatAction, request_context: RequestContext):
        """Record a stats event for the stop command; no other action is taken here."""
        request_context.statbox(action='show', mode='stop')
3247092 | import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from morgana import utils
from tts_data_tools import file_io
from tts_data_tools.utils import get_file_ids
# Lookup table from numpy dtypes (and builtin Python scalar types) to the
# corresponding torch dtypes.
# NOTE(review): builtin bool maps to torch.uint8 (not torch.bool) -- presumably
# a legacy mask convention; confirm against consumers before changing.
TO_TORCH_DTYPE = {
    np.dtype('float16'): torch.float16,
    np.dtype('float32'): torch.float32,
    np.dtype('float64'): torch.float64,
    np.dtype('int8'): torch.int8,
    np.dtype('int16'): torch.int16,
    np.dtype('int32'): torch.int32,
    np.dtype('int64'): torch.int64,
    np.dtype('bool'): torch.bool,
    np.dtype('uint8'): torch.uint8,
    int: torch.int64,
    float: torch.float32,
    bool: torch.uint8
}
def batch(data_generator, batch_size=32, shuffle=True, num_data_threads=0, device='cpu'):
    r"""Wrap a dataset in a batched loader whose batches live on `device`.

    Parameters
    ----------
    data_generator : torch.utils.data.Dataset or FilesDataset
        Dataset from which to load the batches of data.
    batch_size : int
        Number of samples to load per batch.
    shuffle : bool
        Whether to shuffle the data every epoch.
    num_data_threads : int
        Number of parallel subprocesses to use for data loading.
    device : str
        Name of the device to place the parameters on.

    Returns
    -------
    :class:`torch.utils.data.DataLoader` (in a :class:`ToDeviceWrapper` container)
        An instance with the `__iter__` method, allowing for iteration over batches of the dataset.
    """
    # Pin host memory only when batches are destined for a non-CPU device.
    use_pinned_memory = torch.device(device) != torch.device('cpu')
    loader = DataLoader(
        data_generator,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_data_threads,
        collate_fn=data_generator.collate_fn,
        pin_memory=use_pinned_memory,
    )

    # Device placement is layered on as a separate wrapper: DataLoader uses
    # multiprocessing, which doesn't always play nicely with CUDA.
    return ToDeviceWrapper(loader, device)
class FilesDataset(Dataset):
    r"""Combines multiple :class:`_DataSource` instances, and enables batching of a dictionary of sequence features.

    Parameters
    ----------
    data_sources : dict[str, _DataSource]
        Specification of the different data to be loaded.
    data_dir : str
        The directory containing all data for this dataset split.
    id_list : str
        The name of the file id-list containing base names to load, contained withing `data_root`.
    normalisers : Normalisers or dict[str, _FeatureNormaliser] or dict[str, _SpeakerDependentNormaliser]
        Normaliser instances used to normalise loaded features (and delta features).
    data_root : str
        The directory root for this dataset.

    Attributes
    ----------
    file_ids : list[str]
        List of base names loaded from `id_list`.
    normalisers : Normalisers or dict[str, _FeatureNormaliser] or dict[str, _SpeakerDependentNormaliser]
        The normaliser instances, set automatically by :class:`morgana.experiment_builder.ExperimentBuilder`.

    Notes
    -----
    If any speaker-dependent normalisers are provided, the user must define a data source by the name `speaker_id`.
    """
    def __init__(self, data_sources, data_dir, id_list, normalisers, data_root='.'):
        # Check speaker ids will be generated if they are needed by any speaker dependent normalisers.
        for name, normaliser in normalisers.items():
            if isinstance(normaliser, _SpeakerDependentNormaliser) and 'speaker_id' not in data_sources:
                raise KeyError(f"{name} is a speaker-dependent normaliser, but no 'speaker_id' data_source was defined")

            # Normalising deltas requires the data source to actually produce the delta features.
            if name in data_sources and normaliser.use_deltas and not data_sources[name].use_deltas:
                raise ValueError(f'To normalise deltas of {name}, set `data_source.use_deltas` to True.')

        self.data_sources = data_sources

        self.data_root = data_root
        self.data_dir = os.path.join(self.data_root, data_dir)
        self.id_list = os.path.join(self.data_root, id_list)

        # One base name per non-blank line of the id-list file.
        with open(self.id_list, 'r') as f:
            self.file_ids = list(filter(bool, map(str.strip, f.readlines())))

        self.normalisers = normalisers

    def __getitem__(self, index):
        r"""Combines the features loaded by each data source and adds normalised features where specified.

        Parameters
        ----------
        index : int
            The index of the file in the `id_list` to load.

        Returns
        -------
        features : dict[str, np.array]
            Features loaded by each data source, contained in a non-nested dictionary.
        """
        def _normalise_feature(feature, is_deltas=False):
            # NOTE: `name` is the loop variable of the `for` loop below; this closure is
            # only ever called from inside that loop, so it sees the current source's name.
            if isinstance(self.normalisers[name], _SpeakerDependentNormaliser):
                normalised_feature = \
                    self.normalisers[name].normalise(feature, features['speaker_id'], deltas=is_deltas)
            else:
                normalised_feature = \
                    self.normalisers[name].normalise(feature, deltas=is_deltas)

            return normalised_feature.astype(np.float32)

        base_name = self.file_ids[index]
        # 'name' is a reserved feature key that always holds the sentence's base name.
        features = {'name': base_name}

        # If speaker ids are provided, extract them before the `for` loop so they can be used by `_normalise_feature`.
        if 'speaker_id' in self.data_sources:
            speaker_id = self.data_sources['speaker_id'](base_name, self.data_dir)
            features.update(speaker_id)

        for name, data_source in self.data_sources.items():
            if name == 'speaker_id':
                continue

            data_source_features = data_source(base_name, self.data_dir)

            if name in self.normalisers:
                data_source_features['normalised_{}'.format(name)] = \
                    _normalise_feature(data_source_features[name])

                if self.normalisers[name].use_deltas:
                    data_source_features['normalised_{}_deltas'.format(name)] = \
                        _normalise_feature(data_source_features['{}_deltas'.format(name)], is_deltas=True)

            features.update(data_source_features)

        return features

    def __len__(self):
        # Number of sentences in this dataset split.
        return len(self.file_ids)

    @staticmethod
    def collate_fn(batch):
        r"""Collates a list of outputs from `self.__getitem__` into a batched structure.

        Parameters
        ----------
        batch : list[dict[str, object]]
            Each element in the list is a non-nested dictionary containing features loaded by each data source.

        Returns
        -------
        batched_features : dict[str, :class:`torch.Tensor`]
            Batched version of the list of `features` items in `batch`.

            Note, it is possible to provide objects such as strings that will not be converted to `torch.Tensor`, these
            will not be padded or sent to the correct device, but can be accessed in the features dictionary.
        """
        batch_size = len(batch)
        # The first item defines the set of feature names expected in every item.
        feature_template = batch[0]

        def feature_list_to_batched_tensor(feature_list):
            """Handles padding and type conversion."""
            feature_item = feature_list[0]

            # Sequence feature.
            if isinstance(feature_item, np.ndarray) and feature_item.ndim > 1:
                max_seq_len = max(map(len, feature_list))
                feat_dim = feature_item.shape[-1]
                dtype = TO_TORCH_DTYPE[feature_item.dtype]

                # Padding is handled by creating a zeros tensor using the maximum sequence length.
                batched_feature = torch.zeros((batch_size, max_seq_len, feat_dim), dtype=dtype)
                for i, feature in enumerate(feature_list):
                    seq_len = feature.shape[0]
                    batched_feature[i, :seq_len, ...] = torch.tensor(feature, dtype=dtype)

            # Static 1 dimensional feature.
            elif isinstance(feature_item, np.ndarray) and feature_item.dtype in TO_TORCH_DTYPE:
                dtype = TO_TORCH_DTYPE[feature_item.dtype]
                batched_feature = torch.tensor(feature_list, dtype=dtype)

            # Static 0 dimensional feature.
            elif not isinstance(feature_item, np.ndarray) and type(feature_item) in TO_TORCH_DTYPE:
                dtype = TO_TORCH_DTYPE[type(feature_item)]
                batched_feature = torch.tensor(feature_list, dtype=dtype)

            # Feature that will not be converted to `torch.Tensor` (e.g. the base-name strings).
            else:
                batched_feature = feature_list

            return batched_feature

        # First transpose the list of dictionaries:
        #   from - [ { feat_name: _DataSource.load_file() } ]
        #   to - { feat_name: [ _DataSource.load_file() ] }
        features = {feat_name: [] for feat_name in feature_template.keys()}
        for i, item_features in enumerate(batch):
            for feat_name, value in item_features.items():
                features[feat_name].append(value)

        # Convert all features in the batch to `torch.Tensors` if possible.
        batched_features = {feat_name: [] for feat_name in feature_template.keys()}
        for feat_name, feature_list in features.items():
            batched_features[feat_name] = feature_list_to_batched_tensor(feature_list)

        return batched_features
class Normalisers(dict):
    r"""A dictionary-like container for normalisers, loads parameters for all the normalisers.

    Parameters
    ----------
    normaliser_sources : dict[str, _FeatureNormaliser]
        Specification of the normalisers.
    normalisation_dir : str
        The directory containing the normalisation parameters (in a JSON file).
    data_root : str
        The directory root for this dataset.
    device : str or `torch.device`
        The name of the device to place the parameters on.
    """
    def __init__(self, normaliser_sources, normalisation_dir, data_root='.', device='cpu'):
        super(Normalisers, self).__init__()
        # `data_root` is folded into the directory here, so `load_params` below is
        # called with its `data_root` parameter left at the default `'.'`.
        self.normalisation_dir = os.path.join(data_root, normalisation_dir)
        self.device = device

        for name, normaliser_source in normaliser_sources.items():
            self[name] = normaliser_source
            # Fix: `device` was previously passed positionally, landing in the
            # `data_root` parameter of `load_params` (signature:
            # `load_params(data_dir, data_root='.', device='cpu')`). That prefixed
            # the parameter path with the device name and always left the torch
            # parameters on the default CPU device.
            self[name].load_params(self.normalisation_dir, device=self.device)
class _FeatureNormaliser(object):
    r"""Abstract feature normaliser class. Exposes the :func:`~normalise` and :func:`~denormalise` methods.

    Normalisers will work on both NumPy arrays and PyTorch tensors. This is necessary to process NumPy arrays in
    :func:`_DataSource.__call__` and to normalise/denormalise PyTorch tensors in batch within the model.

    Parameters
    ----------
    name : str
        Name of the feature.
    use_deltas : bool
        Whether to load normalisation parameters for delta features.
    file_pattern : str
        Format of the JSON file containing the normalisation parameters.

    Attributes
    ----------
    params : dict[str, np.ndarray]
    params_torch : dict[str, torch.Tensor]
    delta_params : dict[str, np.ndarray]
    delta_params_torch : dict[str, torch.Tensor]
    """
    def __init__(self, name, use_deltas=False, file_pattern='{name}.json'):
        self.name = name
        self.use_deltas = use_deltas
        self.file_pattern = file_pattern

        # Parameters stay `None` until `load_params` is called; using
        # `normalise`/`denormalise` before that will fail in the subclass maths.
        self.params = None
        self.params_torch = None
        if self.use_deltas:
            self.delta_params = None
            self.delta_params_torch = None

    def _normalise(self, feature, **params):
        raise NotImplementedError("Underlying calculation of normalisation should be implemented in a subclass.")

    def _denormalise(self, feature, **params):
        raise NotImplementedError("Underlying calculation of denormalisation should be implemented in a subclass.")

    def normalise(self, feature, deltas=False):
        r"""Normalises the sequence feature.

        Parameters
        ----------
        feature : np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Sequence feature to be normalised, can be a NumPy array or a PyTorch tensor, can be batched.
        deltas : bool
            Whether `feature` is a delta feature, and should be normalised using the delta parameters.

        Returns
        -------
        np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Normalised sequence feature.
        """
        params = self.fetch_params(type(feature), deltas=deltas)
        return self._normalise(feature, **params)

    def denormalise(self, feature, deltas=False):
        r"""De-normalises the sequence feature.

        Parameters
        ----------
        feature : np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Sequence feature to be normalised, can be a NumPy array or a PyTorch tensor, can be batched.
        deltas : bool
            Whether `feature` is a delta feature, and should be normalised using the delta parameters.

        Returns
        -------
        np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Normalised sequence feature.
        """
        params = self.fetch_params(type(feature), deltas=deltas)
        return self._denormalise(feature, **params)

    def fetch_params(self, data_type=np.ndarray, deltas=False):
        r"""Gets the normalisation parameters, taking into account the delta flag and type of data."""
        # Dispatch on the container type so batched torch code and per-sentence
        # NumPy code share a single normaliser instance.
        if deltas:
            if data_type == torch.Tensor:
                return self.delta_params_torch
            else:
                return self.delta_params

        else:
            if data_type == torch.Tensor:
                return self.params_torch
            else:
                return self.params

    @staticmethod
    def _from_json(file_path):
        r"""Loads parameters from JSON file and converts to `np.ndarray`s."""
        feat_params = file_io.load_json(file_path)

        params = {}
        for param_name, param in feat_params.items():
            params[param_name] = np.array(param, dtype=np.float32)

        return params

    @staticmethod
    def _to_torch(params, device='cpu'):
        r"""Converts dictionary of parameters to `torch.Tensor`s on the specified device."""
        params_torch = {}
        for param_name, param in params.items():
            params_torch[param_name] = torch.tensor(param).to(device)

        return params_torch

    def load_params(self, data_dir, data_root='.', device='cpu'):
        r"""Loads the parameters from file and converts them to NumPy arrays and PyTorch tensors.

        Parameters
        ----------
        data_dir : str
            Directory containing all data for this dataset split.
        data_root : str
            Directory root for this dataset.
        device : str or torch.device
            Name of the device to place the parameters on.

        Notes
        -----
        NOTE(review): `device` is the third positional parameter -- callers passing it
        as the second positional argument will silently set `data_root` instead.
        """
        params_file = os.path.join(
            data_root, data_dir, self.file_pattern.format(name=self.name))
        self.params = self._from_json(params_file)
        self.params_torch = self._to_torch(self.params, device=device)

        if self.use_deltas:
            delta_params_file = os.path.join(
                data_root, data_dir, self.file_pattern.format(name=self.name + '_deltas'))
            self.delta_params = self._from_json(delta_params_file)
            self.delta_params_torch = self._to_torch(self.delta_params, device=device)
class _SpeakerDependentNormaliser(_FeatureNormaliser):
    r"""Speaker-dependent feature normaliser class, wraps individual normalisers exposing speaker identity argument.

    Parameters
    ----------
    name : str
        Name of the feature.
    speaker_id_list : str
        File name of the id list containing speaker names, used to load parameters for all speakers.
    use_deltas : bool
        Whether to load normalisation parameters for delta features.
    file_pattern : str
        Format of the JSON file containing the normalisation parameters.

    Attributes
    ----------
    speaker_ids : list[str]
        Names of the speakers for which parameters are loaded (read from `speaker_id_list`).
    """
    def __init__(self, name, speaker_id_list, use_deltas=False, file_pattern='{speaker_id}/{name}.json'):
        super(_SpeakerDependentNormaliser, self).__init__(name, use_deltas=use_deltas, file_pattern=file_pattern)
        self.speaker_id_list = speaker_id_list
        self.speaker_ids = None

        # Unlike the base class, parameters are stored per speaker:
        # dict[speaker_id, dict[param_name, array]].
        self.params = {}
        self.params_torch = {}
        if self.use_deltas:
            self.delta_params = {}
            self.delta_params_torch = {}

    def normalise(self, feature, speaker_ids, deltas=False):
        r"""Normalises the sequence feature based on speaker-dependent normalisation parameters.

        Parameters
        ----------
        feature : np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Sequence feature to be normalised, can be a NumPy array or a PyTorch tensor, can be batched.
        speaker_ids : list[str] or str
            Names of speakers for each batch item in `feature`.
        deltas : bool
            Whether `feature` is a delta feature, and should be normalised using the delta parameters.

        Returns
        -------
        np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Normalised sequence feature.
        """
        params = self.fetch_params(speaker_ids, type(feature), deltas=deltas)
        return self._normalise(feature, **params)

    def denormalise(self, feature, speaker_ids, deltas=False):
        r"""De-normalises the sequence feature based on speaker-dependent normalisation parameters.

        Parameters
        ----------
        feature : np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Sequence feature to be normalised, can be a NumPy array or a PyTorch tensor, can be batched.
        speaker_ids : list[str] or str
            Names of speakers for each batch item in `feature`.
        deltas : bool
            Whether `feature` is a delta feature, and should be normalised using the delta parameters.

        Returns
        -------
        np.ndarray or torch.Tensor, shape (batch_size, seq_len, feat_dim) or (seq_len, feat_dim)
            Normalised sequence feature.
        """
        params = self.fetch_params(speaker_ids, type(feature), deltas=deltas)
        return self._denormalise(feature, **params)

    def fetch_params(self, speaker_ids, data_type=np.ndarray, deltas=False):
        r"""Gets the speaker-dependent normalisation parameters, taking into account the delta flag and type of data.

        Parameters
        ----------
        speaker_ids : list[str] or str
            Names of speakers for each batch item.
        data_type : type
            Typically `torch.Tensor` for batched features, or `np.ndarray` for single sentences or visualisation code.
        deltas : bool
            Whether `feature` is a delta feature, and should be normalised using the delta parameters.

        Returns
        -------
        sd_params : dict[str, torch.Tensor] or dict[str, np.ndarray], shape (batch_size, feat_dim) or (feat_dim)
            The speaker dependent parameters.
        """
        speaker_ids = utils.listify(speaker_ids)
        speaker_params = super(_SpeakerDependentNormaliser, self).fetch_params(data_type=data_type, deltas=deltas)

        # Collect each parameter (e.g. mean) for every speaker with a leading batch
        # axis of size 1, then join per parameter with a single concatenate.
        # (The previous implementation concatenated pairwise, which is
        # O(batch_size**2) in copied data.)
        param_lists = {}
        for speaker_id in speaker_ids:
            for name, param in speaker_params[speaker_id].items():
                param_lists.setdefault(name, []).append(param[None, ...])

        sd_params = {}
        for name, params in param_lists.items():
            if data_type == torch.Tensor:
                stacked = torch.cat(params)
            else:
                stacked = np.concatenate(params)

            # Only drop the batch axis when there is a single item, preserving the
            # documented "(feat_dim)" shape for single sentences. Fix: the previous
            # unconditional `squeeze(0)` raised a ValueError for NumPy inputs with
            # batch_size > 1 (`np.squeeze` errors on a non-unit axis, whereas
            # `torch.Tensor.squeeze` is a silent no-op there).
            sd_params[name] = stacked.squeeze(0) if stacked.shape[0] == 1 else stacked

        return sd_params

    def load_params(self, data_dir, data_root='.', device='cpu'):
        r"""Loads the parameters for all speakers from file and stacks them in NumPy arrays and PyTorch tensors.

        Parameters
        ----------
        data_dir : str
            Directory containing all data for this dataset split.
        data_root : str
            Directory root for this dataset.
        device : str or torch.device
            Name of the device to place the parameters on.
        """
        # The speaker list is read lazily, once, from the id-list file.
        if self.speaker_ids is None:
            self.speaker_ids = get_file_ids(id_list=os.path.join(data_root, self.speaker_id_list))

        for speaker_id in self.speaker_ids:
            params_file = os.path.join(
                data_root, data_dir, self.file_pattern.format(name=self.name, speaker_id=speaker_id))
            self.params[speaker_id] = self._from_json(params_file)
            self.params_torch[speaker_id] = self._to_torch(self.params[speaker_id], device=device)

            if self.use_deltas:
                delta_params_file = os.path.join(
                    data_root, data_dir, self.file_pattern.format(speaker_id=speaker_id, name=self.name + '_deltas'))
                self.delta_params[speaker_id] = self._from_json(delta_params_file)
                self.delta_params_torch[speaker_id] = self._to_torch(self.delta_params[speaker_id], device=device)
def normalise_mvn(feature, mean, std_dev):
    """Standardise `feature` to zero mean and (approximately) unit variance.

    The 1e-8 term guards against division by a zero standard deviation.
    Works on NumPy arrays and torch tensors alike.
    """
    centred = feature - mean[..., None, :]
    return centred / (std_dev[..., None, :] + 1e-8)
def denormalise_mvn(feature, mean, std_dev):
    """Invert :func:`normalise_mvn` (up to its 1e-8 stabiliser): rescale then re-centre."""
    scaled = feature * std_dev[..., None, :]
    return scaled + mean[..., None, :]
class MeanVarianceNormaliser(_FeatureNormaliser):
    r"""Normalises features to zero mean and unit variance.

    Normalisation:
        `norm_f = (f - mean) / std_dev`

    Denormalisation:
        `f = (norm_f * std_dev) + mean`

    Parameters
    ----------
    name : str
        Name of the feature.
    use_deltas : bool
        Whether to load normalisation parameters for delta features.
    """
    def __init__(self, name, use_deltas=False):
        super(MeanVarianceNormaliser, self).__init__(
            name, use_deltas, '{name}_mvn.json')

    def _normalise(self, feature, **params):
        mean, std_dev = params['mean'], params['std_dev']
        return normalise_mvn(feature, mean, std_dev)

    def _denormalise(self, feature, **params):
        mean, std_dev = params['mean'], params['std_dev']
        return denormalise_mvn(feature, mean, std_dev)
class SpeakerDependentMeanVarianceNormaliser(_SpeakerDependentNormaliser):
    r"""Per-speaker variant of :class:`MeanVarianceNormaliser`.

    Parameters are loaded from one `{speaker_id}/{name}_mvn.json` file per speaker.
    """
    def __init__(self, name, speaker_id_list, use_deltas=False):
        super(SpeakerDependentMeanVarianceNormaliser, self).__init__(
            name, speaker_id_list, use_deltas, '{speaker_id}/{name}_mvn.json')

    def _normalise(self, feature, **params):
        return normalise_mvn(feature, mean=params['mean'], std_dev=params['std_dev'])

    def _denormalise(self, feature, **params):
        return denormalise_mvn(feature, mean=params['mean'], std_dev=params['std_dev'])
def normalise_minmax(feature, mmin, mmax):
    """Scale `feature` into [0, 1] given per-dimension minima (`mmin`) and maxima (`mmax`).

    Dimensions with a (near-)zero range are divided by 1 instead, to avoid
    division by zero. Works on NumPy arrays and torch tensors alike.
    """
    value_range = mmax - mmin
    value_range[abs(value_range) <= 1e-8] = 1.
    return (feature - mmin[..., None, :]) / value_range[..., None, :]
def denormalise_minmax(feature, mmin, mmax):
    """Map a [0, 1] feature back to its original range (inverse of :func:`normalise_minmax`).

    Dimensions with a (near-)zero range are multiplied by 1 instead, mirroring
    the guard used during normalisation.
    """
    value_range = mmax - mmin
    value_range[abs(value_range) <= 1e-8] = 1.
    return (feature * value_range[..., None, :]) + mmin[..., None, :]
class MinMaxNormaliser(_FeatureNormaliser):
    r"""Normalises features to the range [0, 1].

    Normalisation:
        `norm_f = (f - min) / (max - min)`

    Denormalisation:
        `f = norm_f * (max - min) + min`

    Parameters
    ----------
    name : str
        Name of the feature.
    use_deltas : bool
        Whether to load normalisation parameters for delta features.
    """
    def __init__(self, name, use_deltas=False):
        super(MinMaxNormaliser, self).__init__(
            name, use_deltas, '{name}_minmax.json')

    def _normalise(self, feature, **params):
        mmin, mmax = params['mmin'], params['mmax']
        return normalise_minmax(feature, mmin, mmax)

    def _denormalise(self, feature, **params):
        mmin, mmax = params['mmin'], params['mmax']
        return denormalise_minmax(feature, mmin, mmax)
class SpeakerDependentMinMaxNormaliser(_SpeakerDependentNormaliser):
    r"""Per-speaker variant of :class:`MinMaxNormaliser`.

    Parameters are loaded from one `{speaker_id}/{name}_minmax.json` file per speaker.
    """
    def __init__(self, name, speaker_id_list, use_deltas=False):
        super(SpeakerDependentMinMaxNormaliser, self).__init__(
            name, speaker_id_list, use_deltas, '{speaker_id}/{name}_minmax.json')

    def _normalise(self, feature, **params):
        return normalise_minmax(feature, mmin=params['mmin'], mmax=params['mmax'])

    def _denormalise(self, feature, **params):
        return denormalise_minmax(feature, mmin=params['mmin'], mmax=params['mmax'])
class _DataLoaderWrapper(object):
r"""Abstract :class:`torch.utils.data.DataLoader` wrapper. Allows attribute reference for underlying data loader."""
def __init__(self, data_loader):
self.data_loader = data_loader
def __getattr__(self, attr):
"""Recursively calls `__getattr__` until `self.data_loader` is the underlying data loader instance."""
if isinstance(self.data_loader, DataLoader):
return self.data_loader.__getattribute__(attr)
else:
# Recurse down until we get to the actual DataLoader.
return self.data_loader.__getattr__(attr)
def __len__(self):
return len(self.data_loader)
class ToDeviceWrapper(_DataLoaderWrapper):
    r"""Wraps the `__iter__` method of :class:`torch.utils.data.DataLoader`, mapping each batch to a fixed device."""
    def __init__(self, data_loader, device):
        super(ToDeviceWrapper, self).__init__(data_loader)
        self.torch_device = torch.device(device)

    def to_device(self, tensor):
        """Move `tensor` onto the target device; non-tensor values pass through untouched."""
        if not isinstance(tensor, torch.Tensor):
            return tensor
        return tensor.to(self.torch_device)

    def __iter__(self):
        for batch in self.data_loader:
            # Batches are (possibly nested) dictionaries; map every leaf tensor.
            yield utils.map_nested(self.to_device, batch)
| StarcoderdataPython |
6441533 | from __future__ import print_function, division
from ngraph.frontends.caffe2.c2_importer.importer import C2Importer
import ngraph.transformers as ngt
import ngraph.frontends.common.utils as util
from caffe2.python import core, workspace
import numpy as np
def linear_regression(iter_num, lrate, gamma, step_size, noise_scale):
    """Train y = W.x + B by SGD twice -- once via ngraph (imported from a Caffe2
    net definition) and once natively in Caffe2 -- and assert both converge.

    Parameters
    ----------
    iter_num : int
        Total number of single-sample SGD updates.
    lrate : float
        Base learning rate (applied with a negative sign by Caffe2's LearningRate op).
    gamma : float
        Multiplicative decay factor of the "step" learning-rate policy.
    step_size : int
        Number of iterations between learning-rate decays.
    noise_scale : float
        Scale of the Gaussian noise added to the regression targets.

    Raises
    ------
    AssertionError
        If either framework's final loss is not below 0.01.
    """
    # data multiplier
    m = 3
    # batch_len and data
    xs_np = np.array([[0, 0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [-1.0, -1.0]], dtype='f')
    ys_np = np.array([[0.5 * m], [2.5 * m], [4.5 * m], [6.5 * m], [-1.5 * m]], dtype='f')
    batch_len = len(ys_np)
    # with these values we have the following target weight and bias
    # to be approximated after computation:
    target_b = 0.5 * m
    target_w = np.array([1.0, 1.0]) * m
    # noise amplitude and noise generation (one scalar per sample, reshaped to a column)
    noise_l = np.array(noise_scale * np.random.randn(batch_len), dtype='f')
    noise = [[i] for i in noise_l]
    # caffe2 init network
    init_net = core.Net("init")
    ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
    ITER = init_net.ConstantFill([], "ITER", shape=[1], value=0, dtype=core.DataType.INT32)
    # for the parameters to be learned: we randomly initialize weight
    # being output scalar, and two variables, W is 1x2, X is 2x1
    W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
    B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
    print('Created init net.')
    # caffe2 train net
    train_net = core.Net("train")
    # definition of external inputs: X, ground truth and noisy version of truth
    workspace.FeedBlob('X', xs_np)
    workspace.FeedBlob('Y_gt', ys_np)
    workspace.FeedBlob('Y_noise', ys_np + noise)
    train_net.AddExternalInput("X")
    train_net.AddExternalInput("Y_noise")
    train_net.AddExternalInput("Y_gt")
    # now, for the normal linear regression prediction, this is all we need.
    Y_pred = train_net.FC(["X", W, B], "Y_pred")
    # when it will be computing the loss, we want to refer to the noisy version of the truth:
    dist = train_net.SquaredL2Distance(["Y_noise", Y_pred], "dist")
    loss = dist.AveragedLoss([], ["loss"])
    # Caffe2 creation of the initialization and training nets, needed to have objects created
    # and therefore handlers can be obtained by the importer
    workspace.CreateNet(init_net)
    workspace.CreateNet(train_net)
    # importing in ngraph caffe2 network
    print("\n\n---------------------ngraph behaviour:")
    importer = C2Importer()
    importer.parse_net_def(net_def=train_net.Proto(), init_net_def=init_net.Proto(),
                           c2_workspace=workspace)
    # Get handles to the various objects we are interested to for ngraph computation
    y_gt_ng, x_ng, w_ng, b_ng, y_pred_ng, dist_ng, loss_ng = \
        importer.get_op_handle(['Y_noise', 'X', 'W', 'B', 'Y_pred', 'dist', 'loss'])
    # setting learning rate for ngraph, that matches the one that it will be used for caffe2 below
    lr_params = {'name': 'step', 'base_lr': lrate, 'gamma': gamma, 'step': step_size}
    SGD = util.CommonSGDOptimizer(lr_params)
    parallel_update = SGD.minimize(loss_ng, [w_ng, b_ng])
    transformer = ngt.make_transformer()
    update_fun = transformer.computation(
        [loss_ng, w_ng, b_ng, parallel_update], x_ng, y_gt_ng, SGD.get_iter_buffer())
    # NOTE(review): `true_iter` is incremented below but never read afterwards --
    # presumably left over from debugging.
    true_iter = [0]
    # ngraph actual computation: one SGD update per (sample, noisy target) pair
    for i in range(iter_num // batch_len):
        for xs, ys in zip(xs_np, ys_np + noise):
            loss_val, w_val, b_val, _ = update_fun(xs, ys, i)
            # print("N it: %s W: %s, B: %s loss %s " % (i, w_val, b_val, loss_val))
            true_iter[0] += 1
    print("Ngraph loss %s " % (loss_val))
    # end of ngraph part
    # caffe2 backward pass and computation to compare results with ngraph
    gradient_map = train_net.AddGradientOperators([loss])
    # Increment the iteration by one.
    train_net.Iter(ITER, ITER)
    # Caffe2 backward pass and computation
    # Get gradients for all the computations above and do the weighted sum
    LR = train_net.LearningRate(ITER, "LR", base_lr=-lrate, policy="step",
                                stepsize=step_size, gamma=gamma)
    train_net.WeightedSum([W, ONE, gradient_map[W], LR], W)
    train_net.WeightedSum([B, ONE, gradient_map[B], LR], B)
    workspace.RunNetOnce(init_net)
    workspace.CreateNet(train_net)
    for i in range(iter_num):
        workspace.RunNet(train_net.Proto().name)
        # print("During training, loss is: {}".format(workspace.FetchBlob("loss")))
    print("Caffe2 loss is: {}".format(workspace.FetchBlob("loss")))
    # end of caffe2 part
    # printing out results
    print("Done {} iterations over the batch data, with noise coefficient set to {}".
          format(iter_num, noise_scale))
    print("Caffe2 after training, W is: {}".format(workspace.FetchBlob("W")))
    print("Caffe2 after training, B is: {}".format(workspace.FetchBlob("B")))
    print("Ngraph after training, W is: {}".format(w_val))
    print("Ngraph after training, B is: {}".format(b_val))
    print("Target W was: {}".format(target_w))
    print("Target B was: {}".format(target_b))
    assert(workspace.FetchBlob("loss") < 0.01)
    assert(loss_val < 0.01)
if __name__ == "__main__":
    # Demo hyper-parameters: 200 SGD steps, lr 0.01 decayed by 0.9 every 20
    # steps, targets corrupted with sigma = 0.01 Gaussian noise.
    linear_regression(iter_num=200, lrate=0.01, gamma=0.9, step_size=20, noise_scale=0.01)
| StarcoderdataPython |
21444 | <filename>MilightWifiBridge/MilightWifiBridge.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Milight 3.0 (LimitlessLED Wifi Bridge v6.0) library: Control wireless lights (Milight 3.0) with Wifi
Note that this library was tested with Milight Wifi iBox v1 and RGBW lights. It should work with any other
lights and bridge using Milight 3.0 / LimitlessLED v6.0 protocol.
Non-exhaustive functionality using the python class or using this file from shell
(launch this python file with '-h' parameter to get more information):
- Initialize the Wifi bridge
- Link/Unlink lights
- Light on/off
- Wifi bridge lamp on/off
- Set night mode
- Set white mode
- Set color (using Milight format)
- Set saturation
- Set brightness
- Set disco mode (9 available)
- Increase/Decrease disco mode speed
- Get Milight wifi bridge MAC address
- ...
Used protocol: http://www.limitlessled.com/dev/ (LimitlessLED Wifi Bridge v6.0 section)
"""
__author__ = '<NAME>'
__email__ = "<EMAIL>"
__license__ = "MIT License"
__copyright__ = "Copyright Quentin Comte-Gaz (2019)"
__python_version__ = "2.7+ and 3.+"
__version__ = "2.1 (2019/11/09)"
__status__ = "Usable for any project"
import socket
import collections
import sys, getopt
import logging
import binascii
class MilightWifiBridge:
"""Milight 3.0 Wifi Bridge class
Calling setup() function is necessary in order to make this class work properly.
"""
######################### Enums #########################
  class eZone:
    # Target zone of a light command; ALL (0) addresses every linked zone.
    ALL = 0
    ONE = 1
    TWO = 2
    THREE = 3
    FOUR = 4

  class eDiscoMode:
    # The nine disco (animation) modes selectable on the lights.
    DISCO_1 = 1
    DISCO_2 = 2
    DISCO_3 = 3
    DISCO_4 = 4
    DISCO_5 = 5
    DISCO_6 = 6
    DISCO_7 = 7
    DISCO_8 = 8
    DISCO_9 = 9

  class eTemperature:
    # Color-temperature presets as percentages (0% warm ... 100% cool).
    WARM = 0 # 2700K
    WARM_WHITE = 8 # 3000K
    COOL_WHITE = 35 # 4000K
    DAYLIGHT = 61 # 5000K
    COOL_DAYLIGHT = 100 # 6500K

  class eColor:
    # Common colors expressed in the Milight one-byte color format.
    RED = 0xFF
    LAVENDER = 0xD9
    BLUE = 0xBA
    AQUA = 0x85
    GREEN = 0x7A
    LIME = 0x54
    YELLOW = 0x3B
    ORANGE = 0x1E
######################### static variables/static functions/internal struct #########################
__START_SESSION_MSG = bytearray([0x20, 0x00, 0x00, 0x00, 0x16, 0x02, 0x62, 0x3A, 0xD5, 0xED, 0xA3, 0x01, 0xAE, 0x08,
0x2D, 0x46, 0x61, 0x41, 0xA7, 0xF6, 0xDC, 0xAF, 0xD3, 0xE6, 0x00, 0x00, 0x1E])
# Response sent by the milight wifi bridge after a start session query
# Keyword arguments:
# responseReceived -- (bool) Response valid
# mac -- (string) MAC address of the wifi bridge
# sessionId1 -- (int) First part of the session ID
# sessionId2 -- (int) Second part of the session ID
# sequenceNumber -- (int) Sequence number
__START_SESSION_RESPONSE = collections.namedtuple("StartSessionResponse", "responseReceived mac sessionId1 sessionId2")
__ON_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x01, 0x00, 0x00, 0x00])
__OFF_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x02, 0x00, 0x00, 0x00])
__NIGHT_MODE_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x05, 0x00, 0x00, 0x00])
__WHITE_MODE_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x05, 0x64, 0x00, 0x00, 0x00])
__DISCO_MODE_SPEED_UP_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x03, 0x00, 0x00, 0x00])
__DISCO_MODE_SLOW_DOWN_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x04, 0x00, 0x00, 0x00])
__LINK_CMD = bytearray([0x3D, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00])
__UNLINK_CMD = bytearray([0x3E, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_ON_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_OFF_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x04, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_WHITE_MODE_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x05, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_DISCO_MODE_SPEED_UP_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_DISCO_MODE_SLOW_DOWN_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x00])
@staticmethod
def __getSetBridgeLampColorCmd(color):
"""Give 'Set color for bridge lamp' command
Keyword arguments:
color -- (int or eColor) Color value between 0x00 and 0xFF
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
return: (bytearray) 'Set colo for bridge lamp' command
"""
color = int(color)
if color < 0:
color = 0
elif color > 0xFF:
color = 0xFF
color &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x00, 0x01, color, color, color, color])
@staticmethod
def __getSetColorCmd(color):
"""Give 'Set color' command
Keyword arguments:
color -- (int or eColor) Color value between 0x00 and 0xFF
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
return: (bytearray) 'Set color' command
"""
color = int(color)
if color < 0:
color = 0
elif color > 0xFF:
color = 0xFF
color &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x01, color, color, color, color])
@staticmethod
def __getSetDiscoModeForBridgeLampCmd(mode):
"""Give 'Set disco mode for bridge lamp' command
Keyword arguments:
mode -- (int) Disco mode between 1 and 9
return: (bytearray) 'Set disco mode for bridge lamp' command
"""
mode = int(mode)
if mode < 1:
mode = 1
elif mode > 9:
mode = 9
mode &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x00, 0x04, mode, 0x00, 0x00, 0x00])
@staticmethod
def __getSetDiscoModeCmd(mode):
"""Give 'Set disco mode' command
Keyword arguments:
mode -- (int) Disco mode between 1 and 9
return: (bytearray) 'Set disco mode' command
"""
mode = int(mode)
if mode < 1:
mode = 1
elif mode > 9:
mode = 9
mode &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x06, mode, 0x00, 0x00, 0x00])
@staticmethod
def __getSetBrightnessForBridgeLampCmd(brightness):
"""Give 'Set brightness for bridge lamp' command
Keyword arguments:
brightness -- (int) Brightness percentage between 0 and 100
return: (bytearray) 'Set brightness for bridge lamp' command
"""
brightness = int(brightness)
if brightness < 0:
brightness = 0
elif brightness > 100:
brightness = 100
brightness &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x00, 0x02, brightness, 0x00, 0x00, 0x00])
@staticmethod
def __getSetBrightnessCmd(brightness):
"""Give 'Set brightness' command
Keyword arguments:
brightness -- (int) Brightness percentage between 0 and 100
return: (bytearray) 'Set brightness' command
"""
brightness = int(brightness)
if brightness < 0:
brightness = 0
elif brightness > 100:
brightness = 100
brightness &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x03, brightness, 0x00, 0x00, 0x00])
@staticmethod
def __getSetSaturationCmd(saturation):
"""Give 'Set saturation' command
Keyword arguments:
saturation -- (int) Saturation percentage between 0 and 100
return: (bytearray) 'Set saturation' command
"""
saturation = int(saturation)
if saturation < 0:
saturation = 0
elif saturation > 100:
saturation = 100
saturation &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x02, saturation, 0x00, 0x00, 0x00])
@staticmethod
def __getSetTemperatureCmd(temperature):
    """Give 'Set temperature' command

    Keyword arguments:
    temperature -- (int) Temperature percentage between 0 and 100 (clamped)
                   0% <=> Warm white (2700K)
                   100% <=> Cool white (6500K)

    return: (bytearray) 'Set temperature' command
    """
    # Clamp into [0, 100] then keep a single byte
    level = min(max(int(temperature), 0), 100) & 0xFF
    return bytearray([0x31, 0x00, 0x00, 0x08, 0x05, level, 0x00, 0x00, 0x00])
@staticmethod
def __calculateCheckSum(command, zoneId):
    """Calculate request checksum

    Note: Request checksum is equal to SUM(all command bytes and of the zone number) & 0xFF

    Keyword arguments:
    command -- (bytearray) Command
    zoneId -- (int) Zone ID

    return: (int) Request checksum
    """
    # sum() over a bytearray adds every byte value, same as the manual loop
    return (sum(command) + zoneId) & 0xFF
@staticmethod
def __getStringFromUnicode(value):
    """Give the integer value of one received byte.

    Bridges Python 2/3 socket data: under Python 2 the received frame is a
    str of characters (ord() is needed), while under Python 3 indexing a
    bytes object already yields an int and ord() raises TypeError.

    BUGFIX: only TypeError is expected here; the previous bare
    'except Exception' could silently hide unrelated bugs.

    Keyword arguments:
    value -- (str or int) One byte of a received frame

    return: (int) Byte value
    """
    try:
        return ord(value)
    except TypeError:
        return value
################################### INIT ####################################
def __init__(self):
    """Class must be initialized with setup()"""
    # close() puts the instance in a known "not connected" state
    # (no usable socket, sequence number reset) until setup() is called
    self.close()
################################### SETUP ####################################
def close(self):
    """Close connection with Milight wifi bridge

    Safe to call at any time (including before setup()): the internal state
    is reset first, then the socket is shut down if one was ever created.
    """
    self.__initialized = False
    self.__sequence_number = 0
    try:
        self.__sock.shutdown(socket.SHUT_RDWR)
        self.__sock.close()
        logging.debug("Socket closed")
    # If close before initialization, better handle attribute error
    # (self.__sock does not exist until the first setup() call)
    except AttributeError:
        pass
def setup(self, ip, port=5987, timeout_sec=5.0):
    """Initialize the class (can be launched multiple time if setup changed or module crashed)

    Keyword arguments:
    ip -- (string) IP to communication with the Milight wifi bridge
    port -- (int, optional) UDP port to communication with the Milight wifi bridge
    timeout_sec -- (float, optional) Timeout in sec for Milight wifi bridge to answer commands

    return: (bool) Milight wifi bridge initialized
    """
    # Close potential previous Milight wifi bridge session
    self.close()
    # Create new milight wifi bridge session
    try:
        self.__sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
        self.__ip = ip
        self.__port = port
        # connect() on a UDP socket only records the default destination,
        # no packet is exchanged at this point
        self.__sock.connect((self.__ip, self.__port))
        self.__sock.settimeout(timeout_sec)
        self.__initialized = True
        logging.debug("UDP connection initialized with ip {} and port {}".format(str(ip), str(port)))
    except (socket.error, socket.herror, socket.gaierror, socket.timeout) as err:
        logging.error("Impossible to initialize the UDP connection with ip {} and port {}: {}".format(str(ip), str(port), str(err)))
    return self.__initialized
######################### INTERNAL UTILITY FUNCTIONS #########################
def __startSession(self):
    """Send start session request and return start session information

    return: (MilightWifiBridge.__START_SESSION_RESPONSE) Start session information containing response received,
            mac address and session IDs
    """
    # Send start session request
    data_to_send = MilightWifiBridge.__START_SESSION_MSG
    logging.debug("Sending frame '{}' to {}:{}".format(str(binascii.hexlify(data_to_send)),
                                                       str(self.__ip), str(self.__port)))
    self.__sock.send(data_to_send)
    # Pessimistic default until a valid 22 byte answer is parsed
    response = MilightWifiBridge.__START_SESSION_RESPONSE(responseReceived=False, mac="", sessionId1=-1, sessionId2=-1)
    try:
        # Receive start session response
        data = self.__sock.recvfrom(1024)[0]
        # A valid start session answer is exactly 22 bytes long
        if len(data) == 22:
            # Parse valid start session response:
            # bytes 7..12 hold the bridge MAC address, bytes 19 and 20 the two session IDs
            response = MilightWifiBridge.__START_SESSION_RESPONSE(responseReceived=True,
                          mac=str("{}:{}:{}:{}:{}:{}".format(format(MilightWifiBridge.__getStringFromUnicode(data[7]), 'x'),
                                                             format(MilightWifiBridge.__getStringFromUnicode(data[8]), 'x'),
                                                             format(MilightWifiBridge.__getStringFromUnicode(data[9]), 'x'),
                                                             format(MilightWifiBridge.__getStringFromUnicode(data[10]), 'x'),
                                                             format(MilightWifiBridge.__getStringFromUnicode(data[11]), 'x'),
                                                             format(MilightWifiBridge.__getStringFromUnicode(data[12]), 'x'))),
                          sessionId1=int(MilightWifiBridge.__getStringFromUnicode(data[19])),
                          sessionId2=int(MilightWifiBridge.__getStringFromUnicode(data[20])))
            logging.debug("Start session (mac address: {}, session ID 1: {}, session ID 2: {})"
                          .format(str(response.mac), str(response.sessionId1), str(response.sessionId2)))
        else:
            logging.warning("Invalid start session response size")
    except socket.timeout:
        logging.warning("Timed out for start session response")
    return response
def __sendRequest(self, command, zoneId):
    """Send command to a specific zone and get response (ACK from the wifi bridge)

    Keyword arguments:
    command -- (bytearray) Command (must be exactly 9 bytes)
    zoneId -- (int) Zone ID (0 = all zones, 1 to 4 = specific zone)

    return: (bool) Request received by the wifi bridge
    """
    returnValue = False
    # Send request only if valid parameters
    if len(bytearray(command)) == 9:
        if int(zoneId) >= 0 and int(zoneId) <= 4:
            # Every request opens its own short session to get fresh session IDs
            startSessionResponse = self.__startSession()
            if startSessionResponse.responseReceived:
                # For each request, increment the sequence number (even if the session ID is regenerated)
                # Sequence number must be between 0x01 and 0xFF
                self.__sequence_number = (self.__sequence_number + 1) & 0xFF
                if self.__sequence_number == 0:
                    self.__sequence_number = 1
                # Prepare request frame to send:
                # 10 byte header (with session IDs and sequence number), 9 byte command,
                # zone byte, padding byte, then a 1 byte checksum over command + zone
                bytesToSend = bytearray([0x80, 0x00, 0x00, 0x00, 0x11, startSessionResponse.sessionId1,
                                         startSessionResponse.sessionId2, 0x00, int(self.__sequence_number), 0x00])
                bytesToSend += bytearray(command)
                bytesToSend += bytearray([int(zoneId), 0x00])
                bytesToSend += bytearray([int(MilightWifiBridge.__calculateCheckSum(bytearray(command), int(zoneId)))])
                # Send request frame
                logging.debug("Sending request with command '{}' with session ID 1 '{}', session ID 2 '{}' and sequence number '{}'"
                              .format(str(binascii.hexlify(command)), str(startSessionResponse.sessionId1),
                                      str(startSessionResponse.sessionId2), str(self.__sequence_number)))
                self.__sock.send(bytesToSend)
                try:
                    # Receive response frame: an 8 byte ACK whose byte 6 echoes the sequence number
                    data = self.__sock.recvfrom(64)[0]
                    if len(data) == 8:
                        if int(MilightWifiBridge.__getStringFromUnicode(data[6])) == self.__sequence_number:
                            returnValue = True
                            logging.debug("Received valid response for previously sent request")
                        else:
                            logging.warning("Invalid sequence number ack {} instead of {}".format(str(data[6]),
                                                                                                 self.__sequence_number))
                    else:
                        logging.warning("Invalid response size {} instead of 8".format(str(len(data))))
                except socket.timeout:
                    logging.warning("Timed out for response")
            else:
                logging.warning("Start session failed")
        else:
            logging.error("Invalid zone {} (must be between 0 and 4)".format(str(zoneId)))
    else:
        logging.error("Invalid command size {} instead of 9".format(str(len(bytearray(command)))))
    return returnValue
######################### PUBLIC FUNCTIONS #########################
def turnOn(self, zoneId):
    """Request 'Light on' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__ON_CMD, zoneId)
    logging.debug("Turn on zone %s: %s", zoneId, acked)
    return acked
def turnOff(self, zoneId):
    """Request 'Light off' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__OFF_CMD, zoneId)
    logging.debug("Turn off zone %s: %s", zoneId, acked)
    return acked
def turnOnWifiBridgeLamp(self):
    """Request 'Wifi bridge lamp on'.

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_ON_CMD, 0x01)
    logging.debug("Turn on wifi bridge lamp: %s", acked)
    return acked
def turnOffWifiBridgeLamp(self):
    """Request 'Wifi bridge lamp off'.

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_OFF_CMD, 0x01)
    logging.debug("Turn off wifi bridge lamp: %s", acked)
    return acked
def setNightMode(self, zoneId):
    """Request 'Night mode' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__NIGHT_MODE_CMD, zoneId)
    logging.debug("Set night mode to zone %s: %s", zoneId, acked)
    return acked
def setWhiteMode(self, zoneId):
    """Request 'White mode' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__WHITE_MODE_CMD, zoneId)
    logging.debug("Set white mode to zone %s: %s", zoneId, acked)
    return acked
def setWhiteModeBridgeLamp(self):
    """Request 'White mode' for the bridge lamp.

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_WHITE_MODE_CMD, 0x01)
    logging.debug("Set white mode to wifi bridge: %s", acked)
    return acked
def setDiscoMode(self, discoMode, zoneId):
    """Request 'Set disco mode' for a zone.

    Keyword arguments:
    discoMode -- (int or MilightWifiBridge.eDiscoMode) Disco mode (9 modes available)
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetDiscoModeCmd(discoMode), zoneId)
    logging.debug("Set disco mode %s to zone %s: %s", discoMode, zoneId, acked)
    return acked
def setDiscoModeBridgeLamp(self, discoMode):
    """Request 'Set disco mode' for the bridge lamp.

    Keyword arguments:
    discoMode -- (int or MilightWifiBridge.eDiscoMode) Disco mode (9 modes available)

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetDiscoModeForBridgeLampCmd(discoMode), 0x01)
    logging.debug("Set disco mode %s to wifi bridge: %s", discoMode, acked)
    return acked
def speedUpDiscoMode(self, zoneId):
    """Request 'Disco mode speed up' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__DISCO_MODE_SPEED_UP_CMD, zoneId)
    logging.debug("Speed up disco mode to zone %s: %s", zoneId, acked)
    return acked
def speedUpDiscoModeBridgeLamp(self):
    """Request 'Disco mode speed up' for the wifi bridge lamp.

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_DISCO_MODE_SPEED_UP_CMD, 0x01)
    logging.debug("Speed up disco mode to wifi bridge: %s", acked)
    return acked
def slowDownDiscoMode(self, zoneId):
    """Request 'Disco mode slow down' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__DISCO_MODE_SLOW_DOWN_CMD, zoneId)
    logging.debug("Slow down disco mode to zone %s: %s", zoneId, acked)
    return acked
def slowDownDiscoModeBridgeLamp(self):
    """Request 'Disco mode slow down' for the wifi bridge lamp.

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_DISCO_MODE_SLOW_DOWN_CMD, 0x01)
    logging.debug("Slow down disco mode to wifi bridge: %s", acked)
    return acked
def link(self, zoneId):
    """Request 'Link' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__LINK_CMD, zoneId)
    logging.debug("Link zone %s: %s", zoneId, acked)
    return acked
def unlink(self, zoneId):
    """Request 'Unlink' for a zone.

    Keyword arguments:
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__UNLINK_CMD, zoneId)
    logging.debug("Unlink zone %s: %s", zoneId, acked)
    return acked
def setColor(self, color, zoneId):
    """Request 'Set color' for a zone.

    Keyword arguments:
    color -- (int or eColor) Color (between 0x00 and 0xFF)
             examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
                       0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetColorCmd(color), zoneId)
    logging.debug("Set color %s to zone %s: %s", color, zoneId, acked)
    return acked
def setColorBridgeLamp(self, color):
    """Request 'Set color' for the wifi bridge lamp.

    Keyword arguments:
    color -- (int or eColor) Color (between 0x00 and 0xFF)
             examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
                       0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetBridgeLampColorCmd(color), 0x01)
    logging.debug("Set color %s to wifi bridge: %s", color, acked)
    return acked
def setBrightness(self, brightness, zoneId):
    """Request 'Set brightness' for a zone.

    Keyword arguments:
    brightness -- (int) Brightness in percentage (between 0 and 100)
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetBrightnessCmd(brightness), zoneId)
    logging.debug("Set brightness %s%% to zone %s: %s", brightness, zoneId, acked)
    return acked
def setBrightnessBridgeLamp(self, brightness):
    """Request 'Set brightness' for the wifi bridge lamp.

    Keyword arguments:
    brightness -- (int) Brightness in percentage (between 0 and 100)

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetBrightnessForBridgeLampCmd(brightness), 0x01)
    logging.debug("Set brightness %s%% to the wifi bridge: %s", brightness, acked)
    return acked
def setSaturation(self, saturation, zoneId):
    """Request 'Set saturation' for a zone.

    Keyword arguments:
    saturation -- (int) Saturation in percentage (between 0 and 100)
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetSaturationCmd(saturation), zoneId)
    logging.debug("Set saturation %s%% to zone %s: %s", saturation, zoneId, acked)
    return acked
def setTemperature(self, temperature, zoneId):
    """Request 'Set temperature' for a zone.

    Keyword arguments:
    temperature -- (int or MilightWifiBridge.eTemperature) Temperature in percentage (between 0 and 100)
    zoneId -- (int or MilightWifiBridge.eZone) Zone ID

    return: (bool) Request received by the wifi bridge
    """
    acked = self.__sendRequest(MilightWifiBridge.__getSetTemperatureCmd(temperature), zoneId)
    # The bridge maps 0..100% linearly onto 2700K..6500K (38K per percent)
    logging.debug("Set temperature %s%% (%s kelvin) to zone %s: %s",
                  temperature, int(2700 + 38*temperature), zoneId, acked)
    return acked
def getMacAddress(self):
    """Request the MAC address of the milight wifi bridge.

    return: (string) MAC address of the wifi bridge (empty if an error occured)
    """
    mac = self.__startSession().mac
    logging.debug("Get MAC address: %s", mac)
    return mac
################################# HELP FUNCTION ################################
def __help(func="", filename=__file__):
    """Show help on how to use all or one specific milight wifi bridge command

    Keyword arguments:
    func -- (string, optional) Command line function requiring help, none will show all functions
    filename -- (string, optional) File name of the python script implementing the commands
    """
    func = func.lower()

    def _detail(description, examples, note):
        # Build the detailed help text of one command: description, optional
        # note, then one usage line per example invocation.
        text = description + "\r\n"
        if note:
            text += "\r\n" + note + "\r\n"
        text += "\r\nUsage:\r\n"
        text += "\r\n".join(filename + " " + example for example in examples) + "\r\n"
        return text

    # One entry per command line option:
    # (short key, lowercase long key, one line summary, description, usage examples, note)
    # BUGFIX compared to the previous implementation:
    #  - 'settemperature' is now reachable with its real short key 'e'
    #    (it was registered under 's', shadowed by setSaturation)
    #  - 'setdiscomodebridgelamp' is now reachable with its real short key '1'
    #    (it was registered under 'd', shadowed by setDiscoMode)
    #  - the summary of --turnOff shows its real short option '-f' (was '-o')
    commands = [
        ("h", "help",
         "HELP (-h, --help): Give information to use all or specific milight wifi bridge commands",
         "Give information to use all or specific milight wifi bridge commands",
         ["-h [command (default: none)]", "--help [command (default: none)]",
          "-h", "-h turnOn", "--help", "--help link"], ""),
        ("i", "ip",
         "IP (-i, --ip): Specify milight wifi bridge IP (mandatory to use any command)",
         "Specify milight wifi bridge IP (mandatory to use any command)",
         ["-i [ip]", "--ip [ip]", "-i 192.168.1.23", "--ip 192.168.1.23"], ""),
        ("p", "port",
         "PORT (-p, --port): Specify milight wifi bridge port (default value: 5987)",
         "Specify milight wifi bridge port",
         ["-p [port]", "--port [port]", "-p 1234", "--port 1234"],
         "Default value (if not called): 5987"),
        ("t", "timeout",
         "TIMEOUT (-t, --timeout): Specify timeout for communication with the wifi bridge in sec (default value: 5.0sec)",
         "Specify timeout for communication with the wifi bridge (in sec)",
         ["-t [timeout]", "--timeout [timeout]", "-t 1", "--timeout 1"],
         "Default value (if not called): 5.0"),
        ("z", "zone",
         "ZONE (-z, --zone): Specify milight light zone to control (default value: All zone)",
         "Specify milight light zone to control",
         ["-z [zone]", "--zone [zone]", "-z 1", "--zone 1"],
         "Default value (if not called): 0\r\nPossible values: 0 for all zone or zone 1 to 4"),
        ("m", "getmacaddress",
         "GET MAC ADDRESS (-m, --getMacAddress): Get the milight wifi bridge mac address",
         "Get the milight wifi bridge mac address",
         ["--ip 192.168.1.23 -m", "--ip 192.168.1.23 --getMacAddress"], ""),
        ("l", "link",
         "LINK (-l, --link): Link lights to a specific zone",
         "Link lights to a specific zone",
         ["--ip 192.168.1.23 --zone 1 -l", "--ip 192.168.1.23 --zone 1 --link"],
         "Note: In order to make this work, the light must be switch on manually max 3sec before this command"),
        ("u", "unlink",
         "UNLINK (-u, --unlink): Unlink lights",
         "Unlink lights",
         ["--ip 192.168.1.23 -u", "--ip 192.168.1.23 --unlink"],
         "Note: In order to make this work, the light must be switch on manually max 3sec before this command"),
        ("o", "turnon",
         "TURN ON (-o, --turnOn): Turn lights on",
         "Turn lights on",
         ["--ip 192.168.1.23 --zone 1 -o", "--ip 192.168.1.23 --zone 1 --turnOn"], ""),
        ("f", "turnoff",
         "TURN OFF (-f, --turnOff): Turn lights off",
         "Turn lights off",
         ["--ip 192.168.1.23 --zone 1 -f", "--ip 192.168.1.23 --zone 1 --turnOff"], ""),
        ("x", "turnonwifibridgelamp",
         "TURN WIFI BRIDGE LAMP ON (-x, --turnOnWifiBridgeLamp): Turn wifi bridge lamp on",
         "Turn wifi bridge lamp on",
         ["--ip 192.168.1.23 -x", "--ip 192.168.1.23 --turnOnWifiBridgeLamp"], ""),
        ("y", "turnoffwifibridgelamp",
         "TURN WIFI BRIDGE LAMP OFF (-y, --turnOffWifiBridgeLamp): Turn wifi bridge lamp off",
         "Turn wifi bridge lamp off",
         ["--ip 192.168.1.23 -y", "--ip 192.168.1.23 --turnOffWifiBridgeLamp"], ""),
        ("n", "setnightmode",
         "SET NIGHT MODE (-n, --setNightMode): Set night mode",
         "Set night mode",
         ["--ip 192.168.1.23 --zone 1 -n", "--ip 192.168.1.23 --zone 1 --setNightMode"], ""),
        ("w", "setwhitemode",
         "SET WHITE MODE (-w, --setWhiteMode): Set white mode",
         "Set white mode",
         ["--ip 192.168.1.23 --zone 1 -w", "--ip 192.168.1.23 --zone 1 --setWhiteMode"], ""),
        ("j", "setwhitemodebridgelamp",
         "SET WHITE MODE ON BRIDGE LAMP (-j, --setWhiteModeBridgeLamp): Set white mode on bridge lamp",
         "Set white mode on bridge lamp",
         ["--ip 192.168.1.23 -j", "--ip 192.168.1.23 --setWhiteModeBridgeLamp"], ""),
        ("k", "speedupdiscomodebridgelamp",
         "SPEED UP DISCO MODE FOR BRIDGE LAMP (-k, --speedUpDiscoModeBridgeLamp): Speed up disco mode for bridge lamp",
         "Speed up disco mode for bridge lamp",
         ["--ip 192.168.1.23 -k", "--ip 192.168.1.23 --speedUpDiscoModeBridgeLamp"], ""),
        ("q", "slowdowndiscomodebridgelamp",
         "SLOW DOWN DISCO MODE FOR BRIDGE LAMP (-q, --slowDownDiscoModeBridgeLamp): Slow down disco mode for bridge lamp",
         "Slow down disco mode for bridge lamp",
         ["--ip 192.168.1.23 -q", "--ip 192.168.1.23 --slowDownDiscoModeBridgeLamp"], ""),
        ("a", "speedupdiscomode",
         "SPEED UP DISCO MODE (-a, --speedUpDiscoMode): Speed up disco mode",
         "Speed up disco mode",
         ["--ip 192.168.1.23 --zone 1 -a", "--ip 192.168.1.23 --zone 1 --speedUpDiscoMode"], ""),
        ("g", "slowdowndiscomode",
         "SLOW DOWN DISCO MODE (-g, --slowDownDiscoMode): Slow down disco mode",
         "Slow down disco mode",
         ["--ip 192.168.1.23 --zone 1 -g", "--ip 192.168.1.23 --zone 1 --slowDownDiscoMode"], ""),
        ("c", "setcolor",
         "SET COLOR (-c, --setColor): Set specific color (between 0 and 255)",
         "Set specific color (between 0 and 255)",
         ["--ip 192.168.1.23 --zone 1 -c 255", "--ip 192.168.1.23 --zone 1 --setColor 255"], ""),
        ("b", "setbrightness",
         "SET BRIGHTNESS (-b, --setBrightness): Set brightness (in %)",
         "Set brightness (in %)",
         ["--ip 192.168.1.23 --zone 1 -b 50", "--ip 192.168.1.23 --zone 1 --setBrightness 50"], ""),
        ("r", "setcolorbridgelamp",
         "SET COLOR FOR THE BRIDGE LAMP (-r, --setColorBridgeLamp): Set specific color for the bridge lamp (between 0 and 255)",
         "Set specific color for the bridge lamp (between 0 and 255)",
         ["--ip 192.168.1.23 -r 255", "--ip 192.168.1.23 --setColorBridgeLamp 255"], ""),
        ("v", "setbrightnessbridgelamp",
         "SET BRIGHTNESS FOR THE BRIDGE LAMP (-v, --setBrightnessBridgeLamp): Set brightness for the bridge lamp (in %)",
         "Set brightness for the bridge lamp (in %)",
         ["--ip 192.168.1.23 -v 50", "--ip 192.168.1.23 --setBrightnessBridgeLamp 50"], ""),
        ("s", "setsaturation",
         "SET SATURATION (-s, --setSaturation): Set saturation (in %)",
         "Set saturation (in %)",
         ["--ip 192.168.1.23 --zone 1 -s 50", "--ip 192.168.1.23 --zone 1 --setSaturation 50"], ""),
        ("e", "settemperature",
         "SET TEMPERATURE (-e, --setTemperature): Set temperature (in %)",
         "Set temperature (in %)",
         ["--ip 192.168.1.23 --zone 1 -e 50", "--ip 192.168.1.23 --zone 1 --setTemperature 50"], ""),
        ("d", "setdiscomode",
         "SET DISCO MODE (-d, --setDiscoMode): Set disco mode (between 1 and 9)",
         "Set disco mode (between 1 and 9)",
         ["--ip 192.168.1.23 --zone 1 -d 5", "--ip 192.168.1.23 --zone 1 --setDiscoMode 5"], ""),
        ("1", "setdiscomodebridgelamp",
         "SET DISCO MODE FOR BRIDGE LAMP (-1, --setDiscoModeBridgeLamp): Set disco mode for bridge lamp (between 1 and 9)",
         "Set disco mode for bridge lamp (between 1 and 9)",
         ["--ip 192.168.1.23 -1 5", "--ip 192.168.1.23 --setDiscoModeBridgeLamp 5"], ""),
    ]

    # Specific command requested: show its detailed help only
    for short, long_key, summary, description, examples, note in commands:
        if func in (short, long_key):
            print(_detail(description, examples, note))
            return

    # No specific command requested: show the one line summary of every command
    if func == "":
        for short, long_key, summary, description, examples, note in commands:
            print(summary)
        # BUGFIX: examples now use the real option names --turnOn/--turnOff
        # (previously --lightOn/--lightOff, which getopt rejects)
        print("\r\n"
              + "Some examples (if ip '192.168.1.23', port is 5987):\r\n"
              + " - Get the mac address: "+filename+" --ip 192.168.1.23 --port 5987 --getMacAddress\r\n"
              + " - Set disco mode 5 in light zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --setDiscoMode 5\r\n"
              + " - Light on zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --turnOn\r\n"
              + " - Light off zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --turnOff\r\n"
              + " - Light on and set white light in zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --turnOn --setWhiteMode\r\n"
              + " - Light on all zone: "+filename+" --ip 192.168.1.23 --port 5987 --zone 0 --turnOn\r\n"
              + " - Light off all zone: "+filename+" --ip 192.168.1.23 --port 5987 --zone 0 --turnOff")
################################# MAIN FUNCTION ###############################
def main(parsed_args=None):
    """Shell Milight utility function

    Parses the command line options, checks the base parameters (ip, port,
    zone, timeout), connects to the Milight wifi bridge and executes every
    requested action in the order given on the command line.

    Keyword arguments:
    parsed_args -- (list of string, optional) Arguments to parse
                   (defaults to sys.argv[1:], evaluated at call time)
    """
    # Set the log level (no log will be shown if "logging.CRITICAL" is used)
    # Other levels: logging.DEBUG, logging.WARNING, logging.ERROR
    logger = logging.getLogger()
    logger.setLevel(logging.CRITICAL)

    # BUGFIX: a default of "parsed_args = sys.argv[1:]" is evaluated once at
    # import time; use a None sentinel so the real argv is read at call time.
    if parsed_args is None:
        parsed_args = sys.argv[1:]

    ip = ""         # No default IP, must be specified by the user
    port = 5987     # Default milight 3.0 port
    zone = 0        # By default, all zones are controlled
    timeout = 5.0   # By default, wait maximum 5 sec

    # Get options
    try:
        opts, args = getopt.getopt(parsed_args, "i:p:t:z:hmluofx23ynwagc:b:s:e:d:jkqr:v:1:",
                                   ["ip=", "port=", "timeout=", "zone=", "help", "debug", "nodebug",
                                    "getMacAddress", "link", "unlink", "turnOn", "turnOff", "turnOnWifiBridgeLamp",
                                    "turnOffWifiBridgeLamp", "setNightMode", "setWhiteMode", "speedUpDiscoMode", "slowDownDiscoMode",
                                    "setColor=", "setBrightness=", "setSaturation=", "setTemperature=", "setDiscoMode=",
                                    "setWhiteModeBridgeLamp", "speedUpDiscoModeBridgeLamp", "slowDownDiscoModeBridgeLamp",
                                    "setColorBridgeLamp=", "setBrightnessBridgeLamp=", "setDiscoModeBridgeLamp="])
    except getopt.GetoptError as err:
        print("[ERROR] "+str(err))
        __help()
        sys.exit(1)

    # Show help and apply the log level options first (before any other action)
    for o, a in opts:
        if o in ("-h", "--help"):
            if len(args) >= 1:
                __help(args[0])
            else:
                __help()
            sys.exit(0)
        # BUGFIX: previously matched ("-l", "--debug") and ("-z", "--nodebug"),
        # so the short options of link (-l) and zone (-z) silently changed the
        # log level as a side effect.
        elif o == "--debug":
            print("Debugging...")
            logger.setLevel(logging.DEBUG)
        elif o == "--nodebug":
            logger.setLevel(logging.CRITICAL)

    # Get base parameters
    for o, a in opts:
        if o in ("-i", "--ip"):
            ip = str(a)
            continue
        if o in ("-p", "--port"):
            port = int(a)
            continue
        if o in ("-t", "--timeout"):
            # BUGFIX: parsed as float (int() rejected values like "0.5"
            # although the documented default is 5.0)
            timeout = float(a)
            continue
        if o in ("-z", "--zone"):
            zone = int(a)
            continue

    # Check base parameters
    if ip == "":
        print("[ERROR] You need to specify the ip...\r\n")
        __help("ip")
        sys.exit(1)
    if zone < 0 or zone > 4:
        print("[ERROR] You need to specify a valid zone ID (between 0 and 4)\r\n")
        __help("zone")
        sys.exit(1)
    if timeout <= 0:
        print("[ERROR] You need to specify a valid timeout (more than 0sec)\r\n")
        __help("timeout")
        sys.exit(1)
    if port <= 0:
        print("[ERROR] You need to specify a valid port (more than 0)\r\n")
        __help("port")
        sys.exit(1)

    # Show base parameters
    print("Ip: "+str(ip))
    print("Zone: "+str(zone))
    print("Timeout: "+str(timeout))
    print("Port: "+str(port))

    # Initialize Milight bridge (setup() itself closes any previous session,
    # so no explicit close() call is needed beforehand)
    milight = MilightWifiBridge()
    is_init = milight.setup(ip, port, timeout)
    logging.debug("Milight bridge connection initialized with ip {}:{} : {}".format(ip, port, is_init))
    if (not is_init):
        print("[ERROR] Initialization failed, re-check the ip (and the port), use '-h' to get more information.")
        sys.exit(2)

    # Execute requested commands in the requested order
    returnValue = True
    atLeastOneRequestDone = False
    for o, a in opts:
        if o in ("-m", "--getMacAddress"):
            atLeastOneRequestDone = True
            macAddress = milight.getMacAddress()
            returnValue &= (macAddress != "")
            if macAddress != "":
                print("Mac address: "+str(macAddress))
            else:
                print("Failed to get mac address")
        elif o in ("-l", "--link"):
            atLeastOneRequestDone = True
            res = milight.link(zoneId=zone)
            returnValue &= res
            print("Link zone "+str(zone)+": "+str(res))
        elif o in ("-u", "--unlink"):
            atLeastOneRequestDone = True
            res = milight.unlink(zoneId=zone)
            returnValue &= res
            print("Unlink zone "+str(zone)+": "+str(res))
        elif o in ("-o", "--turnOn"):
            atLeastOneRequestDone = True
            res = milight.turnOn(zoneId=zone)
            returnValue &= res
            print("Turn on zone "+str(zone)+": "+str(res))
        elif o in ("-f", "--turnOff"):
            atLeastOneRequestDone = True
            res = milight.turnOff(zoneId=zone)
            returnValue &= res
            print("Turn off zone "+str(zone)+": "+str(res))
        elif o in ("-x", "--turnOnWifiBridgeLamp"):
            atLeastOneRequestDone = True
            res = milight.turnOnWifiBridgeLamp()
            returnValue &= res
            print("Turn on wifi bridge lamp: "+str(res))
        elif o in ("-y", "--turnOffWifiBridgeLamp"):
            atLeastOneRequestDone = True
            res = milight.turnOffWifiBridgeLamp()
            returnValue &= res
            print("Turn off wifi bridge lamp: "+str(res))
        elif o in ("-j", "--setWhiteModeBridgeLamp"):
            atLeastOneRequestDone = True
            res = milight.setWhiteModeBridgeLamp()
            returnValue &= res
            print("Set white mode to wifi bridge: "+str(res))
        elif o in ("-k", "--speedUpDiscoModeBridgeLamp"):
            atLeastOneRequestDone = True
            res = milight.speedUpDiscoModeBridgeLamp()
            returnValue &= res
            print("Speed up disco mode to wifi bridge: "+str(res))
        elif o in ("-q", "--slowDownDiscoModeBridgeLamp"):
            atLeastOneRequestDone = True
            res = milight.slowDownDiscoModeBridgeLamp()
            returnValue &= res
            print("Slow down disco mode to wifi bridge: "+str(res))
        elif o in ("-r", "--setColorBridgeLamp"):
            userColor = int(a)
            if userColor < 0 or userColor > 255:
                print("[ERROR] Color must be between 0 and 255")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setColorBridgeLamp(color=userColor)
            returnValue &= res
            print("Set color "+str(userColor)+" to wifi bridge: "+str(res))
        elif o in ("-v", "--setBrightnessBridgeLamp"):
            userBrightness = int(a)
            if userBrightness < 0 or userBrightness > 100:
                print("[ERROR] Brightness must be between 0 and 100 (in %)")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setBrightnessBridgeLamp(brightness=userBrightness)
            returnValue &= res
            print("Set brightness "+str(userBrightness)+"% to the wifi bridge: "+str(res))
        elif o in ("-1", "--setDiscoModeBridgeLamp"):
            mode = int(a)
            if mode < 1 or mode > 9:
                print("[ERROR] Disco mode must be between 1 and 9")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setDiscoModeBridgeLamp(discoMode=mode)
            returnValue &= res
            print("Set disco mode "+str(mode)+" to wifi bridge: "+str(res))
        elif o in ("-n", "--setNightMode"):
            atLeastOneRequestDone = True
            res = milight.setNightMode(zoneId=zone)
            returnValue &= res
            print("Set night mode to zone "+str(zone)+": "+str(res))
        elif o in ("-w", "--setWhiteMode"):
            atLeastOneRequestDone = True
            res = milight.setWhiteMode(zoneId=zone)
            returnValue &= res
            print("Set white mode to zone "+str(zone)+": "+str(res))
        elif o in ("-a", "--speedUpDiscoMode"):
            atLeastOneRequestDone = True
            res = milight.speedUpDiscoMode(zoneId=zone)
            returnValue &= res
            print("Speed up disco mode to zone "+str(zone)+": "+str(res))
        elif o in ("-g", "--slowDownDiscoMode"):
            atLeastOneRequestDone = True
            res = milight.slowDownDiscoMode(zoneId=zone)
            returnValue &= res
            print("Slow down disco mode to zone "+str(zone)+": "+str(res))
        elif o in ("-d", "--setDiscoMode"):
            mode = int(a)
            if mode < 1 or mode > 9:
                print("[ERROR] Disco mode must be between 1 and 9")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setDiscoMode(discoMode=mode, zoneId=zone)
            returnValue &= res
            print("Set disco mode "+str(mode)+" to zone "+str(zone)+": "+str(res))
        elif o in ("-c", "--setColor"):
            userColor = int(a)
            if userColor < 0 or userColor > 255:
                print("[ERROR] Color must be between 0 and 255")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setColor(color=userColor, zoneId=zone)
            returnValue &= res
            print("Set color "+str(userColor)+" to zone "+str(zone)+": "+str(res))
        elif o in ("-b", "--setBrightness"):
            userBrightness = int(a)
            if userBrightness < 0 or userBrightness > 100:
                print("[ERROR] Brightness must be between 0 and 100 (in %)")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setBrightness(brightness=userBrightness, zoneId=zone)
            returnValue &= res
            print("Set brightness "+str(userBrightness)+"% to zone "+str(zone)+": "+str(res))
        elif o in ("-s", "--setSaturation"):
            userSaturation = int(a)
            if userSaturation < 0 or userSaturation > 100:
                print("[ERROR] Saturation must be between 0 and 100 (in %)")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setSaturation(saturation=userSaturation, zoneId=zone)
            returnValue &= res
            print("Set saturation "+str(userSaturation)+"% to zone "+str(zone)+": "+str(res))
        elif o in ("-e", "--setTemperature"):
            userTemperature = int(a)
            if userTemperature < 0 or userTemperature > 100:
                print("[ERROR] Temperature must be between 0 and 100 (in %)")
                sys.exit(2)
            atLeastOneRequestDone = True
            res = milight.setTemperature(temperature=userTemperature, zoneId=zone)
            returnValue &= res
            print("Set temperature "+str(userTemperature)+"% to zone "+str(zone)+": "+str(res))
        # In case an error occured in any of the request, stop the program
        if not returnValue:
            break

    if not atLeastOneRequestDone:
        print("[ERROR] You must call one action, use '-h' to get more information.")
        sys.exit(1)
    if not returnValue:
        print("[ERROR] Request failed")
        sys.exit(1)
    if atLeastOneRequestDone and returnValue:
        sys.exit(0)
if __name__ == '__main__':
    # Command line entry point: run the shell utility when executed as a script
    main()
| StarcoderdataPython |
1785868 | <gh_stars>0
def find_top_confirmed(n=15, path="dataset2.csv"):
    """Return the *n* countries with the most confirmed cases.

    Reads a Johns-Hopkins-style daily-report CSV at *path* (default keeps the
    original hard-coded "dataset2.csv"), sums the per-province rows for each
    country, and returns a one-column DataFrame indexed by Country_Region.

    Parameters
    ----------
    n : int
        Number of countries to return (default 15).
    path : str
        CSV file to read; parameterized so the data source is not hard-coded.
    """
    import pandas as pd
    corona_df = pd.read_csv(path)
    # Aggregate province-level rows into one total per country.
    by_country = corona_df.groupby('Country_Region').sum()[['Confirmed', 'Deaths', 'Recovered', 'Active']]
    return by_country.nlargest(n, 'Confirmed')[['Confirmed']]
# ---------------------------------------------------------------------------
# Confirmed-cases world map (module level: runs once, at import time).
# Builds a folium map with one circle per dataset row, sized by confirmed
# count, then snapshots it as HTML for the Flask template below.
# ---------------------------------------------------------------------------
cdf=find_top_confirmed()
# (country, confirmed-count) pairs for the top-countries table in the template.
pairs=[(country,confirmed) for country,confirmed in zip(cdf.index,cdf['Confirmed'])]
import folium
import pandas as pd
corona_df = pd.read_csv("dataset2.csv")
corona_df=corona_df[['Lat','Long_','Confirmed','Country_Region']]
# Rows without coordinates cannot be plotted.
corona_df=corona_df.dropna()
m=folium.Map(location=[52,1],
             max_bounds=True,
             tiles='cartodbpositron',
             min_zoom=2,
             zoom_start=3)
def circle_maker(x):
    # x is a row: [Lat, Long_, Confirmed, Country_Region].
    folium.Circle(location=[x[0],x[1]],
                  radius=float(x[2]),
                  color="blue",
                  weight=1,
                  fill=True,
                  fill_color="blue",
                  fill_opacity=0.3,
                  tooltip='{}\n Confirmed Cases:{}'.format(x[3],x[2])).add_to(m)
# apply() is used only for its side effect of adding circles to the map.
corona_df.apply(lambda x:circle_maker(x),axis=1)
html_map=m._repr_html_()
def find_top_Active(n=15, path="dataset2.csv"):
    """Return the *n* countries with the most active cases.

    Same aggregation as ``find_top_confirmed`` (sum province rows per
    country) but ranked on the ``Active`` column.  *path* parameterizes the
    previously hard-coded "dataset2.csv".
    """
    import pandas as pd
    corona_df = pd.read_csv(path)
    # Aggregate province-level rows into one total per country.
    by_country = corona_df.groupby('Country_Region').sum()[['Confirmed', 'Deaths', 'Recovered', 'Active']]
    return by_country.nlargest(n, 'Active')[['Active']]
# ---------------------------------------------------------------------------
# Active-cases map (module level, same recipe as the confirmed map above).
# ---------------------------------------------------------------------------
adf=find_top_Active()
pairsActive=[(country,active) for country,active in zip(adf.index,adf['Active'])]
import folium
import pandas as pd
corona_df = pd.read_csv("dataset2.csv")
corona_df=corona_df[['Lat','Long_','Active','Country_Region']]
corona_df=corona_df.dropna()
a=folium.Map(location=[52,1],
             max_bounds=True,
             tiles='cartodbpositron',
             min_zoom=2,
             zoom_start=3)
# NOTE: this re-definition shadows the earlier circle_maker; it targets map
# `a` and shows Active counts in the tooltip.
def circle_maker(x):
    folium.Circle(location=[x[0],x[1]],
                  radius=float(x[2]),
                  color="blue",
                  weight=1,
                  fill=True,
                  fill_color="blue",
                  fill_opacity=0.3,
                  tooltip='{}\n Active Cases:{}'.format(x[3],x[2])).add_to(a)
corona_df.apply(lambda x:circle_maker(x),axis=1)
html_Activemap=a._repr_html_()
def find_top_Recovered(n=15, path="dataset2.csv"):
    """Return the *n* countries with the most recovered cases.

    Same aggregation as ``find_top_confirmed`` but ranked on ``Recovered``.
    *path* parameterizes the previously hard-coded "dataset2.csv".
    """
    import pandas as pd
    corona_df = pd.read_csv(path)
    # Aggregate province-level rows into one total per country.
    by_country = corona_df.groupby('Country_Region').sum()[['Confirmed', 'Deaths', 'Recovered', 'Active']]
    return by_country.nlargest(n, 'Recovered')[['Recovered']]
# ---------------------------------------------------------------------------
# Recovered-cases map (module level, same recipe as the confirmed map).
# ---------------------------------------------------------------------------
rdf=find_top_Recovered()
pairsRecovered=[(country,recovered) for country,recovered in zip(rdf.index,rdf['Recovered'])]
import folium
import pandas as pd
corona_df = pd.read_csv("dataset2.csv")
corona_df=corona_df[['Lat','Long_','Recovered','Country_Region']]
corona_df=corona_df.dropna()
r=folium.Map(location=[52,1],
             max_bounds=True,
             tiles='cartodbpositron',
             min_zoom=2,
             zoom_start=3)
# Shadows the previous circle_maker; draws on map `r` with Recovered counts.
def circle_maker(x):
    folium.Circle(location=[x[0],x[1]],
                  radius=float(x[2]),
                  color="blue",
                  weight=1,
                  fill=True,
                  fill_color="blue",
                  fill_opacity=0.3,
                  tooltip='{}\n Recoverd Cases:{}'.format(x[3],x[2])).add_to(r)
corona_df.apply(lambda x:circle_maker(x),axis=1)
html_Recovermap=r._repr_html_()
def find_top_Death(n=15, path="dataset2.csv"):
    """Return the *n* countries with the most deaths.

    Same aggregation as ``find_top_confirmed`` but ranked on ``Deaths``.
    *path* parameterizes the previously hard-coded "dataset2.csv".
    """
    import pandas as pd
    corona_df = pd.read_csv(path)
    # Aggregate province-level rows into one total per country.
    by_country = corona_df.groupby('Country_Region').sum()[['Confirmed', 'Deaths', 'Recovered', 'Active']]
    return by_country.nlargest(n, 'Deaths')[['Deaths']]
# ---------------------------------------------------------------------------
# Deaths map (module level, same recipe as the confirmed map).
# ---------------------------------------------------------------------------
ddf=find_top_Death()
pairsDeaths=[(country,deaths) for country,deaths in zip(ddf.index,ddf['Deaths'])]
import folium
import pandas as pd
corona_df = pd.read_csv("dataset2.csv")
corona_df=corona_df[['Lat','Long_','Deaths','Country_Region']]
corona_df=corona_df.dropna()
d=folium.Map(location=[52,1],
             max_bounds=True,
             tiles='cartodbpositron',
             min_zoom=2,
             zoom_start=3)
# Shadows the previous circle_maker; draws on map `d`.
def circle_maker(x):
    # Radius scaled x3 — presumably so the (smaller) death counts remain
    # visible next to the other maps; confirm intent before changing.
    folium.Circle(location=[x[0],x[1]],
                  radius=float(x[2])*3,
                  color="blue",
                  weight=1,
                  fill=True,
                  fill_color="blue",
                  fill_opacity=0.3,
                  tooltip='{}\n Death Cases:{}'.format(x[3],x[2])).add_to(d)
corona_df.apply(lambda x:circle_maker(x),axis=1)
html_Deadmap=d._repr_html_()
from flask import Flask,render_template
app=Flask(__name__)

# Single-page dashboard: hands the module-level tables/maps built above to
# the template.
@app.route('/')
def home():
    return render_template("home.html",table=cdf, cmap=html_map,amap=html_Activemap,rmap=html_Recovermap,dmap=html_Deadmap,pairs=pairs,pairsActive=pairsActive,pairsRecovered=pairsRecovered,pairsDeaths=pairsDeaths)

if __name__=="__main__":
    # debug=True enables the reloader/debugger; disable for production use.
    app.run(debug=True)
5189725 | <gh_stars>0
#!/usr/bin/python
# Grab data from the current cost envi in the lounge room
import time
import pycurrentcost
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
# Maps a Current Cost sensor channel number (as a string) to a readable
# appliance label; unknown sensors fall back to '???' in Collect() below.
sensor_map = {
    '0': 'all',
    '1': 'fridge_kitchen',
    '3': 'freezer_laundry',
    '6': 'fridge_laundry',
}
def Collect():
    # Poll the Current Cost serial reader forever and push each reading to a
    # local Prometheus pushgateway.  NOTE: this file is Python 2 (print
    # statements below) — do not run it under Python 3 unmodified.
    cc = pycurrentcost.CurrentCostReader(port="/dev/currentcost")
    metrics = {}  # NOTE(review): unused — looks like leftover scaffolding.
    while True:
        # get_reading() blocks until the device emits a sample.
        reading = cc.get_reading()
        print reading.xml_str
        print 'Temperature: %s' % reading.temperature
        print 'Sensor: %s (%s)' %(reading.sensor_num, sensor_map.get(reading.sensor_num, '???'))
        print 'Watts: %s' % reading.channels[1]['watts']
        print

        # Temperature gauge is pushed without a per-sensor grouping key.
        registry = CollectorRegistry()
        Gauge('job_last_success_unixtime', 'Last time the current cost daemon saw a reading',
              registry=registry).set_to_current_time()
        Gauge('temp_c', 'Temperature in celcius', registry=registry).set(reading.temperature)
        push_to_gateway('localhost:9091', job="currentcost", registry=registry)

        # Watts gauge is grouped by the human-readable sensor name so each
        # appliance gets its own series.
        registry = CollectorRegistry()
        Gauge('job_last_success_unixtime', 'Last time the current cost daemon saw a reading',
              registry=registry).set_to_current_time()
        Gauge('watts', 'Watts consumed instantaneous',
              registry=registry).set(reading.channels[1]['watts'])
        push_to_gateway('localhost:9091',
                        job='currentcost',
                        grouping_key={'instance': sensor_map.get(reading.sensor_num, 'unknown')},
                        registry=registry)
# Run the collector loop when invoked as a script.
if __name__ == '__main__':
    Collect()
| StarcoderdataPython |
import cv2
print ("package improted")
print('Opencv version {0}'.format(cv2.__version__))
path = 'Resources/'
# Reading and showing images stored in the project's Resources folder.
img = cv2.imread("Resources/base.jpeg")
# imread's second argument can be cv2.IMREAD_COLOR, cv2.IMREAD_GRAYSCALE or
# cv2.IMREAD_UNCHANGED; instead of the three flags you can simply pass the
# integers 1, 0, -1 respectively.
# NOTE: this second call overwrites the color image above with a grayscale load.
img = cv2.imread('Resources/base.jpeg',0)
# First argument is the window name (a string).
cv2.imshow("Output",img)
# waitKey(0) waits indefinitely for a key stroke; a positive value waits that
# many milliseconds.  & 0xFF keeps only the low byte of the key code.
k = cv2.waitKey(0) & 0xFF
if k == 27:
    # ESC: close without saving.
    cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
    # write the (grayscale) image back out
    cv2.imwrite(path+'newWritten2.jpeg', img)
    cv2.destroyAllWindows()

## Using matplotlib to show the same image.
from matplotlib import pyplot as plt
plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
plt.show()
# warning: a color image loaded by OpenCV is in BGR channel order while
# Matplotlib expects RGB, so colors will not display correctly without conversion.
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import logging
import os
import sys
from ops.cli.parser import SubParserConfig
from ops.hierarchical.composition_config_generator import CompositionConfigGenerator
logger = logging.getLogger(__name__)
class HelmfileParserConfig(SubParserConfig):
    """Sub-command configuration that exposes helmfile under the ops CLI."""

    def get_name(self):
        # Sub-command name as typed on the command line.
        return 'helmfile'

    def get_help(self):
        return 'Wrap common helmfile tasks using hierarchical configuration support'

    def configure(self, parser):
        # Only one option of our own; everything after it is passed through
        # to helmfile itself (see HelmfileRunner.get_helmfile_command).
        parser.add_argument(
            '--helmfile-path',
            type=str,
            default=None,
            help='Dir to where helmfile.yaml is located')
        return parser

    def get_epilog(self):
        return '''
    Examples:
        # Run helmfile sync
        ops data/env=dev/region=va6/project=ee/cluster=experiments/composition=helmfiles helmfile sync
        # Run helmfile sync for a single chart
        ops data/env=dev/region=va6/project=ee/cluster=experiments/composition=helmfiles helmfile --selector chart=nginx-controller sync
        # Run helmfile sync with concurrency flag
        ops data/env=dev/region=va6/project=ee/cluster=experiments/composition=helmfiles helmfile --selector chart=nginx-controller sync --concurrency=1
        '''
class HelmfileRunner(CompositionConfigGenerator, object):
    """Generates the hiera-based helmfile config and builds the helmfile command.

    `execute` is a callable taking dict(command=...) and returning the
    command's exit code; it is injected so callers control how shell
    commands are run.
    """

    def __init__(self, ops_config, cluster_config_path, execute):
        super(HelmfileRunner, self).__init__(["helmfiles"])
        logging.basicConfig(level=logging.INFO)
        self.ops_config = ops_config
        self.cluster_config_path = cluster_config_path
        self.execute = execute

    def run(self, args, extra_args):
        """Generate config, set up kubeconfig, and return the helmfile command dict."""
        # Trailing os.path.join(..., '') normalizes the path to end with a separator.
        config_path_prefix = os.path.join(self.cluster_config_path, '')
        default_helmfiles = '../ee-k8s-infra/compositions/helmfiles'
        args.helmfile_path = default_helmfiles if args.helmfile_path is None else os.path.join(
            args.helmfile_path, '')

        # The CLI must point at a composition=helmfiles leaf of the hierarchy.
        compositions = self.get_sorted_compositions(config_path_prefix)
        if len(compositions) == 0 or compositions[0] != "helmfiles":
            raise Exception(
                "Please provide the full path to composition=helmfiles")
        composition = compositions[0]
        conf_path = self.get_config_path_for_composition(
            config_path_prefix, composition)
        data = self.generate_helmfile_config(conf_path, args)
        self.setup_kube_config(data)

        command = self.get_helmfile_command(args, extra_args)
        return dict(command=command)

    def setup_kube_config(self, data):
        """Point KUBECONFIG at a freshly generated kubeconfig for the target cluster.

        Only EKS clusters are supported; any other clusterType exits the process.
        """
        if data['helm']['global']['clusterType'] == 'eks':
            cluster_name = data['helm']['global']['fqdn']
            aws_profile = data['helm']['global']['aws']['profile']
            region = data['helm']['global']['region']['location']

            file_location = self.generate_eks_kube_config(
                cluster_name, aws_profile, region)
            # Exported so the helmfile subprocess picks up the same kubeconfig.
            os.environ['KUBECONFIG'] = file_location
        else:
            logger.warning('currently only eks type clusters supported')
            sys.exit(1)

    def generate_eks_kube_config(self, cluster_name, aws_profile, region):
        """Run `aws eks update-kubeconfig` into a temp file and return its path."""
        file_location = self.get_tmp_file()
        cmd = "aws eks update-kubeconfig --name {} --profile {} --region {} --kubeconfig {}".format(cluster_name,
                                                                                                    aws_profile,
                                                                                                    region,
                                                                                                    file_location)
        return_code = self.execute(dict(command=cmd))
        if return_code != 0:
            raise Exception(
                "Unable to generate EKS kube config. Exit code was {}".format(return_code))
        return file_location

    @staticmethod
    def get_tmp_file():
        # delete=False: the file must outlive this call so the aws CLI and
        # helmfile can read it later; it is never cleaned up here.
        import tempfile
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            return tmp_file.name

    def generate_helmfile_config(self, path, args):
        """Render the hierarchical config to hiera-generated.yaml and return it."""
        output_file = args.helmfile_path + "/hiera-generated.yaml"
        logger.info('Generating helmfiles config %s', output_file)

        # Both key lists are optional in ops config; missing keys mean "no filtering".
        try:
            excluded_keys = self.ops_config["compositions"]["excluded_config_keys"]["helmfile"]
        except KeyError:
            excluded_keys = []

        try:
            filtered_keys = self.ops_config["compositions"]["filtered_output_keys"]["helmfile"]
        except KeyError:
            filtered_keys = []

        return self.config_generator.generate_config(config_path=path,
                                                     filters=filtered_keys,
                                                     exclude_keys=excluded_keys,
                                                     output_format="yaml",
                                                     output_file=output_file,
                                                     print_data=True)

    def get_helmfile_command(self, args, extra_args):
        """Build the shell command: cd into the helmfile dir and forward extra args."""
        helmfile_args = ' '.join(extra_args)
        return "cd {helmfile_path} && helmfile {helmfile_args}".format(
            helmfile_path=args.helmfile_path,
            helmfile_args=helmfile_args)
| StarcoderdataPython |
#!/usr/bin/env python3.5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import inspect
import click
async def cleanup(loop, *, timeout=None, cancel=False):
    """Wait for every task on *loop* other than the calling task to finish.

    Parameters
    ----------
    loop : the event loop whose tasks should be drained (cleanup must be
        awaited on this loop).
    timeout : optional seconds to wait before giving up on the tasks.
    cancel : when True, cancel the tasks first instead of letting them run
        to completion.
    """
    # asyncio.Task.current_task/all_tasks were deprecated in Python 3.7 and
    # removed in 3.12; prefer the module-level functions when available.
    if hasattr(asyncio, "current_task"):
        current_task = asyncio.current_task()
        pending = asyncio.all_tasks()
    else:
        current_task = asyncio.Task.current_task(loop=loop)
        pending = asyncio.Task.all_tasks(loop=loop)
    tasks = [
        t for t in pending
        if t is not current_task
    ]

    if tasks:
        if cancel:
            for task in tasks:
                task.cancel()
        # The explicit loop= argument was removed from asyncio.wait in 3.10;
        # cleanup() always runs on *loop*, so the running loop is correct.
        await asyncio.wait(tasks, timeout=timeout)
class AsyncCommand(click.Command):
    """click.Command subclass that runs coroutine callbacks on an event loop.

    If the command callback is a coroutine function it is transparently
    wrapped so that invoking the command drives the coroutine to completion
    (with Ctrl-C handling and best-effort task cleanup afterwards).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Check to see if the callback is a coroutine function, and if it is
        # we'll wrap it so that it gets called with the global event loop.
        # The __wrapped__ check handles decorated callbacks (functools.wraps).
        if (inspect.iscoroutinefunction(self.callback) or
                inspect.iscoroutinefunction(
                    getattr(self.callback, "__wrapped__", None))):
            original_callback = self.callback

            @functools.wraps(original_callback)
            def wrapper(*args, **kwargs):
                loop = asyncio.get_event_loop()
                main_t = asyncio.ensure_future(
                    original_callback(*args, **kwargs),
                    loop=loop,
                )

                try:
                    try:
                        loop.run_until_complete(main_t)
                    except KeyboardInterrupt:
                        # First Ctrl-C: cancel the main task, then let the
                        # loop spin until the cancellation completes.
                        main_t.cancel()
                        # This won't actually run forever because the call to
                        # loop.run_until_complete added a callback to the
                        # future that will stop the loop once main_t has
                        # finished and return control back to this function.
                        loop.run_forever()

                    # Try to clean up all of the tasks by waiting for any
                    # existing tasks to finish. Ideally the main function
                    # triggered everything to try and finish up and exit on
                    # it's own. However, if it hadn't then we'll cancel
                    # everything after we wait a small amount of time.
                    cleanup_t = asyncio.ensure_future(
                        cleanup(loop, timeout=15),
                        loop=loop,
                    )
                    try:
                        loop.run_until_complete(cleanup_t)
                    except KeyboardInterrupt:
                        # We got another KeyboardInterrupt while waiting on the
                        # pending tasks to finish. We'll cancel that cleanup
                        # job and let everything fall through to the final
                        # cleanup that just cancels everything.
                        cleanup_t.cancel()
                        # Like above, this will not actually run forever
                        # because of callback added to the cleanup_t task.
                        loop.run_forever()
                finally:
                    # Just cancel everything at this point, we don't want
                    # anything to still be executing once this is over.
                    loop.run_until_complete(cleanup(loop, cancel=True))
                    loop.stop()

            self.callback = wrapper

    def make_context(self, *args, **kwargs):
        # Expose the loop on the click context so command bodies can reach it.
        ctx = super().make_context(*args, **kwargs)
        ctx.event_loop = asyncio.get_event_loop()
        return ctx
| StarcoderdataPython |
# metakernel/_metakernel.py
from __future__ import print_function
import base64
import codecs
import glob
import importlib
import inspect
import json
import logging
import os
import pkgutil
import subprocess
from subprocess import CalledProcessError
import sys
import warnings
from collections import OrderedDict
warnings.filterwarnings('ignore', module='IPython.html.widgets')
from jupyter_core.paths import jupyter_config_path, jupyter_config_dir
from IPython.paths import get_ipython_dir
from ipykernel.kernelapp import IPKernelApp
from ipykernel.kernelbase import Kernel
from ipykernel.comm import CommManager
from traitlets.config import Application
from traitlets import Dict, Unicode
PY3 = sys.version_info[0] == 3
try:
from ipywidgets.widgets.widget import Widget
except ImportError:
Widget = None
try:
from IPython.utils.PyColorize import NeutralColors
RED = NeutralColors.colors["header"]
NORMAL = NeutralColors.colors["normal"]
except:
from IPython.core.excolors import TermColors
RED = TermColors.Red
NORMAL = TermColors.Normal
from IPython.core.formatters import IPythonDisplayFormatter
from IPython.display import HTML
from IPython.core.display import publish_display_data
from IPython.utils.tempdir import TemporaryDirectory
from .config import get_history_file, get_local_magics_dir
from .parser import Parser
class ExceptionWrapper(object):
    """Carrier for an error triple (ename, evalue, traceback).

    Returning an instance of this from a kernel's execute makes the kernel
    report a proper error to the frontend, mirroring ipykernel's behavior.
    """

    def __init__(self, ename, evalue, traceback):
        # Store the three pieces the Jupyter protocol expects in an error reply.
        self.ename = ename
        self.evalue = evalue
        self.traceback = traceback

    def __repr__(self):
        # "ENAME: EVALUE" on the first line, traceback below.
        return "%s: %s\n%s" % (self.ename, self.evalue, self.traceback)
def lazy_import_handle_comm_opened(*args, **kwargs):
    """Forward a comm_open message to ipywidgets' Widget class.

    No-op when ipywidgets could not be imported (module-level Widget is None).
    """
    if Widget is not None:
        Widget.handle_comm_opened(*args, **kwargs)
def get_metakernel():
    """
    Return the first MetaKernel instance created in this process.

    MetaKernel.__init__ records itself in ``MetaKernel.meta_kernel`` only
    when it is still None, so this is the first-constructed kernel (or None
    if no kernel has been created yet).
    """
    return MetaKernel.meta_kernel
class MetaKernel(Kernel):
"""The base MetaKernel class."""
app_name = 'metakernel'
identifier_regex = r'[^\d\W][\w\.]*'
func_call_regex = r'([^\d\W][\w\.]*)\([^\)\()]*\Z'
magic_prefixes = dict(magic='%', shell='!', help='?')
help_suffix = '?'
help_links = [
{
'text': "MetaKernel Magics",
'url': "https://metakernel.readthedocs.io/en/latest/source/README.html",
},
]
language_info = {
# 'mimetype': 'text/x-python',
# 'name': 'python',
# ------ If different from 'language':
# 'codemirror_mode': {
# "version": 2,
# "name": "ipython"
# }
# 'pygments_lexer': 'language',
# 'version' : "x.y.z",
# 'file_extension': '.py',
'help_links': help_links,
}
plot_settings = Dict(dict(backend='inline')).tag(config=True)
meta_kernel = None
    @classmethod
    def run_as_main(cls, *args, **kwargs):
        """Launch or install a metakernel.

        Modules implementing a metakernel subclass can use the following lines:

            if __name__ == '__main__':
                MetaKernelSubclass.run_as_main()
        """
        # Pass the subclass's app_name through to the kernel application.
        kwargs['app_name'] = cls.app_name
        MetaKernelApp.launch_instance(kernel_class=cls, *args, **kwargs)
    def __init__(self, *args, **kwargs):
        """Set up logging, magics, comm manager, history and in/out caches."""
        super(MetaKernel, self).__init__(*args, **kwargs)
        # Record the first instance for get_metakernel().
        if MetaKernel.meta_kernel is None:
            MetaKernel.meta_kernel = self
        if self.log is None:
            # This occurs if we call as a stand-alone kernel
            # (eg, not as a process)
            # FIXME: take care of input/output, eg StringIO
            #        make work without a session
            self.log = logging.Logger(".metakernel")
        else:
            # Write has already been set
            try:
                sys.stdout.write = self.Write
            except:
                pass  # Can't change stdout
        self.redirect_to_log = False
        self.shell = None
        # Magics re-applied to every cell until unset (see do_execute).
        self.sticky_magics = OrderedDict()
        # IPython-style input (_i*) and output (_, __, ___) caches.
        self._i = None
        self._ii = None
        self._iii = None
        self._ = None
        self.__ = None
        self.___ = None
        self.max_hist_cache = 1000
        self.hist_cache = []
        kwargs = {'parent': self,
                  'kernel': self}
        if not PY3:
            kwargs['shell'] = None
        self.comm_manager = CommManager(**kwargs)
        # Widgets attach lazily so ipywidgets remains an optional dependency.
        self.comm_manager.register_target('ipython.widget',
                                          lazy_import_handle_comm_opened)

        self.hist_file = get_history_file(self)
        self.parser = Parser(self.identifier_regex, self.func_call_regex,
                             self.magic_prefixes, self.help_suffix)
        comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
        for msg_type in comm_msg_types:
            self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
        self._ipy_formatter = IPythonDisplayFormatter()
        self.env = {}
        self.reload_magics()
        # provide a way to get the current instance
        self.set_variable("kernel", self)
        # Run command line filenames, if given:
        if self.parent is not None and self.parent.extra_args:
            # Temporarily route Write/Print output into the log while the
            # startup files execute.
            level = self.log.level
            self.log.setLevel("INFO")
            self.redirect_to_log = True
            self.Write("Executing files...")
            for filename in self.parent.extra_args:
                self.Write(" %s..." % filename)
                try:
                    self.do_execute_file(filename)
                except Exception as exc:
                    self.log.info(" %s" % (exc,))
            self.Write("Executing files: done!")
            self.log.setLevel(level)
            self.redirect_to_log = False
    def makeSubkernel(self, kernel):
        """
        Run this method in an IPython kernel to set
        this kernel's input/output settings.

        When an IPython shell is active, responses are routed through it;
        otherwise the given *kernel*'s session/display machinery is reused.
        """
        from IPython import get_ipython
        from IPython.display import display
        shell = get_ipython()
        if shell: # we are running under an IPython kernel
            self.session = shell.kernel.session
            self.Display = display
            self.send_response = self._send_shell_response
        else:
            self.session = kernel.session
            self.send_response = kernel.send_response
            self.Display = kernel.Display
#####################################
# Methods which provide kernel - specific behavior
    def set_variable(self, name, value):
        """
        Set a variable in the kernel language to a Python-typed value.

        No-op in the base class; language kernels override this.
        """
        pass
    def get_variable(self, name):
        """
        Look up a variable name and return a Python-typed value.

        No-op (returns None) in the base class; language kernels override this.
        """
        pass
    def repr(self, item):
        """Return the kernel-language representation of *item* (default: Python repr)."""
        return repr(item)
    def get_usage(self):
        """Return the usage statement for the kernel; subclasses should override."""
        return "This is a usage statement."
def get_kernel_help_on(self, info, level=0, none_on_fail=False):
"""Get help on an object. Called by the help magic."""
if none_on_fail:
return None
else:
return "Sorry, no help is available on '%s'." % info['code']
    def handle_plot_settings(self):
        """Apply the current plot settings; no-op in the base class."""
        pass
def get_local_magics_dir(self):
"""
Returns the path to local magics dir (eg ~/.ipython/metakernel/magics)
"""
base = get_ipython_dir()
return os.path.join(base, 'metakernel', 'magics')
    def get_completions(self, info):
        """
        Get completions from the kernel based on the parsed *info* dict.

        The base class offers no completions; language kernels override this.
        """
        return []
    def do_execute_direct(self, code, silent=False):
        """
        Execute *code* in the kernel language and return its result.

        No-op in the base class; every language kernel must override this.
        """
        pass
def do_execute_file(self, filename):
"""
Default code for running a file. Just opens the file, and sends
the text to do_execute_direct.
"""
with open(filename) as f:
return self.do_execute_direct("".join(f.readlines()))
def do_execute_meta(self, code):
"""
Execute meta code in the kernel. This uses the execute infrastructure
but allows JavaScript to talk directly to the kernel bypassing normal
processing.
When responding to the %%debug magic, the step and reset meta
commands can answer with a string in the format:
"highlight: [start_line, start_col, end_line, end_col]"
for highlighting expressions in the frontend.
"""
if code == "reset":
raise Exception("This kernel does not implement this meta command")
elif code == "stop":
raise Exception("This kernel does not implement this meta command")
elif code == "step":
raise Exception("This kernel does not implement this meta command")
elif code.startswith("inspect "):
raise Exception("This kernel does not implement this meta command")
else:
raise Exception("Unknown meta command: '%s'" % code)
    def initialize_debug(self, code):
        """
        This function is used with the %%debug magic for highlighting
        lines of code, and for initializing debug functions.

        Return the empty string if highlighting is not supported (the
        base-class default).
        """
        #return "highlight: [%s, %s, %s, %s]" % (line1, col1, line2, col2)
        return ""
def do_function_direct(self, function_name, arg):
"""
Call a function in the kernel language with args (as a single item).
"""
f = self.do_execute_direct(function_name)
return f(arg)
    def restart_kernel(self):
        """Restart the kernel; no-op in the base class (see do_shutdown)."""
        pass
############################################
# Implement base class methods
    def do_execute(self, code, silent=False, store_history=True, user_expressions=None,
                   allow_stdin=False):
        """Handle code execution: help lookups, magics, and plain code.

        https://jupyter-client.readthedocs.io/en/stable/messaging.html#execute
        """
        # Set the ability for the kernel to get standard-in:
        self._allow_stdin = allow_stdin
        # Create a default response:
        self.kernel_resp = {
            'status': 'ok',
            # The base class increments the execution count
            'execution_count': self.execution_count,
            'payload': [],
            'user_expressions': {},
        }

        # TODO: remove this when IPython fixes this
        # This happens at startup when the language is set to python
        if '_usage.page_guiref' in code:
            return self.kernel_resp

        if code and store_history:
            self.hist_cache.append(code.strip())

        if not code.strip():
            return self.kernel_resp

        info = self.parse_code(code)
        self.payload = []
        retval = None

        if info['magic'] and info['magic']['name'] == 'help':
            # Help request (? / ??): answer via a "page" payload, not a result.
            if info['magic']['type'] == 'line':
                level = 0
            else:
                level = 1
            text = self.get_help_on(code, level)
            if text:
                content = {
                    "start_line_number": 0,
                    "source": "page",
                }
                if isinstance(text, dict):
                    content["data"] = text ## {mime-type: ..., mime-type:...}
                    self.log.debug(str(text))
                else:
                    content["data"] = {"text/plain": text}
                    self.log.debug(text)
                self.payload = [content]

        elif info['magic'] or self.sticky_magics:
            retval = None
            # Sticky magics are prepended to every cell until unset.
            if self.sticky_magics:
                magics, code = _split_magics_code(code, self.magic_prefixes)
                code = magics + self._get_sticky_magics() + code
            stack = []
            # Handle magics:
            magic = None
            prefixes = ((self.magic_prefixes['shell'],
                         self.magic_prefixes['magic']))
            # Peel magics off the front of the cell, keeping them on a stack
            # so post_process runs in reverse (innermost-first) order below.
            while code.startswith(prefixes):
                magic = self.get_magic(code)
                if magic is not None:
                    stack.append(magic)
                    code = str(magic.get_code())
                    # signal to exit, maybe error or no block
                    if not magic.evaluate:
                        break
                else:
                    break
            # Execute code, if any:
            if ((magic is None or magic.evaluate) and code.strip() != ""):
                if code.startswith("~~META~~:"):
                    retval = self.do_execute_meta(code[9:].strip())
                else:
                    retval = self.do_execute_direct(code)
            # Post-process magics:
            for magic in reversed(stack):
                retval = magic.post_process(retval)
        else:
            # Plain code (or a META command from the frontend's debug channel).
            if code.startswith("~~META~~:"):
                retval = self.do_execute_meta(code[9:].strip())
            else:
                retval = self.do_execute_direct(code)

        self.post_execute(retval, code, silent)

        if 'payload' in self.kernel_resp:
            self.kernel_resp['payload'] = self.payload

        return self.kernel_resp
    def post_execute(self, retval, code, silent):
        """Post-execution actions.

        Maintain the IPython-style input/output caches (_i*, _, __, ___) and,
        unless *silent*, publish the result (or error) to the frontend.
        """
        # Handle in's
        self.set_variable("_iii", self._iii)
        self.set_variable("_ii", self._ii)
        self.set_variable("_i", code)
        self.set_variable("_i" + str(self.execution_count), code)
        self._iii = self._ii
        self._ii = code

        if (retval is not None):
            # --------------------------------------
            # Handle out's (only when non-null)
            self.set_variable("___", self.___)
            self.set_variable("__", self.__)
            self.set_variable("_", retval)
            self.set_variable("_" + str(self.execution_count), retval)
            self.___ = self.__
            self.__ = retval
            self.log.debug(retval)
            if isinstance(retval, ExceptionWrapper):
                # Kernel signalled an error: fill in the protocol error reply.
                self.kernel_resp['status'] = 'error'
                content = {
                    'traceback': retval.traceback,
                    'evalue': retval.evalue,
                    'ename': retval.ename,
                }
                self.kernel_resp.update(content)
                if not silent:
                    self.send_response(self.iopub_socket, 'error', content)
            else:
                try:
                    data = _formatter(retval, self.repr)
                except Exception as e:
                    self.Error(e)
                    return
                content = {
                    'execution_count': self.execution_count,
                    'data': data[0],
                    'metadata': data[1],
                }
                if not silent:
                    # Widgets render themselves through Display, not execute_result.
                    if Widget and isinstance(retval, Widget):
                        self.Display(retval)
                        return
                    self.send_response(self.iopub_socket, 'execute_result', content)
def do_history(self, hist_access_type, output, raw, session=None,
start=None, stop=None, n=None, pattern=None, unique=False):
"""
Access history at startup.
https://jupyter-client.readthedocs.io/en/stable/messaging.html#history
"""
with open(self.hist_file) as fid:
self.hist_cache = json.loads(fid.read() or "[]")
return {'status': 'ok', 'history': [(None, None, h) for h in self.hist_cache]}
    def do_shutdown(self, restart):
        """
        Shut down the app gracefully, saving history.

        https://jupyter-client.readthedocs.io/en/stable/messaging.html#kernel-shutdown
        """
        if self.hist_file:
            # Persist at most max_hist_cache of the most recent entries.
            with open(self.hist_file, "w") as fid:
                json.dump(self.hist_cache[-self.max_hist_cache:], fid)
        if restart:
            self.Print("Restarting kernel...")
            self.restart_kernel()
            self.reload_magics()
            self.Print("Done!")
        return {'status': 'ok', 'restart': restart}
def do_is_complete(self, code):
"""
Given code as string, returns dictionary with 'status' representing
whether code is ready to evaluate. Possible values for status are:
'complete' - ready to evaluate
'incomplete' - not yet ready
'invalid' - invalid code
'unknown' - unknown; the default unless overridden
Optionally, if 'status' is 'incomplete', you may indicate
an indentation string.
Example:
return {'status' : 'incomplete',
'indent': ' ' * 4}
https://jupyter-client.readthedocs.io/en/stable/messaging.html#code-completeness
"""
if code.startswith(self.magic_prefixes['magic']):
## force requirement to end with an empty line
if code.endswith("\n"):
return {'status' : 'complete'}
else:
return {'status' : 'incomplete'}
# otherwise, how to know is complete?
elif code.endswith("\n"):
return {'status' : 'complete'}
else:
return {'status' : 'incomplete'}
    def do_complete(self, code, cursor_pos):
        """Handle code completion for the kernel.

        Completes magic names, magic arguments, file paths, and — via
        ``get_completions`` — language identifiers.

        https://jupyter-client.readthedocs.io/en/stable/messaging.html#completion
        """
        info = self.parse_code(code, 0, cursor_pos)
        content = {
            'matches': [],
            'cursor_start': info['start'],
            'cursor_end': info['end'],
            'status': 'ok',
            'metadata': {}
        }

        matches = info['path_matches']

        if info['magic']:

            # if the last line contains another magic, use that
            line_info = self.parse_code(info['line'])
            if line_info['magic']:
                info = line_info

            if info['magic']['type'] == 'line':
                magics = self.line_magics
            else:
                magics = self.cell_magics

            if info['magic']['name'] in magics:
                # Known magic: delegate completion of its args/code to it.
                magic = magics[info['magic']['name']]
                info = info['magic']
                if info['type'] == 'cell' and info['code']:
                    info = self.parse_code(info['code'])
                else:
                    info = self.parse_code(info['args'])

                matches.extend(magic.get_completions(info))

            elif not info['magic']['code'] and not info['magic']['args']:
                # Bare, partial magic name: complete against registered magics.
                matches = []
                for name in magics.keys():
                    if name.startswith(info['magic']['name']):
                        pre = info['magic']['prefix']
                        matches.append(pre + name)
                        info['start'] -= len(pre)
                        info['full_obj'] = pre + info['full_obj']
                        info['obj'] = pre + info['obj']
        else:
            matches.extend(self.get_completions(info))

        # If the token extends past the cursor, prefer matches that cover the
        # whole token and widen cursor_end accordingly.
        if info['full_obj'] and len(info['full_obj']) > len(info['obj']):
            new_list = [m for m in matches if m.startswith(info['full_obj'])]
            if new_list:
                content['cursor_end'] = (content['cursor_end'] +
                                         len(info['full_obj']) -
                                         len(info['obj']))
                matches = new_list

        content["matches"] = sorted(matches)
        return content
    def do_inspect(self, code, cursor_pos, detail_level=0, omit_sections=()):
        """Object introspection.

        https://jupyter-client.readthedocs.io/en/stable/messaging.html#introspection
        """
        # NOTE(review): returns None (not a content dict) when cursor_pos is
        # past the end of the code — confirm callers tolerate that.
        if cursor_pos > len(code):
            return

        content = {'status': 'aborted', 'data': {}, 'found': False, 'metadata': {}}
        docstring = self.get_help_on(code, detail_level, none_on_fail=True,
                                     cursor_pos=cursor_pos)

        if docstring:
            content["status"] = "ok"
            content["found"] = True
            if isinstance(docstring, dict): ## {"text/plain": ..., mime-type: ...}
                content["data"] = docstring
                self.log.debug(str(docstring))
            else:
                content["data"] = {"text/plain": docstring}
                self.log.debug(docstring)

        return content
def clear_output(self, wait=False):
"""Clear the output of the kernel."""
self.send_response(self.iopub_socket, 'clear_output',
{'wait': wait})
    def Display(self, *objects, **kwargs):
        """Display one or more objects using rich display.

        Supports a `clear_output` keyword argument that clears the output before displaying.

        See https://ipython.readthedocs.io/en/stable/config/integrating.html?highlight=display#rich-display
        """
        if kwargs.get('clear_output'):
            self.clear_output(wait=True)

        for item in objects:
            if Widget and isinstance(item, Widget):
                self.log.debug('Display Widget')
                # Widgets are published as a widget-view mime bundle keyed by
                # the widget's comm model id.
                data = {
                    'text/plain': repr(item),
                    'application/vnd.jupyter.widget-view+json': {
                        'version_major': 2,
                        'version_minor': 0,
                        'model_id': item._model_id
                    }
                }
                content = {
                    'data': data,
                    'metadata': {}
                }
                self.send_response(
                    self.iopub_socket,
                    'display_data',
                    content
                )

            else:
                self.log.debug('Display Data')
                # Everything else goes through the mime formatter; a failure
                # there is reported as a kernel error and aborts the call.
                try:
                    data = _formatter(item, self.repr)
                except Exception as e:
                    self.Error(e)
                    return
                content = {
                    'data': data[0],
                    'metadata': data[1]
                }
                self.send_response(
                    self.iopub_socket,
                    'display_data',
                    content
                )
    def Print(self, *objects, **kwargs):
        """Print `objects` to the iopub stream, separated by `sep` and followed by `end`.

        Items can be strings or `Widget` instances.
        """
        # Widgets route through rich display; the remaining items are joined
        # into a single stdout stream message.
        for item in objects:
            if Widget and isinstance(item, Widget):
                self.Display(item)
        objects = [i for i in objects if not (Widget and isinstance(i, Widget))]
        message = format_message(*objects, **kwargs)

        stream_content = {
            'name': 'stdout', 'text': message}
        self.log.debug('Print: %s' % message.rstrip())
        if self.redirect_to_log:
            self.log.info(message.rstrip())
        else:
            self.send_response(self.iopub_socket, 'stream', stream_content)
def Write(self, message):
"""Write message directly to the iopub stdout with no added end character."""
stream_content = {
'name': 'stdout', 'text': message}
self.log.debug('Write: %s' % message)
if self.redirect_to_log:
self.log.info(message)
else:
self.send_response(self.iopub_socket, 'stream', stream_content)
def Error(self, *objects, **kwargs):
"""Print `objects` to stdout, separated by `sep` and followed by `end`.
Objects are cast to strings.
"""
message = format_message(*objects, **kwargs)
self.log.debug('Error: %s' % message.rstrip())
stream_content = {
'name': 'stderr',
'text': RED + message + NORMAL
}
if self.redirect_to_log:
self.log.info(message.rstrip())
else:
self.send_response(self.iopub_socket, 'stream', stream_content)
def Error_display(self, *objects, **kwargs):
    """Print string `objects` to stderr; render everything else with Display.

    String positional arguments are joined into one stderr message; any
    non-string positional argument is rendered via ``self.Display``. The
    same split is applied to keyword arguments: non-string values are
    displayed, string values are collected into ``msg_dict``.
    """
    msg = []
    msg_dict = {}
    for item in objects:
        if not isinstance(item, str):
            self.log.debug('Item type:{}'.format(type(item)) )
            self.Display(item)
        else:
            # msg is the error for str
            msg.append(item)
    # BUGFIX: iterating a dict yields keys only, so the original
    # ``for k,v in kwargs:`` raised ValueError on any key whose length
    # differed from 2; .items() yields the intended (key, value) pairs.
    for k, v in kwargs.items():
        if not isinstance(v, str):
            self.Display(k, v)
        else:
            msg_dict[k] = v
    message = format_message(' '.join(msg), **kwargs)
    if msg_dict:
        # NOTE(review): msg_dict is passed positionally, so its repr is
        # appended to the message text; confirm whether ``**msg_dict``
        # was intended here.
        message = format_message(' '.join(msg), msg_dict)
    self.log.debug('Error: %s' % message.rstrip())
    stream_content = {
        'name': 'stderr',
        'text': RED + message + NORMAL
    }
    if self.redirect_to_log:
        self.log.info(message.rstrip())
    else:
        self.send_response(self.iopub_socket, 'stream', stream_content)
##############################
# Private API and methods not likely to be overridden
def reload_magics(self):
    """Reload all of the line and cell magics.

    Clears the current registries, then (re)imports every ``*.py`` magic
    file found in: the kernel subclass's own ``magics`` directory, the
    user's local metakernel magics directory, and the package's bundled
    ``magics`` directory. Each module must expose ``register_magics``.
    """
    self.line_magics = {}
    self.cell_magics = {}
    # get base magic files and those relative to the current class
    # directory
    magic_files = []
    # Make a metakernel/magics if it doesn't exist:
    local_magics_dir = get_local_magics_dir()
    # Search all of the places there could be magics:
    try:
        paths = [os.path.join(os.path.dirname(
            os.path.abspath(inspect.getfile(self.__class__))), "magics")]
    except:
        paths = []
    paths += [local_magics_dir,
              os.path.join(os.path.dirname(os.path.abspath(__file__)), "magics")]
    for magic_dir in paths:
        sys.path.append(magic_dir)
        magic_files.extend(glob.glob(os.path.join(magic_dir, "*.py")))
    for magic in magic_files:
        basename = os.path.basename(magic)
        if basename == "__init__.py":
            continue
        try:
            # Re-import + reload so edits to magic files are picked up.
            module = __import__(os.path.splitext(basename)[0])
            importlib.reload(module)
            module.register_magics(self)
        except Exception as e:
            self.log.error("Can't load '%s': error: %s" % (magic, e))
def register_magics(self, magic_klass):
    """Instantiate `magic_klass` and register its line and cell magics.

    Every name reported by the instance's ``get_magics`` is mapped to
    that instance in the kernel's magic registries.
    """
    instance = magic_klass(self)
    for magic_name in instance.get_magics('line'):
        self.line_magics[magic_name] = instance
    for magic_name in instance.get_magics('cell'):
        self.cell_magics[magic_name] = instance
def send_response(self, *args, **kwargs):
    """Forward to the base kernel's send_response when a session exists."""
    ### if we are running via %parallel, we might not have a
    ### session
    if self.session:
        super(MetaKernel, self).send_response(*args, **kwargs)
def call_magic(self, line):
    """
    Given an line, such as "%download http://example.com/", parse
    and execute magic.

    Delegates to get_magic(); kept as the preferred public name.
    """
    return self.get_magic(line)
def get_magic(self, text):
    """Parse `text` and return the magic object handling its first line."""
    ## FIXME: Bad name, use call_magic instead.
    # if first line matches a magic,
    # call magic.call_magic() and return magic object
    info = self.parse_code(text)
    magic = self.line_magics['magic']
    return magic.get_magic(info)
def get_magic_args(self, text):
    """Parse `text` and return the argument string of its magic, if any."""
    # if first line matches a magic,
    # call magic.call_magic() and return magic args
    info = self.parse_code(text)
    magic = self.line_magics['magic']
    return magic.get_magic(info, get_args=True)
def get_help_on(self, expr, level=0, none_on_fail=False,
                cursor_pos=-1):
    """Get help for an expression using the help magic.

    `level` selects the depth of help; when `none_on_fail` is true,
    returns None instead of a fallback message on failure.
    """
    help_magic = self.line_magics['help']
    return help_magic.get_help_on(expr, level, none_on_fail, cursor_pos)
def parse_code(self, code, cursor_start=0, cursor_end=-1):
    """Parse code using our parser; returns the parser's info structure."""
    return self.parser.parse_code(code, cursor_start, cursor_end)
def _get_sticky_magics(self):
    """Return all sticky magics, one newline-terminated "name args" line each."""
    lines = [
        "%s %s\n" % (name, self.sticky_magics[name])
        for name in self.sticky_magics
    ]
    return "".join(lines)
def _send_shell_response(self, socket, stream_type, content):
    """Publish the text of a shell-channel response as rich display data."""
    publish_display_data({ 'text/plain': content['text'] })
class MetaKernelApp(IPKernelApp):
    """The MetaKernel launcher application.

    Extends IPKernelApp with Jupyter-aware config search paths and an
    ``install`` subcommand that writes the kernel spec (plus logo images)
    and registers it via ``jupyter kernelspec install``.
    """
    # Directory searched first for config files; defaults to the Jupyter
    # config dir.
    config_dir = Unicode()
    def _config_dir_default(self):
        return jupyter_config_dir()
    @property
    def config_file_paths(self):
        """Config search path: cwd first, then config_dir, then Jupyter's."""
        path = jupyter_config_path()
        if self.config_dir not in path:
            path.insert(0, self.config_dir)
        path.insert(0, os.getcwd())
        return path
    @classmethod
    def launch_instance(cls, *args, **kwargs):
        """Launch the app, taking its name from the `app_name` kwarg."""
        cls.name = kwargs.pop('app_name', 'metakernel')
        super(MetaKernelApp, cls).launch_instance(*args, **kwargs)
    @property
    def subcommands(self):
        # Slightly awkward way to pass the actual kernel class to the install
        # subcommand.
        class KernelInstallerApp(Application):
            kernel_class = self.kernel_class
            def initialize(self, argv=None):
                self.argv = argv
            def start(self):
                # Build a kernelspec directory in a temp dir, then hand it
                # to `jupyter kernelspec install`.
                kernel_spec = self.kernel_class().kernel_json
                with TemporaryDirectory() as td:
                    dirname = os.path.join(td, kernel_spec['name'])
                    os.mkdir(dirname)
                    with open(os.path.join(dirname, 'kernel.json'), 'w') as f:
                        json.dump(kernel_spec, f, sort_keys=True)
                    filenames = ['logo-64x64.png', 'logo-32x32.png']
                    name = self.kernel_class.__module__
                    for filename in filenames:
                        # Prefer the kernel package's own logos, falling
                        # back to the ones bundled with metakernel.
                        try:
                            data = pkgutil.get_data(name.split('.')[0],
                                                    'images/' + filename)
                        except (OSError, IOError):
                            data = pkgutil.get_data('metakernel',
                                                    'images/' + filename)
                        with open(os.path.join(dirname, filename), 'wb') as f:
                            f.write(data)
                    try:
                        subprocess.check_call(
                            [sys.executable, '-m', 'jupyter',
                             'kernelspec', 'install'] + self.argv + [dirname])
                    except CalledProcessError as exc:
                        sys.exit(exc.returncode)
        return {'install': (KernelInstallerApp, 'Install this kernel')}
def _split_magics_code(code, prefixes):
lines = code.split("\n")
ret_magics = []
ret_code = []
index = 0
shell = prefixes['shell']
magic = prefixes['magic']
while index < len(lines) and lines[index].startswith((shell, magic)):
ret_magics.append(lines[index])
index += 1
while index < len(lines):
ret_code.append(lines[index])
index += 1
ret_magics_str = "\n".join(ret_magics)
if ret_magics_str:
ret_magics_str += "\n"
ret_code_str = "\n".join(ret_code)
if ret_code_str:
ret_code_str += "\n"
return (ret_magics_str, ret_code_str)
def _formatter(data, repr_func):
reprs = {}
reprs['text/plain'] = repr_func(data)
lut = [("_repr_png_", "image/png"),
("_repr_jpeg_", "image/jpeg"),
("_repr_html_", "text/html"),
("_repr_markdown_", "text/markdown"),
("_repr_svg_", "image/svg+xml"),
("_repr_latex_", "text/latex"),
("_repr_json_", "application/json"),
("_repr_javascript_", "application/javascript"),
("_repr_pdf_", "application/pdf")]
for (attr, mimetype) in lut:
obj = getattr(data, attr, None)
if obj:
reprs[mimetype] = obj
format_dict = {}
metadata_dict = {}
for (mimetype, value) in reprs.items():
metadata = None
try:
value = value()
except Exception:
pass
if not value:
continue
if isinstance(value, tuple):
metadata = value[1]
value = value[0]
if isinstance(value, bytes):
try:
value = value.decode('utf-8')
except Exception:
value = base64.encodestring(value)
value = value.decode('utf-8')
try:
format_dict[mimetype] = str(value)
except:
format_dict[mimetype] = value
if metadata is not None:
metadata_dict[mimetype] = metadata
return (format_dict, metadata_dict)
def format_message(*objects, **kwargs):
    """
    Format a message like print() does.

    ``sep`` (default ``' '``) joins the stringified objects and ``end``
    (default ``'\\n'``) is appended to the result.
    """
    separator = kwargs.get('sep', ' ')
    terminator = kwargs.get('end', '\n')
    return separator.join(str(obj) for obj in objects) + terminator
class IPythonKernel(MetaKernel):
    """
    Class to make an IPython Kernel look like a MetaKernel Kernel.

    Stubs out the MetaKernel surface (magics, parser, Display/Error/Print)
    on top of plain IPython facilities so magics written for MetaKernel
    can run inside an IPython session.
    """
    language_info = {
        'mimetype': 'text/x-python',
        'name': 'python',
        'file_extension': '.py',
    }
    def __init__(self):
        """Set up a minimal magic registry and parser; no ZMQ shell."""
        from metakernel.magics.magic_magic import MagicMagic
        self.line_magics = {'magic': MagicMagic(self)}
        self.cell_magics = {}
        self.parser = Parser(self.identifier_regex, self.func_call_regex,
                             self.magic_prefixes, self.help_suffix)
        self.shell = None
    def Display(self, *objects, **kwargs):
        """Display an object in the kernel, using `IPython.display`."""
        from IPython.display import display
        return display(*objects, **kwargs)
    def Error(self, *objects, **kwargs):
        """Print `objects` to stderr, separated by `sep` and followed by `end`.
        """
        sys.stderr.write(format_message(*objects, **kwargs))
    def Print(self, *objects, **kwargs):
        """Print `objects` to stdout, separated by `sep` and followed by `end`.
        """
        sys.stdout.write(format_message(*objects, **kwargs))
def register_ipython_magics(*magics):
    """
    Loads all magics (or specified magics) that have a
    register_ipython_magics function defined.

    With no arguments, every ``*.py`` magic found in the local magics
    directory and the package's bundled ``magics`` directory is loaded;
    otherwise only the named magics (``<name>_magic.py``) are.
    """
    if magics:
        # filename is name of magic + "_magic.py"
        magics = [name + "_magic.py" for name in magics]
    local_magics_dir = get_local_magics_dir()
    # Search all of the places there could be magics:
    paths = [local_magics_dir,
             os.path.join(os.path.dirname(os.path.abspath(__file__)), "magics")]
    magic_files = []
    for magic_dir in paths:
        sys.path.append(magic_dir)
        magic_files.extend(glob.glob(os.path.join(magic_dir, "*.py")))
    for magic in magic_files:
        basename = os.path.basename(magic)
        if basename == "__init__.py":
            continue
        if len(magics) == 0 or basename in magics:
            module = __import__(os.path.splitext(basename)[0])
            importlib.reload(module)
            # Only modules that opt in via register_ipython_magics are hooked.
            if hasattr(module, "register_ipython_magics"):
                module.register_ipython_magics()
| StarcoderdataPython |
3526645 | # -*- coding: utf-8 -*-
from io import open
from os import path
from setuptools import setup
# Packaging script for the `taqu` task-queue library.
here = path.abspath(path.dirname(__file__))
# The README doubles as the PyPI long description.
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()
setup(
    name="taqu",
    version="1.0.0",
    description="Taqu Task Queue system",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url="https://github.com/lietu/taqu",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=["taqu", "taqu.aio"],
    keywords="task queue azure service bus",
    python_requires=">=3.6,<4",
    install_requires=["pydantic>=1.4,<2", "async_generator~=1.10"],
    # Azure Service Bus support is optional: pip install taqu[azure]
    extras_require={"azure": ["azure-servicebus>=0.50,<1"]},
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    project_urls={
        "Bug Reports": "https://github.com/lietu/taqu/issues",
        "Source": "https://github.com/lietu/taqu/",
    },
)
| StarcoderdataPython |
178507 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-13 20:17
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: replaces Assignment.current_lexicon
    # with a bounded current_comparison counter and adds a lexicon_order
    # string (dotted index sequence, unique per assignment).

    dependencies = [
        ('vacs', '0012_auto_20170711_0616'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='assignment',
            name='current_lexicon',
        ),
        migrations.AddField(
            model_name='assignment',
            name='current_comparison',
            # Comparison index is clamped to 1..16 at validation time.
            field=models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(16)]),
        ),
        migrations.AddField(
            model_name='assignment',
            name='lexicon_order',
            # One-off default used to back-fill existing rows, then dropped.
            field=models.CharField(default='1.2.3.4.5.6.7.8.9', max_length=200, unique=True),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
1841288 | import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import numpy as np
from collections import defaultdict
class EasyDict(dict):
    """Dictionary whose items are also readable/writable as attributes."""

    def __getattr__(self, name):
        # dict.get semantics: a missing key yields None rather than
        # raising AttributeError.
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        # Mirrors dict.__delitem__: raises KeyError for missing keys.
        del self[name]
class ConceptBank:
    """Stacks per-concept SVM vectors/intercepts/margin stats into tensors.

    `concept_dict` maps concept name -> (vector, _, _, intercept,
    margin_info) as produced by `learn_concept`; everything is moved to
    `device` for batched use.
    """
    def __init__(self, concept_dict, device):
        all_vectors, concept_names, all_intercepts = [], [], []
        all_margin_info = defaultdict(list)
        for k, (tensor, _, _, intercept, margin_info) in concept_dict.items():
            all_vectors.append(tensor)
            concept_names.append(k)
            all_intercepts.append(intercept.reshape(1, 1))
            for key, value in margin_info.items():
                # Per-sample train margins are large; only keep scalar stats.
                if key != "train_margins":
                    all_margin_info[key].append(value.reshape(1, 1))
        for key, val_list in all_margin_info.items():
            margin_tensor = torch.tensor(np.concatenate(val_list, axis=0), requires_grad=False).float().to(device)
            all_margin_info[key] = margin_tensor
        self.concept_info = EasyDict()
        self.concept_info.margin_info = EasyDict(dict(all_margin_info))
        # bank: one row per concept (stacked SVM normal vectors).
        self.concept_info.bank = torch.tensor(np.concatenate(all_vectors, axis=0), requires_grad=False).float().to(
            device)
        self.concept_info.norms = torch.norm(self.concept_info.bank, p=2, dim=1, keepdim=True).detach()
        self.concept_info.intercepts = torch.tensor(np.concatenate(all_intercepts, axis=0),
                                                    requires_grad=False).float().to(device)
        self.concept_info.concept_names = concept_names
        print("Concept Bank is initialized.")
    def __getattr__(self, item):
        # Delegate attribute access (bank, norms, ...) to concept_info.
        return self.concept_info[item]
def learn_concept(activations, c_labels, args, C=0.001):
    """Fit a linear SVM separating concept/non-concept activations.

    Returns (coef, train_acc, test_acc, intercept, margin_info) where
    margin_info summarizes the signed distances of training samples to
    the decision boundary. Assumes `args` provides `n_samples` (test
    split size) and `seed` — TODO confirm against callers.
    """
    # Learn concept vectors
    X_train, X_test, y_train, y_test = train_test_split(activations, c_labels, test_size=args.n_samples,
                                                        random_state=args.seed)
    svm = SVC(kernel="linear", C=C, probability=False)
    svm.fit(X_train, y_train)
    train_acc = svm.score(X_train, y_train)
    test_acc = svm.score(X_test, y_test)
    print(f"Accuracy - Training: {train_acc}, Test: {test_acc}")
    # Signed distance of each training point to the separating hyperplane.
    train_margin = ((np.dot(svm.coef_, X_train.T) + svm.intercept_) / np.linalg.norm(svm.coef_)).T
    #test_margin = ((np.dot(svm.coef_, X_test.T) + svm.intercept_) / np.linalg.norm(svm.coef_)).T
    margin_info = {"max": np.max(train_margin),
                   "min": np.min(train_margin),
                   "pos_mean": np.mean(train_margin[train_margin > 0]),
                   "pos_std": np.std(train_margin[train_margin > 0]),
                   "neg_mean": np.mean(train_margin[train_margin < 0]),
                   "neg_std": np.std(train_margin[train_margin < 0]),
                   "q_90": np.quantile(train_margin, 0.9),
                   # "train_margins": train_margin,
                   "q_10": np.quantile(train_margin, 0.1)
                   }
    # print test accuracy
    print(train_acc, test_acc)
    return svm.coef_, train_acc, test_acc, svm.intercept_, margin_info
def get_concept_scores_mv_valid(tensor, labels, concept_bank, model_bottom, model_top,
                                alpha=1e-4, beta=1e-4, n_steps=100,
                                lr=1e-1, momentum=0.9, enforce_validity=True,
                                kappa="mean"):
    """Optimize per-concept edit weights W that flip the model's prediction.

    A conceptual counterfactual: starting from the embedding produced by
    `model_bottom`, learns weights W over the concept bank vectors such
    that `model_top(embedding + W @ normalized_concepts)` predicts
    `labels`, under elastic-net regularization (`alpha` L1, `beta` L2).
    When `enforce_validity` is set, W is projected after each step so the
    edited embedding's margin to each concept SVM stays within observed
    training-margin bounds (mean-based if kappa == "mean", else zero).
    Returns an EasyDict with `success`, `concept_scores`,
    `concept_scores_list` (names sorted by score, descending) and `W`.
    """
    max_margins = concept_bank.margin_info.max
    min_margins = concept_bank.margin_info.min
    concept_norms = concept_bank.norms
    concept_intercepts = concept_bank.intercepts
    concepts = concept_bank.bank
    concept_names = concept_bank.concept_names.copy()
    device = tensor.device
    embedding = model_bottom(tensor)
    model_shape = embedding.shape
    embedding = embedding.detach().flatten(1)
    criterion = nn.CrossEntropyLoss()
    #criterion = nn.BCEWithLogitsLoss()
    # One scalar weight per concept.
    W = nn.Parameter(torch.zeros(1, concepts.shape[0], device=device), requires_grad=True)
    # Normalize the concept vectors
    normalized_C = max_margins * concepts / concept_norms
    # Compute the current distance of the sample to decision boundaries of SVMs
    margins = (torch.matmul(concepts, embedding.T) + concept_intercepts) / concept_norms
    # Computing constraints for the concepts scores
    W_clamp_max = (max_margins * concept_norms - concept_intercepts - torch.matmul(concepts, embedding.T))
    W_clamp_min = (min_margins * concept_norms - concept_intercepts - torch.matmul(concepts, embedding.T))
    W_clamp_max = (W_clamp_max / (max_margins * concept_norms)).T
    W_clamp_min = (W_clamp_min / (max_margins * concept_norms)).T
    if enforce_validity:
        # Concepts already past the reference margin may not be pushed further.
        if kappa == "mean":
            W_clamp_max[(margins > concept_bank.margin_info.pos_mean).T] = 0.
            W_clamp_min[(margins < concept_bank.margin_info.neg_mean).T] = 0.
        else:
            W_clamp_max[(margins > torch.zeros_like(margins)).T] = 0.
            W_clamp_min[(margins < torch.zeros_like(margins)).T] = 0.
        zeros = torch.zeros_like(W_clamp_max)
        W_clamp_max = torch.where(W_clamp_max < zeros, zeros, W_clamp_max).detach().flatten(1)
        W_clamp_min = torch.where(W_clamp_min > zeros, zeros, W_clamp_min).detach().flatten(1)
    optimizer = optim.SGD([W], lr=lr, momentum=momentum)
    history = []
    for i in range(n_steps):
        optimizer.zero_grad()
        new_embedding = embedding + torch.matmul(W, normalized_C)
        new_out = model_top(new_embedding.view(*model_shape))
        l1_loss = torch.norm(W, dim=1, p=1)/W.shape[1]
        l2_loss = torch.norm(W, dim=1, p=2)/W.shape[1]
        ce_loss = criterion(new_out, labels)
        loss = ce_loss + l1_loss * alpha + l2_loss * beta
        history.append(f"{ce_loss.detach().item()}, L1:{l1_loss.detach().item()}, L2: {l2_loss.detach().item()}")
        loss.backward()
        optimizer.step()
        if enforce_validity:
            # Projected gradient step: clamp W back into the valid box.
            W_projected = torch.where(W < W_clamp_min, W_clamp_min, W).detach()
            W_projected = torch.where(W > W_clamp_max, W_clamp_max, W_projected)
            W.data = W_projected.detach()
            W.grad.zero_()
    final_emb = embedding + torch.matmul(W, normalized_C)
    W = W[0].detach().cpu().numpy().tolist()
    concept_scores = dict()
    for i, n in enumerate(concept_names):
        concept_scores[n] = W[i]
    concept_names = sorted(concept_names, key=concept_scores.get, reverse=True)
    new_out, orig_out = model_top(final_emb.view(*model_shape)), model_top(embedding.view(*model_shape))
    # Check if the counterfactual can flip the label
    if (new_out.argmax(dim=1) == labels):
        success = True
    else:
        success = False
    opt_result = {"success": success,
                  "concept_scores": concept_scores,
                  "concept_scores_list": concept_names,
                  "W": np.array(W)}
    opt_result = EasyDict(opt_result)
    return opt_result
| StarcoderdataPython |
6511901 | <reponame>face-pass/KAO-PASS-backend
import os
# Database connection settings, read entirely from environment variables.
# NOTE(review): '<PASSWORD>' looks like an anonymization placeholder for
# the real env-var name — confirm against the deployment environment.
config = {
    'host': os.environ['HOST'],
    'port': int(os.environ['PORT']),
    'user': os.environ['USER'],
    'password': os.environ['<PASSWORD>'],
    'database': os.environ['DB'],
    # SSL CA certificate path, nested per the MySQL driver's expected shape.
    'ssl': {'ssl':
        {'ca': os.environ['SSL']}
    }
}
| StarcoderdataPython |
3286856 |
# I am a module package (for mutexcntl)
# Note that my parent dir (Errata) doesn't need a __init__.py,
# nor does Errata need to be on PYTHONPATH -- it's the home
# dir ('.') of the cgi script invoked on browse/submit requests,
# so module/package searches start there automatically; but
# if mutexcntl is ever used elsewhere, this will need to change;
| StarcoderdataPython |
4942968 | <filename>eosim/gui/mainapplication.py<gh_stars>0
import tkinter as tk
from tkinter import ttk
from .welcomeframe import WelcomeFrame
from .configure.cfframe import ConfigureFrame
from .executeframe import ExecuteFrame
from .visualize.visualizeframe import VisualizeFrame
from .operations.operationsframe import OperationsFrame
from eosim import config
import eosim.gui.helpwindow as helpwindow
import tkinter.scrolledtext
import os
import sys
import logging
import time
import json
import logging
logger = logging.getLogger(__name__)
class MainApplication:
    """Root window controller for the Earth Observation Simulator GUI.

    Builds the menubar, left sidebar navigation, stacked content frames
    (Welcome/Configure/Execute/Visualize/Operations) and a scrolled
    message area that captures stdout/stderr and logging output.
    """
    def __init__(self, parent, loglevel=logging.INFO):
        # Route uncaught Tk callback exceptions into the logging system.
        parent.report_callback_exception = self.report_callback_exception
        self.parent = parent
        self.parent.title("Earth Observation Simulator")
        dir_path = os.path.dirname(os.path.realpath(__file__))
        #self.parent.iconbitmap(True, dir_path+"/../../icon.ico")
        self.parent.geometry(config.GuiStyle.main_window_geom)
        MainApplication.build_main_window(self, loglevel)
        config.GuiStyle() # configure all the styles used in the GUI (shall affect the other modules too)
    def report_callback_exception(self, exc_type, exc_value, exc_traceback):
        """Log an uncaught exception raised inside a Tk callback."""
        logging.error(
            "Uncaught exception",
            exc_info=(exc_type, exc_value, exc_traceback)
        )
    def build_main_window(self, loglevel):
        """ This function configures the various frames within the self.parent (root) window."""
        TopMenuBar(self.parent)
        # create a parent frame to encompass all frames
        self.parent_frame = ttk.Frame(self.parent)
        self.parent_frame.grid(row=0, column=0, padx=10, pady=10)
        parent_frame_width = config.GuiStyle.main_win_width - 20
        parent_frame_height = config.GuiStyle.main_win_height - 20
        # parent window grid configure
        self.parent_frame.rowconfigure(0,weight=1)
        self.parent_frame.rowconfigure(1,weight=1)
        # left-sidebar frame
        # grid configure
        lsidebar = ttk.Frame(self.parent_frame, width=0.2*parent_frame_width, height=0.9*parent_frame_height, style="lsidebar.TFrame")
        lsidebar.grid_propagate(0)
        lsidebar.grid(row=0, column=0, rowspan=2, sticky='nswe')
        lsidebar.columnconfigure(0,weight=1)
        lsidebar.rowconfigure(0,weight=1)
        lsidebar.rowconfigure(1,weight=1)
        lsidebar.rowconfigure(2,weight=1)
        lsidebar.rowconfigure(3,weight=1)
        lsidebar.rowconfigure(4,weight=1)
        lsidebar.rowconfigure(5,weight=8)
        # Navigation buttons; hovering updates the contextual help window.
        welcome_btn = ttk.Button(lsidebar, text='WELCOME',command=lambda: self.show_frame("WelcomeFrame"), style="lsidebar.TButton")
        welcome_btn.grid(row=0, column=0, sticky='nswe', padx=5, pady=5)
        welcome_btn.bind('<Enter>',lambda event, widget_id="welcome": helpwindow.update_help_window(event, widget_id))
        configure_btn = ttk.Button(lsidebar, text='CONFIGURE',command=lambda: self.show_frame("ConfigureFrame"), style="lsidebar.TButton")
        configure_btn.grid(row=1, column=0, sticky='nswe', padx=5, pady=5)
        configure_btn.bind('<Enter>',lambda event, widget_id="configure": helpwindow.update_help_window(event, widget_id))
        execute_btn = ttk.Button(lsidebar, text='EXECUTE',command=lambda: self.show_frame("ExecuteFrame"), style="lsidebar.TButton")
        execute_btn.grid(row=2, column=0, sticky='nswe', padx=5, pady=5)
        execute_btn.bind('<Enter>',lambda event, widget_id="execute": helpwindow.update_help_window(event, widget_id))
        visualize_btn = ttk.Button(lsidebar, text='VISUALIZE',command=lambda: self.show_frame("VisualizeFrame"), style="lsidebar.TButton")
        visualize_btn.grid(row=3, column=0, sticky='nswe', padx=5, pady=5)
        visualize_btn.bind('<Enter>',lambda event, widget_id="visualize": helpwindow.update_help_window(event, widget_id))
        operations_btn = ttk.Button(lsidebar, text='OPERATIONS',command=lambda: self.show_frame("OperationsFrame"), style="lsidebar.TButton")
        operations_btn.grid(row=4, column=0, sticky='nswe', padx=5, pady=5)
        # message area frame
        # grid configure
        messagearea = ttk.Frame(self.parent_frame, width= 0.8*parent_frame_width, height=0.2*parent_frame_height, style ='messagearea.TFrame')
        messagearea.grid_propagate(0)
        messagearea.grid(row=1, column=1, columnspan=1, sticky='nswe')
        messagearea.columnconfigure(0,weight=1)
        messagearea.rowconfigure(0,weight=1)
        messages = tk.scrolledtext.ScrolledText(messagearea)
        messages.grid(row=0, column=0, sticky='nsew')
        messages.configure(state ='disabled') # Making the text read only
        # redirect stdout, logging messages to messages ScrolledText widget
        sys.stdout = TextRedirector(messages, "stdout")
        sys.stderr = TextRedirector(messages, "stderr")
        logging.basicConfig(level=loglevel, handlers=[
            logging.FileHandler("debug.log", 'w'),
            logging.StreamHandler(stream=sys.stdout)
        ])
        logging.info("Application started at: "+ str(time.asctime()))
        # main content area
        # the container is where we'll stack a bunch of frames
        # on top of each other, then the one we want visible
        # will be raised above the others
        container = ttk.Frame(self.parent_frame, width=0.6*parent_frame_width, height=0.8*parent_frame_height)
        # grid configure
        container.grid_propagate(0)
        container.grid(row=0, column=1, sticky='nswe')
        container.columnconfigure(0,weight=1)
        container.rowconfigure(0,weight=1)
        self.frames = {}
        # put all of the pages in the same location;
        # the one on the top of the stacking order
        # will be the one that is visible.
        for F in (WelcomeFrame, ConfigureFrame, ExecuteFrame, VisualizeFrame, OperationsFrame):
            page_name = F.__name__
            frame = F(parent=container, controller=self)
            self.frames[page_name] = frame
            # put all of the pages in the same location;
            # the one on the top of the stacking order
            # will be the one that is visible.
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame("WelcomeFrame")
    def show_frame(self, page_name):
        '''Show a frame for the given page name'''
        frame = self.frames[page_name]
        frame.tkraise()
def donothing():
    """Placeholder callback for menu entries that are not implemented yet."""
    return None
def click_new_sim():
    """ Configure the working directory where the MissionSpecs.json file, auxillary i/p files and the results are to be stored.

    Opens a directory-picker dialog and records the chosen folder as the
    mission's output directory.
    """
    sim_dir_path = tkinter.filedialog.askdirectory(initialdir=os.getcwd(), title="Please select an empty folder:")
    config.mission.update_settings(outDir=sim_dir_path+"/")
    logger.info("New workspace directory selected.")
def open_sim():
    """ Load a previously run simulation.

    Opens a directory-picker, reads ``MissionSpecs.json`` from the chosen
    directory and rebuilds the mission configuration from it. When the
    file is missing or unparsable the directory is treated as a fresh
    workspace instead.

    .. todo:: Add checks to see if the any required auxillary files are present (grid files, ground-station i/p files, etc).
    """
    sim_dir_path = tkinter.filedialog.askdirectory(initialdir=os.getcwd(), title="Please select the simulation directory:")
    mission_dict = None
    try:
        with open(sim_dir_path+'/MissionSpecs.json') as f:
            mission_dict = json.load(f)
    except (OSError, ValueError):
        # OSError: file missing/unreadable; ValueError covers
        # json.JSONDecodeError. (Was a bare ``except:``, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        logger.error('Selected directory does not contain the required MissionSpecs.json file.')
    if mission_dict is not None:
        config.mission = config.mission.from_dict(mission_dict)
        config.mission.update_settings(outDir=sim_dir_path+"/") # as a precaution since the folder could be copied from elsewhere, update the output-directory specification.
        logger.info("Simulation loaded.")
        logger.warning("Incomplete mission specifications (e.g. missing propagator settings) shall be populated with default values.")
    else:
        config.mission.update_settings(outDir=sim_dir_path+"/")
        logger.info("Directory is treated as a new workspace.")
    return
def click_save():
    """ Save the mission configuration as a JSON file.

    Writes ``MissionSpecs.json`` into the configured workspace directory;
    a no-op (with a hint to the user) when no workspace is selected yet.
    """
    wdir = config.mission.settings.outDir
    if wdir is None:
        logger.info("Please select the workspace directory in the menubar by going to Sim->New.")
        return
    with open(wdir+'MissionSpecs.json', 'w', encoding='utf-8') as f:
        json.dump(config.mission.to_dict(), f, ensure_ascii=False, indent=4)
    logger.info("Mission configuration Saved.")
class TopMenuBar:
    """Builds the root window's menubar (Sim and Help menus)."""
    def __init__(self, parent):
        self.parent = parent
        menubar = tk.Menu(self.parent)
        # Sim menu: workspace management.
        filemenu = tk.Menu(menubar, tearoff=0)
        filemenu.add_command(label="New", command=click_new_sim)
        filemenu.add_command(label="Open", command=open_sim)
        filemenu.add_command(label="Save", command=click_save)
        filemenu.add_command(label="Save as...", command=donothing)  # not implemented yet
        filemenu.add_separator()
        #filemenu.add_command(label="Exit", command=self.parent.quit)
        menubar.add_cascade(label="Sim", menu=filemenu)
        # Help menu.
        helpmenu = tk.Menu(menubar, tearoff=0)
        helpmenu.add_command(label="Help Window", command=lambda: helpwindow.click_help(parent))
        helpmenu.add_command(label="About...", command=donothing)  # not implemented yet
        menubar.add_cascade(label="Help", menu=helpmenu)
        self.parent.config(menu=menubar)
class TextRedirector(object):
def __init__(self, widget, tag="stdout"):
self.widget = widget
self.tag = tag
def write(self, str):
self.widget.configure(state="normal")
self.widget.insert("end", str, (self.tag,))
self.widget.see("end")
self.widget.insert("end",'\n')
self.widget.configure(state="disabled") | StarcoderdataPython |
5052909 | <reponame>aldanor/skggm<gh_stars>100-1000
# Packaging script for skggm (Gaussian graphical models for scikit-learn).
# Builds the C++ pyquic extension with Cython + platform-specific flags.
from __future__ import print_function
import sys
from setuptools import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import platform
# numpy, scipy and Cython must already be importable at build time;
# fail fast with a clear message instead of a deep traceback.
try:
    import numpy # NOQA
except ImportError:
    print('numpy is required during installation')
    sys.exit(1)
try:
    import scipy # NOQA
except ImportError:
    print('scipy is required during installation')
    sys.exit(1)
try:
    import Cython # NOQA
except ImportError:
    print('Cython is required during installation')
    sys.exit(1)
import numpy as np
# Compiler/linker flags: Accelerate framework on macOS, LAPACK elsewhere.
if platform.system() == 'Darwin':
    extra_compile_args = [
        '-I/System/Library/Frameworks/vecLib.framework/Headers'
    ]
    if 'ppc' in platform.machine():
        extra_compile_args.append('-faltivec')
    extra_link_args = ["-Wl,-framework", "-Wl,Accelerate"]
    include_dirs = [np.get_include()]
else:
    include_dirs = [np.get_include(), "/usr/local/include"]
    extra_compile_args = ['-msse2', '-O2', '-fPIC', '-w']
    extra_link_args = ["-llapack"]
# pyquic extension
# --> inverse_covariance.pyquic.pyquic (contains func quic)
ext_module = Extension(
    name="pyquic.pyquic",  # note: we use the ext_package= flag in setup()
    sources=[
        "inverse_covariance/pyquic/QUIC.C",
        "inverse_covariance/pyquic/pyquic.pyx"],
    include_dirs=include_dirs,
    extra_compile_args=extra_compile_args,
    extra_link_args=extra_link_args,
    language="c++"
)
# Runtime dependencies are kept in requirements.txt (single source of truth).
with open('requirements.txt') as f:
    INSTALL_REQUIRES = [l.strip() for l in f.readlines() if l]
setup(
    name='skggm',
    version='0.2.8',
    description='Gaussian graphical models for scikit-learn.',
    author='<NAME> and <NAME>',
    license='MIT',
    packages=[
        'inverse_covariance',
        'inverse_covariance.profiling',
        'inverse_covariance.pyquic'],
    install_requires=INSTALL_REQUIRES,
    url='https://github.com/skggm/skggm',
    author_email='<EMAIL>',
    ext_package='inverse_covariance',
    ext_modules=cythonize(ext_module),
)
| StarcoderdataPython |
328899 | # spaceconfig = {"usemodules" : ["unicodedata"]}
import ast
import warnings
def test_error_unknown_code():
    """An unknown format-spec code inside an f-string raises ValueError."""
    def fn():
        f'{1000:j}'
    exc_info = raises(ValueError, fn)
    assert str(exc_info.value).startswith("Unknown format code")
def test_ast_lineno_and_col_offset():
    """f-string expressions carry the position of the braces in the source."""
    m = ast.parse("\nf'a{x}bc{y}de'")
    x_ast = m.body[0].value.values[1].value
    y_ast = m.body[0].value.values[3].value
    assert x_ast.lineno == 2
    assert x_ast.col_offset == 4
    assert y_ast.lineno == 2
    assert y_ast.col_offset == 9
def test_ast_lineno_and_col_offset_unicode():
    """col_offset is measured in UTF-8 bytes, not characters."""
    s = "\nf'α{χ}βγ{ψ}δε'"
    assert s.encode('utf-8') ==b"\nf'\xce\xb1{\xcf\x87}\xce\xb2\xce\xb3{\xcf\x88}\xce\xb4\xce\xb5'"
    m = ast.parse(s)
    x_ast = m.body[0].value.values[1].value
    y_ast = m.body[0].value.values[3].value
    assert x_ast.lineno == 2
    assert x_ast.col_offset == 5
    assert y_ast.lineno == 2
    assert y_ast.col_offset == 13
def test_ast_mutiline_lineno_and_col_offset():
    """Expressions in a triple-quoted f-string report per-line positions."""
    m = ast.parse("\n\nf'''{x}\nabc{y}\n{\nz}''' \n\n\n")
    x_ast = m.body[0].value.values[0].value
    y_ast = m.body[0].value.values[2].value
    z_ast = m.body[0].value.values[4].value
    assert x_ast.lineno == 3
    assert x_ast.col_offset == 5
    assert y_ast.lineno == 4
    assert y_ast.col_offset == 5
    assert z_ast.lineno == 6
    assert z_ast.col_offset == 0
def test_double_braces():
    """Doubled braces in f-strings are literal '{' / '}' characters."""
    assert f'{{' == '{'
    assert f'a{{' == 'a{'
    assert f'{{b' == '{b'
    assert f'a{{b' == 'a{b'
    assert f'}}' == '}'
    assert f'a}}' == 'a}'
    assert f'}}b' == '}b'
    assert f'a}}b' == 'a}b'
    assert f'{{}}' == '{}'
    assert f'a{{}}' == 'a{}'
    assert f'{{b}}' == '{b}'
    assert f'{{}}c' == '{}c'
    assert f'a{{b}}' == 'a{b}'
    assert f'a{{}}c' == 'a{}c'
    assert f'{{b}}c' == '{b}c'
    assert f'a{{b}}c' == 'a{b}c'
    assert f'{{{10}' == '{10'
    assert f'}}{10}' == '}10'
    assert f'}}{{{10}' == '}{10'
    assert f'}}a{{{10}' == '}a{10'
    assert f'{10}{{' == '10{'
    assert f'{10}}}' == '10}'
    assert f'{10}}}{{' == '10}{'
    assert f'{10}}}a{{' '}' == '10}a{}'
    # Inside of strings, don't interpret doubled brackets.
    assert f'{"{{}}"}' == '{{}}'
    exc_info = raises(TypeError, eval, "f'{ {{}} }'") # dict in a set
    assert 'unhashable' in str(exc_info.value)
def test_backslashes_in_string_part():
    """Escape sequences in the literal part of f-strings behave like in
    plain strings, including the raw (fr/rf) variants and \\N{...} names."""
    assert f'\t' == '\t'
    assert r'\t' == '\\t'
    assert rf'\t' == '\\t'
    assert f'{2}\t' == '2\t'
    assert f'{2}\t{3}' == '2\t3'
    assert f'\t{3}' == '\t3'
    assert f'\u0394' == '\u0394'
    assert r'\u0394' == '\\u0394'
    assert rf'\u0394' == '\\u0394'
    assert f'{2}\u0394' == '2\u0394'
    assert f'{2}\u0394{3}' == '2\u03943'
    assert f'\u0394{3}' == '\u03943'
    assert f'\U00000394' == '\u0394'
    assert r'\U00000394' == '\\U00000394'
    assert rf'\U00000394' == '\\U00000394'
    assert f'{2}\U00000394' == '2\u0394'
    assert f'{2}\U00000394{3}' == '2\u03943'
    assert f'\U00000394{3}' == '\u03943'
    assert f'\N{GREEK CAPITAL LETTER DELTA}' == '\u0394'
    assert f'{2}\N{GREEK CAPITAL LETTER DELTA}' == '2\u0394'
    assert f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}' == '2\u03943'
    assert f'\N{GREEK CAPITAL LETTER DELTA}{3}' == '\u03943'
    assert f'2\N{GREEK CAPITAL LETTER DELTA}' == '2\u0394'
    assert f'2\N{GREEK CAPITAL LETTER DELTA}3' == '2\u03943'
    assert f'\N{GREEK CAPITAL LETTER DELTA}3' == '\u03943'
    assert f'\x20' == ' '
    assert r'\x20' == '\\x20'
    assert rf'\x20' == '\\x20'
    assert f'{2}\x20' == '2 '
    assert f'{2}\x20{3}' == '2 3'
    assert f'\x20{3}' == ' 3'
    assert f'2\x20' == '2 '
    assert f'2\x203' == '2 3'
    assert f'\x203' == ' 3'
    with warnings.catch_warnings(record=True) as w:  # invalid escape sequence
        warnings.simplefilter("always", DeprecationWarning)
        value = eval(r"f'\{6*7}'")
    assert len(w) == 1 and w[0].category == DeprecationWarning
    assert value == '\\42'
    assert f'\\{6*7}' == '\\42'
    assert fr'\{6*7}' == '\\42'
    AMPERSAND = 'spam'
    # Get the right unicode character (&), or pick up local variable
    # depending on the number of backslashes.
    assert f'\N{AMPERSAND}' == '&'
    assert f'\\N{AMPERSAND}' == '\\Nspam'
    assert fr'\N{AMPERSAND}' == '\\Nspam'
    assert f'\\\N{AMPERSAND}' == '\\&'
| StarcoderdataPython |
8161751 | <reponame>donsheehy/dsviz<gh_stars>1-10
from ds2viz.primitives import *
# Fallback drawing style applied when an element specifies no style of
# its own. Colors are (r, g, b) floats in [0, 1]; radius and stroke_width
# are in drawing units; the font/text keys mirror the SVG attributes of
# the same names.
styledefaults = {'radius': 3,
                 'fill': (1,1,1),
                 'stroke': (0,0,0),
                 'stroke_width' : 0,
                 'font_size': 24,
                 'font_family' : 'monospace',
                 'font_weight': 'normal',
                 'text_anchor' : 'middle',
                 'dominant_baseline' : 'central',
                 }
class ImageEngine:
    """Dispatches each drawing primitive of a canvas to a draw_* method.

    The draw_circle/draw_polyline/draw_polygon/draw_text/draw_bezier
    methods are not defined here — presumably subclasses provide them
    for a concrete output format (TODO confirm in subclasses).
    """
    def __init__(self, canvas, filename = None):
        self.filename = filename
        for p in canvas.primitives():
            if isinstance(p, DP_Circle):
                self.draw_circle(p)
            elif isinstance(p, DP_Polyline):
                self.draw_polyline(p)
            elif isinstance(p, DP_Polygon):
                self.draw_polygon(p)
            elif isinstance(p, DP_Text):
                self.draw_text(p)
            elif isinstance(p, DP_Bezier):
                self.draw_bezier(p)
            else:
                raise TypeError('The drawing primitive has an unknown type.')
| StarcoderdataPython |
1744421 | """Solon decorators"""
import logging
import pylons
from decorator import decorator
from pylons.controllers.util import abort
from tw2.core import ValidationError
log = logging.getLogger(__name__)
def in_group(group):
    """Decorator factory: require a logged-in user who belongs to *group*.

    Aborts the request with HTTP 401 when nobody is logged in or the user
    is not a member of the group.
    """
    def wrapper(func, *args, **kwargs):
        user = pylons.tmpl_context.user
        if not user:
            log.debug("No user logged in for permission restricted function")
            abort(401, "Not Authorized")
        if user.in_group(group):
            log.debug("User %s verified in group %s", user, group)
            return func(*args, **kwargs)
        else:
            log.debug("User %s not in group %s", user, group)
            abort(401, "Not Authorized")
    return decorator(wrapper)
@decorator
def logged_in(func, *args, **kwargs):
    """Require an authenticated user; abort with HTTP 401 otherwise."""
    user = pylons.tmpl_context.user
    if user:
        return func(*args, **kwargs)
    # abort() raises, so this never falls through.
    abort(401, "Not Authorized")
def validate(form, error_handler):
    """Validate request params with a tw2 *form* before calling the action.

    On success the validated values are stored on ``self.form_result`` and the
    wrapped action runs.  On :class:`ValidationError` the request method is
    forced to GET and the controller method named *error_handler* is invoked
    to re-render the form (tw2 keeps the failed widget on the request).
    """
    @decorator
    def wrapper(func, self, *args, **kwargs):
        try:
            self.form_result = form.validate(pylons.request.params.mixed())
        # ``except X, e`` is Python-2-only syntax; the bound name was unused,
        # so drop it — this form works on both Python 2.6+ and Python 3.
        except ValidationError:
            # Don't bother saving the widget, it's saved by tw2 on the request object anyway
            pylons.request.method = 'GET'
            return getattr(self, error_handler)(*args, **kwargs)
        return func(self, *args, **kwargs)
    return wrapper
| StarcoderdataPython |
376798 | <reponame>znerol/spreadflow-delta
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from testtools import TestCase
from spreadflow_delta.proc import Filter, Extractor
class SpreadflowDeltaTestCase(TestCase):
    """Placeholder test case for Filter/Extractor; no tests defined yet."""
    pass
| StarcoderdataPython |
11304700 | <filename>apple/tvos.bzl
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bazel rules for creating tvOS applications and bundles."""
load(
"@build_bazel_rules_apple//apple/internal/testing:tvos_rules.bzl",
_tvos_ui_test = "tvos_ui_test",
_tvos_unit_test = "tvos_unit_test",
)
load(
"@build_bazel_rules_apple//apple/internal:binary_support.bzl",
"binary_support",
)
load(
"@build_bazel_rules_apple//apple/internal:tvos_rules.bzl",
_tvos_application = "tvos_application",
_tvos_extension = "tvos_extension",
_tvos_framework = "tvos_framework",
)
def tvos_application(name, **kwargs):
    """Builds and bundles a tvOS application.

    Args:
        name: Target name.
        **kwargs: Forwarded to the underlying `tvos_application` rule after
            entitlements and Swift linkopts have been injected.
    """
    bundling_args = binary_support.add_entitlements_and_swift_linkopts(
        name,
        platform_type = str(apple_common.platform_type.tvos),
        **kwargs
    )

    _tvos_application(
        name = name,
        # Linked frameworks double as dylib dependencies of the binary.
        dylibs = kwargs.get("frameworks", []),
        **bundling_args
    )
def tvos_extension(name, **kwargs):
    """Builds and bundles a tvOS extension.

    Args:
        name: Target name.
        **kwargs: Forwarded to the underlying `tvos_extension` rule after
            entitlements and Swift linkopts have been injected.
    """
    bundling_args = binary_support.add_entitlements_and_swift_linkopts(
        name,
        platform_type = str(apple_common.platform_type.tvos),
        **kwargs
    )

    _tvos_extension(
        name = name,
        # Linked frameworks double as dylib dependencies of the binary.
        dylibs = kwargs.get("frameworks", []),
        **bundling_args
    )
def tvos_framework(name, **kwargs):
    """Builds and bundles a tvOS dynamic framework.

    Args:
        name: Target name.
        **kwargs: Forwarded to the underlying `tvos_framework` rule; `linkopts`
            are augmented with the framework's `-install_name`.
    """

    # TODO(b/120861201): The linkopts macro additions here only exist because the Starlark linking
    # API does not accept extra linkopts and link inputs. With those, it will be possible to merge
    # these workarounds into the rule implementations.
    linkopts = kwargs.pop("linkopts", [])
    bundle_name = kwargs.get("bundle_name", name)
    # Dynamic frameworks are located at run time relative to the app's rpath.
    linkopts += ["-install_name", "@rpath/%s.framework/%s" % (bundle_name, bundle_name)]
    kwargs["linkopts"] = linkopts

    bundling_args = binary_support.add_entitlements_and_swift_linkopts(
        name,
        platform_type = str(apple_common.platform_type.tvos),
        **kwargs
    )

    # Remove any kwargs that shouldn't be passed to the underlying rule.
    bundling_args.pop("entitlements", None)

    _tvos_framework(
        name = name,
        dylibs = kwargs.get("frameworks", []),
        **bundling_args
    )
def tvos_unit_test(
        name,
        test_host = None,
        **kwargs):
    """Builds an tvOS XCTest test target.

    Args:
        name: Target name.
        test_host: Optional application target that hosts the tests; also
            used as the bundle loader.
        **kwargs: Forwarded to the underlying `tvos_unit_test` rule.
    """

    # Discard binary_tags for now, as there is no apple_binary target any more to apply them to.
    # TODO(kaipi): Cleanup binary_tags for tests and remove this.
    kwargs.pop("binary_tags", None)

    # Discard any testonly attributes that may have been passed in kwargs. Since this is a test
    # rule, testonly should be a noop. Instead, force the add_entitlements_and_swift_linkopts method
    # to have testonly to True since it's always going to be a dependency of a test target. This can
    # be removed when we migrate the swift linkopts targets into the rule implementations.
    # (The popped value is intentionally discarded.)
    testonly = kwargs.pop("testonly", None)
    bundling_args = binary_support.add_entitlements_and_swift_linkopts(
        name,
        platform_type = str(apple_common.platform_type.tvos),
        include_entitlements = False,
        testonly = True,
        **kwargs
    )

    bundle_loader = None
    if test_host:
        bundle_loader = test_host

    _tvos_unit_test(
        name = name,
        bundle_loader = bundle_loader,
        test_host = test_host,
        **bundling_args
    )
def tvos_ui_test(
        name,
        **kwargs):
    """Builds an tvOS XCUITest test target.

    Args:
        name: Target name.
        **kwargs: Forwarded to the underlying `tvos_ui_test` rule.
    """

    # Discard binary_tags for now, as there is no apple_binary target any more to apply them to.
    # TODO(kaipi): Cleanup binary_tags for tests and remove this.
    kwargs.pop("binary_tags", None)

    # Discard any testonly attributes that may have been passed in kwargs. Since this is a test
    # rule, testonly should be a noop. Instead, force the add_entitlements_and_swift_linkopts method
    # to have testonly to True since it's always going to be a dependency of a test target. This can
    # be removed when we migrate the swift linkopts targets into the rule implementations.
    # (The popped value is intentionally discarded.)
    testonly = kwargs.pop("testonly", None)
    bundling_args = binary_support.add_entitlements_and_swift_linkopts(
        name,
        platform_type = str(apple_common.platform_type.tvos),
        include_entitlements = False,
        testonly = True,
        **kwargs
    )

    _tvos_ui_test(name = name, **bundling_args)
| StarcoderdataPython |
9780493 | """Extract labeling data from the question labeling HITs.
See ``python extractlabels.py --help`` for more information.
"""
import ast
import collections
import json
import logging
import click
from scripts import _utils
logger = logging.getLogger(__name__)
# constants
EXPECTED_NUM_LABELS = 3
KEY_SCHEMA = {
'subject': str,
'question': str,
'answer': lambda x: None if x == 'None' else str(x),
'quality_labels': ast.literal_eval, # List[str]
'score': int,
'high_quality': bool
}
LABEL_TO_BIT = {
'always': 1,
'usually': 1,
'sometimes': 1,
'rarely': 0,
'never': 0,
'bad': 0
}
# main function
@click.command(
    context_settings={
        'help_option_names': ['-h', '--help']
    })
@click.argument(
    'xml_dir',
    type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
    'output_path',
    type=click.Path(exists=False, file_okay=True, dir_okay=False))
def extractlabels(xml_dir, output_path):
    """Extract labeling data from XML_DIR and write to OUTPUT_PATH.

    Extract the subject-question pair labeling data from a batch of the
    question labeling HITs. XML_DIR should be an XML directory extracted
    with AMTI. OUTPUT_PATH is the location to which the data will be
    written in a JSON Lines format. Each instance will have a "labels"
    attribute, which is a list of the labels, and a "majority" attribute
    giving the majority (true / false) vote, a "true_votes" attribute
    giving the number of votes for "true", and an "is_bad" attribute
    giving whether or not any annotators labeled the assertion as "bad".

    Raises an AssertionError if any instance does not have exactly
    EXPECTED_NUM_LABELS labels.
    """
    # submissions : the form data submitted from the question labeling
    # HITs as a list of dictionaries mapping the question identifiers to
    # the free text, i.e.:
    #
    #     [
    #       {
    #         'attribute-idx': attribute_value,
    #         ...
    #       },
    #       ...
    #     ]
    #
    # See the data for individual attributes and values. The index (idx)
    # is used because each HIT had the worker label multiple instances
    # for efficiency purposes.
    submissions = _utils.extract_xml_dir(xml_dir)

    # decode the data from the ``"attribute-idx": value`` style to the
    # individual rows.
    rows = _utils.decode_attribute_idx_data(submissions)

    # aggregate all the labels for each instance, since we had multiple
    # assignments / workers per instance.
    key_to_labels = collections.defaultdict(list)
    for row in rows:
        key = _utils.key(row, KEY_SCHEMA.keys())
        key_to_labels[key].append(row['label'])

    # create the new rows by processing the aggregated labels
    new_row_strs = []
    for key, labels in key_to_labels.items():
        assert len(labels) == EXPECTED_NUM_LABELS, (
            f'{key} only has {len(labels)} assertion labels.'
            f' It should have exactly {EXPECTED_NUM_LABELS}.'
        )

        # create the new row
        # use an OrderedDict so the keys appear in the right order in
        # the JSON.  Relies on KEY_SCHEMA iterating in the same order the
        # key tuple was built.
        new_row = collections.OrderedDict([
            (attribute, as_type(value))
            for (attribute, as_type), value
            in zip(KEY_SCHEMA.items(), key)
        ])

        # compute new attributes to add
        is_bad = 'bad' in labels
        true_votes = sum([LABEL_TO_BIT[label] for label in labels])
        majority = true_votes > (len(labels) / 2.0)

        # add the new attributes
        new_row['labels'] = labels
        new_row['is_bad'] = is_bad
        new_row['true_votes'] = true_votes
        new_row['majority'] = majority

        new_row_strs.append(json.dumps(new_row))

    # write out the data (sorted for deterministic output)
    with click.open_file(output_path, 'w') as output_file:
        output_file.write('\n'.join(sorted(new_row_strs)))
if __name__ == '__main__':
extractlabels()
| StarcoderdataPython |
3301848 | <reponame>howawong/legco-api-server
import datetime
from haystack import indexes
from .models import Party, MeetingSpeech, Question
class PartyIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Party records."""
    # Haystack requires exactly one document=True field per index.
    text = indexes.CharField(document=True)
    name_en = indexes.CharField(model_attr='name_en')
    name_ch = indexes.CharField(model_attr='name_ch')

    def get_model(self):
        """Return the model class this index covers."""
        return Party

    def index_queryset(self, using=None):
        """Return the queryset to index (all Party rows)."""
        return self.get_model().objects.filter()
class SpeechIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over MeetingSpeech records."""
    text = indexes.CharField(document=True, use_template=True)
    # Indexed under ``name_ch`` but sourced from the speech title.
    name_ch = indexes.CharField(model_attr='title_ch')
    year = indexes.IntegerField(model_attr='hansard__date__year')

    def get_model(self):
        """Return the model class this index covers."""
        return MeetingSpeech

    def index_queryset(self, using=None):
        """Return the queryset to index, prefetching the related hansard."""
        return self.get_model().objects.filter().prefetch_related('hansard')
class QuestionIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Question records."""
    text = indexes.CharField(document=True, use_template=True)
    # Indexed under ``name_ch`` but sourced from the question title.
    name_ch = indexes.CharField(model_attr='title_ch')
    year = indexes.IntegerField(model_attr='date__year')

    def get_model(self):
        """Return the model class this index covers."""
        return Question

    def index_queryset(self, using=None):
        """Return the queryset to index (all Question rows)."""
        return self.get_model().objects.filter()
| StarcoderdataPython |
1931474 | <reponame>pykit3/k3num<gh_stars>0
import unittest
import doctest
import k3num
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` protocol hook: add k3num's doctests to the suite."""
    tests.addTests(doctest.DocTestSuite(k3num))
    return tests
| StarcoderdataPython |
1949990 | <filename>python/GPIO.py
#! /usr/bin/env python3
import enum
import PyQt5
import PyQt5.QtCore
class GPIODirection(enum.Enum):
    """Configured signal direction of a GPIO pin."""
    INPUT = 0
    OUTPUT = 1
    BIDIRECTIONAL = 2
    ALTERNATE = 3
class GPIOState(enum.Enum):
    """Logical level of a GPIO pin."""
    OFF = 0
    ON = 1
class GPIO(PyQt5.QtCore.QObject):
    """
    Container class for the GPIO on the FPGA board
    """
    # Emitted with this pin's index whenever its push button is clicked.
    pushButtonSignal = PyQt5.QtCore.pyqtSignal(int)

    def __init__(self, state=GPIOState.OFF, direction=GPIODirection.INPUT, index=None, enableIcon=None, disableIcon=None, pushButton=None):
        super(GPIO, self).__init__()
        self.direction = direction
        self.state = state
        self.index = index  # pin number used in log output and the signal payload
        self.enableIcon = enableIcon    # icon shown while the pin is ON
        self.disableIcon = disableIcon  # icon shown while the pin is OFF
        self.currentIcon = self.disableIcon
        # NOTE(review): pushButton is assumed non-None — confirm callers always pass one.
        self.pushButton = pushButton
        self.pushButton.clicked.connect(self.clicked)
        self.pushButton.setIcon(self.currentIcon)
        # self.pushButton.setText(str(index))

    def updateState(self):
        """Toggle the pin between ON and OFF and swap the button icon."""
        print("Update State {}".format(self.index))
        if self.state == GPIOState.OFF:
            self.state = GPIOState.ON
            self.currentIcon = self.enableIcon
        elif self.state == GPIOState.ON:
            self.state = GPIOState.OFF
            self.currentIcon = self.disableIcon
        self.pushButton.setIcon(self.currentIcon)
        return

    def clicked(self):
        """Slot for the button's clicked signal; re-emit with this pin's index."""
        print("Clicked {} {}".format(self.index, self.state))
        self.pushButtonSignal.emit(self.index)
        return
| StarcoderdataPython |
5087375 | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import TemplateView
from django_gotolong.fofeti.views import FofetiListView, \
Fofeti_upload, Fofeti_fetch
from django_gotolong.fofeti.views import FofetiIndustryView, \
FofetiListView_Type, FofetiListView_Benchmark, FofetiListView_Benchmark_Select, \
FofetiListView_AUM, FofetiListView_NonGold_FOF, FofetiListView_NonGold_ETF, \
FofetiListView_Gold_ETF, FofetiListView_Gold_FOF, \
FofetiListView_Nifty_ETF, FofetiListView_Nifty_FOF, \
FofetiListView_Next_ETF, FofetiListView_Next_FOF, \
FofetiListView_Mid_ETF, FofetiListView_Mid_FOF
urlpatterns = [
    # Full listings and groupings.
    path('list/', FofetiListView.as_view(), name='fofeti-list'),
    path('list/aum/', FofetiListView_AUM.as_view(), name='fofeti-list-aum'),
    path('list/type/', FofetiListView_Type.as_view(), name='fofeti-list-type'),
    path('list/benchmark/', FofetiListView_Benchmark.as_view(), name='fofeti-list-benchmark'),
    path('list/benchmark-select/', FofetiListView_Benchmark_Select.as_view(), name='fofeti-list-benchmark-select'),
    # ETF listings by category.
    path('list/etf/non-gold/', FofetiListView_NonGold_ETF.as_view(), name='fofeti-list-etf-non-gold'),
    path('list/etf/gold/', FofetiListView_Gold_ETF.as_view(), name='fofeti-list-etf-gold'),
    path('list/etf/nifty-50/', FofetiListView_Nifty_ETF.as_view(), name='fofeti-list-etf-nifty'),
    path('list/etf/next-50/', FofetiListView_Next_ETF.as_view(), name='fofeti-list-etf-next'),
    path('list/etf/mid-150/', FofetiListView_Mid_ETF.as_view(), name='fofeti-list-etf-mid'),
    # FOF listings by category.
    path('list/fof/gold/', FofetiListView_Gold_FOF.as_view(), name='fofeti-list-fof-gold'),
    path('list/fof/non-gold/', FofetiListView_NonGold_FOF.as_view(), name='fofeti-list-fof-non-gold'),
    path('list/fof/nifty-50/', FofetiListView_Nifty_FOF.as_view(), name='fofeti-list-fof-nifty'),
    path('list/fof/next-50/', FofetiListView_Next_FOF.as_view(), name='fofeti-list-fof-next'),
    path('list/fof/mid-150/', FofetiListView_Mid_FOF.as_view(), name='fofeti-list-fof-mid'),
    # Aggregate and maintenance endpoints.
    path('industry/', FofetiIndustryView.as_view(), name='fofeti-industry-list'),
    path('fetch/', Fofeti_fetch, name='fofeti-fetch'),
    path('upload/', Fofeti_upload, name='fofeti-upload'),
]
| StarcoderdataPython |
262503 | <filename>mkt/monolith/urls.py<gh_stars>0
from django.conf.urls import include, patterns, url
from tastypie.api import Api
from .resources import MonolithData
# Tastypie API namespace exposing the monolith resources under /monolith/.
api = Api(api_name='monolith')
api.register(MonolithData())

urlpatterns = patterns('',
    url(r'^', include(api.urls)),
)
| StarcoderdataPython |
11200115 | <gh_stars>0
import logging
import time
from pydantic import HttpUrl
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as expected_conditions
class YELPFetcherController:
    """Fetch a Yelp business page's HTML with a headless Firefox via Selenium."""

    # Seconds to wait for page elements (used only by the commented-out
    # explicit wait below).
    WAIT_ELEMENT = 20
    # XPath locators for elements of interest on the business page.
    XPATHS = {
        'title': "/html/body/div[2]/div[3]/yelp-react-root/div/div[3]/div/div/div[2]/div/div/div[1]/div/div[1]/div[1]/div/div/div[1]/h1"
    }

    def __init__(self, url: HttpUrl) -> None:
        firefox_options = Options()
        firefox_options.headless = True
        self.driver = webdriver.Firefox(
            executable_path="/usr/local/share/geckodriver", options=firefox_options)
        self.url_base = url

    def _read_web(self):
        """Load the page and return its HTML source.

        Returns None on timeout; any other exception is printed and swallowed
        (also yielding None).  The driver is always closed afterwards, so the
        instance cannot fetch twice — NOTE(review): confirm this is intended.
        """
        try:
            print("enttreee aquiii")
            # Delete cookies
            self.driver.delete_all_cookies()
            print(
                f"Attempting to crawl Yelp using business: {self.url_base}")
            # As YELP don't use to fail just try to connect directly
            self.driver.get(self.url_base)
            try:
                # Wait for title be displayed
                # WebDriverWait(self.driver, self.WAIT_ELEMENT).until(
                #     expected_conditions.presence_of_element_located(
                #         (By.XPATH, self.XPATHS.get('title'))
                #     )
                # )
                # Fixed delay in lieu of the explicit wait above.
                time.sleep(5)
            except TimeoutException:
                print(
                    "Title is not in the screen after wait. Aborting!")
                return None
            return self.driver.page_source
        except Exception as e:
            print(e)
        finally:
            self.driver.close()
| StarcoderdataPython |
9614593 | <gh_stars>0
# Generated by Django 2.2.5 on 2020-10-31 15:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make GoodsCategory.parent an optional (null/blank) self-referencing FK."""

    dependencies = [
        ('contents', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='goodscategory',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subs', to='contents.GoodsCategory', verbose_name='父类别'),
        ),
    ]
| StarcoderdataPython |
9777513 | #!/usr/bin/env python
import glob
import json
import soundfile as sf
from soundfile import SoundFile
def convert_flac_to_wav(wav_path: str) -> tuple:
    """Convert a .flac speech file to a .wav file in the same directory.

    Parameters
    -----------
    :params wav_path: Path to the flac file

    Returns
    -----------
    Tuple of ``(output_wav_path, duration_seconds, file_id)`` where
    ``file_id`` is the file's base name without extension.
    (The original ``-> None`` annotation was wrong — a tuple is returned.)

    Notes
    -----------
    Open the .flac file using soundfile and write to .wav
    format.
    """
    import os  # local import keeps this helper self-contained

    with SoundFile(wav_path) as wav:
        wav_arr = wav.read()
        sample_rate = wav.samplerate
        nframes = wav.frames

    duration = nframes / sample_rate
    print(f"Wave duration is {duration} .")

    # splitext strips only the final extension, so dots elsewhere in the
    # path (e.g. a directory named "v1.2") no longer corrupt the output
    # path the way the previous split(".") approach did.
    base, _ext = os.path.splitext(wav_path)
    output_path = base + ".wav"
    wav_id = os.path.basename(base)

    sf.write(output_path,
             wav_arr,
             sample_rate)
    return (output_path, duration, wav_id)
def create_nemo_manifest(flac_path: str, manifest_path: str) -> None:
    """Convert all flac files in a directory and append NeMo manifest entries.

    Parameters
    ------------
    :params flac_path: Path to directory with flac files (trailing slash
        optional — os.path.join handles both)
    :params manifest_path: Path (with filename) to write the manifest

    Raises IndexError when no ``*.trans.txt`` file exists or a flac file has
    no matching transcript line.
    """
    import os  # local import keeps this helper self-contained

    all_flac_file = glob.glob(os.path.join(flac_path, "*.flac"))
    transcript_path = glob.glob(os.path.join(flac_path, "*.trans.txt"))[0]
    with open(transcript_path) as transcript_file:
        transcripts = transcript_file.readlines()

    # Open the manifest once instead of re-opening it for every file.
    with open(manifest_path, 'a') as manifest:
        for file_li in all_flac_file:
            output_path, duration, wav_id = convert_flac_to_wav(file_li)
            # First transcript line starting with this utterance id.
            transcript = list(filter(lambda x: x.startswith(wav_id),
                                     transcripts))[0].strip()
            # Strip only the leading id (count=1) so a transcript that happens
            # to contain the id text elsewhere is left intact.
            transcript = transcript.replace(wav_id, "", 1)
            metadata = {
                "audio_filepath": output_path,
                "duration": duration,
                "text": transcript
            }
            json.dump(metadata, manifest)
            manifest.write("\n")
if __name__ == "__main__":
    # Sample inputs for a manual smoke run (paths are machine-specific).
    saample_file = "/home/jaganadhg/AI_RND/nvidianemo/LibriSpeech/dev-clean/84/121550/84-121550-0035.flac"
    # convert_flac_to_wav(saample_file)
    flac_path = "/home/jaganadhg/AI_RND/nvidianemo/LibriSpeech/dev-clean/84/121123/"
    meta_apth = "metadata_validation.json"
    create_nemo_manifest(flac_path,
                         meta_apth)
6570823 | from db import db
from db import db
import datetime
import logging
from pymongo import DESCENDING
from biliob_tracer.task import ProgressTask
coll = db['author']  # handle to the MongoDB "author" collection
logging.basicConfig(level=logging.INFO,
                    format='[%(asctime)s] %(levelname)s @ %(name)s: %(message)s')
logger = logging.getLogger(__name__)
def format_p_rank(i, count):
    """Return rank position *i* as a percentage of *count*, rounded to 2 dp."""
    ratio = i / count
    return round(ratio * 100, 2)
def calculate_author_rank():
    """Recompute fan/view ranks for every author document in MongoDB.

    For each metric in ``keys`` the authors are sorted descending and
    assigned an absolute rank, a delta against the previously stored rank,
    and a percentile rank.  Authors whose metric value is 0 get rank -1.
    """
    task_name = "计算作者排名数据"
    keys = ['cFans', 'cArchive_view', 'cArticle_view']
    # Author count for the percentile denominator (based on the first key).
    count = coll.count_documents({keys[0]: {'$exists': 1}})
    t = ProgressTask(task_name, count * len(keys), collection=db['tracer'])
    for each_key in keys:
        logger.info("开始计算作者{}排名".format(each_key))
        i = 1  # 1-based rank within the current metric
        authors = coll.find({each_key: {'$exists': 1}}, {'mid': 1, 'rank': 1, each_key: 1}).batch_size(
            300).sort(each_key, DESCENDING)
        # Map the metric key to its rank / delta-rank / percentile field names.
        if each_key == 'cFans':
            each_rank = 'fansRank'
            each_d_rank = 'dFansRank'
            each_p_rank = 'pFansRank'
        elif each_key == 'cArchive_view':
            each_rank = 'archiveViewRank'
            each_d_rank = 'dArchiveViewRank'
            each_p_rank = 'pArchiveViewRank'
        elif each_key == 'cArticle_view':
            each_rank = 'articleViewRank'
            each_d_rank = 'dArticleViewRank'
            each_p_rank = 'pArticleViewRank'
        for each_author in authors:
            t.current_value += 1
            logger.info("计算{}排名".format(each_author['mid']))
            # 如果没有data 直接下一个
            # (the query filters on $exists, so this branch normally runs)
            if each_key in each_author:
                # 如果已经计算过rank — update in place, keeping the delta
                # against the previously stored rank.
                if 'rank' in each_author:
                    rank = each_author['rank']
                    if each_rank in each_author['rank']:
                        rank[each_d_rank] = each_author['rank'][each_rank] - i
                    else:
                        rank[each_d_rank] = 0
                    rank[each_rank] = i
                    rank[each_p_rank] = format_p_rank(i, count)
                else:
                    # 初始化
                    rank = {
                        each_rank: i,
                        each_d_rank: 0,
                        each_p_rank: format_p_rank(i, count)
                    }
            # A zero metric value overrides the rank with the sentinel -1.
            if each_author[each_key] == 0:
                if 'rank' in each_author:
                    rank = each_author['rank']
                    rank[each_d_rank] = 0
                    rank[each_rank] = -1
                    rank[each_p_rank] = -1
                else:
                    rank = {
                        each_rank: -1,
                        each_d_rank: 0,
                        each_p_rank: -1
                    }
            # The last key processed stamps the update time.
            if each_key == 'cArticle_view':
                rank['updateTime'] = datetime.datetime.now()
            coll.update_one({'mid': each_author['mid']}, {
                '$set': {
                    'rank': rank,
                }
            })
            i += 1
    t.current_value = t.total_value
    logger.info("计算作者排名结束")
| StarcoderdataPython |
3422602 | <reponame>mikekeda/tools
from django.contrib.auth import get_user_model
from django.test import TestCase
User = get_user_model()
class BaseTestCase(TestCase):
    """Base test case that provisions one regular user and one admin user.

    The shared random password is kept on ``cls.password`` so individual
    tests can log either account in.
    """
    test_user = None
    test_admin = None

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # Create usual user.
        cls.password = User.objects.make_random_password()
        # The original source contained the redaction placeholder
        # ``password=<PASSWORD>`` (invalid syntax); restore the generated
        # password stored just above.
        cls.test_user = User.objects.create_user(
            username="testuser", password=cls.password
        )
        cls.test_user.save()

        # Create admin user.
        # NOTE(review): "<EMAIL>" also looks like a redacted placeholder —
        # replace with a real address if tests depend on it.
        cls.test_admin = User.objects.create_superuser(
            username="testadmin",
            email="<EMAIL>",
            password=cls.password,
            first_name="Bob",
            last_name="Smit",
        )
        cls.test_admin.save()
| StarcoderdataPython |
239449 | <filename>Probability Statistics Beginner/Linear regression-15.py
## 2. Drawing lines ##
# Exercise script: plot y = x and shifted variants to illustrate intercepts.

import matplotlib.pyplot as plt
import numpy as np

x = [0, 1, 2, 3, 4, 5]
# Going by our formula, every y value at a position is the same as the x-value in the same position.
# We could write y = x, but let's write them all out to make this more clear.
y = [0, 1, 2, 3, 4, 5]

# As you can see, this is a straight line that passes through the points (0,0), (1,1), (2,2), and so on.
plt.plot(x, y)
plt.show()

# Let's try a slightly more ambitious line.
# What if we did y = x + 1?
# We'll make x an array now, so we can add 1 to every element more easily.
x = np.asarray([0, 1, 2, 3, 4, 5])
y = x + 1

# y is the same as x, but every element has 1 added to it.
print(y)

# This plot passes through (0,1), (1,2), and so on.
# It's the same line as before, but shifted up 1 on the y-axis.
plt.plot(x, y)
plt.show()

# By adding 1 to the line, we moved what's called the y-intercept -- where the line intersects with the y-axis.
# Moving the intercept can shift the whole line up (or down when we subtract).
plt.plot(x,x-1)
plt.show()
plt.plot(x,x+10)
plt.show()
## 3. Working with slope ##
# Exercise script: plot lines of varying slope.

import matplotlib.pyplot as plt
import numpy as np

x = np.asarray([0, 1, 2, 3, 4, 5])
# Let's set the slope of the line to 2.
y = 2 * x

# See how this line is "steeper" than before?  The larger the slope is, the steeper the line becomes.
# On the flipside, fractional slopes will create a "shallower" line.
# Negative slopes will create a line where y values decrease as x values increase.
plt.plot(x, y)
plt.show()
plt.plot(x,4*x)
plt.show()
plt.plot(x,0.5*x)
plt.show()
plt.plot(x, (-2*x))
plt.show()
## 4. Starting out with linear regression ##

# The wine quality data is loaded into wine_quality
from numpy import cov

# NOTE(review): cov() returns the full 2x2 covariance matrix here, not a
# slope; later sections divide cov[0, 1] by the variance to get the slope.
slope_density = cov(wine_quality['density'],wine_quality['quality'])
## 5. Finishing linear regression ##

from numpy import cov

# This function will take in two columns of data, and return the slope of the linear regression line.
def calc_slope(x, y):
    # NOTE(review): np.cov normalises by N-1 while ndarray.var() defaults to
    # N; the mismatch is inherited from the exercise — confirm before reuse.
    return cov(x, y)[0, 1] / x.var()

# Intercept = mean(y) - slope * mean(x).
intercept_density = wine_quality["quality"].mean() - (calc_slope(wine_quality["density"], wine_quality["quality"]) * wine_quality["density"].mean())
## 6. Making predictions ##
from numpy import cov
def calc_slope(x, y):
    """Slope of the least-squares regression line of *y* on *x*."""
    covariance_xy = cov(x, y)[0, 1]
    return covariance_xy / x.var()
# Calculate the intercept given the x column, y column, and the slope
def calc_intercept(x, y, slope):
    """Intercept of the regression line: mean(y) - slope * mean(x)."""
    intercept = y.mean() - slope * x.mean()
    return intercept
def predicted(x):
    # Relies on the module-level `slope` and `intercept` assigned just below.
    return x * slope + intercept

slope = calc_slope(wine_quality["density"], wine_quality["quality"])
intercept = calc_intercept(wine_quality["density"], wine_quality["quality"], slope)
# Series.apply evaluates `predicted` element-wise over the density column.
predicted_quality = wine_quality["density"].apply(predicted)
## 7. Finding error ##

from scipy.stats import linregress

# We've seen the r_value before -- we'll get to what p_value and stderr_slope are soon -- for now, don't worry about them.
slope, intercept, r_value, p_value, stderr_slope = linregress(wine_quality["density"], wine_quality["quality"])

# As you can see, these are the same values we calculated (except for slight rounding differences)
print(slope)
print(intercept)

def pred(x):
    # Uses the module-level slope/intercept fitted just above.
    return x*slope + intercept

pred_d = wine_quality['density'].apply(pred)
# Residual sum of squares, accumulated point by point.
rss = 0
for i in range(len(pred_d)):
    rss += (wine_quality['quality'][i] - pred_d[i])**2
## 8. Standard error ##

from scipy.stats import linregress
import numpy as np

# We can do our linear regression
# Sadly, the stderr_slope isn't the standard error, but it is the standard error of the slope fitting only
# We'll need to calculate the standard error of the equation ourselves
slope, intercept, r_value, p_value, stderr_slope = linregress(wine_quality["density"], wine_quality["quality"])

predicted_y = np.asarray([slope * x + intercept for x in wine_quality["density"]])
residuals = (wine_quality["quality"] - predicted_y) ** 2
rss = sum(residuals)
# Standard error of the estimate: sqrt(RSS / (n - 2)).
stderr = (rss / (len(wine_quality["quality"]) - 2)) ** .5
def within_percentage(y, predicted_y, stderr, error_count):
    """Fraction of predictions within *error_count* standard errors of truth."""
    threshold = stderr * error_count
    hits = sum(1 for delta in abs(predicted_y - y) if delta <= threshold)
    return hits / len(y)
# Share of actual quality values within 1, 2, and 3 standard errors.
within_one = within_percentage(wine_quality["quality"], predicted_y, stderr, 1)
within_two = within_percentage(wine_quality["quality"], predicted_y, stderr, 2)
within_three = within_percentage(wine_quality["quality"], predicted_y, stderr, 3)
6563655 | from rest_framework import serializers
from core.models import *
class DiscountCouponSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for discount coupons attached to a course."""
    class Meta:
        model = DiscountCoupon
        fields = ('discount_type', 'value', 'expiration_date', 'course')
class SmallCourseSerializer(serializers.ModelSerializer):
    """Compact course representation (id and name only) for nesting."""
    class Meta:
        model = Course
        fields = ('id', 'name')
class SubscriptionSerializer(serializers.ModelSerializer):
    """Subscription with its course embedded via SmallCourseSerializer."""
    # Read-only nested course summary.
    course = SmallCourseSerializer(
        many=False,
        read_only=True
    )
    class Meta:
        model = Subscription
        fields = ('status', 'course','price', 'is_favorite', 'rating')
class TeacherSerializer(serializers.HyperlinkedModelSerializer):
    """Teacher-oriented view of a User.

    NOTE(review): Meta.model is User while 'contents' appears in fields —
    confirm User actually exposes a 'contents' attribute.
    """
    user = serializers.PrimaryKeyRelatedField(read_only=True,many=False)
    class Meta:
        model = User
        fields = ('user', 'name', 'contents')
class StudentSerializer(serializers.HyperlinkedModelSerializer):
    """Minimal student view of a User (id and name)."""
    class Meta:
        model = User
        fields = ('id', 'name')
class UserSerializer(serializers.ModelSerializer):
    """User with all of their subscriptions embedded."""
    subscriptions = SubscriptionSerializer(many=True, read_only=True)
    class Meta:
        model = User
        fields = ('id','who_is', 'name', 'username', 'subscriptions')
class CourseSerializer(serializers.HyperlinkedModelSerializer):
    """Course listing with a hyperlink to the owning teacher."""
    user = serializers.HyperlinkedRelatedField(read_only=True,view_name='teacher_detail')
    class Meta:
        model = Course
        fields = ('id','thumb', 'name', 'created_date', 'price', 'user')
| StarcoderdataPython |
243145 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 20:53:10 2021
PycaretとStreamlitで作るGUI AutoML
"""
# streamlit run filename
import streamlit as st
import pandas as pd
import datetime
# Step 1: upload a training CSV.
st.markdown("# 1. データをアップロードします")
uploaded_file = st.file_uploader("CSVファイルをアップロードしてください", type='csv', key='train')
if uploaded_file is not None:
    df = pd.read_csv(uploaded_file)
    # Step 2: preview the uploaded data.
    st.markdown("# 2. アップロードされたデータを確認します")
    st.dataframe(df.head(10))
    # Step 3: pick the target column (defaults to the last column).
    st.markdown("# 3. ターゲットを入力してください")
    target = st.text_input(label='ターゲット名を文字列で正しく入力してください', value=df.columns[-1])
    # Step 4: choose regression vs. classification.
    st.markdown("# 4. 回帰の場合はregression、分類の場合はclassificationを選択してください")
    ml_usecase = st.selectbox(label='regressionかclassificationを選択してください',
                              options=('', 'regression', 'classification'),
                              key='ml_usecase')
    # Import the matching pycaret namespace; setup/compare_models/... come
    # from this star import.
    if ml_usecase == 'regression':
        from pycaret.regression import *
    elif ml_usecase == 'classification':
        from pycaret.classification import *
    else:
        st.text('「regressionか「classification」か選択してください')
    if (ml_usecase == 'regression') | (ml_usecase == 'classification'):
        # Step 5: run AutoML model comparison.
        st.markdown("# 5. 実行します")
        #compare_button = st.button(label='実行', key='compare_model')
        #if compare_button:
        st.markdown("実行中です…しばらくお待ち下さい")
        ml = setup(data=df,
                   target=target,
                   session_id=1234,
                   silent=True,
                   )
        best = compare_models()  #
        st.dataframe(best)
        # Step 6: choose a model from the leaderboard and build it.
        st.markdown("# 6. モデルを選択してください。")
        select_model = st.selectbox(label='モデルを選択してください',
                                    options=tuple(best.index),
                                    key='select_model')
        save_button = st.button(label='モデル構築', key='save_model')
        if save_button:
            model = create_model(select_model)
            final = finalize_model(model)
            # Persisted as "<model>_saved_YYYYMMDD".
            save_model(final, select_model+'_saved_'+datetime.date.today().strftime('%Y%m%d'))
            # Step 8: upload new data and write predictions to CSV.
            st.markdown("# 8. 予測したいデータを追加してください。")
            uploaded_file_new = st.file_uploader("CSVファイルをアップロードしてください。", type='csv', key='test')
            if uploaded_file_new is not None:
                df_new = pd.read_csv(uploaded_file_new)
                predictions = predict_model(final, data=df_new)
                predictions.to_csv(select_model+'_predict_'+datetime.date.today().strftime('%Y%m%d')+'.csv')
                st.dataframe(predictions)
| StarcoderdataPython |
6498763 | <filename>spinoffs/oryx/oryx/util/summary_test.py
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.spinoffs.oryx.util.summary."""
from absl.testing import absltest
from jax import lax
import jax.numpy as jnp
import numpy as np
from oryx.internal import test_util
from oryx.util import summary
class SummaryTest(test_util.TestCase):
    """Unit tests for oryx.util.summary's summary/get_summaries API."""

    def test_can_pull_out_summarized_values_in_strict_mode(self):

        def f(x):
            return summary.summary(x, name='x')

        _, summaries = summary.get_summaries(f)(1.)
        self.assertDictEqual(dict(x=1.), summaries)

    def test_can_pull_out_non_dependent_values(self):
        # The summarized value is not part of f's return value.

        def f(x):
            summary.summary(x ** 2, name='y')
            return x

        _, summaries = summary.get_summaries(f)(2.)
        self.assertDictEqual(dict(y=4.), summaries)

    def test_duplicate_names_error_in_strict_mode(self):

        def f(x):
            summary.summary(x, name='x')
            summary.summary(x, name='x')
            return x

        with self.assertRaisesRegex(ValueError, 'has already been reaped: x'):
            summary.get_summaries(f)(2.)

    def test_can_pull_summaries_out_of_scan_in_append_mode(self):
        # 'append' mode collects one entry per scan iteration.

        def f(x):
            def body(x, _):
                summary.summary(x, name='x', mode='append')
                return x + 1, ()
            return lax.scan(body, x, jnp.arange(10.))[0]

        value, summaries = summary.get_summaries(f)(0.)
        self.assertEqual(value, 10.)
        np.testing.assert_allclose(summaries['x'], np.arange(10.))
if __name__ == '__main__':
  # Run all SummaryTest cases via absl's test runner.
  absltest.main()
| StarcoderdataPython |
4897509 | <reponame>maykinmedia/bluebottle<filename>bluebottle/bb_fundraisers/urls/api.py
from django.conf.urls import patterns, url
from ..views import FundraiserListView, FundraiserDetailView
# URL routing for the fundraiser REST API (legacy Django `patterns()` style).
urlpatterns = patterns('',
                       # List endpoint: GET all fundraisers.
                       url(r'^$', FundraiserListView.as_view(), name='fundraiser-list'),
                       # Detail endpoint: GET one fundraiser by integer primary key.
                       url(r'(?P<pk>[\d]+)$', FundraiserDetailView.as_view(), name='fundraiser-detail'),
                       )
| StarcoderdataPython |
4926306 | <gh_stars>0
# Copyright 2016, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
from collections import defaultdict, namedtuple
from .descriptors import DiffResultDescriptor
from .difference import Difference, SymbolWrapper, Wrapper
from .mls import RangeWrapper
# Record for a range_transition rule whose default MLS range changed between
# policies: the rule plus its new (right-policy) and old (left-policy) range.
modified_mlsrule_record = namedtuple("modified_mlsrule", ["rule",
                                                          "added_default",
                                                          "removed_default"])
class MLSRulesDifference(Difference):

    """Determine the difference in MLS rules between two policies."""

    # Diff results, populated by diff_range_transitions().
    added_range_transitions = DiffResultDescriptor("diff_range_transitions")
    removed_range_transitions = DiffResultDescriptor("diff_range_transitions")
    modified_range_transitions = DiffResultDescriptor("diff_range_transitions")

    # Lists of rules for each policy
    # NOTE(review): these are *class* attributes, so the rule caches are
    # shared across all instances of this mixin -- confirm instances are not
    # used concurrently, or move these into __init__/_reset_diff.
    _left_mls_rules = defaultdict(list)
    _right_mls_rules = defaultdict(list)

    def diff_range_transitions(self):
        """Generate the difference in range_transition rules between the policies."""

        self.log.info(
            "Generating range_transition differences from {0.left_policy} to {0.right_policy}".
            format(self))

        # Build the per-ruletype rule caches lazily on first use.
        if not self._left_mls_rules or not self._right_mls_rules:
            self._create_mls_rule_lists()

        # Wrap (and expand attribute-based rules) so rules compare as sets.
        added, removed, matched = self._set_diff(
            self._expand_generator(self._left_mls_rules["range_transition"], MLSRuleWrapper),
            self._expand_generator(self._right_mls_rules["range_transition"], MLSRuleWrapper))

        modified = []

        for left_rule, right_rule in matched:
            # Criteria for modified rules
            # 1. change to default range
            if RangeWrapper(left_rule.default) != RangeWrapper(right_rule.default):
                modified.append(modified_mlsrule_record(left_rule,
                                                        right_rule.default,
                                                        left_rule.default))

        self.added_range_transitions = added
        self.removed_range_transitions = removed
        self.modified_range_transitions = modified

    #
    # Internal functions
    #
    def _create_mls_rule_lists(self):
        """Create rule lists for both policies."""
        # do not expand yet, to keep memory
        # use down as long as possible
        self.log.debug("Building MLS rule lists from {0.left_policy}".format(self))
        for rule in self.left_policy.mlsrules():
            self._left_mls_rules[rule.ruletype].append(rule)

        self.log.debug("Building MLS rule lists from {0.right_policy}".format(self))
        for rule in self.right_policy.mlsrules():
            self._right_mls_rules[rule.ruletype].append(rule)

        self.log.debug("Completed building MLS rule lists.")

    def _reset_diff(self):
        """Reset diff results on policy changes."""
        self.log.debug("Resetting MLS rule differences")
        self.added_range_transitions = None
        self.removed_range_transitions = None
        self.modified_range_transitions = None

        # Sets of rules for each policy
        self._left_mls_rules.clear()
        self._right_mls_rules.clear()
class MLSRuleWrapper(Wrapper):

    """Hashable wrapper around an MLS rule enabling set-based diffing."""

    def __init__(self, rule):
        # Keep the original rule, plus wrapped components for comparison.
        self.origin = rule
        self.ruletype = rule.ruletype
        self.source = SymbolWrapper(rule.source)
        self.target = SymbolWrapper(rule.target)
        self.tclass = SymbolWrapper(rule.tclass)
        # Hash of the wrapped rule is the set/ordering key.
        self.key = hash(rule)

    def __hash__(self):
        return self.key

    def __lt__(self, other):
        return self.key < other.key

    def __eq__(self, other):
        # Ruletype is implicitly equal: MLSRulesDifference buckets rules
        # by ruletype before comparing, so only the remaining fields matter.
        return (self.source, self.target, self.tclass) == \
               (other.source, other.target, other.tclass)
| StarcoderdataPython |
3299159 | <gh_stars>1-10
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBiomFormat(PythonPackage):
    """The BIOM file format (canonically pronounced biome) is designed to be
    a general-use format for representing biological sample by observation
    contingency tables."""

    homepage = "https://pypi.python.org/pypi/biom-format/2.1.6"
    url = "https://pypi.io/packages/source/b/biom-format/biom-format-2.1.6.tar.gz"

    version('2.1.6', '1dd4925b74c56e8ee864d5e1973068de')

    # Optional HDF5 support via h5py (needed for BIOM 2.0+ files).
    variant('h5py', default=True, description='For use with BIOM 2.0+ files')

    # Build-time and run-time Python dependencies.
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-cython', type='build')
    depends_on('py-h5py', type=('build', 'run'), when='+h5py')
    depends_on('py-click', type=('build', 'run'))
    depends_on('py-numpy@1.3.0:', type=('build', 'run'))
    depends_on('py-future@0.16.0:', type=('build', 'run'))
    depends_on('py-scipy@0.13.0:', type=('build', 'run'))
    depends_on('py-pandas@0.19.2:', type=('build', 'run'))
    depends_on('py-six@1.10.0:', type=('build', 'run'))
    depends_on('py-pyqi', type=('build', 'run'))
| StarcoderdataPython |
1690921 |
"""Utilities for deriving new names from existing names.

Style objects are used to customize how :ref:`storable objects are found for DataSet objects <using-loadable-fixture-style>`
"""

# Public API of this module.
__all__ = [
    'CamelAndUndersStyle', 'TrimmedNameStyle', 'NamedDataStyle',
    'PaddedNameStyle', 'ChainedStyle']
class Style(object):
    """
    Base class for name-translation strategies.

    Each method takes an existing name and derives a new one from it.
    Two styles compose with ``+``, which pipes the first style's result
    into the second (see ChainedStyle).
    """

    def __add__(self, other_style):
        return ChainedStyle(self, other_style)

    def to_attr(self, name):
        """Convert *name* into a name suitable for an attribute."""
        raise NotImplementedError

    def guess_storable_name(self, name):
        """Convert a dataset class name into a storage class name."""
        return name

    def __repr__(self):
        return "<{0} at {1}>".format(self.__class__.__name__, hex(id(self)))
class ChainedStyle(Style):
    """
    Combination of two styles, piping first translation
    into second translation.
    """
    def __init__(self, first_style, next_style):
        self.first_style = first_style
        self.next_style = next_style

    def __getattribute__(self, c):
        # Intercepts *every* attribute lookup on the instance and returns a
        # callable that feeds the first style's result into the second
        # style's method of the same name.  Internal state is read with
        # object.__getattribute__ to bypass this hook.
        def assert_callable(attr):
            if not callable(attr):
                raise AttributeError(
                    "%s cannot chain %s" % (self.__class__, attr))
        def chained_call(name):
            f = object.__getattribute__(self, 'first_style')
            first_call = getattr(f, c)
            assert_callable(first_call)
            n = object.__getattribute__(self, 'next_style')
            next_call = getattr(n, c)
            assert_callable(next_call)
            return next_call(first_call(name))
        return chained_call

    def __repr__(self):
        # NOTE(review): self.first_style here goes through __getattribute__
        # above and therefore yields the chained callable, not the wrapped
        # style object -- confirm whether this repr output is intentional.
        return "%s + %s" % (self.first_style, self.next_style)
class OriginalStyle(Style):
    """Identity style: every name passes through unchanged."""

    def to_attr(self, name):
        # Attribute names are the original names verbatim.
        return name

    def guess_storable_name(self, name):
        # Storage names are the original names verbatim.
        return name
class CamelAndUndersStyle(Style):
    """
    Classes keep their camel-case names, while attribute names are
    lower case and underscore separated.
    """

    def to_attr(self, name):
        """Derive a lower-case, underscored attribute name.

        e.g. ``EmployeeData`` translates to ``employee_data``.
        """
        return camel_to_under(name)

    def guess_storable_name(self, name):
        """Assume the storage name equals the original name.

        e.g. ``Employee`` stays ``Employee``.
        """
        return name
class TrimmedNameStyle(Style):
    """Derive new names by stripping a fixed prefix and/or suffix."""

    def __init__(self, prefix=None, suffix=None):
        self.prefix = prefix
        self.suffix = suffix

    def _trim(self, name):
        # Each configured affix must actually be present; otherwise the
        # translation would silently corrupt the name, so fail loudly.
        def require(affix, check):
            assert check(affix), (
                "%s expected that '%s' %s '%s'" % (
                    self, name, check.__name__, affix))

        if self.prefix:
            require(self.prefix, name.startswith)
            name = name[len(self.prefix):]
        if self.suffix:
            require(self.suffix, name.endswith)
            name = name[:-len(self.suffix)]
        return name

    def to_attr(self, name):
        return self._trim(name)

    def guess_storable_name(self, name):
        return self._trim(name)
class PaddedNameStyle(Style):
    """Derive new names by adding a fixed prefix and/or suffix."""

    def __init__(self, prefix=None, suffix=None):
        self.prefix = prefix
        self.suffix = suffix

    def _pad(self, name):
        # Only truthy affixes are applied, matching the constructor's
        # None defaults.
        if self.prefix:
            name = "{0}{1}".format(self.prefix, name)
        if self.suffix:
            name = "{0}{1}".format(name, self.suffix)
        return name

    def to_attr(self, name):
        return self._pad(name)

    def guess_storable_name(self, name):
        return self._pad(name)
class NamedDataStyle(TrimmedNameStyle):
    """
    Derives names from datasets assuming "Data" as a suffix.
    for example, consider this data object and this DataSet::
        >>> class Author(object):
        ...     name = None
        ...
        >>> from fixture import DataSet
        >>> class AuthorData(DataSet):
        ...     class freude:
        ...         name = "<NAME>"
        ...
    if a LoadableFixture is configured with style=NamedDataStyle() then it will
    automatically look in its env for the object "Author" when loading the
    DataSet named "AuthorData"
    """
    def __init__(self):
        # A NamedDataStyle is simply a TrimmedNameStyle fixed to the
        # conventional "Data" suffix.
        super(NamedDataStyle, self).__init__(suffix='Data')
def camel_to_under(s):
    """Translate a camel-case name into a lower-case, underscored one.

    Any character whose ordinal is below that of ``'a'`` (upper-case
    letters, and by the same test digits and most punctuation) starts a
    new chunk; chunks are lower-cased and joined with underscores.
    """
    pieces = []
    for ch in s:
        # ord(ch) < 97 is the original "capital letter" test.
        if ord(ch) < 97 or not pieces:
            pieces.append(ch)
        else:
            pieces[-1] += ch
    return '_'.join(piece.lower() for piece in pieces)
if __name__ == '__main__':
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| StarcoderdataPython |
1660095 | <reponame>LCS2-IIITD/Code-mixed-classification<gh_stars>0
# Notebook-exported setup: path configuration, third-party imports, and
# optional Weights & Biases experiment logging.
from __future__ import absolute_import
import sys
import os

sys.path.append('./drive/My Drive/CMC/')

# In[7]:

# Authenticate the wandb CLI (notebook shell escape; no-op outside IPython).
get_ipython().system('wandb login')

# In[2]:

# BUG FIX: a second `from __future__ import absolute_import` appeared here,
# after executable statements; future imports must occur at the top of the
# module, so as a plain .py this was a SyntaxError.  It has been removed.
# The repeated sys/os imports are harmless no-ops and are kept.
import sys
import os
import shutil

# python-dotenv is optional; .env loading is skipped when unavailable.
try:
    from dotenv import find_dotenv, load_dotenv
except:
    pass
import argparse

# __file__ is undefined inside notebooks, hence the getcwd() fallbacks.
try:
    sys.path.append(os.path.join(os.path.dirname(__file__), './drive/My Drive/CMC/'))
except:
    sys.path.append(os.path.join(os.getcwd(), './drive/My Drive/CMC/'))

try:
    sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
except:
    sys.path.append(os.path.join(os.getcwd(), '../'))

import pandas as pd
import numpy as np
import pickle
from collections import Counter
from tqdm import tqdm

import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_addons as tfa

# wandb is optional: _has_wandb gates all experiment logging below.
try:
    import wandb
    load_dotenv(find_dotenv())
    wandb.login(key=os.environ['WANDB_API_KEY'])
    from wandb.keras import WandbCallback
    _has_wandb = True
except:
    _has_wandb = False

import tokenizers
from transformers import TFAutoModel, AutoTokenizer, AutoConfig, BertTokenizer

from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn_crfsuite.metrics import flat_classification_report, flat_f1_score

from src import data, models

pd.options.display.max_colwidth = -1

# In[3]:

print(_has_wandb)
# In[4]:

# Command-line configuration; defaults target the Hindi-English POS task.
parser = argparse.ArgumentParser(prog='Trainer',conflict_handler='resolve')

parser.add_argument('--train_data', type=str, default='../data/POS Hindi English Code Mixed Tweets/POS Hindi English Code Mixed Tweets.tsv', required=False,
                    help='train data')
#parser.add_argument('--train_data', type=str, default='./drive/My Drive/CMC/data/POS Hindi English Code Mixed Tweets/POS Hindi English Code Mixed Tweets.tsv', required=False,
#                    help='train data')
parser.add_argument('--val_data', type=str, default=None, required=False,
                    help='validation data')
parser.add_argument('--test_data', type=str, default=None, required=False,
                    help='test data')
parser.add_argument('--transformer_model_pretrained_path', type=str, default='roberta-base', required=False,
                    help='transformer model pretrained path or huggingface model name')
parser.add_argument('--transformer_config_path', type=str, default='roberta-base', required=False,
                    help='transformer config file path or huggingface model name')
parser.add_argument('--transformer_tokenizer_path', type=str, default='roberta-base', required=False,
                    help='transformer tokenizer file path or huggingface model name')
parser.add_argument('--max_text_len', type=int, default=50, required=False,
                    help='maximum length of text')
parser.add_argument('--max_char_len', type=int, default=100, required=False,
                    help='maximum length of text')
parser.add_argument('--max_word_char_len', type=int, default=20, required=False,
                    help='maximum length of text')
# NOTE(review): the help strings for the next three flags say 'maximum
# length of text' but these are model hyper-parameters -- confirm and fix
# the help text.
parser.add_argument('--emb_dim', type=int, default=128, required=False,
                    help='maximum length of text')
parser.add_argument('--n_layers', type=int, default=2, required=False,
                    help='maximum length of text')
parser.add_argument('--n_units', type=int, default=128, required=False,
                    help='maximum length of text')
parser.add_argument('--epochs', type=int, default=500, required=False,
                    help='number of epochs')
parser.add_argument('--lr', type=float, default=.001, required=False,
                    help='learning rate')
parser.add_argument('--early_stopping_rounds', type=int, default=50, required=False,
                    help='number of epochs for early stopping')
parser.add_argument('--lr_schedule_round', type=int, default=30, required=False,
                    help='number of epochs for learning rate scheduling')
parser.add_argument('--train_batch_size', type=int, default=32, required=False,
                    help='train batch size')
parser.add_argument('--eval_batch_size', type=int, default=16, required=False,
                    help='eval batch size')
parser.add_argument('--model_save_path', type=str, default='../models/hindi_pos/', required=False,
                    help='model')
#parser.add_argument('--model_save_path', type=str, default='./drive/My Drive/CMC/models/hindi_pos/', required=False,
#                    help='seed')
parser.add_argument('--wandb_logging', type=bool, default=True, required=False,
                    help='wandb logging needed')
parser.add_argument('--seed', type=int, default=42, required=False,
                    help='seed')

# parse_known_args so extra notebook-kernel argv entries are ignored.
args, _ = parser.parse_known_args()

# In[5]:

# Seed TensorFlow and NumPy for reproducibility.
tf.random.set_seed(args.seed)
np.random.seed(args.seed)

# In[6]:
# Load the CoNLL-formatted POS data and build a sentence-level DataFrame.
pos_data = data.data_utils.CoNLLSeqData(filepath=args.train_data, label_index=2)
df = pd.DataFrame()
df['sentence'] = pos_data.sentence
df['text'] = [" ".join(i) for i in pos_data.words]
df['category'] = [" ".join(i) for i in pos_data.labels]

# 80/10/10 split: carve off one fifth of the data as a holdout, then split
# the holdout in half into validation and test sets.
kf = KFold(n_splits=5, shuffle=True, random_state=args.seed)
for train_index, holdout_index in kf.split(df.text):
    break
train_df = df.iloc[holdout_index] if False else df.iloc[train_index]

# BUG FIX: the holdout must be split *within itself*.  KFold.split yields
# positions relative to the array it is given; indexing the full frame with
# positions from the holdout subset made val/test overlap the training data.
holdout_df = df.iloc[holdout_index]
kf2 = KFold(n_splits=2, shuffle=True, random_state=args.seed)
for val_index, test_index in kf2.split(holdout_df.text):
    break
val_df = holdout_df.iloc[val_index]
test_df = holdout_df.iloc[test_index]

# In[7]:

print(train_df.shape, val_df.shape, test_df.shape)

# Ensure the model output directory exists (ignore "already exists").
model_save_dir = args.model_save_path
try:
    os.makedirs(model_save_dir)
except OSError:
    pass
# ### Learn tokenizer

# In[11]:

# The WordPiece vocab is assumed to already exist under model_save_path
# (the learning step below is commented out after its first run).
#data.custom_tokenizers.custom_wp_tokenizer(train_df.text.values, args.model_save_path, args.model_save_path)
tokenizer = BertTokenizer.from_pretrained(args.model_save_path)

# In[12]:

# Word-, character- and tag-level Keras tokenizers, fit on training text only.
word_tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=50000, split=' ',oov_token=1, filters='')
char_tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True, split='',oov_token=1)
tag_tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=50000, split=' ', filters='', lower=False)

word_tokenizer.fit_on_texts(train_df.text.values)
char_tokenizer.fit_on_texts(train_df.text.values)
tag_tokenizer.fit_on_texts(train_df.category.values)

# In[13]:

# Tag <-> index maps (Keras indices start at 1; 0 is reserved for padding).
label2idx = tag_tokenizer.word_index
idx2label = tag_tokenizer.index_word
# In[14]:

# Build model inputs for each split: transformer subword ids, padded word
# ids, per-word character ids, flat character ids, and gold tag sequences
# (padded both to word length and, separately, to character length).
transformer_train_inputs, _, _ = data.data_utils.compute_transformer_input_arrays(train_df, 'text', tokenizer, args.max_char_len)

word_train_inputs = word_tokenizer.texts_to_sequences(train_df.text.values)
word_train_inputs = tf.keras.preprocessing.sequence.pad_sequences(word_train_inputs, maxlen=args.max_text_len)

subword_train_inputs = np.asarray([data.data_utils.subword_tokenization(text, char_tokenizer, args.max_text_len, args.max_word_char_len)\
                                   for text in tqdm(train_df.text.values)])

char_train_inputs = char_tokenizer.texts_to_sequences(train_df.text.values)
char_train_inputs = tf.keras.preprocessing.sequence.pad_sequences(char_train_inputs, maxlen=args.max_char_len)

train_outputs = tag_tokenizer.texts_to_sequences(train_df.category.values)
train_outputs = tf.keras.preprocessing.sequence.pad_sequences(train_outputs, maxlen=args.max_text_len)

train_outputs2 = tag_tokenizer.texts_to_sequences(train_df.category.values)
train_outputs2 = tf.keras.preprocessing.sequence.pad_sequences(train_outputs2, maxlen=args.max_char_len)

# Validation split features (same pipeline as training).
transformer_val_inputs, _, _ = data.data_utils.compute_transformer_input_arrays(val_df, 'text', tokenizer, args.max_char_len)

word_val_inputs = word_tokenizer.texts_to_sequences(val_df.text.values)
word_val_inputs = tf.keras.preprocessing.sequence.pad_sequences(word_val_inputs, maxlen=args.max_text_len)

subword_val_inputs = np.asarray([data.data_utils.subword_tokenization(text, char_tokenizer, args.max_text_len, args.max_word_char_len)\
                                 for text in tqdm(val_df.text.values)])

char_val_inputs = char_tokenizer.texts_to_sequences(val_df.text.values)
char_val_inputs = tf.keras.preprocessing.sequence.pad_sequences(char_val_inputs, maxlen=args.max_char_len)

val_outputs = tag_tokenizer.texts_to_sequences(val_df.category.values)
val_outputs = tf.keras.preprocessing.sequence.pad_sequences(val_outputs, maxlen=args.max_text_len)

val_outputs2 = tag_tokenizer.texts_to_sequences(val_df.category.values)
val_outputs2 = tf.keras.preprocessing.sequence.pad_sequences(val_outputs2, maxlen=args.max_char_len)

# Test split features (same pipeline as training).
transformer_test_inputs, _, _ = data.data_utils.compute_transformer_input_arrays(test_df, 'text', tokenizer, args.max_char_len)

word_test_inputs = word_tokenizer.texts_to_sequences(test_df.text.values)
word_test_inputs = tf.keras.preprocessing.sequence.pad_sequences(word_test_inputs, maxlen=args.max_text_len)

subword_test_inputs = np.asarray([data.data_utils.subword_tokenization(text, char_tokenizer, args.max_text_len, args.max_word_char_len)\
                                  for text in tqdm(test_df.text.values)])

char_test_inputs = char_tokenizer.texts_to_sequences(test_df.text.values)
char_test_inputs = tf.keras.preprocessing.sequence.pad_sequences(char_test_inputs, maxlen=args.max_char_len)

test_outputs = tag_tokenizer.texts_to_sequences(test_df.category.values)
test_outputs = tf.keras.preprocessing.sequence.pad_sequences(test_outputs, maxlen=args.max_text_len)

test_outputs2 = tag_tokenizer.texts_to_sequences(test_df.category.values)
test_outputs2 = tf.keras.preprocessing.sequence.pad_sequences(test_outputs2, maxlen=args.max_char_len)

# One-hot encode the tag ids (index 0 is the padding class).
train_outputs = tf.keras.utils.to_categorical(train_outputs, num_classes=len(label2idx)+1)
val_outputs = tf.keras.utils.to_categorical(val_outputs, num_classes=len(label2idx)+1)
test_outputs = tf.keras.utils.to_categorical(test_outputs, num_classes=len(label2idx)+1)

train_outputs2 = tf.keras.utils.to_categorical(train_outputs2,num_classes=len(label2idx)+1)
val_outputs2 = tf.keras.utils.to_categorical(val_outputs2,num_classes=len(label2idx)+1)
test_outputs2 = tf.keras.utils.to_categorical(test_outputs2,num_classes=len(label2idx)+1)

# Word- and character-n-gram TF-IDF features, fit on the training split only.
tfidf1 = TfidfVectorizer(stop_words='english',ngram_range=(1,3), max_df=.6,min_df=2)
tfidf2 = TfidfVectorizer(analyzer='char_wb',ngram_range=(1,3), max_df=.6,min_df=2)

tfidf1.fit(train_df.text)
tfidf2.fit(train_df.text)

train_tfidf = np.hstack([tfidf1.transform(train_df.text).toarray(),tfidf2.transform(train_df.text).toarray()])
val_tfidf = np.hstack([tfidf1.transform(val_df.text).toarray(),tfidf2.transform(val_df.text).toarray()])
test_tfidf = np.hstack([tfidf1.transform(test_df.text).toarray(),tfidf2.transform(test_df.text).toarray()])

# Sanity check: shapes of all five model inputs and both target layouts.
print (transformer_train_inputs.shape, subword_train_inputs.shape, word_train_inputs.shape, char_train_inputs.shape,\
       train_tfidf.shape, train_outputs.shape, train_outputs2.shape)
print (transformer_val_inputs.shape, subword_val_inputs.shape, word_val_inputs.shape, char_val_inputs.shape,\
       val_tfidf.shape, val_outputs.shape, val_outputs2.shape)
print (transformer_test_inputs.shape, subword_test_inputs.shape, word_test_inputs.shape, char_test_inputs.shape, \
       test_tfidf.shape, test_outputs.shape, test_outputs2.shape)
# ### Modeling

# In[15]:

# Vocabulary sizes (+1 for the padding/OOV index 0) and output dimension.
n_words = len(word_tokenizer.word_index)+1
n_chars = len(char_tokenizer.word_index)+1
n_subwords = tokenizer.vocab_size
tfidf_shape = train_tfidf.shape[1]
n_out = len(label2idx)+1

# In[16]:

# Notebook-style bare expression (displays the tag vocab; no effect here).
label2idx

# In[17]:

# Register the padding tag and rebuild the index->label map shifted by one.
# NOTE(review): idx2label is keyed on `value-1`, offsetting it relative to
# label2idx -- this matches the argmax over predictions with the padding
# column sliced off during evaluation; confirm before reusing elsewhere.
label2idx['PAD'] = 0
idx2label[0] = 'PAD'
idx2label = {value-1: key for (key,value) in label2idx.items()}

# In[18]:

from src.models.models import *
#from src.models.layers import *

# Candidate architectures to benchmark (others kept commented for reference).
#all_models = {CS_ELMO.__name__: CS_ELMO, HIT.__name__:HIT, HAN.__name__: HAN, WLSTM.__name__: WLSTM }
all_models = {HIT_outer.__name__:HIT_outer,HIT.__name__: HIT}
# In[19]:

# Train every (architecture, loss) combination with early stopping and
# append the held-out F1 scores to results.csv, resuming if it exists.
if os.path.exists(os.path.join(args.model_save_path, 'results.csv')):
    results = pd.read_csv(os.path.join(args.model_save_path, 'results.csv'))
    index = results.shape[0]
    print(results)
else:
    results = pd.DataFrame(columns=['config', 'weighted_f1', 'macro_f1'])
    index = 0

for model_name, model_ in all_models.items():
    for loss in ['ce', 'focal']:
        model = model_(word_vocab_size=n_words, char_vocab_size=n_chars, wpe_vocab_size=n_subwords,
                       n_out=n_out, seq_output=True, max_word_char_len=args.max_word_char_len,
                       max_text_len=args.max_text_len, max_char_len=args.max_char_len,
                       n_layers=args.n_layers, n_units=args.n_units, emb_dim=args.emb_dim)

        print("Running {} without features for {} loss".format(model_name, loss))
        print(model.summary())

        if loss == 'focal':
            model.compile(loss=models.utils.categorical_focal_loss(alpha=1), optimizer='adam',
                          metrics=['accuracy', models.utils.f1_keras])
        elif loss == 'ce':
            model.compile(loss='categorical_crossentropy', optimizer='adam',
                          metrics=['accuracy', models.utils.f1_keras])

        # Reduce the learning rate when validation loss plateaus.
        lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.7,
                                                  patience=args.lr_schedule_round, verbose=1,
                                                  mode='auto', min_lr=0.000001)

        config = {
            'text_max_len': args.max_text_len,
            'char_max_len': args.max_char_len,
            'word_char_max_len': args.max_word_char_len,
            'n_units': args.n_units,
            'emb_dim': args.emb_dim,
            'n_layers': args.n_layers,
            'epochs': args.epochs,
            "learning_rate": args.lr,
            "model_name": model_name,
            "loss": loss,
            "use_features": False
        }

        model_save_path = os.path.join(args.model_save_path, '{}_{}.h5'.format(model_name, config['loss']))

        # Non-transformer models are supervised at word length; the
        # 'Transformer' entry (if enabled) uses char-length targets.  The
        # two original branches were otherwise identical and are merged.
        if model_name != 'Transformer':
            train_y, val_y = train_outputs, val_outputs
        else:
            train_y, val_y = train_outputs2, val_outputs2

        # Early stopping / best-checkpoint saving on validation F1.
        f1callback = models.utils.SeqF1Callback(model,
                                                [word_val_inputs, char_val_inputs, subword_val_inputs, transformer_val_inputs, val_tfidf],
                                                val_y, filename=model_save_path, patience=args.early_stopping_rounds)

        K.clear_session()
        if _has_wandb and args.wandb_logging:
            wandb.init(project='hindi_pos', config=config)
            model.fit([word_train_inputs, char_train_inputs, subword_train_inputs, transformer_train_inputs, train_tfidf], train_y,
                      validation_data=([word_val_inputs, char_val_inputs, subword_val_inputs, transformer_val_inputs, val_tfidf], val_y),
                      epochs=args.epochs, batch_size=args.train_batch_size, callbacks=[lr, f1callback, WandbCallback()], verbose=2)
        else:
            model.fit([word_train_inputs, char_train_inputs, subword_train_inputs, transformer_train_inputs, train_tfidf], train_y,
                      validation_data=([word_val_inputs, char_val_inputs, subword_val_inputs, transformer_val_inputs, val_tfidf], val_y),
                      epochs=args.epochs, batch_size=args.train_batch_size, callbacks=[lr, f1callback], verbose=2)

        # Restore the best checkpoint and evaluate on the test split.
        model.load_weights(model_save_path)

        # BUG FIX: the original called model.predict a second time with only
        # four of the five model inputs (TF-IDF missing), overwriting the
        # correct predictions and failing on the input-count mismatch.
        test_pred = model.predict([word_test_inputs, char_test_inputs, subword_test_inputs, transformer_test_inputs, test_tfidf])

        # Drop the padding class column before scoring.
        # NOTE(review): the transformer branch also scores against the
        # word-length test_outputs -- confirm if 'Transformer' is re-enabled.
        test_pred = test_pred[:, :, 1:]

        report = flat_classification_report([[idx2label[j] for j in i] for i in test_outputs[:, :, 1:].argmax(-1)],
                                            [[idx2label[j] for j in i] for i in test_pred.argmax(-1)])
        f1 = flat_f1_score(test_outputs[:, :, 1:].argmax(-1), test_pred.argmax(-1), average='weighted')

        results.loc[index, 'config'] = str(config)
        results.loc[index, 'weighted_f1'] = flat_f1_score(test_outputs[:, :, 1:].argmax(-1), test_pred.argmax(-1), average='weighted')
        results.loc[index, 'macro_f1'] = flat_f1_score(test_outputs[:, :, 1:].argmax(-1), test_pred.argmax(-1), average='macro')
        results.to_csv(os.path.join(args.model_save_path, 'results.csv'), index=False)

        index += 1

        print(report, f1)
| StarcoderdataPython |
# Example: capture a previously authorized charge with the MundiAPI SDK.
from mundiapi.mundiapi_client import MundiapiClient
from mundiapi.models import *
from mundiapi.controllers import *

# Basic-auth credential placeholder (secret key followed by a colon).
MundiapiClient.config.basic_auth_user_name = "YOUR_SECRET_KEY:"

# NOTE(review): this rebinds the imported `charges_controller` module name
# to a controller instance; it works once but shadows the module afterwards.
charges_controller = charges_controller.ChargesController()

# Identifier of the charge to capture.
chargeId = "ch_8YQ1JeTLzF8zlqWy"

request = create_capture_charge_request.CreateCaptureChargeRequest()
request.code = "new_code"

try:
    # Capture the full authorized amount for the charge.
    result = charges_controller.capture_charge(chargeId, request)
    assert result is not None
    assert result.paid_amount == result.amount
    assert result.status == "paid"
    assert result.last_transaction.status == "captured"

    print("Captured amount: ", result.paid_amount)
    print("Charge status: ", result.status)
    print("Last Transaction status: ", result.last_transaction.status)
    print("Charge is captured")
except ErrorException as ex:
    # API-level failure: show the message and per-field errors.
    print(ex.message)
    print("Errors: ", ex.errors)
except Exception as ex:
    raise ex
| StarcoderdataPython |
5000675 | <reponame>UmaTaru/run
""" This file allows multiple jobs to be run on a server.
After each job, an email is sent to notify desired people
of its completion.
Must specify a text job file that contains the names and
commands for each job. Each job has 4 lines, containing:
1) the name, 2) the command, 3) the location of a file where
the job output should be saved, 4) a blank line.
An example job file format is as follows:
Job1
python job.py path1 arg1
path/output1.txt
Job2
python job.py path2 arg2
path/output2.txt
Usage: python run_jobs.py jobs.txt
"""
import os
import sys
import string
import socket
from time import time
from run_utils import *
# Jobs finishing faster than this are flagged as suspicious (see run_job).
MINIMUM_JOB_SECONDS = 600 # 10 minutes
# How many trailing lines of job output to include in the e-mail body.
PRINT_LAST_X_LINES = 300
# Status codes returned by run_job.
ERROR = 1
SUCCESS = 0
WARNING = 2
class Job:
    """A single named shell command plus the file its output is saved to."""

    def __init__(self, name, command, output_file):
        self.name = name
        self.command = command
        # Trailing newline comes from reading the job file line by line.
        self.output_file = output_file.rstrip('\n')


def load_job_file(filename):
    """Parse a job file into Job objects.

    The file contains 4 lines per job: name, command, output path, and a
    blank separator line (see the module docstring for the format).

    Args:
        filename: path to the job description file.

    Returns:
        A list of Job instances, in file order.
    """
    # Context manager so the handle is closed even on error (the original
    # leaked the open file).
    with open(filename, 'r') as f:
        lines = f.readlines()
    jobs = []
    for i in range(0, len(lines), 4):
        jobs.append(Job(lines[i], lines[i + 1], lines[i + 2]))
    return jobs
def run_job(job_obj):
    """Run a job's shell command, save its output, and classify the result.

    Args:
        job_obj: an instance of the Job class.

    Returns:
        (code, message): code is SUCCESS, WARNING or ERROR; message is the
        text (including the tail of the job output) to e-mail to the user.
    """
    print("\nRunning job", job_obj.name)

    # Refuse to clobber an existing output file.
    if os.path.exists(job_obj.output_file):
        message = "The desired output file " + job_obj.output_file + " already exists."
        print("Error!", message)
        return ERROR, message

    t0 = time()

    # Execute the command; the context manager closes the pipe (the
    # original leaked the os.popen handle).
    with os.popen(job_obj.command) as stream:
        output = stream.read()
    print(output)

    # Save the full output to the requested file.
    with open(job_obj.output_file, 'w') as of:
        of.write(output)

    t1 = time()
    total_secs = t1 - t0
    hours, mins, secs = get_secs_mins_hours_from_secs(total_secs)
    time_str = "Job ended. Total time taken: " + str(int(hours)) + "h " + str(int(mins)) + "m " + str(int(secs)) + "s"
    print(time_str)

    # Classify the run: missing output is an error; a suspiciously quick
    # finish is a warning; anything else succeeded.
    if not os.path.exists(job_obj.output_file):
        message = "Job failed to create the desired output file."
        print("Error!", message)
        code = ERROR
    elif total_secs < MINIMUM_JOB_SECONDS:
        message = "The total time taken for the job was suspiciously short."
        print("Warning!", message)
        code = WARNING
    else:
        message = ""
        print("Job finished successfully!")
        code = SUCCESS

    # Append timing info and the last PRINT_LAST_X_LINES lines of output.
    lines = output.split('\n')
    tail = "\n".join(lines[-PRINT_LAST_X_LINES:])
    message += "\n\n" + time_str + "\n\n"
    message += "The last " + str(PRINT_LAST_X_LINES) + " lines of job output were:\n\n"
    message += tail
    return code, message
def email_about_job(job_obj, status, output):
    """Send a status e-mail for a finished (or failed) job."""
    subjects = {
        ERROR: "Error! Problem with job " + job_obj.name,
        SUCCESS: "Success! Job " + job_obj.name + " is finished",
    }
    # Any status other than ERROR/SUCCESS is treated as the warning case.
    title = subjects.get(status,
                         "Warning! Job " + job_obj.name + " finished too quickly")
    send_email(title, output)
def run_jobs(jobfile):
    """Run every job listed in *jobfile*, e-mailing after each completes.

    Args:
        jobfile: path to the job description file (see module docstring).
    """
    hostname = socket.gethostname()
    # BUG FIX: the original read the module-global `filename` (only set
    # when run as a script) instead of the `jobfile` parameter, so calling
    # run_jobs() as a library function raised NameError.
    jobs = load_job_file(jobfile)

    # Send initial test mail
    send_email("Starting to run " + jobfile + " jobs on " + hostname,
               "This is a test email to confirm email updates are enabled for " + jobfile + " jobs.")

    # Run all jobs, mailing the outcome of each.
    for job in jobs:
        status, output = run_job(job)
        email_about_job(job, status, output)

    # Send final email
    send_email("ALL JOBS FINISHED!!", "Congratulations, all of the jobs in the file " + jobfile + " have finished running.")
if __name__ == "__main__":
    # BUG FIX: len(sys.argv) is always >= 1 (argv[0] is the script name),
    # so the old `< 1` check never fired and a missing argument crashed
    # below with an IndexError.  Check for the job-file argument and exit.
    if len(sys.argv) < 2:
        print("Error! Usage is python run_jobs.py jobs.txt")
        print("See this file's documentation for required format for jobs.txt")
        sys.exit(1)
    filename = sys.argv[1]
    jobfile = sys.argv[1]
    print("Running all jobs in file", jobfile, ". . .")
    run_jobs(jobfile)
| StarcoderdataPython |
4826012 | import socket
import uuid
# Character encoding used for all str <-> bytes conversions in this module.
ENCODING_FORMAT = 'UTF-8'
def get_hostname():
    """Return the local machine's host name."""
    hostname = socket.gethostname()
    return hostname
def get_ip():
    """Return the IPv4 address that the local host name resolves to."""
    hostname = get_hostname()
    return socket.gethostbyname(hostname)
def get_uuid():
    """Return a fresh UUID1 as a 32-character lowercase hex string."""
    unique = uuid.uuid1()
    return unique.hex
def get_rand_name():
    """Return a quasi-unique name of the form "<ip>_<6 hex chars>"."""
    prefix = get_ip()
    return prefix + "_" + get_uuid()[:6]
def is_ip(ip_str):
    """Return True if ``ip_str`` is a valid dotted-quad IPv4 address.

    BUG FIX: the original was a stub that always returned True (the intended
    regex was commented out). This implements the same check the commented
    pattern expressed: exactly four decimal fields, each in 0-255 (leading
    zeros accepted, as the regex allowed).
    """
    parts = ip_str.split('.')
    if len(parts) != 4:
        return False
    for part in parts:
        # isdigit() rejects empty fields, signs and non-numeric characters.
        if not part.isdigit():
            return False
        if not 0 <= int(part) <= 255:
            return False
    return True
def ip_2_str(ip):
    """Join a sequence of octets into a dotted-quad string."""
    return '.'.join(map(str, ip))
def str_2_ip(ip_str):
    """Split a dotted-quad string into a list of integer octets."""
    return list(map(int, ip_str.split('.')))
def str_2_bytes(s):
    """Encode any value's string form using the module encoding."""
    text = str(s)
    return text.encode(encoding=ENCODING_FORMAT)
def bytes_2_str(b):
    """Decode a bytes-like value using the module encoding."""
    raw = bytes(b)
    return raw.decode(encoding=ENCODING_FORMAT)
| StarcoderdataPython |
3540934 | from __future__ import absolute_import, print_function
import signal
import sys
from multiprocessing import cpu_count
import click
from sentry.runner.decorators import configuration, log_options
from sentry.bgtasks.api import managed_bgtasks
class AddressParamType(click.ParamType):
    """Click param type turning "host[:port]" strings into (host, port) pairs."""

    name = "address"

    def __call__(self, value, param=None, ctx=None):
        # Overridden so a missing value maps to an all-None pair instead of
        # being passed to convert().
        if value is None:
            return (None, None)
        return self.convert(value, param, ctx)

    def convert(self, value, param, ctx):
        # Split on the first colon only; everything after it must be a port.
        host, sep, port_text = value.partition(":")
        port = int(port_text) if sep else None
        return host, port


Address = AddressParamType()
class QueueSetType(click.ParamType):
    """Click param type parsing a comma-separated queue list into a frozenset,
    expanding the removed legacy `events` queue into its three replacements."""

    name = "text"

    def convert(self, value, param, ctx):
        if value is None:
            return None
        # Providing a compatibility with splitting
        # the `events` queue until multiple queues
        # without the need to explicitly add them.
        queues = set()
        for entry in value.split(","):
            if entry != "events":
                queues.add(entry)
                continue
            queues.update(
                (
                    "events.preprocess_event",
                    "events.process_event",
                    "events.save_event",
                )
            )
            from sentry.runner.initializer import show_big_error

            show_big_error(
                [
                    "DEPRECATED",
                    "`events` queue no longer exists.",
                    "Switch to using:",
                    "- events.preprocess_event",
                    "- events.process_event",
                    "- events.save_event",
                ]
            )
        return frozenset(queues)


QueueSet = QueueSetType()
@click.group()
def run():
    # Root click group: service subcommands (web, smtp, worker, cron, ...)
    # attach themselves below via @run.command(). The bare string is the
    # docstring click shows as the CLI help text, so it is left unchanged.
    "Run a service."
@run.command()
@click.option("--bind", "-b", default=None, help="Bind address.", type=Address)
@click.option(
    "--workers", "-w", default=0, help="The number of worker processes for handling requests."
)
@click.option("--upgrade", default=False, is_flag=True, help="Upgrade before starting.")
@click.option(
    "--with-lock", default=False, is_flag=True, help="Use a lock if performing an upgrade."
)
@click.option(
    "--noinput", default=False, is_flag=True, help="Do not prompt the user for input of any kind."
)
@click.option(
    "--uwsgi/--no-uwsgi",
    default=True,
    is_flag=True,
    help="Use uWSGI (default) or non-uWSGI (useful for debuggers such as PyCharm's)",
)
@log_options()
@configuration
def web(bind, workers, upgrade, with_lock, noinput, uwsgi):
    "Run web service."
    # Optionally run DB migrations/upgrade before the server starts.
    if upgrade:
        click.echo("Performing upgrade before service startup...")
        from sentry.runner import call_command
        try:
            call_command(
                "sentry.runner.commands.upgrade.upgrade",
                verbosity=0,
                noinput=noinput,
                lock=with_lock,
            )
        except click.ClickException:
            # With --with-lock, a concurrent upgrade is expected: skip quietly.
            if with_lock:
                click.echo("!! Upgrade currently running from another process, skipping.", err=True)
            else:
                raise
    with managed_bgtasks(role="web"):
        if not uwsgi:
            # Debug-friendly path: plain wsgiref server, single process.
            click.echo(
                "Running simple HTTP server. Note that chunked file "
                "uploads will likely not work.",
                err=True,
            )
            from django.conf import settings
            # `bind` is the (host, port) pair produced by AddressParamType;
            # each element may be None, in which case settings provide defaults.
            host = bind[0] or settings.SENTRY_WEB_HOST
            port = bind[1] or settings.SENTRY_WEB_PORT
            click.echo("Address: http://%s:%s/" % (host, port))
            from wsgiref.simple_server import make_server
            from sentry.wsgi import application
            httpd = make_server(host, port, application)
            httpd.serve_forever()
        else:
            # Production path: uWSGI-based server; blocks until shutdown.
            from sentry.services.http import SentryHTTPServer
            SentryHTTPServer(host=bind[0], port=bind[1], workers=workers).run()
@run.command()
@click.option("--bind", "-b", default=None, help="Bind address.", type=Address)
@click.option("--upgrade", default=False, is_flag=True, help="Upgrade before starting.")
@click.option(
    "--noinput", default=False, is_flag=True, help="Do not prompt the user for input of any kind."
)
@configuration
def smtp(bind, upgrade, noinput):
    "Run inbound email service."
    if upgrade:
        click.echo("Performing upgrade before service startup...")
        from sentry.runner import call_command
        call_command("sentry.runner.commands.upgrade.upgrade", verbosity=0, noinput=noinput)
    from sentry.services.smtp import SentrySMTPServer
    with managed_bgtasks(role="smtp"):
        # `bind` is the (host, port) pair from AddressParamType; both elements
        # may be None when --bind is omitted.
        SentrySMTPServer(host=bind[0], port=bind[1]).run()
@run.command()
@click.option(
    "--hostname",
    "-n",
    # NOTE(review): implicit string concatenation here renders as
    # "Expands: %h(hostname)" with no space before the parenthesis.
    help=(
        "Set custom hostname, e.g. 'w1.%h'. Expands: %h" "(hostname), %n (name) and %d, (domain)."
    ),
)
@click.option(
    "--queues",
    "-Q",
    type=QueueSet,
    help=(
        "List of queues to enable for this worker, separated by "
        "comma. By default all configured queues are enabled. "
        "Example: -Q video,image"
    ),
)
@click.option("--exclude-queues", "-X", type=QueueSet)
@click.option(
    "--concurrency",
    "-c",
    default=cpu_count(),
    help=(
        "Number of child processes processing the queue. The "
        "default is the number of CPUs available on your "
        "system."
    ),
)
@click.option(
    "--logfile", "-f", help=("Path to log file. If no logfile is specified, stderr is used.")
)
@click.option("--quiet", "-q", is_flag=True, default=False)
@click.option("--no-color", is_flag=True, default=False)
@click.option("--autoreload", is_flag=True, default=False, help="Enable autoreloading.")
@click.option("--without-gossip", is_flag=True, default=False)
@click.option("--without-mingle", is_flag=True, default=False)
@click.option("--without-heartbeat", is_flag=True, default=False)
@click.option("--max-tasks-per-child", default=10000)
@log_options()
@configuration
def worker(**options):
    "Run background worker instance."
    from django.conf import settings
    # Eager mode executes tasks inline; a dedicated worker makes no sense then.
    if settings.CELERY_ALWAYS_EAGER:
        raise click.ClickException(
            "Disable CELERY_ALWAYS_EAGER in your settings file to spawn workers."
        )
    from sentry.celery import app
    with managed_bgtasks(role="worker"):
        # All CLI options are forwarded to the Celery worker unchanged.
        worker = app.Worker(
            # without_gossip=True,
            # without_mingle=True,
            # without_heartbeat=True,
            pool_cls="processes",
            **options
        )
        worker.start()
        try:
            sys.exit(worker.exitcode)
        except AttributeError:
            # `worker.exitcode` was added in a newer version of Celery:
            # https://github.com/celery/celery/commit/dc28e8a5
            # so this is an attempt to be forwards compatible
            pass
@run.command()
@click.option(
    "--pidfile",
    help=(
        "Optional file used to store the process pid. The "
        "program will not start if this file already exists and "
        "the pid is still alive."
    ),
)
@click.option(
    "--logfile", "-f", help=("Path to log file. If no logfile is specified, stderr is used.")
)
@click.option("--quiet", "-q", is_flag=True, default=False)
@click.option("--no-color", is_flag=True, default=False)
@click.option("--autoreload", is_flag=True, default=False, help="Enable autoreloading.")
@click.option("--without-gossip", is_flag=True, default=False)
@click.option("--without-mingle", is_flag=True, default=False)
@click.option("--without-heartbeat", is_flag=True, default=False)
@log_options()
@configuration
def cron(**options):
    "Run periodic task dispatcher."
    from django.conf import settings
    # Beat needs a real broker; eager mode would run tasks inline and defeat
    # the scheduler entirely.
    if settings.CELERY_ALWAYS_EAGER:
        raise click.ClickException(
            "Disable CELERY_ALWAYS_EAGER in your settings file to spawn workers."
        )
    from sentry.celery import app
    with managed_bgtasks(role="cron"):
        # Celery beat scheduler; blocks until shutdown.
        app.Beat(
            # without_gossip=True,
            # without_mingle=True,
            # without_heartbeat=True,
            **options
        ).run()
@run.command("post-process-forwarder")
@click.option(
    "--consumer-group",
    default="snuba-post-processor",
    help="Consumer group used to track event offsets that have been enqueued for post-processing.",
)
@click.option(
    "--commit-log-topic",
    default="snuba-commit-log",
    help="Topic that the Snuba writer is publishing its committed offsets to.",
)
@click.option(
    "--synchronize-commit-group",
    default="snuba-consumers",
    help="Consumer group that the Snuba writer is committing its offset as.",
)
@click.option(
    "--commit-batch-size",
    default=1000,
    type=int,
    help="How many messages to process (may or may not result in an enqueued task) before committing offsets.",
)
@click.option(
    "--initial-offset-reset",
    default="latest",
    type=click.Choice(["earliest", "latest"]),
    help="Position in the commit log topic to begin reading from when no prior offset has been recorded.",
)
@log_options()
@configuration
def post_process_forwarder(**options):
    # Forward committed event offsets from the eventstream into post-process
    # tasks. Blocks until shutdown unless the backend needs no forwarder.
    from sentry import eventstream
    from sentry.eventstream.base import ForwarderNotRequired
    try:
        eventstream.run_post_process_forwarder(
            consumer_group=options["consumer_group"],
            commit_log_topic=options["commit_log_topic"],
            synchronize_commit_group=options["synchronize_commit_group"],
            commit_batch_size=options["commit_batch_size"],
            initial_offset_reset=options["initial_offset_reset"],
        )
    except ForwarderNotRequired:
        # e.g. the default in-process eventstream enqueues tasks directly.
        sys.stdout.write(
            "The configured event stream backend does not need a forwarder "
            "process to enqueue post-process tasks. Exiting...\n"
        )
        return
@run.command("query-subscription-consumer")
@click.option(
    "--group",
    default="query-subscription-consumer",
    help="Consumer group to track query subscription offsets. ",
)
@click.option("--topic", default=None, help="Topic to get subscription updates from.")
@click.option(
    "--commit-batch-size",
    default=100,
    type=int,
    help="How many messages to process before committing offsets.",
)
@click.option(
    "--initial-offset-reset",
    default="latest",
    type=click.Choice(["earliest", "latest"]),
    help="Position in the commit log topic to begin reading from when no prior offset has been recorded.",
)
@log_options()
@configuration
def query_subscription_consumer(**options):
    # Consume query-subscription updates from Kafka; runs until interrupted.
    from sentry.snuba.query_subscription_consumer import QuerySubscriptionConsumer
    subscriber = QuerySubscriptionConsumer(
        group_id=options["group"],
        topic=options["topic"],
        commit_batch_size=options["commit_batch_size"],
        initial_offset_reset=options["initial_offset_reset"],
    )
    def handler(signum, frame):
        # Ctrl-C triggers a clean shutdown instead of killing mid-batch.
        subscriber.shutdown()
    signal.signal(signal.SIGINT, handler)
    subscriber.run()
| StarcoderdataPython |
1899635 | <reponame>Iwan-Zotow/runEGS
# -*- coding: utf-8 -*-
import math
import logging
import numpy as np
import phandim
EPS = 1.0e-4
def invariant(shot, the_range, steps, nr):
    """Validate phantom construction parameters.

    Parameters
    ----------
    shot: float
        shot position, mm
    the_range: (float, float)
        phantom range (min, max), mm
    steps: (float, float)
        steps to do in phantom (small, large), mm
    nr: int
        number of small steps, defined by collimator size

    returns: bool
        True when all parameters are usable, False otherwise
    """
    rmin, rmax = the_range
    smin, smax = steps
    # Reject any NaN among the scalar inputs.
    if any(np.isnan(v) for v in (shot, rmin, rmax, smin, smax)):
        return False
    # Range must be non-degenerate and ordered.
    if rmax <= rmin:
        return False
    # Small step may not exceed the large step.
    if smin > smax:
        return False
    # Need at least one small step.
    if nr < 1:
        return False
    return True
def build_one_boundary(shot, the_range, steps, nr):
    """
    Build phantom one dimension boundaries from shot position and min/max range

    Parameters
    ----------
    shot: float
        shot position, mm
    the_range: (float,float)
        phantom range, (min,max), mm
    steps: (float,float)
        steps to do in phantom, (small,large), mm
    nr: integer
        number of small steps, defined by collimator size

    returns: array
        phantom one dimension boundaries, sorted ascending; fine (small-step)
        spacing around the shot, coarse (large-step) spacing towards the edges
    """
    logging.info("building one boundary")
    if (not invariant(shot, the_range, steps, nr)):
        raise ValueError("build_one_boundary", "invariant failed")
    rmin, rmax = the_range
    smin, smax = steps
    # we know shot position is within the range
    # going backward
    bs = []
    # first, with small steps
    pos = shot
    for k in range(0, nr+1):
        # k == 0 appends the shot position itself
        pos = shot - float(k) * smin
        bs.append(pos)
        # stop when rmin is (approximately) reached or overshot
        if math.fabs(pos - rmin) < EPS:
            break
        if pos < rmin:
            break
    # now large steps, continue from previous position
    # NOTE(review): this loop always runs at least once, so if the small-step
    # loop already reached rmin, one extra point below rmin is appended —
    # confirm whether that overshoot is intended.
    while True:
        pos = pos - smax
        bs.append(pos)
        if math.fabs(pos - rmin) < EPS:
            break
        if pos < rmin:
            break
    # revert the list
    bs.reverse()
    # going forward
    # first, with small steps (k starts at 1: shot point already appended)
    for k in range(1, nr+1):
        pos = shot + float(k) * smin
        bs.append(pos)
        if math.fabs(pos - rmax) < EPS:
            break
        if pos > rmax:
            break
    # now large steps, continue from previous position
    while True:
        pos = pos + smax
        bs.append(pos)
        if math.fabs(pos - rmax) < EPS:
            break
        if pos > rmax:
            break
    logging.info("done building one boundary")
    return bs
def build_phandim(shot, x_range, y_range, z_range, steps, nr):
    """Construct the phantom dimensions object for a given shot.

    Parameters
    ----------
    shot: (float, float)
        shot (Y, Z) position, mm
    x_range, y_range, z_range: (float, float)
        phantom extents (min, max) along each axis, mm
    steps: (float, float)
        (small, large) step sizes, mm
    nr: integer
        number of small steps around the shot, set by the collimator

    returns: phandim
        phantom dimensions object built from the three boundary arrays
    """
    logging.info("building phandim")
    for value in (shot, x_range, y_range, z_range, steps, nr):
        logging.debug(str(value))
    shot_y, shot_z = shot
    # X boundaries: the shot is always centred at 0 along X.
    bx = build_one_boundary(0.0, x_range, steps, nr)
    by = build_one_boundary(shot_y, y_range, steps, nr)
    bz = build_one_boundary(shot_z, z_range, steps, nr)
    logging.info("done building phandim")
    return phandim.phandim(bx, by, bz)
| StarcoderdataPython |
1184 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Tests of precipitation_type utilities"""
import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
def set_up_test_cube(n_thresholds=1):
    """Build a texture-of-cloud probability cube for shower condition tests."""
    thresholds = np.arange(n_thresholds)
    # A leading threshold dimension is only present when thresholds exist.
    if n_thresholds > 0:
        shape = [n_thresholds, 2, 2]
    else:
        shape = [2, 2]
    data = np.ones(shape, dtype=FLOAT_DTYPE)
    return set_up_probability_cube(
        data,
        thresholds,
        variable_name="texture_of_cloud_area_fraction",
        threshold_units=1,
        spatial_grid="equalarea",
    )
def test_basic():
    """Test that with a valid input the cube is transformed into a shower
    condition cube."""
    source = set_up_test_cube()
    result = make_shower_condition_cube(source)
    assert result.name() == "probability_of_shower_condition_above_threshold"
    assert result.dtype == FLOAT_DTYPE
    assert (result.data == source.data).all()
    threshold_coord = result.coord(var_name="threshold")
    assert threshold_coord.name() == "shower_condition"
    assert threshold_coord.units == 1
def test_no_threshold_coord():
    """Test an exception is raised if the proxy diagnostic cube does not have
    a threshold coordinate."""
    cube = set_up_test_cube()
    cube.remove_coord("texture_of_cloud_area_fraction")
    with pytest.raises(
        CoordinateNotFoundError,
        match="Input has no threshold coordinate and cannot be used",
    ):
        make_shower_condition_cube(cube)
def test_multi_valued_threshold_coord():
    """Test an exception is raised if the proxy diagnostic cube has a multi
    valued threshold coordinate."""
    cube = set_up_test_cube(n_thresholds=2)
    with pytest.raises(ValueError, match="Expected a single valued threshold coordinate.*"):
        make_shower_condition_cube(cube)
| StarcoderdataPython |
342695 | <gh_stars>0
from Bot.models import TelegramUser
from Profile.models import Profile
from ..bot import TelegramBot
from .BotComponent import ReplyKeyboardHome
from Bot.BotSetting import ChannelName
# Home
def go_home(chat_id, bot: TelegramBot):
    """Send the home-screen message together with the main reply keyboard."""
    # Message text: "first page" plus a house emoji (kept verbatim).
    message = "صفحه اول " \
              "🏠"
    bot.sendMessage(chat_id, message, reply_markup=ReplyKeyboardHome)
def fail_home(chat_id, bot: TelegramBot):
    """Tell the user their input was not understood and re-show the keyboard."""
    # Message text: "please use the commands below" plus a pointer emoji.
    message = "لطفا از دستورات زیر استفاده کنید." \
              "👇"
    bot.sendMessage(chat_id, message, reply_markup=ReplyKeyboardHome)
def join_channel(chat_id, user_id, bot: TelegramBot):
    """Return True if the user is a member of the required channel.

    When the user has left (status 'left'), prompt them to join the channel
    named by ChannelName and return False.
    """
    user_status_in_channel = bot.getChatMember(chat_id=ChannelName, user_id=user_id)
    if user_status_in_channel.status == 'left':
        # Prompt text: "please join the channel below before any transaction".
        message = f"لطفا قبل از انجام ترکانش ها، در کانال زیر عضو شوید\n" \
                  f"{ChannelName}"
        bot.sendMessage(chat_id, message, reply_markup=ReplyKeyboardHome)
        return False
    return True
def check_profile(user: TelegramUser):
    """Return True when the user's profile exists and every required field
    (Name, Family, ClubCode, PhoneNumber, UserConf) is set (truthy).

    Returns False when the user has no profile or any lookup fails.
    """
    try:
        profile: Profile = user.profile
        state = (
            profile.Name
            and profile.Family
            and profile.ClubCode
            and profile.PhoneNumber
            and profile.UserConf
        )
        # Normalise to a real bool: the `and` chain otherwise returns the
        # last field's value, leaking profile data to callers.
        return bool(state)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any profile-lookup failure (e.g. the related
        # Profile row is missing) still reports an incomplete profile.
        return False
def go_wallet_charge(chat_id, bot: TelegramBot):
    """Send the wallet top-up instructions (bank, card number, IBAN, account
    number) and ask the user to reply with a transaction receipt photo.

    The text is sent with parse_mode="HTML" and is kept verbatim.
    """
    message = "💵 برای شارژ کیف پول خود لطفا مبلغ مورد نظر را به شماره حساب زیر واریز نمایید.\n\n" \
              "🏦 بانک: آینده\n\n" \
              "👤 صاحب حساب مشترک: محمد امین احمدی و حسین لاوری منفرد\n\n" \
              "شماره کارت: \n" \
              "6362141121991930\n\n" \
              "کد شبا: \n" \
              "IR970620000000302950973001\n\n" \
              "شماره حساب:\n" \
              "0302950973001\n\n" \
              "سپس در همین قسمت عکس مربوط به تراکنش را ارسال نمایید، تا به صورت اتومات برای ادمین ارسال شود و کیف پول شما نیز شارژ شود .\n" \
              "با تشکر 🌹"
    bot.sendMessage(chat_id, message, parse_mode="HTML")
| StarcoderdataPython |
288055 |
class OslotsFeature(object):
    """Accessor for the node->slots mapping.

    `data` unpacks as (mapping, maxSlot, maxNode): nodes 1..maxSlot are slots
    themselves; nodes maxSlot+1..maxNode map to tuples stored in `mapping`.
    """

    def __init__(self, api, metaData, data):
        self.api = api
        self.meta = metaData
        self.data = data[0]
        self.maxSlot = data[1]
        self.maxNode = data[2]

    def items(self):
        """Yield (node, slots) pairs for every non-slot node."""
        first = self.maxSlot + 1
        payload = self.data
        for node in range(first, self.maxNode + 1):
            yield (node, payload[node - first])

    def s(self, n):
        """Return the tuple of slots contained in node ``n``."""
        if n == 0:
            return ()
        if n < self.maxSlot + 1:
            # A slot node contains exactly itself.
            return (n,)
        index = n - self.maxSlot - 1
        if index < len(self.data):
            return self.data[index]
        # Out of range: no slots.
        return ()
| StarcoderdataPython |
8126306 | import asyncio
import logging
import os
import json
import rustsgxgen
from .base import Module
from ..nodes import NativeNode
from .. import tools
from .. import glob
from ..crypto import Encryption
from ..dumpers import *
from ..loaders import *
from ..manager import get_manager
BUILD_APP = "cargo build {} {} --manifest-path={}/Cargo.toml"
class Object:
    """Empty mutable attribute container (used as an ad-hoc args namespace)."""
class Error(Exception):
    """Module-level error type raised for native-module failures."""
class NativeModule(Module):
    """A reactive module compiled to a native (non-SGX) executable.

    Code generation, compilation and key derivation are performed lazily and
    memoised through asyncio futures, so each step runs at most once per
    instance even under concurrent awaits.
    """

    def __init__(self, name, node, priority, deployed, nonce, attested, features,
                 id_, binary, key, data, folder, port):
        self.out_dir = os.path.join(glob.BUILD_DIR, "native-{}".format(folder))
        super().__init__(name, node, priority, deployed, nonce, attested, self.out_dir)
        # Futures are pre-seeded when the values are already known (e.g. the
        # module was loaded from a previous deployment's dump).
        self.__generate_fut = tools.init_future(data, key)
        self.__build_fut = tools.init_future(binary)
        self.features = [] if features is None else features
        self.id = id_ if id_ is not None else node.get_module_id()
        self.port = port or self.node.reactive_port + self.id
        self.folder = folder

    @staticmethod
    def load(mod_dict, node_obj):
        """Construct a NativeModule from its dict description (see dump())."""
        name = mod_dict['name']
        node = node_obj
        priority = mod_dict.get('priority')
        deployed = mod_dict.get('deployed')
        nonce = mod_dict.get('nonce')
        attested = mod_dict.get('attested')
        features = mod_dict.get('features')
        id_ = mod_dict.get('id')
        binary = parse_file_name(mod_dict.get('binary'))
        key = parse_key(mod_dict.get('key'))
        data = mod_dict.get('data')
        folder = mod_dict.get('folder') or name
        port = mod_dict.get('port')
        return NativeModule(name, node, priority, deployed, nonce, attested,
                            features, id_, binary, key, data, folder, port)

    def dump(self):
        """Serialise this module to a dict (inverse of load())."""
        return {
            "type": "native",
            "name": self.name,
            "node": self.node.name,
            "priority": self.priority,
            "deployed": self.deployed,
            "nonce": self.nonce,
            "attested": self.attested,
            "features": self.features,
            "id": self.id,
            "binary": dump(self.binary) if self.deployed else None,
            # For native, key is generated at compile time
            "key": dump(self.key) if self.deployed else None,
            "data": dump(self.data) if self.deployed else None,
            "folder": self.folder,
            "port": self.port
        }

    # --- Properties --- #

    @property
    async def data(self):
        # Module layout produced by code generation (inputs, outputs, ...).
        data, _key = await self.generate_code()
        return data

    @property
    async def inputs(self):
        data = await self.data
        return data["inputs"]

    @property
    async def outputs(self):
        data = await self.data
        return data["outputs"]

    @property
    async def entrypoints(self):
        data = await self.data
        return data["entrypoints"]

    @property
    async def handlers(self):
        data = await self.data
        return data["handlers"]

    @property
    async def requests(self):
        data = await self.data
        return data["requests"]

    @property
    async def key(self):
        # Module key is produced as a by-product of code generation.
        _data, key = await self.generate_code()
        return key

    @property
    async def binary(self):
        return await self.build()

    # --- Implement abstract methods --- #

    async def build(self):
        """Compile the module once; returns the path to the built binary."""
        if self.__build_fut is None:
            self.__build_fut = asyncio.ensure_future(self.__build())
        return await self.__build_fut

    async def deploy(self):
        await self.node.deploy(self)

    async def attest(self):
        # With a manager configured, attest through it; otherwise just force
        # local key generation before marking the module attested.
        if get_manager() is not None:
            await self.__attest_manager()
        else:
            await self.key
        self.attested = True

    async def get_id(self):
        return self.id

    async def get_input_id(self, input_):
        """Resolve an input name to its id (numeric ids pass through)."""
        if isinstance(input_, int):
            return input_
        inputs = await self.inputs
        if input_ not in inputs:
            raise Error("Input not present in inputs")
        return inputs[input_]

    async def get_output_id(self, output):
        """Resolve an output name to its id (numeric ids pass through)."""
        if isinstance(output, int):
            return output
        outputs = await self.outputs
        if output not in outputs:
            raise Error("Output not present in outputs")
        return outputs[output]

    async def get_entry_id(self, entry):
        """Resolve an entrypoint name or numeric string to its id."""
        try:
            return int(entry)
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
        # `except (TypeError, ValueError)` would be the precise guard here.
        except:
            entrypoints = await self.entrypoints
            if entry not in entrypoints:
                raise Error("Entry not present in entrypoints")
            return entrypoints[entry]

    async def get_request_id(self, request):
        """Resolve a request name to its id (numeric ids pass through)."""
        if isinstance(request, int):
            return request
        requests = await self.requests
        if request not in requests:
            raise Error("Request not present in requests")
        return requests[request]

    async def get_handler_id(self, handler):
        """Resolve a handler name to its id (numeric ids pass through)."""
        if isinstance(handler, int):
            return handler
        handlers = await self.handlers
        if handler not in handlers:
            raise Error("Handler not present in handlers")
        return handlers[handler]

    async def get_key(self):
        return await self.key

    @staticmethod
    def get_supported_nodes():
        return [NativeNode]

    @staticmethod
    def get_supported_encryption():
        return [Encryption.AES, Encryption.SPONGENT]

    # --- Static methods --- #

    # --- Others --- #

    async def generate_code(self):
        """Run the code generator once; returns the (data, key) pair."""
        if self.__generate_fut is None:
            self.__generate_fut = asyncio.ensure_future(self.__generate_code())
        return await self.__generate_fut

    async def __generate_code(self):
        # Ad-hoc namespace mimicking the generator's argparse arguments.
        args = Object()
        args.input = self.folder
        args.output = self.out_dir
        args.moduleid = self.id
        args.emport = self.node.deploy_port
        args.runner = rustsgxgen.Runner.NATIVE
        args.spkey = None
        args.print = None
        data, key = rustsgxgen.generate(args)
        logging.info("Generated code for module {}".format(self.name))
        return data, key

    async def __build(self):
        # Code must be generated before the cargo build can run.
        await self.generate_code()
        release = "--release" if glob.get_build_mode() == glob.BuildMode.RELEASE else ""
        # NOTE(review): by operator precedence this evaluates as
        # ("--features " + joined) if self.features else "" — i.e. the whole
        # flag is empty when no features are set, which is the intended result.
        features = "--features " + \
            " ".join(self.features) if self.features else ""
        cmd = BUILD_APP.format(release, features, self.out_dir).split()
        await tools.run_async(*cmd)
        # TODO there might be problems with two (or more) modules built from
        # the same source code but with different features. Since the
        # working dir is the same (for caching reasons) there might be some
        # problems when these SMs are built at the same time.
        # Find a way to solve this issue.
        binary = os.path.join(self.out_dir,
                              "target", glob.get_build_mode().to_str(), self.folder)
        logging.info("Built module {}".format(self.name))
        return binary

    async def __attest_manager(self):
        # Hand the module's connection details and key to the attestation
        # manager CLI, then verify the key it echoes back matches ours.
        data = {
            "id": self.id,
            "name": self.name,
            "host": str(self.node.ip_address),
            "port": self.port,
            "em_port": self.node.reactive_port,
            "key": list(await self.key)
        }
        data_file = tools.create_tmp(suffix=".json")
        with open(data_file, "w") as f:
            json.dump(data, f)
        args = "--config {} --request attest-native --data {}".format(
            get_manager().config, data_file).split()
        out, _ = await tools.run_async_output(glob.ATTMAN_CLI, *args)
        # NOTE(review): eval on subprocess output — the CLI is trusted here,
        # but ast.literal_eval would be the safer parser for a list literal.
        key_arr = eval(out)  # from string to array
        key = bytes(key_arr)  # from array to bytes
        if await self.key != key:
            raise Error(
                "Received key is different from {} key".format(self.name))
        logging.info("Done Remote Attestation of {}. Key: {}".format(
            self.name, key_arr))
| StarcoderdataPython |
9788753 | import argparse
# Build the shared CLI parser at import time.
# BUG FIX: the original did `parser = None` immediately followed by
# `if not parser:` — a guard that was always true (dead code) — so the
# parser is now constructed unconditionally.
parser = argparse.ArgumentParser()
parser.add_argument('--nodeos-ip', metavar='', help="Ip address of nodeos ", default='127.0.0.1', dest="nodeos_ip")
parser.add_argument('--keosd-ip', metavar='', help="Ip address of keosd", default='127.0.0.1', dest="keosd_ip")
parser.add_argument('--nodeos-port', metavar='', help="Port", default='8888')
parser.add_argument('--keosd-port', metavar='', help="Port", default='8900')
parser.add_argument('--master-wallet-name', metavar='', help="Name of main wallet.", default="beos_master_wallet")
parser.add_argument('--path-to-cleos', metavar='', help="Path to cleos executable.")
parser.add_argument('--scenarios', metavar='', help="Path to scenario(s) *.py file(s).")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.