content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["Markus Löning"]
__all__ = ["test_gscv_fit", "test_rscv_fit"]
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import ParameterGrid, ParameterSampler
from sktime.datasets import load_airline
from sktime.forecasting.compose import ReducedForecaster
from sktime.forecasting.compose import TransformedTargetForecaster
from sktime.forecasting.model_selection import ForecastingGridSearchCV
from sktime.forecasting.model_selection import ForecastingRandomizedSearchCV
from sktime.forecasting.model_selection import SingleWindowSplitter
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.tests._config import TEST_OOS_FHS
from sktime.forecasting.tests._config import TEST_STEP_LENGTHS
from sktime.forecasting.tests._config import TEST_WINDOW_LENGTHS
from sktime.forecasting.tests._config import TEST_RANDOM_SEEDS
from sktime.forecasting.tests._config import TEST_N_ITERS
from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.performance_metrics.forecasting import make_forecasting_scorer
from sktime.performance_metrics.forecasting import sMAPE
from sktime.transformations.series.detrend import Detrender
def compute_expected_gscv_scores(forecaster, cv, param_grid, y, scoring):
    """Re-run the grid search by hand and return one score per candidate.

    For every parameter combination in ``param_grid`` a fresh clone of
    ``forecaster`` is fitted on the initial training window of ``cv`` and
    evaluated with ``scoring`` only on the time points it actually predicted.
    """
    train_idx, test_idx = cv.split_initial(y)
    y_train = y.iloc[train_idx]
    y_test = y.iloc[test_idx]
    expected = np.zeros(len(param_grid))
    for pos, candidate in enumerate(param_grid):
        model = clone(forecaster)
        model.set_params(**candidate)
        model.fit(y_train, fh=cv.fh)
        y_pred = model.update_predict(y_test, cv)
        # select only time points which we predicted
        expected[pos] = scoring(y_test.loc[y_pred.index], y_pred)
    return expected
@pytest.mark.parametrize(
    "forecaster, param_dict",
    [
        (NaiveForecaster(strategy="mean"), {"window_length": TEST_WINDOW_LENGTHS}),
        # atomic estimator
        (
            TransformedTargetForecaster(
                [  # composite estimator
                    ("t", Detrender(PolynomialTrendForecaster())),
                    ("f", ReducedForecaster(LinearRegression(), scitype="regressor")),
                ]
            ),
            {
                "f__window_length": TEST_WINDOW_LENGTHS,
                "f__step_length": TEST_STEP_LENGTHS,
            },
        ),  # multiple params
    ],
)
@pytest.mark.parametrize(
    "scoring",
    [sMAPE(), make_forecasting_scorer(mean_squared_error, greater_is_better=False)],
)
@pytest.mark.parametrize(
    "cv",
    [
        *[SingleWindowSplitter(fh=fh) for fh in TEST_OOS_FHS],
        # single split with multi-step fh
        SlidingWindowSplitter(fh=1, initial_window=50)
        # multiple splits with single-step fh
    ],
)
def test_gscv_fit(forecaster, param_dict, cv, scoring):
    """ForecastingGridSearchCV must reproduce manually computed CV scores
    and report best_params_/best_forecaster_ consistently with them."""
    param_grid = ParameterGrid(param_dict)
    y = load_airline()
    gscv = ForecastingGridSearchCV(
        forecaster, param_grid=param_dict, cv=cv, scoring=scoring
    )
    gscv.fit(y)
    # check scores against a manual re-run of the same grid search
    gscv_scores = gscv.cv_results_[f"mean_test_{scoring.name}"]
    expected_scores = compute_expected_gscv_scores(
        forecaster, cv, param_grid, y, scoring
    )
    np.testing.assert_array_equal(gscv_scores, expected_scores)
    # check best parameters
    # NOTE(review): argmin assumes scoring is a loss (lower is better) —
    # holds for both sMAPE and the MSE scorer parametrized above.
    assert gscv.best_params_ == param_grid[gscv_scores.argmin()]
    # check best forecaster is the one with best parameters
    assert {
        key: value
        for key, value in gscv.best_forecaster_.get_params().items()
        if key in gscv.best_params_.keys()
    } == gscv.best_params_
@pytest.mark.parametrize(
    "forecaster, param_dict",
    [
        (NaiveForecaster(strategy="mean"), {"window_length": TEST_WINDOW_LENGTHS}),
        # atomic estimator
        (
            TransformedTargetForecaster(
                [  # composite estimator
                    ("t", Detrender(PolynomialTrendForecaster())),
                    # NOTE(review): "regressor" is passed positionally here but
                    # as scitype="regressor" in test_gscv_fit — confirm both
                    # bind to the same parameter.
                    ("f", ReducedForecaster(LinearRegression(), "regressor")),
                ]
            ),
            {
                "f__window_length": TEST_WINDOW_LENGTHS,
                "f__step_length": TEST_STEP_LENGTHS,
            },
        ),  # multiple params
    ],
)
@pytest.mark.parametrize(
    "scoring",
    [sMAPE(), make_forecasting_scorer(mean_squared_error, greater_is_better=False)],
)
@pytest.mark.parametrize(
    "cv",
    [
        *[SingleWindowSplitter(fh=fh) for fh in TEST_OOS_FHS],
        # single split with multi-step fh
        SlidingWindowSplitter(fh=1, initial_window=50)
        # multiple splits with single-step fh
    ],
)
@pytest.mark.parametrize(
    "n_iter",
    TEST_N_ITERS,
)
@pytest.mark.parametrize(
    "random_state",
    TEST_RANDOM_SEEDS,
)
def test_rscv_fit(forecaster, param_dict, cv, scoring, n_iter, random_state):
    """Tests that ForecastingRandomizedSearchCV successfully searches the
    parameter distributions to identify the best parameter set
    """
    # samples uniformly from param dict values; with the same random_state the
    # search below draws the identical candidate sequence
    param_distributions = ParameterSampler(
        param_dict, n_iter, random_state=random_state
    )
    y = load_airline()
    rscv = ForecastingRandomizedSearchCV(
        forecaster,
        param_distributions=param_dict,
        cv=cv,
        scoring=scoring,
        n_iter=n_iter,
        random_state=random_state,
    )
    rscv.fit(y)
    # check scores
    rscv_scores = rscv.cv_results_[f"mean_test_{scoring.name}"]
    # convert ParameterSampler to list to ensure consistent # of scores
    expected_scores = compute_expected_gscv_scores(
        forecaster, cv, list(param_distributions), y, scoring
    )
    np.testing.assert_array_equal(rscv_scores, expected_scores)
    # check best parameters (argmin assumes loss-like scoring, as above)
    assert rscv.best_params_ == list(param_distributions)[rscv_scores.argmin()]
    # check best forecaster is the one with best parameters
    assert {
        key: value
        for key, value in rscv.best_forecaster_.get_params().items()
        if key in rscv.best_params_.keys()
    } == rscv.best_params_
|
nilq/baby-python
|
python
|
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for any plugin- or framework-specific behaviour of the plugin devices"""
import pytest
import numpy as np
from plugin_name.qiskit_device import z_eigs
from plugin_name import Device1
# Pauli-Z matrix; its Kronecker powers give the expected eigenvalue patterns.
Z = np.diag([1, -1])


class TestZEigs:
    r"""Test that eigenvalues of Z^{\otimes n} are correctly generated"""

    def test_one(self):
        """eigs(Z) must equal [1, -1]."""
        assert np.all(z_eigs(1) == np.array([1, -1]))

    @pytest.mark.parametrize("n", [2, 3, 6])
    def test_multiple(self, n):
        r"""eigs(Z^{\otimes n}) must match the diagonal of the n-fold Kronecker power."""
        kron_power = Z
        for _ in range(n - 1):
            kron_power = np.kron(kron_power, Z)
        assert np.all(z_eigs(n) == np.diag(kron_power))
class TestProbabilities:
    """Tests for the probability function"""

    def test_probability_no_results(self):
        """probabilities() must be None before any job has been executed."""
        device = Device1(backend="statevector_simulator", wires=1, shots=0)
        assert device.probabilities() is None
|
nilq/baby-python
|
python
|
## @file test_git_dependency.py
# Unit test suite for the GitDependency class.
#
##
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import unittest
from edk2toolext.environment import var_dict
class TestVarDict(unittest.TestCase):
    """Unit tests for edk2toolext.environment.var_dict.VarDict.

    Covers basic set/get, defaults, override semantics, key case
    insensitivity, and the bld_<TARGET>_<name> build-value helpers.
    """

    def setUp(self):
        # A fresh VarDict is created inline in each test; nothing shared.
        pass

    def tearDown(self):
        pass

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_var_dict_basic_set_get(self):
        """A stored value can be read back with GetValue."""
        v = var_dict.VarDict()
        v.SetValue("test1", "value1", "test 1 comment")
        ## confirm basic get
        vv = v.GetValue("test1")
        self.assertEqual("value1", vv)

    def test_var_dict_get_key_is_none(self):
        """GetValue(None) returns None rather than raising."""
        v = var_dict.VarDict()
        self.assertIsNone(v.GetValue(None))

    def test_var_dict_get_key_unknown_return_value(self):
        """Unknown keys return None, or the caller-supplied default."""
        v = var_dict.VarDict()
        self.assertIsNone(v.GetValue("invalidkey"))
        self.assertEqual("test1", v.GetValue("invalidkey", "test1"))

    def test_var_dict_cant_override(self):
        """Values set without the override flag keep their first value."""
        v = var_dict.VarDict()
        v.SetValue("test1", "value1", "test 1 comment")
        ## confirm override == false
        v.SetValue("test1", "value2", "test for override")
        vv = v.GetValue("test1")
        self.assertEqual("value1", vv)
        v.SetValue("test1", "value1", "set same")  # to get coverage
        vv = v.GetValue("test1")
        self.assertEqual("value1", vv)

    def test_var_dict_can_override(self):
        """Values set with overridable=True take the latest value."""
        v = var_dict.VarDict()
        v.SetValue("test1", "value1", "test 1 comment", True)
        ## confirm override == true
        v.SetValue("test1", "value2", "test for override")
        vv = v.GetValue("test1")
        self.assertEqual("value2", vv)

    def test_var_dict_key_not_case_sensitive(self):
        """Keys stored lowercase can be fetched uppercase."""
        v = var_dict.VarDict()
        v.SetValue("test1", "value1", "test 1 comment")
        ## confirm case sensitivity
        vv = v.GetValue("TEST1")
        self.assertEqual("value1", vv)

    def test_var_dict_key_not_case_sensitive2(self):
        """Keys stored uppercase can be fetched lowercase."""
        v = var_dict.VarDict()
        v.SetValue("TEST1", "value1", "test 1 comment")
        ## confirm case sensitivity
        vv = v.GetValue("test1")
        self.assertEqual("value1", vv)

    def test_var_dict_key_not_case_sensitive3(self):
        """Mixed-case store and fetch also match."""
        v = var_dict.VarDict()
        v.SetValue("TeSt1", "value1", "test 1 comment")
        ## confirm case sensitivity
        vv = v.GetValue("tEsT1")
        self.assertEqual("value1", vv)

    def test_var_dict_build_value_when_type_para_used(self):
        """GetBuildValue resolves bld_<type>_<name> using the type argument."""
        v = var_dict.VarDict()
        v.SetValue("bld_debug_test1", "builddvalue1", "build dtest 1 comment")
        v.SetValue("bld_release_test1", "buildrvalue1", "build rtest 1 comment")
        ## confirm with correct build type debug
        vv = v.GetBuildValue("TEST1", "DEBUG")
        self.assertEqual("builddvalue1", vv)
        ## confirm with correct build type release
        vv = v.GetBuildValue("TEST1", "release")
        self.assertEqual("buildrvalue1", vv)

    def test_var_dict_build_value_none_for_key(self):
        """GetBuildValue(None, ...) returns None."""
        v = var_dict.VarDict()
        v.SetValue("bld_debug_test1", "builddvalue1", "build test 1 comment")
        self.assertIsNone(v.GetBuildValue(None, "DEBUG"))

    def test_var_dict_build_value_when_type_para_used_wc(self):
        """bld_*_<name> acts as a wildcard fallback; exact type wins over it."""
        v = var_dict.VarDict()
        v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
        ## confirm wildcard support build type fail back to *
        vv = v.GetBuildValue("TEST1", "DEBUG")
        self.assertEqual("buildvalue1", vv)
        vv = v.GetBuildValue("TEST1", "RELEASE")
        self.assertEqual("buildvalue1", vv)
        ## confirm match has higher priority
        v.SetValue("bld_debug_test1", "builddvalue1", "build test 1 comment")
        vv = v.GetBuildValue("TEST1", "DEBUG")
        self.assertEqual("builddvalue1", vv)
        v.SetValue("bld_release_test1", "buildrvalue1", "build test 1 comment")
        vv = v.GetBuildValue("TEST1", "release")
        self.assertEqual("buildrvalue1", vv)
        vv = v.GetBuildValue("TEST1", "NOOPT")
        self.assertEqual("buildvalue1", vv)

    def test_var_dict_build_value_when_target_set(self):
        """With TARGET set, GetBuildValue works without an explicit type."""
        v = var_dict.VarDict()
        v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
        v.SetValue("TARGET", "DEBUG", "Set to Debug")
        ## confirm can get it with target set
        vv = v.GetBuildValue("TEST1")
        self.assertEqual("buildvalue1", vv)

    def test_var_dict_build_value_when_no_build_type(self):
        """Without a type argument or TARGET, build values are unreachable."""
        v = var_dict.VarDict()
        v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
        ## confirm can't get it without build type or target set
        vv = v.GetBuildValue("TEST1")
        self.assertEqual(None, vv)

    def test_var_dict_get_all_with_no_entires(self):
        """Non-build entries are excluded from GetAllBuildKeyValues."""
        v = var_dict.VarDict()
        v.SetValue("test1", "buildvalue1", "build test 1 comment")
        v.SetValue("test2", "test", "non build value")
        ## confirm result has no values
        vlist = v.GetAllBuildKeyValues("DEBUG")
        self.assertEqual(len(vlist), 0)

    def test_var_dict_get_all_with_no_target(self):
        """With neither a type argument nor TARGET, result is empty."""
        v = var_dict.VarDict()
        v.SetValue("test1", "buildvalue1", "build test 1 comment")
        v.SetValue("test2", "test", "non build value")
        ## confirm result has no values
        vlist = v.GetAllBuildKeyValues()
        self.assertEqual(len(vlist), 0)

    def test_var_dict_get_all_build_key_values_and_not_other_values(self):
        """GetAllBuildKeyValues returns only bld_ entries for the build type."""
        v = var_dict.VarDict()
        v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
        v.SetValue("test2", "test", "non build value")
        ## confirm result only has 1 value
        vlist = v.GetAllBuildKeyValues("DEBUG")
        self.assertEqual(len(vlist), 1)
        ## confirm override behavior
        v.SetValue("Target", "DEBUG", "Set target to debug")
        v.SetValue("bld_release_test1", "buildvalue1", "build test 1 comment")
        vlist = v.GetAllBuildKeyValues()
        self.assertEqual(len(vlist), 1)
        ## override using parameter for build type
        vlist = v.GetAllBuildKeyValues("RELEASE")
        self.assertEqual(len(vlist), 1)

    def test_var_dict_print_all(self):
        """PrintAll runs without raising (coverage of the debug printer)."""
        v = var_dict.VarDict()
        v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
        v.SetValue("test2", "value1", "test 1 comment overrideable", True)
        v.PrintAll()
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
|
nilq/baby-python
|
python
|
import os
from os import listdir
from os.path import isfile, join
import cv2
import numpy as np
# Crop every image in pillPictures/<number> to a fixed 2000x2000 region and
# save the crops into pillPictures/saved with sequential names.
number = 2
mypath = "pillPictures/" + str(number)
savepath = "pillPictures/saved"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for img_count, file in enumerate(onlyfiles, start=1):
    image_path = join(mypath, file)
    img = cv2.imread(image_path)
    if img is None:
        # cv2.imread returns None for unreadable/non-image files; the original
        # code would crash slicing None, so skip such files instead.
        print("skipping unreadable file: " + image_path)
        continue
    # fixed crop window: rows 500:2500, cols 1000:3000
    img = img[500:2500, 1000:3000]
    print(img_count)
    out_name = str(number) + "_pill" + "_" + str(img_count) + '.jpg'
    cv2.imwrite(os.path.join(savepath, out_name), img)
|
nilq/baby-python
|
python
|
import numpy as np
import cv2
from mss import mss
from PIL import Image
# There's no native way of handling the feature of getting the window "always on top"
# It's OS dependent forcing it to not be cross platform
# -> this is a windows way of handling things. Marked with TODOs
#import os
# signals and signal handlers for garbage collection -> obsolete as there's an easier solution with a shared variable
# import signal
# shared_flag shared by multiple threads
#shared_flag = 0
class SCR():
    """Screen-capture viewer: repeatedly grabs a bounding box with mss and
    displays it in a named OpenCV window until the user presses 'p' or
    right-clicks inside the window.
    """

    # class var
    # One flag slot per screen window; 1 marks a window as currently running.
    # NOTE(review): assumes at most 4 concurrent screens with single-digit
    # indices — confirm callers respect this.
    arr = [0] * 4
    # Region of the desktop to capture; callers adjust it via setVar().
    bounding_box = {'top': 0, 'left': 0, 'width': 1000, 'height': 1000}
    # To keep up with the active monitors, array elements are used as placeholders for each active screen

    def __init__(self):
        # mss screen-grabber instance reused for every frame
        self.sct = mss()

    def setVar(self, top, left, width, height):
        """Replace the capture region with the given rectangle."""
        self.bounding_box = {'top': top, 'left': left, 'width': width, 'height': height}

    def run(self, name):
        """Show a live view of the bounding box in window `name` until stopped.

        `name` is expected to look like "screenN": character 6 holds the
        digit N used as the index into `arr` — TODO confirm with callers.
        """
        if (self.arr[int(name[6])] == 0):
            # print(name[6] + "\'th bucket got filled up !")
            self.arr[int(name[6])] = 1
        # loop until the flag is cleared (by 'p' below or by callback_func)
        while (self.arr[int(name[6])] == 1):
            sct_img = self.sct.grab(self.bounding_box)
            cv2.namedWindow(name, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
            cv2.setMouseCallback(name, self.callback_func, param=name[6])
            cv2.imshow(name, np.array(sct_img))
            if (cv2.waitKey(1) & 0xFF) == ord('p'):
                self.arr[int(name[6])] = 0
                cv2.destroyWindow(name)

    def callback_func(self, event, x, y, flags, param):
        """Mouse callback: a right-click closes screen window `param`."""
        if event == cv2.EVENT_RBUTTONDOWN:
            self.arr[int(param)] = 0
            cv2.destroyWindow('screen' + param)
            # print("destroyed screen" + param)
|
nilq/baby-python
|
python
|
from overrides import overrides
from typing import Dict, Iterator, List, Tuple
import json
from functools import reduce
from operator import mul
import os
def compute_alignment_differences(align_str: str):
aligns = align_str.split(" ")
align_diff = 0.
for align in aligns:
i, j = align.split("-")
align_diff += abs(int(i) - int(j))
align_diff = align_diff/len(aligns)
return align_diff
class Prediction():
    """Base class for filtering parser predictions in a self-training loop.

    Predicted sentences are kept when their confidence product exceeds
    ``conf_threshold`` and their mean word-alignment difference is within
    ``aligndiff_threshold``; kept sentences are appended to the labeled data
    file, rejected ones are written back for the next round.
    """

    def __init__(
        self,
        rawdata_file: str, labeleddata_file: str, leftdata_file: str,
        align_file: str, leftalign_file: str,
        conf_threshold: float, aligndiff_threshold: float,
        test_lang: str, train_lang: str,
    ) -> None:
        super().__init__()
        # input corpus and output files (all jsonl except the align files)
        self.rawdata_file = rawdata_file        # unlabeled input corpus
        self.labeleddata_file = labeleddata_file  # accepted sentences appended here
        self.leftdata_file = leftdata_file      # rejected sentences for next round
        self.align_file = align_file            # one alignment string per sentence
        self.leftalign_file = leftalign_file    # alignments of rejected sentences
        self.test_lang = test_lang              # language tag used at prediction time
        self.train_lang = train_lang            # language tag written to training data
        self.conf_threshold = conf_threshold    # min product of token confidences
        self.aligndiff_threshold = aligndiff_threshold  # max mean alignment diff

    def filtered_snts(self, snts: List[Dict]):
        """Split ``snts`` into accepted and rejected by confidence/alignment.

        Rejected sentences (tokens/postags only) go to ``leftdata_file`` and
        their alignment strings to ``leftalign_file``. Returns the accepted
        sentences. Raises ValueError if alignments and sentences differ in
        number.
        """
        filtered_snts = []
        aligns = self.get_aligns()
        if len(aligns) != len(snts):
            raise ValueError(
                f"the num of alignment differences:{len(aligns)}\
and sentences:{len(snts)} are not equal."
            )
        data_writer = open(self.leftdata_file, "w", encoding="utf-8")
        align_writer = open(self.leftalign_file, "w", encoding="utf-8")
        for snt, align in zip(snts, aligns):
            # overall sentence confidence = product of per-token confidences
            confidence_score = reduce(mul, snt["confidences"])
            align_diff = compute_alignment_differences(align)
            if (confidence_score > self.conf_threshold) and (align_diff <= self.aligndiff_threshold):
                filtered_snts.append(snt)
            else:
                data_writer.write(json.dumps({
                    "tokens": snt["tokens"],
                    "postags": snt["postags"]
                }, ensure_ascii=False) + "\n")
                align_writer.write(align + "\n")
        data_writer.close()
        align_writer.close()
        print(f"the num of the filtered sentences is {len(filtered_snts)}")
        return filtered_snts

    def get_aligns(self) -> List[str]:
        """Read one stripped alignment string per line from ``align_file``."""
        aligns = []
        with open(self.align_file, "r", encoding="utf-8") as reader:
            for line in reader:
                aligns.append(line.strip())
        return aligns

    def writing_snts(self, snts: List[Dict]) -> None:
        """Append accepted sentences (re-tagged with ``train_lang``) to the
        labeled data file as JSON lines."""
        with open(self.labeleddata_file, 'a', encoding='utf-8') as writer:
            print(f'append sentences to {self.labeleddata_file}')
            print(f"please check that language will be overrided to {self.train_lang}.")
            for snt in snts:
                writer.write(json.dumps({
                    "tokens": snt['tokens'],
                    "postags": snt['postags'],
                    "heads": snt['heads'],
                    "deprels": snt['deprels'],
                    "confidences": snt['confidences'],
                    "language": self.train_lang,
                }, ensure_ascii=False) + '\n')
        print(f'{len(snts)} sentences were written to {self.labeleddata_file}')

    def jsonl_reader(
        self,
        inputfile: str,
        override_lang: str = None,
    ) -> Iterator[Dict]:
        """Yield one dict per JSON line, optionally forcing its language tag."""
        print(f"reading data from {inputfile}")
        if override_lang is not None:
            print(f'please check that language will be overrided to {override_lang}')
        with open(inputfile, 'r', encoding='utf-8') as reader:
            for line in reader:
                data = json.loads(line.strip())
                if override_lang:
                    data['language'] = override_lang
                yield data

    def rawdata_processing(self):
        """Prepare raw data for the parser; implemented by subclasses."""
        raise NotImplementedError()

    def processing(self):
        """Run the full filter-and-append step; implemented by subclasses."""
        raise NotImplementedError()
class PipelinePrediction(Prediction):
    """Prediction variant where the parser consumes/produces jsonl files
    (``model_inputfile`` / ``model_outputfile``) between the two steps."""

    def __init__(
        self,
        model_inputfile: str, model_outputfile: str,
        rawdata_file: str, labeleddata_file: str, leftdata_file: str,
        align_file: str, leftalign_file: str,
        conf_threshold: float, aligndiff_threshold: float,
        test_lang: str, train_lang: str,
    ) -> None:
        super().__init__(
            rawdata_file, labeleddata_file, leftdata_file,
            align_file, leftalign_file,
            conf_threshold, aligndiff_threshold,
            test_lang, train_lang
        )
        self.model_inputfile = model_inputfile    # file the parser reads
        self.model_outputfile = model_outputfile  # file the parser writes

    @overrides
    def rawdata_processing(self):
        """Copy the raw corpus into the model input file, re-tagged with the
        test language."""
        num = 0
        with open(self.model_inputfile, 'w', encoding='utf-8') as writer:
            for snt in self.jsonl_reader(self.rawdata_file, override_lang=self.test_lang):
                writer.write(json.dumps(snt, ensure_ascii=False) + '\n')
                num += 1
        print(f"{num} sentences were writted to {self.model_inputfile}")

    @overrides
    def processing(self):
        """Filter the parser's output and append accepted sentences to the
        labeled training data."""
        snts_p = list(self.jsonl_reader(self.model_outputfile))
        snts_p = self.filtered_snts(snts_p)
        self.writing_snts(snts_p)
        print('finish')
def jsonl_reader(inputfile: str, override_lang: str = None) -> List[Dict]:
    """Load a JSON-lines file into a list of dicts.

    When ``override_lang`` is given, every record's "language" field is
    replaced with it.
    """
    if override_lang is not None:
        print(f'please check that language will be overrided to {override_lang}')
    with open(inputfile, 'r', encoding='utf-8') as reader:
        records = [json.loads(raw.strip()) for raw in reader]
    if override_lang is not None:
        for record in records:
            record["language"] = override_lang
    print(f"reading {len(records)} sentences from {inputfile}")
    return records
def prepare_predict_input(
    rawcorpus: str,
    outputfile: str,
    lang: str,
    snt_start: int = None,
    snt_end: int = None
) -> None:
    """Write the raw corpus (re-tagged with ``lang``) to ``outputfile``,
    optionally restricted to the slice [snt_start, snt_end)."""
    snts = jsonl_reader(rawcorpus, override_lang=lang)
    if snt_start is not None:
        snts = snts[snt_start: snt_end]
        print(f"filtering sentences from {snt_start} to {snt_end}")
    writing_jsonl(snts, "w", outputfile)
def filtering(
    snts: List[Dict],
    snts_num: int,
) -> Tuple[List[Dict], List[Dict]]:
    """Split sentences into (top ``snts_num`` by confidence product, rest)."""
    def confidence(sentence):
        # overall confidence = product of per-token confidences
        return reduce(mul, sentence['confidences'])

    ranked = sorted(snts, key=confidence, reverse=True)
    return ranked[:snts_num], ranked[snts_num:]
def writing_jsonl(snts: List[Dict], mode: str, file: str) -> None:
    """Serialize sentences to ``file`` as JSON lines.

    In write mode the target must not already exist (guards against
    clobbering a previous round's output).
    """
    if mode == "w":
        assert not os.path.exists(file), f"{file} exists"
    with open(file, mode, encoding="utf-8") as writer:
        writer.writelines(json.dumps(snt, ensure_ascii=False) + "\n" for snt in snts)
    print(f"writing {len(snts)} sentences to {file} with mode {mode}")
def filter_and_append_pseudo_sentences(
    predictfile: str,
    left_rawcorpus: str,
    labeled_datafile: str,
    lang: str,
    snts_num: int
) -> None:
    """Take the ``snts_num`` most confident predictions from ``predictfile``
    and append them to the labeled data; write the remainder (stripped to
    tokens/postags) back as the next raw corpus."""
    print(f"filter sentences from {predictfile} and append them to {labeled_datafile}")
    predictions = jsonl_reader(predictfile, override_lang=lang)
    accepted, remainder = filtering(predictions, snts_num)
    remainder = [
        {"tokens": snt["tokens"], "postags": snt["postags"]} for snt in remainder
    ]
    writing_jsonl(accepted, "a", labeled_datafile)
    writing_jsonl(remainder, "w", left_rawcorpus)
if __name__ == '__main__':
    # Example driver calls, intentionally disabled; uncomment and adjust the
    # paths/arguments to run one self-training round.
    # prepare_predict_input(
    #     rawcorpus="./data/data2/origin/gd/gd.sorted.jsonl",
    #     outputfile="./results/base0/gd_input.jsonl",
    #     lang="en0",
    #     snt_start=0,
    #     snt_end=16000
    # )
    # filter_and_append_pseudo_sentences(
    #     predictfile="./results/base/roberta0/eva/sv_output.sub.jsonl",
    #     left_rawcorpus="./results/base/roberta0/eva/im_ex/sv.jsonl",
    #     labeled_datafile="./data/data2/train/base/im_ex/sv.jsonl",
    #     lang="sv1",
    #     snts_num=2000
    # )
    pass  # fix: a guard whose body is only comments is a SyntaxError
|
nilq/baby-python
|
python
|
from rockstar import RockStar
css_code = """body:before {
content: "Hello, world!";
}"""
rock_it_bro = RockStar(days=400, file_name='helloworld.css', code=css_code)
rock_it_bro.make_me_a_rockstar()
|
nilq/baby-python
|
python
|
"""Read command line argument.
Assign to _x the string value of the first command line parameter, after the program name.
Source: programming-idioms.org
"""
# Implementation author: nickname
# Created on 2016-02-18T16:58:00.600634Z
# Last modified on 2016-02-18T16:58:00.600634Z
# Version 1
# argv[0] is the program name
import sys
x = sys.argv[1]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 11:24:29 2018
@author: mayank
"""
import numpy as np
#import pandas as pd
#from time import time
from sklearn.model_selection import StratifiedKFold
#import os
#from sklearn.cluster import KMeans
from sklearn.utils import resample
from scipy.stats import mode
#from sklearn.metrics import f1_score
from sklearn.neighbors import NearestNeighbors
from numpy.matlib import repmat
from sklearn.metrics.pairwise import linear_kernel,rbf_kernel,manhattan_distances,polynomial_kernel,sigmoid_kernel,cosine_similarity,laplacian_kernel,paired_euclidean_distances,pairwise_distances
from sklearn.cluster import KMeans,MiniBatchKMeans
from sklearn.decomposition import IncrementalPCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from numpy.linalg import eigh
#%%
#from scipy.io import loadmat
#from sklearn.decomposition import IncrementalPCA
#from sklearn import mixture
class MCM:
    """Minimal Complexity Machine (and LSMCM) classifier/regressor trained
    with SGD, supporting kernel approximation, prototype-vector selection,
    upsampling and simple ensembling."""

    def __init__(self, C1=1.0, C2=1e-05, C3=1.0, C4=1.0, problem_type='classification', algo_type='MCM', kernel_type='rbf', gamma=1e-05, epsilon=0.1,
                 feature_ratio=1.0, sample_ratio=1.0, feature_sel='random', n_ensembles=1,
                 batch_sz=128, iterMax1=1000, iterMax2=1, eta=0.01, tol=1e-08, update_type='adam',
                 reg_type='l1', combine_type='concat', class_weighting='balanced', upsample1=False,
                 PV_scheme='kmeans', n_components=100, do_pca_in_selection=False):
        """Store all hyperparameters; no computation happens here."""
        self.C1 = C1  # hyperparameter 1 #loss function parameter
        self.C2 = C2  # hyperparameter 2 #when using L1 or L2 or ISTA penalty
        self.C3 = C3  # hyperparameter 2 #when using elastic net penalty (this parameter should be between 0 and 1) or margin penalty value need not be between 0 and 1
        self.C4 = C4  # hyperparameter for final regressor or classifier used to ensemble when concatenating
        # the outputs of previos layer of classifier or regressors
        self.problem_type = problem_type  # {0:'classification', 1:'regression'}
        self.algo_type = algo_type  # {0:MCM,1:'LSMCM'}
        self.kernel_type = kernel_type  # {0:'linear', 1:'rbf', 2:'sin', 3:'tanh', 4:'TL1', 5:'linear_primal', 6:'rff_primal', 7:'nystrom_primal'}
        self.gamma = gamma  # hyperparameter3 (kernel parameter for non-linear classification or regression)
        self.epsilon = epsilon  # hyperparameter4 ( It specifies the epsilon-tube within which
        # no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value.)
        self.n_ensembles = n_ensembles  # number of ensembles to be learnt, if setting n_ensembles > 1 then keep the sample ratio to be around 0.7
        self.feature_ratio = feature_ratio  # percentage of features to select for each PLM
        self.sample_ratio = sample_ratio  # percentage of data to be selected for each PLM
        self.batch_sz = batch_sz  # batch_size
        self.iterMax1 = iterMax1  # max number of iterations for inner SGD loop
        self.iterMax2 = iterMax2  # max number of iterations for outer SGD loop
        self.eta = eta  # initial learning rate
        self.tol = tol  # tolerance to cut off SGD
        self.update_type = update_type  # {0:'sgd',1:'momentum',3:'nesterov',4:'rmsprop',5:'adagrad',6:'adam'}
        self.reg_type = reg_type  # {0:'l1', 1:'l2', 2:'en', 4:'ISTA', 5:'M'} #ISTA: iterative soft thresholding (proximal gradient), M: margin + l1
        self.feature_sel = feature_sel  # {0:'sliding', 1:'random'}
        self.class_weighting = class_weighting  # {0:'average', 1:'balanced'}
        self.combine_type = combine_type  # {0:'concat',1:'average',2:'mode'}
        self.upsample1 = upsample1  # {0:False, 1:True}
        self.PV_scheme = PV_scheme  # {0:'kmeans',1:'renyi'}
        self.n_components = n_components  # number of components to choose as Prototype Vector set, or the number of features to form for kernel_approximation as in RFF and Nystroem
        self.do_pca_in_selection = do_pca_in_selection  # {0:False, 1:True}
def add_bias(self,xTrain):
N = xTrain.shape[0]
if(xTrain.size!=0):
xTrain=np.hstack((xTrain,np.ones((N,1))))
return xTrain
def standardize(self,xTrain):
me=np.mean(xTrain,axis=0)
std_dev=np.std(xTrain,axis=0)
#remove columns with zero std
idx=(std_dev!=0.0)
# print(idx.shape)
xTrain[:,idx]=(xTrain[:,idx]-me[idx])/std_dev[idx]
return xTrain,me,std_dev
    def generate_samples(self, X_orig, old_imbalance_ratio, new_imbalance_ratio):
        """Synthesize extra samples around X_orig to reach a target class ratio.

        New points are drawn as ``mean + std * N(0,1)`` over each point's
        nearest-neighbor neighborhood. Returns (X_orig, X_new) where X_new
        holds only the generated samples.
        """
        N = X_orig.shape[0]
        M = X_orig.shape[1]
        neighbors_thresh = 10
        new_samples = int(new_imbalance_ratio / old_imbalance_ratio * N - N)
        # each point must generate these many samples
        new_samples_per_point_orig = new_imbalance_ratio / old_imbalance_ratio - 1
        new_samples_per_point = int(new_imbalance_ratio / old_imbalance_ratio - 1)
        # check if the number of samples each point has to generate is > 1
        X1 = np.zeros((0, M))
        if(new_samples_per_point_orig > 0 and new_samples_per_point_orig <= 1):
            # fractional rate: only a random subset of points generates one sample each
            idx_samples = resample(np.arange(0, N), n_samples=int(N * new_samples_per_point_orig), random_state=1, replace=False)
            X = X_orig[idx_samples, ]
            new_samples_per_point = 1
            N = X.shape[0]
        else:
            X = X_orig
        if(N == 1):
            # a lone point is simply replicated
            X1 = repmat(X, new_samples, 1)
        elif(N > 1):
            if(N <= neighbors_thresh):
                n_neighbors = int(N / 2)
            else:
                n_neighbors = neighbors_thresh
            nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
            for i in range(N):
                # for each point find its n_neighbors nearest neighbors
                inds = nbrs.kneighbors(X[i, :].reshape(1, -1), n_neighbors, return_distance=False)
                temp_data = X[inds[0], :]
                std = np.std(temp_data, axis=0)
                me = np.mean(temp_data, axis=0)
                # seeded per point so generation is reproducible
                np.random.seed(i)
                x_temp = me + std * np.random.randn(new_samples_per_point, M)
                X1 = np.append(X1, x_temp, axis=0)
        return X_orig, X1
    def upsample(self, X, Y, new_imbalance_ratio, upsample_type):
        """Oversample minority classes up to ``new_imbalance_ratio``.

        # xTrain: samples X features
        # yTrain : samples,
        # for classification only
        Classes whose ratio to the largest class is at or below the threshold
        (0.5 when upsample_type==1, else 1) get synthetic samples from
        ``generate_samples``. Returns the augmented (X, Y) with int32 labels.
        """
        numClasses = np.unique(Y).size
        class_samples = np.zeros((numClasses,))
        X3 = np.zeros((0, X.shape[1]))
        Y3 = np.zeros((0,))
        # first find the samples per class per class
        for i in range(numClasses):
            idx1 = (Y == i)
            class_samples[i] = np.sum(idx1)
        max_samples = np.max(class_samples)
        # new_imbalance_ratio=0.5
        if(upsample_type == 1):
            old_imbalance_ratio_thresh = 0.5
        else:
            old_imbalance_ratio_thresh = 1
        for i in range(numClasses):
            idx1 = (Y == i)
            old_imbalance_ratio = class_samples[i] / max_samples
            X1 = X[idx1, :]
            Y1 = Y[idx1, ]
            if(idx1.size == 1):
                X1 = np.reshape(X1, (1, X.shape[1]))
            if(old_imbalance_ratio <= old_imbalance_ratio_thresh and class_samples[i] != 0):
                X1, X2 = self.generate_samples(X1, old_imbalance_ratio, new_imbalance_ratio)
                new_samples = X2.shape[0]
                Y2 = np.ones((new_samples,))
                Y2 = Y2 * Y1[0, ]
                # append original and generated samples
                X3 = np.append(X3, X1, axis=0)
                X3 = np.append(X3, X2, axis=0)
                Y3 = np.append(Y3, Y1, axis=0)
                Y3 = np.append(Y3, Y2, axis=0)
            else:
                # append original samples only
                X3 = np.append(X3, X1, axis=0)
                Y3 = np.append(Y3, Y1, axis=0)
        Y3 = np.array(Y3, dtype=np.int32)
        return X3, Y3
    def kmeans_select(self, X, represent_points):
        """
        Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
        The prototype vectors are selected based on the farthest distance from the kmeans centers

        Parameters
        ----------
        X: np.ndarray
            shape = n_samples, n_features
        represent_points: int
            number of prototype vectors to return

        Returns
        -------
        sv: list
            list of the prototype vector indices from the data array given by X
        """
        do_pca = self.do_pca_in_selection
        N = X.shape[0]
        if(do_pca == True):
            # reduce to 50 dims before clustering when the data is wide
            if(X.shape[1] > 50):
                n_components = 50
                ipca = IncrementalPCA(n_components=n_components, batch_size=np.min([128, X.shape[0]]))
                X = ipca.fit_transform(X)
        kmeans = MiniBatchKMeans(n_clusters=represent_points, batch_size=np.min([128, X.shape[0]]), random_state=0).fit(X)
        centers = kmeans.cluster_centers_
        labels = kmeans.labels_
        sv = []
        unique_labels = np.unique(labels).size
        all_ind = np.arange(N)
        for j in range(unique_labels):
            # within each cluster, pick the sample farthest from its center
            X1 = X[labels == j, :]
            all_ind_temp = all_ind[labels == j]
            tempK = pairwise_distances(X1, np.reshape(centers[j, :], (1, X1.shape[1])))**2
            inds = np.argmax(tempK, axis=0)
            sv.append(all_ind_temp[inds[0]])
        return sv
    def renyi_select(self, X, represent_points):
        """
        Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
        The prototype vectors are selected based on maximization of quadratic renyi entropy, which can be
        written in terms of log sum exp which is a tightly bounded by max operator. Now for rbf kernel,
        the max_{ij}(-\\|x_i-x_j\\|^2) is equivalent to min_{ij}(\\|x_i-x_j\\|^2).

        Parameters
        ----------
        X: np.ndarray
            shape = n_samples, n_features
        represent_points: int
            number of prototype vectors to return

        Returns
        -------
        sv: list
            list of the prototype vector indices from the data array given by X
        """
        do_pca = self.do_pca_in_selection
        N = X.shape[0]
        capacity = represent_points
        selectionset = set([])
        set_full = set(list(range(N)))
        np.random.seed(1)
        if(len(selectionset) == 0):
            # start from a random permutation and keep the first `capacity` points
            selectionset = np.random.permutation(N)
            sv = list(selectionset)[0:capacity]
        else:
            # NOTE(review): dead branch — selectionset is always empty above;
            # also set.append does not exist, so this path would fail if reached.
            extrainputs = represent_points - len(selectionset)
            leftindices = list(set_full.difference(selectionset))
            info = np.random.permutation(len(leftindices))
            info = info[1:extrainputs]
            sv = selectionset.append(leftindices[info])
        if(do_pca == True):
            if(X.shape[1] > 50):  # takes more time
                n_components = 50
                ipca = IncrementalPCA(n_components=n_components, batch_size=np.min([128, X.shape[0]]))
                X = ipca.fit_transform(X)
        svX = X[sv, :]
        # min_info[:,1] tracks each prototype's squared distance to its
        # nearest other prototype; min_info[:,0] is its slot index
        min_info = np.zeros((capacity, 2))
        KsV = pairwise_distances(svX, svX)**2  # this is fast
        KsV[KsV == 0] = np.inf
        min_info[:, 1] = np.min(KsV, axis=1)
        min_info[:, 0] = np.arange(capacity)
        minimum = np.min(min_info[:, 1])
        counter = 0
        for i in range(N):
            # find for which data the value is minimum
            replace = np.argmin(min_info[:, 1])
            ids = int(min_info[min_info[:, 0] == replace, 0])
            # Subtract from totalcrit once for row
            tempminimum = minimum - min_info[ids, 1]
            # Try to evaluate kernel function
            tempsvX = np.zeros(svX.shape)
            tempsvX[:] = svX[:]
            inputX = X[i, :]
            tempsvX[replace, :] = inputX
            tempK = pairwise_distances(tempsvX, np.reshape(inputX, (1, X.shape[1])))**2  # this is fast
            tempK[tempK == 0] = np.inf
            distance_eval = np.min(tempK)
            tempminimum = tempminimum + distance_eval
            # accept the swap only if it increases the separation criterion
            if (minimum < tempminimum):
                minimum = tempminimum
                min_info[ids, 1] = distance_eval
                svX[:] = tempsvX[:]
                sv[ids] = i
                counter += 1
        return sv
def subset_selection(self,X,Y):
n_components = self.n_components
PV_scheme = self.PV_scheme
problem_type = self.problem_type
N = X.shape[0]
# M = X.shape[1]
numClasses = np.unique(Y).size
use_global_sig = False
use_global_sig1 = False
if(use_global_sig ==True or problem_type == 'regression'):
if(PV_scheme == 'renyi'):
# sig_global = np.power((np.std(X)*(np.power(N,(-1/(M+4))))),2)
subset = self.renyi_select(X,n_components)
elif(PV_scheme == 'kmeans'):
subset = self.kmeans_select(X,n_components)
else:
print('No PV_scheme provided... using all the samples!')
subset = list(np.arange(N))
else:
all_samples = np.arange(N)
subset=[]
subset_per_class = np.zeros((numClasses,))
class_dist = np.zeros((numClasses,))
for i in range(numClasses):
class_dist[i] = np.sum(Y == i)
subset_per_class[i] = int(np.ceil((class_dist[i]/N)*n_components))
for i in range(numClasses):
xTrain = X[Y == i,]
samples_in_class = all_samples[Y == i]
N1 = xTrain.shape[0]
# sig = np.power((np.std(xTrain)*(np.power(N1,(-1/(M+4))))),2)
if(PV_scheme == 'renyi'):
if(use_global_sig1 == False):
subset1 = self.renyi_select(xTrain,int(subset_per_class[i]))
else:
# sig_global = np.power((np.std(X)*(np.power(N,(-1/(M+4))))),2)
subset1 = self.renyi_select(xTrain,int(subset_per_class[i]))
elif(PV_scheme == 'kmeans'):
subset1 = self.kmeans_select(xTrain,int(subset_per_class[i]))
else:
print('No PV_scheme provided... using all the samples!')
subset1 = list(np.arange(N1))
temp=list(samples_in_class[subset1])
subset.extend(temp)
return subset
def divide_into_batches_stratified(self,yTrain):
batch_sz=self.batch_sz
#data should be of the form samples X features
N=yTrain.shape[0]
num_batches=int(np.ceil(N/batch_sz))
sample_weights=list()
numClasses=np.unique(yTrain).size
idx_batches=list()
skf=StratifiedKFold(n_splits=num_batches, random_state=1, shuffle=True)
j=0
for train_index, test_index in skf.split(np.zeros(N), yTrain):
idx_batches.append(test_index)
class_weights=np.zeros((numClasses,))
sample_weights1=np.zeros((test_index.shape[0],))
temp=yTrain[test_index,]
for i in range(numClasses):
idx1=(temp==i)
class_weights[i]=1.0/(np.sum(idx1)+1e-09)#/idx.shape[0]
sample_weights1[idx1]=class_weights[i]
sample_weights.append(sample_weights1)
j+=1
return idx_batches,sample_weights,num_batches
def kernel_transform(self, X1, X2 = None, kernel_type = 'linear_primal', n_components = 100, gamma = 1.0):
"""
X1: n_samples1 X M
X2: n_samples2 X M
X: n_samples1 X n_samples2 : if kernel_type is non primal
X: n_samples1 X n_components : if kernel_type is primal
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X2)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X2,1/(2*gamma))
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X2,-gamma)
elif(kernel_type == 'sin'):
X = np.sin(gamma*manhattan_distances(X1,X2))
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X2))
elif(kernel_type == 'rff_primal'):
rbf_feature = RBFSampler(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'nystrom_primal'):
#cannot have n_components more than n_samples1
if(n_components > X1.shape[0]):
n_components = X1.shape[0]
self.n_components = n_components
rbf_feature = Nystroem(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'linear_primal'):
X = X1
else:
print('No kernel_type passed: using linear primal solver')
X = X1
return X
def margin_kernel(self, X1, kernel_type = 'linear', gamma =1.0):
"""
X1: n_samples1 X M
X: n_samples1 X n_samples1 : if kernel_type is non primal
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X1)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X1,1/(2*gamma))
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X1,-gamma)
elif(kernel_type == 'sin'):
X = np.sin(gamma*manhattan_distances(X1,X1))
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X1))
else:
print('no kernel_type, returning None')
return None
return X
def matrix_decomposition(self, X):
"""
Finds the matrices consisting of positive and negative parts of kernel matrix X
Parameters:
----------
X: n_samples X n_samples
Returns:
--------
K_plus: kernel corresponding to +ve part
K_minus: kernel corresponding to -ve part
"""
[D,U]=eigh(X)
U_plus = U[:,D>0.0]
U_minus = U[:,D<=0.0]
D_plus = np.diag(D[D>0.0])
D_minus = np.diag(D[D<=0.0])
K_plus = np.dot(np.dot(U_plus,D_plus),U_plus.T)
K_minus = -np.dot(np.dot(U_minus,D_minus),U_minus.T)
return K_plus, K_minus
def inner_opt(self, X, Y, data1, level):
gamma = self.gamma
kernel_type = self.kernel_type
iterMax2 = self.iterMax2
iterMax1 = self.iterMax1
tol = self.tol
algo_type = self.algo_type
#if data1 = None implies there is no kernel computation, i.e., there is only primal solvers applicable
if(data1 is not None):
if(self.reg_type == 'M'):
K = self.margin_kernel( X1 = data1, kernel_type = kernel_type, gamma = gamma)
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
K_plus, K_minus = self.matrix_decomposition(K)
if(algo_type == 'MCM'):
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W_prev,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
if(kernel_type == 'linear' or kernel_type == 'rbf'):
#for mercer kernels no need to train for outer loop
print('Returning for mercer kernels')
return W_prev,f,iters,fvals
else:
print('Solving for non - mercer kernels')
#for non mercer kernels, train for outer loop with initial point as W_prev
W_best = np.zeros(W_prev.shape)
W_best[:] = W_prev[:]
f_best = np.inf
iter_best = 0
fvals = np.zeros((iterMax1+1,))
iters = 0
fvals[iters] = f
rel_error = 1.0
print('iters =%d, f_outer = %0.9f'%(iters,f))
while(iters < iterMax2 and rel_error > tol):
iters = iters + 1
if(algo_type == 'MCM'):
W,f,iters1,fvals1 = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
elif(algo_type == 'LSMCM'):
W,f,iters1,fvals1 = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters1,fvals1 = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
rel_error = np.abs((np.linalg.norm(W,'fro')-np.linalg.norm(W_prev,'fro'))/(np.linalg.norm(W_prev,'fro') + 1e-08))
W_prev[:] = W[:]
print('iters =%d, f_outer = %0.9f'%(iters,f))
if(f < f_best):
W_best[:] = W[:]
f_best = f
iter_best = iters
else:
break
fvals[iters] = -1
return W_best,f_best,iter_best,fvals
else:
print('Please choose a kernel_type from linear, rbf, sin, tanh or TL1 for reg_type = M to work ')
print('Using a linear kernel')
self.kernel_type = 'linear'
K_plus, K_minus = self.matrix_decomposition(K)
if(algo_type == 'MCM'):
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W_prev,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
return W_prev,f,iters,fvals
else:
#i.e., reg_type is not M, then train accordingly using either l1, l2, ISTA or elastic net penalty
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X, Y, level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X, Y, level, K_plus = None, K_minus = None, W = None)
return W, f, iters, fvals
else:
#i.e., data1 is None -> we are using primal solvers with either l1, l2, ISTA or elastic net penalty
if(self.reg_type == 'M'):
print('Please choose a kernel_type from linear, rbf, sin, tanh or TL1 for reg_type = M to work')
print('doing linear classifier with l1 norm on weights')
self.reg_type = 'l1'
self.C3 = 0.0
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X,Y,level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
return W,f,iters,fvals
else:
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X,Y,level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
return W,f,iters,fvals
return W,f,iters,fvals
def select_(self, xTest, xTrain, kernel_type, subset, idx_features, idx_samples):
#xTest corresponds to X1
#xTrain corresponds to X2
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
X2 = xTrain[idx_samples,:]
X2 = X2[:,idx_features]
X2 = X2[subset,]
X1 = xTest[:,idx_features]
else:
X1 = xTest[:,idx_features]
X2 = None
return X1, X2
def normalize_(self,xTrain, me, std):
idx = (std!=0.0)
xTrain[:,idx] = (xTrain[:,idx]-me[idx])/std[idx]
return xTrain
    def fit(self,xTrain,yTrain):
        """Fit the ensemble of minimal-complexity machines.

        Trains self.n_ensembles base learners (level=0), each on a random
        subset of samples (sample_ratio) and features (feature_ratio).
        When combine_type == 'concat' and there is more than one member, a
        second-level combiner (level=1) is trained on the concatenated
        base-learner scores and stored at W_all[n_ensembles].

        Parameters
        ----------
        xTrain: np.ndarray of shape (n_samples, n_features)
        yTrain: labels; integer classes in [0, numClasses) for
            classification, real values (possibly multi-target columns)
            for regression

        Returns
        -------
        W_all: dict of learned weight matrices per ensemble member
        sample_indices, feature_indices: per-member index arrays
        me_all, std_all: per-member standardization statistics
        subset_all: per-member prototype-vector indices
        """
        #xTrain: samples Xfeatures
        #yTrain: samples
        #for classification: entries of yTrain should be between {0 to numClasses-1}
        #for regresison : entries of yTrain should be real values
        N = xTrain.shape[0]
        M = xTrain.shape[1]
        if(self.problem_type =='classification'):
            numClasses=np.unique(yTrain).size
        if(self.problem_type =='regression'):
            # a single regression target is reshaped to a column vector so the
            # multi-target loop below works uniformly
            if(yTrain.size == yTrain.shape[0]):
                yTrain = np.reshape(yTrain,(yTrain.shape[0],1))
            numClasses = yTrain.shape[1] #for multi target SVM, assuming all targets are independent to each other
        feature_indices=np.zeros((self.n_ensembles,int(M*self.feature_ratio)),dtype=np.int32)
        sample_indices=np.zeros((self.n_ensembles,int(N*self.sample_ratio)),dtype=np.int32)
        W_all={}
        me_all= {}
        std_all = {}
        subset_all = {}
        if(self.combine_type=='concat'):
            P_all=np.zeros((N,self.n_ensembles*numClasses)) #to concatenate the classes
        level=0
        gamma = self.gamma
        kernel_type = self.kernel_type
        n_components = self.n_components
        for i in range(self.n_ensembles):
            print('training PLM %d'%i)
            # subsample rows/columns without replacement, seeded per member
            if(self.sample_ratio!=1.0):
                idx_samples=resample(np.arange(0,N), n_samples=int(N*self.sample_ratio), random_state=i,replace=False)
            else:
                idx_samples = np.arange(N)
            if(self.feature_ratio!=1.0):
                idx_features=resample(np.arange(0,M), n_samples=int(M*self.feature_ratio), random_state=i,replace=False)
            else:
                idx_features = np.arange(0,M)
            feature_indices[i,:] = idx_features
            sample_indices[i,:] = idx_samples
            xTrain_temp = xTrain[idx_samples,:]
            xTrain_temp = xTrain_temp[:,idx_features]
            yTrain1 = yTrain[idx_samples,]
            # non-primal kernels need prototype vectors (data1) for the kernel map
            if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
                subset = self.subset_selection(xTrain_temp,yTrain1)
                data1 = xTrain_temp[subset,]
                subset_all[i] = subset
            else:
                subset_all[i] = []
                data1 = None
            xTrain1 = self.kernel_transform( X1 = xTrain_temp, X2 = data1, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
            #standardize the dataset
            xTrain1, me, std = self.standardize(xTrain1)
            me_all[i] = me
            std_all[i] = std
            if(self.problem_type == 'regression'):
                # each target j is reduced to a 2-class problem on the
                # augmented samples (x, y_j + eps) labeled 1 and
                # (x, y_j - eps) labeled 0
                epsilon = self.epsilon
                N1 = yTrain1.shape[0]
                W = np.zeros((xTrain1.shape[1]+2,numClasses*2)) #2 is added to incorporate the yTrain2 and bias term appended to xTrain1
                for j in range(numClasses):
                    yTrain3 = np.append(np.ones((N1,)), np.zeros((N1,)))
                    yTrain2 = np.append(yTrain1[:,j] + epsilon, yTrain1[:,j] - epsilon, axis = 0)
                    xTrain2 = np.append(xTrain1, xTrain1, axis = 0)
                    xTrain2 = np.append(xTrain2, np.reshape(yTrain2,(2*N1,1)), axis =1)
                    # Wa,f,iters,fvals=self.train(xTrain2,yTrain3,level)
                    Wa,f,iters,fvals = self.inner_opt(xTrain2, yTrain3, data1, level)
                    W[:,j:j+2] = Wa
                W_all[i]=W # W will be of the shape (M+2,), here numClasses = 1
            if(self.problem_type == 'classification'):
                # W,f,iters,fvals=self.train(xTrain1,yTrain1,level)
                W,f,iters,fvals = self.inner_opt(xTrain1, yTrain1, data1, level)
                W_all[i]=W # W will be of the shape (M+2,numClasses)
        if(self.n_ensembles == 1 or self.combine_type != 'concat'):
            return W_all, sample_indices, feature_indices, me_all, std_all, subset_all
        else:
            if(self.combine_type=='concat'):
                # level-1: recompute each member's scores on the full training
                # set and train a combiner on their concatenation
                level=1
                for i in range(self.n_ensembles):
                    X1, X2 = self.select_(xTrain, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
                    xTrain1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
                    xTrain1 = self.normalize_(xTrain1,me_all[i],std_all[i])
                    M = xTrain1.shape[1]
                    xTrain1=self.add_bias(xTrain1)
                    W = W_all[i]
                    if(self.problem_type == 'regression'):
                        # recover each target's prediction from the paired
                        # (+eps/-eps) weights: y = -(w_x . x + b)/w_y
                        scores = np.zeros((xTrain1.shape[0],numClasses))
                        for j in range(numClasses):
                            W2 = W[:,j:j+2]
                            W1 = (W2[:,0] - W2[:,1])/2
                            scores1 = xTrain1[:,0:M].dot(W1[0:M,]) + np.dot(xTrain1[:,M], W1[M+1,])
                            scores1 = -1.0/(W1[M,] + 1e-08)*scores1
                            scores[:,j] = scores1
                    if(self.problem_type == 'classification'):
                        scores = xTrain1.dot(W)
                    P_all[:,i*numClasses:numClasses+i*numClasses] = scores
                #train another regressor or classifier on top
                if(self.problem_type == 'regression'):
                    epsilon = self.epsilon
                    P_all_1 = np.zeros((P_all.shape[0],self.n_ensembles))
                    W1 = np.zeros((P_all_1.shape[1]+2,numClasses*2))
                    for j in range(numClasses):
                        # regroup the member scores belonging to target j
                        for k in range(self.n_ensembles):
                            P_all_1[:,k] = P_all[:,numClasses*k+j]
                        yTrain3 = np.append(np.ones((N,)), np.zeros((N,)))
                        yTrain2 = np.append(yTrain[:,j] + epsilon, yTrain[:,j] - epsilon, axis = 0)
                        P_all_2 = np.append(P_all_1, P_all_1, axis = 0)
                        P_all_2 = np.append(P_all_2, np.reshape(yTrain2,(2*N,1)), axis =1)
                        # Wa,f,iters,fvals = self.train(P_all_2,yTrain3,level)
                        Wa,f,iters,fvals = self.inner_opt(P_all_2, yTrain3, None, level)
                        W1[:,j:j+2] = Wa
                if(self.problem_type == 'classification'):
                    # W1,f1,iters1,fvals1 = self.train(P_all,yTrain,level)
                    W1,f,iters,fvals = self.inner_opt(P_all, yTrain, None, level)
                W_all[self.n_ensembles] = W1
        return W_all, sample_indices, feature_indices, me_all, std_all, subset_all
    def train(self, xTrain, yTrain, level, K_plus = None, K_minus = None, W = None):
        """Stochastic subgradient trainer for the MCM hinge-loss objective.

        Minimizes a regularized multi-class hinge loss (see the formula
        comments below) with reg_type in {'l1','l2','en','ISTA','M'} and
        update_type in {'sgd','momentum','nesterov','adagrad','rmsprop',
        'adam'}.  Uses stratified mini-batches, gradient and weight norm
        clipping, and the cumulative-penalty scheme for L1 shrinkage.

        Parameters
        ----------
        xTrain: (n_samples, n_features) data; a bias column is appended here
        yTrain: (n_samples,) integer labels in [0, numClasses)
        level: 0 -> base learner (loss weight C1), 1 -> combiner (C4)
        K_plus, K_minus: PSD parts of the margin kernel (only for
            reg_type == 'M'); callers are expected to pass K_minus only
            together with K_plus
        W: optional warm-start weight matrix

        Returns
        -------
        W_best: best weights found (entries below 1e-3 of the max zeroed)
        f_best: corresponding objective value
        iter_best: epoch at which f_best was reached
        fvals: objective trace; the entry after the last epoch is set to -1
        """
        #min D(E|w|_1 + (1-E)*0.5*|W|_2^2) + C*\sum_i\sum_(j)|f_j(i)| + \sum_i\sum_(j_\neq y_i)max(0,(1-f_y_i(i) + f_j(i)))
        #setting C = 0 gives us SVM
        # or when using margin term i.e., reg_type = 'M'
        #min D(E|w|_1) + (E)*0.5*\sum_j=1 to numClasses (w_j^T(K+ - K-)w_j) + C*\sum_i\sum_(j)|f_j(i)| + \sum_i\sum_(j_\neq y_i)max(0,(1-f_y_i(i) + f_j(i)))
        #setting C = 0 gives us SVM with margin term
        if(self.upsample1==True):
            xTrain,yTrain=self.upsample(xTrain,yTrain,new_imbalance_ratio=0.5,upsample_type=1)
        xTrain=self.add_bias(xTrain)
        M=xTrain.shape[1]
        N=xTrain.shape[0]
        numClasses=np.unique(yTrain).size
        verbose = False
        # level 0 trains base learners (C1); level 1 trains the combiner (C4)
        if(level==0):
            C = self.C1 #for loss function of MCM
            D = self.C2 #for L1 or L2 penalty
            E = self.C3 #for elastic net penalty or margin term
        else:
            C = self.C4 #for loss function of MCM
            D = self.C2 #for L1 or L2 penalty
            E = self.C3 #for elastic net penalty since in combining the classifiers we use a linear primal classifier
        iterMax1 = self.iterMax1
        eta_zero = self.eta
        class_weighting = self.class_weighting
        reg_type = self.reg_type
        update_type = self.update_type
        tol = self.tol
        np.random.seed(1)
        if(W is None):
            # small random init, rescaled so the largest |entry| is 0.001
            W=0.001*np.random.randn(M,numClasses)
            W=W/np.max(np.abs(W))
        else:
            W_orig = np.zeros(W.shape)
            W_orig[:] = W[:]
        # inverse class-frequency weights for the non-'average' weighting mode
        class_weights=np.zeros((numClasses,))
        sample_weights=np.zeros((N,))
        #divide the data into K clusters
        for i in range(numClasses):
            idx=(yTrain==i)
            class_weights[i]=1.0/np.sum(idx)
            sample_weights[idx]=class_weights[i]
        G_clip_threshold = 100
        W_clip_threshold = 500
        eta=eta_zero
        # ---- initial objective evaluation on the full training set ----
        scores = xTrain.dot(W) #samples X numClasses
        N = scores.shape[0]
        correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
        mat = (scores.transpose()-correct_scores.transpose()).transpose()
        mat = mat+1.0
        mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
        thresh1 = np.zeros(mat.shape)
        thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
        f=0.0
        if(reg_type=='l2'):
            f += D*0.5*np.sum(W**2)
        if(reg_type=='l1'):
            f += D*np.sum(np.abs(W))
        if(reg_type=='en'):
            f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
        if(class_weighting=='average'):
            f1 = C*np.sum(np.abs(scores)) + np.sum(thresh1)
            f += (1.0/N)*f1
        else:
            f1 = C*np.sum(np.abs(scores)*sample_weights[:,None]) + np.sum(thresh1*sample_weights[:,None])
            f+= (1.0/numClasses)*f1
        if(K_minus is not None):
            # temp_mat linearizes the concave -K_minus part around W_orig
            temp_mat = np.dot(K_minus,W_orig[0:(M-1),])
        for i in range(numClasses):
            #add the term (E/2*numclasses)*lambda^T*K_plus*lambda for margin
            if(K_plus is not None):
                w = W[0:(M-1),i]
                f2 = np.dot(np.dot(K_plus,w),w)
                f+= ((0.5*E)/(numClasses))*f2
            #the second term in the objective function
            # NOTE(review): `w` is only assigned under K_plus; this relies on
            # K_minus never being passed without K_plus
            if(K_minus is not None):
                f3 = np.dot(temp_mat[:,i],w)
                f+= -((0.5*E)/(numClasses))*f3
        iter1=0
        print('iter1=%d, f=%0.3f'%(iter1,f))
        f_best=f
        fvals=np.zeros((iterMax1+1,))
        fvals[iter1]=f_best
        W_best=np.zeros(W.shape)
        iter_best=iter1
        f_prev=f_best
        rel_error=1.0
        # f_prev_10iter=f
        if(reg_type=='l1' or reg_type =='en' or reg_type == 'M'):
            # from paper: Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty
            if(update_type == 'adam' or update_type == 'adagrad' or update_type == 'rmsprop'):
                u = np.zeros(W.shape)
            else:
                u = 0.0
            q=np.zeros(W.shape)
            z=np.zeros(W.shape)
        # optimizer state buffers shared by the various update rules
        all_zeros=np.zeros(W.shape)
        eta1=eta_zero
        v=np.zeros(W.shape)
        v_prev=np.zeros(W.shape)
        vt=np.zeros(W.shape)
        m=np.zeros(W.shape)
        vt=np.zeros(W.shape)
        cache=np.zeros(W.shape)
        eps=1e-08
        decay_rate=0.99
        mu1=0.9
        mu=mu1
        beta1 = 0.9
        beta2 = 0.999
        iter_eval=10 #evaluate after every 10 iterations
        idx_batches, sample_weights_batch, num_batches = self.divide_into_batches_stratified(yTrain)
        # ---- epoch loop: one pass over all stratified mini-batches ----
        while(iter1<iterMax1 and rel_error>tol):
            iter1=iter1+1
            for batch_num in range(0,num_batches):
                # batch_size=batch_sizes[j]
                test_idx=idx_batches[batch_num]
                data=xTrain[test_idx,]
                labels=yTrain[test_idx,]
                N=labels.shape[0]
                scores=data.dot(W)
                correct_scores=scores[range(N),np.array(labels,dtype='int32')]#label_batches[j] for this line should be in the range [0,numClasses-1]
                mat=(scores.transpose()-correct_scores.transpose()).transpose()
                mat=mat+1.0
                mat[range(N),np.array(labels,dtype='int32')]=0.0
                thresh1=np.zeros(mat.shape)
                thresh1[mat>0.0]=mat[mat>0.0]
                # binary1: subgradient of the multi-class hinge loss
                binary1 = np.zeros(thresh1.shape)
                binary1[thresh1>0.0] = 1.0
                row_sum=np.sum(binary1,axis=1)
                binary1[range(N),np.array(labels,dtype='int32')]=-row_sum
                # binary2: subgradient of the C*|f_j(i)| term (sign of scores)
                if(C !=0.0):
                    binary2 = np.zeros(scores.shape)
                    binary2[scores>0.0] = 1.0
                    binary2[scores<0.0] = -1.0
                else:
                    binary2 = 0
                dscores1 = binary1
                dscores2 = binary2
                if(class_weighting=='average'):
                    gradW = np.dot((dscores1 + C*dscores2).transpose(),data)
                    gradW=gradW.transpose()
                    gradW = (1.0/N)*gradW
                    # gradW += gradW1 - gradW2
                else:
                    sample_weights_b=sample_weights_batch[batch_num]
                    gradW=np.dot((dscores1 + C*dscores2).transpose(),data*sample_weights_b[:,None])
                    gradW=gradW.transpose()
                    gradW=(1.0/numClasses)*gradW
                    # gradW += gradW1 - gradW2
                if(np.sum(gradW**2)>G_clip_threshold):#gradient clipping
                    gradW = G_clip_threshold*gradW/np.sum(gradW**2)
                # ---- parameter update (selected optimizer) ----
                if(update_type=='sgd'):
                    W = W - eta*gradW
                elif(update_type=='momentum'):
                    v = mu * v - eta * gradW # integrate velocity
                    W += v # integrate position
                elif(update_type=='nesterov'):
                    v_prev[:] = v[:] # back this up
                    v = mu * v - eta * gradW # velocity update stays the same
                    W += -mu * v_prev + (1 + mu) * v # position update changes form
                elif(update_type=='adagrad'):
                    cache += gradW**2
                    W += - eta1* gradW / (np.sqrt(cache) + eps)
                elif(update_type=='rmsprop'):
                    cache = decay_rate * cache + (1 - decay_rate) * gradW**2
                    W += - eta1 * gradW / (np.sqrt(cache) + eps)
                elif(update_type=='adam'):
                    m = beta1*m + (1-beta1)*gradW
                    mt = m / (1-beta1**(iter1+1))
                    v = beta2*v + (1-beta2)*(gradW**2)
                    vt = v / (1-beta2**(iter1+1))
                    W += - eta1 * mt / (np.sqrt(vt) + eps)
                else:
                    W = W - eta*gradW
                # ---- extra gradient step for the margin regularizer ----
                if(reg_type == 'M'):
                    gradW1= np.zeros(W.shape)
                    gradW2= np.zeros(W.shape)
                    for i in range(numClasses):
                        w=W[0:(M-1),i]
                        if(K_plus is not None):
                            gradW1[0:(M-1),i]=((E*0.5)/(numClasses))*2*np.dot(K_plus,w)
                        if(K_minus is not None):
                            gradW2[0:(M-1),i]=((E*0.5)/(numClasses))*temp_mat[:,i]
                    if(update_type == 'adam'):
                        W += -(gradW1-gradW2)*(eta1/(np.sqrt(vt) + eps))
                    elif(update_type == 'adagrad' or update_type =='rmsprop'):
                        W += -(gradW1-gradW2)*(eta1/(np.sqrt(cache) + eps))
                    else:
                        W += -(gradW1-gradW2)*(eta)
                # ---- proximal (soft-threshold) step for ISTA ----
                if(reg_type == 'ISTA'):
                    if(update_type == 'adam'):
                        idx_plus = W > D*(eta1/(np.sqrt(vt) + eps))
                        idx_minus = W < -D*(eta1/(np.sqrt(vt) + eps))
                        idx_zero = np.abs(W) < D*(eta1/(np.sqrt(vt) + eps))
                        W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(vt[idx_plus]) + eps))
                        W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(vt[idx_minus]) + eps))
                        W[idx_zero] = 0.0
                    elif(update_type == 'adagrad' or update_type =='rmsprop'):
                        idx_plus = W > D*(eta1/(np.sqrt(cache) + eps))
                        idx_minus = W < -D*(eta1/(np.sqrt(cache) + eps))
                        idx_zero = np.abs(W) < D*(eta1/(np.sqrt(cache) + eps))
                        W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(cache[idx_plus]) + eps))
                        W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(cache[idx_minus]) + eps))
                        W[idx_zero] = 0.0
                    else:
                        idx_plus = W > D*(eta)
                        idx_minus = W < -D*(eta)
                        idx_zero = np.abs(W) < D*(eta)
                        W[idx_plus] = W[idx_plus] - D*(eta)
                        W[idx_minus] = W[idx_minus] + D*(eta)
                        W[idx_zero] = 0.0
                # ---- weight-decay step for the L2 penalty ----
                if(reg_type=='l2'):
                    if(update_type == 'adam'):
                        W += -D*W*(eta1/(np.sqrt(vt) + eps))
                    elif(update_type == 'adagrad' or update_type =='rmsprop'):
                        W += -D*W*(eta1/(np.sqrt(cache) + eps))
                    else:
                        W += -D*W*(eta)
                # ---- L2 part of the elastic-net penalty ----
                if(reg_type=='en'):
                    if(update_type == 'adam'):
                        W += -D*(1.0-E)*W*(eta1/(np.sqrt(vt) + eps))
                    elif(update_type == 'adagrad' or update_type =='rmsprop'):
                        W += -D*(1.0-E)*W*(eta1/(np.sqrt(cache) + eps))
                    else:
                        W += -D*W*(eta)
                # ---- cumulative-penalty L1 shrinkage (l1 and margin) ----
                if(reg_type=='l1' or reg_type == 'M'):
                    if(update_type=='adam'):
                        u = u + D*(eta1/(np.sqrt(vt) + eps))
                    elif(update_type == 'adagrad' or update_type =='rmsprop'):
                        u = u + D*(eta1/(np.sqrt(cache) + eps))
                    else:
                        u = u + D*eta
                    z[:] = W[:]
                    idx_plus = W>0
                    idx_minus = W<0
                    W_temp = np.zeros(W.shape)
                    if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
                        W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
                        W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
                    else:
                        W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
                        W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
                    W[idx_plus]=W_temp[idx_plus]
                    W[idx_minus]=W_temp[idx_minus]
                    q=q+(W-z)
                # ---- cumulative-penalty L1 part of the elastic net ----
                if(reg_type=='en'):
                    if(update_type=='adam'):
                        u = u + D*E*(eta1/(np.sqrt(vt) + eps))
                    elif(update_type == 'adagrad' or update_type =='rmsprop'):
                        u = u + D*E*(eta1/(np.sqrt(cache) + eps))
                    else:
                        u = u + D*E*eta
                    z[:] = W[:]
                    idx_plus = W>0
                    idx_minus = W<0
                    W_temp = np.zeros(W.shape)
                    if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
                        W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
                        W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
                    else:
                        W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
                        W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
                    W[idx_plus]=W_temp[idx_plus]
                    W[idx_minus]=W_temp[idx_minus]
                    q=q+(W-z)
                if(np.sum(W**2)>W_clip_threshold):#gradient clipping
                    W = W_clip_threshold*W/np.sum(W**2)
            # ---- periodic full-objective evaluation / convergence check ----
            if(iter1%iter_eval==0):
                #once the W are calculated for each epoch we calculate the scores
                scores=xTrain.dot(W)
                # scores=scores-np.max(scores)
                N=scores.shape[0]
                correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
                mat = (scores.transpose()-correct_scores.transpose()).transpose()
                mat = mat+1.0
                mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
                thresh1 = np.zeros(mat.shape)
                thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
                f=0.0
                if(reg_type=='l2'):
                    f += D*0.5*np.sum(W**2)
                if(reg_type=='l1'):
                    f += D*np.sum(np.abs(W))
                if(reg_type=='en'):
                    f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
                if(class_weighting=='average'):
                    f1 = C*np.sum(np.abs(scores)) + np.sum(thresh1)
                    f += (1.0/N)*f1
                else:
                    f1 = C*np.sum(np.abs(scores)*sample_weights[:,None]) + np.sum(thresh1*sample_weights[:,None])
                    f+= (1.0/numClasses)*f1
                for i in range(numClasses):
                    #first term in objective function for margin
                    if(K_plus is not None):
                        w = W[0:(M-1),i]
                        f2 = np.dot(np.dot(K_plus,w),w)
                        f += ((0.5*E)/(numClasses))*f2
                    #the second term in the objective function for margin
                    if(K_minus is not None):
                        f3 = np.dot(temp_mat[:,i],w)
                        f += -((0.5*E)/(numClasses))*f3
                if(verbose == True):
                    print('iter1=%d, f=%0.3f'%(iter1,f))
                fvals[iter1]=f
                rel_error=np.abs(f_prev-f)/np.abs(f_prev)
                # prune near-zero weights for sparsity
                max_W = np.max(np.abs(W))
                W[np.abs(W)<1e-03*max_W]=0.0
                if(f<f_best):
                    f_best=f
                    W_best[:]=W[:]
                    max_W = np.max(np.abs(W))
                    W_best[np.abs(W_best)<1e-03*max_W]=0.0
                    iter_best=iter1
                else:
                    # objective stopped improving; keep the best W found so far
                    break
                f_prev=f
            # 1/t learning-rate decay for the plain SGD-style updates
            eta=eta_zero/np.power((iter1+1),1)
        fvals[iter1]=-1
        return W_best,f_best,iter_best,fvals
def predict(self,data, xTrain, W_all, sample_indices, feature_indices, me_all, std_all, subset_all):
#type=2 -> mode of all labels
#type=1 -> average of all labels
#type=3 -> concat of all labels
types = self.combine_type
kernel_type = self.kernel_type
gamma = self.gamma
n_components = self.n_components
n_ensembles = feature_indices.shape[0]
N = data.shape[0]
M = data.shape[1]
if(self.problem_type == 'classification'):
numClasses = W_all[0].shape[1]
label = np.zeros((N,))
if(self.problem_type == 'regression'):
numClasses = int(W_all[0].shape[1]/2)
print('numClasses=%d'%numClasses)
label = np.zeros((N,numClasses))
# print('numClasses =%d'%numClasses)
if(types=='mode'):
label_all_1 = np.zeros((N,n_ensembles))
label_all_2 = np.zeros((N,n_ensembles*numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform(X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label_all_2[:,i*numClasses:i*numClasses+numClasses] = scores
if(self.problem_type == 'classification'):
scores = data1.dot(W)
label_all_1[:,i] = np.argmax(scores,axis=1)
if(self.problem_type == 'classification'):
label = mode(label_all_1,axis=1)[0]
label = np.int32(np.reshape(label,(N,)))
return label
if(self.problem_type == 'regression'):
label = np.zeros((N,numClasses))
for j in range(numClasses):
label_temp = np.zeros((N,n_ensembles))
for k in range(n_ensembles):
label_temp[:,k] = label_all_2[:,k*numClasses+j]
label[:,j] = np.reshape(mode(label_temp,axis=1)[0],(label.shape[0],))
return label
elif(types=='average'):
label_all_2=np.zeros((N,numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
# W1 = (W[:,0]-W[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label += label + scores/n_ensembles
if(self.problem_type == 'classification'):
scores = data1.dot(W)
label_all_2 += label_all_2 + scores
if(self.problem_type == 'classification'):
label=np.argmax(label_all_2,axis=1)
return label
if(self.problem_type == 'regression'):
return label
elif(types =='concat'):
# if(self.problem_type == 'regression'):
# P_all=np.zeros((N,n_ensembles))
# if(self.problem_type == 'classification'):
N = data.shape[0]
P_all=np.zeros((N,n_ensembles*numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
# if(self.problem_type == 'regression'):
# W1 = (W[:,0]-W[:,1])/2
# scores=data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
# scores = -1.0/(W1[M,] + 1e-08)*scores
# P_all[:,i] = scores
if(self.problem_type == 'classification'):
scores = data1.dot(W)
P_all[:,i*numClasses:numClasses+i*numClasses] = scores
if(n_ensembles == 1):
if(self.problem_type == 'regression'):
if(numClasses == 1):
label = np.reshape(P_all,(P_all.shape[0],))
else:
label = P_all
if(self.problem_type == 'classification'):
label=np.argmax(P_all,axis=1)
return label
W = W_all[n_ensembles]
M = P_all.shape[1]
# P_all = self.add_bias(P_all)
if(self.problem_type == 'regression'):
scores = np.zeros((P_all.shape[0],numClasses))
P_all_1 = np.zeros((P_all.shape[0],n_ensembles))
# W = np.zeros((P_all_1.shape[1]+2,numClasses*2))
for j in range(numClasses):
P_all_1 = np.zeros((P_all.shape[0],n_ensembles))
for k in range(n_ensembles):
P_all_1[:,k] = P_all[:,numClasses*k+j]
M = P_all_1.shape[1]
P_all_1 = self.add_bias(P_all_1)
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = P_all_1[:,0:M].dot(W1[0:M,]) + np.dot(P_all_1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label = scores
return label
# W1 = (W[:,0]-W[:,1])/2
# scores=P_all[:,0:M].dot(W1[0:M,]) + np.dot(P_all[:,M], W1[M+1,])
# scores = -1.0/(W1[M,] + 1e-08)*scores
# label = scores
if(self.problem_type == 'classification'):
P_all = self.add_bias(P_all)
scores = P_all.dot(W)
label = np.argmax(scores,axis=1)
return label
def accuracy_classifier(self,actual_label,found_labels):
acc=np.divide(np.sum(actual_label==found_labels)*100.0 , actual_label.shape[0],dtype='float64')
return acc
def accuracy_regressor(self,actual_label,found_labels):
acc=np.divide(np.linalg.norm(actual_label - found_labels)**2 , actual_label.shape[0],dtype='float64')
return acc
def train_LSMCM(self, xTrain, yTrain, level, K_plus = None, K_minus = None, W = None):
"""Train a least-squares MCM multi-class classifier with mini-batch SGD.

Returns (W_best, f_best, iter_best, fvals): the best weight matrix seen,
its objective value, the iteration it was found at, and the per-iteration
objective trace (entries past the last evaluated iteration are -1).
NOTE(review): assumes yTrain holds consecutive integer labels 0..numClasses-1
(labels are used directly as column indices below) -- confirm at call sites.
"""
#min D(E|w|_1 + (1-E)*0.5*|W|_2^2) + C*\sum_i\sum_(j)|f_j(i)**2| + \sum_i\sum_(j_\neq y_i)(1-f_y_i(i) + f_j(i))**2
#setting C = 0 gives us SVM
# or when using margin term i.e., reg_type = 'M'
#min D(E|w|_1) + (E)*0.5*\sum_j=1 to numClasses (w_j^T(K+ - K-)w_j) + C*\sum_i\sum_(j)|f_j(i)**2| + \sum_i\sum_(j_\neq y_i)(1-f_y_i(i) + f_j(i))**2
#setting C = 0 gives us SVM with margin term
# print('LSMCM Training')
# print('reg_type=%s, algo_type=%s, problem_type=%s,kernel_type=%s'%(self.reg_type,self.algo_type,self.problem_type,self.kernel_type))
# print('C1=%0.4f, C2=%0.4f, C3=%0.4f'%(self.C1,self.C2,self.C3))
if(self.upsample1==True):
xTrain,yTrain=self.upsample(xTrain,yTrain,new_imbalance_ratio=0.5,upsample_type=1)
# append a bias column so the intercept is folded into W's last row
xTrain=self.add_bias(xTrain)
M=xTrain.shape[1]
N=xTrain.shape[0]
numClasses=np.unique(yTrain).size
verbose = False
# hyperparameters differ by stacking level: level 0 trains the base
# classifiers, deeper levels combine them with a linear primal classifier
if(level==0):
C = self.C1 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty or margin term
else:
C = self.C4 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty since in combining the classifiers we use a linear primal classifier
iterMax1 = self.iterMax1
eta_zero = self.eta
class_weighting = self.class_weighting
reg_type = self.reg_type
update_type = self.update_type
tol = self.tol
# fixed seed => reproducible weight initialization
np.random.seed(1)
if(W is None):
W=0.001*np.random.randn(M,numClasses)
W=W/np.max(np.abs(W))
else:
# keep an untouched copy of the initial W for the K_minus margin term
W_orig = np.zeros(W.shape)
W_orig[:] = W[:]
# per-class weights inversely proportional to class frequency (for
# the 'balanced' class_weighting mode)
class_weights=np.zeros((numClasses,))
sample_weights=np.zeros((N,))
#divide the data into K clusters
for i in range(numClasses):
idx=(yTrain==i)
class_weights[i]=1.0/np.sum(idx)
sample_weights[idx]=class_weights[i]
# NOTE(review): both clipping checks below compare the *squared* norm to
# the threshold and divide by the squared norm -- differs from standard
# norm clipping; presumably intentional, confirm before changing.
G_clip_threshold = 100
W_clip_threshold = 500
eta=eta_zero
# --- initial objective value f at the starting W ---
scores = xTrain.dot(W) #samples X numClasses
N = scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
# mask the true class so max_scores is the best *wrong* class score
scores1 = np.zeros(scores.shape)
scores1[:] = scores[:]
scores1[range(N),np.array(yTrain,dtype='int32')] = -np.inf
max_scores = np.max(scores1,axis =1)
mat1 = 1 - correct_scores + max_scores
# thresh1 = np.zeros(mat.shape)
# thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
#(1- f_yi + max_j neq yi f_j)^2
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*0.5*np.sum(scores**2) + 0.5*np.sum((mat1)**2)
f += (1.0/N)*f1
else:
f1 = C*0.5*np.sum((scores**2)*sample_weights[:,None]) + 0.5*np.sum((mat1**2)*sample_weights[:,None])
f+= (1.0/numClasses)*f1
if(K_minus is not None):
temp_mat = np.dot(K_minus,W_orig[0:(M-1),])
for i in range(numClasses):
#add the term (E/2*numclasses)*lambda^T*K_plus*lambda for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f+= ((0.5*E)/(numClasses))*f2
#the second term in the objective function
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f+= -((0.5*E)/(numClasses))*f3
iter1=0
print('iter1=%d, f=%0.3f'%(iter1,f))
f_best=f
fvals=np.zeros((iterMax1+1,))
fvals[iter1]=f_best
W_best=np.zeros(W.shape)
iter_best=iter1
f_prev=f_best
rel_error=1.0
# f_prev_10iter=f
if(reg_type=='l1' or reg_type =='en' or reg_type == 'M'):
# from paper: Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty
# u = cumulative L1 penalty; per-coordinate (array) for adaptive
# learning-rate updates, scalar otherwise
if(update_type == 'adam' or update_type == 'adagrad' or update_type == 'rmsprop'):
u = np.zeros(W.shape)
else:
u = 0.0
q=np.zeros(W.shape)
z=np.zeros(W.shape)
all_zeros=np.zeros(W.shape)
# optimizer state buffers (momentum / adagrad / rmsprop / adam)
eta1=eta_zero
v=np.zeros(W.shape)
v_prev=np.zeros(W.shape)
vt=np.zeros(W.shape)
m=np.zeros(W.shape)
vt=np.zeros(W.shape)
cache=np.zeros(W.shape)
eps=1e-08
decay_rate=0.99
mu1=0.9
mu=mu1
beta1 = 0.9
beta2 = 0.999
iter_eval=10 #evaluate after every 10 iterations
idx_batches, sample_weights_batch, num_batches = self.divide_into_batches_stratified(yTrain)
# --- main SGD loop: one pass over all mini-batches per iteration ---
while(iter1<iterMax1 and rel_error>tol):
iter1=iter1+1
for batch_num in range(0,num_batches):
# batch_size=batch_sizes[j]
test_idx=idx_batches[batch_num]
data=xTrain[test_idx,]
labels=yTrain[test_idx,]
N=labels.shape[0]
scores=data.dot(W)
correct_scores=scores[range(N),np.array(labels,dtype='int32')]#label_batches[j] for this line should be in the range [0,numClasses-1]
mat=(scores.transpose()-correct_scores.transpose()).transpose()
mat=mat+1.0
mat[range(N),np.array(labels,dtype='int32')]=0.0
scores1 = np.zeros(scores.shape)
scores1[:] = scores[:]
scores1[range(N),np.array(labels,dtype='int32')] = -np.inf
max_scores = np.max(scores1,axis =1)
max_scores_idx = np.argmax(scores1, axis = 1)
mat1 = 1 - correct_scores + max_scores
# gradient of the squared crammer-singer hinge: +mat1 on the
# best wrong class, -sum on the true class
dscores1 = np.zeros(mat.shape)
dscores1[range(N),np.array(max_scores_idx,dtype='int32')] = mat1
row_sum = np.sum(dscores1,axis=1)
dscores1[range(N),np.array(labels,dtype='int32')] = -row_sum
if(C !=0.0):
dscores2 = np.zeros(scores.shape)
dscores2[:] = scores[:]
else:
dscores2 = 0
dscores1 = 2*dscores1
dscores2 = 2*dscores2
if(class_weighting=='average'):
gradW = np.dot((dscores1 + C*dscores2).transpose(),data)
gradW = gradW.transpose()
gradW = (0.5/N)*gradW
# gradW += gradW1 - gradW2
else:
sample_weights_b = sample_weights_batch[batch_num]
gradW = np.dot((dscores1 + C*dscores2).transpose(),data*sample_weights_b[:,None])
gradW = gradW.transpose()
gradW = (0.5/numClasses)*gradW
# gradW += gradW1 - gradW2
if(np.sum(gradW**2)>G_clip_threshold):#gradient clipping
# print('clipping gradients')
gradW = G_clip_threshold*gradW/np.sum(gradW**2)
# apply the chosen first-order update rule
if(update_type=='sgd'):
W = W - eta*gradW
elif(update_type=='momentum'):
v = mu * v - eta * gradW # integrate velocity
W += v # integrate position
elif(update_type=='nesterov'):
v_prev[:] = v[:] # back this up
v = mu * v - eta * gradW # velocity update stays the same
W += -mu * v_prev + (1 + mu) * v # position update changes form
elif(update_type=='adagrad'):
cache += gradW**2
W += - eta1* gradW / (np.sqrt(cache) + eps)
elif(update_type=='rmsprop'):
cache = decay_rate * cache + (1 - decay_rate) * gradW**2
W += - eta1 * gradW / (np.sqrt(cache) + eps)
elif(update_type=='adam'):
m = beta1*m + (1-beta1)*gradW
mt = m / (1-beta1**(iter1+1))
v = beta2*v + (1-beta2)*(gradW**2)
vt = v / (1-beta2**(iter1+1))
W += - eta1 * mt / (np.sqrt(vt) + eps)
else:
W = W - eta*gradW
# margin-term gradient (reg_type 'M'): scaled to the same
# effective step size as the loss gradient above
if(reg_type == 'M'):
gradW1= np.zeros(W.shape)
gradW2= np.zeros(W.shape)
for i in range(numClasses):
w=W[0:(M-1),i]
if(K_plus is not None):
gradW1[0:(M-1),i]=((E*0.5)/(numClasses))*2*np.dot(K_plus,w)
if(K_minus is not None):
gradW2[0:(M-1),i]=((E*0.5)/(numClasses))*temp_mat[:,i]
if(update_type == 'adam'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(cache) + eps))
else:
W += -(gradW1-gradW2)*(eta)
# ISTA: proximal soft-thresholding step for L1
if(reg_type == 'ISTA'):
if(update_type == 'adam'):
idx_plus = W > D*(eta1/(np.sqrt(vt) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(vt) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(vt) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(vt[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(vt[idx_minus]) + eps))
W[idx_zero] = 0.0
elif(update_type == 'adagrad' or update_type =='rmsprop'):
idx_plus = W > D*(eta1/(np.sqrt(cache) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(cache) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(cache) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(cache[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(cache[idx_minus]) + eps))
W[idx_zero] = 0.0
else:
idx_plus = W > D*(eta)
idx_minus = W < -D*(eta)
idx_zero = np.abs(W) < D*(eta)
W[idx_plus] = W[idx_plus] - D*(eta)
W[idx_minus] = W[idx_minus] + D*(eta)
W[idx_zero] = 0.0
# plain weight-decay step for L2 / elastic-net L2 part
if(reg_type=='l2'):
if(update_type == 'adam'):
W += -D*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*W*(eta1/(np.sqrt(cache) + eps))
else:
W += -D*W*(eta)
if(reg_type=='en'):
if(update_type == 'adam'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(cache) + eps))
else:
# NOTE(review): unlike the adaptive branches this omits the
# (1-E) elastic-net factor -- confirm whether intentional
W += -D*W*(eta)
# cumulative-penalty L1 step (Tsuruoka et al.): clip at zero and
# remember the applied penalty in q
if(reg_type=='l1' or reg_type == 'M'):
if(update_type=='adam'):
u = u + D*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(reg_type=='en'):
if(update_type=='adam'):
u = u + D*E*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*E*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*E*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(np.sum(W**2)>W_clip_threshold):#gradient clipping
# print('clipping normW')
W = W_clip_threshold*W/np.sum(W**2)
# --- periodic evaluation of the full objective on all of xTrain ---
if(iter1%iter_eval==0):
#once the W are calculated for each epoch we calculate the scores
scores=xTrain.dot(W)
# scores=scores-np.max(scores)
N=scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
# thresh1 = np.zeros(mat.shape)
# thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
scores1 = np.zeros(scores.shape)
scores1[:] = scores[:]
scores1[range(N),np.array(yTrain,dtype='int32')] = -np.inf
max_scores = np.max(scores1,axis =1)
mat1 = 1 - correct_scores + max_scores
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*0.5*np.sum(scores**2) + 0.5*np.sum(mat1**2)
f += (1.0/N)*f1
else:
f1 = C*0.5*np.sum((scores**2)*sample_weights[:,None]) + 0.5*np.sum((mat1**2)*sample_weights[:,None])
f+= (1.0/numClasses)*f1
for i in range(numClasses):
#first term in objective function for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f += ((0.5*E)/(numClasses))*f2
#the second term in the objective function for margin
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f += -((0.5*E)/(numClasses))*f3
if(verbose == True):
print('iter1=%d, f=%0.3f'%(iter1,f))
fvals[iter1]=f
rel_error=np.abs(f_prev-f)/np.abs(f_prev)
# sparsify: zero out weights that are negligible vs the largest
max_W = np.max(np.abs(W))
W[np.abs(W)<1e-03*max_W]=0.0
# keep the best W seen so far; stop as soon as the objective worsens
if(f<f_best):
f_best=f
W_best[:]=W[:]
max_W = np.max(np.abs(W))
W_best[np.abs(W_best)<1e-03*max_W]=0.0
iter_best=iter1
else:
break
f_prev=f
# 1/t learning-rate decay for the plain-SGD style updates
eta=eta_zero/np.power((iter1+1),1)
fvals[iter1]=-1
return W_best,f_best,iter_best,fvals
|
nilq/baby-python
|
python
|
#!/usr/local/Cellar/python/2.7.6/bin/python
# -*- coding: utf-8 -*-
import sys
import scipy.misc, scipy.io, scipy.optimize
from sklearn import svm, grid_search
from numpy import *
import pylab
from matplotlib import pyplot, cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.mlab as mlaba
from util import Util
def plot(data):
    """Scatter-plot labelled 2-D points.

    `data` is an (n, 3) array: columns 0-1 are coordinates, column 2 is the
    class label. Positives (label 1) are drawn as blue '+', negatives
    (label 0) as yellow 'o'.
    """
    labels = data[:, 2]
    pos = data[labels == 1]
    neg = data[labels == 0]
    pyplot.plot(pos[:, 0], pos[:, 1], 'b+')
    pyplot.plot(neg[:, 0], neg[:, 1], 'yo')
def gaussianKernel(x1, x2, sigma):
    """Return the Gaussian (RBF) kernel value exp(-||x1-x2||^2 / (2*sigma^2))."""
    sq_dist = sum((x1 - x2) ** 2.0)
    return exp(-sq_dist / (2.0 * sigma ** 2.0))
def visualizeBoundary(X, trained_svm):
    """Overlay the decision boundary of a fitted SVM on the current plot.

    Linear kernels: recover the primal weights from the dual coefficients and
    draw the separating line. RBF kernels: evaluate predictions on a 100x100
    grid spanning the data and draw a contour.
    """
    kernel = trained_svm.get_params()['kernel']
    if kernel == 'linear':
        # w = dual_coef . support_vectors recovers the primal weight vector
        w = trained_svm.dual_coef_.dot(trained_svm.support_vectors_).flatten()
        xs = linspace(min(X[:, 0]), max(X[:, 0]), 100)
        ys = (-w[0] * xs + trained_svm.intercept_) / w[1]
        pyplot.plot(xs, ys, 'b-')
    elif kernel == 'rbf':
        axis1 = linspace(min(X[:, 0]), max(X[:, 0]), 100)
        axis2 = linspace(min(X[:, 1]), max(X[:, 1]), 100)
        X1, X2 = meshgrid(axis1, axis2)
        vals = zeros(shape(X1))
        # classify the grid one column at a time
        for col in range(0, shape(X1)[1]):
            vals[:, col] = trained_svm.predict(c_[X1[:, col], X2[:, col]])
        pyplot.contour(X1, X2, vals, colors='blue')
def dataset3ParamsVer3(X, y, X_val, y_val):
    """Pick the best (kernel, C, gamma) for an RBF SVM via sklearn grid search.

    Note: X_val / y_val are accepted for interface parity with the other two
    versions but unused -- GridSearchCV cross-validates on (X, y) itself.
    Returns the `best_params_` dict from the fitted grid search.
    """
    C_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
    sigma_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
    raveled_y = y.ravel()
    rbf_svm = svm.SVC()
    parameters = {
        'kernel': ('rbf', ),
        # reuse C_values instead of repeating the literal list
        'C': C_values,
        # gamma = 1/sigma; a real list (not a lazy map() object) so the grid
        # can be re-iterated and the code survives Python 3
        'gamma': [1.0 / s for s in sigma_values],
    }
    grid = grid_search.GridSearchCV(rbf_svm, parameters)
    best = grid.fit(X, raveled_y).best_params_
    return best
def dataset3ParamsVer2(X, y, X_val, y_val):
    """Grid-search C and sigma for an RBF SVM by validation-set accuracy.

    Fits on (X, y), scores each combination on (X_val, y_val), and returns a
    dict with the best 'score', 'C', 'sigma' and the derived 'gamma' (1/sigma).
    """
    grid_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]  # shared by C and sigma
    y_flat = y.ravel()  # Else the SVM will give you annoying warning
    m_val = shape(X_val)[0]  # number of entries in validation data
    classifier = svm.SVC(kernel='rbf')
    best = {'score': -999, 'C': 0.0, 'sigma': 0.0}
    for C in grid_values:
        for sigma in grid_values:
            # train the SVM first
            classifier.set_params(C=C, gamma=1.0 / sigma)
            classifier.fit(X, y_flat)
            accuracy = classifier.score(X_val, y_val)
            # keep the highest validation accuracy seen so far
            if accuracy > best['score']:
                best['score'] = accuracy
                best['C'] = C
                best['sigma'] = sigma
                best['gamma'] = 1.0 / sigma
    return best
def dataset3ParamsVer1(X, y, X_val, y_val):
    """Grid-search C and sigma for an RBF SVM by validation-set error rate.

    Fits on (X, y), computes the misclassification rate on (X_val, y_val) for
    each combination, and returns a dict with the best 'error', 'C', 'sigma'
    and the derived 'gamma' (1/sigma).
    """
    C_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
    sigma_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
    raveled_y = y.ravel()  # Else the SVM will give you annoying warning
    m_val = shape(X_val)[0]  # number of entries in validation data
    rbf_svm = svm.SVC(kernel='rbf')
    best = {'error': 999, 'C': 0.0, 'sigma': 0.0}
    for C in C_values:
        for sigma in sigma_values:
            # train the SVM first
            rbf_svm.set_params(C=C)
            rbf_svm.set_params(gamma=1.0 / sigma)
            rbf_svm.fit(X, raveled_y)
            # Predict the whole validation matrix in one call instead of one
            # sample at a time: fixes the per-row 1-D input (rejected by
            # modern sklearn) and is dramatically faster.
            predictions = rbf_svm.predict(X_val).reshape(m_val, 1)
            error = (predictions != y_val.reshape(m_val, 1)).mean()
            # get the lowest error
            if error < best['error']:
                best['error'] = error
                best['C'] = C
                best['sigma'] = sigma
                best['gamma'] = 1.0 / best['sigma']
    return best
def part1_1():
"""Exercise part 1.1: linear SVM on dataset 1, boundaries for C=1 and C=100.

Side effects only: loads a hard-coded .mat file, opens three blocking
matplotlib windows (raw data, C=1 boundary, C=100 boundary).
"""
mat = scipy.io.loadmat( "/Users/saburookita/Downloads/mlclass-ex6-004/mlclass-ex6/ex6data1.mat" )
X, y = mat['X'], mat['y']
# show the raw data first
plot( c_[X, y] )
pyplot.show( block=True )
# linear SVM with C = 1
linear_svm = svm.SVC(C=1, kernel='linear')
linear_svm.fit( X, y.ravel() )
plot( c_[X, y] )
visualizeBoundary( X, linear_svm )
pyplot.show( block=True )
# try with C = 100
linear_svm.set_params( C=100 )
linear_svm.fit( X, y.ravel() )
plot( c_[X, y] )
visualizeBoundary( X, linear_svm )
pyplot.show( block=True )
def part1_2():
"""Exercise part 1.2: Gaussian kernel sanity check, then RBF SVM on dataset 2.

Prints the kernel value for a fixed example (Python 2 `print` statement --
this file predates Python 3), then plots the data and the learned boundary.
"""
x1 = array([1, 2, 1])
x2 = array([0, 4, -1])
sigma = 2
print "Gaussian kernel: %f" % gaussianKernel( x1, x2, sigma )
mat = scipy.io.loadmat( "/Users/saburookita/Downloads/mlclass-ex6-004/mlclass-ex6/ex6data2.mat" )
X, y = mat['X'], mat['y']
plot( c_[X, y] )
pyplot.show( block=True )
sigma = 0.01
rbf_svm = svm.SVC(C=1, kernel='rbf', gamma = 1.0 / sigma ) # gamma is actually inverse of sigma
rbf_svm.fit( X, y.ravel() )
plot( c_[X, y] )
visualizeBoundary( X, rbf_svm )
pyplot.show( block=True )
def part1_3():
    """Exercise part 1.3: tune (C, gamma) on dataset 3 with three strategies
    and plot the resulting decision boundary for each.

    Side effects only: loads a hard-coded .mat file and opens three blocking
    matplotlib windows.
    """
    mat = scipy.io.loadmat( "/Users/saburookita/Downloads/mlclass-ex6-004/mlclass-ex6/ex6data3.mat" )
    X, y = mat['X'], mat['y']
    X_val, y_val = mat['Xval'], mat['yval']
    rbf_svm = svm.SVC(kernel='rbf')
    for params_fn in (dataset3ParamsVer1, dataset3ParamsVer2, dataset3ParamsVer3):
        best = params_fn(X, y, X_val, y_val)
        rbf_svm.set_params(C=best['C'])
        rbf_svm.set_params(gamma=best['gamma'])
        # Bug fix: the original refit the SVM only for Ver1, so the Ver2 and
        # Ver3 plots showed the boundary of the previously fitted model, not
        # of their own best parameters. Refit after every set_params.
        # (Also uses y.ravel() consistently; the original passed the raw
        # column vector for Ver1.)
        rbf_svm.fit(X, y.ravel())
        plot(c_[X, y])
        visualizeBoundary(X, rbf_svm)
        pyplot.show(block=True)
def main():
"""Run all three exercise parts in order (each blocks on its plot windows)."""
# wide float printing so numpy arrays dump on one line
set_printoptions(precision=6, linewidth=200)
part1_1()
part1_2()
part1_3()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the OCPReportProcessor."""
import datetime
from unittest.mock import patch
from api.utils import DateHelper
from masu.database import OCP_REPORT_TABLE_MAP
from masu.database.ocp_report_db_accessor import OCPReportDBAccessor
from masu.database.report_manifest_db_accessor import ReportManifestDBAccessor
from masu.processor.ocp.ocp_report_parquet_summary_updater import OCPReportParquetSummaryUpdater
from masu.test import MasuTestCase
from masu.test.database.helpers import ReportObjectCreator
from reporting_common.models import CostUsageReportManifest
class OCPReportSummaryUpdaterTest(MasuTestCase):
"""Test cases for the OCPReportSummaryUpdater class."""
@classmethod
def setUpClass(cls):
"""Set up the test class with required objects."""
super().setUpClass()
# shared accessors/helpers for all tests; cls.schema comes from MasuTestCase
cls.accessor = OCPReportDBAccessor(cls.schema)
cls.report_schema = cls.accessor.report_schema
cls.all_tables = list(OCP_REPORT_TABLE_MAP.values())
cls.creator = ReportObjectCreator(cls.schema)
cls.date_accessor = DateHelper()
cls.manifest_accessor = ReportManifestDBAccessor()
cls.dh = DateHelper()
def setUp(self):
"""Set up each test."""
super().setUp()
self.provider = self.ocp_provider
self.today = self.dh.today
# first day of the current month = billing period start
billing_start = datetime.datetime(year=self.today.year, month=self.today.month, day=self.today.day).replace(
day=1
)
# NOTE(review): self.manifest_dict is built but never used below --
# the manifest is fetched from the DB instead; presumably leftover setup.
self.manifest_dict = {
"assembly_id": "1234",
"billing_period_start_datetime": billing_start,
"num_total_files": 2,
"num_processed_files": 1,
"provider_uuid": self.ocp_provider_uuid,
}
self.cluster_id = self.ocp_cluster_id
# reuse the manifest seeded by the base test fixtures for this provider/month
self.manifest = CostUsageReportManifest.objects.filter(
provider_id=self.ocp_provider_uuid, billing_period_start_datetime=self.dh.this_month_start
).first()
self.manifest.num_total_files = 2
self.manifest.save()
self.updater = OCPReportParquetSummaryUpdater(self.schema, self.provider, self.manifest)
# All DB/parquet heavy lifting is patched out; only call wiring is asserted.
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportParquetSummaryUpdater._check_parquet_date_range"
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportDBAccessor.populate_openshift_cluster_information_tables" # noqa: E501
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportDBAccessor.delete_line_item_daily_summary_entries_for_date_range" # noqa: E501
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater."
"OCPReportDBAccessor.populate_volume_label_summary_table"
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater." "OCPReportDBAccessor.populate_pod_label_summary_table"
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater."
"OCPReportDBAccessor.populate_line_item_daily_summary_table_presto"
)
def test_update_summary_tables(
self, mock_sum, mock_tag_sum, mock_vol_tag_sum, mock_delete, mock_cluster_populate, mock_date_check
):
"""Test that summary tables are run for a full month when no report period is found."""
start_date = self.dh.today
end_date = start_date
start_date_str = start_date.strftime("%Y-%m-%d")
end_date_str = end_date.strftime("%Y-%m-%d")
mock_date_check.return_value = (start_date, end_date)
self.updater.update_summary_tables(start_date_str, end_date_str)
# old rows are purged for the resolved date range, then every summary
# population step runs once
mock_delete.assert_called_with(self.ocp_provider.uuid, start_date.date(), end_date.date())
mock_sum.assert_called()
mock_tag_sum.assert_called()
mock_vol_tag_sum.assert_called()
mock_date_check.assert_called()
def test_update_daily_tables(self):
# update_daily_tables is a documented no-op for parquet; only the log
# message is asserted
start_date = self.dh.today
end_date = start_date
start_date_str = start_date.strftime("%Y-%m-%d")
end_date_str = end_date.strftime("%Y-%m-%d")
expected = (
"INFO:masu.processor.ocp.ocp_report_parquet_summary_updater:"
"NO-OP update_daily_tables for: %s-%s" % (start_date_str, end_date_str)
)
with self.assertLogs("masu.processor.ocp.ocp_report_parquet_summary_updater", level="INFO") as _logger:
self.updater.update_daily_tables(start_date_str, end_date_str)
self.assertIn(expected, _logger.output)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportDBAccessor."
"get_max_min_timestamp_from_parquet" # noqa: E501
)
def test_check_parquet_date_range(self, mock_get_timestamps):
"""Check that we modify start date when needed."""
start_date = self.dh.this_month_start.date()
end_date = self.dh.this_month_end.date()
# parquet data starts today (later than month start), so the range
# should be narrowed to the parquet minimum
parquet_start_date = self.dh.today.replace(tzinfo=None)
parquet_end_date = self.dh.today.replace(tzinfo=None)
mock_get_timestamps.return_value = (parquet_start_date, parquet_end_date)
result_start, result_end = self.updater._check_parquet_date_range(start_date, end_date)
self.assertNotEqual(start_date, result_start)
self.assertEqual(parquet_start_date.date(), result_start)
|
nilq/baby-python
|
python
|
# MQTT
import sensor
# Shock sensor
import RPi.GPIO as GPIO
class ShockSensor(sensor.Sensor):
    """Debounced reader for a GPIO shock/vibration sensor on BCM pin 17.

    The sensor line reads 1 when idle and 0 while vibrating (active low).
    """

    def __init__(self, window_size=10):
        """Configure the GPIO pin.

        :param window_size: number of samples polled per reading. The original
            code referenced an undefined global ``windowsize``; it is now a
            constructor parameter with a default, so existing ``ShockSensor()``
            callers keep working.
        """
        super(ShockSensor, self).__init__()
        GPIO.setmode(GPIO.BCM)
        self.SHOCK_PIN = 17
        self.window_size = window_size
        GPIO.setup(self.SHOCK_PIN, GPIO.IN)

    def get_value(self):
        """Return 1 if vibration was seen in any sample of the window, else 0."""
        # The vibration sensor is 1 when no vibration is detected, and 0 when there is vibration
        for _ in range(self.window_size):
            # Bug fix: was `GPIO.input(SHOCK_PIN)` over an undefined global
            # `windowsize` -- both NameErrors at runtime.
            shock = GPIO.input(self.SHOCK_PIN)
            if not shock:
                return 1
        # whole window was quiet (original returned `not shock`, i.e. falsy)
        return 0
def get_shock2(windowsize=1):
    """Return a simulated sensor reading: a random integer in [1, 10].

    Mirrors the original loop (which re-drew `windowsize` times and kept only
    the last draw), but `windowsize` is now a parameter with a default instead
    of an undefined global, and `random` is imported (the original module
    never imported it, so this raised NameError).
    Returns 1 unchanged when windowsize is 0.
    """
    import random
    v = 1
    for _ in range(windowsize):
        v = random.randint(1, 10)
    return v
# NOTE(review): broken/dead top-level publish loop. `mqttc` and `time` are
# never defined or imported in this module, so this block raises NameError
# if executed; the two cleanup calls after `while True:` are unreachable.
# Presumably copied from an MQTT example -- confirm before deleting.
while True:
s=get_shock2()
(result,mid)=mqttc.publish("sensors/newpipe",s,2)
time.sleep(1)
mqttc.loop_stop()
mqttc.disconnect()
def publish():
"""Publish a test payload to the sensors/newpipe topic.

NOTE(review): `publish.single(...)` resolves to this very function (which
has no `single` attribute), so this raises AttributeError as written; it
presumably intended paho.mqtt.publish.single -- confirm the missing import.
"""
#s = get_shock()
s = "testing shock"
publish.single('sensors/newpipe', payload=s, qos=1, hostname='brix.d.cs.uoregon.edu', port='8100' )
|
nilq/baby-python
|
python
|
from comm.ntlmrelayx.servers.httprelayserver import HTTPRelayServer
from impacket.examples.ntlmrelayx.servers.smbrelayserver import SMBRelayServer
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.4 on 2021-01-10 00:37
from django.db import migrations, models
class Migration(migrations.Migration):
"""Auto-generated migration: allow NULL and cap length at 500 on
ResumeSubsection.subtext."""
dependencies = [
('resume', '0003_auto_20210109_1855'),
]
operations = [
migrations.AlterField(
model_name='resumesubsection',
name='subtext',
field=models.CharField(max_length=500, null=True),
),
]
|
nilq/baby-python
|
python
|
#coding=utf-8
from django import forms
from common.models import PersonTelephoneNumber, TelephoneNumber
from django.core import validators
from django.forms.models import ModelForm
from personal.models import Firefighter
class PersonPhoneForm(forms.Form):
    """Create or update one telephone number attached to a person.

    A non-empty hidden `id` means "update that existing PersonTelephoneNumber";
    otherwise a new TelephoneNumber + PersonTelephoneNumber pair is created.
    """
    id = forms.CharField(widget=forms.HiddenInput, required=False)
    type = forms.ChoiceField(label=u'Tipo', choices=PersonTelephoneNumber.TELEPHONE_TYPE_CHOICES)
    code = forms.CharField(label=u'Código', validators=[validators.MaxLengthValidator(4), validators.RegexValidator(regex="\d\d\d\d")])
    number = forms.CharField(label=u'Número', validators=[validators.MaxLengthValidator(7), validators.RegexValidator(regex="\d\d\d\d\d\d\d")])

    def save(self, instance):
        """Persist the form's phone data against `instance` (the person model).

        Must be called after is_valid(): reads cleaned_data throughout.
        """
        if self.cleaned_data.get("id", ""):
            phone = instance.persontelephonenumber_set.get(id=self.cleaned_data["id"])
            # Bug fix: was `self.data["type"]` (raw, unvalidated POST data);
            # use the cleaned value like every other field here.
            phone.type = self.cleaned_data["type"]
            phone.telephone_number.code = self.cleaned_data["code"]
            phone.telephone_number.number = self.cleaned_data["number"]
            phone.telephone_number.save()
            phone.save()
        else:
            tphone = TelephoneNumber(code=self.cleaned_data["code"], number=self.cleaned_data["number"])
            tphone.save()
            phone = PersonTelephoneNumber(person=instance, type=self.cleaned_data["type"], telephone_number=tphone)
            phone.save()
class PartialFirefighterForm(ModelForm):
"""Model form exposing only a firefighter's profile picture for editing."""
class Meta:
model = Firefighter
fields = ('profile_picture',)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
from threading import Thread
import socket
import pickle as pickle
import time
import os
from collections import deque
import shutil
import re
import sys
import hashlib
from rpyc import Service, connect, async_
from rpyc.utils.server import ThreadPoolServer
from tgen.futil import file_stream
from tgen.logf import log_info, set_debug_stream, log_debug
from tgen.logf import log_warn, is_debug_stream
from tgen.rnd import rnd
from tgen.parallel_percrank_train import ServiceConn
from tgen.seq2seq import Seq2SeqGen
from tgen.seq2seq_ensemble import Seq2SeqEnsemble
from tgen.cluster import Job
def get_worker_registrar_for(head):
    """Return a class that will handle worker registration for the given head.

    The returned RPyC service closes over `head` and, on each registration,
    connects back to the worker, kicks off its (asynchronous) training
    initialization, and records the pending request on the head.
    """
    class WorkerRegistrarService(Service):
        """An RPyC service to register workers with a head."""

        def exposed_register_worker(self, host, port):
            """Register a worker with my head, initialize it."""
            # initiate connection in the other direction
            log_info('Worker %s:%d connected, initializing training.' % (host, port))
            conn = connect(host, port, config={'allow_pickle': True})
            # initialize the remote server (with training data etc.)
            init_func = async_(conn.root.init_training)
            # add unique 'scope suffix' so that the models don't clash in ensembles
            # Bug fix: hashlib.md5 requires bytes, not str, under Python 3
            # (this module targets Python 3 via the builtins/async_ imports),
            # so the string is encoded before hashing.
            head.cfg['scope_suffix'] = hashlib.md5(("%s:%d" % (host, port)).encode('utf-8')).hexdigest()
            req = init_func(pickle.dumps(head.cfg, pickle.HIGHEST_PROTOCOL))
            # add it to the list of running services (job_no None = init request)
            sc = ServiceConn(host, port, conn)
            head.services.add(sc)
            head.pending_requests.add((sc, None, req))
            log_info('Worker %s:%d initialized.' % (host, port))

    return WorkerRegistrarService
class ParallelSeq2SeqTraining(object):
"""Main (head) that handles parallel Seq2Seq generator training, submitting training jobs and
collecting their results"""
DEFAULT_PORT = 25125
TEMPFILE_NAME = 'seq2seq_temp_dump.pickle.gz'
def __init__(self, cfg, work_dir, experiment_id=None):
# initialize base class
super(ParallelSeq2SeqTraining, self).__init__()
# store config
self.cfg = cfg
# initialize myself
self.work_dir = work_dir
self.jobs_number = cfg.get('jobs_number', 10)
self.job_memory = cfg.get('job_memory', 8)
self.port = cfg.get('port', self.DEFAULT_PORT)
self.queue_settings = cfg.get('queue_settings')
self.host = socket.getfqdn()
self.poll_interval = cfg.get('poll_interval', 1)
self.average_models = cfg.get('average_models', False)
self.average_models_top_k = cfg.get('average_models_top_k', 0)
self.experiment_id = experiment_id if experiment_id is not None else ''
# this will be needed when running
self.server = None
self.server_thread = None
self.jobs = None
self.pending_requests = None
self.services = None
self.free_services = None
self.results = None
# this is needed for saving the model
self.model_temp_path = None
def train(self, das_file, ttree_file, data_portion=1.0, context_file=None, validation_files=None):
"""Run parallel perceptron training, start and manage workers."""
# initialize the ranker instance
log_info('Initializing...')
# run server to process registering clients
self._init_server()
# spawn training jobs
log_info('Spawning jobs...')
host_short, _ = self.host.split('.', 1) # short host name for job names
for j in range(self.jobs_number):
# set up debugging logfile only if we have it on the head
debug_logfile = ('"PRT%02d.debug-out.txt.gz"' % j) if is_debug_stream() else 'None'
job = Job(header='from tgen.parallel_seq2seq_train import run_training',
code=('run_training("%s", %d, %s)' %
(self.host, self.port, debug_logfile)),
name=self.experiment_id + ("PRT%02d-%s-%d" % (j, host_short, self.port)),
work_dir=self.work_dir)
job.submit(memory=self.job_memory, queue=self.queue_settings)
self.jobs.append(job)
# run the training passes
try:
cur_assign = 0
results = [None] * self.jobs_number
rnd_seeds = [rnd.random() for _ in range(self.jobs_number)]
# assign training and wait for it to finish
while cur_assign < self.jobs_number or self.pending_requests:
log_debug('Starting loop over services.')
# check if some of the pending computations have finished
for sc, job_no, req in list(self.pending_requests):
res = self._check_pending_request(sc, job_no, req)
if res is not None:
results[job_no] = res, sc
# check for free services and assign new computation
while cur_assign < self.jobs_number and self.free_services:
log_debug('Assigning request %d' % cur_assign)
sc = self.free_services.popleft()
log_info('Assigning request %d to %s:%d' % (cur_assign, sc.host, sc.port))
if validation_files is not None:
validation_files = ','.join([os.path.relpath(f, self.work_dir)
for f in validation_files.split(',')])
train_func = async_(sc.conn.root.train)
req = train_func(rnd_seeds[cur_assign],
os.path.relpath(das_file, self.work_dir),
os.path.relpath(ttree_file, self.work_dir),
data_portion,
os.path.relpath(context_file, self.work_dir)
if context_file else None,
validation_files)
self.pending_requests.add((sc, cur_assign, req))
cur_assign += 1
log_debug('Assigned %d' % cur_assign)
# sleep for a while
log_debug('Sleeping.')
time.sleep(self.poll_interval)
log_info("Results:\n" + "\n".join("%.5f %s:%d" % (cost, sc.host, sc.port)
for cost, sc in results))
self.model_temp_path = os.path.join(self.work_dir, self.TEMPFILE_NAME)
results.sort(key=lambda res: res[0])
# average the computed models
if self.average_models:
log_info('Creating ensemble models...')
# use only top k if required
results_for_ensemble = (results[:self.average_models_top_k]
if self.average_models_top_k > 0
else results)
ensemble_model = self.build_ensemble_model(results_for_ensemble)
log_info('Saving the ensemble model temporarily to %s...' % self.model_temp_path)
ensemble_model.save_to_file(self.model_temp_path)
# select the best result on devel data + save it
else:
best_cost, best_sc = results[0]
log_info('Best cost: %f (computed at %s:%d).' % (best_cost, best_sc.host, best_sc.port))
log_info('Saving best generator temporarily to %s...' % self.model_temp_path)
# use relative path (working directory of worker jobs is different)
best_sc.conn.root.save_model(os.path.relpath(self.model_temp_path, self.work_dir))
# kill all jobs
finally:
for job in self.jobs:
job.delete()
def _check_pending_request(self, sc, job_no, req):
"""Check whether the given request has finished (i.e., job is loaded or job has
processed the given data portion.
If the request is finished, the worker that processed it is moved to the pool
of free services.
@param iter_no: current iteration number (for logging)
@param sc: a ServiceConn object that stores the worker connection parameters
@param job_no: current job number (is None for jobs loading)
@param req: the request itself
@return: the value returned by the finished data processing request, or None \
(for loading requests or unfinished requests)
"""
result = None
if job_no is not None:
log_debug('Checking %d' % job_no)
# checking if the request has finished
if req.ready:
if job_no is not None:
log_debug('Ready %d' % job_no)
log_info('Retrieved finished request %d' % job_no)
if req.error:
log_info('Error found on request: job #%d, worker %s:%d' %
(job_no if job_no is not None else -1, sc.host, sc.port))
result = req.value
# remove from list of pending requests
# TODO return to pool of free requests (but needs to store the results somewhere)
self.pending_requests.remove((sc, job_no, req))
if job_no is None:
self.free_services.append(sc)
return result
def _init_server(self):
"""Initializes a server that registers new workers."""
registrar_class = get_worker_registrar_for(self)
n_tries = 0
self.server = None
last_error = None
while self.server is None and n_tries < 10:
try:
n_tries += 1
self.server = ThreadPoolServer(service=registrar_class, nbThreads=1, port=self.port)
except socket.error as e:
log_warn('Port %d in use, trying to use a higher port...' % self.port)
self.port += 1
last_error = e
if self.server is None:
if last_error is not None:
raise last_error
raise Exception('Could not initialize server')
self.services = set()
self.free_services = deque()
self.pending_requests = set()
self.jobs = []
self.server_thread = Thread(target=self.server.start)
self.server_thread.setDaemon(True)
self.server_thread.start()
def save_to_file(self, model_fname):
"""This will actually just move the best generator (which is saved in a temporary file)
to the final location."""
log_info('Moving generator to %s...' % model_fname)
orig_model_fname = self.model_temp_path
shutil.move(orig_model_fname, model_fname)
orig_tf_session_fname = re.sub(r'(.pickle)?(.gz)?$', '.tfsess', orig_model_fname)
tf_session_fname = re.sub(r'(.pickle)?(.gz)?$', '.tfsess', model_fname)
if os.path.isfile(orig_tf_session_fname):
shutil.move(orig_tf_session_fname, tf_session_fname)
# move the reranking classifier model files as well, if they exist
orig_clfilter_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tftreecl\1', orig_model_fname)
orig_clfilter_tf_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tfsess', orig_clfilter_fname)
if os.path.isfile(orig_clfilter_fname) and os.path.isfile(orig_clfilter_tf_fname):
clfilter_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tftreecl\1', model_fname)
clfilter_tf_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tfsess', clfilter_fname)
shutil.move(orig_clfilter_fname, clfilter_fname)
shutil.move(orig_clfilter_tf_fname, clfilter_tf_fname)
def build_ensemble_model(self, results):
    """Load the models computed by the individual jobs and compose them into a single
    ensemble model.
    @param results: list of tuples (cost, ServiceConn object), where cost is not used
    @return: the built Seq2SeqEnsemble object
    """
    ensemble = Seq2SeqEnsemble(self.cfg)
    # Collect (settings, parameters) from every worker's model.
    models = [(pickle.loads(sc.conn.root.get_all_settings()),
               pickle.loads(sc.conn.root.get_model_params()))
              for _, sc in results]
    # Reranker data is taken from the first worker (presumably identical
    # across workers).
    first_root = results[0][1].conn.root
    rerank_settings = first_root.get_rerank_settings()
    if rerank_settings is not None:
        rerank_settings = pickle.loads(rerank_settings)
    rerank_params = first_root.get_rerank_params()
    if rerank_params is not None:
        rerank_params = pickle.loads(rerank_params)
    ensemble.build_ensemble(models, rerank_settings, rerank_params)
    return ensemble
class Seq2SeqTrainingService(Service):
    """RPyC Worker class for a job training a Seq2Seq generator."""

    def __init__(self, conn_ref):
        super(Seq2SeqTrainingService, self).__init__(conn_ref)
        # The trained generator; created by exposed_init_training.
        self.seq2seq = None

    def exposed_init_training(self, cfg):
        """Create the Seq2SeqGen object.

        @param cfg: pickled configuration passed to the Seq2SeqGen constructor
        """
        cfg = pickle.loads(cfg)
        tstart = time.time()
        log_info('Initializing training...')
        self.seq2seq = Seq2SeqGen(cfg)
        log_info('Training initialized. Time taken: %f secs.' % (time.time() - tstart))

    def exposed_train(self, rnd_seed, das_file, ttree_file, data_portion, context_file, validation_files):
        """Run the whole training (all data arguments are passed straight
        through to Seq2SeqGen.train).

        @param rnd_seed: random seed for this worker's run
        @return: the best cost reached during training (top_k_costs[0])
        """
        rnd.seed(rnd_seed)
        log_info('Random seed: %f' % rnd_seed)
        tstart = time.time()
        log_info('Starting training...')
        self.seq2seq.train(das_file, ttree_file, data_portion, context_file, validation_files)
        log_info('Training finished -- time taken: %f secs.' % (time.time() - tstart))
        top_cost = self.seq2seq.top_k_costs[0]
        log_info('Best cost: %f' % top_cost)
        return top_cost

    def exposed_save_model(self, model_fname):
        """Save the model to the given file (must be given relative to the worker's working
        directory!).
        @param model_fname: target path where to save the model (relative to worker's \
            working directory)
        """
        self.seq2seq.save_to_file(model_fname)

    def exposed_get_model_params(self):
        """Retrieve all parameters of the worker's local model (as a dictionary)
        @return: model parameters in a pickled dictionary -- keys are names, values are numpy arrays
        """
        p_dump = pickle.dumps(self.seq2seq.get_model_params(), protocol=pickle.HIGHEST_PROTOCOL)
        return p_dump

    def exposed_get_all_settings(self):
        """Call `get_all_settings` on the worker and return the result as a pickle."""
        settings = pickle.dumps(self.seq2seq.get_all_settings(), protocol=pickle.HIGHEST_PROTOCOL)
        return settings

    def exposed_get_rerank_params(self):
        """Call `get_model_params` on the worker's reranker and return the result as a pickle.

        Returns None if no reranking classifier is configured.
        """
        if not self.seq2seq.classif_filter:
            return None
        p_dump = pickle.dumps(self.seq2seq.classif_filter.get_model_params(),
                              protocol=pickle.HIGHEST_PROTOCOL)
        return p_dump

    def exposed_get_rerank_settings(self):
        """Call `get_all_settings` on the worker's reranker and return the result as a pickle.

        Returns None if no reranking classifier is configured.
        """
        if not self.seq2seq.classif_filter:
            return None
        settings = pickle.dumps(self.seq2seq.classif_filter.get_all_settings(),
                                protocol=pickle.HIGHEST_PROTOCOL)
        return settings
def run_training(head_host, head_port, debug_out=None):
    """Main worker training routine (creates the Seq2SeqTrainingService and connects it to the
    head).

    @param head_host: hostname of the head
    @param head_port: head port number
    @param debug_out: path to the debugging output file (debug output discarded if None)
    """
    # Setup debugging output, if applicable.
    if debug_out is not None:
        set_debug_stream(file_stream(debug_out, mode='w'))
    # Start the RPyC server in a background thread.
    log_info('Creating training server...')
    server = ThreadPoolServer(service=Seq2SeqTrainingService, nbThreads=1)
    worker_thread = Thread(target=server.start)
    worker_thread.start()
    my_host = socket.getfqdn()
    log_info('Worker server created at %s:%d. Connecting to head at %s:%d...' %
             (my_host, server.port, head_host, head_port))
    # Notify the head about this server, then drop the control connection.
    head_conn = connect(head_host, head_port, config={'allow_pickle': True})
    head_conn.root.register_worker(my_host, server.port)
    head_conn.close()
    log_info('Worker is registered with the head.')
    # Serve until killed (the server thread keeps running).
    worker_thread.join()
if __name__ == '__main__':
    # Expect exactly two CLI arguments: head hostname and port.
    # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch only the errors argument parsing can actually raise.
    try:
        host = sys.argv[1]
        port = int(sys.argv[2])
    except (IndexError, ValueError):
        sys.exit('Usage: ' + sys.argv[0] + ' <head-address> <head-port>')
    run_training(host, port)
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand
from flatblocks.models import FlatBlock
from camper.pages.models import Chunk
class Command(BaseCommand):
    # User-facing description shown by `manage.py help`.
    # BUG FIX: typo "Copes" -> "Copies".
    help = 'Copies FlatBlock content into new Chunk objects'

    def handle(self, *args, **options):
        """Copy every FlatBlock into a Chunk; skip slugs that already exist."""
        for fb in FlatBlock.objects.all():
            try:
                # Existence check only -- the fetched object is not used.
                Chunk.objects.get(slug=fb.slug)
                print("%s already exists" % fb.slug)
            except Chunk.DoesNotExist:
                c = Chunk()
                c.slug = fb.slug
                c.content = fb.content
                # Record the source markup type on the markup field.
                c.content.markup_type = 'markdown'
                c.save()
                print("saved %s" % fb.slug)
|
nilq/baby-python
|
python
|
__all__ = ["configreader"]
|
nilq/baby-python
|
python
|
import discord
from discord.ext import commands
class Hater(commands.Cog):
    """Cog maintaining a shared list of 'hated' guild members on the client."""

    def __init__(self, client):
        self.client = client
        # Stored on the client so it is shared across commands.
        self.client.hated_list = []

    @commands.command()
    async def hate(self, ctx, hated):
        """Add the mentioned member to the naughties list.

        BUG FIX: the original `int(hated[3:-1])` only worked for the
        nickname mention format <@!id>; for plain <@id> mentions it
        silently dropped the first digit. Stripping the mention
        punctuation handles both formats.
        """
        hated_id = int(hated.strip('<@!>'))
        hated_member = ctx.guild.get_member(hated_id)
        self.client.hated_list.append(hated_member)
        await ctx.send(f'Added **{hated_member.name}** ({hated_member.mention}) to the naughties list.')

    @commands.command()
    async def show_hated(self, ctx):
        """Send the current naughties list to the channel."""
        message = ['**--- The naughties list ---**']
        # Plain loop instead of a side-effect-only list comprehension.
        for member in self.client.hated_list:
            message.append(f'{member.name} ({member.mention})')
        await ctx.send('\n'.join(message))
def setup(client):
    """Entry point used by discord.py to attach the Hater cog to the bot."""
    client.add_cog(Hater(client))
|
nilq/baby-python
|
python
|
# Copyright 2018 NTRlab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import json
import logging
from bgx_pbft.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
#from bgx_pbft.consensus.wait_certificate import WaitCertificate
LOGGER = logging.getLogger(__name__)
def block_id_is_genesis(block_id):
    """Determines if the block ID represents the genesis block.

    Args:
        block_id (str): The block ID to check

    Returns:
        True if this ID is the genesis (null) block identifier,
        or False otherwise.
    """
    return block_id == NULL_BLOCK_IDENTIFIER
def deserialize_wait_certificate(block, pbft_enclave_module):
    """Deserializes the wait certificate associated with the block.

    NOTE: the actual deserialization below is currently disabled (kept as a
    string literal, matching the commented-out WaitCertificate import at the
    top of the module), so this function always returns None.

    Args:
        block (Block or BlockWrapper): The block that has the wait certificate
        pbft_enclave_module (module): The PBFT enclave module (unused while
            the implementation is disabled)

    Returns:
        WaitCertificate: The reconstituted wait certificate associated
        with the block or None if cannot deserialize
    """
    # The wait certificate is a JSON string placed in the consensus
    # field/property of the block header. Parse the JSON and then use the
    # serialized wait certificate and signature to create a
    # WaitCertificate object.
    wait_certificate = None
    """
    if block is not None:
        try:
            wait_certificate_dict = \
                json.loads(block.header.consensus.decode())
            wait_certificate = \
                WaitCertificate.wait_certificate_from_serialized(
                    pbft_enclave_module=None,#pbft_enclave_module=pbft_enclave_module,
                    serialized=wait_certificate_dict['SerializedCertificate'],
                    signature=wait_certificate_dict['Signature'])
        except (json.decoder.JSONDecodeError, KeyError):
            pass
    """
    return wait_certificate
def get_previous_certificate_id(block_header,
                                block_cache,
                                pbft_enclave_module):
    """Returns the wait certificate ID for the block immediately preceding the
    block represented by block_header.

    Args:
        block_header (BlockHeader): The header for the block
        block_cache (BlockCache): The cache of blocks that are predecessors
            to the block represented by block_header
        pbft_enclave_module (module): The PBFT enclave module

    Returns:
        str: The ID of the wait certificate for the block immediately
        preceding the block represented by block_header
    """
    # Genesis has no predecessor certificate.
    if block_id_is_genesis(block_header.previous_block_id):
        return NULL_BLOCK_IDENTIFIER
    wait_certificate = deserialize_wait_certificate(
        block=block_cache[block_header.previous_block_id],
        pbft_enclave_module=None)  # enclave module deliberately not forwarded
    if wait_certificate is None:
        return NULL_BLOCK_IDENTIFIER
    return wait_certificate.identifier
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""This module provides functionality to create a custom preoptimization
sequence from a directed acyclic graph (DAG) using topological sorting.
In the current version the DAG have to be specified manually via constants.
"""
import multiprocessing
import random
import logging
import polyjit.experiments.sequences.polly_stats as polly_stats
import pprof_utilities
__author__ = "Christoph Woller"
__credits__ = ["Christoph Woller"]
__maintainer__ = "Christoph Woller"
__email__ = "wollerch@fim.uni-passau.de"
# Directory containing results of earlier pprof-study runs.
SEQUENCE_FILE_PATH = '.../pprof-study/results/'
# File holding the best sequences found so far.
SEQUENCE_FILE = 'best_sequences.raw'
# Each stored sequence line starts with this prefix.
SEQUENCE_PREFIX = 'Best: '
def calculate_fitness_value(sequence, seq_to_fitness, key, program):
    """Calculates the fitness value of the provided sequence.

    The fitness of a sequence is the number of regions that are no valid
    SCoPs if this sequence is used for preoptimization before Polly's SCoP
    detection. Already-computed values are left untouched (memoization).

    Args:
        sequence (list[string]): the sequence for that the fitness value
            should be calculated.
        seq_to_fitness (dict): dictionary that stores calculated fitness
            values.
        key (string): the key of the provided sequence for the dictionary.
        program (string): the name of the application this sequence
            should be used for.
    """
    # Guard clause: skip sequences whose fitness is already cached.
    if key in seq_to_fitness:
        return
    seq_to_fitness[key] = polly_stats.get_regions_without_scops(sequence,
                                                               program)
def evaluate_best_sequence(program):
    """Generates optimization sequences from a dependency graph and calculates
    the best of these sequences for the specified program.

    Returns one randomly chosen sequence among all sequences that share the
    best (lowest) fitness value.
    """
    log = logging.getLogger(__name__)
    # Get different topological sorting arrangements.
    sequences = pprof_utilities.read_sequences(SEQUENCE_FILE_PATH,
                                               SEQUENCE_FILE, SEQUENCE_PREFIX)
    possible_sequences = len(sequences)
    # Manager dict so fitness values can be written from pool workers.
    seq_to_fitness = multiprocessing.Manager().dict()
    pool = multiprocessing.Pool()
    # Calculate the fitness value of the topological sorting arrangements.
    for sequence in sequences:
        pool.apply_async(calculate_fitness_value, args=(
            sequence, seq_to_fitness, str(sequence), program))
    pool.close()
    pool.join()
    # Sort ascending by fitness, then reverse so the sequence with the
    # lowest fitness ends up last and can be pop()ed off.
    sequences.sort(key=lambda s: seq_to_fitness[str(s)])
    sequences = sequences[::-1]
    fittest = sequences.pop()
    fittest_fitness_value = seq_to_fitness[str(fittest)]
    fittest_sequences = [fittest]
    # Collect all remaining sequences that tie with the best fitness value.
    equal = True
    while sequences and equal:
        other = sequences.pop()
        if seq_to_fitness[str(other)] == fittest_fitness_value:
            fittest_sequences.append(other)
        else:
            equal = False
    log.info("Best sequences %d of %s", len(fittest_sequences),
             str(possible_sequences))
    for sequence in fittest_sequences:
        log.info("Best: %s", str(sequence))
    log.info("----------------------------------------------------------------")
    return random.choice(fittest_sequences)
|
nilq/baby-python
|
python
|
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class AnimatorOverrideController:
    """Auto-generated UdonPie stub mirroring UnityEngine.AnimatorOverrideController.

    All bodies are ``pass``: only the signatures and docstrings are provided
    (appears to be for editor/type-checking support -- no runtime behavior).
    ``get_Item``/``set_Item`` follow the ``@overload`` pattern: each overload
    documents one accepted signature, and the final undecorated definition is
    the one that remains bound at runtime.
    """

    def __new__(cls, arg1=None):
        '''
        :returns: AnimatorOverrideController
        :rtype: UnityEngine.AnimatorOverrideController
        '''
        pass

    @staticmethod
    def op_Implicit(arg1):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass

    @staticmethod
    def op_Equality(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass

    @staticmethod
    def op_Inequality(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass

    @staticmethod
    def get_runtimeAnimatorController():
        '''
        :returns: RuntimeAnimatorController
        :rtype: UnityEngine.RuntimeAnimatorController
        '''
        pass

    @staticmethod
    def set_runtimeAnimatorController(arg1):
        '''
        :param arg1: RuntimeAnimatorController
        :type arg1: UnityEngine.RuntimeAnimatorController
        '''
        pass

    @staticmethod
    @overload
    def get_Item(arg1):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :returns: AnimationClip
        :rtype: UnityEngine.AnimationClip
        '''
        pass

    @staticmethod
    @overload
    def get_Item(arg1):
        '''
        :param arg1: AnimationClip
        :type arg1: UnityEngine.AnimationClip
        :returns: AnimationClip
        :rtype: UnityEngine.AnimationClip
        '''
        pass

    @staticmethod
    def get_Item(arg1=None):
        # Runtime dispatching definition for the overloads above.
        pass

    @staticmethod
    @overload
    def set_Item(arg1, arg2):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :param arg2: AnimationClip
        :type arg2: UnityEngine.AnimationClip
        '''
        pass

    @staticmethod
    @overload
    def set_Item(arg1, arg2):
        '''
        :param arg1: AnimationClip
        :type arg1: UnityEngine.AnimationClip
        :param arg2: AnimationClip
        :type arg2: UnityEngine.AnimationClip
        '''
        pass

    @staticmethod
    def set_Item(arg1=None, arg2=None):
        # Runtime dispatching definition for the overloads above.
        pass

    @staticmethod
    def get_overridesCount():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass

    @staticmethod
    def GetOverrides(arg1):
        '''
        :param arg1: Undefined variable
        :type arg1: SystemCollectionsGenericList.SystemCollectionsGenericList
        '''
        pass

    @staticmethod
    def ApplyOverrides(arg1):
        '''
        :param arg1: Undefined variable
        :type arg1: SystemCollectionsGenericIList.SystemCollectionsGenericIList
        '''
        pass

    @staticmethod
    def get_animationClips():
        '''
        :returns: AnimationClipArray
        :rtype: UnityEngine.AnimationClipArray
        '''
        pass

    @staticmethod
    def GetInstanceID():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass

    @staticmethod
    def GetHashCode():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass

    @staticmethod
    def Equals(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass

    @staticmethod
    def get_name():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass

    @staticmethod
    def set_name(arg1):
        '''
        :param arg1: String
        :type arg1: System.String or str
        '''
        pass

    @staticmethod
    def ToString():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass

    @staticmethod
    def GetType():
        '''
        :returns: Type
        :rtype: System.Type
        '''
        pass
|
nilq/baby-python
|
python
|
from pydantic import BaseModel, Field
class DOIDoc(BaseModel):
    """
    DOIs to reference specific materials on Materials Project.
    """

    # Digital Object Identifier of the canonical reference.
    doi: str = Field(
        None, description="DOI of the material.",
    )

    # Ready-to-use BibTeX entry for the reference.
    bibtex: str = Field(
        None, description="Bibtex reference of the material.",
    )

    task_id: str = Field(
        None,
        description="The Materials Project ID of the material. This comes in the form: mp-******",
    )
|
nilq/baby-python
|
python
|
from flask import g, jsonify, request
from app import auth
from app.services.base.models import User, LoginLog
from app.services.base.views import bp
@bp.route('/login_logs')
@auth.login_required
def list_login_logs():
    """Return a paginated JSON list of login-log records joined with usernames.

    Supports fuzzy filtering via the `username_like` query parameter.
    """
    query = LoginLog.query \
        .join(User, LoginLog.userIntID == User.id) \
        .with_entities(LoginLog, User.username)
    # Optional fuzzy username filter.
    username = request.args.get('username_like')
    if username:
        query = query.filter(User.username.like(u'%{0}%'.format(username)))
    # NOTE(review): roles 1-3 appear to be privileged (see everything);
    # all other roles only see their own records -- confirm against role model.
    if g.role_id not in [1, 2, 3]:
        query = query.filter(User.id == g.user_id)
    records = query.pagination(code_list=['isLogged'])
    return jsonify(records)
|
nilq/baby-python
|
python
|
import json
import os
from typing import List
from stonehenge.db.operations import Operation
from stonehenge.db.migrations.exceptions import UnappliedMigrationException
class Migration:
    """A set of schema operations serializable to a numbered JSON migration
    file (``Migration_<n>.json``) inside ``migrations_dir``."""

    def __init__(
        self,
        operations: List[Operation],
        migrations_dir: str,
    ):
        self.operations = operations
        self.migrations_dir = migrations_dir

    def save_to_file(self) -> str:
        """Write this migration to the next free ``Migration_<n>.json``.

        Raises UnappliedMigrationException if the target file already exists
        (i.e. a generated migration was never applied).
        Returns the created filename.
        """
        next_migration_index = self.get_next_migration_index()
        filename = f"Migration_{next_migration_index}.json"
        filepath = os.path.join(self.migrations_dir, filename)
        if os.path.isfile(filepath):
            raise UnappliedMigrationException(filename)
        with open(filepath, "w+") as f:
            content = self.to_json()
            content_str = json.dumps(content, indent=4)
            f.write(content_str)
        return filename

    def get_next_migration_index(self) -> int:
        """Return one more than the highest existing migration index.

        BUG FIX: the original used ``int(filename[10])``, reading only the
        single character after ``Migration_``. ``Migration_12.json`` was
        treated as index 1 (so numbering broke after 10 migrations), and any
        listed filename shorter than 11 characters raised an uncaught
        IndexError. The full number between prefix and suffix is parsed now.
        """
        prefix, suffix = "Migration_", ".json"
        highest = 1
        for filename in os.listdir(self.migrations_dir):
            if not (filename.startswith(prefix) and filename.endswith(suffix)):
                continue
            try:
                index = int(filename[len(prefix):-len(suffix)])
            except ValueError:
                continue
            if index >= highest:
                highest = index + 1
        return highest

    def to_json(self):
        """Return a JSON-serializable dict representation of the migration."""
        return {
            "operations": [o.to_json() for o in self.operations],
        }
|
nilq/baby-python
|
python
|
"""
Web server.

Provides a class that lets users quickly set up a web service
and serve their own pages.
"""
from socket import *
from select import select
# Core functionality
class HTTPServer:
    """Minimal HTTP server skeleton (serving logic not implemented yet).

    @param host: interface address to bind (default: all interfaces)
    @param port: TCP port to listen on
    @param dir: root directory of the content to serve
    """

    def __init__(self, host='0.0.0.0', port=8080, dir=None):
        self.host = host
        self.port = port
        self.dir = dir

    def start(self):
        # TODO: accept connections and serve files from self.dir.
        pass
if __name__ == '__main__':
    # The user chooses the network address and the data to serve.
    host = '0.0.0.0'
    port = 8000
    dir = "./static"  # location of the data to serve
    # Instantiate the server object and start the service.
    httpd = HTTPServer(host=host, port=port, dir=dir)
    httpd.start()  # start the service
|
nilq/baby-python
|
python
|
## HOST and PORT info
HOST = "127.0.0.1"
PORT = 8000

## Server name
SERVER = "Lemon Server"

## Folder config
STATIC = "static"
RENDER = "render"

## Token info for sessions
token = "SessionToken"
token_length = 100

# Blacklist (denied clients)
blacklist = []

# Temp folder
TEMP = "Temp"

# File extension for files that can have variables in them
FILE_EXTENSION_VAR = ".html"

errorHtmlFile = "config/error.html"
DEFAULT_MIME_TYPE = "text/plain"
LOG_LOCATION = "app/log/log.txt"
ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
EXTENSIONS_CONFIG = "app/extensions/config.json"

# These are for the dev server
SOCKET_BUFFER = 65536
NORMAL_SERVER = True
DEBUG = False
ASYNCIO_MAX_WORKERS = 1000

# These are for SSL in the dev server
SSL_CERT = "config/ssl/ssl.crt"
SSL_KEY = "config/ssl/ssl.key"
SSL = False
SSL_PORT = 4433

# This should be changed to True when using gunicorn. If you're using something
# else and it's not working, try setting this to False.
RETURN_BYTES = True

# These configurations are for gunicorn
bind = HOST + ":" + str(PORT)
workers = 1
worker_connections = 1000
keepalive = 2
|
nilq/baby-python
|
python
|
#
# Copyright 2018 Dynatrace LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Defines basic SDK constants and classes.
All public names here are also re-exported from :mod:`oneagent.sdk` and should
preferably be used from there.
'''
import os
_DEBUG_LEAKS = False
if _DEBUG_LEAKS:
import traceback
#: The Dynatrace Tag request header name which is used to transport the tag between agents
#: (as a string tag).
DYNATRACE_HTTP_HEADER_NAME = 'X-dynaTrace'
#: The Dynatrace Tag messaging property name which is is used to transport the tag between agents
#: (as a byte tag).
#:
#: .. versionadded:: 1.3
DYNATRACE_MESSAGE_PROPERTY_NAME = "dtdTraceTagInfo"
#: DEPRECATED alias for :data:`DYNATRACE_MESSAGE_PROPERTY_NAME`
#:
#: .. deprecated:: 1.3
DYNATRACE_MESSAGE_PROPERTYNAME = DYNATRACE_MESSAGE_PROPERTY_NAME
#: Allow SDK to be used in forked child processes.
_ONESDK_INIT_FLAG_FORKABLE = 1
class _Uninstantiable(object):
    '''Classes deriving from this class cannot be instantiated.'''

    def __new__(cls):
        # These classes are pure namespaces for constants; creating an
        # instance is always a programming error.
        raise ValueError('Attempt to instantiate')
def _add_enum_helpers(decorated_cls):
# pylint:disable=protected-access
decorated_cls._enum_name_by_val = dict()
for key in dir(decorated_cls):
val = getattr(decorated_cls, key)
if isinstance(val, int):
decorated_cls._enum_name_by_val.setdefault(val, key)
@classmethod
def _value_name(cls, val):
result = cls._enum_name_by_val.get(val) # pylint:disable=no-member
if result is None:
return "<Unknown " + cls.__name__ + " value " + repr(val) + ">"
return cls.__name__ + "." + result
decorated_cls._value_name = _value_name
return decorated_cls
class AgentState(_Uninstantiable):
    '''Constants for the agent's state. See
    :attr:`oneagent.sdk.SDK.agent_state`.'''

    # Pure constant namespace (not instantiable, see _Uninstantiable).

    #: The SDK stub is connected to the agent, which is currently active.
    ACTIVE = 0

    #: The SDK stub is connected to the agent, which is temporarily inactive.
    TEMPORARILY_INACTIVE = 1

    #: The SDK stub is connected to the agent, which is permanently inactive.
    PERMANENTLY_INACTIVE = 2

    #: The agent has not been initialized.
    NOT_INITIALIZED = 3

    #: Some unexpected error occurred while trying to determine the agent state.
    ERROR = -1
class ErrorCode(_Uninstantiable):
    '''Constants for error codes of the native agent, as may be contained in
    :attr:`.SDKError.code`.'''

    # Same bit pattern if interpreted in 32 bit unsigned / two's complement
    # (0xAFFE0000 == -0x50020000 as a signed 32-bit value).
    _ERROR_BASE = 0xAFFE0000 if os.name == 'nt' else -0x50020000

    #: The operation completed successfully. You usually won't get any object
    #: with error code at all in that case.
    SUCCESS = 0

    #: The operation failed, but no more specific error code fits the failure.
    GENERIC = _ERROR_BASE + 1

    #: A function was called with an invalid argument.
    INVALID_ARGUMENT = _ERROR_BASE + 2

    NOT_IMPLEMENTED = _ERROR_BASE + 3  #: The called function is not implemented.
    NOT_INITIALIZED = _ERROR_BASE + 4  #: The SDK has not been initialized.

    #: There is not enough available memory to complete the operation.
    OUT_OF_MEMORY = _ERROR_BASE + 5

    #: The native SDK stub was configured to _not_ try to load the actual agent
    #: module.
    AGENT_NOT_ACTIVE = _ERROR_BASE + 6

    #: Either the OneAgent SDK for C/C++ or the OneAgent binary could not be loaded.
    LOAD_AGENT = _ERROR_BASE + 7

    #: The expected exports could not be found either in the OneAgent SDK for C/C++
    #: or the OneAgent binary.
    INVALID_AGENT_BINARY = _ERROR_BASE + 8

    #: The operation failed because of an unexpected error.
    UNEXPECTED = _ERROR_BASE + 9

    #: The command line argument / stub variable definition was ignored because
    #: an entry with the same key was already present.
    ENTRY_ALREADY_EXISTS = _ERROR_BASE + 10

    #: The SDK agent module doesn't support the feature level required by this
    #: version of the SDK stub.
    FEATURE_LEVEL_NOT_SUPPORTED = _ERROR_BASE + 11

    #: The SDK agent module doesn't support the SDK interface required by this
    #: version of the SDK stub
    INTERFACE_NOT_SUPPORTED = _ERROR_BASE + 12

    #: The operation failed because this is the child process of a fork that
    #: occurred while the SDK was initialized.
    FORK_CHILD = _ERROR_BASE + 13
class AgentForkState(_Uninstantiable):
    '''Constants for the agent's fork state. See
    :attr:`oneagent.sdk.SDK.agent_fork_state`.'''

    #: SDK cannot be used in this process, but forked processes may use the SDK.
    #: This is the state of the process
    #: that called :func:`oneagent.initialize` with :code:`forkable=True`
    PARENT_INITIALIZED = 1

    #: Forked processes can use the SDK.
    #: Using the SDK in this process is allowed but
    #: changes the state to :attr:`.FULLY_INITIALIZED`
    #: This is the state of all child processes
    #: of a process that is :attr:`.PARENT_INITIALIZED`.
    PRE_INITIALIZED = 2

    #: SDK can be used, forked processes may not use the SDK.
    #: This is the state of a process that was previously :attr:`.PRE_INITIALIZED`
    #: and then called an SDK function.
    FULLY_INITIALIZED = 3

    #: SDK can be used, forked processes may not use the SDK,
    #: :func:`oneagent.initialize` was called without :code:`forkable=True`.
    NOT_FORKABLE = 4

    #: Some error occurred while trying to determine the agent fork state.
    ERROR = -1
class MessageSeverity(_Uninstantiable):  # Private
    '''Constants for the severity of log messages.

    The levels with the lower numerical values include all messages of the ones
    with the higher values. Note that :attr:`.DEBUG` is the highest severity,
    contrary to usual conventions.'''

    FINEST = 0  #: Most verbose logging (highly detailed tracing).
    FINER = 1  #: Slightly less verbose logging (fairly detailed tracing).
    FINE = 2  #: Still verbose logging (informational tracing messages).
    CONFIG = 3  #: Log configuration messages.
    INFO = 4  #: Log informational messages.
    WARNING = 5  #: Log conditions that indicate a potential problem.
    SEVERE = 6  #: Log messages indicating a serious failure.

    #: Debug message. None should be logged by default, unless they are
    #: specifically enabled with special debug options. Note that contrary to
    #: usual conventions, this is the highest severity.
    DEBUG = 7

    #: No messages of this level exist, so using this level disables all log
    #: messages.
    NONE = 8
class MessagingDestinationType(_Uninstantiable):
    '''Messaging Destination Type Constants
    '''

    QUEUE = 1  #: A message queue: a message sent to this destination will be (successfully)
               #: received by only one consumer.
    TOPIC = 2  #: A message topic: a message sent to this destination will be received by all
               #: subscribed consumers.
class MessagingVendor(_Uninstantiable):
    '''Messaging System Vendor Strings
    '''

    HORNETQ = "HornetQ"  #: vendor string for HornetQ
    ACTIVE_MQ = "ActiveMQ"  #: vendor string for ActiveMQ
    RABBIT_MQ = "RabbitMQ"  #: vendor string for RabbitMQ
    ARTEMIS = "Artemis"  #: vendor string for Artemis
    WEBSPHERE = "WebSphere"  #: vendor string for WebSphere
    MQSERIES_JMS = "MQSeries JMS"  #: vendor string for MQSeries JMS
    MQSERIES = "MQSeries"  #: vendor string for MQSeries
    TIBCO = "Tibco"  #: vendor string for Tibco
class DatabaseVendor(_Uninstantiable):
    '''String constants for well-known database vendors. Use for the
    :code:`vendor` parameter of
    :meth:`oneagent.sdk.SDK.create_database_info`.'''

    APACHE_HIVE = "ApacheHive"  #: Database vendor string for Apache Hive.

    #: Database vendor string for Apache Derby (aka. IBM Cloudscape).
    CLOUDSCAPE = "Cloudscape"

    HSQLDB = "HSQLDB"  #: Database vendor string for HyperSQL DB.

    #: Database vendor string for OpenEdge Database (aka. Progress).
    PROGRESS = "Progress"

    MAXDB = "MaxDB"  #: Database vendor string for SAP MaxDB.
    HANADB = "HanaDB"  #: Database vendor string for SAP HANA DB.
    INGRES = "Ingres"  #: Database vendor string for Ingres Database.
    FIRST_SQL = "FirstSQL"  #: Database vendor string for FirstSQL.
    ENTERPRISE_DB = "EnterpriseDB"  #: Database vendor string for EnterpriseDB.
    CACHE = "Cache"  #: Database vendor string for InterSystems Cache.
    ADABAS = "Adabas"  #: Database vendor string for ADABAS.
    FIREBIRD = "Firebird"  #: Database vendor string for Firebird Database.
    DB2 = "DB2"  #: Database vendor string for IBM Db2.

    #: Database vendor string for JDBC connections to Apache Derby
    #: (aka. IBM Cloudscape).
    DERBY_CLIENT = "Derby Client"

    #: Database vendor string for Derby Embedded.
    DERBY_EMBEDDED = "Derby Embedded"

    FILEMAKER = "Filemaker"  #: Database vendor string for FileMaker Pro.
    INFORMIX = "Informix"  #: Database vendor string for IBM Informix.
    INSTANT_DB = "InstantDb"  #: Database vendor string for InstantDB.
    INTERBASE = "Interbase"  #: Database vendor string for Embarcadero InterBase.
    MYSQL = "MySQL"  #: Database vendor string for MySQL.
    MARIADB = "MariaDB"  #: Database vendor string for MariaDB.
    NETEZZA = "Netezza"  #: Database vendor string for IBM Netezza.
    ORACLE = "Oracle"  #: Database vendor string for Oracle Database.
    PERVASIVE = "Pervasive"  #: Database vendor string for Pervasive PSQL.
    POINTBASE = "Pointbase"  #: Database vendor string for PointBase.
    POSTGRESQL = "PostgreSQL"  #: Database vendor string for PostgreSQL.
    SQLSERVER = "SQL Server"  #: Database vendor string for Microsoft SQL Server.
    SQLITE = "sqlite"  #: Database vendor string for SQLite.

    #: Database vendor string for SAP ASE
    #: (aka. Sybase SQL Server, Sybase DB, Sybase ASE).
    SYBASE = "Sybase"

    TERADATA = "Teradata"  #: Database vendor string for Teradata Database.
    VERTICA = "Vertica"  #: Database vendor string for Vertica.
    CASSANDRA = "Cassandra"  #: Database vendor string for Cassandra.
    H2 = "H2"  #: Database vendor string for H2 Database Engine.

    #: Database vendor string for ColdFusion In-Memory Query
    #: (aka. Query of Queries).
    COLDFUSION_IMQ = "ColdFusion IMQ"

    REDSHIFT = "Amazon Redshift"  #: Database vendor string for Amazon Redshift.
class ChannelType(_Uninstantiable):
    '''Constants for communication channel types, for use as
    :attr:`oneagent.sdk.Channel.type_`'''

    OTHER = 0  #: Some other channel type or unknown channel type.

    #: The channel is a TCP/IP connection.
    #:
    #: The channel endpoint string should be the host name, followed by a colon,
    #: followed by the port number (in decimal). E.g. :code:`localhost:1234` or
    #: :code:`example.com:80`.
    TCP_IP = 1

    #: The channel is a connection via Unix domain sockets.
    #:
    #: The channel endpoint string should be the path of the Unix domain
    #: sockets.
    UNIX_DOMAIN_SOCKET = 2

    #: The channel is a named pipe.
    #:
    #: The channel endpoint string should be the pipe name.
    NAMED_PIPE = 3

    #: The channel is some in-process means of communication.
    IN_PROCESS = 4
class SDKError(Exception):
    '''Exception for SDK errors (mostly during initialization, see
    :func:`oneagent.initialize`).

    Attributes:
        code: An :class:`int` error code; one of the :class:`.ErrorCode`
            constants, otherwise a Windows error code on Windows or an
            errno number on other systems.
        message: The :class:`str` error message associated with :attr:`code`
            (potentially contains more information than could be deduced
            from :attr:`code` alone).
    '''

    def __init__(self, code, msg):
        super(SDKError, self).__init__(code, msg)
        self.message = msg
        self.code = code
class SDKInitializationError(SDKError):
    '''Exception for initialization errors.'''

    def __init__(self, code, msg, agent_version='-/-'):
        super(SDKInitializationError, self).__init__(code, msg)
        #: The :class:`str` agent version associated with this error
        #: ('-/-' when no agent version is available).
        self.agent_version = agent_version
class SDKHandleBase(object):
    '''Base class for SDK handles that must be closed explicitly.

    You can use this class as a context manager (i.e. with a :code:`with`-block)
    to automatically close the handle.'''

    def __init__(self, nsdk, handle):
        self.handle = handle
        self.nsdk = nsdk
        if _DEBUG_LEAKS:
            # Remember where the handle was allocated for leak reports.
            self.alloc_at = ''.join(traceback.format_stack())

    def close_handle(self, nsdk, handle):
        '''Release the native handle; must be overridden in derived classes.'''
        raise NotImplementedError(
            'Must implement close_handle in derived class')

    def __del__(self):
        # Warn (via the agent's logging callback) about handles that are
        # garbage-collected without having been closed, then close them.
        if self.handle is None:
            return
        try:
            warn = self.nsdk.agent_get_logging_callback()
            if not warn:
                return
            if _DEBUG_LEAKS:
                # BUG FIX: ' from ' was a bytes literal (b' from ');
                # concatenating it with str raised TypeError on Python 3,
                # so the leak warning itself crashed in debug mode.
                warn(
                    'Unclosed SDK handle '
                    + repr(self)
                    + ' from '
                    + self.alloc_at)
            else:
                warn('Unclosed SDK handle ' + repr(self))
        finally:
            self.close()

    def __str__(self):
        return '{}({})'.format(type(self), self.handle)

    def close(self):
        '''Closes the handle, if it is still open.

        Usually, you should prefer using the handle as a context manager to
        calling :meth:`close` manually.'''
        if self.handle is not None:
            self.close_handle(self.nsdk, self.handle)
            self.handle = None

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def __bool__(self):
        return bool(self.handle)

    __nonzero__ = __bool__
class DbInfoHandle(SDKHandleBase):
    '''Opaque handle to database information. See
    :meth:`oneagent.sdk.SDK.create_database_info`.'''

    def close_handle(self, nsdk, handle):
        # Delegate to the native SDK's matching delete function.
        nsdk.databaseinfo_delete(handle)
class WebapplicationInfoHandle(SDKHandleBase):
    '''Opaque handle to web application information. See
    :meth:`oneagent.sdk.SDK.create_web_application_info`.'''

    def close_handle(self, nsdk, handle):
        # Delegate to the native SDK's matching delete function.
        nsdk.webapplicationinfo_delete(handle)
class MessagingSystemInfoHandle(SDKHandleBase):
    '''Opaque handle for messaging system info object. See
    :meth:`oneagent.sdk.SDK.create_messaging_system_info`.'''

    def close_handle(self, nsdk, handle):
        # Delegate to the native SDK's matching delete function.
        nsdk.messagingsysteminfo_delete(handle)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""Convert a FASTA file into an "AT-only" version: sequence lines are
upper-cased, then every G is replaced by A and every C by T.

Usage: <script> <input.fasta> <output.fasta>
"""
import errno  # BUG FIX: errno was referenced below but never imported
import sys
import re
import os


def convert(fasta_file, fasta_file_AT_only):
    """Write the AT-only version of `fasta_file` to `fasta_file_AT_only`,
    creating the output directory first if necessary."""
    out_dir = os.path.dirname(fasta_file_AT_only)
    if out_dir and not os.path.exists(out_dir):
        try:
            os.makedirs(out_dir)
        except OSError as exc:  # guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    with open(fasta_file, 'r') as fasta:
        with open(fasta_file_AT_only, 'w') as fasta_out:
            for line in fasta:
                if line[0] == '>':
                    # Header lines are copied through unchanged.
                    fasta_out.write(line)
                else:
                    line = line.upper()
                    line = line.replace('G', 'A')
                    line = line.replace('C', 'T')
                    fasta_out.write(line)


if __name__ == '__main__':
    convert(sys.argv[1], sys.argv[2])
nilq/baby-python
|
python
|
"""
* Vehicle Routing Problem *
Steps of the algorithm:
1. Creation of a given number of clusters
2. Creation of an optimal path (loop) for each cluster
Graph Optimisation : basic 2-opt algorithm
Clustering : centroid-based method
"""
from random import *
from math import sqrt
import matplotlib.pyplot as plt
import networkx as nx
import time
def dist(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return sqrt(dx ** 2 + dy ** 2)
# cluster's functions
def create_clusters(reference_elements, elements_to_organise):
    """Assign each element to the cluster of its nearest reference point.

    :param reference_elements: one (x, y) reference point per cluster
        (initial sampled vertices, or the current centroids).
    :param elements_to_organise: (x, y) points to distribute.
    :return: (clusters, node_colors) where clusters has NUMBER_CLUSTERS
        lists of points and node_colors holds one color per element.
    """
    new_node_color = []
    new_clusters = [[] for _ in range(NUMBER_CLUSTERS)]  # initialisation of the clusters list
    for element in elements_to_organise:
        # BUG FIX: the nearest index is now a local variable; the previous
        # `global target_index` could silently reuse a stale value from an
        # earlier call if no reference point beat the initial record.
        target_index = 0
        record = dist(0, 0, WIDTH, HEIGHT)  # canvas diagonal = upper bound
        for j, ref in enumerate(reference_elements):
            d = dist(element[0], element[1], ref[0], ref[1])
            if d < record:
                record = d
                target_index = j
        new_clusters[target_index].append(element)
        new_node_color.append(COLORS[target_index])
    return new_clusters, new_node_color
def centroid_of(lst):
    """Return the centroid (mean x, mean y) of a list of (x, y) points.

    An empty list yields (0, 0), matching the original accumulator
    behavior, so empty clusters stay harmless during k-means.
    """
    if not lst:
        return (0, 0)
    n = len(lst)
    # Sum first and divide once: fewer divisions and less accumulated
    # floating-point error than adding per-element fractions.
    xG = sum(p[0] for p in lst) / n
    yG = sum(p[1] for p in lst) / n
    return (xG, yG)
# graph's functions
def total_distance(lst):
    """Total length of the closed path visiting vertices[i] for i in lst."""
    return sum(
        dist(vertices[a][0], vertices[a][1], vertices[b][0], vertices[b][1])
        for a, b in zip(lst, lst[1:])
    )
def reverse_sublist(lst, start, end):
    """Reverse lst[start:end + 1] in place and return lst (2-opt move)."""
    lst[start:end + 1] = reversed(lst[start:end + 1])
    return lst
# Andrew's monotone chain algorithm, after
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain#Python
def convex_hull(points):
    """Return the convex hull of a set of 2D points.

    The hull is listed counter-clockwise starting from the
    lexicographically smallest point; collinear points are dropped.
    """
    pts = sorted(set(points))
    if len(pts) <= 1:
        return pts

    def cross(o, a, b):
        # z-component of (a - o) x (b - o); <= 0 means clockwise/collinear.
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    def half_hull(sequence):
        chain = []
        for p in sequence:
            while len(chain) >= 2 and cross(chain[-2], chain[-1], p) <= 0:
                chain.pop()
            chain.append(p)
        return chain

    lower = half_hull(pts)
    upper = half_hull(reversed(pts))
    # Each chain's endpoint repeats the other chain's start; drop them.
    return lower[:-1] + upper[:-1]
# ---- parameters ----
NUMBER_VERTICES = 20
NUMBER_CLUSTERS = 2 # up to 6 (limited by the COLORS palette below)
NUMBER_ITERATIONS = 10 ** 4
NUMBER_ITERATIONS2 = 10 ** 3
WIDTH = HEIGHT = 100 # dimension of the canvas
VERTEX_SIZE = 150
COLORS = ['orange', 'red', 'cyan', 'green', 'pink', 'purple']
vertices = []
G = nx.Graph()
print("* Vehicle Routing Problem *")
print("Number of vertices :", NUMBER_VERTICES,
      "| Number of clusters :", NUMBER_CLUSTERS,
      "| Dimensions of the canvas : (" + str(WIDTH), ";", str(HEIGHT) + ")\n")
start_time = time.time()
# creation of the vertices (random points on the canvas; node i <-> vertices[i])
for i in range(NUMBER_VERTICES):
    new_vertex = (randint(1, WIDTH), randint(1, HEIGHT))
    vertices.append(new_vertex)
    G.add_node(i, pos=(new_vertex[0], new_vertex[1]))
# initialisation: seed the clusters from randomly sampled vertices
initial_vertices = sample(vertices, NUMBER_CLUSTERS)
clusters, node_color = create_clusters(initial_vertices, vertices)
# clusters
# --------------------------------------------------------------
# k-means style loop: recompute centroids and reassign until the
# partition stops changing.
previous_state = clusters
current_state = []
iteration = 0
while previous_state != current_state:
    previous_state = clusters
    current_state = []
    centroids = []
    for cluster in clusters:
        centroids.append(centroid_of(cluster))
    clusters, node_color = create_clusters(centroids, vertices)
    current_state = clusters
    iteration += 1
print("Clusters : ✓")
print("--- %s seconds ---" % (time.time() - start_time))
# --------------------------------------------------------------
# graphs
# --------------------------------------------------------------
# The depot ("platform") sits at the canvas centre; every tour starts and
# ends there.
platform = (WIDTH / 2, HEIGHT / 2)
vertices.append(platform)
G.add_node(NUMBER_VERTICES, pos=(platform[0], platform[1]))
node_color.append('silver')
pos = nx.get_node_attributes(G, 'pos')
for cluster in clusters:
    current_color = COLORS[clusters.index(cluster)]
    if len(cluster) > 2:
        path = [vertices.index(vertex) for vertex in cluster] # initial path
        # adding "platform" at the beginning and the end of the path
        path.insert(0, NUMBER_VERTICES)
        path.append(path[0])
        # 2-opt: repeatedly reverse a random interior segment and keep it
        # when the tour gets shorter. Indices 1..len(cluster) exclude the
        # depot endpoints.
        record_distance = dist(0, 0, WIDTH, HEIGHT) * NUMBER_VERTICES
        for i in range(NUMBER_ITERATIONS):
            selected_vertices = sample(range(1, len(cluster) + 1), 2)
            test = path.copy()
            test = reverse_sublist(test, selected_vertices[0], selected_vertices[1])
            test_distance = total_distance(test)
            if test_distance < record_distance:
                record_distance = test_distance
                path = test
        for i in range(len(cluster) + 1):
            G.add_edge(path[i], path[i + 1], color=current_color)
    if len(cluster) == 2:
        # Two-vertex cluster: a single edge, no optimisation needed.
        G.add_edge(vertices.index(cluster[0]), vertices.index(cluster[1]), color=current_color)
print("Graphs : ✓")
print("--- %s seconds ---" % (time.time() - start_time))
plt.figure(str(NUMBER_CLUSTERS) + "-means | Iteration " + str(iteration) + " (before exchange between clusters)")
# --------------------------------------------------------------
# exchange vertices between clusters
# --------------------------------------------------------------
# determine the convex hull of each cluster
hulls = []
for cluster in clusters:
    hulls.append([vertex for vertex in convex_hull(cluster)])
# 1. select two clusters:
# one from which we will select vertex ([0]) and one in which we will try to insert it at a random location ([1])
# for i in range(len(NUMBER_ITERATIONS2)):
# NOTE(review): the exchange step below appears unfinished — it only prints
# the selected pair, and the commented-out loop above was never enabled.
selected_clusters = sample(clusters, 2)
selected_hull = hulls[clusters.index(selected_clusters[0])]
selected_vertex = choice(selected_hull)
selected_location = choice(range(len(selected_clusters[1])))
print(vertices.index(selected_vertex), vertices.index(selected_clusters[1][selected_location]))
# --------------------------------------------------------------
# final rendering: edges keep the color of the cluster they belong to
edge_colors = [G[u][v]['color'] for u,v in G.edges()]
plt.figure(str(NUMBER_CLUSTERS) + "-means | Iteration " + str(iteration))
nx.draw(G,
        pos,
        node_size=VERTEX_SIZE,
        node_color=node_color,
        edge_color=edge_colors,
        width=4,
        with_labels=True,
        font_size=12)
plt.show()
|
nilq/baby-python
|
python
|
def append_new_line(file_name, text_to_append):
    """Append *text_to_append* as a new line at the end of *file_name*.

    A newline separator is written only when the file already has content,
    so the file never starts with a blank line. The file is created if it
    does not exist.
    """
    # 'a+' creates the file if missing; writes always go to the end.
    with open(file_name, "a+") as file_object:
        # Seek to the end and use the offset as the size, instead of
        # reading the first 100 bytes just to test for emptiness.
        file_object.seek(0, 2)  # 2 == os.SEEK_END
        if file_object.tell() > 0:
            file_object.write("\n")
        # Append text at the end of file
        file_object.write(text_to_append)
|
nilq/baby-python
|
python
|
import discord
import os
import requests
import json
import random
from replit import db
from keepmealive import keep_alive
client = discord.Client()
# Trigger words: a message containing any of these gets an encouragement.
sad_words=["sad","depressed","unhappy","lost","angry","miserable","depressing"]
# Built-in replies; user-added ones are stored in the replit db under
# the "encouragements" key.
starter_encouragements=[
"cheer Up! ",
"You are a great Guy!"
]
def get_quotes():
    """Fetch one random quote from zenquotes.io as '<quote>-<author>'."""
    response = requests.get("https://zenquotes.io/api/random")
    payload = json.loads(response.text)
    return payload[0]['q'] + "-" + payload[0]['a']
def update_encouragements(encouraging_message):
    """Append a user-supplied message to the 'encouragements' db list."""
    if "encouragements" not in db.keys():
        # First message: create the list.
        db["encouragements"] = [encouraging_message]
    else:
        # Read-modify-write: the replit db stores values, so reassign.
        stored = db["encouragements"]
        stored.append(encouraging_message)
        db["encouragements"] = stored
def delete_encouragements(index):
    """Remove the encouragement at *index* from the db, if it exists."""
    stored = db["encouragements"]
    if index < len(stored):
        del stored[index]
        db["encouragements"] = stored
@client.event
async def on_ready():
    # Called once the bot has connected and logged in to Discord.
    print('We have Logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    """Dispatch on message content: $inspire quotes, encouragement
    triggers, and the $new/$del/$list/$responding commands."""
    if message.author == client.user:
        # Ignore our own messages to avoid reply loops.
        return
    msg = message.content
    if message.content.startswith('$inspire'):
        quote = get_quotes()
        await message.channel.send(quote)
    # Pool of possible replies: copy the starters so extend() below does
    # not mutate the module-level list on every message.
    options = list(starter_encouragements)
    if "encouragements" in db.keys():
        # BUG FIX: was `options = options.extend(...)` — list.extend
        # returns None, so options was clobbered.
        options.extend(db["encouragements"])
    if any(word in msg for word in sad_words):
        # Choose from the full pool; previously only the starters were
        # used, leaving the db-stored encouragements dead data.
        await message.channel.send(random.choice(options))
    if msg.startswith("$new"):
        encouraging_message = msg.split("$new", 1)[1]
        update_encouragements(encouraging_message)
        await message.channel.send("New Encourage message added!")
    if msg.startswith("$del"):
        encouragements = []
        if "encouragements" in db.keys():
            index = int(msg.split("$del", 1)[1])
            delete_encouragements(index)
            encouragements = db["encouragements"]
        await message.channel.send(encouragements)
    if msg.startswith("$list"):
        encouragements = []
        if "encouragements" in db.keys():
            encouragements = db["encouragements"]
        await message.channel.send(encouragements)
    if msg.startswith("$responding"):
        value = msg.split("$responding ", 1)[1]
        if value.lower() == "true":
            db["responding"] = True
            await message.channel.send("Responding is on.")
        else:
            db["responding"] = False
            await message.channel.send("Responding is off.")
# Start the keep-alive web server (so the repl stays up) and run the bot.
keep_alive()
client.run(os.getenv('TOKEN'))
|
nilq/baby-python
|
python
|
from copy import deepcopy
import numpy
from theano.gof.op import PureOp
from theano.gof import Apply, generic, Container
from theano.gof.link import LocalLinker, map_storage, add_clear_storage
from theano import function, Mode
from theano.ifelse import ifelse
import theano.tensor as T
class IfElseIfElseIf(PureOp):
    # Lazy op computing: if c1: t1 elif c2: t2 elif c3: t3 else: f3.
    # Used to exercise the VM linker's lazy-evaluation protocol with
    # several condition/branch pairs.
    def __init__(self, inplace=False):
        self.inplace=inplace # check destroyhandler and others to ensure that a view_map with
        #multiple inputs can work
        assert not self.inplace
    def make_node(self, c1, t1, c2,t2,c3,t3,f3):
        # All branch values must share one type, since any of them can
        # become the op's single output.
        assert t1.type == f3.type
        assert t2.type == t3.type
        assert t3.type == f3.type
        return Apply(self, [c1,t1,c2,t2,c3,t3,f3], [t1.type()])
    def make_thunk(self, node, storage_map, compute_map, no_recycling):
        # Cache the computed-flags and value cells for each input/output so
        # the thunk can test and read them without dict lookups.
        input_computed = [compute_map[v] for v in node.inputs]
        output_computed = [compute_map[v] for v in node.outputs]
        input_registers = [storage_map[v] for v in node.inputs]
        output_registers = [storage_map[v] for v in node.outputs]
        outtype = node.outputs[0].type
        def thunk():
            # Lazy-thunk protocol: returning [i] asks the VM to compute
            # input i and call the thunk again; returning [] means the
            # output has been produced. Only the inputs on the taken
            # branch are ever requested.
            if not input_computed[0][0]:
                return [0]
            else:
                truthval = input_registers[0][0]
                if truthval:
                    if not input_computed[1][0]:
                        return [1]
                    else:
                        output_computed[0][0]=1
                        # deepcopy: the output must not alias the input cell
                        output_registers[0][0]=outtype.filter(deepcopy(input_registers[1][0]))
                        return []
                else:
                    if not input_computed[2][0]:
                        return [2]
                    else:
                        truthval = input_registers[2][0]
                        if truthval:
                            if not input_computed[3][0]:
                                return [3]
                            else:
                                output_computed[0][0] = 1
                                output_registers[0][0] = outtype.filter(deepcopy(input_registers[3][0]))
                                return []
                        else:
                            if not input_computed[4][0]:
                                return [4]
                            else:
                                truthval = input_registers[4][0]
                                if truthval:
                                    if not input_computed[5][0]:
                                        return [5]
                                    else:
                                        output_computed[0][0] = 1
                                        output_registers[0][0] = outtype.filter(deepcopy(input_registers[5][0]))
                                        return []
                                else:
                                    if not input_computed[6][0]:
                                        return [6]
                                    else:
                                        output_computed[0][0] = 1
                                        output_registers[0][0] = outtype.filter(deepcopy(input_registers[6][0]))
                                        return []
        thunk.lazy = True
        return thunk
class NotImplementedOp(PureOp):
    # Op whose thunk always raises E; used to verify that lazy evaluation
    # really skips the branch that is not taken.
    class E(Exception): pass
    def make_node(self, x):
        return Apply(self, [x], [x.type()])
    def make_thunk(self, node, storage_map, compute_map, no_recycling):
        def thunk():
            raise self.E()
        # Not lazy: if the VM ever schedules this thunk, it raises.
        thunk.lazy=False
        return thunk
# NOTE: this module uses Python 2 print statements throughout.
def test_ifelse():
    # The untaken branch wraps NotImplementedOp: with lazy evaluation the
    # false branch must not be computed, so only the true branch raises.
    a = T.scalar()
    b = generic()
    c = generic()
    notimpl = NotImplementedOp()
    f = function([a,b,c], ifelse(a, notimpl(b), c),
            mode=Mode(linker='vm', optimizer='fast_run'))
    try:
        print "case 1"
        # Condition true -> the NotImplementedOp branch runs and raises.
        f( 1, 'a', 'b')
        assert False
    except NotImplementedOp.E:
        pass
    print "... passed"
    print "case 2"
    # Condition false -> laziness skips notimpl(b); result is c.
    print f( 0, 'a', 'b')
    assert f( 0, 'a', 'b') == 'b'
    print "... passed"
def more_complex_test():
    # Nested lazy ifs: t4 picks between branches that themselves contain
    # ifelse graphs and a NotImplementedOp, checking that only the needed
    # sub-graphs are evaluated.
    notimpl = NotImplementedOp()
    ifelseifelseif = IfElseIfElseIf()
    x1 = T.scalar('x1')
    x2 = T.scalar('x2')
    c1 = T.scalar('c1')
    c2 = T.scalar('c2')
    t1 = ifelse(c1,x1,notimpl(x2))
    t1.name = 't1'
    t2 = t1*10
    t2.name = 't2'
    t3 = ifelse(c2,t2, x1+t1)
    t3.name = 't3'
    t4 = ifelseifelseif(T.eq(x1,x2), x1, T.eq(x1,5), x2, c2, t3, t3+0.5)
    t4.name = 't4'
    f = function([c1,c2,x1,x2], t4, mode=Mode(linker='vm', optimizer='fast_run'))
    # With c1=1, c2=0, x1=10, x2=0: t1=10, t3=x1+t1=20, t4=t3+0.5=20.5.
    print f(1, 0, numpy.array(10,dtype=x1.dtype),0)
    assert f(1,0,numpy.array(10,dtype=x1.dtype),0) == 20.5
    print '... passed'
# Run only the complex scenario when executed as a script.
if __name__ == '__main__':
    more_complex_test()
|
nilq/baby-python
|
python
|
import sqlite3
def connectTab(db_name: str = 'dados.db') -> sqlite3.Connection:
conexao = sqlite3.connect(f'../{db_name}')
conexao.row_factory = sqlite3.Row
return conexao
def createTab(tab_name: str = 'pessoas'):
    """Create table *tab_name* (id integer PK, nome text) if absent."""
    conexao = connectTab()
    print(type(conexao))
    with conexao:
        cursor = conexao.cursor()
        cursor.execute(
            f'CREATE TABLE IF NOT EXISTS {tab_name}('
            f'id INTEGER NOT NULL PRIMARY KEY,'
            f'nome TEXT NOT NULL'
            f');'
        )
        conexao.commit()
def insert(tab_name: str = 'pessoas', *args: str):
    """Insert one row per name in *args* into *tab_name*.

    Values are bound as parameters; the previous version interpolated
    them into the SQL string, which was SQL-injection prone and broke
    for names containing quotes.
    """
    if not args:
        # Nothing to insert; avoids executing a malformed VALUES clause.
        return
    conexao = connectTab()
    with conexao:
        cursor = conexao.cursor()
        # One "(?, ?)" pair per row: NULL id (autoassigned) plus the name.
        rows = ', \n'.join('(?, ?)' for _ in args)
        sql = f'INSERT INTO {tab_name} VALUES \n{rows};'
        params = []
        for nome in args:
            params.extend((None, nome))
        cursor.execute(sql, params)
        conexao.commit()
def remove(ident: int, tab_name: str = 'pessoas'):
    """Delete the row with id *ident* from *tab_name*.

    NOTE: the original signature `(tab_name='pessoas', ident)` was a
    SyntaxError (non-default parameter after a default one), so no
    existing caller can depend on the old parameter order. The id is
    now bound as a parameter instead of interpolated into the SQL.
    """
    conexao = connectTab()
    with conexao:
        cursor = conexao.cursor()
        cursor.execute(f'DELETE FROM {tab_name} WHERE id=?;', (ident,))
        conexao.commit()
def showData(tab_name: str = 'pessoas', only_keys: bool = False):
    """Return all rows of *tab_name* as dicts, or just the column names.

    :param only_keys: when True, return the list of column names instead
        of the row data (empty list for an empty table, as before).
    """
    conexao = connectTab()
    with conexao:
        cursor = conexao.cursor()
        cursor.execute(f'SELECT * FROM {tab_name};')
        result = cursor.fetchall()
        if only_keys:
            # Column names are identical for every row, so derive them from
            # the first row once instead of rebuilding the list per row.
            return list(dict(result[0]).keys()) if result else []
        return [dict(data) for data in result]
|
nilq/baby-python
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Attributes
from ._models_py3 import BackupSecretResult
from ._models_py3 import DeletedSecretBundle
from ._models_py3 import DeletedSecretItem
from ._models_py3 import DeletedSecretListResult
from ._models_py3 import Error
from ._models_py3 import KeyVaultError
from ._models_py3 import SecretAttributes
from ._models_py3 import SecretBundle
from ._models_py3 import SecretItem
from ._models_py3 import SecretListResult
from ._models_py3 import SecretProperties
from ._models_py3 import SecretRestoreParameters
from ._models_py3 import SecretSetParameters
from ._models_py3 import SecretUpdateParameters
except (SyntaxError, ImportError):
from ._models import Attributes # type: ignore
from ._models import BackupSecretResult # type: ignore
from ._models import DeletedSecretBundle # type: ignore
from ._models import DeletedSecretItem # type: ignore
from ._models import DeletedSecretListResult # type: ignore
from ._models import Error # type: ignore
from ._models import KeyVaultError # type: ignore
from ._models import SecretAttributes # type: ignore
from ._models import SecretBundle # type: ignore
from ._models import SecretItem # type: ignore
from ._models import SecretListResult # type: ignore
from ._models import SecretProperties # type: ignore
from ._models import SecretRestoreParameters # type: ignore
from ._models import SecretSetParameters # type: ignore
from ._models import SecretUpdateParameters # type: ignore
from ._key_vault_client_enums import (
DeletionRecoveryLevel,
)
# Public surface of this generated models package; mirrors the model
# imports above (py3 models with the py2 fallback) plus the enum.
__all__ = [
    'Attributes',
    'BackupSecretResult',
    'DeletedSecretBundle',
    'DeletedSecretItem',
    'DeletedSecretListResult',
    'Error',
    'KeyVaultError',
    'SecretAttributes',
    'SecretBundle',
    'SecretItem',
    'SecretListResult',
    'SecretProperties',
    'SecretRestoreParameters',
    'SecretSetParameters',
    'SecretUpdateParameters',
    'DeletionRecoveryLevel',
]
|
nilq/baby-python
|
python
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.base.exceptions import TaskError
from pants.task.lint_task_mixin import LintTaskMixin
from pants.contrib.go.tasks.go_fmt_task_base import GoFmtTaskBase
class GoCheckstyle(LintTaskMixin, GoFmtTaskBase):
    """Checks Go code matches gofmt style."""

    def execute(self):
        # `gofmt -d` emits a diff for files that are not gofmt-clean, so
        # any output at all means at least one style violation.
        with self.go_fmt_invalid_targets(['-d']) as output:
            if not output:
                return
            self.context.log.error(output)
            raise TaskError('Found style errors. Use `./pants fmt` to fix.')
|
nilq/baby-python
|
python
|
import yaml
import torch
from torch import package
import sys
sys.path.append('../../')
import config
class Punctuation(object):
    """Restores punctuation in raw text via a packaged "te" model.

    The model is loaded from a torch.package archive configured in
    `config`; long inputs are processed in word-chunks of `step` words.
    """
    def __init__(self,
                 model_path=config.model_path_punctuation,
                 step=config.step_punctuation):
        # Path to the torch.package archive containing the model.
        self.model_path = model_path
        self.imp = package.PackageImporter(self.model_path)
        self.model = self.imp.load_pickle("te_model", "model")
        # Chunk size, in words, for splitting long inputs.
        self.step =step
    def apply_te(self, text_val):
        """Return *text_val* with punctuation restored (language: ru)."""
        self.lan = "ru"
        len_text = len(text_val.split())
        if len_text > self.step:
            # Process `step` words at a time and concatenate the chunks.
            # The [:-1] presumably strips a trailing period the model adds
            # per chunk, and the first letter is lower-cased so chunks
            # join mid-sentence — TODO confirm against the te model.
            temp_pred = ''
            for i in range(0, len_text, self.step):
                temp_text = self.model.enhance_text(' '.join(text_val.split()[i:i+self.step]), self.lan)[:-1] + ' '
                temp_pred += temp_text[0].lower() + temp_text[1:]
            self.text_with_punctuation = temp_pred
        else:
            self.text_with_punctuation = self.model.enhance_text(text_val, self.lan)
        return self.text_with_punctuation
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
from __future__ import print_function
from FWCore.ParameterSet.pfnInPath import pfnInPath
import FWCore.ParameterSet.Config as cms
import sys
import os
import re
# Refuse to run outside a CMSSW working area: the XML lookup relies on
# LOCAL_TOP_DIR pointing at the checkout.
if os.getenv('LOCAL_TOP_DIR') is None:  # idiomatic None test (was `== None`)
    print("The environment variable LOCAL_TOP_DIR must be set to run this script")
    print("Usually setting it equal to the value of CMSSW_BASE will do what you want")
    print("In the context of a unit test this variable is always set automatically")
    sys.exit(1)

# get the list of XML files from the cfi file (overridable via argv[1])
process = cms.Process("TEST")
cfiFile = 'Geometry/CMSCommonData/cmsIdealGeometryXML_cfi'
if len(sys.argv) > 1:
    cfiFile = sys.argv[1]
process.load(cfiFile)
xmlFiles = process.es_sources['XMLIdealGeometryESSource'].geomXMLFiles.value()
def callDOMCount(schemaPath, xmlPath):
    """Validate one XML file with DOMCount against *schemaPath*.

    A local copy of the XML file is written to the current directory with
    its embedded schema reference rewritten to schemaPath, for two reasons:
    the XML file and the schema may live in different checkout/release
    areas, and the relative schema path inside many of the XML files is
    simply wrong.
    """
    xmlFilename = os.path.basename(xmlPath)
    # `with` guarantees both files are closed even if rewriting fails
    # (the original leaked the handles on an exception).
    with open(xmlPath, 'r') as xmlFile, open(xmlFilename, 'w') as tmpXMLFile:
        for line in xmlFile:
            # Replace the schema reference at any of the relative depths
            # found in the tree, deepest first (same order as before).
            for depth in range(5, 0, -1):
                line = line.replace(
                    "../" * depth + "DetectorDescription/Schema/DDLSchema.xsd",
                    schemaPath)
            tmpXMLFile.write(line)
    # Run DOMCount
    os.system('DOMCount -v=always -n -s -f %s' % (xmlFilename,))
    # Cleanup
    os.system("rm %s" % (xmlFilename,))
# Find the schema file
# pfnInPath resolves the schema within the CMSSW search path; strip the
# 'file:' prefix to get a plain filesystem path for DOMCount.
schema = pfnInPath("DetectorDescription/Schema/DDLSchema.xsd").replace('file:','')
print("schema file is:")
print(schema)
sys.stdout.flush()
# Loop over the XML files listed in the cfi file and find them
# NOTE: Now that the files are in an external package, they will
# not be in a 'LOCAL_TOP_DIR'. Checking them for each IB may not
# be needed.
#
## for name in xmlFiles:
## fullpath = '%s/src/%s' % (os.environ['LOCAL_TOP_DIR'], name)
## if os.path.isfile(fullpath):
## callDOMCount(schema, fullpath)
## else:
## # It is an error if the file is not there but the package is
## packageDirectory = os.environ['LOCAL_TOP_DIR'] + '/src/' + re.split('/', name)[0] + '/' + re.split('/', name)[1]
## if os.path.isdir(packageDirectory):
## print 'Error, xml file not found:'
## print fullpath
## print 'Package is there but the xml file is not'
## sys.stdout.flush()
## continue
## # if there is a base release then try to find the file there
## fullpath = '%s/src/%s' % (os.getenv('CMSSW_RELEASE_BASE'), name)
## if os.path.isfile(fullpath):
## callDOMCount(schema, fullpath)
## else:
## print 'Error, xml file not found'
## print name
## sys.stdout.flush()
|
nilq/baby-python
|
python
|
# Copyright (c) 2018, Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from flask import g
from werkzeug.exceptions import Forbidden, Unauthorized
from warehouse import jwt
def test_required_decorator(app):
    """jwt_required passes through when g.jwt_valid, else raises 401."""
    wrapper = jwt.jwt_required(lambda: None)
    # Valid JWT raises no exception
    g.jwt_valid = True
    wrapper()
    # Invalid JWT raises exception
    g.jwt_valid = False
    with pytest.raises(Unauthorized):
        wrapper()
def test_invalid_access_level(app):
    """An unknown access-level name is rejected with ValueError."""
    with pytest.raises(ValueError):
        jwt.jwt_require_claim(1, "bogus")
def test_no_write_public_project(app):
    """Writes to the public project (id None) are always forbidden."""
    g.jwt_claims = {"prj": {}}
    with pytest.raises(Forbidden):
        jwt.jwt_require_claim(None, "admin")
def test_insufficient_access_level(app):
    """A lower claim level must not grant a higher access level."""
    g.jwt_claims = {"prj": {1: "read"}}
    with pytest.raises(Forbidden):
        jwt.jwt_require_claim(1, "write")
    with pytest.raises(Forbidden):
        jwt.jwt_require_claim(1, "admin")
    g.jwt_claims = {"prj": {1: "write"}}
    with pytest.raises(Forbidden):
        jwt.jwt_require_claim(1, "admin")
def test_sufficient_access_level(app):
    """Each claim level grants itself and every lower level (no raise)."""
    g.jwt_claims = {"prj": {1: "read"}}
    jwt.jwt_require_claim(1, "read")
    g.jwt_claims = {"prj": {1: "write"}}
    jwt.jwt_require_claim(1, "read")
    jwt.jwt_require_claim(1, "write")
    g.jwt_claims = {"prj": {1: "admin"}}
    jwt.jwt_require_claim(1, "read")
    jwt.jwt_require_claim(1, "write")
    jwt.jwt_require_claim(1, "admin")
def test_missing_access_level(app):
    """Claims on one project grant nothing on a different project."""
    g.jwt_claims = {"prj": {1: "admin"}}
    with pytest.raises(Forbidden):
        jwt.jwt_require_claim(2, "admin")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import unittest
import subprocess as sub
from astropy.time import Time
from bin import epics_fetch
class TestEPICSFetch(unittest.TestCase):
    """Smoke tests for the epics_fetch telemetry helper."""
    def test_known_date(self):
        # Fetch counterweight positions for a fixed window and make sure
        # the pretty-printer accepts the result. (t-1) is presumably one
        # day earlier in astropy Time arithmetic — TODO confirm.
        t = Time('2020-06-07T00:00', format='isot')
        data = epics_fetch.get_data(['25m:mcp:cwPositions'], t.datetime,
                                    (t-1).datetime)
        epics_fetch._print_data(data, ["25m:mcp:cwPositions"])
    def test_archive(self):
        """Checks to see if the directory for new data is available to this
        computer"""
        # This serves no purpose because simply importing the library is a pass
        print(epics_fetch.telemetry)
        return
    def test_help(self):
        """Prints the help if -h is provided"""
        sub.call('{} -h'.format(epics_fetch.__file__), shell=True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
""" Customfield.
Do not edit this file by hand.
This is generated by parsing api.html service doc.
"""
from ambra_sdk.exceptions.service import AccountNotFound
from ambra_sdk.exceptions.service import FilterNotFound
from ambra_sdk.exceptions.service import InvalidCondition
from ambra_sdk.exceptions.service import InvalidDicomTag
from ambra_sdk.exceptions.service import InvalidDicomTagObject
from ambra_sdk.exceptions.service import InvalidField
from ambra_sdk.exceptions.service import InvalidHl7Field
from ambra_sdk.exceptions.service import InvalidHl7Object
from ambra_sdk.exceptions.service import InvalidHl7Segment
from ambra_sdk.exceptions.service import InvalidJson
from ambra_sdk.exceptions.service import InvalidObject
from ambra_sdk.exceptions.service import InvalidOptions
from ambra_sdk.exceptions.service import InvalidSearchSource
from ambra_sdk.exceptions.service import InvalidSortField
from ambra_sdk.exceptions.service import InvalidSortOrder
from ambra_sdk.exceptions.service import InvalidType
from ambra_sdk.exceptions.service import MissingFields
from ambra_sdk.exceptions.service import NoDicomTagDefined
from ambra_sdk.exceptions.service import NotASearch
from ambra_sdk.exceptions.service import NotFound
from ambra_sdk.exceptions.service import NotPermitted
from ambra_sdk.service.query import QueryO
from ambra_sdk.service.query import AsyncQueryO
from ambra_sdk.service.query import QueryOPSF
from ambra_sdk.service.query import AsyncQueryOPSF
class Customfield:
"""Customfield."""
    def __init__(self, api):
        """:param api: low-level Api client used to execute the queries."""
        self._api = api
def list(
self,
account_id,
):
"""List.
:param account_id: uuid of the account
"""
request_data = {
'account_id': account_id,
}
errors_mapping = {}
errors_mapping[('FILTER_NOT_FOUND', None)] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
errors_mapping[('INVALID_CONDITION', None)] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_FIELD', None)] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_SORT_FIELD', None)] = InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to')
errors_mapping[('INVALID_SORT_ORDER', None)] = InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view this list')
query_data = {
'api': self._api,
'url': '/customfield/list',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
query_data['paginated_field'] = 'customfields'
return QueryOPSF(**query_data)
    def add(
        self,
        account_id,
        name,
        object,
        capture_on_destination_search=None,
        capture_on_share_code=None,
        dicom_only=None,
        dicom_tag=None,
        dicom_tag_ignore_empty=None,
        display_order=None,
        field_flag=None,
        hl7_component=None,
        hl7_field=None,
        hl7_segment=None,
        load_dicom_tag=None,
        load_from_sr=None,
        load_hl7=None,
        load_hl7_filter=None,
        load_order=None,
        options=None,
        other_customfield_id=None,
        other_dicom_tags=None,
        required=None,
        type=None,
        wrapped_dicom_only=None,
    ):
        """Add.

        :param account_id: uuid of the account
        :param name: Name of the customfield
        :param object: The object to associate the customfield with (Study|User_account|Group|Location|Account|Patient|Case|Order|Appointment|Dicomdata|Scanner|Query)
        :param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (only applicable to study fields) (optional)
        :param capture_on_share_code: Flag if the field should be captured during a share code exchange (only applicable to study fields) (optional)
        :param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
        :param dicom_tag: DICOM tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
        :param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
        :param display_order: Integer to order how the fields should be displayed (optional)
        :param field_flag: Default customfield flag (optional)
        :param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
        :param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
        :param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
        :param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
        :param load_from_sr: Load the value from the structured reports in the study (only applicable to study fields) .(optional)
        :param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
        :param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
        :param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
        :param options: Additional options in JSON format (optional)
        :param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
        :param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
        :param required: Flag if the field is required (optional)
        :param type: Type of the custom field (text|number|date|memo|select|multiselect|radio|checkbox|search|bool) (optional)
        :param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)

        :returns: a QueryO for the /customfield/add call
        """
        request_data = {
            'account_id': account_id,
            'capture_on_destination_search': capture_on_destination_search,
            'capture_on_share_code': capture_on_share_code,
            'dicom_only': dicom_only,
            'dicom_tag': dicom_tag,
            'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
            'display_order': display_order,
            'field_flag': field_flag,
            'hl7_component': hl7_component,
            'hl7_field': hl7_field,
            'hl7_segment': hl7_segment,
            'load_dicom_tag': load_dicom_tag,
            'load_from_sr': load_from_sr,
            'load_hl7': load_hl7,
            'load_hl7_filter': load_hl7_filter,
            'load_order': load_order,
            'name': name,
            'object': object,
            'options': options,
            'other_customfield_id': other_customfield_id,
            'other_dicom_tags': other_dicom_tags,
            'required': required,
            'type': type,
            'wrapped_dicom_only': wrapped_dicom_only,
        }
        # Service error code -> SDK exception mapping for this endpoint.
        errors_mapping = {}
        errors_mapping[('ACCOUNT_NOT_FOUND', None)] = AccountNotFound('The account can not be found')
        errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
        errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
        errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
        errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
        errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
        errors_mapping[('INVALID_OBJECT', None)] = InvalidObject('An invalid object was passed.')
        errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
        errors_mapping[('INVALID_SEARCH_SOURCE', None)] = InvalidSearchSource('An invalid search source was passed.')
        errors_mapping[('INVALID_TYPE', None)] = InvalidType('An invalid type was passed.')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The Customfield can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to add a customfield to this account')
        errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
        query_data = {
            'api': self._api,
            'url': '/customfield/add',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return QueryO(**query_data)
def set(
self,
uuid,
capture_on_destination_search=None,
capture_on_share_code=None,
dicom_only=None,
dicom_tag=None,
dicom_tag_ignore_empty=None,
display_order=None,
field_flag=None,
hl7_component=None,
hl7_field=None,
hl7_segment=None,
load_dicom_tag=None,
load_from_sr=None,
load_hl7=None,
load_hl7_filter=None,
load_order=None,
name=None,
options=None,
other_customfield_id=None,
other_dicom_tags=None,
required=None,
wrapped_dicom_only=None,
):
"""Set.
:param uuid: uuid of the customfield
:param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (optional)
:param capture_on_share_code: Flag if the study type field should be captured during a share code exchange (optional)
:param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
:param dicom_tag: Dicom tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
:param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
:param display_order: Integer to order how the fields should be displayed (optional)
:param field_flag: Default customfield flag (optional)
:param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
:param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
:param load_from_sr: Load the value from the structured reports in the study. (only applicable to study fields) .(optional)
:param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
:param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
:param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
:param name: Name of the customfield (optional)
:param options: Additional options in JSON format (optional)
:param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
:param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
:param required: Flag if the field is required (optional)
:param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)
"""
request_data = {
'capture_on_destination_search': capture_on_destination_search,
'capture_on_share_code': capture_on_share_code,
'dicom_only': dicom_only,
'dicom_tag': dicom_tag,
'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
'display_order': display_order,
'field_flag': field_flag,
'hl7_component': hl7_component,
'hl7_field': hl7_field,
'hl7_segment': hl7_segment,
'load_dicom_tag': load_dicom_tag,
'load_from_sr': load_from_sr,
'load_hl7': load_hl7,
'load_hl7_filter': load_hl7_filter,
'load_order': load_order,
'name': name,
'options': options,
'other_customfield_id': other_customfield_id,
'other_dicom_tags': other_dicom_tags,
'required': required,
'uuid': uuid,
'wrapped_dicom_only': wrapped_dicom_only,
}
errors_mapping = {}
errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
errors_mapping[('INVALID_HL7_FIELD', None)] = InvalidHl7Field('Invalid field number')
errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The object was not found. The error_subtype holds the name of the key for the object that can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to edit the customfield')
errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
query_data = {
'api': self._api,
'url': '/customfield/set',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def get(
self,
uuid,
):
"""Get.
:param uuid: uuid of the customfield
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view the customfield')
query_data = {
'api': self._api,
'url': '/customfield/get',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def delete(
self,
uuid,
):
"""Delete.
:param uuid: uuid of the customfield
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to delete the customfield')
query_data = {
'api': self._api,
'url': '/customfield/delete',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def lookup(
self,
account_id,
name,
):
"""Lookup.
:param account_id: uuid of the account
:param name: Name of the customfield
"""
request_data = {
'account_id': account_id,
'name': name,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/customfield/lookup',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def search(
self,
uuid,
search=None,
):
"""Search.
:param uuid: uuid of the customfield
:param search: The value to search for (optional)
"""
request_data = {
'search': search,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_A_SEARCH', None)] = NotASearch('This is not a search type of customfield')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/customfield/search',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
class AsyncCustomfield:
    """AsyncCustomfield.

    Asynchronous twin of the synchronous Customfield namespace: identical
    endpoints, payloads and error codes, but every method returns an
    AsyncQueryO (or AsyncQueryOPSF for the paginated list call) instead of
    a synchronous query object.
    """
    def __init__(self, api):
        # Shared API client used to issue the actual HTTP requests.
        self._api = api
    def list(
        self,
        account_id,
    ):
        """List.
        :param account_id: uuid of the account
        """
        request_data = {
            'account_id': account_id,
        }
        # Map API error codes to the exceptions raised by the query machinery.
        errors_mapping = {}
        errors_mapping[('FILTER_NOT_FOUND', None)] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
        errors_mapping[('INVALID_CONDITION', None)] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
        errors_mapping[('INVALID_FIELD', None)] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
        errors_mapping[('INVALID_SORT_FIELD', None)] = InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to')
        errors_mapping[('INVALID_SORT_ORDER', None)] = InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view this list')
        query_data = {
            'api': self._api,
            'url': '/customfield/list',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        # Results are paginated; AsyncQueryOPSF iterates the 'customfields' field.
        query_data['paginated_field'] = 'customfields'
        return AsyncQueryOPSF(**query_data)
    def add(
        self,
        account_id,
        name,
        object,
        capture_on_destination_search=None,
        capture_on_share_code=None,
        dicom_only=None,
        dicom_tag=None,
        dicom_tag_ignore_empty=None,
        display_order=None,
        field_flag=None,
        hl7_component=None,
        hl7_field=None,
        hl7_segment=None,
        load_dicom_tag=None,
        load_from_sr=None,
        load_hl7=None,
        load_hl7_filter=None,
        load_order=None,
        options=None,
        other_customfield_id=None,
        other_dicom_tags=None,
        required=None,
        type=None,
        wrapped_dicom_only=None,
    ):
        """Add.
        :param account_id: uuid of the account
        :param name: Name of the customfield
        :param object: The object to associate the customfield with (Study|User_account|Group|Location|Account|Patient|Case|Order|Appointment|Dicomdata|Scanner|Query)
        :param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (only applicable to study fields) (optional)
        :param capture_on_share_code: Flag if the field should be captured during a share code exchange (only applicable to study fields) (optional)
        :param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
        :param dicom_tag: DICOM tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
        :param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
        :param display_order: Integer to order how the fields should be displayed (optional)
        :param field_flag: Default customfield flag (optional)
        :param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
        :param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
        :param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
        :param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
        :param load_from_sr: Load the value from the structured reports in the study (only applicable to study fields) .(optional)
        :param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
        :param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
        :param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
        :param options: Additional options in JSON format (optional)
        :param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
        :param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
        :param required: Flag if the field is required (optional)
        :param type: Type of the custom field (text|number|date|memo|select|multiselect|radio|checkbox|search|bool) (optional)
        :param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)
        """
        request_data = {
            'account_id': account_id,
            'capture_on_destination_search': capture_on_destination_search,
            'capture_on_share_code': capture_on_share_code,
            'dicom_only': dicom_only,
            'dicom_tag': dicom_tag,
            'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
            'display_order': display_order,
            'field_flag': field_flag,
            'hl7_component': hl7_component,
            'hl7_field': hl7_field,
            'hl7_segment': hl7_segment,
            'load_dicom_tag': load_dicom_tag,
            'load_from_sr': load_from_sr,
            'load_hl7': load_hl7,
            'load_hl7_filter': load_hl7_filter,
            'load_order': load_order,
            'name': name,
            'object': object,
            'options': options,
            'other_customfield_id': other_customfield_id,
            'other_dicom_tags': other_dicom_tags,
            'required': required,
            'type': type,
            'wrapped_dicom_only': wrapped_dicom_only,
        }
        errors_mapping = {}
        errors_mapping[('ACCOUNT_NOT_FOUND', None)] = AccountNotFound('The account can not be found')
        errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
        errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
        errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
        errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
        errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
        errors_mapping[('INVALID_OBJECT', None)] = InvalidObject('An invalid object was passed.')
        errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
        errors_mapping[('INVALID_SEARCH_SOURCE', None)] = InvalidSearchSource('An invalid search source was passed.')
        errors_mapping[('INVALID_TYPE', None)] = InvalidType('An invalid type was passed.')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The Customfield can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to add a customfield to this account')
        errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
        query_data = {
            'api': self._api,
            'url': '/customfield/add',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return AsyncQueryO(**query_data)
    def set(
        self,
        uuid,
        capture_on_destination_search=None,
        capture_on_share_code=None,
        dicom_only=None,
        dicom_tag=None,
        dicom_tag_ignore_empty=None,
        display_order=None,
        field_flag=None,
        hl7_component=None,
        hl7_field=None,
        hl7_segment=None,
        load_dicom_tag=None,
        load_from_sr=None,
        load_hl7=None,
        load_hl7_filter=None,
        load_order=None,
        name=None,
        options=None,
        other_customfield_id=None,
        other_dicom_tags=None,
        required=None,
        wrapped_dicom_only=None,
    ):
        """Set.
        :param uuid: uuid of the customfield
        :param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (optional)
        :param capture_on_share_code: Flag if the study type field should be captured during a share code exchange (optional)
        :param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
        :param dicom_tag: Dicom tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
        :param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
        :param display_order: Integer to order how the fields should be displayed (optional)
        :param field_flag: Default customfield flag (optional)
        :param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
        :param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
        :param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
        :param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
        :param load_from_sr: Load the value from the structured reports in the study. (only applicable to study fields) .(optional)
        :param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
        :param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
        :param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
        :param name: Name of the customfield (optional)
        :param options: Additional options in JSON format (optional)
        :param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
        :param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
        :param required: Flag if the field is required (optional)
        :param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)
        """
        request_data = {
            'capture_on_destination_search': capture_on_destination_search,
            'capture_on_share_code': capture_on_share_code,
            'dicom_only': dicom_only,
            'dicom_tag': dicom_tag,
            'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
            'display_order': display_order,
            'field_flag': field_flag,
            'hl7_component': hl7_component,
            'hl7_field': hl7_field,
            'hl7_segment': hl7_segment,
            'load_dicom_tag': load_dicom_tag,
            'load_from_sr': load_from_sr,
            'load_hl7': load_hl7,
            'load_hl7_filter': load_hl7_filter,
            'load_order': load_order,
            'name': name,
            'options': options,
            'other_customfield_id': other_customfield_id,
            'other_dicom_tags': other_dicom_tags,
            'required': required,
            'uuid': uuid,
            'wrapped_dicom_only': wrapped_dicom_only,
        }
        errors_mapping = {}
        errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
        errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
        errors_mapping[('INVALID_HL7_FIELD', None)] = InvalidHl7Field('Invalid field number')
        errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
        errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
        errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
        errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The object was not found. The error_subtype holds the name of the key for the object that can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to edit the customfield')
        errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
        query_data = {
            'api': self._api,
            'url': '/customfield/set',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return AsyncQueryO(**query_data)
    def get(
        self,
        uuid,
    ):
        """Get.
        :param uuid: uuid of the customfield
        """
        request_data = {
            'uuid': uuid,
        }
        errors_mapping = {}
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view the customfield')
        query_data = {
            'api': self._api,
            'url': '/customfield/get',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return AsyncQueryO(**query_data)
    def delete(
        self,
        uuid,
    ):
        """Delete.
        :param uuid: uuid of the customfield
        """
        request_data = {
            'uuid': uuid,
        }
        errors_mapping = {}
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to delete the customfield')
        query_data = {
            'api': self._api,
            'url': '/customfield/delete',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return AsyncQueryO(**query_data)
    def lookup(
        self,
        account_id,
        name,
    ):
        """Lookup.
        :param account_id: uuid of the account
        :param name: Name of the customfield
        """
        request_data = {
            'account_id': account_id,
            'name': name,
        }
        errors_mapping = {}
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
        query_data = {
            'api': self._api,
            'url': '/customfield/lookup',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return AsyncQueryO(**query_data)
    def search(
        self,
        uuid,
        search=None,
    ):
        """Search.
        :param uuid: uuid of the customfield
        :param search: The value to search for (optional)
        """
        request_data = {
            'search': search,
            'uuid': uuid,
        }
        errors_mapping = {}
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping[('NOT_A_SEARCH', None)] = NotASearch('This is not a search type of customfield')
        errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
        errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
        query_data = {
            'api': self._api,
            'url': '/customfield/search',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return AsyncQueryO(**query_data)
|
nilq/baby-python
|
python
|
from adafruit_servokit import ServoKit
from dcservo import DogCamServoBase
# Don't export ServoLib
# BUG FIX: ("DogCamServoAda") without a trailing comma is just a string, not a
# tuple; as __all__ it would make `from <module> import *` iterate characters.
__all__ = ("DogCamServoAda",)
# Bring in global instance shared by all DogCamServoAda channels on the HAT.
ServoLib = ServoKit(channels=16)
class DogCamServoAda(DogCamServoBase):
    """Servo driven through the Adafruit ServoKit HAT (module-global ServoLib)."""
    def __init__(self, InName, InPin, ZeroAngle=0.0, Steps=1.0, LowerBounds=0.0, UpperBounds=180.0, PulseWidthMin=1000, PulseWidthMax=2000):
        # Configure the physical channel before the base class starts driving it.
        ServoLib.servo[InPin].actuation_range = UpperBounds
        ServoLib.servo[InPin].set_pulse_width_range(PulseWidthMin, PulseWidthMax)
        super().__init__(InName, InPin, InZeroAngle=ZeroAngle, InSteps=Steps, InLowerBounds=LowerBounds, InUpperBounds=UpperBounds)
    def _MoveToPosition(self, angle):
        # NOTE(review): self.Name and self.Pin are presumably set by
        # DogCamServoBase.__init__ from InName/InPin -- confirm in dcservo.
        print(f"{self.Name}: Moving to position {angle}")
        try:
            ServoLib.servo[self.Pin].angle = angle
        except Exception as ex:
            # Best effort: hardware errors are reported but never raised.
            print(f"{self.Name}: Could not move position to {angle}!\n{ex}")
|
nilq/baby-python
|
python
|
'''
code by Tae Hwan Jung(Jeff Jung) @graykode
'''
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
tf.reset_default_graph()  # legacy TF1 graph-mode API
# Toy corpus of short sentences for the skip-gram demo.
sentences = [ "i like dog", "i like cat", "i like animal",
              "dog cat animal", "apple cat dog like", "dog fish milk like",
              "dog cat eyes like", "i like apple", "apple i hate",
              "apple i movie book music like", "cat dog hate", "cat dog like"]
word_sequence = " ".join(sentences).split() # corpus as one flat, ordered token list
word_list = " ".join(sentences).split()
word_list = list(set(word_list)) # deduplicated vocabulary (NOTE: set order varies per run)
word_dict = {w: i for i, w in enumerate(word_list)} # word -> integer id
# Word2Vec Parameter
batch_size = 20
embedding_size = 2 # To show 2 dim embedding graph
voc_size = len(word_list)
def random_batch(data, size):
    """Sample `size` distinct (target, context) pairs as one-hot vectors.

    `data` is a list of [target_id, context_id] pairs; returns parallel
    lists of one-hot input vectors and one-hot label vectors.
    """
    chosen = np.random.choice(range(len(data)), size, replace=False)
    one_hot = np.eye(voc_size)  # row k is the one-hot vector for word id k
    inputs = [one_hot[data[idx][0]] for idx in chosen]   # target words
    labels = [one_hot[data[idx][1]] for idx in chosen]   # context words
    return inputs, labels
# Build skip-grams with a window of one word on each side.
skip_grams = []
for i in range(1, len(word_sequence) - 1):
    target = word_dict[word_sequence[i]] # id of the center word
    context = [word_dict[word_sequence[i - 1]], word_dict[word_sequence[i + 1]]] # ids of the left/right neighbours
    for w in context:
        skip_grams.append([target, w]) # one (target, context) pair per neighbour
# Model
inputs = tf.placeholder(tf.float32, shape=[None, voc_size]) # one-hot target words (batch x vocab)
labels = tf.placeholder(tf.float32, shape=[None, voc_size]) # one-hot context words (batch x vocab)
# W and WT are two independent weight matrices, not each other's transpose
W = tf.Variable(tf.random_uniform([voc_size, embedding_size], -1.0, 1.0)) # input embedding matrix
WT = tf.Variable(tf.random_uniform([embedding_size, voc_size], -1.0, 1.0)) # output projection matrix
hidden_layer = tf.matmul(inputs, W) # [batch_size, embedding_size]
output_layer = tf.matmul(hidden_layer, WT) # [batch_size, voc_size]
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output_layer, labels=labels))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost) # 0.001 is the learning rate
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    for epoch in range(5000):
        batch_inputs, batch_labels = random_batch(skip_grams, batch_size)
        _, loss = sess.run([optimizer, cost], feed_dict={inputs: batch_inputs, labels: batch_labels})
        if (epoch + 1)%1000 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
    trained_embeddings = W.eval()
# Plot the learned 2-D embedding of every vocabulary word.
for i, label in enumerate(word_list):
    x, y = trained_embeddings[i]
    plt.scatter(x, y)
    plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.show()
|
nilq/baby-python
|
python
|
import ply.lex as lex
import ply.yacc as yacc
# Command words of the game REPL; each is dispatched to a game_instance method.
KEYWORDS = ("run", "load", "save", "insert", "clear", "quit", "exit")
# Game attributes that can be assigned with "<param> <expression>".
PARAMS = ("topology", "width", "height")
# Recognised topology names, quoted as they appear in the STRING token.
DOMAINS = ("'KleinBottle'", "'MoebiusBand'", "'Torus'", "'Cylinder'", "'Plane'")
class Parser:
    """
    Base class for a lexer/parser that has the rules defined as methods
    """
    tokens = ()
    precedence = ()
    def __init__(self, game_instance, **kw):
        # Symbol table for "NAME = expression" assignments.
        self.names = {}
        # Target object for command/parameter dispatch in subclasses.
        self.game_instance = game_instance
        # Build the lexer and parser
        # PLY introspects the t_*/p_* rules defined on the (sub)class instance.
        lex.lex(module=self)
        yacc.yacc(module=self)
    def parse(self, s):
        # Parse a single input string with the most recently built parser.
        yacc.parse(s)
class GameParser(Parser):
    """
    This class is a parser for the game's control/config language. It is an
    adaption of David Beazleys classcalc example contained in PLY, hence an
    elementary calculator is also included :)
    """
    # NOTE: the docstrings of the t_* and p_* methods below are *not*
    # documentation -- PLY reads them as token regexes and grammar rules.
    tokens = (
        'NAME', 'NUMBER',
        'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS',
        'LPAREN', 'RPAREN', 'PARAM', 'KEY', 'STRING'
    )
    # Reserved words: PARAMS lex as PARAM tokens, KEYWORDS as KEY tokens.
    reserved = dict(((k, 'PARAM') for k in PARAMS), **{k: 'KEY' for k in KEYWORDS})
    # Tokens
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_EXP = r'\*\*'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_EQUALS = r'='
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_STRING = r'\'[a-zA-Z_]*\''
    def t_NAME(self, t):
        r'[a-zA-Z_][a-zA-Z0-9_]*'
        # Reclassify identifiers that are reserved words as PARAM/KEY.
        t.type = GameParser.reserved.get(t.value, 'NAME')
        return t
    def t_NUMBER(self, t):
        r'\d+'
        try:
            t.value = int(t.value)
        except ValueError:
            print("Integer value too large %s" % t.value)
            t.value = 0
        return t
    t_ignore = " \t"
    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += t.value.count("\n")
    def t_error(self, t):
        # Skip unknown characters one at a time so lexing can continue.
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
    # Parsing rules
    precedence = (
        ('left', 'PLUS', 'MINUS'),
        ('left', 'TIMES', 'DIVIDE'),
        ('left', 'EXP'),
        ('right', 'UMINUS'),
    )
    # "<param> <expr>" sets an attribute on the game instance.
    def p_statement_setparam(self, p):
        "statement : PARAM expression"
        try:
            setattr(self.game_instance, p[1], p[2])
        except Exception as e:
            print(e)
    # "<keyword> <expr>" calls the game-instance method with one argument.
    def p_statement_keyword_arg(self, p):
        "statement : KEY expression"
        try:
            getattr(self.game_instance, p[1])(p[2])
        except Exception as e:
            print(e)
    # "<keyword>" alone calls the game-instance method with no arguments.
    def p_statement_keyword_noarg(self, p):
        "statement : KEY"
        try:
            getattr(self.game_instance, p[1])()
        except Exception as e:
            print(e)
    def p_statement_assign(self, p):
        'statement : NAME EQUALS expression'
        self.names[p[1]] = p[3]
    def p_statement_expr(self, p):
        'statement : expression'
        print(p[1])
    def p_expression_binop(self, p):
        """
        expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression
                  | expression EXP expression
        """
        if p[2] == '+':
            p[0] = p[1] + p[3]
        elif p[2] == '-':
            p[0] = p[1] - p[3]
        elif p[2] == '*':
            p[0] = p[1] * p[3]
        elif p[2] == '/':
            p[0] = p[1] / p[3]
        elif p[2] == '**':
            p[0] = p[1] ** p[3]
    def p_expression_uminus(self, p):
        'expression : MINUS expression %prec UMINUS'
        p[0] = -p[2]
    def p_expression_group(self, p):
        'expression : LPAREN expression RPAREN'
        p[0] = p[2]
    def p_expression_number(self, p):
        'expression : NUMBER'
        p[0] = p[1]
    def p_expression_name(self, p):
        'expression : NAME'
        try:
            p[0] = self.names[p[1]]
        except LookupError:
            print("Undefined name '%s'" % p[1])
            p[0] = 0
    def p_expression_string(self, p):
        'expression : STRING'
        # Strip the surrounding single quotes from the token text.
        p[0] = p[1].strip("'")
    def p_error(self, p):
        if p:
            print("Syntax error at '%s'" % p.value)
        else:
            print("Syntax error at EOF")
if __name__ == '__main__':
    # BUG FIX: the original did `p = GameParser()` (TypeError: __init__
    # requires game_instance) and then `p.run()`, a method that does not
    # exist -- Parser only exposes parse().  Run a simple REPL instead;
    # with no game instance, command dispatch just prints the error.
    p = GameParser(None)
    try:
        while True:
            line = input('> ')
            if line:
                p.parse(line)
    except (EOFError, KeyboardInterrupt):
        pass
|
nilq/baby-python
|
python
|
"""
Images should have the shape b x c x h x w.
Masks attach an alpha channel with masking values in the range [0, 1], which can
be consumed by other augmentation layers. Masks themselves consume alpha
channels by multiplying the old with the new.
"""
import math
import torch
import torch.fft
from torch import Tensor
def to_tensor(x):
    """Coerce *x* to a Tensor; existing tensors are passed through untouched."""
    if isinstance(x, Tensor):
        return x
    return torch.tensor(x)
def _attach(image, mask):
b, c, h, w = image.shape
mask = mask.expand(b,1,h,w)
mask = mask.to(image.device)
if c == 3:
mask = mask.to(image.dtype)
return torch.cat([image, mask],1)
elif c == 4:
image[:,3,...] *= mask
return image
def detach(image):
    """Split a 4-channel image into its colour part and its alpha mask."""
    colour = image[:, :3, :, :]
    alpha = image[:, 3:, :, :]
    return colour, alpha
def cutout(image, size):
    """Mask one random axis-aligned rectangle per batch element.

    *size* is a pair (size_h, size_w) of half-extents (scalars or per-sample
    values); the masked region is 2*size_h x 2*size_w around a uniformly
    random centre, clipped at the image borders.  Returns *image* with the
    boolean mask attached as an alpha channel (True inside the rectangle).
    """
    b, c, h, w = image.shape
    size_h, size_w = size
    # view(-1,1,1,1) lets scalar or per-sample sizes broadcast over the grid.
    size_h = to_tensor(size_h).to(torch.int64).to(image.device).view(-1,1,1,1)
    size_w = to_tensor(size_w).to(torch.int64).to(image.device).view(-1,1,1,1)
    center_h = torch.randint(h, (b,1,1,1), device=image.device)
    center_w = torch.randint(w, (b,1,1,1), device=image.device)
    mask_h = torch.arange(h, device=image.device).view(1,1,-1,1)
    mask_w = torch.arange(w, device=image.device).view(1,1,1,-1)
    # Half-open interval on each axis: [center - size, center + size).
    mask = (center_h - size_h <= mask_h) & (mask_h < center_h + size_h) \
        & (center_w - size_w <= mask_w) & (mask_w < center_w + size_w)
    return _attach(image, mask)
def random_pixel(image, lam=0.5, kernel=1):
    """Bernoulli mask per kernel-sized cell; each cell kept with prob. *lam*."""
    b, c, h, w = image.shape
    cells_h = -(-h // kernel)  # ceil(h / kernel)
    cells_w = -(-w // kernel)  # ceil(w / kernel)
    noise = torch.rand([b, 1, cells_h, cells_w], device=image.device)
    noise = noise.repeat_interleave(kernel, dim=2)
    noise = noise.repeat_interleave(kernel, dim=3)
    noise = noise[:, :, :h, :w]
    keep = noise <= to_tensor(lam).view(-1, 1, 1, 1)
    return _attach(image, keep)
def random_row(image, lam=0.5, kernel=1):
    """Mask whole rows, in kernel-sized bands, each kept with prob. *lam*."""
    b, c, h, w = image.shape
    cells_h = -(-h // kernel)  # ceil(h / kernel)
    noise = torch.rand([b, 1, cells_h, 1], device=image.device)
    noise = noise.repeat_interleave(kernel, dim=2)
    noise = noise.expand(-1, -1, -1, w)[:, :, :h, :]
    keep = noise <= to_tensor(lam).view(-1, 1, 1, 1)
    return _attach(image, keep)
def random_col(image, lam=0.5, kernel=1):
    """Mask whole columns, in kernel-sized bands, each kept with prob. *lam*.

    Mirror of random_row. Two fixes versus the original:
    - the per-cell noise was never repeat_interleave'd along the width, so
      any kernel > 1 produced a mask narrower than the image and crashed in
      _attach's expand;
    - torch.rand was created on the default device instead of image.device,
      unlike every sibling function.
    """
    b, c, h, w = image.shape
    w_ = w // kernel + (w % kernel != 0)
    rand = torch.rand([b,1,1,w_], device=image.device)
    rand = rand.repeat_interleave(kernel, dim=3)
    rand = rand.expand(-1,-1,h,-1)[:,:,:,:w]
    lam = to_tensor(lam).view(-1,1,1,1)
    return _attach(image, rand <= lam)
def random_block(image, size=[50,50], lam=None):
    """Attach a mask covering one random axis-aligned block per image.

    If *lam* is given, the block's area is lam * h * w (its sides scale with
    sqrt(lam)) and *size* is ignored.
    """
    b, c, h, w = image.shape
    device = image.device
    if lam is not None:
        # to_tensor() lets lam be a plain float as well as a tensor;
        # torch.sqrt() on a raw Python float raises a TypeError.
        sqrt_lam = torch.sqrt(to_tensor(lam))
        size = (h * sqrt_lam, w * sqrt_lam)
    # Full-image shortcut. NOTE(review): the `all(s == [h,w] ...)` arm looks
    # suspect for tensor sizes (elementwise compare) -- kept as-is to
    # preserve behavior; confirm intent before relying on it.
    if size == [h,w] or all(s == [h,w] for s in size):
        return _attach(image, torch.ones(b,1,h,w))
    size_h, size_w = size
    size_h = to_tensor(size_h).to(torch.int64).to(device).view(-1,1,1,1)
    size_w = to_tensor(size_w).to(torch.int64).to(device).view(-1,1,1,1)
    # Random top-left corner such that the block fits inside the image.
    rand_h = torch.floor(torch.rand([b,1,1,1], device=device) * (h - size_h + 1))
    rand_w = torch.floor(torch.rand([b,1,1,1], device=device) * (w - size_w + 1))
    mask_h = torch.arange(h, device=device).view(1,1,-1,1).expand(b,-1,-1,-1)
    mask_w = torch.arange(w, device=device).view(1,1,1,-1).expand(b,-1,-1,-1)
    mask = (rand_h <= mask_h) & (mask_h < rand_h + size_h) \
         & (rand_w <= mask_w) & (mask_w < rand_w + size_w)
    return _attach(image, mask)
def random_row_strip(image, **kwargs):
    """Mask a single random horizontal strip; see random_strip (dim=2 is height)."""
    return random_strip(image, dim=2, **kwargs)
def random_col_strip(image, **kwargs):
    """Mask a single random vertical strip; see random_strip (dim=3 is width)."""
    return random_strip(image, dim=3, **kwargs)
def random_strip(image, dim, size=50, lam=None):
    """Mask one randomly-placed strip of width *size* along axis *dim*.

    If *lam* is given the strip width is lam times the axis extent instead.
    """
    b = image.shape[0]
    extent = image.shape[dim]
    device = image.device
    if lam is not None:
        size = extent * lam
    width = to_tensor(size).to(device).view(-1, 1, 1, 1)
    # Random start offset so the strip lies fully inside the axis.
    offset = torch.rand([b, 1, 1, 1], device=device) * (extent - width)
    coords = torch.arange(extent, device=device).view(1, 1, 1, extent)
    strip = (offset <= coords) & (coords < offset + width)
    # Built along the last axis, then swapped into place.
    return _attach(image, strip.transpose(-1, dim))
def time(image, lam=1.0):
    """SpecAugment mask along the time (width, last) axis.

    NOTE: shadows the stdlib ``time`` module name within this module.
    """
    max_width = lam * image.shape[-1]
    return specaugment(image, max_width, -1)
def frequency(image, lam=1.0):
    """SpecAugment mask along the frequency (height, second-to-last) axis."""
    max_width = lam * image.shape[-2]
    return specaugment(image, max_width, -2)
def specaugment(image, size, dim):
    """Mask one random band of random width (up to *size*) along axis *dim*."""
    b = image.shape[0]
    extent = image.shape[dim]
    max_width = to_tensor(size).view(-1, 1, 1, 1)
    # Random band width first, then a random start that keeps it in range.
    band_width = torch.rand([b, 1, 1, 1]) * max_width
    start = torch.rand([b, 1, 1, 1]) * (extent - band_width)
    coords = torch.arange(0, extent).view([1, 1, 1, -1])
    band = (start <= coords) & (coords < start + band_width)
    return _attach(image, band.transpose(-1, dim))
def fmix(image, lam=None, decay=3.0):
    """FMix mask: binarised low-frequency noise, attached as alpha."""
    b, _, h, w = image.shape
    soft = low_freq_mask([b, 1, h, w], decay)
    hard = binarise_mask(soft, lam)
    return _attach(image, hard)
def fftfreq(n, d=1.0, device='cpu'):
    """DFT sample frequencies, ordered non-negative first then negative
    (same layout as numpy.fft.fftfreq)."""
    n_pos = (n - 1) // 2 + 1
    freqs = torch.empty(n, device=device)
    freqs[:n_pos] = torch.arange(n_pos, device=device)
    freqs[n_pos:] = torch.arange(-(n // 2), 0, device=device)
    return freqs * (1.0 / (n * d))
def fftfreq2(h, w, device='cpu'):
    """Magnitude of the 2-D sample frequency grid (half-spectrum in width)."""
    fy = fftfreq(h, device=device).unsqueeze(-1)
    # Keep only the non-redundant half of the width axis; odd widths need
    # one extra column.
    cut = w // 2 + 2 if w % 2 == 1 else w // 2 + 1
    fx = fftfreq(w, device=device)[:cut]
    return torch.sqrt(fx * fx + fy * fy)
def get_spectrum(shape, decay, device='cpu'):
    """Random complex spectrum whose power falls off as freq**-decay."""
    b, c, h, w = shape
    # Clamp the DC/lowest bins so the 1/f**decay scale stays finite.
    floor = torch.tensor(1.0 / max(h, w), device=device)
    freqs = torch.maximum(fftfreq2(h, w, device=device), floor)
    fh, fw = freqs.shape
    scale = 1.0 / (freqs ** decay).view(1, 1, fh, fw, 1)
    noise = scale * torch.randn([b, c, fh, fw, 2])
    # Last axis holds (real, imag); fold into a complex tensor.
    return noise[..., 0] + noise[..., 1] * 1j
def low_freq_mask(shape, decay):
    """Smooth random mask in [0, 1] built from low-frequency noise."""
    h, w = shape[-2:]
    spec = get_spectrum(shape, decay)
    img = torch.fft.ifftn(spec, s=(h, w)).real
    # Rescale each batch element to span exactly [0, 1].
    flat = img.flatten(2)
    lo = flat.min(-1)[0].view(shape[0], 1, 1, 1)
    hi = flat.max(-1)[0].view(shape[0], 1, 1, 1)
    return (img - lo) / (hi - lo)
def binarise_mask(mask, lam):
    """Threshold *mask* in place so a fraction *lam* of each element's pixels
    (the largest ones) become 1 and the rest 0.

    Rounds lam * n up or down with probability 1/2 each, so the expected
    kept fraction is exactly lam.
    """
    orig_shape = mask.shape
    flat = mask.flatten(1)
    order = flat.argsort(-1, descending=True)
    rounder = torch.ceil if torch.rand(1) < 0.5 else torch.floor
    cut = rounder(lam * flat.shape[-1]).to(torch.int64)
    for row, idx, k in zip(flat, order, cut):
        row[idx[:k]] = 1
        row[idx[k:]] = 0
    return flat.view(orig_shape)
|
nilq/baby-python
|
python
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Special word ids for padding / unknown / end-of-sequence tokens.
PAD_WORD_ID = 0
UNK_WORD_ID = 1
END_WORD_ID = 2
# Special character ids above the 0-255 byte range (ELMo-style character
# vocabulary, presumably -- values 259-261 are reserved markers; confirm
# against the embedder that consumes them).
PAD_CHAR = 261
BOW_CHAR = 259  # begin-of-word marker
EOW_CHAR = 260  # end-of-word marker
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend; must be selected before pyplot import
import matplotlib.pyplot as plt
# Compare per-category mAP across four detector training configurations
# (with/without iNat data, with/without deer-like classes) and save a
# grouped bar chart.
#
# Fix: the original did np.load(open(file, 'r')) -- text mode breaks on the
# binary .npz under Python 3. Pass the path directly; allow_pickle=True is
# needed because 'ap' and 'cat_id_to_cat' are stored as pickled dicts.
file_1 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_1 = np.load(file_1, allow_pickle=True)
file_2 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_and_inat/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_2 = np.load(file_2, allow_pickle=True)
file_3 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_no_deer/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_3 = np.load(file_3, allow_pickle=True)
file_4 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_no_deer_and_inat/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_4 = np.load(file_4, allow_pickle=True)
ap = data_1['ap'].tolist()  # dict: category id -> average precision
cat_id_to_cat = data_1['cat_id_to_cat'].tolist()
# Keep only categories with a defined AP in the first run.
cat_ids = [i for i in ap if not np.isnan(ap[i])]
print(cat_ids)
N = len(cat_ids)
ind = np.arange(N)
width = 0.15  # bar width; four bars per category group
fig = plt.figure()
ax = fig.add_subplot(111)
aps = [ap[i] for i in cat_ids]
print(aps)
print(len(ind), len(aps))
rects1 = ax.bar(ind, aps, width, color='royalblue')
ap = data_2['ap'].tolist()
rects2 = ax.bar(ind+width, [ap[i] for i in cat_ids], width, color='seagreen')
ap = data_3['ap'].tolist()
rects3 = ax.bar(ind+width*2, [ap[i] for i in cat_ids], width, color='red')
ap = data_4['ap'].tolist()
rects4 = ax.bar(ind+width*3, [ap[i] for i in cat_ids], width, color='orange')
ax.set_ylabel('mAP per class')
ax.set_title('mAP per class with and without iNat and deer-like animals')
ax.set_xticks(ind + 3*width / 2)  # centre the tick under the 4-bar group
ax.set_xticklabels([cat_id_to_cat[i] for i in cat_ids])
plt.xticks(rotation=90)
ax.legend((rects1[0],rects2[0], rects3[0], rects4[0]),('w/deer, w/o iNat','w/ deer, w/ iNat','w/o deer, w/o iNat','w/o deer, w/iNat'), loc='lower center')
plt.tight_layout()
plt.savefig('/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_no_deer_and_inat/predictions/compare_per_seq_mAP_w_deer_and_no_deer.jpg')
|
nilq/baby-python
|
python
|
from pathlib import Path
import pytest
import git
import json
import os
from conftest import TEST_DIR
from masonry import main
from cookiecutter.exceptions import FailedHookException, UndefinedVariableInTemplate
@pytest.fixture(scope='module')
def init_simple_project(tmpdir_factory):
    """Initialise a throwaway project from the 'breaking_project' template
    and return its directory (shared across this module's tests)."""
    # Setup a basic project
    temp_output_path = Path(tmpdir_factory.mktemp('simple_project').strpath)
    template_path = TEST_DIR / 'example_templates' / 'breaking_project'
    # Set arguments
    args = f"init -o {temp_output_path} {template_path}"
    # Run from entry point. (The original re-imported `main` here even
    # though it is already imported at module level -- removed.)
    main.main(args=args)
    # Recover the generated project's name from the template's variables.
    cookiecutter_vars_path = os.path.join(template_path, "first_layer", "cookiecutter.json")
    with open(cookiecutter_vars_path, 'r') as f:
        cookiecutter_vars = json.load(f)
    project_name = cookiecutter_vars['project_name']
    project_dir = temp_output_path / project_name
    return project_dir
def test_rollback_when_error_in_pre_hook(init_simple_project):
    """A template whose pre-hook fails must leave the project untouched."""
    # GIVEN an initialised project
    project_dir = init_simple_project
    # WHEN a template is added that causes an error
    args = f"add -o {project_dir} breaking_pre_hook"
    with pytest.raises(FailedHookException):
        main.main(args=args)
    # THEN only the original files should be present
    # (set literal instead of set([...]))
    target = {
        project_dir / 'file_from_layer_1.txt',
        project_dir / '.mason',
        project_dir / '.git',
    }
    result = set(project_dir.iterdir())
    assert result == target
    # THEN original file should be unchanged
    result_file = project_dir / 'file_from_layer_1.txt'
    assert result_file.read_text() == '123456'
def test_rollback_when_error_in_post_hook(init_simple_project):
    """A template whose post-hook fails must leave the project untouched."""
    # GIVEN an initialised project
    project_dir = init_simple_project
    # WHEN a template is added that causes an error
    args = f"add -o {project_dir} breaking_post_hook"
    with pytest.raises(FailedHookException):
        main.main(args=args)
    # THEN only the original files should be present
    # (set literal instead of set([...]))
    target = {
        project_dir / 'file_from_layer_1.txt',
        project_dir / '.mason',
        project_dir / '.git',
    }
    result = set(project_dir.iterdir())
    assert result == target
    # THEN original file should be unchanged
    result_file = project_dir / 'file_from_layer_1.txt'
    assert result_file.read_text() == '123456'
def test_rollback_when_error_in_variable_name(init_simple_project):
    """An undefined template variable must roll the project back cleanly."""
    # GIVEN an initialised project
    project_dir = init_simple_project
    # WHEN a template is added that causes an error
    args = f"add -o {project_dir} breaking_variable_name"
    with pytest.raises(UndefinedVariableInTemplate):
        main.main(args=args)
    # THEN only the original files should be present
    # (set literal instead of set([...]))
    target = {
        project_dir / 'file_from_layer_1.txt',
        project_dir / '.mason',
        project_dir / '.git',
    }
    result = set(project_dir.iterdir())
    assert result == target
    # THEN original file should be unchanged
    result_file = project_dir / 'file_from_layer_1.txt'
    assert result_file.read_text() == '123456'
def test_rollback_when_init_project(tmpdir_factory):
    """A failing `init` must leave the output directory empty."""
    # GIVEN a temp directory and template to initialise
    temp_output_path = Path(tmpdir_factory.mktemp('empty_project').strpath)
    template_path = TEST_DIR / 'example_templates' / 'breaking_project'
    # WHEN a new project is initialised that causes an error
    args = f"init -o {temp_output_path} {template_path}/breaking_variable_name"
    with pytest.raises(UndefinedVariableInTemplate):
        main.main(args=args)
    # THEN the directory should be empty (set() instead of set([]))
    assert set(temp_output_path.iterdir()) == set()
|
nilq/baby-python
|
python
|
# standard
import os
# BASE DIRECTORY
# Two levels up from this file -- the repository/package root.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# HEARTBEAT
# Interval in milliseconds between heartbeat ticks.
HEARTBEAT = 10 * 1000
# INTERNET
# Connectivity probe: try this host:port with the given timeout (seconds),
# re-checking every `interval` milliseconds. 1.1.1.1:53 is Cloudflare DNS.
INTERNET = {
    'address': '1.1.1.1',
    'port': 53,
    'timeout': 3,
    'interval': 5 * 1000
}
# MODULES
# Names of pluggable modules loaded from MODULES_DIR; MODULES_CONVENTION
# describes how module names map to class names ('title' case, presumably --
# confirm against the loader).
MODULES = ('fb', 'synker')
MODULES_DIR = 'src.modules'
MODULES_CONVENTION = 'title'
# Per-module default settings. Intervals are in seconds here, unlike the
# millisecond values above.
# NOTE(review): hardcoded default DB password below -- ensure real
# credentials come from settings.ini, not this file.
MODULES_SETTINGS = {
    'fb': {
        'interval': 60,
        'instance': 'localhost',
        'user': 'root',
        'password': 's3cret',
        'temp': '/tmp',
        'dest': ''
    },
    'synker': {
        'interval': 30,
        'localdir': '',
        'pattern': '*',
        'clouddir': '/backup',
        'limit': 0,
        'token': ''
    }
}
# CONFIG
CONFIG_FILENAME = 'settings.ini'
CONFIG_FILEPATH = os.path.join(BASE_DIR, CONFIG_FILENAME)
CONFIG_DEFAULT = {**MODULES_SETTINGS}
# LOG
LOG_LEVEL = 'DEBUG'
LOG_FILENAME = 'log/logs.log'
LOG_FILEPATH = os.path.join(BASE_DIR, LOG_FILENAME)
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# TRANSLATION
# (code, display name) pairs of supported UI languages.
LANGUAGES = (
    ('en', 'English'),
    ('fa', 'Persian')
)
LANG_CODE = 'fa'
TRANSLATION_DOMAIN = 'mb'
LOCALE_DIRNAME = 'locale'
LOCALE_DIRPATH = os.path.join(BASE_DIR, LOCALE_DIRNAME)
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
class douban_top250(models.Model):
    """One row of the Douban Top-250 movie chart as scraped."""
    serial_number = models.IntegerField()            # rank on the chart
    movie_name = models.CharField(max_length=255)
    introduce = models.CharField(max_length=255)
    # FloatField takes no max_length; the original passed max_length=12,
    # which has no effect on a float column -- dropped.
    star = models.FloatField()
    evaluate = models.CharField(max_length=255)      # e.g. number of ratings
    describe = models.CharField(max_length=255)
    datetime = models.DateTimeField(auto_now=True)   # refreshed on every save
    def __str__(self):
        return self.movie_name
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import math
import sys
from abc import abstractmethod
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils
import aardvark
import cv2
from tf_utils import *
import cpp
flags = tf.app.flags
FLAGS = flags.FLAGS
# Command-line configuration for the RPN:
#  - rpn_priors: text file of shape priors, one per line, '#' comments allowed
#  - rpn_params: number of regression parameters per shape prior
#  - rpn_stride: downsampling factor between input volume and RPN output grid
#  - *_weight: multipliers for the two loss terms added in build_rpn
flags.DEFINE_string('rpn_priors', 'rpn_priors', 'param prior config file')
flags.DEFINE_integer('rpn_params', 3, 'number of parameters per shape')
flags.DEFINE_integer('rpn_stride', 1, 'downsize factor of rpn output')
flags.DEFINE_float('rpn_logits_weight', 1.0, 'loss weight')
flags.DEFINE_float('rpn_params_weight', 1.0, 'loss weight')
class BasicRPN3D:
    """Skeleton for a 3-D region-proposal network built on TF1 graphs.

    Subclasses must implement rpn_backbone / rpn_logits / rpn_params
    (and rpn_generate_shapes); build_rpn wires them into anchor
    classification and prior-relative parameter regression losses.
    """
    def __init__ (self):
        priors = []
        # read in priors
        # what RPN estimates is the delta between priors and the real
        # regression target.
        if os.path.exists(FLAGS.rpn_priors):
            with open(FLAGS.rpn_priors, 'r') as f:
                for l in f:
                    # '#' starts a comment line in the priors file.
                    if l[0] == '#':
                        continue
                    vs = [float(v) for v in l.strip().split(' ')]
                    assert len(vs) == FLAGS.rpn_params
                    priors.append(vs)
                    pass
                pass
            pass
        # Fall back to a single all-ones prior when no file was given.
        if len(priors) == 0:
            priors.append([1.0] * FLAGS.rpn_params)
            pass
        aardvark.print_red("PRIORS %s" % str(priors))
        # (n_priors, n_params) float32 matrix used to normalise regression targets.
        self.priors = np.array(priors, dtype=np.float32)
        pass
    def rpn_backbone (self, volume, is_training, stride):
        """Subclass hook: feature extractor producing the shared RPN trunk."""
        assert False
    def rpn_logits (self, net, is_training, channels):
        """Subclass hook: per-voxel anchor classification logits."""
        assert False
    def rpn_params (self, net, is_training, channels):
        """Subclass hook: per-voxel shape-parameter regression head."""
        assert False
    def rpn_generate_shapes (self, shape, anchor_params, priors, n_priors):
        """Subclass hook: decode predicted parameters back into shapes."""
        assert False
    def build_rpn (self, volume, is_training, shape=None):
        """Build the RPN graph: placeholders, heads, and the two losses."""
        # volume: input volume tensor
        Z,Y,X = shape
        # Input extents must be divisible by the output stride.
        assert max(Z % FLAGS.rpn_stride, Y % FLAGS.rpn_stride, X % FLAGS.rpn_stride) == 0
        oZ = Z // FLAGS.rpn_stride
        oY = Y // FLAGS.rpn_stride
        oX = X // FLAGS.rpn_stride
        n_priors = self.priors.shape[0]
        n_params = self.priors.shape[1]
        # Ground-truth feeds: anchor labels and per-anchor weights...
        self.gt_anchors = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
        self.gt_anchors_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
        # parameter of that location
        self.gt_params = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors, n_params))
        self.gt_params_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
        self.backbone = self.rpn_backbone(volume, is_training, FLAGS.rpn_stride)
        logits = self.rpn_logits(self.backbone, is_training, n_priors)
        logits = tf.identity(logits, name='logits')
        self.logits = logits
        self.probs = tf.sigmoid(logits, name='probs')
        params = self.rpn_params(self.backbone, is_training, n_priors * n_params)
        params = tf.identity(params, name='params')
        self.params = params
        # setup losses
        # 1. losses for logits
        logits1 = tf.reshape(logits, (-1,))
        gt_anchors = tf.reshape(self.gt_anchors, (-1,))
        gt_anchors_weight = tf.reshape(self.gt_anchors_weight, (-1,))
        # Weighted sigmoid cross-entropy; epsilon guards the all-zero-weight case.
        xe = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits1, labels=tf.cast(gt_anchors, tf.float32))
        xe = tf.reduce_sum(xe * gt_anchors_weight) / (tf.reduce_sum(gt_anchors_weight) + 0.00001)
        xe = tf.identity(xe, name='xe')
        # NOTE(review): if self.metrics does not exist, getattr returns a
        # throwaway list and this append is a silent no-op -- a subclass is
        # presumably expected to define self.metrics beforehand; confirm.
        getattr(self, 'metrics', []).append(xe)
        tf.losses.add_loss(xe * FLAGS.rpn_logits_weight)
        # 2. losses for parameters
        # Targets are normalised by the priors before the Huber loss.
        priors = tf.constant(self.priors[np.newaxis, :, :], dtype=tf.float32)
        params = tf.reshape(params, (-1, n_priors, n_params))
        gt_params = tf.reshape(self.gt_params, (-1, n_priors, n_params))
        l1 = tf.losses.huber_loss(params, gt_params / priors, reduction=tf.losses.Reduction.NONE, loss_collection=None)
        l1 = tf.reduce_sum(l1, axis=2)
        # l1: ? * n_priors
        l1 = tf.reshape(l1, (-1,))
        gt_params_weight = tf.reshape(self.gt_params_weight, (-1,))
        l1 = tf.reduce_sum(l1 * gt_params_weight) / (tf.reduce_sum(gt_params_weight) + 0.00001)
        l1 = tf.identity(l1, name='l1')
        getattr(self, 'metrics', []).append(l1)
        tf.losses.add_loss(l1 * FLAGS.rpn_params_weight)
        pass
|
nilq/baby-python
|
python
|
import os
import sys
import yaml
import json
import pprint
import pathlib
import logging
import inspect
import argparse
import itertools
import importlib
from genie.metaparser import MetaParser
IGNORE_DIR = ['.git', '__pycache__', 'template', 'tests']
IGNORE_FILE = ['__init__.py', 'base.py', 'utils.py']
AVAILABLE_FUNC = ['cli', 'xml', 'yang', 'rest']
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
def format(d, tab=0):
    """Render a (possibly nested) dict as an indented literal-style string.

    Returns None unchanged. NOTE: the name shadows the builtin ``format``;
    kept because other code in this module calls it by this name.
    """
    if d is None:
        return None
    parts = ['{\n']
    for key, val in d.items():
        rendered = format(val, tab + 1) if isinstance(val, dict) else repr(val)
        parts.append('%s%r: %s,\n' % (' ' * tab, key, rendered))
    parts.append('%s}' % (' ' * tab))
    return ''.join(parts)
class CreateApiDoc(object):
    """Walk genieparser packages and build a JSON-able index of parser
    classes (docstring, schema, source URL) keyed by CLI command and token
    path. Requires an active virtualenv (VIRTUAL_ENV)."""
    def __init__(self, datafile):
        # The walker resolves source paths relative to the virtualenv.
        assert 'VIRTUAL_ENV' in os.environ
        with open(datafile, 'r') as f:
            self.datafile = yaml.safe_load(f)
        self.output = {}
        self.output['tokens'] = []
    def _expand(self, name):
        """Expand the '$env(VIRTUAL_ENV)' placeholder in a path string."""
        if '$env(VIRTUAL_ENV)' in name:
            # Replace '$env(VIRTUAL_ENV)' with the actual value
            return name.replace('$env(VIRTUAL_ENV)', os.environ['VIRTUAL_ENV'])
        return name
    def _find_parsers(self, mod):
        """Return MetaParser subclasses with a cli_command defined in *mod*."""
        parsers = []
        for name, obj in inspect.getmembers(mod):
            # starts with _ are ignored
            if name.startswith('_'):
                continue
            # skip if not class
            if not inspect.isclass(obj):
                continue
            # skip anything not defined in this module
            try:
                if inspect.getsourcefile(obj) != mod.__file__:
                    continue
            except:
                # getsourcefile fails for builtin objects
                # we aren't interested in those anyway
                continue
            # Inherits from metaparser + have a funciton which is from the
            # available func
            if issubclass(obj, MetaParser) and hasattr(obj, 'cli_command'):
                parsers.append(obj)
        return parsers
    def _add_parser(self, parser, cli, tokens, mod):
        """Record one parser class under output[cli][token1][token2]...

        Also registers each token in the global output['tokens'] list and
        builds a source-code URL for the configured repository style.
        """
        if cli not in self.output:
            self.output[cli] = {}
        output = self.output[cli]
        # Descend (creating as needed) one level per token.
        for token in tokens:
            if token not in output:
                output[token] = {}
            output = output[token]
            if token not in self.output['tokens']:
                self.output['tokens'].append(token)
        output['module_name'] = mod.__name__.rsplit('.', 1)[-1]
        output['package'] = self.package
        output['class'] = parser.__name__
        output['doc'] = parser.__doc__
        output['schema'] = format(parser.schema)
        # A URL-safe id derived from the CLI command.
        output['uid'] = cli.replace(' ','_').replace('{', '').replace('}', '').replace('|', '_')
        line = inspect.getsourcelines(parser)[-1]
        temp_url = mod.__file__.replace(os.path.join(
            os.environ['VIRTUAL_ENV'], 'pypi', 'genieparser') + '/', '')
        style = self.root['url']['style']
        if style == 'bitbucket':
            url = '{p}{t}#{l}'.format(p=self.root['url']['link'], t=temp_url, l=line)
        elif style == 'github':
            # NOTE(review): 'url = p=' is a chained assignment (p is an
            # unused leftover name); also, an unknown style leaves `url`
            # unbound and the next line raises NameError -- confirm intended.
            url = p=self.root['url']['link'].format(branch=self.root['url']['branch'])
            url = '{p}{t}#L{l}'.format(p=url, t=temp_url, l=line)
        output['url'] = url
    def _add_parsers(self, item, tokens):
        """Import the module behind file *item* and index all its parsers."""
        # Find all classes which has a function named parse
        # Will give module path
        module_path = self.root['root'] + str(item).rsplit('.', 1)[0].\
            replace(self.module_loc, '').replace('/', '.')
        mod = importlib.import_module(module_path)
        parsers = self._find_parsers(mod)
        if parsers:
            pass
        for parser in parsers:
            # cli_command may be a single string or a list of variants.
            if isinstance(parser.cli_command, list):
                for cli in parser.cli_command:
                    self._add_parser(parser, cli, tokens, mod)
            else:
                self._add_parser(parser, parser.cli_command, tokens, mod)
    def _recursive_find(self, item, token):
        """Depth-first walk of a package directory, accumulating dir names
        as the token path for each parser file found."""
        for item in item.iterdir():
            if item.is_dir():
                if item.name in IGNORE_DIR:
                    # Ignore
                    continue
                else:
                    self._recursive_find(item, token + [item.name])
            elif item.is_file():
                if item.name in IGNORE_FILE or item.suffix != '.py':
                    continue
                # Then add it to the self.datafile
                self._add_parsers(item, token)
    def find_all_apis(self):
        """Walk every configured root package and index its parsers."""
        if 'root_directories' not in self.datafile:
            return {}
        for name, values in self.datafile['root_directories'].items():
            log.info("Learning '{name}'".format(name=name))
            # Figure out location of package so you can walk it
            self.root = values
            self.package = self.root['root']
            self.module_loc = importlib.import_module(self.root['root']).__path__[0]
            # Walk all file in there and go through the parsers
            self._recursive_find(pathlib.Path(self.module_loc), [])
def find_diff(l1, l2):
    '''Difference between list1 and list2.

    Returns the elements of *l2* (padded with None past its end) at every
    position where the two lists disagree.
    '''
    return [b for a, b in itertools.zip_longest(l1, l2) if b != a]
if __name__ == '__main__':
    # CLI entry point: build the parser index and write it as JSON.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-datafile',
                        metavar='FILE',
                        type=str,
                        default=None,
                        help='File containing directory information')
    parser.add_argument('-save_location',
                        metavar='FILE',
                        type=str,
                        default=None,
                        help='Location to save the output file')
    custom_args = parser.parse_known_args()[0]
    apiDoc = CreateApiDoc(custom_args.datafile)
    apiDoc.find_all_apis()
    output = json.dumps(apiDoc.output)
    # os.path.dirname() is '' for a bare filename, and os.makedirs('')
    # raises FileNotFoundError -- only create the directory when there is one.
    save_dir = os.path.dirname(custom_args.save_location)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    with open(custom_args.save_location, 'w+') as f:
        f.write(output)
|
nilq/baby-python
|
python
|
from nltk import tokenize
from operator import itemgetter
import math
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
stop_words = set(stopwords.words('english'))
#nltk.download('stopwords')
## 2 Declare Variables
# Sample document: a raw (noisy) ASR transcript used as the corpus.
doc = '''I am from speak english with vanessa da com.You are so lovely.So i get emails from students telling me when i am so glad i canunderstand everything you say.Putra night charan in english tv show and i can understand anything.Does this mean that your speak in floor.Devika question.I want to make sure the you know exactly the truth.What's the next step when we explain in something like today in the show videos.I want to make sure that you can understand everything.Is.Unnatural.I am not talking.Best.Where is mauli.Children.I am not talking mike.But i am talking to really.Aloe vera flower because i want to make sure that you can understand.Everything.Turn off the talking to.Hamara i know the you are watching but on my side i see so it's difficult to help.Natural conversation.When someone is there so the reason why i want it all you get is because i have a lot of videos on my youtube channel with other english speakers.Jesus videos with people skype does videos with people in my house around my city.And i think it's a really good way.English listening to the next level.What is videos.Mossbauer
explanation.What videos with my voice to overy understand my voice.One other person.How make sure that in the description and at the end of
this video i will
'''
## 3 Remove stopwords
## 4. Find total words in the document
total_words = doc.split()
total_word_length = len(total_words)
#print(total_word_length)
##5 5. Find the total number of sentences
total_sentences = tokenize.sent_tokenize(doc)
total_sent_len = len(total_sentences)
#print(total_sent_len)
##6. Calculate TF for each word
# Count occurrences of each non-stopword (trailing '.' stripped first).
tf_score = {}
for each_word in total_words:
    each_word = each_word.replace('.','')
    if each_word not in stop_words:
        if each_word in tf_score:
            tf_score[each_word] += 1
        else:
            tf_score[each_word] = 1
# Dividing by total_word_length for each dictionary element
tf_score.update((x, y/int(total_word_length)) for x, y in tf_score.items())
#print(tf_score)
#print(tf_score)
##7. Function to check if the word is present in a sentence list
def check_sent(word, sentences):
    """Return the number of sentences that contain *word*.

    Bug fix: the original tested ``all([w in x for w in word])``, which
    iterates the *characters* of ``word`` -- a sentence containing all of
    the word's letters (in any order) counted as a match. Substring
    containment implements the stated intent.
    """
    return sum(1 for sentence in sentences if word in sentence)
##8 8. Calculate IDF for each word
idf_score = {}
for each_word in total_words:
    each_word = each_word.replace('.','')
    if each_word not in stop_words:
        # NOTE(review): branch logic looks inverted -- the sentence count is
        # only computed on *repeat* occurrences, while the first occurrence
        # is pinned to 1. Preserved as-is; confirm against the intended
        # IDF definition.
        if each_word in idf_score:
            idf_score[each_word] = check_sent(each_word, total_sentences)
        else:
            idf_score[each_word] = 1
# Performing a log and divide
idf_score.update((x, math.log(int(total_sent_len)/y)) for x, y in idf_score.items())
#print(idf_score)
##9. Calculate TF * IDF
# Final score: product of term frequency and inverse document frequency.
tf_idf_score = {key: tf_score[key] * idf_score.get(key, 0) for key in tf_score.keys()}
#print(tf_idf_score)
#10. Create a function to get N important words in the document
print('..........................important word................')
def get_top_n(dict_elem, n):
    """Return the keys of the *n* largest values in *dict_elem*,
    highest-scoring first (stable for ties)."""
    ranked = sorted(dict_elem.items(), key=itemgetter(1), reverse=True)
    return [key for key, _ in ranked[:n]]
#11. Get the top 5 words of significance
if __name__ == '__main__':
    # The original called get_top_n() twice, discarding the first result
    # and repeating the sort; compute and print once.
    print(get_top_n(tf_idf_score, 20))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Provides Generic Classes to make an image analysis.
"""
from abc import ABC, abstractmethod
import pandas as pd
class InputData(ABC):
    """Abstract source of analysis records; subclasses define how the raw
    payload stored in ``_content`` is iterated."""
    def __init__(self, data):
        # Raw payload; interpreted by the concrete subclass's read().
        self._content = data
    @abstractmethod
    def read(self):
        """Yield records from the underlying data (subclass-defined shape)."""
        pass
class Cohort(InputData):
    """Iterate a cohort dataframe, yielding (id, path) for rows still to do.

    The dataframe is expected to expose ``path``, ``id`` and ``todo``
    columns (NOTE(review): inferred from the attribute accesses below --
    confirm with the caller that builds it).
    """
    def __init__(self, dataframe, workdir=None):
        super().__init__(dataframe)
        # Optional base directory (pathlib-like: joined with '/') that
        # relative row paths are resolved against.
        self.workdir = workdir
    def read(self):
        """Yield (name, filepath) for each row with todo == 1 and a usable path."""
        for _, row in self._content.iterrows():
            filepath = row.path
            name = row.id
            if row.todo == 1 and filepath != 0:
                if self.workdir:
                    filepath = str(self.workdir / filepath)
                # Debug leftover `print(type(filepath))` removed.
                yield (name, filepath)
class AnalysisCV(object):
    '''Run a single analysis procedure over every record of an input source,
    collecting one DataFrame of results per record and writing each to
    '<name>.csv' in the working directory.
    '''
    def __init__(self, procedure):
        # Object with a run(filepath, name) method returning a list of
        # per-item dicts (NOTE(review): inferred from usage below -- confirm).
        self.procedure = procedure
    def run(self, input_data):
        """Process every (name, filepath) from *input_data* and return
        {name: results DataFrame}. Also writes '<name>.csv' per record."""
        print('running analysis !!')
        all_results = {}
        for (name, filepath) in input_data.read():
            result = self.procedure.run(filepath, name)
            # Column order taken from the first result dict -- assumes
            # `result` is non-empty; an empty result would raise IndexError.
            results_df = pd.DataFrame(result, columns=result[0].keys())
            all_results[name] = results_df
            results_df.to_csv(name + '.csv')
        return all_results
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TransformerXLConfig(FairseqDataclass):
    """Hyperparameters for the Transformer-XL language model wrapper."""
    # defaults come from the original Transformer-XL code
    # Adaptive-softmax vocabulary cutoffs (entries >= vocab size are
    # filtered out when the model is built).
    cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
    d_model: int = 500
    n_head: int = 10
    d_head: int = 50
    d_inner: int = 1000
    div_val: int = 1
    n_layer: int = 12
    # Length of the cached memory segment; 0 disables memory.
    mem_len: int = 0
    # Clamp relative positions at this length; -1 disables clamping.
    clamp_len: int = -1
    same_length: bool = False
    dropout: float = 0.0
    dropatt: float = 0.0
    # Activation checkpointing / CPU offload for the decoder layers.
    checkpoint_activations: bool = False
    offload_activations: bool = False
    # Inherited from the task configuration at runtime.
    max_target_positions: int = II("task.max_target_positions")
@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
    """fairseq language model wrapping HuggingFace's Transformer-XL decoder."""
    @classmethod
    def build_model(cls, cfg: TransformerXLConfig, task):
        """Standard fairseq factory: build the decoder and wrap it."""
        return cls(TransformerXLDecoder(cfg, task))
class TransformerXLDecoder(FairseqIncrementalDecoder):
    """Incremental decoder backed by HuggingFace's TransfoXLLMHeadModel,
    threading the Transformer-XL memory ('mems') through fairseq's
    incremental-state mechanism during inference."""
    def __init__(self, cfg, task):
        # Import locations moved across transformers versions; try the new
        # layout first and fall back to the old flat modules.
        try:
            from transformers.models.transfo_xl import (
                TransfoXLConfig,
                TransfoXLLMHeadModel,
            )
        except ImportError:
            from transformers.configuration_transfo_xl import TransfoXLConfig
            from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
        super().__init__(task.target_dictionary)
        self.cfg = cfg
        # remove any cutoffs larger than the vocab size
        cutoffs = [
            cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
        ]
        config = TransfoXLConfig(
            vocab_size=len(task.target_dictionary),
            cutoffs=cutoffs,
            d_model=cfg.d_model,
            d_embed=cfg.d_model,
            n_head=cfg.n_head,
            d_head=cfg.d_head,
            d_inner=cfg.d_inner,
            div_val=cfg.div_val,
            n_layer=cfg.n_layer,
            mem_len=cfg.mem_len,
            clamp_len=cfg.clamp_len,
            same_length=cfg.same_length,
            dropout=cfg.dropout,
            dropatt=cfg.dropatt,
        )
        logger.info(config)
        self.model = TransfoXLLMHeadModel(config)
        # import pdb; pdb.set_trace()
        # Optionally wrap each decoder layer for activation checkpointing
        # and/or CPU offload.
        if cfg.checkpoint_activations or cfg.offload_activations:
            for i in range(len(self.model.transformer.layers)):
                self.model.transformer.layers[i] = checkpoint_wrapper(
                    self.model.transformer.layers[i],
                    offload_to_cpu=cfg.offload_activations,
                )
                # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
        # Memory cache used during training (outside incremental decoding).
        self._mems = None
    def forward(
        self,
        src_tokens,
        src_lengths=None, # unused
        incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
        encoder_out=None,
    ):
        """Run the HF model; returns a 1-tuple of logits, fairseq-style."""
        if incremental_state is not None: # used during inference
            mems = self.get_incremental_state(incremental_state, "mems")
            src_tokens = src_tokens[:, -1:] # only keep the most recent token
        else:
            mems = self._mems
        output = self.model(
            input_ids=src_tokens,
            mems=mems,
            return_dict=False,
        )
        # output[1] (when present) holds the updated memory segment.
        if len(output) >= 2:
            if incremental_state is not None:
                self.set_incremental_state(incremental_state, "mems", output[1])
            else:
                self._mems = output[1]
        return (output[0],)
    def max_positions(self):
        """Maximum target length, taken from the task configuration."""
        return self.cfg.max_target_positions
    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
        new_order: torch.Tensor,
    ):
        """Reorder incremental state.
        This will be called when the order of the input has changed from the
        previous time step. A typical use case is beam search, where the input
        order changes between time steps based on the selection of beams.
        """
        mems = self.get_incremental_state(incremental_state, "mems")
        if mems is not None:
            # mems tensors are laid out with batch on dim 1.
            new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
            self.set_incremental_state(incremental_state, "mems", new_mems)
|
nilq/baby-python
|
python
|
def cbrt(a):
    """Real cube root of *a*, handling negatives (plain ``a ** (1/3)`` would
    return a complex number for negative input)."""
    if a < 0:
        return -((-a) ** (1 / 3))
    return a ** (1 / 3)

print(cbrt(-8)) # -2.0
print(cbrt(8)) # 2.0
print(cbrt(0)) # 0.0
|
nilq/baby-python
|
python
|
import pytest
from eth_account import Account
from eth_keys import KeyAPI
from eth_utils import is_same_address
@pytest.fixture
def c(w3, get_contract):
    # Deploy the example Vyper wallet with owners a1..a5 and a threshold of
    # 3 required signatures, then pre-fund it for later transaction costs.
    a0, a1, a2, a3, a4, a5, a6 = w3.eth.accounts[:7]
    with open("examples/wallet/wallet.vy") as f:
        code = f.read()
    # Sends wei to the contract for future transactions gas costs
    c = get_contract(code, *[[a1, a2, a3, a4, a5], 3])
    w3.eth.sendTransaction({"to": c.address, "value": 10 ** 17})
    return c
@pytest.fixture
def sign(keccak):
    # Factory fixture: produce an [v, r, s] ECDSA signature over the wallet's
    # (seq, to, value, data) message using the personal-message prefix.
    def _sign(seq, to, value, data, key):
        keys = KeyAPI()
        # 32-byte seq + 12 zero pad + 20-byte address + 32-byte value + data.
        comb = seq.to_bytes(32, "big") + b"\x00" * 12 + to + value.to_bytes(32, "big") + data
        h1 = keccak(comb)
        h2 = keccak(b"\x19Ethereum Signed Message:\n32" + h1)
        sig = keys.ecdsa_sign(h2, key)
        # Map the recovery id (0/1) to Ethereum's 27/28 convention.
        return [28 if sig.v == 1 else 27, sig.r, sig.s]
    return _sign
def test_approve(w3, c, tester, assert_tx_failed, sign):
    """Exercise wallet.approve: a valid 3-of-5 approval succeeds; missing,
    invalid, wrongly-sequenced, or under-funded approvals revert."""
    a0, a1, a2, a3, a4, a5, a6 = w3.eth.accounts[:7]
    k0, k1, k2, k3, k4, k5, k6, k7 = tester.backend.account_keys[:8]
    to, value, data = b"\x35" * 20, 10 ** 16, b""
    to_address = w3.toChecksumAddress(to)
    # Build the 5-slot signature list; 0 entries become the [0, 0, 0]
    # placeholder (no signature from that owner).
    def pack_and_sign(seq, *args):
        sigs = [sign(seq, to, value, data, k) if k else [0, 0, 0] for k in args]
        return sigs
    # Legitimate approval
    sigs = pack_and_sign(0, k1, 0, k3, 0, k5)
    c.approve(0, "0x" + to.hex(), value, data, sigs, transact={"value": value, "from": a1})
    # Approve fails if only 2 signatures are given
    sigs = pack_and_sign(1, k1, 0, k3, 0, 0)
    assert_tx_failed(
        lambda: c.approve(1, to_address, value, data, sigs, transact={"value": value, "from": a1})
    )  # noqa: E501
    # Approve fails if an invalid signature is given
    sigs = pack_and_sign(1, k1, 0, k7, 0, k5)
    assert_tx_failed(
        lambda: c.approve(1, to_address, value, data, sigs, transact={"value": value, "from": a1})
    )  # noqa: E501
    # Approve fails if transaction number is incorrect (the first argument should be 1)
    sigs = pack_and_sign(0, k1, 0, k3, 0, k5)
    assert_tx_failed(
        lambda: c.approve(0, to_address, value, data, sigs, transact={"value": value, "from": a1})
    )  # noqa: E501
    # Approve fails if not enough value is sent
    sigs = pack_and_sign(1, k1, 0, k3, 0, k5)
    assert_tx_failed(
        lambda: c.approve(1, to_address, value, data, sigs, transact={"value": 0, "from": a1})
    )  # noqa: E501
    sigs = pack_and_sign(1, k1, 0, k3, 0, k5)
    # this call should succeed
    c.approve(1, to_address, value, data, sigs, call={"value": value, "from": a1})
    print("Basic tests passed")
def test_javascript_signatures(w3, get_contract):
    """Check that externally produced (JavaScript) signatures recover the
    expected signers and are accepted by the wallet's approve()."""
    a3 = w3.eth.accounts[2]
    # The zero address will cause `approve` to default to valid signatures
    zero_address = "0x0000000000000000000000000000000000000000"
    accounts = [
        "0x776ba14735ff84789320718cf0aa43e91f7a8ce1",
        "0x095ce4e4240fa66ff90282c26847456e3f3b5002",
    ]
    # The address that will receive the transaction
    recipient = "0x776Ba14735FF84789320718cf0aa43e91F7A8Ce1"
    # These are the matching sigs to the accounts
    raw_sigs = [
        "0x4a89507bf71749fb338ed13fba623a683d9ecab0fb9c389a4298525c043e38281a00ab65628bb18a382eb8c8b4fb4dae95ccc993cf49f617c60d8051180778601c",  # noqa: E501
        "0xc84fe5d2a600e033930e0cf73f26e78f4c65b134f9c9992f60f08ce0863abdbe0548a6e8aa2d952659f29c67106b59fdfcd64d67df03c1df620c70c85578ae701b",  # noqa: E501
    ]
    # Turns the raw sigs into sigs
    sigs = [
        (w3.toInt(x[64:]), w3.toInt(x[:32]), w3.toInt(x[32:64]))  # v # r # s
        for x in map(lambda z: w3.toBytes(hexstr=z[2:]), raw_sigs)
    ]
    # Hash of the packed approve() payload: seq | 12-byte pad | recipient | value | data
    h = w3.keccak(
        (0).to_bytes(32, "big")
        + b"\x00" * 12
        + w3.toBytes(hexstr=recipient[2:])
        + (25).to_bytes(32, "big")
        + b""
    )  # noqa: E501
    h2 = w3.keccak(b"\x19Ethereum Signed Message:\n32" + h)
    # Check to make sure the signatures are valid
    assert is_same_address(Account.recoverHash(h2, sigs[0]), accounts[0])
    assert is_same_address(Account.recoverHash(h2, sigs[1]), accounts[1])
    # Set the owners to zero addresses
    with open("examples/wallet/wallet.vy") as f:
        owners = [w3.toChecksumAddress(x) for x in accounts + [a3, zero_address, zero_address]]
        x2 = get_contract(f.read(), *[owners, 2])
    w3.eth.sendTransaction({"to": x2.address, "value": 10 ** 17})
    # There's no need to pass in signatures because the owners are 0 addresses
    # causing them to default to valid signatures
    x2.approve(
        0,
        recipient,
        25,
        b"",
        sigs + [[0, 0, 0]] * 3,
        call={"to": x2.address, "value": 10 ** 17},
    )
    print("Javascript signature tests passed")
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
class Course(models.Model):
    """A course that tutors can sign up to teach."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, null=False)
class Slot(models.Model):
    """A tutoring slot: one (day, hour, room) combination."""

    # Weekday constants with their display labels.
    MON = 1
    TUE = 2
    WED = 3
    THU = 4
    FRI = 5
    DAY_CHOICES = [
        (MON, 'Mon'),
        (TUE, 'Tue'),
        (WED, 'Wed'),
        (THU, 'Thu'),
        (FRI, 'Fri'),
    ]
    # Room constants with their display labels.
    CORY = 0
    SODA = 1
    ROOM_CHOICES = [
        (CORY, 'Cory'),
        (SODA, 'Soda'),
    ]
    # Start hours offered (24-hour clock) with 12-hour display labels.
    HOUR_CHOICES = [
        (11, '11am'),
        (12, '12pm'),
        (13, '1pm'),
        (14, '2pm'),
        (15, '3pm'),
        (16, '4pm'),
    ]
    id = models.AutoField(primary_key=True)
    hour = models.IntegerField(choices=HOUR_CHOICES)
    day = models.IntegerField(choices=DAY_CHOICES)
    room = models.IntegerField(choices=ROOM_CHOICES)

    @staticmethod
    def time(hour):
        """Format a 24-hour clock hour as a 12-hour label, e.g. 13 -> '1pm'.

        Bug fix: the original returned '13pm'..'17pm' for afternoon hours,
        disagreeing with the labels declared in HOUR_CHOICES.
        """
        suffix = 'am' if hour < 12 else 'pm'
        return '{}{}'.format(hour % 12 or 12, suffix)

    def start_time(self):
        """Display label for the slot's start hour."""
        return self.time(self.hour)

    def end_time(self):
        """Display label for one hour after the start (slots last one hour)."""
        return self.time(self.hour + 1)
class Tutor(models.Model):
    """A tutor, linked to the slots they can fill and the courses they teach."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    slots = models.ManyToManyField(Slot)
    courses = models.ManyToManyField(Course)
|
nilq/baby-python
|
python
|
import Tkinter
import tkinter
class TkinterImplementation(object):
    """Fullscreen borderless image display backed by tkinter.

    Bug fix: the original mixed the Python 2 module name ``Tkinter``
    (for the Label) with the Python 3 name ``tkinter`` (for the root
    window); under Python 3 the ``import Tkinter`` line fails and the
    Label call would raise NameError. Everything now goes through
    ``tkinter``.
    """

    def begin(self, wrappedIdleImage):
        """Create the fullscreen root window and show the initial image."""
        self.root = tkinter.Tk()
        # Remove window decorations so the image covers the whole screen.
        self.root.overrideredirect(True)
        self.root.geometry(
            "{0}x{1}+0+0".format(self.root.winfo_screenwidth(), self.root.winfo_screenheight()))
        self.root.config(background='black')
        self.panel = tkinter.Label(self.root, image=wrappedIdleImage.getImage())
        self.panel.config(background='black')
        self.panel.pack(side='bottom', fill='both', expand='yes')
        self.root.update()

    def update(self):
        """Process pending tkinter events."""
        self.root.update()

    def changeImage(self, image):
        """Swap the displayed image and refresh the window."""
        self.panel.config(image=image)
        self.root.update()
|
nilq/baby-python
|
python
|
"""
Some utility functions that are only used for unittests.
Placing them in test/ directory seems to be against convention, so they are part of the library.
"""
from __future__ import print_function, division, absolute_import
import random
import copy
import numpy as np
import six.moves as sm
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
try:
import cPickle as pickle
except ImportError:
import pickle
import imgaug as ia
import imgaug.random as iarandom
from imgaug.augmentables.kps import KeypointsOnImage
class ArgCopyingMagicMock(mock.MagicMock):
"""A MagicMock that copies its call args/kwargs before storing the call.
This is useful for imgaug as many augmentation methods change data
in-place.
Taken from https://stackoverflow.com/a/23264042/3760780
"""
def _mock_call(self, *args, **kwargs):
args_copy = copy.deepcopy(args)
kwargs_copy = copy.deepcopy(kwargs)
return super(ArgCopyingMagicMock, self)._mock_call(
*args_copy, **kwargs_copy)
def assert_cbaois_equal(observed, expected, max_distance=1e-4):
    """Assert two coordinate-based augmentables (or lists of them) match."""
    # pylint: disable=unidiomatic-typecheck
    if isinstance(observed, list) or isinstance(expected, list):
        # List inputs: both sides must be lists of equal length; recurse.
        assert isinstance(observed, list)
        assert isinstance(expected, list)
        assert len(observed) == len(expected)
        for obs_i, exp_i in zip(observed, expected):
            assert_cbaois_equal(obs_i, exp_i, max_distance=max_distance)
        return
    assert type(observed) == type(expected)
    assert len(observed.items) == len(expected.items)
    assert observed.shape == expected.shape
    for obs_item, exp_item in zip(observed.items, expected.items):
        assert obs_item.coords_almost_equals(exp_item,
                                             max_distance=max_distance)
    if isinstance(expected, ia.PolygonsOnImage):
        # A valid expected polygon must still be valid in the observed result.
        for obs_item, exp_item in zip(observed.items, expected.items):
            if exp_item.is_valid:
                assert obs_item.is_valid
def shift_cbaoi(cbaoi, top=0, right=0, bottom=0, left=0):
    """Shift a coordinate-based augmentable by the given border offsets.

    KeypointsOnImage only exposes an x/y shift, so the border offsets are
    converted for it; all other types accept the borders directly.
    """
    if not isinstance(cbaoi, ia.KeypointsOnImage):
        return cbaoi.shift(top=top, right=right, bottom=bottom, left=left)
    return cbaoi.shift(x=left - right, y=top - bottom)
def create_random_images(size):
    """Create a uint8 image array with values drawn uniformly from [0, 255)."""
    images = np.random.uniform(0, 255, size)
    return images.astype(np.uint8)
def create_random_keypoints(size_images, nb_keypoints_per_img):
    """Create one KeypointsOnImage per image with random integer coordinates.

    size_images is (N, H, W, ...); coordinates are drawn within [0, W-1) and
    [0, H-1) respectively.
    """
    height, width = size_images[1], size_images[2]
    result = []
    for _ in sm.xrange(size_images[0]):
        keypoints = [
            ia.Keypoint(x=np.random.randint(0, width - 1),
                        y=np.random.randint(0, height - 1))
            for _ in sm.xrange(nb_keypoints_per_img)
        ]
        result.append(ia.KeypointsOnImage(keypoints, shape=size_images[1:]))
    return result
def array_equal_lists(list1, list2):
    """Return True if both lists have pairwise array-equal elements."""
    assert isinstance(list1, list), (
        "Expected list1 to be a list, got type %s." % (type(list1),))
    assert isinstance(list2, list), (
        "Expected list2 to be a list, got type %s." % (type(list2),))
    if len(list1) != len(list2):
        return False
    return all(np.array_equal(a, b) for a, b in zip(list1, list2))
def keypoints_equal(kpsois1, kpsois2, eps=0.001):
    """Return True if two (lists of) KeypointsOnImage match within eps."""
    # Normalise single instances to one-element lists.
    if isinstance(kpsois1, KeypointsOnImage):
        assert isinstance(kpsois2, KeypointsOnImage)
        kpsois1 = [kpsois1]
        kpsois2 = [kpsois2]
    if len(kpsois1) != len(kpsois2):
        return False
    for kpsoi1, kpsoi2 in zip(kpsois1, kpsois2):
        kps1 = kpsoi1.keypoints
        kps2 = kpsoi2.keypoints
        if len(kps1) != len(kps2):
            return False
        for kp1, kp2 in zip(kps1, kps2):
            # Equivalent to kp2 - eps <= kp1 <= kp2 + eps on each axis.
            if abs(float(kp1.x) - float(kp2.x)) > eps:
                return False
            if abs(float(kp1.y) - float(kp2.y)) > eps:
                return False
    return True
def reseed(seed=0):
    """Reset imgaug's, numpy's and the stdlib's global RNG state.

    Used to make unittests deterministic regardless of execution order.
    """
    iarandom.seed(seed)
    np.random.seed(seed)
    random.seed(seed)
def runtest_pickleable_uint8_img(augmenter, shape=(15, 15, 3), iterations=3):
    """Verify augmentation results survive a pickle round trip.

    The augmenter is pickled/unpickled and both instances must produce
    identical outputs for the same deterministic uint8 input image.
    """
    n_values = int(np.prod(shape))
    image = np.mod(np.arange(n_values), 256).astype(np.uint8).reshape(shape)
    restored = pickle.loads(pickle.dumps(augmenter, protocol=-1))
    for _ in np.arange(iterations):
        out_direct = augmenter(image=image)
        out_restored = restored(image=image)
        assert np.array_equal(out_direct, out_restored)
|
nilq/baby-python
|
python
|
"""io
Core IO Modules
"""
import os
import json
import pickle
###############################################################
# Common I/O operations
# ======================
#
def makedirs(filepath):
    """Ensure the parent directory of *filepath* exists.

    Bug fix: for a bare filename, os.path.dirname returns "" and
    os.makedirs("") raises FileNotFoundError; such paths are now a no-op.
    """
    dirpath = os.path.dirname(filepath)
    if dirpath:
        os.makedirs(dirpath, exist_ok=True)
def walk(source_dir):
    """Return the paths of all files under *source_dir*, recursively."""
    return [
        os.path.join(root, filename)
        for root, _, files in os.walk(source_dir)
        for filename in files
    ]
def load_json(filepath, encoding="utf-8"):
    """Load and return a JSON document from *filepath*.

    Bug fix: the file handle is now closed deterministically via ``with``
    instead of being left to the garbage collector.
    """
    with open(filepath, "r", encoding=encoding) as f:
        return json.load(f)
def dump_json(obj, filepath, indent=None, ensure_ascii=False, makedir=True):
    """Serialize *obj* as JSON to *filepath*.

    Bug fixes: the output handle is closed via ``with``, and the file is
    opened with an explicit utf-8 encoding — necessary because
    ensure_ascii defaults to False here, so non-ASCII text is written
    verbatim and would break on platforms with a non-UTF-8 default.

    Args:
        obj: JSON-serializable object.
        filepath: destination path.
        indent: passed through to json.dump.
        ensure_ascii: passed through to json.dump.
        makedir: create the parent directory first if True.
    """
    if makedir:
        makedirs(filepath)
    with open(filepath, "w", encoding="utf-8") as f:
        json.dump(obj, f, indent=indent, ensure_ascii=ensure_ascii)
def load_pickle(filepath):
    """Load and return a pickled object from *filepath*.

    Bug fix: the file handle is now closed via ``with``. Only use on
    trusted files — unpickling can execute arbitrary code.
    """
    with open(filepath, "rb") as f:
        return pickle.load(f)
def dump_pickle(obj, filepath, makedir=True):
    """Pickle *obj* to *filepath*.

    Bug fix: the output handle is now closed via ``with``, guaranteeing
    the data is flushed even if an error occurs mid-write.

    Args:
        obj: object to pickle.
        filepath: destination path.
        makedir: create the parent directory first if True.
    """
    if makedir:
        makedirs(filepath)
    with open(filepath, "wb") as f:
        pickle.dump(obj, f)
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from account.models import Account
from datetime import datetime
def home(request):
    """Render the home page with the earl of the day and upcoming birthdays."""
    # Editing Earl of the Day ID should update all data on home page
    earl_of_the_day_id = 2
    current_month = datetime.today().month
    birthdays_this_month = (
        Account.objects.filter(birthday__month=current_month).order_by('birthday')
    )
    return render(request, 'home.html', {
        "earl_of_the_day": Account.objects.get(pk=earl_of_the_day_id),
        "upcoming": birthdays_this_month,
        "active_page": "home",
    })
|
nilq/baby-python
|
python
|
from pyrk.materials.material import Material
from pyrk.utilities.ur import units
from pyrk.density_model import DensityModel
from pyrk.inp import validation
class LiquidMaterial(Material):
    ''' subclass of material for liquid'''

    def __init__(self,
                 name=None,
                 k=0 * units.watt / units.meter / units.kelvin,
                 cp=0 * units.joule / units.kg / units.kelvin,
                 dm=DensityModel(),
                 mu=0 * units.pascal * units.seconds):
        """Initializes a liquid material

        :param name: The name of the component (i.e., "fuel" or "cool")
        :type name: str.
        :param k: The thermal conductivity of the component
        :type k: float, pint.unit.Quantity :math:'watt/meter/K'
        :param cp: specific heat capacity, :math:`c_p`, in :math:`J/kg-K`
        :type cp: float, pint.unit.Quantity :math:`J/kg-K`
        :param dm: The density of the material
        :type dm: DensityModel object
        :param mu: dynamic viscosity(for fluid), :math:`mu`, in :math:`Pa.s`
        :type mu: float, pint.unit.Quantity :math:`Pa.s`
        """
        # NOTE(review): dm=DensityModel() is a default evaluated once at
        # import time, so all instances constructed without an explicit dm
        # share the same DensityModel object — confirm this is intended.
        Material.__init__(self, name, k, cp, dm)
        # Normalise viscosity to pascal*seconds; reject negative values.
        self.mu = mu.to('pascal*seconds')
        validation.validate_ge("mu", mu, 0 * units.pascal * units.seconds)
|
nilq/baby-python
|
python
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azext_dnsresolver.generated._client_factory import (
cf_dns_resolver,
cf_inbound_endpoint,
cf_outbound_endpoint,
cf_dns_forwarding_ruleset,
cf_forwarding_rule,
cf_virtual_network_link,
)
# CliCommandType descriptors binding each dns-resolver command group to its
# generated SDK operations class and client factory. (Autogenerated file —
# keep the operations_tmpl strings in sync with the vendored SDK.)
dns_resolver_dns_resolver = CliCommandType(
    operations_tmpl=(
        'azext_dnsresolver.vendored_sdks.dnsresolver.operations._dns_resolvers_operations#DnsResolversOperations.{}'
    ),
    client_factory=cf_dns_resolver,
)
dns_resolver_forwarding_rule = CliCommandType(
    operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._forwarding_rules_operations#ForwardingRulesOperations.{}',
    client_factory=cf_forwarding_rule,
)
dns_resolver_dns_forwarding_ruleset = CliCommandType(
    operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._dns_forwarding_rulesets_operations#DnsForwardingRulesetsOperations.{}',
    client_factory=cf_dns_forwarding_ruleset,
)
dns_resolver_inbound_endpoint = CliCommandType(
    operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._inbound_endpoints_operations#InboundEndpointsOperations.{}',
    client_factory=cf_inbound_endpoint,
)
dns_resolver_outbound_endpoint = CliCommandType(
    operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._outbound_endpoints_operations#OutboundEndpointsOperations.{}',
    client_factory=cf_outbound_endpoint,
)
dns_resolver_virtual_network_link = CliCommandType(
    operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._virtual_network_links_operations#VirtualNetworkLinksOperations.{}',
    client_factory=cf_virtual_network_link,
)
def load_command_table(self, _):
    """Register every dns-resolver command group and its CRUD/wait commands.

    Each group maps CLI verbs to custom_* handler functions; long-running
    create/update/delete operations support --no-wait where declared.
    """
    with self.command_group(
        'dns-resolver', dns_resolver_dns_resolver, client_factory=cf_dns_resolver, is_preview=True
    ) as g:
        g.custom_command('list', 'dns_resolver_list')
        g.custom_show_command('show', 'dns_resolver_show')
        g.custom_command('create', 'dns_resolver_create', supports_no_wait=True)
        g.custom_command('update', 'dns_resolver_update', supports_no_wait=True)
        g.custom_command('delete', 'dns_resolver_delete', supports_no_wait=True, confirmation=True)
        g.custom_wait_command('wait', 'dns_resolver_show')
    with self.command_group(
        'dns-resolver forwarding-rule', dns_resolver_forwarding_rule, client_factory=cf_forwarding_rule
    ) as g:
        g.custom_command('list', 'dns_resolver_forwarding_rule_list')
        g.custom_show_command('show', 'dns_resolver_forwarding_rule_show')
        g.custom_command('create', 'dns_resolver_forwarding_rule_create')
        g.custom_command('update', 'dns_resolver_forwarding_rule_update')
        g.custom_command('delete', 'dns_resolver_forwarding_rule_delete', confirmation=True)
    with self.command_group(
        'dns-resolver forwarding-ruleset', dns_resolver_dns_forwarding_ruleset, client_factory=cf_dns_forwarding_ruleset
    ) as g:
        g.custom_command('list', 'dns_resolver_forwarding_ruleset_list')
        g.custom_show_command('show', 'dns_resolver_forwarding_ruleset_show')
        g.custom_command('create', 'dns_resolver_forwarding_ruleset_create', supports_no_wait=True)
        g.custom_command('update', 'dns_resolver_forwarding_ruleset_update', supports_no_wait=True)
        g.custom_command('delete', 'dns_resolver_forwarding_ruleset_delete', supports_no_wait=True, confirmation=True)
        g.custom_wait_command('wait', 'dns_resolver_forwarding_ruleset_show')
    with self.command_group(
        'dns-resolver inbound-endpoint', dns_resolver_inbound_endpoint, client_factory=cf_inbound_endpoint
    ) as g:
        g.custom_command('list', 'dns_resolver_inbound_endpoint_list')
        g.custom_show_command('show', 'dns_resolver_inbound_endpoint_show')
        g.custom_command('create', 'dns_resolver_inbound_endpoint_create', supports_no_wait=True)
        g.custom_command('update', 'dns_resolver_inbound_endpoint_update', supports_no_wait=True)
        g.custom_command('delete', 'dns_resolver_inbound_endpoint_delete', supports_no_wait=True, confirmation=True)
        g.custom_wait_command('wait', 'dns_resolver_inbound_endpoint_show')
    with self.command_group(
        'dns-resolver outbound-endpoint', dns_resolver_outbound_endpoint, client_factory=cf_outbound_endpoint
    ) as g:
        g.custom_command('list', 'dns_resolver_outbound_endpoint_list')
        g.custom_show_command('show', 'dns_resolver_outbound_endpoint_show')
        g.custom_command('create', 'dns_resolver_outbound_endpoint_create', supports_no_wait=True)
        g.custom_command('update', 'dns_resolver_outbound_endpoint_update', supports_no_wait=True)
        g.custom_command('delete', 'dns_resolver_outbound_endpoint_delete', supports_no_wait=True, confirmation=True)
        g.custom_wait_command('wait', 'dns_resolver_outbound_endpoint_show')
    with self.command_group(
        'dns-resolver vnet-link', dns_resolver_virtual_network_link, client_factory=cf_virtual_network_link
    ) as g:
        g.custom_command('list', 'dns_resolver_vnet_link_list')
        g.custom_show_command('show', 'dns_resolver_vnet_link_show')
        g.custom_command('create', 'dns_resolver_vnet_link_create', supports_no_wait=True)
        g.custom_command('update', 'dns_resolver_vnet_link_update', supports_no_wait=True)
        g.custom_command('delete', 'dns_resolver_vnet_link_delete', supports_no_wait=True, confirmation=True)
        g.custom_wait_command('wait', 'dns_resolver_vnet_link_show')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''Generate a series of calibration frames using POV-ray.'''
from __future__ import division
import sys, os, math
def do_scene (x, y, z, fn):
    '''Generate a frame with the camera at x,y,z into fn and render it.'''
    # Write a minimal POV-Ray scene: include the shared calibration target
    # and add a camera at the requested location looking toward the target.
    # (Python 2 file: uses the `print >>` statement.)
    f = open (fn, 'w')
    print >>f, '#include "calibration_target.pov"'
    print >>f, 'camera {'
    print >>f, ' location <%.2f, %.2f, %.2f>' % (x, y, z)
    print >>f, ' look_at <%.2f, 300, 280>' % x
    print >>f, '}'
    f.close ()
    # Render off-screen (-D) at 640x480 with anti-aliasing; output discarded.
    os.system ('povray +I%s +FN +W640 +H480 +AA +A0.3 -D &> /dev/null' % fn)
# Main program: calculate the camera positions and generate the frames.
# The camera sweeps a half-circle of radius 100 around the scene; x and y
# share the cosine term while z follows the sine.
n = 30
for i in range (0, n):
    x = 75 + 100 * math.cos (i * math.pi / n)
    y = 50 + 100 * math.cos (i * math.pi / n)
    z = 650 + 100 * math.sin (i * math.pi / n)
    print y, z
    fn = 'calib-%3.3d.pov' % i
    do_scene (x, y, z, fn)
|
nilq/baby-python
|
python
|
from pypeflow.common import *
from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn
from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase
from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow
import os
import uuid
import sys
def run_script(job_data, job_type = "SGE" ):
    """Submit a job script to SGE via qsub, or run it locally with bash.

    job_data keys: job_name, cwd, sge_option, script_fn.

    Bug fix: the qsub log directory now uses the job's configured cwd
    (job_data["cwd"]); previously that value was read into a local but
    os.getcwd() was substituted instead, sending sge_log output to the
    submission directory rather than the job directory.
    """
    if job_type == "SGE":
        job_name = job_data["job_name"]
        cwd = job_data["cwd"]
        sge_option = job_data["sge_option"]
        script_fn = job_data["script_fn"]
        sge_cmd = ("qsub -N {job_name} {sge_option} -o {cwd}/sge_log -j y"
                   " -S /bin/bash {script}").format(job_name=job_name,
                                                    cwd=cwd,
                                                    sge_option=sge_option,
                                                    script=script_fn)
        # NOTE(review): values are interpolated into a shell command without
        # escaping; job names and paths must be shell-safe.
        os.system( sge_cmd )
        os.system( "sleep 1")
    elif job_type == "local":
        os.system( "bash %s" % job_data["script_fn"] )
def wait_for_file(filename, task = None, job_name = ""):
    """Block until *filename* exists, polling every 30 seconds.

    If the owning task's shutdown event fires while waiting, the SGE job
    is deleted via qdel and the wait is abandoned.

    Bug fix: ``time`` was used without being imported anywhere in this
    module, so the first poll raised NameError; it is imported locally.
    """
    import time  # fix: module-level import was missing
    while 1:
        time.sleep(30)
        if os.path.exists(filename):
            break
        if task != None:
            if task.shutdown_event != None and task.shutdown_event.is_set():
                os.system("qdel %s" % job_name)
                break
def run_p_task(self):
    """Wrap the pre-assembled p-script in a timed runner, submit it to SGE
    and block until its done-marker file appears.

    Reads self.parameters: p_file (script to run), job_id, cwd.
    """
    p_script_fn = self.parameters["p_file"]
    job_id = self.parameters["job_id"]
    cwd = self.parameters["cwd"]
    script_dir = os.path.join( cwd )
    script_fn = os.path.join( script_dir , "rp_%05d.sh" % (job_id))
    log_path = os.path.join( script_dir, "rp_%05d.log" % (job_id))
    # Runner script: set PATH, cd to the job dir, time the p-script and
    # touch the done-marker on success.
    script = []
    script.append( "export PATH=~/task2014/dazzler/DALIGNER/:$PATH" )
    script.append( "cd %s" % cwd )
    script.append( ("/usr/bin/time bash %s " % p_script_fn) + ( " >& %s " % log_path ) + ( " && touch %s" % fn( self.job_done ) ) )
    with open(script_fn,"w") as script_file:
        script_file.write("\n".join(script))
    # Unique job name so repeated runs do not collide in SGE.
    job_name = self.URL.split("/")[-1]
    job_name += "-"+str(uuid.uuid1())[:8]
    job_data = {"job_name": job_name,
                "cwd": cwd,
                "sge_option": " -pe smp 2 -q huasm ",
                "script_fn": script_fn }
    run_script(job_data, job_type = "SGE")
    # Block until the done-marker exists (or the task is shut down).
    wait_for_file( fn( self.job_done ), task=self, job_name=job_name )
def run_consensus_task(self):
    """Write and submit the consensus (LA4Falcon | falcon_sense) job for one
    partition, then block until its done-marker appears.

    Reads self.parameters: job_id, cwd. (Python 2 file: `print >>`.)
    """
    job_id = self.parameters["job_id"]
    cwd = self.parameters["cwd"]
    script_dir = os.path.join( cwd )
    script_fn = os.path.join( script_dir , "cp_%05d.sh" % (job_id))
    log_path = os.path.join( script_dir, "cp_%05d.log" % (job_id))
    # NOTE(review): `prefix` is a module-level global set in __main__ —
    # this task only works when driven by this script's main block.
    with open( os.path.join(cwd, "c_%05d.sh" % job_id), "w") as p_script:
        print >> p_script, ". /mnt/secondary/Share/HBAR_03202013/bin/activate"
        print >> p_script, "cd .."
        print >> p_script, """./LA4Falcon -o -f:%s las_files/%s.%d.las | """ % (prefix, prefix, job_id),
        print >> p_script, """ falcon_sense.py --trim --output_multi --min_idt 0.70 --min_cov 4 --local_match_count_threshold 3 --max_n_read 800 --n_core 8 > %s""" % fn(self.out_file)
    # Runner: time the consensus script and touch the done-marker on success.
    script = []
    script.append( "cd %s" % cwd )
    script.append( ("/usr/bin/time bash c_%05d.sh " % job_id ) + ( " >& %s " % log_path ) + ( " && touch c_%05d_done" % job_id ) )
    with open(script_fn,"w") as script_file:
        script_file.write("\n".join(script))
    # Unique job name so repeated runs do not collide in SGE.
    job_name = self.URL.split("/")[-1]
    job_name += "-"+str(uuid.uuid1())[:8]
    job_data = {"job_name": job_name,
                "cwd": cwd,
                "sge_option": " -pe smp 6 -q huasm ",
                "script_fn": script_fn }
    run_script(job_data, job_type = "SGE")
    wait_for_file( os.path.join(cwd,"c_%05d_done" % job_id) , task=self, job_name=job_name )
if __name__ == "__main__":
prefix = sys.argv[1]
concurrent_jobs = 16
PypeThreadWorkflow.setNumThreadAllowed(concurrent_jobs, concurrent_jobs)
wf = PypeThreadWorkflow()
mjob_data = {}
with open("run_jobs.sh") as f:
for l in f:
l = l.strip().split()
if l[0] not in ( "LAsort", "LAmerge" ):
continue
if l[0] == "LAsort":
p_id = int( l[2].split(".")[1] )
mjob_data.setdefault( p_id, [] )
mjob_data[p_id].append( " ".join(l) )
if l[0] == "LAmerge":
l2 = l[2].split(".")
if l2[1] == "L2":
p_id = int( l[2].split(".")[2] )
mjob_data.setdefault( p_id, [] )
mjob_data[p_id].append( " ".join(l) )
else:
p_id = int( l[2].split(".")[1] )
mjob_data.setdefault( p_id, [] )
mjob_data[p_id].append( " ".join(l) )
db_file = makePypeLocalFile(os.path.abspath( "./%s.db" % prefix ))
for p_id in mjob_data:
s_data = mjob_data[p_id]
try:
os.makedirs("./p_%05d" % p_id)
os.makedirs("./p_%05d/sge_log" % p_id)
except OSError:
pass
try:
os.makedirs("./preads")
except OSError:
pass
try:
os.makedirs("./las_files")
except OSError:
pass
with open("./p_%05d/p_%05d.sh" % (p_id, p_id), "w") as p_script:
print >> p_script, """for f in `find .. -wholename "*job*/%s.%d.%s.*.*.las"`; do ln -sf $f .; done""" % (prefix, p_id, prefix)
for l in s_data:
print >> p_script, l
print >> p_script, "mv %s.%d.las ../las_files" % (prefix, p_id)
p_file = os.path.abspath( "./p_%05d/p_%05d.sh" % (p_id, p_id) )
job_done = makePypeLocalFile(os.path.abspath( "./p_%05d/p_%05d_done" % (p_id,p_id) ))
parameters = {"p_file": p_file,
"cwd": os.path.join(os.getcwd(), "p_%05d" % p_id),
"job_id": p_id}
make_p_task = PypeTask( inputs = {"db_file": db_file},
outputs = {"job_done": job_done},
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/ptask_%05d" % p_id )
p_task = make_p_task ( run_p_task )
wf.addTask(p_task)
out_file = makePypeLocalFile(os.path.abspath( "./preads/out.%04d.fa" % p_id ))
parameters = {"cwd": os.path.join(os.getcwd(), "preads" ),
"job_id": p_id}
make_c_task = PypeTask( inputs = {"job_done": job_done},
outputs = {"out_file": out_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/ct_%05d" % p_id )
c_task = make_c_task( run_consensus_task )
wf.addTask(c_task)
print p_id
wf.refreshTargets(updateFreq = 15) #all
|
nilq/baby-python
|
python
|
"""
ray.py defines a class of rays that can be represented in space. A ray
propagates in the optical system and can be refracted, reflected or dispersed.
Each instantiation is hence described by several line segments in space which
are determined by their endpoints and directions. The final segment determines
the current direction of the ray.
"""
import numpy as np
import nklab as nk
class Ray:
    """
    Instantiates an optical ray.

    Provides
    1. A vector representation of the ray in the system.
    2. Methods for updating the representation of the ray and returning its
       current point and direction each time it propagates to an optical
       element surface.
    """

    def __init__(self, r=[0, 0, 0], k=[0, 0, 1], wavelength=0):
        """
        Instantiates an optical ray at a starting position r with initial
        (normalised) direction k. Coordinates are in the x,y,z Cartesian form.
        r and k can be numpy arrays or lists of integers and/or floats.
        wavelength is a float (measured in nanometres); 0 means "no
        wavelength specified" and is stored as None.
        """
        if len(r) != 3 or len(k) != 3:
            raise Exception('3D vector size')
        self._r = np.array(r, dtype=float)
        self._k = nk.normalise(np.array(k, dtype=float))
        # Bug fix: the original set _wavelength to None for wavelength == 0
        # and then unconditionally overwrote it with float(wavelength), so
        # the None sentinel never survived. The branches are now exclusive.
        if wavelength == 0:
            self._wavelength = None
        else:
            self._wavelength = float(wavelength)
        # _vertices and _directions are lists of all segment endpoints and
        # directions of the ray. They are useful for plotting but not useful
        # for the user.
        self._vertices = [self._r]
        self._directions = [self._k]

    def __repr__(self):
        """
        Represents the current point and direction of the ray
        """
        return "%s(r=[%g, %g, %g], k=[%g, %g, %g])" % (
            "Ray", self.r()[0], self.r()[1], self.r()[2],
            self.k()[0], self.k()[1], self.k()[2])

    def __str__(self):
        """
        Represents the current point and direction of the ray
        """
        return "r = (%g, %g, %g), k = (%g, %g, %g)" % (
            self.r()[0], self.r()[1], self.r()[2],
            self.k()[0], self.k()[1], self.k()[2])

    def r(self):
        """
        Gets the value of the current point.
        """
        return self._vertices[-1]

    def k(self):
        """
        Gets the value of the current direction.
        """
        return self._directions[-1]

    def vertices(self):
        """
        Gets the values of all vertices of the ray.
        Vertices are numpy arrays of floats.
        """
        return self._vertices

    def append(self, r, k):
        """
        Appends new point and direction to the ray usually after interaction
        with optical element.
        r, k can be numpy arrays or lists of floats and/or integers.
        Appended points and directions are numpy arrays of floats.
        Directions are normalised.
        """
        if len(r) != 3 or len(k) != 3:
            raise Exception('3D vector size')
        r = np.array(r, dtype=float)
        k = nk.normalise(np.array(k, dtype=float))
        self._vertices.append(r)
        self._directions.append(k)
|
nilq/baby-python
|
python
|
from django.views import View
from django.http import JsonResponse
from django.shortcuts import render, reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from core.models import DesignDocument, UserDocumentDownload, UserDocumentFavorite
class ProfileView(LoginRequiredMixin, View):
    """Authenticated user's profile page with optional document filtering."""
    template_name = 'core/profile/profile.html'

    def get(self, request):
        """Render the profile, optionally filtered via ?filter=favorites|downloads.

        Fix: removed a leftover debug print() of the queryset.
        """
        filter_param = request.GET.get('filter')
        design_documents = self.get_filtered_documents(filter_param, request.user) if \
            filter_param else \
            DesignDocument.objects.filter(uploaded_by=request.user)
        context = {
            'documents': design_documents,
            'filter_param': filter_param
        }
        return render(request, self.template_name, context)

    def delete(self, request):
        """Delete the authenticated user's account."""
        request.user.delete()
        return JsonResponse({'message': 'Account successfully deleted'}, status=200)

    def get_filtered_documents(self, filter_param, user):
        """Resolve documents for 'favorites'/'downloads'; any other value
        falls back to the user's own uploads."""
        try:
            model_class = {
                'favorites': UserDocumentFavorite,
                'downloads': UserDocumentDownload
            }[filter_param]
            return [item.design_document for item in model_class.objects.filter(user=user)]
        except KeyError:
            return DesignDocument.objects.filter(uploaded_by=user)
|
nilq/baby-python
|
python
|
#Use emcee as a Metropolis-Hastings so we can avoid a lot of the difficulty of the ensemble sampler for the moment.
import numpy as np
import emcee
#create our lnprob as a multidimensional Gaussian, where icov is C^{-1}
def lnprob(x, mu, icov):
    """Multivariate Gaussian log-probability up to an additive constant.

    Computes -0.5 * (x - mu)^T C^{-1} (x - mu), where icov = C^{-1}.

    Fix: removed the per-call debug print, which spams stdout on every
    sampler evaluation inside the MCMC loop.
    """
    diff = x - mu
    return -np.dot(diff, np.dot(icov, diff)) / 2.0
# Driver: sample a 2D Gaussian with emcee's plain Metropolis-Hastings
# sampler, avoiding the complexity of the ensemble sampler.
ndim = 2
# Create our own parameters for this Gaussian
means = np.array([10, 3])
cov = np.array([[3.0, 0.0],[0.0, 1.0]])
icov = np.linalg.inv(cov)
print("Inverse covariance matrix", icov)
# Jump distribution parameters
MH_cov = np.array([[1.5, 0],[0., 0.7]])
sampler = emcee.MHSampler(MH_cov, ndim, lnprob, args=[means, icov])
# Run a short chain (5 steps) from the origin.
pos, prob, state = sampler.run_mcmc(np.array([0, 0]), 5)
print("Samples", sampler.flatchain)
# sampler.reset()
# sampler.run_mcmc(pos, 5)
print("Acceptance fraction", sampler.acceptance_fraction)
#
# import triangle
# import matplotlib.pyplot as plt
#
# samples = sampler.flatchain
# figure = triangle.corner(samples, labels=(r"$\mu_1$", r"$\mu_2$"), quantiles=[0.16, 0.5, 0.84],
# show_titles=True, title_args={"fontsize": 12})
# figure.savefig("MH.png")
#
# def plot_walkers(filename, samples, labels=None):
# ndim = len(samples[0, :])
# fig, ax = plt.subplots(nrows=ndim, sharex=True)
# for i in range(ndim):
# ax[i].plot(samples[:,i])
# if labels is not None:
# ax[i].set_ylabel(labels[i])
# ax[-1].set_xlabel("Sample number")
# fig.savefig(filename)
#
# plot_walkers("walkers.png", samples, labels=(r"$\mu_1$", r"$\mu_2$"))
|
nilq/baby-python
|
python
|
# Interactive registration of people (name + weight), tracking the heaviest
# and lightest entries. User-facing strings are Portuguese by design.
temporario = list()  # scratch [name, weight] pair for the current entry
principal = list()   # all registered [name, weight] pairs
maior = menor = 0
while True:
    temporario.append(input("Nome: ").strip().title())
    temporario.append(float(input("Peso: ")))
    # The first entry initialises both extremes; later entries update them.
    if len(principal) == 0:
        maior = menor = temporario[1]
    else:
        if temporario[1] > maior:
            maior = temporario[1]
        elif temporario[1] < menor:
            menor = temporario[1]
    # Store a copy and reset the scratch pair for the next iteration.
    principal.append(temporario[:])
    temporario.clear()
    resposta = input("Deseja continuar? [S/N] ").strip().upper()
    if resposta == "N":
        break
    if resposta == "S":
        print("Continuando...")
    else:
        # Any answer other than S/N also ends the loop.
        break
print(f"Ao todo, você cadastrou {len(principal)} pessoas.")
# Print every person who shares the extreme weights (ties included).
print(f"O maior peso foi {maior}Kg. Peso de", end=" ")
for pessoa in principal:
    if pessoa[1] == maior:
        print(pessoa[0], end=" ")
print(f"\nO menor peso foi de {menor}Kg. Peso de", end=" ")
for pessoa in principal:
    if pessoa[1] == menor:
        print(pessoa[0], end=" ")
|
nilq/baby-python
|
python
|
from setuptools import setup
# Packaging metadata for MySlice v2; runtime dependencies, scripts and
# data files are currently commented out (installed/managed separately).
setup(name='myslice',
      version='2.0.0',
      description='MySlice version 2',
      url='http://myslice.info',
      author='Ciro Scognamiglio',
      author_email='ciro.scognamiglio@lip6.fr',
      license='MIT',
      packages=['myslice'],
      #install_requires=[
      #    'tornado',
      #    'tornado_cors',
      #    'SockJS-tornado',
      #    'rethinkdb',
      #    'requests',
      #    'pycryptodome',
      #    'pytz',
      #    'python-dateutil',
      #    'premailer',
      #    'python-oauth2',
      #    'pyzmq'
      #    ],
      #scripts=['myslice/bin/myslice-sync', 'myslice/bin/myslice-web'],
      #data_files=[('/etc', ['config/planetlab.cfg-dist']),
      #            ('/etc/init.d', ['init/myslice'])],
      zip_safe=False)
|
nilq/baby-python
|
python
|
import logging
import operator
import time
from functools import reduce
from typing import Optional, Union, Dict, Collection, Any
logger = logging.getLogger(__name__)
class Configuration(object):
def __init__(self, c:Optional[Union['Configuration', Dict]]=None):
    """Create Configuration object

    A python dict() or another Configuration can be used as the source.

    Args:
        c (Optional[Union['Configuration', Dict]]): object to use as the
            Configuration source. Defaults to None (empty configuration).
    """
    # generation == 0 marks a configuration that was never modified.
    self._generation = 0
    super(Configuration, self).__init__()
    if c is None:
        self._config_object = dict()
    else:
        self._config_object = c
    # Stamp a generation unless wrapping a pristine Configuration
    # (one whose own generation is still 0).
    if isinstance(c, Configuration) and c._generation != 0:
        self._on_update()
    elif not isinstance(c, Configuration):
        self._on_update()
def _on_update(self, generation=None):
    """Record a modification stamp; *generation* overrides the timestamp."""
    self._generation = time.time() if generation is None else generation
@staticmethod
def _to_config_object(o:Union['Configuration', Dict]) -> 'Configuration':
"""internal method to convert arbitrary object into Configuration.
If the object is already a Configuration object then returns the object
Returns:
Configuration: a configuration object
"""
if isinstance(o, Configuration):
return o
return Configuration(o)
def __eq__(self, other):
    # An untouched configuration (generation == 0) compares equal to None.
    if self._generation == 0 and other is None:
        return True
    # NOTE(review): object.__eq__ falls back to identity comparison, so two
    # distinct Configuration instances with equal contents are NOT equal —
    # confirm this is the intended semantics.
    return super(Configuration, self).__eq__(other)
def __getitem__(self, item):
    """Dict-style read access; delegates to get_at (None for missing paths)."""
    return self.get_at(item)
def __setitem__(self, item, value):
    # Dict-style write access, delegated to set_at.
    # NOTE(review): set_at only appears commented-out in the visible part of
    # this module — confirm it is defined further down, otherwise this call
    # resolves through __getattr__ and fails at runtime.
    self.set_at(item, value)
def __iter__(self):
    """Yield (key, value) pairs from the wrapped mapping."""
    for key, value in self._config_object.items():
        yield key, value
def __getattr__(self, item):
    """Proxy unknown attributes to the wrapped object, then to path lookup."""
    try:
        # Prefer real attributes/methods of the wrapped object (e.g. dict.keys).
        res = getattr(self._config_object, item)
        return res
    except AttributeError:
        # Fall back to configuration path lookup (None for unknown paths).
        return self.get_at(item)
@staticmethod
def _is_native(o) -> bool:
_native = False
if not _native and isinstance(o, str):
_native = True
if not _native and isinstance(o, bytes):
_native = True
if not _native and isinstance(o, float):
_native = True
if not _native and isinstance(o, int):
_native = True
if not _native and isinstance(o, type(None)):
_native = True
if not _native and isinstance(o, list):
_native = True
if not _native and isinstance(o, dict):
_native = True
return _native
def as_dict(self)->Optional[Dict]:
"""Returns current configuration object as python dict
Returns:
Optional[Dict]: dict representation
"""
if isinstance(self._config_object, Configuration) and (self._is_native(self._config_object._config_object) or not hasattr(self._config_object._config_object, "__iter__")):
return self._config_object._config_object
if not hasattr(self._config_object, "__iter__"):
return self._config_object
if isinstance(self._config_object, list):
return self._config_object
if isinstance(self._config_object, str):
return self._config_object
if isinstance(self._config_object, int):
return self._config_object
if isinstance(self._config_object, float):
return self._config_object
if isinstance(self._config_object, bytes):
return self._config_object
# if self._is_native(self._config_object):
# return self._config_object
d = {}
for key, value in self._config_object.items():
_value = value.as_dict() if isinstance(value, Configuration) else value
d.update({key:_value})
return d
    def __str__(self):
        # Render via dict(self), which consumes __iter__'s (key, value) pairs.
        return str(dict(self))
    def __unicode__(self):
        # Python 2 compatibility shim; mirrors __str__.
        return str(dict(self))
    def __repr__(self):
        # Same representation as __str__; adequate for debugging.
        return str(dict(self))
    def get_at(self, path:str, convert:bool=True)->Optional[Union['Configuration', Any]]:
        """Returns Configuration branch at given address
        Args:
            path (Union[str,int]): path to get
            convert (Boolean): (deprecated) Embed target into Configuration object if if target element is an iterable
        Returns:
            The value at *path*, or None when the path cannot be resolved.
        """
        try:
            if type(path) == int:
                # Integer paths index sequence payloads directly.
                res = operator.getitem(self._config_object, path)
            else:
                # Dotted string paths walk nested mappings, e.g. "a.b.c".
                res = reduce(operator.getitem, path.split('.'), self._config_object)
            # if convert and ( type(res) == dict or type(res) == list):
            #     res = self._to_config_object(res)
        except (KeyError, TypeError) as e:
            # Missing key, or a non-indexable intermediate node: no value.
            return None
        # Unwrap nested Configuration objects that hold a plain value.
        if isinstance(res, Configuration) and self._is_native(res._config_object):
            return res.as_dict()
        return res
def exists(self, path:Union[str,int])->bool:
"""check if given path exists in Configuration
Args:
path (Union[str,int]): path to check
Returns:
bool: true if path exists
"""
try:
if type(path) == int:
operator.getitem(self._config_object, path)
else:
reduce(operator.getitem, path.split('.'), self._config_object)
except KeyError as e:
return False
return True
    def __add__(self, item):
        """Return a new configuration merging *item* (right operand) into
        self; on conflicting non-dict keys the right operand wins, nested
        dicts are merged recursively."""
        def merge(source, destination):
            # Recursively fold *source* into *destination* in place.
            for key, value in source.items():
                if isinstance(value, dict):
                    # get node or create one
                    node = destination.setdefault(key, {})
                    if isinstance(node, dict):
                        merge(value, node)
                    else:
                        # Non-dict node is replaced wholesale.
                        destination[key] = value
                else:
                    destination[key] = value
            return destination
        if not isinstance(item, Configuration):
            raise ValueError("Value must be of Configuration type", item)
        destination = self.as_dict()
        source = item.as_dict()
        # Instantiate the same (possibly derived) class as self.
        _type = type(self)
        res = merge(source, destination)
        c = _type(res)
        # Generation bookkeeping: a merge with a pristine (generation 0)
        # side inherits the other side's generation.  Exact semantics of
        # _on_update(n) are defined outside this view.
        if item._generation == self._generation:
            c._on_update(0)
        elif item._generation == 0:
            c._on_update(self._generation)
        elif self._generation == 0:
            c._on_update(item._generation)
        return c
# def set_at(self, path, value)->None:
# def _setitem(value, path):
# return {path: value}
# p = path.split('.')
# p.reverse()
# res = reduce(_setitem, p, value)
# c = Configuration(res)
# self += c
# return self
    def set_at(self, path, value)->None:
        """Set *value* at dotted *path*, creating intermediate nodes as needed."""
        value = self._value_convertor(value)
        # Split off the first path segment; _path holds the remainder.
        key, _sep, _path = path.partition('.')
        if _sep != '':
            # More segments follow: descend, creating an empty Configuration
            # node when the key is absent, and recurse with the remainder.
            _value = self._config_object.setdefault(key, Configuration())
            if isinstance(_value, Configuration):
                _value.set_at(_path, value)
            else:
                # Existing plain value: wrap it so nested assignment works.
                c = Configuration(_value)
                c.set_at(_path, value)
                self._config_object[key] = c
        else:
            # Last segment: plain assignment.
            self._config_object[key] = value
        self._on_update()
# def __setattr__(self, name, value):
# if name in ['_config_object']:
# super(Configuration, self).__setattr__(name, value)
# else:
# self.set_at(name, value)
    def __len__(self):
        # Length of the dict representation (or of whatever non-dict payload
        # as_dict() returns for scalar/list content).
        return len(self.as_dict())
    def write(self, stream):
        # Serialization is left to format-specific subclasses.
        raise NotImplementedError
    def _value_convertor(self, o):
        # Hook for subclasses to coerce/validate values before set_at()
        # stores them; the base implementation is a pass-through.
        # TODO: Validate for literal type
        # raise ConfigurationException(ValueError(value))
        return o
    def append(self, c:Union['Configuration', Dict])->'Configuration':
        """mutates Configuration object by appending Configuration to current object
        Returns:
            Configuration: self, updated object
        """
        source = self._config_object
        destination = c
        # Wrap plain dicts so Configuration.__add__ can merge them.
        if isinstance(self._config_object, dict):
            source = Configuration(self._config_object)
        if isinstance(c, dict):
            destination = Configuration(c)
        # NOTE(review): `source + destination` yields a Configuration, so
        # _config_object becomes a nested Configuration after this call —
        # confirm callers rely on that rather than on a plain dict.
        self._config_object = source + destination
        return self
|
nilq/baby-python
|
python
|
"""
collection of helper functions
"""
from __future__ import print_function, division, absolute_import
import os
from glob import glob
from collections import defaultdict
import tables
from .. import NcsFile, options
def check_sorted(channel_dirname):
    """Count the existing 'sort_...' result folders for a channel directory."""
    return len(glob(os.path.join(channel_dirname, 'sort_???_?????_*')))
def spike_count_h5f(fname):
    """
    return number of positive/negative spikes in h5file

    Returns (ch_extracted, n_pos, n_neg); ch_extracted is True when the file
    holds at least one spike of either polarity.
    """
    fid = tables.open_file(fname, 'r')
    try:
        try:
            n_pos = fid.root.pos.spikes.shape[0]
        except tables.NoSuchNodeError:
            # No positive-spike group in this file.
            n_pos = 0
        try:
            n_neg = fid.root.neg.spikes.shape[0]
        except tables.NoSuchNodeError:
            n_neg = 0
    finally:
        # Close the file even if an unexpected error occurs while reading;
        # the original only closed on the success path, leaking the handle.
        fid.close()
    ch_extracted = (n_pos + n_neg) > 0
    return ch_extracted, n_pos, n_neg
def check_status(channel_fname):
    """
    check whether channel is extracted/sorted

    Returns (ch_extracted, n_pos, n_neg, n_sorted, h5fname).
    """
    channel_dirname = os.path.splitext(channel_fname)[0]
    # Defaults for the "nothing extracted" cases.
    h5fname = None
    ch_extracted = False
    n_pos = n_neg = n_sorted = 0
    if os.path.isdir(channel_dirname):
        # Build the data file name from the directory's *basename*.  The
        # original concatenated the whole path ('data_' + channel_dirname),
        # which breaks when channel_fname carries directory components;
        # h5files() below uses this basename form.
        basename = os.path.basename(channel_dirname)
        cand = os.path.join(channel_dirname, 'data_' + basename + '.h5')
        if os.path.exists(cand):
            h5fname = cand
            ch_extracted, n_pos, n_neg = spike_count_h5f(h5fname)
            n_sorted = check_sorted(channel_dirname)
    return ch_extracted, n_pos, n_neg, n_sorted, h5fname
def get_channels(path, from_h5files=False):
    """
    simply finds the ncs files that are big enough

    Returns a dict mapping a channel name (from the ncs header, or
    'unknown') to a file/directory basename.
    """
    def h5fname2channel(h5fname):
        """
        transform h5filename to channel name
        It's a hack....
        """
        dirname = os.path.dirname(h5fname)
        basename = os.path.basename(dirname)
        cand = os.path.join(basename, basename + '.ncs')
        if os.path.exists(cand):
            return cand
        else:
            # Implicitly returns None when the candidate file is missing.
            print('{} not found!'.format(cand))
    ret = {}
    if from_h5files:
        chs = []
        for name in h5files(path):
            test = h5fname2channel(name)
            if test is not None:
                chs.append(test)
            else:
                # NOTE(review): every unmatched file reuses the 'unknown'
                # key, so only the last one survives — confirm intended.
                key = 'unknown'
                ret[key] = os.path.basename(os.path.dirname(name))
    else:
        chs = glob(os.path.join(path, '*.ncs'))
    for chan in chs:
        statr = os.stat(chan)
        # Skip files at or below 16 kB (presumably header-only recordings
        # with no samples — TODO confirm against the ncs format).
        if statr.st_size > 16 * 1024:
            fid = NcsFile(chan)
            name = fid.header['AcqEntName']
            ret[name] = os.path.basename(chan)
    return ret
def get_regions(path):
    """Group CSC channel files by their header name with a trailing channel
    digit or '_Ref' suffix stripped (presumably a region label — confirm).

    Returns a defaultdict mapping name -> sorted list of file paths.
    """
    channels = glob(os.path.join(path, 'CSC*.ncs'))
    regions = defaultdict(list)
    for ch in channels:
        statr = os.stat(ch)
        # Skip files at or below 16 kB (presumably header-only).
        if statr.st_size > 16 * 1024:
            fh = NcsFile(ch)
            name = fh.header['AcqEntName']
            try:
                # A trailing digit is treated as a channel number — drop it.
                int(name[-1])
                name = name[:-1]
            except ValueError:
                if name[-4:] == '_Ref':
                    name = name[:-4]
                else:
                    print('Unknown Region: ' + name[-4:])
            regions[name].append(ch)
    for name in regions:
        regions[name] = sorted(regions[name])
    return regions
def h5files(path):
    """
    highly specific tool to find all relevant h5 files
    if their names follow the CSC?, CSC?? naming convention
    """
    def numeric_key(fname):
        # Sort 'data_CSC<NN>.h5' numerically when possible; fall back to
        # plain string ordering otherwise.
        try:
            return int(os.path.basename(fname)[8:-3])
        except ValueError:
            return fname
    channel_dirs = []
    for pattern in options['folder_patterns']:
        channel_dirs.extend(glob(os.path.join(path, pattern)))
    found = []
    for channel_dir in channel_dirs:
        name = os.path.basename(channel_dir)
        candidate = os.path.join(channel_dir, 'data_{}.h5'.format(name))
        # Only keep data files that exist and are non-empty.
        if os.path.exists(candidate) and os.stat(candidate).st_size > 0:
            found.append(candidate)
    return sorted(found, key=numeric_key)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import unittest
from hamlish_jinja import Hamlish, Output
import testing_base
class TestDebugOutput(testing_base.TestCase):
    """Rendering tests with empty indent/newline strings (compact output)."""
    def setUp(self):
        # NOTE(review): despite the class name, debug=False here and the
        # indent/newline strings are empty — confirm the intended fixture.
        self.hamlish = Hamlish(
            Output(indent_string='', newline_string='', debug=False))
    def test_pre_tags(self):
        # Content of %pre blocks must keep its literal whitespace even when
        # the rest of the output is compacted.
        s = self._h('''
%pre
    |def test():
    |    if 1:
    |        print "Test"
''')
        r = '''<pre>def test():
    if 1:
        print "Test"
</pre>\
'''
        self.assertEqual(s, r)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
# Force CPU execution: hide all CUDA devices before TensorFlow initializes.
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
from tensorflow.keras.layers import Input, Dense, SimpleRNN, GRU, LSTM, Bidirectional
from tensorflow.keras.models import Model
# Recurrent cell under test; swap for SimpleRNN/GRU to compare behaviors.
REC = LSTM
sequence_length = 3
feature_dim = 1
# Fixed batch size of 1 is required by the stateful variant below.
features_in = Input(batch_shape=(1, sequence_length, feature_dim))
rnn_out = Bidirectional( REC(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=False))(features_in)
stateless_model = Model(inputs=[features_in], outputs=[rnn_out])
stateful_rnn_out = Bidirectional( REC(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=True))(features_in)
stateful_model = Model(inputs=features_in, outputs=stateful_rnn_out)
# Copy weights so any output difference comes from statefulness alone.
stateful_model.set_weights( stateless_model.get_weights() )
x_in = np.random.normal(0,10,sequence_length)
x_in = x_in.reshape( (1, sequence_length, feature_dim) )
def print_bidi_out(non_stateful_out, stateful_out):
    """Print forward/backward columns of both outputs and their delta."""
    labels = ('FWD::', 'BWD::')
    for label, ns_col, s_col in zip(labels, non_stateful_out.T, stateful_out.T):
        print(label)
        print(f'non_stateful: {ns_col}')
        print(f'stateful: {s_col}')
        print(f'delta: {s_col-ns_col}')
# First pass: fresh states, both models should agree.
non_stateful_out = stateless_model.predict(x_in).reshape((sequence_length,2))
stateful_out = stateful_model.predict(x_in).reshape((sequence_length,2))
print_bidi_out(non_stateful_out, stateful_out)
# Second pass: the stateful model carries state over, so deltas may appear.
non_stateful_out = stateless_model.predict(x_in).reshape((sequence_length,2))
stateful_out = stateful_model.predict(x_in).reshape((sequence_length,2))
print_bidi_out(non_stateful_out, stateful_out)
print('\n** RESETING STATES in STATEFUL MODEL **\n')
stateful_model.reset_states()
# After the reset the stateful model should match the stateless one again.
non_stateful_out = stateless_model.predict(x_in).reshape((sequence_length,2))
stateful_out = stateful_model.predict(x_in).reshape((sequence_length,2))
print_bidi_out(non_stateful_out, stateful_out)
|
nilq/baby-python
|
python
|
import b128
import itertools
import os
import plyvel
import secp256k1
from binascii import unhexlify
from utxo.script import OP_DUP, OP_HASH160, OP_EQUAL, \
OP_EQUALVERIFY, OP_CHECKSIG
def ldb_iter(datadir):
    """Iterate over all UTXO records in the chainstate LevelDB.

    NOTE: Python 2 code — str/bytes are used interchangeably and
    itertools.imap is called; this will not run unchanged on Python 3.
    """
    db = plyvel.DB(os.path.join(datadir, "chainstate"), compression=None)
    # The obfuscation key lives under the reserved 0x0e00-prefixed key; its
    # presence also selects the newer per-txout record format ('C' prefix).
    obf_key = db.get((unhexlify("0e00") + "obfuscate_key"))
    if obf_key is not None:
        pre = 'C'
        # Skip the length byte, keep the XOR key as a list of ints.
        obf_key = map(ord, obf_key[1:])
    else:
        pre = 'c'
    def norm(raw):
        # De-obfuscate (when needed) and parse a single (key, value) pair.
        key, value = raw
        if obf_key is not None:
            value = deobfuscate(obf_key, value)
            return parse_ldb_value(key, value)
        else:
            return parse_ldb_value_old(key, value)
    it = db.iterator(prefix=pre)
    it = itertools.imap(norm, it)
    if obf_key is None:
        # Old format packs several UTXOs per record; flatten the lists.
        it = itertools.chain.from_iterable(it)
    return it
def parse_ldb_value(key, raw):
    """Parse one per-txout chainstate record into its UTXO tuple."""
    # Key layout: 1-byte prefix, 32-byte tx hash, varint output index.
    tx_hash = key[1:33]
    index = b128.parse(key[33:])[0]
    code, rest = b128.read(raw)
    height = code >> 1
    amt_comp, rest = b128.read(rest)
    amount = b128.decompress_amount(amt_comp)
    script_code, rest = b128.read(rest)
    script = decompress_raw(script_code, rest)[0]
    return tx_hash, height, index, amount, script
def parse_ldb_value_old(key, raw):
    """Parse an old-format chainstate record holding a whole tx's UTXOs.

    NOTE: Python 2 byte-string code (ord over str, .encode('hex')).
    Returns a list of (tx_hash, height, index, amount, script) tuples.
    """
    tx_hash = key[1:]
    version, raw = b128.read(raw)
    code, raw = b128.read(raw)
    # Bits 1-2 of the code flag outputs 0 and 1 as unspent; the remaining
    # bitvector bytes follow.
    first_two = (code & (2 | 4)) >> 1
    n = (code >> 3) + (first_two == 0)
    offset = 0
    bitv = first_two
    if n > 0:
        # Consume bitvector bytes until n non-zero bytes have been seen,
        # then splice them (little-endian) above the two flag bits.
        while n:
            n -= (ord(raw[offset]) != 0)
            offset += 1
        bitv = (int(raw[:offset][::-1].encode('hex'), 16) << 2) | first_two
        raw = raw[offset:]
    i = 0
    utxos = []
    # Walk the bitvector; each set bit marks an unspent output at index i.
    while bitv > 0:
        if bitv & 1:
            amt_comp, raw = b128.read(raw)
            amt = b128.decompress_amount(amt_comp)
            script_code, raw = b128.read(raw)
            script, raw = decompress_raw(script_code, raw, chomp=True)
            # Height slot is filled in below once it has been parsed.
            ut = (tx_hash, None, i, amt, script)
            utxos.append(ut)
        bitv >>= 1
        i += 1
    height, raw = b128.read(raw)
    assert len(raw) == 0
    # Back-fill the height (parsed last) into every utxo tuple.
    ret = [u[:1] + (height,) + u[2:] for u in utxos]
    return ret
def decompress_raw(comp_type, raw, chomp=False):
    """Decompress a compressed scriptPubKey.

    comp_type 0: pay-to-pubkey-hash template, 1: pay-to-script-hash,
    2-3: compressed pubkey, 4-5: uncompressed pubkey (recovered via
    secp256k1), >=6: raw script of length comp_type - 6.
    Returns (script, remaining_raw).  NOTE: Python 2 str concatenation.
    """
    # Payload length implied by the compression type.
    if comp_type == 0 or comp_type == 1:
        l = 20
    elif comp_type >= 2 and comp_type <= 5:
        l = 32
    else:
        l = comp_type - 6
    data = raw[:l]
    raw = raw[l:]
    if not chomp:
        # Callers parsing a single script expect no trailing bytes.
        assert len(raw) == 0
    if comp_type == 0:
        script = OP_DUP + OP_HASH160 + chr(20) + data + \
            OP_EQUALVERIFY + OP_CHECKSIG
    elif comp_type == 1:
        script = OP_HASH160 + chr(20) + data + OP_EQUAL
    elif comp_type == 2 or comp_type == 3:
        script = chr(33) + chr(comp_type) + data + OP_CHECKSIG
    elif comp_type == 4 or comp_type == 5:
        # Restore the full 65-byte pubkey from its compressed form.
        comp_pubkey = chr(comp_type - 2) + data
        pubkey = secp256k1.PublicKey(
            comp_pubkey, raw=True
        ).serialize(compressed=False)
        script = chr(65) + pubkey + OP_CHECKSIG
    else:
        script = data
    return script, raw
def deobfuscate(key, obf):
    """XOR *obf* with the repeating *key* (sequence of ints); return a str."""
    key_len = len(key)
    return "".join(chr(key[i % key_len] ^ ord(ch)) for i, ch in enumerate(obf))
|
nilq/baby-python
|
python
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Provider Model Serializers."""
import logging
from collections import defaultdict
from django.conf import settings
from django.db import transaction
from rest_framework import serializers
from rest_framework.fields import empty
from api.common import error_obj
from api.iam.serializers import AdminCustomerSerializer
from api.iam.serializers import CustomerSerializer
from api.iam.serializers import UserSerializer
from api.provider.models import Provider
from api.provider.models import ProviderAuthentication
from api.provider.models import ProviderBillingSource
from api.utils import DateHelper
from providers.provider_access import ProviderAccessor
from providers.provider_errors import ProviderErrors
LOG = logging.getLogger(__name__)
# Provider type choices; "-local" development-only types are filtered out
# unless DEVELOPMENT is enabled.
PROVIDER_CHOICE_LIST = [
    provider[0]
    for provider in Provider.PROVIDER_CHOICES
    if (settings.DEVELOPMENT or (not settings.DEVELOPMENT and "-local" not in provider[0].lower()))
]
# Lower-cased variant used for the case-insensitive "type" ChoiceField.
LCASE_PROVIDER_CHOICE_LIST = [provider.lower() for provider in PROVIDER_CHOICE_LIST]
# Maximum length accepted for GCP's optional report_prefix.
REPORT_PREFIX_MAX_LENGTH = 64
def validate_field(data, valid_fields, key):
    """Validate that *data* contains every field listed in *valid_fields*.

    Returns *data* unchanged on success; raises a ValidationError keyed by
    *key* when any required field is absent.
    """
    missing = set(valid_fields) - set(data)
    if missing:
        message = f"One or more required fields is invalid/missing. Required fields are {valid_fields}"
        raise serializers.ValidationError(error_obj(key, message))
    return data
class ProviderAuthenticationSerializer(serializers.ModelSerializer):
    """Serializer for the Provider Authentication model."""
    # Read-only identifier assigned by the model layer.
    uuid = serializers.UUIDField(read_only=True)
    # Arbitrary JSON credentials; provider-specific subclasses validate keys.
    credentials = serializers.JSONField(allow_null=False, required=True)
    class Meta:
        """Metadata for the serializer."""
        model = ProviderAuthentication
        fields = ("uuid", "credentials")
class AWSAuthenticationSerializer(ProviderAuthenticationSerializer):
    """AWS auth serializer."""
    def validate_credentials(self, creds):
        """Ensure the AWS credentials carry a role_arn entry."""
        return validate_field(creds, ["role_arn"], "role_arn")
class OCIAuthenticationSerializer(ProviderAuthenticationSerializer):
    """OCI auth serializer."""
    def validate_credentials(self, creds):
        """Ensure the OCI credentials carry a tenant entry."""
        return validate_field(creds, ["tenant"], "tenant")
class AzureAuthenticationSerializer(ProviderAuthenticationSerializer):
    """Azure auth serializer."""
    def validate_credentials(self, creds):
        """Ensure all Azure service-principal fields are present."""
        required = ["subscription_id", "tenant_id", "client_id", "client_secret"]
        return validate_field(creds, required, "")
    def to_representation(self, instance):
        """Control output of serializer: never echo the client secret back."""
        provider = super().to_representation(instance)
        # NOTE(review): this inspects provider["authentication"], but this
        # serializer's own fields are uuid/credentials — confirm this path
        # is ever hit, or whether the scrubbing belongs on ProviderSerializer.
        creds = provider.get("authentication", {}).get("credentials", {})
        if creds.get("client_secret"):
            del provider["authentication"]["credentials"]["client_secret"]
        return provider
class GCPAuthenticationSerializer(ProviderAuthenticationSerializer):
    """GCP auth serializer."""
    def validate_credentials(self, creds):
        """Ensure the GCP credentials carry a project_id entry."""
        return validate_field(creds, ["project_id"], "project_id")
class IBMAuthenticationSerializer(ProviderAuthenticationSerializer):
    """IBM auth serializer."""
    def validate_credentials(self, creds):
        """Ensure the IBM credentials carry an iam_token entry."""
        return validate_field(creds, ["iam_token"], "iam_token")
class OCPAuthenticationSerializer(ProviderAuthenticationSerializer):
    """OCP auth serializer."""
    def validate_credentials(self, creds):
        """Ensure the OCP credentials carry a cluster_id entry."""
        return validate_field(creds, ["cluster_id"], "cluster_id")
class ProviderBillingSourceSerializer(serializers.ModelSerializer):
    """Serializer for the Provider Billing Source model."""
    # Read-only identifier assigned by the model layer.
    uuid = serializers.UUIDField(read_only=True)
    # Arbitrary JSON payload; provider-specific subclasses validate keys.
    data_source = serializers.JSONField(allow_null=False, required=True)
    class Meta:
        """Metadata for the serializer."""
        model = ProviderBillingSource
        fields = ("uuid", "data_source")
class AWSBillingSourceSerializer(ProviderBillingSourceSerializer):
    """AWS billing source serializer."""
    def validate_data_source(self, data_source):
        """Ensure the AWS data source names its S3 bucket."""
        return validate_field(data_source, ["bucket"], "provider.data_source")
class OCIBillingSourceSerializer(ProviderBillingSourceSerializer):
    """OCI billing source serializer."""
    # Billing data is optional for OCI.  Use the `dict` callable rather than
    # a shared mutable {} literal so each validation gets a fresh default
    # instance (DRF invokes callable defaults per use).
    data_source = serializers.JSONField(required=False, default=dict)
class AzureBillingSourceSerializer(ProviderBillingSourceSerializer):
    """Azure billing source serializer."""
    def validate_data_source(self, data_source):
        """Ensure the Azure data source names its resource group and storage account."""
        required = ["resource_group", "storage_account"]
        return validate_field(data_source, required, "provider.data_source")
class GCPBillingSourceSerializer(ProviderBillingSourceSerializer):
    """GCP billing source serializer."""
    def validate_data_source(self, data_source):
        """Require a dataset and bound the optional report_prefix length."""
        data = validate_field(data_source, ["dataset"], "provider.data_source")
        report_prefix = data_source.get("report_prefix", "")
        if report_prefix and len(report_prefix) > REPORT_PREFIX_MAX_LENGTH:
            message = f"Ensure this field has no more than {REPORT_PREFIX_MAX_LENGTH} characters."
            raise serializers.ValidationError(error_obj("data_source.report_prefix", message))
        return data
class IBMBillingSourceSerializer(ProviderBillingSourceSerializer):
    """IBM billing source serializer."""
    def validate_data_source(self, data_source):
        """Ensure the IBM data source names its enterprise_id."""
        return validate_field(data_source, ["enterprise_id"], "provider.data_source")
class OCPBillingSourceSerializer(ProviderBillingSourceSerializer):
    """OCP billing source serializer."""
    # OCP sources carry no billing data.  Use the `dict` callable rather
    # than a shared mutable {} literal so each validation gets a fresh
    # default instance (DRF invokes callable defaults per use).
    data_source = serializers.JSONField(required=False, default=dict)
# Registry of authentication serializers.
# Maps each Provider type constant to the serializer validating its
# credential payload; OCP-on-cloud types reuse the cloud serializer.
AUTHENTICATION_SERIALIZERS = {
    Provider.PROVIDER_AWS: AWSAuthenticationSerializer,
    Provider.PROVIDER_AWS_LOCAL: AWSAuthenticationSerializer,
    Provider.PROVIDER_OCI: OCIAuthenticationSerializer,
    Provider.PROVIDER_OCI_LOCAL: OCIAuthenticationSerializer,
    Provider.PROVIDER_AZURE: AzureAuthenticationSerializer,
    Provider.PROVIDER_AZURE_LOCAL: AzureAuthenticationSerializer,
    Provider.PROVIDER_GCP: GCPAuthenticationSerializer,
    Provider.PROVIDER_GCP_LOCAL: GCPAuthenticationSerializer,
    Provider.PROVIDER_IBM: IBMAuthenticationSerializer,
    Provider.PROVIDER_IBM_LOCAL: IBMAuthenticationSerializer,
    Provider.PROVIDER_OCP: OCPAuthenticationSerializer,
    Provider.OCP_AWS: AWSAuthenticationSerializer,
    Provider.OCP_AZURE: AzureAuthenticationSerializer,
}
# Registry of billing_source serializers.
# Maps each Provider type constant to the serializer validating its
# data_source payload; OCP-on-cloud types reuse the cloud serializer.
BILLING_SOURCE_SERIALIZERS = {
    Provider.PROVIDER_AWS: AWSBillingSourceSerializer,
    Provider.PROVIDER_AWS_LOCAL: AWSBillingSourceSerializer,
    Provider.PROVIDER_OCI: OCIBillingSourceSerializer,
    Provider.PROVIDER_OCI_LOCAL: OCIBillingSourceSerializer,
    Provider.PROVIDER_AZURE: AzureBillingSourceSerializer,
    Provider.PROVIDER_AZURE_LOCAL: AzureBillingSourceSerializer,
    Provider.PROVIDER_GCP: GCPBillingSourceSerializer,
    Provider.PROVIDER_GCP_LOCAL: GCPBillingSourceSerializer,
    Provider.PROVIDER_IBM: IBMBillingSourceSerializer,
    Provider.PROVIDER_IBM_LOCAL: IBMBillingSourceSerializer,
    Provider.PROVIDER_OCP: OCPBillingSourceSerializer,
    Provider.OCP_AWS: AWSBillingSourceSerializer,
    Provider.OCP_AZURE: AzureBillingSourceSerializer,
}
class ProviderSerializer(serializers.ModelSerializer):
    """Serializer for the Provider model.

    Validates and persists cost-management sources.  The concrete
    authentication/billing_source sub-serializers are selected per request
    in __init__ based on the incoming "type" value.
    """

    uuid = serializers.UUIDField(allow_null=True, required=False)
    name = serializers.CharField(max_length=256, required=True, allow_null=False, allow_blank=False)
    type = serializers.ChoiceField(choices=LCASE_PROVIDER_CHOICE_LIST)
    created_timestamp = serializers.DateTimeField(read_only=True)
    customer = CustomerSerializer(read_only=True)
    created_by = UserSerializer(read_only=True)
    active = serializers.BooleanField(read_only=True)
    paused = serializers.BooleanField(required=False)

    class Meta:
        """Metadata for the serializer."""

        model = Provider
        fields = (
            "uuid",
            "name",
            "type",
            "authentication",
            "billing_source",
            "customer",
            "created_by",
            "created_timestamp",
            "active",
            "paused",
        )

    def __init__(self, instance=None, data=empty, **kwargs):
        """Initialize the Provider Serializer.

        Here we ensure we use the appropriate serializer to validate the
        authentication and billing_source parameters.
        """
        super().__init__(instance, data, **kwargs)
        provider_type = None
        if data and data != empty:
            provider_type = data.get("type")
        if provider_type and provider_type.lower() not in LCASE_PROVIDER_CHOICE_LIST:
            key = "type"
            message = f"{provider_type} is not a valid source type."
            raise serializers.ValidationError(error_obj(key, message))
        if provider_type:
            provider_type = provider_type.lower()
            self.fields["authentication"] = AUTHENTICATION_SERIALIZERS.get(
                Provider.PROVIDER_CASE_MAPPING.get(provider_type)
            )()
            self.fields["billing_source"] = BILLING_SOURCE_SERIALIZERS.get(
                Provider.PROVIDER_CASE_MAPPING.get(provider_type)
            )()
        else:
            # No type supplied (e.g. partial update): fall back to the
            # generic serializers that accept any JSON payload.
            self.fields["authentication"] = ProviderAuthenticationSerializer()
            self.fields["billing_source"] = ProviderBillingSourceSerializer()

    @property
    def demo_credentials(self):
        """Build formatted credentials for our nise-populator demo accounts.

        Returns a mapping of provider type -> list of single-key credential
        dicts keyed by the provider-specific identifying field.
        """
        creds_by_source_type = defaultdict(list)
        # Only the credential entries matter; the account ids are unused.
        for cred_dict in settings.DEMO_ACCOUNTS.values():
            for cred, info in cred_dict.items():
                if info.get("source_type") == Provider.PROVIDER_AWS:
                    creds_by_source_type[Provider.PROVIDER_AWS].append({"role_arn": cred})
                elif info.get("source_type") == Provider.PROVIDER_AZURE:
                    creds_by_source_type[Provider.PROVIDER_AZURE].append({"client_id": cred})
                elif info.get("source_type") == Provider.PROVIDER_GCP:
                    creds_by_source_type[Provider.PROVIDER_GCP].append({"project_id": cred})
        return creds_by_source_type

    def get_request_info(self):
        """Obtain request information like user and customer context.

        Returns:
            (user, customer) tuple.
        Raises:
            serializers.ValidationError: when no user or customer is found.
        """
        user = self.context.get("user")
        customer = self.context.get("customer")
        if user and customer:
            return user, customer
        request = self.context.get("request")
        if request and hasattr(request, "user"):
            user = request.user
            if user.customer:
                customer = user.customer
            else:
                key = "customer"
                message = "Customer for requesting user could not be found."
                raise serializers.ValidationError(error_obj(key, message))
        else:
            key = "created_by"
            message = "Requesting user could not be found."
            raise serializers.ValidationError(error_obj(key, message))
        return user, customer

    @transaction.atomic
    def create(self, validated_data):
        """Create a provider from validated data.

        Verifies the billing source is reachable (unless this is a known
        demo account) and rejects duplicate auth/billing combinations.
        """
        user, customer = self.get_request_info()
        provider_type = validated_data["type"].lower()
        provider_type = Provider.PROVIDER_CASE_MAPPING.get(provider_type)
        validated_data["type"] = provider_type
        interface = ProviderAccessor(provider_type)
        authentication = validated_data.pop("authentication")
        credentials = authentication.get("credentials")
        billing_source = validated_data.pop("billing_source")
        data_source = billing_source.get("data_source")
        if self._is_demo_account(provider_type, credentials):
            LOG.info("Customer account is a DEMO account. Skipping cost_usage_source_ready check.")
        else:
            interface.cost_usage_source_ready(credentials, data_source)
        bill, __ = ProviderBillingSource.objects.get_or_create(**billing_source)
        auth, __ = ProviderAuthentication.objects.get_or_create(**authentication)
        # We can re-use a billing source or a auth, but not the same combination.
        dup_queryset = (
            Provider.objects.filter(authentication=auth).filter(billing_source=bill).filter(customer=customer)
        )
        if dup_queryset.count() != 0:
            conflict_provider = dup_queryset.first()
            message = (
                f"Cost management does not allow duplicate accounts. "
                f"{conflict_provider.name} already exists. Edit source settings to configure a new source."
            )
            # Logger.warn() is a deprecated alias; use warning().
            LOG.warning(message)
            raise serializers.ValidationError(error_obj(ProviderErrors.DUPLICATE_AUTH, message))
        provider = Provider.objects.create(**validated_data)
        provider.customer = customer
        provider.created_by = user
        provider.authentication = auth
        provider.billing_source = bill
        provider.active = True
        provider.save()
        customer.date_updated = DateHelper().now_utc
        customer.save()
        return provider

    def update(self, instance, validated_data):
        """Update a Provider instance from validated data.

        Marks the provider inactive when the source availability check
        fails, and rejects duplicate auth/billing combinations.
        """
        _, customer = self.get_request_info()
        provider_type = validated_data["type"].lower()
        provider_type = Provider.PROVIDER_CASE_MAPPING.get(provider_type)
        validated_data["type"] = provider_type
        interface = ProviderAccessor(provider_type)
        authentication = validated_data.pop("authentication")
        credentials = authentication.get("credentials")
        billing_source = validated_data.pop("billing_source")
        data_source = billing_source.get("data_source")
        # updating `paused` must happen regardless of Provider availability
        instance.paused = validated_data.pop("paused", instance.paused)
        try:
            if self._is_demo_account(provider_type, credentials):
                LOG.info("Customer account is a DEMO account. Skipping cost_usage_source_ready check.")
            else:
                interface.cost_usage_source_ready(credentials, data_source)
        except serializers.ValidationError as validation_error:
            # The source is unreachable; record the failure before re-raising.
            instance.active = False
            instance.save()
            raise validation_error
        with transaction.atomic():
            bill, __ = ProviderBillingSource.objects.get_or_create(**billing_source)
            auth, __ = ProviderAuthentication.objects.get_or_create(**authentication)
            if instance.billing_source != bill or instance.authentication != auth:
                dup_queryset = (
                    Provider.objects.filter(authentication=auth).filter(billing_source=bill).filter(customer=customer)
                )
                if dup_queryset.count() != 0:
                    conflict_provider = dup_queryset.first()
                    message = (
                        f"Cost management does not allow duplicate accounts. "
                        f"{conflict_provider.name} already exists. Edit source settings to configure a new source."
                    )
                    # Logger.warn() is a deprecated alias; use warning().
                    LOG.warning(message)
                    raise serializers.ValidationError(error_obj(ProviderErrors.DUPLICATE_AUTH, message))
            for key in validated_data.keys():
                setattr(instance, key, validated_data[key])
            instance.authentication = auth
            instance.billing_source = bill
            instance.active = True
            instance.save()
            customer.date_updated = DateHelper().now_utc
            customer.save()
            return instance

    def _is_demo_account(self, provider_type, credentials):
        """Test whether this source is a demo account.

        Compares the provider-specific identifying credential against the
        configured demo credentials; the mismatched get() defaults
        (True vs False) guarantee absent keys never compare equal.
        """
        key_types = {
            Provider.PROVIDER_AWS: "role_arn",
            Provider.PROVIDER_AZURE: "client_id",
            Provider.PROVIDER_GCP: "project_id",
        }
        key_to_check = key_types.get(provider_type, "")
        creds_to_check = self.demo_credentials.get(provider_type, [])
        for cred in creds_to_check:
            if credentials.get(key_to_check, True) == cred.get(key_to_check, False):
                return True
        return False
class AdminProviderSerializer(ProviderSerializer):
    """Provider serializer specific to service admins."""
    # Admins see the extended customer representation.
    customer = AdminCustomerSerializer(read_only=True)
|
nilq/baby-python
|
python
|
"""
collision_detection.py is used on each iteration to detect whether
an agent has collided with walls and to provide an adequate environment
response (i.e. updated position & velocity such that agen slides along the wall).
"""
import numpy as np
import pygame as pg
from decimal import Decimal
import configs as cfg
import maze
# Shorthand axis indices taken from the configuration.
x_var = cfg.X
y_var = cfg.Y
# Offsets of the position/velocity sub-vectors inside a boid's flat array
# (used as boid[pos + x_var] etc. below).
pos = cfg.BOID_POS_VAR * cfg.Dimensions
vel = cfg.BOID_VEL_VAR * cfg.Dimensions
class Amendments:
    """Accumulates position amendments produced by collision handling."""
    # Field indices in the packet generated by self.get_packet()
    amount_i = 0
    indices_i = 1
    values_i = 2
    def __init__(self):
        # Start from an empty amendment set.
        self.clear()
    def get_packet(self):
        """ Returns all amendments in a packet format """
        packet = (np.uint16(self.amount),
                  np.asarray(self.indices, dtype=np.uint16),
                  np.asarray(self.values, dtype=np.float32))
        return packet
    def clear(self):
        """Reset to an empty amendment set."""
        self.amount = 0
        self.indices = []
        self.values = []
def run(flock, previous_flock, amaze, template_triangles, amendments):
    """
    Detects collisions and calculates required amendments that
    allow boid to avoid collisions.
    For each boid it first checks if boid collides with the wall by rotating on the
    same spot. If it is, boid is moved out of the wall. If it isn't, the checking continues:
    it calculates its impulse (desired dislocation vector) and
    breaks it into steps. For each step (partial impulse) it checks if a wall
    is hit. If it is, boid slides along it. Multiple walls will be properly processed.
    TODO: Currently it's imprecise near the corners - there's a small transparent square
    on the corner of the wall with the size (cfg.collision_check_stop, cfg.collision_check_stop),
    and boid can go through it. Implementing proper processing may require more complex logic
    and is out of the scope of this project.
    """
    amendments.clear()
    i = 0
    for boid in flock.np_arrays:
        # Desired travel distance this frame (magnitude of the velocity).
        impulse = np.hypot(boid[vel + x_var], boid[vel + y_var])
        if impulse > 0:
            # We'll start from previous position and if no walls are hit,
            # increase it up to the new boid position
            boid[pos + x_var] = previous_flock.np_arrays[i][pos + x_var]
            boid[pos + y_var] = previous_flock.np_arrays[i][pos + y_var]
            # Pick the pre-rotated template triangle for this orientation
            # (one per degree, clamped to 359).
            template_triangle = template_triangles[min(
                int(np.round(np.degrees(flock.object_list[i].orientation))),
                359)]
            triangle_offset = template_triangle.get_triangle_top_left()
            triangle_rect = template_triangle.rect.copy()
            collision_detected = False
            # First check if the boid has collided into a wall without
            # moving (e.g. rotated near the wall)
            # ------------------------------------------------------
            hit_top, hit_right, hit_bottom, hit_left = \
                check_for_collision([boid[pos + x_var],
                                     boid[pos + y_var]],
                                    [boid[vel + x_var],
                                     boid[vel + y_var]],
                                    triangle_rect,
                                    triangle_offset,
                                    amaze)
            if hit_right or hit_left or hit_top or hit_bottom:
                collision_detected = True
                if cfg.bounding_rects_show:
                    flock.object_list[i].collided = True
                dx = dy = 0
                # Push the boid flush against whichever wall edge it overlaps.
                if hit_right:
                    wall_left_x = np.trunc(triangle_rect.right / cfg.tile_width) * cfg.tile_width
                    # dx will be negative
                    dx = wall_left_x - triangle_rect.right
                if hit_left:
                    wall_right_x = np.ceil(triangle_rect.left / cfg.tile_width) * cfg.tile_width
                    # dx will be positive
                    dx = wall_right_x - triangle_rect.left
                if hit_top:
                    wall_above_y = np.ceil(triangle_rect.top / cfg.tile_height) * cfg.tile_height
                    # dy will be positive
                    dy = wall_above_y - triangle_rect.top
                if hit_bottom:
                    wall_below_y = np.trunc(triangle_rect.bottom / cfg.tile_height) * cfg.tile_height
                    # dy will be negative
                    dy = wall_below_y - triangle_rect.bottom
                # Deltas are in pixels; boid coordinates are in tile units.
                deltas_in_tiles = maze.to_unit_tiles(dx, dy)
                boid[pos + x_var] = boid[pos + x_var] + deltas_in_tiles[x_var]
                boid[pos + y_var] = boid[pos + y_var] + deltas_in_tiles[y_var]
                # Collision check for this boid is finished
            if not collision_detected:
                # First position is unobstructed, so check positions ahead
                # ------------------------------------------------------
                unit_impulse = cfg.collision_check_step
                # noinspection PyTypeChecker
                dx = boid[vel + x_var] * unit_impulse / impulse  # Unit squares
                # noinspection PyTypeChecker
                dy = boid[vel + y_var] * unit_impulse / impulse  # Unit squares
                number_of_checks = int(np.ceil(impulse / unit_impulse))
                for j in range(0, number_of_checks):
                    if (j + 1) * unit_impulse > impulse:  # Last step can be smaller
                        # Using Decimal here as float != float - 0 and Decimal is exact.
                        # Python uses approximate values and it negatively manifests itself here.
                        unit_impulse = np.float32(Decimal(impulse - unit_impulse * j))
                        dx = boid[vel + x_var] * unit_impulse / impulse  # Unit squares
                        dy = boid[vel + y_var] * unit_impulse / impulse  # Unit squares
                    hit_top, hit_right, hit_bottom, hit_left = \
                        check_for_collision([boid[pos + x_var] + dx,
                                             boid[pos + y_var] + dy],
                                            [boid[vel + x_var],
                                             boid[vel + y_var]],
                                            triangle_rect,
                                            triangle_offset,
                                            amaze)
                    if hit_right or hit_left or hit_top or hit_bottom:
                        collision_detected = True
                        if cfg.bounding_rects_show:
                            flock.object_list[i].collided = True
                        # Nullify impulse if a wall is on the way
                        if (dx > 0 and hit_right) or (dx < 0 and hit_left):
                            dx = 0
                        if (dy > 0 and hit_bottom) or (dy < 0 and hit_top):
                            dy = 0
                        if dx == 0 and dy == 0:
                            # Can't proceed
                            break
                    if not maze.outside_maze(boid[pos + x_var] + dx,
                                             boid[pos + y_var] + dy):
                        # Still inside the maze: apply the (possibly wall-
                        # nullified) step, i.e. slide along the wall.
                        boid[pos + x_var] = boid[pos + x_var] + dx
                        boid[pos + y_var] = boid[pos + y_var] + dy
                    else:
                        # Boid is outside the maze, no point continuing the check
                        break
            if collision_detected:
                # Save amendments to transfer them later to the GPU
                amendments.values.append(np.copy([boid[pos + x_var],
                                                  boid[pos + y_var]]))
                amendments.indices.append(i)
                amendments.amount += 1
        i += 1
def check_for_collision(boid_center, boid_impulse, triangle_rect, triangle_offset, amaze):
    """ Returns collision flags as the tuple (hit_top, hit_right, hit_bottom, hit_left).

    Moves triangle_rect to the candidate position derived from boid_center
    (presumably given in unit-tile coordinates; maze.to_coors converts it to
    pixels -- TODO confirm) and tests it against the wall tiles of `amaze`
    neighboring that position. boid_impulse is only used to break ties for
    diagonal (corner) walls.
    """
    # Move the boid's bounding rect to the position being probed.
    triangle_rect_coors = maze.to_coors(
        boid_center[x_var],
        boid_center[y_var])
    triangle_rect.left = triangle_rect_coors[x_var] + triangle_offset[x_var]
    triangle_rect.top = triangle_rect_coors[y_var] + triangle_offset[y_var]
    # Get new neighboring walls as a list of coordinate pairs
    neighboring_walls = \
        maze.get_neighboring_tiles(boid_center[x_var], boid_center[y_var],
                                   amaze, maze.Wall, include_none=False)
    # Convert coordinates into rects
    neighboring_walls_rects = []
    for wall in neighboring_walls:
        neighboring_walls_rects.append(
            pg.Rect(wall[x_var] * cfg.tile_width, wall[y_var] * cfg.tile_height,
                    cfg.tile_width, cfg.tile_height))
    # Check if triangle collides with any of them
    colliding_walls = triangle_rect.collidelistall(neighboring_walls_rects)
    hit_top = hit_bottom = hit_left = hit_right = False
    # Only the LAST diagonal collision seen is kept; earlier ones are
    # overwritten. NOTE(review): confirm that is intended.
    diagonal_collision = None
    if colliding_walls:
        # Collision detected
        for wall_i in colliding_walls:
            # Get collision type (horizontal/vertical)
            collision_types = get_collision_type(neighboring_walls[wall_i][x_var],
                                                 neighboring_walls[wall_i][y_var],
                                                 maze.to_unit_tiles(triangle_rect.centerx,
                                                                    triangle_rect.centery),
                                                 triangle_rect)
            if collision_types[0] == maze.Orientation.diagonal:
                diagonal_collision = collision_types[1:]
            else:
                for collision_type in collision_types:
                    if collision_type == maze.Location.top:
                        hit_top = True
                    if collision_type == maze.Location.bottom:
                        hit_bottom = True
                    if collision_type == maze.Location.left:
                        hit_left = True
                    if collision_type == maze.Location.right:
                        hit_right = True
    if diagonal_collision is not None:
        if not (hit_top or hit_bottom or hit_left or hit_right):
            # If boid has collided only with a diagonal wall, then alter
            # its velocity, otherwise ignore it.
            # The dominant velocity axis decides which side of the corner
            # counts as the obstruction.
            # NOTE(review): the [left, top] corner combination is not handled
            # here -- confirm whether that case can occur.
            if diagonal_collision == [maze.Location.left, maze.Location.bottom]:
                if np.abs(boid_impulse[y_var]) > np.abs(boid_impulse[x_var]):
                    hit_left = True
                else:
                    hit_bottom = True
            if diagonal_collision == [maze.Location.right, maze.Location.top]:
                if np.abs(boid_impulse[y_var]) > np.abs(boid_impulse[x_var]):
                    hit_right = True
                else:
                    hit_top = True
            if diagonal_collision == [maze.Location.right, maze.Location.bottom]:
                if np.abs(boid_impulse[y_var]) > np.abs(boid_impulse[x_var]):
                    hit_right = True
                else:
                    hit_bottom = True
    return hit_top, hit_right, hit_bottom, hit_left
def get_collision_type(wall_x_float, wall_y_float, boid_pos_float, triangle_rect):
    """
    Classify the collision between the boid's tile and a wall tile.
    C H C
    V b V
    C H C
    (H - horizontal, V - vertical, C - corner, b - boid previous position)
    Returns a list of maze.Location values for straight walls, or the result
    of get_diagonal_collision_type() for corner walls.
    """
    wall_col = int(wall_x_float)
    wall_row = int(wall_y_float)
    boid_col = int(boid_pos_float[x_var])
    boid_row = int(boid_pos_float[y_var])
    if wall_col != boid_col and wall_row != boid_row:
        # Wall touches the boid's tile only at a corner.
        return get_diagonal_collision_type(wall_col, wall_row, [boid_col, boid_row], triangle_rect)
    if wall_row != boid_row:
        # Wall is directly above or below the boid's tile.
        return [maze.Location.top] if wall_row < boid_row else [maze.Location.bottom]
    # Otherwise the wall is directly to one side of the boid's tile.
    return [maze.Location.right] if wall_col > boid_col else [maze.Location.left]
def get_diagonal_collision_type(wall_x, wall_y, boid_center, triangle_rect):
    """ Checks with which side of the diagonally positioned (not oriented) wall boid has collided.

    Returns [Orientation.diagonal, <left/right>] when only a vertical edge is
    touched, [Orientation.diagonal, <top/bottom>] when only a horizontal edge
    is touched, [Orientation.diagonal, <left/right>, <top/bottom>] when the
    boid sits on the wall's corner, and None when no edge is close enough.
    """
    # Determine which corner of the boid's tile the wall occupies.
    # NOTE(review): stays 0 when the wall is not in an adjacent column; the
    # subscripts below would then raise TypeError -- presumably callers only
    # pass corner-adjacent walls (see get_collision_type). Confirm.
    diagonal_wall_position = 0
    if wall_x == np.trunc(boid_center[x_var]) - 1:
        # T F F
        # F F F
        # T F F
        # (one of the "True" walls)
        if wall_y == np.trunc(boid_center[y_var]) - 1:
            diagonal_wall_position = (maze.Location.left, maze.Location.top)
        else:
            diagonal_wall_position = (maze.Location.left, maze.Location.bottom)
    if wall_x == np.trunc(boid_center[x_var]) + 1:
        # F F T
        # F F F
        # F F T
        # (one of the "True" walls)
        if wall_y == np.trunc(boid_center[y_var]) - 1:
            diagonal_wall_position = (maze.Location.right, maze.Location.top)
        else:
            diagonal_wall_position = (maze.Location.right, maze.Location.bottom)
    # Wall bounds in pixel coordinates.
    wall_left, wall_top = maze.to_coors(wall_x,
                                        wall_y)
    wall_right, wall_bottom = maze.to_coors(wall_x + 1,
                                            wall_y + 1)
    # Tolerance matching one collision-check step, in pixels per axis.
    precision_x = cfg.collision_check_step * cfg.window_width
    precision_y = cfg.collision_check_step * cfg.window_height
    # Get collision type
    wall_on_left = None
    wall_on_right = None
    wall_above = None
    wall_below = None
    if diagonal_wall_position[1] == maze.Location.top and triangle_rect.top >= wall_top - precision_y:
        wall_above = True
    if diagonal_wall_position[1] == maze.Location.bottom and triangle_rect.bottom <= wall_top + precision_y:
        wall_below = True
    if diagonal_wall_position[0] == maze.Location.right:
        # One of the walls on right from the boid's position
        if triangle_rect.right <= wall_left + precision_x:
            # Boid is at least on the left edge of the wall
            wall_on_right = True
        if wall_on_right and (wall_above or wall_below):
            # Boid is on both edges of the wall, i.e. on its corner
            return [maze.Orientation.diagonal, maze.Location.right, diagonal_wall_position[1]]
        if wall_on_right:
            # Boid is only on the left edge of the wall
            return [maze.Orientation.diagonal, maze.Location.right]
    else:  # diagonal_wall_position[0] == maze.Location.left
        # One of the walls on left from the boid's position
        if triangle_rect.left >= wall_right - precision_x:
            # Boid is at least on the right edge of the wall
            wall_on_left = True
        if wall_on_left and (wall_above or wall_below):
            # Boid is on both edges of the wall, i.e. on its corner
            return [maze.Orientation.diagonal, maze.Location.left, diagonal_wall_position[1]]
        # BUG FIX: the original tested `wall_on_right`, which is always None in
        # this branch, so the [diagonal, left] result was unreachable and the
        # function fell through to return None instead.
        if wall_on_left:
            # Boid is only on the right edge of the wall
            return [maze.Orientation.diagonal, maze.Location.left]
    if wall_above or wall_below:
        return [maze.Orientation.diagonal, diagonal_wall_position[1]]
    # No edge within tolerance.
    return None
|
nilq/baby-python
|
python
|
import copy
import numpy as np
import pytest
import xarray as xr
from gcm_filters import Filter, FilterShape, GridType
from gcm_filters.filter import FilterSpec
def _check_equal_filter_spec(spec1, spec2):
assert spec1.n_steps_total == spec2.n_steps_total
np.testing.assert_allclose(spec1.s, spec2.s)
assert (spec1.is_laplacian == spec2.is_laplacian).all()
assert spec1.s_max == spec2.s_max
np.testing.assert_allclose(spec1.p, spec2.p, rtol=1e-07, atol=1e-07)
# These values were just hard copied from my dev environment.
# All they do is check that the results match what I got when I ran the code.
# They do NOT assure that the filter spec is correct.
@pytest.mark.parametrize(
    "filter_args, expected_filter_spec",
    [
        (
            dict(
                filter_scale=10.0,
                dx_min=1.0,
                filter_shape=FilterShape.GAUSSIAN,
                transition_width=np.pi,
                ndim=2,
            ),
            FilterSpec(
                n_steps_total=10,
                s=[
                    8.0 + 0.0j,
                    3.42929331 + 0.0j,
                    7.71587822 + 0.0j,
                    2.41473596 + 0.0j,
                    7.18021542 + 0.0j,
                    1.60752541 + 0.0j,
                    6.42502377 + 0.0j,
                    0.81114415 - 0.55260985j,
                    5.50381534 + 0.0j,
                    4.48146765 + 0.0j,
                ],
                is_laplacian=[
                    True,
                    True,
                    True,
                    True,
                    True,
                    True,
                    True,
                    False,
                    True,
                    True,
                ],
                s_max=8.0,
                p=[
                    0.09887381,
                    -0.19152534,
                    0.1748326,
                    -0.14975371,
                    0.12112337,
                    -0.09198484,
                    0.0662522,
                    -0.04479323,
                    0.02895827,
                    -0.0173953,
                    0.00995974,
                    -0.00454758,
                ],
            ),
        ),
        (
            dict(
                filter_scale=2.0,
                dx_min=1.0,
                filter_shape=FilterShape.TAPER,
                transition_width=np.pi,
                ndim=1,
            ),
            FilterSpec(
                n_steps_total=3,
                s=[
                    5.23887374 - 1.09644141j,
                    -0.76856043 - 1.32116962j,
                    3.00058907 - 2.95588288j,
                ],
                is_laplacian=[False, False, False],
                s_max=4.0,
                p=[
                    0.83380304,
                    -0.23622724,
                    -0.06554041,
                    0.01593978,
                    0.00481014,
                    -0.00495532,
                    0.00168445,
                ],
            ),
        ),
    ],
)
def test_filter_spec(filter_args, expected_filter_spec):
    """This test just verifies that the filter specification looks as expected."""
    # Regression check: the freshly computed spec must match the recorded one.
    # NOTE(review): `filter` shadows the builtin of the same name in this test.
    filter = Filter(**filter_args)
    _check_equal_filter_spec(filter.filter_spec, expected_filter_spec)
    # TODO: check other properties of filter_spec?
# define (for now: hard-code) which grids are associated with vector Laplacians
vector_grids = [gt for gt in GridType if gt.name in {"VECTOR_C_GRID"}]
# all remaining grids are for scalar Laplacians
scalar_grids = [gt for gt in GridType if gt not in vector_grids]
@pytest.fixture(scope="module", params=scalar_grids)
def grid_type_and_input_ds(request):
    """Yield (grid_type, input DataArray, grid_vars dict) for each scalar-Laplacian grid."""
    grid_type = request.param
    ny, nx = (128, 256)
    data = np.random.rand(ny, nx)
    # Grids with no extra metadata keep an empty grid_vars dict.
    grid_vars = {}
    if grid_type == GridType.REGULAR_WITH_LAND:
        # Land mask: zero out the lower-left quadrant.
        mask_data = np.ones_like(data)
        mask_data[: (ny // 2), : (nx // 2)] = 0
        da_mask = xr.DataArray(mask_data, dims=["y", "x"])
        grid_vars = {"wet_mask": da_mask}
    if grid_type == GridType.IRREGULAR_WITH_LAND:
        mask_data = np.ones_like(data)
        mask_data[: (ny // 2), : (nx // 2)] = 0
        da_mask = xr.DataArray(mask_data, dims=["y", "x"])
        # Unit spacings/areas/kappas: trivially "irregular" metadata.
        grid_data = np.ones_like(data)
        da_grid = xr.DataArray(grid_data, dims=["y", "x"])
        grid_vars = {
            "wet_mask": da_mask,
            "dxw": da_grid,
            "dyw": da_grid,
            "dxs": da_grid,
            "dys": da_grid,
            "area": da_grid,
            "kappa_w": da_grid,
            "kappa_s": da_grid,
        }
    if grid_type == GridType.TRIPOLAR_REGULAR_WITH_LAND:
        mask_data = np.ones_like(data)
        mask_data[: (ny // 2), : (nx // 2)] = 0
        mask_data[0, :] = 0  # Antarctica
        da_mask = xr.DataArray(mask_data, dims=["y", "x"])
        grid_vars = {"wet_mask": da_mask}
    if grid_type == GridType.TRIPOLAR_POP_WITH_LAND:
        mask_data = np.ones_like(data)
        mask_data[: (ny // 2), : (nx // 2)] = 0
        mask_data[0, :] = 0  # Antarctica
        da_mask = xr.DataArray(mask_data, dims=["y", "x"])
        grid_data = np.ones_like(data)
        da_grid = xr.DataArray(grid_data, dims=["y", "x"])
        grid_vars = {
            "wet_mask": da_mask,
            "dxe": da_grid,
            "dye": da_grid,
            "dxn": da_grid,
            "dyn": da_grid,
            "tarea": da_grid,
        }
    da = xr.DataArray(data, dims=["y", "x"])
    return grid_type, da, grid_vars
@pytest.fixture(scope="module", params=vector_grids)
def vector_grid_type_and_input_ds(request):
    """Yield (grid_type, u, v, grid_vars, geolat_u) for each vector-Laplacian grid."""
    grid_type = request.param
    ny, nx = (128, 256)
    grid_vars = {}
    # NOTE(review): geolat_u is only bound inside the VECTOR_C_GRID branch;
    # that is fine while it is the only vector grid, but the return statement
    # would raise NameError for any future vector grid type.
    if grid_type == GridType.VECTOR_C_GRID:
        # construct spherical coordinate system similar to MOM6 NeverWorld2 grid
        # define latitudes and longitudes
        lat_min = -70
        lat_max = 70
        lat_u = np.linspace(
            lat_min + 0.5 * (lat_max - lat_min) / ny,
            lat_max - 0.5 * (lat_max - lat_min) / ny,
            ny,
        )
        lat_v = np.linspace(lat_min + (lat_max - lat_min) / ny, lat_max, ny)
        lon_min = 0
        lon_max = 60
        lon_u = np.linspace(lon_min + (lon_max - lon_min) / nx, lon_max, nx)
        lon_v = np.linspace(
            lon_min + 0.5 * (lon_max - lon_min) / nx,
            lon_max - 0.5 * (lon_max - lon_min) / nx,
            nx,
        )
        (geolon_u, geolat_u) = np.meshgrid(lon_u, lat_u)
        (geolon_v, geolat_v) = np.meshgrid(lon_v, lat_v)
        # radius of a random planet smaller than Earth
        R = 6378000 * np.random.rand(1)
        # dx varies spatially
        dxCu = R * np.cos(geolat_u / 360 * 2 * np.pi)
        dxCv = R * np.cos(geolat_v / 360 * 2 * np.pi)
        dxBu = dxCv + np.roll(dxCv, -1, axis=1)
        dxT = dxCu + np.roll(dxCu, 1, axis=1)
        da_dxCu = xr.DataArray(dxCu, dims=["y", "x"])
        da_dxCv = xr.DataArray(dxCv, dims=["y", "x"])
        da_dxBu = xr.DataArray(dxBu, dims=["y", "x"])
        da_dxT = xr.DataArray(dxT, dims=["y", "x"])
        # dy is set constant, equal to dx at the equator
        dy = np.max(dxCu) * np.ones((ny, nx))
        da_dy = xr.DataArray(dy, dims=["y", "x"])
        # compute grid cell areas
        area_u = dxCu * dy
        area_v = dxCv * dy
        da_area_u = xr.DataArray(area_u, dims=["y", "x"])
        da_area_v = xr.DataArray(area_v, dims=["y", "x"])
        # set isotropic and anisotropic kappas
        kappa_data = np.ones((ny, nx))
        da_kappa = xr.DataArray(kappa_data, dims=["y", "x"])
        # put a big island in the middle
        mask_data = np.ones((ny, nx))
        mask_data[: (ny // 2), : (nx // 2)] = 0
        da_mask = xr.DataArray(mask_data, dims=["y", "x"])
        grid_vars = {
            "wet_mask_t": da_mask,
            "wet_mask_q": da_mask,
            "dxT": da_dxT,
            "dyT": da_dy,
            "dxCu": da_dxCu,
            "dyCu": da_dy,
            "dxCv": da_dxCv,
            "dyCv": da_dy,
            "dxBu": da_dxBu,
            "dyBu": da_dy,
            "area_u": da_area_u,
            "area_v": da_area_v,
            "kappa_iso": da_kappa,
            "kappa_aniso": da_kappa,
        }
    data_u = np.random.rand(ny, nx)
    data_v = np.random.rand(ny, nx)
    da_u = xr.DataArray(data_u, dims=["y", "x"])
    da_v = xr.DataArray(data_v, dims=["y", "x"])
    return grid_type, da_u, da_v, grid_vars, geolat_u
#################### Diffusion-based filter tests ########################################
@pytest.mark.parametrize(
    "filter_args",
    [dict(filter_scale=3.0, dx_min=1.0, n_steps=0, filter_shape=FilterShape.GAUSSIAN)],
)
def test_diffusion_filter(grid_type_and_input_ds, filter_args):
    """Test all diffusion-based filters: filters that use a scalar Laplacian."""
    # NOTE(review): `filter` shadows the builtin of the same name in this test.
    grid_type, da, grid_vars = grid_type_and_input_ds
    filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **filter_args)
    filter.plot_shape()
    filtered = filter.apply(da, dims=["y", "x"])
    # check conservation
    # this would need to be replaced by a proper area-weighted integral
    da_sum = da.sum()
    filtered_sum = filtered.sum()
    xr.testing.assert_allclose(da_sum, filtered_sum)
    # check that we get an error if we pass scalar Laplacian to .apply_to vector,
    # where the latter method is for vector Laplacians only
    with pytest.raises(ValueError, match=r"Provided Laplacian *"):
        filtered_u, filtered_v = filter.apply_to_vector(da, da, dims=["y", "x"])
    # check variance reduction
    assert (filtered ** 2).sum() < (da ** 2).sum()
    # check that we get an error if we leave out any required grid_vars
    for gv in grid_vars:
        grid_vars_missing = {k: v for k, v in grid_vars.items() if k != gv}
        with pytest.raises(ValueError, match=r"Provided `grid_vars` .*"):
            filter = Filter(
                grid_type=grid_type, grid_vars=grid_vars_missing, **filter_args
            )
    # Use a copy so the shared parametrized dict is never mutated.
    bad_filter_args = copy.deepcopy(filter_args)
    # check that we get an error if ndim > 2 and n_steps = 0
    bad_filter_args["ndim"] = 3
    bad_filter_args["n_steps"] = 0
    with pytest.raises(ValueError, match=r"When ndim > 2, you .*"):
        filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
    # check that we get a warning if n_steps < n_steps_default
    bad_filter_args["ndim"] = 2
    bad_filter_args["n_steps"] = 3
    with pytest.warns(UserWarning, match=r"Warning: You have set n_steps .*"):
        filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
    # check that we get a warning if numerical instability possible
    bad_filter_args["n_steps"] = 0
    bad_filter_args["filter_scale"] = 1000
    with pytest.warns(UserWarning, match=r"Warning: Filter scale much larger .*"):
        filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
#################### Visosity-based filter tests ########################################
@pytest.mark.parametrize(
    "filter_args",
    [dict(filter_scale=1.0, dx_min=1.0, n_steps=10, filter_shape=FilterShape.TAPER)],
)
def test_viscosity_filter(vector_grid_type_and_input_ds, filter_args):
    """Test all viscosity-based filters: filters that use a vector Laplacian."""
    grid_type, da_u, da_v, grid_vars, geolat_u = vector_grid_type_and_input_ds
    filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **filter_args)
    filtered_u, filtered_v = filter.apply_to_vector(da_u, da_v, dims=["y", "x"])
    # check conservation under solid body rotation: u = cos(lat), v=0;
    data_u = np.cos(geolat_u / 360 * 2 * np.pi)
    data_v = np.zeros_like(data_u)
    da_u = xr.DataArray(data_u, dims=["y", "x"])
    da_v = xr.DataArray(data_v, dims=["y", "x"])
    filtered_u, filtered_v = filter.apply_to_vector(da_u, da_v, dims=["y", "x"])
    # A solid-body rotation field should pass through the filter unchanged.
    xr.testing.assert_allclose(filtered_u, da_u, atol=1e-12)
    xr.testing.assert_allclose(filtered_v, da_v, atol=1e-12)
    # check that we get an error if we pass vector Laplacian to .apply, where
    # the latter method is for scalar Laplacians only
    with pytest.raises(ValueError, match=r"Provided Laplacian *"):
        filtered_u = filter.apply(da_u, dims=["y", "x"])
    # check that we get an error if we leave out any required grid_vars
    for gv in grid_vars:
        grid_vars_missing = {k: v for k, v in grid_vars.items() if k != gv}
        with pytest.raises(ValueError, match=r"Provided `grid_vars` .*"):
            filter = Filter(
                grid_type=grid_type, grid_vars=grid_vars_missing, **filter_args
            )
|
nilq/baby-python
|
python
|
import configparser
import logging
import os
import shutil
from pathlib import Path
from urllib.error import URLError
import intake
import matplotlib.image as mplimg
import pandas as pd
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
pkg_name = __name__.split(".")[0]
configpath = Path.home() / ".{}.ini".format(pkg_name)
LOGGER = logging.getLogger(__name__)
def get_config():
    """Read the configfile and return config dict.
    Returns
    -------
    dict
        Dictionary with the content of the configpath file.
    Raises
    ------
    IOError
        If the config file does not exist yet.
    """
    if not configpath.exists():
        raise IOError("Config file {} not found.".format(str(configpath)))
    parser = configparser.ConfigParser()
    parser.read(str(configpath))
    return parser
def get_data_root():
    """Return the configured planet4 data root as a Path, creating it if needed."""
    config = get_config()
    root = Path(config["planet4_db"]["path"]).expanduser()
    root.mkdir(exist_ok=True, parents=True)
    return root
def set_database_path(dbfolder):
    """Use to write the database path into the config.
    Parameters
    ----------
    dbfolder : str or pathlib.Path
        Path to where planet4 will store clustering results by default.
    """
    # Reuse the existing config when present; start fresh otherwise.
    try:
        config = get_config()
    except IOError:
        config = configparser.ConfigParser()
    config["planet4_db"] = {}
    config["planet4_db"]["path"] = dbfolder
    with configpath.open("w") as f:
        config.write(f)
    print("Saved database path into {}.".format(configpath))
# module global data_root !
# NOTE(review): this runs at import time and prompts on stdin when no config
# exists -- confirm that is acceptable for library (non-interactive) use.
if not configpath.exists():
    print("No configuration file {} found.\n".format(configpath))
    savepath = input("Please provide the path where you want to store planet4 meta-data:")
    set_database_path(savepath)
data_root = get_data_root()
def get_subframe(url):
    """Return the image at *url* as a numpy array, downloading it on first use.

    The file is cached under ``data_root / "images"``; subsequent calls read
    the cached copy. Returns None when the image is not cached and cannot be
    downloaded.
    """
    targetpath = data_root / "images" / os.path.basename(url)
    targetpath.parent.mkdir(exist_ok=True)
    if targetpath.exists():
        LOGGER.debug("Found image in cache.")
    else:
        LOGGER.info("Did not find image in cache. Downloading ...")
        try:
            downloaded = urlretrieve(url)[0]
        except URLError:
            msg = "Image not in cache. Cannot download subframe image. No internet?"
            LOGGER.error(msg)
            return None
        LOGGER.debug("Done.")
        shutil.move(downloaded, str(targetpath))
    return mplimg.imread(targetpath)
def get_url_for_tile_id(tile_id):
    """Return the image URL for *tile_id*, caching the URL catalog as a local CSV."""
    storagepath = data_root / "catalogs/tile_urls.csv"
    storagepath.parent.mkdir(exist_ok=True)
    if storagepath.exists():
        urls = pd.read_csv(storagepath).set_index("tile_id").squeeze()
    else:
        # First use: fetch the catalog and persist it for later calls.
        fetched = intake.cat.planet4.tile_urls.read()
        fetched.to_csv(storagepath, index=False)
        urls = fetched.set_index("tile_id").squeeze()
    return urls.at[tile_id]
def get_intake_p4_item(item_name, update=False):
    """Return a planet4 intake catalog item as a DataFrame, using a local CSV cache.

    Parameters
    ----------
    item_name : str
        Name of the item in the planet4 intake catalog (e.g. "fans").
    update : bool, optional
        When True, re-download the catalog even if a cached copy exists.
    """
    fname = item_name + ".csv"
    storagepath = data_root / f"catalogs/{fname}"
    storagepath.parent.mkdir(exist_ok=True, parents=True)
    if not storagepath.exists() or update is True:
        s = "Downloading catalog"
        if update:
            # BUG FIX: the original evaluated `s + " for update"` and discarded
            # the result, so the suffix never appeared in the message.
            s += " for update"
        print(s)
        df = getattr(intake.cat.planet4, item_name).read()
        df.to_csv(storagepath, index=False)
    else:
        df = pd.read_csv(storagepath)
    return df
def get_blotch_catalog(update=False):
    """Return the planet4 'blotches' catalog (cached; re-download when *update*)."""
    return get_intake_p4_item("blotches", update)
def get_fan_catalog(update=False):
    """Return the planet4 'fans' catalog (cached; re-download when *update*)."""
    return get_intake_p4_item("fans", update)
def get_tile_coordinates(update=False):
    """Return the planet4 'tile_coordinates' catalog (cached)."""
    return get_intake_p4_item("tile_coordinates", update)
def get_meta_data(update=False):
    """Return the planet4 'meta_data' catalog (cached)."""
    return get_intake_p4_item("meta_data", update)
def get_region_names(update=False):
    """Return the planet4 'region_names' catalog (cached)."""
    return get_intake_p4_item("region_names", update)
def get_tile_urls(update=False):
    """Return the planet4 'tile_urls' catalog (cached)."""
    return get_intake_p4_item("tile_urls", update)
def update_local_catalog_files():
    """Force a re-download of every locally cached planet4 catalog item."""
    items = "blotches fans tile_coordinates meta_data region_names tile_urls".split()
    for item in items:
        print("Updating", item)
        get_intake_p4_item(item, update=True)
|
nilq/baby-python
|
python
|
# NOTE(review): this looks like an auto-generated IronPython stub for a .NET
# API class (Element/IDisposable, ReleaseUnmanagedResources) -- presumably the
# Revit API; confirm before hand-editing, as stubs are normally regenerated.
class Instance(Element,IDisposable):
    """ The base class for all instance objects. """
    def Dispose(self):
        """ Dispose(self: Element,A_0: bool) """
        pass
    def getBoundingBox(self,*args):
        """ getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
        pass
    def GetTotalTransform(self):
        """
        GetTotalTransform(self: Instance) -> Transform
        Gets the total transform,which includes the true north transform for instances
        like import instances.
        Returns: The calculated total transform.
        """
        pass
    def GetTransform(self):
        """
        GetTransform(self: Instance) -> Transform
        Gets the transform of the instance.
        Returns: The inherent transform.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: Element,disposing: bool) """
        pass
    def setElementType(self,*args):
        """ setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
|
nilq/baby-python
|
python
|
from layers import *
from encoding import *
import matplotlib.pyplot as plt
import csv
import sys
import getopt
import random
# Path to save the parameters
filename = 'parameters.npz'
# Train the RNN with the given parameters
def train(learning_rate, units, epochs):
    """Train the RNN on strain names from cannabis.csv and plot the cost curve.

    Parameters
    ----------
    learning_rate : float
        Step size for the weight updates.
    units : int
        Number of hidden units (used only when no saved parameters exist).
    epochs : int
        Number of passes over the whole dataset.
    """
    # Try to load the parameters if they are saved, create a new RNN with the specified units otherwise
    rnn = RNN(filename=filename, units=units)
    # Extract the strain names from the dataset
    with open('cannabis.csv', newline='', encoding="utf-8") as csvfile:
        cannabis_data = csv.reader(csvfile)
        names_oh = []
        excluded_names = 0
        print('Loading weed strain names from database...')
        # The first column of the data contains the strain name
        for row in cannabis_data:
            # Replace hyphens with spaces
            name = row[0].replace('-', ' ').lower()
            # Add the end token to the name
            name = name + '>'
            # Convert to one-hot vector and append to the array
            valid, name_oh = one_hot_string(name)
            # Only append the name if it's valid (no numbers in it)
            if valid:
                names_oh.append(name_oh)
            else:
                excluded_names += 1
        # First row is metadata so delete it
        names_oh = names_oh[1:]
        print('{} names were excluded because they contained numbers or other invalid characters. {} names remain.'.format(excluded_names, len(names_oh)))
    # Keep track of the average cost in each epoch
    costs = []
    print('==============================================')
    print('Training for {} epochs with learning_rate={}'.format(epochs, learning_rate))
    for e in range(epochs):
        cost = 0
        for name_oh in names_oh:
            # Apply forward-propagation
            cost += rnn(name_oh)
            # Backpropagate and update weights of the RNN
            rnn.backpropagate()
            rnn.update_weights(learning_rate)
        cost /= len(names_oh)
        print('(Epoch {}/{}) Cost = {}'.format(e + 1, epochs, cost), end='\r')
        costs.append(cost)
    print('Training finished, Cost: {} -> {}'.format(costs[0], costs[-1]))
    print('==============================================')
    # Save the updated parameters
    rnn.save_parameters(filename)
    # Plot the cost in each epoch
    plt.plot(costs, color='r')
    # Change the name of the window
    fig = plt.gcf()
    # FIX: FigureCanvasBase.set_window_title was deprecated in matplotlib 3.4
    # and removed in 3.6; the window title now lives on the figure manager.
    if fig.canvas.manager is not None:
        fig.canvas.manager.set_window_title('WEED LMAO')
    plt.ylabel('Cost')
    plt.xlabel('Epoch')
    plt.show()
# Generate a name with the trained RNN
def gen_names():
    """Interactively generate strain names until the user enters a backslash."""
    # Load the RNN from file
    rnn = RNN(filename=filename)
    print('Input how the name should start. Leave blank if you want it completely random and type \\ to exit')
    while True:
        # Read the desired name prefix, lowercased.
        start = input().lower()
        if start == '\\':
            return
        if not start:
            # No prefix given: pick a random letter. Indices 1 .. n_letters-2
            # skip the space and the end-token slots.
            start = letters[random.randint(1, n_letters - 2)]
        # Generate the string if the input is valid
        valid, gen_strain = rnn.gen_name(start)
        if not valid:
            print('Input contains invalid characters. Only use letters a-z and spaces.')
        else:
            print(gen_strain)
def train_args(arg_list):
    """Parse '-r rate -u units -e epochs' options from *arg_list* and run train()."""
    opts, _ = getopt.getopt(arg_list, 'r:u:e:')
    # Defaults used when an option is not supplied.
    learning_rate = 0.07
    units = 32
    epochs = 100
    for opt, value in opts:
        if opt == '-r':
            learning_rate = float(value)
        elif opt == '-u':
            units = int(value)
        elif opt == '-e':
            epochs = int(value)
    train(learning_rate, units, epochs)
if __name__ == '__main__':
    # FIX: guard against missing sub-command (the original raised IndexError
    # when run with no arguments). Existing invocations behave unchanged.
    if len(sys.argv) < 2:
        print('Usage: {} train|generate [options]'.format(sys.argv[0]))
    elif sys.argv[1] == 'train':
        train_args(sys.argv[2:])
    elif sys.argv[1] == 'generate':
        gen_names()
|
nilq/baby-python
|
python
|
def selection_sort(some_list):
    """
    https://en.wikipedia.org/wiki/Selection_sort
    In-place selection sort: the prefix some_list[:i] is always sorted; each
    pass finds the minimum of the unsorted suffix and swaps it into place.
    Returns (iteration_count, the now-sorted list). O(N^2).
    """
    iterations = 0
    length = len(some_list)
    for boundary in range(length - 1):
        iterations += 1
        smallest = boundary  # Always reset the minimum for each pass
        for candidate in range(boundary + 1, length):
            iterations += 1
            if some_list[candidate] < some_list[smallest]:
                smallest = candidate
        # Swap only when a strictly smaller element was found.
        if smallest != boundary:
            some_list[boundary], some_list[smallest] = some_list[smallest], some_list[boundary]
    return iterations, some_list
|
nilq/baby-python
|
python
|
"""
Boolean Satisfiability
Interface Classes:
DPLLInterface
Interface Functions:
backtrack
iter_backtrack
dpll
"""
import random
class DPLLInterface(object):
    """DPLL algorithm interface.

    Concrete boolean-function representations implement bcp() and ple() so the
    dpll() driver below can run on them.
    """
    def bcp(self):
        """Boolean Constraint Propagation
        Return an untyped point that results from unit propagation.
        If BCP detects a contradiction, return None.
        """
        raise NotImplementedError()
    def ple(self):
        """Pure Literal Elimination
        Return an untyped point that results from pure literal elimination.
        If PLE detects a contradiction, return None.
        """
        raise NotImplementedError()
def backtrack(bf):
    """
    If this function is satisfiable, return a satisfying input upoint.
    Otherwise, return None.
    """
    if bf.is_zero():
        return None
    if bf.is_one():
        return frozenset(), frozenset()
    # Branch on the top variable, trying the 0-assignment first.
    var = bf.top
    #var = random.choice(bf.inputs)
    zero_branch = frozenset([var.uniqid]), frozenset()
    one_branch = frozenset(), frozenset([var.uniqid])
    for branch in (zero_branch, one_branch):
        sub = backtrack(bf.urestrict(branch))
        if sub is not None:
            # Merge this branch's assignment with the recursive solution.
            return branch[0] | sub[0], branch[1] | sub[1]
    return None
def iter_backtrack(bf, rand=False):
    """Iterate through all satisfying points using backtrack algorithm.

    When *rand* is true, both the branch variable and the branch order are
    randomized; otherwise the top variable is used and the 0-branch is
    explored first.
    """
    if bf.is_one():
        yield frozenset(), frozenset()
    elif not bf.is_zero():
        # FIX: the original evaluated `random.choice(bf.inputs) if rand else
        # bf.top` inside the `if rand:` branch, where rand is already known to
        # be True -- the conditional was redundant.
        if rand:
            v = random.choice(bf.inputs)
        else:
            v = bf.top
        upnt0 = frozenset([v.uniqid]), frozenset()
        upnt1 = frozenset(), frozenset([v.uniqid])
        upoints = [upnt0, upnt1]
        if rand:
            random.shuffle(upoints)
        for upnt in upoints:
            for bt_upnt in iter_backtrack(bf.urestrict(upnt), rand):
                yield (upnt[0] | bt_upnt[0], upnt[1] | bt_upnt[1])
def dpll(cnf):
    """
    Davis-Putnam-Logemann-Loveland (DPLL) Algorithm

    Returns a satisfying untyped point (a pair of frozensets of variable
    uniqids: those assigned 0 and those assigned 1), or None when *cnf* is
    unsatisfiable. *cnf* must implement the DPLLInterface methods plus
    is_zero/is_one/top/urestrict.
    """
    if cnf.is_zero():
        ret = None
    elif cnf.is_one():
        ret = frozenset(), frozenset()
    else:
        # 1. Boolean constraint propagation
        bcp_upnt = cnf.bcp()
        if bcp_upnt is None:
            # BCP found a contradiction
            ret = None
        else:
            bcp_cnf = cnf.urestrict(bcp_upnt)
            if bcp_cnf.is_one():
                # BCP found a solution
                ret = bcp_upnt
            else:
                # 2. Pure literal elimination
                ple_upnt = bcp_cnf.ple()
                bcp_ple_cnf = bcp_cnf.urestrict(ple_upnt)
                # Accumulate the BCP and PLE assignments.
                bcp_ple_upnt = (bcp_upnt[0] | ple_upnt[0],
                                bcp_upnt[1] | ple_upnt[1])
                if bcp_ple_cnf.is_one():
                    # PLE found a solution
                    ret = bcp_ple_upnt
                else:
                    # 3. Variable selection heuristic
                    v = bcp_ple_cnf.top
                    #v = random.choice(bcp_ple_cnf.inputs)
                    # 4. Backtrack
                    upnt0 = (bcp_ple_upnt[0] | {v.uniqid}, bcp_ple_upnt[1])
                    upnt1 = (bcp_ple_upnt[0], bcp_ple_upnt[1] | {v.uniqid})
                    # Recurse on each branch; the for/else runs only when
                    # neither branch yields a solution.
                    for upnt in [upnt0, upnt1]:
                        bt_upnt = dpll(bcp_ple_cnf.urestrict(upnt))
                        if bt_upnt is not None:
                            # Backtrack found a solution
                            ret = (upnt[0] | bt_upnt[0], upnt[1] | bt_upnt[1])
                            break
                    else:
                        # Backtrack found a contradiction
                        ret = None
    return ret
|
nilq/baby-python
|
python
|
import numpy as np
class Constant(object):
    """
    Concatenates a constant value to the node attributes.
    **Arguments**
    - `value`: the value to concatenate to the node attributes.
    """

    def __init__(self, value):
        self.value = value

    def __call__(self, graph):
        # One new column per node, holding the constant value.
        extra = np.zeros((graph.n_nodes, 1)) + self.value
        graph.x = extra if graph.x is None else np.concatenate((graph.x, extra), axis=-1)
        return graph
|
nilq/baby-python
|
python
|
import glob
from os import path as osp
import numpy as np
import pytest
import tqdm
import habitat_sim
NUM_TESTS = 100
TURN_DEGREE = 30.0
# Minimum acceptable SPL (Success weighted by Path Length) per
# (move_filter_fn, action_noise) combination.
ACCEPTABLE_SPLS = {
    ("try_step", False): 0.97,
    ("try_step_no_sliding", False): 0.925,
    ("try_step", True): 0.82,
    ("try_step_no_sliding", True): 0.60,
}
base_dir = osp.abspath(osp.join(osp.dirname(__file__), ".."))
test_navmeshes = [
    osp.join(base_dir, "data/scene_datasets/mp3d/17DRP5sb8fy/17DRP5sb8fy.navmesh"),
    osp.join(
        base_dir, "data/scene_datasets/habitat-test-scenes/skokloster-castle.navmesh"
    ),
    osp.join(base_dir, "data/scene_datasets/habitat-test-scenes/van-gogh-room.navmesh"),
]
# When test_all is True, every navmesh found in the datasets below is added.
test_all = False
gibson_base = osp.join(base_dir, "data/scene_datasets/gibson")
if test_all and osp.exists(gibson_base):
    test_navmeshes += glob.glob(f"{gibson_base}/*.navmesh")
mp3d_base = osp.join(base_dir, "data/scene_datasets/mp3d")
if test_all and osp.exists(mp3d_base):
    test_navmeshes += glob.glob(f"{mp3d_base}/*/*.navmesh")
mp3d_example_base = osp.join(base_dir, "data/scene_datasets/mp3d_example")
if test_all and osp.exists(mp3d_example_base):
    test_navmeshes += glob.glob(f"{mp3d_example_base}/*/*.navmesh")
@pytest.fixture(scope="module", params=None)
def pbar():
    """Module-wide progress bar for exhaustive runs; None otherwise."""
    if not test_all:
        return None
    return tqdm.tqdm(total=len(test_navmeshes) * NUM_TESTS)
# Cross-test accumulators, updated only when test_all is enabled.
num_fails = 0.0
num_tested = 0
total_spl = 0.0
@pytest.mark.parametrize("test_navmesh", test_navmeshes)
@pytest.mark.parametrize("move_filter_fn", ["try_step", "try_step_no_sliding"])
@pytest.mark.parametrize("action_noise", [False, True])
def test_greedy_follower(test_navmesh, move_filter_fn, action_noise, pbar):
    """Run the greedy geodesic follower on random start/goal pairs and check
    that the average SPL clears the threshold for this configuration."""
    global num_fails
    global num_tested
    global total_spl
    if not osp.exists(test_navmesh):
        pytest.skip(f"{test_navmesh} not found")
    pathfinder = habitat_sim.PathFinder()
    pathfinder.load_nav_mesh(test_navmesh)
    assert pathfinder.is_loaded
    # Fix both RNGs so the sampled episodes are reproducible.
    pathfinder.seed(0)
    np.random.seed(seed=0)
    scene_graph = habitat_sim.SceneGraph()
    agent = habitat_sim.Agent(scene_graph.get_root_node().create_child())
    agent.controls.move_filter_fn = getattr(pathfinder, move_filter_fn)
    agent.agent_config.action_space["turn_left"].actuation.amount = TURN_DEGREE
    agent.agent_config.action_space["turn_right"].actuation.amount = TURN_DEGREE
    if action_noise:
        # "_" prefix the perfect actions so that we can use noisy actions instead
        agent.agent_config.action_space = {
            "_" + k: v for k, v in agent.agent_config.action_space.items()
        }
        agent.agent_config.action_space.update(
            **dict(
                move_forward=habitat_sim.ActionSpec(
                    "pyrobot_noisy_move_forward",
                    habitat_sim.PyRobotNoisyActuationSpec(amount=0.25),
                ),
                turn_left=habitat_sim.ActionSpec(
                    "pyrobot_noisy_turn_left",
                    habitat_sim.PyRobotNoisyActuationSpec(amount=TURN_DEGREE),
                ),
                turn_right=habitat_sim.ActionSpec(
                    "pyrobot_noisy_turn_right",
                    habitat_sim.PyRobotNoisyActuationSpec(amount=TURN_DEGREE),
                ),
            )
        )
    follower = habitat_sim.GreedyGeodesicFollower(
        pathfinder,
        agent,
        forward_key="move_forward",
        left_key="turn_left",
        right_key="turn_right",
    )
    test_spl = 0.0
    for _ in range(NUM_TESTS):
        follower.reset()
        state = habitat_sim.AgentState()
        # Sample a start/goal pair that is navigable and non-trivially far apart.
        while True:
            state.position = pathfinder.get_random_navigable_point()
            goal_pos = pathfinder.get_random_navigable_point()
            path = habitat_sim.ShortestPath()
            path.requested_start = state.position
            path.requested_end = goal_pos
            if pathfinder.find_path(path) and path.geodesic_distance > 2.0:
                break
        agent.state = state
        failed = False
        gt_geo = path.geodesic_distance
        agent_distance = 0.0
        last_xyz = state.position
        num_acts = 0
        # If there is not action noise, then we can use find_path to get all the actions
        if not action_noise:
            try:
                action_list = follower.find_path(goal_pos)
            except habitat_sim.errors.GreedyFollowerError:
                action_list = [None]
        while True:
            # If there is action noise, we need to plan a single action, actually take it, and repeat
            if action_noise:
                try:
                    next_action = follower.next_action_along(goal_pos)
                except habitat_sim.errors.GreedyFollowerError:
                    break
            else:
                next_action = action_list[0]
                action_list = action_list[1:]
            if next_action is None:
                break
            agent.act(next_action)
            # Track the actual distance walked for the SPL denominator.
            agent_distance += np.linalg.norm(last_xyz - agent.state.position)
            last_xyz = agent.state.position
            num_acts += 1
            if num_acts > 1e4:
                break
        end_state = agent.state
        path.requested_start = end_state.position
        pathfinder.find_path(path)
        # Success means ending within one forward step of the goal.
        failed = path.geodesic_distance > follower.forward_spec.amount
        spl = float(not failed) * gt_geo / max(gt_geo, agent_distance)
        test_spl += spl
        if test_all:
            num_fails += float(failed)
            num_tested += 1
            total_spl += spl
            pbar.set_postfix(
                num_fails=num_fails,
                failure_rate=num_fails / num_tested,
                spl=total_spl / num_tested,
            )
            pbar.update()
    if not test_all:
        assert test_spl / NUM_TESTS >= ACCEPTABLE_SPLS[(move_filter_fn, action_noise)]
|
nilq/baby-python
|
python
|
""" Views related to rsync or FTP account access. """
__author__ = "William Tucker"
__date__ = "2018-03-13"
__copyright__ = "Copyright 2019 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
from django.shortcuts import render, redirect
from uploader.ftp.forms import FtpPasswordChangeForm
from uploader.ftp.utils import generate_visible_ftp_password, set_ftp_password
def ftp_random_password(request):
    """Generate a fresh, visible FTP password for the requesting user, then return to browsing."""
    user = request.user
    generate_visible_ftp_password(user)
    return redirect('browse')
def ftp_access(request):
    """Display and process the FTP password change form.

    GET renders an empty form; a valid POST stores the new password and
    redirects to 'browse'; an invalid POST re-renders the bound form with errors.
    """
    if request.method == 'POST':
        form = FtpPasswordChangeForm(request.POST)
        if form.is_valid():
            new_password = form.cleaned_data.get('password')
            set_ftp_password(request.user, new_password)
            return redirect('browse')
    else:
        form = FtpPasswordChangeForm()
    return render(request, 'uploader/ftp/access.html', {'form': form})
|
nilq/baby-python
|
python
|
from dataclasses import dataclass, field
from typing import List, Optional
# TODO: remove default Hydra pallets - pallets will become required parameter
# TODO: remove default Hydra pallets - pallets will become required parameter
PALLETS = ["amm", "exchange", "transaction_multi_payment"]


@dataclass
class Config:
    """Configuration for a benchmark run.

    Defaults describe a pallet-benchmark run against a local substrate checkout.
    """

    do_db_bench: bool = False               # run database benchmarks
    substrate_repo_path: str = "./substrate"  # local substrate checkout used for db benches
    do_pallet_bench: bool = True            # run pallet benchmarks
    performance_check: bool = False         # compare results against reference_values
    reference_values: Optional[str] = None  # path to reference results, if any
    dump_results: Optional[str] = None      # path to dump raw results, if any
    # Directory
    # TODO: support for file ( but if multiple pallets in one run - different files ?)
    output_dir: Optional[str] = None
    template: Optional[str] = None
    # `List[str]` replaces the invalid `[str]` annotation (a list literal, not a type).
    # `list(PALLETS)` copies the module constant so each Config instance owns its own
    # list — the previous `lambda: PALLETS` shared one mutable list across instances.
    pallets: List[str] = field(default_factory=lambda: list(PALLETS))
|
nilq/baby-python
|
python
|
import pyaudio
class AudioRecorder:
    """Context-managed microphone capture built on a PyAudio input stream.

    Opens the stream on construction; use as a context manager so the stream
    and the PyAudio session are released on exit.
    """

    def __init__(self, channels_=2, format_=pyaudio.paInt16, rate_=44100, chunk_=256):
        self.channels = channels_
        self.format = format_
        self.rate = rate_
        self.chunk = chunk_
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(
            format=format_,
            channels=channels_,
            rate=rate_,
            input=True,
            frames_per_buffer=chunk_,
        )

    def record_chunk(self):
        """Blocking read of one chunk of raw audio bytes from the input stream."""
        return self.stream.read(self.chunk)

    def __enter__(self):
        return self

    def __exit__(self, *arg):
        # Stop, close and tear down the PyAudio session in that order.
        self.stream.stop_stream()
        self.stream.close()
        self.audio.terminate()
class AudioPlayer:
    """Context-managed audio playback built on a PyAudio output stream.

    Mirrors AudioRecorder: the stream is opened on construction and released
    when the context manager exits.
    """

    def __init__(self, channels_=2, format_=pyaudio.paInt16, rate_=44100, chunk_=256):
        self.channels = channels_
        self.format = format_
        self.rate = rate_
        self.chunk = chunk_
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(
            format=format_,
            channels=channels_,
            rate=rate_,
            output=True,
        )

    def play_chunk(self, chunk):
        """Write one chunk of raw audio bytes to the output stream."""
        self.stream.write(chunk)

    def __enter__(self):
        return self

    def __exit__(self, *arg):
        # Stop, close and tear down the PyAudio session in that order.
        self.stream.stop_stream()
        self.stream.close()
        self.audio.terminate()
|
nilq/baby-python
|
python
|
import argparse
import logging
import gdk.commands.methods as methods
import gdk.common.parse_args_actions as actions
import pytest
def test_run_command_with_valid_namespace_without_debug(mocker):
    """run_command on a valid 'component init' namespace dispatches exactly one
    action and does not touch logging configuration when debug is absent."""
    namespace = argparse.Namespace(
        component="init", init=None, lang="python", template="name", **{"gdk": "component"}
    )
    build_spy = mocker.spy(methods, "_gdk_component_build")
    dispatch_spy = mocker.spy(actions, "call_action_by_name")
    resolver_spy = mocker.spy(actions, "get_method_from_command")
    logging_spy = mocker.spy(logging, "basicConfig")
    init_mock = mocker.patch("gdk.commands.methods._gdk_component_init", return_value=None)

    actions.run_command(namespace)

    assert init_mock.call_count == 1
    assert build_spy.call_count == 0
    assert dispatch_spy.call_count == 1
    assert resolver_spy.call_count == 3  # resolved recursively, once per namespace level
    assert logging_spy.call_count == 0
def test_run_command_with_valid_debug_enabled(mocker):
    """With debug=True, run_command sets the root logger to DEBUG before
    dispatching the single matching action."""
    namespace = argparse.Namespace(
        component="init", init=None, lang="python", template="name", **{"gdk": "component"}, debug=True
    )
    build_spy = mocker.spy(methods, "_gdk_component_build")
    dispatch_spy = mocker.spy(actions, "call_action_by_name")
    resolver_spy = mocker.spy(actions, "get_method_from_command")
    init_mock = mocker.patch("gdk.commands.methods._gdk_component_init", return_value=None)
    set_level_spy = mocker.spy(logging.getLogger(), "setLevel")

    actions.run_command(namespace)

    assert init_mock.call_count == 1
    assert build_spy.call_count == 0
    assert dispatch_spy.call_count == 1
    assert resolver_spy.call_count == 3  # resolved recursively, once per namespace level
    set_level_spy.assert_called_once_with(logging.DEBUG)
    # Sanity check: the spy really is specific about the level it saw.
    with pytest.raises(AssertionError):
        set_level_spy.assert_called_once_with(logging.WARN)
def test_run_command_with_invalid_namespace_method(mocker):
    """An unknown subcommand resolves to no method, so run_command exits."""
    namespace = argparse.Namespace(component="invalid", invalid=None, **{"gdk": "component"})
    resolver_spy = mocker.spy(actions, "get_method_from_command")
    dispatch_spy = mocker.spy(actions, "call_action_by_name")

    with pytest.raises(SystemExit):
        actions.run_command(namespace)

    assert dispatch_spy.call_count == 1  # dispatch attempted once, then aborted
    assert resolver_spy.call_count == 3  # resolved recursively, once per namespace level
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8 -*-
from django import forms
from .models import Image, UserProfile, Establishment
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.forms.widgets import TextInput, PasswordInput
from mysite.widgets import MyClearableFileInput
from municipios.widgets import SelectMunicipioWidget
class FormEstablishment(forms.ModelForm):
    """ModelForm for creating/editing an Establishment.

    Applies Bootstrap 'form-control' styling and Portuguese placeholders to
    every widget, and relaxes `required` on the optional fields.
    """

    class Meta:
        model = Establishment
        fields = ('name', 'address', 'ec_type', 'img_logo', 'img_vitrin', 'cnpj', 'insc_est', 'phone',
                  'site', 'email', 'zip_code')
        widgets = {
            "img_vitrin": MyClearableFileInput(),
            "img_logo": MyClearableFileInput(),
            "address": SelectMunicipioWidget(),
        }

    def __init__(self, *args, **kwargs):
        super(FormEstablishment, self).__init__(*args, **kwargs)
        self.fields['name'].widget.attrs = {'class': 'form-control', 'placeholder': 'Nome'}
        self.fields['address'].widget.attrs = {'class': 'form-control'}
        self.fields['ec_type'].widget.attrs = {'class': 'form-control'}
        self.fields['img_logo'].required = False
        self.fields['img_logo'].widget.attrs = {'class': 'form-control'}
        self.fields['img_vitrin'].required = False
        self.fields['img_vitrin'].widget.attrs = {'class': 'form-control'}
        self.fields['phone'].widget.attrs = {'class': 'form-control', 'placeholder': 'Telefone'}
        self.fields['email'].widget.attrs = {'class': 'form-control', 'placeholder': 'E-mail'}
        self.fields['site'].required = False
        self.fields['site'].widget.attrs = {'class': 'form-control', 'placeholder': 'Site'}
        self.fields['zip_code'].widget.attrs = {'class': 'form-control', 'placeholder': 'Cep'}
        self.fields['cnpj'].required = False
        self.fields['cnpj'].widget.attrs = {'class': 'form-control', 'placeholder': 'CNPJ'}
        self.fields['insc_est'].required = False
        # Fixed user-visible typo: 'Incrição Estadual' -> 'Inscrição Estadual'.
        self.fields['insc_est'].widget.attrs = {'class': 'form-control', 'placeholder': 'Inscrição Estadual'}
class WableAuthenticationForm(AuthenticationForm):
    """Login form with Bootstrap styling; the username field doubles as e-mail."""

    username = forms.CharField(
        widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'E-mail'}))
    password = forms.CharField(
        widget=PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Senha'}))
class WableRegistrationForm(UserCreationForm):
    """User sign-up form: UserCreationForm plus an (optional) e-mail field,
    Bootstrap styling and Portuguese placeholders."""

    email = forms.EmailField()

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'username', 'password1', 'password2', 'email')

    def __init__(self, *args, **kwargs):
        super(WableRegistrationForm, self).__init__(*args, **kwargs)
        # One pass sets the shared Bootstrap class plus each field's placeholder.
        placeholders = {
            'first_name': 'Nome',
            'last_name': 'Sobrenome',
            'email': 'E-mail',
            'username': 'E-mail ou número do celular',
            'password1': 'Senha',
            'password2': 'Confirme a senha',
        }
        for field_name, hint in placeholders.items():
            self.fields[field_name].widget.attrs = {'class': 'form-control', 'placeholder': hint}
        self.fields['first_name'].required = True
        self.fields['last_name'].required = True
        self.fields['email'].required = False

    def save(self, commit=True):
        """Persist the user, copying the form's e-mail onto the model first."""
        user = super(WableRegistrationForm, self).save(commit=False)
        user.email = self.cleaned_data["email"]
        if commit:
            user.save()
        return user
class UserForm(forms.ModelForm):
    """Profile-editing form for the built-in User model with Bootstrap styling."""

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')

    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        for field_name, hint in (('first_name', 'Nome'),
                                 ('last_name', 'Sobrenome'),
                                 ('email', 'E-mail')):
            self.fields[field_name].widget.attrs = {'class': 'form-control', 'placeholder': hint}
class UserProfileForm(forms.ModelForm):
    """Extra-profile form (phone, birthday, avatar, address) with Bootstrap styling."""

    class Meta:
        model = UserProfile
        fields = ('phone', 'birthday', 'image_field', 'address')
        widgets = {
            "image_field": MyClearableFileInput(),
            "address": SelectMunicipioWidget(),
        }

    def __init__(self, *args, **kwargs):
        super(UserProfileForm, self).__init__(*args, **kwargs)
        # Avatar is optional; the onChange hook previews the chosen file client-side.
        self.fields['image_field'].required = False
        self.fields['image_field'].widget.attrs = {'onChange': 'readFile(this);'}
        self.fields['birthday'].required = False
        for field_name, hint in (('birthday', 'dd/mm/aaaa'), ('phone', 'Telefone')):
            self.fields[field_name].widget.attrs = {'class': 'form-control', 'placeholder': hint}
        self.fields['address'].widget.attrs = {'class': 'form-control'}
class ImageForm(forms.ModelForm):
    """Bare image-upload form; the onChange hook shows a client-side preview."""

    class Meta:
        model = Image
        fields = ('image_field', 'cropping_free')
        labels = {
            'image_field': (''),
        }

    def __init__(self, *args, **kwargs):
        super(ImageForm, self).__init__(*args, **kwargs)
        self.fields['image_field'].widget.attrs = {'onChange': 'readURL(this);'}
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
# Author: mrdmnd@ (Matt Redmond)
# Based off of code in //depot/google3/experimental/mobile_gwp
"""Code to transport profile data between a user's machine and the CWP servers.
Pages:
"/": the main page for the app, left blank so that users cannot access
the file upload but left in the code for debugging purposes
"/upload": Updates the datastore with a new file. the upload depends on
the format which is templated on the main page ("/")
input includes:
profile_data: the zipped file containing profile data
board: the architecture we ran on
chromeos_version: the chromeos_version
"/serve": Lists all of the files in the datastore. Each line is a new entry
in the datastore. The format is key~date, where key is the entry's
key in the datastore and date is the file upload time and date.
(Authentication Required)
"/serve/([^/]+)?": For downloading a file of profile data, ([^/]+)? means
any character sequence so to download the file go to
'/serve/$key' where $key is the datastore key of the file
you want to download.
(Authentication Required)
"/del/([^/]+)?": For deleting an entry in the datastore. To use go to
'/del/$key' where $key is the datastore key of the entry
you want to be deleted form the datastore.
(Authentication Required)
TODO: Add more extensive logging"""
import cgi
import logging
import md5
import urllib
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
logging.getLogger().setLevel(logging.DEBUG)
class FileEntry(db.Model):
  """Datastore model for one uploaded profile-data file and its metadata."""
  profile_data = db.BlobProperty()  # The profile data
  date = db.DateTimeProperty(auto_now_add=True)  # Date it was uploaded
  data_md5 = db.ByteStringProperty()  # md5 of the profile data
  board = db.StringProperty()  # board arch
  chromeos_version = db.StringProperty()  # ChromeOS version
class MainPage(webapp.RequestHandler):
  """Main page only used as the form template, not actually displayed."""

  def get(self, response=''):  # pylint: disable-msg=C6409
    # NOTE(review): the opening tags are only written when a `response`
    # argument is given, but the form body is always written — presumably
    # intentional for debugging; confirm before relying on the markup.
    if response:
      self.response.out.write('<html><body>')
    self.response.out.write("""<br>
    <form action="/upload" enctype="multipart/form-data" method="post">
      <div><label>Profile Data:</label></div>
      <div><input type="file" name="profile_data"/></div>
      <div><label>Board</label></div>
      <div><input type="text" name="board"/></div>
      <div><label>ChromeOS Version</label></div>
      <div><input type="text" name="chromeos_version"></div>
      <div><input type="submit" value="send" name="submit"></div>
    </form>
  </body>
  </html>""")
class Upload(webapp.RequestHandler):
  """Handler for uploading data to the datastore, accessible by anyone."""

  def post(self):  # pylint: disable-msg=C6409
    """Takes input based on the main page's form.

    Stores the uploaded blob plus its md5, board and ChromeOS version, then
    writes the new datastore key back so the client can fetch it via /serve/<key>.
    """
    getfile = FileEntry()
    f1 = self.request.get('profile_data')
    getfile.profile_data = db.Blob(f1)
    # md5 of the raw upload, kept for integrity checks (Python 2 `md5` module).
    getfile.data_md5 = md5.new(f1).hexdigest()
    getfile.board = self.request.get('board')
    getfile.chromeos_version = self.request.get('chromeos_version')
    getfile.put()
    self.response.out.write(getfile.key())
    #self.redirect('/')
class ServeHandler(webapp.RequestHandler):
  """Given the entry's key in the database, output the profile data file. Only
  accessible from @google.com accounts."""

  def get(self, resource):  # pylint: disable-msg=C6409
    if Authenticate(self):
      # `resource` is the URL-encoded datastore key from /serve/<key>.
      file_key = str(urllib.unquote(resource))
      request = db.get(file_key)
      self.response.out.write(request.profile_data)
class ListAll(webapp.RequestHandler):
  """Displays all files uploaded. Only accessible by @google.com accounts."""

  def get(self):  # pylint: disable-msg=C6409
    """Displays all information in FileEntry, ~ delimited."""
    if Authenticate(self):
      query_str = 'SELECT * FROM FileEntry ORDER BY date ASC'
      query = db.GqlQuery(query_str)
      delimiter = '~'
      for item in query:
        display_list = [item.key(), item.date, item.data_md5, item.board,
                        item.chromeos_version]
        # cgi.escape so stored values cannot inject HTML into the listing.
        str_list = [cgi.escape(str(i)) for i in display_list]
        self.response.out.write(delimiter.join(str_list) + '</br>')
class DelEntries(webapp.RequestHandler):
  """Deletes entries. Only accessible from @google.com accounts."""

  def get(self, resource):  # pylint: disable-msg=C6409
    """A specific entry is deleted, when the key is given."""
    if Authenticate(self):
      fkey = str(urllib.unquote(resource))
      # Fetch first so we only delete keys that actually exist.
      request = db.get(fkey)
      if request:
        db.delete(fkey)
def Authenticate(webpage):
  """Gate handlers behind a logged-in @google.com account.

  Redirects anonymous users to the login page. Returns True only for
  @google.com accounts; any other account gets 'Not Authenticated'.
  """
  user = users.get_current_user()
  if user is None:
    webpage.redirect(users.create_login_url(webpage.request.uri))
    return False
  if user.email().endswith('@google.com'):
    return True
  webpage.response.out.write('Not Authenticated')
  return False
def main():
  """Map the URL routes to their handlers and start the WSGI application."""
  routes = [
      ('/', MainPage),
      ('/upload', Upload),
      ('/serve/([^/]+)?', ServeHandler),
      ('/serve', ListAll),
      ('/del/([^/]+)?', DelEntries),
  ]
  application = webapp.WSGIApplication(routes, debug=False)
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
|
nilq/baby-python
|
python
|
from typing import Iterable
import torch
from torch import Tensor
def to_np(arr):
    """Detach *arr* from the autograd graph, move it to host memory, return a NumPy array."""
    detached = arr.detach()
    return detached.cpu().numpy()
def to_t(t: Iterable, device: torch.device = 'cuda', dtype: torch.dtype = torch.float64) -> Tensor:
    """Coerce *t* to a Tensor on *device* with *dtype*.

    Existing tensors pass through unchanged (no device/dtype conversion).
    """
    if isinstance(t, Tensor):
        return t
    return torch.tensor(t, dtype=dtype, device=device)
@torch.jit.script
def pi() -> float:
    """Return pi as a Python float, computed as 2*arccos(0) in float64."""
    half_pi = torch.acos(torch.tensor(0., dtype=torch.float64))
    return 2 * half_pi.item()
@torch.jit.script
def length(t: Tensor) -> Tensor:
    """Euclidean norm of *t* along its last dimension."""
    squared_sum = (t * t).sum(-1)
    return torch.sqrt(squared_sum)
@torch.jit.script
def norm(t: Tensor) -> Tensor:
    """Return *t* scaled to unit Euclidean length; zero vectors pass through unchanged."""
    # Magnitude computed inline (same formula the module's `length` helper uses).
    magnitude = torch.sqrt((t ** 2).sum(-1))
    if magnitude > 0:
        return t / magnitude
    return t
@torch.jit.script
def get_2d_vector(vec: Tensor):
    """Collapse a 3D vector to 2D: [planar magnitude of (x, y), z], stacked on the last dim."""
    planar = torch.sqrt((vec[..., :2] ** 2).sum(-1))
    return torch.stack([planar, vec[..., 2]], -1)
|
nilq/baby-python
|
python
|
"""PivotCalculator
Pivot points is the top/bottom that the price has ever reached.
"""
from collections import deque, namedtuple
from operator import gt
class PivotCalculator(object):
    """Streaming detector of local extrema ("pivots") over a sliding window.

    Feed values one at a time via __call__; read `result` once at the end.
    `result[i]` tells whether the value at input index i was a window-local
    extremum under `cmp` (default `gt` -> local maxima). Internally this is a
    monotonic deque: each element records whether it entered as the best
    candidate (exit_check) and is confirmed only if it survives to the window
    boundary still at the front.
    """

    def __init__(self, window_size=5, cmp=gt):
        # window_size: number of consecutive samples an extremum must dominate.
        # cmp: comparison deciding dominance; gt finds maxima, lt finds minima.
        self.window_size = window_size
        self.cmp = cmp
        # exit_check: whether it should be considered as a local extremum
        # when it gets removed from the queue
        self.QE = namedtuple("QueueEelment", ["val", "idx", "exit_check"])
        self._q = deque()  # monotonic queue holding the local-extremum candidates
        self._idx = 0  # index of the current value to be processed.
        self._result = []  # one bool-ish flag per input index (filled with a lag)
        self._post_process_done = False  # guards the one-time flush in `result`

    def __call__(self, v):
        """Consume one value; appends to the result once enough values are seen."""
        is_extrim = False
        # XXX: local extremum <=> ENTER and EXIT checks are both True
        # ENTER: if it is a local extremum when it enters the queue
        #        there should be no other element in the queue
        while self._q and self.cmp(v, self._q[-1][0]):
            self._q.pop()
        exit_check = not self._q
        t = self.QE(v, self._idx, exit_check)
        self._q.append(t)
        # EXIT: if it is a local extremum point when it leaves the queue
        #       it should be still the best candidate (in the front).
        candidate = self._q[0]
        # e.g. windows_size = 5, candidate.idx = 0, self._idx = 4
        if self._idx - candidate.idx >= self.window_size - 1:
            self._q.popleft()
            if candidate.exit_check:
                is_extrim = True
        # DEBUG:
        #print(self._idx, "{:.2f}".format(v), self._q[0] if self._q else [],
        #      ["{:.2f}".format(e[0]) for e in self._q],
        #      self._idx - self.window_size, result)
        # Only after seeing window_size of elements we can tell if a local extremum is found or not.
        if self._idx >= self.window_size - 1:
            self._result.append(is_extrim)
        self._idx += 1

    def _post(self):
        """Flush flags for the trailing indices that never reached a full window."""
        for i in range(self._idx - self.window_size + 1, self._idx):
            # XXX: there should be maximum window_size-1 of elements left to be examined.
            # and only the first element is possible to be an extremum.
            # NOTE(review): when the deque is empty this appends the empty deque
            # itself (falsy) rather than False — harmless in boolean use, but
            # confirm before relying on the exact values in `result`.
            is_extrim = self._q and self._q[0].idx == i and self._q[0].exit_check
            self._result.append(is_extrim)
        self._q.clear()

    @property
    def result(self):
        """Per-input extremum flags; triggers the one-time trailing flush on first read."""
        if not self._post_process_done:
            self._post_process_done = True
            self._post()
        return self._result
|
nilq/baby-python
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['EnterprisePolicyArgs', 'EnterprisePolicy']
@pulumi.input_type
class EnterprisePolicyArgs:
    """Input bundle for constructing an EnterprisePolicy resource.

    Generated by the Pulumi SDK Generator; only resource_group_name is required.
    """

    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 encryption: Optional[pulumi.Input['PropertiesEncryptionArgs']] = None,
                 enterprise_policy_name: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input['EnterprisePolicyIdentityArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 lockbox: Optional[pulumi.Input['PropertiesLockboxArgs']] = None,
                 network_injection: Optional[pulumi.Input['PropertiesNetworkInjectionArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a EnterprisePolicy resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input['PropertiesEncryptionArgs'] encryption: The encryption settings for a configuration store.
        :param pulumi.Input[str] enterprise_policy_name: Name of the EnterprisePolicy.
        :param pulumi.Input['EnterprisePolicyIdentityArgs'] identity: The identity of the EnterprisePolicy.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input['PropertiesLockboxArgs'] lockbox: Settings concerning lockbox.
        :param pulumi.Input['PropertiesNetworkInjectionArgs'] network_injection: Settings concerning network injection.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        # Optional arguments are only set when provided, so unset properties
        # stay absent rather than becoming explicit Nones.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if encryption is not None:
            pulumi.set(__self__, "encryption", encryption)
        if enterprise_policy_name is not None:
            pulumi.set(__self__, "enterprise_policy_name", enterprise_policy_name)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if lockbox is not None:
            pulumi.set(__self__, "lockbox", lockbox)
        if network_injection is not None:
            pulumi.set(__self__, "network_injection", network_injection)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def encryption(self) -> Optional[pulumi.Input['PropertiesEncryptionArgs']]:
        """
        The encryption settings for a configuration store.
        """
        return pulumi.get(self, "encryption")

    @encryption.setter
    def encryption(self, value: Optional[pulumi.Input['PropertiesEncryptionArgs']]):
        pulumi.set(self, "encryption", value)

    @property
    @pulumi.getter(name="enterprisePolicyName")
    def enterprise_policy_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the EnterprisePolicy.
        """
        return pulumi.get(self, "enterprise_policy_name")

    @enterprise_policy_name.setter
    def enterprise_policy_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "enterprise_policy_name", value)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['EnterprisePolicyIdentityArgs']]:
        """
        The identity of the EnterprisePolicy.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: Optional[pulumi.Input['EnterprisePolicyIdentityArgs']]):
        pulumi.set(self, "identity", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def lockbox(self) -> Optional[pulumi.Input['PropertiesLockboxArgs']]:
        """
        Settings concerning lockbox.
        """
        return pulumi.get(self, "lockbox")

    @lockbox.setter
    def lockbox(self, value: Optional[pulumi.Input['PropertiesLockboxArgs']]):
        pulumi.set(self, "lockbox", value)

    @property
    @pulumi.getter(name="networkInjection")
    def network_injection(self) -> Optional[pulumi.Input['PropertiesNetworkInjectionArgs']]:
        """
        Settings concerning network injection.
        """
        return pulumi.get(self, "network_injection")

    @network_injection.setter
    def network_injection(self, value: Optional[pulumi.Input['PropertiesNetworkInjectionArgs']]):
        pulumi.set(self, "network_injection", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class EnterprisePolicy(pulumi.CustomResource):
    """Generated Pulumi resource wrapping the Azure PowerPlatform EnterprisePolicy.

    Constructable either from keyword args or from an EnterprisePolicyArgs bundle.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 encryption: Optional[pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']]] = None,
                 enterprise_policy_name: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 lockbox: Optional[pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']]] = None,
                 network_injection: Optional[pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Definition of the EnterprisePolicy.
        API Version: 2020-10-30-preview.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']] encryption: The encryption settings for a configuration store.
        :param pulumi.Input[str] enterprise_policy_name: Name of the EnterprisePolicy.
        :param pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']] identity: The identity of the EnterprisePolicy.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']] lockbox: Settings concerning lockbox.
        :param pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']] network_injection: Settings concerning network injection.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: EnterprisePolicyArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Definition of the EnterprisePolicy.
        API Version: 2020-10-30-preview.
        :param str resource_name: The name of the resource.
        :param EnterprisePolicyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above based on the actual arguments.
        resource_args, opts = _utilities.get_resource_args_opts(EnterprisePolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       encryption: Optional[pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']]] = None,
                       enterprise_policy_name: Optional[pulumi.Input[str]] = None,
                       identity: Optional[pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       lockbox: Optional[pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']]] = None,
                       network_injection: Optional[pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: populate the args; __props__ is reserved
            # for the get() path below.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = EnterprisePolicyArgs.__new__(EnterprisePolicyArgs)

            __props__.__dict__["encryption"] = encryption
            __props__.__dict__["enterprise_policy_name"] = enterprise_policy_name
            __props__.__dict__["identity"] = identity
            __props__.__dict__["location"] = location
            __props__.__dict__["lockbox"] = lockbox
            __props__.__dict__["network_injection"] = network_injection
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["name"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases keep older type tokens resolving to this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:powerplatform:EnterprisePolicy"), pulumi.Alias(type_="azure-native:powerplatform/v20201030preview:EnterprisePolicy"), pulumi.Alias(type_="azure-nextgen:powerplatform/v20201030preview:EnterprisePolicy")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(EnterprisePolicy, __self__).__init__(
            'azure-native:powerplatform:EnterprisePolicy',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'EnterprisePolicy':
        """
        Get an existing EnterprisePolicy resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; the engine hydrates them from the
        # existing resource identified by `id`.
        __props__ = EnterprisePolicyArgs.__new__(EnterprisePolicyArgs)

        __props__.__dict__["encryption"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["lockbox"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["network_injection"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return EnterprisePolicy(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def encryption(self) -> pulumi.Output[Optional['outputs.PropertiesResponseEncryption']]:
        """
        The encryption settings for a configuration store.
        """
        return pulumi.get(self, "encryption")

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.EnterprisePolicyIdentityResponse']]:
        """
        The identity of the EnterprisePolicy.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def lockbox(self) -> pulumi.Output[Optional['outputs.PropertiesResponseLockbox']]:
        """
        Settings concerning lockbox.
        """
        return pulumi.get(self, "lockbox")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkInjection")
    def network_injection(self) -> pulumi.Output[Optional['outputs.PropertiesResponseNetworkInjection']]:
        """
        Settings concerning network injection.
        """
        return pulumi.get(self, "network_injection")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
|
nilq/baby-python
|
python
|
import logging
import os
import yaml
from DataCuration.main import main as start_web_scrape
from util import create_folder
def load_config():
    """
    Loads the configuration file.

    :return: Content of the configuration file (as parsed by PyYAML)
    """
    # Explicit encoding: the platform default may not be UTF-8 (e.g. cp1252 on
    # Windows), which would corrupt non-ASCII values in the config.
    with open('config.yaml', 'r', encoding='utf-8') as file:
        # NOTE(review): FullLoader can construct a wide range of Python objects;
        # if config.yaml could ever come from an untrusted source, switch to
        # yaml.safe_load.
        content = yaml.load(file, yaml.FullLoader)
    return content
def verify_configurations(conf: dict) -> None:
    """
    Verify the content loaded from configuration file is correct or not. It is checked in the
    beginning to prevent giving errors later in the code.

    Currently a stub: it accepts any input without validation.

    :param conf: content of the configuration file
    :return: None
    """
    # TODO: Add checks for content of the configuration file.
    pass
def main():
    """Load and validate the configuration, then kick off the web scrape."""
    settings = load_config()
    verify_configurations(settings)
    start_web_scrape(settings)
if __name__ == '__main__':
    # Make sure the log directory exists before basicConfig opens the log file.
    create_folder(os.path.join(os.getcwd(), 'logs'))
    logging.basicConfig(filename='logs/DataCuration.log',
                        filemode='w',
                        level=logging.INFO,
                        format='%(asctime)s: '
                               '%(filename)s: '
                               '%(levelname)s: '
                               '%(lineno)d:\t'
                               '%(message)s')
    main()
|
nilq/baby-python
|
python
|
########################################
# PROJECT 1 - Linked List
# Author: Tony Sulfaro
# PID: A52995491
########################################
class Node:
    """Singly linked list node holding a value and a reference to its successor."""
    # DO NOT MODIFY THIS CLASS #
    __slots__ = 'value', 'next_node'

    def __init__(self, value, next_node=None):
        """
        DO NOT EDIT
        Initialize a node
        :param value: value of the node
        :param next_node: pointer to the next node, default is None
        """
        self.value = value  # element at the node
        self.next_node = next_node  # reference to next node

    def __eq__(self, other):
        """
        DO NOT EDIT
        Determine if two nodes are equal (same value)
        :param other: node to compare to
        :return: True if nodes are equal, False otherwise
        """
        if other is None:
            return False
        if self.value == other.value:
            return True
        return False

    def __repr__(self):
        """
        DO NOT EDIT
        String representation of a node
        :return: string of value
        """
        return str(self.value)
class LinkedList:
def __init__(self):
"""
DO NOT EDIT
Create/initialize an empty linked list
"""
self.head = None # Node
self.tail = None # Node
self.size = 0 # Integer
def __eq__(self, other):
"""
DO NOT EDIT
Defines "==" (equality) for two linked lists
:param other: Linked list to compare to
:return: True if equal, False otherwise
"""
if self.size != other.size:
return False
if self.head != other.head or self.tail != other.tail:
return False
# Traverse through linked list and make sure all nodes are equal
temp_self = self.head
temp_other = other.head
while temp_self is not None:
if temp_self == temp_other:
temp_self = temp_self.next_node
temp_other = temp_other.next_node
else:
return False
# Make sure other is not longer than self
if temp_self is None and temp_other is None:
return True
return False
def __repr__(self):
"""
DO NOT EDIT
String representation of a linked list
:return: string of list of values
"""
temp_node = self.head
values = []
if temp_node is None:
return None
while temp_node is not None:
values.append(temp_node.value)
temp_node = temp_node.next_node
return str(values)
###### MODIFY THE BELOW FUNCTIONS #####
# ------------------------Accessor Functions---------------------------
def length(self):
"""
Gets the number of nodes of the linked list
:return: size of list
"""
return self.size
def is_empty(self):
"""
Determines if the linked list is empty
:return: True if list is empty and False if not empty
"""
return self.size == 0
def front_value(self):
"""
Gets the first value of the list
:return: value of the list head
"""
if self.head is not None:
return self.head.value
return None
def back_value(self):
"""
Gets the last value of the list
:return: value of the list tail
"""
if self.tail is not None:
return self.tail.value
return None
def count(self, val):
"""
Counts the number of times a value 'val' occurs in the list
:param val: value to find and count
:return: number of time 'val' occurs
"""
count = 0
temp_self = self.head
if temp_self is None:
return 0
while temp_self is not None:
if temp_self.value == val:
count += 1
temp_self = temp_self.next_node
return count
def find(self, val):
"""
Searches for and returns the first node with the value 'val'
:param val: value to search for
:return: True if value is in list, False if value is not found
"""
temp_self = self.head
while temp_self is not None:
if temp_self.value == val:
return True
temp_self = temp_self.next_node
return False
# ------------------------Mutator Functions---------------------------
def push_front(self, val):
"""
Adds a node to the front of the list with value 'val'
:param val: value to add to list
:return: no return
"""
if self.size == 0:
new_node = Node(val, self.head)
self.head = new_node
self.tail = new_node
self.size += 1
else:
self.head = Node(val, self.head)
self.size += 1
def push_back(self, val):
"""
Adds a node to the back of the list with value 'val'
:param val: value to add to list
:return: no return
"""
if self.size == 0:
new_node = Node(val)
self.head = new_node
self.tail = new_node
self.size += 1
else:
new_node = Node(val)
self.tail.next_node = new_node
self.tail = new_node
self.size += 1
def pop_front(self):
"""
Removes a node from the front of the list
:return: the value of the removed node
"""
head = self.head
if head is not None:
next_node = self.head.next_node
if head is not None:
self.head = next_node
self.size -= 1
return head.value
else:
return None
def pop_back(self):
"""
Removes a node from the back of the list
:return: the value of the removed node
"""
if self.head is not None:
current_node = self.head
prev_node = None
while current_node.next_node is not None:
prev_node = current_node
current_node = current_node.next_node
if prev_node is None: # popping list of one element
self.head = None
self.tail = None
self.size -= 1
return current_node.value
else:
prev_node.next_node = None
self.tail = prev_node
self.size -= 1
return current_node.value
else:
return None
def reverse_list(self):
"""
Reverses the values of the given linked list
:return: no return
"""
current_node = self.head
prev_node = None
self.tail = self.head
while current_node is not None:
next_node = current_node.next_node
current_node.next_node = prev_node
prev_node = current_node
current_node = next_node
self.head = prev_node
def main():
    """
    Quick manual exercise of LinkedList: build a list, print its state,
    reverse it, and print the state again.
    :return: no return
    """
    demo = LinkedList()
    for value in (45, 39, 10, 98, 6):
        demo.push_front(value)
    print(demo)
    print('size: ', demo.size)
    print('head: ', demo.head.value)
    print('tail: ', demo.tail.value)
    demo.reverse_list()
    print(demo)
    print('size: ', demo.size)
    print('head: ', demo.head.value)
    print('tail: ', demo.tail.value)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from html_parse.src.parser import Parser
import unittest
class TestParser(unittest.TestCase):
    """Unit tests for Parser's HTML-to-dict pipeline.

    The pipeline replaces closing tags with the '|;|' marker, strips
    container start tags, collapses hanging markers, turns start tags into
    'name|:|' keys, and finally splits into an array of dicts.
    """

    def setUp(self):
        # A fresh Parser per test keeps the cases independent.
        self.parser = Parser()

    def test_remove_end_tags(self):
        result = self.parser.remove_end_tags('<title>Hello</title>')
        self.assertEqual(result, '<title>Hello|;|')

    def test_remove_end_tags_with_head(self):
        result = self.parser.remove_end_tags('<head><title>Hello</title></head>')
        self.assertEqual(result, '<head><title>Hello|;||;|')

    def test_remove_end_tags_with_html(self):
        result = self.parser.remove_end_tags('<html><head><title>Hello</title></head></html>')
        self.assertEqual(result, '<html><head><title>Hello|;||;||;|')

    def test_remove_end_tags_web_page(self):
        result = self.parser.remove_end_tags('<html><head><title>Hello</title></head><body><p>World</p></body></html>')
        self.assertEqual(result, '<html><head><title>Hello|;||;|<body><p>World|;||;||;|')

    def test_clean_start_tags(self):
        result = self.parser.clean_start_tags('<title>Hello|;|')
        self.assertEqual(result, '<title>Hello|;|')

    def test_clean_start_tags_with_head(self):
        result = self.parser.clean_start_tags('<head><title>Hello|;|')
        self.assertEqual(result, '<title>Hello|;|')

    def test_clean_start_tags_with_html(self):
        result = self.parser.clean_start_tags('<html><head><title>Hello|;|')
        self.assertEqual(result, '<title>Hello|;|')

    def test_clean_start_tags_web_page(self):
        result = self.parser.clean_start_tags('<html><head><title>Hello|;||;|<body><p>World|;||;||;|')
        self.assertEqual(result, '<title>Hello|;||;|<p>World|;||;||;|')

    def test_remove_hanging_colons(self):
        result = self.parser.remove_hanging_colons('|;||;||;||;|')
        self.assertEqual(result, '|;|')

    def test_remove_hanging_colons_with_text(self):
        result = self.parser.remove_hanging_colons('|;|hello|;||;||;|')
        self.assertEqual(result, '|;|hello|;|')

    def test_remove_hanging_colons_with_html(self):
        result = self.parser.remove_hanging_colons('<title>Hello|;||;|<p>World|;||;||;|')
        self.assertEqual(result, '<title>Hello|;|<p>World|;|')

    def test_tag_to_key(self):
        result = self.parser.tag_to_key('<title>')
        self.assertEqual(result, 'title|:|')

    def test_tag_to_key_tag_and_text(self):
        result = self.parser.tag_to_key('<title>Hello|;|<p>World|;|')
        self.assertEqual(result, 'title|:|Hello|;|p|:|World|;|')

    def test_to_array(self):
        result = self.parser.to_array('title|:|Hello|;|p|:|World|;|')
        self.assertEqual(result[0], 'title|:|Hello')
        self.assertEqual(result[1], 'p|:|World')
        self.assertEqual(len(result), 2)

    def test_to_dicts(self):
        result = self.parser.to_dicts(['title|:|Hello|', 'p|:|World|'])
        self.assertEqual(result[0]['title'], 'Hello')
        self.assertEqual(result[1]['p'], 'World')
        self.assertEqual(len(result), 2)

    def test_parse(self):
        result = self.parser.parse('<html><head><title>Hello</title></head><body><p>World</p></body></html>')
        self.assertEqual(result[0]['title'], 'Hello')
        self.assertEqual(result[1]['p'], 'World')
        self.assertEqual(len(result), 2)
|
nilq/baby-python
|
python
|
"""
Book: Building RESTful Python Web Services
Chapter 3: Improving and adding authentication to an API with Django
Author: Gaston C. Hillar - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from rest_framework.pagination import LimitOffsetPagination
class LimitOffsetPaginationWithMaxLimit(LimitOffsetPagination):
    """Limit/offset pagination that caps the client-supplied ``limit`` at 10.

    Without ``max_limit``, a client could request an arbitrarily large page
    via the ``limit`` query parameter; DRF clamps any larger value to this cap.
    """
    max_limit = 10
|
nilq/baby-python
|
python
|
# AUTHOR: Dalon Lobo
# Python3 Concept: Plotting line plot using matplotlib
# GITHUB: https://github.com/dalonlobo
import numpy as np
import matplotlib.pyplot as plt
# Demo: line plot of y = sin(x) with matplotlib.
# Sample x on [0, 10) every 0.1, compute the sine, and display the curve.
xs = np.arange(0, 10, 0.1)  # x-coordinate samples
ys = np.sin(xs)             # y-coordinate values: sin of each sample
plt.plot(xs, ys)            # draw the line plot
plt.xlabel("x - values")    # show x label
plt.ylabel("y = sin(x)")    # show y label
plt.show()                  # Displays the plot
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.