The preview below contains one row per source file. Its schema (column name and type, in row order) is:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
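Rows with this schema are easier to work with programmatically than to read as a table. As a minimal sketch, assuming the rows are available locally as a Parquet shard named `data.parquet` (the file name, storage format, and filter thresholds below are illustrative assumptions, not taken from this document), they could be loaded and filtered on the quality-signal columns with pandas:

```python
# Minimal sketch: load one shard of such a dataset and inspect a few columns.
# "data.parquet" is a hypothetical local file; the real data may ship as
# several Parquet/JSONL shards with exactly the columns listed above.
import pandas as pd

df = pd.read_parquet("data.parquet")

# Basic per-file metadata
print(df[["hexsha", "size", "lang", "max_stars_repo_name", "max_stars_count"]].head())

# Keep only reasonably "clean" Python files using a couple of quality signals
# (the 0.2 / 0.5 thresholds are arbitrary examples).
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
    & (df["qsc_codepython_score_lines_no_logic_quality_signal"] < 0.5)
)
print(f"{mask.sum()} of {len(df)} rows pass the filter")
```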
Row 1
- hexsha: c7aa2635f7e1d5416d843dacc6078257816ee795; size: 2,268; ext: py; lang: Python
- max_stars: path src/encoded/server_defaults.py, repo beta-cell-network/beta-cell-nw, head 093b078fdb7932ebfcbc0715aeeb2261eda3ee52, licenses ["MIT"], max_stars_count 4, events 2018-01-04T22:31:08.000Z to 2021-07-15T17:39:16.000Z
- max_issues: path src/encoded/server_defaults.py, repo beta-cell-network/beta-cell-nw, head 093b078fdb7932ebfcbc0715aeeb2261eda3ee52, licenses ["MIT"], max_issues_count 7, events 2017-10-31T23:47:47.000Z to 2022-01-10T00:12:42.000Z
- max_forks: path src/encoded/server_defaults.py, repo beta-cell-network/beta-cell-nw, head 093b078fdb7932ebfcbc0715aeeb2261eda3ee52, licenses ["MIT"], max_forks_count 10, events 2017-09-14T00:57:07.000Z to 2021-07-27T23:41:14.000Z
- content:

```python
from datetime import datetime
from jsonschema_serialize_fork import NO_DEFAULT
from pyramid.security import effective_principals
from pyramid.threadlocal import get_current_request
from string import (
digits,
ascii_uppercase,
)
import random
import uuid
from snovault.schema_utils import server_default
ACCESSION_FACTORY = __name__ + ':accession_factory'
def includeme(config):
from pyramid.path import DottedNameResolver
accession_factory = config.registry.settings.get('accession_factory')
if accession_factory:
factory = DottedNameResolver().resolve(accession_factory)
else:
factory = enc_accession
config.registry[ACCESSION_FACTORY] = factory
@server_default
def userid(instance, subschema):
request = get_current_request()
principals = effective_principals(request)
for principal in principals:
if principal.startswith('userid.'):
return principal[7:]
return NO_DEFAULT
@server_default
def now(instance, subschema):
# from jsonschema_serialize_fork date-time format requires a timezone
return datetime.utcnow().isoformat() + '+00:00'
@server_default
def uuid4(instance, subschema):
return str(uuid.uuid4())
@server_default
def accession(instance, subschema):
if 'external_accession' in instance:
return NO_DEFAULT
request = get_current_request()
factory = request.registry[ACCESSION_FACTORY]
# With 17 576 000 options
ATTEMPTS = 10
for attempt in range(ATTEMPTS):
new_accession = factory(subschema['accessionType'])
if new_accession in request.root:
continue
return new_accession
raise AssertionError("Free accession not found in %d attempts" % ATTEMPTS)
ENC_ACCESSION_FORMAT = (digits, digits, digits, ascii_uppercase, ascii_uppercase, ascii_uppercase)
def enc_accession(accession_type):
random_part = ''.join(random.choice(s) for s in ENC_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
TEST_ACCESSION_FORMAT = (digits, ) * 6
def test_accession(accession_type):
""" Test accessions are generated on test.encodedcc.org
"""
random_part = ''.join(random.choice(s) for s in TEST_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
```

- avg_line_length 28; max_line_length 98; alphanum_fraction 0.740741
- qsc_code_*_quality_signal: num_words 270, num_chars 2,268, mean_word_length 6, frac_words_unique 0.348148, frac_chars_top_2grams 0.088889, frac_chars_top_3grams 0.039506, frac_chars_top_4grams 0.042593, frac_chars_dupe_5grams 0.096296, frac_chars_dupe_6grams 0.096296, frac_chars_dupe_7grams 0.096296, frac_chars_dupe_8grams 0.096296, frac_chars_dupe_9grams 0.040741, frac_chars_dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.009704, frac_chars_whitespace 0.182099, size_file_byte 2,268, num_lines 80, num_chars_line_max 99, num_chars_line_mean 28.35, frac_chars_alphabet 0.863612, frac_chars_comments 0.065697, cate_xml_start 0, frac_lines_dupe_lines 0.181818, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.056845, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.018182
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.127273, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.163636, frac_lines_simplefunc 0.036364, score_lines_no_logic 0.436364, frac_lines_print 0
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
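Several of the columns above are simple per-file statistics over `content`. The sketch below shows approximate definitions for a few of them; it is illustrative only, since this document does not give the exact formulas used to compute the dataset's signals.

```python
# Illustrative sketch of how a few of the simpler signals could be computed
# from a file's `content`. These are approximations, not the reference
# implementation behind the qsc_* columns.
def simple_quality_signals(content: str) -> dict:
    lines = content.splitlines()
    n_chars = len(content)
    return {
        "num_lines": len(lines),
        "num_chars": n_chars,
        "num_chars_line_max": max((len(line) for line in lines), default=0),
        "frac_chars_whitespace": sum(c.isspace() for c in content) / max(n_chars, 1),
        "frac_lines_import": sum(
            line.lstrip().startswith(("import ", "from ")) for line in lines
        ) / max(len(lines), 1),
    }

# Example: signals for a tiny two-line snippet
print(simple_quality_signals("import os\nprint(os.getcwd())\n"))
```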
Row 2
- hexsha: c7b0b81ceafaed0e74acb2a5f98af6b65a8f276d; size: 1,850; ext: py; lang: Python
- max_stars: path tests/test_api_account_state.py, repo luisparravicini/ioapi, head f9d60a28032fd54163ea15b8256aba1d48ec4dcc, licenses ["MIT"], max_stars_count null, events null
- max_issues: path tests/test_api_account_state.py, repo luisparravicini/ioapi, head f9d60a28032fd54163ea15b8256aba1d48ec4dcc, licenses ["MIT"], max_issues_count null, events null
- max_forks: path tests/test_api_account_state.py, repo luisparravicini/ioapi, head f9d60a28032fd54163ea15b8256aba1d48ec4dcc, licenses ["MIT"], max_forks_count 1, events 2020-05-03T04:34:32.000Z to 2020-05-03T04:34:32.000Z
- content:

```python
import unittest
import os
import json
import requests
import requests_mock
from ioapi import api_url, IOService, AuthorizationError, UnexpectedResponseCodeError
class APIAccountStateTestCase(unittest.TestCase):
def setUp(self):
self.service = IOService()
@requests_mock.mock()
def test_account_state_without_auth(self, mock):
data = self._read_mock_response('account_state_without_auth')
self._setup_response(mock, data, 401)
with self.assertRaises(AuthorizationError):
self.service.get_account_state()
@requests_mock.mock()
def test_account_state_auth_not_ok(self, mock):
data = self._read_mock_response('account_state_not_ok')
for code in range(201, 600):
# skip 401 status code (unauthorized)
if code == 401:
continue
self._setup_response(mock, data, code)
with self.assertRaises(UnexpectedResponseCodeError) as cm:
self.service.get_account_state()
self.assertEqual(cm.exception.status_code, code)
@requests_mock.mock()
def test_account_state(self, mock):
data = self._read_mock_response('account_state')
self.service = IOService()
self._setup_response(mock, data)
self.assertEqual(self.service.get_account_state(), data)
self.fail("auth missing")
def _read_mock_response(self, name):
path = os.path.join(os.path.dirname(__file__), name + '.json')
with open(path, 'r') as file:
data = json.loads(file.read())
return data
def _setup_response(self, mock, response, code=None):
if code is None:
code = requests.codes.ok
mock.get(
self.service.api + api_url.URL_ACCOUNT_STATE,
json=response,
status_code=code)
```

- avg_line_length 31.355932; max_line_length 85; alphanum_fraction 0.656216
- qsc_code_*_quality_signal (same order as row 1): 220, 1,850, 5.254545, 0.286364, 0.103806, 0.041522, 0.049308, 0.356401, 0.205017, 0.205017, 0.114187, 0.114187, 0, 0, 0.010823, 0.250811, 1,850, 58, 86, 31.896552, 0.823232, 0.018919, 0, 0.159091, 0, 0, 0.042471, 0.014341, 0, 0, 0, 0, 0.090909
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0.136364, false, 0, 0.136364, 0, 0.318182, 0
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
Row 3
- hexsha: c7b4d983814129255c3038e65a92199d05319e32; size: 6,061; ext: py; lang: Python
- max_stars: path tobit.py, repo AlvaroCorrales/tobit, head 6993b1cfe58010cd59aac477ced3c2525342244f, licenses ["MIT"], max_stars_count 1, events 2021-04-13T03:14:01.000Z to 2021-04-13T03:14:01.000Z
- max_issues: path tobit.py, repo AlvaroCorrales/tobit, head 6993b1cfe58010cd59aac477ced3c2525342244f, licenses ["MIT"], max_issues_count null, events null
- max_forks: path tobit.py, repo AlvaroCorrales/tobit, head 6993b1cfe58010cd59aac477ced3c2525342244f, licenses ["MIT"], max_forks_count null, events null
- content:

```python
import math
import warnings
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import scipy.stats
from scipy.stats import norm # edit
from scipy.special import log_ndtr
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
def split_left_right_censored(x, y, cens):
counts = cens.value_counts()
if -1 not in counts and 1 not in counts:
warnings.warn("No censored observations; use regression methods for uncensored data")
xs = []
ys = []
for value in [-1, 0, 1]:
if value in counts:
split = cens == value
y_split = np.squeeze(y[split].values)
x_split = x[split].values
else:
y_split, x_split = None, None
xs.append(x_split)
ys.append(y_split)
return xs, ys
def tobit_neg_log_likelihood(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1])
s = params[-1]
to_cat = []
cens = False
if y_left is not None:
cens = True
left = (y_left - np.dot(x_left, b))
to_cat.append(left)
if y_right is not None:
cens = True
right = (np.dot(x_right, b) - y_right)
to_cat.append(right)
if cens:
concat_stats = np.concatenate(to_cat, axis=0) / s
log_cum_norm = scipy.stats.norm.logcdf(concat_stats) # log_ndtr(concat_stats)
cens_sum = log_cum_norm.sum()
else:
cens_sum = 0
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
mid = scipy.stats.norm.logpdf(mid_stats) - math.log(max(np.finfo('float').resolution, s))
mid_sum = mid.sum()
else:
mid_sum = 0
loglik = cens_sum + mid_sum
return - loglik
def tobit_neg_log_likelihood_der(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1]) # in censReg, not using chain rule as below; they optimize in terms of log(s)
s = params[-1]
beta_jac = np.zeros(len(b))
sigma_jac = 0
if y_left is not None:
left_stats = (y_left - np.dot(x_left, b)) / s
l_pdf = scipy.stats.norm.logpdf(left_stats)
l_cdf = log_ndtr(left_stats)
left_frac = np.exp(l_pdf - l_cdf)
beta_left = np.dot(left_frac, x_left / s)
beta_jac -= beta_left
left_sigma = np.dot(left_frac, left_stats)
sigma_jac -= left_sigma
if y_right is not None:
right_stats = (np.dot(x_right, b) - y_right) / s
r_pdf = scipy.stats.norm.logpdf(right_stats)
r_cdf = log_ndtr(right_stats)
right_frac = np.exp(r_pdf - r_cdf)
beta_right = np.dot(right_frac, x_right / s)
beta_jac += beta_right
right_sigma = np.dot(right_frac, right_stats)
sigma_jac -= right_sigma
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
beta_mid = np.dot(mid_stats, x_mid / s)
beta_jac += beta_mid
mid_sigma = (np.square(mid_stats) - 1).sum()
sigma_jac += mid_sigma
combo_jac = np.append(beta_jac, sigma_jac / s) # by chain rule, since the expression above is dloglik/dlogsigma
return -combo_jac
class TobitModel:
def __init__(self, fit_intercept=True):
self.fit_intercept = fit_intercept
self.ols_coef_ = None
self.ols_intercept = None
self.coef_ = None
self.intercept_ = None
self.sigma_ = None
def fit(self, x, y, cens, verbose=False):
"""
Fit a maximum-likelihood Tobit regression
:param x: Pandas DataFrame (n_samples, n_features): Data
:param y: Pandas Series (n_samples,): Target
:param cens: Pandas Series (n_samples,): -1 indicates left-censored samples, 0 for uncensored, 1 for right-censored
:param verbose: boolean, show info from minimization
:return:
"""
x_copy = x.copy()
if self.fit_intercept:
x_copy.insert(0, 'intercept', 1.0)
else:
x_copy.scale(with_mean=True, with_std=False, copy=False)
init_reg = LinearRegression(fit_intercept=False).fit(x_copy, y)
b0 = init_reg.coef_
y_pred = init_reg.predict(x_copy)
resid = y - y_pred
resid_var = np.var(resid)
s0 = np.sqrt(resid_var)
params0 = np.append(b0, s0)
xs, ys = split_left_right_censored(x_copy, y, cens)
result = minimize(lambda params: tobit_neg_log_likelihood(xs, ys, params), params0, method='BFGS',
jac=lambda params: tobit_neg_log_likelihood_der(xs, ys, params), options={'disp': verbose})
if verbose:
print(result)
self.ols_coef_ = b0[1:]
self.ols_intercept = b0[0]
if self.fit_intercept:
self.intercept_ = result.x[1]
self.coef_ = result.x[1:-1]
else:
self.coef_ = result.x[:-1]
self.intercept_ = 0
self.sigma_ = result.x[-1]
return self
def predict(self, x):
return self.intercept_ + np.dot(x, self.coef_)
def score(self, x, y, scoring_function=mean_absolute_error):
y_pred = np.dot(x, self.coef_)
return scoring_function(y, y_pred)
# EDIT - insert marginal effects function
def margins(self, x, k = 0):
"""
Marginal effects on dependent variable of a regressor, identified by coef
:param x: array with all regressors (independent variables) to make a prediction
:param k: coefficient corresponding to the regressor with respect to which we want to take the marginal effects
:return: an array with the marginal effects estimated at each observation's level
The marginal effect of regressor k on individual i's y is defined as the product of coef[k] and the normal cdf
evaluated at x_i * coeff[k] / sigma
"""
return self.coef_[k] * norm.cdf(self.predict(x) / self.sigma_)
```

- avg_line_length 33.120219; max_line_length 123; alphanum_fraction 0.620855
- qsc_code_*_quality_signal (same order as row 1): 907, 6,061, 3.936053, 0.218302, 0.018207, 0.013445, 0.023529, 0.182633, 0.133894, 0.108683, 0.07507, 0.058824, 0.058824, 0, 0.008943, 0.280482, 6,061, 183, 124, 33.120219, 0.809677, 0.177858, 0, 0.19084, 0, 0, 0.018553, 0, 0, 0, 0, 0, 0
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0.061069, false, 0, 0.076336, 0.007634, 0.198473, 0.007634
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
Row 4
- hexsha: c7b509b05f7f3079575b9250d0a2891a9795c878; size: 1,554; ext: py; lang: Python
- max_stars: path setup.py, repo Raymond38324/hagworm, head 196d4735719f586d52a1cd9f21aedd00e16b59b0, licenses ["Apache-2.0"], max_stars_count null, events null
- max_issues: path setup.py, repo Raymond38324/hagworm, head 196d4735719f586d52a1cd9f21aedd00e16b59b0, licenses ["Apache-2.0"], max_issues_count null, events null
- max_forks: path setup.py, repo Raymond38324/hagworm, head 196d4735719f586d52a1cd9f21aedd00e16b59b0, licenses ["Apache-2.0"], max_forks_count null, events null
- content:

```python
# -*- coding: utf-8 -*-
import setuptools
with open(r'README.md', r'r', encoding="utf8") as stream:
long_description = stream.read()
setuptools.setup(
name=r'hagworm',
version=r'3.0.0',
license=r'Apache License Version 2.0',
platforms=[r'all'],
author=r'Shaobo.Wang',
author_email=r'wsb310@gmail.com',
description=r'Network Development Suite',
long_description=long_description,
long_description_content_type=r'text/markdown',
url=r'https://github.com/wsb310/hagworm',
packages=setuptools.find_packages(),
package_data={r'hagworm': [r'static/*.*']},
python_requires=r'>= 3.7',
install_requires=[
r'aioftp==0.13.0',
r'aiohttp==3.5.4',
r'aiokafka==0.5.2',
r'aiomysql==0.0.20',
r'aioredis==1.2.0',
r'cacheout==0.11.1',
r'crontab==0.22.6',
r'cryptography==2.7.0',
r'hiredis==1.0.0',
r'Jinja2==2.10.1',
r'tornado-jinja2==0.2.4',
r'loguru==0.3.0',
r'motor==2.0.0',
r'mq_http_sdk==1.0.1',
r'objgraph==3.4.1',
r'Pillow==6.1.0',
r'psutil==5.6.3',
r'PyJWT==1.7.1',
r'pytest==5.0.1',
r'pytest-asyncio==0.10.0',
r'Sphinx==2.1.2',
r'SQLAlchemy==1.3.5',
r'tornado==6.0.3',
r'xlwt==1.3.0',
r'xmltodict==0.12.0',
],
classifiers=[
r'Programming Language :: Python :: 3.7',
r'License :: OSI Approved :: Apache Software License',
r'Operating System :: POSIX :: Linux',
],
)
```

- avg_line_length 28.254545; max_line_length 62; alphanum_fraction 0.548263
- qsc_code_*_quality_signal (same order as row 1): 238, 1,554, 3.52521, 0.420168, 0.021454, 0.045292, 0.071514, 0, 0, 0, 0, 0, 0, 0, 0.086473, 0.248391, 1,554, 54, 63, 28.777778, 0.631849, 0.013514, 0, 0.04, 0, 0, 0.439582, 0.028086, 0, 0, 0, 0, 0
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0, false, 0, 0.02, 0, 0.02, 0
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
Row 5
- hexsha: c7b7578b3382d7cf2565fe8fe7621c5d451e663b; size: 1,374; ext: py; lang: Python
- max_stars: path conduit_rest/radish/conduit_rest_steps.py, repo dduleba/tw2019-ui-tests, head 5f149c6c2bdb9f2d69a02c038248374f6b0b5903, licenses ["MIT"], max_stars_count 1, events 2019-09-27T23:12:07.000Z to 2019-09-27T23:12:07.000Z
- max_issues: path conduit_rest/radish/conduit_rest_steps.py, repo dduleba/conduit-tests, head 5f149c6c2bdb9f2d69a02c038248374f6b0b5903, licenses ["MIT"], max_issues_count null, events null
- max_forks: path conduit_rest/radish/conduit_rest_steps.py, repo dduleba/conduit-tests, head 5f149c6c2bdb9f2d69a02c038248374f6b0b5903, licenses ["MIT"], max_forks_count null, events null
- content:

```python
import time
from faker import Faker
from radish_ext.radish.step_config import StepConfig
from conduit.client import ConduitClient, ConduitConfig
class ConduitStepsConfig(StepConfig):
def __init__(self, context):
super().__init__(context)
self._faker = None
self.client = ConduitClient(ConduitConfig().set_properties(context.cfg.get('conduit_backend').get('url')))
@property
def faker(self):
if self._faker is None:
self._faker = Faker(locale='en-us')
seed = time.time()
self.log.debug(f'Faker seed {seed}')
self._faker.seed()
return self._faker
def get_conduit_config(context):
return ConduitStepsConfig.get_instance(context)
class ConduitRestBaseSteps(object):
def created_user(self, step, ):
"""created User"""
stc_rest = get_conduit_config(step.context)
user_model = {'user': {'username': stc_rest.faker.user_name(),
'password': stc_rest.faker.password(),
'email': stc_rest.faker.email()
}
}
stc_rest.test_data.data.update(user_model)
stc_rest.log.debug(user_model)
ret_json = stc_rest.client.users.register_user(**user_model['user'])
stc_rest.log.info(f'user created {ret_json}')
```

- avg_line_length 31.227273; max_line_length 114; alphanum_fraction 0.61936
- qsc_code_*_quality_signal (same order as row 1): 157, 1,374, 5.165605, 0.369427, 0.069051, 0.04439, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.269287, 1,374, 43, 115, 31.953488, 0.807769, 0.008734, 0, 0, 0, 0, 0.067847, 0, 0, 0, 0, 0, 0
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0.129032, false, 0.032258, 0.129032, 0.032258, 0.387097, 0
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
Row 6
- hexsha: c7b94b2b66d38c20024028b233b4eaed057202d2; size: 5,057; ext: py; lang: Python
- max_stars: path SPAE/read_write.py, repo simon-schuler/SPAE, head 2b970e30838da258b969b316488e7963d66119be, licenses ["MIT"], max_stars_count null, events null
- max_issues: path SPAE/read_write.py, repo simon-schuler/SPAE, head 2b970e30838da258b969b316488e7963d66119be, licenses ["MIT"], max_issues_count 1, events 2021-04-12T20:28:55.000Z to 2021-04-12T20:28:55.000Z
- max_forks: path SPAE/read_write.py, repo simon-schuler/SPAE, head 2b970e30838da258b969b316488e7963d66119be, licenses ["MIT"], max_forks_count null, events null
- content:

```python
#Writing MOOG parameter file for the parameter, abundance, and error calculations.
#The parameter file only needs to be written once, at beginning of the routine, because the output
#files are overwritten with each itereation of the routine, only minimal output data are needed.
#
#The user can choose to have the parameter file written to screen by choosing verbose=True
#The user can choose to have more detailed MOOG output by chooseing the appropriate values for the
#MOOG input parameters.
import numpy as np
def param_file(linelist,atmosphere=0,molecules=1,lines=0,flux=0,damp=0,plot=0,units=0,verbose=False):
if verbose:
print('abfind')
print('terminal \'x11\'')
print('standard_out \'moog_out.1\'')
print('summary_out \'moog_out.2\'')
print('model_in \'star.mod\'')
print('lines_in \'' + linelist + '\'')
print('atmosphere ' + str(atmosphere))
print('molecules ' + str(molecules))
print('lines ' + str(lines))
print('flux/int ' + str(flux))
print('damping ' + str(damp))
print('plot ' + str(plot))
print('units ' + str(units))
with open('batch.par', 'wt') as file:
file.write('abfind' + '\n')
file.write('terminal \'x11\'' + '\n')
file.write('standard_out \'moog_out.1\'' + '\n')
file.write('summary_out \'moog_out.2\'' + '\n')
file.write('model_in \'star.mod\'' + '\n')
file.write('lines_in \'' + linelist + '\'' + '\n')
file.write('atmosphere ' + str(atmosphere) + '\n')
file.write('molecules ' + str(molecules) + '\n')
file.write('lines ' + str(lines) + '\n')
file.write('flux/int ' + str(flux) + '\n')
file.write('damping ' + str(damp) + '\n')
file.write('plot ' + str(plot) + '\n')
file.write('units ' + str(units) + '\n')
#Function for creating the solar and stellar linelists
def linelist_create(star_in, sun_in, direc_path):
with open(direc_path + '/linelist_star.txt', 'w') as out_star:
with open(direc_path + '/linelist_sun.txt', 'w') as out_sun:
with open(star_in) as file_star:
with open(sun_in) as file_sun:
line_star = file_star.readline()
out_star.write(line_star) #accounts for comment line in linelist files
line_sun = file_sun.readline()
out_sun.write(line_sun) #accounts for comment line in linelist files
line = file_star.readlines()
line_s = file_sun.readlines()
for line_star in line:
line_star_split = line_star.split()
#if len(line_star_split) < 2: continue
for line_sun in line_s:
line_sun_split = line_sun.split()
#if len(line_sun_split) < 2: continue
if line_star_split[0] == line_sun_split[0] and line_star_split[1] == line_sun_split[1]:
out_star.write(line_star)
out_sun.write(line_sun)
continue
#Reads Moog output files, parsing elements and colums
def read_file(filename):
count = 0
elements = ['Fe I ', 'Fe II ', 'C I ', 'N I ', 'O I ', 'S I', 'K I ', 'Na I ', 'Mg I ', 'Al I ', 'Si I ', 'Ca I ', 'Sc II ', 'Ti I ', 'Ti II ', 'V ', 'Cr I ',
'Mn I ', 'Co I ', 'Ni I ', 'Cu I ', 'Zn I ', 'Ba II ']
dtype = [('wavelength', 'f8'),
('ID', 'f8'),
('EP', 'f8'),
('logGF', 'f8'),
('EWin', 'f8'),
('logRWin', 'f8'),
('abund', 'f8'),
('delavg', 'f8')]
abundances = []
el_found = []
with open(filename) as file:
while True:
count += 1
# Get next line from file
line = file.readline()
# if line is empty end of file is reached
if not line: break
for j, el in enumerate(elements):
species = 'Abundance Results for Species ' + el
if species in line:
new_arr = []
el_found.append(el)
line = file.readline().split()
line = file.readline().split()
while len(line) == 8:
new_arr.append(line)
line = file.readline().rstrip().split()
new_arr = np.array(new_arr)
new_arr = np.core.records.fromarrays(new_arr.T,dtype=dtype)
abundances.append(new_arr)
return el_found, abundances
```

- avg_line_length 41.45082; max_line_length 168; alphanum_fraction 0.489816
- qsc_code_*_quality_signal (same order as row 1): 588, 5,057, 4.086735, 0.292517, 0.048689, 0.049938, 0.013317, 0.135664, 0.052434, 0.034124, 0.034124, 0, 0, 0, 0.010283, 0.384615, 5,057, 121, 169, 41.793388, 0.761889, 0.160174, 0, 0.073171, 0, 0, 0.164775, 0, 0, 0, 0, 0, 0
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0.036585, false, 0, 0.012195, 0, 0.060976, 0.158537
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
Row 7
- hexsha: c7ba2b5a0bc557fae2df973eed4ab42b40580f6e; size: 1,862; ext: py; lang: Python
- max_stars: path lectures/optimization/optimization_plots.py, repo carolinalvarez/ose-course-scientific-computing, head 4b816fa81320c88fc5f35b203f0541e0a1a00939, licenses ["MIT"], max_stars_count null, events null
- max_issues: path lectures/optimization/optimization_plots.py, repo carolinalvarez/ose-course-scientific-computing, head 4b816fa81320c88fc5f35b203f0541e0a1a00939, licenses ["MIT"], max_issues_count null, events null
- max_forks: path lectures/optimization/optimization_plots.py, repo carolinalvarez/ose-course-scientific-computing, head 4b816fa81320c88fc5f35b203f0541e0a1a00939, licenses ["MIT"], max_forks_count null, events null
- content:

```python
"""Plots for optimization lecture."""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
def plot_contour(f, allvecs, legend_path):
"""Plot contour graph for function f."""
# Create array from values with at least two dimensions.
allvecs = np.atleast_2d(allvecs)
X, Y, Z = _get_grid(f)
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title("objective function")
plt.xlabel("variable $x_1$")
plt.ylabel("variable $x_2$")
plt.rc("text", usetex=False)
plt.rc("font", family="serif")
plt.plot(1, 1, "r*", markersize=10, label="minimum")
plt.plot(4.5, -1.5, "bx", markersize=10, label="initial guess")
plt.plot(
np.array(allvecs)[:, 0], np.array(allvecs)[:, 1], "go", markersize=4, label=legend_path,
)
plt.legend()
return plt
def _get_grid(f):
"""Create a grid for function f."""
# create data to visualize objective function
n = 50 # number of discretization points along the x-axis
m = 50 # number of discretization points along the x-axis
a = -2.0
b = 5.0 # extreme points in the x-axis
c = -2
d = 5.0 # extreme points in the y-axis
X, Y = np.meshgrid(np.linspace(a, b, n), np.linspace(c, d, m))
Z = np.zeros(X.shape)
argument = np.zeros(2)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
argument[0] = X[i, j]
argument[1] = Y[i, j]
Z[i][j] = f(argument)
return X, Y, Z
def plot_surf(f):
"""Plot surface graph of function f."""
X, Y, Z = _get_grid(f)
fig = plt.figure()
ax = fig.gca(projection="3d")
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
plt.xlabel("variable $x_1$")
plt.ylabel("variable $x_2$")
fig.colorbar(surf)
plt.title("objective function")
```

- avg_line_length 27.791045; max_line_length 96; alphanum_fraction 0.605263
- qsc_code_*_quality_signal (same order as row 1): 297, 1,862, 3.740741, 0.356902, 0.010801, 0.013501, 0.032403, 0.20162, 0.20162, 0.145815, 0.145815, 0.145815, 0.068407, 0, 0.026912, 0.241676, 1,862, 66, 97, 28.212121, 0.759915, 0.216971, 0, 0.177778, 0, 0, 0.092942, 0, 0, 0, 0, 0, 0
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0.066667, false, 0, 0.066667, 0, 0.177778, 0
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
Row 8
- hexsha: c7ba60efd06c8906b83387592b8347e6da526db9; size: 7,141; ext: py; lang: Python
- max_stars: path gdsfactory/functions.py, repo simbilod/gdsfactory, head 4d76db32674c3edb4d16260e3177ee29ef9ce11d, licenses ["MIT"], max_stars_count null, events null
- max_issues: path gdsfactory/functions.py, repo simbilod/gdsfactory, head 4d76db32674c3edb4d16260e3177ee29ef9ce11d, licenses ["MIT"], max_issues_count null, events null
- max_forks: path gdsfactory/functions.py, repo simbilod/gdsfactory, head 4d76db32674c3edb4d16260e3177ee29ef9ce11d, licenses ["MIT"], max_forks_count null, events null
- content:

```python
"""All functions return a Component so you can easily pipe or compose them.
There are two types of functions:
- decorators: return the original component
- containers: return a new component
"""
from functools import lru_cache, partial
import numpy as np
from omegaconf import OmegaConf
from pydantic import validate_arguments
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.text_rectangular import text_rectangular_multi_layer
from gdsfactory.port import auto_rename_ports
from gdsfactory.types import (
Anchor,
Axis,
ComponentSpec,
Float2,
Layer,
List,
Optional,
Strs,
)
cache = lru_cache(maxsize=None)
def add_port(component: Component, **kwargs) -> Component:
"""Return Component with a new port."""
component.add_port(**kwargs)
return component
@cell
def add_text(
component: ComponentSpec,
text: str = "",
text_offset: Float2 = (0, 0),
text_anchor: Anchor = "cc",
text_factory: ComponentSpec = text_rectangular_multi_layer,
) -> Component:
"""Return component inside a new component with text geometry.
Args:
component: component spec.
text: text string.
text_offset: relative to component anchor. Defaults to center (cc).
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
t = component_new << text_factory(text)
t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor)))
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def add_texts(
components: List[ComponentSpec],
prefix: str = "",
index0: int = 0,
**kwargs,
) -> List[Component]:
"""Return a list of Component with text labels.
Args:
components: list of component specs.
prefix: Optional prefix for the labels.
index0: defaults to 0 (0, for first component, 1 for second ...).
keyword Args:
text_offset: relative to component size info anchor. Defaults to center.
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
return [
add_text(component, text=f"{prefix}{i+index0}", **kwargs)
for i, component in enumerate(components)
]
@cell
def rotate(
component: ComponentSpec, angle: float = 90, recenter: bool = False
) -> Component:
"""Return rotated component inside a new component.
Most times you just need to place a reference and rotate it.
This rotate function just encapsulates the rotated reference into a new component.
Args:
component: spec.
angle: to rotate in degrees.
recenter: recenter component after rotating.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
origin_offset = ref.origin - np.array((ref.xmin, ref.ymin))
ref.rotate(angle)
if recenter:
ref.move(
origin=ref.center,
destination=np.array((ref.xsize / 2, ref.ysize / 2)) - origin_offset,
)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
rotate90 = partial(rotate, angle=90)
rotate90n = partial(rotate, angle=-90)
rotate180 = partial(rotate, angle=180)
@cell
def mirror(
component: ComponentSpec, p1: Float2 = (0, 1), p2: Float2 = (0, 0)
) -> Component:
"""Return new Component with a mirrored reference.
Args:
component: component spec.
p1: first point to define mirror axis.
p2: second point to define mirror axis.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.mirror(p1=p1, p2=p2)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
@cell
def move(
component: Component,
origin=(0, 0),
destination=None,
axis: Optional[Axis] = None,
) -> Component:
"""Return new Component with a moved reference to the original component.
Args:
component: to move.
origin: of component.
destination: Optional x, y.
axis: x or y axis.
"""
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.move(origin=origin, destination=destination, axis=axis)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def move_port_to_zero(component: Component, port_name: str = "o1"):
"""Return a container that contains a reference to the original component.
The new component has port_name in (0, 0).
"""
if port_name not in component.ports:
raise ValueError(
f"port_name = {port_name!r} not in {list(component.ports.keys())}"
)
return move(component, -component.ports[port_name].midpoint)
def update_info(component: Component, **kwargs) -> Component:
"""Return Component with updated info."""
component.info.update(**kwargs)
return component
@validate_arguments
def add_settings_label(
component: Component, layer_label: Layer = (66, 0), settings: Optional[Strs] = None
) -> Component:
"""Add a settings label to a component.
Args:
component: spec.
layer_label: for label.
settings: tuple or list of settings. if None, adds all changed settings.
"""
d = (
{setting: component.get_setting(setting) for setting in settings}
if settings
else component.metadata.changed
)
component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)
return component
__all__ = (
"add_port",
"add_text",
"add_settings_label",
"auto_rename_ports",
"cache",
"mirror",
"move",
"move_port_to_zero",
"rotate",
"update_info",
)
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2(
length_mmi=10,
decorator=partial(add_settings_label, settings=["name", "length_mmi"]),
)
# c.show()
cr = rotate(component=c)
cr.show()
# cr = c.rotate()
# cr.pprint()
# cr.show()
# cm = move(c, destination=(20, 20))
# cm.show()
# cm = mirror(c)
# cm.show()
# cm = c.mirror()
# cm.show()
# cm2 = move_port_to_zero(cm)
# cm2.show()
# cm3 = add_text(c, "hi")
# cm3.show()
# cr = rotate(component=c)
# cr.show()
# print(component_rotated)
# component_rotated.pprint
# component_netlist = component.get_netlist()
# component.pprint_netlist()
```

- avg_line_length 25.967273; max_line_length 87; alphanum_fraction 0.669654
- qsc_code_*_quality_signal (same order as row 1): 900, 7,141, 5.165556, 0.217778, 0.096795, 0.036137, 0.051624, 0.316412, 0.26866, 0.254894, 0.220478, 0.220478, 0.220478, 0, 0.011473, 0.23106, 7,141, 274, 88, 26.062044, 0.835185, 0.324464, 0, 0.302158, 0, 0, 0.045197, 0.00655, 0, 0, 0, 0, 0
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0.064748, false, 0, 0.093525, 0, 0.223022, 0
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
Row 9
- hexsha: c7ba7f82e01986b93c50e54b040c99061ee59d08; size: 26,640; ext: py; lang: Python
- max_stars: path OverlayUFOs/Overlay UFOs.roboFontExt/lib/OverlayUFOs.py, repo connordavenport/fbOpenTools, head 794c71d504cea1248c256bea11d5249b0a4144a1, licenses ["Unlicense"], max_stars_count null, events null
- max_issues: path OverlayUFOs/Overlay UFOs.roboFontExt/lib/OverlayUFOs.py, repo connordavenport/fbOpenTools, head 794c71d504cea1248c256bea11d5249b0a4144a1, licenses ["Unlicense"], max_issues_count null, events null
- max_forks: path OverlayUFOs/Overlay UFOs.roboFontExt/lib/OverlayUFOs.py, repo connordavenport/fbOpenTools, head 794c71d504cea1248c256bea11d5249b0a4144a1, licenses ["Unlicense"], max_forks_count null, events null
- content:

```python
#coding=utf-8
from __future__ import division
"""
# OVERLAY UFOS
For anyone looking in here, sorry the code is so messy. This is a standalone version of a script with a lot of dependencies.
"""
import os
from AppKit import * #@PydevCodeAnalysisIgnore
from vanilla import * #@PydevCodeAnalysisIgnore
from mojo.drawingTools import *
from mojo.events import addObserver, removeObserver
from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor
from mojo.UI import UpdateCurrentGlyphView
from fontTools.pens.transformPen import TransformPen
from defconAppKit.windows.baseWindow import BaseWindowController
import unicodedata
#from lib.tools.defaults import getDefaultColor
from lib.tools.drawing import strokePixelPath
from lib.UI.spaceCenter.glyphSequenceEditText import splitText
from builtins import chr
selectedSymbol = u'•'
def SmallTextListCell(editable=False):
cell = NSTextFieldCell.alloc().init()
size = NSSmallControlSize #NSMiniControlSize
cell.setControlSize_(size)
font = NSFont.systemFontOfSize_(NSFont.systemFontSizeForControlSize_(size))
cell.setFont_(font)
cell.setEditable_(editable)
return cell
class TX:
"""
An agnostic way to get a naked font.
"""
@classmethod
def naked(cls, f):
try:
return f.naked()
except:
return f
class Tool():
"""
The tool object manages the font list. This is a simplification.
"""
fonts = AllFonts()
def addObserver(self, target, method, action):
addObserver(target, method, action)
def removeObserver(self, target, method, action):
removeObserver(target, method, action)
def getCurrentFont(self):
return CurrentFont()
def getFonts(self):
u"""Answers the list of selected fonts, ordered by their path.
"""
return self.fonts
def appendToFonts(self, path):
f = OpenFont(path, showUI=False)
self.fonts.append(f)
def removeFromFonts(self, path):
for i, f in enumerate(self.fonts):
if f.path == path:
del self.fonts[i]
def getFontPaths(self):
return [f.path or str(f.info.familyName)+" "+str(f.info.styleName) for f in self.getFonts()]
def getFontLabel(self, path):
if path is None:
return None
if not path:
return 'Untitled'
name = path.split('/')[-1]
status = selectedSymbol
return status, path, name
def getFontLabels(self):
labels = {}
for path in self.getFontPaths():
if path:
label = self.getFontLabel(path)
name = label[-1]
else:
name = 'Untitled'
if not name in labels:
labels[name] = []
labels[name].append(label)
sortedLabels = []
for _, labelSet in sorted(labels.items()):
if len(labelSet) == 1: # There is only a single font with this name
sortedLabels.append(labelSet[0])
else: # Otherwise we'll have to construct new names to show the difference
for status, path, name in sorted(labelSet):
sortedLabels.append((status, path, '%s "%s"' % (name, '/'.join(path.split('/')[:-1]))))
return sortedLabels
class C:
"""
Some constants.
"""
C2 = 100
BUTTON_WIDTH = 80
STYLE_CHECKBOXSIZE = 'small'
STYLE_LABELSIZE = 'small'
STYLE_RADIOSIZE = 'small'
L = 22
LL = 25
class OverlayUFOs(BaseWindowController):
DEFAULTKEY = "com.fontbureau.overlayUFO"
DEFAULTKEY_FILLCOLOR = "%s.fillColor" %DEFAULTKEY
DEFAULTKEY_STROKECOLOR = "%s.strokeColor" %DEFAULTKEY
DEFAULTKEY_STROKE = "%s.stroke" %DEFAULTKEY
DEFAULTKEY_FILL = "%s.fill" %DEFAULTKEY
FALLBACK_FILLCOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .1)
FALLBACK_STROKECOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .5)
VERSION = 1.0
NAME = u'Overlay UFOs'
MANUAL = u"""In the current glyph window, this will present the view the same glyph from a separate
UFO or set of UFOs.<br/>
This does NOT import the UFO into a background layer. Instead, it renders a outline directly from the UFO into the glyph window view.
<ul>
<li>There is no need to import duplicate data into a background layer.</li>
<li>The source outline is always live; when changes are made to the source, they will automatically
appear in the current without re-importing.</li>
<li>The source font does not need to be opened with a UI.</li>
</ul>
<h3>DIALOG</h3>
<ul>
<li>A floating dialog is present to let you open and select source fonts, fill, stroke, color.</li>
<li>Source Fonts: The default source font list is self.getOpenFonts(). The refresh button will
return this list to self.getOpenFonts().</li>
<li>Adding Fonts: You can manually add fonts by selecting a UFO file.
The UFO file will open without an interface.</li>
<li>Removing Fonts: There are buttons for removing selected fonts and for clearing the source font list.</li>
</ul>
<h3>BUGS/IMPROVEMENTS</h3>
<ul>
<li>Known Issue: The source font is drawn on top of the current font, instead of behind it.
So, it is good to select a color with a low opacity.</li>
<li>Known Bug: If the glyph window for both source and current fonts are open, it is possible
to select and inadvertently edit the source outline in the current window. I don't know how to solve this.</li>
<li>Improvement?: Add options to scale the source font.</li>
<li>Improvement?: Set different colors, fill settings for each font?</li>
</ul>
"""
# Fixed width of the window.
VIEWMINSIZE = 400
VIEWSIZE = VIEWMINSIZE
VIEWMAXSIZE = VIEWMINSIZE
WINDOW_POSSIZE = (130, 20, VIEWSIZE, 260)
WINDOW_MINSIZE = (VIEWMINSIZE, 260)
WINDOW_MAXSIZE = (VIEWMAXSIZE, 260)
def getPathListDescriptor(self):
return [
dict(title='Status', key='status', cell=SmallTextListCell(editable=False), width=12, editable=False),
dict(title='Name', key='name', width=300, cell=SmallTextListCell(editable=False), editable=False),
dict(title='Path', key='path', width=0, editable=False),
]
################
# OBSERVERS AND UPDATERS
################
def fontSelectionChanged(self):
self.setSourceFonts()
def activateModule(self):
self.tool.addObserver(self, 'drawInactive', 'drawInactive')
self.tool.addObserver(self, 'drawBackground', 'drawBackground')
self.tool.addObserver(self, 'fontDidOpen', 'fontDidOpen')
self.tool.addObserver(self, 'fontWillClose', 'fontWillClose')
def deactivateModule(self):
removeObserver(self, 'drawBackground')
removeObserver(self, 'drawInactive')
removeObserver(self, 'fontDidOpen')
removeObserver(self, 'fontWillClose')
################
# CONTEXTS
################
def fontDidOpen(self, info):
font = info.get('font')
if font:
self.tool.fonts.append(font)
self.refreshCallback()
def fontWillClose(self, info):
font = info.get('font')
path = font.path
if path:
self.tool.removeFromFonts(path)
self.refreshCallback()
def __init__(self):
self.tool = Tool()
self.w = FloatingWindow((400, 200), "Overlay UFOs", minSize=(400, 200))
self.populateView()
self.getView().open()
def getView(self):
return self.w
def refreshCallback(self, sender=None):
"""
Update the font list.
"""
self.getView().fontList.set(self.getFontItems())
def resetCallback(self, sender=None):
"""
Resets the view to the currently opened fonts.
"""
self.tool.fonts = AllFonts()
self.getView().fontList.set(self.getFontItems())
def addCallback(self, sender=None):
"""
Open a font without UI and add it to the font list.
"""
f = OpenFont(None, showUI=False)
if f is None:
return
self.tool.appendToFonts(f.path)
self.refreshCallback()
def populateView(self):
"""
The UI
"""
self.fillColor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
self.strokeColor = getExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, self.FALLBACK_STROKECOLOR)
self.contextBefore = self.contextAfter = ''
# Populating the view can only happen after the view is attached to the window,
# or else the relative widths go wrong.
view = self.getView()
view.add = Button((-40, 3, 30, 22), '+', callback=self.addCallback)
view.reset = Button((-40, 30, 30, 22), chr(8634), callback=self.resetCallback)
# Flag to see if the selection list click is in progress. We are resetting the selection
# ourselves, using the list "buttons", but changing that selection will cause another
# list update, that should be ignored.
self._selectionChanging = False
# Indicate that we are a drawing module
self._canDraw = True
self.sources = []
x = y = 4
view.fontList = List((C.C2, y, 250, -65), self.getFontItems(),
selectionCallback=self.fontListCallback,
drawFocusRing=False,
enableDelete=False,
allowsMultipleSelection=False,
allowsEmptySelection=True,
drawHorizontalLines=True,
showColumnTitles=False,
columnDescriptions=self.getPathListDescriptor(),
rowHeight=16,
)
view.viewEnabled = CheckBox((x, y, C.BUTTON_WIDTH, 22), "Show",
callback=self.viewCallback, sizeStyle=C.STYLE_CHECKBOXSIZE,
value=True)
y += C.L
view.fill = CheckBox((x, y, 60, 22), "Fill", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "fill"), True),
value = True,
callback=self.fillCallback)
y += C.L
color = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
view.color = ColorWell((x, y, 60, 22),
color=color,
callback=self.colorCallback)
y += C.L + 5
view.stroke = CheckBox((x, y, 60, 22), "Stroke", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "stroke"), False),
value = False,
callback=self.strokeCallback)
y += C.LL
view.alignText = TextBox((x, y, 90, 50), 'Alignment', sizeStyle=C.STYLE_LABELSIZE)
y += C.L
view.align = RadioGroup((x, y, 90, 50), ['Left', 'Center', 'Right'], isVertical=True,
sizeStyle=C.STYLE_RADIOSIZE, callback=self.alignCallback)
view.align.set(0)
#view.contextLabel = TextBox((C.C2, -58, 90, 50), 'Contexts', sizeStyle=C.STYLE_LABELSIZE)
view.viewCurrent = CheckBox((C.C2, -60, 150, 22), "Always View Current", sizeStyle=C.STYLE_CHECKBOXSIZE,
value = False,
callback=self.contextEditCallback)
#view.contextUandlc = CheckBox((C.C2+170, -60, 85, 22), "Match Case", sizeStyle=C.STYLE_CHECKBOXSIZE,
# value = False,
# callback=self.contextEditCallback)
view.contextBefore = EditText((C.C2, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Left Context')
view.contextCurrent = EditText((C.C2+95, -30, 60, 20), callback=self.contextCurrentEditCallback, continuous=True, sizeStyle="small")
view.contextAfter = EditText((C.C2+165, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Right Context')
self.activateModule()
self.setUpBaseWindowBehavior()
def fontListCallback(self, sender):
u"""If there is a selection, toggle the status of these fonts."""
# Avoid recursive loop because of changing font selection
if not self._selectionChanging:
for selectedIndex in sender.getSelection():
item = sender.get()[selectedIndex]
if item['status']:
item['status'] = ''
else:
item['status'] = selectedSymbol
# If shift is held when pressing an entry in the font list,
# the non-selected fonts will swap with the current's state
if NSEvent.modifierFlags() & NSShiftKeyMask:
items = [sender.get()[i] for i in range(len(sender.get())) if i != selectedIndex]
for subItems in items:
if item['status'] == '':
subItems['status'] = selectedSymbol
else:
subItems['status'] = ''
self._selectionChanging = True
# Avoid recursive loop because of changing font selection
sender.setSelection([])
self._selectionChanging = False
self.updateView()
def canDraw(self):
return True
"""
There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
"""
#def isUpper(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Lu':
# return True
# return False
#def isLower(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Ll':
# return True
# return False
def getHiddenFont(self, path):
from builtins import str
for f in self.tool.getFonts():
if f.path == path:
return f
elif path == str(f.info.familyName)+" "+str(f.info.styleName):
return f
def drawBackground(self, info):
u"""Draw the background of defined glyphs and fonbts.
Scale is available as mouse.scale."""
view = self.getView()
if not view.viewEnabled.get():
return
fill = getExtensionDefault(self.DEFAULTKEY_FILL, True)
stroke = getExtensionDefault(self.DEFAULTKEY_STROKE, True)
fillcolor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
glyph = info.get('glyph')
if glyph is not None:
current = glyph.getParent()
else:
current = self.tool.getCurrentFont()
if glyph is None or current is None:
return
align = self.getAlignment()
# Get the fonts from the list and see if they are selected.
sourceItems = self.getSourceFonts()
showFonts = []
for item in sourceItems:
if not item['status']:
continue
path = item['path']
font = self.getHiddenFont(path)
showFonts.append(font)
if view.viewCurrent.get() and current not in showFonts:
showFonts.append(current)
for font in showFonts:
self.fillColor.setFill()
self.strokeColor.setStroke()
contextBefore, contextCurrent, contextAfter = self.getContexts()
if font is not None:
contextBefore = splitText(contextBefore, TX.naked(font).unicodeData, TX.naked(font).groups)
contextBefore = [font[gname] for gname in contextBefore if gname in font.keys()]
contextAfter = splitText(contextAfter, TX.naked(font).unicodeData, TX.naked(font).groups)
contextAfter = [font[gname] for gname in contextAfter if gname in font.keys()]
contextCurrent = splitText(contextCurrent, TX.naked(font).unicodeData, TX.naked(font).groups)
if len(contextCurrent) > 0:
contextCurrent = [font[gname] for gname in [contextCurrent[0]] if gname in font.keys()]
if len(contextCurrent) > 0:
sourceGlyph = contextCurrent[0]
else:
sourceGlyph = None
elif glyph.name in font.keys():
sourceGlyph = font[glyph.name]
else:
sourceGlyph = None
"""
#There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
if view.contextUandlc.get():
caseTransform = None
if self.isUpper(glyph):
caseTransform = FontTX.unicodes.getUpperFromLower
elif self.isLower(glyph):
caseTransform = FontTX.unicodes.getLowerFromUpper
if caseTransform:
for i, g in enumerate(contextBefore):
newG = caseTransform(g)
if newG is not None:
contextBefore[i] = newG
newG = caseTransform(sourceGlyph)
if newG is not None:
sourceGlyph = newG
if caseTransform:
for i, g in enumerate(contextAfter):
newG = caseTransform(g)
if newG is not None:
contextAfter[i] = newG
"""
scale(current.info.unitsPerEm/float(font.info.unitsPerEm))
widthOffset = 0
if sourceGlyph is not None:
if align == 'center':
destCenter = float(glyph.width/2) / current.info.unitsPerEm
sourceCenter = float(sourceGlyph.width/2) / font.info.unitsPerEm
widthOffset = (destCenter-sourceCenter) * font.info.unitsPerEm
elif align == 'right':
widthOffset = ( ( glyph.width / glyph.getParent().info.unitsPerEm ) - (sourceGlyph.width / sourceGlyph.getParent().info.unitsPerEm ) ) * font.info.unitsPerEm
translate(widthOffset, 0)
previousGlyph = sourceGlyph
contextBefore.reverse()
totalWidth = 0
for i, cbGlyph in enumerate(contextBefore):
kernValue = 0
if previousGlyph is not None and previousGlyph.getParent() == cbGlyph.getParent():
# Uncomment to activate kerning. Requires FontTX.
#kernValue += FontTX.kerning.getValue((previousGlyph.name, cbGlyph.name), font.kerning, font.groups)
kernValue += 0
translate(-cbGlyph.width-kernValue, 0)
totalWidth += cbGlyph.width + kernValue
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
previousGlyph = cbGlyph
translate(totalWidth, 0)
totalWidth = 0
contextCurrentAndAfter = [sourceGlyph]+contextAfter
for i, cbGlyph in enumerate(contextCurrentAndAfter):
if cbGlyph is None:
cbGlyph = sourceGlyph
nextGlyph = None
if i + 1 < len(contextCurrentAndAfter):
nextGlyph = contextCurrentAndAfter[i+1]
if (i == 0 and cbGlyph == glyph) or sourceGlyph is None:
pass
else:
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
kernValue = 0
if cbGlyph is not None and nextGlyph is not None and nextGlyph.getParent() == cbGlyph.getParent():
#kernValue = FontTX.kerning.getValue((cbGlyph.name, nextGlyph.name), font.kerning, font.groups)
# Uncomment to activate kerning. Requires FontTX.
kernValue = 0
width = 0
if cbGlyph is not None:
width = cbGlyph.width
translate(width+kernValue, 0)
totalWidth += width + kernValue
previousGlyph = cbGlyph
translate(-totalWidth, 0)
translate(-widthOffset, 0)
scale(font.info.unitsPerEm/float(current.info.unitsPerEm))
#restore()
drawInactive = drawBackground
def viewCallback(self, sender):
self.updateView()
def getSourceFonts(self):
"""
Get the fonts in the list.
"""
view = self.getView()
return view.fontList.get()
def setSourceFonts(self):
u"""
Set the font list from the current set of open fonts.
"""
view = self.getView()
labels = []
currentSelection = []
for d in self.getSourceFonts():
if d['status']:
currentSelection.append(d['path'])
for status, path, name in self.tool.getFontLabels():
if path in currentSelection:
status = selectedSymbol
else:
status = ''
labels.append(dict(status=status, path=path, name=name))
view.fontList.set(labels)
def colorCallback(self, sender):
"""
Change the color.
"""
selectedColor = sender.get()
r = selectedColor.redComponent()
g = selectedColor.greenComponent()
b = selectedColor.blueComponent()
a = 1
strokeColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, a)
setExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, selectedColor)
setExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, strokeColor)
self.fillColor = selectedColor
self.strokeColor = strokeColor
self.updateView()
def fillCallback(self, sender):
"""
Change the fill status.
"""
setExtensionDefault(self.DEFAULTKEY_FILL, sender.get())
self.updateView()
def strokeCallback(self, sender):
"""
Change the stroke status.
"""
setExtensionDefault(self.DEFAULTKEY_STROKE, sender.get())
self.updateView()
def alignCallback(self, sender):
"""
Change the alignment status.
"""
self.updateView()
def getAlignment(self):
"""
Get the alignment as a string.
"""
view = self.getView()
index = view.align.get()
if index == 0:
return 'left'
elif index == 1:
return 'center'
elif index == 2:
return 'right'
def updateView(self, sender=None):
UpdateCurrentGlyphView()
def windowCloseCallback(self, sender):
self.deactivateModule()
self.updateView()
BaseWindowController.windowCloseCallback(self, sender)
def getFontItems(self, update=False):
"""
Get all fonts in a way that can be set into a vanilla list.
"""
paths = set() # Set of all unique paths in the merges lists
itemsByName = {}
if update: # If update flag is set, then keep the existing selected fonts.
for item in self.getSourceFonts():
if item['status']:
itemsByName[item['name']] = item
currentStatuses = {}
if hasattr(self.getView(), 'fontList'):
for d in self.getSourceFonts():
currentStatuses[d['path']] = d['status']
for status, path, uniqueName in self.tool.getFontLabels():
if path in currentStatuses:
status = currentStatuses[path]
else:
status = selectedSymbol
if not uniqueName in itemsByName.keys():# If it is not already there, add this to the list
itemsByName[uniqueName] = dict(status=status, path=path, name=uniqueName)
fontList = []
for key, item in sorted(itemsByName.items()):
fontList.append(item)
return fontList
################
# CONTEXTS
################
def getContexts(self):
if not hasattr(self, 'contextBefore'):
self.contextBefore = ''
if not hasattr(self, 'contextAfter'):
self.contextAfter = ''
if not hasattr(self, 'contextCurrent'):
self.contextCurrent = None
return self.contextBefore, self.contextCurrent, self.contextAfter
def setContexts(self, contextBefore, contextCurrent, contextAfter):
self.contextBefore = contextBefore
self.contextCurrent = contextCurrent
self.contextAfter = contextAfter
def contextEditCallback(self, sender):
before = self.getView().contextBefore.get()
current = self.getView().contextCurrent.get() or None
after = self.getView().contextAfter.get()
self.setContexts(before, current, after)
self.updateView()
def contextCurrentEditCallback(self, sender):
#if sender.get():
#sender.set(sender.get()[0])
self.contextEditCallback(sender)
if __name__ == "__main__":
OverlayUFOs()
```

- avg_line_length 39.118943; max_line_length 182; alphanum_fraction 0.575526
- qsc_code_*_quality_signal (same order as row 1): 2,687, 26,640, 5.678452, 0.202456, 0.009176, 0.005899, 0.008848, 0.187443, 0.149823, 0.136387, 0.112007, 0.073011, 0.073011, 0, 0.011281, 0.327853, 26,640, 681, 183, 39.118943, 0.84078, 0.109347, 0, 0.177243, 0, 0.015317, 0.106797, 0.005803, 0, 0, 0, 0, 0
- qsc_codepython_*_quality_signal (same order as row 1): 1, 0.091904, false, 0.002188, 0.039387, 0.010941, 0.249453, 0
- un-suffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null; effective: 1; hits: 0
c7bb3480194f9fe2fbc061710221cb965aa24166
| 9,368
|
py
|
Python
|
pyteamup/Calendar.py
|
LogicallyUnfit/pyTeamUp
|
a398fe6808d506ca4e05090b58e0a697aa1f46e5
|
[
"MIT"
] | 5
|
2019-04-11T14:52:19.000Z
|
2022-03-13T10:39:22.000Z
|
pyteamup/Calendar.py
|
LogicallyUnfit/pyTeamUp
|
a398fe6808d506ca4e05090b58e0a697aa1f46e5
|
[
"MIT"
] | 9
|
2019-04-11T14:49:59.000Z
|
2021-11-30T08:34:31.000Z
|
pyteamup/Calendar.py
|
LogicallyUnfit/pyTeamUp
|
a398fe6808d506ca4e05090b58e0a697aa1f46e5
|
[
"MIT"
] | 3
|
2019-04-11T14:17:00.000Z
|
2021-07-15T06:59:13.000Z
|
import requests
import json
import datetime
import sys
from dateutil.parser import parse as to_datetime
try:
import pandas as pd
except ImportError:  # pandas is optional; 'dataframe'/'series' returns need it
pass
from pyteamup.utils.utilities import *
from pyteamup.utils.constants import *
from pyteamup.Event import Event
class Calendar:
def __init__(self, cal_id, api_key):
self.__calendar_id = cal_id
self.__api_key = api_key
self.__cal_base = f'/{cal_id}'
self.__token_str = f'?_teamup_token={self.api_key}'
self.__subcalendars = None
self.__valid_api = None
self.__configuration = None
self._base_url = BASE_URL + self.__cal_base
self._event_collection_url = self._base_url + EVENTS_BASE + self.__token_str
self._subcalendars_url = self._base_url + SUBCALENDARS_BASE + self.__token_str
self._check_access_url = BASE_URL + CHECK_ACCESS_BASE + self.__token_str
self.events_json = None
if not self.valid_api:
raise Exception(f'Invalid Api Key: {self.api_key}')
def __str__(self):
return self.calendar_id
@property
def api_key(self):
return self.__api_key
@property
def calendar_id(self):
return self.__calendar_id
@property
def valid_api(self):
"""Makes a request to the calendar to see if the api is valid"""
if not self.__valid_api:
req = requests.get(self._check_access_url)
try:
check_status_code(req.status_code)
self.__valid_api = True
except Exception:
self.__valid_api = False
return self.__valid_api
else:
return None
@property
def configuration(self):
if self.__configuration is None:
print('Fetching configuration')
req = requests.get(self._base_url + CONFIGURATION_BASE + self.__token_str)
check_status_code(req.status_code)
self.__configuration = json.loads(req.text)['configuration']
return self.__configuration
@property
def subcalendars(self):
if not self.__subcalendars:
print('Fetching Subcalendars')
req = requests.get(self._subcalendars_url)
check_status_code(req.status_code)
self.__subcalendars = json.loads(req.text)['subcalendars']
return self.__subcalendars
def clear_calendar_cache(self):
self.__subcalendars = None
self.__configuration = None
def get_event_collection(self, start_dt=None, end_dt=None, subcal_id=None, returnas='events', markdown=False):
"""
Method allows bulk fetching of events that fall between the provided time frame. If None is provided then
the current date -30 and +180 days is used.
:param start_dt: if set as None then set as today minus 30 days
:param end_dt: if left as None then set as today plus 180 days
:param subcal_id: optional str or list-like if a different calendar should be queried
:return: json of events
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
if start_dt is None:
start_dt = datetime.date.today() - datetime.timedelta(30)
if end_dt is None:
end_dt = datetime.date.today() + datetime.timedelta(180)
subcal_par = ''
if subcal_id:
if isinstance(subcal_id, (list, tuple)):
for id in subcal_id:
subcal_par += f'&subcalendarId[]={id}'
else:
subcal_par = f'&subcalendarId[]={subcal_id}'
if markdown == True:
para_markdown = '&format[]=markdown'
else:
para_markdown = ''
parameters = f'&startDate={start_dt.strftime("%Y-%m-%d")}&endDate={end_dt.strftime("%Y-%m-%d")}' + subcal_par + para_markdown
req = requests.get(self._event_collection_url + parameters)
check_status_code(req.status_code)
self.events_json = json.loads(req.text)['events']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in self.events_json]
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(self.events_json)
else:
return self.events_json
def _create_event_from_json(self, payload):
""" Lazy Creation of Event by passing a formatted payload"""
resp = requests.post(self._event_collection_url, data=payload, headers=POST_HEADERS)
try:
check_status_code(resp.status_code)
except Exception:
print(payload)
print(resp.text)
raise
return resp.text
def get_event(self, event_id, returnas='event'):
if returnas not in ('event', 'series', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: event, series, dict')
url = self._base_url + EVENTS_BASE + f'/{event_id}' + self.__token_str
resp = requests.get(url)
check_status_code(resp.status_code)
event_dict = json.loads(resp.text)['event']
if returnas == 'event':
return Event(self, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
def get_subcalendar(self):
raise NotImplementedError
def search_events(self):
raise NotImplementedError
def get_changed_events(self, modified_since, returnas='events'):
"""
Get changed events since given unix time
:param modified_since: <int> Unix timestamp, must be less than 30 days old
:param returnas: <str> `events` `dataframe` `dict` are valid options
:return: Tuple of event list and returned timestamp
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
url = self._base_url + EVENTS_BASE + self.__token_str + '&modifiedSince=' + str(modified_since)
resp = requests.get(url)
check_status_code(resp.status_code)
events_json = json.loads(resp.text)['events']
timestamp = json.loads(resp.text)['timestamp']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in events_json], timestamp
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(events_json), timestamp
else:
return events_json, timestamp
def new_event(self, title, start_dt, end_dt, subcalendar_ids, all_day=False,
notes=None, location=None, who=None, remote_id=None, returnas='event'):
"""
Create a new event within a provided subcalendar. Can return as Event object, Series object, or Dictionary.
The undo_id is not included with the return unless returnas='event', in which case it is included with the returned Event object.
:param subcalendar_ids: <str, int, or list-like> Required - the ID(s) of the subcalendar(s) within the calendar the event should be created in.
:param title: <str> Title of the event, required
:param start_dt: <datetime> Start Datetime
:param end_dt: <datetime> End Datetime
:param all_day: <Bool> Allday or Not
:param notes: <str> HTML or Markdown formatted string detailing the Description
:param location: <str> Location of the event
:param who: <str>
:param remote_id: <str> Remote ID of the event, used to link the TeamUp event record to its source information
:param returnas: <str> `event` `series` `dict` are valid options
:return:
"""
if returnas not in ('event', 'dict', 'series'):
raise ValueError(f'Unrecognized returnas parameter: {returnas}')
if not isinstance(start_dt, datetime.datetime) or not isinstance(end_dt, datetime.datetime):
try:
start_dt = to_datetime(start_dt)
end_dt = to_datetime(end_dt)
except:
raise ValueError('Parse failed, please pass all dates as a datetime object')
if isinstance(subcalendar_ids, (str, int)):
subcalendar_ids = [subcalendar_ids]
if not isinstance(subcalendar_ids, (tuple, list)):
raise ValueError(f'Unrecognized Type: Subcalendar_ids type: {type(subcalendar_ids)}')
payload = {'remote_id': remote_id,
'title': title,
'subcalendar_ids': subcalendar_ids,
'start_dt': format_date(start_dt),
'end_dt': format_date(end_dt),
'all_day': all_day,
'notes': notes,
'location': location,
'who': who
}
resp_text = self._create_event_from_json(json.dumps(payload))
resp_dict = json.loads(resp_text)
event_dict = resp_dict['event']
undo_id = resp_dict['undo_id']
if returnas == 'event':
return Event(self, undo_id = undo_id, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
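A minimal usage sketch for the Calendar class above. The calendar key, API key, subcalendar id, and event details are placeholders; the import path simply follows the module layout shown here.

import datetime
from pyteamup.Calendar import Calendar

cal = Calendar('ks1abc123', 'my-teamup-api-key')  # placeholder credentials

# Default window: today -30 days to today +180 days, returned as Event objects.
events = cal.get_event_collection()

# A narrower window returned as plain dicts.
recent = cal.get_event_collection(
    start_dt=datetime.date.today() - datetime.timedelta(7),
    end_dt=datetime.date.today(),
    returnas='dict')

# Create a one-hour event in a single (placeholder) subcalendar.
start = datetime.datetime(2022, 1, 10, 9, 0)
meeting = cal.new_event('Stand-up', start, start + datetime.timedelta(hours=1),
                        subcalendar_ids=1234567, notes='Daily sync')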
(unlabeled per-file quality-signal values for the preceding file omitted)

c7bde259829ba295ad5078b7f30b72f3fddb4e13 | 1,608 | py | Python
examples/ws2812/main.py | ivankravets/pumbaa | 2a1869cc204e3128516ed6fa9f89529aedec1702 | ["MIT"] | 69 | 2016-09-04T18:36:18.000Z | 2021-07-04T21:51:54.000Z
examples/ws2812/main.py | ivankravets/pumbaa | 2a1869cc204e3128516ed6fa9f89529aedec1702 | ["MIT"] | 42 | 2016-09-02T20:10:19.000Z | 2020-07-01T05:54:01.000Z
examples/ws2812/main.py | ivankravets/pumbaa | 2a1869cc204e3128516ed6fa9f89529aedec1702 | ["MIT"] | 11 | 2016-09-29T14:33:23.000Z | 2021-02-28T19:30:49.000Z
#
# @section License
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017, Erik Moqvist
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is part of the Pumbaa project.
#
import board
from drivers import Ws2812
import time
PIXEL_MAX = 81
RED = PIXEL_MAX * b'\x00\xff\x00'
GREEN = PIXEL_MAX * b'\xff\x00\x00'
BLUE = PIXEL_MAX * b'\x00\x00\xff'
WS2812 = Ws2812(board.PIN_GPIO18)
while True:
print('Red.')
WS2812.write(RED)
time.sleep(0.5)
print('Green.')
WS2812.write(GREEN)
time.sleep(0.5)
print('Blue.')
WS2812.write(BLUE)
time.sleep(0.5)
(unlabeled per-file quality-signal values for the preceding file omitted)

c7be4754a949474c9764e2ad170025656a516b5f | 740 | py | Python
reports/urls.py | aysiu/manana | 8af8b57c72f6154affdb5f3a9a3469a49e5818fe | ["Apache-2.0"] | 9 | 2016-02-16T23:53:40.000Z | 2020-07-13T16:04:18.000Z
reports/urls.py | aysiu/manana | 8af8b57c72f6154affdb5f3a9a3469a49e5818fe | ["Apache-2.0"] | null | null | null
reports/urls.py | aysiu/manana | 8af8b57c72f6154affdb5f3a9a3469a49e5818fe | ["Apache-2.0"] | 4 | 2016-02-16T23:56:13.000Z | 2019-05-20T15:12:14.000Z
from django.conf.urls import patterns, include, url
urlpatterns = patterns('reports.views',
url(r'^index/*$', 'index'),
url(r'^dashboard/*$', 'dashboard'),
url(r'^$', 'index'),
url(r'^detail/(?P<serial>[^/]+)$', 'detail'),
url(r'^detailpkg/(?P<serial>[^/]+)/(?P<manifest_name>[^/]+)$', 'detail_pkg'),
url(r'^detailmachine/(?P<serial>[^/]+)$', 'machine_detail'),
url(r'^appleupdate/(?P<serial>[^/]+)$', 'appleupdate'),
url(r'^raw/(?P<serial>[^/]+)$', 'raw'),
url(r'^submit/(?P<submission_type>[^/]+)$', 'submit'),
url(r'^warranty/(?P<serial>[^/]+)$', 'warranty'),
# for compatibility with MunkiReport scripts
url(r'^ip$', 'lookup_ip'),
url(r'^(?P<submission_type>[^/]+)$', 'submit'),
)
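A brief illustration of how these legacy `patterns()` routes resolve; the serials and submission type below are made up for illustration.

# Illustration only (hypothetical values): with the patterns above,
#   GET /detail/C02ABC123    -> reports.views.detail(request, serial='C02ABC123')
#   GET /warranty/C02ABC123  -> reports.views.warranty(request, serial='C02ABC123')
#   GET /submit/checkin      -> reports.views.submit(request, submission_type='checkin')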
(unlabeled per-file quality-signal values for the preceding file omitted)

c7be660a1e99ce3791843752d3993ac9fa123bdb | 5,812 | py | Python
BackEnd/venv/lib/python3.8/site-packages/pytest_flask/fixtures.py | MatheusBrodt/App_LabCarolVS | 9552149ceaa9bee15ef9a45fab2983c6651031c4 | ["MIT"] | null | null | null
BackEnd/venv/lib/python3.8/site-packages/pytest_flask/fixtures.py | MatheusBrodt/App_LabCarolVS | 9552149ceaa9bee15ef9a45fab2983c6651031c4 | ["MIT"] | 1 | 2019-08-20T18:42:14.000Z | 2019-08-20T18:42:14.000Z
BackEnd/venv/lib/python3.8/site-packages/pytest_flask/fixtures.py | MatheusBrodt/App_LabCarolVS | 9552149ceaa9bee15ef9a45fab2983c6651031c4 | ["MIT"] | 1 | 2019-08-20T18:11:48.000Z | 2019-08-20T18:11:48.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import multiprocessing
import pytest
import socket
import signal
import os
import logging
try:
from urllib2 import URLError, urlopen
except ImportError:
from urllib.error import URLError
from urllib.request import urlopen
from flask import _request_ctx_stack
@pytest.yield_fixture
def client(app):
"""A Flask test client. An instance of :class:`flask.testing.TestClient`
by default.
"""
with app.test_client() as client:
yield client
@pytest.fixture
def client_class(request, client):
"""Uses to set a ``client`` class attribute to current Flask test client::
@pytest.mark.usefixtures('client_class')
class TestView:
def login(self, email, password):
credentials = {'email': email, 'password': password}
return self.client.post(url_for('login'), data=credentials)
def test_login(self):
assert self.login('foo@example.com', 'pass').status_code == 200
"""
if request.cls is not None:
request.cls.client = client
class LiveServer(object):
"""The helper class uses to manage live server. Handles creation and
stopping application in a separate process.
:param app: The application to run.
:param host: The host where to listen (default localhost).
:param port: The port to run application.
"""
def __init__(self, app, host, port, clean_stop=False):
self.app = app
self.port = port
self.host = host
self.clean_stop = clean_stop
self._process = None
def start(self):
"""Start application in a separate process."""
def worker(app, host, port):
app.run(host=host, port=port, use_reloader=False, threaded=True)
self._process = multiprocessing.Process(
target=worker,
args=(self.app, self.host, self.port)
)
self._process.start()
# We must wait for the server to start listening with a maximum
# timeout of 5 seconds.
timeout = 5
while timeout > 0:
time.sleep(1)
try:
urlopen(self.url())
timeout = 0
except URLError:
timeout -= 1
def url(self, url=''):
"""Returns the complete url based on server options."""
return 'http://%s:%d%s' % (self.host, self.port, url)
def stop(self):
"""Stop application process."""
if self._process:
if self.clean_stop and self._stop_cleanly():
return
if self._process.is_alive():
# If it's still alive, kill it
self._process.terminate()
def _stop_cleanly(self, timeout=5):
"""Attempts to stop the server cleanly by sending a SIGINT signal and waiting for
``timeout`` seconds.
:return: True if the server was cleanly stopped, False otherwise.
"""
try:
os.kill(self._process.pid, signal.SIGINT)
self._process.join(timeout)
return True
except Exception as ex:
logging.error('Failed to join the live server process: %r', ex)
return False
def __repr__(self):
return '<LiveServer listening at %s>' % self.url()
def _rewrite_server_name(server_name, new_port):
"""Rewrite server port in ``server_name`` with ``new_port`` value."""
sep = ':'
if sep in server_name:
server_name, port = server_name.split(sep, 1)
return sep.join((server_name, new_port))
@pytest.fixture(scope='function')
def live_server(request, app, monkeypatch, pytestconfig):
"""Run application in a separate process.
When the ``live_server`` fixture is applied, the ``url_for`` function
works as expected::
def test_server_is_up_and_running(live_server):
index_url = url_for('index', _external=True)
assert index_url == 'http://localhost:5000/'
res = urllib2.urlopen(index_url)
assert res.code == 200
"""
port = pytestconfig.getvalue('live_server_port')
if port == 0:
# Bind to an open port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
host = pytestconfig.getvalue('live_server_host')
# Explicitly set application ``SERVER_NAME`` for test suite
# and restore original value on test teardown.
server_name = app.config['SERVER_NAME'] or 'localhost'
monkeypatch.setitem(app.config, 'SERVER_NAME',
_rewrite_server_name(server_name, str(port)))
clean_stop = request.config.getvalue('live_server_clean_stop')
server = LiveServer(app, host, port, clean_stop)
if request.config.getvalue('start_live_server'):
server.start()
request.addfinalizer(server.stop)
return server
@pytest.fixture
def config(app):
"""An application config."""
return app.config
@pytest.fixture
def request_ctx(app):
"""The request context which contains all request relevant information,
e.g. `session`, `g`, `flashes`, etc.
"""
return _request_ctx_stack.top
@pytest.fixture(params=['application/json', 'text/html'])
def mimetype(request):
return request.param
def _make_accept_header(mimetype):
return [('Accept', mimetype)]
@pytest.fixture
def accept_mimetype(mimetype):
return _make_accept_header(mimetype)
@pytest.fixture
def accept_json(request):
return _make_accept_header('application/json')
@pytest.fixture
def accept_jsonp():
return _make_accept_header('application/json-p')
@pytest.fixture(params=['*', '*/*'])
def accept_any(request):
return _make_accept_header(request.param)
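A hypothetical test showing how the `client` and `accept_json` fixtures above are typically combined; it assumes the project's conftest defines an `app` fixture whose application exposes a '/ping' endpoint returning JSON (the route and payload are illustrative only).

def test_ping_returns_json(client, accept_json):
    # '/ping' and its payload are illustrative; any JSON endpoint works the same way.
    res = client.get('/ping', headers=accept_json)
    assert res.status_code == 200
    assert res.get_json() == {'pong': True}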
(unlabeled per-file quality-signal values for the preceding file omitted)

c7be8fc77e58c39c645eb0be54b3d89d725dc934 | 7,700 | py | Python
tableauserverclient/server/endpoint/endpoint.py | jorwoods/server-client-python | fefd6f18d8a6617829c6323879d2c3ed77a4cda6 | ["CC0-1.0", "MIT"] | 1 | 2021-12-22T21:34:17.000Z | 2021-12-22T21:34:17.000Z
tableauserverclient/server/endpoint/endpoint.py | jorwoods/server-client-python | fefd6f18d8a6617829c6323879d2c3ed77a4cda6 | ["CC0-1.0", "MIT"] | null | null | null
tableauserverclient/server/endpoint/endpoint.py | jorwoods/server-client-python | fefd6f18d8a6617829c6323879d2c3ed77a4cda6 | ["CC0-1.0", "MIT"] | null | null | null
from .exceptions import (
ServerResponseError,
InternalServerError,
NonXMLResponseError,
EndpointUnavailableError,
)
from functools import wraps
from xml.etree.ElementTree import ParseError
from ..query import QuerySet
import logging
try:
from distutils2.version import NormalizedVersion as Version
except ImportError:
from distutils.version import LooseVersion as Version
logger = logging.getLogger("tableau.endpoint")
Success_codes = [200, 201, 202, 204]
class Endpoint(object):
def __init__(self, parent_srv):
self.parent_srv = parent_srv
@staticmethod
def _make_common_headers(auth_token, content_type):
headers = {}
if auth_token is not None:
headers["x-tableau-auth"] = auth_token
if content_type is not None:
headers["content-type"] = content_type
return headers
@staticmethod
def _safe_to_log(server_response):
"""Checks if the server_response content is not xml (eg binary image or zip)
and replaces it with a constant
"""
ALLOWED_CONTENT_TYPES = ("application/xml", "application/xml;charset=utf-8")
if server_response.headers.get("Content-Type", None) not in ALLOWED_CONTENT_TYPES:
return "[Truncated File Contents]"
else:
return server_response.content
def _make_request(
self,
method,
url,
content=None,
auth_token=None,
content_type=None,
parameters=None,
):
parameters = parameters or {}
parameters.update(self.parent_srv.http_options)
parameters["headers"] = Endpoint._make_common_headers(auth_token, content_type)
if content is not None:
parameters["data"] = content
logger.debug(u"request {}, url: {}".format(method.__name__, url))
if content:
logger.debug(u"request content: {}".format(content[:1000]))
server_response = method(url, **parameters)
self.parent_srv._namespace.detect(server_response.content)
self._check_status(server_response)
# This check is to determine if the response is a text response (xml or otherwise)
# so that we do not attempt to log bytes and other binary data.
if len(server_response.content) > 0 and server_response.encoding:
logger.debug(
u"Server response from {0}:\n\t{1}".format(
url, server_response.content.decode(server_response.encoding)
)
)
return server_response
def _check_status(self, server_response):
if server_response.status_code >= 500:
raise InternalServerError(server_response)
elif server_response.status_code not in Success_codes:
try:
raise ServerResponseError.from_response(server_response.content, self.parent_srv.namespace)
except ParseError:
# This will happen if we get a non-success HTTP code that
# doesn't return an xml error object (like metadata endpoints)
# we convert this to a better exception and pass through the raw
# response body
raise NonXMLResponseError(server_response.content)
except Exception:
# anything else re-raise here
raise
def get_unauthenticated_request(self, url):
return self._make_request(self.parent_srv.session.get, url)
def get_request(self, url, request_object=None, parameters=None):
if request_object is not None:
try:
# Query param delimiters don't need to be encoded for versions before 3.7 (2020.1)
self.parent_srv.assert_at_least_version("3.7")
parameters = parameters or {}
parameters["params"] = request_object.get_query_params()
except EndpointUnavailableError:
url = request_object.apply_query_params(url)
return self._make_request(
self.parent_srv.session.get,
url,
auth_token=self.parent_srv.auth_token,
parameters=parameters,
)
def delete_request(self, url):
# We don't return anything for a delete
self._make_request(self.parent_srv.session.delete, url, auth_token=self.parent_srv.auth_token)
def put_request(self, url, xml_request=None, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.put,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def post_request(self, url, xml_request, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.post,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def api(version):
"""Annotate the minimum supported version for an endpoint.
Checks the version on the server object and compares normalized versions.
It will raise an exception if the server version is lower than the version specified.
Args:
`version` minimum version that supports the endpoint. String.
Raises:
EndpointUnavailableError
Returns:
None
Example:
>>> @api(version="2.3")
>>> def get(self, req_options=None):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.parent_srv.assert_at_least_version(version)
return func(self, *args, **kwargs)
return wrapper
return _decorator
def parameter_added_in(**params):
"""Annotate minimum versions for new parameters or request options on an endpoint.
The api decorator documents when an endpoint was added, this decorator annotates
keyword arguments on endpoints that may control functionality added after an endpoint was introduced.
The REST API will ignore invalid parameters in most cases, so this raises a warning instead of throwing
an exception.
Args:
Key/value pairs of the form `parameter`=`version`. Kwargs.
Raises:
UserWarning
Returns:
None
Example:
>>> @api(version="2.0")
>>> @parameter_added_in(no_extract='2.5')
>>> def download(self, workbook_id, filepath=None, extract_only=False):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
import warnings
server_ver = Version(self.parent_srv.version or "0.0")
params_to_check = set(params) & set(kwargs)
for p in params_to_check:
min_ver = Version(str(params[p]))
if server_ver < min_ver:
error = "{!r} not available in {}, it will be ignored. Added in {}".format(p, server_ver, min_ver)
warnings.warn(error)
return func(self, *args, **kwargs)
return wrapper
return _decorator
class QuerysetEndpoint(Endpoint):
@api(version="2.0")
def all(self, *args, **kwargs):
queryset = QuerySet(self)
return queryset
@api(version="2.0")
def filter(self, *args, **kwargs):
queryset = QuerySet(self).filter(**kwargs)
return queryset
@api(version="2.0")
def order_by(self, *args, **kwargs):
queryset = QuerySet(self).order_by(*args)
return queryset
@api(version="2.0")
def paginate(self, **kwargs):
queryset = QuerySet(self).paginate(**kwargs)
return queryset
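A hedged sketch of how the `api` and `parameter_added_in` decorators above are combined on an endpoint method; `ExampleEndpoint`, the URL construction, and the version numbers are illustrative only, not part of this module.

class ExampleEndpoint(QuerysetEndpoint):
    @api(version="2.3")
    @parameter_added_in(no_extract="2.5")
    def download(self, item_id, filepath=None, no_extract=False):
        # Illustrative URL; real endpoints build this from the server object.
        url = "{0}/items/{1}/content".format(self.parent_srv.baseurl, item_id)
        return self.get_request(url)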
(unlabeled per-file quality-signal values for the preceding file omitted)

c7c0ec1f2d22d969372f765fb0d7aef4a98be04f | 4,617 | py | Python
spec/test_importer.py | lajohnston/anki-freeplane | 746e3dd714653df428f0541609b9c51e29cd2726 | ["MIT"] | 15 | 2016-10-06T00:27:26.000Z | 2022-03-04T04:24:50.000Z
spec/test_importer.py | eljay26/anki-freeplane | 746e3dd714653df428f0541609b9c51e29cd2726 | ["MIT"] | null | null | null
spec/test_importer.py | eljay26/anki-freeplane | 746e3dd714653df428f0541609b9c51e29cd2726 | ["MIT"] | 6 | 2016-11-08T06:55:47.000Z | 2021-03-24T22:15:14.000Z
import unittest
from freeplane_importer.importer import Importer
from mock import Mock
from mock import MagicMock
from mock import call
from freeplane_importer.model_not_found_exception import ModelNotFoundException
class TestImporter(unittest.TestCase):
def setUp(self):
self.mock_collection = Mock()
self.mock_model = MagicMock()
self.mock_collection.models.byName.return_value = self.mock_model
self.mock_note = MagicMock()
self.mock_note.model.return_value = self.mock_model
self.mock_collection.newNote.return_value = self.mock_note
self.mock_collection.models.fieldNames.return_value = []
self.importer = Importer(self.mock_collection)
self.mock_collection.db.scalar.return_value = None
self.note = {
'id': 100,
'deck': 'History',
'model': 'Basic',
'fields': {}
}
def test_it_should_initialise_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.setCurrent.assert_called_with(
self.mock_model)
def test_it_should_select_the_correct_deck(self):
self.mock_collection.decks.id.return_value = 100
self.importer = Importer(self.mock_collection)
self.importer.import_note(self.note)
self.mock_model.__setitem__.assert_called_with('did', 100)
self.mock_collection.decks.id.assert_called_with('History')
def test_it_should_find_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.byName.assert_called_with('Basic')
def test_it_should_return_true_if_note_was_added_successfully(self):
self.assertTrue(self.importer.import_note(self.note))
def test_it_should_raise_a_no_model_exception_if_the_model_does_not_exist(self):
self.mock_collection.models.byName.return_value = None
self.assertRaises(ModelNotFoundException,
self.importer.import_note, self.note)
def test_it_should_create_a_new_note(self):
self.importer.import_note(self.note)
self.mock_collection.newNote.assert_called_with()
def test_it_should_get_the_field_names_from_the_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.fieldNames.assert_called_with(
self.mock_model)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_lowercase(self):
self.mock_collection.models.fieldNames.return_value = ['id']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('id', 100)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_uppercase(self):
self.mock_collection.models.fieldNames.return_value = ['ID']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('ID', 100)
def test_it_should_populate_the_note_with_the_field_values(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front', 'Back']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_has_calls(
[call('Front', 'Front value'), call('Back', 'Back value')])
def test_it_should_ignore_fields_that_do_not_exist_in_the_model(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front']
self.importer.import_note(self.note)
self.assertFalse('Back' in self.mock_note)
def test_it_should_save_the_note_changes(self):
self.importer.import_note(self.note)
self.mock_note.flush.assert_called_with()
def test_it_should_attempt_to_find_an_existing_note_with_the_given_node_id(self):
self.mock_collection.getNote.return_value = self.mock_note
self.mock_collection.db.scalar.return_value = 123
self.importer.import_note(self.note)
self.mock_collection.getNote.assert_called_with(123)
def test_it_should_add_the_note_to_the_collection_if_it_is_new(self):
del self.mock_note.mod
self.importer.import_note(self.note)
self.mock_collection.addNote.assert_called_with(self.mock_note)
def test_it_should_not_add_the_note_to_the_collection_if_it_is_not_new(self):
self.importer.import_note(self.note)
self.assertEqual(0, self.mock_collection.addNote.call_count)
(unlabeled per-file quality-signal values for the preceding file omitted)

c7c11d6e36451e4175726cdb9543215d1fb0fff9 | 1,089 | py | Python
analysis/fitexp.py | mfkasim91/idcovid19 | 3e51b16354581a4e0defc635f837f93faff26afc | ["BSD-3-Clause"] | null | null | null
analysis/fitexp.py | mfkasim91/idcovid19 | 3e51b16354581a4e0defc635f837f93faff26afc | ["BSD-3-Clause"] | null | null | null
analysis/fitexp.py | mfkasim91/idcovid19 | 3e51b16354581a4e0defc635f837f93faff26afc | ["BSD-3-Clause"] | null | null | null
import argparse
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--plot", action="store_const", default=False, const=True)
args = parser.parse_args()
data = np.loadtxt("../data/data.csv", skiprows=1, usecols=list(range(1,8)), delimiter=",")[33:,:]
xdays = data[:,0] - np.mean(data[:,0])
deaths = data[:,-1]
print(xdays, deaths)
logdeaths = np.log(deaths)
slope, offset, rval, pval, stderr = linregress(xdays, logdeaths)
# residuals must be taken against the fitted values slope*xdays + offset
stderr = np.sqrt(np.sum((logdeaths - (slope*xdays + offset))**2) / (len(logdeaths) - 2.)) / np.sqrt(np.sum((xdays - np.mean(xdays))**2))
if args.plot:
plt.plot(xdays, np.exp(offset + slope*xdays), 'C0-')
plt.plot(xdays, np.exp(offset + (slope+stderr)*xdays), 'C0--')
plt.plot(xdays, np.exp(offset + (slope-stderr)*xdays), 'C0--')
plt.plot(xdays, deaths, 'C0o')
plt.gca().set_yscale("log")
plt.show()
print("Slope: %.3e" % slope)
print("Doubling every: %.2f" % (np.log(2)/slope))
print("R-squared: %.3f" % (rval*rval))
print("Stderr: %.3e" % stderr)
(unlabeled per-file quality-signal values for the preceding file omitted)

c7c399f4aa408e4541e327b125cd44ba175da7ef | 1,901 | py | Python
percept/plot.py | joshleeb/PerceptronVis | 2d0e2f1969e11498533f190f5598c174b7584513 | ["MIT"] | null | null | null
percept/plot.py | joshleeb/PerceptronVis | 2d0e2f1969e11498533f190f5598c174b7584513 | ["MIT"] | null | null | null
percept/plot.py | joshleeb/PerceptronVis | 2d0e2f1969e11498533f190f5598c174b7584513 | ["MIT"] | null | null | null
import matplotlib.lines as lines
import matplotlib.pyplot as plt
COLOR_CLASSIFICATIONS = [
'black', # Unclassified
'blue', # Classified True (1)
'red' # Classified False (0)
]
def generate_line(ax, p0, p1, color='black', style='-'):
'''
Generates a line between points p0 and p1 which extends to be the width of
the plot.
'''
x0, y0 = p0
x1, y1 = p1
gradient = (y0 - y1) / (x0 - x1)
intercept = y1 - gradient * x1
x = ax.get_xlim()
data_y = [x[0] * gradient + intercept, x[1] * gradient + intercept]
return lines.Line2D(x, data_y, color=color, linestyle=style)
def get_boundary_plot_fn(weights):
'''
Gets the function used to represent and plot the line defined by the
perceptron's weights. The equation is: f(x) = -(w1/w2)x - w0/w2.
'''
def fn(x):
return -weights[1] / weights[2] * x - weights[0] / weights[2]
return fn
def get_point_color(point, colors):
'''
Gets the color of the point to be displayed.
'''
if point.classification is None:
return colors[0]
return colors[1] if point.classification else colors[2]
def generate(title, class_boundary, weights, points, bounds):
'''
Generates a scatter plot of points with the actual classification boundary
and the perceptron's classification boundary drawn in.
'''
boundary_fn = get_boundary_plot_fn(weights)
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_xlim(bounds[0])
ax.set_ylim(bounds[1])
ax.set_title(title)
ax.add_line(generate_line(
ax, class_boundary[0], class_boundary[1], 'cyan', '--'
))
ax.add_line(generate_line(ax, (0, boundary_fn(0)), (1, boundary_fn(1))))
ax.scatter(
[pt.x for pt in points], [pt.y for pt in points],
c=[get_point_color(pt, COLOR_CLASSIFICATIONS) for pt in points], s=30
)
return fig
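A small usage sketch for generate(); the Point tuple below stands in for the project's real point type, which only needs `x`, `y`, and `classification` attributes, and all values shown are made up.

from collections import namedtuple

Point = namedtuple('Point', ['x', 'y', 'classification'])

points = [Point(0.2, 0.3, True), Point(0.7, 0.8, False), Point(0.5, 0.1, None)]
fig = generate(
    title='Perceptron boundary after 10 epochs',
    class_boundary=((0.0, 0.0), (1.0, 1.0)),   # endpoints of the true boundary
    weights=[-0.1, 0.6, 0.5],                  # [w0, w1, w2]
    points=points,
    bounds=((0, 1), (0, 1)),
)
fig.savefig('boundary.png')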
(unlabeled per-file quality-signal values for the preceding file omitted)

c7c444c1fb4481f333fa9c3252930b474ff296c2 | 27,392 | py | Python
openpype/hosts/flame/api/lib.py | j-cube/OpenPype | f0849cbd08070a320d19bb55b7e368189a57e3ab | ["MIT"] | 1 | 2022-02-08T15:40:41.000Z | 2022-02-08T15:40:41.000Z
openpype/hosts/flame/api/lib.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | ["MIT"] | 2 | 2022-03-18T01:46:03.000Z | 2022-03-18T01:46:16.000Z
openpype/hosts/flame/api/lib.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | ["MIT"] | null | null | null
import sys
import os
import re
import json
import pickle
import tempfile
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
MARKER_COLOR,
MARKER_DURATION,
MARKER_NAME,
COLOR_MAP,
MARKER_PUBLISH_DEFAULT
)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
class CTX:
# singleton used for passing data between api modules
app_framework = None
flame_apps = []
selection = None
@contextlib.contextmanager
def io_preferences_file(klass, filepath, write=False):
try:
flag = "w" if write else "r"
yield open(filepath, flag)
except IOError as _error:
klass.log.info("Unable to work with preferences `{}`: {}".format(
filepath, _error))
class FlameAppFramework(object):
# flameAppFramework class takes care of preferences
class prefs_dict(dict):
def __init__(self, master, name, **kwargs):
self.name = name
self.master = master
if not self.master.get(self.name):
self.master[self.name] = {}
self.master[self.name].__init__()
def __getitem__(self, k):
return self.master[self.name].__getitem__(k)
def __setitem__(self, k, v):
return self.master[self.name].__setitem__(k, v)
def __delitem__(self, k):
return self.master[self.name].__delitem__(k)
def get(self, k, default=None):
return self.master[self.name].get(k, default)
def setdefault(self, k, default=None):
return self.master[self.name].setdefault(k, default)
def pop(self, *args, **kwargs):
return self.master[self.name].pop(*args, **kwargs)
def update(self, mapping=(), **kwargs):
self.master[self.name].update(mapping, **kwargs)
def __contains__(self, k):
return self.master[self.name].__contains__(k)
def copy(self): # don"t delegate w/ super - dict.copy() -> dict :(
return type(self)(self)
def keys(self):
return self.master[self.name].keys()
@classmethod
def fromkeys(cls, keys, v=None):
return cls.master[cls.name].fromkeys(keys, v)
def __repr__(self):
return "{0}({1})".format(
type(self).__name__, self.master[self.name].__repr__())
def master_keys(self):
return self.master.keys()
def __init__(self):
self.name = self.__class__.__name__
self.bundle_name = "OpenPypeFlame"
# self.prefs scope is limited to flame project and user
self.prefs = {}
self.prefs_user = {}
self.prefs_global = {}
self.log = log
try:
import flame
self.flame = flame
self.flame_project_name = self.flame.project.current_project.name
self.flame_user_name = flame.users.current_user.name
except Exception:
self.flame = None
self.flame_project_name = None
self.flame_user_name = None
import socket
self.hostname = socket.gethostname()
if sys.platform == "darwin":
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
"Library",
"Caches",
"OpenPype",
self.bundle_name
)
elif sys.platform.startswith("linux"):
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
".OpenPype",
self.bundle_name)
self.prefs_folder = os.path.join(
self.prefs_folder,
self.hostname,
)
self.log.info("[{}] waking up".format(self.__class__.__name__))
try:
self.load_prefs()
except RuntimeError:
self.save_prefs()
# menu auto-refresh defaults
if not self.prefs_global.get("menu_auto_refresh"):
self.prefs_global["menu_auto_refresh"] = {
"media_panel": True,
"batch": True,
"main_menu": True,
"timeline_menu": True
}
self.apps = []
def get_pref_file_paths(self):
prefix = self.prefs_folder + os.path.sep + self.bundle_name
prefs_file_path = "_".join([
prefix, self.flame_user_name,
self.flame_project_name]) + ".prefs"
prefs_user_file_path = "_".join([
prefix, self.flame_user_name]) + ".prefs"
prefs_global_file_path = prefix + ".prefs"
return (prefs_file_path, prefs_user_file_path, prefs_global_file_path)
def load_prefs(self):
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path) as prefs_file:
self.prefs = pickle.load(prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path) as prefs_file:
self.prefs_user = pickle.load(prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path) as prefs_file:
self.prefs_global = pickle.load(prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def save_prefs(self):
# make sure the preference folder is available
if not os.path.isdir(self.prefs_folder):
try:
os.makedirs(self.prefs_folder)
except Exception:
self.log.info("Unable to create folder {}".format(
self.prefs_folder))
return False
# get all pref file paths
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path, True) as prefs_file:
pickle.dump(self.prefs, prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path, True) as prefs_file:
pickle.dump(self.prefs_user, prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path, True) as prefs_file:
pickle.dump(self.prefs_global, prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def get_current_project():
import flame
return flame.project.current_project
def get_current_sequence(selection):
import flame
def segment_to_sequence(_segment):
track = _segment.parent
version = track.parent
return version.parent
process_timeline = None
if len(selection) == 1:
if isinstance(selection[0], flame.PySequence):
process_timeline = selection[0]
if isinstance(selection[0], flame.PySegment):
process_timeline = segment_to_sequence(selection[0])
else:
for segment in selection:
if isinstance(segment, flame.PySegment):
process_timeline = segment_to_sequence(segment)
break
return process_timeline
def rescan_hooks():
import flame
try:
flame.execute_shortcut('Rescan Python Hooks')
except Exception:
pass
def get_metadata(project_name, _log=None):
# TODO: can be replaced by MediaInfoFile class method
from adsk.libwiretapPythonClientAPI import (
WireTapClient,
WireTapServerHandle,
WireTapNodeHandle,
WireTapStr
)
class GetProjectColorPolicy(object):
def __init__(self, host_name=None, _log=None):
# Create a connection to the Backburner manager using the Wiretap
# python API.
#
self.log = _log or log
self.host_name = host_name or "localhost"
self._wiretap_client = WireTapClient()
if not self._wiretap_client.init():
raise Exception("Could not initialize Wiretap Client")
self._server = WireTapServerHandle(
"{}:IFFFS".format(self.host_name))
def process(self, project_name):
policy_node_handle = WireTapNodeHandle(
self._server,
"/projects/{}/syncolor/policy".format(project_name)
)
self.log.info(policy_node_handle)
policy = WireTapStr()
if not policy_node_handle.getNodeTypeStr(policy):
self.log.warning(
"Could not retrieve policy of '%s': %s" % (
policy_node_handle.getNodeId().id(),
policy_node_handle.lastError()
)
)
return policy.c_str()
policy_wiretap = GetProjectColorPolicy(_log=_log)
return policy_wiretap.process(project_name)
def get_segment_data_marker(segment, with_marker=None):
"""
Get openpype track item tag created by creator or loader plugin.
Attributes:
segment (flame.PySegment): flame api object
with_marker (bool)[optional]: if true it will return also marker object
Returns:
dict: openpype tag data
Returns(with_marker=True):
flame.PyMarker, dict
"""
for marker in segment.markers:
comment = marker.comment.get_value()
color = marker.colour.get_value()
name = marker.name.get_value()
if (name == MARKER_NAME) and (
color == COLOR_MAP[MARKER_COLOR]):
if not with_marker:
return json.loads(comment)
else:
return marker, json.loads(comment)
def set_segment_data_marker(segment, data=None):
"""
Set openpype track item tag to input segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
dict: json loaded data
"""
data = data or dict()
marker_data = get_segment_data_marker(segment, True)
if marker_data:
# get available openpype tag if any
marker, tag_data = marker_data
# update tag data with new data
tag_data.update(data)
# update marker with tag data
marker.comment = json.dumps(tag_data)
else:
# update tag data with new data
marker = create_segment_data_marker(segment)
# add tag data to marker's comment
marker.comment = json.dumps(data)
def set_publish_attribute(segment, value):
""" Set Publish attribute in input Tag object
Attribute:
segment (flame.PySegment)): flame api object
value (bool): True or False
"""
tag_data = get_segment_data_marker(segment)
tag_data["publish"] = value
# set data to the publish attribute
set_segment_data_marker(segment, tag_data)
def get_publish_attribute(segment):
""" Get Publish attribute from input Tag object
Attribute:
segment (flame.PySegment)): flame api object
Returns:
bool: True or False
"""
tag_data = get_segment_data_marker(segment)
if not tag_data:
set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT)
return MARKER_PUBLISH_DEFAULT
return tag_data["publish"]
def create_segment_data_marker(segment):
""" Create openpype marker on a segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
flame.PyMarker: flame api object
"""
# get duration of segment
duration = segment.record_duration.relative_frame
# calculate start frame of the new marker
start_frame = int(segment.record_in.relative_frame) + int(duration / 2)
# create marker
marker = segment.create_marker(start_frame)
# set marker name
marker.name = MARKER_NAME
# set duration
marker.duration = MARKER_DURATION
# set colour
marker.colour = COLOR_MAP[MARKER_COLOR] # Red
return marker
def get_sequence_segments(sequence, selected=False):
segments = []
# loop versions in sequence
for ver in sequence.versions:
# loop track in versions
for track in ver.tracks:
# ignore all empty tracks and hidden too
if len(track.segments) == 0 and track.hidden:
continue
# loop all segment in remaining tracks
for segment in track.segments:
if segment.name.get_value() == "":
continue
if segment.hidden.get_value() is True:
continue
if (
selected is True
and segment.selected.get_value() is not True
):
continue
# add it to original selection
segments.append(segment)
return segments
@contextlib.contextmanager
def maintained_segment_selection(sequence):
"""Maintain selection during context
Attributes:
sequence (flame.PySequence): python api object
Yield:
list of flame.PySegment
Example:
>>> with maintained_segment_selection(sequence) as selected_segments:
... for segment in selected_segments:
... segment.selected = False
>>> print(segment.selected)
True
"""
selected_segments = get_sequence_segments(sequence, True)
try:
# do the operation on selected segments
yield selected_segments
finally:
# reset all selected clips
reset_segment_selection(sequence)
# select only original selection of segments
for segment in selected_segments:
segment.selected = True
def reset_segment_selection(sequence):
"""Deselect all selected nodes
"""
for ver in sequence.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
continue
for segment in track.segments:
segment.selected = False
def _get_shot_tokens_values(clip, tokens):
old_value = None
output = {}
if not clip.shot_name:
return output
old_value = clip.shot_name.get_value()
for token in tokens:
clip.shot_name.set_value(token)
_key = str(re.sub("[<>]", "", token)).replace(" ", "_")
try:
output[_key] = int(clip.shot_name.get_value())
except ValueError:
output[_key] = clip.shot_name.get_value()
clip.shot_name.set_value(old_value)
return output
def get_segment_attributes(segment):
if segment.name.get_value() == "":
return None
# Add timeline segment to tree
clip_data = {
"shot_name": segment.shot_name.get_value(),
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"tape_name": segment.tape_name,
"source_name": segment.source_name,
"fpath": segment.file_path,
"PySegment": segment
}
# head and tail with forward compatibility
if segment.head:
# `infinite` can be also returned
if isinstance(segment.head, str):
clip_data["segment_head"] = 0
else:
clip_data["segment_head"] = int(segment.head)
if segment.tail:
# `infinite` can be also returned
if isinstance(segment.tail, str):
clip_data["segment_tail"] = 0
else:
clip_data["segment_tail"] = int(segment.tail)
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(segment, [
"<colour space>", "<width>", "<height>", "<depth>", "<segment>",
"<track>", "<track name>"
])
clip_data.update(shot_tokens)
# populate shot source metadata
segment_attrs = [
"record_duration", "record_in", "record_out",
"source_duration", "source_in", "source_out"
]
segment_attrs_data = {}
for attr_name in segment_attrs:
if not hasattr(segment, attr_name):
continue
attr = getattr(segment, attr_name)
segment_attrs_data[attr] = str(attr).replace("+", ":")
if attr_name in ["record_in", "record_out"]:
clip_data[attr_name] = attr.relative_frame
else:
clip_data[attr_name] = attr.frame
clip_data["segment_timecodes"] = segment_attrs_data
return clip_data
def get_clips_in_reels(project):
output_clips = []
project_desktop = project.current_workspace.desktop
for reel_group in project_desktop.reel_groups:
for reel in reel_group.reels:
for clip in reel.clips:
clip_data = {
"PyClip": clip,
"fps": float(str(clip.frame_rate)[:-4])
}
attrs = [
"name", "width", "height",
"ratio", "sample_rate", "bit_depth"
]
for attr in attrs:
val = getattr(clip, attr)
clip_data[attr] = val
version = clip.versions[-1]
track = version.tracks[-1]
for segment in track.segments:
segment_data = get_segment_attributes(segment)
clip_data.update(segment_data)
output_clips.append(clip_data)
return output_clips
def get_reformated_filename(filename, padded=True):
"""
Return fixed python expression path
Args:
filename (str): file name
Returns:
type: string with reformated path
Example:
get_reformated_filename("plate.1001.exr") > plate.%04d.exr
"""
found = FRAME_PATTERN.search(filename)
if not found:
log.info("File name is not sequence: {}".format(filename))
return filename
padding = get_padding_from_filename(filename)
replacement = "%0{}d".format(padding) if padded else "%d"
start_idx, end_idx = found.span(1)
return replacement.join(
[filename[:start_idx], filename[end_idx:]]
)
def get_padding_from_filename(filename):
"""
Return padding number from Flame path style
Args:
filename (str): file name
Returns:
int: padding number
Example:
get_padding_from_filename("plate.0001.exr") > 4
"""
found = get_frame_from_filename(filename)
return len(found) if found else None
def get_frame_from_filename(filename):
"""
Return sequence number from Flame path style
Args:
filename (str): file name
Returns:
str: sequence frame number as found in the file name
Example:
get_frame_from_filename("plate.0001.exr") > 0001
"""
found = re.findall(FRAME_PATTERN, filename)
return found.pop() if found else None
@contextlib.contextmanager
def maintained_object_duplication(item):
"""Maintain input item duplication
Attributes:
item (any flame.PyObject): python api object
Yield:
duplicate input PyObject type
"""
import flame
# Duplicate the clip to avoid modifying the original clip
duplicate = flame.duplicate(item)
try:
# do the operation on selected segments
yield duplicate
finally:
# delete the item at the end
flame.delete(duplicate)
@contextlib.contextmanager
def maintained_temp_file_path(suffix=None):
_suffix = suffix or ""
try:
# Store dumped json to temporary file
temporary_file = tempfile.mktemp(
suffix=_suffix, prefix="flame_maintained_")
yield temporary_file.replace("\\", "/")
except IOError as _error:
raise IOError(
"Not able to create temp json file: {}".format(_error))
finally:
# Remove the temporary json
os.remove(temporary_file)
def get_clip_segment(flame_clip):
name = flame_clip.name.get_value()
version = flame_clip.versions[0]
track = version.tracks[0]
segments = track.segments
if len(segments) < 1:
raise ValueError("Clip `{}` has no segments!".format(name))
if len(segments) > 1:
raise ValueError("Clip `{}` has too many segments!".format(name))
return segments[0]
def get_batch_group_from_desktop(name):
project = get_current_project()
project_desktop = project.current_workspace.desktop
for bgroup in project_desktop.batch_groups:
if bgroup.name.get_value() in name:
return bgroup
class MediaInfoFile(object):
"""Class to get media info file clip data
Raises:
IOError: MEDIA_SCRIPT_PATH path doesn't exists
TypeError: Not able to generate clip xml data file
ET.ParseError: Missing clip in xml clip data
IOError: Not able to save xml clip data to file
Attributes:
str: `MEDIA_SCRIPT_PATH` path to flame binary
logging.Logger: `log` logger
TODO: add method for getting metadata to dict
"""
MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"
log = log
_clip_data = None
_start_frame = None
_fps = None
_drop_mode = None
def __init__(self, path, **kwargs):
# replace log if any
if kwargs.get("logger"):
self.log = kwargs["logger"]
# test if `dl_get_media_info` path exists
self._validate_media_script_path()
# derivate other feed variables
self.feed_basename = os.path.basename(path)
self.feed_dir = os.path.dirname(path)
self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
with maintained_temp_file_path(".clip") as tmp_path:
self.log.info("Temp File: {}".format(tmp_path))
self._generate_media_info_file(tmp_path)
# get clip data and make them single if there is multiple
# clips data
xml_data = self._make_single_clip_media_info(tmp_path)
self.log.debug("xml_data: {}".format(xml_data))
self.log.debug("type: {}".format(type(xml_data)))
# get all time related data and assign them
self._get_time_info_from_origin(xml_data)
self.log.debug("start_frame: {}".format(self.start_frame))
self.log.debug("fps: {}".format(self.fps))
self.log.debug("drop frame: {}".format(self.drop_mode))
self.clip_data = xml_data
@property
def clip_data(self):
"""Clip's xml clip data
Returns:
xml.etree.ElementTree: xml data
"""
return self._clip_data
@clip_data.setter
def clip_data(self, data):
self._clip_data = data
@property
def start_frame(self):
""" Clip's starting frame found in timecode
Returns:
int: number of frames
"""
return self._start_frame
@start_frame.setter
def start_frame(self, number):
self._start_frame = int(number)
@property
def fps(self):
""" Clip's frame rate
Returns:
float: frame rate
"""
return self._fps
@fps.setter
def fps(self, fl_number):
self._fps = float(fl_number)
@property
def drop_mode(self):
""" Clip's drop frame mode
Returns:
str: drop frame flag
"""
return self._drop_mode
@drop_mode.setter
def drop_mode(self, text):
self._drop_mode = str(text)
def _validate_media_script_path(self):
if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
raise IOError("Media Scirpt does not exist: `{}`".format(
self.MEDIA_SCRIPT_PATH))
def _generate_media_info_file(self, fpath):
# Create cmd arguments for getting the xml media info file
cmd_args = [
self.MEDIA_SCRIPT_PATH,
"-e", self.feed_ext,
"-o", fpath,
self.feed_dir
]
try:
# execute creation of clip xml template data
openpype.run_subprocess(cmd_args)
except TypeError as error:
raise TypeError(
"Error creating `{}` due: {}".format(fpath, error))
def _make_single_clip_media_info(self, fpath):
with open(fpath) as f:
lines = f.readlines()
_added_root = itertools.chain(
"<root>", deepcopy(lines)[1:], "</root>")
new_root = ET.fromstringlist(_added_root)
# find the clip which is matching to my input name
xml_clips = new_root.findall("clip")
matching_clip = None
for xml_clip in xml_clips:
if xml_clip.find("name").text in self.feed_basename:
matching_clip = xml_clip
if matching_clip is None:
# return warning there is missing clip
raise ET.ParseError(
"Missing clip in `{}`. Available clips {}".format(
self.feed_basename, [
xml_clip.find("name").text
for xml_clip in xml_clips
]
))
return matching_clip
def _get_time_info_from_origin(self, xml_data):
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
# start frame
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
self.start_frame = out_feed_nb_ticks_obj.text
# fps
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
self.fps = out_feed_fps_obj.text
# drop frame mode
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
self.drop_mode = out_feed_drop_mode_obj.text
break
else:
continue
except Exception as msg:
self.log.warning(msg)
@staticmethod
def write_clip_data_to_file(fpath, xml_element_data):
""" Write xml element of clip data to file
Args:
fpath (string): file path
xml_element_data (xml.etree.ElementTree.Element): xml data
Raises:
IOError: If data could not be written to file
"""
try:
# save it as new file
tree = cET.ElementTree(xml_element_data)
tree.write(
fpath, xml_declaration=True,
method='xml', encoding='UTF-8'
)
except IOError as error:
raise IOError(
"Not able to write data to file: {}".format(error))
| 29.109458
| 79
| 0.593531
| 3,162
| 27,392
| 4.918722
| 0.150538
| 0.014402
| 0.010802
| 0.013888
| 0.259435
| 0.196297
| 0.163377
| 0.133222
| 0.106732
| 0.078184
| 0
| 0.002402
| 0.316187
| 27,392
| 940
| 80
| 29.140426
| 0.827932
| 0.168005
| 0
| 0.189286
| 0
| 0
| 0.066266
| 0.005146
| 0
| 0
| 0
| 0.002128
| 0
| 1
| 0.103571
| false
| 0.001786
| 0.0375
| 0.021429
| 0.242857
| 0.001786
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7c5220186916c25d94c94c265afef27d8cdfced
| 1,287
|
py
|
Python
|
newanalysis/plot_performances.py
|
nriesterer/cogsci-individualization
|
da97bf0a6b53f440670e22ff591348f3d3fab230
|
[
"MIT"
] | null | null | null |
newanalysis/plot_performances.py
|
nriesterer/cogsci-individualization
|
da97bf0a6b53f440670e22ff591348f3d3fab230
|
[
"MIT"
] | null | null | null |
newanalysis/plot_performances.py
|
nriesterer/cogsci-individualization
|
da97bf0a6b53f440670e22ff591348f3d3fab230
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
if len(sys.argv) != 3:
print('usage: python plot_performances.py <group_csv> <indiv_csv>')
exit()
group_file = sys.argv[1]
indiv_file = sys.argv[2]
# Load the data
df_group = pd.read_csv(group_file)
df_indiv = pd.read_csv(indiv_file)
df = pd.concat([df_group, df_indiv], sort=True)
# Prepare the data for plotting
plot_df = df.groupby(['model', 'id'], as_index=False)['hit'].agg('mean')
mfa_df = plot_df.loc[plot_df['model'] == 'MFA']
mfa_median = mfa_df['hit'].median()
plot_df = plot_df.loc[plot_df['model'] != 'MFA']
# Plot the data
sns.set(style='whitegrid', palette='colorblind')
plt.figure(figsize=(7, 3))
order = plot_df.groupby('model', as_index=False)['hit'].agg('median').sort_values('hit')['model']
colors = [('C0' if 'mReasoner' in x else 'C2') for x in order]
sns.boxplot(x='model', y='hit', data=plot_df, order=order, palette=colors)
plt.axhline(y=mfa_median, ls='--', color='C7', zorder=10)
plt.text(0.002, mfa_median + 0.015, 'MFA', color='C7', fontsize=10, transform=plt.gca().transAxes)
plt.xlabel('')
plt.yticks(np.arange(0, 1.1, 0.1))
plt.ylabel('Coverage Accuracy')
plt.tight_layout()
plt.savefig('visualizations/performances.pdf')
plt.show()
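The two input CSVs only need to share the columns consumed above ('model', 'id', 'hit'). A synthetic smoke-test input, purely illustrative and not part of the original analysis pipeline, could be generated like this:
import pandas as pd
# Illustrative toy data; real files come from the preceding analysis scripts.
pd.DataFrame({
    "model": ["MFA", "mReasoner", "PHM"],
    "id": [1, 1, 1],
    "hit": [0.6, 0.7, 0.5],
}).to_csv("group.csv", index=False)
# Then run: python plot_performances.py group.csv indiv.csv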
| 28.6
| 98
| 0.700855
| 218
| 1,287
| 4
| 0.440367
| 0.055046
| 0.025229
| 0.034404
| 0.098624
| 0.057339
| 0.057339
| 0.057339
| 0
| 0
| 0
| 0.022727
| 0.111111
| 1,287
| 44
| 99
| 29.25
| 0.73951
| 0.044289
| 0
| 0
| 0
| 0
| 0.171289
| 0.025285
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7c66a8f8b52a73b0ced73b9208760d1628d3b03
| 3,165
|
py
|
Python
|
integration_test/basic_op_capi.py
|
cl9200/nbase-arc
|
47c124b11b0bb2e8a8428c6d628ce82dc24c1ade
|
[
"Apache-2.0"
] | null | null | null |
integration_test/basic_op_capi.py
|
cl9200/nbase-arc
|
47c124b11b0bb2e8a8428c6d628ce82dc24c1ade
|
[
"Apache-2.0"
] | null | null | null |
integration_test/basic_op_capi.py
|
cl9200/nbase-arc
|
47c124b11b0bb2e8a8428c6d628ce82dc24c1ade
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import unittest
import testbase
import default_cluster
import util
import os
import constant
import config
import time
import telnetlib
import signal
class TestBasicOpCAPI(unittest.TestCase):
cluster = config.clusters[2]
@classmethod
def setUpClass(cls):
return 0
@classmethod
def tearDownClass(cls):
return 0
def setUp(self):
util.set_process_logfile_prefix( 'TestBasicOp_%s' % self._testMethodName )
self.conf_checker = default_cluster.initialize_starting_up_smr_before_redis(self.cluster, arch=self.arch)
self.assertIsNotNone(self.conf_checker, 'failed to initialize cluster')
def tearDown(self):
testbase.defaultTearDown(self)
def run_capi_server(self):
# run capi test server
_capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
local_proxy_query_timeout_millis 10000
""" % self.cluster['cluster_name']
old_cwd = os.path.abspath( os.getcwd() )
os.chdir(util.capi_dir(0))
f = open('capi_server.conf', 'w')
f.write(_capi_server_conf)
f.close()
os.chdir(old_cwd)
if self.arch == 32:  # compare by value; identity check against an int literal is unreliable
cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
else:
cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER
capi_server = util.exec_proc_async(util.capi_dir(0),
cmd, True, None, subprocess.PIPE, None)
# ping check
while True:
try:
t = telnetlib.Telnet('127.0.0.1', 6200)
break
except:
time.sleep(1)
continue
t.write("ping\r\n")
t.read_until('+PONG\r\n')
t.close()
return capi_server
def stop_process(self, capi_server):
capi_server.send_signal(signal.SIGTERM)
capi_server.wait()
def test_basic_op_capi(self):
capi_server = self.run_capi_server()
f = open("%s/test_basicop_output_capi%d" % (constant.logdir, self.arch), 'w')
p = util.exec_proc_async("../redis-%s" % constant.REDISVER,
"./runtest_gw --accurate --gw-port 6200",
True, None, f, None)
ret = p.wait()
f.close()
self.assertEquals(0, ret)
self.stop_process(capi_server)
| 28.00885
| 113
| 0.653081
| 422
| 3,165
| 4.708531
| 0.462085
| 0.075491
| 0.035229
| 0.016105
| 0.069451
| 0.02617
| 0
| 0
| 0
| 0
| 0
| 0.03182
| 0.255292
| 3,165
| 112
| 114
| 28.258929
| 0.811201
| 0.183254
| 0
| 0.077922
| 0
| 0
| 0.200623
| 0.041683
| 0
| 0
| 0
| 0
| 0.025974
| 1
| 0.090909
| false
| 0
| 0.142857
| 0.025974
| 0.298701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7c6a85099fcd6a3265a36a9b36bdf7fa4e9b9a7
| 5,509
|
py
|
Python
|
examples/scripts/flopy_lake_example.py
|
andrewcalderwood/flopy
|
0432ce96a0a5eec4d20adb4d384505632a2db3dc
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 351
|
2015-01-03T15:18:48.000Z
|
2022-03-31T09:46:43.000Z
|
examples/scripts/flopy_lake_example.py
|
andrewcalderwood/flopy
|
0432ce96a0a5eec4d20adb4d384505632a2db3dc
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 1,256
|
2015-01-15T21:10:42.000Z
|
2022-03-31T22:43:06.000Z
|
examples/scripts/flopy_lake_example.py
|
andrewcalderwood/flopy
|
0432ce96a0a5eec4d20adb4d384505632a2db3dc
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 553
|
2015-01-31T22:46:48.000Z
|
2022-03-31T17:43:35.000Z
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy
def run():
workspace = os.path.join("lake")
# make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
fext = "png"
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == "--pdf":
fext = "pdf"
# save the starting path
cwdpth = os.getcwd()
# change to the working directory
os.chdir(workspace)
# We are creating a square model with a specified head equal to `h1` along all boundaries.
# The head at the cell in the center in the top layer is fixed to `h2`. First, set the name
# of the model and the parameters of the model: the number of layers `Nlay`, the number of rows
# and columns `N`, lengths of the sides of the model `L`, aquifer thickness `H`, hydraulic
# conductivity `Kh`
name = "lake_example"
h1 = 100
h2 = 90
Nlay = 10
N = 101
L = 400.0
H = 50.0
Kh = 1.0
# Create a MODFLOW model and store it (in this case in the variable `ml`, but you can call it
# whatever you want). The modelname will be the name given to all MODFLOW files (input and output).
# The exe_name should be the full path to your MODFLOW executable. The version is either 'mf2k'
# for MODFLOW2000 or 'mf2005' for MODFLOW2005.
ml = flopy.modflow.Modflow(
modelname=name, exe_name="mf2005", version="mf2005"
)
# Define the discretization of the model. All layers are given equal thickness. The `bot` array
# is built from the `Hlay` values to indicate top and bottom of each layer, and `delrow` and
# `delcol` are computed from model size `L` and number of cells `N`. Once these are all computed,
# the Discretization file is built.
bot = np.linspace(-H / Nlay, -H, Nlay)
delrow = delcol = L / (N - 1)
dis = flopy.modflow.ModflowDis(
ml,
nlay=Nlay,
nrow=N,
ncol=N,
delr=delrow,
delc=delcol,
top=0.0,
botm=bot,
laycbd=0,
)
# Next we specify the boundary conditions and starting heads with the Basic package. The `ibound`
# array will be `1` in all cells in all layers, except for along the boundary and in the cell at
# the center in the top layer where it is set to `-1` to indicate fixed heads. The starting heads
# are used to define the heads in the fixed head cells (this is a steady simulation, so none of
# the other starting values matter). So we set the starting heads to `h1` everywhere, except for
# the head at the center of the model in the top layer.
Nhalf = int((N - 1) / 2)
ibound = np.ones((Nlay, N, N), dtype=int)
ibound[:, 0, :] = -1
ibound[:, -1, :] = -1
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
ibound[0, Nhalf, Nhalf] = -1
start = h1 * np.ones((N, N))
start[Nhalf, Nhalf] = h2
# create external ibound array and starting head files
files = []
hfile = f"{name}_strt.ref"
np.savetxt(hfile, start)
hfiles = []
for kdx in range(Nlay):
file = f"{name}_ib{kdx + 1:02d}.ref"
files.append(file)
hfiles.append(hfile)
np.savetxt(file, ibound[kdx, :, :], fmt="%5d")
bas = flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)
# The aquifer properties (really only the hydraulic conductivity) are defined with the
# LPF package.
lpf = flopy.modflow.ModflowLpf(ml, hk=Kh)
# Finally, we need to specify the solver we want to use (PCG with default values), and the
# output control (using the default values). Then we are ready to write all MODFLOW input
# files and run MODFLOW.
pcg = flopy.modflow.ModflowPcg(ml)
oc = flopy.modflow.ModflowOc(ml)
ml.write_input()
ml.run_model()
# change back to the starting directory
os.chdir(cwdpth)
# Once the model has terminated normally, we can read the heads file. First, a link to the heads
# file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by
# specifying, in this case, the step number and period number for which we want to retrieve data.
# A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions
# are used to make contours of the layers or a cross-section.
hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
h = hds.get_data(kstpkper=(0, 0))
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%2.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake1.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%1.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake2.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake3.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
return 0
if __name__ == "__main__":
success = run()
| 35.089172
| 103
| 0.626429
| 852
| 5,509
| 4.030516
| 0.318075
| 0.011648
| 0.01456
| 0.022132
| 0.149971
| 0.144147
| 0.131334
| 0.131334
| 0.115317
| 0.10396
| 0
| 0.030288
| 0.256852
| 5,509
| 156
| 104
| 35.314103
| 0.8085
| 0.449809
| 0
| 0.14433
| 0
| 0
| 0.065087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010309
| false
| 0
| 0.051546
| 0
| 0.072165
| 0.030928
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7c6afa7ba07a568b76988ebc296a4b468c42738
| 11,428
|
py
|
Python
|
P2/Caso2/clustering.py
|
Ocete/Inteligenica-de-Negocio
|
0c3bb3914893c608790002743530aba535be7249
|
[
"MIT"
] | null | null | null |
P2/Caso2/clustering.py
|
Ocete/Inteligenica-de-Negocio
|
0c3bb3914893c608790002743530aba535be7249
|
[
"MIT"
] | null | null | null |
P2/Caso2/clustering.py
|
Ocete/Inteligenica-de-Negocio
|
0c3bb3914893c608790002743530aba535be7249
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Documentation on clustering in Python:
http://scikit-learn.org/stable/modules/clustering.html
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html
https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
'''
import time
import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics
from sklearn import cluster
from math import floor
import seaborn as sns
# Nice-looking defaults
sns.set()
def norm_to_zero_one(df):
return (df - df.min()) * 1.0 / (df.max() - df.min())
censo = pd.read_csv('../mujeres_fecundidad_INE_2018.csv')
'''
for col in censo:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count)
#'''
# Unknown values can be replaced with a number
#censo = censo.replace(np.NaN,0)
# Replace missing values with the mean
for col in censo:
censo[col].fillna(censo[col].mean(), inplace=True)
# select cases
subset = censo.loc[(censo['TRAREPRO']==1) & (censo['NEMBTRAREPRO']<=6)]
# Select variables
usadas = ['NHIJOS', 'TIPOTRAREPRO', 'NMESESTRAREPRO', 'NEMBTRAREPRO']
X = subset[usadas]
X_normal = X.apply(norm_to_zero_one)
print('Tamaño de la población tras filtrado: ',len(X_normal.index))
for col in X:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count, ' AFTER')
algoritmos = (('KMeans', cluster.KMeans(init='k-means++', n_clusters=5, n_init=5)),
('MeanShift', cluster.MeanShift(cluster_all=False, min_bin_freq=3)),
('Ward', cluster.AgglomerativeClustering(n_clusters=4, linkage='ward')),
('DBScan', cluster.DBSCAN(eps=0.35, min_samples=5)),
('Birch', cluster.Birch(threshold=0.1,n_clusters=5)))
cluster_predict = {}
calinski = {}
silh = {}
times = {}
n_clusters = {}
clusters_fig, clusters_axis = plt.subplots(3, 2, figsize=(10,10))
clusters_colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', '#ffb347']
ijs = [(0,0), (0,1), (1,0), (1,1), (2,0), (2,1)]
for i_alg, par in enumerate(algoritmos):
name, alg = par
print('----- Ejecutando ' + name,)
t = time.time()
cluster_predict[name] = alg.fit_predict(X_normal)
tiempo = time.time() - t
times[name] = tiempo
metric_CH = metrics.calinski_harabasz_score(X_normal, cluster_predict[name])
calinski[name] = metric_CH
metric_SC = metrics.silhouette_score(X_normal, cluster_predict[name], metric='euclidean', sample_size=floor(len(X)), random_state=123456)
silh[name] = metric_SC
# Assign the clusters to a DataFrame
clusters = pd.DataFrame(cluster_predict[name],index=X.index,columns=['cluster'])
if (name == 'KMeans'):
clusters_kmeans = clusters
alg_kmeans = alg
elif (name == 'Ward'):
clusters_ward = clusters
print("Tamaño de cada cluster:")
size = clusters['cluster'].value_counts()
cluster_fractions = []
for num,i in size.iteritems():
print('%s: %5d (%5.2f%%)' % (num,i,100*i/len(clusters)))
cluster_fractions.append( 100*i/len(clusters) )
n_clusters[name] = len(size)
# Bar charts
if ( len(cluster_fractions) > 7 ):
cluster_fractions = cluster_fractions[0:6]
i, j = ijs[i_alg]
y_pos = np.arange(len(cluster_fractions))
labels = [ "Cluster " + str(i) for i in range(len(cluster_fractions)) ]
clusters_axis[i, j].bar(y_pos, cluster_fractions, tick_label=labels, color=clusters_colors)
clusters_axis[i, j].set_ylim(0, 100)
clusters_axis[i, j].set_title(name)
if (j == 0):
clusters_axis[i, j].set_ylabel("Cluster size (%)")
clusters_axis[2,1].remove()
#clusters_fig.savefig("clusters.png")
plt.show()
from prettytable import PrettyTable
header = ['Algoritmo', 'CH', 'Silh', 'Tiempo', 'Número de clusters']
tabla = PrettyTable(header)
for name, alg in algoritmos:
tabla.add_row([name,
"{0:.2f}".format(calinski[name]),
"{0:.2f}".format(silh[name]),
"{0:.2f}".format(times[name]),
n_clusters[name]])
print(tabla)
# Write the data to a general.csv
'''
with open('general.csv', mode='w+', newline='') as file:
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for name, _ in algoritmos:
writer.writerow({'Algoritmo': name,
'CH': "{0:.2f}".format(calinski[name]),
'Silh': "{0:.2f}".format(silh[name]),
'Tiempo': "{0:.2f}".format(times[name]),
'Número de clusters': n_clusters[name]})
#'''
# ----------------------- DISTRIBUTION FUNCTIONS ---------
print("---------- Preparando funciones de distribución...")
n_clusters_ward = n_clusters['Ward']
n_var = len(usadas)
X_ward = pd.concat([X, clusters_ward], axis=1)
fig, axes = plt.subplots(n_clusters_ward, n_var, sharey=True, figsize=(15,15))
fig.subplots_adjust(wspace=0, hspace=0)
colors = sns.color_palette(palette=None, n_colors=n_clusters_ward, desat=None)
rango = []
for j in range(n_var):
rango.append([X_ward[usadas[j]].min(), X_ward[usadas[j]].max()])
for i in range(n_clusters_ward):
dat_filt = X_ward.loc[X_ward['cluster']==i]
for j in range(n_var):
#ax = sns.kdeplot(dat_filt[usadas[j]], label="", shade=True, color=colors[i], ax=axes[i,j])
ax = sns.boxplot(dat_filt[usadas[j]], color=colors[i], flierprops={'marker':'o','markersize':4}, ax=axes[i,j])
if (i==n_clusters_ward-1):
axes[i,j].set_xlabel(usadas[j])
else:
axes[i,j].set_xlabel("")
if (j==0):
axes[i,j].set_ylabel("Cluster "+str(i))
else:
axes[i,j].set_ylabel("")
axes[i,j].set_yticks([])
axes[i,j].grid(axis='x', linestyle='-', linewidth='0.2', color='gray')
axes[i,j].grid(axis='y', b=False)
ax.set_xlim(rango[j][0]-0.05*(rango[j][1]-rango[j][0]),rango[j][1]+0.05*(rango[j][1]-rango[j][0]))
plt.show()
#fig.savefig("boxes.png")
# ---------------- SCATTER MATRIX -----------------------
'''
plt.clf()
print("---------- Preparando el scatter matrix...")
# The cluster assignment is added as a column to X
variables = list(X_ward)
variables.remove('cluster')
sns_plot = sns.pairplot(X_ward, vars=variables, hue="cluster", palette='Paired', plot_kws={"s": 25}, diag_kind="hist")
sns_plot.fig.subplots_adjust(wspace=.03, hspace=.03);
# sns_plot.savefig("scatter_matrix.png")
plt.show()
#'''
# ----------------------- DENDROGRAMS -----------------------
# In clustering we have to normalize for the distance metrics
# X_normal = preprocessing.normalize(X, norm='l2')
X_normal = (X - X.min() ) / (X.max() - X.min())
# Use this hierarchical clustering and keep 20 clusters, i.e. twenty branches of the dendrogram
ward = cluster.AgglomerativeClustering(n_clusters=20, linkage='ward')
name, algorithm = ('Ward', ward)
cluster_predict = {}
k = {}
t = time.time()
cluster_predict[name] = algorithm.fit_predict(X_normal)
tiempo = time.time() - t
k[name] = len(set(cluster_predict[name]))
# Convert the cluster assignment to a DataFrame
clusters = pd.DataFrame(cluster_predict['Ward'],index=X.index,columns=['cluster'])
# And add it as a column to X
X_cluster = pd.concat([X, clusters], axis=1)
# Filter out the elements (outliers) that fall into very small clusters in the hierarchical clustering
min_size = 3
X_filtrado = X
'''
X_cluster[X_cluster.groupby('cluster').cluster.transform(len) > min_size]
k_filtrado = len(set(X_filtrado['cluster']))
print("De los {:.0f} clusters hay {:.0f} con más de {:.0f} elementos. Del total de {:.0f} elementos, se seleccionan {:.0f}".format(k['Ward'],k_filtrado,min_size,len(X),len(X_filtrado)))
X_filtrado = X_filtrado.drop('cluster', 1)
X_filtrado = X
#'''
# Normalize the filtered set
X_filtrado_normal = preprocessing.normalize(X_filtrado, norm='l2')
# Get the dendrogram using scipy, which actually re-runs the hierarchical clustering
from scipy.cluster import hierarchy
linkage_array = hierarchy.ward(X_filtrado_normal)
plt.clf()
dendro = hierarchy.dendrogram(linkage_array, orientation='left', p=10, truncate_mode='lastp')  # horizontal orientation to compare it with the one generated by seaborn
# "p=10, truncate_mode='lastp'" can be used to cut the dendrogram down to 10 leaves
# Dendrogram using seaborn (which in turn uses scipy) to include a heatmap
X_filtrado_normal_DF = pd.DataFrame(X_filtrado_normal, index=X_filtrado.index, columns=usadas)
# Add a label column to indicate the cluster each object belongs to
labels = X_ward['cluster']
lut = dict(zip(set(labels), sns.color_palette(palette="Blues_d", n_colors=n_clusters_ward)))
row_colors = pd.DataFrame(labels)['cluster'].map(lut)
clustergrid = sns.clustermap(X_filtrado_normal_DF, method='ward', row_colors=row_colors, col_cluster=False, figsize=(20,10), cmap="YlGnBu", yticklabels=False)
# To add the reordered labels. Right now the colors do not end up in the
# column where they should. Presumably this is because the ids do not line up.
#'''
ordering = clustergrid.dendrogram_row.reordered_ind
labels_list = [x for _, x in sorted(zip(ordering,labels), key=lambda pair: pair[0])]
labels = pd.Series(labels_list, index=X_filtrado_normal_DF.index, name='cluster')
lut = dict(zip(set(labels), sns.color_palette(palette="Blues_d", n_colors=n_clusters_ward)))
row_colors = pd.DataFrame(labels)['cluster'].map(lut)
clustergrid = sns.clustermap(X_filtrado_normal_DF, method='ward', row_colors=row_colors, col_cluster=False, figsize=(20,10), cmap="YlGnBu", yticklabels=False)
#'''
#plt.savefig("dendograma.png")
# ----------------------- HEATMAPS -----------------------
#'''
plt.figure(1)
centers = pd.DataFrame(alg_kmeans.cluster_centers_, columns=list(X))
centers_desnormal = centers.copy()
centers_desnormal = centers.drop([4])
# Compute the centroids
X = pd.concat([X, clusters_ward], axis=1)
for variable in list(centers):
for k_cluster in range(n_clusters_ward):
centroide = X.loc[(clusters_ward['cluster']==k_cluster)][variable].mean()
centers_desnormal.loc[k_cluster, variable] = centroide
# Normalize
centers_normal2 = centers_desnormal.copy()
centers_normal2 = (centers_normal2 - centers_normal2.min() ) / (centers_normal2.max() - centers_normal2.min())
import matplotlib.pyplot as plt
heatmap_fig, ax = plt.subplots(figsize=(10,10))
heatmap = sns.heatmap(centers_normal2, cmap="YlGnBu", annot=centers_desnormal, fmt='.3f')
# Prevent the top and bottom blocks from being cut in half
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
#heatmap_fig.savefig("heatmap.png")
#'''
| 37.468852
| 187
| 0.651995
| 1,575
| 11,428
| 4.593016
| 0.279365
| 0.02115
| 0.016174
| 0.006221
| 0.237904
| 0.154548
| 0.134642
| 0.127454
| 0.113907
| 0.099254
| 0
| 0.018061
| 0.186034
| 11,428
| 304
| 188
| 37.592105
| 0.759192
| 0.198985
| 0
| 0.141026
| 0
| 0
| 0.081817
| 0.004583
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00641
| false
| 0
| 0.083333
| 0.00641
| 0.096154
| 0.044872
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7c71735421912226dadf924d3330fb19e4f6af5
| 9,029
|
py
|
Python
|
signal_processing/ecg_preproc.py
|
DeepPSP/cpsc2020
|
47acb884ea1f2f819e564d8a17ad37001ed0df27
|
[
"BSD-3-Clause"
] | 1
|
2021-12-07T11:44:48.000Z
|
2021-12-07T11:44:48.000Z
|
signal_processing/ecg_preproc.py
|
wenh06/cpsc2020
|
47acb884ea1f2f819e564d8a17ad37001ed0df27
|
[
"BSD-3-Clause"
] | null | null | null |
signal_processing/ecg_preproc.py
|
wenh06/cpsc2020
|
47acb884ea1f2f819e564d8a17ad37001ed0df27
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T14:56:02.000Z
|
2021-05-25T14:56:02.000Z
|
"""
preprocess of (single lead) ecg signal:
band pass --> remove baseline --> find rpeaks --> denoise (mainly deal with motion artefact)
TODO:
1. motion artefact detection,
and slice the signal into continuous (no motion artefact within) segments
2. to add
References:
-----------
[1] https://github.com/PIA-Group/BioSPPy
[2] to add
"""
import os, time
import multiprocessing as mp
from copy import deepcopy
from numbers import Real
from typing import Union, Optional, Any, List, Dict
import numpy as np
from easydict import EasyDict as ED
from scipy.ndimage.filters import median_filter
from scipy.signal.signaltools import resample
from scipy.io import savemat
# from scipy.signal import medfilt
# https://github.com/scipy/scipy/issues/9680
try:
from biosppy.signals.tools import filter_signal
except:
from references.biosppy.biosppy.signals.tools import filter_signal
from cfg import PreprocCfg
from .ecg_rpeaks import (
xqrs_detect, gqrs_detect, pantompkins,
hamilton_detect, ssf_detect, christov_detect, engzee_detect, gamboa_detect,
)
from .ecg_rpeaks_dl import seq_lab_net_detect
__all__ = [
"preprocess_signal",
"parallel_preprocess_signal",
"denoise_signal",
]
QRS_DETECTORS = {
"xqrs": xqrs_detect,
"gqrs": gqrs_detect,
"pantompkins": pantompkins,
"hamilton": hamilton_detect,
"ssf": ssf_detect,
"christov": christov_detect,
"engzee": engzee_detect,
"gamboa": gamboa_detect,
"seq_lab": seq_lab_net_detect,
}
DL_QRS_DETECTORS = [
"seq_lab",
]
def preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will be updated by this `config`
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
filtered_ecg = raw_sig.copy()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
if fs != cfg.fs:
filtered_ecg = resample(filtered_ecg, int(round(len(filtered_ecg)*cfg.fs/fs)))
# remove baseline
if 'baseline' in cfg.preproc:
window1 = 2 * (cfg.baseline_window1 // 2) + 1 # window size must be odd
window2 = 2 * (cfg.baseline_window2 // 2) + 1
baseline = median_filter(filtered_ecg, size=window1, mode='nearest')
baseline = median_filter(baseline, size=window2, mode='nearest')
filtered_ecg = filtered_ecg - baseline
# filter signal
if 'bandpass' in cfg.preproc:
filtered_ecg = filter_signal(
signal=filtered_ecg,
ftype='FIR',
band='bandpass',
order=int(0.3 * fs),
sampling_rate=fs,
frequency=cfg.filter_band,
)['signal']
if cfg.rpeaks and cfg.rpeaks.lower() not in DL_QRS_DETECTORS:
# dl detectors not for parallel computing using `mp`
detector = QRS_DETECTORS[cfg.rpeaks.lower()]
rpeaks = detector(sig=filtered_ecg, fs=fs).astype(int)
else:
rpeaks = np.array([], dtype=int)
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
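# Usage sketch for `preprocess_signal` (illustrative only; the record path and the
# detector choice below are assumptions, not values defined in this module):
# >>> from scipy.io import loadmat
# >>> raw_sig = loadmat("./data/A01.mat")['ecg'].flatten()
# >>> out = preprocess_signal(raw_sig, fs=400, config=ED({"rpeaks": "xqrs"}))
# >>> out["filtered_ecg"], out["rpeaks"]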
def parallel_preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None, save_dir:Optional[str]=None, save_fmt:str='npy', verbose:int=0) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will `update` this `config`
save_dir: str, optional,
directory for saving the outcome ('filtered_ecg' and 'rpeaks')
save_fmt: str, default 'npy',
format of the save files, 'npy' or 'mat'
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
start_time = time.time()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
epoch_len = int(cfg.parallel_epoch_len * fs)
epoch_overlap_half = int(cfg.parallel_epoch_overlap * fs) // 2
epoch_overlap = 2 * epoch_overlap_half
epoch_forward = epoch_len - epoch_overlap
if len(raw_sig) <= 3 * epoch_len: # too short, no need for parallel computing
retval = preprocess_signal(raw_sig, fs, cfg)
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
retval.rpeaks = rpeaks
return retval
l_epoch = [
raw_sig[idx*epoch_forward: idx*epoch_forward + epoch_len] \
for idx in range((len(raw_sig)-epoch_overlap)//epoch_forward)
]
if cfg.parallel_keep_tail:
tail_start_idx = epoch_forward * len(l_epoch) + epoch_overlap
if len(raw_sig) - tail_start_idx < 30 * fs: # less than 30s, make configurable?
# append to the last epoch
l_epoch[-1] = np.append(l_epoch[-1], raw_sig[tail_start_idx:])
else: # long enough
tail_epoch = raw_sig[tail_start_idx-epoch_overlap:]
l_epoch.append(tail_epoch)
cpu_num = max(1, mp.cpu_count()-3)
with mp.Pool(processes=cpu_num) as pool:
result = pool.starmap(
func=preprocess_signal,
iterable=[(e, fs, cfg) for e in l_epoch],
)
if cfg.parallel_keep_tail:
tail_result = result[-1]
result = result[:-1]
filtered_ecg = result[0]['filtered_ecg'][:epoch_len-epoch_overlap_half]
rpeaks = result[0]['rpeaks'][np.where(result[0]['rpeaks']<epoch_len-epoch_overlap_half)[0]]
for idx, e in enumerate(result[1:]):
filtered_ecg = np.append(
filtered_ecg, e['filtered_ecg'][epoch_overlap_half: -epoch_overlap_half]
)
epoch_rpeaks = e['rpeaks'][np.where( (e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len-epoch_overlap_half) )[0]]
rpeaks = np.append(rpeaks, (idx+1)*epoch_forward + epoch_rpeaks)
if cfg.parallel_keep_tail:
filtered_ecg = np.append(filtered_ecg, tail_result['filtered_ecg'][epoch_overlap_half:])
tail_rpeaks = tail_result['rpeaks'][np.where(tail_result['rpeaks'] >= epoch_overlap_half)[0]]
rpeaks = np.append(rpeaks, len(result)*epoch_forward + tail_rpeaks)
if verbose >= 1:
if cfg.rpeaks.lower() in DL_QRS_DETECTORS:
print(f"signal processing took {round(time.time()-start_time, 3)} seconds")
else:
print(f"signal processing and R peaks detection took {round(time.time()-start_time, 3)} seconds")
start_time = time.time()
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
if verbose >= 1:
print(f"R peaks detection using {cfg.rpeaks} took {round(time.time()-start_time, 3)} seconds")
if save_dir:
# NOTE: this part is not tested
os.makedirs(save_dir, exist_ok=True)
if save_fmt.lower() == 'npy':
np.save(os.path.join(save_dir, "filtered_ecg.npy"), filtered_ecg)
np.save(os.path.join(save_dir, "rpeaks.npy"), rpeaks)
elif save_fmt.lower() == 'mat':
# save into 2 files, kept consistent with each other
savemat(os.path.join(save_dir, "filtered_ecg.mat"), {"filtered_ecg": filtered_ecg}, format='5')
savemat(os.path.join(save_dir, "rpeaks.mat"), {"rpeaks": rpeaks}, format='5')
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
"""
to check correctness of the function `parallel_preprocess_signal`,
say for record A01, one can call
>>> raw_sig = loadmat("./data/A01.mat")['ecg'].flatten()
>>> processed = parallel_preprocess_signal(raw_sig, 400)
>>> print(len(processed['filtered_ecg']) - len(raw_sig))
>>> start_t = int(3600*24.7811)
>>> len_t = 10
>>> fig, ax = plt.subplots(figsize=(20,6))
>>> ax.plot(hehe['filtered_ecg'][start_t*400:(start_t+len_t)*400])
>>> for r in [p for p in hehe['rpeaks'] if start_t*400 <= p < (start_t+len_t)*400]:
>>> ax.axvline(r-start_t*400,c='red',linestyle='dashed')
>>> plt.show()
or one can use the 'dataset.py'
"""
| 34.59387
| 175
| 0.646694
| 1,226
| 9,029
| 4.583197
| 0.221044
| 0.062645
| 0.028475
| 0.015661
| 0.386546
| 0.344367
| 0.295782
| 0.248799
| 0.217476
| 0.217476
| 0
| 0.012745
| 0.226603
| 9,029
| 260
| 176
| 34.726923
| 0.791923
| 0.225717
| 0
| 0.208633
| 0
| 0.007194
| 0.097319
| 0.018846
| 0
| 0
| 0
| 0.003846
| 0
| 1
| 0.014388
| false
| 0.014388
| 0.107914
| 0
| 0.143885
| 0.021583
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7c75c3cc68eb1ff8bc4c52efd3bee52faa60a5f
| 761
|
bzl
|
Python
|
ocaml/bootstrap.bzl
|
mobileink/obazl
|
eb9d10d1aac040dbc05a038265276e3ab3a52233
|
[
"Apache-2.0"
] | null | null | null |
ocaml/bootstrap.bzl
|
mobileink/obazl
|
eb9d10d1aac040dbc05a038265276e3ab3a52233
|
[
"Apache-2.0"
] | null | null | null |
ocaml/bootstrap.bzl
|
mobileink/obazl
|
eb9d10d1aac040dbc05a038265276e3ab3a52233
|
[
"Apache-2.0"
] | null | null | null |
## mv to //:WORKSPACE.bzl ocaml_configure
load("//ocaml/_bootstrap:ocaml.bzl", _ocaml_configure = "ocaml_configure")
# load("//ocaml/_bootstrap:obazl.bzl", _obazl_configure = "obazl_configure")
load("//ocaml/_rules:ocaml_repository.bzl" , _ocaml_repository = "ocaml_repository")
# load("//ocaml/_rules:opam_configuration.bzl" , _opam_configuration = "opam_configuration")
# load("//ocaml/_toolchains:ocaml_toolchains.bzl",
# _ocaml_toolchain = "ocaml_toolchain",
# _ocaml_register_toolchains = "ocaml_register_toolchains")
# obazl_configure = _obazl_configure
ocaml_configure = _ocaml_configure
ocaml_repository = _ocaml_repository
# ocaml_toolchain = _ocaml_toolchain
# ocaml_register_toolchains = _ocaml_register_toolchains
| 38.05
| 96
| 0.768725
| 82
| 761
| 6.573171
| 0.195122
| 0.12987
| 0.141002
| 0.085343
| 0.393321
| 0.274583
| 0.274583
| 0.274583
| 0.274583
| 0.274583
| 0
| 0
| 0.115637
| 761
| 19
| 97
| 40.052632
| 0.800892
| 0.649146
| 0
| 0
| 0
| 0
| 0.367188
| 0.246094
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7c9b4be102dc7ada3fac5b424f329fc54878619
| 3,021
|
py
|
Python
|
simple/facenet.py
|
taflahi/facenet
|
64e74744437e18978782b497b42300b8d4a2342b
|
[
"MIT"
] | 5
|
2018-09-25T21:04:39.000Z
|
2020-09-03T20:07:37.000Z
|
simple/facenet.py
|
SoloSynth1/facenet
|
64e74744437e18978782b497b42300b8d4a2342b
|
[
"MIT"
] | null | null | null |
simple/facenet.py
|
SoloSynth1/facenet
|
64e74744437e18978782b497b42300b8d4a2342b
|
[
"MIT"
] | 14
|
2018-10-15T00:03:24.000Z
|
2020-08-11T05:04:24.000Z
|
import tensorflow as tf
from .. src.align import detect_face
from .. src import facenet
from .. simple import download_model
import sys
import os
from os.path import expanduser
import copy
import cv2
import numpy as np
from scipy import spatial
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps' thresholds
factor = 0.709 # scale factor
def align_face(images, image_size=160, margin=11):
with tf.Graph().as_default():
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
tmp_image_paths = copy.copy(images)
img_list = []
for image in tmp_image_paths:
img = cv2.imread(os.path.expanduser(image))[:, :, ::-1]
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = detect_face.detect_face(
img, minsize, pnet, rnet, onet, threshold, factor)
if len(bounding_boxes) < 1:
images.remove(image) # drop inputs where no face was detected
print("can't detect face, remove ", image)
continue
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
aligned = cv2.resize(cropped[:, :, ::-1],
(image_size, image_size))[:, :, ::-1]
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
images = np.stack(img_list)
return images
def embedding(images):
# check if the model exists
home = expanduser('~')
model_path = home + '/.facenet_model/20180408-102900/20180408-102900.pb'
if not os.path.exists(model_path):
print("model not exists, downloading model")
download_model.download()
print("model downloaded to " + model_path)
with tf.Graph().as_default():
with tf.Session() as sess:
facenet.load_model(model_path)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
# Run forward pass to calculate embeddings
feed_dict = {images_placeholder: images,
phase_train_placeholder: False}
emb = sess.run(embeddings, feed_dict=feed_dict)
return emb
def compare(images, threshold=0.7):
emb = embedding(images)
sims = np.zeros((len(images), len(images)))
for i in range(len(images)):
for j in range(len(images)):
sims[i][j] = (
1 - spatial.distance.cosine(emb[i], emb[j]) > threshold)
return sims
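A minimal end-to-end sketch of how these helpers fit together; the image paths are placeholders and not files shipped with this module:
# Hypothetical invocation; the two jpeg paths are placeholders.
paths = ["/tmp/person_a.jpg", "/tmp/person_b.jpg"]
aligned = align_face(paths, image_size=160, margin=11)  # MTCNN detect, crop, prewhiten
sims = compare(aligned, threshold=0.7)                  # 1.0 where cosine similarity exceeds the threshold
print(sims)  # 2x2 matrix; sims[0][1] == 1.0 means the two faces are judged the same person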
| 35.127907
| 96
| 0.620655
| 414
| 3,021
| 4.379227
| 0.330918
| 0.027579
| 0.019857
| 0.02813
| 0.087148
| 0.065085
| 0.065085
| 0.065085
| 0.047435
| 0
| 0
| 0.038205
| 0.254882
| 3,021
| 85
| 97
| 35.541176
| 0.767215
| 0.049321
| 0
| 0.029412
| 0
| 0
| 0.057243
| 0.017452
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044118
| false
| 0
| 0.161765
| 0
| 0.25
| 0.044118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7cb514f4b628937e89d11a214a0267002c52972
| 1,515
|
py
|
Python
|
tests/test_messages/test_inbound/test_manage_all_link_record.py
|
michaeldavie/pyinsteon
|
e5b2e2910f4eff1474f158051fa71f75c2077dd6
|
[
"MIT"
] | 15
|
2020-07-08T05:29:14.000Z
|
2022-03-24T18:56:26.000Z
|
tests/test_messages/test_inbound/test_manage_all_link_record.py
|
michaeldavie/pyinsteon
|
e5b2e2910f4eff1474f158051fa71f75c2077dd6
|
[
"MIT"
] | 107
|
2019-06-03T09:23:02.000Z
|
2022-03-31T23:12:38.000Z
|
tests/test_messages/test_inbound/test_manage_all_link_record.py
|
michaeldavie/pyinsteon
|
e5b2e2910f4eff1474f158051fa71f75c2077dd6
|
[
"MIT"
] | 16
|
2019-01-24T01:09:49.000Z
|
2022-02-24T03:48:42.000Z
|
"""Test Manage All-Link Record."""
import unittest
from binascii import unhexlify
from pyinsteon.address import Address
from pyinsteon.constants import AckNak, ManageAllLinkRecordAction, MessageId
from pyinsteon.protocol.messages.all_link_record_flags import \
AllLinkRecordFlags
from tests import set_log_levels
from tests.utils import hex_to_inbound_message
# pylint: disable=no-member
class TestManageAllLinkRecord(unittest.TestCase):
"""Test Manage All-Link Record."""
def setUp(self):
"""Set up test."""
self.hex = "026F400405060708090a0b"
self.hex_ack = "026F400405060708090a0b06"
self.message_id = MessageId(0x6F)
self.action = ManageAllLinkRecordAction(0x40)
self.flags = AllLinkRecordFlags(0x04)
self.group = int(0x05)
self.address = Address("060708")
self.data1 = int(0x09)
self.data2 = int(0x0A)
self.data3 = int(0x0B)
self.ack = AckNak(0x06)
self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_ack)
set_log_levels(
logger="info",
logger_pyinsteon="info",
logger_messages="info",
logger_topics=False,
)
def test_id(self):
"""Test ID."""
assert self.msg.message_id == self.message_id
def test_ack_nak(self):
"""Test ACK/NAK."""
assert self.msg.ack == self.ack
def test_bytes(self):
"""Test bytes."""
assert bytes(self.msg) == unhexlify(self.hex_ack)
| 30.3
| 76
| 0.654785
| 178
| 1,515
| 5.421348
| 0.376404
| 0.036269
| 0.040415
| 0.035233
| 0.047668
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060711
| 0.238944
| 1,515
| 49
| 77
| 30.918367
| 0.776236
| 0.087129
| 0
| 0
| 0
| 0
| 0.047337
| 0.034024
| 0
| 0
| 0.023669
| 0
| 0.088235
| 1
| 0.117647
| false
| 0
| 0.205882
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7cbc44076f7cb93b253c24fadcf22b9899a01e8
| 5,054
|
py
|
Python
|
Clock/Clock_Fig3F.py
|
chAwater/OpenFig
|
d37d59c6a77d76c7d8a9e8623ce94a95406f1843
|
[
"MIT"
] | null | null | null |
Clock/Clock_Fig3F.py
|
chAwater/OpenFig
|
d37d59c6a77d76c7d8a9e8623ce94a95406f1843
|
[
"MIT"
] | null | null | null |
Clock/Clock_Fig3F.py
|
chAwater/OpenFig
|
d37d59c6a77d76c7d8a9e8623ce94a95406f1843
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # Figure Info.
#
# | Title | Journal | Authors | Article Date | Code Date | Figure | Links |
# |:------|:-------:|:-------:|:------------:|:---------:|:------:|:-----:|
# |A microfluidic approach for experimentally modelling <br> the intercellular coupling system of a mammalian <br> circadian clock at single-cell level|Lab on a Chip|Kui Han|2020.03.02|2020.03.11| Fig3F | [DOI](https://doi.org/10.1039/D0LC00140F) |
#
# In[1]:
# data_file = 'SinPeaksDOWN.xls'
# new_inputs = pd.read_excel(data_file,header=None)
# new_inputs.to_csv('data.csv',index=False)
# In[2]:
import os, sys, warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['svg.fonttype'] = 'none'
sns.set_context(context='poster')
bigsize = 20
midsize = 18
smallsize = 14
hugesize = 24
# In[ ]:
# Load data
new_inputs = pd.read_csv('data.csv')
new_inputs = new_inputs.values.flatten()
new_inputs = new_inputs[~np.isnan(new_inputs)]
new_inputs = pd.Series(new_inputs)
dict_time = new_inputs.astype(int).value_counts()
# Set start and end days
d_min = np.floor( ((new_inputs-12)/24).astype(np.float).min() )
d_min = max(0, d_min)
d_max = np.ceil( ((new_inputs-12)/24).astype(np.float).max() )
drug_time = 22 + np.arange(0,d_max+1)*24
# Set plot
n_plot = int( d_max - d_min + 1 )
n_rows = int( np.ceil(n_plot/4) )
ratio_dfs_dict = dict(zip(np.arange(n_plot), [pd.DataFrame()]*n_plot))
fig, axs = plt.subplots(
ncols=4,nrows=n_rows,
figsize=(18,n_rows*4),
subplot_kw={'polar':True},
gridspec_kw={'hspace':0.5},
)
axs = axs.flatten()
# Plot data for each 24h
for i_time in dict_time.keys():
if i_time<12:
continue
d_time = int( np.floor((i_time-12)/24)-d_min )
# In one day
ratio_df = ratio_dfs_dict[d_time]
ratio_df = ratio_df.append(
{
'ref_time' : ((i_time-12) % 24),
'n' : dict_time[i_time]
}, ignore_index=True)
ratio_dfs_dict[d_time] = ratio_df
# Date to r
t_time = (((i_time-12) % 24)/24)*2*np.pi
t_drug = ((1+drug_time[d_time]-12)%24)/24*2*np.pi
axs[d_time].bar(t_drug, 1, width=2/24*2*np.pi, bottom=0.0, color='bisque', edgecolor='k', alpha=0.7, zorder=10)
axs[d_time].scatter(t_time, 0.5, color='dodgerblue', s=dict_time[i_time]*30, alpha=0.7, zorder=20)
# Plot info for each 24h
for i,ax in enumerate(axs):
labels = (12+np.arange(24*(d_min+i),24*(d_min+i+1),6)).astype(int).astype(str)
labels[0] = str( int(labels[0])+24 ) + ' / ' + labels[0]
labels[2] = labels[2] + ' h'
ax.set_xticklabels( labels, fontsize=midsize )
ax.set_yticklabels([])
ax.tick_params(axis='x', pad=0)
ratio_df = ratio_dfs_dict[i]
if ratio_df.shape[0]!=0:
r_df = pd.concat(
[
ratio_df['n'],
pd.cut(
ratio_df['ref_time'],
bins =[0, 3, 10, 14, 24 ],
labels=[ 'Q1','Q2','Q3','Q4'],
include_lowest=True,
)
], axis=1
).groupby('ref_time').sum()
r = np.round( 100*(r_df.loc['Q3']/r_df.sum())['n'], 1 )
ax.text( 12/24*2*np.pi, -0.5, str(r)+'%', fontsize=smallsize, ha='center', va='center', color='tomato' )
ax.plot(
np.linspace(10, 14, 20)/24*2*np.pi,
[0.05]*20,
lw=5, color='tomato',alpha=0.7,
zorder=20,
)
ax.set_thetagrids([0,90,180,270])
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
ax.set_rgrids([])
ax.set_rlim(0,1)
ax.set_rorigin(-1.0)
ax.annotate(
s='',
xytext=(np.pi/8,1),
xy=(np.pi*3/8,1),
size=40,
arrowprops={
'facecolor':'black',
'arrowstyle':'->',
'connectionstyle':"arc3,rad=-0.17",
},
)
ax.text(np.pi/4,1,'Time',fontsize=smallsize, rotation=-40, ha='center',va='bottom')
else:
lgs = []
for s in np.arange(5,30,5):
lg = ax.scatter(s, 0.5, color='dodgerblue', s=s*30, alpha=0.7, zorder=1, label=s)
lgs.append(lg)
lg = ax.scatter(1,1,marker='s',s=300, color='bisque', edgecolor='k', alpha=0.7, label='Drug')
lgs.append(lg)
ax.set_rlim(0,0.1)
ax.axis('off')
ax.legend(
handles=lgs,
ncol=2,
title='# of cells',
title_fontsize=midsize,
fontsize=smallsize,
frameon=False,
labelspacing=1.5,
handletextpad=0.2,
columnspacing=0.4,
)
fig.subplots_adjust(hspace=0.3)
fig.suptitle('Cells distribution under drug treatment', y=1, fontsize=hugesize)
fig.savefig('Clock_Fig3F.svg', transparent=True, bbox_inches='tight')
fig.savefig('Clock_Fig3F.png', transparent=True, bbox_inches='tight')
plt.show()
# In[ ]:
| 28.234637
| 248
| 0.564108
| 759
| 5,054
| 3.617918
| 0.347826
| 0.042607
| 0.009104
| 0.012746
| 0.151857
| 0.067735
| 0.067735
| 0
| 0
| 0
| 0
| 0.062483
| 0.255837
| 5,054
| 178
| 249
| 28.393258
| 0.667642
| 0.139493
| 0
| 0.017094
| 0
| 0
| 0.072189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.051282
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7cbd8f6da109df8e878fcc548912f6a3815a1c2
| 10,733
|
py
|
Python
|
rameniaapp/views/report.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
rameniaapp/views/report.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
rameniaapp/views/report.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, HttpResponse, HttpResponseRedirect
from django.template import loader
from django.conf import settings
from django.contrib.auth.models import User
from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle
from django.views.generic import ListView, FormView, CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from rameniaapp.decorators import user_is_moderator
from rameniaapp.actionhookutils import dispatch_hook
from rameniaapp.utils import UserIsModeratorMixin
from django.forms.widgets import Select
from django.contrib import messages
class ReportForm(LoginRequiredMixin, CreateView):
'''Class based view for creating reports'''
template_name = "report_form.html"
model = Report
success_url = "/app"
fields = ["reason"]
url_path = "/app"
login_url="/app/login"
def get_form(self, form_class=None):
form = super(ReportForm, self).get_form(form_class)
form.fields['reason'].widget.attrs.update({'class':'form-control'})
return form
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.reporter = self.request.user
form.instance.status = 'OP'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Adds url_path value and relevant object id to template'''
context = super().get_context_data(**kwargs)
context["id"] = self.kwargs["id"]
context["url_path"] = self.url_path
return context
class NoodleReportForm(ReportForm):
'''Class based view for reporting noodles'''
model = NoodleReport
#This is used to allow the form to create the correct object
url_path = "noodle_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.noodle = Noodle.objects.get(pk=self.kwargs["id"])
form.instance.type = 'ND'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Noodle.objects.get(pk=self.kwargs["id"]).name
return context
class ReviewReportForm(ReportForm):
'''Class based view for reporting reviews'''
model = ReviewReport
url_path = "review_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.review = Review.objects.get(pk=self.kwargs["id"])
form.instance.type = 'RV'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Review.objects.get(pk=self.kwargs["id"]).title
return context
class ProfileReportForm(ReportForm):
'''Class based view for reporting profile'''
model = ProfileReport
url_path = "profile_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.profile = Profile.objects.get(pk=self.kwargs["id"])
form.instance.type = 'PF'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Profile.objects.get(pk=self.kwargs["id"]).name
return context
class ReportList(LoginRequiredMixin, UserIsModeratorMixin, ListView):
'''Class based view for viewing reports'''
# These values are overridden in the subclasses so we can list
# multiple types of reports without rewriting code
model = Report
item_type = ""
context_object_name = "reports"
template_name = "report_view.html"
login_url="/app/login"
def get_queryset(self):
'''Get all reports for specific objects'''
if "item_id" in self.kwargs:
item_tuple = self.get_item(self.kwargs["item_id"])
self.kwargs[item_tuple[0]] = item_tuple[1]
# This prevents the next line from breaking
del self.kwargs["item_id"]
# Using get_item, this lets us filter for any kind of object without
# writing extra code
return self.model.objects.filter(**self.kwargs)
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
return (None, None)
def get_context_data(self, **kwargs):
'''Knowing the item type lets us not break things'''
context = super().get_context_data(**kwargs)
context['item_type'] = self.item_type
return context
class NoodleReportList(ReportList):
'''List of noodle reports'''
model = NoodleReport
item_type = "Noodles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
noodle = Noodle.objects.get(id=id)
return ("noodle", noodle)
class ReviewReportList(ReportList):
'''List of review reports'''
model = ReviewReport
item_type = "Reviews"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
review = Review.objects.get(id=id)
return ("review", review)
class ProfileReportList(ReportList):
'''List of profile reports'''
model = ProfileReport
item_type = "Profiles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
profile = Profile.objects.get(id=id)
return ("profile", profile)
@login_required(login_url="/app/login")
@user_is_moderator
def ban_user(request, report_type, user_id):
'''Ban a user by their id; expects report_type arg for redirect reasons'''
if request.method == "POST":
user = User.objects.get(pk=user_id).delete()
path = None
if report_type == "ND":
path = "reports/noodle"
elif report_type == "RV":
path = "reports/review"
elif report_type == "PF":
path = "reports/profile"
messages.add_message(request, messages.WARNING, "User banned")
return HttpResponseRedirect("/app/mod/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def delete_content(request, report_id):
'''This method deletes offending items that have been reported, or just their content'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
reporter = report.reporter
creator = None
path = get_return_path(report)
# Deleting object is dependent on type
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
report.review.delete()
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
report.noodle.delete()
elif report.type == "PF":
# Deleting a profile will break fundamental assumptions, so we instead
# remove all content from it.
report = ProfileReport.objects.get(pk=report_id)
report.profile.name = "AnonymousUser"
report.profile.profile_pic = Profile._meta.get_field('profile_pic').default
report.profile.metadata["Description"] = ""
report.profile.save()
creator = report.profile.user
report.delete()
# If we delete the content, it was reasonable to report it
dispatch_hook(reporter, "good-report")
if creator:
# If the noodle's creator hasn't been banned, penalize them
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.WARNING, "Content deleted")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def update_report_status(request, report_id, status):
'''Change report status to "open", "resolved", or "spam"'''
if request.method == "POST":
# Validate status is the correct value
if status in dict(Report.STATUS_CHOICES):
report = Report.objects.get(pk=report_id)
report.status = status
report.save()
creator = None
path = get_return_path(report)
# Get the creator of the relevant object/report
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
elif report.type == "PF":
report = ProfileReport.objects.get(pk=report_id)
creator = report.profile.user
# Reward people for good reports
if status == "ED":
if report.reporter:
dispatch_hook(report.reporter, "good-report")
if creator:
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.SUCCESS, "Report marked as resolved")
# Penalize people for bad reports
if status == "SP":
if report.reporter:
dispatch_hook(report.reporter, "bad-report")
messages.add_message(request, messages.WARNING, "Report marked as spam")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def ignore_report(request, report_id):
'''Ignore (delete) a report'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
path = get_return_path(report)
if report.reporter:
# We assume a bad report is worth deleting if its creator
# wasn't banned
dispatch_hook(report.reporter, "bad-report")
report.delete()
messages.add_message(request, messages.WARNING, "Report ignored")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
def get_return_path(report):
'''Util method to return a correct redirect path'''
if report.type == "RV":
return "review"
elif report.type == "ND":
return "noodle"
elif report.type == "PF":
return "profile"
| 39.171533
| 104
| 0.644461
| 1,283
| 10,733
| 5.28371
| 0.186282
| 0.028028
| 0.028323
| 0.023897
| 0.453459
| 0.43443
| 0.392831
| 0.323794
| 0.323794
| 0.298864
| 0
| 0.000247
| 0.246529
| 10,733
| 274
| 105
| 39.171533
| 0.838012
| 0.174881
| 0
| 0.497512
| 0
| 0
| 0.078346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.099502
| false
| 0
| 0.064677
| 0
| 0.447761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7cf1b7d56bb02ccf14d9d4fb7fbc22544c1690f
| 512
|
py
|
Python
|
mjml/elements/head/mj_style.py
|
ESA-CCI-ODP/mjml-stub
|
ffd824923de85f3c02fca7f83ef6b540be048414
|
[
"MIT"
] | 23
|
2020-10-02T14:52:21.000Z
|
2022-03-24T16:05:21.000Z
|
mjml/elements/head/mj_style.py
|
ESA-CCI-ODP/mjml-stub
|
ffd824923de85f3c02fca7f83ef6b540be048414
|
[
"MIT"
] | 17
|
2020-10-07T14:48:06.000Z
|
2022-03-18T13:56:11.000Z
|
mjml/elements/head/mj_style.py
|
ESA-CCI-ODP/mjml-stub
|
ffd824923de85f3c02fca7f83ef6b540be048414
|
[
"MIT"
] | 8
|
2021-01-13T11:54:41.000Z
|
2022-03-10T15:50:55.000Z
|
from ._head_base import HeadComponent
__all__ = ['MjStyle']
class MjStyle(HeadComponent):
@classmethod
def default_attrs(cls):
return {
'inline' : '',
}
def handler(self):
add = self.context['add']
inline_attr = 'inlineStyle' if (self.get_attr('inline') == 'inline') else 'style'
if inline_attr == 'inlineStyle':
raise NotImplementedError('style inlining not supported yet')
add(inline_attr, self.getContent())
| 24.380952
| 89
| 0.597656
| 51
| 512
| 5.784314
| 0.627451
| 0.101695
| 0.088136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.283203
| 512
| 20
| 90
| 25.6
| 0.803815
| 0
| 0
| 0
| 0
| 0
| 0.170588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0.071429
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7cf29c510e55652c76da9423af99e7754022e49
| 3,399
|
py
|
Python
|
model_zoo/official/nlp/bert/src/sample_process.py
|
i4oolish/mindspore
|
dac3be31d0f2c0a3516200f47af30980e566601b
|
[
"Apache-2.0"
] | 2
|
2020-08-12T16:14:40.000Z
|
2020-12-04T03:05:57.000Z
|
model_zoo/official/nlp/bert/src/sample_process.py
|
dilingsong/mindspore
|
4276050f2494cfbf8682560a1647576f859991e8
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/nlp/bert/src/sample_process.py
|
dilingsong/mindspore
|
4276050f2494cfbf8682560a1647576f859991e8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""process txt"""
import re
import json
def process_one_example_p(tokenizer, text, max_seq_len=128):
"""process one testline"""
textlist = list(text)
tokens = []
for _, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
if len(tokens) >= max_seq_len - 1:
tokens = tokens[0:(max_seq_len - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
for _, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
ntokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
ntokens.append("**NULL**")
assert len(input_ids) == max_seq_len
assert len(input_mask) == max_seq_len
assert len(segment_ids) == max_seq_len
feature = (input_ids, input_mask, segment_ids)
return feature
def label_generation(text="", probs=None, label2id_file=""):
"""generate label"""
data = [text]
probs = [probs]
result = []
label2id = json.loads(open(label2id_file).read())
id2label = [k for k, v in label2id.items()]
for index, prob in enumerate(probs):
for v in prob[1:len(data[index]) + 1]:
result.append(id2label[int(v)])
labels = {}
start = None
index = 0
for _, t in zip("".join(data), result):
if re.search("^[BS]", t):
if start is not None:
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
start = index
if re.search("^O", t):
if start is not None:
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
start = None
index += 1
if start is not None:
label = result[start][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
return labels
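# --- Usage sketch (not part of the original file): a quick check of
# process_one_example_p with a hypothetical toy tokenizer. It only illustrates
# the padding behaviour; the real code is meant to be driven by a BERT tokenizer.
class _ToyTokenizer:
    def __init__(self):
        self._vocab = {"[CLS]": 101, "[SEP]": 102}
    def tokenize(self, word):
        return [word]
    def convert_tokens_to_ids(self, tokens):
        return [self._vocab.setdefault(t, len(self._vocab) + 1) for t in tokens]

if __name__ == "__main__":
    ids, mask, segments = process_one_example_p(_ToyTokenizer(), "abc", max_seq_len=8)
    # "[CLS]" + 3 characters + "[SEP]" = 5 real positions, padded out to 8
    assert len(ids) == len(mask) == len(segments) == 8
    assert mask == [1, 1, 1, 1, 1, 0, 0, 0]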
| 33.653465
| 78
| 0.562518
| 427
| 3,399
| 4.344262
| 0.313817
| 0.070081
| 0.033962
| 0.051752
| 0.280863
| 0.241509
| 0.219946
| 0.205391
| 0.205391
| 0.205391
| 0
| 0.017932
| 0.294498
| 3,399
| 100
| 79
| 33.99
| 0.75563
| 0.202118
| 0
| 0.381579
| 0
| 0
| 0.009318
| 0
| 0
| 0
| 0
| 0
| 0.039474
| 1
| 0.026316
| false
| 0
| 0.026316
| 0
| 0.078947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d08a1b7fd50820c50ef7603b8e08a3f497a3ac
| 2,273
|
py
|
Python
|
lang_model/data_loader.py
|
alex44jzy/FancyALMLDLNLP
|
c55a67a51de72339f4ab13bd46008eb418d293a3
|
[
"MIT"
] | null | null | null |
lang_model/data_loader.py
|
alex44jzy/FancyALMLDLNLP
|
c55a67a51de72339f4ab13bd46008eb418d293a3
|
[
"MIT"
] | null | null | null |
lang_model/data_loader.py
|
alex44jzy/FancyALMLDLNLP
|
c55a67a51de72339f4ab13bd46008eb418d293a3
|
[
"MIT"
] | null | null | null |
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from gensim.corpora.dictionary import Dictionary
class LangDataset(Dataset):
def __init__(self, src_sents, trg_sents, max_len=-1):
self.src_sents = src_sents
self.trg_sents = trg_sents
# Create the vocabulary for both the source and target.
self.vocab = Dictionary(src_sents + trg_sents)
# Patch the vocabularies and add the <pad> and <unk> symbols.
special_tokens = {'<pad>': 0, '<unk>': 1, '</s>': 2}
self.vocab.patch_with_special_tokens(special_tokens)
# Keep track of how many data points.
self._len = len(src_sents)
if max_len < 0:
# If it's not set, find the longest text in the data.
max_src_len = max(len(sent) for sent in src_sents)
self.max_len = max_src_len
else:
self.max_len = max_len
def pad_sequence(self, vectorized_sent, max_len):
# To pad the sentence:
# Pad left = 0; Pad right = max_len - len of sent.
pad_dim = (0, max_len - len(vectorized_sent))
return F.pad(vectorized_sent, pad_dim, 'constant')
def __getitem__(self, index):
vectorized_src = self.vectorize(self.vocab, self.src_sents[index])
vectorized_trg = self.vectorize(self.vocab, self.trg_sents[index])
return {'x': self.pad_sequence(vectorized_src, self.max_len),
'y': self.pad_sequence(vectorized_trg, self.max_len),
'x_len': len(vectorized_src),
'y_len': len(vectorized_trg)}
def __len__(self):
return self._len
def vectorize(self, vocab, tokens):
"""
:param tokens: Tokens that should be vectorized.
:type tokens: list(str)
"""
# See https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.doc2idx
# Lets just cast list of indices into torch tensors directly =)
return torch.tensor(vocab.doc2idx(tokens, unknown_word_index=1))
def unvectorize(self, vocab, indices):
"""
:param indices: Indices to convert back to tokens.
:type indices: list(int)
"""
return [vocab[i] for i in indices]
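# --- Usage sketch (not part of the original file; assumes gensim and torch are
# installed): builds a LangDataset from two toy token lists and reads one item,
# showing that both sides are padded to the longest source sentence.
if __name__ == "__main__":
    src = [["hello", "world"], ["a", "b", "c"]]
    trg = [["bonjour", "monde"], ["p", "q", "r"]]
    ds = LangDataset(src, trg)
    item = ds[0]
    print(item['x'])        # tensor of length 3 (padded with the <pad> index 0)
    print(item['x_len'])    # 2, the unpadded length of the first source sentence
    print(len(ds))          # 2 sentence pairs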
| 37.262295
| 114
| 0.635724
| 307
| 2,273
| 4.501629
| 0.335505
| 0.047757
| 0.028944
| 0.023155
| 0.037627
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005974
| 0.263528
| 2,273
| 60
| 115
| 37.883333
| 0.819594
| 0.258689
| 0
| 0
| 0
| 0
| 0.021066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.121212
| 0.030303
| 0.484848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d378679d5e763e0a3427a5a59048ba70934d41
| 4,322
|
py
|
Python
|
tests/pytests/scenarios/multimaster/conftest.py
|
lllamnyp/salt
|
de112e5b362191e3708e170b7eb8e990787ad412
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/scenarios/multimaster/conftest.py
|
lllamnyp/salt
|
de112e5b362191e3708e170b7eb8e990787ad412
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/scenarios/multimaster/conftest.py
|
lllamnyp/salt
|
de112e5b362191e3708e170b7eb8e990787ad412
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import shutil
import subprocess
import pytest
import salt.utils.platform
log = logging.getLogger(__name__)
@pytest.fixture(scope="package", autouse=True)
def skip_on_tcp_transport(request):
if request.config.getoption("--transport") == "tcp":
pytest.skip("Multimaster under the TPC transport is not working. See #59053")
@pytest.fixture(scope="package")
def salt_mm_master_1(request, salt_factories):
config_defaults = {
"open_mode": True,
"transport": request.config.getoption("--transport"),
}
config_overrides = {
"interface": "127.0.0.1",
}
factory = salt_factories.salt_master_daemon(
"mm-master-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_1_salt_cli(salt_mm_master_1):
return salt_mm_master_1.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_master_2(salt_factories, salt_mm_master_1):
if salt.utils.platform.is_darwin() or salt.utils.platform.is_freebsd():
subprocess.check_output(["ifconfig", "lo0", "alias", "127.0.0.2", "up"])
config_defaults = {
"open_mode": True,
"transport": salt_mm_master_1.config["transport"],
}
config_overrides = {
"interface": "127.0.0.2",
}
# Use the same ports for both masters; they bind to different interfaces
for key in (
"ret_port",
"publish_port",
):
config_overrides[key] = salt_mm_master_1.config[key]
factory = salt_factories.salt_master_daemon(
"mm-master-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
# The secondary salt master depends on the primary salt master fixture
# because we need to clone the keys
for keyfile in ("master.pem", "master.pub"):
shutil.copyfile(
os.path.join(salt_mm_master_1.config["pki_dir"], keyfile),
os.path.join(factory.config["pki_dir"], keyfile),
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_2_salt_cli(salt_mm_master_2):
return salt_mm_master_2.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_minion_1(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_1.salt_minion_daemon(
"mm-minion-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def salt_mm_minion_2(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_2.salt_minion_daemon(
"mm-minion-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
| 31.547445
| 85
| 0.679084
| 577
| 4,322
| 4.70364
| 0.195841
| 0.135593
| 0.114959
| 0.07664
| 0.712601
| 0.664333
| 0.641857
| 0.603537
| 0.547531
| 0.547531
| 0
| 0.026628
| 0.200602
| 4,322
| 136
| 86
| 31.779412
| 0.7589
| 0.04211
| 0
| 0.531532
| 0
| 0
| 0.137331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063063
| false
| 0
| 0.054054
| 0.018018
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d59e3cde73fd0dad74b149197ee60ec8e8c83b
| 3,900
|
py
|
Python
|
demisto_sdk/commands/common/hook_validations/release_notes.py
|
yalonso7/demisto-sdk
|
4b832078cdadb0b604a064532975e8be68ac726a
|
[
"MIT"
] | null | null | null |
demisto_sdk/commands/common/hook_validations/release_notes.py
|
yalonso7/demisto-sdk
|
4b832078cdadb0b604a064532975e8be68ac726a
|
[
"MIT"
] | null | null | null |
demisto_sdk/commands/common/hook_validations/release_notes.py
|
yalonso7/demisto-sdk
|
4b832078cdadb0b604a064532975e8be68ac726a
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import itertools
from demisto_sdk.commands.common.constants import VALIDATED_PACK_ITEM_TYPES
from demisto_sdk.commands.common.errors import Errors
from demisto_sdk.commands.common.hook_validations.base_validator import \
BaseValidator
from demisto_sdk.commands.common.tools import (get_latest_release_notes_text,
get_release_notes_file_path)
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
class ReleaseNotesValidator(BaseValidator):
"""Release notes validator is designed to ensure the existence and correctness of the release notes in content repo.
Attributes:
file_path (str): the path to the file we are examining at the moment.
release_notes_path (str): the path to the changelog file of the examined file.
latest_release_notes (str): the text of the UNRELEASED section in the changelog file.
master_diff (str): the changes in the changelog file compared to origin/master.
"""
def __init__(self, file_path, modified_files=None, pack_name=None, added_files=None, ignored_errors=None,
print_as_warnings=False):
super().__init__(ignored_errors=ignored_errors, print_as_warnings=print_as_warnings)
self.file_path = file_path
self.modified_files = modified_files
self.added_files = added_files
self.pack_name = pack_name
self.release_notes_path = get_release_notes_file_path(self.file_path)
self.latest_release_notes = get_latest_release_notes_text(self.release_notes_path)
def are_release_notes_complete(self):
is_valid = True
modified_added_files = itertools.chain.from_iterable((self.added_files or [], self.modified_files or []))
if modified_added_files:
for file in modified_added_files:
if not any(permitted_type in file for permitted_type in VALIDATED_PACK_ITEM_TYPES):
continue
elif self.pack_name in file:
update_rn_util = UpdateRN(pack=self.pack_name, pack_files=set(), update_type=None,
added_files=set())
file_name, file_type = update_rn_util.identify_changed_file_type(file)
if file_name and file_type:
if (file_type not in self.latest_release_notes) or (file_name not in self.latest_release_notes):
entity_name = update_rn_util.get_display_name(file)
error_message, error_code = Errors.missing_release_notes_entry(file_type, self.pack_name,
entity_name)
if self.handle_error(error_message, error_code, self.file_path):
is_valid = False
return is_valid
def has_release_notes_been_filled_out(self):
release_notes_comments = self.latest_release_notes
if len(release_notes_comments) == 0:
error_message, error_code = Errors.release_notes_file_empty()
if self.handle_error(error_message, error_code, file_path=self.file_path):
return False
elif '%%UPDATE_RN%%' in release_notes_comments:
error_message, error_code = Errors.release_notes_not_finished()
if self.handle_error(error_message, error_code, file_path=self.file_path):
return False
return True
def is_file_valid(self):
"""Checks if given file is valid.
Return:
bool. True if file's release notes are valid, False otherwise.
"""
validations = [
self.has_release_notes_been_filled_out(),
self.are_release_notes_complete()
]
return all(validations)
| 50
| 120
| 0.661795
| 488
| 3,900
| 4.922131
| 0.239754
| 0.129892
| 0.052456
| 0.052456
| 0.273522
| 0.170691
| 0.13239
| 0.073272
| 0.057452
| 0.057452
| 0
| 0.000356
| 0.278974
| 3,900
| 77
| 121
| 50.649351
| 0.853841
| 0.144615
| 0
| 0.072727
| 0
| 0
| 0.003969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0
| 0.127273
| 0
| 0.309091
| 0.054545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d5fc15217b2b0e024e35082215227dc7639d0e
| 14,326
|
py
|
Python
|
PyOpenGL/PyGame/ex06/src/mathematics.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
PyOpenGL/PyGame/ex06/src/mathematics.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
PyOpenGL/PyGame/ex06/src/mathematics.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
import numpy as np
class ProjectionMatrix():
"""This matrix provides projection distortion.
Projection distortion is when things that are far away
appear smaller and things that are close appear bigger.
This works flawlessly so far. Takes in screen-size and
provides near- and far clipping. fov is field-of-view
and smaller values will make view zoom in. A value of 1
will provide a panorama image."""
def __init__(self, screen_size, zNear, zFar, fov):
if fov >= 1: # Limit to 0.99 or we get infinity error at 1.0. >1.0 will give strange result.
fov = 0.99999;
tanHalfFOV = np.tan(fov * np.pi / 2.0)
zRange = zNear - zFar;
self.projectionMatrix = np.array([
[ # Row 0:
screen_size[1] / (tanHalfFOV * screen_size[0]),
0,
0,
0
],
[ # Row 1:
0,
1.0 / tanHalfFOV,
0,
0
],
[ # Row 2:
0,
0,
(-zNear - zFar)/zRange,
2.0 * zFar * zNear / zRange
],
[ # Row 3:
0,
0,
1,
0
],
], dtype=np.float32)
def get(self):
return self.projectionMatrix
class ViewMatrix():
"""This matrix transform a model as if it's percieved by a
camera with a target 'self.t' in global world coordinates
and a position 'self.p' in global world coordinates. Global
coordinates are x=right, y=forth and z=up."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
# target coordinates:
self.t = vec3(0, 0, 0)
# tolerance value:
self.tolerance = 0.5
"""The tolerance value is for testing when view lies within bounds.
In case of 'self.orbitTarget()', it's for testing when view gets too
close to target z-axis. In case of 'self.approachTarget()', it's for
testing when view gets too close to target coordinates."""
# Sensitivity value:
self.alpha = 0.01
"""The sensitivity value is for tuning how sensitive 'self.orbitTarget()'
and 'self.approachTarget()' are to user input."""
# Initialize the rotationMatrix as the identity matrix:
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def setPos(self, p):
self.p = vec3(p.x, p.y, p.z)
def lookAt(self, target=None, up=None):
"""This function focuses the view on a target.
Tested and seems to work as it should... finally."""
if target != None:
self.t = vec3(target.x, target.y, target.z)
f = self.t.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, s.y, s.z, 0],
[ u.x, u.y, u.z, 0],
[ f.x, f.y, f.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def approachTarget(self, amount):
"""This function approaches the view towards the target
when amount is positive and moves away from the target when
amount is negative. It will stay outside the self.tolerance
distance. When completely close to the target, view cannot
look up or down too much."""
if amount == 0:
# If amount is zero, do nothing.
return
if self.t.sub(self.p).mag()*(1 - amount) > 2.0*self.tolerance:
# If 'self.approachTarget()' will not take the view within twice the
# tolerance distance, approach the target by given amount:
self.p = self.p.add(self.t.sub(self.p).scale(amount))
def orbitTarget(self, axis):
if axis == (0, 0):
return # Do nothing
# Get target2camera-vector:
p = self.p.sub(self.t)
# Assign passed values to variables we can change if we have to:
axis_x = -axis[0]
if axis[1] > 0.30/self.alpha:
"""If axis[1] is bigger than 0.40 / self.alpha, we get strange results
becouse view can 'tunnel' over the boundary set when getting view is
getting close to target z-axis. Changing tolerance doen't change it a
whole lot so I'm setting a boundary value for axis[1] to +-0.30 / self.alpha which is
really really large as it is."""
axis_y = 0.3 / self.alpha
elif axis[1] < -0.30/self.alpha:
axis_y = -0.3 / self.alpha
else:
axis_y = axis[1]
if axis_y > 0 and p.z > 0:
"""Tests if user is trying to orbit the view up
and if the view is above the 'equator'. The second
test is to make sure the view doesn't get stuck
if it gets inside the tolerance bounds and can get back
out as long as it's trying to move away."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
elif axis_y < 0 and p.z < 0:
"""Tests if user is trying to orbit the view down
and if the view is below the 'equator'. Same test
but for different case as the one above."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
if axis_y == 0: #If the other axis is zero:
# Amount of rotation for target-cam x-axis: (longitude, west2east)
v = vec3(0, 0, 1) # v is up vector
rate = axis_x
elif axis_x == 0: #If the other axis is zero:
# Amount of rotation for target-cam y-axis: (latitude, south2north)
v = p.cross(vec3(0, 0, 1)).norm() # v is side vector
rate = axis_y
else: #If neither is zero
# u is up vector:
u = vec3(0, 0, axis_x)
# s is side vector:
s = p.cross(vec3(0, 0, 1)).norm().scale(axis_y)
# v is combined vector:
v = u.add(s).norm()
rate = abs(axis_x) + abs(axis_y)
sin = np.sin(self.alpha * rate)
cos = np.cos(self.alpha * rate)
rotateMatrix = np.matrix([
[ # Row 0:
( v.x*v.x*(1 - cos) + cos ),
( v.y*v.x*(1 - cos) - v.z*sin ),
( v.z*v.x*(1 - cos) + v.y*sin ),
0
],
[ # Row 1:
( v.x*v.y*(1 - cos) + v.z*sin ),
( v.y*v.y*(1 - cos) + cos ),
( v.z*v.y*(1 - cos) - v.x*sin ),
0
],
[ # Row 2:
( v.x*v.z*(1 - cos) - v.y*sin ),
( v.y*v.z*(1 - cos) + v.x*sin ),
( v.z*v.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32)
p = rotateMatrix.dot( np.array([p.x, p.y, p.z, 1.0]) ).getA()[0][0:3]
self.p = vec3(p[0], p[1], p[2]).add(self.t)
self.lookAt(self.t)
def get(self):
translationMatrix = np.matrix([
[1,0,0,-self.p.x],
[0,1,0,-self.p.y],
[0,0,1,-self.p.z],
[0,0,0,1]
], dtype=np.float32)
return (self.rotationMatrix*translationMatrix).getA()
class ModelMatrix():
"""This matrix transform a model into world coordinates.
Heavily tested and should work properly. Could probably
be optimized further or even translated into cython for
performance."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
self.s = vec3(1, 1, 1)
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def rotate(self, turns, unit):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
u = unit.norm()
sin = np.sin(turns * np.pi * 2)
cos = np.cos(turns * np.pi * 2)
self.rotationMatrix = self.rotationMatrix.dot(
np.matrix([
[ # Row 0:
( u.x*u.x*(1 - cos) + cos ),
( u.y*u.x*(1 - cos) - u.z*sin ),
( u.z*u.x*(1 - cos) + u.y*sin ),
0
],
[ # Row 1:
( u.x*u.y*(1 - cos) + u.z*sin ),
( u.y*u.y*(1 - cos) + cos ),
( u.z*u.y*(1 - cos) - u.x*sin ),
0
],
[ # Row 2:
( u.x*u.z*(1 - cos) - u.y*sin ),
( u.y*u.z*(1 - cos) + u.x*sin ),
( u.z*u.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32))
def scale(self, s):
self.s = vec3(s.x, s.y, s.z)
def lookAt(self, target, up=None):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
# Get normalized vector pointing from model to target
f = target.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
# s must be normalized! Consider when f and u are not perpendicular!
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, f.x, u.x, 0],
[ s.y, f.y, u.y, 0],
[ s.z, f.z, u.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def get(self):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
translationMatrix = np.matrix([
[1,0,0,self.p.x],
[0,1,0,self.p.y],
[0,0,1,self.p.z],
[0,0,0,1]
], dtype=np.float32)
scaleMatrix = np.matrix([
[self.s.x,0,0,0],
[0,self.s.y,0,0],
[0,0,self.s.z,0],
[0,0,0,1]
], dtype=np.float32)
return (translationMatrix*self.rotationMatrix*scaleMatrix).getA()
class quaternion():
def __init__(self, x, y, z, w):
self.x = float(x)
self.y = float(y)
self.z = float(z)
self.w = float(w)
def mag(self): # Get length of quaternion
return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z + self.w*self.w)
def norm(self): # Normalize quaternion
return quaternion(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag(),
w= self.w / self.mag())
def conjugate(self):
return quaternion(
x=-self.x,
y=-self.y,
z=-self.z,
w= self.w)
def xQ(self, q): # Multiply with quaternion
return quaternion(
x= self.x * q.w + self.w * q.x + self.y * q.z - self.z * q.y,
y= self.y * q.w + self.w * q.y + self.z * q.x - self.x * q.z,
z= self.z * q.w + self.w * q.z + self.x * q.y - self.y * q.x,
w= self.w * q.w - self.x * q.x - self.y * q.y - self.z * q.z)
def xV(self, v): # Multiply with vector
return quaternion(
x= self.w*v.x + self.y*v.z - self.z*v.y,
y= self.w*v.y + self.z*v.x - self.x*v.z,
z= self.w*v.z + self.x*v.y - self.y*v.x,
w=-self.x*v.x - self.y*v.y - self.z*v.z)
class vec2():
def __init__(self, x, y):
self.x = float(x)
self.y = float(y)
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y)
def norm(self):
return vec2(
x= self.x / self.mag(),
y= self.y / self.mag())
class vec3():
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def cross(self, vector):
return vec3(
x= self.y*vector.z - self.z*vector.y,
y= self.z*vector.x - self.x*vector.z,
z= self.x*vector.y - self.y*vector.x)
def dot(self, vector):
return float( self.x*vector.x + self.y*vector.y + self.z*vector.z )
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def norm(self):
return vec3(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag())
def add(self, vector):
return vec3(
x= self.x + vector.x,
y= self.y + vector.y,
z= self.z + vector.z)
def sub(self, vector):
return vec3(
x= self.x - vector.x,
y= self.y - vector.y,
z= self.z - vector.z)
def scale(self, scalar):
return vec3(
self.x*scalar,
self.y*scalar,
self.z*scalar)
def rotate(self, angle, axis):
pass
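# --- Usage sketch (not part of the original file): a quick sanity check of the
# vec3 helpers above; the cross product of the x- and y-axes gives the z-axis,
# and a normalized vector has magnitude 1 (up to float rounding).
if __name__ == "__main__":
    x_axis = vec3(1, 0, 0)
    y_axis = vec3(0, 1, 0)
    z_axis = x_axis.cross(y_axis)
    print(z_axis.x, z_axis.y, z_axis.z)    # 0.0 0.0 1.0
    print(vec3(3, 4, 0).norm().mag())      # ~1.0
    print(x_axis.add(y_axis).dot(z_axis))  # 0.0, the sum lies in the xy-plane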
| 34.603865
| 100
| 0.454279
| 1,990
| 14,326
| 3.247236
| 0.141709
| 0.01888
| 0.013463
| 0.008047
| 0.421232
| 0.342618
| 0.29805
| 0.289384
| 0.283658
| 0.280563
| 0
| 0.034355
| 0.416864
| 14,326
| 413
| 101
| 34.687651
| 0.739167
| 0.176881
| 0
| 0.385417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114583
| false
| 0.003472
| 0.003472
| 0.052083
| 0.204861
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d672fb0397af44cf591c05913dd9f20b250483
| 1,652
|
py
|
Python
|
test_utils/mocks.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 7
|
2015-01-23T17:24:04.000Z
|
2022-01-12T16:54:24.000Z
|
test_utils/mocks.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 18
|
2017-12-09T01:11:23.000Z
|
2021-09-22T13:26:24.000Z
|
test_utils/mocks.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 1
|
2015-06-22T02:17:55.000Z
|
2015-06-22T02:17:55.000Z
|
from xml.dom import minidom
import pywikibot
from api.decorator import time_this
SiteMock = pywikibot.Site
class PageMock(pywikibot.Page):
def __init__(self, *args, **kwargs):
super(PageMock, self).__init__(*args, **kwargs)
self.filename = "test_data/test_pages_%s.xml" % self.site.lang
self.parsed = minidom.parse(open(self.filename, 'r'))
self.pages = self.parsed.getElementsByTagName('page')
def put(self, newtext, summary=None, watch=None, minor=True, botflag=None,
force=False, asynchronous=False, callback=None, **kwargs):
print(('Saving page [[%s]] through put' % self.title()))
def save(self, summary=None, watch=None, minor=True, botflag=None,
force=False, asynchronous=False, callback=None,
apply_cosmetic_changes=None, quiet=False, **kwargs):
print(('Saving page [[%s]] through save' % self.title()))
def _save(self, summary=None, watch=None, minor=True, botflag=None,
cc=None, quiet=False, **kwargs):
print(('Saving page [[%s]] through save' % self.title()))
@time_this('Page.get() method mock')
def get(self, force=False, get_redirect=False, sysop=False):
for page in self.pages:
xml_title = page.getElementsByTagName(
'title')[0].childNodes[0].nodeValue
if xml_title == self.title():
return page.getElementsByTagName(
'text')[0].childNodes[0].nodeValue
print(('No page %s found in "%s"' % (self.title(), self.filename)))
return ''
p = PageMock(SiteMock('en', 'wiktionary'), 'gaon')
e = p.get()
| 36.711111
| 78
| 0.624092
| 203
| 1,652
| 4.985222
| 0.364532
| 0.044466
| 0.047431
| 0.059289
| 0.365613
| 0.365613
| 0.336957
| 0.336957
| 0.336957
| 0.336957
| 0
| 0.003127
| 0.225787
| 1,652
| 44
| 79
| 37.545455
| 0.788116
| 0
| 0
| 0.0625
| 0
| 0
| 0.118039
| 0.016344
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0
| 0.09375
| 0
| 0.34375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d6da38ffc0a1fb86619973f197115c4b076c8a
| 5,796
|
py
|
Python
|
dl_tensorflow/deepdream.py
|
jarvisqi/deep_learning
|
988a5b0551ccf2c480a519c66aca149053826d30
|
[
"MIT"
] | 32
|
2017-10-26T13:37:36.000Z
|
2021-03-24T09:06:45.000Z
|
dl_tensorflow/deepdream.py
|
2892778775/deep_learning
|
988a5b0551ccf2c480a519c66aca149053826d30
|
[
"MIT"
] | 3
|
2018-11-19T05:55:46.000Z
|
2019-03-01T05:20:43.000Z
|
dl_tensorflow/deepdream.py
|
2892778775/deep_learning
|
988a5b0551ccf2c480a519c66aca149053826d30
|
[
"MIT"
] | 38
|
2017-11-08T15:42:48.000Z
|
2021-05-10T00:42:33.000Z
|
import os
from functools import partial
from io import BytesIO
import numpy as np
import PIL.Image
import scipy.misc
import tensorflow as tf
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(tf.float32, name="input")
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {"input": t_preprocessed})
def load_inception():
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Define t_input as our input image
t_input = tf.placeholder(np.float32, name='input')
imagenet_mean = 117.0
# The input image must be preprocessed before it can be fed into the network
# expand_dims adds one dimension: [height, width, channel] becomes [1, height, width, channel]
# t_input - imagenet_mean subtracts the mean value
t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input': t_preprocessed})
# Find all convolutional layers
layers = [op.name for op in graph.get_operations() if op.type ==
"Conv2D" and "import/" in op.name]
# Print the number of convolutional layers
print('Number of layers', len(layers))
# In particular, print the shape of mixed4d_3x3_bottleneck_pre_relu
name = 'mixed4d_3x3_bottleneck_pre_relu'
print('shape of %s: %s' %(name, str(graph.get_tensor_by_name('import/' + name + ':0').get_shape())))
def savearray(img_array, img_name):
scipy.misc.toimage(img_array).save(img_name)
print('img saved: %s' % img_name)
def visstd(a, s=0.1):
return (a-a.mean())/max(a.std(), 1e-4)*s+0.5
def resize_ratio(img, ratio):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, ratio))
img = img / 255 * (max - min) + min
return img
def resize(img, hw):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, hw))
img = img / 255 * (max - min) + min
return img
def calc_grad_tiled(img, t_grad, tile_size=512):
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
img_shift = np.roll(np.roll(img, sx, 1), sy, 0) # first shift the whole image along the rows, then along the columns
grad = np.zeros_like(img)
for y in range(0, max(h - sz // 2, sz), sz):
for x in range(0, max(w - sz // 2, sz), sz):
sub = img_shift[y:y + sz, x:x + sz]
g = sess.run(t_grad, {t_input: sub})
grad[y:y + sz, x:x + sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
k = np.float32([1, 4, 6, 4, 1])
k = np.outer(k, k)
k5x5 = k[:, :, None, None] / k.sum() * np.eye(3, dtype=np.float32)
# Merge the Laplacian pyramid back into the original image
def lap_merge(levels):
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5 * 4, tf.shape(hi), [1, 2, 2, 1]) + hi
return img
# Normalize img.
def normalize_std(img, eps=1e-10):
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img / tf.maximum(std, eps)
# Laplacian pyramid normalization
def lap_normalize(img, scale_n=4):
img = tf.expand_dims(img, 0)
tlevels = lap_split_n(img, scale_n)
# Apply normalize_std to every level
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0, :, :, :]
# This function splits the image into low- and high-frequency components
def lap_split(img):
with tf.name_scope('split'):
# One convolution acts as a smoothing pass, so lo is the low-frequency component
lo = tf.nn.conv2d(img, k5x5, [1, 2, 2, 1], 'SAME')
# Scale the low-frequency part back up to the original image size to get lo2, then subtract lo2 from the original image img to get the high-frequency part hi
lo2 = tf.nn.conv2d_transpose(lo, k5x5 * 4, tf.shape(img), [1, 2, 2, 1])
hi = img - lo2
return lo, hi
# This function splits the image img into an n-level Laplacian pyramid
def lap_split_n(img, n):
levels = []
for i in range(n):
# Call lap_split to separate the image into low- and high-frequency parts
# The high-frequency part is stored in levels
# The low-frequency part keeps being decomposed
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def tffunc(*argtypes):
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
def render_deepdream(img0, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
name = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139
t_obj = graph.get_tensor_by_name("import/%s:0" % name)
t_score = tf.reduce_mean(t_obj)
t_grad = tf.gradients(t_score, t_input)[0]
lap_n=4
# Convert lap_normalize into a regular function
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0
# Decompose the image into a pyramid in the same way
# Here extracting the high and low frequencies is simple: direct resizing is enough
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw) / octave_scale))
hi = img - resize(lo, hw)
img = lo
octaves.append(hi)
# Generate the low-frequency image first, then progressively upscale it and add back the high frequencies
for octave in range(octave_n):
if octave > 0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2]) + hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g * (step / (np.abs(g).mean() + 1e-7))
# The only difference is that we use lap_norm_func to normalize g!
# g = lap_norm_func(g)
# img += g * step
print('.', end=' ')
img = img.clip(0, 255)
savearray(img, './predict_img/deepdream.jpg')
if __name__ == '__main__':
img0 = PIL.Image.open('./images/test.jpg')
img0 = np.float32(img0)
render_deepdream(img0)
| 30.031088
| 104
| 0.619393
| 862
| 5,796
| 4.009281
| 0.264501
| 0.018519
| 0.019676
| 0.013021
| 0.273148
| 0.269676
| 0.224537
| 0.20081
| 0.20081
| 0.183449
| 0
| 0.03292
| 0.234817
| 5,796
| 192
| 105
| 30.1875
| 0.746336
| 0.100069
| 0
| 0.216418
| 0
| 0
| 0.062247
| 0.031798
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11194
| false
| 0
| 0.089552
| 0.014925
| 0.291045
| 0.029851
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d6e3bbbed972de89ca1f857b7b3b2178ada3d2
| 1,829
|
py
|
Python
|
admin.py
|
BlueBlock/usage-reporter
|
e30bbef6d281944d62f716c37aff17861a653967
|
[
"MIT"
] | 4
|
2018-08-30T06:16:35.000Z
|
2022-02-18T08:06:21.000Z
|
admin.py
|
BlueBlock/usage-reporter
|
e30bbef6d281944d62f716c37aff17861a653967
|
[
"MIT"
] | 1
|
2018-03-29T17:04:44.000Z
|
2018-03-29T17:04:44.000Z
|
admin.py
|
BlueBlock/usage-reporter
|
e30bbef6d281944d62f716c37aff17861a653967
|
[
"MIT"
] | 4
|
2018-01-31T06:55:32.000Z
|
2022-01-16T10:39:18.000Z
|
import calendar
import datetime
import logging
import os
import webapp2
import dbmodel
TESTING = os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
class ResetHandler(webapp2.RequestHandler):
def get(self):
timestamp = calendar.timegm(datetime.datetime.utcnow().timetuple())
self.response.write('<html><body><form method="POST"><input type="text" value="' + str(
timestamp) + '" name="day"><input type="submit"></form></body></html>')
def post(self):
timestamp = int(self.request.get('day', None))
entry_day = datetime.datetime.utcfromtimestamp(timestamp).date()
logging.info('Processing day %s', entry_day)
starttimestamp = calendar.timegm((entry_day.year, entry_day.month, entry_day.day, 0, 0, 0))
endtimestamp = starttimestamp + 24 * 60 * 60
logging.info('starttimestamp, endtimestamp: (%s, %s)', starttimestamp, endtimestamp)
count = 0
for item in dbmodel.ReportItem.all().filter('counted', 0).filter('eventtype =', 'Information').filter(
'timestamp <', endtimestamp).filter('timestamp >=', starttimestamp).order('timestamp'):
item.counted = None
item.put()
count += 1
for item in dbmodel.ReportItem.all().filter('counted', 1).filter('eventtype =', 'Information').filter(
'timestamp <', endtimestamp).filter('timestamp >=', starttimestamp).order('timestamp'):
item.counted = None
item.put()
count += 1
logging.info('Reset for %s items', count)
for item in dbmodel.AggregateItem.all().filter('timestamp =', starttimestamp).filter('rangetype =', 'day'):
item.delete()
app = webapp2.WSGIApplication([
('/tasks/admin/reset', ResetHandler)
], debug=TESTING)
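# --- Standalone sketch (standard library only, not part of the original file):
# the day-boundary arithmetic used in post() above. A UTC day starts at
# calendar.timegm((Y, M, D, 0, 0, 0)) and ends exactly 24 * 60 * 60 seconds later.
if __name__ == "__main__":
    day = datetime.date(2018, 1, 31)
    start = calendar.timegm((day.year, day.month, day.day, 0, 0, 0))
    end = start + 24 * 60 * 60
    print(start, end, end - start)   # 1517356800 1517443200 86400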
| 35.173077
| 115
| 0.632586
| 194
| 1,829
| 5.93299
| 0.396907
| 0.034752
| 0.023458
| 0.041703
| 0.288445
| 0.288445
| 0.288445
| 0.288445
| 0.215465
| 0.215465
| 0
| 0.011822
| 0.213778
| 1,829
| 51
| 116
| 35.862745
| 0.788595
| 0
| 0
| 0.222222
| 0
| 0
| 0.207764
| 0.019136
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d717769a7df13adf5117eb840b41a6b41f5506
| 2,708
|
py
|
Python
|
napari/utils/colormaps/categorical_colormap_utils.py
|
Zac-HD/napari
|
102a7e8f845893c874d2b86f9371d41130100b89
|
[
"BSD-3-Clause"
] | 1
|
2021-04-24T10:10:54.000Z
|
2021-04-24T10:10:54.000Z
|
napari/utils/colormaps/categorical_colormap_utils.py
|
Zac-HD/napari
|
102a7e8f845893c874d2b86f9371d41130100b89
|
[
"BSD-3-Clause"
] | 2
|
2021-05-17T02:15:08.000Z
|
2022-03-12T21:19:52.000Z
|
napari/utils/colormaps/categorical_colormap_utils.py
|
Zac-HD/napari
|
102a7e8f845893c874d2b86f9371d41130100b89
|
[
"BSD-3-Clause"
] | null | null | null |
from dataclasses import dataclass
from itertools import cycle
from typing import Dict, Union
import numpy as np
from ...layers.utils.color_transformations import (
transform_color,
transform_color_cycle,
)
@dataclass(eq=False)
class ColorCycle:
"""A dataclass to hold a color cycle for the fallback_colors
in the CategoricalColormap
Attributes
----------
values : np.ndarray
The (Nx4) color array of all colors contained in the color cycle.
cycle : cycle
The cycle object that gives fallback colors.
"""
values: np.ndarray
cycle: cycle
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
# turn a generic dict into object
if isinstance(val, dict):
return _coerce_colorcycle_from_dict(val)
elif isinstance(val, ColorCycle):
return val
else:
return _coerce_colorcycle_from_colors(val)
def _json_encode(self):
return {'values': self.values.tolist()}
def __eq__(self, other):
if isinstance(other, ColorCycle):
eq = np.array_equal(self.values, other.values)
else:
eq = False
return eq
def _coerce_colorcycle_from_dict(
val: Dict[str, Union[str, list, np.ndarray, cycle]]
) -> ColorCycle:
# validate values
color_values = val.get('values')
if color_values is None:
raise ValueError('ColorCycle requires a values argument')
transformed_color_values = transform_color(color_values)
# validate cycle
color_cycle = val.get('cycle')
if color_cycle is None:
transformed_color_cycle = transform_color_cycle(
color_cycle=color_values,
elem_name='color_cycle',
default="white",
)[0]
else:
transformed_color_cycle = color_cycle
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def _coerce_colorcycle_from_colors(
val: Union[str, list, np.ndarray]
) -> ColorCycle:
if isinstance(val, str):
val = [val]
(
transformed_color_cycle,
transformed_color_values,
) = transform_color_cycle(
color_cycle=val,
elem_name='color_cycle',
default="white",
)
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def compare_colormap_dicts(cmap_1, cmap_2):
if len(cmap_1) != len(cmap_2):
return False
for k, v in cmap_1.items():
if k not in cmap_2:
return False
if not np.allclose(v, cmap_2[k]):
return False
return True
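# --- Usage sketch (toy RGBA values, not part of the original file):
# compare_colormap_dicts treats two colormap dicts as equal only when they have
# the same keys and each color matches within np.allclose.
if __name__ == "__main__":
    red = np.array([1.0, 0.0, 0.0, 1.0])
    green = np.array([0.0, 1.0, 0.0, 1.0])
    blue = np.array([0.0, 0.0, 1.0, 1.0])
    print(compare_colormap_dicts({'a': red, 'b': green}, {'a': red, 'b': green}))  # True
    print(compare_colormap_dicts({'a': red, 'b': green}, {'a': red, 'b': blue}))   # False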
| 25.308411
| 73
| 0.64771
| 327
| 2,708
| 5.119266
| 0.281346
| 0.101553
| 0.062724
| 0.035842
| 0.284349
| 0.123059
| 0.087216
| 0.087216
| 0.087216
| 0.087216
| 0
| 0.004564
| 0.271787
| 2,708
| 106
| 74
| 25.54717
| 0.84432
| 0.119645
| 0
| 0.24
| 0
| 0
| 0.036705
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093333
| false
| 0
| 0.066667
| 0.013333
| 0.346667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d75d84ab48e0f55426fa5ef9b76cbde3951e30
| 7,027
|
py
|
Python
|
src/ipywidgets_toggle_buttons/abc_toggle_buttons_with_hide.py
|
stas-prokopiev/ipywidgets_toggle_buttons
|
84d1afde1d02c19fb6a41b20e17b9d2b1c7980e2
|
[
"MIT"
] | null | null | null |
src/ipywidgets_toggle_buttons/abc_toggle_buttons_with_hide.py
|
stas-prokopiev/ipywidgets_toggle_buttons
|
84d1afde1d02c19fb6a41b20e17b9d2b1c7980e2
|
[
"MIT"
] | null | null | null |
src/ipywidgets_toggle_buttons/abc_toggle_buttons_with_hide.py
|
stas-prokopiev/ipywidgets_toggle_buttons
|
84d1afde1d02c19fb6a41b20e17b9d2b1c7980e2
|
[
"MIT"
] | null | null | null |
"""Abstract class for all toggle buttons"""
# Standard library imports
import logging
from collections import OrderedDict
# Third party imports
import ipywidgets
# Local imports
from .abc_toggle_buttons import BaseToggleButtons
from .layouts import DICT_LAYOUT_HBOX_ANY
LOGGER = logging.getLogger(__name__)
class BaseToggleButtonsWithHide(BaseToggleButtons):
"""Abstract class for all toggle buttons
Values are stored in self.widget_parent, while what is displayed is self.widget,
which is updated at the moment display() is launched
"""
def __init__(
self,
widget_parent,
options_visible=None,
options_hidden=None,
**kwargs
):
"""Initialize object"""
super().__init__(widget_parent, **kwargs)
# hidden attributes to setters
self._options_visible = []
self._options_hidden = []
self._bool_is_hidden_options_created = False
# Create scaffolds inside self.widgets
self._create_scaffold_for_widget()
self._dict_visible_button_by_option = OrderedDict()
self._dict_hidden_button_by_option = OrderedDict()
# Set options
self.options_visible = options_visible
self.options_hidden = options_hidden
self._update_buttons_for_new_options()
@property
def options_visible(self):
"""Getter for visible options used in widget"""
return self._options_visible
@options_visible.setter
def options_visible(self, new_value):
"""Setter for visible options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_visible):
return None
self._options_visible = new_value
self._create_buttons_for_visible_options()
# Update hidden options to drop any that now exist in the new visible options
# This will also update the whole widget
self.options_hidden = self._options_hidden
self.options = self._options_visible + self._options_hidden
self._update_widget_view()
@property
def options_hidden(self):
"""Getter for hidden options used in widget"""
return self._options_hidden
@options_hidden.setter
def options_hidden(self, new_value):
"""Setter for hidden options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_hidden):
return None
# Filter out from the hidden options all options which exist in the visible options
options_hidden_cleared = []
for str_option in new_value:
if str_option not in self.options_visible:
options_hidden_cleared.append(str_option)
self._options_hidden = options_hidden_cleared
self.options = self._options_visible + self._options_hidden
# self._create_buttons_for_hidden_options()
self._update_widget_view()
def turn_off_all_buttons(self):
"""Mark all buttons as not clicked"""
for str_option in self._dict_visible_button_by_option:
but = self._dict_visible_button_by_option[str_option]
but.button_style = ""
for str_option in self._dict_hidden_button_by_option:
but = self._dict_hidden_button_by_option[str_option]
but.button_style = ""
# Change style of selected hidden button
# self._widget_but_hidden_option_selected.description = "..."
# self._widget_but_hidden_option_selected.button_style = ""
def _update_buttons_for_new_options(self):
"""Update buttons if options were changed"""
self._create_buttons_for_visible_options()
self._bool_is_hidden_options_created = False
# self._create_buttons_for_hidden_options()
def _create_scaffold_for_widget(self):
"""Create scaffold of ipywidget Boxes for self"""
# Main buttons box
self._widget_hbox_main = ipywidgets.HBox()
self._widget_hbox_main.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_main.layout.flex_flow = "row wrap"
# Middle buttons box
self._widget_hbox_middle_buttons = ipywidgets.HBox()
self._widget_hbox_middle_buttons.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
self._create_middle_buttons()
# Hidden buttons box
self._widget_hbox_hidden = ipywidgets.HBox()
self._widget_hbox_hidden.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_hidden.layout.flex_flow = "row wrap"
def _create_buttons_for_visible_options(self):
"""Create buttons for all visible options"""
self._dict_visible_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_visible)
list_buttons = []
for str_option in list(self.options_visible):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_visible_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_main.children = list_buttons
def _create_middle_buttons(self):
"""Create buttons which are in charge what to do with hidden buttons"""
self._wid_but_hide_show = ipywidgets.ToggleButton(
value=False,
description="Show Hidden options",
button_style="info",
)
self._wid_but_hide_show.layout.width = "40%"
self._wid_but_hide_show.observe(
lambda _: self._update_widget_view(), "value")
self._widget_but_hidden_option_selected = ipywidgets.Button(
description="...", disabled=True)
self._widget_but_hidden_option_selected.layout.width = "40%"
self._widget_hbox_middle_buttons.children = [
self._widget_but_hidden_option_selected, self._wid_but_hide_show]
def _create_buttons_for_hidden_options(self):
"""Create buttons for all hidden options"""
self._dict_hidden_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_hidden)
list_buttons = []
for str_option in list(self.options_hidden):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
if str_option in self.value:
but_wid.button_style = "success"
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_hidden_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_hidden.children = list_buttons
| 40.154286
| 91
| 0.672549
| 851
| 7,027
| 5.13396
| 0.165687
| 0.057908
| 0.042802
| 0.027466
| 0.61387
| 0.464408
| 0.336461
| 0.271229
| 0.256352
| 0.193637
| 0
| 0.000762
| 0.253166
| 7,027
| 174
| 92
| 40.385057
| 0.831745
| 0.210474
| 0
| 0.298246
| 0
| 0
| 0.011479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096491
| false
| 0
| 0.04386
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d7886d9a5f7ae38bdb7d01f1fc136b75bb2a50
| 3,899
|
py
|
Python
|
Players/DWPMPlayer.py
|
jokvedaras/game-framework
|
9ff60e15d1beff54f94e280501929664ce59afe7
|
[
"Apache-2.0"
] | null | null | null |
Players/DWPMPlayer.py
|
jokvedaras/game-framework
|
9ff60e15d1beff54f94e280501929664ce59afe7
|
[
"Apache-2.0"
] | null | null | null |
Players/DWPMPlayer.py
|
jokvedaras/game-framework
|
9ff60e15d1beff54f94e280501929664ce59afe7
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'Pat McClernan and Dan Wegmann'
import Player
import Message
# input
#0 for rock
#1 for paper
#2 for scissors
# past move is array of numbers
# our move followed by their move
#Our strategy is to look at all past moves
#In a large number of games, you would expect
# each move to be seen an even amount of times
#So our strategy is to take the least seen move
# and expect it to show up soon
# so we will play to beat that move
class DWPMPlayer(Player.Player):
def __init__(self):
Player.Player.__init__(self)
self.past_moves = []
self.set_name("Dan and Pats Player")
def play(self):
return RpsPlayingStrategy.play(self.past_moves)
def add_past_move(self, move):
"""
adds the opponent's move to past moves
"""
self.past_moves.append(move)
def get_name(self):
return self.name
def notify(self, message):
# We use notifications to store opponent's moves in past rounds
# Process match-start and round-end messages
# At the start of the match, clear opponent moves history since a new match has started
# At the end of a round, append move to opponent's move history. Move history is used
# to compute the next move played.
if message.is_match_start_message():
players = message.get_players()
if players[0] == self or players[1] == self:
self.reset()
elif message.is_round_end_message():
players = message.get_players()
# Check if this message is for me and only then proceed
if (players[0] == self) or (players[1] == self):
# In this case, (by convention) the info is a tuple of the moves made and result
# e.g. ((1, 0), (1,0)) which
# means player 1 played paper (1), the player 2 played rock(0) and the result was that
# player 1 won (got 1 point) and player 2 lost (got 0 point)
moves, result = message.get_info()
# RPS is a two person game; figure out which of the players is me
# and which one is the opponent
if players[0] == self:
opponent = 1
else:
opponent = 0
# Update opponent's past moves history
self.add_past_move(moves[opponent])
def reset(self):
self.past_moves = []
def set_name(self, name):
self.name = name
class RpsPlayingStrategy(object):
@staticmethod
def play(past_moves):
"""
our player assumes that given a high number of games, all 3 different moves of opponent will be used
an equal number of times. Given a list of past_moves, we can counter an opponent's assumed move
"""
rock = 0
paper = 0
scissors = 0
for this_move in list(past_moves):
if this_move == 0:
rock += 1
elif this_move == 1:
paper += 1
elif this_move == 2:
scissors += 1
#determine which move has been used least
if (rock < paper) and (rock < scissors):
move = 0
elif paper < scissors:
move = 1
else:
move = 2
move = (move + 1) % 3
return move
# Test driver
# Run by typing "python3 DWPMPlayer.py"
if __name__ == "__main__":
player = DWPMPlayer()
opponent = DWPMPlayer()
players = [opponent, player]
fakemoves = (1, 2)
fakeresult = (0, 1)
player.notify(Message.Message.get_match_start_message(players))
player.notify(Message.Message.get_round_start_message(players))
move = player.play()
print ("Move played: ", move)
player.notify(Message.Message.get_round_end_message(players, fakemoves, fakeresult))
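# --- Standalone sketch (standard library only, not part of the original file)
# restating the counting idea behind RpsPlayingStrategy.play(): find the
# opponent's least-seen move and play the move that beats it. The helper name
# below is illustrative only.
from collections import Counter

def _least_seen_counter_move(history):
    counts = Counter({0: 0, 1: 0, 2: 0})
    counts.update(history)
    least_seen = min(counts, key=counts.get)   # the move assumed to be "due"
    return (least_seen + 1) % 3                # the move that beats it

if __name__ == "__main__":
    # opponent played rock twice and paper once; scissors not seen yet
    print(_least_seen_counter_move([0, 0, 1]))  # 0 -> rock, which beats scissors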
| 32.22314
| 108
| 0.598359
| 528
| 3,899
| 4.3125
| 0.30303
| 0.039526
| 0.022837
| 0.018445
| 0.094422
| 0.054458
| 0.024594
| 0.024594
| 0
| 0
| 0
| 0.015945
| 0.324442
| 3,899
| 120
| 109
| 32.491667
| 0.848519
| 0.364452
| 0
| 0.09375
| 0
| 0
| 0.028798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.03125
| 0.03125
| 0.234375
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d7ef9a92fb0bfab05a3bc1de9e8efb6f62b67d
| 1,023
|
py
|
Python
|
example/example.py
|
mowshon/age-and-gender
|
e5c912f6ba739f30a45c04208b6d16500e4488cd
|
[
"MIT"
] | 81
|
2020-06-17T12:53:03.000Z
|
2022-03-11T20:02:46.000Z
|
example/example.py
|
mowshon/age-and-gender
|
e5c912f6ba739f30a45c04208b6d16500e4488cd
|
[
"MIT"
] | 4
|
2020-06-18T09:28:12.000Z
|
2021-07-13T09:16:29.000Z
|
example/example.py
|
mowshon/age-and-gender
|
e5c912f6ba739f30a45c04208b6d16500e4488cd
|
[
"MIT"
] | 17
|
2020-06-18T07:08:09.000Z
|
2022-03-31T03:56:58.000Z
|
from age_and_gender import *
from PIL import Image, ImageDraw, ImageFont
data = AgeAndGender()
data.load_shape_predictor('models/shape_predictor_5_face_landmarks.dat')
data.load_dnn_gender_classifier('models/dnn_gender_classifier_v1.dat')
data.load_dnn_age_predictor('models/dnn_age_predictor_v1.dat')
filename = 'test-image.jpg'
img = Image.open(filename).convert("RGB")
result = data.predict(img)
font = ImageFont.truetype("Acme-Regular.ttf", 20)
for info in result:
shape = [(info['face'][0], info['face'][1]), (info['face'][2], info['face'][3])]
draw = ImageDraw.Draw(img)
gender = info['gender']['value'].title()
gender_percent = int(info['gender']['confidence'])
age = info['age']['value']
age_percent = int(info['age']['confidence'])
draw.text(
(info['face'][0] - 10, info['face'][3] + 10), f"{gender} (~{gender_percent}%)\n{age} y.o. (~{age_percent}%).",
fill='white', font=font, align='center'
)
draw.rectangle(shape, outline="red", width=5)
img.show()
| 31
| 118
| 0.672532
| 144
| 1,023
| 4.611111
| 0.451389
| 0.072289
| 0.033133
| 0.042169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017998
| 0.130987
| 1,023
| 32
| 119
| 31.96875
| 0.728909
| 0
| 0
| 0
| 0
| 0
| 0.281525
| 0.132942
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7d86ca9e9717fc1914525f4cf4555781fc27cb0
| 1,463
|
py
|
Python
|
code/generate_games.py
|
jppg/pygame-tictactoe
|
f7283a71bb289601b4b8ee0b0bdbe731e67fa8a7
|
[
"MIT"
] | null | null | null |
code/generate_games.py
|
jppg/pygame-tictactoe
|
f7283a71bb289601b4b8ee0b0bdbe731e67fa8a7
|
[
"MIT"
] | null | null | null |
code/generate_games.py
|
jppg/pygame-tictactoe
|
f7283a71bb289601b4b8ee0b0bdbe731e67fa8a7
|
[
"MIT"
] | null | null | null |
from tictactoe import TicTacToe
import random
import csv
import os
gameNr = 1
gameLimit = 10000
lst_moves_1 = []
lst_moves_2 = []
while gameNr <= gameLimit:
print("+++++++++++")
print("Game#", gameNr)
game = TicTacToe()
tmp_moves_1 = []
tmp_moves_2 = []
while game.get_winner() == 0 and game.possible_moves() > 0:
pos = game.get_positions().copy()
while game.possible_moves() > 0:
move = random.randint(0,9)
if game.play(int(move)):
if game.get_player() == 1:
tmp_moves_2.append([gameNr] + [game.get_turn() - 1] + pos + [move])
else:
tmp_moves_1.append([gameNr] + [game.get_turn() - 1] + pos + [move])
break
print("Winner of game ", gameNr, "is", game.get_winner())
if game.get_winner() == 1:
lst_moves_1.append(tmp_moves_1)
#lst_moves_1.append(tmp_moves_1[len(tmp_moves_1) - 1])
else:
#lst_moves_2.append(tmp_moves_2[len(tmp_moves_2) - 1])
lst_moves_2.append(tmp_moves_2)
#print("List X: ", lst_moves_1)
#print("List O: ", lst_moves_2)
game.print_board()
gameNr = gameNr + 1
with open('moves_1.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_1:
writer.writerows(row)
with open('moves_2.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_2:
writer.writerows(row)
| 27.603774
| 87
| 0.580314
| 209
| 1,463
| 3.813397
| 0.258373
| 0.082811
| 0.056462
| 0.035132
| 0.314931
| 0.314931
| 0.314931
| 0.254705
| 0.115433
| 0.115433
| 0
| 0.037523
| 0.27136
| 1,463
| 53
| 88
| 27.603774
| 0.710131
| 0.113465
| 0
| 0.153846
| 0
| 0
| 0.044049
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.102564
| 0
| 0.102564
| 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7dc267a8e2592a1c24d3b8c06a265a370010c46
| 2,906
|
py
|
Python
|
stixcore/tmtc/tests/test_packets.py
|
nicHoch/STIXCore
|
16822bbb37046f8e6c03be51909cfc91e9822cf7
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T13:42:43.000Z
|
2022-03-31T13:42:43.000Z
|
stixcore/tmtc/tests/test_packets.py
|
nicHoch/STIXCore
|
16822bbb37046f8e6c03be51909cfc91e9822cf7
|
[
"BSD-3-Clause"
] | 192
|
2020-11-03T22:40:19.000Z
|
2022-03-31T15:17:13.000Z
|
stixcore/tmtc/tests/test_packets.py
|
nicHoch/STIXCore
|
16822bbb37046f8e6c03be51909cfc91e9822cf7
|
[
"BSD-3-Clause"
] | 3
|
2020-11-09T15:05:18.000Z
|
2022-01-21T07:52:51.000Z
|
import bitstring
import pytest
from stixcore.data.test import test_data
from stixcore.idb.manager import IDBManager
from stixcore.tmtc.packets import (
SOURCE_PACKET_HEADER_STRUCTURE,
TC_DATA_HEADER_STRUCTURE,
TM_DATA_HEADER_STRUCTURE,
SourcePacketHeader,
TCPacket,
TMDataHeader,
TMPacket,
)
from stixcore.tmtc.tm.tm_1 import TM_1_1
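# Tests for TM/TC packet parsing: the header tests pack a bitstring in which
# every field is set to its maximum value (2**bits - 1) and check that the
# parsed header attributes round-trip; test_tm_1_1 parses a fixed hex packet.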
@pytest.fixture
def idb():
return IDBManager(test_data.idb.DIR).get_idb("2.26.34")
@pytest.mark.parametrize('class_header', [(SourcePacketHeader, SOURCE_PACKET_HEADER_STRUCTURE),
(TMDataHeader, TM_DATA_HEADER_STRUCTURE)])
def test_tmtc_headers(class_header):
cls, header = class_header
test_fmt = ', '.join(header.values())
test_values = {n: 2**int(v.split(':')[-1])-1 for n, v in header.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
sph = cls(test_binary)
assert all([getattr(sph, key) == test_values[key]
for key in header.keys() if not key.startswith('spare')])
def test_tm_packet(idb):
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TM_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TMPacket(test_binary, idb=idb)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TM_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tc_packet():
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TC_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_values['process_id'] = 90
test_values['packet_category'] = 12
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TCPacket(test_binary)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TC_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tm_1_1(idb):
packet = TM_1_1('0x0da1c066000d100101782628a9c4e71e1dacc0a0', idb=idb)
assert packet.source_packet_header.process_id == 90
assert packet.source_packet_header.packet_category == 1
assert packet.data_header.service_type == 1
assert packet.data_header.service_subtype == 1
| 41.514286
| 97
| 0.699931
| 393
| 2,906
| 4.885496
| 0.175573
| 0.101563
| 0.09375
| 0.084375
| 0.657292
| 0.607292
| 0.532292
| 0.519792
| 0.479688
| 0.459896
| 0
| 0.02436
| 0.180661
| 2,906
| 69
| 98
| 42.115942
| 0.782024
| 0
| 0
| 0.263158
| 0
| 0
| 0.041308
| 0.014458
| 0
| 0
| 0.014458
| 0
| 0.157895
| 1
| 0.087719
| false
| 0
| 0.105263
| 0.017544
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7dcceeeb44aada8315f0c77d81c291531d15b79
| 3,097
|
py
|
Python
|
mxnet/local_forward.py
|
rai-project/onnx_examples
|
45db7b3e03dd674f28aeef3fcb1e60f5bca47948
|
[
"MIT"
] | null | null | null |
mxnet/local_forward.py
|
rai-project/onnx_examples
|
45db7b3e03dd674f28aeef3fcb1e60f5bca47948
|
[
"MIT"
] | null | null | null |
mxnet/local_forward.py
|
rai-project/onnx_examples
|
45db7b3e03dd674f28aeef3fcb1e60f5bca47948
|
[
"MIT"
] | null | null | null |
# run local models given a path, default to './mxnet_models/'
import os
import argparse
import time
import mxnet as mx
import numpy as np
file_path = os.path.realpath(__file__)
dir_name = os.path.dirname(file_path)
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
def cuda_profiler_start():
    # defined as functions (not classes) so numba is imported and the profiler
    # is started/stopped only when --profile is actually requested, not at
    # module import time
    import numba.cuda as cuda
    cuda.profile_start()
def cuda_profiler_stop():
    import numba.cuda as cuda
    cuda.profile_stop()
def xprint(s):
pass
parser = argparse.ArgumentParser(
description='Predict ImageNet classes from a given image')
parser.add_argument('--model_name', type=str, required=False, default='resnet50_v1',
help='name of the model to use')
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size to use')
parser.add_argument('--input_dim', type=int, required=False, default=224,
help='input dimension')
parser.add_argument('--input_channels', type=int, required=False, default=3,
help='input channels')
parser.add_argument('--num_iterations', type=int, required=False, default=30,
help='number of iterations to run')
parser.add_argument('--num_warmup', type=int, required=False, default=5,
help='number of warmup iterations to run')
parser.add_argument('--model_idx', type=int, required=False, default=2,
help='model idx')
parser.add_argument('--profile', type=bool, required=False, default=False,
help='enable profiling')
opt = parser.parse_args()
model_name = opt.model_name
batch_size = opt.batch_size
input_dim = opt.input_dim
input_channels = opt.input_channels
num_iterations = opt.num_iterations
num_warmup = opt.num_warmup
model_idx = opt.model_idx
profile = opt.profile
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
sym, arg_params, aux_params = mx.model.load_checkpoint(
dir_name + '/mxnet_models/'+model_name, 0)
data_names = [
graph_input
for graph_input in sym.list_inputs()
if graph_input not in arg_params and graph_input not in aux_params
]
net = mx.mod.Module(
symbol=sym,
data_names=[data_names[0]],
context=ctx,
label_names=None,
)
input_shape = (batch_size, input_channels, input_dim, input_dim)
img = mx.random.uniform(
shape=input_shape, ctx=ctx)
net.bind(for_training=False, data_shapes=[
(data_names[0], input_shape)], label_shapes=net._label_shapes)
net.set_params(arg_params, aux_params, allow_missing=True)
def forward_once():
mx.nd.waitall()
start = time.time()
prob = net.predict(img)
mx.nd.waitall()
end = time.time() # stop timer
return end - start
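# warm up the model, then time num_iterations forward passes and report
# min/average/max latency in milliseconds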
for i in range(num_warmup):
forward_once()
res = []
if profile:
cuda_profiler_start()
for i in range(num_iterations):
t = forward_once()
res.append(t)
if profile:
cuda_profiler_stop()
res = np.multiply(res, 1000)
print("{},{},{},{},{},{}".format(model_idx+1, model_name, batch_size, np.min(res),
np.average(res), np.max(res)))
| 27.651786
| 84
| 0.683242
| 444
| 3,097
| 4.54955
| 0.324324
| 0.035644
| 0.067327
| 0.059406
| 0.184158
| 0.082178
| 0.031683
| 0
| 0
| 0
| 0
| 0.008397
| 0.192444
| 3,097
| 111
| 85
| 27.900901
| 0.79928
| 0.022603
| 0
| 0.072289
| 0
| 0
| 0.122024
| 0.009259
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024096
| false
| 0.012048
| 0.084337
| 0
| 0.144578
| 0.024096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7de097e9b9739100654b069d9cac10ffe5b515c
| 1,198
|
py
|
Python
|
tests/test_get_angles.py
|
Mopolino8/lammps-data-file
|
5c9015d05fa1484a33c84e6cfb90cd4a7d99d133
|
[
"MIT"
] | 13
|
2017-05-30T17:43:10.000Z
|
2021-08-06T04:21:44.000Z
|
tests/test_get_angles.py
|
njustcodingjs/lammps-data-file
|
3a0729b5ab4d2344326d09ac4ee1aab41442f14a
|
[
"MIT"
] | 2
|
2018-05-28T15:35:32.000Z
|
2018-05-28T16:21:09.000Z
|
tests/test_get_angles.py
|
njustcodingjs/lammps-data-file
|
3a0729b5ab4d2344326d09ac4ee1aab41442f14a
|
[
"MIT"
] | 10
|
2017-05-23T21:19:21.000Z
|
2022-03-08T02:18:00.000Z
|
from lammps_data.angles import get_angles
def test_separate_diatomic_molecules_should_have_no_angles():
bonds = [(0, 1), (2, 3)]
assert get_angles(bonds) == []
def test_molecule_with_two_bonds_should_have_one_angle():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_within_angle_tuples():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
bonds = [(1, 2), (0, 1)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_of_angle_tuples():
bonds = [(0, 1), (1, 2), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
bonds = [(1, 2), (0, 1), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
def test_tetrahedral_molecule_should_have_six_angles():
bonds = [(0, 1), (0, 2), (0, 3), (0, 4)]
assert get_angles(bonds) == [(1, 0, 2),
(1, 0, 3),
(1, 0, 4),
(2, 0, 3),
(2, 0, 4),
(3, 0, 4)]
| 33.277778
| 87
| 0.520033
| 178
| 1,198
| 3.196629
| 0.202247
| 0.049209
| 0.123023
| 0.15993
| 0.599297
| 0.506151
| 0.506151
| 0.45167
| 0.45167
| 0.45167
| 0
| 0.09689
| 0.30217
| 1,198
| 35
| 88
| 34.228571
| 0.583732
| 0
| 0
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28
| 1
| 0.2
| false
| 0
| 0.04
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e12276bc98092252c4149244dfdf01adca03b0
| 477
|
py
|
Python
|
9-Wine-Scaling.py
|
Pawel762/Class-7_homework
|
e79d2f8d218980d814443951dae7840f521ba191
|
[
"MIT"
] | null | null | null |
9-Wine-Scaling.py
|
Pawel762/Class-7_homework
|
e79d2f8d218980d814443951dae7840f521ba191
|
[
"MIT"
] | null | null | null |
9-Wine-Scaling.py
|
Pawel762/Class-7_homework
|
e79d2f8d218980d814443951dae7840f521ba191
|
[
"MIT"
] | null | null | null |
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
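# Load the sklearn wine dataset, standardise the features with StandardScaler,
# and split the scaled data into train/test sets.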
wine = load_wine()
columns_names = wine.feature_names
y = wine.target
X = wine.data
print('Pre scaling X')
print(X)
scaler = StandardScaler()
scaler.fit(X)
scaled_features = scaler.transform(X)
print('Post scaling X')
print(scaled_features)
X_train, X_test, y_train, y_test = train_test_split(scaled_features, y, test_size=0.375)
| 21.681818
| 88
| 0.796646
| 74
| 477
| 4.905405
| 0.432432
| 0.090909
| 0.077135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009434
| 0.111111
| 477
| 21
| 89
| 22.714286
| 0.846698
| 0
| 0
| 0
| 0
| 0
| 0.056723
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.266667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e14941f3967e5d720a9a0637e48720262f173d
| 4,057
|
py
|
Python
|
tests/conftest.py
|
szkkteam/flask-starter
|
7019036e7ee017ca5df9059d0b4a0d29005beab5
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
szkkteam/flask-starter
|
7019036e7ee017ca5df9059d0b4a0d29005beab5
|
[
"MIT"
] | 2
|
2021-03-31T19:36:44.000Z
|
2021-12-13T20:30:11.000Z
|
tests/conftest.py
|
szkkteam/flask-starter
|
7019036e7ee017ca5df9059d0b4a0d29005beab5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
import os
import pytest
# Pip package imports
from collections import namedtuple
from flask import template_rendered
from flask_security.signals import (
reset_password_instructions_sent,
user_confirmed,
user_registered,
)
# Internal package imports
from backend.app import _create_app
from backend.config import TestConfig
from backend.extensions import db as db_ext
from backend.extensions.mail import mail
from ._client import (
ApiTestClient,
ApiTestResponse,
HtmlTestClient,
HtmlTestResponse,
)
from ._model_factory import ModelFactory
@pytest.fixture(autouse=True, scope='session')
def app():
app = _create_app(TestConfig)
#ctx = app.app_context()
ctx = app.test_request_context()
ctx.push()
yield app
ctx.pop()
@pytest.yield_fixture
def client(app):
app.response_class = HtmlTestResponse
app.test_client_class = HtmlTestClient
with app.test_client() as client:
yield client
@pytest.yield_fixture
def api_client(app):
app.response_class = ApiTestResponse
app.test_client_class = ApiTestClient
with app.test_client() as client:
yield client
@pytest.fixture(autouse=True, scope='session')
def db():
db_ext.create_all()
yield db_ext
db_ext.drop_all()
@pytest.fixture(autouse=True)
def db_session(db):
connection = db.engine.connect()
transaction = connection.begin()
session = db.create_scoped_session(options=dict(bind=connection, binds={}))
db.session = session
try:
yield session
finally:
transaction.rollback()
connection.close()
session.remove()
@pytest.fixture(scope='session')
def celery_config():
return {'broker_url': 'redis://localhost:6379/1',
'result_backend': 'redis://localhost:6379/1',
'accept_content': ('json', 'pickle')}
@pytest.fixture()
def templates(app):
records = []
RenderedTemplate = namedtuple('RenderedTemplate', 'template context')
def record(sender, template, context, **extra):
records.append(RenderedTemplate(template, context))
template_rendered.connect(record, app)
try:
yield records
finally:
template_rendered.disconnect(record, app)
@pytest.fixture()
def outbox():
with mail.record_messages() as messages:
yield messages
@pytest.fixture()
def registrations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
user_registered.connect(record, app)
try:
yield records
finally:
user_registered.disconnect(record, app)
@pytest.fixture()
def confirmations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs['user'])
print("Record: ", records[-1])
user_confirmed.connect(record, app)
try:
yield records
finally:
print("Disconnect record: ", records)
user_confirmed.disconnect(record, app)
@pytest.fixture()
def password_resets(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
reset_password_instructions_sent.connect(record, app)
try:
yield records
finally:
reset_password_instructions_sent.disconnect(record, app)
@pytest.fixture()
def user(model_factory):
yield model_factory.create('User', 'user')
@pytest.fixture()
def newslettersubscribe(model_factory):
yield model_factory.create('NewsletterSubscribe', 'newslettersubscribe')
@pytest.fixture()
def admin(model_factory):
yield model_factory.create('User', 'admin')
@pytest.fixture()
def models(request, model_factory):
mark = request.param
if mark is not None:
return model_factory.get_models(mark)
@pytest.fixture()
def model_factory(app, db_session):
fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'model_fixtures')
yield ModelFactory(db_session, app.models, fixtures_dir)
| 22.792135
| 79
| 0.689426
| 468
| 4,057
| 5.818376
| 0.275641
| 0.066838
| 0.058759
| 0.02791
| 0.286816
| 0.268454
| 0.204187
| 0.093279
| 0.093279
| 0.062431
| 0
| 0.003697
| 0.199901
| 4,057
| 177
| 80
| 22.920904
| 0.835182
| 0.034508
| 0
| 0.322835
| 0
| 0
| 0.063667
| 0.012273
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15748
| false
| 0.031496
| 0.086614
| 0.007874
| 0.259843
| 0.015748
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e5a0b18daf16984d985969f34fb443eae76979
| 3,733
|
py
|
Python
|
generate_figure9.py
|
IBM/Simultaneous-diagonalization
|
385545401395a2e07f109441db4751a5dcf8f0a4
|
[
"Apache-2.0"
] | null | null | null |
generate_figure9.py
|
IBM/Simultaneous-diagonalization
|
385545401395a2e07f109441db4751a5dcf8f0a4
|
[
"Apache-2.0"
] | null | null | null |
generate_figure9.py
|
IBM/Simultaneous-diagonalization
|
385545401395a2e07f109441db4751a5dcf8f0a4
|
[
"Apache-2.0"
] | 1
|
2022-03-14T18:36:12.000Z
|
2022-03-14T18:36:12.000Z
|
# Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# E. van den Berg and Kristan Temme, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import os
import cl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.ticker import FuncFormatter
from itertools import permutations
def plotZ(Z, exportFilename=None) :
(m,n) = Z.shape
cmap = colors.LinearSegmentedColormap.from_list("white_and_gray", [(1, 1, 1), (0.6, 0.6, 0.6)], N=2)
fig, ax = plt.subplots()
im = ax.imshow(Z.T,cmap=cmap)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
for i in range(1,m) :
plt.plot([-0.5+i,-0.5+i],[-0.5,-0.5+n],color='k',linewidth=0.7)
    for i in range(1,n) :
plt.plot([-0.5,-0.5+m],[-0.5+i,-0.5+i],color='k',linewidth=0.7)
for i in range(n) :
v = Z[:,i]
c = np.sum(v[:-1] != v[1:]) + v[0] + v[-1]
ax.text(m-0.25,i, str(c), fontsize=12, ha='left', va='center')
if (exportFilename) :
plt.gcf().tight_layout()
plt.savefig(exportFilename + "-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop %s-uncropped.pdf %s.pdf" % (exportFilename, exportFilename))
else :
plt.show()
# Make sure the figure directory exists
cl.ensureDirExists('fig')
# Create the test problem
M = cl.create_basic_problem(7,0)
C = cl.generate_full_rank_weights(20,7,seed=1)
M = np.dot(C,M) % 2
# Apply diagonalization and get the final Z matrix
T = cl.Tableau(M)
R = cl.RecordOperations(T.n)
T.addRecorder(R)
cl.zeroX_algorithm1_cz(T)
T = cl.Tableau(M)
R.apply(T)
Z = T.getZ()
# Plot the results
plotZ(Z,'fig/Figure_9a')
print("Original: %d" % cl.countCNot(Z))
idx = cl.orderZ(Z)
plotZ(Z[idx,:],'fig/Figure_9b')
print("Sorted : %d" % cl.countCNot(Z[idx,:]))
# Generate histogram of actual permutations
if (True) :
base = list(range(7))
count = []
for idx2 in permutations(base) :
idx1 = cl.orderZ(Z[:,idx2])
count.append(cl.countCNot(Z[idx1,:][:,idx2]))
def format_percentage(y, position):
return str(100 * y)
# Count is always even
plt.hist(count,bins=list(range(min(count)-1,max(count)+2,2)),rwidth=0.9,density=True)
plt.gca().set_xticklabels([str(x) for x in range(min(count),max(count)+1,2)],fontsize=16)
plt.gca().set_xticks(list(range(min(count),max(count)+1,2)))
plt.gca().yaxis.set_major_formatter(FuncFormatter(format_percentage))
plt.xlabel('Number of CNOT gates',fontsize=16)
plt.ylabel("Percentage",fontsize=16)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(16)
plt.gcf().tight_layout()
ratio = 0.5
xleft, xright = plt.gca().get_xlim()
ybottom, ytop = plt.gca().get_ylim()
plt.gca().set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
plt.savefig("fig/Figure_9c-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf")
| 31.905983
| 103
| 0.682561
| 613
| 3,733
| 4.102773
| 0.420881
| 0.007157
| 0.004771
| 0.013121
| 0.126839
| 0.085089
| 0.080318
| 0.062028
| 0.062028
| 0
| 0
| 0.037628
| 0.159925
| 3,733
| 116
| 104
| 32.181034
| 0.764349
| 0.284222
| 0
| 0.084507
| 0
| 0
| 0.088369
| 0.020393
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.098592
| 0.014085
| 0.140845
| 0.028169
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e5bf2a376cfb8077d1056296fc71ad74e416d7
| 793
|
py
|
Python
|
undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py
|
NASA-DEVELOP/dnppy
|
8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b
|
[
"NASA-1.3"
] | 65
|
2015-09-10T12:59:56.000Z
|
2022-02-27T22:09:03.000Z
|
undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py
|
snowzm/dnppy
|
8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b
|
[
"NASA-1.3"
] | 40
|
2015-04-08T19:23:30.000Z
|
2015-08-04T15:53:11.000Z
|
undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py
|
snowzm/dnppy
|
8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b
|
[
"NASA-1.3"
] | 45
|
2015-08-14T19:09:38.000Z
|
2022-02-15T18:53:16.000Z
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: qgeddes
#
# Created: 25/04/2013
# Copyright: (c) qgeddes 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy
import L7GapFiller
Scenes=arcpy.GetParameterAsText(0)
Scenes=Scenes.split(";")
OutputFolder=arcpy.GetParameterAsText(1)
OutputFile= arcpy.GetParameterAsText(2)
Output=OutputFolder+"\\"+OutputFile
CloudMasks= arcpy.GetParameterAsText(3)
CloudMasks= CloudMasks.split(";")
Z=arcpy.GetParameter(4)
arcpy.AddMessage(Z)
arcpy.env.scratchWorkspace=OutputFolder
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput=True
L7GapFiller.L7GapFill(Scenes, Output,CloudMasks,Z)
| 26.433333
| 80
| 0.600252
| 67
| 793
| 7.104478
| 0.567164
| 0.193277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029289
| 0.095839
| 793
| 29
| 81
| 27.344828
| 0.634589
| 0.366961
| 0
| 0
| 0
| 0
| 0.022358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e75b487c0cdec2958e2495ad3a66ff9804a5e3
| 1,855
|
py
|
Python
|
ingestion/tests/unit/great_expectations/test_ometa_validation_action.py
|
ulixius9/OpenMetadata
|
f121698d968717f0932f685ef2a512c2a4d92438
|
[
"Apache-2.0"
] | null | null | null |
ingestion/tests/unit/great_expectations/test_ometa_validation_action.py
|
ulixius9/OpenMetadata
|
f121698d968717f0932f685ef2a512c2a4d92438
|
[
"Apache-2.0"
] | null | null | null |
ingestion/tests/unit/great_expectations/test_ometa_validation_action.py
|
ulixius9/OpenMetadata
|
f121698d968717f0932f685ef2a512c2a4d92438
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test suite for the action module implementation
"""
import os
from unittest import mock
from jinja2 import Environment
from pytest import mark
from metadata.great_expectations.action import OpenMetadataValidationAction
from metadata.great_expectations.utils.ometa_config_handler import render_template
@mark.parametrize(
"input,expected",
[
(None, "list_entities"),
("service_name", "get_by_name"),
],
)
def test_get_table_entity(input, expected, mocked_ometa, mocked_ge_data_context):
"""Test get table entity"""
ometa_validation = OpenMetadataValidationAction(
data_context=mocked_ge_data_context,
config_file_path="my/config/path",
ometa_service_name=input,
)
res = ometa_validation._get_table_entity("database", "schema", "table")
assert res._type == expected
def test_create_jinja_environment(fixture_jinja_environment):
"""Test create jinja environment"""
assert isinstance(fixture_jinja_environment, Environment)
@mock.patch.dict(os.environ, {"API_VERSION": "v1"})
def test_render_template(fixture_jinja_environment):
"""Test create jinja environment"""
tmplt = render_template(fixture_jinja_environment)
assert tmplt == "hostPort: http://localhost:8585\napiVersion: v1"
| 34.351852
| 82
| 0.755256
| 240
| 1,855
| 5.65
| 0.516667
| 0.082596
| 0.067847
| 0.057522
| 0.109882
| 0.072271
| 0.072271
| 0
| 0
| 0
| 0
| 0.009615
| 0.15903
| 1,855
| 53
| 83
| 35
| 0.859615
| 0.367116
| 0
| 0
| 0
| 0
| 0.125219
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e7bdfc8b236f444e8faf6ff083ca3ec5dec358
| 1,285
|
py
|
Python
|
tests/integration/Containers.py
|
adnrs96/runtime
|
e824224317e6aa108cf06968474fc44fa33488d6
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/Containers.py
|
adnrs96/runtime
|
e824224317e6aa108cf06968474fc44fa33488d6
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/Containers.py
|
adnrs96/runtime
|
e824224317e6aa108cf06968474fc44fa33488d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from storyruntime.Containers import Containers
from storyruntime.constants.ServiceConstants import ServiceConstants
import storyscript
def test_containers_format_command(story):
"""
Ensures a simple resolve can be performed
"""
story_text = 'alpine echo msg:"foo"\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
'arguments': {'msg': {'type': 'string'}}
}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo', '{"msg":"foo"}']
def test_containers_format_command_no_arguments(story):
story_text = 'alpine echo\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo']
| 26.770833
| 68
| 0.529183
| 115
| 1,285
| 5.791304
| 0.382609
| 0.096096
| 0.138138
| 0.126126
| 0.60961
| 0.51952
| 0.51952
| 0.51952
| 0.51952
| 0.51952
| 0
| 0.003472
| 0.327626
| 1,285
| 47
| 69
| 27.340426
| 0.767361
| 0.049805
| 0
| 0.432432
| 0
| 0
| 0.118672
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.054054
| false
| 0
| 0.081081
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e91e12c70be5743a54ddceae5d419516ca3301
| 1,367
|
py
|
Python
|
project_name/core/admin.py
|
cosmunsoftwares/django-boilerplate
|
147aa7f59901d0fb95d41acf8ec118c6830267f8
|
[
"MIT"
] | 3
|
2018-11-30T19:51:35.000Z
|
2020-10-20T00:28:49.000Z
|
project_name/core/admin.py
|
cosmun-softwares/django-boilerplate
|
147aa7f59901d0fb95d41acf8ec118c6830267f8
|
[
"MIT"
] | 6
|
2020-04-09T20:00:45.000Z
|
2022-02-10T08:25:47.000Z
|
project_name/core/admin.py
|
cosmunsoftwares/django-boilerplate
|
147aa7f59901d0fb95d41acf8ec118c6830267f8
|
[
"MIT"
] | 1
|
2018-08-27T21:44:44.000Z
|
2018-08-27T21:44:44.000Z
|
from django.contrib import admin
from django.shortcuts import redirect
from django.utils.safestring import mark_safe
from django.contrib.admin.widgets import AdminFileWidget
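# Admin widget that renders image fields as a clickable thumbnail preview
# (linking to the full-size file) alongside the regular file input.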
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None, renderer=None):
output = []
if value and getattr(value, "url", None):
output.append(u'<a href="%s" target="_blank">%s</a>' % (value.url, thumbnail(value)))
output.append(super(AdminFileWidget, self).render(name, value, attrs, renderer))
return mark_safe(u''.join(output))
class ImageWidgetAdmin(admin.ModelAdmin):
image_fields = []
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.image_fields:
kwargs.pop("request", None)
kwargs['widget'] = AdminImageWidget
return db_field.formfield(**kwargs)
return super(ImageWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def redirect_one_object(model, obj):
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/add/')
if obj:
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/{obj.pk}/change/')
return response
def thumbnail(obj, size='col-md-2'):
return mark_safe('<img src="{}" class="img-thumbnail {} p-0">'.format(obj.url, size))
| 37.972222
| 104
| 0.688369
| 177
| 1,367
| 5.180791
| 0.418079
| 0.043621
| 0.037077
| 0.043621
| 0.130862
| 0.130862
| 0.130862
| 0.130862
| 0.130862
| 0.130862
| 0
| 0.001765
| 0.171178
| 1,367
| 35
| 105
| 39.057143
| 0.80759
| 0
| 0
| 0
| 0
| 0.038462
| 0.171178
| 0.112655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0.038462
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7e9c8cc7086c2b1fd149895cfcda90298ab4af1
| 1,222
|
py
|
Python
|
src/5vents.py
|
subhash686/aoc-2021
|
a01fa07f94148b7072c3ba4c854b546862d3486a
|
[
"Apache-2.0"
] | null | null | null |
src/5vents.py
|
subhash686/aoc-2021
|
a01fa07f94148b7072c3ba4c854b546862d3486a
|
[
"Apache-2.0"
] | null | null | null |
src/5vents.py
|
subhash686/aoc-2021
|
a01fa07f94148b7072c3ba4c854b546862d3486a
|
[
"Apache-2.0"
] | null | null | null |
import os
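# Advent of Code 2021, day 5: read vent line segments from vents.txt, plot each
# horizontal, vertical or diagonal line onto a 1000x1000 grid, and count the
# points covered by more than one line.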
plane = [[0 for i in range(1000)] for j in range(1000)]
count = [0]
def overlapping_vents():
path = os.getcwd()
file_path = os.path.join(path, 'vents.txt')
file1 = open(file_path, 'r')
Lines = file1.readlines()
for line in Lines:
input = line.strip()
points = input.split(" -> ")
plot(points[0], points[1])
print(count[0])
def plot(point1, point2):
p1 = point1.split(",")
p2 = point2.split(",")
x1 = int(p1[0])
x2 = int(p2[0])
y1 = int(p1[1])
y2 = int(p2[1])
if x1 == x2 and y1 == y2:
addpoints(x1, y1)
elif x1 == x2:
if y1 > y2:
y1, y2 = y2, y1
for y in range(y1, y2+1):
addpoints(x1, y)
elif y1 == y2:
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, y1)
else:
slope = (y2-y1)/ (x2-x1)
intercept = y1 - (x1 * slope)
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, int(x*slope)+int(intercept))
def addpoints(x, y):
if plane[x][y] == 1:
count[0] +=1
plane[x][y] += 1
if __name__ == "__main__":
overlapping_vents()
| 22.218182
| 55
| 0.488543
| 183
| 1,222
| 3.196721
| 0.284153
| 0.054701
| 0.030769
| 0.02735
| 0.136752
| 0.136752
| 0.136752
| 0.136752
| 0.136752
| 0.136752
| 0
| 0.102757
| 0.346972
| 1,222
| 54
| 56
| 22.62963
| 0.630326
| 0
| 0
| 0.133333
| 0
| 0
| 0.01964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.022222
| 0
| 0.088889
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7eb057d4134335a7eb1bab05618a4866e334bff
| 1,217
|
py
|
Python
|
problems/test_0073_m_plus_n_space.py
|
chrisxue815/leetcode_python
|
dec3c160d411a5c19dc8e9d96e7843f0e4c36820
|
[
"Unlicense"
] | 1
|
2017-06-17T23:47:17.000Z
|
2017-06-17T23:47:17.000Z
|
problems/test_0073_m_plus_n_space.py
|
chrisxue815/leetcode_python
|
dec3c160d411a5c19dc8e9d96e7843f0e4c36820
|
[
"Unlicense"
] | null | null | null |
problems/test_0073_m_plus_n_space.py
|
chrisxue815/leetcode_python
|
dec3c160d411a5c19dc8e9d96e7843f0e4c36820
|
[
"Unlicense"
] | null | null | null |
import unittest
class Solution:
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
rows = [0] * len(matrix)
cols = [0] * len(matrix[0])
for i, row in enumerate(matrix):
for j, num in enumerate(row):
if not num:
rows[i] = 1
cols[j] = 1
for row, num in enumerate(rows):
if num:
for j in range(len(matrix[0])):
matrix[row][j] = 0
for col, num in enumerate(cols):
if num:
for i in range(len(matrix)):
matrix[i][col] = 0
class Test(unittest.TestCase):
def test(self):
self._test(
[
[1, 2, 0],
[1, 2, 3],
[0, 2, 3],
],
[
[0, 0, 0],
[0, 2, 0],
[0, 0, 0],
]
)
def _test(self, matrix, expected):
Solution().setZeroes(matrix)
self.assertEqual(expected, matrix)
if __name__ == '__main__':
unittest.main()
| 23.403846
| 76
| 0.419063
| 135
| 1,217
| 3.703704
| 0.318519
| 0.024
| 0.024
| 0.064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039394
| 0.457683
| 1,217
| 51
| 77
| 23.862745
| 0.718182
| 0.080526
| 0
| 0.108108
| 0
| 0
| 0.00736
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 1
| 0.081081
| false
| 0
| 0.027027
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7edb1043a4f03dfdc950843e15b617197779da3
| 9,077
|
py
|
Python
|
tests/unit/test_juju.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_juju.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_juju.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | null | null | null |
import os
import tempfile
import mock
from . import utils
from hotsos.core.config import setup_config
from hotsos.core.ycheck.scenarios import YScenarioChecker
from hotsos.core.issues.utils import KnownBugsStore, IssuesStore
from hotsos.plugin_extensions.juju import summary
JOURNALCTL_CAPPEDPOSITIONLOST = """
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] CollectionCloner ns:juju.txns.log finished cloning with status: QueryPlanKilled: PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] collection clone for 'juju.txns.log' failed due to QueryPlanKilled: While cloning collection 'juju.txns.log' there was an error 'PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)'
""" # noqa
RABBITMQ_CHARM_LOGS = """
2021-02-17 08:18:44 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
2021-02-17 08:20:34 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
""" # noqa
UNIT_LEADERSHIP_ERROR = """
2021-09-16 10:28:25 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:28:47 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:06 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:53 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:30:41 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
""" # noqa
class JujuTestsBase(utils.BaseTestCase):
def setUp(self):
super().setUp()
setup_config(PLUGIN_NAME='juju')
class TestJujuSummary(JujuTestsBase):
def test_summary_keys(self):
inst = summary.JujuSummary()
self.assertEqual(list(inst.output.keys()),
['charm-repo-info',
'charms',
'machine',
'services',
'units',
'version'])
def test_service_info(self):
expected = {'ps': ['jujud (1)'],
'systemd': {
'enabled': ['jujud-machine-1']}
}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['services'],
expected)
def test_machine_info(self):
inst = summary.JujuSummary()
self.assertTrue(inst.plugin_runnable)
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.22')
self.assertEqual(actual['machine'], '1')
@mock.patch('hotsos.core.plugins.juju.JujuMachine')
def test_get_lxd_machine_info(self, mock_machine):
mock_machine.return_value = mock.MagicMock()
mock_machine.return_value.id = '0-lxd-11'
mock_machine.return_value.version = '2.9.9'
inst = summary.JujuSummary()
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.9')
self.assertEqual(actual['machine'], '0-lxd-11')
def test_charm_versions(self):
expected = ['ceph-osd-508', 'neutron-openvswitch-457',
'nova-compute-589']
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['charms'],
expected)
def test_get_unit_info(self):
expected = {'local': ['ceph-osd-0', 'neutron-openvswitch-1',
'nova-compute-0']}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['units'],
expected)
class TestJujuScenarios(JujuTestsBase):
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
def test_1852502(self, mock_helper):
mock_helper.return_value = mock.MagicMock()
mock_helper.return_value.journalctl.return_value = \
JOURNALCTL_CAPPEDPOSITIONLOST.splitlines(keepends=True)
YScenarioChecker()()
mock_helper.return_value.journalctl.assert_called_with(
unit='juju-db')
msg_1852502 = ('known mongodb bug identified - '
'https://jira.mongodb.org/browse/TOOLS-1636 '
'Workaround is to pass --no-logs to juju '
'create-backup. This is an issue only with Mongo '
'3. Mongo 4 does not have this issue. Upstream is '
'working on migrating to Mongo 4 in the Juju 3.0 '
'release.')
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1852502',
'desc': msg_1852502,
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
def test_1910958(self):
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-rabbitmq-server-0.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(RABBITMQ_CHARM_LOGS)
YScenarioChecker()()
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1910958',
'desc':
('Unit unit-rabbitmq-server-0 failed to start due '
'to members in relation 236 that cannot be '
'removed.'),
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('jujud_checks.yaml'))
@mock.patch('hotsos.core.host_helpers.systemd.ServiceChecksBase.processes',
{})
def test_jujud_checks(self):
YScenarioChecker()()
msg = ('No jujud processes found running on this host but it seems '
'there should be since Juju is installed.')
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('charm_checks.yaml'))
def test_unit_checks(self, mock_cli):
mock_cli.return_value = mock.MagicMock()
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-keystone-2.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(UNIT_LEADERSHIP_ERROR)
# first try outside age limit
mock_cli.return_value.date.return_value = "2021-09-25 00:00:00"
YScenarioChecker()()
self.assertEqual(IssuesStore().load(), {})
# then within
mock_cli.return_value.date.return_value = "2021-09-17 00:00:00"
YScenarioChecker()()
msg = ("Juju unit(s) 'keystone' are showing leadership errors in "
"their logs from the last 7 days. Please investigate.")
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
| 51.282486
| 344
| 0.637435
| 1,065
| 9,077
| 5.333333
| 0.266667
| 0.034331
| 0.021127
| 0.026761
| 0.592782
| 0.553697
| 0.553697
| 0.553697
| 0.536092
| 0.50581
| 0
| 0.044333
| 0.254489
| 9,077
| 176
| 345
| 51.573864
| 0.795035
| 0.005949
| 0
| 0.324138
| 0
| 0.062069
| 0.431407
| 0.06643
| 0
| 0
| 0
| 0
| 0.103448
| 1
| 0.075862
| false
| 0.006897
| 0.055172
| 0
| 0.151724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7ef7d842b61d4e084cbe5d2d84903334c53e8d0
| 9,626
|
py
|
Python
|
tools/SPGAN/main.py
|
by-liu/OpenUnReID
|
2260d8e16588a992631c9c84e6cee4304ae8593d
|
[
"Apache-2.0"
] | null | null | null |
tools/SPGAN/main.py
|
by-liu/OpenUnReID
|
2260d8e16588a992631c9c84e6cee4304ae8593d
|
[
"Apache-2.0"
] | null | null | null |
tools/SPGAN/main.py
|
by-liu/OpenUnReID
|
2260d8e16588a992631c9c84e6cee4304ae8593d
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import collections
import shutil
import sys
import time
from datetime import timedelta
from pathlib import Path
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
try:
# PyTorch >= 1.6 supports mixed precision training
from torch.cuda.amp import autocast
amp_support = True
except:
amp_support = False
from openunreid.apis import GANBaseRunner, set_random_seed, infer_gan
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import (
build_test_dataloader,
build_train_dataloader,
build_val_dataloader,
)
from openunreid.models import build_gan_model
from openunreid.models.losses import build_loss
from openunreid.models.utils.extract import extract_features
from openunreid.utils.config import (
cfg,
cfg_from_list,
cfg_from_yaml_file,
log_config_to_file,
)
from openunreid.utils.dist_utils import init_dist, synchronize
from openunreid.utils.file_utils import mkdir_if_missing
from openunreid.utils.logger import Logger
class SPGANRunner(GANBaseRunner):
def train_step(self, iter, batch):
data_src, data_tgt = batch[0], batch[1]
self.real_A = data_src['img'].cuda()
self.real_B = data_tgt['img'].cuda()
# Forward
self.fake_B = self.model['G_A'](self.real_A) # G_A(A)
self.fake_A = self.model['G_B'](self.real_B) # G_B(B)
self.rec_A = self.model['G_B'](self.fake_B) # G_B(G_A(A))
self.rec_B = self.model['G_A'](self.fake_A) # G_A(G_B(B))
# G_A and G_B
if iter % 2 == 0:
self.set_requires_grad([self.model['D_A'], self.model['D_B'], self.model['Metric']], False) # save memory
if self.scaler is None:
self.optimizer['G'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['G'].zero_grad()
if self._epoch > 1:
self.backward_G(retain_graph=True)
self.backward_GM()
else:
self.backward_G()
if self.scaler is None:
self.optimizer['G'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['G'])
# SiaNet for SPGAN
if self._epoch > 0:
self.set_requires_grad([self.model['Metric']], True)
if self.scaler is None:
self.optimizer['Metric'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['Metric'].zero_grad()
self.backward_M()
if self.scaler is None:
self.optimizer['Metric'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['Metric'])
# D_A and D_B
self.set_requires_grad([self.model['D_A'], self.model['D_B']], True)
# self.optimizer['D'].zero_grad()
# self.backward_D()
# self.optimizer['D'].step()
if self.scaler is None:
self.optimizer['D'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['D'].zero_grad()
self.backward_D()
if self.scaler is None:
self.optimizer['D'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['D'])
# save translated images
if self._rank == 0:
self.save_imgs(['real_A', 'real_B', 'fake_A', 'fake_B', 'rec_A', 'rec_B'])
return 0
def backward_GM(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A)
fake_B_metric = self.model['Metric'](self.fake_B)
# positive pairs
loss_pos = self.criterions['sia_G'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_G'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_G'](fake_B_metric, real_B_metric, 0) + \
self.criterions['sia_G'](fake_A_metric, real_A_metric, 0)
loss_M = (loss_pos + 0.5 * loss_neg) / 4.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_G']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_G': loss_M.item()}
self.train_progress.update(meters)
def backward_M(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A.detach())
fake_B_metric = self.model['Metric'](self.fake_B.detach())
# positive pairs
loss_pos = self.criterions['sia_M'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_M'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_M'](real_A_metric, real_B_metric, 0)
loss_M = (loss_pos + 2 * loss_neg) / 3.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_M']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_M': loss_M.item()}
self.train_progress.update(meters)
def parge_config():
parser = argparse.ArgumentParser(description="SPGAN training")
parser.add_argument("config", help="train config file path")
parser.add_argument(
"--work-dir", help="the dir to save logs and models", default=""
)
parser.add_argument("--resume-from", help="the checkpoint file to resume from")
parser.add_argument(
"--launcher",
type=str,
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--tcp-port", type=str, default="5017")
parser.add_argument(
"--set",
dest="set_cfgs",
default=None,
nargs=argparse.REMAINDER,
help="set extra config keys if needed",
)
args = parser.parse_args()
cfg_from_yaml_file(args.config, cfg)
assert len(list(cfg.TRAIN.datasets.keys()))==2, \
"the number of datasets for domain-translation training should be two"
cfg.launcher = args.launcher
cfg.tcp_port = args.tcp_port
if not args.work_dir:
args.work_dir = Path(args.config).stem
cfg.work_dir = cfg.LOGS_ROOT / args.work_dir
mkdir_if_missing(cfg.work_dir)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
shutil.copy(args.config, cfg.work_dir / "config.yaml")
return args, cfg
def main():
start_time = time.monotonic()
# init distributed training
args, cfg = parge_config()
dist = init_dist(cfg)
set_random_seed(cfg.TRAIN.seed, cfg.TRAIN.deterministic)
synchronize()
# init logging file
logger = Logger(cfg.work_dir / 'log.txt', debug=False)
sys.stdout = logger
print("==========\nArgs:{}\n==========".format(args))
log_config_to_file(cfg)
# build train loader
train_loader, _ = build_train_dataloader(cfg, joint=False)
# build model
model = build_gan_model(cfg)
for key in model.keys():
model[key].cuda()
if dist:
ddp_cfg = {
"device_ids": [cfg.gpu],
"output_device": cfg.gpu,
"find_unused_parameters": True,
}
for key in model.keys():
model[key] = torch.nn.parallel.DistributedDataParallel(model[key], **ddp_cfg)
elif cfg.total_gpus > 1:
for key in model.keys():
model[key] = torch.nn.DataParallel(model[key])
# build optimizer
optimizer = {}
optimizer['G'] = build_optimizer([model['G_A'], model['G_B']], **cfg.TRAIN.OPTIM)
optimizer['D'] = build_optimizer([model['D_A'], model['D_B']], **cfg.TRAIN.OPTIM)
optimizer['Metric'] = build_optimizer([model['Metric']], **cfg.TRAIN.OPTIM)
# build lr_scheduler
if cfg.TRAIN.SCHEDULER.lr_scheduler is not None:
lr_scheduler = [build_lr_scheduler(optimizer[key], **cfg.TRAIN.SCHEDULER) \
for key in optimizer.keys()]
else:
lr_scheduler = None
# build loss functions
criterions = build_loss(cfg.TRAIN.LOSS, cuda=True)
# build runner
runner = SPGANRunner(
cfg,
model,
optimizer,
criterions,
train_loader,
lr_scheduler=lr_scheduler,
meter_formats={"Time": ":.3f"}
)
# resume
if args.resume_from:
runner.resume(args.resume_from)
# start training
runner.run()
# load the latest model
# runner.resume(cfg.work_dir)
# final inference
test_loader, _ = build_val_dataloader(
cfg,
for_clustering=True,
all_datasets=True
)
# source to target
infer_gan(
cfg,
model['G_A'],
test_loader[0],
dataset_name=list(cfg.TRAIN.datasets.keys())[0]
)
# target to source
infer_gan(
cfg,
model['G_B'],
test_loader[1],
dataset_name=list(cfg.TRAIN.datasets.keys())[1]
)
# print time
end_time = time.monotonic()
print("Total running time: ", timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
main()
| 31.980066
| 117
| 0.60108
| 1,242
| 9,626
| 4.441224
| 0.18277
| 0.029369
| 0.027194
| 0.020305
| 0.372553
| 0.33992
| 0.320703
| 0.28227
| 0.235315
| 0.169507
| 0
| 0.005014
| 0.274777
| 9,626
| 300
| 118
| 32.086667
| 0.785131
| 0.060669
| 0
| 0.243363
| 0
| 0
| 0.074498
| 0.005884
| 0
| 0
| 0
| 0
| 0.004425
| 1
| 0.022124
| false
| 0
| 0.088496
| 0
| 0.123894
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7efcc01c957ea47bff3471d2bc47b9aa1291cde
| 1,907
|
py
|
Python
|
utility/data_download.py
|
LatvianPython/wind-experience
|
b634c020dff0a01152bb95b38e5f6f0e368d47f5
|
[
"MIT"
] | 2
|
2018-12-20T20:31:21.000Z
|
2018-12-29T14:51:42.000Z
|
utility/data_download.py
|
LatvianPython/wind-experience
|
b634c020dff0a01152bb95b38e5f6f0e368d47f5
|
[
"MIT"
] | null | null | null |
utility/data_download.py
|
LatvianPython/wind-experience
|
b634c020dff0a01152bb95b38e5f6f0e368d47f5
|
[
"MIT"
] | null | null | null |
import logging
import requests
import multiprocessing
import pathlib
from typing import List
from typing import Optional
from typing import Tuple
from typing import Dict
from joblib import delayed
from joblib import Parallel
from datetime import date
from datetime import timedelta
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def next_date(start_date=date(2018, 3, 1)):
days_to_download = abs(start_date - date.today()).days - 5
for date_offset in range(days_to_download):
yield start_date
start_date = start_date + timedelta(days=1)
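# Download every (file_path, url) pair in parallel across all CPU cores,
# skipping files that already exist and logging HTTP and timeout failures.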
def download_all(inputs: List[Tuple[pathlib.Path, str]], cookies: Optional[Dict]):
session = requests.session()
inputs[0][0].parent.mkdir(parents=True, exist_ok=True)
def download_single_link(file_path: pathlib.Path, url):
thread_nr = multiprocessing.current_process().name
thread_nr = thread_nr[thread_nr.rfind('-') + 1:]
file_name = file_path.stem
if file_path.is_file():
logger.info('{} {} already exists'.format(thread_nr, file_name))
return
try:
response = session.get(url=url, cookies=cookies)
except TimeoutError:
logger.critical('{} Timeout Error'.format(thread_nr))
return
content = response.content.decode('utf-8')
if response.status_code != 200:
            logger.critical('{} {} {}'.format(thread_nr, url, response.status_code))
            logger.critical('{} {}'.format(thread_nr, content))
return
else:
logger.info('{} {} {} OK'.format(thread_nr, file_name, response.status_code))
with open(str(file_path), mode='w', encoding='utf-8') as output_file:
output_file.write(content)
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(download_single_link)(*j) for j in inputs)
| 32.322034
| 89
| 0.677504
| 246
| 1,907
| 5.056911
| 0.406504
| 0.057878
| 0.05627
| 0.028939
| 0.080386
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010645
| 0.211851
| 1,907
| 58
| 90
| 32.87931
| 0.817033
| 0
| 0
| 0.066667
| 0
| 0
| 0.034609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.266667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7f2afbcc386f15d0c1677f0f7647f383dcc88bb
| 7,625
|
py
|
Python
|
model/net_qspline_A.py
|
jercoco/QSQF
|
6c435f8d4e1baf1937b06a52e63446f9a29f5ad8
|
[
"Apache-2.0"
] | null | null | null |
model/net_qspline_A.py
|
jercoco/QSQF
|
6c435f8d4e1baf1937b06a52e63446f9a29f5ad8
|
[
"Apache-2.0"
] | null | null | null |
model/net_qspline_A.py
|
jercoco/QSQF
|
6c435f8d4e1baf1937b06a52e63446f9a29f5ad8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 19:52:22 2020
#Plan A
@author: 18096
"""
'''Defines the neural network, loss function and metrics'''
#from functools import reduce
import torch
import torch.nn as nn
from torch.nn.functional import pad
from torch.autograd import Variable
import logging
logger = logging.getLogger('DeepAR.Net')
class Net(nn.Module):
def __init__(self, params,device):
'''
We define a recurrent network that predicts the future values
of a time-dependent variable based on past inputs and covariates.
'''
super(Net, self).__init__()
self.params = params
self.device = device
self.lstm = nn.LSTM(input_size=params.lstm_input_size,
hidden_size=params.lstm_hidden_dim,
num_layers=params.lstm_layers,
bias=True,
batch_first=False,
dropout=params.lstm_dropout)
        # initialize LSTM forget gate bias to be 1 as recommended by
# http://proceedings.mlr.press/v37/jozefowicz15.pdf
for names in self.lstm._all_weights:
for name in filter(lambda n: "bias" in n, names):
bias = getattr(self.lstm, name)
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
#Plan A:
#beta_01:[beta0,beta1]
self.beta_n1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_beta_1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_sigma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
self.pre_gamma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
# softmax to make sure Σu equals to 1
self.sigma = nn.Softmax(dim=1)
# softplus to make sure gamma is positive
self.gamma = nn.Softplus()
# softplus to make sure beta0 is positive
self.beta_1 = nn.Softplus()
def forward(self, x, hidden, cell):
_, (hidden, cell) = self.lstm(x, (hidden, cell))
# use h from all three layers to calculate mu and sigma
hidden_permute = \
hidden.permute(1, 2, 0).contiguous().view(hidden.shape[1], -1)
#Plan A:
beta_n1 = self.beta_n1(hidden_permute)
pre_beta_1 = self.pre_beta_1(hidden_permute)
beta_1 = self.beta_1(pre_beta_1)
beta_1=-beta_1
pre_sigma = self.pre_sigma(hidden_permute)
sigma = self.sigma(pre_sigma)
pre_gamma = self.pre_gamma(hidden_permute)
gamma = self.gamma(pre_gamma)
#Plan A:
return ((beta_n1,beta_1,sigma,torch.squeeze(gamma)),hidden,cell)
def init_hidden(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def init_cell(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def predict(self, x, hidden, cell, sampling=False):
"""
generate samples by sampling from
"""
batch_size = x.shape[1]
samples = torch.zeros(self.params.sample_times,batch_size,
self.params.pred_steps,
device=self.device)
for j in range(self.params.sample_times):
decoder_hidden = hidden
decoder_cell = cell
for t in range(self.params.pred_steps):
func_param,decoder_hidden,decoder_cell=\
self(x[self.params.pred_start+t].unsqueeze(0),
decoder_hidden,decoder_cell)
beta_n1,beta_1,sigma,gamma=func_param
#pred_cdf is a uniform ditribution
uniform = torch.distributions.uniform.Uniform(
torch.tensor([0.0], device=sigma.device),
torch.tensor([1.0], device=sigma.device))
pred_cdf=uniform.sample([batch_size])
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
ksi=pad(torch.cumsum(sigma,dim=1),(1,0))[:,:-1]
indices=ksi<pred_cdf
pred=(beta_N*pad(pred_cdf,(1,0),value=1)).sum(dim=1)
pred=pred+((pred_cdf-ksi).pow(2)*beta*indices).sum(dim=1)
samples[j, :, t] = pred
                # the prediction at step t is fed back as a covariate for steps t+1, ..., t+lag
for lag in range(self.params.lag):
if t<self.params.pred_steps-lag-1:
x[self.params.pred_start+t+1,:,0]=pred
sample_mu = torch.mean(samples, dim=0) # mean or median ?
sample_std = samples.std(dim=0)
return samples, sample_mu, sample_std
def loss_fn(func_param, labels: Variable):
beta_n1,beta_1,sigma,gamma=func_param
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
#calculate the maximum for each segment of the spline
ksi=torch.cumsum(sigma,dim=1)
df1=ksi.expand(sigma.shape[1],sigma.shape[0],sigma.shape[1]).T.clone()
df2=pad(ksi.T.unsqueeze(2),(1,0),'constant',value=1)
ksi=pad(ksi,(1,0))[:,:-1]
knots=df1-ksi
knots[knots<0]=0
knots=(df2*beta_N).sum(dim=2)+(knots.pow(2)*beta).sum(dim=2)
knots=pad(knots.T,(1,0))[:,:-1]#F(ksi_1~K)=0~max
diff=labels.view(-1,1)-knots
alpha_l=diff>0
alpha_A=torch.sum(alpha_l*beta,dim=1)
alpha_B=beta_N[:,1]-2*torch.sum(alpha_l*beta*ksi,dim=1)
alpha_C=beta_N[:,0]-labels+torch.sum(alpha_l*beta*ksi*ksi,dim=1)
    # since A may be zero, the roots are computed with different methods
not_zero=(alpha_A!=0)
alpha=torch.zeros_like(alpha_A)
    # guard against numerical error: the discriminant may come out slightly negative
idx=(alpha_B**2-4*alpha_A*alpha_C)<0#0
diff=diff.abs()
index=diff==(diff.min(dim=1)[0].view(-1,1))
index[~idx,:]=False
#index=diff.abs()<1e-4#0,1e-4 is a threshold
#idx=index.sum(dim=1)>0#0
alpha[idx]=ksi[index]#0
alpha[~not_zero]=-alpha_C[~not_zero]/alpha_B[~not_zero]
not_zero=~(~not_zero | idx)#0
delta=alpha_B[not_zero].pow(2)-4*alpha_A[not_zero]*alpha_C[not_zero]
alpha[not_zero]=(-alpha_B[not_zero]+torch.sqrt(delta))/(2*alpha_A[not_zero])
crps_1=labels*(2*alpha-1)
#lam2=lambda n:2*beta_N[:,n-1]*(1/n/(n+1)-alpha.pow(n)/n)
#crps_2=reduce(lambda a,b:a+b,[lam2(n) for n in range(1,2+1)])
crps_2=beta_N[:,0]*(1-2*alpha)+beta_N[:,1]*(1/3-alpha.pow(2))
crps_3=torch.sum(2*beta/((2+1)*(2+2))*(1-ksi).pow(2+2),dim=1)
crps_4=torch.sum(alpha_l*2*beta/(2+1)*(torch.unsqueeze(alpha,1)-ksi).pow(2+1),dim=1)
crps=crps_1+crps_2+crps_3-crps_4
crps = torch.mean(crps)
return crps
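if __name__ == '__main__':
    # --- Hedged usage sketch (illustrative, not part of the original file) ----
    # Smoke-tests Net.forward and loss_fn on random data.  Every value in
    # `params` below is an assumption made only for this example.
    from types import SimpleNamespace
    params = SimpleNamespace(lstm_input_size=4, lstm_hidden_dim=16,
                             lstm_layers=2, lstm_dropout=0.1, num_spline=5)
    model = Net(params, torch.device('cpu'))
    batch_size, seq_len = 8, 1
    x = torch.randn(seq_len, batch_size, params.lstm_input_size)
    hidden, cell = model.init_hidden(batch_size), model.init_cell(batch_size)
    with torch.no_grad():
        func_param, hidden, cell = model(x, hidden, cell)
        crps = loss_fn(func_param, torch.randn(batch_size))
    print('CRPS on random data:', float(crps))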
| 40.131579
| 89
| 0.571148
| 1,127
| 7,625
| 3.708075
| 0.193434
| 0.023929
| 0.026801
| 0.031826
| 0.240727
| 0.218713
| 0.191912
| 0.179947
| 0.16559
| 0.16559
| 0
| 0.041514
| 0.286033
| 7,625
| 189
| 90
| 40.343915
| 0.726121
| 0.138361
| 0
| 0.207692
| 0
| 0
| 0.00353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0
| 0.038462
| 0.015385
| 0.130769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7f3bbfe8ecf852146009a98359ee99148f7760a
| 11,124
|
py
|
Python
|
workflow_parser/datasource/log_engine.py
|
cyx1231st/workflow_parser
|
d2e78c191c75c7addda89e6e336be90f6ca9717d
|
[
"Apache-2.0"
] | null | null | null |
workflow_parser/datasource/log_engine.py
|
cyx1231st/workflow_parser
|
d2e78c191c75c7addda89e6e336be90f6ca9717d
|
[
"Apache-2.0"
] | null | null | null |
workflow_parser/datasource/log_engine.py
|
cyx1231st/workflow_parser
|
d2e78c191c75c7addda89e6e336be90f6ca9717d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Yingxin Cheng
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from collections import defaultdict
import os
from os import path
import sys
from .. import reserved_vars as rv
from ..service_registry import Component
from ..service_registry import ServiceRegistry
from . import Line
from . import Source
from .exc import LogError
class DriverPlugin(object):
__metaclass__ = ABCMeta
def __init__(self,
f_filter_logfile,
f_filter_logline,
extensions):
self._extensions = extensions
self.f_filter_logfile = f_filter_logfile
self.f_filter_logline = f_filter_logline
def _purge_dict_empty_values(self, var_dict):
for k in var_dict.keys():
if var_dict[k] in {None, ""}:
var_dict.pop(k)
def do_filter_logfile(self, f_dir, f_name):
assert isinstance(f_dir, str)
assert isinstance(f_name, str)
assert f_name in f_dir
# skip non-file
if not path.isfile(f_dir):
return False, None
# check file extension
ext_match = False
for ext in self._extensions:
if f_name.endswith("." + ext):
ext_match = True
if not ext_match:
return False, None
try:
var_dict = {}
ret = self.f_filter_logfile(f_dir, f_name, var_dict)
assert isinstance(ret, bool)
if ret:
# NOTE
# print("(LogDriver) loaded: %s" % f_dir)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
return True, var_dict
else:
# skip
return False, None
except Exception as e:
raise LogError(
"(LogDriver) `f_filter_logfile` error when f_name=%s"
% f_name, e)
def do_filter_logline(self, line, lino, where):
assert isinstance(line, str)
assert isinstance(lino, int)
assert isinstance(where, str)
try:
var_dict = {}
ret = self.f_filter_logline(line, var_dict)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
assert isinstance(ret, bool)
return ret, var_dict
except Exception as e:
raise LogError("(LogDriver) `f_filter_logline` error at %s@%d %s"
% (where, lino, line), e)
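# --- Hedged example (illustrative, not part of the original project) ----------
# A DriverPlugin is constructed from two callbacks plus a list of accepted
# file extensions.  Both callbacks must return a bool and may fill var_dict
# with string keys; the trivial parsing rules below are assumptions made only
# for this sketch.
#
#   def my_filter_logfile(f_dir, f_name, var_dict):
#       return True                      # accept every file of a known extension
#
#   def my_filter_logline(line, var_dict):
#       return bool(line.strip())        # accept non-empty lines
#
#   plugin = DriverPlugin(my_filter_logfile, my_filter_logline, ["log"])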
class FileDatasource(object):
def __init__(self, name, f_dir, vs, sr, plugin):
assert isinstance(sr, ServiceRegistry)
assert isinstance(plugin, DriverPlugin)
self.sr = sr
self.plugin = plugin
self.name = name
self.f_dir = f_dir
self.total_lines = 0
self.source = Source(name, f_dir, vs)
self.requests = set()
@property
def total_lineobjs(self):
return self.source.len_lineobjs
# def _buffer_lines(self, lines):
# buffer_lines = Heap(key=lambda a: a.seconds)
# prv_line = [None]
# def _flush_line(flush=None):
# while buffer_lines:
# if flush and buffer_lines.distance < flush:
# break
# line = buffer_lines.pop()
# if prv_line[0] is not None:
# prv_line[0].nxt_logline = line
# line.prv_logline = prv_line[0]
# assert prv_line[0] <= line
# yield line
# prv_line[0] = line
# for line in lines:
# assert isinstance(line, LogLine)
# buffer_lines.push(line)
# for line in _flush_line(1):
# yield line
# for line in _flush_line():
# yield line
def yield_lineobjs(self, targets_byname):
with open(self.f_dir, 'r') as reader:
for line in reader:
self.total_lines += 1
lino = self.total_lines
if_proceed, vs = self.plugin.do_filter_logline(
line, lino, self.name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
c_obj = self.sr.f_to_component(component)
if not c_obj:
raise LogError(
"Error in %s@%d %s: unrecognized component %s"
% (self.name, lino, line, component))
else:
vs[rv.COMPONENT] = c_obj
# collect requests
request = vs.get(rv.REQUEST)
if request is not None:
self.requests.add(request)
lineobj = self.source.append_line(
lino, line, vs, targets_byname)
yield lineobj
@classmethod
def create_byfolder(cls, log_folder, sr, plugin):
assert isinstance(log_folder, str)
assert isinstance(plugin, DriverPlugin)
datasources = []
# current_path = path.dirname(os.path.realpath(__file__))
current_path = os.getcwd()
log_folder = path.join(current_path, log_folder)
for f_name in os.listdir(log_folder):
f_dir = path.join(log_folder, f_name)
if_proceed, vs = plugin.do_filter_logfile(f_dir, f_name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
                    c_obj = sr.f_to_component(component)
if not c_obj:
raise LogError(
"Error in %s: unrecognized component %s"
% (f_name, component))
else:
vs[rv.COMPONENT] = c_obj
ds = cls(f_name.rsplit(".", 1)[0], f_dir, vs, sr, plugin)
datasources.append(ds)
return log_folder, datasources
# step1: load related log files
def loadsources(log_folder, sr, plugin):
print("Load data sources...")
log_folder, datasources = FileDatasource.create_byfolder(
log_folder, sr, plugin)
print("---------------")
#### summary ####
print("%d datasources from %s" % (len(datasources), log_folder))
print()
return datasources
# step2: read sources
def readsources(datasources, sr, report):
targets_byname = {}
targets_byhost = defaultdict(list)
targets_bycomponent = defaultdict(list)
threads = set()
print("Read data sources...")
for datasource in datasources:
for line_obj in datasource.yield_lineobjs(targets_byname):
pass
for targetobj in targets_byname.values():
if not isinstance(targetobj.target, str) or not targetobj.target:
raise LogError("%s has invalid target: %s" % (
                targetobj, targetobj.target))
if not isinstance(targetobj.host, str) or not targetobj.host:
raise LogError("%s has invalid host: %s" % (
                targetobj, targetobj.host))
if not isinstance(targetobj.component, Component):
raise LogError("%s has invalid component: %s" % (
                targetobj, targetobj.component))
targets_byhost[targetobj.host].append(targetobj)
targets_bycomponent[targetobj.component].append(targetobj)
threads.update(targetobj.thread_objs)
print("---------------")
#### summary ####
total_targets = len(targets_byname)
total_hosts = len(targets_byhost)
total_components = len(targets_bycomponent)
print("%d targets, %d hosts" %
(total_targets,
total_hosts))
total_lines = sum(datasource.total_lines for datasource in datasources)
total_lineobjs = sum(datasource.total_lineobjs
for datasource in datasources)
if not total_lines:
print("0 valid lines")
else:
print("%.2f%% valid: %d lines -> %d lineobjs"
% (float(total_lineobjs)/total_lines*100,
total_lines,
total_lineobjs))
for comp in sr.sr_components:
targets = targets_bycomponent.get(comp, [])
if not targets:
raise LogError("ERROR! miss component %s" % comp)
else:
component_threads = sum(len(target.thread_objs) for target in targets)
component_lines = sum(target.len_lineobjs for target in targets)
min_target_threads, max_target_threads = sys.maxsize, 0
min_target_lineobjs, max_target_lineobjs = sys.maxsize, 0
hosts_ = set()
for target_obj in targets:
hosts_.add(target_obj.host)
min_target_threads = min(min_target_threads, len(target_obj.thread_objs))
max_target_threads = max(max_target_threads, len(target_obj.thread_objs))
min_target_lineobjs = min(min_target_lineobjs,
target_obj.len_lineobjs)
max_target_lineobjs = max(max_target_lineobjs,
target_obj.len_lineobjs)
print(" %s: %d hosts, %d targets, %d threads, %d lines"
% (comp, len(hosts_), len(targets),
component_threads,
component_lines))
print(" per-target: %.3f[%d, %d] threads, %.3f[%d, %d] loglines"
% (component_threads/float(len(targets)),
min_target_threads,
max_target_threads,
component_lines/float(len(targets)),
min_target_lineobjs,
max_target_lineobjs))
print()
#### report #####
requests = set()
for ds in datasources:
requests.update(ds.requests)
report.step("read", line=total_lineobjs,
component=total_components,
host=total_hosts,
target=total_targets,
thread=len(threads),
request=len(requests))
return targets_byname
def proceed(logfolder, sr, plugin, report):
datasources = loadsources(logfolder, sr, plugin)
targetobjs = readsources(datasources, sr, report)
return targetobjs
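# --- Hedged usage sketch -------------------------------------------------------
# proceed() ties the two steps together: loadsources() discovers log files via
# the plugin, readsources() parses them and prints a per-component summary.
# The ServiceRegistry (sr), plugin and report objects are project-specific
# collaborators whose construction is outside this module, so the call below
# is illustrative only:
#
#   targetobjs = proceed("path/to/log_folder", sr, plugin, report)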
| 36.352941
| 89
| 0.567242
| 1,256
| 11,124
| 4.820064
| 0.194268
| 0.016188
| 0.009085
| 0.00892
| 0.209283
| 0.181698
| 0.126528
| 0.084242
| 0.084242
| 0.068715
| 0
| 0.003987
| 0.346188
| 11,124
| 305
| 90
| 36.472131
| 0.828406
| 0.138439
| 0
| 0.198157
| 0
| 0.004608
| 0.058558
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0.050691
| false
| 0.004608
| 0.059908
| 0.004608
| 0.170507
| 0.059908
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7f4992bb494868e3842c501796146ce55443adc
| 2,241
|
py
|
Python
|
checkpoint.py
|
GooLee0123/MBRNN
|
c313bc286b34a2f6e0cbc1ec0941c511ff8dc8d3
|
[
"MIT"
] | 1
|
2021-12-07T03:59:51.000Z
|
2021-12-07T03:59:51.000Z
|
checkpoint.py
|
GooLee0123/MBRNN
|
c313bc286b34a2f6e0cbc1ec0941c511ff8dc8d3
|
[
"MIT"
] | null | null | null |
checkpoint.py
|
GooLee0123/MBRNN
|
c313bc286b34a2f6e0cbc1ec0941c511ff8dc8d3
|
[
"MIT"
] | 1
|
2022-02-23T02:15:56.000Z
|
2022-02-23T02:15:56.000Z
|
import logging
import os
import shutil
import time
import torch
model_state = 'model_state.pt'
trainer_state = 'trainer_state.pt'
class Checkpoint():
def __init__(self, step, epoch, model, optim, path=None, opt=None):
self.step = step
self.epoch = epoch
self.model = model
self.optim = optim
self._path = path
self.opt = opt
self.logger = logging.getLogger(__name__)
@property
def path(self):
if self._path is None:
raise LookupError("The checkpoint has not been saved.")
return self._path
@classmethod
def load(cls, model, optim=None, opt=None):
logger = logging.getLogger(__name__)
all_times = sorted(os.listdir(opt.ckpt_fd), reverse=True)
fchckpt = os.path.join(opt.ckpt_fd, all_times[0])
logger.info("load checkpoint from %s" % fchckpt)
resume_model = torch.load(os.path.join(fchckpt, model_state),
map_location=opt.device)
resume_checkpoint = torch.load(os.path.join(fchckpt, trainer_state),
map_location=opt.device)
model.load_state_dict(resume_model)
if optim is not None:
optim.load_state_dict(resume_checkpoint['optimizer'])
return Checkpoint(step=resume_checkpoint['step'],
epoch=resume_checkpoint['epoch'],
model=model,
optim=optim,
path=opt.ckpt_fd)
def save(self):
date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
path = os.path.join(self.opt.ckpt_fd, date_time)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
torch.save(
{'epoch': self.epoch,
'step': self.step,
'optimizer': self.optim.state_dict()},
os.path.join(path, trainer_state))
torch.save(
self.model.state_dict(), os.path.join(path, model_state))
log_msg = "Validation loss being smaller than previous "
log_msg += "minimum, checkpoint is saved at %s" % path
self.logger.info(log_msg)
return path
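if __name__ == "__main__":
    # --- Hedged usage sketch (not part of the original repository) ------------
    # A smoke test with a toy model and a temporary checkpoint folder.  The
    # attribute names ckpt_fd/device mirror what save()/load() read above.
    import tempfile
    from types import SimpleNamespace

    import torch.nn as nn

    opt = SimpleNamespace(ckpt_fd=tempfile.mkdtemp(), device="cpu")
    model = nn.Linear(4, 2)
    optim = torch.optim.SGD(model.parameters(), lr=0.1)

    ckpt = Checkpoint(step=0, epoch=0, model=model, optim=optim, opt=opt)
    saved_path = ckpt.save()             # writes model_state.pt and trainer_state.pt
    restored = Checkpoint.load(model, optim=optim, opt=opt)
    print("restored epoch:", restored.epoch, "from", saved_path)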
| 30.69863
| 76
| 0.583222
| 271
| 2,241
| 4.627306
| 0.291513
| 0.033493
| 0.047847
| 0.041467
| 0.118022
| 0.07815
| 0
| 0
| 0
| 0
| 0
| 0.000648
| 0.311022
| 2,241
| 72
| 77
| 31.125
| 0.811529
| 0
| 0
| 0.071429
| 0
| 0
| 0.097278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.089286
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7f4e1c0cff8588ab79a5f138125b800da16d5b8
| 4,250
|
py
|
Python
|
test/eval_mines_color.py
|
alalagong/LEDNet
|
5dee5ee4edc75c24e6cda50dc1661d8f0b1e6469
|
[
"MIT"
] | 3
|
2019-08-13T07:21:23.000Z
|
2020-06-27T16:18:22.000Z
|
test/eval_mines_color.py
|
alalagong/LEDNet
|
5dee5ee4edc75c24e6cda50dc1661d8f0b1e6469
|
[
"MIT"
] | 1
|
2020-12-14T05:56:44.000Z
|
2020-12-14T05:56:44.000Z
|
test/eval_mines_color.py
|
alalagong/LEDNet
|
5dee5ee4edc75c24e6cda50dc1661d8f0b1e6469
|
[
"MIT"
] | 1
|
2019-11-13T12:09:58.000Z
|
2019-11-13T12:09:58.000Z
|
import numpy as np
import torch
import os
import cv2
import importlib
from dataset import *
from PIL import Image
from argparse import ArgumentParser
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize, Resize
from torchvision.transforms import ToTensor, ToPILImage
from dataset import cityscapes
from lednet import Net
from transform import Relabel, ToLabel, Colorize
import visdom
NUM_CHANNELS = 3
NUM_CLASSES = 20
# ******************* Test a single image ****************************
image_transform = ToPILImage()
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
def main(args):
modelpath = args.loadDir + args.loadModel
weightspath = args.loadDir + args.loadWeights
print("Loading model: " + modelpath)
print("Loading weights: " + weightspath)
model = Net(NUM_CLASSES)
model = torch.nn.DataParallel(model)
if (not args.cpu):
model = model.cuda()
# model.load_state_dict(torch.load(args.state))
# model.load_state_dict(torch.load(weightspath)) #not working if missing key
    def load_my_state_dict(model, state_dict):  # custom loader for when not all state-dict keys match
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
own_state[name].copy_(param)
return model
model = load_my_state_dict(model, torch.load(weightspath))
print("Model and weights LOADED successfully")
model.eval()
if (not os.path.exists(args.datadir)):
print("Error: datadir could not be loaded")
# loader = DataLoader(
# cityscapes('/home/liqi/PycharmProjects/LEDNet/4.png', input_transform_cityscapes, target_transform_cityscapes, subset=args.subset),
# num_workers=args.num_workers, batch_size=1 ,shuffle=False)
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
name ="4.png"
with open(image_path_city('/home/gongyiqun/images', name), 'rb') as f:
images = load_image(f).convert('RGB')
images = input_transform_cityscapes(images)
# For visualizer:
# must launch in other window "python3.6 -m visdom.server -port 8097"
# and access localhost:8097 to see it
if (args.visualize):
vis = visdom.Visdom()
if (not args.cpu):
images = images.cuda()
# labels = labels.cuda()
a=torch.unsqueeze(images,0)
inputs = Variable(a)
# targets = Variable(labels)
with torch.no_grad():
outputs = model(inputs)
label = outputs[0].max(0)[1].byte().cpu().data
# label_cityscapes = cityscapes_trainIds2labelIds(label.unsqueeze(0))
label_color = Colorize()(label.unsqueeze(0))
filenameSave = "./save_color/"+"Others/"+name
os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
# image_transform(label.byte()).save(filenameSave)
label_save = ToPILImage()(label_color)
label_save = label_save.resize((1241, 376), Image.BILINEAR)
# label_save = cv2.resize(label_save, (376, 1224),interpolation=cv2.INTER_AREA)
label_save.save(filenameSave)
if (args.visualize):
vis.image(label_color.numpy())
# print(step, filenameSave)
# for step, (images, labels, filename, filenameGt) in enumerate(loader):
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--state')
parser.add_argument('--loadDir', default="../save/logs(KITTI)/")
parser.add_argument('--loadWeights', default="model_best.pth")
parser.add_argument('--loadModel', default="lednet.py")
parser.add_argument('--subset', default="val") # can be val, test, train, demoSequence
parser.add_argument('--datadir', default="")
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--visualize', action='store_true')
main(parser.parse_args())
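# --- Hedged usage note ---------------------------------------------------------
# Example invocation (the directory layout below is an assumption, not part of
# the repository):
#   python eval_mines_color.py --loadDir "../save/logs(KITTI)/" \
#       --loadWeights model_best.pth --loadModel lednet.py --cpu
# The colorized prediction for the hard-coded input image is written under
# ./save_color/Others/.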
| 31.481481
| 141
| 0.675059
| 526
| 4,250
| 5.311787
| 0.365019
| 0.032212
| 0.060845
| 0.02219
| 0.099499
| 0.085183
| 0.065855
| 0.065855
| 0.065855
| 0.065855
| 0
| 0.026582
| 0.185647
| 4,250
| 134
| 142
| 31.716418
| 0.780699
| 0.248
| 0
| 0.146341
| 0
| 0
| 0.102992
| 0.006929
| 0.012195
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.195122
| 0
| 0.231707
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bdbd0dddd803ccbb1c990600d899d8ab9de0788
| 2,440
|
py
|
Python
|
tests/test_resource_linkage.py
|
firesock/pydantic-jsonapi
|
b7dc891892ab3439a71f78a9a5fd067c4d651ca8
|
[
"MIT"
] | null | null | null |
tests/test_resource_linkage.py
|
firesock/pydantic-jsonapi
|
b7dc891892ab3439a71f78a9a5fd067c4d651ca8
|
[
"MIT"
] | null | null | null |
tests/test_resource_linkage.py
|
firesock/pydantic-jsonapi
|
b7dc891892ab3439a71f78a9a5fd067c4d651ca8
|
[
"MIT"
] | null | null | null |
import pytest
from pytest import raises
from pydantic_jsonapi.resource_linkage import ResourceLinkage
from pydantic import BaseModel, ValidationError
class ThingWithLinkageData(BaseModel):
data: ResourceLinkage
class TestResourceLinks:
@pytest.mark.parametrize(
'linkage, message',
[
(
None,
'null is valid for empty to-one relationships',
),
(
[],
'empty list valid for empty to-many relationships.',
),
(
{'id': 'abc123', 'type': 'item', 'meta': None},
'single resource identifier valid for non-empty to-one relationships.',
),
(
[
{'id': 'abc123', 'type': 'item', 'meta': None},
{'id': 'def456', 'type': 'item', 'meta': None},
],
'array of resource identifiers valid for non-empty to-many relationships.',
),
],
)
def test_valid_possibilities(self, linkage, message):
structure_to_validate = {
'data': linkage
}
validated = ThingWithLinkageData(**structure_to_validate)
assert validated.dict() == structure_to_validate, message
def test_invalid_resource_identifier(self):
structure_to_validate = {
'data': {}
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data', 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data',), 'msg': 'value is not a valid list', 'type': 'type_error.list'},
]
def test_invalid_resource_identifier_array(self):
structure_to_validate = {
'data': [
{}
],
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data',), 'msg': 'value is not a valid dict', 'type': 'type_error.dict'},
{'loc': ('data', 0, 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 0, 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
]
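# --- Hedged usage note ---------------------------------------------------------
# These tests can be run directly with pytest, e.g.:
#   pytest tests/test_resource_linkage.py -v
# Outside of tests, the same pattern applies: declare a pydantic model with a
# `data: ResourceLinkage` field and validation will accept None, [], a single
# resource identifier, or a list of identifiers.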
| 34.857143
| 97
| 0.527869
| 225
| 2,440
| 5.586667
| 0.288889
| 0.061257
| 0.105807
| 0.063644
| 0.537788
| 0.422434
| 0.422434
| 0.363564
| 0.297534
| 0.26253
| 0
| 0.00674
| 0.331148
| 2,440
| 69
| 98
| 35.362319
| 0.76348
| 0
| 0
| 0.295082
| 0
| 0
| 0.257787
| 0
| 0
| 0
| 0
| 0
| 0.04918
| 1
| 0.04918
| false
| 0
| 0.065574
| 0
| 0.163934
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bdd2e9e5e9fd87db022a69e90bc6723cd058b21
| 2,046
|
py
|
Python
|
src/tensorflow/keras_cnn.py
|
del680202/MachineLearning-memo
|
29284ca24041969eeb59851a43ab6c28c685fae5
|
[
"Apache-2.0"
] | 4
|
2017-04-24T15:01:55.000Z
|
2019-11-03T11:11:54.000Z
|
src/tensorflow/keras_cnn.py
|
aasd145tw/MachineLearning-memo
|
29284ca24041969eeb59851a43ab6c28c685fae5
|
[
"Apache-2.0"
] | null | null | null |
src/tensorflow/keras_cnn.py
|
aasd145tw/MachineLearning-memo
|
29284ca24041969eeb59851a43ab6c28c685fae5
|
[
"Apache-2.0"
] | 12
|
2017-05-10T13:39:17.000Z
|
2019-12-15T14:01:05.000Z
|
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
batch_size = 128
nb_classes = 10
nb_epoch = 20
nb_data = 28*28
log_filepath = '/tmp/keras_log'
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1]*X_test.shape[2])
# rescale
X_train = X_train.astype(np.float32)
X_train /= 255
X_test = X_test.astype(np.float32)
X_test /= 255
# convert class vectors to binary class matrices (one hot vectors)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
old_session = KTF.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
# build model
model = Sequential()
model.add(Dense(512, input_shape=(nb_data,), init='normal',name='dense1'))
model.add(Activation('relu', name='relu1'))
model.add(Dropout(0.2, name='dropout1'))
model.add(Dense(512, init='normal', name='dense2'))
model.add(Activation('relu', name='relu2'))
model.add(Dropout(0.2, name='dropout2'))
model.add(Dense(10, init='normal', name='dense3'))
model.add(Activation('softmax', name='softmax1'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, histogram_freq=1)
cbks = [tb_cb]
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch = nb_epoch, verbose=1, callbacks=cbks)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
    print('Test accuracy:', score[1])
KTF.set_session(old_session)
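# --- Hedged usage note ---------------------------------------------------------
# Training writes TensorBoard event files to log_filepath ('/tmp/keras_log');
# they can be inspected with, for example:
#   tensorboard --logdir=/tmp/keras_log
# (The exact TensorBoard command-line flags depend on the installed version.)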
| 31
| 112
| 0.725806
| 322
| 2,046
| 4.416149
| 0.335404
| 0.042194
| 0.023207
| 0.016878
| 0.09564
| 0.029536
| 0
| 0
| 0
| 0
| 0
| 0.032167
| 0.13392
| 2,046
| 65
| 113
| 31.476923
| 0.770316
| 0.049853
| 0
| 0
| 0
| 0
| 0.080495
| 0.012384
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.195652
| 0
| 0.195652
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be156b5a97033cae1d2dce7ad771f398dbde2ad
| 4,942
|
py
|
Python
|
tests/blas/nodes/ger_test.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | 1
|
2021-07-26T07:58:06.000Z
|
2021-07-26T07:58:06.000Z
|
tests/blas/nodes/ger_test.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/blas/nodes/ger_test.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | 1
|
2021-03-04T13:01:48.000Z
|
2021-03-04T13:01:48.000Z
|
#!/usr/bin/env python3
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation.dataflow.streaming_memory import StreamingMemory
from dace.transformation.interstate.sdfg_nesting import InlineSDFG
from dace.transformation.interstate.fpga_transform_sdfg import FPGATransformSDFG
import numpy as np
import argparse
import scipy
import dace
from dace.memlet import Memlet
import dace.libraries.blas as blas
from dace.libraries.standard.memory import aligned_ndarray
def pure_graph(implementation, dtype, veclen):
m = dace.symbol("m")
n = dace.symbol("n")
vtype = dace.vector(dtype, veclen)
sdfg = dace.SDFG("ger_test")
state = sdfg.add_state("ger")
sdfg.add_symbol("alpha", dtype)
sdfg.add_array("x", shape=[m], dtype=dtype)
sdfg.add_array("y", shape=[n / veclen], dtype=vtype)
sdfg.add_array("A", shape=[m, n / veclen], dtype=vtype)
sdfg.add_array("res", shape=[m, n / veclen], dtype=vtype)
x = state.add_read("x")
y = state.add_read("y")
A = state.add_read("A")
res = state.add_write("res")
ger_node = blas.Ger(name="ger")
ger_node.implementation = implementation
state.add_memlet_path(x, ger_node, dst_conn="_x", memlet=Memlet("x[0:m]"))
state.add_memlet_path(y,
ger_node,
dst_conn="_y",
memlet=Memlet(f"y[0:n/{veclen}]"))
state.add_memlet_path(A,
ger_node,
dst_conn="_A",
memlet=Memlet(f"A[0:m, 0:n/{veclen}]"))
state.add_memlet_path(ger_node,
res,
src_conn="_res",
memlet=Memlet(f"res[0:m, 0:n/{veclen}]"))
return ger_node, state, sdfg
def fpga_graph(dtype, veclen, tile_size_x, tile_size_y):
ger_node, state, sdfg = pure_graph("FPGA", dtype, veclen)
ger_node.expand(sdfg, state, tile_size_x=tile_size_x, tile_size_y=tile_size_y)
sdfg.apply_transformations_repeated([FPGATransformSDFG, InlineSDFG])
sdfg.expand_library_nodes()
sdfg.apply_transformations_repeated(
[InlineSDFG, StreamingMemory], [{}, {
"storage": dace.StorageType.FPGA_Local
}])
return sdfg
def run_test(ger, target):
x = np.ndarray(m, dtype=np.float32)
y = np.ndarray(n, dtype=np.float32)
A = np.ndarray((m, n), dtype=np.float32)
res = A.copy()
ref = res.copy()
x[:] = np.random.rand(m).astype(np.float32)
y[:] = np.random.rand(n).astype(np.float32)
A[:] = np.random.rand(m, n).astype(np.float32)
ger(alpha=alpha, x=x, y=y, A=A, res=res, m=m, n=n)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=A)
diff = np.linalg.norm(np.subtract(res, ref))
if diff >= args.eps * n * m:
raise RuntimeError(
"Unexpected result returned from ger rank 1 operation: "
"got:\n{}\nexpected:\n{} on {}".format(A, ref, target))
else:
print("Ok")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("N", type=int, nargs="?", default=256)
parser.add_argument("M", type=int, nargs="?", default=512)
parser.add_argument("tile_size_x", type=int, nargs="?", default=16)
parser.add_argument("tile_size_y", type=int, nargs="?", default=32)
parser.add_argument("alpha", type=np.float32, nargs="?", default=1.0)
parser.add_argument("--target", dest="target", default="pure")
parser.add_argument("--eps", type=float, default=1e-6)
parser.add_argument("--veclen", type=int, default=8)
args = parser.parse_args()
n = args.N
m = args.M
tile_size_x = args.tile_size_x
tile_size_y = args.tile_size_y
alpha = args.alpha
veclen = args.veclen
if args.target == "pure":
ger_node, state, sdfg = pure_graph("pure", dace.float32, veclen)
ger_node.expand(sdfg, state)
sdfg.apply_transformations_repeated([InlineSDFG])
elif args.target == "fpga":
sdfg = fpga_graph(dace.float32, veclen, tile_size_x, tile_size_y)
else:
print("Unsupported target")
exit(-1)
x = aligned_ndarray(np.random.rand(m).astype(np.float32), alignment=4*veclen)
y = aligned_ndarray(np.random.rand(n).astype(np.float32), alignment=4*veclen)
A = aligned_ndarray(np.random.rand(m, n).astype(np.float32), alignment=4*veclen)
res = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
ref = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
res[:] = A[:]
ref[:] = A[:]
sdfg(x=x, y=y, A=A, res=res, m=dace.int32(m), n=dace.int32(n), alpha=alpha)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=ref)
diff = np.linalg.norm(res - ref)
if diff >= args.eps * n * m:
raise RuntimeError(f"Validation failed: {diff}")
else:
print("Validation successful.")
| 33.849315
| 84
| 0.633347
| 710
| 4,942
| 4.256338
| 0.202817
| 0.037062
| 0.045003
| 0.021509
| 0.343812
| 0.287889
| 0.216744
| 0.11681
| 0.093978
| 0.085374
| 0
| 0.016766
| 0.2155
| 4,942
| 145
| 85
| 34.082759
| 0.762703
| 0.019223
| 0
| 0.06422
| 0
| 0
| 0.072667
| 0.004748
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027523
| false
| 0
| 0.091743
| 0
| 0.137615
| 0.027523
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be16c8b647df2316a1c8f8f394a926e8273c86d
| 1,925
|
py
|
Python
|
spp.py
|
ninfueng/torch-cifar
|
f829c3375a9d9823cef4659f8bdfbd3800d51e80
|
[
"MIT"
] | null | null | null |
spp.py
|
ninfueng/torch-cifar
|
f829c3375a9d9823cef4659f8bdfbd3800d51e80
|
[
"MIT"
] | null | null | null |
spp.py
|
ninfueng/torch-cifar
|
f829c3375a9d9823cef4659f8bdfbd3800d51e80
|
[
"MIT"
] | null | null | null |
import math
from typing import List, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
@torch.jit.script
def spatial_pyramid_pool(
input: Tensor, bins: Union[int, List[int]], mode: str = "max"
) -> Tensor:
"""Spatial Pyramid Pooling: https://arxiv.org/pdf/1406.4729.pdf
Args:
input (Tensor): an input tensor expected from the convolutional layer.
bins (List[int]): a list of integer of preferred size of outputs.
mode (str): how to reduce the spatial space.
Returns:
        outputs (Tensor): a flattened tensor of size
            (batch, bins[0]*bins[0] + bins[1]*bins[1] + ...).
"""
assert mode in ["max", "mean", "average", "avg"]
b, _, h, w = input.shape
bins = [bins] if isinstance(bins, int) else bins
outputs = []
for bin_ in bins:
h_kernel = math.ceil(h / bin_)
w_kernel = math.ceil(w / bin_)
h_stride = math.floor(h / bin_)
w_stride = math.floor(w / bin_)
if mode == "max":
output = F.max_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
else:
output = F.avg_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
output = output.view(b, -1)
outputs.append(output)
outputs = torch.cat(outputs, dim=-1)
return outputs
class SpaitalPyramidPool(nn.Module):
def __init__(self, bins: Union[int, List[int]], mode: str = "max") -> None:
super().__init__()
self.bins = bins
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return spatial_pyramid_pool(input, bins=self.bins, mode=self.mode)
if __name__ == "__main__":
input = torch.zeros(1, 512, 13, 13)
output = spatial_pyramid_pool(input, [1, 2, 3], "max")
print(output.shape)
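# --- Hedged usage note ---------------------------------------------------------
# A typical (illustrative) placement of the module wrapper is between the last
# convolutional block of a CNN and its classifier, so variable-sized feature
# maps become a fixed-length vector:
#   pool = SpaitalPyramidPool(bins=[1, 2, 4], mode="max")
#   flat = pool(conv_features)   # shape: (batch, C * (1 + 4 + 16))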
| 29.166667
| 88
| 0.603636
| 264
| 1,925
| 4.234848
| 0.356061
| 0.050089
| 0.048301
| 0.061717
| 0.150268
| 0.150268
| 0.150268
| 0.150268
| 0.09839
| 0.09839
| 0
| 0.019176
| 0.268571
| 1,925
| 65
| 89
| 29.615385
| 0.774858
| 0.195844
| 0
| 0.047619
| 0
| 0
| 0.02452
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 1
| 0.071429
| false
| 0
| 0.142857
| 0.02381
| 0.285714
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be1d0ad6c2cd6a6b3082cd64ad7f9633b3033de
| 21,417
|
py
|
Python
|
src/SparseSC/utils/AzureBatch/azure_batch_client.py
|
wofein/SparseSC
|
fd8125015c65829458bfee2ae94c24981112d2d8
|
[
"MIT"
] | null | null | null |
src/SparseSC/utils/AzureBatch/azure_batch_client.py
|
wofein/SparseSC
|
fd8125015c65829458bfee2ae94c24981112d2d8
|
[
"MIT"
] | null | null | null |
src/SparseSC/utils/AzureBatch/azure_batch_client.py
|
wofein/SparseSC
|
fd8125015c65829458bfee2ae94c24981112d2d8
|
[
"MIT"
] | null | null | null |
"""
usage requires these additional modules
pip install azure-batch azure-storage-blob jsonschema pyyaml && pip install git+https://github.com/microsoft/SparseSC.git@ad4bf27edb28f517508f6934f21eb65d17fb6543 && scgrad start
usage:
from SparseSC import fit, aggregate_batch_results
from SparseSC.utils.azure_batch_client import BatchConfig, run
_TIMESTAMP = datetime.utcnow().strftime("%Y%m%d%H%M%S")
BATCH_DIR= "path/to/my/batch_config/"
fit(x=x,..., batchDir=BATCH_DIR)
my_config = BatchConfig(
BATCH_ACCOUNT_NAME="MySecret",
BATCH_ACCOUNT_KEY="MySecret",
BATCH_ACCOUNT_URL="MySecret",
STORAGE_ACCOUNT_NAME="MySecret",
STORAGE_ACCOUNT_KEY="MySecret",
POOL_ID="my-compute-pool",
POOL_NODE_COUNT=0,
POOL_LOW_PRIORITY_NODE_COUNT=20,
POOL_VM_SIZE="STANDARD_A1_v2",
DELETE_POOL_WHEN_DONE=False,
JOB_ID="my-job" + _TIMESTAMP,
DELETE_JOB_WHEN_DONE=False,
CONTAINER_NAME="my-blob-container",
BATCH_DIRECTORY=BATCH_DIR,
)
run(my_config)
fitted_model = aggregate_batch_results("path/to/my/batch_config")
"""
# pylint: disable=differing-type-doc, differing-param-doc, missing-param-doc, missing-raises-doc, missing-return-doc
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import pathlib
import importlib
from collections import defaultdict
import azure.storage.blob as azureblob
from azure.storage.blob.models import ContainerPermissions
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batch_auth
import azure.batch.models as models
from SparseSC.cli.stt import get_config
from ..print_progress import print_progress
from .BatchConfig import BatchConfig, validate_config
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from .constants import (
_STANDARD_OUT_FILE_NAME,
_CONTAINER_OUTPUT_FILE,
_CONTAINER_INPUT_FILE,
_BATCH_CV_FILE_NAME,
)
FOLD_FILE_PATTERN = "fold_{}.yaml"
# pylint: disable=bad-continuation, invalid-name, protected-access, line-too-long, fixme
sys.path.append(".")
sys.path.append("..")
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def build_output_sas_url(config, _blob_client):
"""
build a sas token for the output container
"""
sas_token = _blob_client.generate_container_shared_access_signature(
config.CONTAINER_NAME,
ContainerPermissions.READ
+ ContainerPermissions.WRITE
+ ContainerPermissions.DELETE
+ ContainerPermissions.LIST,
datetime.datetime.utcnow() + datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS),
start=datetime.datetime.utcnow(),
)
_sas_url = "https://{}.blob.core.windows.net/{}?{}".format(
config.STORAGE_ACCOUNT_NAME, config.CONTAINER_NAME, sas_token
)
return _sas_url
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
:param batch_exception:
"""
print("-------------------------------------------")
print("Exception encountered:")
if (
batch_exception.error
and batch_exception.error.message
and batch_exception.error.message.value
):
print(batch_exception.error.message.value)
if batch_exception.error.values:
print()
for mesg in batch_exception.error.values:
print("{}:\t{}".format(mesg.key, mesg.value))
print("-------------------------------------------")
def build_output_file(container_sas_url, fold_number):
"""
Uploads a local file to an Azure Blob storage container.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
# where to store the outputs
container_dest = models.OutputFileBlobContainerDestination(
container_url=container_sas_url, path=FOLD_FILE_PATTERN.format(fold_number)
)
dest = models.OutputFileDestination(container=container_dest)
# under what conditions should you attempt to extract the outputs?
upload_options = models.OutputFileUploadOptions(
upload_condition=models.OutputFileUploadCondition.task_success
)
# https://docs.microsoft.com/en-us/azure/batch/batch-task-output-files#specify-output-files-for-task-output
return models.OutputFile(
file_pattern=_CONTAINER_OUTPUT_FILE,
destination=dest,
upload_options=upload_options,
)
def upload_file_to_container(block_blob_client, container_name, file_path, duration_hours=24):
"""
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
print("Uploading file {} to container [{}]...".format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name, blob_name, file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=duration_hours),
)
sas_url = block_blob_client.make_blob_url(
container_name, blob_name, sas_token=sas_token
)
return models.ResourceFile(http_url=sas_url, file_path=_CONTAINER_INPUT_FILE)
def create_pool(config, batch_service_client):
"""
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
"""
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
image_ref_to_use = models.ImageReference(
publisher="microsoft-azure-batch",
offer="ubuntu-server-container",
sku="16-04-lts",
version="latest",
)
if config.REGISTRY_USERNAME:
registry = batch.models.ContainerRegistry(
user_name=config.REGISTRY_USERNAME,
password=config.REGISTRY_PASSWORD,
registry_server=config.REGISTRY_SERVER,
)
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER],
container_registries=[registry],
)
else:
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER]
)
new_pool = batch.models.PoolAddParameter(
id=config.POOL_ID,
virtual_machine_configuration=batch.models.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
container_configuration=container_conf,
node_agent_sku_id="batch.node.ubuntu 16.04",
),
vm_size=config.POOL_VM_SIZE,
target_dedicated_nodes=config.POOL_NODE_COUNT,
target_low_priority_nodes=config.POOL_LOW_PRIORITY_NODE_COUNT,
)
batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
"""
Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print("Creating job [{}]...".format(job_id))
job_description = batch.models.JobAddParameter(
id=job_id, pool_info=batch.models.PoolInformation(pool_id=pool_id)
)
batch_service_client.job.add(job_description)
def add_tasks(
config,
_blob_client,
batch_service_client,
container_sas_url,
job_id,
_input_file,
count,
):
"""
Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
:param list input_files: The input files
:param output_container_sas_token: A SAS token granting write access to
the specified Azure Blob storage container.
"""
print("Adding {} tasks to job [{}]...".format(count, job_id))
tasks = list()
for fold_number in range(count):
output_file = build_output_file(container_sas_url, fold_number)
# command_line = '/bin/bash -c \'echo "Hello World" && echo "hello: world" > output.yaml\''
command_line = "/bin/bash -c 'stt {} {} {}'".format(
_CONTAINER_INPUT_FILE, _CONTAINER_OUTPUT_FILE, fold_number
)
task_container_settings = models.TaskContainerSettings(
image_name=config.DOCKER_CONTAINER
)
tasks.append(
batch.models.TaskAddParameter(
id="Task_{}".format(fold_number),
command_line=command_line,
resource_files=[_input_file],
output_files=[output_file],
container_settings=task_container_settings,
)
)
batch_service_client.task.add_collection(job_id, tasks)
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
"""
Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The id of the job whose tasks should be to monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
_start_time = datetime.datetime.now()
timeout_expiration = _start_time + timeout
# print( "Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end="",)
while datetime.datetime.now() < timeout_expiration:
sys.stdout.flush()
tasks = [t for t in batch_service_client.task.list(job_id)]
incomplete_tasks = [
task for task in tasks if task.state != models.TaskState.completed
]
hours, remainder = divmod((datetime.datetime.now() - _start_time).seconds, 3600)
minutes, seconds = divmod(remainder, 60)
print_progress(
len(tasks) - len(incomplete_tasks),
len(tasks),
prefix="Time elapsed {:02}:{:02}:{:02}".format(
int(hours), int(minutes), int(seconds)
),
decimals=1,
bar_length=min(len(tasks), 50),
)
error_codes = [t.execution_info.exit_code for t in tasks if t.execution_info and t.execution_info.exit_code ]
if error_codes:
codes = defaultdict(lambda : 0)
for cd in error_codes:
codes[cd] +=1
# import pdb; pdb.set_trace()
raise RuntimeError( "\nSome tasks have exited with a non-zero exit code including: " + ", ".join([ "{}({})".format(k,v) for k, v in codes.items() ] ))
if not incomplete_tasks:
print()
return True
time.sleep(1)
print()
raise RuntimeError(
"ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout)
)
def print_task_output(batch_service_client, job_id, encoding=None):
"""Prints the stdout.txt file for each task in the job.
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param str job_id: The id of the job with task output files to print.
"""
print("Printing task output...")
tasks = batch_service_client.task.list(job_id)
for task in tasks:
node_id = batch_service_client.task.get(job_id, task.id).node_info.node_id
print("Task: {}".format(task.id))
print("Node: {}".format(node_id))
stream = batch_service_client.file.get_from_task(
job_id, task.id, _STANDARD_OUT_FILE_NAME
)
file_text = _read_stream_as_string(stream, encoding)
print("Standard output:")
print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = "utf-8"
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError("could not write data to stream or decode bytes")
def _download_files(config, _blob_client, out_path, count):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
for i in range(count):
blob_name = FOLD_FILE_PATTERN.format(i)
if not blob_name in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
_blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
def _download_results(config, _blob_client, out_path, count, ptrn=FOLD_FILE_PATTERN):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
results = []
for i in range(count):
blob_name = ptrn.format(i)
if not blob_name in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
        _blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
        with open(out_path, "r") as blob:
            results.append(load(blob, Loader=Loader))
return results
def run(config: BatchConfig, wait=True) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:param boolean wait: If true, wait for the batch to complete and then
download the results to file
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print(
'Synthetic Controls Run "{}" start time: {}'.format(config.JOB_ID, start_time)
)
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
blob_client.create_container(config.CONTAINER_NAME, fail_on_exist=False)
CONTAINER_SAS_URL = build_output_sas_url(config, blob_client)
# The collection of data files that are to be processed by the tasks.
input_file_path = os.path.join(sys.path[0], _LOCAL_INPUT_FILE)
# Upload the data files.
input_file = upload_file_to_container(
blob_client, config.CONTAINER_NAME, input_file_path, config.STORAGE_ACCESS_DURATION_HRS
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Create the pool that will contain the compute nodes that will execute the
# tasks.
try:
create_pool(config, batch_client)
print("Created pool: ", config.POOL_ID)
except models.BatchErrorException:
print("Using pool: ", config.POOL_ID)
# Create the job that will run the tasks.
create_job(batch_client, config.JOB_ID, config.POOL_ID)
# Add the tasks to the job.
add_tasks(
config,
blob_client,
batch_client,
CONTAINER_SAS_URL,
config.JOB_ID,
input_file,
n_folds,
)
if not wait:
return
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
def load_results(config: BatchConfig) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print('Load result for job "{}" start time: {}'.format(config.JOB_ID, start_time))
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
if __name__ == "__main__":
# TODO: this is not an ideal API
config_module = importlib.__import__("config")
run(config_module.config)
| 34.487923
| 178
| 0.693561
| 2,748
| 21,417
| 5.167031
| 0.16885
| 0.024509
| 0.034228
| 0.008029
| 0.432636
| 0.384745
| 0.362138
| 0.344954
| 0.335657
| 0.335657
| 0
| 0.003865
| 0.214736
| 21,417
| 620
| 179
| 34.543548
| 0.840409
| 0.32026
| 0
| 0.251497
| 0
| 0
| 0.064404
| 0.009191
| 0
| 0
| 0
| 0.004839
| 0
| 1
| 0.041916
| false
| 0.002994
| 0.068862
| 0
| 0.131737
| 0.110778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be31bb2955f81221fbda20bbf33d2351c12d6c3
| 20,773
|
py
|
Python
|
covid19/COVID19/code/controller/main.py
|
zhanqingheng/COVID-19
|
d050ad2effedb9090865d1104ccd5c5d04343f53
|
[
"MIT"
] | 16
|
2020-06-08T10:14:13.000Z
|
2022-03-30T02:44:04.000Z
|
covid19/COVID19/code/controller/main.py
|
zhanqingheng/COVID-19
|
d050ad2effedb9090865d1104ccd5c5d04343f53
|
[
"MIT"
] | 1
|
2021-11-18T10:03:42.000Z
|
2021-11-18T10:03:42.000Z
|
covid19/COVID19/code/controller/main.py
|
zhanqingheng/COVID-19
|
d050ad2effedb9090865d1104ccd5c5d04343f53
|
[
"MIT"
] | 4
|
2021-03-06T04:44:03.000Z
|
2021-12-09T07:22:50.000Z
|
from flask import Flask, current_app
from flask import render_template
from flask import jsonify
from jieba.analyse import extract_tags
import string
from DB import chinaSQL
from DB import worldSQL
app = Flask(__name__, template_folder='../../web', static_folder='../../static')
@app.route('/', methods=["get", "post"])
def hello_world():
return render_template("china.html")
@app.route('/china', methods=["get", "post"])
def china():
return render_template("china.html")
@app.route('/world', methods=["get", "post"])
def world():
return render_template("world.html")
@app.route('/favicon.ico')
def favicon():
return current_app.send_static_file('image/favicon-32x32-sun.ico')
@app.route("/time")
def time():
data = chinaSQL.time()
return str(data[0])
@app.route("/chinaEightNumber")
def chinaEightNumber():
data = chinaSQL.chinaEightNumber()
return jsonify({"confirmTotal": data[0],
"healTotal": data[1],
"deadTotal": data[2],
"nowConfirmTotal": data[3],
"suspectTotal": data[4],
"nowSevereTotal": data[5],
"importedCaseTotal": data[6],
"noInfectTotal": data[7],
"confirmAdd": data[8],
"healAdd": data[9],
"deadAdd": data[10],
"nowConfirmAdd": data[11],
"suspectAdd": data[12],
"nowSevereAdd": data[13],
"importedCaseAdd": data[14],
"noInfectAdd": data[15]
})
@app.route('/chinaMap', methods=['GET'])
def chinaMap():
data = chinaSQL.chinaMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a, "value": b})
nowConfirmTotal.append({"name": a, "value": c})
confirmTotal.append({"name": a, "value": d})
healTotal.append({"name": a, "value": e})
deadTotal.append({"name": a, "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route('/chinaProvinceMap', methods=['GET'])
def chinaProvinceMap():
data = chinaSQL.chinaProvinceMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a + "市", "value": b})
nowConfirmTotal.append({"name": a + "市", "value": c})
confirmTotal.append({"name": a + "市", "value": d})
healTotal.append({"name": a + "市", "value": e})
deadTotal.append({"name": a + "市", "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route("/nationalTotal")
def nationalTotal():
data = chinaSQL.nationalTotal()
day, \
confirmChinaDayList, \
healChinaDayList, \
deadChinaDayList, \
importedCaseChinaDayList = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirmChinaDayList.append(b)
healChinaDayList.append(c)
deadChinaDayList.append(d)
importedCaseChinaDayList.append(e)
return jsonify({"day": day,
"confirmChinaDayList": confirmChinaDayList,
"healChinaDayList": healChinaDayList,
"deadChinaDayList": deadChinaDayList,
"importedCaseChinaDayList": importedCaseChinaDayList
})
@app.route("/dailyAdditionsNationwide")
def dailyAdditionsNationwide():
data = chinaSQL.dailyAdditionsNationwide()
day, \
confirmChinaDayAddList, \
healChinaDayAddList, \
deadChinaDayAddList, \
importedCaseChinaDayAddList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
confirmChinaDayAddList.append(b)
healChinaDayAddList.append(c)
deadChinaDayAddList.append(d)
importedCaseChinaDayAddList.append(e)
return jsonify({"day": day,
"confirmChinaDayAddList": confirmChinaDayAddList,
"healChinaDayAddList": healChinaDayAddList,
"deadChinaDayAddList": deadChinaDayAddList,
"importedCaseChinaDayAddList": importedCaseChinaDayAddList
})
@app.route("/dailyCasesNationwide")
def dailyCasesNationwide():
data = chinaSQL.dailyCasesNationwide()
day, \
suspectChinaDayList, \
noInfectChinaDayList, \
nowConfirmChinaDayList, \
nowSevereChinaDayList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
suspectChinaDayList.append(b)
noInfectChinaDayList.append(c)
nowConfirmChinaDayList.append(d)
nowSevereChinaDayList.append(e)
return jsonify({"day": day,
"suspectChinaDayList": suspectChinaDayList,
"noInfectChinaDayList": noInfectChinaDayList,
"nowConfirmChinaDayList": nowConfirmChinaDayList,
"nowSevereChinaDayList": nowSevereChinaDayList
})
@app.route("/nationalCumulativeCureMortalityRate")
def nationalCumulativeCureMortalityRate():
data = chinaSQL.nationalCumulativeCureMortalityRate()
day, \
healRateChinaDayList, \
deadRateChinaDayList = [], [], []
for a, b, c in data[7:]:
day.append(a.strftime("%m-%d"))
healRateChinaDayList.append(b)
deadRateChinaDayList.append(c)
return jsonify({"day": day,
"healRateChinaDayList": healRateChinaDayList,
"deadRateChinaDayList": deadRateChinaDayList
})
@app.route("/detailedDataByProvince")
def detailedDataByProvince():
data = chinaSQL.detailedDataByProvince()
provinceName, \
confirmTotal, \
healTotal, \
deadTotal, \
healRateTotal, \
deadRateTotal = [], [], [], [], [], []
for a, b, c, d, e, f in data:
provinceName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
healRateTotal.append(e)
deadRateTotal.append(f)
return jsonify({"provinceName": provinceName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal,
"healRateTotal": healRateTotal,
"deadRateTotal": deadRateTotal
})
@app.route("/cumulativeNumberOfConfirmedCasesInAllProvinces")
def cumulativeNumberOfConfirmedCasesInAllProvinces():
data = chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces()
provincedetails = []
for provinceName, confirmTotal in data:
provincedetails.append({"name": provinceName, "value": confirmTotal})
return jsonify({"data": provincedetails})
@app.route("/currentConfirmedDataInAllProvinces")
def currentConfirmedDataInAllProvinces():
data = chinaSQL.currentConfirmedDataInAllProvinces()
provinceName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
provinceName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"provinceName": provinceName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/existingDiagnosticClassificationInChina")
def existingDiagnosticClassificationInChina():
data = chinaSQL.existingDiagnosticClassificationInChina()
nowconfirmstatis = []
nowconfirmstatis.append({"name": '港澳台现存确诊', "value": data[0][0]})
nowconfirmstatis.append({"name": '境外输入现存确诊', "value": data[0][1]})
nowconfirmstatis.append({"name": '31省本土现有确诊', "value": data[0][2]})
return jsonify({"data": nowconfirmstatis})
@app.route("/totalNumberOfOverseasImportsFromTop10Provinces")
def totalNumberOfOverseasImportsFromTop10Provinces():
data = chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces()
importstatis = []
for province, importedCase in data:
importstatis.append({"name": province, "value": importedCase})
return jsonify({"data": importstatis})
@app.route("/eachProvinceComparesYesterdayData")
def eachProvinceComparesYesterdayData():
data = chinaSQL.eachProvinceComparesYesterdayData()
province, \
nowConfirm, \
confirmAdd, \
heal, \
dead, \
zero = [], [], [], [], [], []
for a, b, c, d, e, f in data:
province.append(a)
nowConfirm.append(b)
confirmAdd.append(c)
heal.append(d)
dead.append(e)
zero.append(f)
return jsonify({"province": province,
"nowConfirm": nowConfirm,
"confirmAdd": confirmAdd,
"heal": heal,
"dead": dead,
"zero": zero
})
@app.route("/hubeiNonHubeiNationalCumulativeData")
def hubeiNonHubeiNationalCumulativeData():
data = chinaSQL.hubeiNonHubeiNationalCumulativeData()
day, \
hubeiNowConfirm, \
hubeiHeal, \
hubeiDead, \
notHubeiNowConfirm, \
notHubeiHeal, \
notHubeiDead, \
countryNowConfirm, \
countryHeal, \
countryDead = [], [], [], [], [], [], [], [], [], []
for a, b, c, d, e, f, g, h, i, j in data:
day.append(a.strftime("%m-%d"))
hubeiNowConfirm.append(b)
hubeiHeal.append(c)
hubeiDead.append(d)
notHubeiNowConfirm.append(e)
notHubeiHeal.append(f)
notHubeiDead.append(g)
countryNowConfirm.append(h)
countryHeal.append(i)
countryDead.append(j)
return jsonify({"day": day,
"hubeiNowConfirm": hubeiNowConfirm,
"hubeiHeal": hubeiHeal,
"hubeiDead": hubeiDead,
"notHubeiNowConfirm": notHubeiNowConfirm,
"notHubeiHeal": notHubeiHeal,
"notHubeiDead": notHubeiDead,
"countryNowConfirm": countryNowConfirm,
"countryHeal": countryHeal,
"countryDead": countryDead
})
@app.route("/hubeiNonHubeiNationalCureMortalityRate")
def hubeiNonHubeiNationalCureMortalityRate():
data = chinaSQL.hubeiNonHubeiNationalCureMortalityRate()
day, \
hubeiHealRate, \
hubeiDeadRate, \
notHubeiHealRate, \
notHubeiDeadRate, \
countryHealRate, \
countryDeadRate = [], [], [], [], [], [], []
for a, b, c, d, e, f, g in data:
day.append(a.strftime("%m-%d"))
hubeiHealRate.append(b)
hubeiDeadRate.append(c)
notHubeiHealRate.append(d)
notHubeiDeadRate.append(e)
countryHealRate.append(f)
countryDeadRate.append(g)
return jsonify({"day": day,
"hubeiHealRate": hubeiHealRate,
"hubeiDeadRate": hubeiDeadRate,
"notHubeiHealRate": notHubeiHealRate,
"notHubeiDeadRate": notHubeiDeadRate,
"countryHealRate": countryHealRate,
"countryDeadRate": countryDeadRate
})
@app.route("/hubeiNonHubeiNationalDailyNew")
def hubeiNonHubeiNationalDailyNew():
data = chinaSQL.hubeiNonHubeiNationalDailyNew()
day, \
hubei, \
notHubei, \
country = [], [], [], []
for a, b, c, d in data[7:]:
day.append(a.strftime("%m-%d"))
hubei.append(b)
notHubei.append(c)
country.append(d)
return jsonify({"day": day,
"hubei": hubei,
"notHubei": notHubei,
"country": country
})
@app.route("/wuhanNotWuhanNotHubeiNewlyConfirmed")
def wuhanNotWuhanNotHubeiNewlyConfirmed():
data = chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed()
day, \
wuhan, \
notWuhan, \
notHubei = [], [], [], []
for a, b, c, d in data:
day.append(a.strftime("%m-%d"))
wuhan.append(b)
notWuhan.append(c)
notHubei.append(d)
return jsonify({"day": day,
"wuhan": wuhan,
"notWuhan": notWuhan,
"notHubei": notHubei
})
@app.route("/totalConfirmedTop20UrbanAreas")
def totalConfirmedTop20UrbanAreas():
data = chinaSQL.totalConfirmedTop20UrbanAreas()
cityName, \
deadRateTotal, \
healRateTotal = [], [], []
for a, b, c in data:
cityName.append(a)
deadRateTotal.append(b)
healRateTotal.append(c)
return jsonify({"cityName": cityName,
"deadRateTotal": deadRateTotal,
"healRateTotal": healRateTotal
})
@app.route("/existingConfirmedTop20UrbanAreas")
def existingConfirmedTop20UrbanAreas():
data = chinaSQL.existingConfirmedTop20UrbanAreas()
cityName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"cityName": cityName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/urbanDataOfHubeiProvince")
def urbanDataOfHubeiProvince():
data = chinaSQL.urbanDataOfHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/accumulativeDataExceptHubeiProvince")
def accumulativeDataExceptHubeiProvince():
data = chinaSQL.accumulativeDataExceptHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/provincesWithFatalCasesNationwide")
def provincesWithFatalCasesNationwide():
data = chinaSQL.provincesWithFatalCasesNationwide()
provincedetails = []
provincedetails.append({"name": "无死亡病例省份数量", "value": data[0][0]})
provincedetails.append({"name": "有死亡病例省份数量", "value": data[0][1]})
return jsonify({"data": provincedetails})
@app.route("/numberOfDeathsInCities")
def numberOfDeathsInCities():
data = chinaSQL.numberOfDeathsInCities()
dataCityCount = []
dataCityCount.append({"name": "无死亡病例城市数量", "value": data[0][0]})
dataCityCount.append({"name": "有死亡病例城市数量", "value": data[0][1]})
return jsonify({"data": dataCityCount})
@app.route("/outbreakOut")
def outbreakOut():
data = chinaSQL.outbreakOut()
d = []
for i in data:
k = i[0].rstrip(string.digits)
v = i[0][len(k):]
ks = extract_tags(k)
for j in ks:
if not j.isdigit():
d.append({"name": j, "value": v})
return jsonify({"kws": d})
@app.route("/worldFourNumber")
def worldFourNumber():
data = worldSQL.worldFourNumber()
return jsonify({"nowConfirm": data[0],
"confirm": data[1],
"heal": data[2],
"dead": data[3],
"nowConfirmAdd": data[4],
"confirmAdd": data[5],
"healAdd": data[6],
"deadAdd": data[7]
})
@app.route('/worldMapNoChina', methods=['GET'])
def worldMapNoChina():
data = worldSQL.worldMapNoChina()
nowConfirm, confirm, heal, dead = [], [], [], []
for a, b, c, d, e in data:
nowConfirm.append({"name": a, "value": b})
confirm.append({"name": a, "value": c})
heal.append({"name": a, "value": d})
dead.append({"name": a, "value": e})
data1 = worldSQL.worldMapChina()
nowConfirm.append({"name": "中国", "value": data1[0][0]})
confirm.append({"name": "中国", "value": data1[0][1]})
heal.append({"name": "中国", "value": data1[0][2]})
dead.append({"name": "中国", "value": data1[0][3]})
return jsonify({"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/globalCumulativeTrend")
def globalCumulativeTrend():
data = worldSQL.globalCumulativeTrend()
day, \
confirm, \
heal, \
dead, \
newAddConfirm = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirm.append(b)
heal.append(c)
dead.append(d)
newAddConfirm.append(e)
return jsonify({"day": day,
"confirm": confirm,
"heal": heal,
"dead": dead,
"newAddConfirm": newAddConfirm
})
@app.route("/globalCumulativeCureMortality")
def globalCumulativeCureMortality():
data = worldSQL.globalCumulativeCureMortality()
day, \
healRate, \
deadRate = [], [], []
for a, b, c in data:
day.append(a.strftime("%m-%d"))
healRate.append(b)
deadRate.append(c)
return jsonify({"day": day,
"healRate": healRate,
"deadRate": deadRate
})
@app.route("/foreignCumulativeDiagnosisTop10Countries")
def foreignCumulativeDiagnosisTop10Countries():
data = worldSQL.foreignCumulativeDiagnosisTop10Countries()
name, \
nowConfirm, \
confirm, \
heal, \
dead = [], [], [], [], []
for a, b, c, d, e in data:
name.append(a)
nowConfirm.append(b)
confirm.append(c)
heal.append(d)
dead.append(e)
return jsonify({"name": name,
"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/theTop10CountriesGrewFastestInSevenDays")
def theTop10CountriesGrewFastestInSevenDays():
data = worldSQL.theTop10CountriesGrewFastestInSevenDays()
nation, \
day7, \
day, \
rate = [], [], [], []
for a, b, c, d in data:
nation.append(a)
day7.append(b)
day.append(c)
rate.append(d)
return jsonify({"nation": nation,
"day7": day7,
"day0": day,
"rate": rate
})
@app.route("/overseasCountriesWithMoreThan10000ConfirmedCases")
def overseasCountriesWithMoreThan10000ConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases()
foreignlist = []
for name, confirm in data:
foreignlist.append({"name": name, "value": confirm})
return jsonify({"data": foreignlist})
@app.route("/overseasCountriesWithMoreThan10000HaveBeenConfirmedCases")
def overseasCountriesWithMoreThan10000HaveBeenConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases()
foreignlist = []
for name, nowConfirm in data:
foreignlist.append({"name": name, "value": nowConfirm})
return jsonify({"data": foreignlist})
@app.route("/newCasesInTheTop10CountriesWithin24Hours")
def newCasesInTheTop10CountriesWithin24Hours():
data = worldSQL.newCasesInTheTop10CountriesWithin24Hours()
nationAddConfirm = []
for nation, addConfirm in data:
nationAddConfirm.append({"name": nation, "value": addConfirm})
return jsonify({"data": nationAddConfirm})
@app.route("/theNumberOfForeignCountriesWithConfirmedCases")
def theNumberOfForeignCountriesWithConfirmedCases():
data = worldSQL.theNumberOfForeignCountriesWithConfirmedCases()
foreignlist = []
for continent, count in data:
foreignlist.append({"name": continent, "value": count})
return jsonify({"data": foreignlist})
if __name__ == '__main__':
app.run()
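The Flask application above only wires routes to the chinaSQL/worldSQL query helpers and returns JSON. As a hedged illustration (not part of the original repository), the sketch below shows how a client might consume one of these endpoints; the local host/port and the use of the requests library are assumptions about how the app is deployed.

# Hypothetical client-side sketch: query the /chinaEightNumber endpoint served
# by the Flask app above. Assumes the server was started locally (e.g. via
# `python main.py`) and listens on Flask's default http://127.0.0.1:5000.
import requests

resp = requests.get("http://127.0.0.1:5000/chinaEightNumber", timeout=5)
resp.raise_for_status()
stats = resp.json()
# The handler returns the 16 national totals/deltas keyed exactly as in the route above.
print(stats["confirmTotal"], stats["confirmAdd"])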
| 33.078025
| 98
| 0.588264
| 1,636
| 20,773
| 7.454156
| 0.127139
| 0.025584
| 0.00902
| 0.010824
| 0.262977
| 0.247642
| 0.18278
| 0.164002
| 0.134973
| 0.128085
| 0
| 0.009773
| 0.275935
| 20,773
| 627
| 99
| 33.130782
| 0.801011
| 0
| 0
| 0.326606
| 0
| 0
| 0.144611
| 0.052905
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07156
| false
| 0
| 0.040367
| 0.007339
| 0.183486
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be41a8ed3e94194a6131c0c94be533e83696d98
| 3,402
|
py
|
Python
|
contrib/cirrus/podbot.py
|
juhp/libpod
|
bc7afd6d71da4173e4894ff352667a25987fa2ea
|
[
"Apache-2.0"
] | 2
|
2021-09-20T00:29:06.000Z
|
2021-11-28T08:36:20.000Z
|
contrib/cirrus/podbot.py
|
juhp/libpod
|
bc7afd6d71da4173e4894ff352667a25987fa2ea
|
[
"Apache-2.0"
] | 2
|
2020-01-04T03:31:18.000Z
|
2021-05-17T09:54:03.000Z
|
contrib/cirrus/podbot.py
|
juhp/libpod
|
bc7afd6d71da4173e4894ff352667a25987fa2ea
|
[
"Apache-2.0"
] | 1
|
2019-04-08T21:58:07.000Z
|
2019-04-08T21:58:07.000Z
|
#!/usr/bin/env python3
# Simple and dumb script to send a message to the #podman IRC channel on freenode
# Based on example from: https://pythonspot.com/building-an-irc-bot/
import os
import time
import random
import errno
import socket
import sys
class IRC:
response_timeout = 10 # seconds
irc = socket.socket()
def __init__(self, server, nickname, channel):
self.server = server
self.nickname = nickname
self.channel = channel
self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _send(self, cmdstr):
self.irc.send(bytes(cmdstr + '\r\n', 'utf-8'))
def message(self, msg):
data = 'PRIVMSG {0} :{1}\r\n'.format(self.channel, msg)
print(data)
self._send(data)
@staticmethod
def fix_newlines(bufr):
return bufr.replace('\\r\\n', '\n')
def _required_response(self, needle, haystack):
start = time.time()
end = start + self.response_timeout
while time.time() < end:
if haystack.find(needle) != -1:
return (False, haystack)
time.sleep(0.1)
try:
haystack += str(self.irc.recv(4096, socket.MSG_DONTWAIT))
except socket.error as serr:
if serr.errno == errno.EWOULDBLOCK:
continue
raise # can't handle this
return (True, haystack) # Error
def connect(self, username, password):
# This is ugly as sin, but seems to be a working send/expect sequence
print("connecting to: {0}".format(self.server))
self.irc.connect((self.server, 6667)) #connects to the server
self._send("USER {0} {0} {0} :I am {0}".format(self.nickname))
self._send("NICK {0}".format(self.nickname))
err, haystack = self._required_response('End of /MOTD command.'
''.format(self.nickname), "")
if err:
print(self.fix_newlines(haystack))
print("Error connecting to {0}".format(self.server))
return True
print("Logging in as {0}".format(username))
self._send("PRIVMSG NickServ :IDENTIFY {0} {1}".format(username, password))
err, _ = self._required_response("You are now identified for", "")
if err:
print("Error logging in to {0} as {1}".format(self.server, username))
return True
print("Joining {0}".format(self.channel))
self._send("JOIN {0}".format(self.channel))
err, haystack = self._required_response("{0} {1} :End of /NAMES list."
"".format(self.nickname, self.channel),
haystack)
print(self.fix_newlines(haystack))
if err:
print("Error joining {0}".format(self.channel))
return True
return False
def quit(self):
print("Quitting")
self._send("QUIT :my work is done here")
self.irc.close()
if len(sys.argv) < 3:
print("Error: Must pass desired nick and message as parameters")
else:
irc = IRC("irc.freenode.net", sys.argv[1], "#podman")
err = irc.connect(*os.environ.get('IRCID', 'Big Bug').split(" ", 2))
if not err:
irc.message(" ".join(sys.argv[2:]))
time.sleep(5.0) # avoid join/quit spam
irc.quit()
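As a hedged usage sketch (not taken from the original script), the IRC helper class above can also be driven programmatically; the nickname, NickServ credentials, and message below are placeholder values.

# Hypothetical programmatic use of the IRC class defined above.
bot = IRC("irc.freenode.net", "examplenick", "#podman")
failed = bot.connect("examplenick", "example-password")  # placeholder credentials
if not failed:
    bot.message("hello from the podbot example")
    time.sleep(5.0)  # same join/quit-spam guard the script uses
bot.quit()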
| 34.363636
| 87
| 0.569959
| 424
| 3,402
| 4.507075
| 0.372642
| 0.057561
| 0.040293
| 0.028257
| 0.118263
| 0.030351
| 0
| 0
| 0
| 0
| 0
| 0.016793
| 0.299824
| 3,402
| 98
| 88
| 34.714286
| 0.785474
| 0.090829
| 0
| 0.103896
| 0
| 0
| 0.139475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0.038961
| 0.077922
| 0.012987
| 0.298701
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be723fadb484c2875b98748f51d456625b23262
| 5,251
|
py
|
Python
|
topopt/mechanisms/problems.py
|
arnavbansal2764/topopt
|
74d8f17568a9d3349632e23840a9dc5b0d6c4d1f
|
[
"MIT"
] | 53
|
2020-04-14T10:13:04.000Z
|
2022-02-24T03:16:57.000Z
|
topopt/mechanisms/problems.py
|
arnavbansal2764/topopt
|
74d8f17568a9d3349632e23840a9dc5b0d6c4d1f
|
[
"MIT"
] | 5
|
2020-11-12T23:56:30.000Z
|
2021-09-30T19:24:06.000Z
|
topopt/mechanisms/problems.py
|
arnavbansal2764/topopt
|
74d8f17568a9d3349632e23840a9dc5b0d6c4d1f
|
[
"MIT"
] | 15
|
2020-02-12T01:32:07.000Z
|
2022-02-20T02:44:55.000Z
|
"""Compliant mechanism synthesis problems using topology optimization."""
import numpy
import scipy.sparse
from ..problems import ElasticityProblem
from .boundary_conditions import MechanismSynthesisBoundaryConditions
from ..utils import deleterowcol
class MechanismSynthesisProblem(ElasticityProblem):
r"""
Topology optimization problem to generate compliant mechanisms.
:math:`\begin{aligned}
\max_{\boldsymbol{\rho}} \quad &
\{u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}\}\\
\textrm{subject to}: \quad & \mathbf{K}\mathbf{u} =
\mathbf{f}_\text{in}\\
& \sum_{e=1}^N v_e\rho_e \leq V_\text{frac},
\quad 0 < \rho_\min \leq \rho_e \leq 1,
\quad e=1, \dots, N.\\
\end{aligned}`
where :math:`\mathbf{l}` is a vector with the value 1 at the degree(s) of
freedom corresponding to the output point and with zeros at all other
places.
Attributes
----------
spring_stiffnesses: numpy.ndarray
The spring stiffnesses of the
actuator and output displacement.
Emin: float
The minimum stiffness of elements.
Emax: float
The maximum stiffness of elements.
"""
@staticmethod
def lk(E: float = 1.0, nu: float = 0.3) -> numpy.ndarray:
"""
Build the element stiffness matrix.
Parameters
----------
E:
Young's modulus of the material.
nu:
Poisson's ratio of the material.
Returns
-------
The element stiffness matrix for the material.
"""
return ElasticityProblem.lk(1e0, nu)
def __init__(
self, bc: MechanismSynthesisBoundaryConditions, penalty: float):
"""
Create the topology optimization problem.
Parameters
----------
nelx:
Number of elements in the x direction.
nely:
Number of elements in the y direction.
penalty:
Penalty value used to penalize fractional densities in SIMP.
bc:
Boundary conditions of the problem.
"""
super().__init__(bc, penalty)
self.Emin = 1e-6 # Minimum stiffness of elements
self.Emax = 1e2 # Maximum stiffness of elements
# Spring stiffnesses for the actuator and output displacement
self.spring_stiffnesses = numpy.full(
numpy.nonzero(self.f)[0].shape, 10.0)
def build_K(self, xPhys: numpy.ndarray, remove_constrained: bool = True
) -> scipy.sparse.coo.coo_matrix:
"""
Build the stiffness matrix for the problem.
Parameters
----------
xPhys:
The element densities used to build the stiffness matrix.
remove_constrained:
Should the constrained nodes be removed?
Returns
-------
The stiffness matrix for the mesh.
"""
# Build the stiffness matrix using inheritance
K = super().build_K(xPhys, remove_constrained=False).tocsc()
# Add spring stiffnesses
spring_ids = numpy.nonzero(self.f)[0]
K[spring_ids, spring_ids] += self.spring_stiffnesses
# K = (K.T + K) / 2. # Make sure the stiffness matrix is symmetric
# Remove constrained dofs from matrix and convert to coo
if remove_constrained:
K = deleterowcol(K, self.fixed, self.fixed)
return K.tocoo()
def compute_objective(self, xPhys: numpy.ndarray, dobj: numpy.ndarray
) -> float:
r"""
Compute the objective and gradient of the mechanism synthesis problem.
The objective is :math:`u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}`
where :math:`\mathbf{l}` is a vector with the value 1 at
the degree(s) of freedom corresponding to the output point and with
zeros at all other places. The gradient of the objective is
:math:`\begin{align}
u_\text{out} &= \mathbf{l}^T\mathbf{u} = \mathbf{l}^T\mathbf{u} +
\boldsymbol{\lambda}^T(\mathbf{K}\mathbf{u} - \mathbf{f})\\
\frac{\partial u_\text{out}}{\partial \rho_e} &=
(\mathbf{K}\boldsymbol{\lambda} + \mathbf{l})^T
\frac{\partial \mathbf u}{\partial \rho_e} +
\boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
= \boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
\end{align}`
where :math:`\mathbf{K}\boldsymbol{\lambda} = -\mathbf{l}`.
Parameters
----------
xPhys:
The density design variables.
dobj:
The gradient of the objective to compute.
Returns
-------
The objective of the compliant mechanism synthesis problem.
"""
# Setup and solve FE problem
self.update_displacements(xPhys)
u = self.u[:, 0][self.edofMat].reshape(-1, 8) # Displacement
λ = self.u[:, 1][self.edofMat].reshape(-1, 8) # Fixed vector (Kλ = -l)
obj = self.f[:, 1].T @ self.u[:, 0]
self.obje[:] = (λ @ self.KE * u).sum(1)
self.compute_young_moduli(xPhys, dobj) # Stores the derivative in dobj
dobj *= -self.obje
return obj
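Since lk() above simply forwards to the parent ElasticityProblem.lk() with E fixed to 1e0, it can be exercised on its own. A minimal sketch under that assumption (the expected 8x8 shape corresponds to the usual bilinear quadrilateral element and is an inference, not something stated in this file):

# Build the per-element stiffness matrix used by the mechanism-synthesis problem.
KE = MechanismSynthesisProblem.lk(E=1.0, nu=0.3)
print(KE.shape)  # expected: the square element stiffness matrix (8x8 for a bilinear quad)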
| 33.234177
| 79
| 0.591316
| 632
| 5,251
| 4.846519
| 0.28481
| 0.020568
| 0.013059
| 0.018283
| 0.274567
| 0.186419
| 0.153118
| 0.132876
| 0.110349
| 0.110349
| 0
| 0.008324
| 0.290802
| 5,251
| 157
| 80
| 33.44586
| 0.814178
| 0.572272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.138889
| 0
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be7ab6f787e652d44d15533e2b5246954d6801d
| 932
|
py
|
Python
|
tests/test_parse_icao24bit.py
|
Collen-Roller/arp
|
08eaa2dda3adb1dbd600597a6d03603669c8e06d
|
[
"MIT"
] | 2
|
2020-10-28T17:03:14.000Z
|
2021-01-27T10:44:33.000Z
|
tests/test_parse_icao24bit.py
|
Collen-Roller/arp
|
08eaa2dda3adb1dbd600597a6d03603669c8e06d
|
[
"MIT"
] | 8
|
2020-12-08T16:42:43.000Z
|
2020-12-29T00:41:33.000Z
|
tests/test_parse_icao24bit.py
|
Collen-Roller/arp
|
08eaa2dda3adb1dbd600597a6d03603669c8e06d
|
[
"MIT"
] | 1
|
2020-12-09T20:35:52.000Z
|
2020-12-09T20:35:52.000Z
|
import unittest
from flydenity import Parser
class TestParseIcao24Bit(unittest.TestCase):
def setUp(self):
self.parser = Parser()
def test_parse_simple(self):
match = self.parser.parse("3D2591", icao24bit=True)
self.assertEqual(match, {"nation": "Germany", "description": "general", "iso2": "DE", "iso3": "DEU"})
def test_parse_strict(self):
sloppy_reg_sloppy_parser = self.parser.parse("3DX", icao24bit=True, strict=False)
sloppy_reg_strict_parser = self.parser.parse("3DX", icao24bit=True, strict=True)
strict_reg_sloppy_parser = self.parser.parse("3D2591", icao24bit=True, strict=False)
strict_reg_strict_parser = self.parser.parse("3D2591", icao24bit=True, strict=True)
self.assertTrue(sloppy_reg_sloppy_parser == strict_reg_sloppy_parser == strict_reg_strict_parser != sloppy_reg_strict_parser)
if __name__ == "__main__":
unittest.main()
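Outside the unittest harness, the same parser call can be made directly; this sketch simply restates the case exercised by test_parse_simple above.

# Direct use of the flydenity Parser, mirroring test_parse_simple.
from flydenity import Parser

parser = Parser()
match = parser.parse("3D2591", icao24bit=True)
# Per the assertion above this yields:
# {"nation": "Germany", "description": "general", "iso2": "DE", "iso3": "DEU"}
print(match)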
| 35.846154
| 133
| 0.713519
| 115
| 932
| 5.469565
| 0.313043
| 0.09539
| 0.119237
| 0.133545
| 0.465819
| 0.394277
| 0.282989
| 0.282989
| 0
| 0
| 0
| 0.039693
| 0.162017
| 932
| 25
| 134
| 37.28
| 0.765685
| 0
| 0
| 0
| 0
| 0
| 0.081545
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.1875
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1be82da5cbe879b6b36fe90dd23217980058a69e
| 465
|
py
|
Python
|
ever/util/_main.py
|
Bobholamovic/ever
|
f38060674a40ed53072b9d9be99cc656a830398f
|
[
"Apache-2.0"
] | 22
|
2021-08-21T00:13:18.000Z
|
2022-03-28T19:38:10.000Z
|
ever/util/_main.py
|
Bobholamovic/ever
|
f38060674a40ed53072b9d9be99cc656a830398f
|
[
"Apache-2.0"
] | 2
|
2021-09-01T06:28:38.000Z
|
2021-12-06T07:17:57.000Z
|
ever/util/_main.py
|
Bobholamovic/ever
|
f38060674a40ed53072b9d9be99cc656a830398f
|
[
"Apache-2.0"
] | 6
|
2021-08-21T06:32:47.000Z
|
2022-02-10T07:41:29.000Z
|
import os
def create_project(path):
dirs = ['configs', 'module', 'data']
dirs = [os.path.join(path, d) for d in dirs]
for d in dirs:
os.makedirs(d)
train_script = r"""
import ever as er
def train(trainer_name):
trainer = er.trainer.get_trainer(trainer_name)()
trainer.run()
"""
with open(os.path.join(path, 'train.py'), 'w') as f:
f.write(train_script)
print('created project in {}'.format(path))
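A short usage sketch for create_project(); the target directory name is a placeholder, and the layout comment only restates what the function above creates.

# Hypothetical invocation of create_project() defined above.
create_project("my_experiment")
# Resulting layout, as produced by the function:
#   my_experiment/configs/
#   my_experiment/module/
#   my_experiment/data/
#   my_experiment/train.py   (written from train_script, defines train())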
| 19.375
| 56
| 0.597849
| 68
| 465
| 4
| 0.5
| 0.044118
| 0.073529
| 0.102941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247312
| 465
| 23
| 57
| 20.217391
| 0.777143
| 0
| 0
| 0
| 0
| 0
| 0.381466
| 0.081897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bea69b9a810613a8cdcc7d4cd5f8e74e2b87b61
| 687
|
py
|
Python
|
resthelper/tests/test_build_url.py
|
rklonner/resthelper
|
c129a7ff3efb5447aeb9794142c4d640261d962d
|
[
"MIT"
] | null | null | null |
resthelper/tests/test_build_url.py
|
rklonner/resthelper
|
c129a7ff3efb5447aeb9794142c4d640261d962d
|
[
"MIT"
] | null | null | null |
resthelper/tests/test_build_url.py
|
rklonner/resthelper
|
c129a7ff3efb5447aeb9794142c4d640261d962d
|
[
"MIT"
] | null | null | null |
import unittest
from resthelper.utils import build_restful_url
class TestBuildUrl(unittest.TestCase):
def test_is_restful_https_url(self):
url = build_restful_url('https://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'https://testuser@jenkins1.tttech.com/rest/1.0/request')
def test_is_restful_http_url(self):
url = build_restful_url('http://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'http://testuser@jenkins1.tttech.com/rest/1.0/request')
if __name__ == '__main__':
unittest.main()
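The helper under test can also be called directly; this sketch restates the HTTPS case from test_is_restful_https_url above.

# Direct call mirroring the first assertion above.
from resthelper.utils import build_restful_url

url = build_restful_url('https://jenkins1.tttech.com', 'testuser', '/rest/1.0/request')
assert url == 'https://testuser@jenkins1.tttech.com/rest/1.0/request'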
| 32.714286
| 68
| 0.622999
| 82
| 687
| 4.95122
| 0.353659
| 0.137931
| 0.167488
| 0.128079
| 0.586207
| 0.586207
| 0.463054
| 0.463054
| 0.275862
| 0.275862
| 0
| 0.023121
| 0.244541
| 687
| 21
| 69
| 32.714286
| 0.759152
| 0
| 0
| 0.266667
| 0
| 0
| 0.313953
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bee0a3b08699aa37d40800889d795e3cdf9fb23
| 2,918
|
py
|
Python
|
cwbot/kolextra/request/ItemDescriptionRequest.py
|
zeryl/RUcwbot
|
734716506066da599fcbc96d0a815a5e30f6e077
|
[
"BSD-3-Clause"
] | null | null | null |
cwbot/kolextra/request/ItemDescriptionRequest.py
|
zeryl/RUcwbot
|
734716506066da599fcbc96d0a815a5e30f6e077
|
[
"BSD-3-Clause"
] | 1
|
2019-04-15T02:48:19.000Z
|
2019-04-15T03:02:36.000Z
|
cwbot/kolextra/request/ItemDescriptionRequest.py
|
rlbond86/cwbot
|
2432a9c9d048b7600b53d5cb8f7ef608c6613258
|
[
"BSD-3-Clause"
] | null | null | null |
from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import re
class ItemDescriptionRequest(GenericRequest):
"Gets the description of an item and then parses various information from the response."
_itemIdPattern = re.compile(r'(?i)<!--\s*itemid:\s*(\d+)\s*-->')
def __init__(self, session, descId):
super(ItemDescriptionRequest, self).__init__(session)
self.url = session.serverURL + "desc_item.php?whichitem=%s" % descId
def parseResponse(self):
# Get the item name.
itemNamePattern = PatternManager.getOrCompilePattern("itemName")
match = itemNamePattern.search(self.responseText)
self.responseData["name"] = match.group(1)
# Get the item image.
imagePattern = PatternManager.getOrCompilePattern("itemImage")
match = imagePattern.search(self.responseText)
self.responseData["image"] = match.group(1)
# Get the item type.
typePattern = PatternManager.getOrCompilePattern("itemType")
match = typePattern.search(self.responseText)
if match:
self.responseData["type"] = match.group(1).rstrip()
# Get the autosell value.
autosellPattern = PatternManager.getOrCompilePattern("itemAutosell")
match = autosellPattern.search(self.responseText)
if match:
self.responseData["autosell"] = int(match.group(1))
else:
self.responseData["autosell"] = 0
# See if this is a cooking ingredient.
cookingPattern = PatternManager.getOrCompilePattern("isCookingIngredient")
match = cookingPattern.search(self.responseText)
if match:
self.responseData["isCookingIngredient"] = True
# See if the item is a cocktailcrafting ingredient.
cocktailcraftingPattern = PatternManager.getOrCompilePattern("isCocktailcraftingIngredient")
match = cocktailcraftingPattern.search(self.responseText)
if match:
self.responseData["isCocktailcraftingIngredient"] = True
# See if the item is a meatsmithing component.
meatsmithingPattern = PatternManager.getOrCompilePattern("isMeatsmithingComponent")
match = meatsmithingPattern.search(self.responseText)
if match:
self.responseData["isMeatsmithingComponent"] = True
# See if the item is a jewelrymaking component.
jewelrymakingPattern = PatternManager.getOrCompilePattern("isJewelrymakingComponent")
match = jewelrymakingPattern.search(self.responseText)
if match:
self.responseData["isJewelrymakingComponent"] = True
# See if the itemId is listed
match = self._itemIdPattern.search(self.responseText)
if match:
self.responseData["id"] = int(match.group(1))
else:
self.responseData["id"] = None
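As a loosely hedged sketch only: the class above follows the usual pykol GenericRequest pattern, so usage presumably resembles the snippet below. The session object, the descId value, and the doRequest() entry point are assumptions about the surrounding pykol API rather than anything shown in this file.

# Hypothetical usage sketch (not from the original file).
# Assumes `session` is an authenticated pykol Session and that GenericRequest
# exposes a doRequest() method returning the parsed responseData.
request = ItemDescriptionRequest(session, descId=123456789)  # placeholder descId
data = request.doRequest()
print(data.get("name"), data.get("autosell"), data.get("id"))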
| 42.911765
| 100
| 0.675805
| 267
| 2,918
| 7.344569
| 0.333333
| 0.08975
| 0.100969
| 0.085671
| 0.284549
| 0.245793
| 0.224375
| 0
| 0
| 0
| 0
| 0.002677
| 0.232008
| 2,918
| 68
| 101
| 42.911765
| 0.872378
| 0.128513
| 0
| 0.191489
| 0
| 0
| 0.153318
| 0.079329
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.06383
| 0
| 0.148936
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bef48d1f47271bb3d6c33f78c3cf6b32220029d
| 3,578
|
py
|
Python
|
VokeScan.py
|
DaduVoke/VokeScan
|
a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c
|
[
"MIT"
] | 2
|
2021-12-05T04:00:50.000Z
|
2022-03-24T17:53:26.000Z
|
VokeScan.py
|
DaduVoke/VokeScan
|
a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c
|
[
"MIT"
] | null | null | null |
VokeScan.py
|
DaduVoke/VokeScan
|
a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c
|
[
"MIT"
] | null | null | null |
import sys,time
def sprint(str):
for c in str + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(3./90)
from colorama import Fore, Back, Style
sprint (Fore.RED + "გამარჯობა. tool-ი შექმინლია ლევან ყიფიანი-DaduVoke-ის მიერ @2021")
import socket
import _thread
import time
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Core(object):
ipurl=0
mode=1024
menu1=False
f=None
network_speed="სიჩქარე"
menu2=False
def GetData(self, url):
self.url = url
try:
self.ipurl = socket.gethostbyname(self.url)
except Exception as e:
print ("თქვენ არასწორად შეიყვანეთ IP ან URL")
exit(0)
Core.ipurl=self.ipurl
print (22*" ",bcolors.OKGREEN,"=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=/VokeScaner=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=",bcolors.OKGREEN)
sprint('გთხოვთ აირჩიოთ 1 ან 2')
while Core.menu1 is not True:
choice = input("\n1 - მოკლე\n2 - გრძელი\n")
if choice == "1":
Core.mode=1024
menu=True
break
elif choice == "2":
Core.mode=64000
menu = True
break
else:
sprint("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
while Core.menu2 is not True:
sprint("მეორე ეტაპი! გთხოვთ აირჩიოთ გამოყენებული ინტერნეტის სიჩქარე (0.05(1) 0.03(2))")
choice = input("\n1 - მოკლე \n2 - გრძელი\n")
if choice == "1":
Core.network_speed=0.05
menu2=True
break
elif choice == "2":
Core.network_speed=0.3
menu2 = True
break
else:
print("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
def Start_Scan(self, port_start, port_end):
Core.f = open(Core.ipurl, "a")
try:
for x in range(port_start,port_end):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex((Core.ipurl,x))
if res == 0:
tmp="პორტი",x,"გახსნილია", socket.getservbyport(x)
tmp1=str(tmp[0])+" "+str(tmp[1])+" "+str(tmp[2])+" "+str(tmp[3])
print(bcolors.OKGREEN,tmp1)
Core.f.write(str(tmp)+"\n")
Core.f.close()
except Exception as e:
print (e)
try:
scan = Core()
scan.GetData(input("ჩაწერეთ IP ან მისამართი URL\n"))
print(bcolors.WARNING,"სიხშირე:",Core.mode,"\n სამიზნე:",Core.ipurl,"\n სკანერის სიჩქარე:",Core.network_speed,bcolors.ENDC)
print(bcolors.BOLD,"გთხოვთ დაიცადოთ რამდენიმე წამი...",bcolors.ENDC)
for count in range(0,Core.mode):
time.sleep(Core.network_speed)
_thread.start_new_thread(scan.Start_Scan, (count,count+1))
if count > Core.mode:
exit(0)
except Exception as e:
print (e)
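The core of the scanner above is a per-port connect_ex() probe. A minimal self-contained sketch of that single check using only the standard library (the host and port below are placeholders):

# Standalone sketch of the single-port probe used by Start_Scan() above.
import socket

host, port = "127.0.0.1", 80  # placeholder target
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1.0)
if sock.connect_ex((host, port)) == 0:
    print("port", port, "is open")
sock.close()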
| 18.162437
| 139
| 0.488262
| 398
| 3,578
| 4.344221
| 0.371859
| 0.034702
| 0.037016
| 0.031232
| 0.218045
| 0.192019
| 0.136495
| 0.136495
| 0.136495
| 0.136495
| 0
| 0.047359
| 0.386249
| 3,578
| 196
| 140
| 18.255102
| 0.739982
| 0
| 0
| 0.295455
| 0
| 0.011364
| 0.20911
| 0.023957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034091
| false
| 0
| 0.056818
| 0
| 0.272727
| 0.147727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bef4c913e56949ae48100d1d528ebecb2bb01d8
| 53,296
|
py
|
Python
|
agent/src/clacks/agent/objects/object.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | 2
|
2015-01-26T07:15:19.000Z
|
2015-11-09T13:42:11.000Z
|
agent/src/clacks/agent/objects/object.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | null | null | null |
agent/src/clacks/agent/objects/object.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | null | null | null |
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
The object base class.
"""
import copy
import zope.event
import pkg_resources
import os
from lxml import etree
from lxml.builder import E
from logging import getLogger
from zope.interface import Interface, implements
from clacks.common import Environment
from clacks.common.utils import N_, is_uuid
from clacks.common.components import PluginRegistry
from clacks.common.error import ClacksErrorHandler as C
from clacks.agent.objects.backend.registry import ObjectBackendRegistry
from clacks.agent.exceptions import ObjectException
# Status
STATUS_OK = 0
STATUS_CHANGED = 1
# Register the errors handled by us
C.register_codes(dict(
CREATE_NEEDS_BASE=N_("Creation of '%(location)s' lacks a base DN"),
READ_BACKEND_PROPERTIES=N_("Error reading properties for backend '%(backend)s'"),
ATTRIBUTE_BLOCKED_BY=N_("Attribute is blocked by %(source)s==%(value)s"),
ATTRIBUTE_READ_ONLY=N_("Attribute is read only"),
ATTRIBUTE_MANDATORY=N_("Attribute is mandatory"),
ATTRIBUTE_INVALID_CONSTANT=N_("Value is invalid - expected one of %(elements)s"),
ATTRIBUTE_INVALID_LIST=N_("Value is invalid - expected a list"),
ATTRIBUTE_INVALID=N_("Value is invalid - expected value of type '%(type)s'"),
ATTRIBUTE_CHECK_FAILED=N_("Value is invalid"),
ATTRIBUTE_NOT_UNIQUE=N_("Value is not unique (%(value)s)"),
ATTRIBUTE_NOT_FOUND=N_("Attribute not found"),
OBJECT_MODE_NOT_AVAILABLE=N_("Mode '%(mode)s' is not available for base objects"),
OBJECT_MODE_BASE_AVAILABLE=N_("Mode '%(mode)s' is only available for base objects"),
OBJECT_NOT_SUB_FOR=N_("Object of type '%(ext)s' cannot be added as to the '%(base)s' container"),
OBJECT_REMOVE_NON_BASE_OBJECT=N_("Cannot remove non base object"),
OBJECT_MOVE_NON_BASE_OBJECT=N_("Cannot move non base object"),
OBJECT_BASE_NO_RETRACT=N_("Base object cannot be retracted"),
FILTER_INVALID_KEY=N_("Invalid key '%(key)s' for filter '%(filter)s'"),
FILTER_MISSING_KEY=N_("Missing key '%(key)s' after processing filter '%(filter)s'"),
FILTER_NO_LIST=N_("Filter '%(filter)s' did not return a %(type)s value - a list was expected"),
ATTRIBUTE_DEPEND_LOOP=N_("Potential loop in attribute dependencies")
))
class Object(object):
"""
This class is the base class for all objects.
It contains getter and setter methods for the object
attributes and it is able to initialize itself by reading data from
backends.
It also contains the ability to execute the in- and out-filters for the
object properties.
All meta-classes for objects, created by the XML definitions, will inherit this class.
"""
_reg = None
_backend = None
_mode = False
_propsByBackend = {}
uuid = None
dn = None
orig_dn = None
log = None
createTimestamp = None
modifyTimestamp = None
myProperties = None
env = None
parent = None
owner = None
attributesInSaveOrder = None
def __saveOrder(self):
"""
Returns a list containing all attributes in the correct
save-order.
Due to the fact that some attributes depend on another,
we have to save some attributes first and then the others.
"""
data = self.__saveOrderHelper()
attrs = []
for level in sorted(data.keys(), reverse=True):
for attr in data[level]:
if attr not in attrs:
attrs.append(attr)
return attrs
def __saveOrderHelper(self, res=None, item=None, level=0):
"""
Helper method for '__saveOrder' to detect the dependency
depth (level) for an attribute
"""
if not res:
res = {}
if not level in res:
res[level] = []
if level == 10:
raise ValueError(C.make_error('ATTRIBUTE_DEPEND_LOOP'))
if not item:
for key in self.myProperties:
self.__saveOrderHelper(res, key, level + 1)
else:
if len(self.myProperties[item]['depends_on']):
for key in self.myProperties[item]['depends_on']:
self.__saveOrderHelper(res, key, level + 1)
res[level].append(item)
return res
def __init__(self, where=None, mode="update"):
self.env = Environment.getInstance()
# Instantiate Backend-Registry
self._reg = ObjectBackendRegistry.getInstance()
self.log = getLogger(__name__)
self.log.debug("new object instantiated '%s'" % type(self).__name__)
# Group attributes by Backend
propsByBackend = {}
props = getattr(self, '__properties')
self.myProperties = copy.deepcopy(props)
self.attributesInSaveOrder = self.__saveOrder()
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Load dynamic dropdown-values
if self.myProperties[key]['values_populate']:
cr = PluginRegistry.getInstance('CommandRegistry')
values = cr.call(self.myProperties[key]['values_populate'])
if type(values).__name__ == "dict":
self.myProperties[key]['values'] = values
else:
self.myProperties[key]['values'] = atypes['String'].convert_to(self.myProperties[key]['type'], values)
# Initialize an empty array for each backend
for be in self.myProperties[key]['backend']:
if be not in propsByBackend:
propsByBackend[be] = []
# Append property
propsByBackend[be].append(key)
self._propsByBackend = propsByBackend
self._mode = mode
# Initialize object using a DN
if where:
if mode == "create":
if is_uuid(where):
raise ValueError(C.make_error('CREATE_NEEDS_BASE', "base", location=where))
self.orig_dn = self.dn = where
else:
self._read(where)
# Set status to modified for attributes that do not have a value but are
# mandatory and have a default.
# This ensures that default values are passed to the out_filters and get saved
# afterwards.
# (Defaults will be passed to in-filters too, if they are not overwritten by _read())
for key in self.myProperties:
if not(self.myProperties[key]['value']) and self.myProperties[key]['default'] is not None and \
len(self.myProperties[key]['default']):
self.myProperties[key]['value'] = copy.deepcopy(self.myProperties[key]['default'])
if self.myProperties[key]['mandatory']:
self.myProperties[key]['status'] = STATUS_CHANGED
def set_foreign_value(self, attr, original):
self.myProperties[attr]['value'] = original['value']
self.myProperties[attr]['in_value'] = original['in_value']
self.myProperties[attr]['orig_value'] = original['orig_value']
def listProperties(self):
return self.myProperties.keys()
def getProperties(self):
return copy.deepcopy(self.myProperties)
def listMethods(self):
methods = getattr(self, '__methods')
return methods.keys()
def hasattr(self, attr):
return attr in self.myProperties
def _read(self, where):
"""
This method tries to initialize a object instance by reading data
from the defined backend.
Attributes will be grouped by their backend to ensure that only one
request per backend will be performed.
"""
# Generate missing values
if is_uuid(where):
#pylint: disable=E1101
if self._base_object:
self.dn = self._reg.uuid2dn(self._backend, where)
else:
self.dn = None
self.uuid = where
else:
self.dn = where
self.uuid = self._reg.dn2uuid(self._backend, where)
# Get last change timestamp
self.orig_dn = self.dn
if self.dn:
self.createTimestamp, self.modifyTimestamp = self._reg.get_timestamps(self._backend, self.dn)
# Load attributes for each backend.
# And then assign the values to the properties.
self.log.debug("object uuid: %s" % self.uuid)
for backend in self._propsByBackend:
try:
# Create a dictionary with all attributes we want to fetch
# {attribute_name: type, name: type}
info = dict([(k, self.myProperties[k]['backend_type']) for k in self._propsByBackend[backend]])
self.log.debug("loading attributes for backend '%s': %s" % (backend, str(info)))
be = ObjectBackendRegistry.getBackend(backend)
be_attrs = self._backendAttrs[backend] if backend in self._backendAttrs else None
attrs = be.load(self.uuid, info, be_attrs)
except ValueError as e:
raise ObjectException(C.make_error('READ_BACKEND_PROPERTIES', backend=backend))
# Assign fetched value to the properties.
for key in self._propsByBackend[backend]:
if key not in attrs:
self.log.debug("attribute '%s' was not returned by load" % key)
continue
# Keep original values, they may be overwritten in the in-filters.
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = attrs[key]
self.log.debug("%s: %s" % (key, self.myProperties[key]['value']))
# Once we've loaded all properties from the backend, execute the
# in-filters.
for key in self.myProperties:
# Skip loading in-filters for None values
if self.myProperties[key]['value'] is None:
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = []
continue
# Execute defined in-filters.
if len(self.myProperties[key]['in_filter']):
self.log.debug("found %s in-filter(s) for attribute '%s'" % (str(len(self.myProperties[key]['in_filter'])), key))
# Execute each in-filter
for in_f in self.myProperties[key]['in_filter']:
self.__processFilter(in_f, key, self.myProperties)
# Convert the received type into the target type if not done already
#pylint: disable=E1101
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Convert values from incoming backend-type to required type
if self.myProperties[key]['value']:
a_type = self.myProperties[key]['type']
be_type = self.myProperties[key]['backend_type']
# Convert all values to required type
if not atypes[a_type].is_valid_value(self.myProperties[key]['value']):
try:
self.myProperties[key]['value'] = atypes[a_type].convert_from(be_type, self.myProperties[key]['value'])
except Exception as e:
self.log.error("conversion of '%s' from '%s' to type '%s' failed: %s" % (key, be_type, a_type, str(e)))
else:
self.log.debug("converted '%s' from type '%s' to type '%s'!" % (key, be_type, a_type))
# Keep the initial value
self.myProperties[key]['last_value'] = self.myProperties[key]['orig_value'] = copy.deepcopy(self.myProperties[key]['value'])
def _delattr_(self, name):
"""
Deleter method for properties.
"""
if name in self.attributesInSaveOrder:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow to write to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Do not allow remove mandatory attributes
if self.myProperties[name]['mandatory']:
raise AttributeError(C.make_error('ATTRIBUTE_MANDATORY', name))
# If not already in removed state
if len(self.myProperties[name]['value']) != 0:
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = copy.deepcopy(self.myProperties[name]['value'])
self.myProperties[name]['value'] = []
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _setattr_(self, name, value):
"""
This is the setter method for object attributes.
Each given attribute value is validated with the given set of
validators.
"""
# Store non property values
try:
object.__getattribute__(self, name)
self.__dict__[name] = value
return
except AttributeError:
pass
# A none value was passed to clear the value
if value is None:
self._delattr_(name)
return
# Try to save as property value
if name in self.myProperties:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow to write to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Check if the given value has to match one out of a given list.
if len(self.myProperties[name]['values']) and value not in self.myProperties[name]['values']:
raise TypeError(C.make_error(
'ATTRIBUTE_INVALID_CONSTANT', name,
elements=", ".join(self.myProperties[name]['values'])))
# Set the new value
if self.myProperties[name]['multivalue']:
# Check if the new value is a list.
if type(value) != list:
raise TypeError(C.make_error('ATTRIBUTE_INVALID_LIST', name))
new_value = value
else:
new_value = [value]
# Eventually fixup value from incoming JSON string
s_type = self.myProperties[name]['type']
try:
new_value = self._objectFactory.getAttributeTypes()[s_type].fixup(new_value)
except Exception:
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Check if the new value is valid
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[s_type].is_valid_value(new_value):
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Validate value
if self.myProperties[name]['validator']:
props_copy = copy.deepcopy(self.myProperties)
res, error = self.__processValidator(self.myProperties[name]['validator'], name, new_value, props_copy)
if not res:
if len(error):
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED',
name, details=error))
else:
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED', name))
# Ensure that unique values stay unique. Let the backend test this.
#if self.myProperties[name]['unique']:
# backendI = ObjectBackendRegistry.getBackend(self.myProperties[name]['backend'])
# if not backendI.is_uniq(name, new_value):
# raise ObjectException(C.make_error('ATTRIBUTE_NOT_UNIQUE', name, value=value))
# Assign the properties new value.
self.myProperties[name]['value'] = new_value
self.log.debug("updated property value of [%s|%s] %s:%s" % (type(self).__name__, self.uuid, name, new_value))
# Update status if there's a change
t = self.myProperties[name]['type']
current = copy.deepcopy(self.myProperties[name]['value'])
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[t].values_match(self.myProperties[name]['value'], self.myProperties[name]['orig_value']):
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = current
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _getattr_(self, name):
"""
The getter method object attributes.
(It differentiates between object attributes and class-members)
"""
methods = getattr(self, '__methods')
# If the requested property exists in the object-attributes, then return it.
if name in self.myProperties:
# We can have single and multivalues, return the correct type here.
value = None
if self.myProperties[name]['multivalue']:
value = self.myProperties[name]['value']
else:
if len(self.myProperties[name]['value']):
value = self.myProperties[name]['value'][0]
return value
# The requested property-name seems to be a method, return the method reference.
elif name in methods:
def m_call(*args, **kwargs):
return methods[name]['ref'](self, *args, **kwargs)
return m_call
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def getTemplate(self, theme="default"):
"""
Return the template data - if any. Else None.
"""
return Object.getNamedTemplate(self.env, self._templates, theme)
@staticmethod
def getNamedTemplate(env, templates, theme="default"):
"""
Return the template data - if any. Else None.
"""
ui = []
# If there's a template file, try to find it
if templates:
for template in templates:
path = None
# Absolute path
if template.startswith(os.path.sep):
path = template
# Relative path
else:
# Find path
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', theme, template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', theme, template)
if not os.path.exists(path):
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', "default", template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', "default", template)
if not os.path.exists(path):
return None
with open(path, "r") as f:
_ui = f.read()
# Build new merged resource element
root = etree.fromstring(_ui)
new_resources = []
resources = root.find("resources")
for include in resources.findall("include"):
rc = include.get("location")
location = os.path.join(os.path.dirname(path), rc)
if not os.path.exists(location):
raise IOError(C.make_error("NO_SUCH_RESOURCE", resource=location))
res = ""
with open(location, "r") as f:
res = f.read()
for resource in etree.fromstring(res).findall("qresource"):
files = []
prefix = resource.get("prefix")
for f in resource.findall("file"):
files.append(E.file(os.path.join(prefix, unicode(f.text))))
new_resources.append(E.resource(*files, location=rc))
root.replace(root.find("resources"), E.resources(*new_resources))
ui.append(etree.tostring(root))
return ui
def getAttrType(self, name):
"""
Return the type of a given object attribute.
"""
if name in self.myProperties:
return self.myProperties[name]['type']
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def check(self, propsFromOtherExtensions=None):
"""
Checks whether everything is fine with the extension and its given values or not.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Check if _mode matches with the current object type
#pylint: disable=E1101
if self._base_object and not self._mode in ['create', 'remove', 'update']:
raise ObjectException(C.make_error('OBJECT_MODE_NOT_AVAILABLE', mode=self._mode))
if not self._base_object and self._mode in ['create', 'remove']:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
# Check if we are allowed to create this base object on the given base
if self._base_object and self._mode == "create":
base_type = self.get_object_type_by_dn(self.dn)
if not base_type:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
if self.__class__.__name__ not in self._objectFactory.getAllowedSubElementsForObject(base_type):
raise ObjectException(C.make_error('OBJECT_NOT_SUB_FOR',
ext=self.__class__.__name__,
base=base_type))
# Transfer values from other commit processes into ourselves
for key in self.attributesInSaveOrder:
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Transfer status into commit status
props[key]['commit_status'] = props[key]['status']
# Collect values by store and process the property filters
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Check if this attribute is blocked by another attribute and its value.
is_blocked = False
for bb in props[key]['blocked_by']:
if bb['value'] in props[bb['name']]['value']:
is_blocked = True
break
# Check if all required attributes are set. (Skip blocked once, they cannot be set!)
if not is_blocked and props[key]['mandatory'] and not len(props[key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', key))
# Process each and every out-filter with a clean set of input values,
# to avoid that return-values overwrite themselves.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Ensure that mandatory values are set
if props[prop_key]['mandatory'] and not len(props[prop_key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', prop_key))
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
return props
def commit(self, propsFromOtherExtensions=None):
"""
Commits changes of an object to the corresponding backends.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
self.check(propsFromOtherExtensions)
self.log.debug("saving object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Transfer status into commit status
for key in self.attributesInSaveOrder:
props[key]['commit_status'] = props[key]['status']
# Transfer values from other commit processes into ourselves
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Adapt property states
# Run this once - If any state was adapted, then run again to ensure
# that all dependencies are processed.
first = True
_max = 5
required = False
while (first or required) and _max:
first = False
required = False
_max -= 1
for key in self.attributesInSaveOrder:
# Adapt status from dependent properties.
for propname in props[key]['depends_on']:
old = props[key]['commit_status']
props[key]['commit_status'] |= props[propname]['status'] & STATUS_CHANGED
props[key]['commit_status'] |= props[propname]['commit_status'] & STATUS_CHANGED
if props[key]['commit_status'] != old:
required = True
# Collect values by store and process the property filters
collectedAttrs = {}
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Do not save untouched values
if not props[key]['commit_status'] & STATUS_CHANGED:
continue
# Get the new value for the property and execute the out-filter
self.log.debug("changed: %s" % (key,))
# Process each and every out-filter with a clean set of input values,
# to avoid return values overwriting each other.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
collectedAttrs[prop_key] = props[prop_key]
# Create a backend compatible list of all changed attributes.
toStore = {}
for prop_key in collectedAttrs:
# Collect properties by backend
for be in props[prop_key]['backend']:
if not be in toStore:
toStore[be] = {}
# Convert the property's type to the required format - if it's not of the expected type.
be_type = collectedAttrs[prop_key]['backend_type']
s_type = collectedAttrs[prop_key]['type']
if not self._objectFactory.getAttributeTypes()[be_type].is_valid_value(collectedAttrs[prop_key]['value']):
collectedAttrs[prop_key]['value'] = self._objectFactory.getAttributeTypes()[s_type].convert_to(
be_type, collectedAttrs[prop_key]['value'])
# Append entry to the to-be-stored list
toStore[be][prop_key] = {'foreign': collectedAttrs[prop_key]['foreign'],
'orig': collectedAttrs[prop_key]['in_value'],
'value': collectedAttrs[prop_key]['value'],
'type': collectedAttrs[prop_key]['backend_type']}
# We may have a plugin without any attributes, like the group asterisk extension; in
# this case we have to update the object despite the lack of properties.
if not len(toStore) and self._backend:
toStore[self._backend] = {}
# Leave the show if there's nothing to do
tmp = {}
for key, value in toStore.items():
# Skip NULL backend. Nothing to save, anyway.
if key == "NULL":
continue
tmp[key] = value
toStore = tmp
# Skip the whole process if there's no change at all
if not toStore:
return {}
# Update references using the toStore information
changes = {}
for be in toStore:
changes.update(toStore[be])
self.update_refs(changes)
# Handle by backend
p_backend = getattr(self, '_backend')
obj = self
zope.event.notify(ObjectChanged("pre %s" % self._mode, obj))
# Call pre-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PreCreate")
if self._mode in ["update"]:
self.__execute_hook("PreModify")
# First, take care about the primary backend...
if p_backend in toStore:
beAttrs = self._backendAttrs[p_backend] if p_backend in self._backendAttrs else {}
be = ObjectBackendRegistry.getBackend(p_backend)
if self._mode == "create":
obj.uuid = be.create(self.dn, toStore[p_backend], self._backendAttrs[p_backend])
elif self._mode == "extend":
be.extend(self.uuid, toStore[p_backend],
self._backendAttrs[p_backend],
self.getForeignProperties())
else:
be.update(self.uuid, toStore[p_backend], beAttrs)
# The DN may have changed
if self._base_object:
dn = be.uuid2dn(self.uuid)
# Take DN for newly created objects
if self._mode == "create":
if self._base_object:
obj.dn = dn
elif dn != obj.dn:
self.update_dn_refs(dn)
obj.dn = dn
if self._base_object:
zope.event.notify(ObjectChanged("post move", obj))
obj.orig_dn = dn
# ... then walk thru the remaining ones
for backend, data in toStore.items():
# Skip primary backend - already done
if backend == p_backend:
continue
be = ObjectBackendRegistry.getBackend(backend)
beAttrs = self._backendAttrs[backend] if backend in self._backendAttrs else {}
if self._mode == "create":
be.create(self.dn, data, beAttrs)
elif self._mode == "extend":
be.extend(self.uuid, data, beAttrs, self.getForeignProperties())
else:
be.update(self.uuid, data, beAttrs)
zope.event.notify(ObjectChanged("post %s" % self._mode, obj))
# Call post-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PostCreate")
if self._mode in ["update"] and "PostModify":
self.__execute_hook("PostModify")
return props
def revert(self):
"""
Reverts all changes made to this object since it was loaded.
"""
for key in self.myProperties:
self.myProperties[key]['value'] = self.myProperties[key]['last_value']
self.log.debug("reverted object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
def getExclusiveProperties(self):
return [x for x, y in self.myProperties.items() if not y['foreign']]
def getForeignProperties(self):
return [x for x, y in self.myProperties.items() if y['foreign']]
def __processValidator(self, fltr, key, value, props_copy):
"""
This method processes a given process-list (fltr) for a given property (prop)
and returns True if the value matches the validator set, False if it does not.
A schematic example of such a process-list follows this method.
"""
# This is our process-line pointer; it points to the process-list line
# we're executing at the moment
lptr = 0
# Our filter result stack
stack = list()
self.log.debug(" validator started (%s)" % key)
self.log.debug(" value: %s" % (value, ))
# Process the list till we reach the end..
lasterrmsg = ""
errormsgs = []
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
if 'condition' in curline:
# Build up argument list
args = [props_copy, key, value] + curline['params']
# Process condition and keep results
fname = type(curline['condition']).__name__
v, errors = (curline['condition']).process(*args)
# Log what happened!
self.log.debug(" %s: [Filter] %s(%s) called and returned: %s" % (
lptr, fname, ", ".join(["\"" + x + "\"" for x in curline['params']]), v))
# Append the result to the stack.
stack.append(v)
if not v:
if len(errors):
lasterrmsg = errors.pop()
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
v1 = stack.pop()
v2 = stack.pop()
fname = type(curline['operator']).__name__
res = (curline['operator']).process(v1, v2)
stack.append(res)
# Add last error message
if not res:
errormsgs.append(lasterrmsg)
lasterrmsg = ""
# Log what happened!
self.log.debug(" %s: [OPERATOR] %s(%s, %s) called and returned: %s" % (
lptr, fname, v1, v2, res))
# Attach last error message
res = stack.pop()
if not res and lasterrmsg != "":
errormsgs.append(lasterrmsg)
self.log.debug(" <- VALIDATOR ENDED (%s)" % key)
return res, errormsgs
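# Illustrative sketch (hypothetical plugin names, not part of the original
# module): the process-list consumed above is a dict keyed by 1-based line
# numbers. Conditions push booleans onto the stack, operators pop two values
# and push one, and the final stack value is the validation result, e.g.:
#   fltr = {
#       1: {'condition': <RegEx plugin>, 'params': ['^[a-z]+$']},
#       2: {'condition': <Length plugin>, 'params': ['1', '32']},
#       3: {'operator': <And plugin>},
#   }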
def __processFilter(self, fltr, key, prop):
"""
This method processes a given process-list (fltr) for a given property (prop).
For example: When a property has to be stored in the backend, it will
run through the out-filter-process-list and thus will be transformed into a storable
key, value pair.
"""
# Search for replaceable patterns in the process-list.
fltr = self.__fillInPlaceholders(fltr, prop)
# This is our process-line pointer; it points to the process-list line
# we're executing at the moment
lptr = 0
# Our filter result stack
stack = list()
# Log values
self.log.debug(" -> FILTER STARTED (%s)" % key)
# Process the list till we reach the end..
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A filter is used to manipulate the 'value' or the 'key' or maybe both.
if 'filter' in curline:
# Build up argument list
args = [self, key, prop]
fname = type(curline['filter']).__name__
for entry in curline['params']:
args.append(entry)
# Process filter and keep results
key, prop = (curline['filter']).process(*args)
# Ensure that the processed data is still valid.
# A filter may mess things up, and then the next one cannot process correctly.
if key not in prop:
raise ObjectException(C.make_error('FILTER_INVALID_KEY',
key=key, filter=fname))
# Check if the filter returned all expected property values.
for pk in prop:
if not all(k in prop[pk] for k in ('backend', 'value', 'type')):
missing = ", ".join({'backend', 'value', 'type'} - set(prop[pk].keys()))
raise ObjectException(C.make_error('FILTER_MISSING_KEY', key=missing, filter=fname))
# Check if the returned value-type is list or None.
if type(prop[pk]['value']) not in [list, type(None)]:
raise ObjectException(C.make_error('FILTER_NO_LIST',
key=pk, filter=fname, type=type(prop[pk]['value'])))
self.log.debug(" %s: [Filter] %s(%s) called " % (lptr, fname,
", ".join(["\"" + x + "\"" for x in curline['params']])))
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
elif 'condition' in curline:
# Build up argument list
args = [key] + curline['params']
# Process condition and keep results
stack.append((curline['condition']).process(*args))
fname = type(curline['condition']).__name__
self.log.debug(" %s: [Condition] %s(%s) called " % (lptr, fname, ", ".join(curline['params'])))
# Handle jump, for example if a condition has failed, jump over its filter-chain.
elif 'jump' in curline:
# Jump to <line> -1 because we will increase the line ptr later.
olptr = lptr
if curline['jump'] == 'conditional':
if stack.pop():
lptr = curline['onTrue'] - 1
else:
lptr = curline['onFalse'] - 1
else:
lptr = curline['to'] - 1
self.log.debug(" %s: [Goto] %s ()" % (olptr, lptr))
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
a = stack.pop()
b = stack.pop()
stack.append((curline['operator']).process(a, b))
fname = type(curline['operator']).__name__
self.log.debug(" %s: [Condition] %s(%s, %s) called " % (lptr, fname, a, b))
# Log current values
#self.log.debug(" result")
#for pkey in prop:
# self.log.debug(" %s: %s" % (pkey, prop[pkey]['value']))
self.log.debug(" <- FILTER ENDED")
return prop
def __fillInPlaceholders(self, fltr, props):
"""
This method fills in placeholders for in- and out-filters (a standalone sketch follows this method).
"""
# Collect all property values
propList = {}
for key in props:
if props[key]['multivalue']:
propList[key] = props[key]['value']
else:
if props[key]['value'] and len(props[key]['value']):
propList[key] = props[key]['value'][0]
else:
propList[key] = None
# An inline function which replaces format string tokens
def _placeHolder(x):
try:
x = x % propList
except KeyError:
pass
return x
# Walk through each line of the process list and replace placeholders.
for line in fltr:
if 'params' in fltr[line]:
fltr[line]['params'] = map(_placeHolder,
fltr[line]['params'])
return fltr
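# A minimal standalone sketch (hypothetical helper and values, not part of the
# original class): the placeholder mechanism above is plain %-formatting
# against a dict of current property values; parameters referencing unknown
# keys are kept untouched because of the KeyError guard.
def _example_fill_in_placeholders():
    prop_list = {'uid': 'jdoe', 'sn': 'Doe'}  # made-up property values

    def _place_holder(x):
        try:
            x = x % prop_list
        except KeyError:
            pass
        return x

    # "%(uid)s@example.net" -> "jdoe@example.net"; "%(missing)s" stays as-is
    return [_place_holder(p) for p in ("%(uid)s@example.net", "%(missing)s")]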
def get_object_type_by_dn(self, dn):
"""
Returns the objectType for a given DN
"""
index = PluginRegistry.getInstance("ObjectIndex")
res = index.search({'dn': dn}, {'_type': 1})
return res[0]['_type'] if res.count() == 1 else None
def get_references(self, override=None):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for ref, info in self._objectFactory.getReferences(override or self.__class__.__name__).items():
for ref_attribute, dsc in info.items():
for idsc in dsc:
if self.myProperties[idsc[1]]['orig_value'] and len(self.myProperties[idsc[1]]['orig_value']):
oval = self.myProperties[idsc[1]]['orig_value'][0]
else:
oval = None
dns = index.search({'_type': ref, ref_attribute: oval}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
idsc[1],
getattr(self, idsc[1]),
dns or [],
self.myProperties[idsc[1]]['multivalue']))
return res
def update_refs(self, data):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
# Skip to the next iteration if there's no change for the relevant
# attribute
if not self_attr in data:
continue
# Load object and change value to the new one
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
o_value = data[self_attr]['orig']
if type(c_value) == list:
if type(o_value) == list:
c_value = filter(lambda x: x not in o_value, c_value)
else:
c_value = filter(lambda x: x != o_value, c_value)
if multivalue:
c_value.append(data[self_attr]['value'])
else:
c_value.append(data[self_attr]['value'][0])
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, data[self_attr]['value'][0])
c_obj.commit()
def remove_refs(self):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
if type(value) == list:
c_value = filter(lambda x: x not in value, c_value)
else:
c_value = filter(lambda x: x != value, c_value)
setattr(c_obj, ref_attr, c_value)
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def get_dn_references(self):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for info in self._objectFactory.getReferences("*", "dn").values():
for ref_attribute in info.keys():
dns = index.search({ref_attribute: self.dn}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
map(lambda s: s.decode('utf-8'), dns if dns else [])
))
return res
def update_dn_refs(self, new_dn):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
c_value.append(new_dn)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, new_dn)
c_obj.commit()
def remove_dn_refs(self):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def remove(self):
"""
Removes this object - and, if applicable, its contained objects.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_REMOVE_NON_BASE_OBJECT'))
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Remove for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre remove", obj))
# Call pre-remove now
self.__execute_hook("PreRemove")
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Remove all non exclusive properties
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.remove(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post remove", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def simulate_move(self, orig_dn):
"""
Simulates a move for this object
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
obj = self
zope.event.notify(ObjectChanged("pre move", obj, dn=self.dn, orig_dn=orig_dn))
# Update the DN refs which have most probably changed
self.update_dn_refs(self.dn)
zope.event.notify(ObjectChanged("post move", obj, dn=self.dn, orig_dn=orig_dn))
def move(self, new_base):
"""
Moves this object - and, if applicable, its contained objects.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
# Collect backends
backends = [getattr(self, '_backend')]
# Collect all other backends
for info in self.myProperties.values():
for be in info['backend']:
if not be in backends:
backends.append(be)
obj = self
zope.event.notify(ObjectChanged("pre move", obj))
# Move for primary backend
be = ObjectBackendRegistry.getBackend(backends[0])
be.move(self.uuid, new_base)
# Update the DN refs which have most probably changed
p_backend = getattr(self, '_backend')
be = ObjectBackendRegistry.getBackend(p_backend)
dn = be.uuid2dn(self.uuid)
self.update_dn_refs(dn)
zope.event.notify(ObjectChanged("post move", obj, dn=dn))
def retract(self):
"""
Removes this object extension
"""
#pylint: disable=E1101
if self._base_object:
raise ObjectException(C.make_error('OBJECT_BASE_NO_RETRACT'))
# Call pre-remove now
self.__execute_hook("PreRemove")
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Retract for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre retract", obj))
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Remove all non exclusive properties
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.retract(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post retract", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def is_attr_set(self, name):
return len(self.myProperties[name]['in_value'])
def is_attr_using_default(self, name):
return not self.is_attr_set(name) and self.myProperties[name]['default']
def __execute_hook(self, hook_type):
# Execute all hooks registered for the given hook type
hooks = getattr(self, '__hooks')
if hook_type in hooks:
for hook in hooks[hook_type]:
hook["ref"](self)
class IObjectChanged(Interface):
def __init__(self, obj):
pass
class IAttributeChanged(Interface):
def __init__(self, attr, value):
pass
class ObjectChanged(object):
implements(IObjectChanged)
def __init__(self, reason, obj=None, dn=None, uuid=None, orig_dn=None, o_type=None):
self.reason = reason
self.uuid = uuid or obj.uuid
self.dn = dn or obj.dn
self.orig_dn = orig_dn or obj.orig_dn
self.o_type = o_type or obj.__class__.__name__
class AttributeChanged(object):
implements(IAttributeChanged)
def __init__(self, reason, obj, target):
self.reason = reason
self.target = target
self.uuid = obj.uuid
from clacks.agent.objects.proxy import ObjectProxy
| 38.287356
| 158
| 0.564001
| 6,047
| 53,296
| 4.837936
| 0.107491
| 0.054692
| 0.01128
| 0.01234
| 0.473047
| 0.395351
| 0.349342
| 0.309451
| 0.285148
| 0.250829
| 0
| 0.002912
| 0.336367
| 53,296
| 1,391
| 159
| 38.314881
| 0.824225
| 0.188157
| 0
| 0.371715
| 0
| 0
| 0.104291
| 0.007988
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053817
| false
| 0.005006
| 0.018773
| 0.010013
| 0.133917
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bf4cd25d9e85b2b0cb4131798b2cd2ef33b36d7
| 10,926
|
py
|
Python
|
idaes/apps/matopt/materials/lattices/diamond_lattice.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 112
|
2019-02-11T23:16:36.000Z
|
2022-03-23T20:59:57.000Z
|
idaes/apps/matopt/materials/lattices/diamond_lattice.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 621
|
2019-03-01T14:44:12.000Z
|
2022-03-31T19:49:25.000Z
|
idaes/apps/matopt/materials/lattices/diamond_lattice.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 154
|
2019-02-01T23:46:33.000Z
|
2022-03-23T15:07:10.000Z
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
RefIAD = sqrt(3) / 4
# === STANDARD CONSTRUCTOR
def __init__(self, IAD):
RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
RefUnitCellTiling = CubicTiling(RefUnitCellShape)
RefFracPositions = [np.array([0.0, 0.0, 0.0]),
np.array([0.5, 0.5, 0.0]),
np.array([0.0, 0.5, 0.5]),
np.array([0.5, 0.0, 0.5]),
np.array([0.25, 0.25, 0.25]),
np.array([0.25, 0.75, 0.75]),
np.array([0.75, 0.25, 0.75]),
np.array([0.75, 0.75, 0.25])]
RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
UnitCellLattice.__init__(self, RefUnitCell)
self._IAD = DiamondLattice.RefIAD # IAD is set correctly after calling applyTransF
self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
np.array([-0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, -0.25]),
np.array([0.25, -0.25, -0.25])],
[np.array([-0.25, -0.25, -0.25]),
np.array([0.25, 0.25, -0.25]),
np.array([0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, 0.25])]],
[[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])],
[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])]]]
self._typeDict = {0: 0, 3: 1}
self._relativePositions = {0: np.array([0.0, 0.0, 0.0]), 3: np.array([0.25, 0.25, 0.25])}
# === CONSTRUCTOR - Aligned with {100}
@classmethod
def alignedWith100(cls, IAD):
return cls(IAD) # Default implementation
# === CONSTRUCTOR - Aligned with {110}
@classmethod
def aligndWith110(cls, IAD):
result = cls(IAD)
thetaX = 0
thetaY = np.pi * 0.25
thetaZ = 0
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {111}
@classmethod
def alignedWith111(cls, IAD, blnTrianglesAlignedWithX=True):
result = cls(IAD)
thetaX = -np.pi * 0.25
thetaY = -np.arctan2(-sqrt(2), 2)
thetaZ = (np.pi * 0.5 if blnTrianglesAlignedWithX else 0)
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {xyz}
@classmethod
def alignedWith(cls, IAD, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return cls(IAD)
elif MI in ['110', '101', '011']:
return cls.aligndWith110(IAD)
elif MI == '111':
return cls.alignedWith111(IAD)
else:
result = cls(IAD)
a = np.array([0.0, 0.0, 1.0])
b = np.array([float(MI[0]), float(MI[1]), float(MI[2])])
axis = np.cross(a, b)
angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
result.applyTransF(RotateFunc.fromAxisAngle(axis, angle))
return result
raise ValueError('DiamondLattice.alignedWith: Input direction is not correct.')
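# A brief usage sketch (hypothetical values, not part of the original module):
# a lattice is built from an interatomic distance and a Miller index string,
# after which derived spacings can be queried.
def _example_build_lattice():
    lattice = DiamondLattice.alignedWith(1.54, '110')  # dispatches to aligndWith110
    return lattice.IAD, lattice.Diamond110LayerSpacing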
# === MANIPULATION METHODS
def applyTransF(self, TransF):
if isinstance(TransF, ScaleFunc):
if TransF.isIsometric:
self._IAD *= TransF.Scale[0]
else:
raise ValueError('DiamondLattice.applyTransF: Can only scale isometrically')
UnitCellLattice.applyTransF(self, TransF)
# === AUXILIARY METHODS
def _getPointType(self, P):
return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4
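# A minimal standalone check (not part of the original class): _getPointType
# sums the coordinates scaled by four, modulo four, which maps the two occupied
# diamond sublattices to 0 and 3 for the reference fractional positions used
# in __init__.
def _example_point_types():
    import numpy as np

    def point_type(P):
        return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4

    return (point_type(np.array([0.0, 0.0, 0.0])),     # A site -> 0
            point_type(np.array([0.25, 0.25, 0.25])))  # B site -> 3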
# === PROPERTY EVALUATION METHODS
# NOTE: inherited from UnitCellLattice
# def isOnLattice(self,P):
def areNeighbors(self, P1, P2):
return np.linalg.norm(P2 - P1) <= self.IAD
def getNeighbors(self, P, layer=1):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
if PType not in self._typeDict.keys():
raise ValueError('DiamondLattice.getNeighbors Should never reach here!')
if layer > len(self._NthNeighbors):
self._calculateNeighbors(layer)
NBs = deepcopy(self._NthNeighbors[layer - 1][self._typeDict[PType]])
for NeighP in NBs:
NeighP += RefP
self._convertFromReference(NeighP)
return NBs
def _calculateNeighbors(self, layer):
NList = []
for k, v in self._typeDict.items():
tmp = [np.array([0, 0, 0], dtype=float)]
for nb in self._NthNeighbors:
tmp.extend(nb[v])
NList.append(tmp)
for _ in range(layer - len(self._NthNeighbors)):
tmp = [[] for _ in self._typeDict.keys()]
for k, v in self._typeDict.items():
for P in self._NthNeighbors[len(self._NthNeighbors) - 1][v]:
PType = self._getPointType(P + self._relativePositions[k])
for Q in self._NthNeighbors[0][self._typeDict[PType]]:
N = P + Q
if not ListHasPoint(NList[v], N, 0.001 * DiamondLattice.RefIAD):
tmp[v].append(N)
NList[v].append(N)
self._NthNeighbors.append(tmp)
def isASite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 0
def isBSite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 3
def setDesign(self, D, AType, BType):
for i, P in enumerate(D.Canvas.Points):
if self.isASite(P):
D.setContent(i, AType)
elif self.isBSite(P):
D.setContent(i, BType)
else:
raise ValueError('setDesign cannot set a site that is not on the lattice')
# === BASIC QUERY METHODS
@property
def IAD(self):
return self._IAD
@property
def Diamond100LayerSpacing(self):
return self.IAD / sqrt(3)
@property
def Diamond110LayerSpacing(self):
return self.IAD * sqrt(2) / sqrt(3)
@property
def Diamond111LayerSpacing(self):
return self.IAD * 4 / 3
@property
def Diamond112LayerSpacing(self):
return self.IAD * sqrt(2) / 3
def getLayerSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return self.Diamond100LayerSpacing
elif MI in ['110', '101', '011']:
return self.Diamond110LayerSpacing
elif MI == '111':
return self.Diamond111LayerSpacing
elif MI in ['112', '121', '211']:
return self.Diamond112LayerSpacing
else:
raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
raise ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')
def getShellSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001', '110', '101', '011', '111']:
return self.IAD * sqrt(8) / sqrt(3)
elif MI in ['112', '121', '211']:
return self.IAD * sqrt(2) / sqrt(3)
else:
raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
raise ValueError('The input direction is not correct.')
def getUniqueLayerCount(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return 4
elif MI in ['110', '101', '011']:
return 2
elif MI == '111':
return 3
elif MI in ['112', '121', '211']:
return 6
else:
raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
raise ValueError('The input direction is not correct.')
| 43.185771
| 114
| 0.507139
| 1,294
| 10,926
| 4.247295
| 0.188563
| 0.022926
| 0.065502
| 0.021834
| 0.356259
| 0.347707
| 0.324418
| 0.276201
| 0.228166
| 0.228166
| 0
| 0.075252
| 0.346879
| 10,926
| 252
| 115
| 43.357143
| 0.694927
| 0.09125
| 0
| 0.369458
| 0
| 0
| 0.066817
| 0.021451
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.039409
| 0.039409
| 0.305419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bf4f3ec8b611663d899f073f4f41ae66286507f
| 12,055
|
py
|
Python
|
elateridae_baits.py
|
AAFC-BICoE/elateridae-ortholog-baitset
|
8e17212a26539dfd79b414ffe8f243a906d32149
|
[
"MIT"
] | null | null | null |
elateridae_baits.py
|
AAFC-BICoE/elateridae-ortholog-baitset
|
8e17212a26539dfd79b414ffe8f243a906d32149
|
[
"MIT"
] | null | null | null |
elateridae_baits.py
|
AAFC-BICoE/elateridae-ortholog-baitset
|
8e17212a26539dfd79b414ffe8f243a906d32149
|
[
"MIT"
] | null | null | null |
# coding: utf8
"""
Ortholog Based Bait Design Script for creating Elateridae ortholog based baits suitable for submission to myBaits
Compares t_coffee AA alignment scores with nucleotide tranalignments to find conserved blocks
Author Jackson Eyres jackson.eyres@canada.ca
License: MIT
Copyright: Government of Canada
"""
import glob
import os
from Bio import AlignIO, SeqIO
import time
import argparse
import random
def main():
"""
Main function to run the Elateridae bait designer
:return:
"""
parser = argparse.ArgumentParser(description='Processes T_Coffee AA alignments to generate a ortholog bait set')
parser.add_argument('-o', type=str, required=True,
help='Output Directory')
parser.add_argument('-i', type=str, required=True,
help='T_Coffee Directory containing aa based .score_ascii files')
parser.add_argument('-n', type=str, required=True,
help='Directory containing tranalign nucleotide alignments')
# parser.add_argument('-p', type=str, required=True,
# help='Priorities File for Staphylinidae')
args = parser.parse_args()
print("Starting Staphylinidae Ortholog Bait Design".format(args.o))
print(args.o, args.i, args.n)
dict_of_max_sums = longest_exon_length(args.i)
sum_file = write_sums(args.o, dict_of_max_sums)
blocks_dir = extract_conserved_blocks(sum_file, args.n, args.o)
window_ranges = [600]
for window in window_ranges:
filtered_blocks_dir = filter_blocks(blocks_dir, args.o, window)
processed_blocks_dir = filtered_blocks_dir
# Originally the plan was to stagger-tile the baits, but the bait manufacturer inherently does this
# tiled_blocks_dir = tile_blocks(filtered_blocks_dir, args.o, window)
# processed_blocks_dir = tiled_blocks_dir
merge_baits(processed_blocks_dir, args.o, "Elateridae", window)
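# Typical invocation (hypothetical paths; the script is driven entirely by the
# argparse options defined above):
#   python elateridae_baits.py -i t_coffee_scores/ -n tranalign_nt/ -o results/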
def extract_conserved_blocks(sum_file, alignment_directory, results_directory):
"""
Takes an AA T_coffee alignment score_ascii file, the corresponding nt fasta tranalign file, and the sum file, and
extracts a conserved block from each alignment
:param sum_file:
:param alignment_directory:
:param results_directory:
:return: Output Directory of conserved blocks
"""
output_directory = os.path.join(results_directory, "conserved_blocks")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with open(sum_file) as f:
lines = f.readlines()
lines.pop(0)
for line in lines:
list_of_seqs = []
split = line.rstrip().split(",")
name = split[0].replace(".aa.summarized.score_ascii", "_tranaligned.fa")
window_range = int(split[2])*3
index = int(split[3])*3
file_path = os.path.join(alignment_directory, name)
if os.path.isfile(file_path):
with open(file_path) as g:
alignments = AlignIO.read(g, "fasta")
for alignment in alignments:
list_of_seqs.append(alignment[index:index + window_range])
orthogroup = split[0].split(".")[0]
file_name = "{}_block.fasta".format(orthogroup)
file_path = os.path.join(output_directory, file_name)
with open(file_path, "w") as h:
for seq in list_of_seqs:
h.write(seq.format("fasta"))
return output_directory
def longest_exon_length(directory):
"""
Scans t_coffee alignments in score_ascii format for a region of between 75-2000 positions in length that is
highly conserved, and sorts by the degree of conservation into an output file
:param directory: Directory of T_coffee results (containing score_ascii and aln files)
:return: Dictionary of Orthogroups with a 300bp region TCS scores above 2400
"""
increments = [150, 200]
increments_rev = increments[::-1]
dict_of_max_sums = {}
files = glob.glob(os.path.join(directory, "*.score_ascii"))
count = 0
for file in files:
count += 1
if count % 100 == 0:
print(count)
# Scans an alignment and converts the cons string of numbers into a continuous list of numbers
number_string = ""
with open(file) as f:
number_of_specimens = f.read().count(":") - 4
f.seek(0)
if number_of_specimens < 5:
print("Skipping {} Due to Low Specimen Count".format(file))
continue
for line in f:
if line.startswith("cons") and ":" not in line:
number = line.rstrip().split(" ")[-1]
number_string += number
number_list = [int(i) for i in number_string]
# Scan the number list for the window with the highest conservation score, requiring at least 95% of the
# maximum TCS score for that window range (i.e. 9 * window range).
# Sort the candidate blocks so the highest-scoring block for the window range comes first. If it reaches
# 95% quality or higher, add it to the dictionary and move on to the next file; otherwise decrease the
# window range and try again. (A standalone sliding-window sketch follows this function.)
for window_range in increments_rev:
list_of_sums = []
if len(number_list) > window_range:
for i in range(0, len(number_list) - window_range):
the_sum = sum(number_list[i:i + window_range])
list_of_sums.append((the_sum, window_range, i))
sorted_list = sorted(list_of_sums, reverse=True, key=lambda element: (element[0]))
if float(sorted_list[0][0]) >= float(9 * window_range * .95):
if os.path.basename(file) not in dict_of_max_sums:
dict_of_max_sums[os.path.basename(file)] = sorted_list[0]
break
return dict_of_max_sums
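# A minimal standalone sketch (hypothetical scores, not part of the original
# script) of the sliding-window scan above: every start index is scored by a
# window sum and the best block is kept if it reaches 95% of the theoretical
# maximum of 9 per position.
def _example_sliding_window(number_list=None, window_range=5):
    number_list = number_list or [9, 9, 8, 9, 9, 9, 3, 2, 9, 9]
    sums = [(sum(number_list[i:i + window_range]), window_range, i)
            for i in range(0, len(number_list) - window_range)]
    best = sorted(sums, reverse=True, key=lambda e: e[0])[0]
    passes = float(best[0]) >= float(9 * window_range * 0.95)
    return best, passes  # e.g. ((44, 5, 0), True) for the sample scores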
def write_sums(directory, dict_of_max_sums):
"""
Writes the dictionary of all ortholog T_coffee scores/sums to csv file
:param directory:
:param dict_of_max_sums:
:return:
"""
if not os.path.exists(directory):
os.makedirs(directory)
timestr = time.strftime("%Y%m%d-%H%M%S")
file_name = "Conserved_Exons_Sums_{}.csv".format(timestr)
file_path = os.path.join(directory, file_name)
# Sorts dictionary into a list by score sum and then window length
sorted_x = sorted(dict_of_max_sums.items(), reverse=True, key=lambda x: (x[1][0], x[1][1]))
print("Writing T_Coffee score analysis to {}".format(file_path))
with open(file_path, "w") as f:
f.write("Orthogroup,Sum,Window,Index\n")
for entry in sorted_x:
f.write("{},{},{},{}\n".format(entry[0], entry[1][0], entry[1][1], entry[1][2]))
return file_path
def filter_blocks(directory, results_dir, window):
"""
Filters blocks generated by longest exon length and write sum functions based on various criteria
:param directory: Directory of fasta blocks to filter
:param results_dir: Parent Result Folder
:param window: Minimum length of a conserved block in basepairs
:return: Output Directory of filtered blocks
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "filtered_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
total_seq_length = 0
total_after_gap_removal = 0
total_sequences = 0
gene_count = 0
# For each block/file, extract sequences that meet the following criteria (a minimal gap-filter sketch follows this function):
# Part of Priority List = 1
# Minimum Length of Window size in basepairs
# Gaps represent less than 20% of sequence
# Block contains at least 5 sequences from priority list = 1
for fasta in fastas:
seqs = []
with open(fasta) as f:
file_name = os.path.basename(fasta).replace(".fasta", "_filtered.fasta")
for seq in SeqIO.parse(f, 'fasta'):
gaps = seq.seq.count("-")
gap_percent = float(gaps / len(seq.seq))
if gap_percent > 0.20:
pass
else:
if len(seq.seq) >= window:
seqs.append(seq)
if len(seqs) < 5:
pass
else:
gene_count += 1
# Randomly take 3 contigs from the bait set to ensure even distribution of species across all orthologs
random.shuffle(seqs)
seqs = seqs[:3]
total_sequences += len(seqs)
for seq in seqs:
total_seq_length += len(seq.seq)
seq.seq = seq.seq.ungap(gap="-")
total_after_gap_removal += len(seq.seq)
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
print("Total Genes: {}, "
"Total Sequences: {}, "
"Total Length in bp: {}, "
"After Gap Removal: {}".format(gene_count, total_sequences, total_seq_length, total_after_gap_removal))
return output_dir
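# A minimal standalone sketch (hypothetical sequence, not part of the original
# script) of the per-sequence filter above: a sequence is kept only when gaps
# make up at most 20% of the aligned length and it spans the window.
def _example_gap_filter(seq_str="ATG---ATGATGATGATG", window=12):
    gaps = seq_str.count("-")
    gap_percent = float(gaps) / len(seq_str)
    return gap_percent <= 0.20 and len(seq_str) >= window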
def tile_blocks(directory, results_dir, window):
"""
Takes a prefiltered block generated by the filter_blocks function and tiles each bait
The first 0, 40 or 80 basepairs of each sequence are removed so the baits tile amongst each other
:param directory:
:param results_dir:
:param window:
:return:
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "tiled_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for fasta in fastas:
seqs = []
with open(fasta) as f:
count = 0
for seq in SeqIO.parse(f, 'fasta'):
seq.description = ""
# Remove the first 0, 40 or 80 basepairs of the sequence every 3rd time
count += 1
if count == 1:
pass
if count == 2:
seq.seq = seq.seq[40:]
if count == 3:
seq.seq = seq.seq[80:]
count = 0
seqs.append(seq)
file_name = os.path.basename(fasta).replace("_block_filtered", "_block_tiled")
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
def merge_baits(directory, results_dir, prefix, window):
"""
Merges multifastas in the input directory into a single multi fasta file. Can be accomplished with bash cat, but
using biopython ensures each fasta entry is formatted correctly
:param directory: Input directory of fastas
:param results_dir: Output Parent directory
:param prefix: Name of the output file
:param window:
:return:
"""
output_dir = os.path.join(results_dir, "final_baits")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fastas = glob.glob(os.path.join(directory, "*.fasta"))
seqs = []
total_dna = 0
total_seqs = 0
total_orthologs = 0
for fasta in fastas:
if total_dna > 3900000:
break
total_orthologs += 1
with open(fasta) as f:
for seq in SeqIO.parse(f, 'fasta'):
total_seqs += 1
total_dna += len(seq.seq)
seq.description = ""
seqs.append(seq)
file_name = "{}-{}-final-baits.fasta".format(prefix, window)
new_file = os.path.join(output_dir, file_name)
print("Bait File {} "
"with Total Orthologs {}, "
"Total Seqs {}, Total_Dna {} bp".format(new_file, total_orthologs, total_seqs, total_dna))
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
if __name__ == "__main__":
main()
| 36.41994
| 117
| 0.616093
| 1,584
| 12,055
| 4.532828
| 0.210859
| 0.020056
| 0.019499
| 0.016295
| 0.21351
| 0.157939
| 0.132591
| 0.109053
| 0.085933
| 0.081198
| 0
| 0.013413
| 0.28876
| 12,055
| 330
| 118
| 36.530303
| 0.824003
| 0.267939
| 0
| 0.260417
| 0
| 0
| 0.097729
| 0.012231
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036458
| false
| 0.015625
| 0.03125
| 0
| 0.098958
| 0.036458
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bf638f00910b809a7d45e1aeabdb75e4e5aef9c
| 1,361
|
py
|
Python
|
poilab.py
|
octeufer/Annotate_Optimize
|
32d9cecc0159882d3f962990aba07168c4a023f5
|
[
"Apache-2.0"
] | null | null | null |
poilab.py
|
octeufer/Annotate_Optimize
|
32d9cecc0159882d3f962990aba07168c4a023f5
|
[
"Apache-2.0"
] | null | null | null |
poilab.py
|
octeufer/Annotate_Optimize
|
32d9cecc0159882d3f962990aba07168c4a023f5
|
[
"Apache-2.0"
] | null | null | null |
import sys
import numpy as np
sys.path.append("d:/data/annooptimize")
import triangle
import time
tinternal = list()
def labstart():
points,tri = triangle.gentri("d:/data/annooptimize/Annodata/200600/poise.shp")
plabels = triangle.dynamicSize(points)
conflictg = triangle.conflictgraphdy(points,tri,plabels)
acg = triangle.accesssubg(conflictg)
len(acg)
allsolve = np.zeros((len(points),4,2),np.float64)
points2,tri2 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIhalf.shp")
plabels2 = triangle.dynamicSize(points2)
conflictg2 = triangle.conflictgraphdy(points2,tri2,plabels2)
acg2 = triangle.accesssubg(conflictg2)
points3,tri3 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIall.shp")
plabels3 = triangle.dynamicSize(points3)
conflictg3 = triangle.conflictgraphdy(points3,tri3,plabels3)
acg3 = triangle.accesssubg(conflictg3)
time.clock()
costs,tabucs= triangle.globaltabuiter2dy(acg,points,1,plabels)
tinternal.append(time.clock())
costs2,tabucs2= triangle.globaltabuiter2dy(acg2,points2,1,plabels2)
tinternal.append(time.clock())
costs3,tabucs3= triangle.globaltabuiter2dy(acg3,points3,1,plabels3)
tinternal.append(time.clock())
return tinternal,(costs,tabucs),(costs2,tabucs2),(costs3,tabucs3)
| 38.885714
| 87
| 0.722998
| 152
| 1,361
| 6.473684
| 0.375
| 0.020325
| 0.069106
| 0.057927
| 0.137195
| 0.137195
| 0.137195
| 0
| 0
| 0
| 0
| 0.053726
| 0.152094
| 1,361
| 34
| 88
| 40.029412
| 0.79896
| 0
| 0
| 0.103448
| 0
| 0
| 0.121326
| 0.106255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.137931
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bf7f1bc739f582663b9e33d97b9d4189cae0d04
| 473
|
py
|
Python
|
fitbit/__init__.py
|
erichilarysmithsr/python-fitbit
|
38cf916d0318aedc91b31d15431fa9c49a13d15f
|
[
"Apache-2.0"
] | null | null | null |
fitbit/__init__.py
|
erichilarysmithsr/python-fitbit
|
38cf916d0318aedc91b31d15431fa9c49a13d15f
|
[
"Apache-2.0"
] | null | null | null |
fitbit/__init__.py
|
erichilarysmithsr/python-fitbit
|
38cf916d0318aedc91b31d15431fa9c49a13d15f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Fitbit API Library
------------------
:copyright: 2012-2015 ORCAS.
:license: BSD, see LICENSE for more details.
"""
from .api import Fitbit, FitbitOauthClient, FitbitOauth2Client
# Meta.
__title__ = 'fitbit'
__author__ = 'Issac Kelly and ORCAS'
__author_email__ = 'bpitcher@orcasinc.com'
__copyright__ = 'Copyright 2012-2015 ORCAS'
__license__ = 'Apache 2.0'
__version__ = '0.1.3'
__release__ = '0.1.3'
# Module namespace.
all_tests = []
| 18.192308
| 62
| 0.684989
| 57
| 473
| 5.157895
| 0.719298
| 0.088435
| 0.115646
| 0.14966
| 0.197279
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064356
| 0.145877
| 473
| 25
| 63
| 18.92
| 0.663366
| 0.336152
| 0
| 0
| 0
| 0
| 0.305921
| 0.069079
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bf7f576395a0ca86f448e1c60010a3d363f6af6
| 468
|
py
|
Python
|
bitcoinExchange/exchange/api/urls.py
|
pogginicolo98/start2impact_exchange
|
559c42cdeb2dec890d4b1145ed66a1a2f7c362cb
|
[
"MIT"
] | 1
|
2021-09-08T16:39:07.000Z
|
2021-09-08T16:39:07.000Z
|
bitcoinExchange/exchange/api/urls.py
|
pogginicolo98/start2impact_exchange
|
559c42cdeb2dec890d4b1145ed66a1a2f7c362cb
|
[
"MIT"
] | null | null | null |
bitcoinExchange/exchange/api/urls.py
|
pogginicolo98/start2impact_exchange
|
559c42cdeb2dec890d4b1145ed66a1a2f7c362cb
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from exchange.api.views import LatestOrdersListAPIView, OrderViewSet, ProfileAPIView
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'orders', OrderViewSet, basename='orders')
urlpatterns = [
path('profile/', ProfileAPIView.as_view(), name='profile-detail'),
path('orders/latest/', LatestOrdersListAPIView.as_view(), name='orders-latest'),
path('', include(router.urls))
]
| 36
| 84
| 0.767094
| 51
| 468
| 6.980392
| 0.54902
| 0.106742
| 0.05618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100427
| 468
| 12
| 85
| 39
| 0.845606
| 0
| 0
| 0
| 0
| 0
| 0.130342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bf8ddafa4dc0ba6cd6a406c255c3270696943bb
| 848
|
py
|
Python
|
kevin/aggregate/process_html.py
|
toddoh/thisisallabout_backend
|
a0c7bad675bd3fff97f99c3e2b49f19a1fef7640
|
[
"MIT"
] | null | null | null |
kevin/aggregate/process_html.py
|
toddoh/thisisallabout_backend
|
a0c7bad675bd3fff97f99c3e2b49f19a1fef7640
|
[
"MIT"
] | 5
|
2021-03-18T22:18:49.000Z
|
2022-03-11T23:40:56.000Z
|
kevin/aggregate/process_html.py
|
toddoh/thisisallabout_backend
|
a0c7bad675bd3fff97f99c3e2b49f19a1fef7640
|
[
"MIT"
] | 1
|
2019-10-16T19:29:12.000Z
|
2019-10-16T19:29:12.000Z
|
from bs4 import BeautifulSoup
import requests
import re
def retrieveText():
print("Parsing text from online target")
url = "https://www.whitehouse.gov/the-press-office/2017/10/16/remarks-president-trump-and-senate-majority-leader-mitch-mcconnell-joint"
response = requests.get(url)
soup = BeautifulSoup(response.content, "lxml")
textwrapper = soup.find("div", { "class" : "field-item" })
textel = textwrapper.find_all("p", { "class" : None })
textstripped = []
for element in textel:
stripped = element.text.replace("\r", "\n").replace("\r", "").replace("\n", "").replace("Q ", "0002reporter: ").replace("THE PRESIDENT: ", "0001president: ").strip()
if "P.M." not in stripped and "A.M." not in stripped:
textstripped.append(stripped)
# print(textstripped)
return textstripped
| 38.545455
| 173
| 0.660377
| 102
| 848
| 5.480392
| 0.656863
| 0.028623
| 0.021467
| 0.050089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024286
| 0.174528
| 848
| 22
| 174
| 38.545455
| 0.774286
| 0.022406
| 0
| 0
| 0
| 0.0625
| 0.299517
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.3125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bf9ff44f1b06f0e0c18c710168ee340dcb2a97f
| 869
|
py
|
Python
|
cfmacro/_resources/examples/lambda.py
|
gchiesa/cfmacro
|
9c546b7930a54a9b44efffdf87401726981e1b2a
|
[
"MIT"
] | null | null | null |
cfmacro/_resources/examples/lambda.py
|
gchiesa/cfmacro
|
9c546b7930a54a9b44efffdf87401726981e1b2a
|
[
"MIT"
] | 1
|
2019-07-30T08:49:20.000Z
|
2019-07-30T08:49:20.000Z
|
cfmacro/_resources/examples/lambda.py
|
gchiesa/cfmacro
|
9c546b7930a54a9b44efffdf87401726981e1b2a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from cfmacro.processors import SgProcessor
from cfmacro.core.engine import ProcessorEngine
from cfmacro.core.template import TemplateProcessor
def lambda_handler(event, context):
"""
Implement a core handler for security groups ingress / egress
:param event:
:param context:
:return:
"""
print(f'event received: {event}')
processor_engine = ProcessorEngine()
processor_engine.register_processor(SgProcessor)
template_processor = TemplateProcessor(processor_engine)
result = template_processor.process(fragment=event['fragment'],
template_params=event['templateParameterValues']).to_dict()
print(f'event processed. Result: \n{result}')
return {
"requestId": event['requestId'],
"status": "success",
"fragment": result
}
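# A hedged sketch (hypothetical values, not part of the original module): the
# handler above only reads the 'fragment', 'templateParameterValues' and
# 'requestId' keys of the incoming CloudFormation macro event, so a minimal
# test event has this shape.
def _example_event():
    return {
        'requestId': 'example-request-id',
        'templateParameterValues': {},
        'fragment': {'Resources': {}},
    }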
| 28.966667
| 99
| 0.674338
| 85
| 869
| 6.788235
| 0.517647
| 0.057192
| 0.051993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001473
| 0.218642
| 869
| 29
| 100
| 29.965517
| 0.848306
| 0.143844
| 0
| 0
| 0
| 0
| 0.179021
| 0.032168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.3125
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bfcaa846cbe80234230889e864b2dd049be6c62
| 8,038
|
py
|
Python
|
tf2qa/predict_long.py
|
mikelkl/TF2-QA
|
3bca786d26565335df45538714532d6d3c070a2b
|
[
"MIT"
] | 17
|
2020-01-29T10:31:07.000Z
|
2022-01-10T03:36:00.000Z
|
tf2qa/predict_long.py
|
mikelkl/TF2-QA
|
3bca786d26565335df45538714532d6d3c070a2b
|
[
"MIT"
] | null | null | null |
tf2qa/predict_long.py
|
mikelkl/TF2-QA
|
3bca786d26565335df45538714532d6d3c070a2b
|
[
"MIT"
] | 4
|
2021-01-27T15:42:45.000Z
|
2021-12-12T20:41:51.000Z
|
import torch
import argparse
from roberta_modeling import RobertaJointForLong
from transformers.modeling_roberta import RobertaConfig, RobertaModel
from torch.utils.data import TensorDataset, SequentialSampler, DataLoader
import utils
from tqdm import tqdm
import os
import json
import collections
import pickle
import pandas as pd
from utils_nq import read_candidates_from_one_split, compute_long_pred
from roberta_long_preprocess import InputLongFeatures
RawResult = collections.namedtuple("RawResult",
["unique_id",
"long_start_logits",
"long_end_logits"])
def load_cached_data(feature_dir, output_features=False, evaluate=False):
features = torch.load(feature_dir)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
if output_features:
return dataset, features
return dataset
def to_list(tensor):
return tensor.detach().cpu().tolist()
def make_submission(output_prediction_file, output_dir):
print("***** Making submmision *****")
test_answers_df = pd.read_json(output_prediction_file)
def create_short_answer(entry):
"""
:param entry: dict
:return: str
"""
if entry['answer_type'] == 0:
return ""
# if entry["short_answers_score"] < 1.5:
# return ""
if entry["yes_no_answer"] != "NONE":
return entry["yes_no_answer"]
answer = []
for short_answer in entry["short_answers"]:
if short_answer["start_token"] > -1:
answer.append(str(short_answer["start_token"]) + ":" + str(short_answer["end_token"]))
return " ".join(answer)
def create_long_answer(entry):
if entry['answer_type'] == 0:
return ''
# if entry["long_answer_score"] < 1.5:
# return ""
answer = []
if entry["long_answer"]["start_token"] > -1:
answer.append(str(entry["long_answer"]["start_token"]) + ":" + str(entry["long_answer"]["end_token"]))
return " ".join(answer)
for var_name in ['long_answer_score', 'short_answers_score', 'answer_type']:
test_answers_df[var_name] = test_answers_df['predictions'].apply(lambda q: q[var_name])
test_answers_df["long_answer"] = test_answers_df["predictions"].apply(create_long_answer)
test_answers_df["short_answer"] = test_answers_df["predictions"].apply(create_short_answer)
test_answers_df["example_id"] = test_answers_df["predictions"].apply(lambda q: str(q["example_id"]))
long_answers = dict(zip(test_answers_df["example_id"], test_answers_df["long_answer"]))
short_answers = dict(zip(test_answers_df["example_id"], test_answers_df["short_answer"]))
sample_submission = pd.read_csv("data/sample_submission.csv")
long_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_long")].apply(
lambda q: long_answers[q["example_id"].replace("_long", "")], axis=1)
short_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_short")].apply(
lambda q: short_answers[q["example_id"].replace("_short", "")], axis=1)
sample_submission.loc[
sample_submission["example_id"].str.contains("_long"), "PredictionString"] = long_prediction_strings
sample_submission.loc[
sample_submission["example_id"].str.contains("_short"), "PredictionString"] = short_prediction_strings
sample_submission.to_csv(os.path.join(output_dir, "submission.csv"), index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_ids", default="0,1,2,3,4,5,6,7", type=str)
parser.add_argument("--eval_batch_size", default=128, type=int)
parser.add_argument("--n_best_size", default=20, type=int)
parser.add_argument("--max_answer_length", default=30, type=int)
parser.add_argument("--float16", default=True, type=bool)
parser.add_argument("--bert_config_file", default='roberta_large/config.json', type=str)
parser.add_argument("--init_restore_dir", default='check_points/roberta-large-long-V00/best_checkpoint.pth', type=str)
parser.add_argument("--predict_file", default='data/simplified-nq-test.jsonl', type=str)
parser.add_argument("--output_dir", default='check_points/roberta-large-long-V00',
type=str)
parser.add_argument("--predict_feat", default='dataset/test_data_maxlen512_roberta_tfidf_features.bin',
type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
print("device %s n_gpu %d" % (device, n_gpu))
print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))
bert_config = RobertaConfig.from_json_file(args.bert_config_file)
model = RobertaJointForLong(RobertaModel(bert_config), bert_config)
utils.torch_show_all_params(model)
utils.torch_init_model(model, args.init_restore_dir)
if args.float16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
dataset, features = load_cached_data(feature_dir=args.predict_feat, output_features=True, evaluate=True)
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
print("***** Running evaluation *****")
print(" Num examples =", len(dataset))
print(" Batch size =", args.eval_batch_size)
all_results = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
input_ids, input_mask, segment_ids, example_indices = batch
inputs = {'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids}
start_logits, end_logits = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = str(eval_feature.unique_id)
result = RawResult(unique_id=unique_id,
long_start_logits=start_logits[i].cpu().numpy(),
long_end_logits=end_logits[i].cpu().numpy())
all_results.append(result)
pickle.dump(all_results, open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'wb'))
# all_results = pickle.load(open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'rb'))
print("Going to candidates file")
candidates_dict = read_candidates_from_one_split(args.predict_file)
print("Compute_pred_dict")
nq_pred_dict = compute_long_pred(candidates_dict, features, all_results, args.n_best_size)
output_prediction_file = os.path.join(args.output_dir, 'test_predictions.json')
print("Saving predictions to", output_prediction_file)
with open(output_prediction_file, 'w') as f:
json.dump({'predictions': list(nq_pred_dict.values())}, f)
# make_submission(output_prediction_file, args.output_dir)
| 43.923497
| 122
| 0.686365
| 1,040
| 8,038
| 4.991346
| 0.206731
| 0.027548
| 0.032556
| 0.013485
| 0.372375
| 0.256983
| 0.219033
| 0.176074
| 0.111732
| 0.090541
| 0
| 0.006448
| 0.189599
| 8,038
| 182
| 123
| 44.164835
| 0.790451
| 0.040806
| 0
| 0.075188
| 0
| 0
| 0.163603
| 0.031938
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037594
| false
| 0
| 0.105263
| 0.007519
| 0.203008
| 0.067669
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bfcf985c108d567ad3614fe9d2baeec4a87e0f1
| 9,385
|
py
|
Python
|
city-infrastructure-platform/settings.py
|
City-of-Helsinki/city-infrastructure-platform
|
c14513a9e54405412085f1047f91ec58b263eac0
|
[
"CC0-1.0"
] | 2
|
2020-11-23T22:08:58.000Z
|
2022-03-02T13:13:20.000Z
|
city-infrastructure-platform/settings.py
|
City-of-Helsinki/city-infrastructure-platform
|
c14513a9e54405412085f1047f91ec58b263eac0
|
[
"CC0-1.0"
] | 170
|
2019-12-31T13:37:04.000Z
|
2022-03-12T14:03:35.000Z
|
city-infrastructure-platform/settings.py
|
City-of-Helsinki/city-infrastructure-platform
|
c14513a9e54405412085f1047f91ec58b263eac0
|
[
"CC0-1.0"
] | 3
|
2020-05-08T05:58:02.000Z
|
2022-03-15T16:07:25.000Z
|
"""
Django settings for city-infrastructure-platform project.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import sentry_sdk
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from helusers.defaults import SOCIAL_AUTH_PIPELINE # noqa: F401
from sentry_sdk.integrations.django import DjangoIntegration
from .utils import git_version
# Set up .env file
checkout_dir = environ.Path(__file__) - 2
assert os.path.exists(checkout_dir("manage.py"))
parent_dir = checkout_dir.path("..")
if parent_dir() != "/" and os.path.isdir(parent_dir("etc")):
env_file = parent_dir("etc/env")
default_var_root = parent_dir("var")
else:
env_file = checkout_dir(".env")
default_var_root = checkout_dir("var")
BASE_DIR = checkout_dir()
env = environ.Env(
DEBUG=(bool, False),
TIER=(str, "dev"), # one of: prod, qa, stage, test, dev
SECRET_KEY=(str, ""),
VAR_ROOT=(str, default_var_root),
ALLOWED_HOSTS=(list, []),
TRUST_X_FORWARDED_HOST=(bool, False),
DATABASE_URL=(
str,
"postgis:///city-infrastructure-platform",
),
CACHE_URL=(str, "locmemcache://"),
EMAIL_URL=(str, "consolemail://"),
SENTRY_DSN=(str, ""),
AZURE_DEPLOYMENT=(bool, False),
AZURE_ACCOUNT_KEY=(str, False),
AZURE_CONTAINER=(str, False),
AZURE_ACCOUNT_NAME=(str, False),
OIDC_AUTHENTICATION_ENABLED=(bool, True),
SOCIAL_AUTH_TUNNISTAMO_KEY=(str, None),
SOCIAL_AUTH_TUNNISTAMO_SECRET=(str, None),
OIDC_API_TOKEN_AUTH_AUDIENCE=(str, None),
OIDC_API_TOKEN_AUTH_ISSUER=(str, None),
TOKEN_AUTH_MAX_TOKEN_AGE=(int, 600),
OIDC_ENDPOINT=(str, None),
HELUSERS_ADGROUPS_CLAIM=(str, "groups"),
LOGGING_AUTH_DEBUG=(bool, False),
OVERLAY_SOURCE_URL=(str, "https://geoserver.hel.fi/geoserver/city-infra/wms"),
BASEMAP_SOURCE_URL=(str, "https://kartta.hel.fi/ws/geoserver/avoindata/wms"),
STATIC_URL=(str, "/static/"),
MEDIA_URL=(str, "/media/"),
)
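# Illustrative .env sketch (placeholder values, not taken from the repository);
# django-environ parses list-typed values as comma-separated strings:
#   DEBUG=True
#   SECRET_KEY=dev-secret-change-me
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   DATABASE_URL=postgis://user:pass@localhost/city-infrastructure-platform
#   OIDC_AUTHENTICATION_ENABLED=False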
if os.path.exists(env_file):
env.read_env(env_file)
SOCIAL_AUTH_TUNNISTAMO_KEY = env("SOCIAL_AUTH_TUNNISTAMO_KEY")
SOCIAL_AUTH_TUNNISTAMO_SECRET = env("SOCIAL_AUTH_TUNNISTAMO_SECRET")
HELUSERS_ADGROUPS_CLAIM = env("HELUSERS_ADGROUPS_CLAIM")
SOCIAL_AUTH_ID_TOKEN_IN_END_SESSION = False
if env("OIDC_ENDPOINT"):
SOCIAL_AUTH_TUNNISTAMO_OIDC_ENDPOINT = env("OIDC_ENDPOINT")
OIDC_API_TOKEN_AUTH = {
"AUDIENCE": env("OIDC_API_TOKEN_AUTH_AUDIENCE"),
"ISSUER": env("OIDC_API_TOKEN_AUTH_ISSUER"),
}
# General settings
DEBUG = env("DEBUG")
OIDC_AUTHENTICATION_ENABLED = env("OIDC_AUTHENTICATION_ENABLED")
TIER = env("TIER")
SECRET_KEY = env("SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "xxx"
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
if OIDC_AUTHENTICATION_ENABLED and (
not SOCIAL_AUTH_TUNNISTAMO_KEY
or not SOCIAL_AUTH_TUNNISTAMO_SECRET
or not OIDC_API_TOKEN_AUTH["AUDIENCE"]
or not OIDC_API_TOKEN_AUTH["ISSUER"]
):
raise ImproperlyConfigured("Authentication not configured properly")
CACHES = {"default": env.cache()}
vars().update(env.email_url()) # EMAIL_BACKEND etc.
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {"class": "logging.NullHandler"},
},
"loggers": {
"django": {"handlers": ["console"], "level": "INFO"},
"helusers": {
"handlers": ["console"],
"level": "DEBUG" if env("LOGGING_AUTH_DEBUG") else "INFO",
"propagate": False,
},
},
}
# Application definition
DJANGO_APPS = [
"helusers",
"social_django",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
]
THIRD_PARTY_APPS = [
"django_extensions",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"drf_yasg",
"django_filters",
"auditlog",
]
LOCAL_APPS = [
"users.apps.UsersConfig",
"traffic_control.apps.TrafficControlConfig",
"map.apps.MapConfig",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
AUTHENTICATION_BACKENDS = (
"helusers.tunnistamo_oidc.TunnistamoOIDCAuth",
"django.contrib.auth.backends.ModelBackend",
)
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "/admin/"
LOGOUT_REDIRECT_URL = "/admin/login/"
SOCIAL_AUTH_TUNNISTAMO_AUTH_EXTRA_ARGUMENTS = {"ui_locales": "fi"}
WAGTAIL_SITE_NAME = _("City Infrastructure Platform")
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
MIDDLEWARE = [
"deployment.middleware.HealthCheckMiddleware",
"azure_client_ip.middleware.AzureClientIPMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
"auditlog.middleware.AuditlogMiddleware",
]
ROOT_URLCONF = "city-infrastructure-platform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [checkout_dir("templates"), checkout_dir("map-view/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "city-infrastructure-platform.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "fi"
LANGUAGES = [("fi", _("Finnish")), ("en", _("English"))]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
var_root = env.path("VAR_ROOT")
STATIC_ROOT = var_root("static")
MEDIA_ROOT = var_root("media")
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_DIRS = [checkout_dir("map-view/build/static")]
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname,
# e.g. when generating absolute URLs pointing to itself.
# Most often used in reverse proxy setups.
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Django REST Framework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"helusers.oidc.ApiTokenAuthentication",
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"PAGE_SIZE": 20,
"OIDC_LEEWAY": env("TOKEN_AUTH_MAX_TOKEN_AGE"),
"GROUP_CLAIM_NAME": "groups",
}
# django-cors
if DEBUG:
CORS_ORIGIN_ALLOW_ALL = True
# Azure CLIENT_IP middleware
AZURE_DEPLOYMENT = env.bool("AZURE_DEPLOYMENT")
if AZURE_DEPLOYMENT:
AZURE_ACCOUNT_KEY = env.str("AZURE_ACCOUNT_KEY")
AZURE_CONTAINER = env.str("AZURE_CONTAINER")
AZURE_ACCOUNT_NAME = env.str("AZURE_ACCOUNT_NAME")
DEFAULT_FILE_STORAGE = "storages.backends.azure_storage.AzureStorage"
# Sentry-SDK
SENTRY_DSN = env.str("SENTRY_DSN")
VERSION = git_version()
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()], release=VERSION)
# Custom settings
SRID = 3879 # the spatial reference id used for geometries
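# EPSG:3879 (ETRS89 / GK25FIN) is the plane coordinate system used by the City of Helsinki.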
OVERLAY_SOURCE_URL = env.str("OVERLAY_SOURCE_URL")
BASEMAP_SOURCE_URL = env.str("BASEMAP_SOURCE_URL")
LOCALE_PATHS = [
"./templates/locale",
]
| 31.599327
| 90
| 0.716356
| 1,089
| 9,385
| 5.909091
| 0.312213
| 0.036364
| 0.03108
| 0.017405
| 0.102409
| 0.072883
| 0.034965
| 0.030458
| 0.018648
| 0
| 0
| 0.00402
| 0.151838
| 9,385
| 296
| 91
| 31.706081
| 0.804397
| 0.11593
| 0
| 0
| 0
| 0
| 0.412491
| 0.271
| 0
| 0
| 0
| 0
| 0.004464
| 1
| 0
| false
| 0.022321
| 0.035714
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bfd7e8367e5e96a626394bb27f0b9266054e693
| 1,184
|
py
|
Python
|
test/tc/tet_tc_base_predict_multiclass.py
|
dumpmemory/Pytorch-NLU
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
[
"Apache-2.0"
] | 115
|
2021-08-29T04:28:40.000Z
|
2022-03-29T22:57:48.000Z
|
test/tc/tet_tc_base_predict_multiclass.py
|
dumpmemory/Pytorch-NLU
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
[
"Apache-2.0"
] | 2
|
2022-01-14T01:52:07.000Z
|
2022-03-04T11:40:10.000Z
|
test/tc/tet_tc_base_predict_multiclass.py
|
dumpmemory/Pytorch-NLU
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
[
"Apache-2.0"
] | 18
|
2021-09-23T06:41:10.000Z
|
2022-03-22T04:37:05.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/25 19:30
# @author : Mo
# @function: predict model, prediction module for multi-class text classification
# adapted for Linux
import platform
import json
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
path_sys = os.path.join(path_root, "pytorch_nlu", "pytorch_textclassification")
sys.path.append(path_sys)  # make the tcPredict module importable
print(path_root)
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from tcPredict import TextClassificationPredict
if __name__ == "__main__":
path_config = "../output/text_classification/model_ERNIE/tc.config"
tcp = TextClassificationPredict(path_config)
texts = [{"text": "平乐县,古称昭州,隶属于广西壮族自治区桂林市,位于广西东北部,桂林市东南部,东临钟山县,南接昭平,西北毗邻阳朔,北连恭城,总面积1919.34平方公里。"},
{"text": "平乐县主要旅游景点有榕津千年古榕、冷水石景苑、仙家温泉、桂江风景区、漓江风景区等,平乐县为漓江分界点,平乐以北称漓江,以南称桂江,是著名的大桂林旅游区之一。"},
{"text": "印岭玲珑,昭水晶莹,环绕我平中。青年的乐园,多士受陶熔。生活自觉自治,学习自发自动。五育并重,手脑并用。迎接新潮流,建设新平中"},
{"text": "桂林山水甲天下, 阳朔山水甲桂林"},
]
res = tcp.predict(texts, logits_type="sigmoid")
print(res)
while True:
print("请输入:")
question = input()
res = tcp.predict([{"text": question}], logits_type="sigmoid")
print(res)
| 32
| 104
| 0.663007
| 143
| 1,184
| 5.307692
| 0.671329
| 0.039526
| 0.02635
| 0.057971
| 0.065876
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019427
| 0.173986
| 1,184
| 36
| 105
| 32.888889
| 0.756646
| 0.139358
| 0
| 0.086957
| 0
| 0.086957
| 0.368317
| 0.291089
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0.173913
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1bff51099f471eb1158044ba33a024f093e0aed7
| 3,079
|
py
|
Python
|
bin/nsa_fail/nsa_fail.py
|
changhoonhahn/SEDflow
|
4561ecfe3a38cc4c25df263d971a87e8a83f88ce
|
[
"MIT"
] | 18
|
2022-03-16T03:11:04.000Z
|
2022-03-30T16:01:42.000Z
|
bin/nsa_fail/nsa_fail.py
|
changhoonhahn/SEDflow
|
4561ecfe3a38cc4c25df263d971a87e8a83f88ce
|
[
"MIT"
] | null | null | null |
bin/nsa_fail/nsa_fail.py
|
changhoonhahn/SEDflow
|
4561ecfe3a38cc4c25df263d971a87e8a83f88ce
|
[
"MIT"
] | null | null | null |
import os, sys
import numpy as np
from sedflow import obs as Obs
from sedflow import train as Train
from provabgs import infer as Infer
from provabgs import models as Models
####################################################
# input
####################################################
sample = sys.argv[1]
itrain = int(sys.argv[2])
nhidden = int(sys.argv[3])
nblocks = int(sys.argv[4])
niter = int(sys.argv[5])
i0 = int(sys.argv[6])
i1 = int(sys.argv[7])
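# Illustrative invocation (hypothetical argument values, not from the original repo):
#   python nsa_fail.py toy 0 500 15 3000 0 9
# -> sample='toy', itrain=0, nhidden=500, nblocks=15, niter=3000, galaxies 0..9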
####################################################
# compile NSA failures
####################################################
# u, g, r, i, z, sigma_u, sigma_g, sigma_r, sigma_i, sigma_z, redshift
y_nsa = Obs.load_nsa_data(test_set=False)
igals = np.load('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')
# convert to flux
y_flux = Train.mag2flux(y_nsa[:,:5])
y_ivar = Train.sigma_mag2flux(y_nsa[:,5:10], y_nsa[:,:5])**-2
y_zred = y_nsa[:,-1]
####################################################
# setup inference
####################################################
# SPS parameter priors
prior_sps = Infer.load_priors([
Infer.UniformPrior(7., 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat Dirichlet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index
])
# SPS model
m_sps = Models.NMF(burst=True, emulator=True)
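# NMF-based SPS model; burst=True adds a starburst component and emulator=True
# evaluates a trained neural emulator instead of running the SPS code directly
# (an interpretation of the flags, not stated in the original script).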
def run_mcmc(i_obs):
# NSA MCMC object
nsa_mcmc = Infer.nsaMCMC(model=m_sps, prior=prior_sps)
fmcmc = os.path.join('/scratch/network/chhahn/sedflow/nsa_fail',
'mcmc.nsa.%i.hdf5' % i_obs)
if not os.path.isfile(fmcmc):
print('%s running' % os.path.basename(fmcmc))
if not np.all(np.isfinite(y_flux[i_obs])):
print('NaN photometry', y_flux[i_obs])
return None
if not np.all(np.isfinite(y_ivar[i_obs])):
print('NaN ivar', y_ivar[i_obs])
return None
# run MCMC
zeus_chain = nsa_mcmc.run(
bands='sdss', # u, g, r, i, z
photo_obs=y_flux[i_obs],
photo_ivar_obs=y_ivar[i_obs],
zred=y_zred[i_obs],
vdisp=0.,
sampler='zeus',
nwalkers=30,
burnin=0,
opt_maxiter=2000,
niter=niter,
progress=True,
writeout=fmcmc)
else:
print('%s already exists' % os.path.basename(fmcmc))
return None
for i in range(i0, i1+1):
run_mcmc(igals[i])
| 33.107527
| 92
| 0.528743
| 405
| 3,079
| 3.896296
| 0.34321
| 0.045627
| 0.038023
| 0.039924
| 0.221166
| 0.200253
| 0.157161
| 0.130545
| 0.130545
| 0.080482
| 0
| 0.027237
| 0.248782
| 3,079
| 92
| 93
| 33.467391
| 0.654994
| 0.126015
| 0
| 0.116667
| 0
| 0
| 0.082627
| 0.040254
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.1
| 0
| 0.166667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
400075fe46c49c54066ef8f12574919b2debe75a
| 2,709
|
py
|
Python
|
studio/gs_provider.py
|
NunoEdgarGFlowHub/studio
|
42b221892a81535842ff25cbbcc434d6422a19e5
|
[
"Apache-2.0"
] | null | null | null |
studio/gs_provider.py
|
NunoEdgarGFlowHub/studio
|
42b221892a81535842ff25cbbcc434d6422a19e5
|
[
"Apache-2.0"
] | null | null | null |
studio/gs_provider.py
|
NunoEdgarGFlowHub/studio
|
42b221892a81535842ff25cbbcc434d6422a19e5
|
[
"Apache-2.0"
] | null | null | null |
import json
import time
import re
from .keyvalue_provider import KeyValueProvider
from .gcloud_artifact_store import GCloudArtifactStore
from .util import timeit
class GSProvider(KeyValueProvider):
def __init__(self, config, blocking_auth=True, verbose=10, store=None):
self.config = config
self.bucket = config.get('bucket', 'studioml-meta')
self.meta_store = GCloudArtifactStore(config, verbose)
super(GSProvider, self).__init__(
config,
blocking_auth,
verbose,
store)
@timeit
def _get(self, key, shallow=False):
bucket = self.meta_store._get_bucket_obj()
retval = {}
if shallow:
blob_iterator = bucket.list_blobs(
prefix=key, delimiter='/')
bloblist = list(blob_iterator)
blobnames = {b.name for b in bloblist}
prefixes = blob_iterator.prefixes
suffixes = [re.sub('^' + key, '', p) for p in prefixes | blobnames]
retval = set({})
for s in suffixes:
if s.endswith('/'):
retval.add(s[:-1])
else:
retval.add(s)
return retval
else:
blob_iterator = bucket.list_blobs(prefix=key)
for blob in blob_iterator:
suffix = re.sub('^' + key, '', blob.name)
if suffix == '':
return json.loads(blob.download_as_string())
path = suffix.split('/')
path = [p for p in path if p != '']
current_dict = retval
for subdir in path[:-1]:
if subdir != '':
if subdir not in current_dict.keys():
current_dict[subdir] = {}
current_dict = current_dict[subdir]
try:
current_dict[path[-1]] = json.loads(
blob.download_as_string())
except BaseException:
pass
if not any(retval):
return None
else:
return retval
def _delete(self, key):
self.meta_store._delete_file(key)
def _set(self, key, value):
no_retries = 10
sleep_time = 1
for i in range(no_retries):
try:
self.meta_store._get_bucket_obj().blob(key) \
.upload_from_string(json.dumps(value))
break
except BaseException as e:
self.logger.error('uploading data raised an exception:')
self.logger.exception(e)
time.sleep(sleep_time)
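# Illustrative usage sketch (assumes Google Cloud credentials are already
# configured for google-cloud-storage; the bucket name is a placeholder):
#     provider = GSProvider({'bucket': 'my-studioml-meta'})
#     provider._set('experiments/exp1', {'status': 'running'})
#     print(provider._get('experiments/', shallow=True))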
| 31.137931
| 79
| 0.51495
| 279
| 2,709
| 4.817204
| 0.344086
| 0.049107
| 0.03869
| 0.02381
| 0.133929
| 0.133929
| 0.053571
| 0
| 0
| 0
| 0
| 0.00486
| 0.392396
| 2,709
| 86
| 80
| 31.5
| 0.811665
| 0
| 0
| 0.098592
| 0
| 0
| 0.021779
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056338
| false
| 0.014085
| 0.084507
| 0
| 0.211268
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
40007ef606785b22cbc7c72b9274d6584b3f3fb5
| 46,830
|
py
|
Python
|
gslib/tests/test_ls.py
|
MikeJeffrey/gsutil
|
12f4258540ee83aee255ec1baf50e7e6faee10e2
|
[
"Apache-2.0"
] | null | null | null |
gslib/tests/test_ls.py
|
MikeJeffrey/gsutil
|
12f4258540ee83aee255ec1baf50e7e6faee10e2
|
[
"Apache-2.0"
] | null | null | null |
gslib/tests/test_ls.py
|
MikeJeffrey/gsutil
|
12f4258540ee83aee255ec1baf50e7e6faee10e2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ls command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from datetime import datetime
import os
import posixpath
import re
import stat
import subprocess
import sys
import time
import gslib
from gslib.commands import ls
from gslib.cs_api_map import ApiSelector
from gslib.project_id import PopulateProjectId
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.util import CaptureStdout
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import RUN_S3_TESTS
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_MD5
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY2_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import TEST_ENCRYPTION_KEY3_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY4
from gslib.tests.util import TEST_ENCRYPTION_KEY4_SHA256_B64
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import UTF8
from gslib.utils.ls_helper import PrintFullInfoAboutObject
from gslib.utils.retry_util import Retry
from gslib.utils.system_util import IS_WINDOWS
from six import add_move, MovedModule
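# Register a movable module so `from six.moves import mock` resolves to the
# stdlib unittest.mock on Python 3 (and the external mock package on Python 2).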
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from six.moves import mock
KMS_XML_SKIP_MSG = ('gsutil does not support KMS operations for S3 buckets, '
'or listing KMS keys with the XML API.')
BUCKET_LOCK_SKIP_MSG = ('gsutil does not support bucket lock operations for '
'S3 buckets or listing retention policy with XML API.')
class TestLsUnit(testcase.GsUtilUnitTestCase):
"""Unit tests for ls command."""
def test_one_object_with_L_storage_class_update(self):
"""Tests the JSON storage class update time field."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'XML API has no concept of storage class update time')
# Case 1: Create an object message where Storage class update time is the
# same as Creation time.
current_time = datetime(2017, 1, 2, 3, 4, 5, 6, tzinfo=None)
obj_metadata = apitools_messages.Object(
name='foo',
bucket='bar',
timeCreated=current_time,
updated=current_time,
timeStorageClassUpdated=current_time,
etag='12345')
# Create mock object to point to obj_metadata.
obj_ref = mock.Mock()
obj_ref.root_object = obj_metadata
obj_ref.url_string = 'foo'
# Print out attributes of object message.
with CaptureStdout() as output:
PrintFullInfoAboutObject(obj_ref)
output = '\n'.join(output)
# Verify that no Storage class update time field displays since it's the
# same as Creation time.
find_stor_update_re = re.compile(
r'^\s*Storage class update time:+(?P<stor_update_time_val>.+)$',
re.MULTILINE)
stor_update_time_match = re.search(find_stor_update_re, output)
self.assertIsNone(stor_update_time_match)
# Case 2: Create an object message where Storage class update time differs
# from Creation time.
new_update_time = datetime(2017, 2, 3, 4, 5, 6, 7, tzinfo=None)
obj_metadata2 = apitools_messages.Object(
name='foo2',
bucket='bar2',
timeCreated=current_time,
updated=current_time,
timeStorageClassUpdated=new_update_time,
etag='12345')
# Create mock object to point to obj_metadata2.
obj_ref2 = mock.Mock()
obj_ref2.root_object = obj_metadata2
obj_ref2.url_string = 'foo2'
# Print out attributes of object message.
with CaptureStdout() as output2:
PrintFullInfoAboutObject(obj_ref2)
output2 = '\n'.join(output2)
# Verify that Creation time and Storage class update time fields display and
# are the same as the times set in the object message.
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
time_created_match = re.search(find_time_created_re, output2)
self.assertIsNotNone(time_created_match)
time_created = time_created_match.group('time_created_val')
self.assertEqual(
time_created,
datetime.strftime(current_time, '%a, %d %b %Y %H:%M:%S GMT'))
find_stor_update_re_2 = re.compile(
r'^\s*Storage class update time:+(?P<stor_update_time_val_2>.+)$',
re.MULTILINE)
stor_update_time_match_2 = re.search(find_stor_update_re_2, output2)
self.assertIsNotNone(stor_update_time_match_2)
stor_update_time = stor_update_time_match_2.group('stor_update_time_val_2')
self.assertEqual(
stor_update_time,
datetime.strftime(new_update_time, '%a, %d %b %Y %H:%M:%S GMT'))
@mock.patch.object(ls.LsCommand, 'WildcardIterator')
def test_satisfies_pzs_is_displayed_if_present(self, mock_wildcard):
bucket_uri = self.CreateBucket(bucket_name='foo')
bucket_metadata = apitools_messages.Bucket(name='foo', satisfiesPZS=True)
bucket_uri.root_object = bucket_metadata
bucket_uri.url_string = 'foo'
bucket_uri.storage_url = mock.Mock()
mock_wildcard.return_value.IterBuckets.return_value = [bucket_uri]
# MockKey doesn't support hash_algs, so the MD5 will not match.
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
stdout = self.RunCommand('ls', ['-Lb', suri(bucket_uri)],
return_stdout=True)
self.assertRegex(stdout, 'Satisfies PZS:\t\t\tTrue')
class TestLs(testcase.GsUtilIntegrationTestCase):
"""Integration tests for ls command."""
def test_blank_ls(self):
if not RUN_S3_TESTS: # Blank `ls` command lists GS buckets.
self.RunGsUtil(['ls'])
def test_empty_bucket(self):
bucket_uri = self.CreateBucket()
self.AssertNObjectsInBucket(bucket_uri, 0)
def test_empty_bucket_with_b(self):
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s/\n' % suri(bucket_uri), stdout)
_Check1()
def test_bucket_with_Lb(self):
"""Tests ls -Lb."""
bucket_uri = self.CreateBucket()
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
# Check that the bucket URI is displayed.
self.assertIn(suri(bucket_uri), stdout)
# Check that we don't see output corresponding to listing objects rather
# than buckets.
self.assertNotIn('TOTAL:', stdout)
# Toggle versioning on the bucket so that the modification time will be
# greater than the creation time.
self.RunGsUtil(['versioning', 'set', 'on', suri(bucket_uri)])
self.RunGsUtil(['versioning', 'set', 'off', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
find_metageneration_re = re.compile(
r'^\s*Metageneration:\s+(?P<metageneration_val>.+)$', re.MULTILINE)
find_time_created_re = re.compile(
r'^\s*Time created:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Time updated:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
metageneration_match = re.search(find_metageneration_re, stdout)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
if self.test_api == ApiSelector.XML:
# Check that lines for JSON-specific fields are not displayed.
self.assertIsNone(metageneration_match)
self.assertIsNone(time_created_match)
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
# Check that time created/updated lines are displayed.
self.assertIsNotNone(metageneration_match)
self.assertIsNotNone(time_created_match)
self.assertIsNotNone(time_updated_match)
# Check that updated time > created time.
time_created = time_created_match.group('time_created_val')
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
time_updated = time_updated_match.group('time_updated_val')
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
# Check the bucket policy only fields.
self._AssertBucketPolicyOnly(False, stdout)
def test_bucket_with_Lb_bucket_policy_only(self):
if self.test_api == ApiSelector.JSON:
bucket_uri = self.CreateBucket(bucket_policy_only=True,
prefer_json_api=True)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)],
return_stdout=True)
self._AssertBucketPolicyOnly(True, stdout)
def _AssertBucketPolicyOnly(self, value, stdout):
bucket_policy_only_re = re.compile(
r'^\s*Bucket Policy Only enabled:\s+(?P<bpo_val>.+)$', re.MULTILINE)
bucket_policy_only_match = re.search(bucket_policy_only_re, stdout)
bucket_policy_only_val = bucket_policy_only_match.group('bpo_val')
self.assertEqual(str(value), bucket_policy_only_val)
def test_bucket_with_lb(self):
"""Tests ls -lb."""
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-lb', suri(bucket_uri)],
return_stdout=True)
self.assertIn(suri(bucket_uri), stdout)
self.assertNotIn('TOTAL:', stdout)
_Check1()
def test_bucket_list_wildcard(self):
"""Tests listing multiple buckets with a wildcard."""
random_prefix = self.MakeRandomTestString()
bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
# This just double checks that the common prefix of the two buckets is what
# we think it should be (based on implementation detail of CreateBucket).
# We want to be careful when setting a wildcard on buckets to make sure we
# don't step outside the test buckets to affect other buckets.
common_prefix = posixpath.commonprefix(
[suri(bucket1_uri), suri(bucket2_uri)])
self.assertTrue(
common_prefix.startswith(
'%s://%sgsutil-test-test-bucket-list-wildcard' %
(self.default_provider, random_prefix)))
wildcard = '%s*' % common_prefix
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', wildcard], return_stdout=True)
expected = set([suri(bucket1_uri) + '/', suri(bucket2_uri) + '/'])
actual = set(stdout.split())
self.assertEqual(expected, actual)
_Check1()
def test_nonexistent_bucket_with_ls(self):
"""Tests a bucket that is known not to exist."""
stderr = self.RunGsUtil(
['ls', '-lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-Lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-b', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
def test_list_missing_object(self):
"""Tests listing a non-existent object."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'missing')],
return_stderr=True,
expected_status=1)
self.assertIn('matched no objects', stderr)
def test_with_one_object(self):
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
_Check1()
def location_redirect_test_helper(self, bucket_region, client_region):
bucket_host = 's3.%s.amazonaws.com' % bucket_region
client_host = 's3.%s.amazonaws.com' % client_region
with SetBotoConfigForTest([('s3', 'host', bucket_host)]):
bucket_uri = self.CreateBucket(location=bucket_region)
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1(uri):
stdout = self.RunGsUtil(['ls', uri], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
with SetBotoConfigForTest([('s3', 'host', client_host)]):
# sends a GET request
_Check1(suri(bucket_uri))
# sends a HEAD request, meaning error body is not included.
_Check1(suri(obj_uri))
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_400_location_redirect(self):
# ap-east-1 used here since regions launched before March 20, 2019 do
# some temporary redirecting for new buckets which suppresses 400 errors.
self.location_redirect_test_helper('ap-east-1', 'us-east-2')
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_301_location_redirect(self):
self.location_redirect_test_helper('eu-west-1', 'us-east-2')
@SkipForXML('Credstore file gets created only for json API')
def test_credfile_lock_permissions(self):
tmpdir = self.CreateTempDir()
filepath = os.path.join(tmpdir, 'credstore2')
option = 'GSUtil:state_dir={}'.format(tmpdir)
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['-o', option, 'ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
if os.name == 'posix':
self.assertTrue(os.path.exists(filepath))
mode = oct(stat.S_IMODE(os.stat(filepath).st_mode))
# Assert that only user has read/write permission
self.assertEqual(oct(0o600), mode)
_Check1()
def test_one_object_with_l(self):
"""Tests listing one object with -l."""
obj_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-l', suri(obj_uri)], return_stdout=True)
output_items = stdout.split()
self.assertTrue(output_items[0].isdigit())
# Throws exception if time string is not formatted correctly.
time.strptime(stdout.split()[1], '%Y-%m-%dT%H:%M:%SZ')
self.assertEqual(output_items[2], suri(obj_uri))
def test_one_object_with_L(self):
"""Tests listing one object with -L."""
obj_uri = self.CreateObject(contents=b'foo')
# Ensure that creation and update don't take place in the same second.
time.sleep(2)
# Check that the creation time, rather than the updated time, is displayed.
self.RunGsUtil(['setmeta', '-h', 'x-goog-meta-foo:bar', suri(obj_uri)])
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Update time:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
time_created = time_created_match.group('time_created_val')
self.assertIsNotNone(time_created)
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
if self.test_api == ApiSelector.XML:
# XML API has no concept of updated time.
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
time_updated = time_updated_match.group('time_updated_val')
self.assertIsNotNone(time_updated)
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
def test_subdir(self):
"""Tests listing a bucket subdirectory."""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '%s/dir' % suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s\n' % suri(k2_uri), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_subdir_nocontents(self):
"""Tests listing a bucket subdirectory using -d.
Result will display subdirectory names instead of contents. Uses a wildcard
to show multiple matching subdirectories.
"""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
k3_uri = bucket_uri.clone_replace_name('dir/foo2')
k3_uri.set_contents_from_string('foo')
k4_uri = bucket_uri.clone_replace_name('dir2/foo3')
k4_uri.set_contents_from_string('foo2')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['ls', '-d', '%s/dir*' % suri(bucket_uri)], return_stdout=True)
self.assertEqual(
'%s/dir/\n%s/dir2/\n' % (suri(bucket_uri), suri(bucket_uri)), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_versioning(self):
"""Tests listing a versioned bucket."""
bucket1_uri = self.CreateBucket(test_objects=1)
bucket2_uri = self.CreateVersionedBucket(test_objects=1)
self.AssertNObjectsInBucket(bucket1_uri, 1, versioned=True)
bucket_list = list(bucket1_uri.list_bucket())
objuri = [
bucket1_uri.clone_replace_key(key).versionless_uri
for key in bucket_list
][0]
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-a', suri(bucket2_uri)],
return_stdout=True)
self.assertNumLines(stdout, 3)
stdout = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True)
self.assertIn('%s#' % bucket2_uri.clone_replace_name(bucket_list[0].name),
stdout)
self.assertIn('metageneration=', stdout)
_Check2()
def test_etag(self):
"""Tests that listing an object with an etag."""
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
etag = obj_uri.get_key().etag.strip('"\'')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertNotIn(etag, stdout)
else:
self.assertNotIn('etag=', stdout)
_Check1()
def _Check2():
stdout = self.RunGsUtil(['ls', '-le', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check2()
def _Check3():
stdout = self.RunGsUtil(['ls', '-ale', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check3()
def test_labels(self):
"""Tests listing on a bucket with a label/tagging configuration."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# No labels are present by default.
self.assertRegex(stdout, r'Labels:\s+None')
# Add a label and check that it shows up.
self.RunGsUtil(['label', 'ch', '-l', 'labelkey:labelvalue', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
label_regex = re.compile(r'Labels:\s+\{\s+"labelkey":\s+"labelvalue"\s+\}',
re.MULTILINE)
self.assertRegex(stdout, label_regex)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_location_constraint(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location constraint should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location constraint:', stdout)
# Default location constraint is US
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# Default location may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
self.assertRegex(stdout, r'Location constraint:\s+\S')
# TODO(b/135700569): Stop skipping this once this field is available to all
# projects.
@unittest.skip('b/135700569')
@SkipForXML('Location type not available when using the GCS XML API.')
@SkipForS3('Location type not printed for S3 buckets.')
def test_location_type(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location type should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location type:', stdout)
# Default location type may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertRegex(stdout, r'Location type:\s+\S')
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_logging(self):
"""Tests listing a bucket with logging config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No logging info
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Logging configuration', stdout)
# Logging configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
# Enable and check
self.RunGsUtil(['logging', 'set', 'on', '-b', bucket_suri, bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tPresent', stdout)
# Disable and check
self.RunGsUtil(['logging', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_web(self):
"""Tests listing a bucket with website config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No website configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Website configuration', stdout)
# Website configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['web', 'set', '-m', 'google.com', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tPresent', stdout)
# Clear and check
self.RunGsUtil(['web', 'set', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
@SkipForXML('Requester Pays is not supported for the XML API.')
def test_requesterpays(self):
"""Tests listing a bucket with requester pays (billing) config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No requester pays configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Requester Pays enabled', stdout)
# Requester Pays configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['requesterpays', 'set', 'on', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tTrue', stdout)
# Clear and check
self.RunGsUtil(['requesterpays', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tFalse', stdout)
def test_list_sizes(self):
"""Tests various size listing options."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, contents=b'x' * 2048)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check2()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-al', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check4():
stdout = self.RunGsUtil(['ls', '-lh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check4()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check5():
stdout = self.RunGsUtil(['ls', '-alh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check5()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
def test_list_unicode_filename(self):
"""Tests listing an object with a unicode filename."""
# Note: This test fails on Windows (command.exe). I was able to get ls to
# output Unicode filenames correctly by hacking the UniStream class code
# shown at
# http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271
# into the start of gslib/commands/ls.py, along with no-op flush and
# isastream functions (as an experiment). However, even with that change,
# the current test still fails, since it also needs to run that
# stdout/stderr-replacement code. That UniStream class replacement really
# needs to be added to the site-packages on Windows python.
object_name = u'Аудиоархив'
bucket_uri = self.CreateVersionedBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo',
object_name=object_name)
self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
stdout = self.RunGsUtil(['ls', '-ael', suri(key_uri)], return_stdout=True)
self.assertIn(object_name, stdout)
if self.default_provider == 'gs':
self.assertIn(str(key_uri.generation), stdout)
self.assertIn('metageneration=%s' % key_uri.get_key().metageneration,
stdout)
if self.test_api == ApiSelector.XML:
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
else:
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
self.assertIn('etag=', stdout)
elif self.default_provider == 's3':
self.assertIn(key_uri.version_id, stdout)
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
def test_list_acl(self):
"""Tests that long listing includes an ACL."""
key_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertIn('ACL:', stdout)
self.assertNotIn('ACCESS DENIED', stdout)
def test_list_gzip_content_length(self):
"""Tests listing a gzipped object."""
file_size = 10000
file_contents = b'x' * file_size
fpath = self.CreateTempFile(contents=file_contents, file_name='foo.txt')
key_uri = self.CreateObject()
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath), suri(key_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
find_content_length_re = r'Content-Length:\s+(?P<num>\d)'
self.assertRegex(stdout, find_content_length_re)
m = re.search(find_content_length_re, stdout)
content_length = int(m.group('num'))
self.assertGreater(content_length, 0)
self.assertLess(content_length, file_size)
_Check1()
def test_output_chopped(self):
"""Tests that gsutil still succeeds with a truncated stdout."""
bucket_uri = self.CreateBucket(test_objects=2)
# Run Python with the -u flag so output is not buffered.
gsutil_cmd = [
sys.executable, '-u', gslib.GSUTIL_PATH, 'ls',
suri(bucket_uri)
]
# Set bufsize to 0 to make sure output is not buffered.
p = subprocess.Popen(gsutil_cmd, stdout=subprocess.PIPE, bufsize=0)
# Immediately close the stdout pipe so that gsutil gets a broken pipe error.
p.stdout.close()
p.wait()
# Make sure it still exited cleanly.
self.assertEqual(p.returncode, 0)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_slash_only(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='/', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/', stdout)
def test_recursive_list_trailing_slash(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/',
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/foo/', stdout)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_trailing_two_slash(self):
"""Tests listing an object with two trailing slashes."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='//', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '//', stdout)
def test_wildcard_prefix(self):
"""Tests that an object name with a wildcard does not infinite loop."""
bucket_uri = self.CreateBucket()
wildcard_folder_object = 'wildcard*/'
object_matching_folder = 'wildcard10/foo'
self.CreateObject(bucket_uri=bucket_uri,
object_name=wildcard_folder_object,
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name=object_matching_folder,
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 2)
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'wildcard*')],
return_stderr=True,
expected_status=1)
self.assertIn(
'Cloud folder %s%s contains a wildcard' %
(suri(bucket_uri), '/wildcard*/'), stderr)
# Listing with a flat wildcard should still succeed.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri, '**')],
return_stdout=True)
self.assertNumLines(stdout, 3) # 2 object lines, one summary line.
_Check()
@SkipForS3('S3 anonymous access is not supported.')
def test_get_object_without_list_bucket_permission(self):
# Bucket is not publicly readable by default.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='permitted',
contents=b'foo')
# Set this object to be publicly readable.
self.RunGsUtil(['acl', 'set', 'public-read', suri(object_uri)])
# Drop credentials.
with self.SetAnonymousBotoCreds():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(suri(object_uri), stdout)
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_encrypted_object(self):
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_uri = self.CreateObject(object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
# Listing object with key should return unencrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY1)]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectDecrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectDecrypted()
# Listing object without a key should return encrypted hashes.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectEncrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectEncrypted()
# Listing object with a non-matching key should return encrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY2)]):
_ListExpectEncrypted()
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_mixed_encryption(self):
"""Tests listing objects with various encryption interactions."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo2',
contents=TEST_ENCRYPTION_CONTENT2,
encryption_key=TEST_ENCRYPTION_KEY2)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo3',
contents=TEST_ENCRYPTION_CONTENT3,
encryption_key=TEST_ENCRYPTION_KEY3)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo4',
contents=TEST_ENCRYPTION_CONTENT4,
encryption_key=TEST_ENCRYPTION_KEY4)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo5',
contents=TEST_ENCRYPTION_CONTENT5)
# List 5 objects, one encrypted with each of four keys, and one
# unencrypted. Supplying keys [1,3,4] should result in four unencrypted
# listings and one encrypted listing (for key 2).
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY4)
]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectMixed():
"""Validates object listing."""
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY2_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY3_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY4_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_CRC32C, stdout)
_ListExpectMixed()
def test_non_ascii_project_fails(self):
stderr = self.RunGsUtil(['ls', '-p', 'ã', 'gs://fobarbaz'],
expected_status=1,
return_stderr=True)
self.assertIn('Invalid non-ASCII', stderr)
def set_default_kms_key_on_bucket(self, bucket_uri):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(
keyring_fqn, testcase.KmsTestingResources.CONSTANT_KEY_NAME)
# Make sure that the service account for the desired bucket's parent project
# is authorized to encrypt with the key above.
self.RunGsUtil(['kms', 'encryption', '-k', key_fqn, suri(bucket_uri)])
return key_fqn
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_default_kms_key_listed_for_bucket(self):
bucket_uri = self.CreateBucket()
# Default KMS key is not set by default.
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+None')
# Default KMS key's name should be listed after being set on the bucket.
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+%s' % key_fqn)
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_kms_key_listed_for_kms_encrypted_object(self):
bucket_uri = self.CreateBucket()
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
# Copy an object into our bucket and encrypt using the key from above.
obj_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo',
kms_key_name=key_fqn)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
self.assertRegex(stdout, r'KMS key:\s+%s' % key_fqn)
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_retention_policy(self):
bucket_uri = self.CreateBucketWithRetentionPolicy(
retention_period_in_seconds=1)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Retention Policy\:\t*Present')
# Clearing Retention Policy on the bucket.
self.RunGsUtil(['retention', 'clear', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Retention Policy:')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_default_event_based_hold(self):
bucket_uri = self.CreateBucket()
self.RunGsUtil(['retention', 'event-default', 'set', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default Event-Based Hold:\t* *True')
# Clearing the default Event-Based Hold on the bucket.
self.RunGsUtil(['retention', 'event-default', 'release', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Default Event-Based Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_temporary_hold(self):
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'temp', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Temporary Hold')
# Clearing the Temporary Hold on the object.
self.RunGsUtil(['retention', 'temp', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Temporary Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_event_based_hold(self):
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'event', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Event-Based Hold')
# Clearing the Event-Based Hold on the object.
self.RunGsUtil(['retention', 'event', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Event-Based Hold')
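# For reference (a sketch, not part of the tests): the SetBotoConfigForTest
# overrides used in the encryption listing test above correspond roughly to
# this section of a user's .boto file, with placeholder values rather than
# real base64 CSEK material:
#
#   [GSUtil]
#   encryption_key = <base64 CSEK used to encrypt newly written objects>
#   decryption_key1 = <additional base64 CSEK tried when decrypting>
#   decryption_key2 = <additional base64 CSEK tried when decrypting>
#
# Objects encrypted with a key that matches none of these settings are listed
# with their hashes reported as 'encrypted', which is what the assertions
# above check.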
| 44.137606
| 103
| 0.688661
| 6,037
| 46,830
| 5.140136
| 0.118105
| 0.041185
| 0.034321
| 0.043312
| 0.608714
| 0.571461
| 0.5189
| 0.488415
| 0.433341
| 0.39757
| 0
| 0.013385
| 0.197523
| 46,830
| 1,060
| 104
| 44.179245
| 0.812347
| 0.177258
| 0
| 0.432895
| 0
| 0
| 0.112742
| 0.016274
| 0
| 0
| 0
| 0.000943
| 0.205263
| 1
| 0.093421
| false
| 0
| 0.072368
| 0
| 0.172368
| 0.002632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4001b461738a1a675ced54e42a87a9e7681bbab2
| 2,217
|
py
|
Python
|
places/management/commands/load_places.py
|
aevtikheev/dvmn-yandex-afisha
|
7112977d6615124412b7e7ffc4abdcaa969b4078
|
[
"MIT"
] | null | null | null |
places/management/commands/load_places.py
|
aevtikheev/dvmn-yandex-afisha
|
7112977d6615124412b7e7ffc4abdcaa969b4078
|
[
"MIT"
] | null | null | null |
places/management/commands/load_places.py
|
aevtikheev/dvmn-yandex-afisha
|
7112977d6615124412b7e7ffc4abdcaa969b4078
|
[
"MIT"
] | null | null | null |
import logging
from urllib.parse import unquote, urlparse
from pathlib import PurePosixPath
import requests
from requests.exceptions import ReadTimeout, ConnectionError, HTTPError
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
from places.models import Place, Image
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class Command(BaseCommand):
help = 'Loads place data (including images) from the given URLs'
def add_arguments(self, parser):
parser.add_argument('data_urls', nargs='+', type=str)
def handle(self, *args, **options):
for url in options['data_urls']:
response = requests.get(url)
response.raise_for_status()
place_data = response.json()
new_place, created = Place.objects.get_or_create(
title=place_data['title'],
defaults={
'short_description': place_data['description_short'],
'long_description': place_data['description_long'],
'longitude': place_data['coordinates']['lng'],
'latitude': place_data['coordinates']['lat']
}
)
if created:
logging.info(f'Place "{new_place.title}" created')
else:
logging.info(f'Place "{new_place.title}" already exists')
for image_position, image_url in enumerate(place_data['imgs']):
try:
response = requests.get(image_url)
response.raise_for_status()
except (ReadTimeout, ConnectionError, HTTPError) as exception:
logging.exception(exception)
continue
new_image, _ = Image.objects.get_or_create(
place=new_place,
position=image_position
)
image_content = ContentFile(response.content)
image_name = PurePosixPath(unquote(urlparse(image_url).path)).parts[-1]
new_image.image.save(image_name, image_content)
logging.info(f'Image {image_name} for place "{new_place.title}" uploaded')
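# A minimal usage sketch (assuming this command lives in an installed Django
# app named "places"; the URL below is a placeholder, not a real endpoint):
#
#   python manage.py load_places https://example.com/places/place1.json
#
# Each URL is fetched, a Place is created (or reused, matched by title), and
# every image listed under "imgs" is downloaded and attached at its position.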
| 39.589286
| 90
| 0.59991
| 229
| 2,217
| 5.628821
| 0.41048
| 0.048875
| 0.040341
| 0.041893
| 0.085337
| 0.046548
| 0.046548
| 0
| 0
| 0
| 0
| 0.000646
| 0.30221
| 2,217
| 55
| 91
| 40.309091
| 0.832579
| 0
| 0
| 0.043478
| 0
| 0
| 0.143437
| 0.011277
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4002a9f7b6d3888657a9b000e3fb8c2cb6fac5dd
| 18,227
|
py
|
Python
|
gslib/utils/ls_helper.py
|
dickmao/gsutil
|
3b61bf0e6188f65f78c72c79ea3cb69e9c61da4b
|
[
"Apache-2.0"
] | 1
|
2021-09-11T23:58:39.000Z
|
2021-09-11T23:58:39.000Z
|
gslib/utils/ls_helper.py
|
shinfan/gsutil
|
45b5fc020bed44c6342fe70ce8b081aa222d9213
|
[
"Apache-2.0"
] | null | null | null |
gslib/utils/ls_helper.py
|
shinfan/gsutil
|
45b5fc020bed44c6342fe70ce8b081aa222d9213
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and class for listing commands such as ls and du."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import fnmatch
import sys
import six
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.storage_url import GenerationFromUrlAndString
from gslib.utils.constants import S3_ACL_MARKER_GUID
from gslib.utils.constants import S3_DELETE_MARKER_GUID
from gslib.utils.constants import S3_MARKER_GUIDS
from gslib.utils.constants import UTF8
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.translation_helper import AclTranslation
from gslib.utils import text_util
from gslib.wildcard_iterator import StorageUrlFromString
ENCRYPTED_FIELDS = [
'md5Hash',
'crc32c',
]
UNENCRYPTED_FULL_LISTING_FIELDS = [
'acl',
'cacheControl',
'componentCount',
'contentDisposition',
'contentEncoding',
'contentLanguage',
'contentType',
'customTime',
'kmsKeyName',
'customerEncryption',
'etag',
'eventBasedHold',
'generation',
'metadata',
'metageneration',
'retentionExpirationTime',
'size',
'storageClass',
'temporaryHold',
'timeCreated',
'timeDeleted',
'timeStorageClassUpdated',
'updated',
]
def MakeMetadataLine(label, value, indent=1):
"""Returns a string with a vertically aligned label and value.
Labels of the same indentation level will start at the same column. Values
will all start at the same column (unless the combined left-indent and
label length is excessively long). If a value spans multiple lines,
indentation will only be applied to the first line. Example output from
several calls:
Label1: Value (default indent of 1 was used)
Sublabel1: Value (used indent of 2 here)
Label2: Value
Args:
label: The label to print in the first column.
value: The value to print in the second column.
indent: (4 * indent) spaces will be placed before the label.
Returns:
A string with a vertically aligned label and value.
"""
return '{}{}'.format(((' ' * indent * 4) + label + ':').ljust(28), value)
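# Illustration of the alignment described in the docstring (hypothetical
# values): the label portion is left-justified to 28 characters, so values
# printed at different indents still start in the same column until the
# indented label overflows 28 characters.
#
#   MakeMetadataLine('Content-Type', 'text/plain')
#   MakeMetadataLine('key', 'value', indent=2)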
def PrintBucketHeader(bucket_listing_ref): # pylint: disable=unused-argument
"""Default function for printing headers for buckets.
Header is printed prior to listing the contents of the bucket.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET.
"""
pass
def PrintDir(bucket_listing_ref):
"""Default function for printing buckets or prefixes.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
text_util.print_to_fd(bucket_listing_ref.url_string)
# pylint: disable=unused-argument
def PrintDirSummary(num_bytes, bucket_listing_ref):
"""Off-by-default function for printing buckets or prefix size summaries.
Args:
num_bytes: Number of bytes contained in the directory.
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
pass
def PrintDirHeader(bucket_listing_ref):
"""Default function for printing headers for prefixes.
Header is printed prior to listing the contents of the prefix.
Args:
bucket_listing_ref: BucketListingRef of type PREFIX.
"""
text_util.print_to_fd('{}:'.format(bucket_listing_ref.url_string))
def PrintNewLine():
"""Default function for printing new lines between directories."""
text_util.print_to_fd()
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
"""Print full info for given object (like what displays for gsutil ls -L).
Args:
bucket_listing_ref: BucketListingRef being listed.
Must have ref_type OBJECT and a populated root_object
with the desired fields.
incl_acl: True if ACL info should be output.
Returns:
Tuple (number of objects, number of bytes).
Raises:
Exception: if a bug in the calling code is encountered.
"""
url_str = bucket_listing_ref.url_string
storage_url = StorageUrlFromString(url_str)
obj = bucket_listing_ref.root_object
if (obj.metadata and
S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
num_bytes = obj.size
num_objs = 1
text_util.print_to_fd('{}:'.format(url_str))
if obj.timeCreated:
text_util.print_to_fd(
MakeMetadataLine('Creation time',
obj.timeCreated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.updated:
text_util.print_to_fd(
MakeMetadataLine('Update time',
obj.updated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if (obj.timeStorageClassUpdated and
obj.timeStorageClassUpdated != obj.timeCreated):
text_util.print_to_fd(
MakeMetadataLine(
'Storage class update time',
obj.timeStorageClassUpdated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.storageClass:
text_util.print_to_fd(MakeMetadataLine('Storage class', obj.storageClass))
if obj.temporaryHold:
text_util.print_to_fd(MakeMetadataLine('Temporary Hold', 'Enabled'))
if obj.eventBasedHold:
text_util.print_to_fd(MakeMetadataLine('Event-Based Hold', 'Enabled'))
if obj.retentionExpirationTime:
text_util.print_to_fd(
MakeMetadataLine(
'Retention Expiration',
obj.retentionExpirationTime.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.kmsKeyName:
text_util.print_to_fd(MakeMetadataLine('KMS key', obj.kmsKeyName))
if obj.cacheControl:
text_util.print_to_fd(MakeMetadataLine('Cache-Control', obj.cacheControl))
if obj.contentDisposition:
text_util.print_to_fd(
MakeMetadataLine('Content-Disposition', obj.contentDisposition))
if obj.contentEncoding:
text_util.print_to_fd(
MakeMetadataLine('Content-Encoding', obj.contentEncoding))
if obj.contentLanguage:
text_util.print_to_fd(
MakeMetadataLine('Content-Language', obj.contentLanguage))
text_util.print_to_fd(MakeMetadataLine('Content-Length', obj.size))
text_util.print_to_fd(MakeMetadataLine('Content-Type', obj.contentType))
if obj.componentCount:
text_util.print_to_fd(
MakeMetadataLine('Component-Count', obj.componentCount))
if obj.customTime:
text_util.print_to_fd(MakeMetadataLine('Custom-Time', obj.customTime))
if obj.timeDeleted:
text_util.print_to_fd(
MakeMetadataLine('Noncurrent time',
obj.timeDeleted.strftime('%a, %d %b %Y %H:%M:%S GMT')))
marker_props = {}
if obj.metadata and obj.metadata.additionalProperties:
non_marker_props = []
for add_prop in obj.metadata.additionalProperties:
if add_prop.key not in S3_MARKER_GUIDS:
non_marker_props.append(add_prop)
else:
marker_props[add_prop.key] = add_prop.value
if non_marker_props:
text_util.print_to_fd(MakeMetadataLine('Metadata', ''))
for ap in non_marker_props:
ap_key = '{}'.format(ap.key)
ap_value = '{}'.format(ap.value)
meta_data_line = MakeMetadataLine(ap_key, ap_value, indent=2)
text_util.print_to_fd(meta_data_line)
if obj.customerEncryption:
if not obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', 'encrypted'))
if not obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', 'encrypted'))
text_util.print_to_fd(
MakeMetadataLine('Encryption algorithm',
obj.customerEncryption.encryptionAlgorithm))
text_util.print_to_fd(
MakeMetadataLine('Encryption key SHA256',
obj.customerEncryption.keySha256))
if obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', obj.crc32c))
if obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', obj.md5Hash))
text_util.print_to_fd(MakeMetadataLine('ETag', obj.etag.strip('"\'')))
if obj.generation:
generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
text_util.print_to_fd(MakeMetadataLine('Generation', generation_str))
if obj.metageneration:
text_util.print_to_fd(MakeMetadataLine('Metageneration',
obj.metageneration))
if incl_acl:
# JSON API won't return acls as part of the response unless we have
# full control scope
if obj.acl:
text_util.print_to_fd(
MakeMetadataLine('ACL', AclTranslation.JsonFromMessage(obj.acl)))
elif S3_ACL_MARKER_GUID in marker_props:
text_util.print_to_fd(
MakeMetadataLine('ACL', marker_props[S3_ACL_MARKER_GUID]))
else:
# Empty ACLs are possible with Bucket Policy Only and no longer imply
# ACCESS DENIED.
text_util.print_to_fd(MakeMetadataLine('ACL', '[]'))
return (num_objs, num_bytes)
def PrintObject(bucket_listing_ref):
"""Default printing function for objects.
Args:
bucket_listing_ref: BucketListingRef of type OBJECT.
Returns:
(num_objects, num_bytes).
"""
try:
text_util.print_to_fd(bucket_listing_ref.url_string)
except IOError as e:
# Windows throws an IOError 0 here for object names containing Unicode
# chars. Ignore it.
if not (IS_WINDOWS and e.errno == 0):
raise
return (1, 0)
class LsHelper(object):
"""Helper class for ls and du."""
def __init__(self,
iterator_func,
logger,
print_object_func=PrintObject,
print_dir_func=PrintDir,
print_dir_header_func=PrintDirHeader,
print_bucket_header_func=PrintBucketHeader,
print_dir_summary_func=PrintDirSummary,
print_newline_func=PrintNewLine,
all_versions=False,
should_recurse=False,
exclude_patterns=None,
fields=('name',),
list_subdir_contents=True):
"""Initializes the helper class to prepare for listing.
Args:
iterator_func: Function for instantiating iterator.
Inputs-
url_string- Url string to iterate on. May include
wildcards.
all_versions=False- If true, iterate over all object
versions.
logger: Logger for outputting warnings / errors.
print_object_func: Function for printing objects.
print_dir_func: Function for printing buckets/prefixes.
print_dir_header_func: Function for printing header line for buckets
or prefixes.
print_bucket_header_func: Function for printing header line for buckets
or prefixes.
print_dir_summary_func: Function for printing size summaries about
buckets/prefixes.
print_newline_func: Function for printing new lines between dirs.
all_versions: If true, list all object versions.
should_recurse: If true, recursively listing buckets/prefixes.
exclude_patterns: Patterns to exclude when listing.
fields: Fields to request from bucket listings; this should
include all fields that need to be populated in
objects so they can be listed. Can be set to None
to retrieve all object fields. Defaults to short
listing fields.
list_subdir_contents: If true, return the directory and any contents,
otherwise return only the directory itself.
"""
self._iterator_func = iterator_func
self.logger = logger
self._print_object_func = print_object_func
self._print_dir_func = print_dir_func
self._print_dir_header_func = print_dir_header_func
self._print_bucket_header_func = print_bucket_header_func
self._print_dir_summary_func = print_dir_summary_func
self._print_newline_func = print_newline_func
self.all_versions = all_versions
self.should_recurse = should_recurse
self.exclude_patterns = exclude_patterns
self.bucket_listing_fields = fields
self.list_subdir_contents = list_subdir_contents
def ExpandUrlAndPrint(self, url):
"""Iterates over the given URL and calls print functions.
Args:
url: StorageUrl to iterate over.
Returns:
(num_dirs, num_objects, num_bytes) totals for the directories, objects, and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
print_newline = False
if url.IsBucket() or self.should_recurse:
# IsBucket() implies a top-level listing.
if url.IsBucket():
self._print_bucket_header_func(url)
return self._RecurseExpandUrlAndPrint(url.url_string,
print_initial_newline=False)
else:
# User provided a prefix or object URL, but it's impossible to tell
# which until we do a listing and see what matches.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields))
plurality = top_level_iterator.HasPlurality()
try:
top_level_iterator.PeekException()
except EncryptionException:
# Detailed listing on a single object can perform a GetObjectMetadata
# call, which raises if a matching encryption key isn't found.
# Re-iterate without requesting encrypted fields.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=UNENCRYPTED_FULL_LISTING_FIELDS))
plurality = top_level_iterator.HasPlurality()
for blr in top_level_iterator:
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
print_newline = True
elif blr.IsPrefix():
if print_newline:
self._print_newline_func()
else:
print_newline = True
if plurality and self.list_subdir_contents:
self._print_dir_header_func(blr)
elif plurality and not self.list_subdir_contents:
print_newline = False
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(
wildcard_suffix='*' if self.list_subdir_contents else None)
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a CsBucketListingRef of type Bucket')
num_objects += no
num_dirs += nd
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _RecurseExpandUrlAndPrint(self, url_str, print_initial_newline=True):
"""Iterates over the given URL string and calls print functions.
Args:
url_str: String describing StorageUrl to iterate over.
Must be of depth one or higher.
print_initial_newline: If true, print a newline before recursively
expanded prefixes.
Returns:
(num_dirs, num_objects, num_bytes) totals for the directories, objects, and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
for blr in self._iterator_func(
'%s' % url_str, all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields):
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
elif blr.IsPrefix():
if self.should_recurse:
if print_initial_newline:
self._print_newline_func()
else:
print_initial_newline = True
self._print_dir_header_func(blr)
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(wildcard_suffix='*')
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
nd, no, nb = 1, 0, 0
self._print_dir_func(blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a bucketListingRef of type Bucket')
num_dirs += nd
num_objects += no
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _MatchesExcludedPattern(self, blr):
"""Checks bucket listing reference against patterns to exclude.
Args:
blr: BucketListingRef to check.
Returns:
True if reference matches a pattern and should be excluded.
"""
if self.exclude_patterns:
tomatch = six.ensure_str(blr.url_string)
for pattern in self.exclude_patterns:
if fnmatch.fnmatch(tomatch, six.ensure_str(pattern)):
return True
return False
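# A rough usage sketch (hedged: `command` stands for a gsutil Command instance,
# which supplies a WildcardIterator factory and a logger; the exact wiring
# varies between the ls and du commands):
#
#   helper = LsHelper(command.WildcardIterator,
#                     command.logger,
#                     print_object_func=PrintObject,
#                     should_recurse=True,
#                     fields=('name', 'size'))
#   num_dirs, num_objects, num_bytes = helper.ExpandUrlAndPrint(storage_url)
#
# ExpandUrlAndPrint returns totals for everything printed, which is how the
# calling command accumulates its summary counts.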
| 36.971602
| 80
| 0.681242
| 2,226
| 18,227
| 5.352201
| 0.203504
| 0.024845
| 0.039282
| 0.045325
| 0.372587
| 0.34279
| 0.283028
| 0.231996
| 0.197079
| 0.177103
| 0
| 0.005071
| 0.242607
| 18,227
| 492
| 81
| 37.046748
| 0.85795
| 0.320184
| 0
| 0.285714
| 0
| 0
| 0.083132
| 0.003832
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039867
| false
| 0.006645
| 0.063123
| 0
| 0.13289
| 0.225914
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4004bec8c10906a7cd716dc8ff33d14546f3a2fe
| 1,527
|
py
|
Python
|
src/detector/pre_process_test_data.py
|
DomGonthier/PecheFantome
|
d031a8fe5faa2ef35f2c1dbb8241281ffda22429
|
[
"MIT"
] | null | null | null |
src/detector/pre_process_test_data.py
|
DomGonthier/PecheFantome
|
d031a8fe5faa2ef35f2c1dbb8241281ffda22429
|
[
"MIT"
] | 8
|
2020-02-19T20:03:44.000Z
|
2022-02-03T19:27:24.000Z
|
src/detector/pre_process_test_data.py
|
DomGonthier/PecheFantome
|
d031a8fe5faa2ef35f2c1dbb8241281ffda22429
|
[
"MIT"
] | 3
|
2020-02-19T19:02:19.000Z
|
2021-12-14T14:06:25.000Z
|
import os
from tqdm import tqdm
import cv2
import numpy as np
#pre process test data:
path = "raw_test_data/"
list_width = []
list_height = []
list_image = []
def pre_process():
print("analyze images")
for Files in tqdm(os.listdir(path)):
if "jpg" in Files:
#print(Files)
img = cv2.imread(path + Files, 1)
height, width, chan = img.shape
#print(width)
#print(height)
list_width.append(width)
list_height.append(height)
max_width = np.max(list_width)
max_height = np.max(list_height)
if max_height == max_width:
print("max height == max width")
print("format images: ")
for image in tqdm(os.listdir(path)):
if "jpg" in image:
#print(image)
img = cv2.imread(path + image, 1)
height, width, chan = img.shape
new_height = (round(max_height/16)+1)*16 # image dimension needs to be a multiple of 16
new_width = new_height # image needs to be squared
delta_width = new_width - width
delta_height = new_height - height
#print("delta height",delta_height)
#print("delta width",delta_width)
pad_img = cv2.copyMakeBorder(img, 0, delta_height, 0, delta_width, cv2.BORDER_CONSTANT,None, value = 0)
#list_image.append(pad_img)
cv2.imwrite("test_data/"+image, pad_img)
pre_process()
for image in list_image:
print(image.shape)
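# Worked example of the padding formula above (numbers are hypothetical): if
# the tallest image is 1023 px, new_height = (round(1023 / 16) + 1) * 16
# = (64 + 1) * 16 = 1040, which is a multiple of 16. Every image is then
# zero-padded on the bottom and right edges to a 1040 x 1040 square before
# being written to test_data/.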
| 31.163265
| 115
| 0.59201
| 202
| 1,527
| 4.306931
| 0.277228
| 0.027586
| 0.048276
| 0.034483
| 0.165517
| 0.114943
| 0.05977
| 0.05977
| 0
| 0
| 0
| 0.016949
| 0.304519
| 1,527
| 48
| 116
| 31.8125
| 0.80226
| 0.153242
| 0
| 0.058824
| 0
| 0
| 0.063913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.117647
| 0
| 0.147059
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4004f14ddc4bfb878b0872bfe2604774deea7bcf
| 4,934
|
py
|
Python
|
tensorflow/python/training/localhost_cluster_performance_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 101
|
2016-12-03T11:40:52.000Z
|
2017-12-23T02:02:03.000Z
|
tensorflow/python/training/localhost_cluster_performance_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 9
|
2016-12-14T03:27:46.000Z
|
2017-09-13T02:29:07.000Z
|
tensorflow/python/training/localhost_cluster_performance_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 47
|
2016-12-04T12:37:24.000Z
|
2018-01-14T18:13:07.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return their servers."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
class CreateLocalClusterTest(tf.test.TestCase):
def testCreateLocalCluster(self):
workers, _ = create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
var0 = tf.Variable(0.0)
with tf.device("/job:ps/task:1"):
var1 = tf.Variable(1.0)
worker_sessions[0].run([var0.initializer, var1.initializer])
with tf.device("/job:ps/task:0"):
var2 = tf.Variable(2.0)
with tf.device("/job:ps/task:1"):
var3 = tf.Variable(3.0)
worker_sessions[1].run([var2.initializer, var3.initializer])
# Read values back in the opposite session
self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(tf.test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print(
"\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(tf.test.Benchmark):
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
workers, _ = create_local_cluster(num_workers=1, num_ps=100)
worker_sessions = [tf.Session(w.target) for w in workers]
worker = worker_sessions[0]
partition_sizes = (1, 512, 1024*32, 1024*128)
partitioned = []
for partition_size in partition_sizes:
# max_shard_bytes is 4 * partition_size, and the shape is
# 1000 * partition_size float32s (4 bytes each), so the variable should
# partition into 1000 shards, each containing partition_size float32s.
print("Building partitioned variable with %d floats per partition"
% partition_size)
with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
partitioned_ix = tf.get_variable(
"partitioned_%d" % partition_size,
shape=[1000 * partition_size],
dtype=tf.float32,
# Each partition to have exactly N float32s
partitioner=tf.variable_axis_size_partitioner(
max_shard_bytes=4 * partition_size))
# Concatenates along axis 0
partitioned.append(tf.convert_to_tensor(partitioned_ix))
tf.global_variables_initializer().run(session=worker)
for ix, partition_size in enumerate(partition_sizes):
print("Running benchmark having partitions with %d floats"
% partition_size)
self.run_op_benchmark(
worker,
partitioned[ix],
name=("read_concat_1000_partitions_from_"
"100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
tf.test.main()
| 37.097744
| 80
| 0.694366
| 660
| 4,934
| 4.971212
| 0.318182
| 0.039622
| 0.032917
| 0.025602
| 0.248095
| 0.218226
| 0.190795
| 0.117647
| 0.082902
| 0.0573
| 0
| 0.030045
| 0.190515
| 4,934
| 132
| 81
| 37.378788
| 0.791437
| 0.207742
| 0
| 0.114943
| 0
| 0
| 0.113402
| 0.043299
| 0
| 0
| 0
| 0
| 0.045977
| 1
| 0.045977
| false
| 0
| 0.08046
| 0
| 0.172414
| 0.045977
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
400c696eb52726be2cb58df8b7625711faea5a60
| 3,846
|
py
|
Python
|
src/utils.py
|
daochenzha/SimTSC
|
6e3200510e8e464049eab95db9540afdaf397f9c
|
[
"MIT"
] | 23
|
2022-01-06T05:15:35.000Z
|
2022-03-28T08:08:14.000Z
|
src/utils.py
|
daochenzha/SimTSC
|
6e3200510e8e464049eab95db9540afdaf397f9c
|
[
"MIT"
] | 2
|
2022-02-10T02:22:35.000Z
|
2022-03-28T16:45:17.000Z
|
src/utils.py
|
daochenzha/SimTSC
|
6e3200510e8e464049eab95db9540afdaf397f9c
|
[
"MIT"
] | 5
|
2022-01-09T08:58:24.000Z
|
2022-01-19T09:52:43.000Z
|
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# z-normalize each series (zero mean, unit standard deviation)
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a channel dimension so each univariate series has shape (1, length)
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# z-normalize each series (zero mean, unit standard deviation)
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
class Logger:
def __init__(self, f):
self.f = f
def log(self, content):
print(content)
self.f.write(content + '\n')
self.f.flush()
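# A minimal usage sketch (paths and dataset name are placeholders):
#
#   X, y, train_idx, test_idx = read_dataset('/path/to/UCR', 'Coffee', shot=1)
#   with open('train.log', 'w') as f:
#       logger = Logger(f)
#       logger.log('train: {}, test: {}'.format(len(train_idx), len(test_idx)))
#
# read_dataset re-splits the combined train+test data 80/20, keeps `shot`
# examples per class for training, z-normalizes each series, and returns X
# with shape (n_samples, 1, series_length).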
| 29.584615
| 103
| 0.621945
| 639
| 3,846
| 3.538341
| 0.173709
| 0.031844
| 0.068111
| 0.055728
| 0.677134
| 0.647943
| 0.599735
| 0.574967
| 0.574967
| 0.519239
| 0
| 0.01814
| 0.211648
| 3,846
| 129
| 104
| 29.813953
| 0.727573
| 0.053042
| 0
| 0.516854
| 0
| 0
| 0.022419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067416
| false
| 0
| 0.044944
| 0
| 0.168539
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
400d71727dfe67b72a8bc6849bc10bc05b88d55b
| 17,458
|
py
|
Python
|
mpinterfaces/mat2d/friction/analysis.py
|
yw-fang/MPInterfaces
|
ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e
|
[
"MIT"
] | 56
|
2015-06-23T03:03:18.000Z
|
2022-02-06T16:41:34.000Z
|
mpinterfaces/mat2d/friction/analysis.py
|
yw-fang/MPInterfaces
|
ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e
|
[
"MIT"
] | 21
|
2015-09-03T17:50:18.000Z
|
2022-03-01T02:26:34.000Z
|
mpinterfaces/mat2d/friction/analysis.py
|
joshgabriel/MPInterfaces
|
2799ae161fa94c78842092fb24ef468607afa465
|
[
"MIT"
] | 50
|
2015-09-17T19:09:36.000Z
|
2021-11-15T19:13:20.000Z
|
from __future__ import print_function, division, unicode_literals
import os
import warnings
import numpy as np
from scipy import interpolate
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.core.structure import Structure
from pymatgen import Element
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator as VE
__author__ = "Michael Ashton"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton"
__email__ = "ashtonmv@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
def get_corrugation_factor(structure):
"""
Calculate the "corrugation factor" for a 2D material.
The corrugation factor is defined as the sum of the
outer hemispheres of ionic radii of the atoms on the
material's top and bottom surfaces, divided by the
planar area of the whole unit cell's 001 plane. Top
and bottom corrugation factors are returned
separately in the final dictionary. In general,
a larger corrugation factor indicates a rougher (more corrugated) surface.
Args:
structure (Structure): Pymatgen Structure object.
Returns:
corrugation_factors (dict): Dictionary of "top"
and "bottom" corrugation factors, e.g.
{"top": top_corrugation_factor,
"bottom": bottom_corrugation_factor}
"""
sites = structure.sites
valences = VE(structure).valences
formatted_valences = {}
for e in valences:
temp=e[-1]
if "+" in e or "-" in e:
try:
# Some element names have a number followed
# by a plus or minus, e.g. "O2-"
int(e[-2])
element = e[:-2]
except:
# Others are simply a plus or minus, e.g. "Cl-"
element = e[:-1]
else:
element = e
formatted_valences[Element(element)] = valences[e]
all_z_coords = [s.coords[2] for s in sites]
max_z = max(all_z_coords)
min_z = min(all_z_coords)
top_layer = [s for s in sites if abs(s.coords[2] - max_z) < 0.1]
bottom_layer = [s for s in sites if abs(s.coords[2] - min_z) < 0.1]
pi = np.pi
top_sphere_area = 0
bottom_sphere_area = 0
for site in top_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
top_sphere_area += 2*pi*r*r
for site in bottom_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
bottom_sphere_area += 2*pi*r*r
lattice = structure.lattice
area = abs(np.cross(lattice._matrix[0], lattice._matrix[1])[2])
corrugation = {"top": top_sphere_area / area,
"bottom": bottom_sphere_area / area}
return corrugation
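# A short usage sketch (the POSCAR path is a placeholder; the structure is
# assumed to be a 2D slab with vacuum along c, so that the 0.1 Angstrom
# tolerance above picks out genuine top and bottom surface layers):
#
#   slab = Structure.from_file('POSCAR')
#   factors = get_corrugation_factor(slab)
#   print(factors['top'], factors['bottom'])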
def plot_gamma_surface(fmt='pdf'):
"""
Collect the energies from a grid of static energy
calculations to plot the Gamma surface between two layers of the 2D
material.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
lattice = Structure.from_file('POSCAR').lattice
area = np.cross(lattice._matrix[0], lattice._matrix[1])[2]
ax = plt.figure(figsize=(n_divs_x * 1.2, n_divs_y * 1.2)).gca()
ax.set_xlim(0, n_divs_x + 1)
ax.set_ylim(0, n_divs_y + 1)
energies = []
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
not_converged = []
for x in x_values:
energies.append([])
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy / area
energies[x].append(energy)
except:
not_converged.append('{}x{}'.format(x, y))
energies[x].append(0)
os.chdir('../')
energies[x].append(energies[x][0])
energies.append([])
# ENERGY_ARRAY[n_divs_x] = ENERGY_ARRAY[0]
if not_converged:
warnings.warn('{} did not converge.'.format(not_converged))
for coords in not_converged:
energies[int(coords.split('x')[0])][int(coords.split('x')[1])] = energy
minima = []
maxima = []
for x in x_values:
minima.append(min(energies[x]))
maxima.append(max(energies[x]))
abs_minimum = min(minima)
abs_maximum = max(maxima)
for x in range(n_divs_x + 1):
for y in range(n_divs_y + 1):
# Plot all energies relative to the global minimum.
scaled_energy = energies[x][y] - abs_minimum
if '{}x{}'.format(x, y) in not_converged:
color_code = 'w'
else:
color_code = plt.cm.jet(
scaled_energy/(abs_maximum - abs_minimum))
ax.add_patch(plt.Rectangle((x, y), width=1, height=1,
facecolor=color_code, linewidth=0))
# Get rid of annoying ticks.
ax.axes.get_yaxis().set_ticks([])
ax.axes.get_xaxis().set_ticks([])
os.chdir('../../')
plt.savefig('gamma_surface.{}'.format(fmt), transparent=True)
plt.close()
def get_number_of_surface_atoms():
"""
Count the number of atoms at a 2D material's surface. This
enables energy and force calculations to be normalized to
the number of surface atoms.
Returns:
int. Number of surface atoms (top + bottom) for both
layers in the bilayer model.
"""
structure = Structure.from_file('friction/lateral/POSCAR')
heights = np.array([site.z for site in structure.sites])
max_height = max(heights)
min_height = min(heights)
n_atoms_top = len([height for height in heights if max_height - height < 0.1])
n_atoms_bottom = len([height for height in heights if height - min_height < 0.1])
return (n_atoms_top + n_atoms_bottom) * 2
def get_basin_and_peak_locations():
"""
Find which directories inside 'friction/lateral' represent
the minimum (basin) and maximum (peak) energy stacking
configurations.
Returns:
tuple. Of the form (basin, peak).
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
abs_maximum = -np.Infinity
abs_minimum = np.Infinity
for x in x_values:
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy
if energy < abs_minimum:
basin = dir
abs_minimum = energy
if energy > abs_maximum:
peak = dir
abs_maximum = energy
except:
pass
os.chdir('../')
os.chdir('../../')
return(basin, peak)
def plot_friction_force(fmt='pdf'):
"""
Plot the sinusoidal curve of delta E between basin and saddle
points for each normal spacing dz.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
f, (ax1, ax2) = plt.subplots(2, figsize=(16, 16))
spacings = sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)])
spc_range = spacings[-1] - spacings[0] + 0.1
for spacing in spacings:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2 +
(start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
ax1.plot(x, sinx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax1.set_xticklabels(ax1.get_xticks(), family='serif', fontsize=18)
ax1.set_yticklabels(ax1.get_yticks(), family='serif', fontsize=18)
ax1.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax1.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', family='serif', fontsize=24)
ax2.plot(x, cosx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax2.set_xticklabels(ax2.get_xticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax2.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_f\/(eV/\AA)}$', family='serif', fontsize=24)
os.chdir('../')
ax1.legend(loc='upper right')
ax2.legend(loc='upper right')
os.chdir('../../')
plt.savefig('F_f.{}'.format(fmt))
def plot_normal_force(basin_dir, fmt='pdf'):
"""
Plot the LJ-like curve of the energy at the basin point
as a function of normal spacing dz.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
fig = plt.figure(figsize=(16, 10))
ax = fig.gca()
ax2 = ax.twinx()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
ax.set_xlim(spacings[0], spacings[-1])
ax.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0))
ax2.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0.9))
E_z = ax.plot(xnew, ynew, color=plt.cm.jet(0),
linewidth=4, label=r'$\mathrm{E(z)}$')
F_N = ax2.plot(spacings, [-y for y in ynew_slope], color=plt.cm.jet(0.9),
linewidth=4, label=r'$\mathrm{F_N}$')
ax.set_ylim(ax.get_ylim())
ax.set_xticklabels(ax.get_xticks(), family='serif', fontsize=18)
ax.set_yticklabels(ax.get_yticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax.set_xlabel(r'$\mathrm{z\/(\AA)}$', fontsize=24)
ax.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_N\/(eV/\AA)}$', fontsize=24)
data = E_z + F_N
labs = [l.get_label() for l in data]
ax.legend(data, labs, loc='upper right', fontsize=24)
ax.plot(spacings, E, linewidth=0, marker='o', color=plt.cm.jet(0),
markersize=10, markeredgecolor='none')
os.chdir('../../')
plt.savefig('F_N.{}'.format(fmt))
def plot_mu_vs_F_N(basin_dir, fmt='pdf'):
"""
Plot friction coefficient 'mu' vs. F_Normal.
mu = F_friction / F_Normal.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
fig = plt.figure(figsize=(16, 10))
# ax = fig.gca()
# ax2 = ax.twinx()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd()) if
os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
# xnew = np.arange(spacings[0], spacings[-1], 0.001)
# ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
sorted_dirs = sorted([float(spc) for spc in os.listdir(os.getcwd())
if os.path.isdir(spc)])
for spacing in sorted_dirs:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
ax = plt.figure().gca()
ax.plot(F_N, mu, linewidth=2, marker='o', markeredgecolor='none',
markersize=3, color=plt.cm.jet(0))
plt.savefig('mu_vs_F_N.{}'.format(fmt))
def get_mu_vs_F_N(basin_dir):
"""
Essentially the same function as plotting, but without the plot.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
Returns:
dic: Of the form {'F_N': F_N, 'mu': mu, 'F_f': F_f}, where
forces are in nN.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
# Convert eV.A to nN
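# (1 eV/Angstrom = 1.602e-19 J / 1e-10 m = 1.602e-9 N = 1.602 nN, hence the
# factor of 1.602 below.)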
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
for spacing in sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)]):
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
try:
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
-
Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
except:
print('One or more jobs in {}/ have not converged.'.format(spacing))
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
return {'F_N': F_N, 'mu': mu, 'F_f': F_f}
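# A rough end-to-end sketch of how these helpers fit together (hedged: it
# assumes the friction/lateral and friction/normal VASP runs have already been
# generated and have converged under the current working directory):
#
#   basin, peak = get_basin_and_peak_locations()
#   plot_gamma_surface(fmt='pdf')
#   plot_normal_force(basin, fmt='pdf')
#   plot_friction_force(fmt='pdf')
#   results = get_mu_vs_F_N(basin)  # forces in nN, mu dimensionless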
| 32.815789
| 94
| 0.592966
| 2,438
| 17,458
| 4.096801
| 0.14397
| 0.015419
| 0.012115
| 0.018722
| 0.577994
| 0.537044
| 0.515018
| 0.505206
| 0.498999
| 0.486984
| 0
| 0.022025
| 0.269218
| 17,458
| 531
| 95
| 32.877589
| 0.760856
| 0.174934
| 0
| 0.479624
| 0
| 0
| 0.066505
| 0.008038
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025078
| false
| 0.003135
| 0.034483
| 0
| 0.068966
| 0.00627
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
400f0a8fc2e264478738eb502734b3f76efaa361
| 1,380
|
py
|
Python
|
aiopylimit/tests/test_aiopylimit.py
|
zealotous/aiopylimit
|
0f93a06e751b97959835187a05311deaffaed9d8
|
[
"Apache-2.0"
] | 4
|
2019-05-09T12:39:14.000Z
|
2022-01-05T20:36:06.000Z
|
aiopylimit/tests/test_aiopylimit.py
|
zealotous/aiopylimit
|
0f93a06e751b97959835187a05311deaffaed9d8
|
[
"Apache-2.0"
] | null | null | null |
aiopylimit/tests/test_aiopylimit.py
|
zealotous/aiopylimit
|
0f93a06e751b97959835187a05311deaffaed9d8
|
[
"Apache-2.0"
] | 1
|
2022-01-05T19:56:49.000Z
|
2022-01-05T19:56:49.000Z
|
from aiopylimit import AIOPyRateLimit
from aiopylimit import AIOPyRateLimitException
import asynctest
import asyncio
class TestPyLimit(asynctest.TestCase):
async def test_exception(self):
limit = AIOPyRateLimit(10, 10)
await self.assertAsyncRaises(AIOPyRateLimitException,
limit.attempt('test_namespace'))
async def test_throttle(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 20):
await asyncio.sleep(.5)
if x < 10:
self.assertTrue(await limit.attempt('test_namespace'))
else:
self.assertFalse(await limit.attempt('test_namespace'))
await asyncio.sleep(6)
self.assertTrue(await limit.attempt('test_namespace'))
async def test_peek(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 10):
self.assertTrue(await limit.attempt('test_namespace2'))
self.assertTrue(await limit.is_rate_limited('test_namespace2'))
await asyncio.sleep(10)
self.assertFalse(await limit.is_rate_limited('test_namespace2'))
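# A minimal usage sketch outside the test suite (Redis host/port and the
# namespace string are placeholders; the constructor arguments mirror the
# tests above, i.e. 10 attempts within a 10-second window):
#
#   AIOPyRateLimit.init(redis_host="localhost", redis_port=6379)
#   limiter = AIOPyRateLimit(10, 10)
#   if await limiter.attempt('api:some_user'):
#       pass  # proceed with the request
#   else:
#       pass  # rate limit exceeded, reject or delay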
| 39.428571
| 72
| 0.642754
| 149
| 1,380
| 5.805369
| 0.33557
| 0.069364
| 0.092486
| 0.115607
| 0.578035
| 0.543353
| 0.543353
| 0.254335
| 0.254335
| 0.254335
| 0
| 0.034653
| 0.268116
| 1,380
| 34
| 73
| 40.588235
| 0.821782
| 0
| 0
| 0.3
| 0
| 0
| 0.086232
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4010464a9caf650b2a6706b3ea8adb7b2458ae14
| 5,772
|
py
|
Python
|
bookworm/platform_services/_win32/tesseract_download.py
|
mush42/bookworm
|
a4bdd89363137a89a1bed1e9e072de4fb55576fd
|
[
"MIT"
] | 18
|
2019-07-19T22:12:15.000Z
|
2020-08-26T17:45:19.000Z
|
bookworm/platform_services/_win32/tesseract_download.py
|
mush42/bookworm
|
a4bdd89363137a89a1bed1e9e072de4fb55576fd
|
[
"MIT"
] | 44
|
2019-07-15T10:17:00.000Z
|
2020-07-26T11:22:53.000Z
|
bookworm/platform_services/_win32/tesseract_download.py
|
mush42/bookworm
|
a4bdd89363137a89a1bed1e9e072de4fb55576fd
|
[
"MIT"
] | 9
|
2019-09-03T13:13:31.000Z
|
2020-08-25T13:55:27.000Z
|
# coding: utf-8
import sys
import shutil
import requests
import wx
from pathlib import Path
from urllib.parse import urljoin, urlsplit
from tempfile import TemporaryFile
from zipfile import ZipFile
from bookworm import typehints as t
from bookworm import app
from bookworm.http_tools import RemoteJsonResource, HttpResource
from bookworm.ocr_engines.tesseract_ocr_engine import (
TesseractOcrEngine,
get_tesseract_path,
)
from bookworm.logger import logger
log = logger.getChild(__name__)
BRANCH = "develop"
TESSERACT_VERSION_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/version"
if app.arch == "x86":
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x86.zip"
else:
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x64.zip"
FAST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_fast/main/{lang_code}.traineddata"
BEST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_best/main/{lang_code}.traineddata"
def get_downloadable_languages():
return (
"afr",
"sqi",
"amh",
"ara",
"hye",
"asm",
"aze_cyrl",
"aze",
"ben",
"eus",
"bel",
"bos",
"bre",
"bul",
"mya",
"cat",
"ceb",
"chr",
"chi_sim",
"hrv",
"ces",
"dan",
"nld",
"dzo",
"eng",
"epo",
"est",
"fao",
"fil",
"fin",
"fra",
"glg",
"kat_old",
"kat",
"deu",
"ell",
"guj",
"heb",
"hin",
"hun",
"isl",
"ind",
"gle",
"ita_old",
"ita",
"jpn_vert",
"jpn",
"jav",
"kan",
"kaz",
"khm",
"kor_vert",
"kor",
"kmr",
"kir",
"lao",
"lav",
"lit",
"ltz",
"mkd",
"msa",
"mal",
"mlt",
"mri",
"mar",
"mon",
"nep",
"nor",
"ori",
"pus",
"fas",
"pol",
"por",
"pan",
"que",
"ron",
"rus",
"gla",
"srp_latn",
"srp",
"snd",
"sin",
"slk",
"slv",
"spa_old",
"spa",
"sun",
"swa",
"swe",
"tgk",
"tam",
"tat",
"tel",
"tha",
"bod",
"tir",
"ton",
"tur",
"ukr",
"urd",
"uig",
"uzb_cyrl",
"uzb",
"vie",
"cym",
"fry",
"yid",
"yor",
)
def is_tesseract_available():
return sys.platform == "win32" and TesseractOcrEngine.check()
def get_tessdata():
return get_tesseract_path() / "tessdata"
def get_language_path(language):
return Path(get_tessdata(), f"{language}.traineddata")
def is_new_tesseract_version_available():
remote_version = requests.get(TESSERACT_VERSION_URL).text
return TesseractOcrEngine.get_tesseract_version() != remote_version
def download_tesseract_engine(progress_dlg):
tesseract_directory = get_tesseract_path()
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
try:
dl_request = HttpResource(TESSERACT_ENGINE_DOWNLOAD_URL).download()
progress_dlg.set_abort_callback(dl_request.cancel)
with TemporaryFile() as dlfile:
dl_request.download_to_file(dlfile, callback)
if dl_request.is_cancelled():
return
with progress_dlg.PulseContinuously(_("Extracting file...")):
with ZipFile(dlfile, "r") as zfile:
tesseract_directory.mkdir(parents=True, exist_ok=True)
zfile.extractall(path=tesseract_directory)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Success"),
# Translators: content of a messagebox
_("Tesseract engine downloaded successfully"),
)
return True
except ConnectionError:
log.debug("Failed to download tesseract OCR engine.", exc_info=True)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Connection Error"),
_(
"Could not download Tesseract OCR Engine.\nPlease check your internet and try again."
),
icon=wx.ICON_ERROR,
)
except:
log.exception(
"An error occurred while installing the Tesseract OCr Engine", exc_info=True
)
wx.GetApp().mainFrame.notify_user(
_("Error"),
_("Could not install the Tesseract OCR engine.\nPlease try again."),
icon=wx.ICON_WARNING,
)
def download_language(lang_code, variant, target_file, progress_dlg):
url_prefix = (
BEST_TRAINEDDATA_DOWNLOAD_URL
if variant == "best"
else FAST_TRAINEDDATA_DOWNLOAD_URL
)
download_url = url_prefix.format(lang_code=lang_code)
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
dl_request = HttpResource(download_url).download()
progress_dlg.set_abort_callback(dl_request.cancel)
dl_request.download_to_filesystem(target_file, callback)
return not dl_request.is_cancelled()
def remove_tesseract():
tesseract_path = get_tesseract_path()
shutil.rmtree(tesseract_path, ignore_errors=False)
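Taken together, the helpers above cover the availability check, engine download, and per-language traineddata download. A minimal usage sketch follows; it assumes the module context above, and the wrapper name ensure_tesseract plus its defaults are hypothetical, not part of the module.
# Hedged usage sketch, not part of the module: compose the helpers above from a
# hypothetical caller. `progress_dlg` is assumed to expose the same
# Update/set_abort_callback/PulseContinuously interface used above.
def ensure_tesseract(progress_dlg, lang_code="eng", variant="fast"):
    # Install or refresh the bundled engine when it is missing or outdated.
    if not is_tesseract_available() or is_new_tesseract_version_available():
        if not download_tesseract_engine(progress_dlg):
            return False
    # Fetch the requested traineddata file into the tessdata folder.
    target = get_language_path(lang_code)
    if not target.exists():
        return download_language(lang_code, variant, target, progress_dlg)
    return True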
| 26 | 139 | 0.573458 | 595 | 5,772 | 5.339496 | 0.431933 | 0.031161 | 0.028329 | 0.044067 | 0.286119 | 0.274788 | 0.274788 | 0.274788 | 0.274788 | 0.274788 | 0 | 0.002252 | 0.307519 | 5,772 | 221 | 140 | 26.117647 | 0.792594 | 0.02079 | 0 | 0.035533 | 0 | 0.010152 | 0.215158 | 0.003896 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040609 | false | 0 | 0.06599 | 0.020305 | 0.147208 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4010dc640b95065e204f3d03308d81598d5d3d22 | 2,448 | py | Python | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | ["MIT"] | null | null | null | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | ["MIT"] | null | null | null | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | ["MIT"] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z |
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_proj.py
---------
Date : November 2017
Copyright : (C) 2017 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'November 2017'
__copyright__ = '(C) 2017, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.core import QgsProcessingParameterString
def processInputs(alg, parameters, context, feedback):
# Grab the projection from the input vector layer
layer = alg.parameterAsLayer(parameters, 'input', context)
alg.setSessionProjectionFromLayer(layer)
layerCrs = layer.crs().toProj4()
# Creates a new location with this Crs
newLocation = 'newProj{}'.format(alg.uniqueSuffix)
alg.commands.append('g.proj proj4="{}" location={}'.format(
layerCrs, newLocation))
# Go to the newly created location
alg.commands.append('g.mapset mapset=PERMANENT location={}'.format(
newLocation))
# Import the layer
alg.loadVectorLayerFromParameter(
'input', parameters, context, feedback, False)
# Go back to default location
alg.commands.append('g.mapset mapset=PERMANENT location=temp_location')
# Grab the projected Crs
crs = alg.parameterAsCrs(parameters, 'crs', context)
    alg.commands.append('g.proj -c proj4="{}"'.format(crs.toProj4()))
# Remove crs parameter
alg.removeParameter('crs')
# Add the location parameter with proper value
location = QgsProcessingParameterString(
'location',
'new location',
'newProj{}'.format(alg.uniqueSuffix)
)
alg.addParameter(location)
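For orientation, the sketch below spells out roughly what processInputs() pushes onto alg.commands; the suffix value and the proj4 placeholders are illustrative, not taken from a real run.
# Illustrative only: approximate contents of alg.commands after processInputs()
# for a hypothetical uniqueSuffix of "0". The proj4 strings are placeholders.
example_commands = [
    'g.proj proj4="<input layer proj4>" location=newProj0',
    'g.mapset mapset=PERMANENT location=newProj0',
    # the vector import command appended by loadVectorLayerFromParameter(...)
    'g.mapset mapset=PERMANENT location=temp_location',
    'g.proj -c proj4="<target crs proj4>"',
]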
| 36 | 75 | 0.561683 | 231 | 2,448 | 5.874459 | 0.493506 | 0.032424 | 0.050111 | 0.053058 | 0.201179 | 0.125276 | 0.081061 | 0.081061 | 0.081061 | 0 | 0 | 0.027057 | 0.260212 | 2,448 | 67 | 76 | 36.537313 | 0.722253 | 0.489379 | 0 | 0 | 0 | 0 | 0.227899 | 0.050284 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.037037 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
401141d52ec8be8928fc937b5ae582051fa62e45 | 1,919 | py | Python | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | ["Apache-2.0"] | null | null | null | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | ["Apache-2.0"] | null | null | null | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | ["Apache-2.0"] | null | null | null |
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
from devsim.python_packages.simple_physics import *
import diode_common
device="diode2d"
region="Bulk"
diode_common.Create2DGmshMesh(device, region)
# this is the devsim format
write_devices(file="gmsh_diode2d_out.msh")
diode_common.SetParameters(device=device, region=region)
####
#### NetDoping
####
node_model(device=device, region=region, name="Acceptors", equation="1.0e18*step(0.5e-5-y);")
node_model(device=device, region=region, name="Donors" , equation="1.0e18*step(y-0.5e-5);")
node_model(device=device, region=region, name="NetDoping", equation="Donors-Acceptors;")
diode_common.InitialSolution(device, region)
####
#### Initial DC solution
####
solve(type="dc", absolute_error=1.0, relative_error=1e-12, maximum_iterations=30)
###
### Drift diffusion simulation at equilibrium
###
diode_common.DriftDiffusionInitialSolution(device, region)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=50)
v = 0.0
while v < 0.51:
set_parameter(device=device, name=GetContactBiasName("top"), value=v)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=30)
PrintCurrents(device, "top")
PrintCurrents(device, "bot")
v += 0.1
write_devices(file="gmsh_diode2d.dat", type="tecplot")
write_devices(file="gmsh_diode2d_dd.msh", type="devsim")
| 30.951613 | 93 | 0.755602 | 276 | 1,919 | 5.152174 | 0.474638 | 0.059072 | 0.050633 | 0.067511 | 0.2391 | 0.16526 | 0.16526 | 0.087201 | 0.087201 | 0.087201 | 0 | 0.033569 | 0.115164 | 1,919 | 61 | 94 | 31.459016 | 0.803887 | 0.337676 | 0 | 0 | 0 | 0 | 0.146481 | 0.036007 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
40114e46f1a2c773c276da8bbeeb5529999aac68 | 470 | py | Python | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | ["Apache-2.0"] | null | null | null | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | ["Apache-2.0"] | null | null | null | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | ["Apache-2.0"] | null | null | null |
from dataclasses import dataclass
from typing import Optional
import os
path_base_default = os.getenv('ASTRO_IMAGING_DATA_PATH', default='./')
@dataclass
class Paths:
base: str = path_base_default
    catalogs: Optional[str] = None
    images: Optional[str] = None
def __post_init__(self):
if self.catalogs is None:
self.catalogs = os.path.join(self.base, 'catalogs')
if self.images is None:
self.images = os.path.join(self.base, 'images')
paths_default = Paths()
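A brief usage sketch follows; the override values are hypothetical and only illustrate how __post_init__ fills in the derived directories.
# Hedged usage sketch: defaults derive from ASTRO_IMAGING_DATA_PATH (or './'),
# while explicitly passed fields are left untouched by __post_init__.
paths = Paths()  # catalogs -> '<base>/catalogs', images -> '<base>/images'
custom = Paths(base='/data/astro', images='/scratch/astro/images')
print(paths.catalogs, custom.catalogs, custom.images)
# custom.catalogs becomes '/data/astro/catalogs'; custom.images keeps the override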
| 22.380952 | 70 | 0.66383 | 63 | 470 | 4.746032 | 0.396825 | 0.060201 | 0.100334 | 0.093645 | 0.120401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.229787 | 470 | 20 | 71 | 23.5 | 0.825967 | 0 | 0 | 0 | 0 | 0 | 0.082979 | 0.048936 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4011b94aee384459cb359f2d52855f8d32eb9b50 | 8,018 | py | Python | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | ["MIT"] | 8 | 2021-06-07T11:02:38.000Z | 2022-03-17T11:30:28.000Z | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | ["MIT"] | null | null | null | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | ["MIT"] | null | null | null |
#!/usr/bin/env python
from typing import Tuple
import argparse
from src.loadopts import *
METHOD = "RobOC-AT"
SAVE_FREQ = 5
PRINT_FREQ = 20
FMT = "{description}={scale}-{leverage}" \
"={learning_policy}-{optimizer}-{lr}" \
"={attack}-{epsilon:.4f}-{stepsize}-{steps}" \
"={batch_size}={transform}"
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str)
parser.add_argument("dataset", type=str)
# for orthogonal classifier
parser.add_argument("--scale", type=float, default=10.,
help="the length of weights")
parser.add_argument("--leverage", type=float, default=0.15,
help="the hyper-parameter governs the relative weight between clean and adversarial samples")
# adversarial training settings
parser.add_argument("--attack", type=str, default="pgd-squared")
parser.add_argument("--epsilon", type=float, default=8/255)
parser.add_argument("--stepsize", type=float, default=0.25,
help="pgd:rel_stepsize, cwl2:step_size, deepfool:overshoot, bb:lr")
parser.add_argument("--steps", type=int, default=10)
# basic settings
parser.add_argument("--loss", type=str, default="square")
parser.add_argument("--optimizer", type=str, choices=("sgd", "adam"), default="sgd")
parser.add_argument("-mom", "--momentum", type=float, default=0.9,
help="the momentum used for SGD")
parser.add_argument("-beta1", "--beta1", type=float, default=0.9,
help="the first beta argument for Adam")
parser.add_argument("-beta2", "--beta2", type=float, default=0.999,
help="the second beta argument for Adam")
parser.add_argument("-wd", "--weight_decay", type=float, default=5e-4,
help="weight decay")
parser.add_argument("-lr", "--lr", "--LR", "--learning_rate", type=float, default=0.1)
parser.add_argument("-lp", "--learning_policy", type=str, default="default",
help="learning rate schedule defined in config.py")
parser.add_argument("--epochs", type=int, default=180)
parser.add_argument("-b", "--batch_size", type=int, default=128)
parser.add_argument("--transform", type=str, default='default',
help="the data augmentation which will be applied during training.")
parser.add_argument("--resume", action="store_true", default=False)
parser.add_argument("--progress", action="store_true", default=False,
help="show the progress if true")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("-m", "--description", type=str, default="RobOC-AT")
opts = parser.parse_args()
opts.description = FMT.format(**opts.__dict__)
def load_cfg() -> Tuple[Config, str]:
from src.dict2obj import Config
from src.base import Coach, AdversaryForTrain
from src.utils import gpu, set_seed, load_checkpoint
cfg = Config()
set_seed(opts.seed)
# the model and other settings for training
model = load_model(opts.model)(
num_classes=get_num_classes(opts.dataset),
scale=opts.scale
)
device = gpu(model)
# load the dataset
trainset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=True
)
cfg['trainloader'] = load_dataloader(
dataset=trainset,
batch_size=opts.batch_size,
train=True,
show_progress=opts.progress
)
testset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=False
)
cfg['testloader'] = load_dataloader(
dataset=testset,
batch_size=opts.batch_size,
train=False,
show_progress=opts.progress
)
normalizer = load_normalizer(dataset_type=opts.dataset)
# load the optimizer and learning_policy
optimizer = load_optimizer(
model=model, optim_type=opts.optimizer, lr=opts.lr,
momentum=opts.momentum, betas=(opts.beta1, opts.beta2),
weight_decay=opts.weight_decay
)
learning_policy = load_learning_policy(
optimizer=optimizer,
learning_policy_type=opts.learning_policy,
T_max=opts.epochs
)
# generate the path for logging information and saving parameters
cfg['info_path'], cfg['log_path'] = generate_path(
method=METHOD, dataset_type=opts.dataset,
model=opts.model, description=opts.description
)
if opts.resume:
cfg['start_epoch'] = load_checkpoint(
path=cfg.info_path, model=model,
optimizer=optimizer, lr_scheduler=learning_policy
)
else:
cfg['start_epoch'] = 0
cfg['coach'] = Coach(
model=model, device=device,
loss_func=load_loss_func(opts.loss)(model=model),
normalizer=normalizer, optimizer=optimizer,
learning_policy=learning_policy
)
# set the attack
attack, bounds, preprocessing = load_attacks(
attack_type=opts.attack, dataset_type=opts.dataset,
stepsize=opts.stepsize, steps=opts.steps
)
cfg['attacker'] = AdversaryForTrain(
model=model, attacker=attack, device=device,
bounds=bounds, preprocessing=preprocessing, epsilon=opts.epsilon
)
cfg['valider'] = load_valider(
model=model, device=device, dataset_type=opts.dataset
)
return cfg
def evaluate(
valider, trainloader, testloader,
acc_logger, rob_logger, writter,
epoch = 8888
):
train_accuracy, train_success = valider.evaluate(trainloader)
valid_accuracy, valid_success = valider.evaluate(testloader)
print(f"Train >>> [TA: {train_accuracy:.5f}] [RA: {1 - train_success:.5f}]")
print(f"Test. >>> [TA: {valid_accuracy:.5f}] [RA: {1 - valid_success:.5f}]")
writter.add_scalars("Accuracy", {"train":train_accuracy, "valid":valid_accuracy}, epoch)
writter.add_scalars("Success", {"train":train_success, "valid":valid_success}, epoch)
acc_logger.train(data=train_accuracy, T=epoch)
acc_logger.valid(data=valid_accuracy, T=epoch)
rob_logger.train(data=1 - train_success, T=epoch)
rob_logger.valid(data=1 - valid_success, T=epoch)
def main(
coach, attacker, valider,
trainloader, testloader, start_epoch,
info_path, log_path
):
from src.utils import save_checkpoint, TrackMeter, ImageMeter
from src.dict2obj import Config
acc_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
acc_logger.plotter = ImageMeter(*acc_logger.values(), title="Accuracy")
rob_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
rob_logger.plotter = ImageMeter(*rob_logger.values(), title="Robustness")
for epoch in range(start_epoch, opts.epochs):
if epoch % SAVE_FREQ == 0:
save_checkpoint(info_path, coach.model, coach.optimizer, coach.learning_policy, epoch)
if epoch % PRINT_FREQ == 0:
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=epoch
)
running_loss = coach.adv_train(trainloader, attacker, leverage=opts.leverage, epoch=epoch)
writter.add_scalar("Loss", running_loss, epoch)
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=opts.epochs
)
acc_logger.plotter.plot()
rob_logger.plotter.plot()
acc_logger.plotter.save(writter)
rob_logger.plotter.save(writter)
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
from src.utils import mkdirs, readme
cfg = load_cfg()
mkdirs(cfg.info_path, cfg.log_path)
readme(cfg.info_path, opts)
readme(cfg.log_path, opts, mode="a")
writter = SummaryWriter(log_dir=cfg.log_path, filename_suffix=METHOD)
main(**cfg)
cfg['coach'].save(cfg.info_path)
writter.close()
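For reference, here is a hedged sketch of how the run description string is assembled from the parsed options; the positional values "resnet32" and "cifar10" are placeholders, since the names actually accepted depend on src.loadopts, which is not shown here.
# Hedged sketch, not part of the script: the description used for the logging
# paths is built by formatting FMT with the parsed options. With the argparse
# defaults above and placeholder positionals, it comes out roughly as shown.
demo_opts = parser.parse_args(["resnet32", "cifar10"])
print(FMT.format(**demo_opts.__dict__))
# -> RobOC-AT=10.0-0.15=default-sgd-0.1=pgd-squared-0.0314-0.25-10=128=default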
| 33.974576 | 109 | 0.669494 | 979 | 8,018 | 5.312564 | 0.223698 | 0.0398 | 0.075178 | 0.019612 | 0.161123 | 0.130744 | 0.112286 | 0.088829 | 0.068833 | 0.068833 | 0 | 0.009537 | 0.202295 | 8,018 | 235 | 110 | 34.119149 | 0.803627 | 0.033425 | 0 | 0.120879 | 0 | 0.005495 | 0.151182 | 0.022742 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016484 | false | 0 | 0.054945 | 0 | 0.076923 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |