hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d5f145cd20b239a3c57372ccdd383e5cb623483d | 4,570 | py | Python | src/luminol/testinsy/luminol_1205.py | zhouyuan0805/luminol-work | cb29a7edebb5f1d8a8a55d6d02e43df5502584f2 | [
"Apache-2.0"
] | null | null | null | src/luminol/testinsy/luminol_1205.py | zhouyuan0805/luminol-work | cb29a7edebb5f1d8a8a55d6d02e43df5502584f2 | [
"Apache-2.0"
] | null | null | null | src/luminol/testinsy/luminol_1205.py | zhouyuan0805/luminol-work | cb29a7edebb5f1d8a8a55d6d02e43df5502584f2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import pandas as pd
import datetime
import time
import numpy as np
import matplotlib.pyplot as plt
import csv
from luminol.anomaly_detector import AnomalyDetector
def listdir(path, list_name, file_name):
    """Recursively collect every file under *path*.

    Args:
        path: root directory to scan.
        list_name: output list; full file paths are appended here.
        file_name: output list; bare file names are appended here.
    """
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            # Bug fix: the recursive call previously omitted ``file_name``,
            # raising TypeError on the first nested directory encountered.
            listdir(entry_path, list_name, file_name)
        else:
            list_name.append(entry_path)
            file_name.append(entry)
def timestamp_to_datetime(x):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x))
def read_csv_to_df(file_path, delimiter='\t', flag=1):
    """Read a delimited KPI file into a pandas DataFrame.

    Args:
        file_path: path of the input file.
        delimiter: field separator (tab by default).
        flag: 1 -> the file has a header and its first column holds POSIX
            timestamps, which are converted to local-time strings;
            any other value -> the file has no header and is loaded with a
            parsed-datetime index and columns ['kpi_time', 'kpi_value'].

    Returns:
        pandas.DataFrame
    """
    def timestamp_to_datetime(x):
        return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x))

    if flag == 1:
        # ``pd.read_csv`` already returns a DataFrame; the original's extra
        # ``pd.DataFrame(csv_data)`` wrap was redundant.
        data_df = pd.read_csv(file_path, delimiter=delimiter)
        # ``DataFrame.ix`` was removed from pandas; rewrite the first column
        # by name instead (avoids chained/positional assignment warnings).
        first_col = data_df.columns[0]
        data_df[first_col] = data_df[first_col].apply(timestamp_to_datetime)
        return data_df
    else:
        data_df = pd.read_csv(file_path, delimiter=delimiter,
                              names=['kpi_time', 'kpi_value'],
                              index_col=0, parse_dates=True)
        return data_df
def df_to_series(data_df):
    """Turn a two-column (time string, value) DataFrame into a datetime-indexed Series.

    The first column is parsed with the module-wide ``FORMAT1`` pattern and
    becomes the index of the value (second) column.
    """
    # ``DataFrame.ix`` was removed from pandas; use positional ``.iloc``.
    series_tmp = data_df.iloc[:, 1]
    series_tmp.index = data_df.iloc[:, 0].apply(
        lambda x: datetime.datetime.strptime(x, FORMAT1))
    return series_tmp
def series_to_df(data_series):
    """Convert a Series into a two-column DataFrame: kpi_time (index) and kpi_value."""
    paired_rows = list(zip(data_series.index, data_series.values))
    return pd.DataFrame(paired_rows, columns=['kpi_time', 'kpi_value'])
def series_to_csv(write_path, data_series):
    """Dump a Series to CSV as bare (kpi_time, kpi_value) rows, no header/index."""
    series_to_df(data_series).to_csv(write_path, index=False, header=False)
def score_to_df(score_data):
    """Convert a luminol anomaly-score TimeSeries into a (kpi_time, kpi_value) DataFrame.

    Args:
        score_data: score series keyed by epoch timestamps; values are
            converted to local-time strings via ``timestamp_to_datetime``
            after dividing by 1000 (timestamps are in milliseconds).

    Returns:
        pandas.DataFrame with columns ['kpi_time', 'kpi_value'].
    """
    temp = []
    # Bug fix: iterate the ``score_data`` parameter instead of the module
    # global ``score`` the original accidentally closed over.
    for timestamp, value in score_data.iteritems():
        temp.append([timestamp_to_datetime(timestamp / 1000), value])
    temp_df = pd.DataFrame(temp, columns=['kpi_time', 'kpi_value'])
    return temp_df
# NOTE(review): this is a Python 2 script (see the bare ``print`` statement
# below); it also relies on pandas APIs that were later removed
# (``.ix`` in the helpers, ``resample(how=...)``), so it only runs on a
# legacy environment as written.
FORMAT1 = '%Y-%m-%d %H:%M:%S'
# Hard-coded Windows data directories: raw input, resampled output,
# rendered figures, and anomaly-score CSVs.
read_path = 'K://Algorithm_study_insuyan/data/luminol_ip30/data/'
sample_path = 'K://Algorithm_study_insuyan/data/luminol_ip30/sample_data/'
figure_path = 'K://Algorithm_study_insuyan/data/luminol_ip30/figure/'
score_path = 'K://Algorithm_study_insuyan/data/luminol_ip30/score/'
file_list = []
filename_list = []
filename = []
index_name = []
listdir(read_path, file_list, filename_list)
for i in filename_list:
    filename.append(os.path.splitext(i)[0])
    # Index name = second "_"-separated token of the file stem.
    index_name.append(os.path.splitext(i)[0].split('_')[1])
for i in range(len(file_list)):
    original_df = read_csv_to_df(file_list[i],flag=1)
    original_series = df_to_series(original_df)
    # 5-minute mean resampling (legacy ``how=`` keyword).
    sample_series = original_series.resample('5min',how='mean',closed='left')
    # print sample_series['2017-11-18']
    # Persist the 2017-11-02 slice, score it with luminol, save the scores.
    series_to_csv(sample_path + '02/'+index_name[i] + '_5min_sample.csv', sample_series['2017-11-02'])
    print index_name[i]
    my_detector = AnomalyDetector(sample_path + '02/' + index_name[i] + '_5min_sample.csv')
    score = my_detector.get_all_scores()
    score_series = df_to_series(score_to_df(score))
    series_to_csv(score_path + '02/' + filename[i] + '.csv', score_series)
    #plot figure
    # Two stacked panels: the resampled series on top, its score below.
    fig, axes = plt.subplots(2, 1)
    axes[0].plot(sample_series['2017-11-02'])
    axes[0].set_title(index_name[i]+'5 min resample series and score D:1102')
    axes[0].legend(['sample series'])
    axes[1].plot(score_series, color='r', linestyle='-')
    axes[1].legend(['score series'])
    plt.savefig(figure_path + '020/' + filename[i] + '.png', dpi=300)
    plt.close()
# for i in range(len(file_list)):
# original_df = read_csv_to_df(file_list[i],flag=1)
# original_series = df_to_series(original_df)
#
# sample_series = original_series.resample('5min',how='mean',closed='left')
# # print sample_series['2017-11-18']
# series_to_csv(sample_path + '02/'+index_name[i] + '_5min_sample.csv', sample_series['2017-11-02'])
# print index_name[i]
# my_detector = AnomalyDetector(sample_path + '02/' + index_name[i] + '_5min_sample.csv')
# score = my_detector.get_all_scores()
# score_series = df_to_series(score_to_df(score))
# series_to_csv(score_path + '02/' + filename[i] + '.csv', score_series)
#
# #plot figure
# fig, axes = plt.subplots(2, 1)
# axes[0].plot(sample_series['2017-11-02'])
# axes[0].set_title(index_name[i]+'5 min resample series and score D:1102')
# axes[0].legend(['sample series'])
# axes[1].plot(score_series, color='r', linestyle='-')
# axes[1].legend(['score series'])
# plt.savefig(figure_path + '020/' + filename[i] + '.png', dpi=300)
# plt.close()
| 37.459016 | 104 | 0.678556 |
4d01e959d61e7410ef3136cfe8e523428794357c | 1,606 | py | Python | yah/apis/x_token.py | sunsx0/yah | c073015dfa1fb2b5232c3ec4a9b9dbae571f7053 | [
"MIT"
] | null | null | null | yah/apis/x_token.py | sunsx0/yah | c073015dfa1fb2b5232c3ec4a9b9dbae571f7053 | [
"MIT"
] | null | null | null | yah/apis/x_token.py | sunsx0/yah | c073015dfa1fb2b5232c3ec4a9b9dbae571f7053 | [
"MIT"
] | null | null | null | import typing
import dataclasses as dc
from .._api_base import ApiBase, forward_resp
from .._http.types import Response
@dc.dataclass
class XTokenAuthApi(ApiBase):
    """Yandex passport authentication helpers driven by an OAuth "x-token".

    Exchanges an x-token for passport session cookies via the mobileproxy
    endpoint, and offers cookie validity checking / refresh on top of the
    inherited HTTP ``client``.
    """

    async def login_token(self, x_token: str) -> bool:
        """Exchange *x_token* for passport session cookies.

        Returns True when the session chain completes, False when the
        passport endpoint rejects the token. Raises ValueError on an
        unexpected status from the session follow-up request.
        """
        payload = {
            'type': 'x-token',
            'retpath': 'https://www.yandex.ru/androids.txt'
        }
        headers = {
            'Ya-Consumer-Authorization': f'OAuth {x_token}',
        }
        resp1: typing.Any = await self.client.post(
            'https://mobileproxy.passport.yandex.net/1/bundle/auth/x_token/',
            typing.Any,
            data=payload,
            headers=headers,
        )
        if resp1['status'] != 'ok':
            return False
        # Follow up against the passport host returned by the first call,
        # using its track_id to complete the session.
        host: str = resp1['passport_host']
        payload = {'track_id': resp1['track_id']}
        resp2: Response = await self.client.get(
            f'{host}/auth/session/',
            Response,
            query=payload,
            response_parser=forward_resp,
        )
        # NOTE(review): a 404 here appears to be the expected "success"
        # outcome of the androids.txt retpath probe — confirm against the
        # passport API before changing this check.
        if resp2.status != 404:
            raise ValueError(f'Status check failed {resp2.status}')
        return True

    async def check_cookie_is_valid(self) -> bool:
        """Return True when the stored session cookies are still accepted by quasar."""
        resp: Response = await self.client.get(
            'https://quasar.yandex.ru/get_account_config',
            Response,
            response_parser=forward_resp,
        )
        data = await resp.json()
        return bool(data['status'] == 'ok')

    async def refresh_cookies(self, x_token: str) -> bool:
        """Re-login with *x_token* only if the current cookies are no longer valid."""
        return (
            await self.check_cookie_is_valid()
            or await self.login_token(x_token)
        )
290c491472a891fb79d6e119d41014eb1aa50c8e | 11,246 | py | Python | tests/dp_computations_test.py | zachferr/PipelineDP | 0e3b7f73b7959fedc9011b39c53d0dbc607815d2 | [
"Apache-2.0"
] | null | null | null | tests/dp_computations_test.py | zachferr/PipelineDP | 0e3b7f73b7959fedc9011b39c53d0dbc607815d2 | [
"Apache-2.0"
] | null | null | null | tests/dp_computations_test.py | zachferr/PipelineDP | 0e3b7f73b7959fedc9011b39c53d0dbc607815d2 | [
"Apache-2.0"
] | null | null | null | import unittest
import numpy as np
from scipy.stats import skew, kurtosis
import pipeline_dp
from pipeline_dp.dp_computations import *
class MeanVarParams(unittest.TestCase):
    """Unit tests for ``pipeline_dp.dp_computations``.

    Note: despite its name, this is the test suite exercising
    ``pipeline_dp.dp_computations.MeanVarParams`` and the DP noise helpers.
    The noise tests draw ~1e6 samples and compare empirical moments (mean,
    std, skewness, excess kurtosis) against the analytic values of the
    Laplace / Gaussian distributions, so they are slow by design.
    """

    def test_l0_sensitivity(self):
        """L0 sensitivity equals max_partitions_contributed."""
        params = pipeline_dp.dp_computations.MeanVarParams(
            eps=1,
            delta=1e-10,
            low=2,
            high=3,
            max_partitions_contributed=4,
            max_contributions_per_partition=5,
            noise_kind=pipeline_dp.NoiseKind.LAPLACE)
        self.assertEqual(params.l0_sensitivity(), 4)

    def test_l1_sensitivity(self):
        # l1 = l0 * linf
        self.assertEqual(
            pipeline_dp.dp_computations.compute_l1_sensitivity(
                l0_sensitivity=4, linf_sensitivity=12), 48)

    def test_l2_sensitivity(self):
        # l2 = sqrt(l0) * linf
        self.assertEqual(
            pipeline_dp.dp_computations.compute_l2_sensitivity(
                l0_sensitivity=4, linf_sensitivity=12), 24)

    def test_compute_sigma(self):
        """Gaussian sigma = sqrt(2 * ln(1.25/delta)) * l2 / eps."""
        self.assertEqual(
            pipeline_dp.dp_computations.compute_sigma(eps=1,
                                                      delta=1,
                                                      l2_sensitivity=10),
            np.sqrt(2 * np.log(1.25)) * 10)
        self.assertEqual(
            pipeline_dp.dp_computations.compute_sigma(eps=0.5,
                                                      delta=1e-10,
                                                      l2_sensitivity=10),
            np.sqrt(2 * np.log(1.25 / 1e-10)) * 20)

    def _test_laplace_noise(self, results, value, eps, l1_sensitivity):
        # Laplace(b) with b = l1/eps: std = sqrt(2)*b, skewness 0,
        # excess kurtosis 3 (scipy's default Fisher definition).
        self.assertAlmostEqual(np.mean(results), value, delta=0.1)
        self.assertAlmostEqual(np.std(results),
                               np.sqrt(2) * l1_sensitivity / eps,
                               delta=0.1)
        self.assertAlmostEqual(skew(results), 0, delta=0.1)
        self.assertAlmostEqual(kurtosis(results), 3, delta=0.1)

    def _test_gaussian_noise(self, results, value, eps, delta, l2_sensitivity):
        # Gaussian noise: std = sigma(eps, delta, l2), skewness 0,
        # excess kurtosis 0.
        self.assertAlmostEqual(np.mean(results), value, delta=0.1)
        self.assertAlmostEqual(np.std(results),
                               pipeline_dp.dp_computations.compute_sigma(
                                   eps, delta, l2_sensitivity),
                               delta=0.1)
        self.assertAlmostEqual(skew(results), 0, delta=0.1)
        self.assertAlmostEqual(kurtosis(results), 0, delta=0.1)

    def test_apply_laplace_mechanism(self):
        results = [
            pipeline_dp.dp_computations.apply_laplace_mechanism(
                value=20, eps=0.5, l1_sensitivity=1) for _ in range(1000000)
        ]
        self._test_laplace_noise(results, value=20, eps=0.5, l1_sensitivity=1)

    def test_apply_gaussian_mechanism(self):
        results = [
            pipeline_dp.dp_computations.apply_gaussian_mechanism(
                value=20, eps=0.5, delta=1e-10, l2_sensitivity=1)
            for _ in range(1000000)
        ]
        self._test_gaussian_noise(results,
                                  value=20,
                                  eps=0.5,
                                  delta=1e-10,
                                  l2_sensitivity=1)

    def test_compute_dp_count(self):
        """DP count noise matches the analytic Laplace/Gaussian moments."""
        params = pipeline_dp.dp_computations.MeanVarParams(
            eps=0.5,
            delta=1e-10,
            low=2,
            high=3,
            max_partitions_contributed=1,
            max_contributions_per_partition=1,
            noise_kind=pipeline_dp.NoiseKind.LAPLACE)
        l0_sensitivity = params.l0_sensitivity()
        # For count, linf is the per-partition contribution cap.
        linf_sensitivity = params.max_contributions_per_partition
        # Laplace Mechanism
        l1_sensitivity = pipeline_dp.dp_computations.compute_l1_sensitivity(
            l0_sensitivity, linf_sensitivity)
        results = [
            pipeline_dp.dp_computations.compute_dp_count(count=10,
                                                         dp_params=params)
            for _ in range(1000000)
        ]
        self._test_laplace_noise(results, 10, params.eps, l1_sensitivity)
        # Gaussian Mechanism
        params.noise_kind = pipeline_dp.NoiseKind.GAUSSIAN
        l2_sensitivity = pipeline_dp.dp_computations.compute_l2_sensitivity(
            l0_sensitivity, linf_sensitivity)
        results = [
            pipeline_dp.dp_computations.compute_dp_count(count=10,
                                                         dp_params=params)
            for _ in range(1000000)
        ]
        self._test_gaussian_noise(results, 10, params.eps, params.delta,
                                  l2_sensitivity)

    def test_compute_dp_sum(self):
        """DP sum noise matches the analytic moments; linf scales with the value cap."""
        params = pipeline_dp.dp_computations.MeanVarParams(
            eps=0.5,
            delta=1e-10,
            low=2,
            high=3,
            max_partitions_contributed=1,
            max_contributions_per_partition=1,
            noise_kind=pipeline_dp.NoiseKind.LAPLACE)
        l0_sensitivity = params.l0_sensitivity()
        # For sum, linf also scales with the per-value magnitude bound.
        linf_sensitivity = params.max_contributions_per_partition * max(
            params.low, params.high)
        # Laplace Mechanism
        l1_sensitivity = pipeline_dp.dp_computations.compute_l1_sensitivity(
            l0_sensitivity, linf_sensitivity)
        results = [
            pipeline_dp.dp_computations.compute_dp_sum(sum=10, dp_params=params)
            for _ in range(1000000)
        ]
        self._test_laplace_noise(results, 10, params.eps, l1_sensitivity)
        # Gaussian Mechanism
        params.noise_kind = pipeline_dp.NoiseKind.GAUSSIAN
        l2_sensitivity = pipeline_dp.dp_computations.compute_l2_sensitivity(
            l0_sensitivity, linf_sensitivity)
        results = [
            pipeline_dp.dp_computations.compute_dp_sum(sum=10, dp_params=params)
            for _ in range(1000000)
        ]
        self._test_gaussian_noise(results, 10, params.eps, params.delta,
                                  l2_sensitivity)

    def test_equally_split_budget(self):
        """Budget is split evenly, with the remainder folded into the last share."""
        # The number of mechanisms must be bigger than 0.
        with self.assertRaises(ValueError):
            pipeline_dp.dp_computations.equally_split_budget(0.5, 1e-10, 0)
        # Only one mechanism.
        self.assertEqual(
            pipeline_dp.dp_computations.equally_split_budget(0.5, 1e-10, 1),
            [(0.5, 1e-10)])
        # Multiple mechanisms.
        expected_budgets = [(0.5 / 5, 1e-10 / 5) for _ in range(4)]
        expected_budgets.append((0.5 - 4 * (0.5 / 5), 1e-10 - 4 * (1e-10 / 5)))
        self.assertEqual(
            pipeline_dp.dp_computations.equally_split_budget(0.5, 1e-10, 5),
            expected_budgets)

    def test_compute_dp_mean(self):
        """DP mean: budget split across (count, sum); noisy count has the expected moments."""
        params = pipeline_dp.dp_computations.MeanVarParams(
            eps=0.5,
            delta=1e-10,
            low=1,
            high=20,
            max_partitions_contributed=1,
            max_contributions_per_partition=1,
            noise_kind=pipeline_dp.NoiseKind.LAPLACE)
        # First share of the two-way split is the count budget.
        (count_eps,
         count_delta), (_,
                        _) = pipeline_dp.dp_computations.equally_split_budget(
                            params.eps, params.delta, 2)
        l0_sensitivity = params.l0_sensitivity()
        count_linf_sensitivity = params.max_contributions_per_partition
        # Laplace Mechanism
        results = [
            pipeline_dp.dp_computations.compute_dp_mean(count=1000,
                                                        sum=10000,
                                                        dp_params=params)
            for _ in range(1000000)
        ]
        count_values, sum_values, mean_values = zip(*results)
        self._test_laplace_noise(
            count_values, 1000, count_eps,
            pipeline_dp.dp_computations.compute_l1_sensitivity(
                l0_sensitivity, count_linf_sensitivity))
        self.assertAlmostEqual(np.mean(sum_values), 10000, delta=0.2)
        self.assertAlmostEqual(np.mean(mean_values), 10, delta=0.1)
        # Gaussian Mechanism
        params.noise_kind = pipeline_dp.NoiseKind.GAUSSIAN
        results = [
            pipeline_dp.dp_computations.compute_dp_mean(count=1000,
                                                        sum=10000,
                                                        dp_params=params)
            for _ in range(1500000)
        ]
        count_values, sum_values, mean_values = zip(*results)
        self._test_gaussian_noise(
            count_values, 1000, count_eps, count_delta,
            pipeline_dp.dp_computations.compute_l2_sensitivity(
                l0_sensitivity, count_linf_sensitivity))
        self.assertAlmostEqual(np.mean(sum_values), 10000, delta=1)
        self.assertAlmostEqual(np.mean(mean_values), 10, delta=0.1)

    def test_compute_dp_var(self):
        """DP variance: budget split across (count, sum, sum of squares)."""
        params = pipeline_dp.dp_computations.MeanVarParams(
            eps=10,
            delta=1e-10,
            low=1,
            high=20,
            max_partitions_contributed=1,
            max_contributions_per_partition=1,
            noise_kind=pipeline_dp.NoiseKind.LAPLACE)
        # First share of the three-way split is the count budget.
        (count_eps, count_delta), (_, _), (
            _, _) = pipeline_dp.dp_computations.equally_split_budget(
                params.eps, params.delta, 3)
        l0_sensitivity = params.l0_sensitivity()
        count_linf_sensitivity = params.max_contributions_per_partition
        # Laplace Mechanism
        results = [
            pipeline_dp.dp_computations.compute_dp_var(count=100000,
                                                       sum=1000000,
                                                       sum_squares=20000000,
                                                       dp_params=params)
            for _ in range(1500000)
        ]
        count_values, sum_values, sum_squares_values, var_values = zip(*results)
        self._test_laplace_noise(
            count_values, 100000, count_eps,
            pipeline_dp.dp_computations.compute_l1_sensitivity(
                l0_sensitivity, count_linf_sensitivity))
        self.assertAlmostEqual(np.mean(sum_values), 1000000, delta=1)
        self.assertAlmostEqual(np.mean(sum_squares_values), 20000000, delta=1)
        self.assertAlmostEqual(np.mean(var_values), 100, delta=0.1)
        # Gaussian Mechanism
        params.noise_kind = pipeline_dp.NoiseKind.GAUSSIAN
        results = [
            pipeline_dp.dp_computations.compute_dp_var(count=100000,
                                                       sum=1000000,
                                                       sum_squares=20000000,
                                                       dp_params=params)
            for _ in range(1500000)
        ]
        count_values, sum_values, sum_squares_values, var_values = zip(*results)
        self._test_gaussian_noise(
            count_values, 100000, count_eps, count_delta,
            pipeline_dp.dp_computations.compute_l2_sensitivity(
                l0_sensitivity, count_linf_sensitivity))
        self.assertAlmostEqual(np.mean(sum_values), 1000000, delta=1)
        self.assertAlmostEqual(np.mean(sum_squares_values), 20000000, delta=1)
        self.assertAlmostEqual(np.mean(var_values), 100, delta=0.1)
# Allow running this test module directly (python dp_computations_test.py).
if __name__ == '__main__':
    unittest.main()
| 42.11985 | 80 | 0.581629 |
30a08512982407dda76ceae222114005e407265d | 3,984 | py | Python | skbio/sequence/_rna.py | josenavas/scikit-bio | 5fae6925b2ed0c987d33246b18c285018cef7c0c | [
"BSD-3-Clause"
] | null | null | null | skbio/sequence/_rna.py | josenavas/scikit-bio | 5fae6925b2ed0c987d33246b18c285018cef7c0c | [
"BSD-3-Clause"
] | null | null | null | skbio/sequence/_rna.py | josenavas/scikit-bio | 5fae6925b2ed0c987d33246b18c285018cef7c0c | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import classproperty, overrides
from ._nucleotide_sequence import NucleotideSequence
from ._iupac_sequence import IUPACSequence
class RNA(NucleotideSequence):
    """Store RNA sequence data and optional associated metadata.

    Only characters in the IUPAC RNA character set [1]_ are supported.

    Parameters
    ----------
    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
        Characters representing the RNA sequence itself.
    id : str, optional
        Sequence identifier (e.g., an accession number).
    description : str, optional
        Description or comment about the sequence (e.g., "green fluorescent
        protein").
    quality : 1D array_like (int), optional
        Phred quality scores stored as nonnegative integers, one per sequence
        character. If provided, must be the same length as the RNA sequence.
        Can be a 1D ``np.ndarray`` of integers or a structure that can be
        converted into this representation using ``np.asarray``. A copy will
        *not* be made if `quality` is already a 1D ``np.ndarray`` with an
        ``int`` ``dtype``. The array will be made read-only (i.e., its
        ``WRITEABLE`` flag will be set to ``False``).
    validate : bool, optional
        If ``True``, validation will be performed to ensure that all sequence
        characters are in the IUPAC RNA character set. If ``False``, validation
        will not be performed. Turning off validation will improve runtime
        performance. If invalid characters are present, however, there is
        **no guarantee that operations performed on the resulting object will
        work or behave as expected.** Only turn off validation if you are
        certain that the sequence characters are valid. To store sequence data
        that is not IUPAC-compliant, use ``Sequence``.
    case_insensitive : bool, optional
        If ``True``, lowercase sequence characters will be converted to
        uppercase characters in order to be valid IUPAC RNA characters.

    Attributes
    ----------
    id
    description
    sequence
    quality
    alphabet
    gap_chars
    nondegenerate_chars
    degenerate_chars
    degenerate_map
    complement_map

    See Also
    --------
    DNA

    References
    ----------
    .. [1] Nomenclature for incompletely specified bases in nucleic acid
       sequences: recommendations 1984.
       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
       A Cornish-Bowden

    Examples
    --------
    >>> from skbio import RNA
    >>> s = RNA('ACCGAAU')
    >>> s
    RNA('ACCGAAU', length=7)

    Convert lowercase characters to uppercase:

    >>> s = RNA('AcCGaaU', case_insensitive=True)
    >>> s
    RNA('ACCGAAU', length=7)

    """

    @classproperty
    @overrides(NucleotideSequence)
    def complement_map(cls):
        # IUPAC complement pairs, including degenerate codes; gap characters
        # complement to themselves.
        comp_map = {
            'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
            'H': 'D', 'V': 'B', 'N': 'N'
        }
        comp_map.update({c: c for c in cls.gap_chars})
        return comp_map

    @classproperty
    @overrides(IUPACSequence)
    def nondegenerate_chars(cls):
        # The four canonical RNA bases.
        return set("ACGU")

    @classproperty
    @overrides(IUPACSequence)
    def degenerate_map(cls):
        # Each IUPAC degenerate code maps to the set of bases it stands for.
        return {
            "R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
            "W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
            "H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
        }
| 34.643478 | 79 | 0.597139 |
544fca8cecd0a2b94a5aec40b9442f86036fd4d2 | 6,491 | py | Python | python/paddle/fluid/tests/unittests/test_operator.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 17,085 | 2016-11-18T06:40:52.000Z | 2022-03-31T22:52:32.000Z | python/paddle/fluid/tests/unittests/test_operator.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 29,769 | 2016-11-18T06:35:22.000Z | 2022-03-31T16:46:15.000Z | python/paddle/fluid/tests/unittests/test_operator.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 4,641 | 2016-11-18T07:43:33.000Z | 2022-03-31T15:15:02.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid.op as op
import paddle.fluid.proto.framework_pb2 as framework_pb2
class TestGetAllProtos(unittest.TestCase):

    def test_all(self):
        """The framework must expose a non-empty set of fully initialized op protos."""
        protos = op.get_all_op_protos()
        self.assertNotEqual(0, len(protos))
        for proto in protos:
            self.assertTrue(proto.IsInitialized())
class TestOpDescCreationMethod(unittest.TestCase):
    """Tests for ``op.OpDescCreationMethod``: building OpDesc protos from OpProto specs."""

    def test_plain_input_output(self):
        """Scalar inputs/outputs map each kwarg to a single-argument slot."""
        op_proto = framework_pb2.OpProto()
        op_proto.type = "test"
        ipt = op_proto.inputs.add()
        ipt.name = "X"
        ipt.comment = "not matter"
        ipt = op_proto.inputs.add()
        ipt.name = "Y"
        ipt.comment = "not matter"
        opt = op_proto.outputs.add()
        opt.name = "Z"
        opt.comment = "not matter"
        op_proto.comment = "not matter"
        self.assertTrue(op_proto.IsInitialized())
        method = op.OpDescCreationMethod(op_proto)
        output = method(X="a", Y="b", Z="c")
        # Build the expected OpDesc by hand and compare proto equality.
        expected = framework_pb2.OpDesc()
        expected.type = "test"
        ipt_0 = expected.inputs.add()
        ipt_0.parameter = "X"
        ipt_0.arguments.extend(["a"])
        ipt_1 = expected.inputs.add()
        ipt_1.parameter = 'Y'
        ipt_1.arguments.extend(['b'])
        opt = expected.outputs.add()
        opt.parameter = "Z"
        opt.arguments.extend(["c"])
        self.assertEqual(expected, output)

    def test_multiple_input_plain_output(self):
        """Duplicable inputs accept either a single name or a list of names."""
        op_proto = framework_pb2.OpProto()
        op_proto.type = "fc"
        ipt = op_proto.inputs.add()
        ipt.name = "X"
        ipt.comment = ""
        ipt.duplicable = True
        ipt = op_proto.inputs.add()
        ipt.name = "W"
        ipt.comment = ""
        ipt.duplicable = True
        ipt = op_proto.inputs.add()
        ipt.name = "b"
        ipt.comment = ""
        out = op_proto.outputs.add()
        out.name = "Y"
        out.comment = ""
        op_proto.comment = ""
        self.assertTrue(op_proto.IsInitialized())
        method = op.OpDescCreationMethod(op_proto)
        # Case 1: every input given as a single name.
        generated1 = method(X="x", W="w", b="b", Y="y")
        expected1 = framework_pb2.OpDesc()
        tmp = expected1.inputs.add()
        tmp.parameter = "X"
        tmp.arguments.extend(['x'])
        tmp = expected1.inputs.add()
        tmp.parameter = 'W'
        tmp.arguments.extend(['w'])
        tmp = expected1.inputs.add()
        tmp.parameter = 'b'
        tmp.arguments.extend(['b'])
        tmp = expected1.outputs.add()
        tmp.parameter = 'Y'
        tmp.arguments.extend(['y'])
        expected1.type = 'fc'
        self.assertEqual(expected1, generated1)
        # Case 2: duplicable inputs given as lists of names.
        generated2 = method(
            X=['x1', 'x2', 'x3'], b='b', W=['w1', 'w2', 'w3'], Y='y')
        expected2 = framework_pb2.OpDesc()
        tmp = expected2.inputs.add()
        tmp.parameter = "X"
        tmp.arguments.extend(['x1', 'x2', 'x3'])
        tmp = expected2.inputs.add()
        tmp.parameter = 'W'
        tmp.arguments.extend(['w1', 'w2', 'w3'])
        tmp = expected2.inputs.add()
        tmp.parameter = 'b'
        tmp.arguments.extend(['b'])
        tmp = expected2.outputs.add()
        tmp.parameter = 'Y'
        tmp.arguments.extend(['y'])
        expected2.type = 'fc'
        self.assertEqual(expected2, generated2)

    def test_attrs(self):
        """Every supported attribute type round-trips into the right OpDesc field."""
        op_proto = framework_pb2.OpProto()
        op_proto.type = "test"
        ipt = op_proto.inputs.add()
        ipt.name = 'X'
        ipt.comment = ""

        # Helper to register one typed attribute on the proto.
        # (``type`` intentionally shadows the builtin inside this helper.)
        def __add_attr__(name, type):
            attr = op_proto.attrs.add()
            attr.name = name
            attr.comment = ""
            attr.type = type

        __add_attr__("int_attr", framework_pb2.INT)
        __add_attr__("float_attr", framework_pb2.FLOAT)
        __add_attr__("string_attr", framework_pb2.STRING)
        __add_attr__("ints_attr", framework_pb2.INTS)
        __add_attr__("floats_attr", framework_pb2.FLOATS)
        __add_attr__("strings_attr", framework_pb2.STRINGS)
        op_proto.comment = ""
        self.assertTrue(op_proto.IsInitialized())
        method = op.OpDescCreationMethod(op_proto)
        generated = method(
            X="a",
            int_attr=10,
            float_attr=3.2,
            string_attr="test_str",
            ints_attr=[0, 1, 2, 3, 4],
            floats_attr=[0.2, 3.2, 4.5],
            strings_attr=["a", "b", "c"])
        expected = framework_pb2.OpDesc()
        expected.type = "test"
        ipt = expected.inputs.add()
        ipt.parameter = "X"
        ipt.arguments.extend(['a'])
        attr = expected.attrs.add()
        attr.name = "int_attr"
        attr.type = framework_pb2.INT
        attr.i = 10
        attr = expected.attrs.add()
        attr.name = "float_attr"
        attr.type = framework_pb2.FLOAT
        attr.f = 3.2
        attr = expected.attrs.add()
        attr.name = "string_attr"
        attr.type = framework_pb2.STRING
        attr.s = "test_str"
        attr = expected.attrs.add()
        attr.name = "ints_attr"
        attr.type = framework_pb2.INTS
        attr.ints.extend([0, 1, 2, 3, 4])
        attr = expected.attrs.add()
        attr.name = 'floats_attr'
        attr.type = framework_pb2.FLOATS
        attr.floats.extend([0.2, 3.2, 4.5])
        attr = expected.attrs.add()
        attr.name = 'strings_attr'
        attr.type = framework_pb2.STRINGS
        attr.strings.extend(['a', 'b', 'c'])
        self.assertEqual(expected, generated)
class TestOpCreations(unittest.TestCase):

    def test_all(self):
        """Creating a "sum" operator should succeed and render a stable debug string."""
        add_op = op.Operator("sum", X=["a", "b"], Out="z")
        self.assertIsNotNone(add_op)
        # Invoke C++ DebugString()
        self.assertEqual('Op(sum), inputs:{X[a, b]}, outputs:{Out[z]}.',
                         str(add_op))
# Allow running this test module directly (python test_operator.py).
if __name__ == "__main__":
    unittest.main()
| 29.371041 | 74 | 0.581729 |
3adb9b2e8a48721d0ec5f93f847296e11b5756fa | 5,505 | py | Python | experiments/test_defense.py | thunlp/ONION | 0f4ae2109253273ecac704935a500857fe002651 | [
"MIT"
] | 9 | 2021-09-18T13:09:23.000Z | 2022-01-09T02:50:05.000Z | experiments/test_defense.py | thunlp/ONION | 0f4ae2109253273ecac704935a500857fe002651 | [
"MIT"
] | 2 | 2021-10-16T07:21:44.000Z | 2021-11-05T09:08:11.000Z | experiments/test_defense.py | thunlp/ONION | 0f4ae2109253273ecac704935a500857fe002651 | [
"MIT"
] | null | null | null | from gptlm import GPT2LM
import torch
import argparse
from PackDataset import packDataset_util_bert
def read_data(file_path):
    """Load a tab-separated (sentence, label) file.

    Returns:
        list of (sentence, int label) tuples, in file order.
    """
    import pandas as pd
    rows = pd.read_csv(file_path, sep='\t').values.tolist()
    return [(row[0], int(row[1])) for row in rows]
def filter_sent(split_sent, pos):
    """Rejoin the word list with the word at index *pos* removed."""
    remaining = split_sent[:pos] + split_sent[pos + 1:]
    return ' '.join(remaining)
def evaluaion(loader):
    """Compute classification accuracy of the module-global ``model`` on *loader*.

    Note: the function name keeps the original spelling ("evaluaion")
    because the script below calls it under this name.

    Args:
        loader: iterable yielding (padded_text, attention_masks, labels) batches.

    Returns:
        float accuracy in [0, 1].
    """
    model.eval()
    total_number = 0
    total_correct = 0
    with torch.no_grad():
        for padded_text, attention_masks, labels in loader:
            if torch.cuda.is_available():
                padded_text, attention_masks, labels = padded_text.cuda(), attention_masks.cuda(), labels.cuda()
            # NOTE(review): ``[0]`` assumes the model returns a tuple whose
            # first element is the logits tensor — confirm for the model used.
            output = model(padded_text, attention_masks)[0]
            _, idx = torch.max(output, dim=1)
            correct = (idx == labels).sum().item()
            total_correct += correct
            total_number += labels.size(0)
    acc = total_correct / total_number
    return acc
def get_PPL(data):
    """Score every leave-one-word-out variant of each sentence with the global ``LM``.

    For sentence i, the j-th entry of the i-th inner list is the language
    model score of the sentence with word j removed.

    Args:
        data: list of sentences (space-separated words).

    Returns:
        list of per-sentence lists, one score per word position.
    """
    all_PPL = []
    from tqdm import tqdm
    for i, sent in enumerate(tqdm(data)):
        split_sent = sent.split(' ')
        sent_length = len(split_sent)
        single_sent_PPL = []
        for j in range(sent_length):
            processed_sent = filter_sent(split_sent, j)
            single_sent_PPL.append(LM(processed_sent))
        all_PPL.append(single_sent_PPL)
    assert len(all_PPL) == len(data)
    return all_PPL
def get_processed_sent(flag_li, orig_sent):
    """Keep only the words whose corresponding flag is 1, joined by spaces."""
    kept = [word for i, word in enumerate(orig_sent) if flag_li[i] == 1]
    return ' '.join(kept)
def get_processed_poison_data(all_PPL, data, bar):
    """Apply the ONION word-removal defense to the poisoned sentences.

    A word is removed when deleting it lowers the score relative to the
    reference entry ``PPL_li[-1]`` by more than the threshold, i.e. when its
    suspicion value is <= ``bar`` (bar is negative in the sweep below).

    NOTE(review): the last word of every sentence is dropped via ``[:-1]``
    and the last PPL entry serves as the reference — presumably because the
    sampled sentences end with a fixed trailing token; confirm against the
    data files.

    Returns:
        list of (filtered sentence, args.target_label) pairs.
    """
    processed_data = []
    for i, PPL_li in enumerate(all_PPL):
        orig_sent = data[i]
        orig_split_sent = orig_sent.split(' ')[:-1]
        assert len(orig_split_sent) == len(PPL_li) - 1
        whole_sentence_PPL = PPL_li[-1]
        processed_PPL_li = [ppl - whole_sentence_PPL for ppl in PPL_li][:-1]
        flag_li = []
        for ppl in processed_PPL_li:
            if ppl <= bar:
                flag_li.append(0)  # suspicious word: drop it
            else:
                flag_li.append(1)  # keep it
        assert len(flag_li) == len(orig_split_sent)
        sent = get_processed_sent(flag_li, orig_split_sent)
        processed_data.append((sent, args.target_label))
    assert len(all_PPL) == len(processed_data)
    return processed_data
def get_orig_poison_data():
    """Read the poisoned test set (path from ``args``) and return its raw sentences."""
    return [sent for sent, _ in read_data(args.poison_data_path)]
def prepare_poison_data(all_PPL, orig_poison_data, bar):
    """Filter the poisoned sentences at threshold *bar* and wrap them in a DataLoader."""
    test_data_poison = get_processed_poison_data(all_PPL, orig_poison_data, bar=bar)
    test_loader_poison = packDataset_util.get_loader(test_data_poison, shuffle=False, batch_size=32)
    return test_loader_poison
def get_processed_clean_data(all_clean_PPL, clean_data, bar):
    """Apply the ONION word-removal defense to the clean test set.

    Mirrors get_processed_poison_data() but keeps each example's original
    label and immediately wraps the result in a DataLoader.

    Returns:
        DataLoader over (filtered sentence, original label) pairs.
    """
    processed_data = []
    data = [item[0] for item in clean_data]
    for i, PPL_li in enumerate(all_clean_PPL):
        orig_sent = data[i]
        orig_split_sent = orig_sent.split(' ')[:-1]
        assert len(orig_split_sent) == len(PPL_li) - 1
        whole_sentence_PPL = PPL_li[-1]
        processed_PPL_li = [ppl - whole_sentence_PPL for ppl in PPL_li][:-1]
        flag_li = []
        for ppl in processed_PPL_li:
            if ppl <= bar:
                flag_li.append(0)  # suspicious word: drop it
            else:
                flag_li.append(1)  # keep it
        assert len(flag_li) == len(orig_split_sent)
        sent = get_processed_sent(flag_li, orig_split_sent)
        processed_data.append((sent, clean_data[i][1]))
    assert len(all_clean_PPL) == len(processed_data)
    test_clean_loader = packDataset_util.get_loader(processed_data, shuffle=False, batch_size=32)
    return test_clean_loader
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', default='sst-2')
    parser.add_argument('--model_path', default='')
    parser.add_argument('--clean_data_path', default='')
    parser.add_argument('--poison_data_path', default='')
    parser.add_argument('--target_label', default=1, type=int)
    parser.add_argument('--record_file', default='record.log')
    args = parser.parse_args()
    # GPT-2 language model used to score leave-one-word-out sentences.
    LM = GPT2LM(use_tf=False, device='cuda' if torch.cuda.is_available() else 'cpu')
    data_selected = args.data
    model = torch.load(args.model_path)
    if torch.cuda.is_available():
        model.cuda()
    packDataset_util = packDataset_util_bert()
    file_path = args.record_file
    # NOTE(review): the log file is only closed at the end; an exception in
    # the sweep leaves it open — consider a ``with`` block.
    f = open(file_path, 'w')
    orig_poison_data = get_orig_poison_data()
    clean_data = read_data(args.clean_data_path)
    clean_raw_sentences = [item[0] for item in clean_data]
    # Score every leave-one-word-out variant once, then reuse across bars.
    all_PPL = get_PPL(orig_poison_data)
    all_clean_PPL = get_PPL(clean_raw_sentences)
    # Sweep the (negative) suspicion threshold and record attack success
    # rate vs. clean accuracy at each bar.
    for bar in range(-100, 0):
        test_loader_poison_loader = prepare_poison_data(all_PPL, orig_poison_data, bar)
        processed_clean_loader = get_processed_clean_data(all_clean_PPL, clean_data, bar)
        success_rate = evaluaion(test_loader_poison_loader)
        clean_acc = evaluaion(processed_clean_loader)
        print('bar: ', bar, file=f)
        print('attack success rate: ', success_rate, file=f)
        print('clean acc: ', clean_acc, file=f)
        print('*' * 89, file=f)
    f.close()
| 35.063694 | 112 | 0.66049 |
4a1806928f6a65b3b307f4ced75da875a17f0e56 | 18,051 | py | Python | src/pyhees/section3_2_8.py | BRI-EES-House/pyhees | 7ebe8c24226f0cb7654eea6ac37c5cea35f50e6b | [
"MIT"
] | null | null | null | src/pyhees/section3_2_8.py | BRI-EES-House/pyhees | 7ebe8c24226f0cb7654eea6ac37c5cea35f50e6b | [
"MIT"
] | 3 | 2022-01-04T07:29:52.000Z | 2022-03-19T08:02:51.000Z | src/pyhees/section3_2_8.py | BRI-EES-House/pyhees | 7ebe8c24226f0cb7654eea6ac37c5cea35f50e6b | [
"MIT"
] | 2 | 2022-01-19T07:57:10.000Z | 2022-03-07T00:25:54.000Z | from pyhees.section3_2_b import get_H
from pyhees.section3_2_c import get_nu_H, get_nu_C
from pyhees.section3_4_b_2 import get_glass_spec_category
from pyhees.section3_4 import common, window, door, heatbridge, earthfloor, gamma
from pyhees.section3_3_5 import *
from pyhees.section3_3_6 import *
# ============================================================================
# 8. 当該住戸の外皮の部位の面積等を用いて外皮性能を評価する方法
# ============================================================================
# ============================================================================
# 8.1 外皮平均熱貫流率
# ============================================================================
def calc_U_A(envelope):
    """Average envelope heat transmission coefficient U_A (Eq. 4).

    Args:
        envelope (dict): dictionary keyed by the node names of the Envelope
            element, describing one dwelling unit's envelope.

    Returns:
        float, dict: U_A rounded up to two decimals, and ``envelope`` with
        'U_A' attached (per-part calculators may also attach their results).
    """
    Region = envelope['Region']
    # Running sum of A_i * U_i * H_i over opaque parts and openings.
    sigma_A_i_U_i_H_i = 0
    # General parts and openings
    # Envelope parts other than windows
    wall_list = envelope['Wall']
    for i in range(len(wall_list)):
        wall_i = wall_list[i]
        A_i = wall_i['Area']
        # Temperature difference coefficient for the adjacent-space type.
        H_i = calc_H_byKey(wall_i['Adjacent'], Region)
        # The U-value calculation depends on the declared 'Method'.
        if wall_i['Method'] == 'Direct':
            U_i, wall_i = get_Wood_Direct_U_i(wall_i)
        elif wall_i['Method'] == 'Accurate':
            U_i, wall_i = calc_Wood_Accurate_U_i(wall_i)
        elif wall_i['Method'] == 'Simple':
            U_i, wall_i = calc_Wood_Simple_U_i(wall_i)
        elif wall_i['Method'] == 'RC':
            U_i, wall_i = calc_RC_U_i(wall_i)
        elif wall_i['Method'] == 'Steel' :
            U_i, wall_i = calc_Steel_U_i(wall_i)
        else:
            raise ValueError("invalid value in ['Method']")
        sigma_A_i_U_i_H_i += A_i * U_i * H_i
    # Windows
    window_list = envelope['Window']
    for i in range(len(window_list)):
        window_i = window_list[i]
        A_i = window_i['WindowPart']['Area']
        H_i = calc_H_byKey(window_i['Adjacent'], Region)
        U_i, window_i = calc_Opening_U_i(window_i)
        sigma_A_i_U_i_H_i += A_i * U_i * H_i
    # Doors
    door_list = envelope['Door']
    for i in range(len(door_list)):
        door_i = door_list[i]
        A_i = door_i['DoorPart']['Area']
        H_i = calc_H_byKey(door_i['Adjacent'], Region)
        U_i, door_i = calc_Opening_U_i(door_i)
        sigma_A_i_U_i_H_i += A_i * U_i * H_i
    # Running sum of L_j * psi_j * H_j over heat bridges and earth-floor perimeters.
    sigma_L_j_psi_j_H_j = 0
    # Linear heat bridges and earth-floor perimeters
    heatbridge_list = envelope['LinearHeatBridge']
    for j in range(len(heatbridge_list)):
        heatbridge_j = heatbridge_list[j]
        # Temperature difference coefficient
        H_j = 0
        for i in range(len(heatbridge_j['ComponentNames'])):
            # Resolve the adjoining part's parameter dict from its name.
            componentname = heatbridge_j['ComponentNames'][i]
            component_i = get_component_byName(wall_list, componentname)
            # Stop when there is no further adjoining part.
            if component_i is None:
                break
            i_H_j = calc_H_byKey(component_i['Adjacent'], Region)
            # (Sec. 3-2, App. B) When a heat bridge touches several kinds of
            # adjacent space, adopt the larger temperature difference coefficient.
            if H_j < i_H_j:
                H_j = i_H_j
        L_j = heatbridge_j['Length']
        if heatbridge_j['StructureType'] == 'Wood':
            psi_j, heatbridge_j = get_Wood_psi_j(heatbridge_j)
        elif heatbridge_j['StructureType'] == 'RC':
            psi_j, heatbridge_j = get_RC_psi_j(heatbridge_j)
        elif heatbridge_j['StructureType'] == 'Steel':
            psi_j, heatbridge_j = calc_Steel_psi_j(heatbridge_j)
        else:
            raise ValueError("invalid value in ['StructureType']")
        sigma_L_j_psi_j_H_j += L_j * psi_j * H_j
    # Earth-floor perimeters
    foundation_list = envelope['Foundation']
    for j in range(len(foundation_list)):
        foundation_j = foundation_list[j]
        L_j = foundation_j['OuterLength']
        H_j = calc_H_byKey(foundation_j['Adjacent'], Region)
        # NOTE(review): the updated dict is bound to 'foundation' and never used,
        # so the list element is not refreshed -- confirm this is intended.
        psi_j, foundation = calc_psi_F_j(foundation_j)
        sigma_L_j_psi_j_H_j += L_j * psi_j * H_j
    A_env = get_A_env(envelope)
    U_A = (sigma_A_i_U_i_H_i + sigma_L_j_psi_j_H_j) / A_env
    # Round up to two decimal places.
    U_A_ceil = math.ceil(U_A * 10 ** 2) / (10 ** 2)
    envelope['U_A'] = U_A_ceil
    return U_A_ceil, envelope
# ============================================================================
# 8.2 暖房期の平均日射熱取得率及び冷房期の平均日射熱取得率
# ============================================================================
def calc_eta_A_H(envelope):
    """Average solar heat gain rate for the heating season, eta_A_H (Eq. 5).

    Sums eta_H_i * nu_H_i weighted by area (parts and openings) or length
    (linear heat bridges), divides by the total envelope area, expresses the
    result in percent and truncates (floors) it to one decimal place.

    Args:
        envelope (dict): dictionary keyed by the node names of the Envelope
            element, describing one dwelling unit's envelope.

    Returns:
        float | None, dict: eta_A_H (None for climate region 8) and
        ``envelope`` with 'eta_A_H' attached.
    """
    Region = envelope['Region']
    if Region in [8, '8']:
        # No heating-season value is defined for climate region 8.
        return None, envelope
    A_i_eta_H_i_nu_H_i = 0.0
    L_j_eta_H_i_nu_H_i = 0.0
    # --- envelope parts other than windows ---
    wall_list = envelope['Wall']
    for i in range(len(wall_list)):
        wall_i = wall_list[i]
        A_i = wall_i['Area']
        # U-value of the part feeds the opaque-part solar gain formula below.
        if wall_i['Method'] == 'Direct':
            U_i, wall_i = get_Wood_Direct_U_i(wall_i)
        elif wall_i['Method'] == 'Accurate':
            U_i, wall_i = calc_Wood_Accurate_U_i(wall_i)
        elif wall_i['Method'] == 'Simple':
            U_i, wall_i = calc_Wood_Simple_U_i(wall_i)
        elif wall_i['Method'] == 'RC':
            U_i, wall_i = calc_RC_U_i(wall_i)
        elif wall_i['Method'] == 'Steel':
            U_i, wall_i = calc_Steel_U_i(wall_i)
        else:
            raise ValueError("invalid value in ['Method']")
        # Solar heat gain rate of the part (0 when gains are excluded).
        if 'SolarGain' in wall_i and wall_i['SolarGain'] != 'No':
            gamma_H_i = wall_i['GammaH']
            eta_H_i = common.get_eta_H_i(gamma_H_i, U_i)
        else:
            eta_H_i = 0.0
        # Orientation coefficient (App. C): non-zero only for parts facing
        # outside air; every other adjacent-space type uses 0.
        if wall_i['Adjacent'] == 'Outside':
            nu_H_i = calc_nu_byKey(Region, wall_i['Direction'], 'H')
        else:
            nu_H_i = 0.0
        A_i_eta_H_i_nu_H_i += A_i * eta_H_i * nu_H_i
    # --- windows ---
    window_list = envelope['Window']
    for i in range(len(window_list)):
        window_i = window_list[i]
        A_i = window_i['WindowPart']['Area']
        if 'SolarGain' in window_i and window_i['SolarGain'] == 'No':
            eta_H_i = 0.0
        else:
            eta_H_i = window.calc_eta_H_i_byDict(Region, window_i['Direction'], window_i['WindowPart'])
        # Orientation coefficient: 0 unless the window faces outside air.
        if window_i['Adjacent'] == 'Outside':
            nu_H_i = calc_nu_byKey(Region, window_i['Direction'], 'H')
        else:
            nu_H_i = 0.0
        A_i_eta_H_i_nu_H_i += A_i * eta_H_i * nu_H_i
    # --- doors ---
    door_list = envelope['Door']
    for i in range(len(door_list)):
        door_i = door_list[i]
        A_i = door_i['DoorPart']['Area']
        if 'SolarGain' in door_i and door_i['SolarGain'] == 'No':
            eta_H_i = 0.0
        else:
            eta_H_i = door.calc_eta_H_i_byDict(Region, door_i['DoorPart'])
        # Orientation coefficient: 0 unless the door faces outside air.
        if door_i['Adjacent'] == 'Outside':
            nu_H_i = calc_nu_byKey(Region, door_i['Direction'], 'H')
        else:
            nu_H_i = 0.0
        A_i_eta_H_i_nu_H_i += A_i * eta_H_i * nu_H_i
    # --- linear heat bridges ---
    heatbridge_list = envelope['LinearHeatBridge']
    for j in range(len(heatbridge_list)):
        heatbridge_j = heatbridge_list[j]
        # Linear transmittance depends on the structure type.
        if heatbridge_j['StructureType'] == 'Wood':
            psi_i_j, heatbridge_j = get_Wood_psi_j(heatbridge_j)
        elif heatbridge_j['StructureType'] == 'RC':
            psi_i_j, heatbridge_j = get_RC_psi_j(heatbridge_j)
        elif heatbridge_j['StructureType'] == 'Steel':
            psi_i_j, heatbridge_j = calc_Steel_psi_j(heatbridge_j)
        else:
            raise ValueError("invalid value in ['StructureType']")
        L_i_j = heatbridge_j['Length']
        # Shading factor and orientation coefficient of a heat bridge are the
        # averages over the general parts it touches (App. C).
        gamma_H_i_sum = 0
        nu_H_i_sum = 0
        for i in range(len(heatbridge_j['ComponentNames'])):
            component_i_name = heatbridge_j['ComponentNames'][i]
            component_i = get_component_byName(wall_list, component_i_name)
            gamma_H_i_sum += component_i['GammaH']
            # Orientation coefficient is 0 unless the part faces outside air.
            if component_i['Adjacent'] == 'Outside':
                nu_H_i_sum += calc_nu_byKey(Region, component_i['Direction'], 'H')
            else:
                nu_H_i_sum += 0.0
        gamma_H_i = gamma_H_i_sum / len(heatbridge_j['ComponentNames'])
        if 'SolarGain' in heatbridge_j and heatbridge_j['SolarGain'] != 'No':
            eta_H_i = heatbridge.get_eta_dash_H_j(gamma_H_i, psi_i_j)
        else:
            eta_H_i = 0.0
        nu_H_i = nu_H_i_sum / len(heatbridge_j['ComponentNames'])
        L_j_eta_H_i_nu_H_i += L_i_j * eta_H_i * nu_H_i
    # Earth-floor perimeters contribute 0 (W/mK)/(W/m2K) in both seasons.
    L_j_eta_H_i_nu_H_i += earthfloor.get_eta_dash_H_j()
    A_env = get_A_env(envelope)
    eta_A_H = (A_i_eta_H_i_nu_H_i + L_j_eta_H_i_nu_H_i) / A_env * 100
    # Truncate (floor) to one decimal place.
    eta_A_H_floor = math.floor(eta_A_H * 10 ** 1) / (10 ** 1)
    envelope['eta_A_H'] = eta_A_H_floor
    return eta_A_H_floor, envelope
def calc_eta_A_C(envelope):
    """Average solar heat gain rate for the cooling season, eta_A_C (Eq. 5).

    Sums eta_C_i * nu_C_i weighted by area (parts and openings) or length
    (linear heat bridges), divides by the total envelope area, expresses the
    result in percent and rounds it up to one decimal place.

    Args:
        envelope (dict): dictionary keyed by the node names of the Envelope
            element, describing one dwelling unit's envelope.

    Returns:
        float, dict: eta_A_C and ``envelope`` with 'eta_A_C' attached.
    """
    Region = envelope['Region']
    A_i_eta_C_i_nu_C_i = 0.0
    L_j_eta_C_i_nu_C_i = 0.0
    # --- envelope parts other than windows ---
    wall_list = envelope['Wall']
    for i in range(len(wall_list)):
        wall_i = wall_list[i]
        A_i = wall_i['Area']
        # U-value of the part feeds the opaque-part solar gain formula below.
        if wall_i['Method'] == 'Direct':
            U_i, wall_i = get_Wood_Direct_U_i(wall_i)
        elif wall_i['Method'] == 'Accurate':
            U_i, wall_i = calc_Wood_Accurate_U_i(wall_i)
        elif wall_i['Method'] == 'Simple':
            U_i, wall_i = calc_Wood_Simple_U_i(wall_i)
        elif wall_i['Method'] == 'RC':
            U_i, wall_i = calc_RC_U_i(wall_i)
        elif wall_i['Method'] == 'Steel':
            U_i, wall_i = calc_Steel_U_i(wall_i)
        else:
            raise ValueError("invalid value in ['Method']")
        # Solar heat gain rate of the part (0 when gains are excluded).
        if 'SolarGain' in wall_i and wall_i['SolarGain'] != 'No':
            gamma_C_i = wall_i['GammaC']
            eta_C_i = common.get_eta_C_i(gamma_C_i, U_i)
        else:
            eta_C_i = 0.0
        # Orientation coefficient (App. C): non-zero only for parts facing
        # outside air; every other adjacent-space type uses 0.
        if wall_i['Adjacent'] == 'Outside':
            nu_C_i = calc_nu_byKey(Region, wall_i['Direction'], 'C')
        else:
            nu_C_i = 0.0
        A_i_eta_C_i_nu_C_i += A_i * eta_C_i * nu_C_i
    # --- windows ---
    window_list = envelope['Window']
    for i in range(len(window_list)):
        window_i = window_list[i]
        A_i = window_i['WindowPart']['Area']
        if 'SolarGain' in window_i and window_i['SolarGain'] == 'No':
            eta_C_i = 0.0
        else:
            eta_C_i = window.calc_eta_C_i_byDict(Region, window_i['Direction'], window_i['WindowPart'])
        # Orientation coefficient: 0 unless the window faces outside air.
        if window_i['Adjacent'] == 'Outside':
            nu_C_i = calc_nu_byKey(Region, window_i['Direction'], 'C')
        else:
            nu_C_i = 0.0
        A_i_eta_C_i_nu_C_i += A_i * eta_C_i * nu_C_i
    # --- doors ---
    door_list = envelope['Door']
    for i in range(len(door_list)):
        door_i = door_list[i]
        A_i = door_i['DoorPart']['Area']
        if 'SolarGain' in door_i and door_i['SolarGain'] == 'No':
            eta_C_i = 0.0
        else:
            eta_C_i = door.calc_eta_C_i_byDict(Region, door_i['DoorPart'])
        # Orientation coefficient: 0 unless the door faces outside air.
        if door_i['Adjacent'] == 'Outside':
            nu_C_i = calc_nu_byKey(Region, door_i['Direction'], 'C')
        else:
            nu_C_i = 0.0
        A_i_eta_C_i_nu_C_i += A_i * eta_C_i * nu_C_i
    # --- linear heat bridges ---
    heatbridge_list = envelope['LinearHeatBridge']
    for j in range(len(heatbridge_list)):
        heatbridge_j = heatbridge_list[j]
        # Linear transmittance depends on the structure type.
        if heatbridge_j['StructureType'] == 'Wood':
            psi_i_j, heatbridge_j = get_Wood_psi_j(heatbridge_j)
        elif heatbridge_j['StructureType'] == 'RC':
            psi_i_j, heatbridge_j = get_RC_psi_j(heatbridge_j)
        elif heatbridge_j['StructureType'] == 'Steel':
            psi_i_j, heatbridge_j = calc_Steel_psi_j(heatbridge_j)
        else:
            raise ValueError("invalid value in ['StructureType']")
        L_i_j = heatbridge_j['Length']
        # Shading factor and orientation coefficient of a heat bridge are the
        # averages over the general parts it touches (App. C).
        gamma_C_i_sum = 0
        nu_C_i_sum = 0
        for i in range(len(heatbridge_j['ComponentNames'])):
            component_i_name = heatbridge_j['ComponentNames'][i]
            component_i = get_component_byName(wall_list, component_i_name)
            gamma_C_i_sum += component_i['GammaC']
            # Orientation coefficient is 0 unless the part faces outside air.
            if component_i['Adjacent'] == 'Outside':
                nu_C_i_sum += calc_nu_byKey(Region, component_i['Direction'], 'C')
            else:
                nu_C_i_sum += 0.0
        gamma_C_i = gamma_C_i_sum / len(heatbridge_j['ComponentNames'])
        if 'SolarGain' in heatbridge_j and heatbridge_j['SolarGain'] != 'No':
            eta_C_i = heatbridge.get_eta_dash_C_j(gamma_C_i, psi_i_j)
        else:
            eta_C_i = 0.0
        nu_C_i = nu_C_i_sum / len(heatbridge_j['ComponentNames'])
        L_j_eta_C_i_nu_C_i += L_i_j * eta_C_i * nu_C_i
    # Earth-floor perimeters contribute 0 (W/mK)/(W/m2K) in both seasons.
    L_j_eta_C_i_nu_C_i += earthfloor.get_eta_dash_C_j()
    A_env = get_A_env(envelope)
    eta_A_C = (A_i_eta_C_i_nu_C_i + L_j_eta_C_i_nu_C_i) / A_env * 100
    # Round up to one decimal place.
    eta_A_C_ceil = math.ceil(eta_A_C * 10 ** 1) / (10 ** 1)
    envelope['eta_A_C'] = eta_A_C_ceil
    return eta_A_C_ceil, envelope
# ============================================================================
# 8.3 床面積の合計に対する外皮の部位の面積の合計の比
# ============================================================================
def get_r_env(A_env, A_A):
    """Ratio of total envelope area to total floor area (Eq. 7).

    Args:
        A_env (float): total area of the envelope parts (m2)
        A_A (float): total floor area (m2)

    Returns:
        float: the ratio A_env / A_A
    """
    ratio = A_env / A_A
    return ratio
def get_A_env(envelope):
    """Total area of the envelope parts (Eq. 8).

    Sums the areas of the opaque parts, windows, doors and earth floors.

    Args:
        envelope (dict): dictionary keyed by the node names of the Envelope
            element.

    Returns:
        float: total envelope area (m2)
    """
    A_env = 0.0
    # Envelope parts other than windows
    A_env += sum(wall_i['Area'] for wall_i in envelope['Wall'])
    # Windows
    A_env += sum(window_i['WindowPart']['Area'] for window_i in envelope['Window'])
    # Doors
    A_env += sum(door_i['DoorPart']['Area'] for door_i in envelope['Door'])
    # Earth floors
    A_env += sum(foundation_j['Area'] for foundation_j in envelope['Foundation'])
    return A_env
def calc_H_byKey(adjacent_type, region):
    """Look up the temperature difference coefficient for an adjacent space.

    Args:
        adjacent_type (str): adjacent-space kind as stored on the node
        region (int): climate region number

    Returns:
        float: temperature difference coefficient
    """
    # Map node values onto the adjacent-space labels expected by get_H.
    labels = {
        'Outside': '外気',
        'Open': '外気に通じる空間',
        'Connected': '外気・外気に通じる空間',
        'Close': '外気に通じていない空間・外気に通じる床裏',
        'Separator': '住戸及び住戸と同様の熱的環境の空間・外気に通じていない床裏',
    }
    return get_H(labels[adjacent_type], region)
def calc_nu_byKey(region, Direction, season):
    """Look up the heating/cooling-season orientation coefficient.

    Args:
        region (int): climate region number
        Direction (str): compass direction stored on the node
        season (str): 'H' for the heating season, anything else for cooling

    Returns:
        float: orientation coefficient
    """
    # Map node values onto the direction labels expected by get_nu_H/get_nu_C.
    direction_labels = {
        'Top': '上面', 'N': '北', 'NE': '北東', 'E': '東', 'SE': '南東',
        'S': '南', 'SW': '南西', 'W': '西', 'NW': '北西', 'Bottom': '下面',
    }
    label = direction_labels[Direction]
    if season == 'H':
        # Heating season
        return get_nu_H(region, label)
    # Cooling season
    return get_nu_C(region, label)
def get_component_byName(wall_list, componentname):
    """Return the envelope part whose 'Name' matches, or None if absent.

    Args:
        wall_list (list[dict]): envelope parts other than windows
        componentname (str): name of the part to look up

    Returns:
        dict | None: the first matching part dictionary, or None
    """
    return next(
        (wall_i for wall_i in wall_list if wall_i['Name'] == componentname),
        None,
    )
| 30.135225 | 103 | 0.584012 |
cb8bf9ff415516845b93bb17523bfaad2c6d66c6 | 1,741 | py | Python | src/rezgui/widgets/ConfiguredSplitter.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rezgui/widgets/ConfiguredSplitter.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rezgui/widgets/ConfiguredSplitter.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | 1 | 2020-09-24T08:33:43.000Z | 2020-09-24T08:33:43.000Z | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Qt import QtWidgets
class ConfiguredSplitter(QtWidgets.QSplitter):
    """A QSplitter that persists its widget sizes in a config object.

    Sizes are saved under ``<config_key>/num_widgets`` and
    ``<config_key>/size_<i>`` every time the splitter is dragged.
    """
    def __init__(self, config, config_key, *nargs, **kwargs):
        super(ConfiguredSplitter, self).__init__(*nargs, **kwargs)
        self.config = config
        self.config_key = config_key
        # Persist sizes whenever the user drags a splitter handle.
        self.splitterMoved.connect(self._splitterMoved)

    def apply_saved_layout(self):
        """Restore previously saved sizes; call after adding child widgets.

        Returns True if a saved layout was found and applied, else False.
        """
        count = self.config.get(self.config_key + "/num_widgets", int)
        if not count:
            return False
        widths = [
            self.config.get("%s/size_%d" % (self.config_key, i), int)
            for i in range(count)
        ]
        self.setSizes(widths)
        return True

    def _splitterMoved(self, pos, index):
        # Save the widget count and each current size to the config.
        current = self.sizes()
        self.config.setValue(self.config_key + "/num_widgets", len(current))
        for i, width in enumerate(current):
            self.config.setValue("%s/size_%d" % (self.config_key, i), width)
| 36.270833 | 76 | 0.655945 |
107cb879e83a96248df8678d73d19ae842ca6ee9 | 1,158 | py | Python | NGS_downstream/scripts/reduce_states.py | EddaSchulz/Xert_paper | dc641c3e8e2b8a1d0e338995a9dd83640a07339f | [
"MIT"
] | null | null | null | NGS_downstream/scripts/reduce_states.py | EddaSchulz/Xert_paper | dc641c3e8e2b8a1d0e338995a9dd83640a07339f | [
"MIT"
] | null | null | null | NGS_downstream/scripts/reduce_states.py | EddaSchulz/Xert_paper | dc641c3e8e2b8a1d0e338995a9dd83640a07339f | [
"MIT"
] | 1 | 2021-04-28T07:02:19.000Z | 2021-04-28T07:02:19.000Z | #!/usr/bin/env python3
# This script reduces the states within a BED file returned from ChromHMM and assigns new colors for visualization with UCSC
import re
import sys
# Open the ChromHMM segmentation for reading and the reduced BED for writing.
# ('infile'/'outfile' instead of the original names, which shadowed builtins.)
infile = open(sys.argv[1], 'r')
outfile = open(sys.argv[2], 'w')
strongRE_input = sys.argv[3]
weakRE_input = sys.argv[4]
poisedRE_input = sys.argv[5]
noRE_input = sys.argv[6]
# Each CLI argument is a comma-separated list of ChromHMM state labels.
strongRE_list = strongRE_input.split(",")  # merged state 1
weakRE_list = weakRE_input.split(",")      # merged state 2
poisedRE_list = poisedRE_input.split(",")  # merged state 3
noRE_list = noRE_input.split(",")          # merged state 4
for line in infile:
    if re.match('track', line):
        # Pass the BED track header line through unchanged.
        outfile.write(line)
    else:
        tab = re.split('\t', line)
        state = tab[3]
        color = tab[8]  # default itemRgb from the input (overwritten below)
        # BUG FIX: membership must be tested against the split lists, not the
        # raw comma-joined strings; e.g. state "1" would substring-match the
        # string "10,11" and be mis-assigned.
        if state in strongRE_list:
            color = '030,080,255'
            state = 1
        elif state in weakRE_list:
            color = '050,200,255'
            state = 2
        elif state in poisedRE_list:
            color = '189,188,188'
            state = 3
        elif state in noRE_list:
            color = '255,255,255'
            state = 4
        else:
            # NOTE(review): an unrecognised state aborts the whole loop
            # (original behaviour kept); 'continue' may have been intended.
            break
        outfile.write(tab[0] + '\t' + tab[1] + '\t' + tab[2] + '\t' + str(state) + '\t' + tab[4] + '\t' + tab[5] + '\t' + tab[6] + '\t' + tab[7] + '\t' + color + '\n')
infile.close()
outfile.close()
| 25.733333 | 160 | 0.639896 |
f4e033f7a02ef2b3ed5dc50fc4f098a1cd0f2261 | 179 | py | Python | fastface/metric/__init__.py | ethanwharris/light-face-detection | bd6a4c5a87c80937e51adc3999eacc14c80185e8 | [
"MIT"
] | null | null | null | fastface/metric/__init__.py | ethanwharris/light-face-detection | bd6a4c5a87c80937e51adc3999eacc14c80185e8 | [
"MIT"
] | null | null | null | fastface/metric/__init__.py | ethanwharris/light-face-detection | bd6a4c5a87c80937e51adc3999eacc14c80185e8 | [
"MIT"
] | null | null | null | __all__ = [
"WiderFaceAP",
"AveragePrecision",
"AverageRecall"
]
from .widerface_ap import WiderFaceAP
from .ap import AveragePrecision
from .ar import AverageRecall
| 17.9 | 37 | 0.743017 |
0a3347f158652e5571a7fcfeb1c1f890d75d0e06 | 3,111 | py | Python | icecreams101/settings.py | srgautam01/icecreams101 | 698c6beab59eff1a8ab31db463d64604b2ec24c2 | [
"MIT"
] | null | null | null | icecreams101/settings.py | srgautam01/icecreams101 | 698c6beab59eff1a8ab31db463d64604b2ec24c2 | [
"MIT"
] | null | null | null | icecreams101/settings.py | srgautam01/icecreams101 | 698c6beab59eff1a8ab31db463d64604b2ec24c2 | [
"MIT"
] | null | null | null | """
Django settings for icecreams101 project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo; load it from an
# environment variable before any production deployment.
SECRET_KEY = '#fz+1psm$z-5b+4-czs_ueletnix&atu$h)2_@*d6l8kx^*lbn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; list real hosts for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'icecreams101.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates']
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'icecreams101.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.5 | 91 | 0.696239 |
79dd9685676372e0498cbb32681344dc73459e55 | 3,074 | py | Python | binding/python/multiverso/theano_ext/param_manager.py | yesme/Multiverso | e45369e1d07277f656b0900beb2709d86679fa53 | [
"MIT"
] | 445 | 2015-11-10T04:00:17.000Z | 2016-10-21T01:10:27.000Z | binding/python/multiverso/theano_ext/param_manager.py | yesme/Multiverso | e45369e1d07277f656b0900beb2709d86679fa53 | [
"MIT"
] | 57 | 2015-11-10T14:54:29.000Z | 2016-08-11T05:48:13.000Z | binding/python/multiverso/theano_ext/param_manager.py | yesme/Multiverso | e45369e1d07277f656b0900beb2709d86679fa53 | [
"MIT"
] | 156 | 2015-11-10T05:21:49.000Z | 2016-10-19T15:25:38.000Z | #!/usr/bin/env python
# coding:utf8
import lasagne
import numpy as np
import multiverso as mv
class MVModelParamManager(object):
'''
MVModelParamManager is manager to make managing and synchronizing the
variables in lasagne more easily
'''
def __init__(self, model):
''' The constructor of MVModelParamManager
The constructor will associate the parameter with multiverso array
table. The initial value of ArrayTableHandler will be same as the
parameters of model. If different parameters are used in different
processes, the average of them will be used as the initial value
'''
self.shapes = []
self.dtypes = []
self.sizes = []
self.all_param_list = []
self.model = model
for arr in self.get_all_param_values():
self.shapes.append(arr.shape)
# TODO: Now only float32 is supported in multiverso. So I store all
# the parameters in a float32 array. This place need modification
# after other types are supported
assert(np.dtype("float32") == arr.dtype)
self.dtypes.append(arr.dtype)
self.sizes.append(arr.size)
self.all_param_list.extend([i for i in np.nditer(arr)])
self.all_param_list = np.array(self.all_param_list)
self.tbh = mv.ArrayTableHandler(len(self.all_param_list), init_value=self.all_param_list)
mv.barrier() # add barrier to make sure the initial values have token effect
self.all_param_list = self.tbh.get()
self._set_all_param_to_model()
def get_all_param_values(self):
'''Get all param values of specific model
Gets the parameters of the model. It should return a list of Numpy
arrays with shapes and types matching the output of
`set_all_param_values()`.
'''
raise NotImplemented()
def set_all_param_values(self, params):
'''Set all param values of specific model
Sets the parameters of the model. The `params` argument should be a
list of Numpy arrays with shapes and types matching the output of
`get_all_param_values()`.
'''
raise NotImplemented()
def _set_all_param_to_model(self):
n = 0
params = []
for i, size in enumerate(self.sizes):
params.append(self.all_param_list[n:n + size].reshape(self.shapes[i]))
n += size
self.set_all_param_values(params)
def sync_all_param(self):
'''sync all parameters with multiverso server
This function will
1) calc all the delta of params in the model and add the delta to multiverso server
2) get the latest value from the multiverso server
'''
cur_model_params = np.concatenate([
arr.reshape(-1) for arr in self.get_all_param_values()])
params_delta = cur_model_params - self.all_param_list
self.tbh.add(params_delta)
self.all_param_list = self.tbh.get()
self._set_all_param_to_model()
| 37.036145 | 97 | 0.650943 |
8a6e1f5e7a23835821a75c5bb068d02649b12286 | 588 | py | Python | myapp/src/myapp/run.py | jmargutt/python-docker-app | 40f31fe69629110e6c8856f7d3620528d0f75ea1 | [
"MIT"
] | null | null | null | myapp/src/myapp/run.py | jmargutt/python-docker-app | 40f31fe69629110e6c8856f7d3620528d0f75ea1 | [
"MIT"
] | null | null | null | myapp/src/myapp/run.py | jmargutt/python-docker-app | 40f31fe69629110e6c8856f7d3620528d0f75ea1 | [
"MIT"
] | null | null | null | import datetime
import logging
logging.basicConfig()
ch = logging.getLogger()  # root logger (no name given)
# NOTE(review): WARNING level suppresses the logging.info() call in main()
# below -- confirm this is intended.
ch.setLevel(logging.WARNING)
from myapp.utils import print_welcome_message
def main():
    """Template entry point: print a welcome message and exit.

    Any failure inside print_welcome_message is logged and swallowed, so the
    app always finishes cleanly.
    """
    # Timezone-aware "now"; datetime.utcnow() is deprecated since Python 3.12
    # and the replace() dance is no longer needed.
    utc_timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    try:
        print_welcome_message()
    except Exception as e:
        logging.error('Error in print_welcome_message')
        logging.error(e)
    logging.info('my-app ran at %s', utc_timestamp)
if __name__ == "__main__":
main()
| 21 | 55 | 0.685374 |
8bc005f249251d05a392f74680c07d6c0013f34a | 112 | py | Python | setup.py | vipulgupta2048/clock-activity | a805922302554cfd562085c990b91b834d06e45f | [
"CNRI-Python",
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | setup.py | vipulgupta2048/clock-activity | a805922302554cfd562085c990b91b834d06e45f | [
"CNRI-Python",
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | setup.py | vipulgupta2048/clock-activity | a805922302554cfd562085c990b91b834d06e45f | [
"CNRI-Python",
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from sugar3.activity import bundlebuilder
# Build/package this Sugar activity via the standard bundlebuilder entry point.
bundlebuilder.start()
| 18.666667 | 41 | 0.714286 |
f6236b281330498702a0f6a9340bd776a221a8d3 | 9,188 | py | Python | tests/providers/google/cloud/utils/gcp_authenticator.py | ngwallace/airflow | 49ca0a018673d575949e7089d296a6428af5f18c | [
"Apache-2.0"
] | 2 | 2022-02-09T08:59:56.000Z | 2022-02-09T08:59:59.000Z | tests/providers/google/cloud/utils/gcp_authenticator.py | ngwallace/airflow | 49ca0a018673d575949e7089d296a6428af5f18c | [
"Apache-2.0"
] | null | null | null | tests/providers/google/cloud/utils/gcp_authenticator.py | ngwallace/airflow | 49ca0a018673d575949e7089d296a6428af5f18c | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import subprocess
from typing import Optional
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.models import Connection
# Please keep these variables in alphabetical order.
from tests.test_utils import AIRFLOW_MAIN_FOLDER
from tests.test_utils.logging_command_executor import CommandExecutor
# Service-account key file names used by the individual system tests.
# Kept in alphabetical order per the note above (GCP_AWS_KEY was previously
# appended at the end, breaking that order).
GCP_AI_KEY = 'gcp_ai.json'
GCP_AUTOML_KEY = 'gcp_automl.json'
GCP_AWS_KEY = 'gcp_aws.json'
GCP_BIGQUERY_KEY = 'gcp_bigquery.json'
GCP_BIGTABLE_KEY = 'gcp_bigtable.json'
GCP_CLOUD_BUILD_KEY = 'gcp_cloud_build.json'
GCP_CLOUDSQL_KEY = 'gcp_cloudsql.json'
GCP_COMPUTE_KEY = 'gcp_compute.json'
GCP_COMPUTE_SSH_KEY = 'gcp_compute_ssh.json'
GCP_DATACATALOG_KEY = 'gcp_datacatalog.json'
GCP_DATAFLOW_KEY = 'gcp_dataflow.json'
GCP_DATAFUSION_KEY = 'gcp_datafusion.json'
GCP_DATAPROC_KEY = 'gcp_dataproc.json'
GCP_DATASTORE_KEY = 'gcp_datastore.json'
GCP_DLP_KEY = 'gcp_dlp.json'
GCP_FUNCTION_KEY = 'gcp_function.json'
GCP_GCS_KEY = 'gcp_gcs.json'
GCP_GCS_TRANSFER_KEY = 'gcp_gcs_transfer.json'
GCP_GKE_KEY = "gcp_gke.json"
GCP_KMS_KEY = "gcp_kms.json"
GCP_LIFE_SCIENCES_KEY = 'gcp_life_sciences.json'
GCP_MEMORYSTORE = 'gcp_memorystore.json'
GCP_PUBSUB_KEY = "gcp_pubsub.json"
GCP_SECRET_MANAGER_KEY = 'gcp_secret_manager.json'
GCP_SPANNER_KEY = 'gcp_spanner.json'
GCP_STACKDRIVER = 'gcp_stackdriver.json'
GCP_TASKS_KEY = 'gcp_tasks.json'
GCP_VERTEX_AI_KEY = 'gcp_vertex_ai.json'
GCP_WORKFLOWS_KEY = "gcp_workflows.json"
GMP_KEY = 'gmp.json'
G_FIREBASE_KEY = 'g_firebase.json'
# Extra-field names stored on the 'google_cloud_default' Airflow connection.
KEYPATH_EXTRA = 'extra__google_cloud_platform__key_path'
KEYFILE_DICT_EXTRA = 'extra__google_cloud_platform__keyfile_dict'
SCOPE_EXTRA = 'extra__google_cloud_platform__scope'
PROJECT_EXTRA = 'extra__google_cloud_platform__project'
class GcpAuthenticator(CommandExecutor):
"""
Initialises the authenticator.
:param gcp_key: name of the key to use for authentication (see GCP_*_KEY values)
:param project_extra: optional extra project parameter passed to google cloud
connection
"""
original_account = None # type: Optional[str]
    def __init__(self, gcp_key: str, project_extra: Optional[str] = None):
        """Store the key name, resolve the project id from the environment
        and try to locate the key file on disk (full_key_path stays None if
        the key directory cannot be found)."""
        super().__init__()
        self.gcp_key = gcp_key
        self.project_extra = project_extra
        self.project_id = self.get_project_id()
        # Must be initialised before _set_key_path(), which may return early
        # without assigning it.
        self.full_key_path = None
        self._set_key_path()
@staticmethod
def get_project_id():
return os.environ.get('GCP_PROJECT_ID')
    def set_key_path_in_airflow_connection(self):
        """
        Set key path in 'google_cloud_default' connection to point to the full
        key path
        :return: None
        """
        session = settings.Session()
        try:
            # Raises IndexError if the 'google_cloud_default' connection is missing.
            conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
            extras = conn.extra_dejson
            extras[KEYPATH_EXTRA] = self.full_key_path
            # Key path and keyfile dict are mutually exclusive ways to
            # authenticate, so drop any stale keyfile dict.
            if extras.get(KEYFILE_DICT_EXTRA):
                del extras[KEYFILE_DICT_EXTRA]
            extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
            extras[PROJECT_EXTRA] = self.project_extra if self.project_extra else self.project_id
            conn.extra = json.dumps(extras)
            session.commit()
        except BaseException as ex:
            # Roll back on any failure (incl. KeyboardInterrupt) and re-raise.
            self.log.error('Airflow DB Session error: %s', str(ex))
            session.rollback()
            raise
        finally:
            session.close()
    def set_dictionary_in_airflow_connection(self):
        """
        Set dictionary in 'google_cloud_default' connection to contain content
        of the json service account file.
        :return: None
        """
        session = settings.Session()
        try:
            # Raises IndexError if the 'google_cloud_default' connection is missing.
            conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
            extras = conn.extra_dejson
            with open(self.full_key_path) as path_file:
                content = json.load(path_file)
            # Keyfile dict and key path are mutually exclusive ways to
            # authenticate, so drop any stale key path.
            extras[KEYFILE_DICT_EXTRA] = json.dumps(content)
            if extras.get(KEYPATH_EXTRA):
                del extras[KEYPATH_EXTRA]
            extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
            # NOTE(review): unlike set_key_path_in_airflow_connection, there is
            # no fallback to self.project_id here -- confirm intended.
            extras[PROJECT_EXTRA] = self.project_extra
            conn.extra = json.dumps(extras)
            session.commit()
        except BaseException as ex:
            # Roll back on any failure (incl. KeyboardInterrupt) and re-raise.
            self.log.error('Airflow DB Session error: %s', str(ex))
            session.rollback()
            raise
        finally:
            session.close()
def _set_key_path(self):
    """
    Resolve ``self.full_key_path`` from the configured key name.

    When GCP_CONFIG_DIR points to a directory, the key is looked up under
    ``$GCP_CONFIG_DIR/keys``. Otherwise Airflow is assumed to run next to a
    checked-out ``config`` directory and the key is searched there.
    Problems are logged; ``full_key_path`` remains None only when the keys
    directory itself is absent.
    """
    gcp_config_dir = (
        os.environ["GCP_CONFIG_DIR"]
        if "GCP_CONFIG_DIR" in os.environ
        else os.path.join(AIRFLOW_MAIN_FOLDER, os.pardir, "config")
    )
    if not os.path.isdir(gcp_config_dir):
        self.log.info("The %s is not a directory", gcp_config_dir)
    key_dir = os.path.join(gcp_config_dir, "keys")
    if not os.path.isdir(key_dir):
        self.log.error("The %s is not a directory", key_dir)
        return
    key_path = os.path.join(key_dir, self.gcp_key)
    if not os.path.isfile(key_path):
        self.log.error("The %s file is missing", key_path)
    # Record the path even when the file is missing so that
    # _validate_key_set can raise a precise error message later.
    self.full_key_path = key_path
def _validate_key_set(self):
    """Raise AirflowException unless a readable key file has been resolved.

    Two failure modes: the key path was never resolved (keys directory
    missing), or the path was resolved but the file itself does not exist.
    """
    if self.full_key_path is None:
        raise AirflowException("The gcp_key is not set!")
    if not os.path.isfile(self.full_key_path):
        raise AirflowException(
            f"The key {self.gcp_key} could not be found. Please copy it to the {self.full_key_path} path."
        )
def gcp_authenticate(self):
    """
    Authenticate with the service account specified via the key name.

    Activates the account in gcloud (which also verifies the credentials
    work) and then records the key path in the default Airflow connection.
    """
    self._validate_key_set()
    self.log.info("Setting the Google Cloud key to %s", self.full_key_path)
    # Activating the service account doubles as a credential check.
    activate_cmd = [
        'gcloud',
        'auth',
        'activate-service-account',
        f'--key-file={self.full_key_path}',
        f'--project={self.project_id}',
    ]
    self.execute_cmd(activate_cmd)
    self.set_key_path_in_airflow_connection()
def gcp_revoke_authentication(self):
    """
    Revoke the current authentication by switching the active gcloud
    account to 'none' (a non-existing account).
    """
    self._validate_key_set()
    self.log.info("Revoking authentication - setting it to none")
    project_flag = f'--project={self.project_id}'
    # Log which account was active before clearing it.
    self.execute_cmd(['gcloud', 'config', 'get-value', 'account', project_flag])
    self.execute_cmd(['gcloud', 'config', 'set', 'account', 'none', project_flag])
def gcp_store_authentication(self):
    """
    Remember the originally active gcloud account (once, class-wide) so it
    can be restored after tests revoke authentication.
    """
    self._validate_key_set()
    # Only capture the account the first time; later calls keep the value.
    if not GcpAuthenticator.original_account:
        GcpAuthenticator.original_account = self.check_output(
            ['gcloud', 'config', 'get-value', 'account', f'--project={self.project_id}']
        ).decode('utf-8')
    self.log.info("Storing account: to restore it later %s", GcpAuthenticator.original_account)
def gcp_restore_authentication(self):
    """
    Restore the gcloud account that was active before the tests, if one was
    stored by gcp_store_authentication.
    """
    self._validate_key_set()
    # Guard clause: nothing to do when no account was ever stored.
    if not GcpAuthenticator.original_account:
        self.log.info("Not restoring the original Google Cloud account: it is not set")
        return
    self.log.info("Restoring original account stored: %s", GcpAuthenticator.original_account)
    subprocess.call(
        [
            'gcloud',
            'config',
            'set',
            'account',
            GcpAuthenticator.original_account,
            f'--project={self.project_id}',
        ]
    )
| 39.433476 | 110 | 0.659882 |
8edd0911ca6a1e6d48b1655def992e464d178ae0 | 3,247 | py | Python | samples/generation/web_app/models/model.py | sintefneodroid/neo | 0999f1dff95c4a8c5880a9b3add532d74f38586a | [
"Apache-2.0"
] | 7 | 2017-09-13T08:28:37.000Z | 2022-01-21T15:59:14.000Z | samples/generation/web_app/models/model.py | sintefneodroid/neo | 0999f1dff95c4a8c5880a9b3add532d74f38586a | [
"Apache-2.0"
] | 25 | 2019-03-25T13:49:43.000Z | 2019-05-02T13:58:13.000Z | samples/generation/web_app/models/model.py | sintefneodroid/neo | 0999f1dff95c4a8c5880a9b3add532d74f38586a | [
"Apache-2.0"
] | 2 | 2017-09-21T10:14:39.000Z | 2017-10-21T09:57:04.000Z | import json
import numpy
import tensorflow as tf
from keras_preprocessing.image import img_to_array, load_img
from tensorflow.python.keras.applications import VGG16
# Load models and support
from tensorflow.python.keras.applications.imagenet_utils import preprocess_input
from tensorflow.python.keras.backend import clear_session
from tensorflow.python.keras.utils import get_file
from tensorflow.python.saved_model import tag_constants
CLASS_INDEX = None
CLASS_INDEX_PATH = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
def get_predictions(predictions, top=5):
    """Decode ImageNet predictions into (class_id, class_name, probability) tuples.

    :param predictions: 2D array of shape (samples, 1000) with class scores
    :param top: number of highest-scoring classes to return per sample
    :return: list with one entry per sample, each a list of ``top`` tuples
        sorted by descending score
    :raises ValueError: if ``predictions`` is not of shape (samples, 1000)
    """
    global CLASS_INDEX
    if len(predictions.shape) != 2 or predictions.shape[1] != 1000:
        raise ValueError(
            f"`decode_predictions` expects a batch of predictions (i.e. a 2D array of shape (samples, "
            f"1000)). Found array with shape: {predictions.shape}"
        )
    if CLASS_INDEX is None:
        file_path = get_file(
            "imagenet_class_index.json", CLASS_INDEX_PATH, cache_subdir="models"
        )
        # Context manager so the file handle is closed (the original leaked
        # it via json.load(open(...))).
        with open(file_path) as index_file:
            CLASS_INDEX = json.load(index_file)
    results = []
    for prediction in predictions:
        top_indices = prediction.argsort()[-top:][::-1]
        indexes = [tuple(CLASS_INDEX[str(i)]) + (prediction[i],) for i in top_indices]
        indexes.sort(key=lambda x: x[2], reverse=True)
        results.append(indexes)
    return results
def vgg_prepare_img_224(img_path):
    """Load an image and preprocess it into a (1, 224, 224, 3) batch for VGG16."""
    image = load_img(img_path, target_size=(224, 224))
    batch = numpy.expand_dims(img_to_array(image), axis=0)
    return preprocess_input(batch)
def get_predicted_categories(img_224) -> list:
    """Run an ImageNet-weighted VGG16 on the prepared batch and return top-5 predictions."""
    # Drop any previous Keras graph/session state before building the model.
    clear_session()
    predictions = VGG16(weights="imagenet").predict(img_224)
    return get_predictions(predictions, top=5)
def prepare_img_size(img_path, size=299):
    """Load an image as a (1, size, size, 3) float batch scaled into [0, 1]."""
    image = load_img(img_path, target_size=(size, size))  # PIL image
    array = img_to_array(image)  # numpy array of shape (size, size, 3)
    # Add the batch dimension and normalize 0-255 pixel values to [0, 1].
    return array.reshape((1,) + array.shape) / 255
def predict(sess, model_graph, input_tensor):
    # TODO(review): unimplemented stub - always returns None, so the dlp
    # prediction path in run_models currently produces no categories.
    pass
def run_models(img_path, base_path, labels_path):
    """Run both classifiers on an image and collect their predictions.

    :param img_path: path to the image to classify
    :param base_path: directory of the exported TF SavedModel
    :param labels_path: text file with one label per line
    :return: dict with a ``results`` mapping (``dlp_predictions`` and
        ``vgg_predictions``) and a status ``message``; either prediction dict
        may be empty when its model fails (best-effort behavior preserved)
    """
    with open(labels_path, "r") as f:
        # Strip trailing newlines so labels can serve as clean dict keys
        # (previously keys ended up like "cat\n").
        labels = [label.rstrip("\n") for label in f.readlines()]

    try:
        img_224 = vgg_prepare_img_224(img_path)
        top_n_prediction = get_predicted_categories(img_224)
        zipped = [a for a in zip(*top_n_prediction[0])]
        vgg_predictions = {cat: prob for cat, prob in zip(zipped[1], zipped[2])}
    except Exception as e:  # was a silent bare except: keep best-effort, log why
        print(f"failed vgg_prediction {e}")
        vgg_predictions = dict()

    try:
        image_in = prepare_img_size(img_path)
        clear_session()
        graph = tf.Graph()
        sess = tf.Session(graph=graph)
        tf.saved_model.loader.load(sess, [tag_constants.SERVING], base_path)
        category_result = predict(sess, model_graph=graph, input_tensor=image_in)
        dlp_predictions = {k: v for v, k in zip(category_result, labels)}
    except Exception as e:  # narrowed from bare except (no longer eats KeyboardInterrupt)
        dlp_predictions = dict()
        print(f"failed dlp_prediction {e}")

    message = "Assessment complete!"
    results = {"dlp_predictions": dlp_predictions, "vgg_predictions": vgg_predictions}
    return {"results": results, "message": message}
53744eddaf82673996169c0834ca82144990be96 | 102,030 | py | Python | test/parallel/test_torch.py | xymyeah/horovod | 987a1eb34e71f2626568b75ade24a630cc539cd6 | [
"Apache-2.0"
] | 2 | 2021-04-03T13:53:21.000Z | 2021-04-03T13:53:26.000Z | test/parallel/test_torch.py | xymyeah/horovod | 987a1eb34e71f2626568b75ade24a630cc539cd6 | [
"Apache-2.0"
] | 4 | 2021-04-15T15:14:24.000Z | 2021-05-25T10:53:23.000Z | test/parallel/test_torch.py | xymyeah/horovod | 987a1eb34e71f2626568b75ade24a630cc539cd6 | [
"Apache-2.0"
] | 1 | 2021-04-16T06:28:54.000Z | 2021-04-16T06:28:54.000Z | # Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright (C) 2019 Intel Corporation
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from distutils.version import LooseVersion
import inspect
import itertools
import os
import platform
import sys
import unittest
import warnings
import time
import json
from collections.abc import Iterable
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import horovod.torch as hvd
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath
_1_5_api = LooseVersion(torch.__version__) >= LooseVersion('1.5.0')
ccl_supported_types = set([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor])
class TorchTests(unittest.TestCase):
"""
Tests for ops in horovod.torch.
"""
def __init__(self, *args, **kwargs):
    """Forward construction to unittest.TestCase and surface warnings."""
    super(TorchTests, self).__init__(*args, **kwargs)
    # Show each warning once per module so deprecations appear in test logs.
    warnings.simplefilter('module')
def convert_cpu_fp16_to_fp32(self, *values):
    """Return *values* with any CPU half-precision tensors promoted to FP32.

    PyTorch doesn't support most CPU ops on FP16 tensors, so comparisons in
    the tests are done in FP32. GPU tensors and non-half tensors pass
    through unchanged.
    """
    converted = []
    for value in values:
        is_cpu_half = value.dtype in [torch.float16, torch.HalfTensor] and not value.is_cuda
        converted.append(value.float() if is_cpu_half else value)
    return converted
def cast_and_place(self, tensor, dtype):
    """Cast *tensor* to *dtype*, moving it onto this rank's local GPU for CUDA types."""
    if not dtype.is_cuda:
        return tensor.type(dtype)
    # Pin GPU tensors to the local rank's device so ranks don't all use GPU 0.
    return tensor.cuda(hvd.local_rank()).type(dtype)
def filter_supported_types(self, types):
    """Drop tensor types that oneCCL cannot handle when running on a CCL build."""
    if 'CCL_ROOT' not in os.environ:
        return types
    return [t for t in types if t in ccl_supported_types]
def test_gpu_required(self):
    """Fail (or skip) when the run expects GPUs but none are visible."""
    if torch.cuda.is_available():
        return
    skip_or_fail_gpu_test(self, "No GPUs available")
@pytest.mark.skipif(platform.system() == 'Darwin', reason='Reinit not supported on macOS')
def test_horovod_reinit(self):
    """Test that Horovod can init -> shutdown -> init successfully."""
    mpi_rank, _ = mpi_env_rank_and_size()
    # HOROVOD_RANK is only set by the Gloo launcher; -1 means MPI is in use.
    gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
    is_mpi = gloo_rank == -1
    if is_mpi:
        # Horovod cannot be re-initialized after shutdown when using MPI, so
        # this test can only be done using the Gloo controller
        self.skipTest("Gloo is not available")
    hvd.init()
    rank, size = hvd.rank(), hvd.size()
    hvd.shutdown()
    hvd.init()
    rank2, size2 = hvd.rank(), hvd.size()
    # Rank and size must survive a shutdown/init cycle unchanged.
    assert rank == rank2
    assert size == size2
def test_horovod_is_initialized(self):
    """Test that is_initialized returned by hvd.is_initialized() is correct."""
    hvd.init()
    assert hvd.is_initialized()
    # -1 sentinel: HOROVOD_RANK only exists under the Gloo launcher.
    gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
    is_mpi = gloo_rank == -1
    if is_mpi:
        # Only applies for Gloo
        self.skipTest("Gloo is not available")
    hvd.shutdown()
    # After an explicit shutdown the flag must flip back to False.
    assert not hvd.is_initialized()
    hvd.init()
def test_horovod_rank(self):
    """Test that the rank returned by hvd.rank() is correct."""
    mpi_rank, _ = mpi_env_rank_and_size()
    # HOROVOD_RANK is set only by the Gloo launcher (-1 means MPI run).
    gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
    # The mpi rank does not match gloo rank, we need to figure which one
    # we are using to run the test.
    is_mpi = gloo_rank == -1
    hvd.init()
    rank = hvd.rank()
    if is_mpi:
        assert mpi_rank == rank
    else:
        assert gloo_rank == rank
def test_horovod_size(self):
    """Test that the size returned by hvd.size() is correct."""
    _, mpi_size = mpi_env_rank_and_size()
    # HOROVOD_SIZE is set only by the Gloo launcher (-1 means MPI run).
    gloo_size = int(os.getenv('HOROVOD_SIZE', -1))
    # The mpi size does not match gloo size, we need to figure which one
    # we are using to run the test.
    is_mpi = gloo_size == -1
    hvd.init()
    size = hvd.size()
    if is_mpi:
        assert mpi_size == size
    else:
        assert gloo_size == size
def test_horovod_allreduce(self):
    """Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Same seed on every rank -> every rank contributes the same tensor,
        # so the allreduce sum equals tensor * size.
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        summed = hvd.allreduce(tensor, average=False)
        tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)
        multiplied = tensor * size
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_average(self):
    """Test that the allreduce correctly averages 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Same seed on every rank: the average of identical tensors is the
        # tensor itself, which is what gets asserted below.
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        averaged = hvd.allreduce(tensor, average=True)
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert torch.allclose(averaged, tensor, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_inplace(self):
    """Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        # Expected value is computed before allreduce_ mutates the input.
        multiplied = self.cast_and_place(tensor * size, dtype)
        tensor = self.cast_and_place(tensor, dtype)
        hvd.allreduce_(tensor, average=False)
        tensor, multiplied = self.convert_cpu_fp16_to_fp32(tensor, multiplied)
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_async_fused(self):
    """Test that the allreduce correctly sums 1D, 2D, 3D tensors
    with Tensor Fusion."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    tests = []
    is_hvd_poll_false_once = False
    # Enqueue all allreduces without synchronizing so they can be fused.
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        handle = hvd.allreduce_async(tensor, average=False)
        if not hvd.poll(handle):
            is_hvd_poll_false_once = True
        tensor, = self.convert_cpu_fp16_to_fp32(tensor)
        multiplied = tensor * size
        tests.append((dtype, multiplied, handle))
    # Make sure it's an asynchronous operation.
    assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
    for dtype, multiplied, handle in tests:
        summed = hvd.synchronize(handle)
        summed, = self.convert_cpu_fp16_to_fp32(summed)
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_multi_gpu(self):
    """Test that the allreduce works on multiple GPUs."""
    # Only do this test if there are GPUs available.
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")
    hvd.init()
    local_rank = hvd.local_rank()
    size = hvd.size()
    # Skip the test if there are not enough GPUs (two per local rank).
    if torch.cuda.device_count() < hvd.local_size() * 2:
        self.skipTest("Not enough GPUs available")
    iter = 0
    dtypes = [torch.cuda.IntTensor, torch.cuda.LongTensor,
              torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
              torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        iter += 1
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        # Alternate between this rank's two assigned GPUs on each iteration.
        device = local_rank * 2 + (iter + local_rank) % 2
        tensor = tensor.cuda(device).type(dtype)
        multiplied = tensor * size
        hvd.allreduce_(tensor, average=False)
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_prescale(self):
    """Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    int_types = [torch.IntTensor, torch.LongTensor,
                 torch.cuda.IntTensor, torch.cuda.LongTensor]
    half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        np.random.seed(1234)
        factor = np.random.uniform()
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        summed = hvd.allreduce(tensor, average=False,
                               prescale_factor=factor)
        # Mirror Horovod's internal math precision to compute the reference
        # value: scaling happens before the sum (prescale).
        factor = torch.tensor(factor, dtype=torch.float64)
        factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
        if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
            # For integer types, scaling done in FP64
            factor = factor.type(torch.float64 if dtype in int_types else dtype)
            tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
        else:
            # For integer types, scaling done in FP64, FP32 math for FP16 on CPU
            factor = factor.type(torch.float32 if dtype in half_types else
                                 torch.float64 if dtype in int_types else dtype)
            tensor = tensor.type(torch.float32 if dtype in half_types else
                                 torch.float64 if dtype in int_types else dtype)
        multiplied = factor * tensor
        multiplied = multiplied.type(dtype)
        summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
        multiplied *= size
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in int_types:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_postscale(self):
    """Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    int_types = [torch.IntTensor, torch.LongTensor,
                 torch.cuda.IntTensor, torch.cuda.LongTensor]
    half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        np.random.seed(1234)
        factor = np.random.uniform()
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        summed = hvd.allreduce(tensor, average=False,
                               postscale_factor=factor)
        # Mirror Horovod's internal math precision to compute the reference
        # value: scaling happens after the sum (postscale).
        factor = torch.tensor(factor, dtype=torch.float64)
        factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
        if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
            # For integer types, scaling done in FP64
            factor = factor.type(torch.float64 if dtype in int_types else dtype)
            tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
        else:
            # For integer types, scaling done in FP64, FP32 math for FP16 on CPU
            factor = factor.type(torch.float32 if dtype in half_types else
                                 torch.float64 if dtype in int_types else dtype)
            tensor = tensor.type(torch.float32 if dtype in half_types else
                                 torch.float64 if dtype in int_types else dtype)
        multiplied = size * tensor
        multiplied = multiplied * factor
        multiplied = multiplied.type(dtype)
        summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in int_types:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_error(self):
    """Test that the allreduce raises an error if different ranks try to
    send tensors of different rank or dimension."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # Same rank, different dimension
    torch.manual_seed(1234)
    dims = [17 + rank] * 3  # each worker gets a differently-sized tensor
    tensor = torch.FloatTensor(*dims).random_(-100, 100)
    try:
        hvd.allreduce(tensor)
        assert False, 'hvd.allreduce did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
    # Same number of elements, different rank
    torch.manual_seed(1234)
    if rank == 0:
        dims = [17, 23 * 57]
    else:
        dims = [17, 23, 57]
    tensor = torch.FloatTensor(*dims).random_(-100, 100)
    try:
        hvd.allreduce(tensor)
        assert False, 'hvd.allreduce did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_allreduce_type_error(self):
    """Test that the allreduce raises an error if different ranks try to
    send tensors of different type."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # Even ranks send int tensors, odd ranks float tensors: must error out.
    dims = [17] * 3
    if rank % 2 == 0:
        tensor = torch.IntTensor(*dims)
    else:
        tensor = torch.FloatTensor(*dims)
    try:
        hvd.allreduce(tensor)
        assert False, 'hvd.allreduce did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_allreduce_cpu_gpu_error(self):
    """Test that the allreduce raises an error if different ranks try to
    perform reduction on CPU and GPU."""
    # Only do this test if there are GPUs available.
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")
    if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
        # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
        self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # Even ranks place the tensor on GPU, odd ranks on CPU: must error out.
    dims = [17] * 3
    if rank % 2 == 0:
        tensor = torch.cuda.FloatTensor(*dims)
    else:
        tensor = torch.FloatTensor(*dims)
    try:
        hvd.allreduce(tensor)
        assert False, 'hvd.allreduce did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_allreduce_duplicate_name_error(self):
    """Test that the allreduce raises an error if there are
    two concurrent operations with the same name."""
    hvd.init()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    dims = [17] * 3
    tensor = torch.FloatTensor(*dims)
    # First submission registers the name; while it is still in flight any
    # re-submission with the same name must be rejected.
    hvd.allreduce_async(tensor, name='duplicate_name')
    try:
        for i in range(10):
            hvd.allreduce_async(tensor, name='duplicate_name')
            assert False, 'hvd.allreduce_async did not throw error'
    except (torch.FatalError, ValueError):
        pass
def test_horovod_allreduce_grad(self):
    """Test the correctness of the allreduce gradient."""
    hvd.init()
    size = hvd.size()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        summed = hvd.allreduce(tensor, average=False)
        summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grad_out = tensor.grad.data.cpu().numpy()
        # Backward of a summed allreduce allreduces the ones, giving `size`.
        expected = np.ones([17] * dim) * size
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_average(self):
    """Test the correctness of the allreduce averaged gradient."""
    hvd.init()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        summed = hvd.allreduce(tensor, average=True)
        summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grad_out = tensor.grad.data.cpu().numpy()
        # With average=True the backward pass also averages, so the gradient
        # of a ones upstream signal stays ones.
        expected = np.ones([17] * dim)
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_grouped_allreduce(self):
    """Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        # Five tensors reduced in a single grouped call.
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        summed = hvd.grouped_allreduce(tensors, average=False)
        tensors, summed = zip(*[self.convert_cpu_fp16_to_fp32(t, s) for t, s in zip(tensors, summed)])
        multiplied = [tensor * size for tensor in tensors]
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(summed, multiplied)]), \
            'hvd.grouped_allreduce produces incorrect results'
def test_horovod_grouped_allreduce_average(self):
    """Test that the grouped allreduce correctly averages 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        # Identical tensors on every rank: the average equals the input.
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        averaged = hvd.grouped_allreduce(tensors, average=True)
        tensors, averaged = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, averaged)])
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(averaged, tensors)]), \
            'hvd.grouped_allreduce produces incorrect results for average'
def test_horovod_grouped_allreduce_inplace(self):
    """Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        # Expected values are computed before grouped_allreduce_ mutates the inputs.
        multiplied = [self.cast_and_place(tensor * size, dtype) for tensor in tensors]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        hvd.grouped_allreduce_(tensors, average=False)
        tensors, multiplied = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, multiplied)])
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No usable tolerance beyond ~15 ranks; stop checking.
            break
        assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(tensors, multiplied)]), \
            'hvd.grouped_allreduce_ produces incorrect results'
def test_horovod_grouped_allreduce_cpu_gpu_error(self):
    """Test that the grouped allreduce raises an error if the input tensor
    list contains a mix of tensors on CPU and GPU."""
    # Only do this test if there are GPUs available.
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")
    hvd.init()
    # Alternate CPU/GPU placement within a single group: must be rejected.
    tensors = [torch.FloatTensor(10) if i % 2 else torch.cuda.FloatTensor(10) for i in range(5)]
    try:
        hvd.grouped_allreduce(tensors, average=False)
        assert False, 'hvd.allreduce did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_grouped_allreduce_grad(self):
    """Test the correctness of the grouped allreduce gradient."""
    hvd.init()
    size = hvd.size()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        for tensor in tensors:
            tensor.requires_grad_()
        summed = hvd.grouped_allreduce(tensors, average=False)
        for s in summed:
            s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
        # Backward of a summed allreduce allreduces the ones, giving `size`.
        expected = np.ones([17] * dim) * size
        for grad_out in grads_out:
            err = np.linalg.norm(expected - grad_out)
            self.assertLess(err, 0.00000001,
                            "gradient %s differs from expected %s, "
                            "error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_average(self):
    """Test the correctness of the allreduce averaged gradient.

    NOTE(review): despite the name, this exercises ``hvd.grouped_allreduce``
    with ``average=True``, mirroring test_horovod_grouped_allreduce_grad.
    """
    hvd.init()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        for tensor in tensors:
            tensor.requires_grad_()
        summed = hvd.grouped_allreduce(tensors, average=True)
        for s in summed:
            s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
        # Averaging divides the summed gradient by the world size, leaving ones.
        expected = np.ones([17] * dim)
        for grad_out in grads_out:
            err = np.linalg.norm(expected - grad_out)
            self.assertLess(err, 0.00000001,
                            "gradient %s differs from expected %s, "
                            "error: %s" % (grad_out, expected, str(err)))
def test_horovod_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Each rank contributes a 17^dim tensor filled with its own rank id.
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        gathered = hvd.allgather(tensor)
        tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)

        # Allgather concatenates along the first dimension only.
        assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)

        for i in range(size):
            rank_tensor = gathered[i * 17:(i + 1) * 17]
            assert list(rank_tensor.shape) == [17] * dim, \
                'hvd.allgather produces incorrect gathered shape'
            # Slice i must be constant, equal to rank i's fill value.
            assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
            assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_variable_size(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors,
    even if those tensors have different sizes along the first dim."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break

        # Each rank gets a different first-dimension length from this table.
        tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
        tensor_sizes = tensor_sizes[:size]

        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        gathered = hvd.allgather(tensor)
        tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)

        # The gathered length is the sum of all per-rank first dimensions.
        expected_size = sum(tensor_sizes)
        assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)

        for i in range(size):
            rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
            # Rank i's contribution occupies the i-th variable-length slice.
            rank_tensor = gathered[sum(
                tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
            assert list(rank_tensor.shape) == rank_size
            assert rank_tensor.data.min() == i
            assert rank_tensor.data.max() == i
def test_horovod_allgather_async_fused(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors
    with Tensor Fusion."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    tests = []
    is_hvd_poll_false_once = False
    for dtype, dim in itertools.product(dtypes, dims):
        rank_shape = [17] * dim
        tensor = torch.FloatTensor(*(rank_shape)).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        # Submit all requests without synchronizing so they can be fused.
        handle = hvd.allgather_async(tensor)
        if not hvd.poll(handle):
            # At least one poll must return False for the op to be truly async.
            is_hvd_poll_false_once = True
        tests.append((handle, rank_shape))

    # Make sure it's an asynchronous operation.
    assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'

    for handle, rank_shape in tests:
        gathered = hvd.synchronize(handle)
        gathered, = self.convert_cpu_fp16_to_fp32(gathered)
        for i in range(size):
            rank_tensor = gathered[i * 17:(i + 1) * 17]
            assert list(rank_tensor.shape) == rank_shape, \
                'hvd.allgather produces incorrect gathered shape'
            assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
            assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_error(self):
    """Test that the allgather returns an error if any dimension besides
    the first is different among the tensors being gathered."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # Each worker picks a different second dimension, which is illegal.
    shape = [17, 10 * (rank + 1), 17]
    mismatched = torch.FloatTensor(*shape).fill_(1).mul_(rank)
    try:
        hvd.allgather(mismatched)
    except (torch.FatalError, RuntimeError):
        return
    assert False, 'hvd.allgather did not throw error'
def test_horovod_allgather_type_error(self):
    """Test that the allgather returns an error if the types being gathered
    differ among the processes"""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    shape = [17] * 3
    # Even ranks submit int32, odd ranks float32 — a cross-rank type mismatch.
    factory = torch.IntTensor if rank % 2 == 0 else torch.FloatTensor
    mismatched = factory(*shape)
    try:
        hvd.allgather(mismatched)
    except (torch.FatalError, RuntimeError):
        return
    assert False, 'hvd.allgather did not throw error'
def test_horovod_allgather_duplicate_name_error(self):
    """Test that the allgather raises an error if there are
    two concurrent operations with the same name."""
    hvd.init()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    tensor = torch.FloatTensor(*([17] * 3))
    # The first submission claims the name; while it is still outstanding,
    # any re-submission under the same name must be rejected.
    hvd.allgather_async(tensor, name='duplicate_name')
    try:
        for _ in range(10):
            hvd.allgather_async(tensor, name='duplicate_name')
    except (torch.FatalError, ValueError):
        return
    assert False, 'hvd.allgather_async did not throw error'
def test_horovod_allgather_grad(self):
    """Test the correctness of the allgather gradient.

    Each rank contributes a variable-length tensor; the backward pass must
    route each rank's gradient slice (filled with the rank id here) back to
    that rank's input.
    """
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break

        tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
        tensor_sizes = tensor_sizes[:size]

        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()

        # Gradient fed into the gathered result: rank r's slice is all r's.
        # FIX: the loop variable was previously named ``size``, shadowing
        # hvd.size() and corrupting the ``size > 35`` guard and the
        # ``tensor_sizes[:size]`` slice on subsequent loop iterations.
        grad_list = []
        for r, rank_size in enumerate(tensor_sizes):
            grad_list.append(self.cast_and_place(
                torch.ones([rank_size] + [17] * (dim - 1)), dtype) * r)
        grad_ys = torch.cat(grad_list, dim=0)

        gathered = hvd.allgather(tensor)
        gathered.backward(grad_ys)
        grad_out = tensor.grad.data.cpu().numpy()

        # This rank's input receives exactly its own slice of the gradient.
        expected = np.ones(
            [tensor_sizes[rank]] + [17] * (dim - 1)
        ) * rank
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_broadcast(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        # ``tensor`` is filled with the local rank; ``root_tensor`` holds
        # the value every rank should receive from the root.
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = self.cast_and_place(tensor, dtype)
        root_tensor = self.cast_and_place(root_tensor, dtype)
        broadcasted_tensor = hvd.broadcast(tensor, root_rank)
        tensor, root_tensor, broadcasted_tensor = \
            self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
        if rank != root_rank:
            # The out-of-place broadcast must leave the source untouched.
            assert (tensor == root_tensor).max() == 0, \
                'hvd.broadcast modifies source tensor'
        assert (broadcasted_tensor.data == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_inplace(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors.

    In-place variant: ``hvd.broadcast_`` overwrites the input tensor itself.
    """
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = self.cast_and_place(tensor, dtype)
        root_tensor = self.cast_and_place(root_tensor, dtype)
        broadcasted_tensor = hvd.broadcast_(tensor, root_rank)
        tensor, root_tensor, broadcasted_tensor = \
            self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
        # The source tensor must now alias/equal the broadcast result...
        assert (tensor == broadcasted_tensor).min() == 1, \
            'hvd.broadcast does not modify source tensor'
        # ...and both must equal the root's value.
        assert (broadcasted_tensor == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_error(self):
    """Test that the broadcast returns an error if any dimension besides
    the first is different among the tensors being broadcasted."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # Each worker picks a different second dimension, which is illegal.
    shape = [17, 10 * (rank + 1), 17]
    mismatched = torch.FloatTensor(*shape).fill_(1).mul_(rank)
    try:
        hvd.broadcast(mismatched, 0)
    except (torch.FatalError, RuntimeError):
        return
    assert False, 'hvd.broadcast did not throw error'
def test_horovod_broadcast_type_error(self):
    """Test that the broadcast returns an error if the types being broadcasted
    differ among the processes"""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    shape = [17] * 3
    # Even ranks submit int32, odd ranks float32 — a cross-rank type mismatch.
    factory = torch.IntTensor if rank % 2 == 0 else torch.FloatTensor
    mismatched = factory(*shape)
    try:
        hvd.broadcast(mismatched, 0)
    except (torch.FatalError, RuntimeError):
        return
    assert False, 'hvd.broadcast did not throw error'
def test_horovod_broadcast_rank_error(self):
    """Test that the broadcast returns an error if different ranks
    specify different root rank."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    payload = torch.FloatTensor(*([17] * 3)).fill_(1)
    try:
        # Every worker names itself as root — an inconsistent request.
        hvd.broadcast(payload, rank)
    except (torch.FatalError, RuntimeError):
        return
    assert False, 'hvd.broadcast did not throw error'
def test_horovod_broadcast_duplicate_name_error(self):
    """Test that the broadcast raises an error if there are
    two concurrent operations with the same name."""
    hvd.init()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    tensor = torch.FloatTensor(*([17] * 3))
    # The first submission claims the name; while it is still outstanding,
    # any re-submission under the same name must be rejected.
    hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')
    try:
        for _ in range(10):
            hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')
    except (torch.FatalError, ValueError):
        return
    assert False, 'hvd.broadcast_async did not throw error'
def test_horovod_broadcast_grad(self):
    """Test the correctness of the broadcast gradient."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()

        broadcasted_tensor = hvd.broadcast(tensor, root_rank)
        broadcasted_tensor.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grad_out = tensor.grad.data.cpu().numpy()

        # Only the root's input influences the output, so only the root
        # receives a non-zero gradient.
        c = 1 if rank == root_rank else 0
        expected = np.ones([17] * dim) * c
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall(self):
    """Test that the alltoall correctly distributes 1D, 2D, and 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")

    dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                                          torch.IntTensor, torch.LongTensor, torch.FloatTensor,
                                          torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Rank r sends (r + 1) rows holding value i to each destination i,
        # so receivers can validate both contents and row counts.
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)

        tensor = torch.Tensor(vals)
        # Expand to the requested dimensionality by doubling along dim 1.
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)

        splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
        tensor = self.cast_and_place(tensor, dtype)
        collected, received_splits = hvd.alltoall(tensor, splits)
        tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)

        # Everything this rank receives was addressed to it, i.e. holds ``rank``.
        assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        # Rank r sends r+1 rows, so 1+2+...+size rows arrive in total.
        assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
        self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],
                                 "hvd.alltoall returned incorrect received_splits")
def test_horovod_alltoall_equal_split(self):
    """Test that the alltoall correctly distributes 1D tensors with default splitting."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")

    dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                                          torch.IntTensor, torch.LongTensor, torch.FloatTensor,
                                          torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Rank r sends (r + 1) rows holding value i to each destination i.
        # With no splits argument, the first dimension is divided evenly.
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)

        tensor = torch.Tensor(vals)
        # Expand to the requested dimensionality by doubling along dim 1.
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)

        tensor = self.cast_and_place(tensor, dtype)
        collected = hvd.alltoall(tensor)
        tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)

        # Everything this rank receives holds its own rank id.
        assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
def test_horovod_alltoall_splits_on_gpu(self):
    """Test that the alltoall works correctly when the splits argument is a tensor on GPU."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")

    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")

    dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                                          torch.IntTensor, torch.LongTensor, torch.FloatTensor,
                                          torch.DoubleTensor, torch.HalfTensor])
    dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
               torch.cuda.IntTensor, torch.cuda.LongTensor,
               torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
               torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Rank r sends (r + 1) rows holding value i to each destination i.
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)

        tensor = torch.Tensor(vals)
        # Expand to the requested dimensionality by doubling along dim 1.
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)

        # Splits live on the GPU; the result splits should come back on GPU too.
        splits = torch.tensor([rank + 1] * size, dtype=torch.int32, device="cuda")
        tensor = self.cast_and_place(tensor, dtype)
        collected, received_splits = hvd.alltoall(tensor, splits)
        tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)

        assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
        self.assertEqual(received_splits.device.type, "cuda", "received_splits should be on GPU here")
        self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],
                                 "hvd.alltoall returned incorrect received_splits")
def test_horovod_alltoall_type_error(self):
    """Test that the alltoall returns an error if the tensor types differ
    across the processes."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # Odd ranks submit int32, even ranks float32 — a cross-rank type mismatch.
    element_type = torch.int32 if rank % 2 else torch.float32
    mismatched = torch.empty(size, dtype=element_type)
    try:
        hvd.alltoall(mismatched)
    except (torch.FatalError, RuntimeError):
        return
    assert False, 'hvd.alltoall did not throw error'
def test_horovod_alltoall_equal_split_length_error(self):
    """Test that the alltoall with default splitting returns an error if the tensor length is not a multiple
    of the number of workers."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # ``size + 1`` elements can never divide evenly across ``size`` workers.
    indivisible = torch.empty(size + 1)
    try:
        hvd.alltoall(indivisible)
    except (torch.FatalError, ValueError):
        return
    assert False, 'hvd.alltoall did not throw error'
def test_horovod_alltoall_splits_error(self):
    """Test that the alltoall returns an error if the sum of the splits entries exceeds
    the first dimension of the input tensor."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # The splits request ``size`` rows but the tensor only has ``size - 1``.
    short_tensor = torch.empty(size - 1)
    oversized_splits = torch.ones(size, dtype=torch.int32)
    try:
        hvd.alltoall(short_tensor, oversized_splits)
    except (torch.FatalError, ValueError):
        return
    assert False, 'hvd.alltoall did not throw error'
def test_horovod_alltoall_splits_type_error(self):
    """Test that the alltoall returns an error if the splits tensor does not
    contain 32-bit integers."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    payload = torch.empty(size)
    # Splits must be int32; a float32 splits tensor must be rejected.
    float_splits = torch.empty(size, dtype=torch.float32)
    try:
        hvd.alltoall(payload, float_splits)
    except (torch.FatalError, ValueError):
        return
    assert False, 'hvd.alltoall did not throw error'
def test_horovod_alltoall_rank_error(self):
    """Test that the alltoall returns an error if any dimension besides
    the first is different among the tensors being processed."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # Workers disagree on the second dimension, which must match everywhere.
    shape = [2 * size, 10 * (rank + 1), 2 * size]
    mismatched = torch.ones(shape)
    try:
        hvd.alltoall(mismatched)
    except (torch.FatalError, RuntimeError):
        return
    assert False, 'hvd.alltoall did not throw error'
def test_horovod_alltoall_grad(self):
    """Test the correctness of the alltoall gradient."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")

    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Rank r sends (r + 1) rows holding value i to each destination i.
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)

        tensor = torch.Tensor(vals)
        # Expand to the requested dimensionality by doubling along dim 1.
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)

        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
        collected, received_splits = hvd.alltoall(tensor, splits)

        collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
        grad_out = tensor.grad.data.cpu().numpy()

        # Alltoall permutes elements, so each input element gets gradient 1.
        expected = np.ones(tensor.shape)
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_equal_split_grad(self):
    """Test the correctness of the alltoall gradient with default splitting."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")

    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Rank r sends (r + 1) rows holding value i to each destination i.
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)

        tensor = torch.Tensor(vals)
        # Expand to the requested dimensionality by doubling along dim 1.
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)

        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        collected = hvd.alltoall(tensor)

        collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
        grad_out = tensor.grad.data.cpu().numpy()

        # Alltoall permutes elements, so each input element gets gradient 1.
        expected = np.ones(tensor.shape)
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_broadcast_state(self):
    """Test broadcasting model/optimizer state and checkpoint round-trips.

    For every supported torch optimizer: run one training step, broadcast
    model and optimizer state from rank 0, checkpoint on rank 0, rebuild a
    fresh model, reload the checkpoint on rank 0 only, broadcast again, and
    verify that all ranks end up with identical state.
    """
    hvd.init()

    N, D_in, H, D_out = 64, 100, 10, 10
    x = torch.randn(N, D_in).requires_grad_()
    y = torch.randn(N, D_out).requires_grad_()

    def new_optimizer(cls, opt_params, model):
        # Keep only the hyper-parameters this optimizer's __init__ accepts.
        # FIX: inspect.getargspec was removed in Python 3.11; use
        # inspect.getfullargspec, whose ``.args`` field is equivalent here.
        p = {
            k: v for k, v in opt_params.items()
            if k in inspect.getfullargspec(cls.__init__).args
        }
        return cls(model.parameters(), **p)

    def create_model(opt_class, opt_params):
        # Small MLP wrapped with a Horovod distributed optimizer.
        model = torch.nn.Sequential(
            torch.nn.Linear(D_in, H),
            torch.nn.ReLU(),
            torch.nn.Linear(H, D_out),
        )
        optimizer = new_optimizer(opt_class, opt_params, model)
        optimizer = hvd.DistributedOptimizer(
            optimizer, named_parameters=model.named_parameters())
        return model, optimizer

    def get_model_param_values(model):
        # Sorted for a deterministic ordering across ranks.
        params = sorted(model.state_dict().items())
        return [(k, v.clone()) for k, v in params]

    def get_optimizer_param_values(optimizer):
        # Flatten per-parameter optimizer state into (key, value) pairs.
        results = []
        state_dict = optimizer.state_dict()
        for group in state_dict['param_groups']:
            for param_id in group['params']:
                if param_id not in state_dict['state']:
                    continue
                params = sorted(state_dict['state'][param_id].items())
                for k, v in params:
                    results.append(
                        (k, v.clone() if torch.is_tensor(v) else v))
        return results

    # L-BFGS is currently unsupported, as are sparse tensors, which are
    # required by SparseAdam optimizer
    optimizers = [
        (subclass.__name__, subclass)
        for subclass in torch.optim.Optimizer.__subclasses__()
        if subclass.__module__.startswith('torch.optim') and
           subclass != torch.optim.LBFGS and
           subclass != torch.optim.SparseAdam
    ]
    # Sort by name so every rank iterates optimizers in the same order.
    optimizers.sort(key=lambda tup: tup[0])

    opt_params_list = [
        dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
        dict(lr=0.2)
    ]

    for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
        model, optimizer = create_model(opt_class, opt_params)
        y_pred = model(x)
        # FIX: ``size_average=False`` is a deprecated legacy argument;
        # ``reduction='sum'`` is the documented equivalent.
        loss = F.mse_loss(y_pred, y, reduction='sum')
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        model_param_values = get_model_param_values(model)
        for name, model_param_value in model_param_values:
            hvd.broadcast_(model_param_value, root_rank=0)

        opt_param_values_updated = []
        opt_param_values = get_optimizer_param_values(optimizer)
        for name, opt_param_value in opt_param_values:
            is_tensor = torch.is_tensor(opt_param_value)
            if is_tensor:
                hvd.broadcast_(opt_param_value, root_rank=0)
            else:
                # Non-tensor state (e.g. step counters) travels by pickle.
                opt_param_value = hvd.broadcast_object(opt_param_value, name=name)
            opt_param_values_updated.append((name, opt_param_value))
        opt_param_values = opt_param_values_updated

        with temppath() as fname:
            if hvd.rank() == 0:
                state = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(state, fname)

            # Re-create from scratch; only rank 0 reloads the checkpoint,
            # so the subsequent broadcasts must restore the other ranks.
            model, optimizer = create_model(opt_class, opt_params)
            if hvd.rank() == 0:
                checkpoint = torch.load(fname)
                model.load_state_dict(checkpoint['model'])
                optimizer.load_state_dict(checkpoint['optimizer'])

        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        model_param_value_after = get_model_param_values(model)
        for before, after in zip(model_param_values,
                                 model_param_value_after):
            name, model_param_value = before
            name_after, model_param_value_after = after
            self.assertEqual(name, name_after)
            self.assertEqual(type(model_param_value),
                             type(model_param_value_after))
            self.assertTrue(
                (model_param_value == model_param_value_after).all())

        # Share the expected state-entry count first so every rank can check
        # that broadcast_optimizer_state populated the same number of entries.
        expected_tensors = hvd.broadcast_object(len(optimizer.state_dict()['state'].values()))
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
        self.assertEqual(len(optimizer.state_dict()['state'].values()), expected_tensors)

        opt_param_values_after = get_optimizer_param_values(optimizer)
        for before, after in zip(opt_param_values, opt_param_values_after):
            name, opt_param_value = before
            name_after, opt_param_value_after = after
            self.assertEqual(name, name_after)
            self.assertEqual(type(opt_param_value),
                             type(opt_param_value_after))
            if torch.is_tensor(opt_param_value):
                self.assertTrue(
                    (opt_param_value == opt_param_value_after).all())
            else:
                self.assertEqual(opt_param_value, opt_param_value_after)
# TODO: investigate why this hangs on K80s
@unittest.skip
def test_broadcast_state_gpu(self):
    """GPU variant of test_broadcast_state (currently skipped, see TODO)."""
    # Only do this test if there are GPUs available.
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")
    # Set default tensor type, ensuring optimizer tensor-wrapping is robust
    # to this setting.
    try:
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        self.test_broadcast_state()
    finally:
        # Always restore the CPU default so later tests are unaffected.
        torch.set_default_tensor_type(torch.FloatTensor)
def test_broadcast_state_options(self):
    """Test that optimizer option values (lr, momentum, ...) broadcast from
    rank 0 to all workers and remain usable for further training steps."""
    hvd.init()

    N, D_in, H, D_out = 64, 100, 10, 10
    x = torch.randn(N, D_in).requires_grad_()
    y = torch.randn(N, D_out).requires_grad_()

    # Root and non-root ranks deliberately start from different options;
    # after the broadcast, every rank must observe params_0.
    params_0 = dict(lr=0.1, momentum=0.8, weight_decay=0.2, nesterov=True,
                    betas=(0.9, 0.999), etas=(0.8, 2.4), step_sizes=(1e-5, 100))
    params_1 = dict(lr=0.2, momentum=0.9, weight_decay=0.1, nesterov=False,
                    betas=(0.8, 0.9), etas=(0.25, 1.75), step_sizes=(1e-7, 5))

    def create_model(opt_class):
        model = torch.nn.Sequential(
            torch.nn.Linear(D_in, H),
            torch.nn.ReLU(),
            torch.nn.Linear(H, D_out),
        )
        params = params_0 if hvd.rank() == 0 else params_1
        # FIX: inspect.getargspec was removed in Python 3.11; use
        # inspect.getfullargspec, whose ``.args`` field is equivalent here.
        p = {
            k: v for k, v in params.items()
            if k in inspect.getfullargspec(opt_class.__init__).args
        }
        opt = opt_class(model.parameters(), **p)
        opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
        return model, opt

    # Include subclass name so we can sort them lexicographically, otherwise different
    # ranks will have different optimizer orderings
    optimizers = [
        (subclass.__name__, subclass)
        for subclass in torch.optim.Optimizer.__subclasses__()
        if subclass.__module__.startswith('torch.optim') and
           subclass != torch.optim.LBFGS and
           subclass != torch.optim.SparseAdam
    ]
    optimizers.sort(key=lambda tup: tup[0])

    for _, opt_class in optimizers:
        model, optimizer = create_model(opt_class)
        y_pred = model(x)
        # FIX: ``size_average=False`` is a deprecated legacy argument;
        # ``reduction='sum'`` is the documented equivalent.
        loss = F.mse_loss(y_pred, y, reduction='sum')
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        hvd.broadcast_optimizer_state(optimizer, root_rank=0)

        # Only options accepted by this optimizer's constructor are checked.
        p0 = {
            k: v for k, v in params_0.items()
            if k in inspect.getfullargspec(opt_class.__init__).args
        }
        for k, p in p0.items():
            p_actual = optimizer.param_groups[0][k]
            if not isinstance(p, Iterable):
                # Normalize scalars to one-element lists for uniform checks.
                p_actual = [p_actual]
                p = [p]
            for i in range(len(p)):
                self.assertEqual(type(p_actual[i]), type(p[i]))
                self.assertAlmostEqual(p_actual[i], p[i], delta=1e-5)

        # Ensure that the parameter option types are compatible with ops
        y_pred = model(x)
        loss = F.mse_loss(y_pred, y, reduction='sum')
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def test_broadcast_state_no_grad(self):
    """Test broadcasting state for a model with a requires_grad=False parameter."""
    class ModelNoGrad(nn.Module):
        def __init__(self, a, b):
            super(ModelNoGrad, self).__init__()
            # Integer index parameter that never receives gradients.
            self.a = nn.Parameter(a.int(), requires_grad=False)
            self.b = nn.Parameter(b)

        def forward(self, x):
            return torch.index_select(self.b, 0, self.a.long()) * x

    hvd.init()

    a = torch.Tensor([1, 3])
    b = torch.rand(4)

    model = ModelNoGrad(a, b)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-6, momentum=0.9, nesterov=True)
    optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())

    # Both broadcasts must succeed despite the no-grad parameter.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)

    grad = optimizer.param_groups[0]['params'][1].grad
    bgrad = hvd.broadcast(grad, root_rank=0)

    # The no-grad parameter has no gradient; the other parameter's gradient
    # must match the copy broadcast from rank 0.
    assert optimizer.param_groups[0]['params'][0].grad is None
    assert torch.all(torch.eq(grad, bgrad)).item()
def test_broadcast_object(self):
    """hvd.broadcast_object must replicate rank 0's Python object on every rank."""
    hvd.init()

    expected_obj = {
        'hello': 123,
        0: [1, 2]
    }
    # Only rank 0 starts with the payload; every other rank starts empty.
    obj = expected_obj if hvd.rank() == 0 else {}

    obj = hvd.broadcast_object(obj, root_rank=0)
    self.assertDictEqual(obj, expected_obj)
def test_allgather_object(self):
    """hvd.allgather_object must collect each rank's (possibly different) object."""
    hvd.init()

    d = {'metric_val_1': hvd.rank()}
    # Rank 1 contributes an extra key to prove heterogeneous objects work.
    if hvd.rank() == 1:
        d['metric_val_2'] = 42

    results = hvd.allgather_object(d)

    expected = [{'metric_val_1': i} for i in range(hvd.size())]
    if hvd.size() > 1:
        expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}

    self.assertEqual(len(results), hvd.size())
    self.assertListEqual(results, expected)
def test_compression_fp16(self):
    """fp16 compression halves float tensors (losslessly for ones) and passes
    non-float dtypes through unchanged."""
    valid_dtypes = [torch.float32, torch.float64]
    invalid_dtypes = [torch.uint8, torch.int8, torch.int16,
                      torch.int32, torch.int64]

    tensor_size = [5] * 3
    compression = hvd.Compression.fp16

    for dtype in valid_dtypes:
        tensor = torch.ones(tensor_size, dtype=dtype)

        tensor_compressed, ctx = compression.compress(tensor)
        self.assertEqual(tensor_compressed.dtype, torch.float16)

        tensor_decompressed = compression.decompress(tensor_compressed, ctx)
        # Decompression must restore the original dtype.
        self.assertEqual(tensor_decompressed.dtype, dtype)

        expected = np.ones(tensor_size)
        err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
        self.assertLess(err, 0.00000001)

    for dtype in invalid_dtypes:
        tensor = torch.ones(tensor_size, dtype=dtype)

        # Non-float dtypes are not compressed: dtype is preserved end-to-end.
        tensor_compressed, ctx = compression.compress(tensor)
        self.assertEqual(tensor_compressed.dtype, dtype)

        tensor_decompressed = compression.decompress(tensor_compressed, ctx)
        self.assertEqual(tensor_decompressed.dtype, dtype)

        if dtype != torch.int8:  # Cannot cast to NumPy with a CharTensor
            expected = np.ones(tensor_size)
            err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
            self.assertLess(err, 0.00000001)
def test_force_allreduce(self):
    """Test that allreduce is forced on all gradients during opt.step()."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    N, D_in, H, D_out = 64, 100, 10, 10
    x = torch.randn(N, D_in).requires_grad_()
    y = torch.randn(N, D_out).requires_grad_()

    def new_optimizer(cls, opt_params, model):
        # Pass only the options this optimizer's constructor accepts.
        p = {
            k: v for k, v in opt_params.items()
            if k in inspect.getargspec(cls.__init__).args
        }
        return cls(model.parameters(), **p)

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = torch.nn.Linear(D_in, H)
            self.fc2 = torch.nn.Linear(H, D_out)
            self.fc3 = torch.nn.Linear(D_out, D_out)

        def forward(self, x_):
            x_ = F.relu(self.fc1(x_))
            x1_ = self.fc2(x_)
            x2_ = self.fc3(F.relu(x1_))
            return x1_, x2_

    def create_model(opt_class, opt_params):
        model = Net()
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        opt = new_optimizer(opt_class, opt_params, model)
        opt = hvd.DistributedOptimizer(
            opt, named_parameters=model.named_parameters())
        return model, opt

    # L-BFGS is currently unsupported, as are sparse tensors, which are
    # required by SparseAdam optimizer
    optimizers = [
        (subclass.__name__, subclass)
        for subclass in torch.optim.Optimizer.__subclasses__()
        if subclass.__module__.startswith('torch.optim') and
        subclass != torch.optim.LBFGS and
        subclass != torch.optim.SparseAdam
    ]
    # Sort lexicographically so every rank builds optimizers in the same order.
    optimizers.sort(key=lambda tup: tup[0])

    opt_params_list = [
        dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
        dict(lr=0.2)
    ]

    for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
        model, optimizer = create_model(opt_class, opt_params)
        y_pred1, y_pred2 = model(x)
        # Different ranks back-propagate through different heads; step() must
        # still allreduce every gradient so the ranks stay in sync.
        if rank == 0:
            loss = F.mse_loss(y_pred1, y, size_average=False)
        else:
            loss = F.mse_loss(y_pred2, y, size_average=False)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def test_model_parallelism(self):
    """Test that tensors on different GPUs are supported."""
    # Only do this test if there are GPUs available.
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")

    hvd.init()
    local_rank = hvd.local_rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    # Skip the test if there are not enough GPUs.
    if torch.cuda.device_count() < hvd.local_size() * 2:
        self.skipTest("Not enough GPUs available")

    # Each worker occupies a pair of adjacent GPUs.
    first_device = local_rank * 2
    second_device = local_rank * 2 + 1

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # Place parts of model on different GPUs.
            self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(first_device)
            self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(second_device)

        def forward(self, x):
            x = x.cuda(first_device)
            x = self.conv1(x)
            x = x.cuda(second_device)
            x = self.conv2(x)
            return x

    model = Net()
    inp = torch.rand([1, 1, 1000, 1000])

    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())

    loss = model(inp).sum()
    opt.zero_grad()
    loss.backward()
    opt.step()
def test_delta_optimizer(self):
    """Test that delta optimizer."""
    hvd.init()
    # TODO support non-MPI Adasum operation
    # Only do this test if there are GPUs available.
    if not hvd.mpi_enabled() or not torch.cuda.is_available():
        self.skipTest("No GPUs available")

    local_rank = hvd.local_rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(local_rank)
            self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(local_rank)

        def forward(self, x):
            x = x.cuda(local_rank)
            x = self.conv1(x)
            x = x.cuda(local_rank)
            x = self.conv2(x)
            return x

    model = Net()
    inp = torch.rand([1, 1, 1000, 1000])

    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    # op=hvd.Adasum selects the Adasum (delta) reduction algorithm.
    opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters(), op=hvd.Adasum)

    loss = model(inp).sum()
    opt.zero_grad()
    loss.backward()
    opt.step()
def test_duplicate_names(self):
    """Test that passing duplicate names to optimizer will fail."""
    net1 = torch.nn.Conv2d(1, 1, 1)
    net2 = torch.nn.Conv2d(1, 1, 1)

    parameters = itertools.chain(net1.parameters(), net2.parameters())
    opt = torch.optim.SGD(parameters, lr=0.1)

    # This will have duplicate names, since both net1 and net2 have 'weight' and 'bias'
    named_parameters = itertools.chain(net1.named_parameters(), net2.named_parameters())

    try:
        hvd.DistributedOptimizer(opt, named_parameters=named_parameters)
        assert False, 'hvd.DistributedOptimizer did not throw error'
    except ValueError:
        # Expected: duplicate parameter names are rejected.
        pass
def test_dynamic_requires_grad(self):
    """Test that makes sure that gradients can be turned off/on dynamically."""
    hvd.init()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    gen = torch.nn.Conv2d(1, 10, 1)
    disc = torch.nn.Conv2d(10, 1, 1)
    inp = torch.rand([1, 1, 100, 100])

    gen_opt = torch.optim.SGD(gen.parameters(), lr=0.1)
    gen_opt = hvd.DistributedOptimizer(gen_opt, named_parameters=gen.named_parameters())

    disc_opt = torch.optim.SGD(disc.parameters(), lr=0.1)
    disc_opt = hvd.DistributedOptimizer(disc_opt, named_parameters=disc.named_parameters())

    def train_step(train_generator=False, train_discriminator=False):
        # Toggle requires_grad per sub-network, GAN style.
        for p in gen.parameters():
            p.requires_grad_(train_generator)
        for p in disc.parameters():
            p.requires_grad_(train_discriminator)

        gen_opt.zero_grad()
        disc_opt.zero_grad()

        loss = disc(gen(inp)).sum()
        loss.backward()

        # Gradients must exist exactly for the network being trained.
        for p in gen.parameters():
            assert train_generator == (p.grad is not None and p.grad.max().is_nonzero()), \
                'Gradient for generator is zero but it should be trained or vice versa.'
        for p in disc.parameters():
            assert train_discriminator == (p.grad is not None and p.grad.max().is_nonzero()), \
                'Gradient for discriminator is zero but it should be trained or vice versa.'

        if train_generator:
            gen_opt.step()
        if train_discriminator:
            disc_opt.step()

    for x in range(10):
        # Step 1: train generator.
        train_step(train_generator=True)

        # Step 2: train discriminator.
        train_step(train_discriminator=True)
def test_gradient_clipping(self):
    """Test gradient clipping example."""
    hvd.init()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    x = torch.ones(1, 1).requires_grad_()
    y = torch.ones(1, 1).requires_grad_()

    model = torch.nn.Linear(1, 1)
    model.weight = torch.nn.Parameter(torch.zeros(1, 1) + 0.5)
    model.bias = torch.nn.Parameter(torch.zeros(1))
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    optimizer = hvd.DistributedOptimizer(
        optimizer, named_parameters=model.named_parameters())

    y_pred = model(x)
    loss = F.mse_loss(y_pred, y)
    optimizer.zero_grad()
    loss.backward()
    # Synchronize first so clipping operates on the fully reduced gradients.
    optimizer.synchronize()
    prior_grad = model.weight.grad.item()
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
    clipped_grad = model.weight.grad.item()
    assert abs(prior_grad) > abs(clipped_grad)
    # skip_synchronize() avoids a redundant second reduction inside step().
    with optimizer.skip_synchronize():
        optimizer.step()
def test_synchronize_step_warning(self):
    """
    Test that .synchronize() followed by .step() without
    optimizer.skip_synchronize() context will produce a warning.
    """
    hvd.init()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    x = torch.zeros(1, 1).requires_grad_()
    y = torch.ones(1, 1).requires_grad_()

    model = torch.nn.Linear(1, 1)
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    optimizer = hvd.DistributedOptimizer(
        optimizer, named_parameters=model.named_parameters())

    y_pred = model(x)
    loss = F.mse_loss(y_pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.synchronize()
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
    with warnings.catch_warnings(record=True) as ws:
        # Calling step() without skip_synchronize() after a manual
        # synchronize() should emit exactly one warning.
        optimizer.step()
        assert len(ws) == 1
        assert 'optimizer.step() called without optimizer.skip_synchronize()' \
            in str(ws[0].message)
def test_no_named_parameters(self):
    """Test that leaving the default named_parameters=None will not throw an error."""
    hvd.init()

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = torch.nn.Conv2d(1, 100, 1)
            self.conv2 = torch.nn.Conv2d(100, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            return x

    model = Net()
    inp = torch.rand([1, 1, 1000, 1000])

    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    # No named_parameters: Horovod generates its own parameter names.
    opt = hvd.DistributedOptimizer(opt)

    loss = model(inp).sum()
    opt.zero_grad()
    loss.backward()
    opt.step()
def test_missing_named_parameters(self):
    """Test that naming half of the model parameters will throw an error."""
    hvd.init()

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = torch.nn.Conv2d(1, 100, 1)
            self.conv2 = torch.nn.Conv2d(100, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            return x

    model = Net()
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    try:
        # Passing only a subset of named parameters must be rejected.
        hvd.DistributedOptimizer(opt,
                                 named_parameters=list(model.named_parameters())[0:1])
        assert False, 'hvd.DistributedOptimizer did not throw error'
    except ValueError:
        pass
def test_horovod_join_allreduce(self):
    """Test Join op with allreduce."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    dtypes = [torch.IntTensor, torch.LongTensor,
              torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]

    integral_types = [torch.IntTensor, torch.LongTensor, torch.cuda.IntTensor, torch.cuda.LongTensor]

    dims = [1, 2, 3]
    first_join_ranks = [0, 1]
    cachings = [False, True]
    for dtype, dim, first_join_rank, caching in itertools.product(dtypes, dims, first_join_ranks, cachings):
        torch.manual_seed(1234)

        def div(t, s):
            # Integer tensors need floor division under the 1.5+ API.
            if _1_5_api and dtype in integral_types:
                return t.floor_divide(s)
            return t / s

        # Use two tensors to test fusion
        tensor_a = torch.FloatTensor(*([5] * dim)).random_(-100, 100)
        tensor_a = self.cast_and_place(tensor_a, dtype)
        tensor_b = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor_b = self.cast_and_place(tensor_b, dtype)

        # caching=True warms up the response cache before any rank joins.
        if caching:
            handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
            handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
            averaged_a = hvd.synchronize(handle_a)
            averaged_b = hvd.synchronize(handle_b)

        if rank == first_join_rank:
            # This rank joins immediately and contributes nothing.
            if dtype.is_cuda:
                ret = hvd.join(hvd.local_rank())
            else:
                ret = hvd.join()
        else:
            handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
            handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
            averaged_a = hvd.synchronize(handle_a)
            averaged_b = hvd.synchronize(handle_b)
            if dtype.is_cuda:
                ret = hvd.join(hvd.local_rank())
            else:
                ret = hvd.join()

            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in integral_types:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                break

            # size-1 ranks contributed, but the average divides by size.
            assert torch.allclose(averaged_a, div(tensor_a * (size - 1), size), threshold), \
                'hvd.join with hvd.allreduce produces incorrect results'
            assert torch.allclose(averaged_b, div(tensor_b * (size - 1), size), threshold), \
                'hvd.join with hvd.allreduce produces incorrect results'
def test_horovod_join_allgather(self):
    """Test Join op with allgather."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    dims = [17] * 3
    tensor = torch.FloatTensor(*dims)

    if rank == 0:
        if torch.cuda.is_available():
            ret = hvd.join(hvd.local_rank())
        else:
            ret = hvd.join()
    else:
        try:
            # allgather with a joined rank in the ring must fail.
            hvd.allgather(tensor)
            assert False, 'hvd.allgather did not throw error'
        except (torch.FatalError, RuntimeError):
            pass
        ret = hvd.join(hvd.local_rank())
def test_horovod_join_broadcast(self):
    """Test Join op with broadcast."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")

    dims = [17] * 3
    tensor = torch.FloatTensor(*dims)

    if rank == 0:
        ret = hvd.join(hvd.local_rank())
    else:
        try:
            # broadcast with a joined rank in the ring must fail.
            broadcasted_tensor = hvd.broadcast(tensor, 1, name="test_horovod_join_broadcast")
            assert False, 'hvd.broadcast did not throw error'
        except (torch.FatalError, RuntimeError):
            pass

        if torch.cuda.is_available():
            ret = hvd.join(hvd.local_rank())
        else:
            ret = hvd.join()
def test_horovod_sync_batch_norm(self):
    """Tests Horovod version of SyncBatchNorm."""
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")

    hvd.init()

    # Two batch layouts; one (rank, 4, k) slice per rank.
    ts_list = [
        torch.stack([
            torch.tensor([
                [r, r + 1],
                [r * 2, r * 2 + 1],
                [r * 3, r * 3 + 1],
                [r * 4, r * 4 + 1]
            ])
            for r in range(hvd.size())
        ]),
        torch.stack([
            torch.tensor([
                [r + 1],
                [r * 2 + 1],
                [r * 3 + 1],
                [r * 4 + 1]
            ])
            for r in range(hvd.size())
        ]),
    ]

    for ts in ts_list:
        sync_bn = hvd.SyncBatchNorm(num_features=4)
        sync_bn.cuda(hvd.local_rank())

        bn = torch.nn.BatchNorm1d(num_features=4)
        bn.cuda(hvd.local_rank())

        ts = ts.cuda(hvd.local_rank()).float()
        ts1 = ts.clone().requires_grad_()
        ts2 = ts.clone().requires_grad_()

        # Training: sync_bn sees only this rank's slice; plain bn sees the
        # whole batch. Both must agree on outputs and running statistics.
        sync_bn_out = sync_bn(ts1[hvd.rank()].unsqueeze(0))
        bn_out = bn(ts2)
        assert torch.allclose(sync_bn_out, bn_out[hvd.rank()].unsqueeze(0), 1e-6)
        assert torch.allclose(sync_bn.running_mean, bn.running_mean, 1e-6)
        assert torch.allclose(sync_bn.running_var, bn.running_var, 1e-6)

        # Gradients
        sync_bn_out.sum().backward()
        bn_out.mean(dim=0).sum().backward()
        assert torch.allclose(hvd.allreduce(sync_bn.weight.grad, name='sync_bn.weight.grad'), bn.weight.grad, 1e-6)
        assert torch.allclose(hvd.allreduce(sync_bn.bias.grad, name='sync_bn.bias.grad'), bn.bias.grad, 1e-6)
        assert torch.allclose(hvd.allreduce(ts1.grad, name='ts1.grad'), ts2.grad, 1e-6)
@pytest.mark.skip(reason='https://github.com/horovod/horovod/issues/2496')
def test_timeline_api(self):
    """Exercise start_timeline/stop_timeline, including restarting with new
    filenames, with and without cycle markers."""
    hvd.init()

    def check_file(fname, check_cycle=True):
        # Only rank 0 writes the timeline file.
        if hvd.rank() == 0:
            with open(fname, 'r') as timeline_file:
                timeline_text = timeline_file.read()
                assert 'allreduce.test_allreduce' in timeline_text, timeline_text
                assert 'start_time_since_epoch_in_micros' in timeline_text, timeline_text
                assert 'NEGOTIATE_ALLREDUCE' in timeline_text, timeline_text
                assert 'ALLREDUCE' in timeline_text, timeline_text
                json_obj = json.loads(timeline_text)
                assert json_obj is not None
                if check_cycle:
                    assert 'CYCLE_START' in timeline_text, timeline_text

    with temppath() as fname1:
        hvd.start_timeline(fname1, mark_cycles=True)
        hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
        # stop timeline will immediately stop events to be registered in timeline. We are providing some time
        # before calling stop so that mark_cycle events can be registered in timeline file.
        time.sleep(0.2)
        hvd.stop_timeline()
        check_file(fname1)

    # Test resuming with a different filename.
    with temppath() as fname2:
        hvd.start_timeline(fname2, mark_cycles=True)
        time.sleep(0.2)
        hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
        # stop timeline will immediately stop events to be registered in timeline. We are providing some time
        # before calling stop so that cycle events can be registered in timeline file.
        time.sleep(0.2)
        hvd.stop_timeline()
        check_file(fname2)

    # Test resuming with a different filename, but mark_cycles=False
    with temppath() as fname3:
        # Make sure that last stop timeline has been processed.
        hvd.start_timeline(fname3, mark_cycles=False)
        time.sleep(0.2)
        hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
        # stop timeline will immediately stop events to be registered in timeline. We are providing some time
        # before calling stop so that events can be registered in timeline file.
        hvd.stop_timeline()
        check_file(fname3, check_cycle=False)

    # Test resuming with a different filename, but mark_cycles=True
    with temppath() as fname4:
        # Make sure that last stop timeline has been processed.
        hvd.start_timeline(fname4, mark_cycles=True)
        time.sleep(0.2)
        hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
        # stop timeline will immediately stop events to be registered in timeline. We are providing some time
        # before calling stop so that cycle events can be registered in timeline file.
        time.sleep(0.2)
        hvd.stop_timeline()
        check_file(fname4, check_cycle=True)

    with temppath() as fname5:
        # Make sure that last stop timeline has been processed.
        # A duplicate start_timeline call must be harmless.
        hvd.start_timeline(fname5, mark_cycles=False)
        hvd.start_timeline(fname5, mark_cycles=False)
        time.sleep(0.2)
        hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
        hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
        time.sleep(0.2)
        hvd.stop_timeline()
        check_file(fname5, check_cycle=False)

    hvd.shutdown()
def test_optimizer_no_named_parameters(self):
    """Without named_parameters, DistributedOptimizer must still derive unique
    parameter names that agree across all ranks."""
    hvd.init()

    model = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 10))
    optimizer = torch.optim.SGD(
        [{"params": model[0].parameters()}, {"params": model[1].parameters()}, ],
        lr=0.001,
    )
    optimizer = hvd.DistributedOptimizer(optimizer)

    params = optimizer._parameter_names
    # Generated names must be unique per parameter.
    self.assertEqual(len(params), len(set(params.values())))

    # Make sure all workers have the same set of parameter names
    all_param_names = hvd.allgather_object(set(params.values()))
    self.assertEqual(len(all_param_names), hvd.size())
    for param_names in all_param_names:
        self.assertEqual(all_param_names[0], param_names)
def test_sparse_embeddings(self):
    """Test that Horovod will correctly aggregate sparse gradients."""
    hvd.init()

    for sparse_as_dense in [False, True]:
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.embedding = nn.Embedding(10, 3, sparse=True)

            def forward(self, x):
                x = self.embedding(x)
                return x

        model = Net()

        # Ranks feed differently shaped batches to produce different
        # sparse gradient index sets.
        if hvd.rank() == 0:
            inp = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
        else:
            inp = torch.LongTensor([[1, 3, 4], [4, 7, 9]])

        # list() see: https://github.com/pytorch/pytorch/issues/47594
        opt = torch.optim.SparseAdam(list(model.parameters()), lr=0.1)
        opt = hvd.DistributedOptimizer(opt, sparse_as_dense=sparse_as_dense)

        loss = model(inp).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()
def test_async_sparse_allreduce(self):
    """Test that allgather over indices and values is equivalent to allreduce."""
    hvd.init()

    # Generate random tensors, then convert them to sparse
    def random_sparse_tensor(*shape):
        t = torch.rand(*shape)
        t[t < 0.8] = 0
        return t.to_sparse()

    tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
    tensors = [random_sparse_tensor(d0, 10) for d0 in tensor_sizes]
    # Reference: dense allreduce of the same data.
    allreduced_tensors = [hvd.allreduce(t.to_dense()) for t in tensors]

    handles = [hvd.sparse_allreduce_async(t, op=hvd.Average, name=str(i))
               for i, t in enumerate(tensors)]
    allgathered_tensors = [handle() for handle in handles]

    for reduced, gathered in zip(allreduced_tensors, allgathered_tensors):
        assert torch.allclose(reduced, gathered.to_dense(), 1e-6)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 41.695954 | 126 | 0.5803 |
2ec256bee4c19af2b4ec33434bdd679ddb22337a | 1,781 | py | Python | src/classifier/model.py | SunbirdAI/SunBERT | b73ccf5115a0303b59b27df602bfe064c8ebd6fa | [
"MIT"
] | null | null | null | src/classifier/model.py | SunbirdAI/SunBERT | b73ccf5115a0303b59b27df602bfe064c8ebd6fa | [
"MIT"
] | 1 | 2021-02-24T17:11:26.000Z | 2021-02-24T17:11:31.000Z | src/classifier/model.py | SunbirdAI/SunBERT | b73ccf5115a0303b59b27df602bfe064c8ebd6fa | [
"MIT"
] | 1 | 2021-12-10T12:27:17.000Z | 2021-12-10T12:27:17.000Z | import json
import torch
import torch.nn.functional as F
from transformers import BertTokenizer
from .sunbert_classifer import SunbertClassifier
# Load runtime settings (model paths, class names, max sequence length)
# once at import time.
with open("config.json") as json_file:
    config = json.load(json_file)
class Model:
    """
    :Base Model

    Loads the SunBERT tokenizer and trained classifier once, then serves
    text-classification requests via predict().
    """
    def __init__(self):
        # Prefer GPU when available; weights are mapped onto this device.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.tokenizer = BertTokenizer.from_pretrained(config["BERT_MODEL"])

        classifier = SunbertClassifier(len(config["CLASS_NAMES"]))
        classifier.load_state_dict(
            torch.load(config["PRE_TRAINED_MODEL"], map_location=self.device)
        )
        # eval() disables dropout for inference.
        classifier = classifier.eval()
        self.classifier = classifier.to(self.device)

    def predict(self, text):
        """Classify *text*.

        Returns a tuple of (predicted class name, confidence tensor,
        {class name: probability}).
        """
        encoded_text = self.tokenizer.encode_plus(
            text,
            max_length = config["MAX_SEQUENCE_LEN"],
            add_special_tokens = True,
            return_token_type_ids = False,
            # NOTE(review): pad_to_max_length is deprecated in newer
            # transformers releases; padding="max_length" is the replacement.
            pad_to_max_length = True,
            return_attention_mask = True,
            return_tensors = "pt",
        )
        input_ids = encoded_text["input_ids"].to(self.device)
        attention_mask = encoded_text["attention_mask"].to(self.device)

        # Inference only: skip autograd bookkeeping.
        with torch.no_grad():
            probabilities = F.softmax(self.classifier(input_ids, attention_mask), dim = 1)
        confidence, predicted_class = torch.max(probabilities, dim=1)
        predicted_class = predicted_class.cpu().item()
        probabilities = probabilities.flatten().cpu().numpy().tolist()
        return(
            config["CLASS_NAMES"][predicted_class],
            confidence,
            dict(zip(config["CLASS_NAMES"], probabilities))
        )
# Module-level singleton: the model is loaded once at import time.
model = Model()


def get_model():
    """Return the shared Model instance (dependency-injection hook)."""
    return model
| 27.828125 | 90 | 0.637282 |
bc1f3d20b8dc42f6917418c913d874ee4e77a2d7 | 1,963 | py | Python | video_consumer.py | jamesee/rabbitMQ-video | 39241ab0507b992daa666c75628f662ee99b8830 | [
"BSD-3-Clause"
] | null | null | null | video_consumer.py | jamesee/rabbitMQ-video | 39241ab0507b992daa666c75628f662ee99b8830 | [
"BSD-3-Clause"
] | null | null | null | video_consumer.py | jamesee/rabbitMQ-video | 39241ab0507b992daa666c75628f662ee99b8830 | [
"BSD-3-Clause"
] | null | null | null | """Kombu-based Video Stream Consumer
Written by Minsu Jang
Date: 2018-06-09
Reference
- Building Robust RabbitMQ Consumers With Python and Kombu: Part 1 (https://medium.com/python-pandemonium/building-robust-rabbitmq-consumers-with-python-and-kombu-part-1-ccd660d17271)
- Building Robust RabbitMQ Consumers With Python and Kombu: Part 2 (https://medium.com/python-pandemonium/building-robust-rabbitmq-consumers-with-python-and-kombu-part-2-e9505f56e12e)
"""
import cv2
import numpy as np
import sys
import time
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
# Default RabbitMQ server URI
# NOTE: guest/guest only works against a local broker; override for remote use.
rabbit_url = 'amqp://guest:guest@localhost:5672//'
# Kombu Message Consuming Worker
class Worker(ConsumerMixin):
    """Consumes JPEG-encoded frames from the given queues and displays them."""

    def __init__(self, connection, queues):
        self.connection = connection
        self.queues = queues

    def get_consumers(self, Consumer, channel):
        # Only accept image/jpeg payloads; every message goes to on_message.
        return [Consumer(queues=self.queues,
                         callbacks=[self.on_message],
                         accept=['image/jpeg'])]

    def on_message(self, body, message):
        """Decode one JPEG message body, show it via OpenCV, then ack."""
        # get the original jpeg byte array size
        # NOTE(review): getsizeof(bytes) - 33 relies on CPython's bytes-object
        # overhead being exactly 33 bytes; len(body) looks like the robust
        # alternative — confirm before changing.
        size = sys.getsizeof(body) - 33
        # size = sys.getsizeof(body.tobytes()) - 33

        # jpeg-encoded byte array into numpy array
        np_array = np.frombuffer(body, dtype=np.uint8)
        # np_array = np.frombuffer(body.tobytes(), dtype=np.uint8)
        np_array = np_array.reshape((size, 1))

        # decode jpeg-encoded numpy array
        image = cv2.imdecode(np_array, 1)

        # show image
        cv2.imshow("image", image)
        cv2.waitKey(1)

        # send message ack
        message.ack()
def run():
    """Connect to RabbitMQ and consume video frames until interrupted."""
    exchange = Exchange("video-exchange", type="direct")
    queues = [Queue("video-queue", exchange, routing_key="video")]
    # heartbeat keeps the broker from dropping an idle connection.
    with Connection(rabbit_url, heartbeat=4) as conn:
        worker = Worker(conn, queues)
        worker.run()
# Script entry point.
if __name__ == "__main__":
    run()
fb1ee2c1bb859d15ad7c64f81394e02cbdeb75f5 | 308 | py | Python | pgdrive/examples/__init__.py | gamecraftCZ/pgdrive | 11fbb5a5ca1dc354d755f00eb282bcffe5720bcc | [
"Apache-2.0"
] | null | null | null | pgdrive/examples/__init__.py | gamecraftCZ/pgdrive | 11fbb5a5ca1dc354d755f00eb282bcffe5720bcc | [
"Apache-2.0"
] | null | null | null | pgdrive/examples/__init__.py | gamecraftCZ/pgdrive | 11fbb5a5ca1dc354d755f00eb282bcffe5720bcc | [
"Apache-2.0"
] | null | null | null | from pgdrive.examples.ppo_expert.numpy_expert import expert
def get_terminal_state(info):
    """Map an episode *info* dict to a human-readable terminal-state label.

    Checks are ordered: a crash outranks leaving the road, which outranks
    reaching the destination; anything else means the step limit was hit.
    """
    if info["crash"]:
        return "Crash"
    if info["out_of_road"]:
        return "Out of Road"
    if info["arrive_dest"]:
        return "Success"
    return "Max Step"
| 22 | 59 | 0.616883 |
0528ebe8b96c27862853093f52784bd16947b82f | 98,928 | py | Python | idler.py | robert-osborne/IdleIdler | e3a93f2ebc90d36d266301a2b88a6695e34def18 | [
"MIT"
] | 2 | 2021-04-06T14:57:46.000Z | 2021-11-21T01:32:46.000Z | idler.py | robert-osborne/IdleIdler | e3a93f2ebc90d36d266301a2b88a6695e34def18 | [
"MIT"
] | 3 | 2021-09-08T02:59:10.000Z | 2022-03-12T00:54:53.000Z | idler.py | robert-osborne/IdleIdler | e3a93f2ebc90d36d266301a2b88a6695e34def18 | [
"MIT"
] | 3 | 2021-03-23T18:25:57.000Z | 2021-04-20T12:16:31.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2021 Robert Osborne
#
# python3 idler.py --help
#
import argparse
import textwrap
import time
import sys
import os
import configparser
import datetime
import distutils
import json
import math
import glob
import shutil
import pyautogui
import pygetwindow as gw
from PIL import Image, ImageChops, ImageStat
from pathlib import Path
from PIL import ImageGrab
from functools import partial
# Capture frames from every attached display, not just the primary one.
ImageGrab.grab = partial(ImageGrab.grab, all_screens=True)

# GLOBALS
# Yeah, yeah, globals are bad, sue me
config = configparser.ConfigParser()
top_x = 0            # game window origin (set at startup / by hunt_for_menu)
top_y = 0
screen_scale = 2     # physical-to-logical pixel ratio (2 on retina displays)
infinite_loop = False

# Usually 376, 426, etc. and set restart on 386, 436, ...
CHARGE_TIME = 60.0 * 2.5
CHARGE_INTERVAL = 15.0
HAVI_ULT = '8'

APP_NAME = "Idle Champions"
RUNTIME_MINUTES = 20
GEM_LOOPS = 20000
DEFAULT_ADVENTURE = "madwizard"
DEFAULT_LEVEL_DELAY = 20
NUM_FAMILIARS = 6
# Image-difference thresholds used by safe_image_compare().
MAX_TOTAL_IMAGE_MEAN = 35.0
MAX_IMAGE_MEAN = 10.0

# TODO: launch checklist
# [ ] Change run to no-modron, just charge briv at end of timer
# [ ] Create backup git on github with full history
# [ ] Squash history
# [ ] Make public

# TODO: things on my todo list
# [ ] Use flag for restart with Steam image vs. x,y (more stable?)
# [ ] Flag to pause on level 1 and allow Shandie's dash to reset
# [ ] Flag to do briv swapping at each zone complete (heavy duty and occupies entire time)
# [ ] Make champ flags work so don't need team in config file or can modify team in config file (for chores)
# [ ] Add more champs to the familiar leveling code
# [ ] Level Shandie and then wait for dash to trigger

COUNTDOWN = 5
DEFAULT_DELAY = 0.7
DEFAULT_DRAG = 0.1
LEVEL_TRYS=20

# Handle retinae vs standard displays by swapping prefixes
first_prefix = "./images/sml-"
second_prefix = "./images/"

# speed characters
have_briv = True
have_binwin = True
have_celeste = True
have_donaar = False
have_deekin = True
have_havilar = True
have_minsc = True
have_sentry = True
have_viper = False
have_shandie = True
have_melf = True
have_gold = True
bounty_size = "small"

# Diagnostics flags consumed by verbose_print()/debug_print().
verbose = False
debugging = False

# Size of the in-game menu button image, in logical pixels.
MENU_BUTTON_WIDTH = 30
MENU_BUTTON_HEIGHT = 30
def verbose_print(msg):
    """Emit *msg* only when the module-level ``verbose`` flag is enabled."""
    if verbose:
        print(msg)
def debug_print(msg):
    """Emit *msg* only when the module-level ``debugging`` flag is enabled."""
    if debugging:
        print(msg)
def with_top_offset(off_x, off_y, as_point=False):
    """Translate a window-relative offset into absolute screen coordinates.

    Returns a pyautogui Point when *as_point* is true, else an (x, y) tuple.
    """
    abs_x, abs_y = top_x + off_x, top_y + off_y
    if as_point:
        return pyautogui.Point(abs_x, abs_y)
    return abs_x, abs_y
def menu_location():
    """Absolute screen position of the in-game menu button."""
    # Point(x=113, y=147)
    # return with_top_offset(0, 0)
    return with_top_offset(32, 73)
def top_location_from_menu(x, y):
    """Invert menu_location(): given the menu image's screen coordinates,
    return the window's top-left corner.
    """
    # menu offset (32, 73) plus half the menu image (9, 9)
    return x - (32 + 9), y - (73 + 9)
def print_reverse_without_offset(x, y, as_point=False):
    """Convert absolute screen coordinates into an offset relative to
    (top_x, top_y), print it for debugging, and return it (optionally as a
    pyautogui Point).
    """
    x = x - top_x
    y = y - top_y
    # Bug fix: print() is not printf-style. The original passed (x, y) as a
    # second positional argument, producing
    # "Offset from top_x, top_y: %d,%d (x, y)" instead of the formatted values.
    print("Offset from top_x, top_y: %d,%d" % (x, y))
    if as_point:
        return pyautogui.Point(x, y)
    return x, y
def move_to_menu():
    """Move the mouse cursor onto the menu button."""
    x, y = menu_location()
    pyautogui.moveTo(x,y)
def move_to_offset(x, y, duration=0.0):
    """Move the cursor to a window-relative offset over *duration* seconds."""
    x, y = with_top_offset(x, y)
    pyautogui.moveTo(x,y, duration=duration)
def click_offset(x, y, duration=0.0, delay=None, tag=None):
    """Click at window-relative (x, y).

    *tag* labels the click in verbose logging; *delay* sleeps after clicking.
    """
    move_to_offset(x, y, duration=duration)
    pyautogui.click()
    if tag:
        verbose_print("%s: clicking on %d, %d" % (tag, x, y))
    if delay:
        time.sleep(delay)
def click_spec_at(x, y, duration=0.0, delay=DEFAULT_DELAY, tag=None):
    """Sleep *delay*, then click a specialization choice at (x, y).

    NOTE(review): *delay* is applied twice — once here and once inside
    click_offset — so the effective pause is about 2*delay; confirm intended.
    """
    # don't use this if modron_specialization is on
    time.sleep(delay)
    click_offset(x, y, duration=duration, delay=delay, tag=tag)
def region_for_screenshot(x, y, width, height):
    """Build a pyautogui screenshot region tuple in physical pixels."""
    x, y = with_top_offset(x, y)
    return (screen_scale * x, screen_scale * y, screen_scale * width, screen_scale * height)
def location_for_screenshot(x,y):
    """Scale logical window coordinates to physical screenshot pixels."""
    return screen_scale * x, screen_scale * y
def safe_image_compare(im1, im2, save=False, max_mean=MAX_TOTAL_IMAGE_MEAN):
    """Return True when *im1* and *im2* are "close enough".

    The per-channel mean of the pixel difference must stay under
    MAX_IMAGE_MEAN, and the three channel means summed must stay under
    *max_mean*. With *save* set, mismatching inputs and their diff are
    dumped to cmp-*.png for inspection.
    """
    diff = ImageChops.difference(im1, im2)
    stat = ImageStat.Stat(diff)
    debug_print("mean=%s" % str(stat.mean))
    debug_print("rms=%s" % str(stat.rms))
    r_mean, g_mean, b_mean = stat.mean[0], stat.mean[1], stat.mean[2]
    match = not (
        r_mean + g_mean + b_mean > max_mean
        or r_mean > MAX_IMAGE_MEAN
        or g_mean > MAX_IMAGE_MEAN
        or b_mean > MAX_IMAGE_MEAN
    )
    if save and not match:
        im1.save("cmp-im1.png")
        im2.save("cmp-im2.png")
        diff.save("cmp-diff.png")
    return match
# returns found, ready
# found is True if menu found at expected place
# ready is True if menu is not greyed out (e.g. no Okay button)
# Reference images for the menu button, loaded once at import.
menu_blue_png = Image.open("images/menu_blue.png")
menu_blue = menu_blue_png.convert('RGB')
# NOTE(review): this opens the same file as menu_blue_png — probably meant
# "images/menu_blue2.png"; confirm the asset exists before changing.
menu_blue2_png = Image.open("images/menu_blue.png")
# NOTE(review): converts menu_blue_png rather than menu_blue2_png (suspected
# copy-paste slip), so menu_blue2 is currently identical to menu_blue and the
# second comparison in check_for_menu is redundant.
menu_blue2 = menu_blue_png.convert('RGB')
menu_grey_png = Image.open("images/menu_grey.png")
menu_grey = menu_grey_png.convert('RGB')
def check_for_menu():
    """Screenshot the expected menu-button location and classify it.

    Returns (found, ready): *found* is True when the menu button is at the
    expected place; *ready* is True only when it is the blue (active)
    variant rather than the greyed-out one.
    """
    x, y = menu_location()
    x, y = location_for_screenshot(x, y)
    shot = pyautogui.screenshot(region=(x, y, MENU_BUTTON_WIDTH, MENU_BUTTON_HEIGHT)).convert('RGB')
    shot.save("testmenu.png")
    # Either blue variant means the menu is present and clickable.
    for reference in (menu_blue, menu_blue2):
        if safe_image_compare(shot, reference):
            return True, True
    if safe_image_compare(shot, menu_grey):
        return True, False
    return False, False
def hunt_for_menu(level_images):
    """Scan around the mouse cursor for the blue Menu button and
    recalibrate the global window origin (top_x, top_y) from it.

    The user hovers near the menu button; a region around the cursor is
    screenshotted and every 30x30 crop is compared against the blue menu
    reference.  A hit is mapped back to a window-origin guess, which is
    then jittered +/-1 px until get_current_zone() recognises a zone icon.

    Returns (top_x, top_y, True) on success, (0, 0, False) otherwise.
    """
    global top_x, top_y
    pos = pyautogui.position()
    verbose_print("pos=%s" % str(pos))
    # x, y = location_for_screenshot(pos.x, pos.y)
    verbose_print("x,y=%d,%d" % (pos.x, pos.y))
    verbose_print("Configured top_x,top_y = %d,%d" % (top_x, top_y))
    off_x, off_y = 20, 20
    image_size = 30
    region = (screen_scale * (pos.x - off_x), screen_scale * (pos.y - off_y),
              screen_scale * (30+off_x), screen_scale * (30+off_y))
    verbose_print("region=%s" % str(region))
    im1 = pyautogui.screenshot(region=region)
    if verbose:
        im1.save("testmenu.png")
    im1 = im1.convert('RGB')
    found_x = 0
    found_y = 0
    for i in range(0,off_x*2):
        for j in range(0,off_y*2):
            im2 = im1.crop((i, j, i+30, j+30))
            if safe_image_compare(im2, menu_blue):
                if verbose:
                    im2.save("testfoundmenu.png")
                verbose_print("found i,j=%d,%d" % (i, j))
                # adjust for actual center of the image
                x, y = (pos.x-off_x)*2 + i + image_size/2, (pos.y-off_y)*2 + j + image_size/2
                verbose_print("center x,y=%f,%f" % (x, y))
                # the 31+8 / 75+5 offsets map the button center back to the
                # window origin -- empirically tuned, TODO confirm
                x, y = x/screen_scale - 31 - 8, y/screen_scale - 75 - 5
                x = int(x)
                y = int(y)
                verbose_print("Guess: x,y=%f,%f == top_x,top_y=%d,%d " % (x, y, top_x, top_y))
                found_x = x
                found_y = y
                break
        if found_x:
            break
    if not found_x:
        return 0, 0, False
    # Jitter
    for x_jitter in range(-1, 2, 1):
        for y_jitter in range(-1, 2, 1):
            top_x = found_x + x_jitter
            top_y = found_y + y_jitter
            verbose_print("trying jitter %d,%d => %d,%d" % (x_jitter, y_jitter, top_x, top_y))
            level, plus = get_current_zone(level_images=level_images, save=True, tries=1)
            if level > 0:
                print("Zone found %d (at start zone: %s), (on_boss: %s)" % (level, plus, on_boss()))
                return top_x, top_y, True
    return 0, 0, False
def enter_code(code):
    """Type each character of `code` with a short pause between keys."""
    for key in code:
        pyautogui.press(key)
        time.sleep(0.2)
def activate_app(app_name, tries=2, reset_top=False):
    """Bring the window titled `app_name` to the foreground.

    Retries up to `tries` times.  When `reset_top` is True and activation
    succeeds, the global window origin (top_x, top_y) is refreshed from
    the window frame.  Returns the active window object on success,
    False otherwise.
    """
    for c in range(0,tries):
        try:
            window = gw.getWindowsWithTitle(app_name)[0]
            window.activate()
            time.sleep(0.2)
            active = gw.getActiveWindow()
            if active.title == app_name:
                if reset_top:
                    global top_x, top_y, top_offset
                    top_x, top_y = active.left+1, active.top+top_offset
                    verbose_print("Updating top_x, top_y = %d,%d" % (top_x, top_y))
                return active
            if active.title == "":
                # active menu is a pull down or some crap ... move to a neutral corner
                pyautogui.moveTo(500,500)
            verbose_print("window title: %s try again" % gw.getActiveWindow().title)
        except gw.PyGetWindowException as a:
            # print("%s not found, starting at %s" % (APP_NAME, datetime.datetime.now()))
            verbose_print("WARNING: %s: %s" % (app_name, a, ))
        # NOTE: identical handling to the PyGetWindowException arm above;
        # kept separate in case the behaviors are meant to diverge later.
        except Exception as a:
            # print("%s not found, starting at %s" % (APP_NAME, datetime.datetime.now()))
            verbose_print("WARNING: %s: %s" % (app_name, a, ))
    return False
# TODO: group/sort these according to target zone so we find zone quicker when at the end
def load_level_images():
    """Load every levels/*.png reference icon, keyed by its file path.

    Each icon is normalised to RGB and cropped to the 60x56 match area.
    """
    icons = {}
    for path in glob.glob('levels/*.png'):
        debug_print(path)
        loaded = Image.open(path).convert('RGB')
        icons[path] = loaded.crop((0, 0, 60, 56))
    return icons
# Screen geometry for the zone-number icon; the offsets appear to map the
# located menu position to the icon position in physical pixels (see
# get_menu, which uses the same 1829/14 values).
OFFSET_xx1 = 1829
OFFSET_Y = 14
IMAGE_WIDTH = 60   # level icon width in pixels
IMAGE_HEIGHT = 56  # level icon height in pixels
# TODO: LEGACY set top_x and top_x by finding menu
def get_menu(tries=10, update=False):
    """Locate the Menu button and return its physical (x, y) position.

    Retries up to `tries` times with a 1 second pause between attempts.
    `update` is currently unused (kept for interface compatibility).

    Raises:
        RuntimeError: when the menu cannot be located.  (Previously the
        function fell off the end and returned None, so callers doing
        `x, y = get_menu()` failed with an opaque unpack TypeError.)
    """
    for i in range(0, tries):
        try:
            # menu_home = locate('menu.png', region=(0,0,400,400))
            menu_home = locate('menu_blue.png', 'menu_grey.png')
            # logical coords -> physical, plus the empirically measured
            # icon offsets (shared module constants instead of magic 1829/14)
            x = menu_home.x * 2 + OFFSET_xx1
            y = menu_home.y * 2 + OFFSET_Y
            return x, y
        except Exception:
            time.sleep(1)
    raise RuntimeError("get_menu: menu button not found after %d tries" % tries)
# TODO: make this work
def verify_menu(tries=10, update=False):
    """Confirm the Menu button is visible, hunting on-screen if needed.

    When a window origin (top_x/top_y) is known, polls check_for_menu();
    otherwise falls back to image search across the whole screen.
    Returns True when the menu is confirmed; returns None (falsy) when
    all tries are exhausted.  `update` is currently unused.
    """
    menu_blue_nr = Image.open("menu_blue_nr.png")
    verbose_print("Verifying menu ...")
    for i in range(0,tries):
        # First check using existing top_x, top_y (if exists)
        if top_x != 0 or top_y != 0:
            found, ready = check_for_menu()
            verbose_print("Verifying menu found=%s,ready=%s" % (found, ready))
            if found or ready:
                return True
        else:
            # Image hunt!
            try:
                menu_home = locate('menu_blue.png', 'menu_grey.png')
                # x, y = location_for_screenshot(x, y)
                # x, y = menu_location()
                #
                # found ... all good!
                if menu_home:
                    print("menu_home=%s x,y=%d,%d" % (menu_home, menu_home.x, menu_home.y))
                verbose_print("Verifying menu: locateAll with Image")
                positions = pyautogui.locateAllOnScreen(menu_blue_nr)
                if positions:
                    for pos in positions:
                        print("locateAll: x,y=%d,%d" % (pos.left, pos.top))
                verbose_print("Verifying menu: locateAll with filename")
                positions = pyautogui.locateAllOnScreen("./menu_blue_nr.png")
                if positions:
                    for pos in positions:
                        print("locateAll: x,y=%d,%d" % (pos.left, pos.top))
                verbose_print("Verifying menu: locate with filename")
                return True
            except Exception as e:
                print("image hunt %s" % e)
def get_level_region():
    """Return the screenshot region covering the first zone icon."""
    # grab the first zone icon, nudged one pixel to line up with references
    left, top, width, height = region_for_screenshot(956, 90, 30, 28)
    return (left + 1, top - 1, width, height)
# Reference icons loaded once at import: the boss-zone icon and the
# zone-complete marker, both normalised to RGB.
boss = Image.open("levels/bosss.png").convert('RGB')
zone = Image.open("images/zone_complete.png").convert('RGB')
def on_boss(save_images=False, fast=True):
    """Return True when the current zone is a boss zone.

    Screenshots the boss-icon region and compares it against the `boss`
    reference via safe_image_compare().

    FIX: `fast` now controls the temporary pyautogui pause/fail-safe
    bypass around the screenshot, mirroring zone_complete(); previously
    that was keyed off `save_images`, which left `fast` unused and the
    restore unconditional.  `save_images` only dumps debug images now.
    """
    # grab boss icon, on boss if it is black
    region = region_for_screenshot(1154, 93, 22, 22)
    pause = pyautogui.PAUSE
    if fast:
        # skip pyautogui's built-in delay/fail-safe while grabbing the shot
        pyautogui.FAILSAFE = False
        pyautogui.PAUSE = 0.0
    im1 = pyautogui.screenshot(region=region).convert('RGB')
    if fast:
        pyautogui.PAUSE = pause
        pyautogui.FAILSAFE = True
    if save_images:
        im1.save("onboss.png")
        boss.save("theboss.png")
        ImageChops.difference(im1, boss).save("bossdiff.png")
    return safe_image_compare(im1, boss)
def zone_complete(save_images=False, fast=True):
    """Return True when the zone-complete marker is visible on screen.

    Samples a small strip near the boss icon and compares it to the
    `zone` reference by summed channel-mean difference.  `fast` bypasses
    pyautogui's global pause/fail-safe around the screenshot;
    `save_images` dumps the capture and diff for debugging.
    """
    region = region_for_screenshot(1154 + 75, 93 - 25, 5, 10)
    saved_pause = pyautogui.PAUSE
    if fast:
        pyautogui.FAILSAFE = False
        pyautogui.PAUSE = 0.0
    shot = pyautogui.screenshot(region=region).convert('RGB')
    if save_images:
        shot.save("zonetest.png")
        zone.save("zonefound.png")
    diff = ImageChops.difference(shot, zone)
    if save_images:
        diff.save("diffdiff.png")
    stat = ImageStat.Stat(diff)
    if fast:
        pyautogui.PAUSE = saved_pause
        pyautogui.FAILSAFE = True
    return (stat.mean[0] + stat.mean[1] + stat.mean[2]) < MAX_IMAGE_MEAN
# object to support finding images by index
class LevelFinder(object):
    """Match the on-screen zone icon against the levels/*.png references.

    Keeps a sorted list of level keys and remembers the last matched
    index, so subsequent lookups usually succeed within a couple of
    comparisons as the game advances through zones.
    """
    levels = []   # sorted level keys, e.g. "123" / "123s"
    images = {}   # level key -> cropped RGB reference image
    black = None  # all-black reference (screen transition frame)
    index = 0     # index of the most recent match; search start point
    def load_level_images(self):
        """(Re)load all reference icons from levels/*.png.

        Keys are the file names minus the "levels/" prefix and ".png"
        suffix; "boss"/"bosss" are skipped and "black" is kept separately.
        Returns the populated images dict.
        """
        self.levels = []
        self.images = {}
        for f in glob.glob('levels/*.png'):
            debug_print(f)
            # strip "levels/" prefix and ".png" suffix
            key = f[7:][:-4]
            if key == "bosss" or key == "boss":
                continue
            if key == "black":
                self.black = Image.open(f).convert('RGB').crop((0, 0, 60, 56))
                continue
            self.images[key] = Image.open(f).convert('RGB').crop((0, 0, 60, 56))
            self.levels.append(key)
        self.total_images = len(self.levels)
        self.levels.sort()
        return self.images
    def __init__(self):
        self.index = 0
        self.total_images = 0
        self.load_level_images()
    def get_current_zone(self, save=False, tries=LEVEL_TRYS):
        """Screenshot the zone icon and return (level, plus).

        `plus` is True unless the key ends in 's' (zone-start variant).
        Black transition frames are retried; returns (-1, False) when no
        reference matches within `tries` attempts.
        """
        im = None
        for i in range(0, tries):
            verbose_print("get_current_zone attempt %d" % i)
            region = get_level_region()
            raw_im = pyautogui.screenshot(region=region)
            im = raw_im.convert('RGB')
            # check if black first ...
            diff = ImageChops.difference(im, self.black)
            stat = ImageStat.Stat(diff)
            if (stat.mean[0] + stat.mean[1] + stat.mean[2]) < MAX_IMAGE_MEAN:
                time.sleep(.1)
                continue
            # start search at last index ...
            for idx in range(0, self.total_images):
                key = self.levels[(self.index + idx) % self.total_images]
                img = self.images[key]
                diff = ImageChops.difference(im, img)
                stat = ImageStat.Stat(diff)
                if (stat.mean[0] + stat.mean[1] + stat.mean[2]) < MAX_IMAGE_MEAN:
                    try:
                        level = int(key[:3])
                        plus = (key[-1:] != 's')
                        self.index = (self.index + idx) % self.total_images
                        # step back so the 's' variant is re-tried next call
                        if not plus:
                            self.index -= 1
                        # print("idx = %d" % idx)
                        return level, plus
                    except Exception:
                        break
            if save:
                im.save('my_screenshot%d.png' % i)
            time.sleep(.1)
        return -1, False
def get_current_zone(level_images, save=False, tries=LEVEL_TRYS):
    """Legacy linear-scan version of LevelFinder.get_current_zone().

    Compares the zone-icon screenshot against every image in
    `level_images` (keyed by "levels/NNN[s].png" paths) and returns
    (level, plus); (-1, False) when nothing matched.
    NOTE(review): uses a hard-coded 20.0 threshold where the rest of the
    file uses MAX_IMAGE_MEAN -- confirm which is intended.
    """
    im = None
    for i in range(0,tries):
        verbose_print("get_current_zone attempt %d" % i)
        region = get_level_region()
        raw_im = pyautogui.screenshot(region=region)
        im = raw_im.convert('RGB')
        for name, img in level_images.items():
            diff = ImageChops.difference(im, img)
            stat = ImageStat.Stat(diff)
            if (stat.mean[0] + stat.mean[1] + stat.mean[2]) < 20.0:
                # name[7:10] slices the zone number out of "levels/NNN..."
                match = name[7:10]
                if match == "bla" or match == "bos":
                    break
                try:
                    level = int(name[7:10])
                    plus = (name[10:11] != 's')
                    return level, plus
                except Exception:
                    break
        if save:
            im.save('my_screenshot%d.png' % i)
        time.sleep(.1)
    return -1, False
def get_current_level(x, y, level_images, save=False):
    """Match the 60x56 icon at screen (x, y) against `level_images`.

    Returns (level, plus); (-1, False) when nothing matches within
    LEVEL_TRYS attempts.
    NOTE(review): here `plus` is (name[10:11] == 's'), the OPPOSITE
    polarity of get_current_zone() above -- confirm which is correct.
    """
    im = None
    for i in range(0,LEVEL_TRYS):
        verbose_print("Current level attempt %d" % i)
        im = pyautogui.screenshot(region=(x, y, 60, 56))
        for name, img in level_images.items():
            diff = ImageChops.difference(im.convert('RGB'), img)
            stat = ImageStat.Stat(diff)
            if (stat.mean[0] + stat.mean[1] + stat.mean[2]) < MAX_IMAGE_MEAN:
                match = name[7:10]
                if match == "bla" or match == "bos":
                    break
                try:
                    level = int(name[7:10])
                    plus = (name[10:11] == 's')
                    return level, plus
                except Exception:
                    break
        if save:
            im.save('my_screenshot%d.png' % i)
        time.sleep(.1)
    return -1, False
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer as a bool.

    `default` is the presumed answer when the user just presses <Enter>;
    it must be "yes", "no" or None (None forces an explicit answer).
    Raises ValueError for any other default.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def load_player_json():
    """Parse Steam's webRequestLog.txt and return the JSON records that
    mention "current_area" (one parsed dict per matching line)."""
    log_path = os.path.join(
        Path.home(),
        "Library/Application Support/Steam/steamapps/common/IdleChampions",
        "IdleDragonsMac.app/Contents/Resources/Data/StreamingAssets",
        "downloaded_files/webRequestLog.txt")
    with open(log_path, "r") as log:
        return [json.loads(line) for line in log if "current_area" in line]
# repair a broken desktop shortcut
def repair_shortcut():
    """Rebuild the Idle Champions desktop shortcut bundle (macOS).

    Copies the .icns icon from the installed app and a canned Info.plist
    into the shortcut bundle.  Returns False when either the app install
    or the shortcut bundle is missing, True on success.
    """
    # short cut destination
    idle_path = os.path.join(Path.home(), config.get("idler", "steam_app_path"))
    if not os.path.isdir(idle_path):
        # FIX: these prints passed the path as a second print() argument
        # instead of %-formatting it, so a literal "%s" was printed.
        print("ERROR: app path is incorrect: %s" % idle_path)
        print("ERROR: check that Idle Champions is installed")
        return False
    short_cut = os.path.join(Path.home(), config.get("idler", "shortcut_path"))
    if not os.path.isdir(short_cut):
        print("ERROR: short cut path is missing: %s" % short_cut)
        print("ERROR: create the Idle Champions shortcut in Steam")
        return False
    # cp .icns file
    icns_source = os.path.join(idle_path, config.get("idler", "steam_icns"))
    icns_dest = os.path.join(short_cut, config.get("idler", "shortcut_icns"))
    verbose_print("copying %s to %s" % (icns_source, icns_dest))
    shutil.copy(icns_source, icns_dest)
    # cp info.plst
    info_source = "./documentation/Info.plist"
    info_dest = os.path.join(short_cut, "Contents/Info.plist")
    verbose_print("copying %s to %s" % (info_source, info_dest))
    shutil.copy(info_source, info_dest)
    # explicit success, for symmetry with the False failure paths
    return True
def dump_stats(args, player_stats):
    """Print blacksmith/bounty contract totals from the player stats.

    Scans `player_stats` for the first block carrying details.buffs,
    tallies the contract buffs by id and prints ilvl / token summaries.

    Args:
        args: CLI namespace (unused; kept for interface compatibility).
        player_stats: list of stat dicts parsed from webRequestLog.

    FIX: previously crashed with a TypeError when no stat block had a
    "details" entry; now prints a notice and returns instead.
    """
    # buff_id -> inventory count (blacksmith 31-34, bounty 17-20)
    counts = {
        "31": 0, "32": 0, "33": 0, "34": 0,  # blacksmith tiny/small/medium/large
        "17": 0, "18": 0, "19": 0, "20": 0,  # bounty tiny/small/medium/large
    }
    # check which line it is in:
    buffs = None
    for stat_block in player_stats:
        if "details" in stat_block:
            buffs = stat_block["details"]["buffs"]
            break
    if buffs is None:
        print("No buff details found in player stats")
        return
    for buff in buffs:
        buff_id = buff.get("buff_id")
        if buff_id in counts:
            # assignment (not +=) preserves the original last-wins behavior
            counts[buff_id] = int(buff["inventory_amount"])
    bs_tiny, bs_small, bs_medium, bs_large = counts["31"], counts["32"], counts["33"], counts["34"]
    bc_tiny, bc_small, bc_medium, bc_large = counts["17"], counts["18"], counts["19"], counts["20"]
    ilvls = bs_tiny * 1 + bs_small*2 + bs_medium * 6 + bs_large * 24
    print("Blacksmith Contracts: %d ilvls" % ilvls)
    print("  tiny=%d x 1 + small=%d x 2 + medium=%d x 6 + large=%d x 24 = %d ilvls" % (
        bs_tiny, bs_small, bs_medium, bs_large, ilvls,
    ))
    tokens = bc_tiny*12 + bc_small*72 + bc_medium * 576 + bc_large * 1152
    runs = tokens / 2500
    print("Bounty Contracts: %d tokens (%d free play runs)" % (tokens, runs))
    print("  tiny=%d x 12 + small=%d x 72 + medium=%d x 576 + large=%d x 1152 = %d tokens (%d runs)" % (
        bc_tiny, bc_small, bc_medium, bc_large, tokens, runs
    ))
# Dangerous, you will accept screenshare from whomever asks ...
# TODO: Need to add an image check for person intended.
def accept_screen_share(is_on):
    """Blindly click the screen-share accept dialog when `is_on` is True.

    Uses hard-coded screen coordinates for the notification and its
    accept button -- see the warning above.
    """
    if not is_on:
        return
    pyautogui.moveTo(1400, 50, duration=0.0)
    pyautogui.click()
    time.sleep(1.0)
    pyautogui.moveTo(924, 300, duration=0.0)
    pyautogui.click()
def locate(png_name, png_name2=None, click_image_index=0, search_region=None, can_swap=True, screen_shot=None):
    """Find a reference image on screen and return its logical center Point.

    Tries the resolution prefix `first_prefix` first; on failure the two
    prefixes are swapped (at most once per call chain) so the matching
    resolution is tried first next time.  `png_name2` is an alternate
    image consulted before giving up.  Returns None when nothing matches.
    """
    verbose_print("locating %s" % str(png_name))
    global first_prefix, second_prefix
    if not screen_shot:
        screen_shot = pyautogui.screenshot(region=search_region)
        screen_shot.save("test"+png_name)
    # FIX: x_off/y_off were previously only assigned when search_region was
    # given, so every successful match without one raised NameError (which
    # the except below silently swallowed).
    x_off = 0
    y_off = 0
    if search_region:
        x_off = search_region[0]
        y_off = search_region[1]
    try:
        if click_image_index > 0:
            # FIX: pyautogui's keyword is `grayscale` (bool); the old
            # `greyscale=0.9` raised TypeError on every call.
            # NOTE(review): 0.9 may have been meant as an OpenCV
            # `confidence` value -- confirm.
            positions = pyautogui.locateAll(first_prefix+png_name,
                                            screen_shot,
                                            grayscale=True,
                                            )
            positions = list(positions)
            box = positions[click_image_index]
            by2 = pyautogui.Point((x_off+box.left+(box.width/2)) / 2, (y_off+box.top+(box.height/2)) / 2)
        else:
            box = pyautogui.locate(first_prefix+png_name,
                                   screen_shot,
                                   grayscale=True,
                                   )
            by2 = pyautogui.Point((x_off+box.left+(box.width/2)) / 2, (y_off+box.top+(box.height/2)) / 2)
        verbose_print("locate(%s) = %s" % (png_name, str(by2)))
        return by2
    except Exception as e:
        verbose_print("locate(%s) = %s" % (png_name, str(e)))
    # only recurse once per image ...
    if not can_swap:
        if png_name2:
            return locate(png_name2,
                          click_image_index=click_image_index,
                          search_region=search_region,
                          can_swap=True,
                          screen_shot=screen_shot)
        return None
    # swap so we find the right resolution faster next time (won't swap if second also raises)
    verbose_print("swapping from %s to %s" % (first_prefix, second_prefix))
    first_prefix, second_prefix = second_prefix, first_prefix
    return locate(png_name,
                  png_name2=png_name2,
                  click_image_index=click_image_index,
                  search_region=search_region,
                  can_swap=False, screen_shot=screen_shot)
def drag_image(png_name, delta_x, delta_y, duration=DEFAULT_DRAG, delay=DEFAULT_DELAY):
    """Locate `png_name` on screen and drag it by (delta_x, delta_y).

    NOTE(review): locate() can return None, in which case start.x below
    raises AttributeError -- confirm callers guarantee a match.
    """
    start = locate(png_name)
    pyautogui.moveTo(start.x, start.y)
    pyautogui.mouseDown(x=start.x, y=start.y, button=pyautogui.LEFT)
    pyautogui.dragRel(delta_x, delta_y, duration=duration, button=pyautogui.LEFT, mouseDownUp=False)
    pyautogui.mouseUp(button=pyautogui.LEFT)
    time.sleep(delay)
    verbose_print("Location: %s" % str(start))
    # print("%s" % str(button)
    # pyautogui.click(button, clicks=2)
    return "Dragged {0}".format(png_name)
def goto_image(png_name, png_name2=None, delay=0.5):
    """Move the cursor to an on-screen image match without clicking."""
    return click_image(png_name, png_name2=png_name2, delay=delay, click=False)
def click_image(png_name, png_name2=None, delay=0.5, click=True, click_image_index=0):
    """Move to (and optionally click) the on-screen match for an image.

    `png_name2` is only consulted when locating the first image raises.
    Returns "" when nothing was found, "Moved" when click=False, or
    "Clicked <name>" after a successful click.
    """
    global verbose
    target = None
    try:
        target = locate(png_name, click_image_index=click_image_index)
    except Exception:
        if png_name2:
            try:
                target = locate(png_name2, click_image_index=click_image_index)
            except Exception:
                pass
    if not target:
        return ""
    if verbose:
        print("Location: %s" % str(target))
    pyautogui.moveTo(target.x, target.y)
    time.sleep(delay)
    if not click:
        return "Moved"
    pyautogui.click()
    time.sleep(delay)
    return "Clicked {0}".format(png_name)
def check_crashed_app():
    """Detect and dismiss the macOS crash-report dialog, then relaunch.

    Returns False when no crash dialog is present; otherwise closes the
    dialog, clicks through the OK flow and restarts the game (implicit
    None return in that case).
    """
    try:
        window = gw.getWindowsWithTitle("Problem Report for Idle Champions")[0]
    except Exception:
        window = None
    if not window:
        return False
    print("Detected Crash!")
    # window.activate()
    window.close()
    # click [OK]
    click_ok()
    startup_idle_champions()
def restart_steam():
    """Quit Steam via CMD-Q and relaunch it from /Applications.

    Exits the process (sys.exit) when the Steam app bundle is missing or
    cannot be opened.
    """
    print("Quitting Steam")
    try:
        app = activate_app("Steam")
        if app:
            debug_print("App for CMD-q %s" % app.title)
            debug_print("Sending CMD-q")
            pyautogui.hotkey('command', 'q', interval=0.1)
            # pyautogui.keyDown('command')
            # pyautogui.press('q')
            # pyautogui.keyUp('command')
    except Exception as e:
        debug_print("ERROR: CMD-q to Steam.app: %s" % e)
    time.sleep(10.0)
    short_cut = "/Applications/Steam.app"
    try:
        if not os.path.exists(short_cut):
            # FIX: the old format string had no %s placeholder, so the
            # "%" operator itself raised TypeError on this error path.
            print("ERROR: Path to Steam.app is incorrect: %s" % short_cut)
            sys.exit(1)
        result = os.system("open '%s'" % short_cut)
        verbose_print("open shortcut_path (%s) returns %s" % (short_cut, str(result)))
    except Exception as e:
        print("ERROR: could not launch %s" % short_cut)
        print("ERROR: %s" % str(e))
        sys.exit(1)
    time.sleep(10.0)
def shutdown_app(keyboard=True):
    """Quit Idle Champions, preferring a CMD-Q keystroke over window close.

    When `keyboard` is True and the app can be activated, a CMD-Q is sent
    and the function returns.  Otherwise every window with a matching
    title is examined and the exact match is closed.

    Raises:
        gw.PyGetWindowException: when no exact-title window can be closed.

    FIX: the "not an exact match" warning/raise used to fire inside the
    window loop, so only the first candidate window was ever examined;
    it now fires only after all windows have been checked.
    """
    if keyboard:
        verbose_print("Shutdown Idle Champions with CMD-Q")
        try:
            app = activate_app(APP_NAME)
            if app:
                debug_print("App for CMD-q %s" % app.title)
                debug_print("Sending CMD-q")
                pyautogui.hotkey('command', 'q', interval=0.1)
                return
        except Exception:
            # fall through to the window-close path
            pass
    verbose_print("Shutdown Idle Champions with close")
    try:
        windows = gw.getWindowsWithTitle(APP_NAME)
        for window in windows:
            if window.title == APP_NAME:
                window.close()
                time.sleep(20.0)
                return
            print("Warning: shutdown: '%s' not an exact match for '%s'" % (window.title, APP_NAME))
        raise gw.PyGetWindowException("No exact match for 'Idle Champions'")
    except Exception as e:
        raise gw.PyGetWindowException("ERROR: shutdown: '%s'" % e)
# Startup using Steam App
# Warning: will shutdown app if running!
def startup_idle_champions(tries=20):
    """Launch Idle Champions via Steam and wait for its window.

    Picks a launch strategy from config: opening the desktop shortcut,
    clicking a configured Play-button coordinate, or locating the Steam
    Play button by image.  Then polls ~20s for the app window and drives
    click_ok() to dismiss startup dialogs.  Returns True once the menu
    is reachable, False after `tries` full rounds.
    """
    # TODO: loop on this block until we find menu.png if not using preset top_x, top_y
    # Bring up steam
    print("Restarting Idle Champions")
    for attempt in range(0,tries):
        if config.getboolean("idler", "shortcut_restarting"):
            verbose_print("Starting app with shortcut")
            try:
                short_cut = os.path.join(Path.home(), config.get("idler", "shortcut_path"))
                if not os.path.exists(short_cut):
                    print("ERROR: create a %s desktop short cut using Steam" % short_cut)
                    sys.exit(1)
                result = os.system("open '%s'" % short_cut)
                verbose_print("open shortcut_path (%s) returns %s" % (short_cut, str(result)))
            except Exception as e:
                print("ERROR: could not launch %s" % short_cut)
                print("ERROR: %s" % str(e))
                sys.exit(1)
        elif config.getboolean("idler", "shortcut_start_xy"):
            # TODO: fall back to click_image if this fails
            x = config.getint("steam", "start_x")
            y = config.getint("steam", "start_y")
            pyautogui.moveTo(x, y)
            time.sleep(0.1)
            pyautogui.click()
            time.sleep(1.0)
        else:
            verbose_print("Looking for the steam app")
            # move mouse to top corner
            steam = activate_app("Steam")
            # click [Play] or [Stop]
            verbose_print("Clicking Play/Stop")
            # NOTE: start_with_image is more finicky that start with x,y
            if config.getboolean("steam", "start_with_image"):
                click_image("steam_play.png")
        # now restore the app to front
        print("Waiting for Idle to launch.")
        found_app = False
        ignore_errors = 20
        for s in range(40, 0, -1):
            verbose_print("  %d seconds" % (s/2))
            time.sleep(0.5)
            # bring to front
            try:
                windows = gw.getWindowsWithTitle(APP_NAME)
                for window in windows:
                    if window.title == APP_NAME:
                        found_app = activate_app(APP_NAME, reset_top=True)
                    # NOTE(review): this raise fires on the first window,
                    # even after a successful activation; it acts as a loop
                    # break (caught just below) but the message is
                    # misleading -- confirm intent.
                    raise gw.PyGetWindowException("No exact match for 'Idle Champions'")
            except gw.PyGetWindowException as a:
                if s <= ignore_errors:
                    print("Not found yet: %s: %s" % (datetime.datetime.now(), a))
                else:
                    verbose_print("Not found yet: %s: %s" % (datetime.datetime.now(), a))
            except Exception as a:
                if s <= ignore_errors:
                    print("Not found yet: %s: %s" % (datetime.datetime.now(), a))
                else:
                    verbose_print("Not found yet: %s: %s" % (datetime.datetime.now(), a))
            if found_app:
                break
        # click ok or find menu for 20 seconds
        if click_ok(startup=True, count=20, ic_app=found_app):
            return True
        # Try restarting Steam if this has been going on for a while
        if attempt > 10:
            restart_steam()
        # Try killing the app and trying again
        shutdown_app(True)
    return False
def click_ok(count=1, startup=False, ic_app=None):
    """Dismiss startup dialogs by clicking known OK locations until the
    menu button reports ready.

    Phase 1: repeatedly click remembered OK positions and poll
    check_for_menu(); returns True when the menu is ready, False when the
    menu is not even found.  Phase 2 (reached only if the menu stayed
    found-but-not-ready): hunt for okay.png / the menu image; may return
    an (x, y) tuple instead of a bool in that phase.
    `startup` is currently unused; `ic_app` is refreshed as a side effect.
    """
    # Look for an OK button
    found_ok = False
    move = 50
    # loop attempting a "smart" startup using remembered or hinted top_x, top_y
    known_okays = [
        # (635, 633), brings up sentry char sheet
        (635, 565),
        (635, 522),
        (635, 505),
        (635, 475),
        (635, 408),
        (750, 370),
    ]
    ready = False
    found_menu = False
    for s in range(count, 0, -1):
        if ready:
            return True
        # start by clicking on known OK locations to skip movies/okay seeking
        verbose_print("  Madly clicking on possible okay locations")
        for pair in known_okays:
            x, y = with_top_offset(pair[0], pair[1])
            pyautogui.moveTo(x, y, 0.1)
            pyautogui.click(x, y)
            time.sleep(0.1)
        # TODO: set top x, y if not using location hints
        # check for greyed our AND normal menu button, greyed out find okay, normal we're done!
        verbose_print("  Checking for menu button")
        found, ready = check_for_menu()
        if ready:
            return True
        # if found_menu:
        #     # second check, now need to manually hunt for Okay button
        #     break
        # found_menu = found
        if not found and count != 0:
            try:
                if gw.getActiveWindow().title != APP_NAME:
                    raise Exception("wrong window")
            except Exception as e:
                ic_app = activate_app(APP_NAME)
                time.sleep(0.5)
            time.sleep(0.5)
            return False
    # give up on fast method, now go looking for okay image and reset top_x, top_y using menu image
    for s in range(count, 0, -1):
        try:
            # NOTE(review): x/y come from the last phase-1 click above;
            # level_images is presumably a module global -- a NameError
            # here is swallowed by the except.  Confirm.
            found_level, plus = get_current_level(x, y, level_images, False)
            if found_level > 0:
                print("  Found %d level." % found_level)
                return x,y
        except Exception:
            pass
        if count > 0:
            time.sleep(1.0)
        try:
            x1, y1 = get_menu(1)
            # found! we can just leave now
            return x1, y1
        except Exception:
            pass
        if not found_ok:
            try:
                found_ok = click_image("okay.png")
                if found_ok:
                    time.sleep(2)
                    print("  Found okay button.")
            except Exception:
                pass
        pyautogui.moveRel(0, move)
        move = -move
        time.sleep(.8)
def foreground_or_start(tries=2):
    """Bring Idle Champions to the foreground, launching it if needed.

    When no window origin (top_x/top_y) is known yet, verify_menu() is
    used to establish one.
    """
    if not activate_app(APP_NAME, tries=tries, reset_top=True):
        startup_idle_champions()
    # Don't have top_x, top_y set? Figure it out!
    if top_x == 0 and top_y == 0:
        verify_menu()
def wrap_it_up():
    """Finish the current adventure: Complete, spam Skip, then Continue.

    Uses fixed game-relative coordinates; timings are tuned for the
    completion animation.
    """
    # Wait for animation before Continue ...
    foreground_or_start()
    time.sleep(0.5)
    pyautogui.press("r")
    time.sleep(0.9)
    click_offset(559, 491, duration=0.1, delay=0.1, tag="Click Complete")
    for i in range(0,30):
        # Click Skip like Crazy for a bit
        click_offset(1158, 650, duration=0.1, delay=0.1, tag="Click Skip")
        time.sleep(0.1)
    click_offset(635, 595, duration=0.1, delay=0.1, tag="Click Continue")
    time.sleep(5.5)
def wrap_it_up2(position):
    """Image-driven variant of wrap_it_up(): complete the adventure,
    skip the outro, then view the stats screen.

    `position` is the menu anchor Point used by menu_offset_click().
    Gives up silently after 40 attempts.
    """
    # Wait for animation before Continue ...
    attempt = 0
    complete = ""
    skipped = False
    while attempt < 40:
        print("attempt %s" % attempt)
        if not complete:
            foreground_or_start()
            time.sleep(0.5)
            pyautogui.press("r")
            time.sleep(0.5)
            complete = click_image('complete.png', 'complete2.png')
            if complete:
                print("Completed Adventure")
        if complete and not skipped:
            print("Skipping")
            # position = locate('menu.png')
            for _ in range(0, 16):
                menu_offset_click(position, 430, 120)
            skipped = True
        result = click_image('continue.png')
        if result:
            print("Viewed Adventure Stats")
            break
        time.sleep(0.5)
        attempt += 1
    time.sleep(1.5)
def start_it_up(adventure):
    """Start a new adventure from the city screen.

    Clicks through the adventure picker at fixed coordinates; the row
    clicked depends on whether `adventure` equals DEFAULT_ADVENTURE.
    """
    # Start mad wizard (one should work)
    # Click on city
    click_offset(324, 682, duration=0.1, delay=0.1, tag="Launch Adventure Picker")
    foreground_or_start()
    time.sleep(0.5)
    if adventure == DEFAULT_ADVENTURE:
        click_offset(366, 160, duration=0.1, delay=0.1, tag="Launch Mad Wizard")
    else:
        click_offset(366, 220, duration=0.1, delay=0.1, tag="Launch Terror")
    # time to settle (and for initial hit)
    time.sleep(0.5)
    click_offset(801, 558, duration=0.1, delay=0.1, tag="Click Start Objective")
def menu_offset(pos, x, y):
    """Return a Point at (x, y) relative to the menu anchor `pos`.

    The 1380/2 and 895/2 terms shift from the anchor to the window center.
    """
    return pyautogui.Point(pos.x + 1380 / 2 + x, pos.y + 895 / 2 + y)
def menu_offset_click(pos, x, y):
    """Click at the menu-anchored offset (x, y), then settle briefly."""
    target = menu_offset(pos, x, y)
    pyautogui.click(target.x, target.y)
    time.sleep(0.2)
def menu_offset_move(pos, x, y):
    """Glide the cursor to the menu-anchored offset (x, y) over 2 seconds."""
    target = menu_offset(pos, x, y)
    pyautogui.moveTo(target.x, target.y, 2.0)
    time.sleep(0.2)
def place_click_familiars(num_familiars):
    """Place click-damage familiars while holding the "f" modifier key.

    Places the damage leveler plus 1, 3 or 5 battlefield clickers
    depending on `num_familiars`.

    FIX: the early returns for small familiar counts previously skipped
    pyautogui.keyUp("f"), leaving the key held down; the key is now
    always released via try/finally.
    """
    pyautogui.keyDown("f")
    try:
        click_offset(180, 695, duration=0.1, delay=0.1, tag="Click Damage Leveler")
        click_offset(933, 240, duration=0.1, delay=0.1, tag="1st Battlefield Clicker")
        if num_familiars < 4:
            return
        click_offset(869, 325, duration=0.1, delay=0.1, tag="2nd Battlefield Clicker")
        click_offset(1000, 325, duration=0.1, delay=0.1, tag="3rd Battlefield Clicker")
        if num_familiars < 6:
            return
        click_offset(869, 391, duration=0.1, delay=0.1, tag="5th Battlefield Clicker")
        click_offset(1000, 391, duration=0.1, delay=0.1, tag="6th Battlefield Clicker")
    finally:
        pyautogui.keyUp("f")
def restart_stacking(args):
    """Restart the app after idling offline for args.charge seconds
    (offline Briv-stack charging)."""
    charge_time = args.charge
    shutdown_app(args.keyboard_shutdown)
    time.sleep(charge_time)
    startup_idle_champions()
def charge_briv(level, plus, images, args):
    """Recharge Briv's haste stacks by stepping back off the boss zone.

    Two modes: with args.restart the app is shut down for args.charge
    seconds (offline stacking); otherwise the bot walks back toward
    args.target - args.briv_recharge_areas and waits in-game,
    periodically re-foregrounding the app.  Always returns True.
    `images` is currently unused in this body.
    """
    screenshare = args.screenshare
    charge_time = args.charge
    briv_target = args.target - args.briv_recharge_areas
    restart = args.restart
    print("Recharging Briv starting at %s" % (datetime.datetime.now()))
    GO_BACK_DELAY=2.0
    pyautogui.press("g")
    time.sleep(0.5)
    pyautogui.press("w")
    # time to settle
    time.sleep(2.0)
    # restart charging ... so good
    if restart:
        if on_boss():
            verbose_print("  %d & boss; go back one" % level)
            pyautogui.press("left")
            time.sleep(GO_BACK_DELAY)
        shutdown_app(args.keyboard_shutdown)
        accept_screen_share(screenshare)
        time.sleep(charge_time)
        startup_idle_champions()
        time.sleep(5.0)
    # manual charging ... still better than a poke in the eye with a sharp stick
    else:
        # make sure we are not on a boss or zone without a spinner
        while True:
            verbose_print("charge_briv %d %s" % (level, plus))
            if on_boss():
                verbose_print("  %d & boss; go back one" % level)
                pyautogui.press("left")
                time.sleep(GO_BACK_DELAY)
                break
            elif level == briv_target:
                verbose_print("  Just go for it %d" % level)
                break
                # NOTE(review): the lines below are unreachable after the
                # break above; x/y/level_images are also not defined in
                # this scope -- confirm whether this branch should walk
                # back and re-read the level.
                pyautogui.press("left")
                time.sleep(GO_BACK_DELAY)
                try:
                    level, plus = get_current_level(x, y, level_images, False)
                except Exception:
                    break
            elif level == briv_target + 6 and plus:
                pyautogui.press("left")
                time.sleep(GO_BACK_DELAY)
                pyautogui.press("left")
                time.sleep(GO_BACK_DELAY)
                break
            else:
                verbose_print("  Done")
                break
        charging = charge_time
        while charging > 0.0:
            verbose_print("Charging Briv: %f more seconds" % (charging))
            if charging > CHARGE_INTERVAL:
                accept_screen_share(screenshare)
                foreground_or_start()
                if on_boss():
                    print("%d & boss; go back one" % level)
                    pyautogui.press("left")
                time.sleep(CHARGE_INTERVAL)
                charging -= CHARGE_INTERVAL
            else:
                time.sleep(charging)
                break
    # start going forward again ... why is this sooooooo slow
    print("Resuming ...")
    foreground_or_start()
    pyautogui.press("left")
    time.sleep(1.5)
    pyautogui.press("q")
    time.sleep(0.25)
    pyautogui.press("g")
    time.sleep(0.25)
    pyautogui.press("q")
    return True
def remove_familiars(position, ult):
    """Double-click the familiar-removal slot; the x offset shifts for
    champions whose ult button sits in slot 4 or 5."""
    pyautogui.keyDown("f")
    time.sleep(0.1)
    x_offset = 230 + {4: 90, 5: 120}.get(ult, 0)
    menu_offset_click(position, x_offset, 10)
    menu_offset_click(position, x_offset, 10)
    pyautogui.keyUp("f")
def place_other_familiars(position, familiars):
    """Assign remaining familiars to champion slots via menu offsets.

    NOTE(review): the keyUp/return pair below makes everything after it
    (binwin/Shandie/jarlaxle placement) unreachable -- presumably
    disabled on purpose; confirm before re-enabling.
    """
    pyautogui.keyDown("f")
    # place more click familiars
    # drag_image('familiar.png', 135, -135)
    if familiars >= 3:
        menu_offset_click(position, 135, -195)
    # drag_image('familiar.png', 275, -135)
    if familiars >= 4:
        menu_offset_click(position, 275, -195)
    # drag_image('familiar.png', 135, -195)
    if familiars >= 5:
        menu_offset_click(position, 135, -135)
    # drag_image('familiar.png', 275, -195)
    if familiars >= 6:
        menu_offset_click(position, 275, -135)
    # drag_image('familiar.png', 195, -255)
    if familiars >= 7:
        menu_offset_click(position, 195, -255)
    pyautogui.keyUp("f")
    return
    # binwin (slot 3)
    # drag_image('familiar.png', -225, 165)
    menu_offset_click(position, -225, 165)
    if familiars <= 8:
        return
    # Shandie (slot 7)
    drag_image('familiar.png', 100, 165)
    if familiars <= 9:
        return
    # jarlaxle or stoki (slot 4)
    drag_image('familiar.png', -120, 165)
    if familiars <= 10:
        return
    # Deekin (slot 1)
    # drag_image('familiar.png', -450, 165)
    pyautogui.keyUp("f")
# Game-relative positions of the specialization pick buttons, keyed
# "<choice>_of_<total>"; consumed by level_champ_with_keys() via
# click_spec_at().
SPECS = {
    "1_of_2": {"x": 515, "y": 585},
    "2_of_2": {"x": 760, "y": 585},
    "1_of_3": {"x": 384, "y": 585},
    "2_of_3": {"x": 635, "y": 585},
    "3_of_3": {"x": 885, "y": 585},
}
# Per-champion automation data consumed by level_champ_with_keys():
#   key:   F-key that levels the champion
#   bs:    number of F-key presses per leveling pass
#   as:    presumably an alternate press count -- unused in this chunk,
#          TODO confirm
#   spec:  SPECS entry for the champ's specialization button
#   short: presumably a CLI shorthand flag -- unused in this chunk
TEAM_DEFINITIONS = {
    # Speedsters
    "briv":    {"key": "f5", "bs": 19, "as": 30, "spec": "1_of_3", "short":"-B",},
    "shandie": {"key": "f6", "bs": 24, "as": 30, "spec": "1_of_3", "short":"-S",},
    "havi":    {"key": "f10", "bs": 21, "as": 0, "spec": "1_of_2", "short":"-H",},
    "deekin":  {"key": "f1", "bs": 16, "as": 0, "spec": "3_of_3", "short":"-D",},
    "melf":    {"key": "f12", "bs": 12, "as": 30, "spec": "2_of_3", "short":"-M",},
    "sentry":  {"key": "f4", "bs": 20, "as": 30, "spec": "2_of_3", "short":"-Y",},
    "hew":     {"key": "f8", "bs": 15, "as": 30, "spec": "2_of_2", "short":"-W",},
    # Extras
    "viper":   {"key": "f7", "bs": 12, "as": 30, "spec": "2_of_2", },
    "binwin":  {"key": "f3", "bs": 21, "as": 30, "spec": "2_of_2", },
    "drizzt":  {"key": "f9", "bs": 19, "as": 30, "spec": "1_of_2", },
    "omin":    {"key": "f3", "bs": 20, "as": 70, "spec": "2_of_3", },
    "jarlaxle":{"key": "f4", "bs": 12, "as": 30, "spec": "2_of_2", },
    # fix
    "minsc":   {"key": "f7", "bs": 4, "as": 30, "spec": "2_of_2", },
    "strix":   {"key": "f11", "bs": 16, "as": 30, "spec": "3_of_3", },
    "hitch":   {"key": "f7", "bs": 4, "as": 30, "spec": "2_of_2", },
}
def level_champ_with_keys(args, champ, between_champs=0.1):
    """Level one champion via its F-key, then pick its specialization.

    Returns the champ's F-key string, or None (after printing an error)
    when the champ has no TEAM_DEFINITIONS entry.  The specialization
    click is skipped when args.modron_specialization is set.
    """
    definition = TEAM_DEFINITIONS.get(champ)
    if definition is None:
        print('ERROR: champ "%s" has no definition for F Key leveling' % champ)
        return None
    verbose_print("Leveling %s %s" % (champ, definition))
    for _ in range(definition['bs']):
        pyautogui.press(definition['key'])
        time.sleep(DEFAULT_DELAY)
    if not args.modron_specialization:
        spec = SPECS[definition["spec"]]
        click_spec_at(spec["x"], spec["y"], delay=0.3, tag=champ)
    time.sleep(between_champs)
    return definition["key"]
def level_team_with_keys(args, team, between_champs=0.1):
    """Level an entire comma-separated `team` string via F-key presses.

    Shandie and Havi are leveled first, the rest in listed order; then
    the saved formation is loaded ("q"/"g") and extra leveling rounds are
    fired, plus Hew's ultimate when present.  Returns the list of F-keys
    used (may contain None entries for unknown champs).
    NOTE(review): `"shandie" in team` is a substring check on the raw
    string -- a champ name embedded in another token would false-positive.
    """
    have_shandie = ("shandie" in team)
    have_hew = ("hew" in team)
    leveling_keys = []
    if have_shandie:
        key = level_champ_with_keys(args, "shandie", between_champs=between_champs)
        leveling_keys.append(key)
    if "havi" in team:
        key = level_champ_with_keys(args, "havi", between_champs=between_champs)
        leveling_keys.append(key)
    # fire ult! once
    pyautogui.press("1")
    if have_shandie:
        pyautogui.press("2")
    for champ in team.split(','):
        champ = champ.strip()
        if champ in ["shandie", "havi"]:
            continue
        key = level_champ_with_keys(args, champ, between_champs=between_champs)
        leveling_keys.append(key)
    # TODO: wait here for shandie to start dashing ...
    # Load the Formation
    pyautogui.press('q')
    time.sleep(DEFAULT_DELAY)
    pyautogui.press('g')
    time.sleep(DEFAULT_DELAY)
    # more rounds of leveling based on those F keys
    for i in range(0, 20):
        for f_key in leveling_keys:
            pyautogui.press(f_key)
    if have_hew:
        for i in range(0, 20):
            pyautogui.press(args.hew_ult)
            time.sleep(0.1)
    return leveling_keys
def click_third_spec(delay=0.0):
    """Click the third "select" specialization button and nudge the
    cursor off it so it doesn't cover the UI."""
    if click_image("select.png", click_image_index=2):
        pyautogui.moveRel(0, -120, duration=0.1)
    time.sleep(delay)
def click_second_spec(delay=0.0):
    """Click the second "select" specialization button and nudge the
    cursor off it so it doesn't cover the UI."""
    if click_image("select.png", click_image_index=1):
        pyautogui.moveRel(0, -120, duration=0.1)
    time.sleep(delay)
def click_first_spec(delay=0.0):
    """Click the first "select" specialization button.

    NOTE(review): the code after the `return` below is unreachable --
    presumably left over from an older clicking strategy; confirm before
    removing.
    """
    click_image("select.png")
    pyautogui.moveRel(0, -120, duration=0.1)
    time.sleep(delay)
    return
    pyautogui.moveRel(550, 0, duration=0.1)
    for i in range(0, 8):
        pyautogui.click()
def click_with_position(image, target, offset_x=0, offset_y=0, click=True):
    """Move to `target` (locating `image` when target is falsy) and
    optionally click; returns the position that was used."""
    verbose_print("click_with_position(%s,%s)" % (image, str(target)))
    if not target:
        time.sleep(0.2)
        target = locate(image)
    pyautogui.moveTo(target.x+offset_x, target.y+offset_y, duration=0.0)
    time.sleep(0.1)
    if click:
        pyautogui.click()
        time.sleep(0.2)
    return target
def handle_extras(args):
    """Press the in-game F-key for every --F<n> flag set on *args*.

    Each of the booleans args.F1 .. args.F12 maps to one press of the
    corresponding "f1".."f12" key (one champ level per press). Presses
    happen in ascending order, exactly like the original if-chain.
    """
    for n in range(1, 13):
        if getattr(args, "F%d" % n):
            pyautogui.press("f%d" % n)
def get_bool_config(cfg, key, default):
    """Read cfg['idler'][key] as a boolean, returning *default* on any error.

    Recognizes the same truth strings as distutils.util.strtobool
    (y/yes/t/true/on/1 and n/no/f/false/off/0, case-insensitive).
    Rewritten inline because distutils is deprecated and removed in
    Python 3.12; any lookup/parse failure falls back to *default*,
    matching the original's broad except.
    """
    try:
        value = cfg['idler'][key].lower()
        if value in ("y", "yes", "t", "true", "on", "1"):
            return True
        if value in ("n", "no", "f", "false", "off", "0"):
            return False
        # Unrecognized strings fall through to the default, as before.
        raise ValueError("invalid truth value %r" % value)
    except Exception:
        return default
def add_champs_to_parser(parser):
    """Add a --<champ>/--no-<champ> store_true/store_false flag pair to
    *parser* for every champ in TEAM_DEFINITIONS (both write use_<champ>).

    BUG FIX: TEAM_DEFINITIONS is a dict (its .keys() is used elsewhere in
    this file); iterating it directly yields bare key strings, so the old
    `for name, v in TEAM_DEFINITIONS:` tuple-unpack raised ValueError.
    Iterate .items() instead.
    """
    for name, definition in TEAM_DEFINITIONS.items():
        lc = name.lower()
        parser.add_argument("--"+lc, help="Use "+name,
                            default=False,
                            dest="use_"+lc,
                            action="store_true")
        parser.add_argument("--no-"+lc, help="Don't use "+name,
                            dest="use_"+lc,
                            action="store_false")
def load_config():
    """Populate the global config by layering three config files.

    Order (later files override earlier ones):
      1. ./defaults.cfg  -- required; missing file aborts the script
      2. ./local.cfg     -- optional per-checkout overrides
      3. ~/.idler        -- optional, written by `./idler.py init`

    Also primes the global top_x/top_y window-corner hint when
    [idler] use_top_hint is enabled.
    """
    global config, top_x, top_y
    # Load defaults
    defaults = "./defaults.cfg"
    if not os.path.exists(defaults):
        print("Missing %s file" % defaults)
        # NOTE(review): exits with status 0 even though this is an error
        # path -- confirm nothing depends on the exit code before changing.
        sys.exit(0)
    config.read(defaults)
    # load local overrides
    local = "./local.cfg"
    if os.path.exists(local):
        config.read(local)
    # Get the .idler overrides, these should be what was created by ./idler.py init
    config_path = os.path.join(Path.home(), '.idler')
    if os.path.exists(config_path):
        config.read(config_path)
    if config.getboolean("idler", "use_top_hint"):
        top_x = config.getint("idler", "top_hint_x")
        top_y = config.getint("idler", "top_hint_y")
        verbose_print("Config top_x,top_y = %d,%d" % (top_x, top_y))
# object to support logging all of tracking logs to a permanent file
class Tee(object):
    """Duplicate everything written to stdout into a log file.

    Constructing a Tee replaces sys.stdout with the instance; the previous
    stdout is restored (and the log file closed) when the instance is
    garbage-collected.
    """
    def __init__(self, name, mode):
        # Open the log file first so a failure leaves sys.stdout untouched.
        self.file = open(name, mode)
        self.stdout = sys.stdout
        sys.stdout = self

    def __del__(self):
        # Restore the real stdout and release the file handle.
        sys.stdout = self.stdout
        self.file.close()

    def write(self, data):
        self.file.write(data)
        self.stdout.write(data)

    def flush(self):
        # BUG FIX: also flush the wrapped stdout, not just the log file,
        # so callers relying on flush() for timely console output work.
        self.file.flush()
        self.stdout.flush()
# object to support logging all of tracking logs to a permanent file
class Tracker(object):
    """Track per-run and per-session gem-farming statistics.

    start_tracking() logs the session start; start_loop() is called at the
    top of each farming loop and prints bosses-per-hour (BPH) figures for
    the last run and for the whole session. All output goes to stdout
    (typically duplicated via the Tee helper); *logfile* is opened but the
    tracker never writes to it directly in the visible code.
    """
    # Class-level defaults; the real values are assigned in __init__.
    file = None
    started = False
    verbose = False
    zones = None
    bosses_per_run = None
    start_of_session = None
    total_runs = 0
    start_of_run = None
    longest_run = None
    bosses_this_session = None

    def __init__(self, now, zones=0, verbose=False, logfile=None, log_mode="a"):
        self.start_of_session = None
        self.start_of_run = None
        self.zones = zones
        # Idle Champions spawns one boss every 5 zones.
        self.bosses_per_run = self.zones / 5
        self.bosses_this_session = 0
        self.total_runs = 0
        self.started = False
        # BUG FIX: verbose was accepted but never stored on the instance.
        self.verbose = verbose
        if logfile:
            self.file = open(logfile, log_mode)

    def elapsed(self, td):
        """Split a timedelta into (hours, minutes, seconds) floats."""
        seconds = td.total_seconds()
        hours = seconds // 3600
        minutes = (seconds % 3600) // 60
        seconds = seconds % 60
        return hours, minutes, seconds

    def start_loop(self, now, level, plus):
        """Record the start of a farming loop and print BPH statistics.

        The very first call only establishes the baseline timestamps;
        subsequent calls count a completed run and report run/session BPH.
        """
        if not self.started:
            self.start_of_run = now
            self.start_of_session = now
            self.started = True
            return
        self.total_runs += 1
        print("Loop %d started: %s: %d%s" % (self.total_runs, now, level, "+" if plus else ""))
        self.bosses_this_session += self.bosses_per_run
        run_elapsed = now - self.start_of_run
        run_bph = float(self.bosses_per_run) / float(run_elapsed.total_seconds()) * 60.0 * 60.0
        run_hours, run_minutes, run_seconds = self.elapsed(run_elapsed)
        session_elapsed = now - self.start_of_session
        session_bph = float(self.bosses_this_session) / float(session_elapsed.total_seconds()) * 60.0 * 60.0
        session_hours, session_minutes, session_seconds = self.elapsed(session_elapsed)
        print("Session: %d:%d:%d BPH: %.2f Run: %d:%d:%d BPH: %.2f" % (
            session_hours, session_minutes, session_seconds,
            session_bph,
            run_hours, run_minutes, run_seconds,
            run_bph,
        ))
        self.start_of_run = now

    def flush(self):
        # BUG FIX: flush() raised AttributeError when the Tracker was
        # created without a logfile; guard against self.file being None.
        if self.file:
            self.file.flush()

    def start_tracking(self, now, level, plus):
        """Log the start of a gem-farming session."""
        print("Gem farming session started: %s: with detected level %d%s" % (now, level, "+" if plus else ""))
# argparse epilog: quick usage examples shown at the end of --help.
# (Typo fix: "will by 50 bounties" -> "will buy 50 bounties".)
epilog="""Commands:
    The following commands are available:
    1. Gem Farming with or without Modron Automation (see README for more details):
        ./idler.py modron
        ./idler.py no-modron
    2. Buying bounties quickly, the following commands will buy 50 bounties of the given type:
        ./idler.py small 5
        ./idler.py medium 5
    3. Opening silver chests quickly, the following command will open 5 batches of 50 silver chests:
        ./idler.py silver 5
    4. Quick reset stacking, assuming Briv is at a level where he can no longer advance:
        ./idler.py --charge 15 stack 5
    """
def main_method():
global top_x, top_y, top_offset, debugging, verbose, infinite_loop
load_config()
# get defaults from config file
# have_briv = get_bool_config(config, "use_briv", have_briv)
# have_havilar = get_bool_config(config, "use_havilar", have_havilar)
# have_binwin = get_bool_config(config, "use_binwin", have_binwin)
# have_deekin = get_bool_config(config, "use_deekin", have_deekin)
# have_sentry = get_bool_config(config, "use_sentry", have_sentry)
# have_shandie = get_bool_config(config, "use_shandie", have_shandie)
# have_melf = get_bool_config(config, "use_melf", have_melf)
# have_hew = get_bool_config(config, "use_hew", have_melf)
steam_start_with_image = get_bool_config(config, "steam_start_with_image", True)
steam_start_x = get_bool_config(config, "steam_start_x", True)
default_charge_time = config.getfloat("idler", "briv_charge_time")
briv_restart_charging = config.getboolean("idler", "briv_restart_charging")
briv_boss_handling = config.getboolean("idler", "briv_boss_handling")
level_images = load_level_images()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(epilog)
)
parser.add_argument("--tee", help="Also send output to a logfile (appending)",
default=None,
type=str)
parser.add_argument("--keyboard-shutdown",
dest="keyboard_shutdown",
default=config.getboolean("idler", "keyboard_shutdown"),
help="Shutdown %s by sending CMD-Q" % APP_NAME, action="store_true")
parser.add_argument("--no-keyboard-shutdown", "--close",
dest="keyboard_shutdown",
help="Shutdown %s by closing the app." % APP_NAME,
action="store_false")
# meta
parser.add_argument("-m", "--mirt", help="Set reasonable defaults for a Mirt run (no Deekin)",
action="store_true")
parser.add_argument("-v","--vajra", help="Set reasonable defaults for a Vajra run (no Minsc)",
action="store_true")
parser.add_argument("-a", "--adventure", default=DEFAULT_ADVENTURE,
help="Adventure to run (madwizard, terror) (default %s)" % DEFAULT_ADVENTURE,
type=str)
parser.add_argument("-f", "--familiars", default=NUM_FAMILIARS,
help="How many familiars do you have (default %d)" % NUM_FAMILIARS, type=int)
parser.add_argument("--target", default=config.getint("idler", "modron_target"),
help="What zone is your Modron core set to restart (default %d)" % config.getint("idler", "modron_target"),
type=int)
parser.add_argument("--briv-recharge-areas", "--briv-areas", default=config.getint("idler", "briv_recharge_areas"),
help="How many areas before your Modron area goal should Briv start recharging (default is %s which works for Triple Skip Briv, use 15 for Quad skip Briv)" % config.getint("idler", "briv_recharge_areas"),
type=int)
parser.add_argument("--charge", default=default_charge_time,
help="Amount of time for Briv charging, either method (default %f)" % default_charge_time,
type=float)
parser.add_argument("--no-boss", default=default_charge_time,
help="Amount of time for Briv charging, either method (default %f)" % default_charge_time,
type=float)
#how to spec
parser.add_argument("--specialization", default=config.getboolean("idler", "modron_specialization"),
dest="modron_specialization",
help="Specialization automaticaly done by modron.",
action="store_true")
parser.add_argument("--no-specialization", "--fkeys",
dest="modron_specialization",
help="Specialization not automaticaly done by modron.",
action="store_false")
#skip boss
parser.add_argument("--briv-boss", default=briv_boss_handling,
dest="briv_boss",
help="Remove Briv if on a boss (Quad Briv) via formation 'e'",
action="store_true")
parser.add_argument("--no-briv-boss",
dest="briv_boss",
help="No special handling for Briv on bosses",
action="store_false")
#restart
parser.add_argument("--restart", default=briv_restart_charging,
dest="restart",
help="Briv charging via quit/restart",
action="store_true")
parser.add_argument("--no-restart", help="Briv charging by waiting.",
dest="restart",
action="store_false")
parser.add_argument("--charge-shandie", default=config.getint("idler", "charge_shandie"),
dest="charge_shandie",
help="Charge Shandie's dash on startup (default %d seconds)" % 0,
type=int)
parser.add_argument("--size", default="small",
help="Size of bounties to open (small or medium,default small)",
type=str)
parser.add_argument("--codes", default="codes.list",
help="List of codes to enter into the chest tool.",
type=str)
parser.add_argument("-r", "--runloops", default=GEM_LOOPS,
help="How many loops gem run (default %d)" % GEM_LOOPS,
type=int)
parser.add_argument("-l", "--level", default=DEFAULT_LEVEL_DELAY,
help="How many seconds to wait before leveling champs (default %d)" % DEFAULT_LEVEL_DELAY,
type=int)
parser.add_argument("--F1", help="Activate slot 1st hero (1 level).", action="store_true")
parser.add_argument("--F2", help="Activate slot 2nd hero (1 level).", action="store_true")
parser.add_argument("--F3", help="Activate slot 3rd hero (1 level).", action="store_true")
parser.add_argument("--F4", help="Activate slot 4th hero (1 level).", action="store_true")
parser.add_argument("--F5", help="Activate slot 5th hero (1 level).", action="store_true")
parser.add_argument("--F6", help="Activate slot 6th hero (1 level).", action="store_true")
parser.add_argument("--F7", help="Activate slot 7th hero (1 level).", action="store_true")
parser.add_argument("--F8", help="Activate slot 8th hero (1 level).", action="store_true")
parser.add_argument("--F9", help="Activate slot 9th hero (1 level).", action="store_true")
parser.add_argument("--F10", help="Activate slot 10th hero (1 level).", action="store_true")
parser.add_argument("--F11", help="Activate slot 11th hero (1 level).", action="store_true")
parser.add_argument("--F12", help="Activate slot 12th hero (1 level).", action="store_true")
parser.add_argument("--modron", help="Depend on Modron to reset and level.",
default=config.getboolean('idler', 'use_modron'),
dest="use_modron",
action="store_true")
parser.add_argument("--no-modron", help="Manual resetting of levels.",
dest="use_modron",
action="store_false")
parser.add_argument("--save_mismatch", help="When checking level, save any mismatches.",
action="store_true")
parser.add_argument("--in-progress", help="Start up with a game in progress.",
action="store_true")
parser.add_argument("-O", "--odds", help="Briv odds of jumping",
type=float, default=99.0)
parser.add_argument("--header", help="Height of the Idle Champions application header",
type=int,
default=config.getint("idler", "header_height"))
parser.add_argument("--countdown",
help="Seconds to wait before starting command (default %d)" % COUNTDOWN,
type=int,
default=COUNTDOWN, )
parser.add_argument("-c", "--confirm_buy", help="Confirm buying gold chests (skips Yes/No prompt).",
action="store_true")
parser.add_argument("-x", "--experimental", help="Don't use this.",
action="store_true")
parser.add_argument("--verbose", help="Debugging aid.", action="store_true")
parser.add_argument("--debug", help="Debugging aid, very noisy.", action="store_true")
parser.add_argument("--screenshare", "--ss",
help="Screen share accept active.",
action="store_true")
parser.add_argument('-F', '--formation', metavar='formation', type=str,
help="Formation key to use to set initial formations and familiars",
default=None)
parser.add_argument("--havi-ult", default=config.get('idler', 'havi_ult'),
help="Key that hits Havi's ult. (default %s)" % config.get('idler', 'havi_ult'),
type=str)
parser.add_argument("--hew-ult", default=config.get('idler', 'hew_ult'),
help="Key that hits Hews's ult. (default %s)" % config.get('idler', 'hew_ult'),
type=str)
# Commands and arguments
parser.add_argument('command', metavar='command', type=str, nargs="?",
help="""Action to perform (modron, stats, run, silver, stack, bounty, keep-alive)
run: loop on adventures for N minutes to acquire gems and/or patron currency
press: press the specified key every few seconds
buy: buy N gold chests """, default="stats")
parser.add_argument('loops', metavar='N', type=int, nargs="?",
help="""Argument (N) to the action (number of chests/minutes)""",
default=0)
parser.add_argument('extras', metavar='N', type=int, nargs="*",
help="""Argument (N+) to the action (e.g. bs contracts)""",
default=0)
args = parser.parse_args()
verbose = args.verbose
debugging = args.debug
verbose_print("Command = %s" % args.command)
debug_print("Debugging On")
top_offset = args.header
patron = "None"
speed_team = config.get("idler", "speed_team")
if args.tee:
Tee(args.tee, "a")
if args.vajra:
speed_team = config.get("idler", "vajra_speed_team")
patron = "Vajra"
if args.mirt:
speed_team = config.get("idler", "mirt_speed_team")
patron = "Mirt"
# Apply args to speed team
have_briv = False
if "briv" in speed_team:
have_briv = True
champs_list = []
if have_briv:
champs_list.append("briv")
if have_celeste:
champs_list.append("celeste")
if have_donaar:
champs_list.append("donaar")
if have_deekin:
champs_list.append("deekin")
if have_shandie:
champs_list.append("shandie")
if have_melf:
champs_list.append("melf")
if have_minsc:
champs_list.append("minsc")
if have_viper:
champs_list.append("viper")
if have_binwin:
champs_list.append("binwin")
if have_havilar:
champs_list.append("havilar")
if have_sentry:
champs_list.append("sentry")
if have_gold:
champs_list.append("[gold]")
champs = ",".join(champs_list)
if args.screenshare:
print("Sreenshare mode!")
if args.command == "pytest":
print("merged: %s" % list(pyautogui.locateAllOnScreen('./merged.png')))
print("merged2: %s" % list(pyautogui.locateAllOnScreen('./merged2.png')))
print("merged3: %s" % list(pyautogui.locateAllOnScreen('./merged3.png')))
sys.exit(0)
if args.command == "stats":
player_stats = load_player_json()
dump_stats(args, player_stats)
print("Champs you can put in your team:")
champs = ",".join([key for key in TEAM_DEFINITIONS.keys()])
print(" %s" % champs)
sys.exit(0)
if args.command == "init":
print("Configuring system, this will take a minute or two ...")
time.sleep(5)
init_config_path = os.path.join(Path.home(), '.idler')
init_config = configparser.ConfigParser(allow_no_value=True)
if os.path.exists(init_config_path):
print("Updating ~/.idler file")
init_config.read(init_config_path)
else:
print("Creating ~/.idler file")
if not config.getboolean("idler", "shortcut_restarting"):
print("Looking for the steam app")
# move mouse to top corner
steam = activate_app("Steam")
time.sleep(1)
# click [Play] or [Stop]
print("Looking for Play or Stop")
try:
location = locate("steam_play.png", "steam_stop.png")
if "steam" not in init_config:
init_config.add_section("steam")
init_config["steam"]["; middle pixel of the Idle Champions [play] button on Steam"] = None
init_config["steam"]["start_with_image"] = "no"
init_config["steam"]["start_x"] = str(int(location.x))
init_config["steam"]["start_y"] = str(int(location.y))
print("Found Steam Play/Stop Location: %s" % str(location))
except Exception as e:
print("Error finding Steam Play/Stop location: %s" % str(e))
print("Hover over the blue menu icon in the top left corner of the Idle Champions game. Do not click!")
time.sleep(5.0)
print("Looking for the %s app" % APP_NAME)
ic_app = activate_app(APP_NAME)
time.sleep(1)
for tries in range(0, 2):
try:
# location = locate("menu.png")
# top_x, top_y = top_location_from_menu(int(location.x), int(location.y))
print("Screen shot in ", end='')
for i in range(10,0,-1):
print('%d ...' % i, end='', flush=True)
time.sleep(1)
top_x, top_y, found = hunt_for_menu(level_images)
if not found:
continue
if "idler" not in init_config:
init_config.add_section("idler")
init_config["idler"]["; top left pixel of the app when launched"] = None
init_config["idler"]["use_top_hint"] = "yes"
init_config["idler"]["top_hint_x"] = str(top_x)
init_config["idler"]["top_hint_y"] = str(top_y)
print("Found app top x,y: %d,%d" % (top_x, top_y))
break
except Exception as e:
print("Error finding Menu Icon location: %s" % str(e))
print("Checking init with current zone ...")
level, plus = get_current_zone(level_images=level_images, save=True, tries=1)
if level > 0:
print("Zone found %d (at start zone: %s), (on_boss: %s)" % (level, plus, on_boss()))
else:
print("Zone not found, check again with ./idler.py zone")
print("Updating ~/.idler.py")
with open(init_config_path, 'w') as f:
f.write("# created by idler.py, a Idle Champions script engine\n")
f.write("# Warning: edit at on risk\n")
init_config.write(f)
sys.exit(0)
if args.command == "Tracker" or args.command == "Track":
print("Test Tracker ...")
try:
now = datetime.datetime.now()
tracker = Tracker(now=now-datetime.timedelta(minutes=11, seconds=12),
zones=args.target,
verbose=verbose,)
print("start track %s" % now)
tracker.start_tracking(now, 20, False)
print("start loop %s" % now)
tracker.start_loop(now, 221, False)
now = now + datetime.timedelta(minutes=11, seconds=12)
print("start loop %s" % now)
tracker.start_loop(now, 1, False)
now = now + datetime.timedelta(minutes=10, seconds=33)
print("start loop T %s" % now)
tracker.start_loop(now, 1, True)
now = now + datetime.timedelta(minutes=12, seconds=1)
print("start loop %s" % now)
tracker.start_loop(now, 6, False)
except Exception as e:
print("Error: %s" % str(e))
sys.exit(0)
if args.command == "teststeam":
restart_steam()
sys.exit(0)
if args.command == "testhunt":
verbose = True
print("Test Hunt for Menu ...")
print("Screen shot in ", end='')
for i in range(10,0,-1):
print('%d ...' % i, end='', flush=True)
time.sleep(1)
for round in range(0,5):
print("")
print("######## Round %d ############" % round)
found, ready = check_for_menu()
# x, y, found = hunt_for_menu(level_images)
if found:
print("Found %s, Ready %s" %(found, ready))
break
if round == 4:
break
print("Next screen shot in ", end='')
for i in range(5,0,-1):
print('%d ...' % i, end='', flush=True)
time.sleep(1)
sys.exit(0)
if args.command == "mouse":
print("You have 5 seconds to hover ...")
time.sleep(5)
pos = pyautogui.position()
print("raw mouse: %s" % str(pos))
off_x, off_y = print_reverse_without_offset(int(pos.x), int(pos.y))
print("offset from top_x,top_y = %d, %d" % (off_x, off_y))
sys.exit(0)
if args.command == "zone":
print("Looking for the %s app" % APP_NAME)
time.sleep(1)
found_app = activate_app(APP_NAME, reset_top=True)
time.sleep(1)
finder = LevelFinder()
level, plus = finder.get_current_zone(True)
print("Zone found %d (at start zone: %s), (on_boss: %s)" % (level, not plus, on_boss()))
if level <= 0:
print("Could not find zone, zone image saved in my_screenshot*.png")
found, grey = check_for_menu()
print("Menu found=%s greyed out=%s" % (found, grey))
sys.exit(0)
no_modron_commands = ["run", "no-core", "no-modron", ]
if args.command in no_modron_commands:
if args.use_modron:
print("WARNING: Modron mode enabled but you are using the No Modron run command.")
print("Patron:%s / Familiars:%d / Minutes:%d / Team:%s (CTRL-C to stop)" % (
patron, args.familiars, args.loops, speed_team))
if args.command == "buy":
confirmation_msg = ""
if not args.confirm_buy:
confirmation_msg = "type Y to buy or N/"
msg = ("Buy %d gold chests for %d gems (%sCTRL-C to stop)" % (
args.loops, args.loops * 500, confirmation_msg))
if args.confirm_buy:
print(msg)
else:
agreed = query_yes_no(msg, default="no")
if not agreed:
sys.exit(1)
while args.command == "goto":
pyautogui.moveTo(1400, 50, duration=2.0)
pyautogui.click()
pyautogui.moveTo(924, 292, duration=2.0)
pyautogui.click()
time.sleep(5.0)
print("mouse: %s" % str(pyautogui.position()))
if args.command == "bs":
tiny = args.loops
small = args.extras[0]
medium = args.extras[1]
large = args.extras[2]
ilvls = tiny * 1 + small*2 + medium * 6 + large * 24
print("tiny=%d x 1 small=%d x 2 medium=%d x 6 large=%d x 24 = %d ilvls" % (
tiny,small,medium,large, ilvls,
))
sys.exit(1)
if args.command == "bc":
small = args.loops
medium = args.extras[0]
large = args.extras[1]
tokens = small*72 + medium * 576 + large * 1152
runs = tokens / 2500
print("small=%d x 72 medium=%d x 576 large=%d x 1152 = %d tokens (%d runs)" % (
small,medium,large, tokens, runs
))
sys.exit(1)
reduction = 0.032
if args.command == "briv4":
reduction = 0.04
args.command = "briv"
if args.command == "briv3":
args.command = "briv"
while args.command == "briv":
stacks = float(args.loops)
jumps = 0
print("stacks=%f jumps=%d odds=%f percent=%f" % (stacks, jumps, args.odds, reduction))
while stacks > 50.0:
stacks -= stacks * reduction
stacks = math.floor(stacks)
skipped = jumps * 3
levels = jumps * 3 + float(jumps) / args.odds * 100.0
print("stacks=%f jumps=%d skipped=%d levels=%d" % (
stacks, jumps, skipped, levels))
jumps += 1
sys.exit(1)
if args.command == "check":
print("Test Startup Complete")
check_for_menu()
sys.exit(0)
while args.command == "cmp":
im1 = Image.open("my_screenshot0.png").convert('RGB')
im2 = Image.open("levels/691.png").convert('RGB')
diff = ImageChops.difference(im1, im2)
result = ImageStat.Stat(diff)
print("mean=%s" % str(result.mean))
print("rms=%s" % str(result.rms))
diff.save('diff.png')
if diff.getbbox():
print("Not same, check diff.png, %s" % str(diff.getbbox()))
else:
print("Same")
sys.exit(1)
if args.command == "repair_shortcut":
result = repair_shortcut()
sys.exit(0 if result else 1)
# Commands above this line don't require Idle Champions to be running
# ########################################################################
# Start idle champions and foreground it
print("Starting/Foregrounding Idle Champions")
if args.countdown > 0:
print("Script will start in ...", end='', flush=True)
for s in range(args.countdown, 0, -1):
print(" %d ..." % s, end='', flush=True)
time.sleep(1.0)
print("now")
foreground_or_start(tries=5)
time.sleep(1.0)
# TODO: check that top_x and top_y have been set
verbose_print("Using top_x,top_y = %d,%d" % (top_x, top_y))
loops = 0
crashes = 0
# ########################################################################
# Commands below this line require Idle Champions to be running
while args.command == "complete":
loops += 1
complete = zone_complete(save_images=True, fast=True)
if complete:
print("zone complete")
else:
print("zone incomplete")
if loops > 10000:
sys.exit(0)
if args.command == "testfkey":
print("level_team_with_keys(args,[%s])" % speed_team)
level_team_with_keys(args,speed_team, between_champs=1.0)
sys.exit(0)
if args.command == "teststart":
print("Test Startup Complete")
sys.exit(0)
while args.command == "zap":
pyautogui.press("e")
time.sleep(5.0)
while args.command == "keep-alive":
time.sleep(args.loops)
print("Checking for game at %s" % datetime.datetime.now())
foreground_or_start()
continue
while args.command == "goto":
pyautogui.moveTo(2028, 20, duration=2.0)
print("mouse: %s" % str(pyautogui.position()))
break
if args.command == "codes":
last_len = 12
next_code_target = with_top_offset(89, 582, as_point=True)
unlock_target = with_top_offset(632, 559, as_point=True)
toggle_size_target = with_top_offset(63, 669, as_point=True)
expired_target = with_top_offset(636, 390, as_point=True)
flip_target = with_top_offset(726, 358, as_point=True)
close_target = with_top_offset(1265, 12, as_point=True)
with open(args.codes, "r") as f:
for code in f:
code = code.replace('-', '', )
code = code.replace(' ', '', )
code = code.strip()
print("Entering code '%s'" % code)
next_code_target = click_with_position("unlock.png", next_code_target)
time.sleep(2.5)
# wait for the code screen
if len(code) == 12:
if last_len != 12:
last_len = 12
print("Click mode button")
toggle_size_target = click_with_position("sizebutton.png", toggle_size_target)
elif len(code) == 16:
if last_len != 16:
last_len = 16
print("Click mode button")
toggle_size_target = click_with_position("sizebutton.png", toggle_size_target)
else:
print("IGNORING: %s (len %d)" %(code, len(code)))
continue
enter_code(code)
time.sleep(3.5)
unlock_target = click_with_position("unlock.png", unlock_target)
time.sleep(6.0)
flip_target = click_with_position("flip.png", flip_target)
time.sleep(0.2)
expired_target = click_with_position("expired.png", expired_target)
time.sleep(4.0)
flip_target = click_with_position("flip.png", flip_target)
time.sleep(0.2)
close_target = click_with_position("close.png", close_target)
time.sleep(2.0)
sys.exit(1)
if args.command == "bounty" or args.command == "small" or args.command == "medium":
start_image = "bountysmall.png"
bounty_size = "small"
if args.command == "medium" or args.size == "medium":
bounty_size = "medium"
start_image = "bountymedium.png"
print("Buying %s bounties of size %s" % (args.loops, bounty_size))
# Inventory Region
region = region_for_screenshot(350, 170, 565, 325)
try:
bounty_target = locate(start_image, search_region=region)
except Exception:
print("Error: could not find bounty image %s: is the inventory open?" % (start_image))
sys.exit(1)
if not bounty_target:
print("Error: could not find bounty image %s: is the inventory open?" % (start_image))
sys.exit(1)
# use offset instead of image find ...
bar_target = with_top_offset(742, 386, as_point=True)
go_target = with_top_offset(555, 432, as_point=True)
while True:
move_to_menu()
loops += 1
print("Buying bounty %d of %d" % (loops, args.loops))
bounty_target = click_with_position(start_image, bounty_target)
time.sleep(0.25)
bar_target = click_with_position("bountybar.png", bar_target)
time.sleep(0.25)
go_target = click_with_position("bountygo.png", go_target)
# drops can take a while to process, give it sec or two
if loops >= args.loops:
sys.exit(0)
time.sleep(1.5)
sys.exit(0)
if args.command == "silver" or args.command == "gold":
mouse_move_speed = 0.5
time.sleep(mouse_move_speed)
inventory_target = None
bar_target = None
go_target = None
flip_target = None
done_target = None
delay = 2.5
if args.command == "gold":
delay = 7
while True:
loops += 1
print("Opening 50 silver chests batch %d of %d" % (loops, args.loops))
# inventory_target = click_with_position("openinventory.png", inventory_target, 40, 100)
# move_to_menu()
# time.sleep(2)
click_offset(132, 126, duration=mouse_move_speed, delay=0.75)
# bar_target = click_with_position("bountybar.png", bar_target)
click_offset(744, 385, duration=mouse_move_speed, delay=0.75)
# go_target = click_with_position("openopen.png", go_target, click=False)
click_offset(551, 431, duration=mouse_move_speed, delay=delay)
# flip_target = click_with_position("openflip.png", flip_target)
click_offset(726, 359, duration=mouse_move_speed, delay=delay)
# click in same place for show all
# flip_target = click_with_position("openflip.png", flip_target)
click_offset(726, 359, duration=mouse_move_speed, delay=delay)
# done_target = click_with_position("opendone.png", done_target)
pyautogui.press("esc")
# pyautogui.moveRel(300, 0, duration=0.0)
time.sleep(1.75)
if loops >= args.loops:
sys.exit(1)
while args.command == "testimages":
level, plus = get_current_zone(level_images, args.save_mismatch)
if level > 0:
print("zone found %d, %s, %s" % (level, plus, on_boss()))
else:
print("not found")
print("sleeping ... ")
time.sleep(3.0)
if args.command == "stack":
for s in range(args.loops, 0, -1):
print("===== Stacking: %d to go (charge_time=%d) =====" % (s, args.charge))
restart_stacking(args)
if s > 1:
time.sleep(15.0)
sys.exit(0)
if args.command == "testboss":
time.sleep(2.0)
is_on_boss = on_boss()
print("on boss = %s" % is_on_boss)
sys.exit(0)
if args.command == "testzone":
print("Testing zone detection")
found_app = activate_app(APP_NAME)
print("%s" % str(found_app))
print("%d,%d" % (found_app.left, found_app.top))
print("Configured top_x,top_y = %d,%d" % (top_x, top_y))
top_x, top_y = found_app.left+1, found_app.top+top_offset
print("new top_x,top_y = %d,%d" % (top_x, top_y))
level, plus = get_current_zone(level_images, True, tries=3)
if level <= 0:
sys.exit("Cound not find zone, saved in my_screenshot*.png")
print("Zone found %d (at start zone: %s), (on_boss: %s)" % (level, plus, on_boss()))
sys.exit(0)
if args.command == "legacyzone":
print("Legacy zone detection")
x, y = get_menu(tries=10)
region = get_level_region()
print("%d, %d vs %s" % (x, y, region))
level, plus = get_current_level(x, y, level_images, args.save_mismatch)
print("old %s, %s" % (level, plus))
sys.exit(0)
if args.command == "jimmy":
finder = LevelFinder()
for i in range(1,args.loops+1):
now = datetime.datetime.now()
print("Jimmy loops %d of %d (%s)" % (i, args.loops, str(now)))
print("Next attack in ", end='', flush=True)
for s in range(int(args.charge), 0, -1):
print(" %d ..." % s, end='', flush=True)
time.sleep(1.0)
print("Attack!")
pyautogui.press('g')
time.sleep(3.0)
pyautogui.press('q')
time.sleep(12.0)
pyautogui.press('w')
time.sleep(10.0)
pyautogui.press('e')
# level, plus = finder.get_current_zone()
# need images for the above
level = 1000
if level >= 1490:
print("Jimmy exiting at level %d" % (level))
pyautogui.press('left')
pyautogui.press('left')
pyautogui.press('left')
pyautogui.press('left')
pyautogui.press('left')
pyautogui.press('left')
pyautogui.press('left')
sys.exit(0)
# time.sleep(args.charge)
# click back to 1
for j in range(0,3):
pyautogui.keyDown('shift')
click_offset(924, 105)
pyautogui.keyUp('shift')
time.sleep(0.5)
click_offset(971, 106)
time.sleep(1.5)
# time.sleep(0.1)
pyautogui.keyDown('shift')
click_offset(924, 105)
pyautogui.keyUp('shift')
time.sleep(0.5)
click_offset(971, 106)
pyautogui.keyUp('shift')
time.sleep(1.5)
sys.exit(0)
finder = None
while args.command == "zone2":
if not finder:
finder = LevelFinder()
level, plus = finder.get_current_zone(True)
print("Zone found %d (at start zone: %s), (on_boss: %s)" % (level, not plus, on_boss()))
if level <= 0:
print("Could not find zone, zone image saved in my_screenshot*.png")
if args.command == "modron":
infinite_loop = True
# try:
# verified = verify_menu(update=False)
# except Exception:
# print("ERROR: Can't verify menu location. Exiting.")
print("Modron Gem Farming: Briv recharge=%d; modron goal=%d; charge=%f seconds; havi ult=%s; hew ult=%s shandie=%ds" % (
args.target-args.briv_recharge_areas,
args.target,
args.charge, args.havi_ult, args.hew_ult,
args.charge_shandie
))
finder = LevelFinder()
print("(Hit CTRL-C to stop or move mouse to the corner of the screen)")
need_havi_ult = True
need_recharge = True
log_restarted = False
need_leveling = not config.getboolean("idler", "familiar_leveling")
log_initial = True
last_level = -1
now = datetime.datetime.now()
tracker = Tracker(now=now,
zones=args.target,
verbose=verbose,)
last_level_time = now
while True:
now = datetime.datetime.now()
try:
level, plus = finder.get_current_zone(save=args.save_mismatch)
if verbose and not debugging:
print("Zone found %d (at start zone: %s)" % (level, plus))
if debugging:
print("Zone found %d (at start zone: %s), (on_boss: %s)" % (level, plus, on_boss()))
except Exception as e:
print("Error getting current level: %s" % str(e))
level = -2
plus = False
verbose_print("Level %d" % level)
if log_initial:
tracker.start_tracking(now, level, plus)
log_initial = False
# check for stalled or hung game
if last_level == level and level > 0:
# check delta
delta = (now - last_level_time).total_seconds()
if delta > 30:
# try 'q' 'g' to see if it unsticks
pyautogui.press('q')
pyautogui.press('g')
pyautogui.press('q')
time.sleep(5.0)
if delta > 90:
print("Error stuck at zone %s at %s for %d seconds ..." % (level, datetime.datetime.now(), delta))
# kill the app and restart
shutdown_app(args.keyboard_shutdown)
# attempt restart below
level = -1
else:
last_level = level
last_level_time = now
if level <= 0:
verbose_print("Error: is restart needed?")
try:
accept_screen_share(args.screenshare)
shutdown_app(args.keyboard_shutdown)
except Exception as e:
pass
time.sleep(1.0)
try:
foreground_or_start(tries=5)
# TODO: Need to be able to see if in auto or ran by end zone or ... or maybe if stuck triggered?
# time.sleep(1.0)
# pyautogui.press("g")
except Exception as e:
print("Error restarting... wait and try again %s" % str(e))
time.sleep(10.0)
elif level == 1 and not plus and log_restarted and args.charge_shandie > 0:
need_recharge = True
log_restarted = False
time.sleep(0.2)
pyautogui.press("g")
tracker.start_loop(now, level, plus)
print("Loop started %s: %d (charging shandie for %d seconds)" % (
datetime.datetime.now(), level, args.charge_shandie))
for i in range(0, 20):
pyautogui.press("f5")
for i in range(0, 20):
pyautogui.press("f6")
time.sleep(args.charge_shandie)
foreground_or_start()
if need_havi_ult:
need_havi_ult = False
print("Havi Ult")
pyautogui.press(args.havi_ult)
time.sleep(0.5)
pyautogui.press("g")
time.sleep(5.0)
elif level == 1 and need_leveling:
if log_restarted:
log_restarted = False
tracker.start_loop(now, level, plus)
print("Loop started %s: %d" % (datetime.datetime.now(), level))
# Manual leveling
level_team_with_keys(args, speed_team, between_champs=DEFAULT_DELAY)
need_leveling = False
need_recharge = True
elif level < 40 and need_havi_ult:
need_recharge = True
if log_restarted:
tracker.start_loop(now, level, plus)
log_restarted = False
if level >= 11:
need_havi_ult = False
print("Havi Ult")
for i in range(0,40):
pyautogui.press(args.havi_ult)
time.sleep(0.1)
time.sleep(1.0)
elif level < args.target - 50:
diff = args.target - level
if args.briv_boss:
# foreground_or_start()
debug_print("checking for team on_boss")
if plus and on_boss(fast=True):
verbose_print("team is on_boss")
pyautogui.press('e')
# wait until on next level
# new_level = level
# while new_level == level:
# new_level, new_plus = finder.get_current_zone()
while on_boss(fast=True):
pass
# resume
pyautogui.press('q')
time.sleep(0.5)
pyautogui.press('q')
time.sleep(0.5)
if args.screenshare:
accept_screen_share(args.screenshare)
else:
time.sleep(diff*0.25)
foreground_or_start()
elif level < args.target - args.briv_recharge_areas:
verbose_print("continue at %d" % level)
continue
else:
verbose_print("check for recharge at %d" % level)
log_restarted = True
if need_recharge:
charge_briv(level, plus, level_images, args)
last_level = -1
last_level_time = datetime.datetime.now()
verbose_print("Recharge finished: %s" % last_level_time)
need_recharge = False
need_havi_ult = True
OFFSET_xx2 = 1925 - OFFSET_xx1
OFFSET_xx3 = 2025 - OFFSET_xx1
OFFSET_xx4 = 2122 - OFFSET_xx1
if args.command == "grab":
region = get_level_region()
raw_im = pyautogui.screenshot(region=region)
im = raw_im.convert('RGB')
im.save("1xx.png")
sys.exit(0)
x, y = menu_location()
pyautogui.moveTo(x, y)
x, y = location_for_screenshot(440, 240)
region = region_for_screenshot(350, 170, 565, 325)
im = pyautogui.screenshot(region=region)
im.save("inventory.png")
sys.exit(0)
level, plus = get_current_zone(level_images, args.save_mismatch)
# x, y = get_menu()
print("x = %f y = %f" % (x, y))
# x01
# x = menu_home.x * 2 + 1830
# y = menu_home.y * 2 + 10
im = pyautogui.screenshot(region=(x, y, IMAGE_WIDTH, IMAGE_HEIGHT))
im.save("1xx.png")
# x02
# x = menu_home.x * 2 + 1927
im = pyautogui.screenshot(region=(x+OFFSET_xx2, y, IMAGE_WIDTH, IMAGE_HEIGHT))
im.save("2xx.png")
# x03
# x = menu_home.x * 2 + 2025
im = pyautogui.screenshot(region=(x+OFFSET_xx3, y, IMAGE_WIDTH, IMAGE_HEIGHT))
im.save("3xx.png")
# x04
# x = menu_home.x * 2 + 2122
im = pyautogui.screenshot(region=(x+OFFSET_xx4, y, IMAGE_WIDTH, IMAGE_HEIGHT))
im.save("4xx.png")
# boss
# x = menu_home.x * 2 + 2219
# im = pyautogui.screenshot(region=(x, y, 56, 56))
# im.save("boss.png")
sys.exit(1)
while args.command == "monitor":
time.sleep(1.0)
menu_home = locate('menu.png')
print("menu_home.x = %f menu_home.y = %f" % (menu_home.x, menu_home.y))
x = menu_home.x * 2 + 1830
y = menu_home.y * 2 + 10
# Try grabbing a small section of screen
for i in range(0,300):
time.sleep(5)
im = pyautogui.screenshot(region=(x, y, IMAGE_WIDTH, IMAGE_HEIGHT))
# in list?
found = False
for name, img in level_images.items():
diff = ImageChops.difference(im.convert('RGB'), img).getbbox()
if not diff:
try:
level = int(name[7:10])
except Exception:
level = 0
print("Found %s again %s" % (name, level))
found = True
break
if found:
continue
print("Saving %i" % i)
im.save('my_screenshot%d.png' % i)
break
if args.command == "move":
x = args.loops
y = args.extras[0]
found_app = activate_app(APP_NAME)
rect = found_app._rect
print("app=%s" % str(found_app))
sys.exit(0)
# click_second_spec(delay=1.0)
while args.command == "press":
keys = ["q", "w", "e"]
print("Pressing %s" % keys[args.loops-1])
pyautogui.press(keys[args.loops-1])
time.sleep(10)
# click_second_spec(delay=1.0)
while args.command == "buy":
found = click_image("1chest.png", "1chestH.png", delay=0.5)
time.sleep(0.25)
while found:
pyautogui.moveRel(900, 0, duration=0.0)
time.sleep(0.25)
pyautogui.click()
loops += 1
if loops >= args.loops:
break
time.sleep(2.5)
pyautogui.moveRel(-900, 0, duration=0.0)
time.sleep(0.25)
pyautogui.click()
time.sleep(0.25)
if loops >= args.loops:
break
start_time = datetime.datetime.now()
do_startup = True
if args.in_progress:
do_startup = False
wait_minutes = 10 if args.loops == 0 else args.loops
while args.command in no_modron_commands:
infinite_loop = True
loop_time = datetime.datetime.now()
menu_home = None
ult = 0
loops += 1
if loops > args.runloops:
break
print("Starting loop %d at %s" % (loops, datetime.datetime.now()))
if do_startup:
# Startup by clicking on the Mad Wizard City
start_it_up(args.adventure)
for i in range(0, 20):
time.sleep(1.0)
blue, grey = check_for_menu()
if blue or grey:
break
# We are now on Level 1: Time to GO
# Drop Fams First
print("Dropping up to %d Familiars" % (args.familiars,))
time.sleep(DEFAULT_DELAY)
pyautogui.press('g')
time.sleep(DEFAULT_DELAY)
# Now we have formations!
# place_click_familiars(args.familiars)
pyautogui.press('q')
time.sleep(DEFAULT_DELAY)
# Level Champs
print("Leveling up Champs")
level_team_with_keys(args, speed_team, between_champs=DEFAULT_DELAY)
print("Running for %d minutes before checking for Briv Charging %s" % (args.loops, datetime.datetime.now()))
for m in range(wait_minutes, 0, -1):
print(" %d minutes" % m)
time.sleep(60.0)
do_startup = True
# check the level and charge Briv
# recharge Briv
if have_briv:
while True:
try:
time.sleep(10)
level, plus = get_current_zone(level_images, args.save_mismatch)
if level >= args.target:
charge_briv(level, plus, level_images, args)
break
except Exception as a:
print("Briv Charge Error: %s" % str(a))
pass
# shutdown the loop
print("Wrapping up starting at %s" % (datetime.datetime.now()))
try:
wrap_it_up()
except Exception as a:
print("Wrap Up Error: %s" % str(a))
pass
# dump some stats
run_time = datetime.datetime.now() - start_time
loop_time = datetime.datetime.now() - loop_time
print("Loops: %d Runtime: %s This Loop: %s Average Loop: %s Crashes: %d" % (
loops,
run_time,
loop_time,
run_time / float(loops),
crashes)
)
# print("%s" % list(pyautogui.locateAllOnScreen('./burger2.png')))
# Entry point: run main_method, pausing briefly and retrying after any error.
if __name__ == "__main__":
    first_loop = True
    # NOTE(review): first_loop is never set to False, so this loop runs forever
    # regardless of infinite_loop. Confirm whether an endless crash-restart loop
    # is intended, or whether a "first_loop = False" is missing from the body.
    while first_loop or infinite_loop:
        try:
            main_method()
        except Exception as e:
            # Log and back off for a few seconds so a repeatedly-crashing run
            # does not spin this loop at full speed.
            print("WARNING: exception caught: %s" % e)
            time.sleep(5.0)
| 36.157895 | 228 | 0.55876 |
6f83e3077b4d9894ca83df47fd03821fb60ba9ee | 152 | py | Python | src/util/config.py | ChristopherBrix/Debona | f000f3d483b2cc592233d0ba2a1a0327210562c8 | [
"BSD-2-Clause"
] | 2 | 2020-07-26T09:48:22.000Z | 2021-09-30T01:51:13.000Z | src/util/config.py | ChristopherBrix/Debona | f000f3d483b2cc592233d0ba2a1a0327210562c8 | [
"BSD-2-Clause"
] | 2 | 2022-01-13T03:56:13.000Z | 2022-03-12T01:03:29.000Z | src/util/config.py | ChristopherBrix/Debona | f000f3d483b2cc592233d0ba2a1a0327210562c8 | [
"BSD-2-Clause"
] | null | null | null |
"""
Config file
Author: Patrick Henriksen <patrick@henriksen.as>
"""
import logging
# Default log level shared by the whole project.
LOGS_LEVEL = logging.INFO
# Configure the root logger at import time so every module inherits this level.
logging.basicConfig(level=LOGS_LEVEL)
| 12.666667 | 48 | 0.769737 |
dd7d8e9ec96af5d7ca4e0e5dc9d69c3e298a0b24 | 140 | py | Python | application.py | singularitai/Morphling | e7a3af969123c0d3c0f3c6f1036a97e9be0b289c | [
"MIT",
"Condor-1.1",
"Unlicense"
] | 9 | 2021-03-22T09:18:58.000Z | 2022-03-02T01:42:11.000Z | application.py | singularitai/Morphling | e7a3af969123c0d3c0f3c6f1036a97e9be0b289c | [
"MIT",
"Condor-1.1",
"Unlicense"
] | null | null | null | application.py | singularitai/Morphling | e7a3af969123c0d3c0f3c6f1036a97e9be0b289c | [
"MIT",
"Condor-1.1",
"Unlicense"
] | 2 | 2022-03-29T07:59:12.000Z | 2022-03-31T09:10:47.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 12 06:14:38 2020
@author: Hephyrius
"""
from interface import Ui_MainWindow
Ui_MainWindow() | 15.555556 | 35 | 0.692857 |
840d094470046408432647b1e1d81540585e7461 | 297 | py | Python | ImagePreprocessing/basic.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
] | null | null | null | ImagePreprocessing/basic.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
] | null | null | null | ImagePreprocessing/basic.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
] | null | null | null | import cv2
# Read an image. The first argument is the image path; the second is the
# read mode: 1 = 3-channel colour, 0 = single-channel greyscale.
im = cv2.imread(r"me.jpg", 1)
# Display the image im in a window titled "test".
cv2.imshow("test", im)
# Block until the user presses a key.
cv2.waitKey()
# Destroy every window that was created.
cv2.destroyAllWindows()
# Print the data-structure type of the image object.
print(type(im))
# Print the image dimensions.
print(im.shape)
# Save the image to a given path.
# cv2.imwrite('lena.jpg',im)
| 16.5 | 31 | 0.734007 |
9c3975834cc3558e0f12fe9b4e2e4a0c55a58011 | 89 | py | Python | definitions.py | Coveochatbot/megageniale-mlapi | 61666c33a4313c9906d874fa04dd6c6bd45df583 | [
"MIT"
] | null | null | null | definitions.py | Coveochatbot/megageniale-mlapi | 61666c33a4313c9906d874fa04dd6c6bd45df583 | [
"MIT"
] | 3 | 2018-10-20T23:10:14.000Z | 2018-12-15T00:44:49.000Z | definitions.py | Coveochatbot/megageniale-mlapi | 61666c33a4313c9906d874fa04dd6c6bd45df583 | [
"MIT"
] | 1 | 2018-11-12T14:38:09.000Z | 2018-11-12T14:38:09.000Z | import os
class Definitions:
    """Project-wide path constants."""

    # Absolute path of the directory containing this file.
    ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
| 14.833333 | 57 | 0.741573 |
2a3ae55c013fdd802beee753d344f4bf233fe2e7 | 516 | py | Python | env/lib/python3.8/site-packages/plotly/validators/sunburst/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/sunburst/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/sunburst/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the sunburst trace's `visible` attribute."""

    def __init__(self, plotly_name="visible", parent_name="sunburst", **kwargs):
        # Pop the validator options out of kwargs first so that only
        # unrecognised keys are forwarded to the base class untouched.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", [True, False, "legendonly"])
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
| 36.857143 | 80 | 0.645349 |
197dd98a8a0ef863d903c08738f910fa72ccf6eb | 2,032 | py | Python | pyrfm/random_feature/tests/test_mb.py | neonnnnn/pyrfm | e88fe8cb7bf3062616d33826e955e828fc6d8ba6 | [
"BSD-2-Clause"
] | 7 | 2020-05-31T01:47:27.000Z | 2021-12-26T03:45:14.000Z | pyrfm/random_feature/tests/test_mb.py | neonnnnn/pyrfm | e88fe8cb7bf3062616d33826e955e828fc6d8ba6 | [
"BSD-2-Clause"
] | 2 | 2019-12-01T01:18:38.000Z | 2020-08-27T12:07:26.000Z | pyrfm/random_feature/tests/test_mb.py | neonnnnn/pyrfm | e88fe8cb7bf3062616d33826e955e828fc6d8ba6 | [
"BSD-2-Clause"
] | 3 | 2021-03-17T13:46:56.000Z | 2022-03-18T21:43:45.000Z | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_allclose_dense_sparse
from pyrfm import MB, SparseMB
from pyrfm import intersection
from sklearn.utils.extmath import safe_sparse_dot
import pytest
# generate data
rng = np.random.RandomState(0)
n_samples = 300
n_features = 50
X = rng.random_sample(size=(n_samples, n_features))
Y = rng.random_sample(size=(n_samples, n_features))
@pytest.mark.parametrize("dense_output", [True, False])
def test_mb(dense_output):
    """MB features should approximate the intersection kernel, and sparse
    input should yield the same transform as dense input."""
    # compute exact kernel
    kernel = intersection(X, Y)
    # approximate kernel mapping
    mb_transform = MB(n_components=10000, dense_output=dense_output)
    X_trans = mb_transform.fit_transform(X)
    Y_trans = mb_transform.transform(Y)
    kernel_approx = safe_sparse_dot(X_trans, Y_trans.T,
                                    dense_output=dense_output)
    # Mean absolute approximation error must shrink with the grid count.
    error = kernel - kernel_approx
    assert np.mean(np.abs(error)) < 50 / mb_transform.n_grids_
    # for sparse matrix: must match the dense-input transform exactly
    X_trans_sp = mb_transform.fit_transform(csr_matrix(X))
    assert_allclose_dense_sparse(X_trans_sp, X_trans)
# Are output dense/sparse matrices same?
def test_mb_output():
    """The dense_output flag must not change the computed feature values."""
    dense_transformer = MB(n_components=10000, dense_output=True)
    dense_features = dense_transformer.fit_transform(X)
    sparse_transformer = MB(n_components=10000, dense_output=False)
    sparse_features = sparse_transformer.fit_transform(X)
    assert_allclose_dense_sparse(sparse_features.toarray(), dense_features)
def test_sparse_mb():
    """SparseMB output should be sparse (at most 2*n_features nonzeros per
    sample) with max row sum ~= n_features, for dense and sparse inputs,
    and both inputs should produce identical features."""
    mb_transform = SparseMB(n_components=10000)
    X_trans = mb_transform.fit_transform(X)
    assert X_trans.nnz <= n_samples*n_features*2
    assert_almost_equal(np.max(np.sum(X_trans, axis=1)), n_features, decimal=2)
    # Same checks on the sparse-input result. The original re-tested
    # X_trans here (copy-paste slip), leaving X_trans_sp unchecked.
    X_trans_sp = mb_transform.fit_transform(csr_matrix(X))
    assert X_trans_sp.nnz <= n_samples*n_features*2
    assert_almost_equal(np.max(np.sum(X_trans_sp, axis=1)), n_features, decimal=2)
    assert_almost_equal(X_trans.toarray(), X_trans_sp.toarray())
| 35.649123 | 79 | 0.757874 |
f6c03fc6b4158681d14d077c20aff06963bfc509 | 8,638 | py | Python | ngraph/python/tests/test_ngraph/test_ops_unary.py | lsdace30095/openvino | bdbb04f47be94cd8cb2fbb97b766d2e1231337ff | [
"Apache-2.0"
] | 1 | 2020-08-25T06:01:49.000Z | 2020-08-25T06:01:49.000Z | ngraph/python/tests/test_ngraph/test_ops_unary.py | x1aoo/openvino | 393e9295cdf84ffaacf6da857b31ac384addfaf4 | [
"Apache-2.0"
] | null | null | null | ngraph/python/tests/test_ngraph/test_ops_unary.py | x1aoo/openvino | 393e9295cdf84ffaacf6da857b31ac384addfaf4 | [
"Apache-2.0"
] | null | null | null | # ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from ngraph.impl import Shape, Type
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_35929, xfail_issue_34323, xfail_issue_36483
@xfail_issue_35929
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, range_start, range_end",
[
(ng.absolute, np.abs, -1, 1),
(ng.abs, np.abs, -1, 1),
(ng.acos, np.arccos, -1, 1),
(ng.acosh, np.arccosh, -1, 1),
(ng.asin, np.arcsin, -1, 1),
(ng.asinh, np.arcsinh, -1, 1),
(ng.atan, np.arctan, -100.0, 100.0),
(ng.atanh, np.arctanh, -100.0, 100.0),
(ng.ceiling, np.ceil, -100.0, 100.0),
(ng.ceil, np.ceil, -100.0, 100.0),
(ng.cos, np.cos, -100.0, 100.0),
(ng.cosh, np.cosh, -100.0, 100.0),
(ng.exp, np.exp, -100.0, 100.0),
(ng.floor, np.floor, -100.0, 100.0),
(ng.log, np.log, 0, 100.0),
(ng.relu, lambda x: np.maximum(0, x), -100.0, 100.0),
(ng.sign, np.sign, -100.0, 100.0),
(ng.sin, np.sin, -100.0, 100.0),
(ng.sinh, np.sinh, -100.0, 100.0),
(ng.sqrt, np.sqrt, 0.0, 100.0),
(ng.tan, np.tan, -1.0, 1.0),
(ng.tanh, np.tanh, -100.0, 100.0),
],
)
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
expected = numpy_fn(input_data)
result = run_op_node([input_data], ng_api_fn)
assert np.allclose(result, expected, rtol=0.001)
@xfail_issue_35929
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, range_start, range_end",
[
(ng.absolute, np.abs, -1, 1),
(ng.abs, np.abs, -1, 1),
(ng.acos, np.arccos, -1, 1),
(ng.asin, np.arcsin, -1, 1),
(ng.atan, np.arctan, -100.0, 100.0),
(ng.ceiling, np.ceil, -100.0, 100.0),
(ng.ceil, np.ceil, -100.0, 100.0),
(ng.cos, np.cos, -100.0, 100.0),
(ng.cosh, np.cosh, -100.0, 100.0),
(ng.exp, np.exp, -100.0, 100.0),
(ng.floor, np.floor, -100.0, 100.0),
(ng.log, np.log, 0, 100.0),
(ng.relu, lambda x: np.maximum(0, x), -100.0, 100.0),
(ng.sign, np.sign, -100.0, 100.0),
(ng.sin, np.sin, -100.0, 100.0),
(ng.sinh, np.sinh, -100.0, 100.0),
(ng.sqrt, np.sqrt, 0.0, 100.0),
(ng.tan, np.tan, -1.0, 1.0),
(ng.tanh, np.tanh, -100.0, 100.0),
],
)
def test_unary_op_array_using_constants(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
expected = numpy_fn(input_data)
result = run_op_numeric_data(input_data, ng_api_fn)
assert np.allclose(result, expected, rtol=0.001)
@pytest.mark.skip(reason="Segmentation fault")
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, input_data",
[
pytest.param(ng.absolute, np.abs, np.float32(-3)),
pytest.param(ng.abs, np.abs, np.float32(-3)),
pytest.param(ng.acos, np.arccos, np.float32(-0.5)),
pytest.param(ng.asin, np.arcsin, np.float32(-0.5)),
pytest.param(ng.atan, np.arctan, np.float32(-0.5)),
pytest.param(ng.ceiling, np.ceil, np.float32(1.5), marks=xfail_issue_36483),
pytest.param(ng.ceil, np.ceil, np.float32(1.5), marks=xfail_issue_36483),
pytest.param(ng.cos, np.cos, np.float32(np.pi / 4.0)),
pytest.param(ng.cosh, np.cosh, np.float32(np.pi / 4.0)),
pytest.param(ng.exp, np.exp, np.float32(1.5)),
pytest.param(ng.floor, np.floor, np.float32(1.5)),
pytest.param(ng.log, np.log, np.float32(1.5)),
pytest.param(ng.relu, lambda x: np.maximum(0, x), np.float32(-0.125)),
pytest.param(ng.sign, np.sign, np.float32(0.0)),
pytest.param(ng.sin, np.sin, np.float32(np.pi / 4.0)),
pytest.param(ng.sinh, np.sinh, np.float32(0.0)),
pytest.param(ng.sqrt, np.sqrt, np.float32(3.5)),
pytest.param(ng.tan, np.tan, np.float32(np.pi / 4.0)),
pytest.param(ng.tanh, np.tanh, np.float32(0.1234)),
],
)
def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
expected = numpy_fn(input_data)
result = run_op_node([input_data], ng_api_fn)
assert np.allclose(result, expected)
@xfail_issue_34323
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, input_data",
[
(ng.absolute, np.abs, np.float32(-3)),
(ng.abs, np.abs, np.float32(-3)),
(ng.acos, np.arccos, np.float32(-0.5)),
(ng.acosh, np.arccosh, np.float32(-0.5)),
(ng.asin, np.arcsin, np.float32(-0.5)),
(ng.asinh, np.arcsinh, np.float32(-0.5)),
(ng.atan, np.arctan, np.float32(-0.5)),
(ng.atanh, np.arctanh, np.float32(-0.5)),
(ng.ceiling, np.ceil, np.float32(1.5)),
(ng.ceil, np.ceil, np.float32(1.5)),
(ng.cos, np.cos, np.float32(np.pi / 4.0)),
(ng.cosh, np.cosh, np.float32(np.pi / 4.0)),
(ng.exp, np.exp, np.float32(1.5)),
(ng.floor, np.floor, np.float32(1.5)),
(ng.log, np.log, np.float32(1.5)),
(ng.relu, lambda x: np.maximum(0, x), np.float32(-0.125)),
(ng.sign, np.sign, np.float32(0.0)),
(ng.sin, np.sin, np.float32(np.pi / 4.0)),
(ng.sinh, np.sinh, np.float32(0.0)),
(ng.sqrt, np.sqrt, np.float32(3.5)),
(ng.tan, np.tan, np.float32(np.pi / 4.0)),
(ng.tanh, np.tanh, np.float32(0.1234)),
],
)
def test_unary_op_scalar_using_constants(ng_api_fn, numpy_fn, input_data):
expected = numpy_fn(input_data)
result = run_op_numeric_data(input_data, ng_api_fn)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))]
)
def test_logical_not(input_data):
expected = np.logical_not(input_data)
result = run_op_node([input_data], ng.logical_not)
assert np.allclose(result, expected)
@xfail_issue_34323
@pytest.mark.parametrize(
"input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))]
)
def test_logical_not_using_constants(input_data):
expected = np.logical_not(input_data)
result = run_op_numeric_data(input_data, ng.logical_not)
assert np.allclose(result, expected)
def test_sigmoid():
    """ng.sigmoid should match the reference logistic function element-wise."""
    input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32)
    result = run_op_node([input_data], ng.sigmoid)
    # Reference values from the vectorised logistic function.
    expected = 1.0 / (1.0 + np.exp(-input_data))
    assert np.allclose(result, expected)
@xfail_issue_34323
def test_softmax():
axis = 0
input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
result = run_op_node([input_tensor], ng.softmax, axis)
expected = [[0.00426978, 0.01160646, 0.03154963], [0.08576079, 0.23312202, 0.6336913]]
assert np.allclose(result, expected)
def test_erf():
    """ng.erf should reproduce tabulated error-function values."""
    inputs = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32)
    reference = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0]
    outputs = run_op_node([inputs], ng.erf)
    assert np.allclose(outputs, reference)
@xfail_issue_34323
def test_erf_using_constants():
input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32)
expected = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0]
result = run_op_numeric_data(input_tensor, ng.erf)
assert np.allclose(result, expected)
def test_hswish():
    """ng.hswish should build an HSwish node preserving shape and f32 type."""
    float_dtype = np.float32
    data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data")
    node = ng.hswish(data)
    # Graph-construction checks only: type name, output arity, shape, dtype.
    assert node.get_type_name() == "HSwish"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [3, 10]
    assert node.get_output_element_type(0) == Type.f32
| 36.91453 | 99 | 0.608358 |
65853aaa55b695cac67514c19fc50a7a5aa2aa0d | 2,101 | py | Python | example/custom.py | saqebakhter/python-sms-activate-ru | a0cb004a61b41210af1ffac2357802cb23f950ba | [
"Apache-2.0"
] | 1 | 2018-09-10T14:32:09.000Z | 2018-09-10T14:32:09.000Z | example/custom.py | saqebakhter/python-sms-activate-ru | a0cb004a61b41210af1ffac2357802cb23f950ba | [
"Apache-2.0"
] | null | null | null | example/custom.py | saqebakhter/python-sms-activate-ru | a0cb004a61b41210af1ffac2357802cb23f950ba | [
"Apache-2.0"
] | 1 | 2020-01-14T16:16:06.000Z | 2020-01-14T16:16:06.000Z | import time
from smsactivateru import Sms, SmsTypes, SmsService, GetBalance, GetFreeSlots, GetNumber, SetStatus, GetStatus
"""
create wrapper with secret api-key
search here: http://sms-activate.ru/index.php?act=profile
https://5sim.net/settings/security
"""
wrapper = Sms('API KEY', 'http://sms-activate.api.5sim.net/stubs/handler_api.php')
# getting balance
balance = GetBalance().request(wrapper)
# show balance
print('На счету {} руб.'.format(balance))
# getting free slots (count available phone numbers for each services)
available_phones = GetFreeSlots(
country=SmsTypes.Country.RU
).request(wrapper)
# show for vk.com, whatsapp and youla.io)
print('vk.com: {} номеров'.format(available_phones.VkCom.count))
print('whatsapp: {} номеров'.format(available_phones.Whatsapp.count))
print('youla.io: {} номеров'.format(available_phones.Youla.count))
# try get phone for youla.io
activation = GetNumber(
service=SmsService().Youla,
country=SmsTypes.Country.RU,
operator=SmsTypes.Operator.Beeline
).request(wrapper)
# show activation id and phone for reception sms
print('id: {} phone: {}'.format(str(activation.id), str(activation.phone_number)))
# getting and show current activation status
response = GetStatus(id=activation.id).request(wrapper)
print(response)
# .. send phone number to you service
user_action = input('Press enter if you sms was sent or type "cancel": ')
if user_action == 'cancel':
set_as_cancel = SetStatus(
id=activation.id,
status=SmsTypes.Status.Cancel
).request(wrapper)
print(set_as_cancel)
exit(1)
# set current activation status as SmsSent (code was sent to phone)
set_as_sent = SetStatus(
id=activation.id,
status=SmsTypes.Status.SmsSent
).request(wrapper)
print(set_as_sent)
# .. wait code
while True:
time.sleep(1)
response = GetStatus(id=activation.id).request(wrapper)
if response['code']:
print('Your code:{}'.format(response['code']))
break
# set current activation status as End (you got code and it was right)
set_as_end = SetStatus(
id=activation.id,
status=SmsTypes.Status.End
).request(wrapper)
print(set_as_end)
| 30.449275 | 110 | 0.753451 |
6cdf1cf4f4f8a4450a324558c1d91858710d9e74 | 2,154 | py | Python | CS50x 2022/Week 6/Lab 6/World Cup/tournament.py | mm-marwat/CS50x-2022-Psets-and-Labs | 1bdd6f7e1246ca379c84bcbce5d573445ec698d8 | [
"MIT"
] | 1 | 2022-02-10T16:28:00.000Z | 2022-02-10T16:28:00.000Z | CS50x 2022/Week 6/Lab 6/World Cup/tournament.py | mm-marwat/CS50x-2022-Psets-and-Labs | 1bdd6f7e1246ca379c84bcbce5d573445ec698d8 | [
"MIT"
] | null | null | null | CS50x 2022/Week 6/Lab 6/World Cup/tournament.py | mm-marwat/CS50x-2022-Psets-and-Labs | 1bdd6f7e1246ca379c84bcbce5d573445ec698d8 | [
"MIT"
] | null | null | null | # Simulate a sports tournament
import csv
import sys
import random
# Number of simulations to run
N = 1000
def main():
    """Read teams and ratings from the CSV file named on the command line,
    simulate N tournaments, and print each team's estimated chance of
    winning, sorted from most to least likely."""
    # Ensure correct usage
    if len(sys.argv) != 2:
        sys.exit("Usage: python tournament.py FILENAME")
    teams = []
    # Read teams into memory from file ("team" and "rating" CSV columns).
    with open(sys.argv[1], 'r') as file:
        reader = csv.DictReader(file)
        # Temporarily holds one team's name and rating until it is
        # appended to the teams list above.
        team_dic = {}
        for row in reader:
            team = row["team"]
            rating = int(row["rating"])
            team_dic = {"team": team, "rating": rating}
            teams.append(team_dic)
    counts = {}
    # Simulate N tournaments and keep track of win counts per team name.
    for i in range(N):
        winner = simulate_tournament(teams)
        if winner in counts:
            counts[winner] = counts[winner] + 1
        else:
            counts[winner] = 1
    # Print each team's chances of winning, according to simulation
    for team in sorted(counts, key=lambda team: counts[team], reverse=True):
        print(f"{team}: {counts[team] * 100 / N:.1f}% chance of winning")
def simulate_game(team1, team2):
    """Simulate a game. Return True if team1 wins, False otherwise."""
    # Logistic (Elo-style) win probability for team1, driven by the
    # rating gap between the two teams.
    rating_gap = team2["rating"] - team1["rating"]
    win_probability = 1 / (1 + 10 ** (rating_gap / 600))
    # One uniform draw decides the game.
    return random.random() < win_probability
def simulate_round(teams):
    """Simulate a round. Return a list of winning teams."""
    winners = []
    # Play adjacent teams in pairs: (0, 1), (2, 3), ... keeping each winner.
    # An odd-length bracket raises IndexError on the unpaired last team,
    # matching the original behaviour.
    pair_start = 0
    while pair_start < len(teams):
        first = teams[pair_start]
        second = teams[pair_start + 1]
        winners.append(first if simulate_game(first, second) else second)
        pair_start += 2
    return winners
def simulate_tournament(teams):
    """Simulate a tournament. Return name of winning team."""
    # Keep playing elimination rounds until a single team remains,
    # then report that team's name.
    remaining = teams
    while len(remaining) >= 2:
        remaining = simulate_round(remaining)
    return remaining[0]["team"]
if __name__ == "__main__":
main()
| 25.951807 | 116 | 0.606314 |
a8f0a79f5063b87a9e69329703f8c654328e1170 | 972 | py | Python | test_durak_pytest.py | Fayzak/PythonDevelopLesson10 | 7401ef134d69f6de58b002d08c846f18d8690cf0 | [
"MIT"
] | null | null | null | test_durak_pytest.py | Fayzak/PythonDevelopLesson10 | 7401ef134d69f6de58b002d08c846f18d8690cf0 | [
"MIT"
] | null | null | null | test_durak_pytest.py | Fayzak/PythonDevelopLesson10 | 7401ef134d69f6de58b002d08c846f18d8690cf0 | [
"MIT"
] | null | null | null | import pytest
import durak_game
from durak_game import DurakGame
from durak_game import Hands
class TestGame:
    """Tests for deck generation and hand dealing in the durak game."""

    def setup(self):
        # Fresh deck for every test method.
        self.gen = durak_game.gen_deck()
    def teardown(self):
        pass
    def test_gen(self):
        """
        Check that hands are generated from the deck without repetition
        (the two dealt hands must differ).
        :return:
        """
        test_gen = Hands(self.gen)
        h_hand = test_gen.gen_h_hand()
        c_hand = test_gen.gen_c_hand()
        assert h_hand != c_hand
    def test_count_cards(self):
        """
        Check the number of cards in the deck (a count of 34 is expected
        to be wrong, hence the expected AssertionError).
        :return:
        """
        with pytest.raises(AssertionError):
            assert len(self.gen) == 34
    def test_count_hands(self):
        """
        Check that both hands hold the same number of cards.
        :return:
        """
        test_gen = Hands(self.gen)
        h_hand = test_gen.gen_h_hand()
        c_hand = test_gen.gen_c_hand()
        assert len(h_hand) == len(c_hand)
| 23.142857 | 62 | 0.591564 |
ddcaf2ed3ea0a4c2f736faef8c6ee83f6b04d8f1 | 239 | py | Python | site-packages/serpent/game_launchers/__init__.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 6,762 | 2017-09-17T20:28:40.000Z | 2022-03-31T12:35:47.000Z | site-packages/serpent/game_launchers/__init__.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 159 | 2017-09-19T21:54:58.000Z | 2021-03-26T18:15:58.000Z | serpent/game_launchers/__init__.py | PiterPentester/SerpentAI | 614bafd3c2df3ee6736309d46a7b92325f9a2d15 | [
"MIT"
] | 880 | 2017-09-23T01:16:50.000Z | 2022-03-27T18:58:30.000Z | from serpent.game_launchers.steam_game_launcher import SteamGameLauncher
from serpent.game_launchers.executable_game_launcher import ExecutableGameLauncher
from serpent.game_launchers.web_browser_game_launcher import WebBrowserGameLauncher | 79.666667 | 83 | 0.92887 |
4a5ebb38c4154d25695c1c99a6ffbcd028cd27a5 | 2,219 | py | Python | tests/test_SVqcVCF.py | dbmi-bgm/granite | 88a68f95c18f8541e01697f0012d1cd86c00db47 | [
"MIT"
] | 3 | 2020-08-06T15:36:53.000Z | 2021-01-20T15:31:05.000Z | tests/test_SVqcVCF.py | dbmi-bgm/granite | 88a68f95c18f8541e01697f0012d1cd86c00db47 | [
"MIT"
] | 1 | 2021-01-08T16:54:16.000Z | 2021-01-08T16:54:16.000Z | tests/test_SVqcVCF.py | dbmi-bgm/granite | 88a68f95c18f8541e01697f0012d1cd86c00db47 | [
"MIT"
] | 2 | 2020-02-28T22:33:17.000Z | 2021-01-27T17:47:20.000Z | #################################################################
# Libraries
#################################################################
import sys, os
import pytest
import json
from granite.SVqcVCF import (
main as main_SVqcVCF
)
#################################################################
# Tests
#################################################################
def test_fail_SVqcVCF_no_SVTYPE():
    """A variant record lacking SVTYPE in INFO must raise a parsing error."""
    #Variables
    args = {'inputfile': 'tests/files/SVqcVCF_no_SVTYPE.vcf.gz','outputfile':'tests/files/main_test.out','samples':["TESTSAMPLE", "TESTSAMPLE2"], 'verbose': None}
    # Run and Tests
    with pytest.raises(Exception, match = "ERROR in parsing vcf, variant at position 1:46000 does not contain SVTYPE in INFO"):
        main_SVqcVCF(args)
def test_fail_SVqcVCF_wrong_SVTYPE():
    """A variant record with an unrecognised SVTYPE value must raise."""
    #Variables
    args = {'inputfile': 'tests/files/SVqcVCF_wrong_SVTYPE.vcf.gz','outputfile':'tests/files/main_test.out','samples':["TESTSAMPLE", "TESTSAMPLE2"], 'verbose': None}
    # Run and Tests
    with pytest.raises(Exception, match = "ERROR in parsing vcf, variant at position 1:46000 contains unexpected SVTYPE \"SNV\" in INFO"):
        main_SVqcVCF(args)
def test_success_SVqcVCF_twoSamples():
    """Running with two samples must reproduce the checked-in JSON report."""
    #Variables
    args = {'inputfile': 'tests/files/SVqcVCF_success.vcf.gz','outputfile':'tests/files/main_test.out','samples':["TESTSAMPLE", "TESTSAMPLE2"], 'verbose': None}
    # Run
    main_SVqcVCF(args)
    # Tests: compare produced JSON against the expected fixture
    with open('tests/files/main_test.out') as fi:
        d1 = json.load(fi)
    with open('tests/files/SVqcVCF_twoSamples.json') as fi:
        d2 = json.load(fi)
    assert d1 == d2
    # Clean
    os.remove('tests/files/main_test.out')
def test_success_SVqcVCF_oneSample():
    """Running with a single sample must reproduce the checked-in JSON report."""
    #Variables
    args = {'inputfile': 'tests/files/SVqcVCF_success.vcf.gz','outputfile':'tests/files/main_test.out','samples':["TESTSAMPLE"], 'verbose': None}
    # Run
    main_SVqcVCF(args)
    # Tests: compare produced JSON against the expected fixture
    with open('tests/files/main_test.out') as fi:
        d1 = json.load(fi)
    with open('tests/files/SVqcVCF_oneSample.json') as fi:
        d2 = json.load(fi)
    assert d1 == d2
    # Clean
    os.remove('tests/files/main_test.out')
| 38.258621 | 165 | 0.5863 |
b4475a4a03f7c9093555067fc078911dbff9de53 | 730 | py | Python | utils.py | mark-baba/FinalYearProject | 3f27a66a67a256b6b90e988d6523ab9ea7ca1af0 | [
"MIT"
] | null | null | null | utils.py | mark-baba/FinalYearProject | 3f27a66a67a256b6b90e988d6523ab9ea7ca1af0 | [
"MIT"
] | null | null | null | utils.py | mark-baba/FinalYearProject | 3f27a66a67a256b6b90e988d6523ab9ea7ca1af0 | [
"MIT"
] | null | null | null | # import xlrd
# import openpyxl as xl
from time import time
def sem_to_year(sem):
    """Map a semester number to its academic year (two semesters per year).

    Uses round(), so .5 values follow banker's rounding
    (semester 1 -> 0, semester 3 -> 2).
    """
    half = int(sem) / 2
    return round(half)
# def xlsx_to_xls(file):
# wb = xl.load_workbook(file)
# filepath = f"{file.rsplit('.', 1)[0]}.xls"
# wb.save(filepath)
# return filepath
def len_check(roll):
    """Return *roll* as a string left-padded with zeros to at least 3 characters."""
    text = roll if isinstance(roll, str) else str(roll)
    # rjust pads on the left and leaves strings of length >= 3 unchanged,
    # exactly matching the original 1/2-character padding branches.
    return text.rjust(3, "0")
def create_roll(dept, batch, roll) -> str:
    """Compose a roll-number string: <dept><batch>/<zero-padded roll>."""
    padded = len_check(str(roll))
    return "{}{}/{}".format(dept, batch, padded)
def generate_employee_id():
    """Generate a quasi-unique teacher employee id of the form RCC-TCHR-<xyz>.

    <xyz> is the last three characters of str(time.time()), so ids generated
    within the same fraction of a second may collide.
    """
    # Negative slicing replaces the old time_var[len(time_var)-3:] idiom;
    # the leftover debug print() was removed.
    suffix = str(time())[-3:]
    return f"RCC-TCHR-{suffix}"
| 22.121212 | 50 | 0.609589 |
f889f9a8be51ae0aca6475292608bdf446bda1a3 | 1,712 | py | Python | app/core/migrations/0001_initial.py | NinoBaus/recipe-app-api | 9b84197aa8c76d73b26cc24a84b57e77ee61e833 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | NinoBaus/recipe-app-api | 9b84197aa8c76d73b26cc24a84b57e77ee61e833 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | NinoBaus/recipe-app-api | 9b84197aa8c76d73b26cc24a84b57e77ee61e833 | [
"MIT"
] | null | null | null | # Generated by Django 3.2rc1 on 2021-03-21 12:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the custom email-based User model."""
    # First migration of this app; applied against an empty schema.
    initial = True
    # Needs django.contrib.auth migrated first for the Group/Permission M2M targets.
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email (not username) is the unique identifier for this model.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.352941 | 266 | 0.640187 |
65cdb673096ca85da04d8bf1666061051ffb8b51 | 400 | py | Python | helper_files/get_todays_date.py | kant/COVID_Web_Scraper | d98a29e387db10fc5d9c78e7ff2a727607b4f43b | [
"MIT"
] | null | null | null | helper_files/get_todays_date.py | kant/COVID_Web_Scraper | d98a29e387db10fc5d9c78e7ff2a727607b4f43b | [
"MIT"
] | 1 | 2020-09-30T21:25:10.000Z | 2020-09-30T21:25:10.000Z | helper_files/get_todays_date.py | kant/COVID_Web_Scraper | d98a29e387db10fc5d9c78e7ff2a727607b4f43b | [
"MIT"
] | 2 | 2021-01-04T04:45:21.000Z | 2021-01-10T02:24:06.000Z |
from datetime import date
def get_todays_date():
    """Return today's (day, month) as zero-padded strings, e.g. ('05', '09').

    Equivalent to splitting str(date.today()) ('YYYY-MM-DD') on '-', but uses
    strftime directly instead of building and re-parsing the ISO string.
    """
    today = date.today()
    return today.strftime('%d'), today.strftime('%m')
93ce6276be78c3af2e2169265f73c2dfd4766931 | 537 | py | Python | django_rest_auth_embedded/views/registration.py | Volkova-Natalia/django_rest_auth_embedded | 43fe1d23f59332a7794365348989599cde44af6e | [
"MIT"
] | null | null | null | django_rest_auth_embedded/views/registration.py | Volkova-Natalia/django_rest_auth_embedded | 43fe1d23f59332a7794365348989599cde44af6e | [
"MIT"
] | 1 | 2021-02-26T16:56:31.000Z | 2021-03-24T09:47:43.000Z | django_rest_auth_embedded/views/registration.py | Volkova-Natalia/django_rest_auth_embedded | 43fe1d23f59332a7794365348989599cde44af6e | [
"MIT"
] | null | null | null | from .base import BaseView
from ..serializers import RegistrationSerializer
class RegistrationView(BaseView):
    """Register a new user account.

    POST: validate the payload with RegistrationSerializer; on success,
    create the user and return its id.
    """
    # --------------------------------------------------
    def post(self, request, *args, **kwargs):
        """Create a user: 201 with {'id': ...} on success, 400 with errors otherwise."""
        serializer = RegistrationSerializer(data=request.data)
        if serializer.is_valid():
            user = serializer.save()
            # NOTE(review): response_201/response_400 come from BaseView --
            # presumably thin wrappers around DRF Response; confirm in .base.
            return self.response_201(
                data={
                    'id': user.id,
                }
            )
        return self.response_400(data=serializer.errors)
| 26.85 | 62 | 0.527002 |
4509fffaedb7c6d7a0c36bc110715bc5d120aecf | 21,759 | py | Python | textpy/scanners.py | goodmami/textpy | 4c1d504bae1a4018a76c566dbcbe70fe3b2282e9 | [
"MIT"
] | 3 | 2020-03-25T10:27:32.000Z | 2022-02-07T04:28:29.000Z | textpy/scanners.py | goodmami/textpy | 4c1d504bae1a4018a76c566dbcbe70fe3b2282e9 | [
"MIT"
] | null | null | null | textpy/scanners.py | goodmami/textpy | 4c1d504bae1a4018a76c566dbcbe70fe3b2282e9 | [
"MIT"
] | null | null | null |
import re
from functools import partial
# Public API exported by `from textpy.scanners import *`.
__all__ = [
    'Match',
    'Scanner',
    'Dot',
    'CharacterClass',
    'Literal',
    'Regex',
    'Spacing',
    'Integer',
    'Float',
    'BoundedString',
    'Bounded',
    'Sequence',
    'Choice',
    'Repeat',
    'ZeroOrMore',
    'OneOrMore',
    'Optional',
    'Lookahead',
    'NegativeLookahead',
    'Nonterminal',
    'Group',
    'split',
]
# Prefer the C-accelerated implementations from textpy._scanners when the
# compiled extension is available; otherwise fall back to the pure-Python
# classes defined below.
try:
    from textpy._scanners import (
        Scanner as c_Scanner,
        Dot as c_Dot,
        CharacterClass as c_CharacterClass,
        Literal as c_Literal,
        Regex as c_Regex,
        Spacing as c_Spacing,
        Integer as c_Integer,
        Float as c_Float,
        BoundedString as c_BoundedString,
        Bounded as c_Bounded,
        Sequence as c_Sequence,
        Choice as c_Choice,
        Repeat as c_Repeat,
        Optional as c_Optional,
        Lookahead as c_Lookahead,
        NegativeLookahead as c_NegativeLookahead,
        Nonterminal as c_Nonterminal,
        Group as c_Group,
    )
except ImportError:
    # Extension not built/installed: set every C name to None so the
    # `c_X or py_X` aliases pick the pure-Python versions.
    c_Scanner = None
    c_Dot = None
    c_CharacterClass = None
    c_Literal = None
    c_Regex = None
    c_Spacing = None
    c_Integer = None
    c_Float = None
    c_BoundedString = None
    c_Bounded = None
    c_Sequence = None
    c_Choice = None
    c_Repeat = None
    c_Optional = None
    c_Lookahead = None
    c_NegativeLookahead = None
    c_Nonterminal = None
    c_Group = None
NOMATCH = -1  # _scan() result: the scanner failed to match at the given position
EOS = -2  # end-of-string sentinel (appears unused in this module -- TODO confirm)
NORMAL = 0  # match mode: plain matching
CAPTURE = 1  # match mode: capture submatch values (appears unused here -- TODO confirm)
TRACE = 2  # match mode: build a full trace tree of nested Match objects
class Match(object):
    """Result of a successful match: the [pos, endpos) span of *string* plus a value."""
    def __init__(self, s, pos, endpos, value=None):
        self.string = s
        self.pos = pos
        self.endpos = endpos
        self.value = value
        # self.lastindex = sum(n.lastindex for n in ast)
    def start(self, group=0):
        """Start offset of the whole match (group 0) or of a subgroup."""
        # NOTE(review): self._groups is never assigned in this class, so any
        # group != 0 raises AttributeError unless a caller sets it -- confirm.
        if group == 0:
            return self.pos
        else:
            return self._groups[group].start()
    def end(self, group=0):
        """End offset of the whole match (group 0) or of a subgroup."""
        if group == 0:
            return self.endpos
        else:
            return self._groups[group].end()
    def span(self, group=0):
        """(start, end) offsets for the whole match or a subgroup."""
        if group == 0:
            return (self.pos, self.endpos)
        else:
            return self._groups[group].span()
    def group(self, group=0):
        """Matched substring for the whole match or a subgroup."""
        if group == 0:
            start, end = self.pos, self.endpos
        else:
            start, end = self._groups[group].span()
        return self.string[start:end]
class py_Scanner(object):
    """Base class for pure-Python scanners.

    Subclasses implement _scan(s, pos) -> end offset, or NOMATCH on failure;
    an IndexError raised from _scan is treated as a failed match.
    """
    # True when this scanner (or a sub-scanner) produces captured values.
    capturing = False
    # Optional callable applied to the matched value by match().
    action = None
    def __init__(self, action=None):
        self.action = action
    def scan(self, s, pos=0):
        """Return the end offset of a match at *pos*, or NOMATCH."""
        try:
            return self._scan(s, pos)
        except IndexError:
            # Running off the end of the input counts as no match.
            return NOMATCH
    def _match(self, s, pos, mode):
        # Default matching: delegate to _scan and wrap the span in a Match.
        # *mode* is NORMAL or TRACE; this base version ignores tracing.
        try:
            end = self._scan(s, pos)
            if end == NOMATCH:
                return None
            else:
                val = s[pos:end]
                action = self.action
                if action is not None:
                    val = action(val)
                return Match(s, pos, end, val)
        except IndexError:
            return None
    def match(self, s, pos=0, trace=False):
        """Return a Match at *pos* (trace=True builds a trace tree), or None."""
        return self._match(s, pos, TRACE if trace else NORMAL)
class py_Dot(py_Scanner):
    """Scanner that consumes exactly one character of any kind."""

    def __repr__(self):
        return 'Dot()'

    def __str__(self):
        return '.'

    def _scan(self, s, pos):
        # Indexing raises IndexError at end of input, which scan() maps
        # to NOMATCH.
        _ = s[pos]
        return pos + 1
class py_CharacterClass(py_Scanner):
    """Scanner for a [...] style character class with ranges (e.g. "a-z0-9_")."""
    def __init__(self, clsstr, action=None):
        """Parse *clsstr* into literal characters and (low, high) ranges.

        action: optional callable applied to the matched text by match().
        """
        self.action = action
        self._clsstr = clsstr
        self._ranges = []
        self._chars = []
        i = 0
        while i < len(clsstr)-2:
            if clsstr[i+1] == u'-':
                self._ranges.append((clsstr[i], clsstr[i+2]))
                # Bug fix: consume the whole "x-y" triple.  Previously only
                # one character was consumed, so '-' (and the range's end
                # character) were wrongly added to the literal set, making
                # e.g. [a-z] also match '-'.
                i += 3
            else:
                self._chars.append(clsstr[i])
                i += 1
        # remaining character(s) cannot be ranges
        while i < len(clsstr):
            self._chars.append(clsstr[i])
            i += 1
    def __repr__(self): return 'CharacterClass({})'.format(repr(self._clsstr))
    def __str__(self): return '[{}]'.format(self._clsstr)
    def _scan(self, s, pos):
        """Return pos+1 if s[pos] belongs to the class, else NOMATCH."""
        c = s[pos]
        if c in self._chars or any(a <= c <= b for a, b in self._ranges):
            return pos + 1
        return NOMATCH
class py_Literal(py_Scanner):
    """Scanner that matches one exact string."""

    def __init__(self, x, action=None):
        self.action = action
        self._x = x
        # Cache the length so _scan avoids recomputing it per call.
        self._xlen = len(x)

    def __repr__(self):
        return 'Literal({})'.format(repr(self._x))

    def __str__(self):
        return '"{}"'.format(self._x)

    def _scan(self, s, pos):
        stop = pos + self._xlen
        return stop if s[pos:stop] == self._x else NOMATCH
class py_Regex(py_Scanner):
    """Scanner backed by a regular expression (pattern string or compiled pattern)."""

    def __init__(self, pattern, action=None):
        self.action = action
        # Accept either a pre-compiled pattern object or a pattern string.
        self.regex = pattern if hasattr(pattern, 'match') else re.compile(pattern)

    def __repr__(self):
        return 'Regex({})'.format(repr(self.regex.pattern))

    def __str__(self):
        return '/{}/'.format(self.regex.pattern)

    def _scan(self, s, pos):
        m = self.regex.match(s, pos=pos)
        return NOMATCH if m is None else m.end()
class py_Spacing(py_Scanner):
    """Scanner that consumes zero or more whitespace characters (never fails)."""

    def __init__(self, ws=u' \t\n\r\f\v', action=None):
        self.action = action
        self._ws = ws

    def __repr__(self):
        return 'Spacing({})'.format(
            repr(self._ws) if self._ws != u' \t\n\r\f\v' else ''
        )

    def __str__(self):
        return '[{}]*'.format(repr(self._ws)[1:-1])

    def _scan(self, s, pos):
        # Bounded loop instead of catching IndexError; same result.
        limit = len(s)
        while pos < limit and s[pos] in self._ws:
            pos += 1
        return pos
class py_Integer(py_Scanner):
    """Scanner for an optionally signed decimal integer ([-+]? digits)."""

    def __repr__(self):
        return 'Integer()'

    def __str__(self):
        return 'Integer'

    def _scan(self, s, pos):
        # Optional sign, then at least one digit.
        if s[pos] in u'-+':
            pos += 1
        ndigits = _scan_digits(s, pos)
        return pos + ndigits if ndigits else NOMATCH
class py_Float(py_Scanner):
    """Scanner for a floating-point literal; bare integers do not match."""
    def __repr__(self): return 'Float()'
    def __str__(self): return 'Float'
    def _scan(self, s, pos):
        """Return the end offset of a float at *pos*, or NOMATCH."""
        # one of:
        # [-+]? \d+\.\d* ([eE][-+]?\d+)?
        # [-+]? \.\d+ ([eE][-+]?\d+)?
        # [-+]? \d+ [eE][-+]?\d+
        # note that bare integers (e.g. 1, -1, etc.) are not accepted
        if s[pos] in u'-+': # [-+]?
            pos += 1
        if s[pos] == u'.': # \.\d+ ([eE][-+]?\d+)?
            dpos = _scan_digits(s, pos+1)
            if dpos == 0:
                return NOMATCH
            pos += dpos + 1
            # optional exponent (adds 0 when absent)
            pos += _scan_exponent(s, pos)
        else: # other two patterns begin with \d+
            dpos = _scan_digits(s, pos)
            if dpos == 0:
                return NOMATCH
            pos += dpos
            if s[pos] == u'.': # \d+\.\d* ([eE][-+]?\d+)?
                pos += 1
                pos += _scan_digits(s, pos)
                pos += _scan_exponent(s, pos)
            else: # \d+ [eE][-+]?\d+
                # without a decimal point, an exponent is mandatory
                dpos = _scan_exponent(s, pos)
                if dpos == 0:
                    return NOMATCH
                pos += dpos
        return pos
class py_BoundedString(py_Scanner):
    """Scanner for a delimited string (first ... last) with backslash escapes."""
    def __init__(self, first, last, action=None):
        self.action = action
        self.first = first
        self.last = last
    def __repr__(self):
        return 'BoundedString("{}", "{}")'.format(self.first, self.last)
    def __str__(self): return 'BoundedString'
    def _scan(self, s, pos):
        """Return the offset just past the closing delimiter, or NOMATCH."""
        a, b = self.first, self.last
        alen, blen = len(a), len(b)
        if s[pos:pos+alen] != a:
            return NOMATCH
        pos += alen
        # Consume until the closing delimiter; a backslash skips the next
        # character.  If the delimiter is never found, s[pos] eventually
        # raises IndexError, which scan() converts to NOMATCH.
        while s[pos:pos+blen] != b:
            if s[pos] == u'\\':
                pos += 2
            else:
                pos += 1
        return pos + blen
class py_Bounded(py_Scanner):
    """Scanner for lhs body rhs, where only body's match value is kept."""
    def __init__(self, lhs, body, rhs, action=None):
        self.action = action
        self._lhs = lhs
        self._body = body
        self._rhs = rhs
    def __repr__(self):
        return 'Bounded({}, {}, {})'.format(
            repr(self._lhs), repr(self._body), repr(self._rhs)
        )
    def __str__(self): return 'Bounded'
    def _scan(self, s, pos):
        # Chain lhs -> body -> rhs; any NOMATCH short-circuits.
        end = self._lhs._scan(s, pos)
        if end >= 0:
            end = self._body._scan(s, end)
            if end >= 0:
                end = self._rhs._scan(s, end)
        return end
    def _match(self, s, pos, mode):
        """Return body's Match widened to cover lhs and rhs, or None."""
        end = self._lhs._scan(s, pos)
        m = None
        if end >= 0:
            m = self._body._match(s, end, mode)
            if m is not None:
                end = self._rhs._scan(s, m.endpos)
                if end < 0:
                    m = None
                else:
                    if mode == TRACE:
                        m = Match(s, pos, end, [m])
                    else:
                        # Reuse body's Match but extend its span to include
                        # the delimiters.
                        action = self.action
                        if action is not None:
                            m.value = action(m.value)
                        m.pos = pos
                        m.endpos = end
        return m
class py_Sequence(py_Scanner):
    """Scanner that matches its sub-scanners one after another."""
    def __init__(self, *scanners, action=None):
        self.action = action
        self._scanners = scanners
        # The sequence captures if any member captures.
        self.capturing = any(s.capturing for s in scanners)
    def __repr__(self):
        return 'Sequence({})'.format(', '.join(map(repr, self._scanners)))
    def __str__(self):
        return ' '.join(map(str, self._scanners))
    def _scan(self, s, pos):
        # Thread the position through every member; stop on first failure.
        for scanner in self._scanners:
            pos = scanner._scan(s, pos)
            if pos == NOMATCH:
                break
        return pos
    def _match(self, s, pos, mode):
        """Return a Match covering all members, or None.

        Capturing members contribute to a list value (extended when the
        member has no action, appended otherwise); non-capturing members
        are only scanned for position.
        """
        val = []
        end = pos
        for scanner in self._scanners:
            if mode == TRACE or scanner.capturing:
                m = scanner._match(s, end, mode)
                if m is None:
                    return None
                end = m.endpos
                if mode == TRACE:
                    val.append(m)
                elif scanner.action is None:
                    val.extend(m.value)
                else:
                    val.append(m.value)
            else:
                end = scanner._scan(s, end)
                if end == NOMATCH:
                    return None
        if mode != TRACE:
            # With no capturing members the value is just the matched text.
            if not self.capturing:
                val = s[pos:end]
            action = self.action
            if action is not None:
                val = action(val)
        return Match(s, pos, end, val)
class py_Choice(py_Scanner):
    """Ordered choice: tries each alternative and returns the first match."""
    def __init__(self, *scanners, action=None):
        self.action = action
        self._scanners = scanners
        # The choice captures if any alternative captures.
        self.capturing = any(s.capturing for s in scanners)
    def __repr__(self):
        return 'Choice({})'.format(', '.join(map(repr, self._scanners)))
    def __str__(self):
        return ' | '.join(map(str, self._scanners))
    def _scan(self, s, pos):
        """Return the end offset of the first matching alternative, or NOMATCH."""
        for scanner in self._scanners:
            endpos = scanner._scan(s, pos)
            if endpos >= 0:
                return endpos
        return NOMATCH
    def _match(self, s, pos, mode):
        """Return the first alternative's Match (wrapped in TRACE mode), or None."""
        # (an unused local, 'val = None', was removed)
        for scanner in self._scanners:
            m = scanner._match(s, pos, mode)
            if m is not None:
                if mode == TRACE:
                    m = Match(s, pos, m.endpos, [m])
                else:
                    action = self.action
                    if action is not None:
                        m.value = action(m.value)
                return m
        return None
class py_Repeat(py_Scanner):
    """Scanner matching *scanner* between *min* and *max* times.

    max == -1 means unbounded (the `count != b` guard never triggers).
    *delimiter*, when given, must match between consecutive repetitions.
    """
    def __init__(self, scanner, min=0, max=-1, delimiter=None, action=None):
        self.action = action
        self._scanner = scanner
        self._min = min
        self._max = max
        self._delimiter = delimiter
        # Capturing if the repeated scanner or the delimiter captures.
        self.capturing = (scanner.capturing or
                          (delimiter is not None and delimiter.capturing))
    def __repr__(self):
        return 'Repeat({}, min={}, max={}, delimiter={})'.format(
            repr(self._scanner), self._min, self._max, repr(self._delimiter)
        )
    def __str__(self): return '{}{{{},{}{}}}'.format(
        str(self._scanner),
        self._min,
        self._max,
        ':' + str(self._delimiter) if self._delimiter is not None else ''
    )
    def _scan(self, s, pos):
        """Return the end offset after as many repetitions as possible, or NOMATCH."""
        scanner, delimiter = self._scanner, self._delimiter
        a, b = self._min, self._max
        count = 0
        try:
            newpos = scanner._scan(s, pos)
            while newpos >= 0 and count != b:
                pos = newpos
                count += 1
                if delimiter is not None:
                    newpos = delimiter._scan(s, pos)
                    if newpos < 0:
                        break
                    newpos = scanner._scan(s, newpos)
                else:
                    newpos = scanner._scan(s, pos)
        except IndexError:
            pass
        # Succeed only if at least *min* repetitions matched.
        if count >= a:
            return pos
        return NOMATCH
    def _match(self, s, pos, mode):
        """Like _scan but collects captured values (and trace Matches)."""
        scanner, delimiter = self._scanner, self._delimiter
        s_is_grp = scanner.capturing
        d_is_grp = delimiter.capturing if delimiter is not None else False
        a, b = self._min, self._max
        count = 0
        val = []
        end = pos
        try:
            m = scanner._match(s, end, mode)
            while m is not None and count != b:
                end = m.endpos
                count += 1
                if mode == TRACE:
                    val.append(m)
                elif s_is_grp:
                    if scanner.action is None:
                        val.extend(m.value)
                    else:
                        val.append(m.value)
                if delimiter is not None:
                    if mode == TRACE or d_is_grp:
                        m = delimiter._match(s, end, mode)
                        if m is None:
                            break
                        d_end = m.endpos
                        if mode == TRACE:
                            val.append(m)
                        elif delimiter.action is None:
                            val.extend(m.value)
                        else:
                            val.append(m.value)
                    else:
                        d_end = delimiter._scan(s, end)
                        if d_end == NOMATCH:
                            break
                    m = scanner._match(s, d_end, mode)
                else:
                    m = scanner._match(s, end, mode)
        except IndexError:
            pass
        if count >= a:
            if mode != TRACE:
                # Without captures the value is simply the matched text.
                if not (s_is_grp or d_is_grp):
                    val = s[pos:end]
                action = self.action
                if action is not None:
                    val = action(val)
            return Match(s, pos, end, val)
        return None
class py_Optional(py_Scanner):
    """Scanner that matches its sub-scanner if possible, else succeeds at zero width.

    *default* is the Match value used when the sub-scanner fails; the
    Ellipsis sentinel selects [] for capturing scanners and '' otherwise.
    """
    def __init__(self, scanner, default=..., action=None):
        self.action = action
        self._scanner = scanner
        if default is Ellipsis:
            self._default = [] if scanner.capturing else ''
        else:
            self._default = default
        self.capturing = scanner.capturing
    def __repr__(self): return 'Optional({})'.format(repr(self._scanner))
    def __str__(self): return str(self._scanner) + '?'
    def _scan(self, s, pos):
        # Failure (NOMATCH or IndexError) degrades to a zero-width success.
        scanner = self._scanner
        try:
            end = scanner._scan(s, pos)
            if end == NOMATCH:
                end = pos
        except IndexError:
            end = pos
        return end
    def _match(self, s, pos, mode):
        scanner = self._scanner
        m = scanner._match(s, pos, mode)
        # NOTE(review): in TRACE mode the returned Match spans (pos, pos)
        # even when the inner scanner consumed input -- confirm intended.
        if mode == TRACE:
            return Match(s, pos, pos, [m]) # m could be None
        elif m is None:
            return Match(s, pos, pos, self._default)
        else:
            return m
class py_Lookahead(py_Scanner):
    """Positive lookahead (&e): succeeds without consuming any input."""

    def __init__(self, scanner):
        self._scanner = scanner

    def __repr__(self):
        return 'Lookahead({})'.format(repr(self._scanner))

    def __str__(self):
        return '&' + str(self._scanner)

    def _scan(self, s, pos):
        # Probe the inner scanner but never advance the position.
        return NOMATCH if self._scanner._scan(s, pos) == NOMATCH else pos
class py_NegativeLookahead(py_Scanner):
    """Negative lookahead (!e): succeeds (at zero width) only when *e* fails."""

    def __init__(self, scanner):
        self._scanner = scanner

    def __repr__(self):
        return 'NegativeLookahead({})'.format(repr(self._scanner))

    def __str__(self):
        return '!' + str(self._scanner)

    def _scan(self, s, pos):
        # Invert the inner result without consuming input.
        return pos if self._scanner._scan(s, pos) == NOMATCH else NOMATCH
class py_Nonterminal(py_Scanner):
    """Scanner that resolves a rule *name* in a *grammar* mapping at scan time."""
    def __init__(self, grammar, name, action=None):
        self.action = action
        self._grammar = grammar
        self._name = name
    def __repr__(self): return 'Nonterminal(<dict>, "{}")'.format(self._name)
    def __str__(self): return self._name
    def _scan(self, s, pos):
        # NOTE(review): grammar[name] raises KeyError for a missing key; the
        # explicit Exception below only fires when the name maps to None.
        scanner = self._grammar[self._name]
        if scanner is None:
            raise Exception(
                'Nonterminal {} is not associated with a grammar'
                .format(self._name)
            )
        return scanner._scan(s, pos)
    def _match(self, s, pos, mode):
        """Delegate to the resolved rule; wrap its Match in TRACE mode."""
        scanner = self._grammar[self._name]
        m = scanner._match(s, pos, mode)
        if m is not None:
            if mode == TRACE:
                m = Match(s, pos, m.endpos, [m])
            else:
                action = self.action
                if action is not None:
                    m.value = action(m.value)
        return m
class py_Group(py_Scanner):
    """Capture group: exposes the wrapped scanner's value as a captured item."""
    def __init__(self, scanner, action=None):
        # (a duplicated 'self.action = action' assignment was removed)
        self.action = action
        self._scanner = scanner
        self.capturing = True
    def __repr__(self): return 'Group({})'.format(repr(self._scanner))
    def __str__(self): return '({})'.format(str(self._scanner))
    def _scan(self, s, pos):
        scanner = self._scanner
        return scanner._scan(s, pos)
    def _match(self, s, pos, mode):
        """Wrap the inner value in a one-element list (or apply the action)."""
        scanner = self._scanner
        m = scanner._match(s, pos, mode)
        if m is not None:
            if mode == TRACE:
                m = Match(s, pos, m.endpos, [m])
            elif self.action is None:
                m.value = [m.value]
            else:
                m.value = self.action(m.value)
        return m
# use fast versions if available
# (each c_* name is None when the C extension failed to import, so `or`
# falls back to the pure-Python py_* implementation)
Scanner = c_Scanner or py_Scanner
Dot = c_Dot or py_Dot
CharacterClass = c_CharacterClass or py_CharacterClass
Literal = c_Literal or py_Literal
Regex = c_Regex or py_Regex
Spacing = c_Spacing or py_Spacing
Integer = c_Integer or py_Integer
Float = c_Float or py_Float
BoundedString = c_BoundedString or py_BoundedString
Bounded = c_Bounded or py_Bounded
Sequence = c_Sequence or py_Sequence
Choice = c_Choice or py_Choice
Repeat = c_Repeat or py_Repeat
Optional = c_Optional or py_Optional
Lookahead = c_Lookahead or py_Lookahead
NegativeLookahead = c_NegativeLookahead or py_NegativeLookahead
Nonterminal = c_Nonterminal or py_Nonterminal
Group = c_Group or py_Group
# convenient partial applications
ZeroOrMore = partial(Repeat, min=0, max=-1, delimiter=None)
OneOrMore = partial(Repeat, min=1, max=-1, delimiter=None)
# utility functions
def split(s, sep=u' \t\v\n\f\r', maxsplit=-1, esc=u'\\', quotes=u'"\''):
    """Split *s* on separator characters while honouring quoting and escapes.

    Quoted substrings are kept as single tokens (delimiters included); an
    escape character protects the following character.  maxsplit < 0 means
    unlimited splits.  Raises ValueError on a trailing bare escape.
    """
    tok_start = i = nsplits = 0
    length = len(s)
    out = []
    quoted = False
    quote_char = u''
    while i < length and (maxsplit < 0 or nsplits < maxsplit):
        ch = s[i]
        if ch in esc:
            if i == length - 1:
                raise ValueError('Runaway escape sequence.')
            i += 1  # the escaped character is consumed together with the escape
        elif quoted:
            if ch == quote_char:
                out.append(s[tok_start:i + 1])  # token keeps both quote marks
                nsplits += 1
                tok_start = i + 1
                quoted = False
        elif ch in quotes:
            if tok_start < i:
                out.append(s[tok_start:i])  # flush any pending token first
                nsplits += 1
            tok_start = i
            quote_char = ch
            quoted = True
        elif ch in sep:
            if tok_start < i:
                out.append(s[tok_start:i])
                nsplits += 1
            tok_start = i + 1
        i += 1
    if tok_start < length:
        out.append(s[tok_start:length])
    return out
# helper functions
# (these helpers do not follow the normal return-value semantics)
def _scan_digits(s, pos):
i = 0
try:
while u'0' <= s[pos+i] <= u'9':
i += 1
except IndexError:
pass
return i
def _scan_exponent(s, pos):
    """Return the length of an exponent part ([eE][-+]?digits) at *pos*, or 0."""
    try:
        if s[pos] not in u'eE':
            return 0
        # Skip 'e' plus an optional sign, then require at least one digit.
        offset = 2 if s[pos + 1] in '-+' else 1
        ndigits = _scan_digits(s, pos + offset)
        if ndigits > 0:
            return ndigits + offset
    except IndexError:
        # Truncated exponent (trailing 'e' or 'e+'): treat as no exponent.
        pass
    return 0
| 29.28533 | 78 | 0.495795 |
d82a8ba14a243bbae73326e95b5ab2b4eaeb4c4a | 10,761 | py | Python | congress/tests/policy_engines/test_agnostic_performance.py | mail2nsrajesh/congress | a724dfb59c43a5e88e2b03e714a5f962d6976762 | [
"Apache-2.0"
] | 50 | 2015-04-21T14:12:01.000Z | 2020-06-01T06:23:13.000Z | congress/tests/policy_engines/test_agnostic_performance.py | mail2nsrajesh/congress | a724dfb59c43a5e88e2b03e714a5f962d6976762 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | congress/tests/policy_engines/test_agnostic_performance.py | mail2nsrajesh/congress | a724dfb59c43a5e88e2b03e714a5f962d6976762 | [
"Apache-2.0"
] | 25 | 2015-05-22T04:02:33.000Z | 2020-01-14T12:15:12.000Z | # Copyright (c) 2015 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tenacity
from oslo_config import cfg
from oslo_log import log as logging
from congress.datalog import base
from congress.datalog import compile
from congress.policy_engines import agnostic
from congress.tests.api import base as api_base
from congress.tests import base as testbase
from congress.tests.datasources import performance_datasource_driver
from congress.tests import helper
LOG = logging.getLogger(__name__)
NREC_THEORY = 'non-recursive theory'
DB_THEORY = 'database'
ACTION_THEORY = 'action'
class TestRuntimePerformance(testbase.TestCase):
    """Tests for Runtime performance that are not specific to any theory.
    To run one test:
    nosetests -v \
    congress/tests/policy_engines/test_agnostic_performance.py:TestRuntimePerformance.test_foo
    To collect profiling data:
    python -m cProfile -o profile.out `which nosetests` -v \
    congress/tests/policy_engines/test_agnostic_performance.py:TestRuntimePerformance.test_foo
    To parse and sort profiling data in different ways:
    import pstats
    pstats.Stats('profile.out').strip_dirs().sort_stats("cum").print_stats()
    pstats.Stats('profile.out').strip_dirs().sort_stats("time").print_stats()
    pstats.Stats('profile.out').strip_dirs().sort_stats("calls").print_stats()
    """
    def setUp(self):
        super(TestRuntimePerformance, self).setUp()
        # Fresh Runtime with one non-recursive and one database policy.
        self._agnostic = agnostic.Runtime()
        self._agnostic.create_policy(NREC_THEORY,
                                     kind=base.NONRECURSIVE_POLICY_TYPE)
        self._agnostic.create_policy(DB_THEORY, kind=base.DATABASE_POLICY_TYPE)
        self._agnostic.debug_mode()
        self._agnostic.insert('', target=NREC_THEORY)
    # Helper: build an insert/delete Event for table(tuple_) in *target*.
    def _create_event(self, table, tuple_, insert, target):
        return compile.Event(compile.Literal.create_from_table_tuple(table,
                                                                     tuple_),
                             insert=insert, target=target)
    # Helper: populate p (n^3 rows) and q (n rows) in *theory*.
    def _create_large_tables(self, n, theory):
        facts = [compile.Fact('p', (i, j, k))
                 for i in range(n) for k in range(n) for j in range(n)]
        facts.extend(compile.Fact('q', (i,)) for i in range(n))
        self._agnostic.initialize_tables(['p', 'q'], facts, theory)
    def test_insert_nonrecursive(self):
        MAX = 100
        th = NREC_THEORY
        for i in range(MAX):
            self._agnostic.insert('r(%d)' % i, th)
    def test_insert_database(self):
        MAX = 100
        th = DB_THEORY
        for i in range(MAX):
            self._agnostic.insert('r(%d)' % i, th)
    def test_update_nonrecursive(self):
        MAX = 10000
        th = NREC_THEORY
        updates = [self._create_event('r', (i,), True, th)
                   for i in range(MAX)]
        self._agnostic.update(updates)
    def test_update_database(self):
        MAX = 1000
        th = DB_THEORY
        updates = [self._create_event('r', (i,), True, th)
                   for i in range(MAX)]
        self._agnostic.update(updates)
    def test_indexing(self):
        MAX = 100
        th = NREC_THEORY
        for table in ('a', 'b', 'c'):
            updates = [self._create_event(table, (i,), True, th)
                       for i in range(MAX)]
            self._agnostic.update(updates)
        # With indexing, this query should take O(n) time where n is MAX.
        # Without indexing, this query will take O(n^3).
        self._agnostic.insert('d(x) :- a(x), b(x), c(x)', th)
        ans = ' '.join(['d(%d)' % i for i in range(MAX)])
        self.assertTrue(helper.datalog_equal(self._agnostic.select('d(x)',
                                                                   th), ans))
    def test_runtime_initialize_tables(self):
        MAX = 700
        longstring = 'a' * 100
        facts = (compile.Fact('p',
                              (1, 2, 'foo', 'bar', i, longstring + str(i)))
                 for i in range(MAX))
        th = NREC_THEORY
        self._agnostic.initialize_tables(['p'], facts, th)
    def test_select_1match(self):
        # with different types of policies (exercise indexing, large sets,
        # many joins, etc)
        MAX = 10
        th = NREC_THEORY
        self._create_large_tables(MAX, th)
        self._agnostic.insert('r(x,y) :- p(x,x,y), q(x)', th)
        for i in range(100):
            # This select returns 1 result
            self._agnostic.select('r(1, 1)', th)
    def test_select_100matches(self):
        # with different types of policies (exercise indexing, large sets,
        # many joins, etc)
        MAX = 10
        th = NREC_THEORY
        self._create_large_tables(MAX, th)
        self._agnostic.insert('r(x,y) :- p(x,x,y), q(x)', th)
        # This takes about 60ms per select
        for i in range(10):
            # This select returns 100 results
            self._agnostic.select('r(x, y)', th)
    def test_simulate_latency(self):
        # We think the cost will be the sum of the simulate call + the cost to
        # do and undo the evaluation, so this test should focus on the cost
        # specific to the simulate call, so the test should do a minimal
        # amount of evaluation.
        MAX = 10
        th = NREC_THEORY
        self._create_large_tables(MAX, th)
        self._agnostic.create_policy(ACTION_THEORY,
                                     kind=base.ACTION_POLICY_TYPE)
        self._agnostic.insert('q(0)', th)
        self._agnostic.insert('s(x) :- q(x), p(x,0,0)', th)
        # This takes about 13ms per simulate. The query for s(x) can use
        # indexing, so it should be efficient.
        for _ in range(100):
            self._agnostic.simulate('s(x)', th, 'p-(0,0,0)',
                                    ACTION_THEORY, delta=True)
    def test_simulate_throughput(self):
        # up to 250 requests per second
        pass
    def test_update_rate(self):
        pass
    def test_concurrency(self):
        pass
class TestDsePerformance(testbase.SqlTestCase):
    """Performance tests that exercise the datasource driver, DSE, and engine."""

    def setUp(self):
        super(TestDsePerformance, self).setUp()
        self.services = api_base.setup_config(with_fake_datasource=False,
                                              node_id="perf")
        cfg.CONF.set_override(
            'drivers',
            [('congress.tests.datasources.performance_datasource_driver'
              '.PerformanceTestDriver')])
        self.node = self.services['node']
        self.engine = self.services['engine']

    def tearDown(self):
        self.node.stop()
        super(TestDsePerformance, self).tearDown()

    # NOTE(review): no 'stop' argument is given, so tenacity retries forever
    # if the data never arrives -- presumably an outer test timeout applies;
    # confirm before relying on this in a gate.
    @tenacity.retry(wait=tenacity.wait_fixed(0.1))
    def wait_til_query_nonempty(self, query, policy):
        """Block (retrying every 0.1s) until *query* returns at least one row."""
        if len(self.engine.select(query, target=policy)) == 0:
            # Bug fix: the old message said "is not empty" although this
            # exception is raised precisely when the result IS empty.
            raise Exception("Query %s is empty" % query)

    def test_initialize_tables_dse(self):
        """Test performance of initializing data with DSE and Engine.
        This test populates the tables exported by a datasource driver,
        and then invokes the poll() method to send that data to the
        policy engine. It tests the amount of time to send tables
        across the DSE and load them into the policy engine.
        """
        MAX_TUPLES = 700
        # Install a datasource driver we can control (no real polling).
        kwds = helper.datasource_openstack_args()
        kwds['poll_time'] = 0
        driver = performance_datasource_driver.PerformanceTestDriver('data',
                                                                     args=kwds)
        self.node.register_service(driver)
        self.engine.create_policy('data')
        # Set the driver's exported table contents directly.
        facts = [(1, 2.3, 'foo', 'bar', i, 'a'*100 + str(i))
                 for i in range(MAX_TUPLES)]
        driver.state = {'p': facts}
        # Send a rule to the engine so it subscribes to data:p.
        policy = self.engine.DEFAULT_THEORY
        formula = compile.parse1(
            'q(1) :- data:p(1, 2.3, "foo", "bar", 1, %s)' % ('a'*100 + '1'))
        self.engine.process_policy_update(
            [compile.Event(formula, target=policy)])
        # Poll data and wait until it arrives at the engine.
        driver.poll()
        self.wait_til_query_nonempty('q(1)', policy)

    def test_initialize_tables_full(self):
        """Test performance of initializing data with Datasource, DSE, Engine.
        This test gives a datasource driver the Python data that would
        have resulted from making an API call and parsing it into Python
        and then polls that datasource, waiting until the data arrives
        in the policy engine. It tests the amount of time required to
        translate Python data into tables, send those tables over the DSE,
        and load them into the policy engine.
        """
        MAX_TUPLES = 700
        # Install a datasource driver we can control (no real polling).
        kwds = helper.datasource_openstack_args()
        kwds['poll_time'] = 0
        driver = performance_datasource_driver.PerformanceTestDriver('data',
                                                                     args=kwds)
        self.node.register_service(driver)
        self.engine.create_policy('data')
        # Supply raw client data; the driver translates it into tables.
        facts = [{'field1': 1,
                  'field2': 2.3,
                  'field3': 'foo',
                  'field4': 'bar',
                  'field5': i,
                  'field6': 'a'*100 + str(i)}
                 for i in range(MAX_TUPLES)]
        driver.client_data = facts
        # Send a rule to the engine so it subscribes to data:p.
        policy = self.engine.DEFAULT_THEORY
        formula = compile.parse1(
            'q(1) :- data:p(1, 2.3, "foo", "bar", 1, %s)' % ('a'*100 + '1'))
        LOG.info("publishing rule")
        self.engine.process_policy_update(
            [compile.Event(formula, target=policy)])
        # Poll data and wait until it arrives at the engine.
        driver.poll()
        self.wait_til_query_nonempty('q(1)', policy)
| 37.235294 | 96 | 0.605148 |
16384702c75d0f96f37fabb23e958d17a9922ba1 | 5,442 | py | Python | api/inventory/inventory.py | ufosoftwarellc/cannlytics | 236bd597e30530666400fef6dceaae6de6aa587b | [
"MIT"
] | 1 | 2021-06-07T13:53:06.000Z | 2021-06-07T13:53:06.000Z | api/inventory/inventory.py | ufosoftwarellc/cannlytics | 236bd597e30530666400fef6dceaae6de6aa587b | [
"MIT"
] | null | null | null | api/inventory/inventory.py | ufosoftwarellc/cannlytics | 236bd597e30530666400fef6dceaae6de6aa587b | [
"MIT"
] | null | null | null | """
Inventory Views | Cannlytics API
Created: 4/21/2021
API to interface with inventory.
"""
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
@api_view(['GET', 'POST'])
def inventory(request, format=None):
    """Get, create, or update inventory.

    Placeholder endpoint: both supported methods currently report that the
    operation is not implemented.
    """
    # The decorator restricts dispatch to GET/POST, so this covers all
    # requests that can reach the view body.
    if request.method in ('GET', 'POST'):
        return Response({'error': 'not_implemented'}, content_type='application/json')
# Return an error if no author is specified.
# error_message = 'Unknown error, please notify <support@cannlytics.com>'
# return Response(
# {'error': error_message},
# content_type='application/json',
# status=status.HTTP_400_BAD_REQUEST
# )
#------------------------------------------------------------------
# Items ✓
#------------------------------------------------------------------
# # Create an item using: POST /items/v1/create
# item_name = 'New Old-Time Moonshine Teenth'
# item = Item.create_from_json(track, cultivator.license_number, {
# 'ItemCategory': 'Flower & Buds',
# 'Name': item_name,
# 'UnitOfMeasure': 'Ounces',
# 'Strain': strain_name,
# })
# # Create additional products for future use.
# item = Item.create_from_json(track, cultivator.license_number, {
# 'ItemCategory': 'Shake/Trim',
# 'Name': 'New Old-Time Moonshine Shake',
# 'UnitOfMeasure': 'Grams',
# 'Strain': strain_name,
# })
# # Get the item's UID.
# new_item = None
# items = track.get_items(license_number=cultivator.license_number)
# for i in items:
# print(i.name, '|', i.product_category_name)
# if i.name == item_name:
# new_item = i
# # Change the Unit Of Measure Type using: POST /items/v1/update
# new_item.update(unit_of_measure='Grams')
# # View the item using: GET /Items/v1/{id}
# traced_item = track.get_items(uid=new_item.id, license_number=cultivator.license_number)
# print('Successfully created, updated, and retrieved item:')
# print(traced_item.id, '|', traced_item.unit_of_measure)
# # Create items used for batches.
# clone = Item.create_from_json(track, cultivator.license_number, {
# 'ItemCategory': 'Seeds',
# 'Name': 'New Old-Time Moonshine Mature Plants',
# 'UnitOfMeasure': 'Each',
# 'Strain': strain_name,
# })
# # Get the clone for future use.
# clone_uid = '12324'
# clone_item = track.get_items(uid=clone_uid, license_number=cultivator.license_number)
#------------------------------------------------------------------
# Packages ✓
#------------------------------------------------------------------
# Step 1 Using the Package created in Harvest Step 1 OR create a
# package from an existing package that you have found.
# Create a package from another package using: POST /packages/v1/create
# Get the package created earlier.
# packs = track.get_packages(license_number=cultivator.license_number)
# package_id = '13801'
# traced_package = track.get_packages(
# uid=package_id,
# license_number=cultivator.license_number
# )
# new_package_tag = 'YOUR_SECOND_PACKAGE_TAG'
# new_package_data = {
# 'Tag': new_package_tag,
# 'Location': 'Warehouse',
# 'Item': 'New Old-Time Moonshine Teenth',
# 'Quantity': 1.75,
# 'UnitOfMeasure': 'Grams',
# # 'PatientLicenseNumber': 'X00001',
# 'Note': '1st teenth for sale.',
# # 'IsProductionBatch': False,
# # 'ProductionBatchNumber': None,
# # 'IsDonation': False,
# # 'ProductRequiresRemediation': False,
# # 'UseSameItem': True,
# 'ActualDate': today,
# 'Ingredients': [
# {
# 'Package': traced_package.label,
# 'Quantity': 1.75,
# 'UnitOfMeasure': 'Grams'
# }
# ]
# }
# traced_package.create_package(new_package_data)
# new_package = track.get_packages(label=new_package_tag, license_number=cultivator.license_number)
# print(new_package.last_modified)
# # Step 2 Using the new package created in Packages Step 1
# # change the item of a package using: POST/packages/v1/change/item
# new_package.change_item(item_name='New Old-Time Moonshine Kief')
# new_package = track.get_packages(uid=new_package.id, license_number=cultivator.license_number)
# print(new_package.last_modified)
# # Step 3 Using the new package created in Packages Step 1
# # adjust the weight to 0 using: POST/packages/v1/adjust
# adjustment = {
# 'Label': new_package_tag,
# 'Quantity': -1.75,
# 'UnitOfMeasure': 'Grams',
# 'AdjustmentReason': 'Drying',
# 'AdjustmentDate': today,
# 'ReasonNote': None
# }
# new_package.adjust(weight=-1.75, note='Look ma, no weight!')
# new_package = track.get_packages(uid=new_package.id, license_number=cultivator.license_number)
# print(new_package.last_modified)
# # Step 4 Using the new package created in Packages Step 1
# # Finish a package using: POST/packages/v1/finish
# new_package.finish()
# new_package = track.get_packages(uid=new_package.id, license_number=cultivator.license_number)
# print(new_package.last_modified)
# # Step 5 Using the new package created in Packages Step 1
# # Unfinish a package using: POST/packages/v1/unfinish
# new_package.unfinish()
# new_package = track.get_packages(uid=new_package.id, license_number=cultivator.license_number)
# print(new_package.last_modified)
| 34.66242 | 99 | 0.661521 |
3441e7dfd5e31f371bc74ca6cd0a26cadfe3bba3 | 2,110 | py | Python | py/desispec/test/test_pipeline_plan.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | 24 | 2015-09-29T06:06:29.000Z | 2022-01-14T07:31:45.000Z | py/desispec/test/test_pipeline_plan.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | 1,452 | 2015-02-26T00:14:23.000Z | 2022-03-31T23:35:10.000Z | py/desispec/test/test_pipeline_plan.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | 25 | 2015-02-06T21:39:13.000Z | 2022-02-22T14:16:31.000Z | """
tests desispec.pipeline.core
"""
import os
import unittest
import shutil
import time
import numpy as np
#
# from desispec.pipeline.common import *
# from desispec.pipeline.graph import *
# from desispec.pipeline.plan import *
# import desispec.io as io
#
# from . import pipehelpers as ph
class TestPipelinePlan(unittest.TestCase):
    """Placeholder test case for desispec.pipeline planning.

    All bodies are currently disabled (kept as commented-out code) pending a
    pipeline rewrite; each method is a no-op so the suite still imports and
    passes.
    """

    def setUp(self):
        # Disabled: would build a fake raw/redux production environment.
        # self.prod = "test"
        # self.raw = ph.fake_raw()
        # self.redux = ph.fake_redux(self.prod)
        # ph.fake_env(self.raw, self.redux, self.prod, self.prod)
        # self.specs = [ x for x in range(10) ]
        pass

    def tearDown(self):
        # Disabled: would remove the fake environment created in setUp.
        # if os.path.exists(self.raw):
        #     shutil.rmtree(self.raw)
        # if os.path.exists(self.redux):
        #     shutil.rmtree(self.redux)
        # ph.fake_env_clean()
        pass

    def test_select_nights(self):
        # Disabled: would check regex-based night selection.
        # allnights = [
        #     "20150102",
        #     "20160204",
        #     "20170103",
        #     "20170211"
        # ]
        # checkyear = [
        #     "20170103",
        #     "20170211"
        # ]
        # checkmonth = [
        #     "20160204",
        #     "20170211"
        # ]
        # selyear = select_nights(allnights, r"2017.*")
        # self.assertTrue(selyear == checkyear)
        # selmonth = select_nights(allnights, r"[0-9]{4}02[0-9]{2}")
        # self.assertTrue(selmonth == checkmonth)
        pass

    def test_graph_night(self):
        # Disabled: would build the dependency graph for a fake night.
        # grph, expcnt, bricks = graph_night(ph.fake_night(), self.specs, False)
        pass

    def test_create_load_prod(self):
        # Disabled: would create a production and verify the reloaded graph.
        # grph, expcnt, pix = graph_night(ph.fake_night(), self.specs, False)
        # expnightcnt, allpix = create_prod()
        # fullgrph = load_prod()
        # self.assertTrue(grph == fullgrph)
        pass
def test_suite():
    """Return this module's tests so it can be run on its own.

    Allows testing of only this module with the command::

        python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
#- This runs all test* functions in any TestCase class in this file
#- when the module is executed directly, e.g. ``python test_pipeline_plan.py``.
if __name__ == '__main__':
    unittest.main()
| 25.421687 | 80 | 0.581991 |
d3e34ba24cfbe2d0eafdad311fb456c5ae632479 | 3,145 | py | Python | liberapay/utils/cbor.py | lewjs16/liberapay.com | 63620a621865a62ecf8fd40a713ae53e137bbbcf | [
"PostgreSQL",
"CC0-1.0"
] | null | null | null | liberapay/utils/cbor.py | lewjs16/liberapay.com | 63620a621865a62ecf8fd40a713ae53e137bbbcf | [
"PostgreSQL",
"CC0-1.0"
] | 1 | 2021-06-02T01:18:36.000Z | 2021-06-02T01:18:36.000Z | liberapay/utils/cbor.py | lewjs16/liberapay.com | 63620a621865a62ecf8fd40a713ae53e137bbbcf | [
"PostgreSQL",
"CC0-1.0"
] | null | null | null | from datetime import date, timedelta
from decimal import Decimal
import cbor2
from markupsafe import Markup
from ..i18n.currencies import Money, MoneyBasket
from .types import Object
CBORTag = cbor2.encoder.CBORTag
encode_semantic = cbor2.encoder.encode_semantic
# Dates
# =====
# Upstream issue: https://github.com/agronholm/cbor2/issues/37
# Spec: https://j-richter.github.io/CBOR/date.html
# 1970-01-01: reference day for integer-encoded dates (days since Unix epoch).
EPOCH = date(1970, 1, 1)

def encode_date(encoder, value):
    """Encode a `date` as CBOR tag 100 wrapping its ISO-8601 string."""
    encode_semantic(encoder, CBORTag(100, value.isoformat()))

def decode_date(decoder, value, shareable_index=None):
    """Decode the content of a CBOR tag 100 into a `date`.

    The tagged content may be either an ISO-8601 string ("YYYY-MM-DD") or an
    integer count of days since the Unix epoch. Anything else raises
    `TypeError`. `bool` is excluded explicitly because it is a subclass of
    `int` in Python, and a boolean here is malformed CBOR input.
    """
    if isinstance(value, str):
        return date(*map(int, value.split('-')))
    elif isinstance(value, int) and not isinstance(value, bool):
        return EPOCH + timedelta(days=value)
    else:
        raise TypeError("expected str or int, got %r" % type(value))
# Register the date codec in cbor2's global encoder/decoder tables (tag 100).
cbor2.encoder.default_encoders[date] = encode_date
cbor2.decoder.semantic_decoders[100] = decode_date
# Markup
# ======
def encode_Markup(encoder, value):
    # Deliberately unimplemented: presumably to prevent Markup (HTML-safe
    # strings) from being silently serialized as plain text — TODO confirm.
    raise NotImplementedError()
cbor2.encoder.default_encoders[Markup] = encode_Markup
# Money and MoneyBasket
# =====================
# Spec: https://liberapay.github.io/specs/cbor-money.html
def encode_Money(encoder, value):
    """Encode a Money object as CBOR tag 77111.

    The compact form (a single "CURamount" string) is used when the object
    carries only its `amount` and `currency`; otherwise the extended array
    form [currency, amount, extra-attrs] is emitted.
    """
    state = value.__dict__
    if set(state.keys()) == {'amount', 'currency'}:
        payload = '%s%s' % (value.currency, value.amount)
    else:
        extras = {k: v for k, v in state.items() if k not in {'amount', 'currency'}}
        payload = [value.currency, value.amount, extras]
    encode_semantic(encoder, CBORTag(77111, payload))
def decode_Money(decoder, value, shareable_index=None):
    """Decode a CBOR tag 77111 payload into a Money object.

    Accepts either the compact "CURamount" string form or the extended
    [currency, amount, extra-attrs] array form (at most 3 elements).
    """
    if type(value) is str:
        return Money(value[3:], value[:3])
    if type(value) is list:
        money = Money(amount=value[1], currency=value[0])
        if len(value) > 2:
            money.__dict__.update(value[2])
            if len(value) > 3:
                raise ValueError("the array is longer than expected (%i > 3)" % len(value))
        return money
    raise TypeError("expected list or str, got %r" % type(value))
def encode_MoneyBasket(encoder, value):
    """Encode a MoneyBasket as CBOR tag 77112.

    Zero amounts are dropped; any extra instance attributes are stored under
    a reserved 'attrs' key.
    """
    nonzero = {currency: amount for currency, amount in value.amounts.items() if amount}
    extras = value.__dict__
    if extras:
        payload = dict(nonzero, attrs=extras)
    else:
        payload = nonzero
    encode_semantic(encoder, CBORTag(77112, payload))
def decode_MoneyBasket(decoder, value, shareable_index=None):
    """Decode a CBOR tag 77112 map into a MoneyBasket.

    Every key other than the reserved 'attrs' entry must look like an ISO
    currency code (three uppercase letters).
    """
    basket = MoneyBasket()
    basket.__dict__.update(value.pop('attrs', ()))
    for code, amount in value.items():
        if len(code) != 3 or not code.isupper():
            raise ValueError("key %r is not a currency code" % code)
        basket.amounts[code] = Decimal(amount)
    return basket
# Register the Money / MoneyBasket codecs (tags 77111 and 77112) with cbor2.
cbor2.encoder.default_encoders[Money] = encode_Money
cbor2.encoder.default_encoders[MoneyBasket] = encode_MoneyBasket
cbor2.decoder.semantic_decoders[77111] = decode_Money
cbor2.decoder.semantic_decoders[77112] = decode_MoneyBasket
# Object
# ======
def encode_Object(encoder, value):
    # Serialize a plain attribute-bag Object as an untagged CBOR map of its
    # instance attributes.
    cbor2.encoder.encode_map(encoder, value.__dict__)
cbor2.encoder.default_encoders[Object] = encode_Object
# Module-level convenience aliases so callers can use this module in place
# of cbor2 directly.
dumps = cbor2.dumps
loads = cbor2.loads
| 27.112069 | 91 | 0.668998 |
8d5542ddb77cbe1187844f2354eead0bae4f3fb4 | 2,379 | py | Python | tests/unit/stream_alert_shared/test_backoff_handlers.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | 7 | 2018-12-26T14:38:08.000Z | 2022-03-09T13:21:00.000Z | tests/unit/stream_alert_shared/test_backoff_handlers.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | 14 | 2018-05-09T19:18:15.000Z | 2021-06-02T02:34:09.000Z | tests/unit/stream_alert_shared/test_backoff_handlers.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | 1 | 2018-12-06T20:51:58.000Z | 2018-12-06T20:51:58.000Z | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mock import Mock, patch
from stream_alert.shared.backoff_handlers import (
backoff_handler,
giveup_handler,
success_handler
)
def _get_details(with_wait=False):
"""Return a details dict that conforms to what the backoff handlers expected
Only the on_backoff handler will contain a 'wait' value
"""
details = {'elapsed': 1.2345, 'tries': 3, 'target': Mock(__name__='func')}
if with_wait:
details['wait'] = 1.0
return details
@patch('logging.Logger.debug')
def test_backoff_handler_debug(log_mock):
    """Backoff Handlers - Backoff Handler, Debug"""
    # Default handler logs at the debug level.
    handler = backoff_handler()
    handler(_get_details(True))
    log_mock.assert_called()
@patch('logging.Logger.info')
def test_backoff_handler_info(log_mock):
    """Backoff Handlers - Backoff Handler, Info"""
    # Passing False switches the handler to info-level logging.
    handler = backoff_handler(False)
    handler(_get_details(True))
    log_mock.assert_called()
@patch('logging.Logger.debug')
def test_giveup_handler_debug(log_mock):
    """Backoff Handlers - Giveup Handler, Debug"""
    # True requests debug-level logging from the giveup handler.
    handler = giveup_handler(True)
    handler(_get_details())
    log_mock.assert_called()
@patch('logging.Logger.info')
def test_giveup_handler_info(log_mock):
    """Backoff Handlers - Giveup Handler, Info"""
    # Default giveup handler logs at the info level.
    handler = giveup_handler()
    handler(_get_details())
    log_mock.assert_called()
@patch('logging.Logger.debug')
def test_success_handler_debug(log_mock):
    """Backoff Handlers - Success Handler, Debug"""
    # True requests debug-level logging from the success handler.
    handler = success_handler(True)
    handler(_get_details())
    log_mock.assert_called()
@patch('logging.Logger.info')
def test_success_handler_info(log_mock):
    """Backoff Handlers - Success Handler, Info"""
    # Default success handler logs at the info level.
    handler = success_handler()
    handler(_get_details())
    log_mock.assert_called()
| 29.012195 | 80 | 0.735603 |
747b6cba90e6d3f1a00ec37d64ce4853d25e8117 | 79,379 | py | Python | airflow/jobs/scheduler_job.py | yjwong/airflow | d19ddffc173c877632d2233aa6e0458038c1f4df | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/jobs/scheduler_job.py | yjwong/airflow | d19ddffc173c877632d2233aa6e0458038c1f4df | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/jobs/scheduler_job.py | yjwong/airflow | d19ddffc173c877632d2233aa6e0458038c1f4df | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import sched
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_, tuple_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, SerializedDagNotFound, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
# Short aliases for the ORM models used throughout this module.
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
    """Runs DAG processing in a separate process using DagFileProcessor

    :param file_path: a Python file containing Airflow DAG definitions
    :type file_path: str
    :param pickle_dags: whether to serialize the DAG objects to the DB
    :type pickle_dags: bool
    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param callback_requests: failure callback to execute
    :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
    """

    # Counter that increments every time an instance of this class is created;
    # used to give each processor a unique name.
    class_creation_counter = 0

    def __init__(
        self,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        callback_requests: List[CallbackRequest],
    ):
        super().__init__()
        self._file_path = file_path
        self._pickle_dags = pickle_dags
        self._dag_ids = dag_ids
        self._callback_requests = callback_requests

        # The process that was launched to process the given file path.
        self._process: Optional[multiprocessing.process.BaseProcess] = None
        # The result of DagFileProcessor.process_file(file_path).
        self._result: Optional[Tuple[int, int]] = None
        # Whether the process is done running.
        self._done = False
        # When the process started.
        self._start_time: Optional[datetime.datetime] = None

        # This ID is used to uniquely name the process / thread that's launched
        # by this processor instance
        self._instance_id = DagFileProcessorProcess.class_creation_counter

        # Parent end of the result pipe; set in start().
        self._parent_channel: Optional[MultiprocessingConnection] = None
        DagFileProcessorProcess.class_creation_counter += 1

    @property
    def file_path(self) -> str:
        # Path of the DAG file this processor is responsible for.
        return self._file_path

    @staticmethod
    def _run_file_processor(
        result_channel: MultiprocessingConnection,
        parent_channel: MultiprocessingConnection,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        thread_name: str,
        callback_requests: List[CallbackRequest],
    ) -> None:
        """
        Process the given file.

        :param result_channel: the connection to use for passing back the result
        :type result_channel: multiprocessing.Connection
        :param parent_channel: the parent end of the channel to close in the child
        :type parent_channel: multiprocessing.Connection
        :param file_path: the file to process
        :type file_path: str
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :type pickle_dags: bool
        :param dag_ids: if specified, only examine DAG ID's that are
            in this list
        :type dag_ids: list[str]
        :param thread_name: the name to use for the process that is launched
        :type thread_name: str
        :param callback_requests: failure callback to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :return: the process that was launched
        :rtype: multiprocessing.Process
        """
        # This helper runs in the newly created process
        log: logging.Logger = logging.getLogger("airflow.processor")

        # Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
        # the child, else it won't get closed properly until we exit.
        log.info("Closing parent pipe")

        parent_channel.close()
        del parent_channel

        set_context(log, file_path)
        setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")

        try:
            # redirect stdout/stderr to log
            with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
                StreamLogWriter(log, logging.WARN)
            ), Stats.timer() as timer:

                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name

                log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
                dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
                result: Tuple[int, int] = dag_file_processor.process_file(
                    file_path=file_path,
                    pickle_dags=pickle_dags,
                    callback_requests=callback_requests,
                )
                result_channel.send(result)
            log.info("Processing %s took %.3f seconds", file_path, timer.duration)
        except Exception:  # pylint: disable=broad-except
            # Log exceptions through the logging framework.
            log.exception("Got an exception! Propagating...")
            raise
        finally:
            # We re-initialized the ORM within this Process above so we need to
            # tear it down manually here
            settings.dispose_orm()

            result_channel.close()

    def start(self) -> None:
        """Launch the process and start processing the DAG."""
        start_method = self._get_multiprocessing_start_method()
        context = multiprocessing.get_context(start_method)

        # One-way pipe: the child sends the (dag count, import error count)
        # result; the parent only reads.
        _parent_channel, _child_channel = context.Pipe(duplex=False)
        process = context.Process(
            target=type(self)._run_file_processor,
            args=(
                _child_channel,
                _parent_channel,
                self.file_path,
                self._pickle_dags,
                self._dag_ids,
                f"DagFileProcessor{self._instance_id}",
                self._callback_requests,
            ),
            name=f"DagFileProcessor{self._instance_id}-Process",
        )
        self._process = process
        self._start_time = timezone.utcnow()
        process.start()

        # Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
        # from closing in some cases
        _child_channel.close()
        del _child_channel

        # Don't store it on self until after we've started the child process - we don't want to keep it from
        # getting GCd/closed
        self._parent_channel = _parent_channel

    def kill(self) -> None:
        """Kill the process launched to process the file, and ensure consistent state."""
        if self._process is None:
            raise AirflowException("Tried to kill before starting!")
        self._kill_process()

    def terminate(self, sigkill: bool = False) -> None:
        """
        Terminate (and then kill) the process launched to process the file.

        :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
        :type sigkill: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to call terminate before starting!")

        self._process.terminate()
        # Arbitrarily wait 5s for the process to die
        with suppress(TimeoutError):
            self._process._popen.wait(5)  # type: ignore  # pylint: disable=protected-access
        if sigkill:
            self._kill_process()
        self._parent_channel.close()

    def _kill_process(self) -> None:
        # Forcibly terminate the child with SIGKILL and close our pipe end.
        if self._process is None:
            raise AirflowException("Tried to kill process before starting!")

        if self._process.is_alive() and self._process.pid:
            self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
            os.kill(self._process.pid, signal.SIGKILL)
        if self._parent_channel:
            self._parent_channel.close()

    @property
    def pid(self) -> int:
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        """
        if self._process is None or self._process.pid is None:
            raise AirflowException("Tried to get PID before starting!")
        return self._process.pid

    @property
    def exit_code(self) -> Optional[int]:
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        if self._process is None:
            raise AirflowException("Tried to get exit code before starting!")
        if not self._done:
            raise AirflowException("Tried to call retcode before process was finished!")
        return self._process.exitcode

    @property
    def done(self) -> bool:
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to see if it's done before starting!")

        if self._done:
            return True

        if self._parent_channel.poll():
            try:
                self._result = self._parent_channel.recv()
                self._done = True
                self.log.debug("Waiting for %s", self._process)
                self._process.join()
                self._parent_channel.close()
                return True
            except EOFError:
                # If we get an EOFError, it means the child end of the pipe has been closed. This only happens
                # in the finally block. But due to a possible race condition, the process may have not yet
                # terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
                # "suitable" timeout.
                self._done = True
                # Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
                self._process.join(timeout=5)
                if self._process.is_alive():
                    # Didn't shut down cleanly - kill it
                    self._kill_process()

        if not self._process.is_alive():
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            self._parent_channel.close()
            return True

        return False

    @property
    def result(self) -> Optional[Tuple[int, int]]:
        """
        :return: result of running DagFileProcessor.process_file()
        :rtype: tuple[int, int] or None
        """
        if not self.done:
            raise AirflowException("Tried to get the result before it's done!")
        return self._result

    @property
    def start_time(self) -> datetime.datetime:
        """
        :return: when this started to process the file
        :rtype: datetime
        """
        if self._start_time is None:
            raise AirflowException("Tried to get start time before it started!")
        return self._start_time

    @property
    def waitable_handle(self):
        # The child's sentinel: a handle that becomes ready when the process
        # exits (see multiprocessing.Process.sentinel), so callers can wait
        # on many processors at once.
        return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
    # Mirrors the [core] unit_test_mode config flag; copied onto
    # TaskInstance.test_mode when executing failure callbacks.
    UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')

    def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
        # dag_ids: optional allow-list restricting which DAG IDs are examined.
        # log: logger used for all processing output of this processor.
        super().__init__()
        self.dag_ids = dag_ids
        self._log = log
    @provide_session
    def manage_slas(self, dag: DAG, session: Session = None) -> None:
        """
        Finding all tasks that have SLAs defined, and sending alert emails
        where needed. New SLA misses are also recorded in the database.

        We are assuming that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
        """
        # Nothing to do if no task in this DAG declares an SLA.
        if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
            self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
            return

        # Latest SUCCESS/SKIPPED execution date per task: the point from
        # which we walk forward looking for missed SLA deadlines.
        qry = (
            session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
            .with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
            .filter(TI.dag_id == dag.dag_id)
            .filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
            .filter(TI.task_id.in_(dag.task_ids))
            .group_by(TI.task_id)
            .subquery('sq')
        )

        max_tis: List[TI] = (
            session.query(TI)
            .filter(
                TI.dag_id == dag.dag_id,
                TI.task_id == qry.c.task_id,
                TI.execution_date == qry.c.max_ti,
            )
            .all()
        )

        ts = timezone.utcnow()
        for ti in max_tis:
            task = dag.get_task(ti.task_id)
            if not isinstance(task.sla, timedelta):
                continue

            # Walk every schedule after the last completed one; a miss is
            # recorded when the following run's SLA deadline has passed.
            dttm = dag.following_schedule(ti.execution_date)
            while dttm < timezone.utcnow():
                following_schedule = dag.following_schedule(dttm)
                if following_schedule + task.sla < timezone.utcnow():
                    session.merge(
                        SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
                    )
                dttm = dag.following_schedule(dttm)
        session.commit()

        # All recorded misses for this DAG that have not been notified yet.
        # pylint: disable=singleton-comparison
        slas: List[SlaMiss] = (
            session.query(SlaMiss)
            .filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id)  # noqa
            .all()
        )
        # pylint: enable=singleton-comparison

        if slas:  # pylint: disable=too-many-nested-blocks
            sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
            # Task instances still not successful at the missed dates --
            # these are the ones "blocking" the SLA.
            fetched_tis: List[TI] = (
                session.query(TI)
                .filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
                .all()
            )
            blocking_tis: List[TI] = []
            for ti in fetched_tis:
                if ti.task_id in dag.task_ids:
                    ti.task = dag.get_task(ti.task_id)
                    blocking_tis.append(ti)
                else:
                    # Task no longer exists in the DAG: clean up its TI rows.
                    session.delete(ti)
                    session.commit()

            task_list = "\n".join([sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas])
            blocking_task_list = "\n".join(
                [ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis]
            )
            # Track whether email or any alert notification sent
            # We consider email or the alert callback as notifications
            email_sent = False
            notification_sent = False
            if dag.sla_miss_callback:
                # Execute the alert callback
                self.log.info('Calling SLA miss callback')
                try:
                    dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
                    notification_sent = True
                except Exception:  # pylint: disable=broad-except
                    self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
            email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}<code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""

            tasks_missed_sla = []
            for sla in slas:
                try:
                    task = dag.get_task(sla.task_id)
                except TaskNotFound:
                    # task already deleted from DAG, skip it
                    self.log.warning(
                        "Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
                    )
                    continue
                tasks_missed_sla.append(task)

            # Collect the de-duplicated set of recipient addresses.
            emails: Set[str] = set()
            for task in tasks_missed_sla:
                if task.email:
                    if isinstance(task.email, str):
                        emails |= set(get_email_address_list(task.email))
                    elif isinstance(task.email, (list, tuple)):
                        emails |= set(task.email)
            if emails:
                try:
                    send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
                    email_sent = True
                    notification_sent = True
                except Exception:  # pylint: disable=broad-except
                    Stats.incr('sla_email_notification_failure')
                    self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
            # If we sent any notification, update the sla_miss table
            if notification_sent:
                for sla in slas:
                    sla.email_sent = email_sent
                    sla.notification_sent = True
                    session.merge(sla)
            session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
    @provide_session
    def execute_callbacks(
        self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
    ) -> None:
        """
        Execute on failure callbacks. These objects can come from SchedulerJob or from
        DagFileProcessorManager.

        Requests are dispatched on their concrete type (task-level, SLA, or
        DAG-level). A failure in one callback is logged and does not prevent
        the remaining requests from being processed.

        :param dagbag: Dag Bag of dags
        :param callback_requests: failure callbacks to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :param session: DB session.
        """
        for request in callback_requests:
            try:
                if isinstance(request, TaskCallbackRequest):
                    self._execute_task_callbacks(dagbag, request)
                elif isinstance(request, SlaCallbackRequest):
                    self.manage_slas(dagbag.dags.get(request.dag_id))
                elif isinstance(request, DagCallbackRequest):
                    self._execute_dag_callbacks(dagbag, request, session)
            except Exception:  # pylint: disable=broad-except
                # Deliberately broad: one bad callback must not abort the batch.
                self.log.exception(
                    "Error executing %s callback for file: %s",
                    request.__class__.__name__,
                    request.full_filepath,
                )

        session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
    """Run the DAG-level callback described by ``request`` against its DagRun."""
    target_dag = dagbag.dags[request.dag_id]
    run = target_dag.get_dagrun(execution_date=request.execution_date, session=session)
    # A failure request means the success flag handed to the callback is False.
    was_success = not request.is_failure_callback
    target_dag.handle_callback(dagrun=run, success=was_success, reason=request.msg, session=session)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
    """Rebuild a TaskInstance from the request and run its failure callback, if requested."""
    simple_ti = request.simple_task_instance
    # Silently skip requests for DAGs or tasks no longer present in the DagBag.
    if simple_ti.dag_id not in dagbag.dags:
        return
    dag = dagbag.dags[simple_ti.dag_id]
    if simple_ti.task_id not in dag.task_ids:
        return

    task = dag.get_task(simple_ti.task_id)
    ti = TI(task, simple_ti.execution_date)
    # Copy the properties failure handling needs from the SimpleTaskInstance.
    ti.start_date = simple_ti.start_date
    ti.end_date = simple_ti.end_date
    ti.try_number = simple_ti.try_number
    ti.state = simple_ti.state
    ti.test_mode = self.UNIT_TEST_MODE
    if request.is_failure_callback:
        ti.handle_failure_with_callback(error=request.msg, test_mode=ti.test_mode)
        self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
    self,
    file_path: str,
    callback_requests: List[CallbackRequest],
    pickle_dags: bool = False,
    session: Session = None,
) -> Tuple[int, int]:
    """
    Process a Python file containing Airflow DAGs.

    This includes:

    1. Execute the file and look for DAG objects in the namespace.
    2. Execute any Callbacks if passed to this method.
    3. Serialize the DAGs and save it to DB (or update existing record in the DB).
    4. Pickle the DAG and save it to the DB (if necessary).
    5. Record any errors importing the file into ORM

    :param file_path: the path to the Python file that should be executed
    :type file_path: str
    :param callback_requests: failure callback to execute
    :type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
    :param pickle_dags: whether serialize the DAGs found in the file and
        save them to the db
    :type pickle_dags: bool
    :param session: Sqlalchemy ORM Session
    :type session: Session
    :return: number of dags found, count of import errors
    :rtype: Tuple[int, int]
    """
    self.log.info("Processing file %s for tasks to queue", file_path)

    try:
        dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
    except Exception:  # pylint: disable=broad-except
        # Failure to even build the DagBag is counted separately from
        # per-DAG import errors; report nothing found.
        self.log.exception("Failed at reloading the DAG file %s", file_path)
        Stats.incr('dag_file_refresh_error', 1, 1)
        return 0, 0

    if len(dagbag.dags) > 0:
        self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
    else:
        self.log.warning("No viable dags retrieved from %s", file_path)
        self.update_import_errors(session, dagbag)
        # Nothing parsed; only the import error count is meaningful.
        return 0, len(dagbag.import_errors)

    # Run callbacks before persisting so failure handling sees the freshly parsed DAGs.
    self.execute_callbacks(dagbag, callback_requests)

    # Save individual DAGs in the ORM
    dagbag.sync_to_db()

    if pickle_dags:
        # Only unpaused DAGs are worth pickling for remote execution.
        paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)

        unpaused_dags: List[DAG] = [
            dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
        ]

        for dag in unpaused_dags:
            dag.pickle(session)

    # Record import errors into the ORM
    try:
        self.update_import_errors(session, dagbag)
    except Exception:  # pylint: disable=broad-except
        # Best-effort: a failure to record errors must not fail the whole parse.
        self.log.exception("Error logging import errors!")

    return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
    self,
    subdir: str = settings.DAGS_FOLDER,
    num_runs: int = conf.getint('scheduler', 'num_runs'),
    num_times_parse_dags: int = -1,
    processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
    do_pickle: bool = False,
    log: Any = None,
    *args,
    **kwargs,
):
    """
    See the class docstring for the meaning of the scheduling parameters.
    ``log`` optionally replaces the logger used by this job; remaining
    args/kwargs are forwarded to ``BaseJob``.
    """
    self.subdir = subdir

    self.num_runs = num_runs
    # In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
    # number of times. This is only to support testing, and isn't something a user is likely to want to
    # configure -- they'll want num_runs
    self.num_times_parse_dags = num_times_parse_dags
    self._processor_poll_interval = processor_poll_interval

    self.do_pickle = do_pickle
    super().__init__(*args, **kwargs)

    if log:
        self._log = log

    # Check what SQL backend we use
    sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
    # sqlite/mysql need special handling for row locks and async parsing.
    self.using_sqlite = sql_conn.startswith('sqlite')
    self.using_mysql = sql_conn.startswith('mysql')

    self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
    # Set lazily in _execute(); None until the agent is started.
    self.processor_agent: Optional[DagFileProcessorAgent] = None

    # DAGs are read back from the serialized representation in the DB,
    # not re-parsed from files, by the scheduler itself.
    self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
def register_signals(self) -> None:
    """Register signals that stop child processes"""
    # Table-driven registration; dict order matches the original call order.
    handler_by_signal = {
        signal.SIGINT: self._exit_gracefully,
        signal.SIGTERM: self._exit_gracefully,
        signal.SIGUSR2: self._debug_dump,
    }
    for signum, handler in handler_by_signal.items():
        signal.signal(signum, handler)
def _exit_gracefully(self, signum, frame) -> None:  # pylint: disable=unused-argument
    """Helper method to clean up processor_agent to avoid leaving orphan processes."""
    self.log.info("Exiting gracefully upon receiving signal %s", signum)
    agent = self.processor_agent
    if agent:
        # Shut down the parsing subprocesses before exiting ourselves.
        agent.end()
    sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame):  # pylint: disable=unused-argument
    """Signal handler: log a debug dump of the executor's internal state."""
    try:
        sig_name = signal.Signals(signum).name  # pylint: disable=no-member
    except Exception:  # pylint: disable=broad-except
        # Fall back to the raw number if it isn't a known signal.
        sig_name = str(signum)

    separator = "-" * 80
    self.log.info("%s\n%s received, printing debug\n%s", separator, sig_name, separator)
    self.executor.debug_dump()
    self.log.info(separator)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
    """
    Is this SchedulerJob alive?

    We define alive as in a state of RUNNING with a heartbeat newer than the
    ``scheduler_health_check_threshold`` config setting (in seconds).

    ``grace_multiplier`` is accepted for compatibility with the parent class.

    :rtype: boolean
    """
    if grace_multiplier is not None:
        # Accept the same behaviour as superclass
        return super().is_alive(grace_multiplier=grace_multiplier)

    health_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
    seconds_since_heartbeat = (timezone.utcnow() - self.latest_heartbeat).total_seconds()
    return self.state == State.RUNNING and seconds_since_heartbeat < health_threshold
@provide_session
def _change_state_for_tis_without_dagrun(
    self, old_states: List[str], new_state: str, session: Session = None
) -> None:
    """
    For all DAG IDs in the DagBag, look for task instances in the
    old_states and set them to new_state if the corresponding DagRun
    does not exist or exists but is not in the running state. This
    normally should not happen, but it can if the state of DagRuns are
    changed manually.

    :param old_states: examine TaskInstances in this state
    :type old_states: list[airflow.utils.state.State]
    :param new_state: set TaskInstances to this state
    :type new_state: airflow.utils.state.State
    """
    tis_changed = 0
    # TIs whose DagRun is either missing entirely (outer join yields NULL)
    # or present but not in the RUNNING state.
    query = (
        session.query(models.TaskInstance)
        .outerjoin(models.TaskInstance.dag_run)
        .filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
        .filter(models.TaskInstance.state.in_(old_states))
        .filter(
            or_(
                # pylint: disable=comparison-with-callable
                models.DagRun.state != State.RUNNING,
                # pylint: disable=no-member
                models.DagRun.state.is_(None),
            )
        )
    )
    # We need to do this for mysql as well because it can cause deadlocks
    # as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
    if self.using_sqlite or self.using_mysql:
        # Row-by-row update under row locks; rows locked by another
        # scheduler are skipped rather than waited on.
        tis_to_change: List[TI] = with_row_locks(
            query, of=TI, session=session, **skip_locked(session=session)
        ).all()
        for ti in tis_to_change:
            ti.set_state(new_state, session=session)
            tis_changed += 1
    else:
        # On other backends, do a single bulk UPDATE driven by a subquery.
        subq = query.subquery()
        current_time = timezone.utcnow()
        ti_prop_update = {
            models.TaskInstance.state: new_state,
            models.TaskInstance.start_date: current_time,
        }

        # Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
        if new_state in State.finished:
            ti_prop_update.update(
                {
                    models.TaskInstance.end_date: current_time,
                    models.TaskInstance.duration: 0,
                }
            )

        tis_changed = (
            session.query(models.TaskInstance)
            .filter(
                models.TaskInstance.dag_id == subq.c.dag_id,
                models.TaskInstance.task_id == subq.c.task_id,
                models.TaskInstance.execution_date == subq.c.execution_date,
            )
            .update(ti_prop_update, synchronize_session=False)
        )

    if tis_changed > 0:
        session.flush()
        self.log.warning(
            "Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
            tis_changed,
            new_state,
        )
        Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
    self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
    """
    Get the concurrency maps.

    :param states: List of states to query for
    :type states: list[airflow.utils.state.State]
    :return: A map from (dag_id, task_id) to # of task instances and
        a map from (dag_id, task_id) to # of task instances in the given state list
    :rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
    """
    # One aggregated row per (task_id, dag_id) pair in the requested states.
    per_task_counts: List[Tuple[str, str, int]] = (
        session.query(TI.task_id, TI.dag_id, func.count('*'))
        .filter(TI.state.in_(states))
        .group_by(TI.task_id, TI.dag_id)
    ).all()

    dag_map: DefaultDict[str, int] = defaultdict(int)
    task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
    for task_id, dag_id, count in per_task_counts:
        # Per-DAG totals accumulate; per-task counts are already aggregated.
        dag_map[dag_id] += count
        task_map[(dag_id, task_id)] = count
    return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
    """
    Finds TIs that are ready for execution with respect to pool limits,
    dag concurrency, executor state, and priority.

    :param max_tis: Maximum number of TIs to queue in this loop.
    :type max_tis: int
    :return: list[airflow.models.TaskInstance]
    """
    executable_tis: List[TI] = []

    # Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
    # Throws an exception if lock cannot be obtained, rather than blocking
    pools = models.Pool.slots_stats(lock_rows=True, session=session)

    # If the pools are full, there is no point doing anything!
    # If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
    pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))

    if pool_slots_free == 0:
        self.log.debug("All pools are full!")
        return executable_tis

    max_tis = min(max_tis, pool_slots_free)

    # Get all task instances associated with scheduled
    # DagRuns which are not backfilled, in the given states,
    # and the dag is not paused
    query = (
        session.query(TI)
        .outerjoin(TI.dag_run)
        .filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
        .join(TI.dag_model)
        .filter(not_(DM.is_paused))
        .filter(TI.state == State.SCHEDULED)
        .options(selectinload('dag_model'))
        .limit(max_tis)
    )

    # Row locks with SKIP LOCKED so concurrent schedulers don't double-queue TIs.
    task_instances_to_examine: List[TI] = with_row_locks(
        query,
        of=TI,
        session=session,
        **skip_locked(session=session),
    ).all()
    # TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
    # Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))

    if len(task_instances_to_examine) == 0:
        self.log.debug("No tasks to consider for execution.")
        return executable_tis

    # Put one task instance on each line
    task_instance_str = "\n\t".join([repr(x) for x in task_instances_to_examine])
    self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)

    # Group candidate TIs by the pool they would consume slots from.
    pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
    for task_instance in task_instances_to_examine:
        pool_to_task_instances[task_instance.pool].append(task_instance)

    # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
    dag_concurrency_map: DefaultDict[str, int]
    task_concurrency_map: DefaultDict[Tuple[str, str], int]
    dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
        states=list(EXECUTION_STATES), session=session
    )

    num_tasks_in_executor = 0
    # Number of tasks that cannot be scheduled because of no open slot in pool
    num_starving_tasks_total = 0

    # Go through each pool, and queue up a task for execution if there are
    # any open slots in the pool.
    # pylint: disable=too-many-nested-blocks
    for pool, task_instances in pool_to_task_instances.items():
        pool_name = pool
        if pool not in pools:
            self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
            continue

        open_slots = pools[pool]["open"]

        num_ready = len(task_instances)
        self.log.info(
            "Figuring out tasks to run in Pool(name=%s) with %s open slots "
            "and %s task instances ready to be queued",
            pool,
            open_slots,
            num_ready,
        )

        # Highest priority first; ties broken by earliest execution_date.
        priority_sorted_task_instances = sorted(
            task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
        )

        num_starving_tasks = 0
        for current_index, task_instance in enumerate(priority_sorted_task_instances):
            if open_slots <= 0:
                self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
                # Can't schedule any more since there are no more open slots.
                num_unhandled = len(priority_sorted_task_instances) - current_index
                num_starving_tasks += num_unhandled
                num_starving_tasks_total += num_unhandled
                break

            # Check to make sure that the task concurrency of the DAG hasn't been
            # reached.
            dag_id = task_instance.dag_id

            current_dag_concurrency = dag_concurrency_map[dag_id]
            dag_concurrency_limit = task_instance.dag_model.concurrency
            self.log.info(
                "DAG %s has %s/%s running and queued tasks",
                dag_id,
                current_dag_concurrency,
                dag_concurrency_limit,
            )
            if current_dag_concurrency >= dag_concurrency_limit:
                self.log.info(
                    "Not executing %s since the number of tasks running or queued "
                    "from DAG %s is >= to the DAG's task concurrency limit of %s",
                    task_instance,
                    dag_id,
                    dag_concurrency_limit,
                )
                continue

            task_concurrency_limit: Optional[int] = None
            if task_instance.dag_model.has_task_concurrency_limits:
                # Many dags don't have a task_concurrency, so where we can avoid loading the full
                # serialized DAG the better.
                serialized_dag = self.dagbag.get_dag(dag_id, session=session)
                if serialized_dag.has_task(task_instance.task_id):
                    task_concurrency_limit = serialized_dag.get_task(
                        task_instance.task_id
                    ).task_concurrency

                if task_concurrency_limit is not None:
                    current_task_concurrency = task_concurrency_map[
                        (task_instance.dag_id, task_instance.task_id)
                    ]

                    if current_task_concurrency >= task_concurrency_limit:
                        self.log.info(
                            "Not executing %s since the task concurrency for"
                            " this task has been reached.",
                            task_instance,
                        )
                        continue

            if task_instance.pool_slots > open_slots:
                self.log.info(
                    "Not executing %s since it requires %s slots "
                    "but there are %s open slots in the pool %s.",
                    task_instance,
                    task_instance.pool_slots,
                    open_slots,
                    pool,
                )
                num_starving_tasks += 1
                num_starving_tasks_total += 1
                # Though we can execute tasks with lower priority if there's enough room
                continue

            executable_tis.append(task_instance)
            # Update the in-memory accounting so later candidates see this TI as queued.
            open_slots -= task_instance.pool_slots
            dag_concurrency_map[dag_id] += 1
            task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1

        Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)

    Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
    Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
    Stats.gauge('scheduler.tasks.executable', len(executable_tis))

    task_instance_str = "\n\t".join([repr(x) for x in executable_tis])
    self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)

    # set TIs to queued state
    filter_for_tis = TI.filter_for_tis(executable_tis)
    session.query(TI).filter(filter_for_tis).update(
        # TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
        # UTC?
        {TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
        synchronize_session=False,
    )

    # Detach the TIs from the session so callers get plain objects.
    for ti in executable_tis:
        make_transient(ti)
    return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
    """
    Hand a batch of task instances, already set to QUEUED in the DB, over
    to the executor.

    :param task_instances: TaskInstances to enqueue
    :type task_instances: list[TaskInstance]
    """
    for ti in task_instances:
        # Build the CLI run command the executor will launch for this TI.
        run_command = TI.generate_command(
            ti.dag_id,
            ti.task_id,
            ti.execution_date,
            local=True,
            mark_success=False,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            pool=ti.pool,
            file_path=ti.dag_model.fileloc,
            pickle_id=ti.dag_model.pickle_id,
        )

        self.log.info(
            "Sending %s to executor with priority %s and queue %s", ti.key, ti.priority_weight, ti.queue
        )
        self.executor.queue_command(ti, run_command, priority=ti.priority_weight, queue=ti.queue)
def _critical_section_execute_task_instances(self, session: Session) -> int:
    """
    Attempts to execute TaskInstances that should be executed by the scheduler.

    There are three steps:
    1. Pick TIs by priority with the constraint that they are in the expected states
       and that we do not exceed max_active_runs or pool limits.
    2. Change the state for the TIs above atomically.
    3. Enqueue the TIs in the executor.

    HA note: This function is a "critical section" meaning that only a single executor process can execute
    this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
    that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
    new DAG runs, progressing TIs from None to SCHEDULED etc.); DBs that don't support this (such as
    MariaDB or MySQL 5.x) the other schedulers will wait for the lock before continuing.

    :param session:
    :type session: sqlalchemy.orm.Session
    :return: Number of task instance with state changed.
    """
    # A max_tis_per_query of 0 means "no per-query cap": take whatever the
    # executor can absorb.
    available_slots = self.executor.slots_available
    max_tis = available_slots if self.max_tis_per_query == 0 else min(self.max_tis_per_query, available_slots)

    queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
    self._enqueue_task_instances_with_queued_state(queued_tis)
    return len(queued_tis)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
    """
    If there are tasks left over in the executor,
    we set them back to SCHEDULED to avoid creating hanging tasks.

    :param session: session for ORM operations
    """
    if not self.executor.queued_tasks:
        return

    # One AND-clause per task still sitting in the executor's queue.
    filter_for_ti_state_change = [
        and_(
            TI.dag_id == dag_id,
            TI.task_id == task_id,
            TI.execution_date == execution_date,
            # The TI.try_number will return raw try_number+1 since the
            # ti is not running. And we need to -1 to match the DB record.
            TI._try_number == try_number - 1,  # pylint: disable=protected-access
            TI.state == State.QUEUED,
        )
        for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
    ]
    ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
    # Lock the rows so another scheduler doesn't reset the same TIs concurrently.
    tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query, session=session).all()
    if not tis_to_set_to_scheduled:
        return

    # Set the matched TIs back to SCHEDULED and clear their queued timestamp.
    filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
    session.query(TI).filter(filter_for_tis).update(
        {TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
    )

    # Drop them from the executor's queue so they aren't launched anyway.
    for task_instance in tis_to_set_to_scheduled:
        self.executor.queued_tasks.pop(task_instance.key)

    task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
    self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
    """
    Respond to executor events.

    Drains the executor's event buffer, records external executor ids for
    newly QUEUED tasks, and fires a failure callback for any TI the executor
    reports finished while the DB still says it is QUEUED (likely killed
    externally).

    :return: number of events that were in the buffer
    """
    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
    event_buffer = self.executor.get_event_buffer()
    tis_with_right_state: List[TaskInstanceKey] = []

    # Report execution
    for ti_key, value in event_buffer.items():
        state: str
        state, _ = value
        # We create map (dag_id, task_id, execution_date) -> in-memory try_number
        ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number

        self.log.info(
            "Executor reports execution of %s.%s execution_date=%s "
            "exited with status %s for try_number %s",
            ti_key.dag_id,
            ti_key.task_id,
            ti_key.execution_date,
            state,
            ti_key.try_number,
        )
        if state in (State.FAILED, State.SUCCESS, State.QUEUED):
            tis_with_right_state.append(ti_key)

    # Return if no finished tasks
    if not tis_with_right_state:
        return len(event_buffer)

    # Check state of finished tasks
    filter_for_tis = TI.filter_for_tis(tis_with_right_state)
    tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
    for ti in tis:
        try_number = ti_primary_key_to_try_number_map[ti.key.primary]
        buffer_key = ti.key.with_try_number(try_number)
        state, info = event_buffer.pop(buffer_key)

        # TODO: should we fail RUNNING as well, as we do in Backfills?
        if state == State.QUEUED:
            # The executor accepted the task; remember its external id.
            ti.external_executor_id = info
            self.log.info("Setting external_id for %s to %s", ti, info)
            continue

        # Executor says finished, DB still says QUEUED for the same try:
        # the task process most likely died without updating its state.
        if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
            Stats.incr('scheduler.tasks.killed_externally')
            msg = (
                "Executor reports task instance %s finished (%s) although the "
                "task says its %s. (Info: %s) Was the task killed externally?"
            )
            self.log.error(msg, ti, state, ti.state, info)
            request = TaskCallbackRequest(
                full_filepath=ti.dag_model.fileloc,
                simple_task_instance=SimpleTaskInstance(ti),
                msg=msg % (ti, state, ti.state, info),
            )
            self.processor_agent.send_callback_to_execute(request)

    return len(event_buffer)
def _execute(self) -> None:
    """Start the executor and DAG file processor agent, then run the scheduler loop."""
    self.log.info("Starting the scheduler")

    # DAGs can be pickled for easier remote execution by some executors
    pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS

    self.log.info("Processing each file at most %s times", self.num_times_parse_dags)

    # When using sqlite, we do not use async_mode
    # so the scheduler job and DAG parser don't access the DB at the same time.
    async_mode = not self.using_sqlite

    processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
    processor_timeout = timedelta(seconds=processor_timeout_seconds)
    self.processor_agent = DagFileProcessorAgent(
        dag_directory=self.subdir,
        max_runs=self.num_times_parse_dags,
        processor_factory=type(self)._create_dag_file_processor,
        processor_timeout=processor_timeout,
        dag_ids=[],
        pickle_dags=pickle_dags,
        async_mode=async_mode,
    )

    try:
        self.executor.job_id = self.id
        self.executor.start()

        self.register_signals()

        self.processor_agent.start()

        execute_start_time = timezone.utcnow()

        self._run_scheduler_loop()

        # Stop any processors
        self.processor_agent.terminate()

        # Verify that all files were processed, and if so, deactivate DAGs that
        # haven't been touched by the scheduler as they likely have been
        # deleted.
        if self.processor_agent.all_files_processed:
            self.log.info(
                "Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
            )
            models.DAG.deactivate_stale_dags(execute_start_time)

        self.executor.end()

        settings.Session.remove()  # type: ignore
    except Exception:  # pylint: disable=broad-except
        # NOTE(review): the exception is logged but deliberately not re-raised;
        # the finally block still shuts down the processor agent.
        self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
    finally:
        self.processor_agent.end()
        self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
    file_path: str,
    callback_requests: List[CallbackRequest],
    dag_ids: Optional[List[str]],
    pickle_dags: bool,
) -> DagFileProcessorProcess:
    """Factory used by the DagFileProcessorAgent to spawn a per-file processor process."""
    return DagFileProcessorProcess(
        file_path=file_path,
        pickle_dags=pickle_dags,
        dag_ids=dag_ids,
        callback_requests=callback_requests,
    )
def _run_scheduler_loop(self) -> None:
    """
    The actual scheduler loop. The main steps in the loop are:
        #. Harvest DAG parsing results through DagFileProcessorAgent
        #. Find and queue executable tasks
            #. Change task instance state in DB
            #. Queue tasks in executor
        #. Heartbeat executor
            #. Execute queued tasks in executor asynchronously
            #. Sync on the states of running tasks

    Following is a graphic representation of these steps.

    .. image:: ../docs/apache-airflow/img/scheduler_loop.jpg

    :rtype: None
    """
    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')

    # Cooperative timer queue: events only fire when timers.run() is
    # called inside the loop below, never from another thread.
    timers = sched.scheduler()

    def call_regular_interval(
        delay: float,
        action: Callable,
        arguments=(),
        kwargs={},
    ):  # pylint: disable=dangerous-default-value
        # Schedule `action` now and have it reschedule itself after each run.
        def repeat(*args, **kwargs):
            action(*args, **kwargs)
            # This is not perfect. If we want a timer every 60s, but action
            # takes 10s to run, this will run it every 70s.
            # Good enough for now
            timers.enter(delay, 1, repeat, args, kwargs)

        timers.enter(delay, 1, repeat, arguments, kwargs)

    # Check on start up, then every configured interval
    self.adopt_or_reset_orphaned_tasks()

    call_regular_interval(
        conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
        self.adopt_or_reset_orphaned_tasks,
    )

    call_regular_interval(
        conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
        self._emit_pool_metrics,
    )

    call_regular_interval(
        conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
        self._clean_tis_without_dagrun,
    )

    for loop_count in itertools.count(start=1):
        with Stats.timer() as timer:

            if self.using_sqlite:
                self.processor_agent.run_single_parsing_loop()
                # For the sqlite case w/ 1 thread, wait until the processor
                # is finished to avoid concurrent access to the DB.
                self.log.debug("Waiting for processors to finish since we're using sqlite")
                self.processor_agent.wait_until_finished()

            with create_session() as session:
                num_queued_tis = self._do_scheduling(session)

                self.executor.heartbeat()
                session.expunge_all()
                num_finished_events = self._process_executor_events(session=session)

            self.processor_agent.heartbeat()

            # Heartbeat the scheduler periodically
            self.heartbeat(only_if_necessary=True)

            # Run any pending timed events
            next_event = timers.run(blocking=False)
            self.log.debug("Next timed event is in %f", next_event)

        self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)

        if not is_unit_test and not num_queued_tis and not num_finished_events:
            # If the scheduler is doing things, don't sleep. This means when there is work to do, the
            # scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
            # usage when "idle"
            time.sleep(min(self._processor_poll_interval, next_event))

        if loop_count >= self.num_runs > 0:
            self.log.info(
                "Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
                self.num_runs,
                loop_count,
            )
            break
        if self.processor_agent.done:
            self.log.info(
                "Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
                " scheduler loops",
                self.num_times_parse_dags,
                loop_count,
            )
            break
@provide_session
def _clean_tis_without_dagrun(self, session):
    """
    Periodic task: fail UP_FOR_RETRY TIs and reset queued/scheduled/sensing
    TIs whose DagRun is missing or not running. Skips the round quietly if
    another scheduler holds the row locks.
    """
    with prohibit_commit(session) as guard:
        try:
            self._change_state_for_tis_without_dagrun(
                old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
            )

            self._change_state_for_tis_without_dagrun(
                old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
                new_state=State.NONE,
                session=session,
            )

            guard.commit()
        except OperationalError as e:
            if is_lock_not_available_error(error=e):
                # Another scheduler is processing these rows; try again next interval.
                self.log.debug("Lock held by another Scheduler")
                session.rollback()
            else:
                raise

        guard.commit()
    def _do_scheduling(self, session) -> int:
        """
        This function is where the main scheduling decisions take place. It:

        - Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel

          Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
          (configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
          mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
          scheduling tasks.

        - Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
          via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
          to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)

          By "next oldest", we mean hasn't been examined/scheduled in the most time.

          The reason we don't select all dagruns at once is because the rows are selected with row locks,
          meaning that only one scheduler can "process them", even if it is waiting behind other dags.
          Increasing this limit will allow more throughput for smaller DAGs but will likely slow down
          throughput for larger (>500 tasks.) DAGs

        - Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
          to the executor.

          See docs of _critical_section_execute_task_instances for more.

        :return: Number of TIs enqueued in this iteration
        :rtype: int
        """
        # Put a check in place to make sure we don't commit unexpectedly
        with prohibit_commit(session) as guard:
            if settings.USE_JOB_SCHEDULE:
                query = DagModel.dags_needing_dagruns(session)
                self._create_dag_runs(query.all(), session)

                # commit the session - Release the write lock on DagModel table.
                guard.commit()
                # END: create dagruns

            dag_runs = DagRun.next_dagruns_to_examine(session)

            # Bulk fetch the currently active dag runs for the dags we are
            # examining, rather than making one query per DagRun

            # TODO: This query is probably horribly inefficient (though there is an
            # index on (dag_id,state)). It is to deal with the case when a user
            # clears more than max_active_runs older tasks -- we don't want the
            # scheduler to suddenly go and start running tasks from all of the
            # runs. (AIRFLOW-137/GH #1442)
            #
            # The longer term fix would be to have `clear` do this, and put DagRuns
            # in to the queued state, then take DRs out of queued before creating
            # any new ones

            # Build up a set of execution_dates that are "active" for a given
            # dag_id -- only tasks from those runs will be scheduled.
            active_runs_by_dag_id = defaultdict(set)

            query = (
                session.query(
                    TI.dag_id,
                    TI.execution_date,
                )
                .filter(
                    TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
                    TI.state.notin_(list(State.finished) + [State.REMOVED]),
                )
                .group_by(TI.dag_id, TI.execution_date)
            )

            for dag_id, execution_date in query:
                active_runs_by_dag_id[dag_id].add(execution_date)

            for dag_run in dag_runs:
                # Use try_except to not stop the Scheduler when a Serialized DAG is not found
                # This takes care of Dynamic DAGs especially
                # SerializedDagNotFound should not happen here in the same loop because the DagRun would
                # not be created in self._create_dag_runs if Serialized DAG does not exist
                # But this would take care of the scenario when the Scheduler is restarted after DagRun is
                # created and the DAG is deleted / renamed
                try:
                    self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
                except SerializedDagNotFound:
                    self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
                    continue

            guard.commit()

            # Without this, the session has an invalid view of the DB
            session.expunge_all()
            # END: schedule TIs

            try:
                if self.executor.slots_available <= 0:
                    # We know we can't do anything here, so don't even try!
                    self.log.debug("Executor full, skipping critical section")
                    return 0

                timer = Stats.timer('scheduler.critical_section_duration')
                timer.start()

                # Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
                num_queued_tis = self._critical_section_execute_task_instances(session=session)

                # Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
                # metric, way down
                timer.stop(send=True)
            except OperationalError as e:
                timer.stop(send=False)

                if is_lock_not_available_error(error=e):
                    self.log.debug("Critical section lock held by another Scheduler")
                    Stats.incr('scheduler.critical_section_busy')
                    session.rollback()
                    return 0
                raise

            guard.commit()
            return num_queued_tis
    def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
        """
        Unconditionally create a DAG run for the given DAGs, and update each dag_model's fields to
        control if/when the next DAGRun should be created.
        """
        # Bulk Fetch DagRuns with dag_id and execution_date same
        # as DagModel.dag_id and DagModel.next_dagrun
        # This list is used to verify if the DagRun already exists so that we don't attempt to create
        # duplicate dag runs
        active_dagruns = (
            session.query(DagRun.dag_id, DagRun.execution_date)
            .filter(
                tuple_(DagRun.dag_id, DagRun.execution_date).in_(
                    [(dm.dag_id, dm.next_dagrun) for dm in dag_models]
                )
            )
            .all()
        )

        for dag_model in dag_models:
            # Get the DAG in a try/except to not stop the Scheduler when a Serialized DAG is missing
            # (e.g. dynamic DAGs, or DAGs deleted/renamed since the DagModel row was written).
            try:
                dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
            except SerializedDagNotFound:
                self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
                continue

            dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
            # Explicitly check if the DagRun already exists. This is an edge case
            # where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
            # are not updated.
            # We opted to check DagRun existence instead
            # of catching an Integrity error and rolling back the session i.e
            # we need to run self._update_dag_next_dagruns if the Dag Run already exists or if we
            # create a new one. This is so that in the next Scheduling loop we try to create new runs
            # instead of falling in a loop of Integrity Error.
            if (dag.dag_id, dag_model.next_dagrun) not in active_dagruns:
                dag.create_dagrun(
                    run_type=DagRunType.SCHEDULED,
                    execution_date=dag_model.next_dagrun,
                    start_date=timezone.utcnow(),
                    state=State.RUNNING,
                    external_trigger=False,
                    session=session,
                    dag_hash=dag_hash,
                    creating_job_id=self.id,
                )

        self._update_dag_next_dagruns(dag_models, session)

        # TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
        # memory for larger dags? or expunge_all()
    def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
        """
        Bulk update the next_dagrun and next_dagrun_create_after for all the dags.

        We batch the select queries to get info about all the dags at once.
        """
        # Check max_active_runs, to see if we are _now_ at the limit for any of
        # these dags? (we've just created a DagRun for them after all)
        active_runs_of_dags = dict(
            session.query(DagRun.dag_id, func.count('*'))
            .filter(
                DagRun.dag_id.in_([o.dag_id for o in dag_models]),
                DagRun.state == State.RUNNING,  # pylint: disable=comparison-with-callable
                DagRun.external_trigger.is_(False),
            )
            .group_by(DagRun.dag_id)
            .all()
        )

        for dag_model in dag_models:
            # Get the DAG in a try_except to not stop the Scheduler when a Serialized DAG is not found
            # This takes care of Dynamic DAGs especially
            try:
                dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
            except SerializedDagNotFound:
                self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
                continue
            active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
            if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
                # At the cap: disable run creation until a run finishes (creation re-enabled elsewhere).
                self.log.info(
                    "DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
                    dag.dag_id,
                    active_runs_of_dag,
                    dag.max_active_runs,
                )
                dag_model.next_dagrun_create_after = None
            else:
                dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
                    dag_model.next_dagrun
                )
    def _schedule_dag_run(
        self,
        dag_run: DagRun,
        currently_active_runs: Set[datetime.datetime],
        session: Session,
    ) -> int:
        """
        Make scheduling decisions about an individual dag run.

        ``currently_active_runs`` is passed in so that a batch query can be
        used to ask this for all dag runs in the batch, to avoid an n+1 query.

        :param dag_run: The DagRun to schedule
        :param currently_active_runs: Execution dates of currently active runs of this DAG
        :return: Number of tasks scheduled
        """
        dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)

        if not dag:
            self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
            return 0

        if (
            dag_run.start_date
            and dag.dagrun_timeout
            and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
        ):
            # The run exceeded its dagrun_timeout: fail it and notify via callback.
            dag_run.state = State.FAILED
            dag_run.end_date = timezone.utcnow()
            self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
            session.flush()

            # Work out if we should allow creating a new DagRun now?
            self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)

            callback_to_execute = DagCallbackRequest(
                full_filepath=dag.fileloc,
                dag_id=dag.dag_id,
                execution_date=dag_run.execution_date,
                is_failure_callback=True,
                msg='timed_out',
            )

            # Send SLA & DAG Success/Failure Callbacks to be executed
            self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)

            return 0

        if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
            self.log.error("Execution date is in future: %s", dag_run.execution_date)
            return 0

        if dag.max_active_runs:
            if (
                len(currently_active_runs) >= dag.max_active_runs
                and dag_run.execution_date not in currently_active_runs
            ):
                self.log.info(
                    "DAG %s already has %d active runs, not queuing any tasks for run %s",
                    dag.dag_id,
                    len(currently_active_runs),
                    dag_run.execution_date,
                )
                return 0

        self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
        # TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
        schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)

        self._send_dag_callbacks_to_processor(dag_run, callback_to_run)

        # This will do one query per dag run. We "could" build up a complex
        # query to update all the TIs across all the execution dates and dag
        # IDs in a single query, but it turns out that can be _very very slow_
        # see #11147/commit ee90807ac for more details
        return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc, dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED]) # type: ignore
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING]) # type: ignore
    @provide_session
    def heartbeat_callback(self, session: Session = None) -> None:
        """Emit the ``scheduler_heartbeat`` counter metric each time the job heartbeats."""
        Stats.incr('scheduler_heartbeat', 1, 1)
    @provide_session
    def adopt_or_reset_orphaned_tasks(self, session: Session = None) -> int:
        """
        Reset any TaskInstance still in QUEUED or SCHEDULED states that were
        enqueued by a SchedulerJob that is no longer running.

        TIs whose owning scheduler is gone are either adopted by this scheduler's
        executor or reset to no state so they can be rescheduled.

        :return: the number of TIs reset
        :rtype: int
        """
        self.log.info("Resetting orphaned tasks for active dag runs")
        timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')

        # Mark any scheduler whose heartbeat is older than the health-check threshold as failed.
        num_failed = (
            session.query(SchedulerJob)
            .filter(
                SchedulerJob.state == State.RUNNING,
                SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
            )
            .update({"state": State.FAILED})
        )

        if num_failed:
            self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
            Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)

        resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
        query = (
            session.query(TI)
            .filter(TI.state.in_(resettable_states))
            # outerjoin is because we didn't use to have queued_by_job
            # set, so we need to pick up anything pre upgrade. This (and the
            # "or queued_by_job_id IS NULL") can go as soon as scheduler HA is
            # released.
            .outerjoin(TI.queued_by_job)
            .filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
            .join(TI.dag_run)
            .filter(
                DagRun.run_type != DagRunType.BACKFILL_JOB,
                # pylint: disable=comparison-with-callable
                DagRun.state == State.RUNNING,
            )
            .options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
        )

        # Lock these rows, so that another scheduler can't try and adopt these too
        tis_to_reset_or_adopt = with_row_locks(
            query, of=TI, session=session, **skip_locked(session=session)
        ).all()
        to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)

        reset_tis_message = []
        for ti in to_reset:
            reset_tis_message.append(repr(ti))
            ti.state = State.NONE
            ti.queued_by_job_id = None

        # Everything the executor adopted (rather than reset) is now owned by this scheduler.
        for ti in set(tis_to_reset_or_adopt) - set(to_reset):
            ti.queued_by_job_id = self.id

        Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
        Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))

        if to_reset:
            task_instance_str = '\n\t'.join(reset_tis_message)
            self.log.info(
                "Reset the following %s orphaned TaskInstances:\n\t%s", len(to_reset), task_instance_str
            )

        # Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
        # decide when to commit
        session.flush()
        return len(to_reset)
| 42.653949 | 110 | 0.616171 |
38d08bb3194a1ea0651ef41a9f38de7d9e14021a | 4,347 | py | Python | azure/mgmt/network/v2017_06_01/models/probe.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_06_01/models/probe.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_06_01/models/probe.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class Probe(SubResource):
    """A load balancer probe.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar load_balancing_rules: The load balancer rules that use this probe.
    :vartype load_balancing_rules:
     list[~azure.mgmt.network.v2017_06_01.models.SubResource]
    :param protocol: The protocol of the end point. Possible values are:
     'Http' or 'Tcp'. If 'Tcp' is specified, a received ACK is required for the
     probe to be successful. If 'Http' is specified, a 200 OK response from the
     specified URI is required for the probe to be successful. Possible values
     include: 'Http', 'Tcp'
    :type protocol: str or
     ~azure.mgmt.network.v2017_06_01.models.ProbeProtocol
    :param port: The port for communicating the probe. Possible values range
     from 1 to 65535, inclusive.
    :type port: int
    :param interval_in_seconds: The interval, in seconds, for how frequently
     to probe the endpoint for health status. Typically, the interval is
     slightly less than half the allocated timeout period (in seconds) which
     allows two full probes before taking the instance out of rotation. The
     default value is 15, the minimum value is 5.
    :type interval_in_seconds: int
    :param number_of_probes: The number of probes where if no response, will
     result in stopping further traffic from being delivered to the endpoint.
     This values allows endpoints to be taken out of rotation faster or slower
     than the typical times used in Azure.
    :type number_of_probes: int
    :param request_path: The URI used for requesting health status from the
     VM. Path is required if a protocol is set to http. Otherwise, it is not
     allowed. There is no default value.
    :type request_path: str
    :param provisioning_state: Gets the provisioning state of the probe
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Gets name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Server-side validation constraints for the request payload.
    _validation = {
        'load_balancing_rules': {'readonly': True},
        'protocol': {'required': True},
        'port': {'required': True},
    }

    # Maps Python attribute names to the REST API's JSON keys/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'interval_in_seconds': {'key': 'properties.intervalInSeconds', 'type': 'int'},
        'number_of_probes': {'key': 'properties.numberOfProbes', 'type': 'int'},
        'request_path': {'key': 'properties.requestPath', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, protocol, port, id=None, interval_in_seconds=None, number_of_probes=None, request_path=None, provisioning_state=None, name=None, etag=None):
        super(Probe, self).__init__(id=id)
        # load_balancing_rules is server-populated (readonly), never sent by the client.
        self.load_balancing_rules = None
        self.protocol = protocol
        self.port = port
        self.interval_in_seconds = interval_in_seconds
        self.number_of_probes = number_of_probes
        self.request_path = request_path
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
| 47.25 | 164 | 0.647803 |
f3d7e285f35c2826832e9df72e7b89a921356e4f | 11,755 | py | Python | integrations/airflow/marquez_airflow/dag.py | OleksandrDvornik/marquez | 70079cf2f17a193f620b801ac9962ad543dc0d23 | [
"Apache-2.0"
] | null | null | null | integrations/airflow/marquez_airflow/dag.py | OleksandrDvornik/marquez | 70079cf2f17a193f620b801ac9962ad543dc0d23 | [
"Apache-2.0"
] | null | null | null | integrations/airflow/marquez_airflow/dag.py | OleksandrDvornik/marquez | 70079cf2f17a193f620b801ac9962ad543dc0d23 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import List, Union, Optional
import airflow.models
from airflow.models import DagRun
from airflow.utils.db import provide_session
from airflow.utils.state import State
# Handling of import of different airflow versions
from airflow.version import version as AIRFLOW_VERSION
from marquez_airflow.extractors import StepMetadata, BaseExtractor
from marquez_airflow.extractors.extractors import Extractors
from marquez_airflow.utils import (
JobIdMapping,
get_location,
DagUtils,
get_custom_facets,
new_lineage_run_id
)
from pkg_resources import parse_version
if parse_version(AIRFLOW_VERSION) >= parse_version("1.10.11"):
from airflow import LoggingMixin
else:
# Corrects path of import for Airflow versions below 1.10.11
from airflow.utils.log.logging_mixin import LoggingMixin
from marquez_airflow.marquez import MarquezAdapter
_MARQUEZ = MarquezAdapter()
@provide_session
def lineage_run_id(run_id, task, session=None):
    """
    Macro function which returns the generated run id for a given task. This
    can be used to forward the run id from a task to a child run so the job
    hierarchy is preserved. Invoke as a jinja template, e.g.

    PythonOperator(
        task_id='render_template',
        python_callable=my_task_function,
        op_args=['{{ lineage_run_id(run_id, task) }}'],  # lineage_run_id macro invoked
        provide_context=False,
        dag=dag
    )

    :param run_id: the Airflow DagRun run_id the task belongs to
    :param task: the task whose generated run id should be looked up
    :param session: SQLAlchemy session (injected by ``@provide_session``)
    :return: the stored run id as a string, or "" when no mapping exists
    """
    name = DAG._marquez_job_name(task.dag_id, task.task_id)
    ids = JobIdMapping.get(name, run_id, session)
    if ids is None:
        return ""
    elif isinstance(ids, list):
        # A list mapping may be empty; otherwise forward the first stored id.
        return "" if len(ids) == 0 else ids[0]
    else:
        return str(ids)
class DAG(airflow.models.DAG, LoggingMixin):
    """Drop-in replacement for ``airflow.models.DAG`` that reports lineage metadata
    to Marquez: it registers task runs when a DagRun is created and reports their
    final state from the DagRun callback."""

    def __init__(self, *args, extractor_mapper=None, **kwargs):
        self.log.debug("marquez-airflow dag starting")

        # Expose the lineage_run_id macro to templated fields of all tasks in this DAG.
        macros = {}
        if kwargs.__contains__("user_defined_macros"):
            macros = kwargs["user_defined_macros"]
        macros["lineage_run_id"] = lineage_run_id
        kwargs["user_defined_macros"] = macros

        super().__init__(*args, **kwargs)
        # task_id -> extractor instance, filled lazily in _get_extractor / add_task.
        self.extractors = {}
        if extractor_mapper:
            self.extractor_mapper = extractor_mapper
        else:
            self.extractor_mapper = Extractors()

    def add_task(self, task):
        """Register the task with Airflow and instantiate a patcher extractor if one exists."""
        super().add_task(task)
        # Purpose: some extractors, called patchers need to hook up to internal components of
        # operator to extract necessary data. The hooking up is done on instantiation
        # of extractor via patch() method. That's why extractor is created here.
        patcher = self.extractor_mapper.get_patcher_class(task.__class__)
        if patcher:
            self.extractors[task.task_id] = patcher(task)

    def create_dagrun(self, *args, **kwargs):
        """Create the DagRun via Airflow, then best-effort register it with Marquez."""
        # run Airflow's create_dagrun() first
        dagrun = super(DAG, self).create_dagrun(*args, **kwargs)

        create_dag_start_ms = self._now_ms()
        try:
            self._register_dagrun(
                dagrun,
                kwargs.get('external_trigger', False),
                DagUtils.get_execution_date(**kwargs)
            )
        except Exception as e:
            # Lineage reporting must never break scheduling: log and carry on.
            self.log.error(
                f'Failed to record metadata: {e} '
                f'{self._timed_log_message(create_dag_start_ms)}',
                exc_info=True)

        return dagrun

    # We make the assumption that when a DAG run is created, its
    # tasks can be safely marked as started as well.
    # Doing it other way would require to hook up to
    # scheduler, where tasks are actually started
    def _register_dagrun(self, dagrun: DagRun, is_external_trigger: bool, execution_date: str):
        """Report every task of this DAG as started for the given DagRun."""
        self.log.debug(f"self.task_dict: {self.task_dict}")
        # Register each task in the DAG
        for task_id, task in self.task_dict.items():
            t = self._now_ms()
            try:
                step = self._extract_metadata(dagrun, task)

                job_name = self._marquez_job_name(self.dag_id, task.task_id)
                run_id = new_lineage_run_id(dagrun.run_id, task_id)

                task_run_id = _MARQUEZ.start_task(
                    run_id,
                    job_name,
                    self.description,
                    DagUtils.to_iso_8601(self._now_ms()),
                    dagrun.run_id,
                    self._get_location(task),
                    DagUtils.get_start_time(execution_date),
                    DagUtils.get_end_time(execution_date, self.following_schedule(execution_date)),
                    step,
                    {**step.run_facets, **get_custom_facets(task, is_external_trigger)}
                )

                # Remember the Marquez run id so the completion callback can find it.
                JobIdMapping.set(
                    job_name,
                    dagrun.run_id,
                    task_run_id
                )
            except Exception as e:
                self.log.error(
                    f'Failed to record task {task_id}: {e} '
                    f'{self._timed_log_message(t)}',
                    exc_info=True)

    def handle_callback(self, *args, **kwargs):
        """Report final task states to Marquez before delegating to Airflow's callback handling."""
        self.log.debug(f"handle_callback({args}, {kwargs})")
        try:
            dagrun = args[0]
            self.log.debug(f"handle_callback() dagrun : {dagrun}")
            self._report_task_instances(
                dagrun,
                kwargs.get('session')
            )
        except Exception as e:
            self.log.error(
                f'Failed to record dagrun callback: {e} '
                f'dag_id={self.dag_id}',
                exc_info=True)

        return super().handle_callback(*args)

    def _report_task_instances(self, dagrun, session):
        """Report each task instance of the run; failures are logged per instance."""
        task_instances = dagrun.get_task_instances()
        for task_instance in task_instances:
            try:
                self._report_task_instance(task_instance, dagrun, session)
            except Exception as e:
                self.log.error(
                    f'Failed to record task instance: {e} '
                    f'dag_id={self.dag_id}',
                    exc_info=True)

    def _report_task_instance(self, task_instance, dagrun, session):
        """Mark a single task instance as complete/failed in Marquez, registering it first if needed."""
        task = self.get_task(task_instance.task_id)

        # Note: task_run_id could be missing if it was removed from airflow
        # or the job could not be registered.
        task_run_id = JobIdMapping.pop(
            self._marquez_job_name_from_task_instance(task_instance), dagrun.run_id, session)

        step = self._extract_metadata(dagrun, task, task_instance)

        job_name = self._marquez_job_name(self.dag_id, task.task_id)
        run_id = new_lineage_run_id(dagrun.run_id, task.task_id)

        if not task_run_id:
            # No start event was recorded earlier; emit one now so completion has a run to attach to.
            task_run_id = _MARQUEZ.start_task(
                run_id,
                job_name,
                self.description,
                DagUtils.to_iso_8601(task_instance.start_date),
                dagrun.run_id,
                self._get_location(task),
                DagUtils.to_iso_8601(task_instance.start_date),
                DagUtils.to_iso_8601(task_instance.end_date),
                step,
                {**step.run_facets, **get_custom_facets(task, False)}
            )
            if not task_run_id:
                self.log.warning('Could not emit lineage')

        self.log.debug(f'Setting task state: {task_instance.state}'
                       f' for {task_instance.task_id}')
        if task_instance.state in {State.SUCCESS, State.SKIPPED}:
            _MARQUEZ.complete_task(
                task_run_id,
                job_name,
                DagUtils.to_iso_8601(task_instance.end_date),
                step
            )
        else:
            _MARQUEZ.fail_task(
                task_run_id,
                job_name,
                DagUtils.to_iso_8601(task_instance.end_date),
                step
            )

    def _extract_metadata(self, dagrun, task, task_instance=None) -> StepMetadata:
        """Run the task's extractor (if any); always return a StepMetadata, falling back to a bare one."""
        extractor = self._get_extractor(task)
        task_info = f'task_type={task.__class__.__name__} ' \
            f'airflow_dag_id={self.dag_id} ' \
            f'task_id={task.task_id} ' \
            f'airflow_run_id={dagrun.run_id} '

        if extractor:
            try:
                self.log.debug(
                    f'Using extractor {extractor.__class__.__name__} {task_info}')
                step = self._extract(extractor, task_instance)
                if isinstance(step, StepMetadata):
                    return step

                # Compatibility with custom extractors
                if isinstance(step, list):
                    if len(step) == 0:
                        return StepMetadata(
                            name=self._marquez_job_name(self.dag_id, task.task_id)
                        )
                    elif len(step) >= 1:
                        self.log.warning(
                            f'Extractor {extractor.__class__.__name__} {task_info} '
                            f'returned more then one StepMetadata instance: {step} '
                            f'will drop steps except for first!'
                        )
                    return step[0]
            except Exception as e:
                self.log.error(
                    f'Failed to extract metadata {e} {task_info}',
                    exc_info=True)
        else:
            self.log.warning(
                f'Unable to find an extractor. {task_info}')

        return StepMetadata(
            name=self._marquez_job_name(self.dag_id, task.task_id)
        )

    def _extract(self, extractor, task_instance) -> \
            Union[Optional[StepMetadata], List[StepMetadata]]:
        """Prefer extract_on_complete (which sees the finished task instance) over extract."""
        if task_instance:
            step = extractor.extract_on_complete(task_instance)
            if step:
                return step

        return extractor.extract()

    def _get_extractor(self, task) -> Optional[BaseExtractor]:
        """Return (and cache) the extractor instance for the task, or None if none is mapped."""
        if task.task_id in self.extractors:
            return self.extractors[task.task_id]
        extractor = self.extractor_mapper.get_extractor_class(task.__class__)
        self.log.debug(f'extractor for {task.__class__} is {extractor}')
        if extractor:
            self.extractors[task.task_id] = extractor(task)
            return self.extractors[task.task_id]
        return None

    def _timed_log_message(self, start_time):
        """Format a log suffix carrying the dag id and elapsed milliseconds since start_time."""
        return f'airflow_dag_id={self.dag_id} ' \
            f'duration_ms={(self._now_ms() - start_time)}'

    @staticmethod
    def _get_location(task):
        """Best-effort source location of the task's code; None when it cannot be resolved."""
        try:
            if hasattr(task, 'file_path') and task.file_path:
                return get_location(task.file_path)
            else:
                return get_location(task.dag.fileloc)
        except Exception:
            return None

    @staticmethod
    def _marquez_job_name_from_task_instance(task_instance):
        """Marquez job name for a task instance (``<dag_id>.<task_id>``)."""
        return DAG._marquez_job_name(task_instance.dag_id, task_instance.task_id)

    @staticmethod
    def _marquez_job_name(dag_id: str, task_id: str) -> str:
        """Compose the Marquez job name as ``<dag_id>.<task_id>``."""
        return f'{dag_id}.{task_id}'

    @staticmethod
    def _now_ms():
        """Current wall-clock time in whole milliseconds."""
        return int(round(time.time() * 1000))
| 37.199367 | 99 | 0.599915 |
654696f4b8d48963a247a1e9ff67efc2fe0d6da3 | 5,369 | py | Python | buddy/settings.py | encorehu/django-buddy | 9b3e351fd814d43c685718fd8cd109a215315c7a | [
"MIT"
] | 12 | 2015-01-12T11:18:06.000Z | 2019-05-03T08:03:52.000Z | buddy/settings.py | encorehu/django-buddy | 9b3e351fd814d43c685718fd8cd109a215315c7a | [
"MIT"
] | null | null | null | buddy/settings.py | encorehu/django-buddy | 9b3e351fd814d43c685718fd8cd109a215315c7a | [
"MIT"
] | 5 | 2016-03-11T09:31:50.000Z | 2021-04-07T11:40:19.000Z | # Django settings for buddy project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = './buddy/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
('css',STATIC_ROOT+'css/'),
('images',STATIC_ROOT+'images/'),
('js',STATIC_ROOT+'js/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h4!^0fn=4@#ih81l@qz-v@3z&s%(&5k4nx=dmn1unhcuyhjf00'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'buddy.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'buddy.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'./templates/',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'chat',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
} | 34.416667 | 108 | 0.684671 |
57fd6c83d2dac5e72ee77770fa997d6975e8d7d1 | 1,434 | py | Python | motrack/analyze.py | martinholub/motrack | f8dc0557ba93c5d9554543116b4913971a5559b5 | [
"MIT"
] | null | null | null | motrack/analyze.py | martinholub/motrack | f8dc0557ba93c5d9554543116b4913971a5559b5 | [
"MIT"
] | null | null | null | motrack/analyze.py | martinholub/motrack | f8dc0557ba93c5d9554543116b4913971a5559b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import glob
import os
import utils
import datetime
import re

# Aggregate per-run result files (res/*.txt) into a single timestamped
# CSV-style summary with one row per input file:
#     fname,dist[mm],time[s],#warns
basepath = "res/"
fnames_in = "*.txt"
time_stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
fname_out = "res_" + time_stamp + ".txt"
# distance_re = re.compile("Total dist in mm:")
# time_re = re.compile("Total time in sec:")
# warn_re = re.compile("")
# Matches a trailing decimal number such as "12.34" at the end of a line.
numeric_re = re.compile(r"[0-9]+\.[0-9]+$")

with open(fname_out, "w") as f_out:
    f_out.write("fname,dist[mm],time[s],#warns\n")
    for i, fn in enumerate(list(glob.glob(basepath + fnames_in))):
        base = utils.adjust_filename(fn, "")
        warnings = []
        with open(fn) as f_in:
            f_out.write("{},".format(base))
            for line in f_in:
                # NOTE(review): if a file lacks a "Total dist"/"Total time"
                # line, that column is silently missing from its output row.
                if line.startswith("Total dist"):
                    dist = numeric_re.search(line).group(0)
                    f_out.write("{},".format(dist))
                if line.startswith("Total time"):
                    time = numeric_re.search(line).group(0)
                    f_out.write("{},".format(time))
                if line.startswith("WARNING"):
                    warnings.append(line)
        # Count of WARNING lines closes the row.
        if warnings:
            num_warnings = len(warnings)
            f_out.write("{:d}\n".format(num_warnings))
        else:
            f_out.write("{:d}\n".format(0))
# f.close is implicit
# f.close is implicit
| 32.590909 | 66 | 0.527894 |
016fba515785a3ef2bf3e6a803d7826f41b21357 | 7,014 | py | Python | src/dataset.py | neuromation/ml-recipe-hier-attention | 14792ae219d8fc21f6eae0096949b4781332ad03 | [
"Apache-2.0"
] | 3 | 2020-10-31T01:04:00.000Z | 2021-02-07T07:35:48.000Z | src/dataset.py | neuro-inc/ml-recipe-hier-attention | 14792ae219d8fc21f6eae0096949b4781332ad03 | [
"Apache-2.0"
] | 1 | 2020-04-30T15:25:42.000Z | 2020-04-30T15:25:42.000Z | src/dataset.py | neuromation/ml-recipe-hier-attention | 14792ae219d8fc21f6eae0096949b4781332ad03 | [
"Apache-2.0"
] | 1 | 2020-07-02T16:50:07.000Z | 2020-07-02T16:50:07.000Z | import random
import re
from functools import lru_cache
from pathlib import Path
from typing import Tuple, List, Dict, Union, Iterator
import numpy as np
import torch
from nltk.tokenize import PunktSentenceTokenizer, WordPunctTokenizer
from torch import LongTensor, FloatTensor
from torch.utils.data import Sampler, DataLoader
from tqdm.auto import tqdm
from src.const import IMBD_ROOT
# Type aliases for a tokenized document: text[i_sentence][j_word] is a vocab id.
TText = List[List[int]]  # text[i_sentence][j_word]
TItem = Dict[str, Union[TText, int]]
# Clipping params were chosen as the 98% quantile of the corpus:
SNT_CLIP = 100  # max words kept per sentence
TXT_CLIP = 40   # max sentences kept per document
class ImdbReviewsDataset:
    """IMDB movie-review dataset: tokenized texts with binary sentiment labels.

    Reviews are read from ``<path_to_data>/{neg,pos}/*_*.txt``; the parent
    directory name determines the label (``pos`` -> 1, ``neg`` -> 0).
    Each review is lower-cased, stripped of HTML, split into sentences and
    words, mapped to vocabulary ids, and clipped to at most ``txt_clip``
    sentences of at most ``snt_clip`` words each.
    """

    _path_to_data: Path
    _snt_clip: int
    _txt_clip: int
    _s_tokenizer: PunktSentenceTokenizer
    _w_tokenizer: WordPunctTokenizer

    # data fields (parallel lists, one entry per review)
    _paths: List[Path]
    _texts: List[TText]
    _labels: List[int]
    _txt_lens: List[int]
    _snt_lens: List[int]
    _vocab: Dict[str, int]

    def __init__(self,
                 path_to_data: Path,
                 vocab: Dict[str, int],
                 snt_clip: int = SNT_CLIP,
                 txt_clip: int = TXT_CLIP
                 ):
        """Read and tokenize every review under ``path_to_data``.

        :param path_to_data: directory with ``pos/`` and ``neg/`` subfolders.
        :param vocab: word -> id mapping (ids start at 1; 0 is padding).
        :param snt_clip: maximum number of words kept per sentence.
        :param txt_clip: maximum number of sentences kept per document.
        """
        self._path_to_data = path_to_data
        self._vocab = vocab
        self._snt_clip = snt_clip
        self._txt_clip = txt_clip
        self._s_tokenizer = PunktSentenceTokenizer()
        self._w_tokenizer = WordPunctTokenizer()
        # Strips HTML tags such as <br /> that occur in raw IMDB reviews.
        self._html_re = re.compile('<.*?>')
        self._paths = []
        self._texts = []
        self._labels = []
        self._txt_lens = []
        self._snt_lens = []
        self._load_data()

    def __len__(self) -> int:
        return len(self._texts)

    # NOTE(review): lru_cache on an instance method keys on ``self`` and keeps
    # the instance alive for the cache's lifetime (flake8-bugbear B019); the
    # maxsize matches the 50k reviews in IMDB, so every item is cached.
    @lru_cache(maxsize=50_000)  # equal to number of reviews in imdb
    def __getitem__(self, i: int) -> TItem:
        return {
            'txt': self._texts[i],
            'label': self._labels[i],
            'txt_len': self._txt_lens[i],
            'snt_len': self._snt_lens[i]
        }

    def _load_data(self) -> None:
        """Read every review file, tokenize it and fill the parallel lists."""
        # Negative reviews first, then positive ones.
        files = list((self._path_to_data / 'neg').glob('*_*.txt')) + \
            list((self._path_to_data / 'pos').glob('*_*.txt'))
        print(f'Dataset loading from {self._path_to_data}.')
        for file_path in tqdm(files):
            with open(file_path, 'r') as f:
                text, snt_len_max, txt_len = self.tokenize_plane_text(f.read())
            # Label derives from the parent directory name.
            label = 1 if file_path.parent.name == 'pos' else 0
            self._paths.append(file_path)
            self._texts.append(text)
            self._labels.append(label)
            self._snt_lens.append(snt_len_max)
            self._txt_lens.append(txt_len)

    def tokenize_plane_text(self, text_plane: str
                            ) -> Tuple[TText, int, int]:
        """Lower-case, strip HTML, tokenize and clip a raw review.

        Returns the id-encoded text, the longest sentence length, and the
        number of sentences. Out-of-vocabulary words are dropped.
        """
        tokenize_w = self._w_tokenizer.tokenize
        tokenize_s = self._s_tokenizer.tokenize
        text_plane = text_plane.lower()
        text_plane = re.sub(self._html_re, ' ', text_plane)
        text = [[self.vocab[w] for w in tokenize_w(s)
                 if w in self._vocab.keys()][:self._snt_clip]
                for s in tokenize_s(text_plane)][:self._txt_clip]
        # NOTE(review): max() raises ValueError if the review tokenizes to
        # zero sentences -- presumably never happens on the IMDB corpus.
        snt_len_max = max([len(snt) for snt in text])
        txt_len = len(text)
        return text, snt_len_max, txt_len

    @staticmethod
    def get_imdb_vocab(imdb_root: Path) -> Dict[str, int]:
        """Load ``imdb.vocab`` and map each word to a 1-based id."""
        with open(imdb_root / 'imdb.vocab') as f:
            words = f.read().splitlines()
        # note, that we keep 0 for padding token
        ids = list(range(1, len(words) + 1))
        vocab = dict(zip(words, ids))
        return vocab

    @property
    def vocab(self) -> Dict[str, int]:
        # word -> id mapping shared across splits.
        return self._vocab

    @property
    def txt_lens(self) -> List[int]:
        # Sentences per document; used by SimilarRandSampler for batching.
        return self._txt_lens
def collate_docs(batch: List[TItem]
                 ) -> Dict[str, Union[LongTensor, FloatTensor]]:
    """Pad a list of tokenized documents into dense batch tensors.

    Produces ``features`` of shape (n_docs, max_sentences, max_words) filled
    with vocab ids (0 = padding) and ``targets`` of shape (n_docs, 1) with
    the float labels.
    """
    snt_dim = max(item['snt_len'] for item in batch)
    txt_dim = max(item['txt_len'] for item in batch)
    n_docs = len(batch)  # number of documents in batch

    targets = torch.zeros((n_docs, 1), dtype=torch.float32)
    features = torch.zeros((n_docs, txt_dim, snt_dim), dtype=torch.int64)

    for doc_idx, item in enumerate(batch):
        targets[doc_idx] = item['label']
        for snt_idx, sentence in enumerate(item['txt']):  # type: ignore
            # Left-align each sentence; the tail stays zero-padded.
            features[doc_idx, snt_idx, 0:len(sentence)] = torch.tensor(sentence)

    return {'features': features, 'targets': targets}
class SimilarRandSampler(Sampler):
    """Sampler yielding dataset indices so that nearby draws share similar keys.

    Indices are sorted by ``keys`` (here: document lengths in sentences);
    each batch of size ``bs`` is sampled from a random window of that sorted
    order, so documents of similar length end up in the same batch and
    padding waste in ``collate_docs`` stays small. ``diversity`` widens the
    window (in units of ``bs`` positions on each side of the pivot).
    """

    _ids: List[int]
    _bs: int
    _k: int
    _len: int

    def __init__(self,
                 keys: List[int],
                 bs: int,
                 diversity: int = 10
                 ):
        super().__init__(data_source=None)
        assert (bs >= 1) & (diversity >= 1)
        # Dataset indices ordered by increasing key value.
        self._ids = np.argsort(keys.copy()).tolist()
        self._bs = bs
        self._k = diversity
        # Number of batches per epoch (see NOTE in __len__).
        self._len = int(np.ceil(len(self._ids) / self._bs))

    def __iter__(self) -> Iterator[int]:
        cur_ids = self._ids.copy()
        similar_key_batches = []
        while cur_ids:
            # Pick a random pivot, then sample one batch from the window of
            # +-k*bs positions around it (all of similar key value).
            idx = random.choice(range(len(cur_ids)))
            lb = max(0, idx - self._k * self._bs)
            rb = min(len(cur_ids), idx + self._k * self._bs)
            batch = random.sample(cur_ids[lb: rb], min(self._bs, rb - lb))
            # rm ids from current batch from our pull
            cur_ids = [e for e in cur_ids if e not in batch]
            similar_key_batches.extend(batch)
        return iter(similar_key_batches)

    def __len__(self) -> int:
        # NOTE(review): this returns the number of *batches*, while __iter__
        # yields individual sample ids -- confirm which length the consuming
        # DataLoader expects here.
        return self._len
def get_datasets(imbd_root: Path = IMBD_ROOT
                 ) -> Tuple[ImdbReviewsDataset, ImdbReviewsDataset]:
    """Build the train and test IMDB datasets sharing a single vocabulary."""
    shared_vocab = ImdbReviewsDataset.get_imdb_vocab(imbd_root)
    train_set, test_set = (
        ImdbReviewsDataset(imbd_root / split, shared_vocab)
        for split in ('train', 'test')
    )
    print(f'Train dataset was loaded, {len(train_set)} samples.\n'
          f'Test dataset was loaded, {len(test_set)} samples.')
    return train_set, test_set
def get_test_dataset(imbd_root: Path = IMBD_ROOT) -> ImdbReviewsDataset:
    """Build only the test split of the IMDB dataset."""
    return ImdbReviewsDataset(
        imbd_root / 'test',
        ImdbReviewsDataset.get_imdb_vocab(imbd_root),
    )
def get_loaders(batch_size: int,
                n_workers: int = 4,
                imbd_root: Path = IMBD_ROOT,
                ) -> Tuple[DataLoader, DataLoader, Dict[str, int]]:
    """Create the train/test DataLoaders plus the shared vocabulary.

    Both loaders use :class:`SimilarRandSampler` so that each batch holds
    documents of similar length, and :func:`collate_docs` for padding.
    """
    train_set, test_set = get_datasets(imbd_root=imbd_root)

    def make_loader(dataset: ImdbReviewsDataset) -> DataLoader:
        # Length-similar batching keeps padding in collate_docs small.
        return DataLoader(
            dataset=dataset,
            sampler=SimilarRandSampler(keys=dataset.txt_lens, bs=batch_size),
            num_workers=n_workers,
            batch_size=batch_size,
            collate_fn=collate_docs,
        )

    train_loader = make_loader(train_set)
    test_loader = make_loader(test_set)
    return train_loader, test_loader, train_loader.dataset.vocab
| 30.495652 | 79 | 0.599943 |
82175d7f680495c310a0b6bd9f144a47f15cda35 | 2,902 | py | Python | tensorflow/compiler/mlir/runlit.site.cfg.py | freewind2016/tensorflow | 84d6b43f5fc72c9ac5eb0fc95b91b23f532a2738 | [
"Apache-2.0"
] | 4 | 2020-06-28T08:25:36.000Z | 2021-08-12T12:41:34.000Z | tensorflow/compiler/mlir/runlit.site.cfg.py | freewind2016/tensorflow | 84d6b43f5fc72c9ac5eb0fc95b91b23f532a2738 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:12:24.000Z | 2022-02-10T02:04:13.000Z | tensorflow/compiler/mlir/runlit.site.cfg.py | freewind2016/tensorflow | 84d6b43f5fc72c9ac5eb0fc95b91b23f532a2738 | [
"Apache-2.0"
] | 4 | 2019-11-28T12:18:07.000Z | 2021-08-01T16:12:17.000Z | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner site configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
import lit.llvm
# Handle the test srcdir for platforms. On windows, things are weird with bazel.
if platform.system() == 'Windows':
srcdir = os.environ['TEST_SRCDIR']
real_test_srcdir = srcdir[:srcdir.find('tensorflow/compiler/mlir')]
external_srcdir = os.path.join(real_test_srcdir, 'external')
else:
real_test_srcdir = os.environ['TEST_SRCDIR']
external_srcdir = real_test_srcdir
# Lint for undefined variables is disabled as config is not defined inside this
# file, instead config is injected by lit.py. The structure is common for lit
# tests and intended to only persist temporarily (b/136126535).
# pylint: disable=undefined-variable
config.llvm_tools_dir = os.path.join(external_srcdir, 'llvm-project', 'llvm')
config.mlir_obj_root = os.path.join(real_test_srcdir)
config.mlir_tools_dir = os.path.join(external_srcdir, 'llvm-project', 'mlir')
# TODO(jpienaar): Replace with suffices in build rule.
config.suffixes = ['.td', '.mlir', '.pbtxt']
mlir_tf_tools_dirs = [
'tensorflow/compiler/mlir',
'tensorflow/compiler/mlir/hlo',
'tensorflow/compiler/mlir/lite',
'tensorflow/compiler/mlir/tensorflow',
'tensorflow/compiler/mlir/tfjs',
'tensorflow/compiler/mlir/xla',
'tensorflow/compiler/aot',
'tensorflow/compiler/xla/service/mlir_gpu',
'tensorflow/compiler/xla/service/gpu/tests',
]
config.mlir_tf_tools_dirs = [
os.path.join(real_test_srcdir, os.environ['TEST_WORKSPACE'], s)
for s in mlir_tf_tools_dirs
]
test_dir = os.environ['TEST_TARGET']
test_dir = test_dir.strip('/').rsplit(':', 1)[0]
config.mlir_test_dir = os.path.join(real_test_srcdir,
os.environ['TEST_WORKSPACE'], test_dir)
if platform.system() == 'Windows':
# Configure this to work with msys2, TF's preferred windows bash.
config.lit_tools_dir = '/usr/bin'
lit.llvm.initialize(lit_config, config)
# Let the main config do the real work.
lit_config.load_config(
config,
os.path.join(
os.path.join(real_test_srcdir, os.environ['TEST_WORKSPACE'],
'tensorflow/compiler/mlir/runlit.cfg.py')))
# pylint: enable=undefined-variable
| 38.184211 | 80 | 0.738456 |
194502fe136d492da99677147a2645edbf0f22ad | 916 | py | Python | smnsdkrequests/v20171105/DeleteTopicAttribute.py | xunmeibuyue/IntelligentPeephole | c3bebf8792f019c859539607846971f33fee7cc2 | [
"Apache-2.0"
] | null | null | null | smnsdkrequests/v20171105/DeleteTopicAttribute.py | xunmeibuyue/IntelligentPeephole | c3bebf8792f019c859539607846971f33fee7cc2 | [
"Apache-2.0"
] | null | null | null | smnsdkrequests/v20171105/DeleteTopicAttribute.py | xunmeibuyue/IntelligentPeephole | c3bebf8792f019c859539607846971f33fee7cc2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
#Copyright (C) 2017. Huawei Technologies Co., LTD. All rights reserved.
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of Apache License, Version 2.0.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#Apache License, Version 2.0 for more detail
"""
create at 2017/11/4
"""
__author__ = 'pengzl'
from smnsdkcore.request import CommonRequest
from smnsdkcore.http import httpmethod
class DeleteTopicAttribute(CommonRequest):
    """Request that deletes the attributes of an SMN topic via HTTP DELETE."""

    def __init__(self):
        """Initialize the request and fix the HTTP method to DELETE."""
        super(DeleteTopicAttribute, self).__init__()
        self.set_method(httpmethod.DELETE)

    def set_topic_urn(self, topic_urn):
        """Point the request at the attributes resource of ``topic_urn``."""
        self.set_uri(
            '/v2/{project_id}/notifications/topics/'
            + topic_urn
            + '/attributes'
        )
4f2b1cff7eb2dd12b7e5ce2eef2f86d7696fb867 | 6,579 | py | Python | src/Vector/VectorNd.py | EnderRayquaza/Vector | f8348d1f06912b45b808fdcf779202dc1f97f201 | [
"MIT"
] | 2 | 2021-04-15T11:16:42.000Z | 2021-11-08T21:46:26.000Z | src/Vector/VectorNd.py | EnderRayquaza/Vector | f8348d1f06912b45b808fdcf779202dc1f97f201 | [
"MIT"
] | null | null | null | src/Vector/VectorNd.py | EnderRayquaza/Vector | f8348d1f06912b45b808fdcf779202dc1f97f201 | [
"MIT"
] | null | null | null | import math
class VectorNd:
    """
    A mathematical vector in n dimensions.
    ...
    v2.0.5
    by EnderRayquaza

    Attributes
    ----------
    l : list
        A list with its composants.
    st : float
        Its standard (Euclidean norm).
    dir : list
        Its direction: the composants divided by the standard (all zeros for
        the null vector), i.e. a unit vector when ``st != 0``.
    name : str
        Its name.

    Methodes
    --------
    calculate_param()
        Calculates its standard and its direction.
    __add__(v), __sub__(v), __mul__(k), __truediv__(k)
        Operators + - * / returning new vectors.
    __iadd__(v), __isub__(v), __imul__(k), __itruediv__(k)
        In-place operators += -= *= /=.
    __pow__(v)
        Operator ** : scalar product of this vector and v.
    __mod__(v)
        Operator % : angle in degrees between this vector and v.
    """

    def __init__(self, l, name="None"):
        """
        Parameters
        ----------
        l : list
            A list with its composants.
        name : str
            Its name.
        """
        self.l = l
        self.name = name
        # st and dir are derived from l; computed once here and recomputed
        # by the in-place operators via calculate_param().
        self.calculate_param()

    def calculate_param(self):
        """
        Calculates its standard (Euclidean norm) and its direction.
        """
        self.st = math.sqrt(sum(comp ** 2 for comp in self.l))
        if self.st != 0:
            self.dir = [comp / self.st for comp in self.l]
        else:
            # The null vector has no direction; use a zero list of same size.
            self.dir = [0] * len(self.l)

    def __add__(self, v):
        """
        Operator +
        Adds the vector v and this vector, returning a new vector.

        Parameters
        ----------
        v : VectorNd
            The vector which is added.
        """
        new_l = [self.l[i] + v.l[i] for i in range(len(self.l))]
        return VectorNd(new_l, self.name + " + " + v.name)

    def __sub__(self, v):
        """
        Operator -
        Subtracts the vector v to this vector, returning a new vector.

        Parameters
        ----------
        v : VectorNd
            The vector which is subtracted.
        """
        new_l = [self.l[i] - v.l[i] for i in range(len(self.l))]
        return VectorNd(new_l, self.name + " - " + v.name)

    def __mul__(self, k):
        """
        Operator *
        Multiplies the number k and this vector, returning a new vector.

        Parameters
        ----------
        k : float
            The number which multiplies this vector.
        """
        new_l = [comp * k for comp in self.l]
        return VectorNd(new_l, self.name + " * " + str(k))

    def __truediv__(self, k):
        """
        Operator /
        Divides this vector by the number k, returning a new vector.

        Parameters
        ----------
        k : float
            The number which divides this vector.
        """
        new_l = [comp / k for comp in self.l]
        return VectorNd(new_l, self.name + " / " + str(k))

    def __iadd__(self, v):
        """
        Operator +=
        Adds the vector v to this vector in place.

        Parameters
        ----------
        v : VectorNd
            The vector which is added.
        """
        for i in range(len(self.l)):
            self.l[i] += v.l[i]
        self.calculate_param()
        return self

    def __isub__(self, v):
        """
        Operator -=
        Subtracts the vector v to this vector in place.

        Parameters
        ----------
        v : VectorNd
            The vector which is subtracted.
        """
        for i in range(len(self.l)):
            self.l[i] -= v.l[i]
        self.calculate_param()
        return self

    def __imul__(self, k):
        """
        Operator *=
        Multiplies this vector by the number k in place.

        Parameters
        ----------
        k : float
            The number which multiplies this vector.
        """
        for i in range(len(self.l)):
            self.l[i] *= k
        self.calculate_param()
        return self

    def __itruediv__(self, k):
        """
        Operator /=
        Divides this vector by the number k in place.

        Parameters
        ----------
        k : float
            The number which divides this vector.
        """
        for i in range(len(self.l)):
            self.l[i] /= k
        self.calculate_param()
        return self

    def __pow__(self, v):
        """
        Operator **
        Returns the scalar product of this vector and v.

        Parameters
        ----------
        v : VectorNd
            The vector with which the scalar product is calculated.
        """
        # Fixed: the previous version had a stray closing parenthesis
        # (SyntaxError) and returned self.x*v.x+self.y*v.y+self.z*v.z,
        # attributes that do not exist on an n-dimensional vector.
        sc_prod = 0
        for i in range(len(self.l)):
            sc_prod += self.l[i] * v.l[i]
        return sc_prod

    def __mod__(self, v):
        """
        Operator %
        Returns the angle in degrees between this vector and v.

        Parameters
        ----------
        v : VectorNd
            The vector with which the angle which is calculated is formed.
        """
        return math.degrees(math.acos(self ** v / (self.st * v.st)))

    def show(self, name=None):
        """
        Prints its composants, its standard and its direction.

        Parameters
        ----------
        name : str, optional
            The name used in the printout. Defaults to the vector's own
            name, or "v" when the vector is unnamed.
        """
        # Fixed: the default used to be "v", which made the None-handling
        # below dead code and ignored self.name for plain show() calls.
        if name is None:
            name = "v" if self.name == "None" else self.name
        print(name, "(", end="")
        for comp in self.l:
            print(comp, "/", end="")
        print(")")
        print("||", name, "|| = ", self.st, sep="")
        # Fixed: this print call was malformed (missing end="" and the
        # closing parenthesis), which was a SyntaxError.
        print("dir(", end="")
        for comp in self.dir:
            print(comp, "/", end="")
        print(")")
def V0_():
    """Return the canonical one-dimensional null vector, named "0_"."""
    zero_vector = VectorNd([0], "0_")
    return zero_vector
| 23.165493 | 74 | 0.47454 |
41f50c42fa1358b7c0d7a738f35cdfa45a936662 | 234 | py | Python | upload_studio/executors/utils.py | jerryrwu/harvest | 6f405254fef59c84637bc976c252eef703b1cbc5 | [
"Apache-2.0"
] | 9 | 2019-03-26T14:50:00.000Z | 2020-11-10T16:44:08.000Z | upload_studio/executors/utils.py | jerryrwu/harvest | 6f405254fef59c84637bc976c252eef703b1cbc5 | [
"Apache-2.0"
] | 22 | 2019-03-02T23:16:13.000Z | 2022-02-27T10:36:36.000Z | upload_studio/executors/utils.py | jerryrwu/harvest | 6f405254fef59c84637bc976c252eef703b1cbc5 | [
"Apache-2.0"
] | 5 | 2019-04-24T00:51:30.000Z | 2020-11-06T18:31:49.000Z | import subprocess
def get_flac_version():
    """Return the first line of ``flac --version`` output."""
    raw = subprocess.check_output(['flac', '--version'])
    return raw.decode().split('\n')[0]
def get_lame_version():
    """Return the first line of ``lame --version`` output."""
    raw = subprocess.check_output(['lame', '--version'])
    return raw.decode().split('\n')[0]
| 23.4 | 81 | 0.675214 |
d704d0e18e261d03e442a1f02690300ef842c364 | 5,304 | py | Python | temboo/core/Library/Fitbit/Activities/GetActivityWeeklyGoals.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Fitbit/Activities/GetActivityWeeklyGoals.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Fitbit/Activities/GetActivityWeeklyGoals.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | # -*- coding: utf-8 -*-
###############################################################################
#
# GetActivityWeeklyGoals
# Get a user's current weekly activity goals.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
# Generated Temboo SDK boilerplate; keep the structure consistent with the
# other Choreo classes in this library.
class GetActivityWeeklyGoals(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetActivityWeeklyGoals Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetActivityWeeklyGoals, self).__init__(temboo_session, '/Library/Fitbit/Activities/GetActivityWeeklyGoals')

    def new_input_set(self):
        # Factory for the input container used to parameterize this Choreo.
        return GetActivityWeeklyGoalsInputSet()

    def _make_result_set(self, result, path):
        # Wraps a raw execution result in the typed result set.
        return GetActivityWeeklyGoalsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle used to poll/retrieve an asynchronous execution.
        return GetActivityWeeklyGoalsChoreographyExecution(session, exec_id, path)
class GetActivityWeeklyGoalsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetActivityWeeklyGoals
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
        """
        super(GetActivityWeeklyGoalsInputSet, self)._set_input('AccessTokenSecret', value)

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        super(GetActivityWeeklyGoalsInputSet, self)._set_input('AccessToken', value)

    def set_ConsumerKey(self, value):
        """
        Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Fitbit.)
        """
        super(GetActivityWeeklyGoalsInputSet, self)._set_input('ConsumerKey', value)

    def set_ConsumerSecret(self, value):
        """
        Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Fitbit.)
        """
        super(GetActivityWeeklyGoalsInputSet, self)._set_input('ConsumerSecret', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in: xml or json. Defaults to json.)
        """
        super(GetActivityWeeklyGoalsInputSet, self)._set_input('ResponseFormat', value)

    def set_UserID(self, value):
        """
        Set the value of the UserID input for this Choreo. ((optional, string) The user's encoded id. Defaults to "-" (dash) which will return data for the user associated with the token credentials provided.)
        """
        super(GetActivityWeeklyGoalsInputSet, self)._set_input('UserID', value)
class GetActivityWeeklyGoalsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetActivityWeeklyGoals Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Parses a JSON response string; only meaningful when
        # ResponseFormat is json (the default).
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Fitbit.)
        """
        return self._output.get('Response', None)

    def get_Distance(self):
        """
        Retrieve the value for the "Distance" output from this Choreo execution. ((decimal) The distance specified as the weekly goal.)
        """
        return self._output.get('Distance', None)

    def get_Floors(self):
        """
        Retrieve the value for the "Floors" output from this Choreo execution. ((integer) The number of floors specified as a weekly goal.)
        """
        return self._output.get('Floors', None)

    def get_Steps(self):
        """
        Retrieve the value for the "Steps" output from this Choreo execution. ((integer) The number of steps specified for a weekly goal.)
        """
        return self._output.get('Steps', None)
class GetActivityWeeklyGoalsChoreographyExecution(ChoreographyExecution):
    # Execution handle; builds the typed result set for a completed run.
    def _make_result_set(self, response, path):
        return GetActivityWeeklyGoalsResultSet(response, path)
| 43.121951 | 209 | 0.689291 |
f5b693f1313e2c6ad708cf849028779b06ea5ce1 | 46,161 | py | Python | src/sage/combinat/root_system/weyl_group.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/combinat/root_system/weyl_group.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/combinat/root_system/weyl_group.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | """
Weyl Groups
AUTHORS:
- Daniel Bump (2008): initial version
- Mike Hansen (2008): initial version
- Anne Schilling (2008): initial version
- Nicolas Thiery (2008): initial version
- Volker Braun (2013): LibGAP-based matrix groups
EXAMPLES:
More examples on Weyl Groups should be added here...
The Cayley graph of the Weyl Group of type ['A', 3]::
sage: w = WeylGroup(['A',3])
sage: d = w.cayley_graph(); d
Digraph on 24 vertices
sage: d.show3d(color_by_label=True, edge_size=0.01, vertex_size=0.03)
The Cayley graph of the Weyl Group of type ['D', 4]::
sage: w = WeylGroup(['D',4])
sage: d = w.cayley_graph(); d
Digraph on 192 vertices
sage: d.show3d(color_by_label=True, edge_size=0.01, vertex_size=0.03) #long time (less than one minute)
"""
#*****************************************************************************
# Copyright (C) 2008 Daniel Bump <bump at match.stanford.edu>,
# Mike Hansen <mhansen@gmail.com>
# Anne Schilling <anne at math.ucdavis.edu>
# Nicolas Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.groups.matrix_gps.finitely_generated import FinitelyGeneratedMatrixGroup_gap
from sage.groups.matrix_gps.group_element import MatrixGroupElement_gap
from sage.groups.perm_gps.permgroup import PermutationGroup_generic
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.interfaces.gap import gap
from sage.misc.cachefunc import cached_method
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.root_system.cartan_matrix import CartanMatrix
from sage.combinat.root_system.reflection_group_element import RealReflectionGroupElement
from sage.matrix.constructor import matrix, diagonal_matrix
from sage.combinat.root_system.root_lattice_realizations import RootLatticeRealizations
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.richcmp import richcmp, richcmp_not_equal
from sage.categories.all import WeylGroups, FiniteWeylGroups, AffineWeylGroups
from sage.categories.permutation_groups import PermutationGroups
from sage.sets.family import Family
from sage.matrix.constructor import Matrix
def WeylGroup(x, prefix=None, implementation='matrix'):
    """
    Returns the Weyl group of the root system defined by the Cartan
    type (or matrix) ``ct``.

    INPUT:

    - ``x`` - a root system or a Cartan type (or matrix)

    OPTIONAL:

    - ``prefix`` -- changes the representation of elements from matrices
      to products of simple reflections

    - ``implementation`` -- one of the following:

      * ``'matrix'`` - as matrices acting on a root system
      * ``"permutation"`` - as a permutation group acting on the roots

    EXAMPLES:

    The following constructions yield the same result, namely
    a weight lattice and its corresponding Weyl group::

        sage: G = WeylGroup(['F',4])
        sage: L = G.domain()

    or alternatively and equivalently::

        sage: L = RootSystem(['F',4]).ambient_space()
        sage: G = L.weyl_group()
        sage: W = WeylGroup(L)

    Either produces a weight lattice, with access to its roots and
    weights.

    ::

        sage: G = WeylGroup(['F',4])
        sage: G.order()
        1152
        sage: [s1,s2,s3,s4] = G.simple_reflections()
        sage: w = s1*s2*s3*s4; w
        [ 1/2  1/2  1/2  1/2]
        [-1/2  1/2  1/2 -1/2]
        [ 1/2  1/2 -1/2 -1/2]
        [ 1/2 -1/2  1/2 -1/2]
        sage: type(w) == G.element_class
        True
        sage: w.order()
        12
        sage: w.length() # length function on Weyl group
        4

    The default representation of Weyl group elements is as matrices.
    If you prefer, you may specify a prefix, in which case the
    elements are represented as products of simple reflections.

    ::

        sage: W=WeylGroup("C3",prefix="s")
        sage: [s1,s2,s3]=W.simple_reflections() # lets Sage parse its own output
        sage: s2*s1*s2*s3
        s1*s2*s3*s1
        sage: s2*s1*s2*s3 == s1*s2*s3*s1
        True
        sage: (s2*s3)^2==(s3*s2)^2
        True
        sage: (s1*s2*s3*s1).matrix()
        [ 0  0 -1]
        [ 0  1  0]
        [ 1  0  0]

    ::

        sage: L = G.domain()
        sage: fw = L.fundamental_weights(); fw
        Finite family {1: (1, 1, 0, 0), 2: (2, 1, 1, 0), 3: (3/2, 1/2, 1/2, 1/2), 4: (1, 0, 0, 0)}
        sage: rho = sum(fw); rho
        (11/2, 5/2, 3/2, 1/2)
        sage: w.action(rho) # action of G on weight lattice
        (5, -1, 3, 2)

    We can also do the same for arbitrary Cartan matrices::

        sage: cm = CartanMatrix([[2,-5,0],[-2,2,-1],[0,-1,2]])
        sage: W = WeylGroup(cm)
        sage: W.gens()
        (
        [-1  5  0]  [ 1  0  0]  [ 1  0  0]
        [ 0  1  0]  [ 2 -1  1]  [ 0  1  0]
        [ 0  0  1], [ 0  0  1], [ 0  1 -1]
        )
        sage: s0,s1,s2 = W.gens()
        sage: s1*s2*s1
        [ 1  0  0]
        [ 2  0 -1]
        [ 2 -1  0]
        sage: s2*s1*s2
        [ 1  0  0]
        [ 2  0 -1]
        [ 2 -1  0]
        sage: s0*s1*s0*s2*s0
        [ 9  0 -5]
        [ 2  0 -1]
        [ 0  1 -1]

    Same Cartan matrix, but with a prefix to display using simple reflections::

        sage: W = WeylGroup(cm, prefix='s')
        sage: s0,s1,s2 = W.gens()
        sage: s0*s2*s1
        s2*s0*s1
        sage: (s1*s2)^3
        1
        sage: (s0*s1)^5
        s0*s1*s0*s1*s0*s1*s0*s1*s0*s1
        sage: s0*s1*s2*s1*s2
        s2*s0*s1
        sage: s0*s1*s2*s0*s2
        s0*s1*s0

    TESTS::

        sage: TestSuite(WeylGroup(["A",3])).run()
        sage: TestSuite(WeylGroup(["A",3,1])).run() # long time

        sage: W = WeylGroup(['A',3,1])
        sage: s = W.simple_reflections()
        sage: w = s[0]*s[1]*s[2]
        sage: w.reduced_word()
        [0, 1, 2]
        sage: w = s[0]*s[2]
        sage: w.reduced_word()
        [2, 0]
        sage: W = groups.misc.WeylGroup(['A',3,1])
    """
    if implementation == "permutation":
        return WeylGroup_permutation(x, prefix)
    elif implementation != "matrix":
        raise ValueError("invalid implementation")

    # A root lattice realization already carries its ambient data.
    if x in RootLatticeRealizations:
        return WeylGroup_gens(x, prefix=prefix)

    try:
        ct = CartanType(x)
    except TypeError:
        ct = CartanMatrix(x)  # See if it is a Cartan matrix
    # Finite types act on the ambient space; infinite ones on the root space.
    if ct.is_finite():
        return WeylGroup_gens(ct.root_system().ambient_space(), prefix=prefix)
    return WeylGroup_gens(ct.root_system().root_space(), prefix=prefix)
class WeylGroup_gens(UniqueRepresentation,
                     FinitelyGeneratedMatrixGroup_gap):

    @staticmethod
    def __classcall__(cls, domain, prefix=None):
        """
        Normalize the constructor arguments for
        :class:`UniqueRepresentation`, so that ``WeylGroup_gens(D)`` and
        ``WeylGroup_gens(D, None)`` yield the same cached instance.
        """
        return super(WeylGroup_gens, cls).__classcall__(cls, domain, prefix)
    def __init__(self, domain, prefix):
        """
        EXAMPLES::

            sage: G = WeylGroup(['B',3])
            sage: TestSuite(G).run()
            sage: cm = CartanMatrix([[2,-5,0],[-2,2,-1],[0,-1,2]])
            sage: W = WeylGroup(cm)
            sage: TestSuite(W).run() # long time
        """
        self._domain = domain
        # Pick the most precise category matching the Cartan type.
        if self.cartan_type().is_affine():
            category = AffineWeylGroups()
        elif self.cartan_type().is_finite():
            category = FiniteWeylGroups()
        else:
            category = WeylGroups()
        if self.cartan_type().is_irreducible():
            category = category.Irreducible()
        self.n = domain.dimension()  # Really needed?
        self._prefix = prefix

        # FinitelyGeneratedMatrixGroup_gap takes plain matrices as input
        gens_matrix = [self.morphism_matrix(self.domain().simple_reflection(i))
                       for i in self.index_set()]
        from sage.libs.all import libgap
        libgap_group = libgap.Group(gens_matrix)
        degree = ZZ(self.domain().dimension())
        ring = self.domain().base_ring()
        FinitelyGeneratedMatrixGroup_gap.__init__(
            self, degree, ring, libgap_group, category=category)
    @cached_method
    def cartan_type(self):
        """
        Returns the CartanType associated to self.

        EXAMPLES::

            sage: G = WeylGroup(['F',4])
            sage: G.cartan_type()
            ['F', 4]
        """
        # Delegates to the domain (the root lattice realization).
        return self.domain().cartan_type()
    @cached_method
    def index_set(self):
        """
        Returns the index set of self, i.e. the labels of the simple
        reflections.

        EXAMPLES::

            sage: G = WeylGroup(['F',4])
            sage: G.index_set()
            (1, 2, 3, 4)
            sage: G = WeylGroup(['A',3,1])
            sage: G.index_set()
            (0, 1, 2, 3)
        """
        return self.cartan_type().index_set()
    # Should be implemented in (morphisms of) modules with basis
    def morphism_matrix(self, f):
        """
        Return the matrix of the endomorphism ``f`` of ``self.domain()``,
        expressed in the basis of the domain, with the images of the basis
        vectors written as columns (hence the transpose).
        """
        return matrix(self.domain().base_ring(), [f(b).to_vector()
                                                  for b in self.domain().basis()]).transpose()
    def from_morphism(self, f):
        """
        Return the element of this Weyl group whose matrix is that of the
        domain endomorphism ``f``.
        """
        return self._element_constructor_(self.morphism_matrix(f))
    @cached_method
    def simple_reflections(self):
        """
        Returns the simple reflections of self, as a family indexed by the
        index set of the Cartan type.

        EXAMPLES:

        There are the simple reflections for the symmetric group::

            sage: W=WeylGroup(['A',2])
            sage: s = W.simple_reflections(); s
            Finite family {1: [0 1 0]
            [1 0 0]
            [0 0 1], 2: [1 0 0]
            [0 0 1]
            [0 1 0]}

        As a special feature, for finite irreducible root systems,
        s[0] gives the reflection along the highest root::

            sage: s[0]
            [0 0 1]
            [0 1 0]
            [1 0 0]

        We now look at some further examples::

            sage: W=WeylGroup(['A',2,1])
            sage: W.simple_reflections()
            Finite family {0: [-1  1  1]
            [ 0  1  0]
            [ 0  0  1], 1: [ 1  0  0]
            [ 1 -1  1]
            [ 0  0  1], 2: [ 1  0  0]
            [ 0  1  0]
            [ 1  1 -1]}

            sage: W = WeylGroup(['F',4])
            sage: [s1,s2,s3,s4] = W.simple_reflections()
            sage: w = s1*s2*s3*s4; w
            [ 1/2  1/2  1/2  1/2]
            [-1/2  1/2  1/2 -1/2]
            [ 1/2  1/2 -1/2 -1/2]
            [ 1/2 -1/2  1/2 -1/2]
            sage: s4^2 == W.one()
            True
            sage: type(w) == W.element_class
            True
        """
        # The domain's simple reflections are endomorphisms; map each one
        # through from_morphism to obtain the group elements (matrices).
        return self.domain().simple_reflections().map(self.from_morphism)
def reflections(self):
"""
Return the reflections of ``self``.
The reflections of a Coxeter group `W` are the conjugates of
the simple reflections. They are in bijection with the positive
roots, for given a positive root, we may have the reflection in
the hyperplane orthogonal to it. This method returns a family
indexed by the positive roots taking values in the reflections.
This requires ``self`` to be a finite Weyl group.
.. NOTE::
Prior to :trac:`20027`, the reflections were the keys
of the family and the values were the positive roots.
EXAMPLES::
sage: W = WeylGroup("B2", prefix="s")
sage: refdict = W.reflections(); refdict
Finite family {(1, -1): s1, (0, 1): s2, (1, 1): s2*s1*s2, (1, 0): s1*s2*s1}
sage: [r+refdict[r].action(r) for r in refdict.keys()]
[(0, 0), (0, 0), (0, 0), (0, 0)]
sage: W = WeylGroup(['A',2,1], prefix="s")
sage: W.reflections()
Lazy family (real root to reflection(i))_{i in
Positive real roots of type ['A', 2, 1]}
TESTS::
sage: CM = CartanMatrix([[2,-6],[-1,2]])
sage: W = WeylGroup(CM, prefix='s')
sage: W.reflections()
Traceback (most recent call last):
...
NotImplementedError: only implemented for finite and affine Cartan types
"""
prr = self.domain().positive_real_roots()
def to_elt(alp):
ref = self.domain().reflection(alp)
m = Matrix([ref(x).to_vector() for x in self.domain().basis()])
return self(m.transpose())
return Family(prr, to_elt, name="real root to reflection")
def _repr_(self):
"""
EXAMPLES::
sage: WeylGroup(['A', 1])
Weyl Group of type ['A', 1] (as a matrix group acting on the ambient space)
sage: WeylGroup(['A', 3, 1])
Weyl Group of type ['A', 3, 1] (as a matrix group acting on the root space)
"""
return "Weyl Group of type %s (as a matrix group acting on the %s)"%(self.cartan_type(),
self._domain._name_string(capitalize=False,
base_ring=False,
type=False))
def character_table(self):
"""
Returns the character table as a matrix
Each row is an irreducible character. For larger tables you
may preface this with a command such as
gap.eval("SizeScreen([120,40])") in order to widen the screen.
EXAMPLES::
sage: WeylGroup(['A',3]).character_table()
CT1
<BLANKLINE>
2 3 2 2 . 3
3 1 . . 1 .
<BLANKLINE>
1a 4a 2a 3a 2b
<BLANKLINE>
X.1 1 -1 -1 1 1
X.2 3 1 -1 . -1
X.3 2 . . -1 2
X.4 3 -1 1 . -1
X.5 1 1 1 1 1
"""
gens_str = ', '.join(str(g.gap()) for g in self.gens())
ctbl = gap('CharacterTable(Group({0}))'.format(gens_str))
return ctbl.Display()
@cached_method
def one(self):
    """
    Return the identity element of the Weyl group.

    EXAMPLES::

        sage: W = WeylGroup(['A',3])
        sage: e = W.one(); e
        [1 0 0 0]
        [0 1 0 0]
        [0 0 1 0]
        [0 0 0 1]
        sage: type(e) == W.element_class
        True
    """
    n = self.n
    # matrix(QQ, n, n, 1) builds the n x n identity matrix over QQ.
    identity = matrix(QQ, n, n, 1)
    return self._element_constructor_(identity)
unit = one # For backward compatibility
def domain(self):
"""
Returns the domain of the element of ``self``, that is the
root lattice realization on which they act.
EXAMPLES::
sage: G = WeylGroup(['F',4])
sage: G.domain()
Ambient space of the Root system of type ['F', 4]
sage: G = WeylGroup(['A',3,1])
sage: G.domain()
Root space over the Rational Field of the Root system of type ['A', 3, 1]
"""
return self._domain
def simple_reflection(self, i):
    """
    Return the `i`-th simple reflection.

    EXAMPLES::

        sage: G = WeylGroup(['F',4])
        sage: G.simple_reflection(1)
        [1 0 0 0]
        [0 0 1 0]
        [0 1 0 0]
        [0 0 0 1]
        sage: W=WeylGroup(['A',2,1])
        sage: W.simple_reflection(1)
        [ 1 0 0]
        [ 1 -1 1]
        [ 0 0 1]
    """
    if i not in self.index_set():
        # Include the offending value and the valid choices in the error,
        # instead of the original uninformative message.
        raise ValueError("%s is not in the index set %s" % (i, self.index_set()))
    return self.simple_reflections()[i]
def long_element_hardcoded(self):
    """
    Return the long Weyl group element (hardcoded data).

    Do we really want to keep it? There is a generic
    implementation which works in all cases. The hardcoded should
    have a better complexity (for large classical types), but
    there is a cache, so does this really matter?

    EXAMPLES::

        sage: types = [ ['A',5],['B',3],['C',3],['D',4],['G',2],['F',4],['E',6] ]
        sage: [WeylGroup(t).long_element().length() for t in types]
        [15, 9, 9, 12, 6, 24, 36]

        sage: all( WeylGroup(t).long_element() == WeylGroup(t).long_element_hardcoded() for t in types ) # long time (17s on sage.math, 2011)
        True
    """
    type = self.cartan_type()
    if type[0] == 'D' and type[1]%2 == 1:
        # Type D with odd rank: w0 negates every ambient coordinate
        # except the last one.
        l = [-1 for i in range(self.n-1)]
        l.append(1)
        m = diagonal_matrix(QQ,l)
    elif type[0] == 'A':
        # Type A: w0 is the antidiagonal permutation matrix (reversal of
        # the coordinates), built by filling a flat list of n^2 entries.
        l = [0 for k in range((self.n)**2)]
        for k in range(self.n-1, (self.n)**2-1, self.n-1):
            l[k] = 1
        m = matrix(QQ, self.n, l)
    elif type[0] == 'E':
        if type[1] == 6:
            # Type E6: explicit matrix in the 8-dimensional ambient space.
            half = ZZ(1)/ZZ(2)
            l = [[-half, -half, -half, half, 0, 0, 0, 0],
                 [-half, -half, half, -half, 0, 0, 0, 0],
                 [-half, half, -half, -half, 0, 0, 0, 0],
                 [half, -half, -half, -half, 0, 0, 0, 0],
                 [0, 0, 0, 0, half, half, half, -half],
                 [0, 0, 0, 0, half, half, -half, half],
                 [0, 0, 0, 0, half, -half, half, half],
                 [0, 0, 0, 0, -half, half, half, half]]
            m = matrix(QQ, 8, l)
        else:
            # E7 and E8 fall through to the generic -1 case below only
            # via the final else; this branch is unreachable for them.
            raise NotImplementedError("Not implemented yet for this type")
    elif type[0] == 'G':
        # Type G2: explicit matrix in the 3-dimensional ambient space.
        third = ZZ(1)/ZZ(3)
        twothirds = ZZ(2)/ZZ(3)
        l = [[-third, twothirds, twothirds],
             [twothirds, -third, twothirds],
             [twothirds, twothirds, -third]]
        m = matrix(QQ, 3, l)
    else:
        # Remaining cases (B, C, even-rank D, F4): w0 acts as -1.
        m = diagonal_matrix([-1 for i in range(self.n)])
    return self(m)
def classical(self):
    """
    Return the classical parabolic subgroup of ``self``, assuming
    ``self`` is the Weyl group of an affine Cartan type.

    Caveat: we assume that 0 is a special node of the Dynkin diagram

    TODO: extract parabolic subgroup method

    EXAMPLES::

        sage: G = WeylGroup(['A',3,1])
        sage: G.classical()
        Parabolic Subgroup of the Weyl Group of type ['A', 3, 1]
        (as a matrix group acting on the root space)
        sage: WeylGroup(['A',3]).classical()
        Traceback (most recent call last):
        ...
        ValueError: classical subgroup only defined for affine types
    """
    if self.cartan_type().is_affine():
        return ClassicalWeylSubgroup(self._domain, prefix=self._prefix)
    raise ValueError("classical subgroup only defined for affine types")
class ClassicalWeylSubgroup(WeylGroup_gens):
"""
A class for Classical Weyl Subgroup of an affine Weyl Group
EXAMPLES::
sage: G = WeylGroup(["A",3,1]).classical()
sage: G
Parabolic Subgroup of the Weyl Group of type ['A', 3, 1] (as a matrix group acting on the root space)
sage: G.category()
Category of finite irreducible weyl groups
sage: G.cardinality()
24
sage: G.index_set()
(1, 2, 3)
sage: TestSuite(G).run()
TESTS::
sage: from sage.combinat.root_system.weyl_group import ClassicalWeylSubgroup
sage: H = ClassicalWeylSubgroup(RootSystem(["A", 3, 1]).root_space(), prefix=None)
sage: H is G
True
Caveat: the interface is likely to change. The current main
application is for plots.
.. TODO::
implement:
- Parabolic subrootsystems
- Parabolic subgroups with a set of nodes as argument
"""
@cached_method
def cartan_type(self):
"""
EXAMPLES::
sage: WeylGroup(['A',3,1]).classical().cartan_type()
['A', 3]
sage: WeylGroup(['A',3,1]).classical().index_set()
(1, 2, 3)
Note: won't be needed, once the lattice will be a parabolic sub root system
"""
return self.domain().cartan_type().classical()
def simple_reflections(self):
"""
EXAMPLES::
sage: WeylGroup(['A',2,1]).classical().simple_reflections()
Finite family {1: [ 1 0 0]
[ 1 -1 1]
[ 0 0 1],
2: [ 1 0 0]
[ 0 1 0]
[ 1 1 -1]}
Note: won't be needed, once the lattice will be a parabolic sub root system
"""
return Family({i: self.from_morphism(self.domain().simple_reflection(i))
for i in self.index_set()})
def __repr__(self):
"""
EXAMPLES::
sage: WeylGroup(['A',2,1]).classical()
Parabolic Subgroup of the Weyl Group of type ['A', 2, 1] (as a matrix group acting on the root space)
sage: WeylGroup(['C',4,1]).classical()
Parabolic Subgroup of the Weyl Group of type ['C', 4, 1] (as a matrix group acting on the root space)
sage: RootSystem(['C',3,1]).coweight_lattice().weyl_group().classical()
Parabolic Subgroup of the Weyl Group of type ['C', 3, 1]^* (as a matrix group acting on the coweight lattice)
sage: RootSystem(['C',4,1]).coweight_lattice().weyl_group().classical()
Parabolic Subgroup of the Weyl Group of type ['C', 4, 1]^* (as a matrix group acting on the coweight lattice)
"""
return "Parabolic Subgroup of the Weyl Group of type %s (as a matrix group acting on the %s)"%(self.domain().cartan_type(),
self._domain._name_string(capitalize=False,
base_ring=False,
type=False))
def weyl_group(self, prefix="hereditary"):
"""
Return the Weyl group associated to the parabolic subgroup.
EXAMPLES::
sage: WeylGroup(['A',4,1]).classical().weyl_group()
Weyl Group of type ['A', 4, 1] (as a matrix group acting on the root space)
sage: WeylGroup(['C',4,1]).classical().weyl_group()
Weyl Group of type ['C', 4, 1] (as a matrix group acting on the root space)
sage: WeylGroup(['E',8,1]).classical().weyl_group()
Weyl Group of type ['E', 8, 1] (as a matrix group acting on the root space)
"""
if prefix == "hereditary":
prefix = self._prefix
return self.domain().weyl_group(prefix)
def _test_is_finite(self, **options):
    """
    Test some internal invariants.

    EXAMPLES::

        sage: WeylGroup(['A', 2, 1]).classical()._test_is_finite()
        sage: WeylGroup(['B', 3, 1]).classical()._test_is_finite()
    """
    tester = self._tester(**options)
    # The ambient affine Weyl group is infinite ...
    # (assertFalse instead of the original assertTrue(not ...))
    tester.assertFalse(self.weyl_group(self._prefix).is_finite())
    # ... while the classical parabolic subgroup is finite.
    tester.assertTrue(self.is_finite())
class WeylGroupElement(MatrixGroupElement_gap):
"""
Class for a Weyl Group elements
"""
def __init__(self, parent, g, check=False):
"""
EXAMPLES::
sage: G = WeylGroup(['A',2])
sage: s1 = G.simple_reflection(1)
sage: TestSuite(s1).run()
"""
MatrixGroupElement_gap.__init__(self, parent, g, check=check)
self._parent = parent
def __hash__(self):
    # Hash through the underlying matrix so that elements that compare
    # equal via __eq__ (same parent, same matrix) hash alike.
    return hash(self.matrix())
def to_matrix(self):
"""
Return ``self`` as a matrix.
EXAMPLES::
sage: G = WeylGroup(['A',2])
sage: s1 = G.simple_reflection(1)
sage: s1.to_matrix() == s1.matrix()
True
"""
return self.matrix()
def domain(self):
"""
Returns the ambient lattice associated with self.
EXAMPLES::
sage: W = WeylGroup(['A',2])
sage: s1 = W.simple_reflection(1)
sage: s1.domain()
Ambient space of the Root system of type ['A', 2]
"""
return self._parent.domain()
def _repr_(self):
    """
    Return the string representation of ``self``: the matrix form when
    the parent has no prefix, otherwise the reduced word written with
    prefix-indexed generators.

    EXAMPLES::

        sage: W = WeylGroup(['A',2,1], prefix="s")
        sage: [s0,s1,s2] = W.simple_reflections()
        sage: s0*s1
        s0*s1
        sage: W = WeylGroup(['A',2,1])
        sage: [s0,s1,s2]=W.simple_reflections()
        sage: s0*s1
        [ 0 -1 2]
        [ 1 -1 1]
        [ 0 0 1]
    """
    if self._parent._prefix is None:
        return MatrixGroupElement_gap._repr_(self)
    redword = self.reduced_word()
    if not redword:
        return "1"
    # Join instead of manual string accumulation; this also matches the
    # formatting style used by WeylGroup_permutation.Element._repr_.
    return "*".join("%s%d" % (self._parent._prefix, i) for i in redword)
def _latex_(self):
r"""
Return the latex representation of ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',2,1], prefix="s")
sage: [s0,s1,s2] = W.simple_reflections()
sage: latex(s0*s1) # indirect doctest
s_{0}s_{1}
sage: W = WeylGroup(['A',2,1])
sage: [s0,s1,s2] = W.simple_reflections()
sage: latex(s0*s1)
\left(\begin{array}{rrr}
0 & -1 & 2 \\
1 & -1 & 1 \\
0 & 0 & 1
\end{array}\right)
"""
if self._parent._prefix is None:
return MatrixGroupElement_gap._latex_(self)
else:
redword = self.reduced_word()
if not redword:
return "1"
else:
return "".join("%s_{%d}" % (self._parent._prefix, i)
for i in redword)
def __eq__(self, other):
"""
EXAMPLES::
sage: W = WeylGroup(['A',3])
sage: s = W.simple_reflections()
sage: s[1] == s[1]
True
sage: s[1] == s[2]
False
Note: this implementation of :meth:`__eq__` is not much faster
than :meth:`__cmp__`. But it turned out to be useful for
subclasses overriding __cmp__ with something slow for specific
purposes.
"""
return (self.__class__ == other.__class__ and
self._parent == other._parent and
self.matrix() == other.matrix())
def _richcmp_(self, other, op):
"""
EXAMPLES::
sage: W = WeylGroup(['A',3])
sage: s = W.simple_reflections()
sage: s[1] == s[1]
True
sage: s[1] == s[2]
False
"""
if self._parent.cartan_type() != other._parent.cartan_type():
return richcmp_not_equal(self._parent.cartan_type(),
other._parent.cartan_type(), op)
return richcmp(self.matrix(), other.matrix(), op)
def action(self, v):
    """
    Return the action of ``self`` on the vector ``v``.

    EXAMPLES::

        sage: W = WeylGroup(['A',2])
        sage: s = W.simple_reflections()
        sage: v = W.domain()([1,0,0])
        sage: s[1].action(v)
        (0, 1, 0)

        sage: W = WeylGroup(RootSystem(['A',2]).root_lattice())
        sage: s = W.simple_reflections()
        sage: alpha = W.domain().simple_roots()
        sage: s[1].action(alpha[1])
        -alpha[1]

        sage: W=WeylGroup(['A',2,1])
        sage: alpha = W.domain().simple_roots()
        sage: s = W.simple_reflections()
        sage: s[1].action(alpha[1])
        -alpha[1]
        sage: s[1].action(alpha[0])
        alpha[0] + alpha[1]
    """
    domain = self.domain()
    if v not in domain:
        raise ValueError("{} is not in the domain".format(v))
    # Apply the matrix of self to the coordinate vector of v, then map
    # the result back into the root lattice realization.
    image = self.matrix() * v.to_vector()
    return domain.from_vector(image)
##########################################################################
# Descents
##########################################################################
def has_descent(self, i, positive=False, side = "right"):
    """
    Test if ``self`` has a descent at position ``i``.

    An element `w` has a descent in position `i` if `w` is
    on the strict negative side of the `i^{th}` simple reflection
    hyperplane.

    If ``positive`` is ``True``, tests if it is on the strict
    positive side instead.

    EXAMPLES::

        sage: W = WeylGroup(['A',3])
        sage: s = W.simple_reflections()
        sage: [W.one().has_descent(i) for i in W.domain().index_set()]
        [False, False, False]
        sage: [s[1].has_descent(i) for i in W.domain().index_set()]
        [True, False, False]
        sage: [s[2].has_descent(i) for i in W.domain().index_set()]
        [False, True, False]
        sage: [s[3].has_descent(i) for i in W.domain().index_set()]
        [False, False, True]
        sage: [s[3].has_descent(i, True) for i in W.domain().index_set()]
        [True, True, False]
        sage: W = WeylGroup(['A',3,1])
        sage: s = W.simple_reflections()
        sage: [W.one().has_descent(i) for i in W.domain().index_set()]
        [False, False, False, False]
        sage: [s[0].has_descent(i) for i in W.domain().index_set()]
        [True, False, False, False]
        sage: w = s[0] * s[1]
        sage: [w.has_descent(i) for i in W.domain().index_set()]
        [False, True, False, False]
        sage: [w.has_descent(i, side = "left") for i in W.domain().index_set()]
        [True, False, False, False]
        sage: w = s[0] * s[2]
        sage: [w.has_descent(i) for i in W.domain().index_set()]
        [True, False, True, False]
        sage: [w.has_descent(i, side = "left") for i in W.domain().index_set()]
        [True, False, True, False]

        sage: W = WeylGroup(['A',3])
        sage: W.one().has_descent(0)
        True
        sage: W.w0.has_descent(0)
        False
    """
    # s=self.parent().lattice().rho().scalar(self.action(self.parent().lattice().simple_root(i)))
    # if positive:
    # return s > 0
    # else:
    # return s < 0
    L = self.domain()
    # Choose the method depending on the side and the availability of rho and is_positive_root
    if not hasattr(L.element_class, "is_positive_root"):
        use_rho = True
    elif not hasattr(L, "rho"):
        use_rho = False
    else:
        # Both strategies are available: the rho pairing naturally
        # answers the left-side question, the root-positivity test the
        # right-side one.
        use_rho = side == "left"
    # If the chosen strategy answers for the other side than requested,
    # invert self so the answer applies to the requested side.
    if use_rho is not (side == "left"):
        self = ~self
    if use_rho:
        s = self.action(L.rho()).scalar(L.alphacheck()[i]) >= 0
    else:
        s = self.action(L.alpha()[i]).is_positive_root()
    # ``s`` is True exactly when there is no descent at ``i``; comparing
    # with ``positive`` (identity works since bools are singletons)
    # flips the result when the caller asks for the positive side.
    return s is positive
def has_left_descent(self, i):
"""
Test if ``self`` has a left descent at position ``i``.
EXAMPLES::
sage: W = WeylGroup(['A',3])
sage: s = W.simple_reflections()
sage: [W.one().has_left_descent(i) for i in W.domain().index_set()]
[False, False, False]
sage: [s[1].has_left_descent(i) for i in W.domain().index_set()]
[True, False, False]
sage: [s[2].has_left_descent(i) for i in W.domain().index_set()]
[False, True, False]
sage: [s[3].has_left_descent(i) for i in W.domain().index_set()]
[False, False, True]
sage: [(s[3]*s[2]).has_left_descent(i) for i in W.domain().index_set()]
[False, False, True]
"""
return self.has_descent(i, side = "left")
def has_right_descent(self, i):
"""
Test if ``self`` has a right descent at position ``i``.
EXAMPLES::
sage: W = WeylGroup(['A',3])
sage: s = W.simple_reflections()
sage: [W.one().has_right_descent(i) for i in W.domain().index_set()]
[False, False, False]
sage: [s[1].has_right_descent(i) for i in W.domain().index_set()]
[True, False, False]
sage: [s[2].has_right_descent(i) for i in W.domain().index_set()]
[False, True, False]
sage: [s[3].has_right_descent(i) for i in W.domain().index_set()]
[False, False, True]
sage: [(s[3]*s[2]).has_right_descent(i) for i in W.domain().index_set()]
[False, True, False]
"""
return self.has_descent(i, side="right")
def apply_simple_reflection(self, i, side = "right"):
    """
    Return ``self`` multiplied by the simple reflection ``s_i``.

    INPUT:

    - ``i`` -- an element of the index set
    - ``side`` -- ``"right"`` (default) or ``"left"``; the side on
      which the reflection is applied
    """
    s = self.parent().simple_reflections()
    if side == "right":
        return self * s[i]
    return s[i] * self
# The methods first_descent, descents, reduced_word appear almost verbatim in
# root_lattice_realizations and need to be factored out!
def to_permutation(self):
"""
A first approximation of to_permutation ...
This assumes types A,B,C,D on the ambient lattice
This further assume that the basis is indexed by 0,1,...
and returns a permutation of (5,4,2,3,1) (beuargl), as a tuple
"""
W = self.parent()
e = W.domain().basis()
return tuple( c*(j+1)
for i in e.keys()
for (j,c) in self.action(e[i]) )
def to_permutation_string(self):
"""
EXAMPLES::
sage: W = WeylGroup(["A",3])
sage: s = W.simple_reflections()
sage: (s[1]*s[2]*s[3]).to_permutation_string()
'2341'
"""
return "".join(str(i) for i in self.to_permutation())
WeylGroup_gens.Element = WeylGroupElement
class WeylGroup_permutation(UniqueRepresentation, PermutationGroup_generic):
"""
A Weyl group given as a permutation group.
"""
@staticmethod
def __classcall__(cls, cartan_type, prefix=None):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: W1 = WeylGroup(['B',2], implementation="permutation")
sage: W2 = WeylGroup(CartanType(['B',2]), implementation="permutation")
sage: W1 is W2
True
"""
return super(WeylGroup_permutation, cls).__classcall__(cls, CartanType(cartan_type), prefix)
def __init__(self, cartan_type, prefix):
"""
Initialize ``self``.
EXAMPLES::
sage: W = WeylGroup(['F',4], implementation="permutation")
sage: TestSuite(W).run()
"""
self._cartan_type = cartan_type
self._index_set = cartan_type.index_set()
self._index_set_inverse = {ii: i for i,ii in enumerate(cartan_type.index_set())}
self._reflection_representation = None
self._prefix = prefix
#from sage.libs.all import libgap
Q = cartan_type.root_system().root_lattice()
Phi = list(Q.positive_roots()) + [-x for x in Q.positive_roots()]
p = [[Phi.index(x.weyl_action([i]))+1 for x in Phi]
for i in self._cartan_type.index_set()]
cat = FiniteWeylGroups()
if self._cartan_type.is_irreducible():
cat = cat.Irreducible()
cat = (cat, PermutationGroups().Finite())
PermutationGroup_generic.__init__(self, gens=p, canonicalize=False, category=cat)
def iteration(self, algorithm="breadth", tracking_words=True):
r"""
Return an iterator going through all elements in ``self``.
INPUT:
- ``algorithm`` (default: ``'breadth'``) -- must be one of
the following:
* ``'breadth'`` - iterate over in a linear extension of the
weak order
* ``'depth'`` - iterate by a depth-first-search
- ``tracking_words`` (default: ``True``) -- whether or not to keep
track of the reduced words and store them in ``_reduced_word``
.. NOTE::
The fastest iteration is the depth first algorithm without
tracking words. In particular, ``'depth'`` is ~1.5x faster.
EXAMPLES::
sage: W = WeylGroup(["B",2], implementation="permutation")
sage: for w in W.iteration("breadth",True):
....: print("%s %s"%(w, w._reduced_word))
() []
(1,3)(2,6)(5,7) [1]
(1,5)(2,4)(6,8) [0]
(1,7,5,3)(2,4,6,8) [0, 1]
(1,3,5,7)(2,8,6,4) [1, 0]
(2,8)(3,7)(4,6) [1, 0, 1]
(1,7)(3,5)(4,8) [0, 1, 0]
(1,5)(2,6)(3,7)(4,8) [0, 1, 0, 1]
sage: for w in W.iteration("depth", False): w
()
(1,3)(2,6)(5,7)
(1,5)(2,4)(6,8)
(1,3,5,7)(2,8,6,4)
(1,7)(3,5)(4,8)
(1,7,5,3)(2,4,6,8)
(2,8)(3,7)(4,6)
(1,5)(2,6)(3,7)(4,8)
"""
from sage.combinat.root_system.reflection_group_c import Iterator
return iter(Iterator(self, N=self.number_of_reflections(),
algorithm=algorithm, tracking_words=tracking_words))
def __iter__(self):
r"""
Return an iterator going through all elements in ``self``.
For options and faster iteration see :meth:`iteration`.
EXAMPLES::
sage: W = WeylGroup(["B",2], implementation="permutation")
sage: for w in W: print("%s %s"%(w, w._reduced_word))
() []
(1,3)(2,6)(5,7) [1]
(1,5)(2,4)(6,8) [0]
(1,7,5,3)(2,4,6,8) [0, 1]
(1,3,5,7)(2,8,6,4) [1, 0]
(2,8)(3,7)(4,6) [1, 0, 1]
(1,7)(3,5)(4,8) [0, 1, 0]
(1,5)(2,6)(3,7)(4,8) [0, 1, 0, 1]
"""
return self.iteration(algorithm="breadth", tracking_words=True)
def _coerce_map_from_(self, P):
"""
Return ``True`` if ``P`` is a Weyl group of the same
Cartan type and ``False`` otherwise.
EXAMPLES::
sage: W = WeylGroup(["B",4], implementation="permutation")
sage: W2 = WeylGroup(["B",4])
sage: W._coerce_map_from_(W2)
True
sage: W3 = WeylGroup(["B",5])
sage: W.has_coerce_map_from(W3)
False
sage: W4 = CoxeterGroup(["B",4])
sage: W.has_coerce_map_from(W4)
False
sage: W5 = WeylGroup(["C",4], implementation="permutation")
sage: W.has_coerce_map_from(W5)
False
"""
return isinstance(P, WeylGroup_gens) and P.cartan_type() is self.cartan_type()
@cached_method
def rank(self):
"""
Return the rank of ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',4], implementation="permutation")
sage: W.rank()
4
"""
return self._cartan_type.rank()
def simple_reflection(self, i):
r"""
Return the ``i``-th simple reflection of ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',4], implementation="permutation")
sage: W.simple_reflection(1)
(1,11)(2,5)(6,8)(9,10)(12,15)(16,18)(19,20)
sage: W.simple_reflections()
Finite family {1: (1,11)(2,5)(6,8)(9,10)(12,15)(16,18)(19,20),
2: (1,5)(2,12)(3,6)(7,9)(11,15)(13,16)(17,19),
3: (2,6)(3,13)(4,7)(5,8)(12,16)(14,17)(15,18),
4: (3,7)(4,14)(6,9)(8,10)(13,17)(16,19)(18,20)}
"""
return self.gens()[self._index_set_inverse[i]]
@cached_method
def simple_roots(self):
"""
Return the simple roots of ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',4], implementation="permutation")
sage: W.simple_roots()
Finite family {1: (1, 0, 0, 0), 2: (0, 1, 0, 0),
3: (0, 0, 1, 0), 4: (0, 0, 0, 1)}
"""
Q = self._cartan_type.root_system().root_lattice()
roots = [al.to_vector() for al in Q.simple_roots()]
for v in roots:
v.set_immutable()
return Family(self._index_set, lambda i: roots[self._index_set_inverse[i]])
independent_roots = simple_roots
@cached_method
def index_set(self):
"""
Return the index set of ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',4], implementation="permutation")
sage: W.index_set()
(1, 2, 3, 4)
"""
return self._index_set
@cached_method
def reflection_index_set(self):
"""
Return the index set of reflections of ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',3], implementation="permutation")
sage: W.reflection_index_set()
(1, 2, 3, 4, 5, 6)
"""
return tuple(range(1, self.number_of_reflections()+1))
def cartan_type(self):
"""
Return the Cartan type of ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',4], implementation="permutation")
sage: W.cartan_type()
['A', 4]
"""
return self._cartan_type
@cached_method
def roots(self):
"""
Return the roots of ``self``.
EXAMPLES::
sage: W = WeylGroup(['G',2], implementation="permutation")
sage: W.roots()
((1, 0),
(0, 1),
(1, 1),
(3, 1),
(2, 1),
(3, 2),
(-1, 0),
(0, -1),
(-1, -1),
(-3, -1),
(-2, -1),
(-3, -2))
"""
Q = self._cartan_type.root_system().root_lattice()
roots = ([x.to_vector() for x in Q.positive_roots()]
+ [-x.to_vector() for x in Q.positive_roots()])
for v in roots:
v.set_immutable()
return tuple(roots)
def positive_roots(self):
"""
Return the positive roots of ``self``.
EXAMPLES::
sage: W = WeylGroup(['C',3], implementation="permutation")
sage: W.positive_roots()
((1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(1, 1, 0),
(0, 1, 1),
(0, 2, 1),
(1, 1, 1),
(2, 2, 1),
(1, 2, 1))
"""
return self.roots()[:self.number_of_reflections()]
@cached_method
def number_of_reflections(self):
"""
Return the number of reflections in ``self``.
EXAMPLES::
sage: W = WeylGroup(['D',4], implementation="permutation")
sage: W.number_of_reflections()
12
"""
return len(list(self._cartan_type.root_system().root_lattice().positive_roots()))
@cached_method
def distinguished_reflections(self):
"""
Return the reflections of ``self``.
EXAMPLES::
sage: W = WeylGroup(['B',2], implementation="permutation")
sage: W.distinguished_reflections()
Finite family {1: (1,5)(2,4)(6,8), 2: (1,3)(2,6)(5,7),
3: (2,8)(3,7)(4,6), 4: (1,7)(3,5)(4,8)}
"""
Q = self._cartan_type.root_system().root_lattice()
pos_roots = list(Q.positive_roots())
Phi = pos_roots + [-x for x in pos_roots]
def build_elt(index):
r = pos_roots[index]
perm = [Phi.index(x.reflection(r))+1 for x in Phi]
return self.element_class(perm, self, check=False)
return Family(self.reflection_index_set(), lambda i: build_elt(i-1))
reflections = distinguished_reflections
def simple_root_index(self, i):
r"""
Return the index of the simple root `\alpha_i`.
This is the position of `\alpha_i` in the list of simple roots.
EXAMPLES::
sage: W = WeylGroup(['A',3], implementation="permutation")
sage: [W.simple_root_index(i) for i in W.index_set()]
[0, 1, 2]
"""
return self._index_set_inverse[i]
class Element(RealReflectionGroupElement):
def _repr_(self):
"""
EXAMPLES::
sage: W = WeylGroup(['A',3], prefix="s", implementation="permutation")
sage: [s1,s2,s3] = W.simple_reflections()
sage: s1*s2
s1*s2
sage: W = WeylGroup(['A',3], implementation="permutation")
sage: [s1,s2,s3] = W.simple_reflections()
sage: s1*s2
(1,10,2)(3,5,6)(4,8,7)(9,11,12)
"""
if self.parent()._prefix is None:
return RealReflectionGroupElement._repr_(self)
redword = self.reduced_word()
if not redword:
return "1"
else:
return "*".join("%s%d"%(self.parent()._prefix, i) for i in redword)
def _latex_(self):
    """
    Return a latex representation of ``self``: the generic latex of
    the permutation element when the parent has no prefix, otherwise
    the reduced word written as subscripted generators.

    EXAMPLES::

        sage: W = WeylGroup(['A',3], prefix="s", implementation="permutation")
        sage: [s1,s2,s3] = W.simple_reflections()
        sage: s1*s2
        s1*s2
        sage: W = WeylGroup(['A',3], implementation="permutation")
        sage: [s1,s2,s3] = W.simple_reflections()
        sage: s1*s2
        (1,10,2)(3,5,6)(4,8,7)(9,11,12)
    """
    if self.parent()._prefix is None:
        # Dispatch to the latex of the base class; the original
        # mistakenly called RealReflectionGroupElement._repr_ here
        # (copy-paste from _repr_ above).
        return RealReflectionGroupElement._latex_(self)
    redword = self.reduced_word()
    if not redword:
        return "1"
    else:
        return "".join("%s_{%d}" % (self.parent()._prefix, i) for i in redword)
| 33.817582 | 146 | 0.510951 |
5816e949ba4a9d3600362e45768d66548fbd4d4b | 969 | py | Python | legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | 2 | 2020-04-09T13:04:25.000Z | 2021-09-24T14:17:26.000Z | legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | null | null | null | legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | 3 | 2019-09-20T20:49:54.000Z | 2021-09-02T17:33:47.000Z | import unittest
from simulator_diagnoser.graph import InmemoryGraph
from simulator_diagnoser.graph.traversal import ForwardAnalysis
class ForwardAnalysisTest(unittest.TestCase):
    """Tests for ForwardAnalysis traversal over an InmemoryGraph."""

    def setUp(self):
        # Graph =
        #       9
        #     / | \
        #    6  7  8
        #     \/ \ /
        #     4   5
        #    / \ / \
        #   1   2   3
        self.g1 = InmemoryGraph()
        edges = [(1, 4), (2, 4), (2, 5), (3, 5),
                 (4, 6), (4, 7), (5, 7), (5, 8),
                 (6, 9), (7, 9), (8, 9)]
        for edge in edges:
            self.g1.add_edge(*edge)

    def test_none(self):
        # A ForwardAnalysis over no graph must yield nothing; entering
        # the loop body is a failure.  The original called the undefined
        # bare ``fail()``, which would raise NameError instead of a
        # clean assertion failure.
        fa = ForwardAnalysis(None)
        for x in fa:
            self.fail()

    def test_graph(self):
        # Nodes must come out in topological order 1..9, each paired
        # with its recorded parents.
        fa = ForwardAnalysis(self.g1)
        for i, (node, parents) in enumerate(fa, start=1):
            self.assertEqual(i, node)
            self.assertEqual(parents, self.g1.get_node_parents(i)[0])
if __name__ == '__main__':
unittest.main()
| 24.846154 | 69 | 0.49742 |
8eaaf0875649c44f415c4b2d2bc4675a561a1dec | 1,193 | py | Python | tests/test_wilcoxon.py | sylwekczmil/cacp | 7322f104547582939c75b8ae8ad59ac1fe0298aa | [
"MIT"
] | 1 | 2022-03-10T20:35:10.000Z | 2022-03-10T20:35:10.000Z | tests/test_wilcoxon.py | sylwekczmil/cacp | 7322f104547582939c75b8ae8ad59ac1fe0298aa | [
"MIT"
] | null | null | null | tests/test_wilcoxon.py | sylwekczmil/cacp | 7322f104547582939c75b8ae8ad59ac1fe0298aa | [
"MIT"
] | null | null | null | from cacp.wilcoxon import process_wilcoxon
# (name, factory) pairs passed to process_wilcoxon.  The factories take
# (n_inputs, n_classes); here they are stubbed to return None since --
# presumably -- only the classifier names are consumed by the Wilcoxon
# post-processing step (see the original inline note; verify against
# process_wilcoxon).
CLASSIFIERS = [
    ('XGB', lambda n_inputs, n_classes: None), # mock this only for tests
    ('SVC', lambda n_inputs, n_classes: None),
    ('DT', lambda n_inputs, n_classes: None),
    ('RF', lambda n_inputs, n_classes: None),
    ('NC', lambda n_inputs, n_classes: None),
    ('KNN', lambda n_inputs, n_classes: None),
    ('MLP', lambda n_inputs, n_classes: None),
    ('AB', lambda n_inputs, n_classes: None),
    ('GNB', lambda n_inputs, n_classes: None),
]
def test_wilcoxon(result_dir_with_data, golden_result_dir):
    """Run the Wilcoxon post-processing and check that every file present
    in the golden output directories was produced in the results."""
    process_wilcoxon(CLASSIFIERS, result_dir_with_data)

    result_root = result_dir_with_data.joinpath('wilcoxon')
    golden_root = golden_result_dir.joinpath('wilcoxon')
    dir_pairs = [
        (result_root, golden_root),
        (result_root.joinpath('auc'), golden_root.joinpath('auc')),
        (result_root.joinpath('accuracy'), golden_root.joinpath('accuracy')),
    ]
    for result_winner_dir, expected_winner_dir in dir_pairs:
        for expected_file in expected_winner_dir.glob('*'):
            assert result_winner_dir.joinpath(expected_file.name).exists()
| 42.607143 | 92 | 0.697402 |
f95462a15d06dfd8ca5bbe37770f90adf8b5d986 | 4,756 | py | Python | src/arclib.py | kbines/ARC | abb64e8833a4c1c7bd5dfa1bc8755415420f58cd | [
"Apache-2.0"
] | null | null | null | src/arclib.py | kbines/ARC | abb64e8833a4c1c7bd5dfa1bc8755415420f58cd | [
"Apache-2.0"
] | null | null | null | src/arclib.py | kbines/ARC | abb64e8833a4c1c7bd5dfa1bc8755415420f58cd | [
"Apache-2.0"
] | null | null | null | """NUI Galway CT5132/CT5148 Programming and Tools for AI (James McDermott)
Common functions for Assignment 3
Student name(s): Keith Bines
Student ID(s): 19234297
"""
import json
import os
def get_json(task_file):
    """
    Load an ARC task from a JSON file.

    :param task_file: path to the task file; the ``.json`` suffix is
        appended if missing.
    :return: tuple ``(json_data, task_name)`` where ``task_name`` is
        the file's base name without the ``.json`` suffix.
    """
    if task_file[-5:] != '.json':
        task_file = task_file + '.json'
    with open(task_file) as json_file:
        json_data = json.load(json_file)
    # os.path.basename also handles paths with no separator at all;
    # the original str.rindex('/') raised ValueError for bare names.
    task_name = os.path.basename(task_file)[:-5]
    return json_data, task_name
class Task:
""""
Class for the problem task
"""
# Private Methods
# Constructor must have task name and input
def __init__(self, task_name, task_input):
    """Store the task data and immediately compute the solutions."""
    # create and set attributes
    self.__task_name = task_name  # task identifier; selects the solver in __solve
    self.__input = task_input  # raw task JSON dict with "train"/"test" keys
    self.__train = self.__input["train"]  # list of training input/output pairs
    self.__test_input = self.__input["test"][0]["input"][0]  # first test input row
    self.__test_output = self.__input["test"][0]["output"]  # expected test output
    self.__solution = []  # computed grids: one per training case, test grid last
    # NOTE(review): __output aliases task_input (no copy), so __solve
    # mutates the caller's dict in place -- confirm this is intended.
    self.__output = task_input
    # Call Solve
    self.__solve()
def __solve(self):
    """Dispatch to the task-specific solver, then copy the computed
    grids into the output structure (training outputs first, test last)."""
    # use a dispatch table to easily extend for other solutions
    dt = {
        'feca6190': self.__solve_all_feca6190,
        'a64e4611': self.__solve_all_a64e4611,
        'ecdecbb3': self.__solve_all_ecdecbb3
    }
    dt[(self.__task_name)]()
    # Set the output to each training solution
    # (__solution holds len(train) training grids followed by the test
    # grid, so len(__solution) - 1 == number of training cases)
    for training in range(0, len(self.__solution) - 1 ):
        self.__output["train"][training]["output"][0] = self.__solution[training]
    # Set last the test output to the test solution, the last grid
    self.__output["test"][0]["output"] = self.__solution[len(self.__solution) - 1]
def __solve_all_feca6190(self):
# Solve all training tasks
for train_input in (range(0, len(self.__train))):
print(self.__train[train_input]["input"][0])
self.__solve_feca6190(self.__train[train_input]["input"][0])
# solve test task
self.__solve_feca6190(self.__input["test"][0]["input"][0])
def __solve_all_a64e4611(self):
# Solve all training tasks
for train_input in (range(0, len(self.__train))):
print(self.__train[train_input]["input"][0])
self.__solve_a64e4611(self.__train[train_input]["input"][0])
# solve test task
self.__solve_a64e4611(self.__input["test"][0]["input"][0])
def __solve_all_ecdecbb3(self):
# Solve all training tasks
for train_input in (range(0, len(self.__train))):
print(self.__train[train_input]["input"][0])
self.__solve_ecdecbb3(self.__train[train_input]["input"][0])
# solve test task
self.__solve_ecdecbb3(self.__input["test"][0]["input"][0])
def __solve_feca6190(self, input):
# see Readme for pseudo code
# For this solution the grid size is a square of the number of coloured cells * the length of the input row
coloured_cell_count = 0
for cell in input:
if cell > 0:
coloured_cell_count += 1
square = coloured_cell_count * len(input)
solution_grid = self.__create_solution_grid(square, square)
# For each coloured cell in the input
start_cell = 0;
solution_length = len(solution_grid)
for colour in input:
if colour > 0:
# starting from the bottom row colour the cell at the same position as the test input cell
# then move up 1 and right one until at the last column
# Number of up movements is width of grid (square) - start position
cell = start_cell
for row in range(solution_length, start_cell, -1):
solution_grid[row-1][cell] = colour
cell += 1
start_cell += 1
self.__solution.append(solution_grid)
def __solve_a64e4611(self, input):
print('a64e4611')
def __solve_ecdecbb3(self, input):
print('ecdecbb3')
# create the solution grid of size x,y set to all black (0)
def __create_solution_grid(self, x, y):
solution_grid = []
for rows in range(y):
row = [0] * x
solution_grid.append(row)
return solution_grid
# Public Methods
def print_solution(self):
# todo for row in range(len(self.__solution)):["test"][0]["output"]
for grid in self.__solution:
for row in grid:
print(' '.join(map(str, row)))
print()
def get_solution(self):
return self.__output
def test_input_output(self):
if self.__output == self.__input:
print("Success")
else:
print("Fail")
| 34.970588 | 115 | 0.606812 |
ef780822e777ab569daff07574e24e8a5de7e804 | 27,482 | py | Python | Lib/_collections_abc.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | 1 | 2020-10-25T16:33:22.000Z | 2020-10-25T16:33:22.000Z | Lib/_collections_abc.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | Lib/_collections_abc.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
GenericAlias = type(list[int])
__all__ = ["Awaitable", "Coroutine",
"AsyncIterable", "AsyncIterator", "AsyncGenerator",
"Hashable", "Iterable", "Iterator", "Generator", "Reversible",
"Sized", "Container", "Callable", "Collection",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# This module has been renamed from collections.abc to _collections_abc to
# speed up interpreter startup. Some of the types such as MutableMapping are
# required early but collections module imports a lot of other modules.
# See issue #19218
__name__ = "collections.abc"
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types might not be distinct
# and they may have their own implementation specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
longrange_iterator = type(iter(range(1 << 1000)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
generator = type((lambda: (yield))())
## coroutine ##
async def _coro(): pass
_coro = _coro()
coroutine = type(_coro)
_coro.close() # Prevent ResourceWarning
del _coro
## asynchronous generator ##
async def _ag(): yield
_ag = _ag()
async_generator = type(_ag)
del _ag
### ONE-TRICK PONIES ###
def _check_methods(C, *methods):
    """Return True when class *C* provides every name in *methods*.

    A method mapped to None is treated as deliberately blocked, and the
    check returns NotImplemented (the conventional "defer" result for
    __subclasshook__ implementations).
    """
    mro = C.__mro__
    for method in methods:
        for B in mro:
            if method in B.__dict__:
                if B.__dict__[method] is None:
                    return NotImplemented
                break
            # NOTE(review): upstream CPython aligns this `else` with the
            # inner `for` (a for-else), so the *whole* MRO is searched
            # before giving up; as transcribed here it binds to the `if`,
            # which would consult only the first class on the MRO.  This
            # looks like an indentation transcription artifact — confirm
            # against upstream Lib/_collections_abc.py before relying on it.
            else:
                return NotImplemented
    return True
class Hashable(metaclass=ABCMeta):
    """ABC for classes that provide __hash__()."""
    __slots__ = ()
    @abstractmethod
    def __hash__(self):
        # Abstract placeholder; concrete subclasses must override.
        return 0
    @classmethod
    def __subclasshook__(cls, C):
        # Structural issubclass(): delegate to _check_methods so any class
        # defining __hash__ (and not blocking it with None) qualifies.
        if cls is Hashable:
            return _check_methods(C, "__hash__")
        return NotImplemented
class Awaitable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __await__(self):
yield
@classmethod
def __subclasshook__(cls, C):
if cls is Awaitable:
return _check_methods(C, "__await__")
return NotImplemented
__class_getitem__ = classmethod(GenericAlias)
class Coroutine(Awaitable):
__slots__ = ()
@abstractmethod
def send(self, value):
"""Send a value into the coroutine.
Return next yielded value or raise StopIteration.
"""
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
"""Raise an exception in the coroutine.
Return next yielded value or raise StopIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
"""Raise GeneratorExit inside coroutine.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError("coroutine ignored GeneratorExit")
@classmethod
def __subclasshook__(cls, C):
if cls is Coroutine:
return _check_methods(C, '__await__', 'send', 'throw', 'close')
return NotImplemented
Coroutine.register(coroutine)
class AsyncIterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __aiter__(self):
return AsyncIterator()
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncIterable:
return _check_methods(C, "__aiter__")
return NotImplemented
__class_getitem__ = classmethod(GenericAlias)
class AsyncIterator(AsyncIterable):
__slots__ = ()
@abstractmethod
async def __anext__(self):
"""Return the next item or raise StopAsyncIteration when exhausted."""
raise StopAsyncIteration
def __aiter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncIterator:
return _check_methods(C, "__anext__", "__aiter__")
return NotImplemented
class AsyncGenerator(AsyncIterator):
__slots__ = ()
async def __anext__(self):
"""Return the next item from the asynchronous generator.
When exhausted, raise StopAsyncIteration.
"""
return await self.asend(None)
@abstractmethod
async def asend(self, value):
"""Send a value into the asynchronous generator.
Return next yielded value or raise StopAsyncIteration.
"""
raise StopAsyncIteration
@abstractmethod
async def athrow(self, typ, val=None, tb=None):
"""Raise an exception in the asynchronous generator.
Return next yielded value or raise StopAsyncIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
async def aclose(self):
"""Raise GeneratorExit inside coroutine.
"""
try:
await self.athrow(GeneratorExit)
except (GeneratorExit, StopAsyncIteration):
pass
else:
raise RuntimeError("asynchronous generator ignored GeneratorExit")
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncGenerator:
return _check_methods(C, '__aiter__', '__anext__',
'asend', 'athrow', 'aclose')
return NotImplemented
AsyncGenerator.register(async_generator)
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
return _check_methods(C, "__iter__")
return NotImplemented
__class_getitem__ = classmethod(GenericAlias)
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
'Return the next item from the iterator. When exhausted, raise StopIteration'
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
return _check_methods(C, '__iter__', '__next__')
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(longrange_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Reversible(Iterable):
__slots__ = ()
@abstractmethod
def __reversed__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Reversible:
return _check_methods(C, "__reversed__", "__iter__")
return NotImplemented
class Generator(Iterator):
__slots__ = ()
def __next__(self):
"""Return the next item from the generator.
When exhausted, raise StopIteration.
"""
return self.send(None)
@abstractmethod
def send(self, value):
"""Send a value into the generator.
Return next yielded value or raise StopIteration.
"""
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
"""Raise an exception in the generator.
Return next yielded value or raise StopIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
"""Raise GeneratorExit inside generator.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError("generator ignored GeneratorExit")
@classmethod
def __subclasshook__(cls, C):
if cls is Generator:
return _check_methods(C, '__iter__', '__next__',
'send', 'throw', 'close')
return NotImplemented
Generator.register(generator)
class Sized(metaclass=ABCMeta):
    """ABC for classes that provide __len__()."""
    __slots__ = ()
    @abstractmethod
    def __len__(self):
        # Abstract placeholder; concrete subclasses must override.
        return 0
    @classmethod
    def __subclasshook__(cls, C):
        # Structural issubclass(): any class providing __len__ qualifies.
        if cls is Sized:
            return _check_methods(C, "__len__")
        return NotImplemented
class Container(metaclass=ABCMeta):
    """ABC for classes that provide __contains__()."""
    __slots__ = ()
    @abstractmethod
    def __contains__(self, x):
        # Abstract placeholder; concrete subclasses must override.
        return False
    @classmethod
    def __subclasshook__(cls, C):
        # Structural issubclass(): any class providing __contains__ qualifies.
        if cls is Container:
            return _check_methods(C, "__contains__")
        return NotImplemented
    # Support parameterized generics, e.g. Container[int].
    __class_getitem__ = classmethod(GenericAlias)
class Collection(Sized, Iterable, Container):
__slots__ = ()
@classmethod
def __subclasshook__(cls, C):
if cls is Collection:
return _check_methods(C, "__len__", "__iter__", "__contains__")
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
return _check_methods(C, "__call__")
return NotImplemented
__class_getitem__ = classmethod(GenericAlias)
### SETS ###
class Set(Collection):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), redefine __le__ and __ge__,
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) > len(other) and self.__ge__(other)
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) < len(other):
return False
for elem in other:
if elem not in self:
return False
return True
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
__rand__ = __and__
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
__ror__ = __or__
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __rsub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in other
if value not in self)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
__rxor__ = __xor__
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
This must be compatible __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
class MutableSet(Set):
"""A mutable set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__, __len__,
add(), and discard().
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError from None
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Collection):
__slots__ = ()
"""A Mapping is a generic container for associating key/value
pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __iter__, and __len__.
"""
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return KeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return ItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
__reversed__ = None
Mapping.register(mappingproxy)
class MappingView(Sized):
__slots__ = '_mapping',
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
__class_getitem__ = classmethod(GenericAlias)
class KeysView(MappingView, Set):
__slots__ = ()
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
yield from self._mapping
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
__slots__ = ()
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v is value or v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView, Collection):
__slots__ = ()
def __contains__(self, value):
for key in self._mapping:
v = self._mapping[key]
if v is value or v == value:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
"""A MutableMapping is a generic container for associating
key/value pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __setitem__, __delitem__,
__iter__, and __len__.
"""
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
'''D.popitem() -> (k, v), remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty.
'''
try:
key = next(iter(self))
except StopIteration:
raise KeyError from None
value = self[key]
del self[key]
return key, value
def clear(self):
'D.clear() -> None. Remove all items from D.'
try:
while True:
self.popitem()
except KeyError:
pass
def update(self, other=(), /, **kwds):
''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k, v in F.items(): D[k] = v
'''
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Reversible, Collection):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v is value or v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value, start=0, stop=None):
'''S.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present.
Supporting start and stop arguments is optional, but
recommended.
'''
if start is not None and start < 0:
start = max(len(self) + start, 0)
if stop is not None and stop < 0:
stop += len(self)
i = start
while stop is None or i < stop:
try:
v = self[i]
if v is value or v == value:
return i
except IndexError:
break
i += 1
raise ValueError
def count(self, value):
'S.count(value) -> integer -- return number of occurrences of value'
return sum(1 for v in self if v is value or v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
Sequence.register(memoryview)
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
"""All the operations on a read-write sequence.
Concrete subclasses must provide __new__ or __init__,
__getitem__, __setitem__, __delitem__, __len__, and insert().
"""
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
'S.insert(index, value) -- insert value before index'
raise IndexError
def append(self, value):
'S.append(value) -- append value to the end of the sequence'
self.insert(len(self), value)
def clear(self):
'S.clear() -> None -- remove all items from S'
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
'S.extend(iterable) -- extend sequence by appending elements from the iterable'
if values is self:
values = list(values)
for v in values:
self.append(v)
def pop(self, index=-1):
'''S.pop([index]) -> item -- remove and return item at index (default last).
Raise IndexError if list is empty or index is out of range.
'''
v = self[index]
del self[index]
return v
def remove(self, value):
'''S.remove(value) -- remove first occurrence of value.
Raise ValueError if the value is not present.
'''
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
| 26.629845 | 88 | 0.57474 |
f4dc09dda262e4b5e2d3e3010ee09c660c2da201 | 1,928 | py | Python | metrics/sample_edited.py | iviazovetskyi/rewriting | 1aec0d4dd8cee8de7195c6a49c16ccaeaeb31718 | [
"MIT"
] | null | null | null | metrics/sample_edited.py | iviazovetskyi/rewriting | 1aec0d4dd8cee8de7195c6a49c16ccaeaeb31718 | [
"MIT"
] | null | null | null | metrics/sample_edited.py | iviazovetskyi/rewriting | 1aec0d4dd8cee8de7195c6a49c16ccaeaeb31718 | [
"MIT"
] | 1 | 2020-10-27T04:49:38.000Z | 2020-10-27T04:49:38.000Z | import os
import json
import argparse
import shutil
from tqdm import tqdm
import torch
from utils.stylegan2 import load_seq_stylegan
from utils.pidfile import reserve_dir
from utils.imgsave import SaveImagePool
from utils import zdataset
from torchvision.transforms import ToPILImage
from rewrite import ganrewrite
from .load_mask import load_mask_info
N = 10000  # number of images to sample from the edited generator
parser = argparse.ArgumentParser('sample edited images')
parser.add_argument('--mask', type=str)
parser.add_argument('--full_rank', action='store_true')
parser.add_argument('--no_tight_paste', action='store_true')
parser.add_argument('--single_context', type=int, default=-1)
args = parser.parse_args()
# Build the experiment name from the mask path plus option suffixes, so
# each configuration writes into its own results directory.
exp_name = args.mask
if args.full_rank:
    exp_name = exp_name + '_full_rank'
if args.single_context != -1:
    exp_name = exp_name + f'_context{args.single_context}'
rd = reserve_dir(os.path.join('results/samples', exp_name))
# Copy the lightbox viewer next to the samples for easy browsing.
shutil.copyfile('utils/lightbox.html', rd('+lightbox.html'))
mask, dataset, layernum = load_mask_info(args.mask)
model = load_seq_stylegan(dataset, mconv='seq', truncation=0.5)
model.eval()
zds = zdataset.z_dataset_for_model(model, size=1000)
writer = ganrewrite.SeqStyleGanRewriter
gw = writer(model,
            zds,
            layernum=layernum,
            cachedir='results/rewrite/%s/%s/layer%d' % ('stylegan', dataset, layernum),
            low_rank_insert=not args.full_rank,
            key_method='zca',
            tight_paste=not args.no_tight_paste)
# Apply the saved edit (a JSON rewrite description) to the model weights.
with open(mask) as f:
    print('Loading mask', mask)
    gw.apply_edit(json.load(f), rank=1, single_key=args.single_context)
saver = SaveImagePool()
to_pil = ToPILImage()
with torch.no_grad():
    for imgnum in tqdm(range(N)):
        # seed=imgnum makes the latent deterministic per image index,
        # so repeated runs produce the same samples.
        z = zdataset.z_sample_for_model(model, size=1, seed=imgnum).cuda()
        x_real = gw.sample_image_from_latent(z).detach().cpu()
        # Rescale per the 0.5*x + 0.5 transform (model range -> [0, 1])
        # before converting to a PIL image and queueing the PNG write.
        saver.add(to_pil(x_real[0] * 0.5 + 0.5), rd(f'{imgnum}.png'))
saver.join()
rd.done()
| 31.096774 | 87 | 0.725622 |
061ab38807fcb37c05ea8f657d941a10821861a0 | 602 | py | Python | examples/single_queue_mode.py | Hikki12/camio | c183234083c0382b91ecda8952cda6640e78d974 | [
"MIT"
] | null | null | null | examples/single_queue_mode.py | Hikki12/camio | c183234083c0382b91ecda8952cda6640e78d974 | [
"MIT"
] | null | null | null | examples/single_queue_mode.py | Hikki12/camio | c183234083c0382b91ecda8952cda6640e78d974 | [
"MIT"
] | null | null | null | import cv2
from camio import Camera
camera = Camera(
    src=0, # capture source index (presumably the first webcam — verify)
    fps=None, # None: let the library choose the frame rate automatically
    size=None, # None: keep the source's native resolution
    emitterIsEnabled=False, # disable event callbacks; we poll with read()
    backgroundIsEnabled=True, # capture runs in the background
    queueModeEnabled=True, # frames are delivered through an internal queue
)
camera.start()
# Poll frames and display them until the user presses 'q' in the window.
while True:
    image = camera.read(timeout=None)
    if image is not None:
        cv2.imshow('image', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
camera.stop()
cv2.destroyAllWindows()
| 23.153846 | 62 | 0.606312 |
8075f67ccb745afc13b2398018887617d7b397c7 | 347 | py | Python | py/cap3/whiletrue.py | dacanizares/IntroCS-ES | 1324b59a3bed86559117b01ad85384d593394d4a | [
"MIT"
] | 2 | 2020-03-21T19:12:10.000Z | 2020-03-27T03:59:41.000Z | py/cap3/whiletrue.py | dacanizares/IntroCS-ES | 1324b59a3bed86559117b01ad85384d593394d4a | [
"MIT"
] | 13 | 2020-03-20T01:27:57.000Z | 2020-08-08T18:20:29.000Z | py/cap3/whiletrue.py | dacanizares/IntroCS-ES | 1324b59a3bed86559117b01ad85384d593394d4a | [
"MIT"
] | null | null | null | # USANDO UN WHILE
# Tenemos que realizar una asignacion previa
# que permita entrar al ciclo
n = -1
while n < 0:
n = input('Digite nro positivo ')
print 'El dato es valido'
# USANDO UN WHILE TRUE
# Hay que poner la condicion de salida
while True:
n = input('Digite nro positivo ')
if n >= 0:
break
print 'El dato es valido'
| 20.411765 | 44 | 0.665706 |
c4ffa680af3c4224895d5d9261339410163d8735 | 422 | py | Python | efny2019/wsgi.py | mikephelan/efny2019 | 1a40e381876e69e1a94434980b108f06c82e7119 | [
"MIT"
] | null | null | null | efny2019/wsgi.py | mikephelan/efny2019 | 1a40e381876e69e1a94434980b108f06c82e7119 | [
"MIT"
] | null | null | null | efny2019/wsgi.py | mikephelan/efny2019 | 1a40e381876e69e1a94434980b108f06c82e7119 | [
"MIT"
] | null | null | null | """
WSGI config for the efny2019 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project's settings module when DJANGO_SETTINGS_MODULE
# is not already set in the environment (e.g. when launched directly by
# a WSGI server).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "efny2019.settings")
# The WSGI callable that application servers import and serve.
application = get_wsgi_application()
| 24.823529 | 82 | 0.734597 |
c69beef8b28ee697cd37a7d7bc6a5fa32a5dea4b | 9,147 | py | Python | scold/system.py | Xion/SublimeScold | 13c97bceed428bb9ac813ccb8194a6664ec9f2d9 | [
"BSD-2-Clause"
] | 5 | 2015-06-11T19:21:40.000Z | 2016-01-02T16:13:50.000Z | scold/system.py | Xion/SublimeScold | 13c97bceed428bb9ac813ccb8194a6664ec9f2d9 | [
"BSD-2-Clause"
] | null | null | null | scold/system.py | Xion/SublimeScold | 13c97bceed428bb9ac813ccb8194a6664ec9f2d9 | [
"BSD-2-Clause"
] | null | null | null | """
Utilities for opening files or URLs in the registered default application
and for sending e-mail using the user's preferred composer.
Taken from the following recipe:
http://code.activestate.com/recipes/511443-cross-platform-startfile-and-mailto-functions/
"""
__version__ = '1.1'
__all__ = ['open', 'mailto']
import os
import sys
import webbrowser
import subprocess
from email.Utils import encode_rfc2231
_controllers = {}
_open = None
class BaseController(object):
    """Base class for open program controllers."""
    def __init__(self, name):
        # Human-readable identifier of the underlying launcher program.
        self.name = name
    def open(self, filename):
        # Abstract: concrete controllers must implement the actual launch.
        raise NotImplementedError
_is_windows = sys.platform.startswith('win')
_is_linux = sys.platform.startswith('linux')
_is_osx = sys.platform == 'darwin'
class Controller(BaseController):
    """Controller for a generic open program (e.g. xdg-open, exo-open)."""
    def __init__(self, *args):
        # *args is the launcher command-line prefix; the controller's name
        # is the basename of the launcher executable.
        super(Controller, self).__init__(os.path.basename(args[0]))
        self.args = list(args)
    def _invoke(self, cmdline):
        """Run cmdline and return True when it exited with status 0."""
        if _is_windows:
            closefds = False
            startupinfo = subprocess.STARTUPINFO()
            # Hide the console window of the launched helper on Windows.
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        else:
            closefds = True
            startupinfo = None
        if os.environ.get('DISPLAY') or _is_windows or _is_osx:
            # GUI session: silence the helper's stdio via the null device.
            # NOTE(review): file() is Python 2 only; Python 3 needs open().
            inout = file(os.devnull, 'r+')
        else:
            # for TTY programs, we need stdin/out
            inout = None
        # if possible, put the child process in a separate process group,
        # so keyboard interrupts don't affect the child process as well as
        # Python
        setsid = getattr(os, 'setsid', None)
        if not setsid:
            setsid = getattr(os, 'setpgrp', None)
        pipe = subprocess.Popen(cmdline, stdin=inout, stdout=inout,
                                stderr=inout, close_fds=closefds,
                                preexec_fn=setsid, startupinfo=startupinfo)
        # It is assumed that this kind of tools (gnome-open, kfmclient,
        # exo-open, xdg-open and open for OSX) immediately exit after
        # launching the specific application
        returncode = pipe.wait()
        # Subclasses may define fixreturncode() to normalise quirky exit
        # codes (see KfmClient further down in this module).
        if hasattr(self, 'fixreturncode'):
            returncode = self.fixreturncode(returncode)
        return not returncode
    def open(self, filename):
        """Open filename (a string or an argument sequence); return success."""
        # NOTE(review): basestring is Python 2 only (str in Python 3).
        if isinstance(filename, basestring):
            cmdline = self.args + [filename]
        else:
            # assume it is a sequence
            cmdline = self.args + filename
        try:
            return self._invoke(cmdline)
        except OSError:
            # Launcher executable missing or not runnable.
            return False
# Platform support for Windows
if _is_windows:
    class Start(BaseController):
        """Controller for the win32 start progam through os.startfile."""
        def open(self, filename):
            # os.startfile delegates to the file-type association registered
            # in Windows, exactly like typing "start <file>" in a shell.
            try:
                os.startfile(filename)
            except WindowsError:
                # [Error 22] No application is associated with the specified
                # file for this operation: '<URL>'
                return False
            else:
                return True
    _controllers['windows-default'] = Start('start')
    _open = _controllers['windows-default'].open
# Platform support for MacOS
elif _is_osx:
    # "open" is the standard OSX launcher command.
    _controllers['open']= Controller('open')
    _open = _controllers['open'].open
# Platform support for Unix
else:
    import commands
    # @WARNING: use the private API of the webbrowser module
    from webbrowser import _iscommand
    class KfmClient(Controller):
        """Controller for the KDE kfmclient program."""
        def __init__(self, kfmclient='kfmclient'):
            super(KfmClient, self).__init__(kfmclient, 'exec')
            self.kde_version = self.detect_kde_version()
        def detect_kde_version(self):
            """Return the KDE version reported by 'kde-config --version', or None."""
            kde_version = None
            try:
                info = commands.getoutput('kde-config --version')
                for line in info.splitlines():
                    if line.startswith('KDE'):
                        kde_version = line.split(':')[-1].strip()
                        break
            except (OSError, RuntimeError):
                pass
            return kde_version
        def fixreturncode(self, returncode):
            """Normalize kfmclient's exit status.

            NOTE(review): versions are compared as plain strings here
            (so e.g. '3.10' < '3.5.4'); confirm this is acceptable for
            the KDE versions that matter.
            """
            if returncode is not None and self.kde_version > '3.5.4':
                return returncode
            else:
                # Older kfmclient returns unreliable exit codes; treat as OK.
                return os.EX_OK
    def detect_desktop_environment():
        """Checks for known desktop environments
        Return the desktop environments name, lowercase (kde, gnome, xfce)
        or "generic"
        """
        desktop_environment = 'generic'
        # KDE and GNOME expose well-known session environment variables;
        # XFCE is detected by querying the X root window property.
        if os.environ.get('KDE_FULL_SESSION') == 'true':
            desktop_environment = 'kde'
        elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
            desktop_environment = 'gnome'
        else:
            try:
                info = commands.getoutput('xprop -root _DT_SAVE_MODE')
                if ' = "xfce4"' in info:
                    desktop_environment = 'xfce'
            except (OSError, RuntimeError):
                pass
        return desktop_environment
    def register_X_controllers():
        """Register whichever desktop-specific launchers are found on PATH."""
        if _iscommand('kfmclient'):
            _controllers['kde-open'] = KfmClient()
        for command in ('gnome-open', 'exo-open', 'xdg-open'):
            if _iscommand(command):
                _controllers[command] = Controller(command)
    def get():
        """Pick the opener callable matching the detected desktop environment."""
        controllers_map = {
            'gnome': 'gnome-open',
            'kde': 'kde-open',
            'xfce': 'exo-open',
        }
        desktop_environment = detect_desktop_environment()
        # KeyError covers both an unknown desktop and a launcher that was
        # not registered (not installed); fall back to xdg-open, then to
        # the webbrowser module.
        try:
            controller_name = controllers_map[desktop_environment]
            return _controllers[controller_name].open
        except KeyError:
            # NOTE(review): dict.has_key() is Python 2 only; this module
            # will not run unmodified on Python 3.
            if _controllers.has_key('xdg-open'):
                return _controllers['xdg-open'].open
            else:
                return webbrowser.open
    # Only probe for X11 launchers when a display is actually available.
    if os.environ.get("DISPLAY"):
        register_X_controllers()
    _open = get()
def open(filename):
    """Open a file or an URL in the registered default application.

    Delegates to the module-level ``_open`` callable selected at import
    time above (os.startfile on Windows, the ``open`` command on OSX, or
    a desktop-specific launcher / webbrowser fallback on X11).
    """
    return _open(filename)
def _fix_addersses(**kwargs):
    """Normalize and percent-escape mailto header values in place.

    For each recognized header ('address', 'to', 'cc', 'bcc') the value is:

    * removed from the mapping when empty/falsy,
    * joined with commas when it is a sequence of addresses,
    * percent-escaped for the characters that are significant inside a
      mailto URI ('%', '&' and '?').

    Returns the (possibly modified) keyword mapping.

    Raises
    ------
    TypeError
        If a header value is neither a string nor a sequence of strings.
    """
    for headername in ('address', 'to', 'cc', 'bcc'):
        try:
            headervalue = kwargs[headername]
            if not headervalue:
                del kwargs[headername]
                continue
            elif not isinstance(headervalue, basestring):
                # assume it is a sequence
                headervalue = ','.join(headervalue)
        except KeyError:
            pass
        except TypeError:
            raise TypeError('string or sequence expected for "%s", '
                            '%s found' % (headername,
                                          type(headervalue).__name__))
        else:
            # BUG FIX: '%' MUST be escaped before '&' and '?'; otherwise the
            # '%' introduced by their replacements ('%26', '%3F') would get
            # double-encoded. The previous dict-based loop left the order
            # unspecified (Python 2 dicts are unordered); an explicit ordered
            # sequence of pairs makes it deterministic.
            for char, replacement in (('%', '%25'), ('&', '%26'), ('?', '%3F')):
                headervalue = headervalue.replace(char, replacement)
            kwargs[headername] = headervalue
    return kwargs
def mailto_format(**kwargs):
    """Build a ``mailto:`` URI string from header keyword arguments.

    Recognized keywords are 'address', 'to', 'cc', 'bcc', 'subject',
    'body' and 'attach'. Empty values are skipped; address-like headers
    are used verbatim (already escaped by ``_fix_addersses``), while the
    remaining headers are RFC 2231 encoded.
    """
    # @TODO: implement utf8 option
    kwargs = _fix_addersses(**kwargs)
    parts = []
    for headername in ('to', 'cc', 'bcc', 'subject', 'body', 'attach'):
        # BUG FIX (idiom): dict.has_key() is deprecated in Python 2 and
        # removed in Python 3; the 'in' operator works in both.
        if headername in kwargs:
            headervalue = kwargs[headername]
            if not headervalue:
                continue
            if headername in ('address', 'to', 'cc', 'bcc'):
                parts.append('%s=%s' % (headername, headervalue))
            else:
                headervalue = encode_rfc2231(headervalue)  # @TODO: check
                parts.append('%s=%s' % (headername, headervalue))
    mailto_string = 'mailto:%s' % kwargs.get('address', '')
    if parts:
        mailto_string = '%s?%s' % (mailto_string, '&'.join(parts))
    return mailto_string
def mailto(address, to=None, cc=None, bcc=None, subject=None, body=None,
           attach=None):
    """Send an e-mail using the user's preferred composer.

    Open the user's preferred e-mail composer in order to send a mail to
    address(es) that must follow the syntax of RFC822. Multiple addresses
    may be provided (for address, cc and bcc parameters) as separate
    arguments.

    All parameters provided are used to prefill corresponding fields in
    the user's e-mail composer. The user will have the opportunity to
    change any of this information before actually sending the e-mail.

    address - specify the destination recipient
    cc      - specify a recipient to be copied on the e-mail
    bcc     - specify a recipient to be blindly copied on the e-mail
    subject - specify a subject for the e-mail
    body    - specify a body for the e-mail. Since the user will be able
              to make changes before actually sending the e-mail, this
              can be used to provide the user with a template for the
              e-mail text may contain linebreaks
    attach  - specify an attachment for the e-mail. file must point to
              an existing file
    """
    # Forward the arguments explicitly rather than via **locals(), which
    # silently depended on no local variable being defined before the call.
    mailto_string = mailto_format(address=address, to=to, cc=cc, bcc=bcc,
                                  subject=subject, body=body, attach=attach)
    return open(mailto_string)
| 31.650519 | 89 | 0.595605 |
2016de8879329b222b338afed3d5bbcdf1bded2b | 739 | py | Python | 2020/2 - Nesting Depth.py | yashtazor/Google-Code-Jam-Problems | 0f476bee6c8e0c018f330f52513c61425ab3c6bc | [
"MIT"
] | 1 | 2020-05-09T06:35:05.000Z | 2020-05-09T06:35:05.000Z | 2020/2 - Nesting Depth.py | yashtazor/Google-Code-Jam-Problems | 0f476bee6c8e0c018f330f52513c61425ab3c6bc | [
"MIT"
] | null | null | null | 2020/2 - Nesting Depth.py | yashtazor/Google-Code-Jam-Problems | 0f476bee6c8e0c018f330f52513c61425ab3c6bc | [
"MIT"
] | null | null | null | t = int(input())
# For each test case, wrap every digit in parentheses so that its nesting
# depth equals its value, using the minimum number of parentheses.
for case_no in range(t):
    s = input()
    # Open enough '(' to bring the first digit to its required depth.
    parts = ['(' * int(s[0]), s[0]]
    # Between consecutive digits, open or close exactly the depth difference.
    for prev, cur in zip(s, s[1:]):
        diff = int(cur) - int(prev)
        if diff > 0:
            parts.append('(' * diff)
        else:
            parts.append(')' * (-diff))
        parts.append(cur)
    # Close whatever depth the final digit left open.
    parts.append(')' * int(s[-1]))
    print('Case #' + str(case_no + 1) + ':', ''.join(parts))
f0c1ab905732e7da41ef01051fddcf99016fcf77 | 383 | py | Python | praticas_SOII_Fatec/ImportandoModulos/Pratica4/main.py | Lokiatos/Projects_Python | 44c6d912bc9ce742008eb129bea52963726b8382 | [
"MIT"
] | 1 | 2020-06-26T17:57:35.000Z | 2020-06-26T17:57:35.000Z | praticas_SOII_Fatec/ImportandoModulos/Pratica4/main.py | Lokiatos/Projects_Python | 44c6d912bc9ce742008eb129bea52963726b8382 | [
"MIT"
] | null | null | null | praticas_SOII_Fatec/ImportandoModulos/Pratica4/main.py | Lokiatos/Projects_Python | 44c6d912bc9ce742008eb129bea52963726b8382 | [
"MIT"
] | null | null | null | import os
# Path of the text file read by both examples below.
file_path = '/home/hleca/Desktop/pythonProject/praticas_sooII/ImportandoModulos/Pratica3/arquivo.txt'
# Using "with open" we do not need to issue a command to close the file;
# the context manager closes it automatically when the block exits.
with open(file_path, 'r') as reader:
    for line in reader:
        print(line)
# Using plain open() we must explicitly close the file ourselves.
rd = open(file_path, 'r')
print(rd.read())
rd.close()
5a6e79f2275522feb78f4bd9866dc674eb12f5a8 | 28,117 | py | Python | serums/models.py | drjdlarson/serums | 0d5b04a82d37733f9e64a3ec278cef5337d83af4 | [
"MIT"
] | null | null | null | serums/models.py | drjdlarson/serums | 0d5b04a82d37733f9e64a3ec278cef5337d83af4 | [
"MIT"
] | null | null | null | serums/models.py | drjdlarson/serums | 0d5b04a82d37733f9e64a3ec278cef5337d83af4 | [
"MIT"
] | null | null | null | """Defines various distribution models."""
import numpy as np
import numpy.random as rnd
import scipy.stats as stats
from warnings import warn
import serums.enums as enums
class BaseSingleModel:
    """Generic base class for distribution models.

    This defines the required functions and provides their recommended
    function signature for inherited classes. It also defines the base
    attributes shared by all distributions.

    Attributes
    ----------
    location : N x 1 numpy array
        location parameter of the distribution
    scale : N x N numpy array
        scale parameter of the distribution
    """

    def __init__(self, loc=None, scale=None):
        super().__init__()
        self.location, self.scale = loc, scale

    def _warn_unimplemented(self, method_name):
        # Shared warning emitted when a child class has not overridden one
        # of the required methods.
        warn('{} not implemented by class {}'.format(method_name,
                                                     type(self).__name__))

    def sample(self, rng=None):
        """Draw a sample from the distribution (child classes override this).

        Parameters
        ----------
        rng : numpy random generator, optional
            random number generator to use. The default is None.

        Returns
        -------
        None.
        """
        self._warn_unimplemented('sample')

    def pdf(self, x):
        """Calculate the PDF value at the given point (child classes override this).

        Parameters
        ----------
        x : N x 1 numpy array
            Point to evaluate the PDF.

        Returns
        -------
        float
            PDF value (``numpy.nan`` in the base class).
        """
        self._warn_unimplemented('pdf')
        return np.nan
class Gaussian(BaseSingleModel):
    """Represents a multivariate Gaussian distribution."""

    def __init__(self, mean=None, covariance=None):
        """Initialize an object.

        Parameters
        ----------
        mean : N x 1 numpy array, optional
            Mean of the distribution. The default is None.
        covariance : N x N numpy array, optional
            Covariance of the distribution. The default is None.
        """
        super().__init__(loc=mean, scale=covariance)

    @property
    def mean(self):
        """Mean of the distribution (alias for :attr:`location`).

        Returns
        -------
        N x 1 nmpy array.
        """
        return self.location

    @mean.setter
    def mean(self, val):
        self.location = val

    @property
    def covariance(self):
        """Covariance of the distribution (alias for :attr:`scale`).

        Returns
        -------
        N x N nmpy array.
        """
        return self.scale

    @covariance.setter
    def covariance(self, val):
        self.scale = val

    def sample(self, rng=None):
        """Draw a sample from the current distribution.

        Parameters
        ----------
        rng : numpy random generator, optional
            Random number generator to use. If none is given then the numpy
            default is used. The default is None.

        Returns
        -------
        numpy array
            randomly sampled numpy array with the same number of elements
            as the mean.
        """
        gen = rnd.default_rng() if rng is None else rng
        return gen.multivariate_normal(self.mean.flatten(), self.covariance)

    def pdf(self, x):
        """Multi-variate probability density function for this distribution.

        Parameters
        ----------
        x : N x 1 numpy array
            Point to evaluate.

        Returns
        -------
        float
            PDF value of the state `x`.
        """
        return stats.multivariate_normal.pdf(x.flatten(),
                                             mean=self.mean.flatten(),
                                             cov=self.covariance)
class StudentsT(BaseSingleModel):
    """Represents a multivariate Student's t-distribution."""

    def __init__(self, mean=None, scale=None, dof=None):
        super().__init__(loc=mean, scale=scale)
        self._dof = dof

    @property
    def mean(self):
        """Mean of the distribution (alias for :attr:`location`).

        Returns
        -------
        N x 1 nmpy array.
        """
        return self.location

    @mean.setter
    def mean(self, val):
        self.location = val

    @property
    def degrees_of_freedom(self):
        """Degrees of freedom of the distribution, must be greater than 0."""
        return self._dof

    @degrees_of_freedom.setter
    def degrees_of_freedom(self, value):
        self._dof = value

    @property
    def covariance(self):
        """Read only covariance of the distribution (if defined).

        Only defined for dof > 2, where it equals
        ``dof / (dof - 2) * scale``.

        Returns
        -------
        N x N nmpy array.
        """
        dof = self._dof
        if dof <= 2:
            raise RuntimeError(
                'Degrees of freedom is {} and must be > 2'.format(dof))
        return dof / (dof - 2) * self.scale

    @covariance.setter
    def covariance(self, val):
        warn('Covariance is read only.')

    def pdf(self, x):
        """Multi-variate probability density function for this distribution.

        Parameters
        ----------
        x : N x 1 numpy array
            Value to evaluate the pdf at.

        Returns
        -------
        float
            PDF value of the state `x`.
        """
        return stats.multivariate_t.pdf(x.flatten(),
                                        loc=self.location.flatten(),
                                        shape=self.scale,
                                        df=self.degrees_of_freedom)

    def sample(self, rng=None):
        """Draw one random sample from this distribution.

        Parameters
        ----------
        rng : numpy random generator, optional
            Random number generator to use. If none is given then the numpy
            default is used. The default is None.

        Returns
        -------
        N x 1 numpy array
            Sampled value.
        """
        rv = stats.multivariate_t
        rv.random_state = rnd.default_rng() if rng is None else rng
        samp = rv.rvs(loc=self.location.flatten(),
                      shape=self.scale, df=self.degrees_of_freedom)
        return samp.reshape((samp.size, 1))
class ChiSquared(BaseSingleModel):
    """Represents a (shifted, scaled) Chi Squared distribution."""

    def __init__(self, mean=None, scale=None, dof=None):
        super().__init__(loc=mean, scale=scale)
        self._dof = dof

    @property
    def mean(self):
        """Mean of the distribution.

        Returns
        -------
        N x 1 nmpy array.
        """
        return self.location

    @mean.setter
    def mean(self, val):
        self.location = val

    @property
    def degrees_of_freedom(self):
        """Degrees of freedom of the distribution, must be greater than 0."""
        return self._dof

    @degrees_of_freedom.setter
    def degrees_of_freedom(self, value):
        self._dof = value

    @property
    def covariance(self):
        """Read only covariance of the distribution (if defined).

        The variance of a chi-squared variable with ``k`` degrees of
        freedom is ``2 k``; the scale parameter enters squared.

        Raises
        ------
        RuntimeError
            If the degrees of freedom are not strictly positive.

        Returns
        -------
        N x N nmpy array.
        """
        # BUG FIX: the guard previously used "< 0", which let dof == 0
        # through even though the error message (and the chi-squared
        # definition) require dof > 0.
        if self._dof <= 0:
            msg = 'Degrees of freedom is {} and must be > 0'
            raise RuntimeError(msg.format(self._dof))
        return (self._dof * 2) * (self.scale**2)

    @covariance.setter
    def covariance(self, val):
        warn('Covariance is read only.')

    def pdf(self, x):
        """Probability density function for this distribution.

        Parameters
        ----------
        x : N x 1 numpy array
            Value to evaluate the pdf at.

        Returns
        -------
        float
            PDF value of the state `x`.
        """
        rv = stats.chi2
        # BUG FIX: scipy.stats.chi2.pdf takes "scale", not "shape"; the
        # old keyword raised a TypeError whenever pdf() was invoked.
        return rv.pdf(x.flatten(), self._dof,
                      loc=self.location.flatten(), scale=self.scale)

    def sample(self, rng=None):
        """Draw a random sample from this distribution.

        Parameters
        ----------
        rng : numpy random generator, optional
            Random number generator to use. If none is given then the numpy
            default is used. The default is None.

        Returns
        -------
        N x 1 numpy array
            Sampled value(s).
        """
        if rng is None:
            rng = rnd.default_rng()
        rv = stats.chi2
        rv.random_state = rng
        x = rv.rvs(self._dof, loc=self.location.flatten(),
                   scale=self.scale)
        return x.reshape((x.size, 1))
class Cauchy(StudentsT):
    """Represents a Cauchy distribution.

    This is a special case of the Student's t-distribution with the degrees of
    freedom fixed at 1. However, the mean and covariance do not exist for this
    distribution, so the corresponding accessors only emit warnings.
    """
    def __init__(self, location=None, scale=None):
        # dof is pinned to 1; the parent initializer stores the scale and
        # the location is assigned directly afterwards.
        super().__init__(scale=scale, dof=1)
        self.location = location
    @property
    def mean(self):
        """Mean of the distribution (undefined for a Cauchy; warns and returns None)."""
        warn('Mean does not exist for a Cauchy')
    @mean.setter
    def mean(self, val):
        warn('Mean does not exist for a Cauchy')
    @property
    def degrees_of_freedom(self):
        """Degrees of freedom of the distribution, fixed at 1."""
        return super().degrees_of_freedom
    @degrees_of_freedom.setter
    def degrees_of_freedom(self, value):
        # Setting is refused; a Cauchy always has exactly 1 degree of freedom.
        warn('Degrees of freedom is 1 for a Cauchy')
    @property
    def covariance(self):
        """Covariance of the distribution (undefined for a Cauchy; warns and returns None)."""
        warn('Covariance is does not exist.')
    @covariance.setter
    def covariance(self, val):
        warn('Covariance is does not exist.')
class GaussianScaleMixture(BaseSingleModel):
    r"""Helper class for defining Gaussian Scale Mixture objects.

    Note
    ----
    This is an alternative method for representing heavy-tailed distributions
    by modeling them as a combination of a standard Gaussian, :math:`v`, and
    another positive random variable known as the generating variate, :math:`z`

    .. math::
        x \overset{d}{=} \sqrt{z} v

    where :math:`\overset{d}{=}` means equal in distribution and :math:`x`
    follows a GSM distribution (in general, a heavy tailed distribution).
    This formulation is based on
    :cite:`VilaValls2012_NonlinearBayesianFilteringintheGaussianScaleMixtureContext`,
    :cite:`Wainwright1999_ScaleMixturesofGaussiansandtheStatisticsofNaturalImages`, and
    :cite:`Kuruoglu1998_ApproximationofAStableProbabilityDensitiesUsingFiniteGaussianMixtures`.

    Attributes
    ----------
    type : :class:`serums.enums.GSMTypes`
        Type of the distribution to represent as a GSM.
    location_range : tuple
        Minimum and maximum values for the location parameter. Useful if being
        fed to a filter for estimating the location parameter. Each element must
        match the type of the :attr:`.location` attribute.
    scale_range : tuple
        Minimum and maximum values for the scale parameter. Useful if being
        fed to a filter for estimating the scale parameter. Each element must
        match the type of the :attr:`.scale` attribute. The default is None.
    df_range : tuple
        Minimum and maximum values for the degree of freedom parameter.
        Useful if being fed to a filter for estimating the degree of freedom
        parameter. Each element must be a float. The default is None.
    """
    # Only these GSM types carry a degrees-of-freedom parameter.
    __df_types = (enums.GSMTypes.STUDENTS_T, enums.GSMTypes.CAUCHY)
    def __init__(self, gsm_type, location=None, location_range=None,
                 scale=None, scale_range=None, degrees_of_freedom=None,
                 df_range=None):
        """Initialize a GSM Object.

        Parameters
        ----------
        gsm_type : :class:`serums.enums.GSMTypes`
            Type of the distribution to represent as a GSM.
        location : N x 1 numpy array, optional
            location parameter of the distribution. The default is None.
        location_range : tuple, optional
            Minimum and maximum values for the location parameter. Useful if being
            fed to a filter for estimating the location parameter. Each element must
            match the type of the :attr:`.location` attribute. The default is None
        scale : N x N numpy array, optional
            Scale parameter of the distribution being represented as a GSM.
            The default is None.
        scale_range : tuple, optional
            Minimum and maximum values for the scale parameter. Useful if being
            fed to a filter for estimating the scale parameter. Each element must
            match the type of the :attr:`.scale` attribute. The default is None.
        degrees_of_freedom : float, optional
            Degrees of freedom parameter of the distribution being represented
            as a GSM. This is not needed by all types. The default is None.
        df_range : tuple, optional
            Minimum and maximum values for the degree of freedom parameter.
            Useful if being fed to a filter for estimating the degree of freedom
            parameter. Each element must be a float. The default is None.

        Raises
        ------
        RuntimeError
            If a `gsm_type` is given that is of the incorrect data type.
        """
        super().__init__(loc=location, scale=scale)
        if not isinstance(gsm_type, enums.GSMTypes):
            raise RuntimeError('Type ({}) must be a GSMType'.format(gsm_type))
        self.type = gsm_type
        self._df = None
        self.location_range = location_range
        self.scale_range = scale_range
        self.df_range = df_range
        # Route through the property setter so the df is validated against
        # the GSM type.
        if degrees_of_freedom is not None:
            self.degrees_of_freedom = degrees_of_freedom
        # A Cauchy is a Student's t with exactly one degree of freedom; the
        # setter refuses to store it, so assign the private field directly.
        if self.type is enums.GSMTypes.CAUCHY:
            self._df = 1
    @property
    def degrees_of_freedom(self):
        """Degrees of freedom parameter of the distribution being represented as a GSM.

        Returns
        -------
        float, optional
        """
        if self.type in self.__df_types:
            return self._df
        else:
            # NOTE(review): '{:s}'.format on an enum member relies on
            # Enum.__format__ falling back to str(); confirm this works on
            # all supported Python versions.
            msg = 'GSM type {:s} does not have a degree of freedom.'.format(self.type)
            warn(msg)
            return None
    @degrees_of_freedom.setter
    def degrees_of_freedom(self, val):
        if self.type in self.__df_types:
            if self.type is enums.GSMTypes.CAUCHY:
                # The Cauchy df is fixed at 1 and may not be overridden.
                warn('GSM type {:s} requires degree of freedom = 1'.format(self.type))
                return
            self._df = val
        else:
            msg = ('GSM type {:s} does not have a degree of freedom. '
                   + 'Skipping').format(self.type)
            warn(msg)
    def sample(self, rng=None):
        """Draw a sample from the specified GSM type.

        Parameters
        ----------
        rng : numpy random generator, optional
            Random number generator to use. If none is given then the numpy
            default is used. The default is None.

        Returns
        -------
        float
            randomly sampled value from the GSM.
        """
        if rng is None:
            rng = rnd.default_rng()
        if self.type in [enums.GSMTypes.STUDENTS_T, enums.GSMTypes.CAUCHY]:
            return self._sample_student_t(rng)
        elif self.type is enums.GSMTypes.SYMMETRIC_A_STABLE:
            return self._sample_SaS(rng)
        else:
            raise RuntimeError('GSM type: {} is not supported'.format(self.type))
    def _sample_student_t(self, rng):
        # Cauchy sampling reduces to a Student's t draw with df = 1.
        return stats.t.rvs(self.degrees_of_freedom, scale=self.scale,
                           random_state=rng)
    def _sample_SaS(self, rng):
        # Placeholder: symmetric alpha-stable sampling is not implemented yet.
        raise RuntimeError('sampling SaS distribution not implemented')
class BaseMixtureModel:
    """Generic base class for mixture distribution models.

    Stores a list of component distributions and their weights, and
    provides sampling and PDF evaluation over the mixture. Child classes
    are expected to implement :meth:`add_component`.

    Attributes
    ----------
    weights : list
        weight of each distribution
    """

    def __init__(self, distributions=None, weights=None):
        """Initialize a mixture model object.

        Parameters
        ----------
        distributions : list, optional
            Each element is a :class:`.BaseSingleModel`. The default is None.
        weights : list, optional
            Weight of each distribution. The default is None.
        """
        self._distributions = [] if distributions is None else distributions
        self.weights = [] if weights is None else weights

    def sample(self, rng=None):
        """Draw a sample from the current mixture model.

        A component index is first drawn according to the weights, then a
        sample is drawn from that component distribution.

        Parameters
        ----------
        rng : numpy random generator, optional
            Random number generator to use. If none is given then the numpy
            default is used. The default is None.

        Returns
        -------
        numpy array
            randomly sampled numpy array of the same shape as the mean.
        """
        if rng is None:
            rng = rnd.default_rng()
        chosen = rng.choice(np.arange(len(self.weights), dtype=int),
                            p=self.weights)
        samp = self._distributions[chosen].sample(rng=rng)
        return samp.reshape((samp.size, 1))

    def pdf(self, x):
        """Multi-variate probability density function for this mixture.

        Returns
        -------
        float
            PDF value of the state `x` (weighted sum over the components).
        """
        return sum(w * dist.pdf(x)
                   for w, dist in zip(self.weights, self._distributions))

    def remove_components(self, indices):
        """Remove component distributions from the mixture by index.

        Parameters
        ----------
        indices : list
            indices of distributions to remove.
        """
        if not isinstance(indices, list):
            indices = list(indices)
        # Delete from the highest index down so earlier deletions do not
        # shift the positions of the remaining targets.
        for index in sorted(indices, reverse=True):
            del self._distributions[index]
            del self.weights[index]

    def add_component(self, *args):
        """Add a component distribution to the mixture.

        This should be implemented by the child class.

        Parameters
        ----------
        *args : tuple
            Additional arguments specific to the child distribution.
        """
        warn('add_component not implemented by {}'.format(type(self).__name__))
class _DistListWrapper(list):
"""Helper class for wrapping lists of BaseSingleModel to get a list of a single parameter."""
def __init__(self, dist_lst, attr):
"""Give list of distributions and the attribute to access."""
self.dist_lst = dist_lst
self.attr = attr
def __getitem__(self, index):
"""Get the attribute of the item at the index in the list."""
if isinstance(index, slice):
step = 1
if index.step is not None:
step = index.step
return [getattr(self.dist_lst[ii], self.attr)
for ii in range(index.start, index.stop, step)]
elif isinstance(index, int):
return getattr(self.dist_lst[index], self.attr)
else:
fmt = 'Index must be a integer or slice not {}'
raise RuntimeError(fmt.format(type(index)))
def __setitem__(self, index, val):
"""Set the attribute of the item at the index to the value."""
if isinstance(index, slice):
step = 1
if index.step is not None:
step = index.step
for ii in range(index.start, index.stop, step):
setattr(self.dist_lst[ii], self.attr, val)
elif isinstance(index, int):
setattr(self.dist_lst[index], self.attr, val)
else:
fmt = 'Index must be a integer or slice not {}'
raise RuntimeError(fmt.format(type(index)))
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self.dist_lst):
self.n += 1
return getattr(self.dist_lst[self.n - 1], self.attr)
else:
raise StopIteration
def __repr__(self):
return str([getattr(d, self.attr) for d in self.dist_lst])
def __len__(self):
return len(self.dist_lst)
def append(self, *args):
raise RuntimeError('Cannot append, use add_component function instead.')
def extend(self, *args):
raise RuntimeError('Cannot extend, use add_component function instead.')
class GaussianMixture(BaseMixtureModel):
    """Gaussian Mixture object."""
    def __init__(self, means=None, covariances=None, **kwargs):
        """Initialize an object.

        Parameters
        ----------
        means : list, optional
            Each element is a N x 1 numpy array. Will be used in place of supplied
            distributions but requires covariances to also be given. The default is None.
        covariances : list, optional
            Each element is an N x N numpy array. Will be used in place of
            supplied distributions but requires means to be given. The default is None.
        **kwargs : dict, optional
            See the base class for details.

        Returns
        -------
        None.
        """
        # Building from means/covariances overrides any 'distributions'
        # entry in kwargs; both lists must be provided together.
        if means is not None and covariances is not None:
            kwargs['distributions'] = [Gaussian(mean=m, covariance=c)
                                       for m, c in zip(means, covariances)]
        super().__init__(**kwargs)
    @property
    def means(self):
        """List of Gaussian means, each is a N x 1 numpy array. Recommended to be read only."""
        return _DistListWrapper(self._distributions, 'location')
    @means.setter
    def means(self, val):
        if not isinstance(val, list):
            warn('Must set means to a list')
            return
        # A length mismatch rebuilds the mixture with uniform weights and
        # fresh (empty) Gaussian components, discarding any previously
        # stored covariances.
        if len(val) != len(self._distributions):
            self.weights = [1 / len(val) for ii in range(len(val))]
            self._distributions = [Gaussian() for ii in range(len(val))]
        for ii, v in enumerate(val):
            self._distributions[ii].mean = v
    @property
    def covariances(self):
        """List of Gaussian covariances, each is a N x N numpy array. Recommended to be read only."""
        return _DistListWrapper(self._distributions, 'scale')
    @covariances.setter
    def covariances(self, val):
        if not isinstance(val, list):
            warn('Must set covariances to a list')
            return
        # Same reset-on-length-mismatch behavior as the means setter; any
        # previously stored means are discarded in that case.
        if len(val) != len(self._distributions):
            self.weights = [1 / len(val) for ii in range(len(val))]
            self._distributions = [Gaussian() for ii in range(len(val))]
        for ii, v in enumerate(val):
            self._distributions[ii].covariance = v
    def add_components(self, means, covariances, weights):
        """Add Gaussian distributions to the mixture.

        Parameters
        ----------
        means : list
            Each is a N x 1 numpy array of the mean of the distributions to add.
        covariances : list
            Each is a N x N numpy array of the covariance of the distributions
            to add.
        weights : list
            Each is a float for the weight of the distributions to add. No
            normalization is done.

        Returns
        -------
        None.
        """
        # Scalars are promoted to single-element lists for convenience.
        if not isinstance(means, list):
            means = [means, ]
        if not isinstance(covariances, list):
            covariances = [covariances, ]
        if not isinstance(weights, list):
            weights = [weights, ]
        self._distributions.extend([Gaussian(mean=m, covariance=c)
                                    for m, c in zip(means, covariances)])
        self.weights.extend(weights)
class StudentsTMixture(BaseMixtureModel):
    """Students T mixture object."""
    def __init__(self, means=None, scalings=None, dof=None, **kwargs):
        """Initialize an object.

        Parameters
        ----------
        means : list, optional
            Each element is a N x 1 numpy array. Requires scalings and dof to
            also be given. The default is None.
        scalings : list, optional
            Each element is an N x N numpy array scale matrix. The default is None.
        dof : float or list, optional
            Degrees of freedom; a single value is shared by all components,
            a list gives one value per component. The default is None.
        **kwargs : dict, optional
            See the base class for details.
        """
        if means is not None and scalings is not None and dof is not None:
            if isinstance(dof, list):
                dists = [StudentsT(mean=m, scale=s, dof=df)
                         for m, s, df in zip(means, scalings, dof)]
            else:
                dists = [StudentsT(mean=m, scale=s, dof=dof)
                         for m, s in zip(means, scalings)]
            kwargs['distributions'] = dists
        super().__init__(**kwargs)
    @property
    def means(self):
        """List of means, each is a N x 1 numpy array. Recommended to be read only."""
        return _DistListWrapper(self._distributions, 'location')
    @means.setter
    def means(self, val):
        if not isinstance(val, list):
            warn('Must set means to a list')
            return
        # A length mismatch rebuilds the mixture with uniform weights and
        # fresh (empty) StudentsT components, discarding stored scalings/dof.
        if len(val) != len(self._distributions):
            self.weights = [1 / len(val) for ii in range(len(val))]
            self._distributions = [StudentsT() for ii in range(len(val))]
        for ii, v in enumerate(val):
            self._distributions[ii].mean = v
    @property
    def covariances(self):
        """Read only list of covariances, each is a N x N numpy array."""
        return _DistListWrapper(self._distributions, 'covariance')
    @property
    def scalings(self):
        """List of scalings, each is a N x N numpy array. Recommended to be read only."""
        return _DistListWrapper(self._distributions, 'scale')
    @scalings.setter
    def scalings(self, val):
        if not isinstance(val, list):
            warn('Must set scalings to a list')
            return
        # Same reset-on-length-mismatch behavior as the means setter.
        if len(val) != len(self._distributions):
            self.weights = [1 / len(val) for ii in range(len(val))]
            self._distributions = [StudentsT() for ii in range(len(val))]
        for ii, v in enumerate(val):
            self._distributions[ii].scale = v
    @property
    def dof(self):
        """Most common degree of freedom for the mixture. Deprecated but kept for compatibility, new code should use degrees_of_freedom."""
        # np.unique returns sorted values with counts; the first value whose
        # count equals the maximum is the (smallest) mode of the dof values.
        vals, counts = np.unique([d.degrees_of_freedom for d in self._distributions],
                                 return_counts=True)
        inds = np.argwhere(counts == np.max(counts))
        return vals[inds[0]].item()
    @dof.setter
    def dof(self, val):
        # Broadcasts a single dof value to every component.
        for d in self._distributions:
            d.degrees_of_freedom = val
    @property
    def degrees_of_freedom(self):
        """List of degrees of freedom, each is a float. Recommended to be read only."""
        return _DistListWrapper(self._distributions, 'degrees_of_freedom')
    @degrees_of_freedom.setter
    def degrees_of_freedom(self, val):
        if not isinstance(val, list):
            warn('Must set degrees of freedom to a list')
            return
        # Same reset-on-length-mismatch behavior as the other setters.
        if len(val) != len(self._distributions):
            self.weights = [1 / len(val) for ii in range(len(val))]
            self._distributions = [StudentsT() for ii in range(len(val))]
        for ii, v in enumerate(val):
            self._distributions[ii].degrees_of_freedom = v
    def add_components(self, means, scalings, dof_lst, weights):
        """Add Student's t-distributions to the mixture.

        Parameters
        ----------
        means : list
            Each is a N x 1 numpy array of the mean of the distributions to add.
        scalings : list
            Each is a N x N numpy array of the scale of the distributions
            to add.
        dof_lst : list
            Each is a float representing the degrees of freedom of the distribution
            to add.
        weights : list
            Each is a float for the weight of the distributions to add. No
            normalization is done.

        Returns
        -------
        None.
        """
        # Scalars are promoted to single-element lists for convenience.
        if not isinstance(means, list):
            means = [means, ]
        if not isinstance(scalings, list):
            scalings = [scalings, ]
        if not isinstance(dof_lst, list):
            dof_lst = [dof_lst, ]
        if not isinstance(weights, list):
            weights = [weights, ]
        self._distributions.extend([StudentsT(mean=m, scale=s, dof=df)
                                    for m, s, df in zip(means, scalings, dof_lst)])
        self.weights.extend(weights)
| 31.450783 | 139 | 0.588114 |
0d1ba9942db72b23c3cc762ccaf81b86971befd8 | 7,102 | py | Python | benchmark/scripts/table.py | akazachk/UnitCommitment2.jl | c3b848ab2c57783a79a691ba9d32c915cad19835 | [
"BSD-3-Clause"
] | null | null | null | benchmark/scripts/table.py | akazachk/UnitCommitment2.jl | c3b848ab2c57783a79a691ba9d32c915cad19835 | [
"BSD-3-Clause"
] | null | null | null | benchmark/scripts/table.py | akazachk/UnitCommitment2.jl | c3b848ab2c57783a79a691ba9d32c915cad19835 | [
"BSD-3-Clause"
] | null | null | null | # UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from pathlib import Path
import pandas as pd
import re
from tabulate import tabulate
def process_all_log_files():
    """Parse all benchmark logs under results/ and write tables/benchmark.csv.

    Solver log files one and two directory levels below ``results/`` are
    parsed with :func:`process`, collected into a DataFrame sorted by
    group name and bus count, and written out with a fresh integer index.
    """
    paths = list(Path(".").glob('results/*/*/*.log'))
    paths += list(Path(".").glob('results/*/*.log'))
    # Skip paths containing ".ipy" — presumably IPython/Jupyter artifacts
    # such as .ipynb_checkpoints.
    rows = [process(str(path)) for path in paths if ".ipy" not in str(path)]
    df = pd.DataFrame(rows).sort_values(["Group", "Buses"])
    df.index = range(len(df))
    print("Writing tables/benchmark.csv")
    df.to_csv("tables/benchmark.csv", index_label="Index")
def process(filename):
    """Parse one solver (Gurobi-style) benchmark log into a flat dict of metrics.

    The path is expected to look like results/<group...>/<instance>.<sample>.log;
    group/instance/sample are recovered from the path itself, and the numeric
    fields are scraped from known log lines via regexes.
    """
    parts = filename.replace(".log", "").split("/")
    group_name = "/".join(parts[1:-1])
    instance_name = parts[-1]
    # assumes exactly one '.' separates instance from sample -- TODO confirm
    instance_name, sample_name = instance_name.split(".")
    # Counters/times below accumulate across repeated solver runs in one log;
    # plain scalar fields keep the value of the last matching line.
    nodes = 0.0
    optimize_time = 0.0
    simplex_iterations = 0.0
    primal_bound = None
    dual_bound = None
    gap = None
    root_obj = None
    root_iterations = 0.0
    root_time = 0.0
    n_rows_orig, n_rows_presolved = None, None
    n_cols_orig, n_cols_presolved = None, None
    n_nz_orig, n_nz_presolved = None, None
    n_cont_vars_presolved, n_bin_vars_presolved = None, None
    read_time, model_time, isf_time, total_time = None, None, None, None
    cb_calls, cb_time = 0, 0.0
    transmission_count, transmission_time, transmission_calls = 0, 0.0, 0
    # m = re.search("case([0-9]*)", instance_name)
    # n_buses = int(m.group(1))
    n_buses = 0
    with open(filename) as file:
        for line in file.readlines():
            # Branch-and-bound summary line.
            m = re.search(r"Explored ([0-9.e+]*) nodes \(([0-9.e+]*) simplex iterations\) in ([0-9.e+]*) seconds", line)
            if m is not None:
                nodes += int(m.group(1))
                simplex_iterations += int(m.group(2))
                optimize_time += float(m.group(3))
            # Final bounds and MIP gap.
            m = re.search(r"Best objective ([0-9.e+]*), best bound ([0-9.e+]*), gap ([0-9.e+]*)\%", line)
            if m is not None:
                primal_bound = float(m.group(1))
                dual_bound = float(m.group(2))
                gap = round(float(m.group(3)), 3)
            m = re.search(r"Root relaxation: objective ([0-9.e+]*), ([0-9.e+]*) iterations, ([0-9.e+]*) seconds", line)
            if m is not None:
                root_obj = float(m.group(1))
                root_iterations += int(m.group(2))
                root_time += float(m.group(3))
            m = re.search(r"Presolved: ([0-9.e+]*) rows, ([0-9.e+]*) columns, ([0-9.e+]*) nonzeros", line)
            if m is not None:
                n_rows_presolved = int(m.group(1))
                n_cols_presolved = int(m.group(2))
                n_nz_presolved = int(m.group(3))
            m = re.search(r"Optimize a model with ([0-9.e+]*) rows, ([0-9.e+]*) columns and ([0-9.e+]*) nonzeros", line)
            if m is not None:
                n_rows_orig = int(m.group(1))
                n_cols_orig = int(m.group(2))
                n_nz_orig = int(m.group(3))
            m = re.search(r"Variable types: ([0-9.e+]*) continuous, ([0-9.e+]*) integer \(([0-9.e+]*) binary\)", line)
            if m is not None:
                n_cont_vars_presolved = int(m.group(1))
                n_bin_vars_presolved = int(m.group(3))
            m = re.search(r"Read problem in ([0-9.e+]*) seconds", line)
            if m is not None:
                read_time = float(m.group(1))
            m = re.search(r"Computed ISF in ([0-9.e+]*) seconds", line)
            if m is not None:
                isf_time = float(m.group(1))
            m = re.search(r"Built model in ([0-9.e+]*) seconds", line)
            if m is not None:
                model_time = float(m.group(1))
            m = re.search(r"Total time was ([0-9.e+]*) seconds", line)
            if m is not None:
                total_time = float(m.group(1))
            m = re.search(r"User-callback calls ([0-9.e+]*), time in user-callback ([0-9.e+]*) sec", line)
            if m is not None:
                cb_calls = int(m.group(1))
                cb_time = float(m.group(2))
            m = re.search(r"Verified transmission limits in ([0-9.e+]*) sec", line)
            if m is not None:
                transmission_time += float(m.group(1))
                transmission_calls += 1
            # Count lines reporting transmission overflow (lazy constraints added).
            m = re.search(r".*MW overflow", line)
            if m is not None:
                transmission_count += 1
    return {
        "Group": group_name,
        "Instance": instance_name,
        "Sample": sample_name,
        "Optimization time (s)": optimize_time,
        "Read instance time (s)": read_time,
        "Model construction time (s)": model_time,
        "ISF & LODF computation time (s)": isf_time,
        "Total time (s)": total_time,
        "User-callback time": cb_time,
        "User-callback calls": cb_calls,
        "Gap (%)": gap,
        "B&B Nodes": nodes,
        "Simplex iterations": simplex_iterations,
        "Primal bound": primal_bound,
        "Dual bound": dual_bound,
        "Root relaxation iterations": root_iterations,
        "Root relaxation time": root_time,
        "Root relaxation value": root_obj,
        "Rows": n_rows_orig,
        "Cols": n_cols_orig,
        "Nonzeros": n_nz_orig,
        "Rows (presolved)": n_rows_presolved,
        "Cols (presolved)": n_cols_presolved,
        "Nonzeros (presolved)": n_nz_presolved,
        "Bin vars (presolved)": n_bin_vars_presolved,
        "Cont vars (presolved)": n_cont_vars_presolved,
        "Buses": n_buses,
        "Transmission screening constraints": transmission_count,
        "Transmission screening time": transmission_time,
        "Transmission screening calls": transmission_calls,
    }
def generate_chart():
    """Render tables/benchmark.csv as a per-instance bar chart at tables/benchmark.png.

    Reads the CSV produced by process_all_log_files(); each bar is the mean
    "Total time (s)" per instance, with error bars across samples.
    """
    # Plotting dependencies are imported lazily so that CSV generation does not
    # require them.  The original also re-imported pandas here, shadowing the
    # module-level `pd` for no reason -- removed.
    import matplotlib.pyplot as plt
    import seaborn as sns
    tables = []
    files = ["tables/benchmark.csv"]
    for f in files:
        table = pd.read_csv(f, index_col=0)
        # Qualify the instance name with its group so names are unique across groups.
        table.loc[:, "Instance"] = table.loc[:, "Group"] + "/" + table.loc[:, "Instance"]
        table.loc[:, "Filename"] = f
        tables.append(table)
    benchmark = pd.concat(tables, sort=True)
    benchmark = benchmark.sort_values(by="Instance")
    k = len(benchmark.groupby("Instance"))  # one bar per instance -> scale figure height
    plt.figure(figsize=(12, 0.50 * k))
    sns.set_style("whitegrid")
    sns.set_palette("Set1")
    sns.barplot(y="Instance",
                x="Total time (s)",
                color="tab:red",
                capsize=0.15,
                errcolor="k",
                errwidth=1.25,
                data=benchmark)
    plt.tight_layout()
    print("Writing tables/benchmark.png")
    plt.savefig("tables/benchmark.png", dpi=150)
if __name__ == "__main__":
    # Regenerate the CSV summary first; the chart is built from that CSV.
    process_all_log_files()
    generate_chart()
| 38.182796 | 120 | 0.549845 |
bdd4faf01f0be38888beb966b6383c5c7144341a | 18,892 | py | Python | rtg/pipeline.py | isi-vista/rtg | 149415f424f2a6585cbe0d97f0007b8b0b53d164 | [
"Apache-2.0"
] | null | null | null | rtg/pipeline.py | isi-vista/rtg | 149415f424f2a6585cbe0d97f0007b8b0b53d164 | [
"Apache-2.0"
] | null | null | null | rtg/pipeline.py | isi-vista/rtg | 149415f424f2a6585cbe0d97f0007b8b0b53d164 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Author: Thamme Gowda [tg (at) isi (dot) edu]
# Created: 3/9/19
import argparse
import os
from rtg import log, TranslationExperiment as Experiment, __version__, debug_mode
from rtg.exp import load_conf
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from rtg.module.decoder import Decoder
from rtg.utils import IO, line_count
from dataclasses import dataclass
import torch
import random
from collections import defaultdict
from sacrebleu import corpus_bleu, corpus_macrof
import inspect
import copy
import json
from rtg.distrib import dtorch
from rtg.registry import ProblemType
@dataclass
class Pipeline:
    """End-to-end driver for an RTG experiment: pre-checks, pre-processing,
    training, and test-suite evaluation (translation or classification)."""

    exp: Experiment

    def __post_init__(self):
        # Map each supported problem type to the test runner that scores it.
        self.tests_types = {
            ProblemType.TRANSLATION: self.run_translation_tests,
            ProblemType.CLASSIFICATION: self.run_classification_tests
        }

    def pre_checks(self):
        """Validate config sections and data files up front so failures happen early."""
        # Some more validation needed
        assert self.exp.work_dir.exists()
        conf = self.exp.config
        assert conf.get('prep') is not None
        assert conf.get('trainer') is not None
        assert conf.get('tester') is not None
        if not conf['tester'].get('suite') and conf['tester'].get('suit'):
            # it was mis spelled as suit https://github.com/isi-nlp/rtg/issues/9
            conf['tester']['suite'] = conf['tester']['suit']
        assert conf['tester'].get('suite') is not None
        for name, data in conf['tester']['suite'].items():
            # Suite entries may be "src", [src, ref], or {src: ..., ref: ...}.
            if isinstance(data, str):
                src, ref = data, None
            elif isinstance(data, list):
                src, ref = data[0], data[1] if len(data) > 1 else None
            else:
                src, ref = data['src'], data.get('ref')
            src = Path(src).resolve()
            assert src.exists(), f'{src} doesnt exist'
            if ref:
                ref = Path(ref).resolve()
                assert ref.exists(), f'{ref} doesnt exist'
                assert line_count(src) == line_count(ref), f'{src} and{ref} are not parallel'
        assert conf['trainer']['steps'] > 0
        if 'finetune_steps' in conf['trainer']:
            assert conf['trainer']['finetune_steps'] > conf['trainer']['steps']
            if not self.exp.finetune_file.exists():
                assert 'finetune_src' in conf['prep']
                assert 'finetune_tgt' in conf['prep']
                assert Path(conf['prep']['finetune_src']).exists()
                assert Path(conf['prep']['finetune_tgt']).exists()

    def detokenize(self, inp: Path):
        """Detokenize hypotheses in `inp` into a sibling `.detok` file and return its path."""
        post_proc = self.exp.get_post_transform(side='tgt')
        detok_file = inp.with_suffix('.detok')
        with inp.open() as lines, detok_file.open('w') as out:
            for line in lines:
                # Decoder output is TSV; the first column is the hypothesis text.
                line = line.split('\t')[0]
                out.write(post_proc(line) + '\n')
        return detok_file

    def evaluate_mt_file(self, detok_hyp: Path, ref: Union[Path, List[str]], lowercase=True) -> float:
        """Score detokenized hypotheses against `ref`; writes .sacrebleu and
        .macrof1 score files next to the hypothesis file and returns the BLEU score."""
        detok_lines = list(IO.get_lines(detok_hyp))
        # takes multiple refs, but here we have only one
        if isinstance(ref, Path):
            ref = [x.strip() for x in IO.get_lines(ref)]
        assert isinstance(ref, list), f'List of strings expected, but given {type(ref)} '
        assert isinstance(ref[0], str), f'List of strings expected, but given List of {type(ref[0])} '
        refs = [ref]
        bleu = corpus_bleu(hypotheses=detok_lines, references=refs, lowercase=lowercase)
        bleu_str = bleu.format()
        bleu_file = detok_hyp.with_name(detok_hyp.name + ('.lc' if lowercase else '.oc') + '.sacrebleu')
        log.info(f'{detok_hyp}: {bleu_str}')
        IO.write_lines(bleu_file, bleu_str)
        macrof1 = corpus_macrof(hypotheses=detok_lines, references=refs, lowercase=lowercase)
        macrof1_str = macrof1.format()
        macrof1_file = detok_hyp.with_name(detok_hyp.name + ('.lc' if lowercase else '.oc') + '.macrof1')
        log.info(f'{detok_hyp}: {macrof1_str}')
        IO.write_lines(macrof1_file, macrof1_str)
        return bleu.score

    def decode_eval_file(self, decoder, src: Union[Path, List[str]], out_file: Path,
                         ref: Optional[Union[Path, List[str]]],
                         lowercase: bool = True, **dec_args) -> float:
        """Decode `src` into `out_file` (skipped when a complete output already
        exists), then detokenize and score against `ref` when given."""
        if out_file.exists() and out_file.stat().st_size > 0 and line_count(out_file) == (
                len(src) if isinstance(src, list) else line_count(src)):
            log.warning(f"{out_file} exists and has desired number of lines. Skipped...")
        else:
            if isinstance(src, Path):
                log.info(f"decoding {src.name}")
                src = list(IO.get_lines(src))
            if isinstance(ref, Path):
                ref = list(IO.get_lines(ref))
            with IO.writer(out_file) as out:
                decoder.decode_file(src, out, **dec_args)
        detok_hyp = self.detokenize(out_file)
        if ref:
            return self.evaluate_mt_file(detok_hyp, ref, lowercase=lowercase)

    def tune_decoder_params(self, exp: Experiment, tune_src: str, tune_ref: str, batch_size: int,
                            trials: int = 10, lowercase=True,
                            beam_size=(1, 4, 8), ensemble=(1, 5, 10), lp_alpha=(0.0, 0.4, 0.6),
                            suggested: List[Tuple[int, int, float]] = None,
                            **fixed_args):
        """Random-search beam size / ensemble size / length-penalty on a tuning set.

        Returns (best_params dict, tuning args used). Scores are cached in
        <work_dir>/tune_step<N>/scores.json so interrupted runs resume.
        """
        _, _, _, tune_args = inspect.getargvalues(inspect.currentframe())
        tune_args.update(fixed_args)
        ex_args = ['exp', 'self', 'fixed_args', 'batch_size', 'max_len']
        if trials == 0:
            ex_args += ['beam_size', 'ensemble', 'lp_alpha']
        for x in ex_args:
            del tune_args[x]  # exclude some args
        _, step = exp.get_last_saved_model()
        tune_dir = exp.work_dir / f'tune_step{step}'
        log.info(f"Tune dir = {tune_dir}")
        tune_dir.mkdir(parents=True, exist_ok=True)
        tune_src, tune_ref = Path(tune_src), Path(tune_ref)
        assert tune_src.exists()
        assert tune_ref.exists()
        tune_src, tune_ref = list(IO.get_lines(tune_src)), list(IO.get_lines(tune_ref))
        assert len(tune_src) == len(tune_ref)
        tune_log = tune_dir / 'scores.json'  # resume the tuning
        memory: Dict[Tuple, float] = {}
        if tune_log.exists():
            data = json.load(tune_log.open())
            # JSON keys cant be tuples, so they were stringified
            memory = {eval(k): v for k, v in data.items()}
        beam_sizes, ensembles, lp_alphas = [], [], []
        if suggested:
            if isinstance(suggested[0], str):
                suggested = [eval(x) for x in suggested]
            suggested = [(x[0], x[1], round(x[2], 2)) for x in suggested]
            suggested_new = [x for x in suggested if x not in memory]
            beam_sizes += [x[0] for x in suggested_new]
            ensembles += [x[1] for x in suggested_new]
            lp_alphas += [x[2] for x in suggested_new]
        new_trials = trials - len(memory)
        if new_trials > 0:
            beam_sizes += [random.choice(beam_size) for _ in range(new_trials)]
            ensembles += [random.choice(ensemble) for _ in range(new_trials)]
            lp_alphas += [round(random.choice(lp_alpha), 2) for _ in range(new_trials)]
        # ensembling is somewhat costlier, so try minimize the model ensembling, by grouping them together
        grouped_ens = defaultdict(list)
        for b, ens, l in zip(beam_sizes, ensembles, lp_alphas):
            grouped_ens[ens].append((b, l))
        try:
            for ens, args in grouped_ens.items():
                decoder = Decoder.new(exp, ensemble=ens)
                for b_s, lp_a in args:
                    eff_batch_size = batch_size // b_s  # effective batch size
                    name = f'tune_step{step}_beam{b_s}_ens{ens}_lp{lp_a:.2f}'
                    log.info(name)
                    out_file = tune_dir / f'{name}.out.tsv'
                    score = self.decode_eval_file(decoder, tune_src, out_file, tune_ref,
                                                  batch_size=eff_batch_size, beam_size=b_s,
                                                  lp_alpha=lp_a, lowercase=lowercase, **fixed_args)
                    memory[(b_s, ens, lp_a)] = score
            best_params = sorted(memory.items(), key=lambda x: x[1], reverse=True)[0][0]
            return dict(zip(['beam_size', 'ensemble', 'lp_alpha'], best_params)), tune_args
        finally:
            # JSON keys cant be tuples, so we stringify them
            data = {str(k): v for k, v in memory.items()}
            IO.write_lines(tune_log, json.dumps(data))

    def run_classification_tests(self, exp=None, args=None):
        """Evaluate a trained classifier on every dataset of tester.suite.

        Writes <name>.score.tsv and <name>.out.tsv per suite entry; failures
        are recorded per-entry in <name>.err instead of aborting the suite.
        """
        from rtg.emb.tfmcls import ClassificationExperiment
        exp: ClassificationExperiment = exp or self.exp
        assert exp.problem_type is ProblemType.CLASSIFICATION
        args = args or exp.config['tester']
        suite: Dict[str, List] = args['suite']
        assert suite
        log.info(f"Found {len(suite)} suite :: {suite.keys()}")
        eval_args = dict(
            batch_size = args.get('batch_size') or self.exp.config['trainer']['batch_size'],
            max_len = args.get('max_len', 256))
        ens = args.get('ensemble', 1)
        _, step = exp.get_last_saved_model()
        model = exp.load_model(ensemble=ens)
        model = model.eval()
        test_dir = exp.work_dir / f'test_step{step}_ens{ens}'
        test_dir.mkdir(exist_ok=True, parents=True)
        for name, data in suite.items():
            src, label = data, None
            if isinstance(data, list):
                src, label = data[:2]
            try:
                src_link = test_dir / f'{name}.src'
                label_link = test_dir / f'{name}.label'
                out_file = test_dir / f'{name}.out.tsv'
                if out_file.exists() and out_file.stat().st_size > 0:
                    log.warning(f"{out_file} exists and not empty, so skipping it")
                    continue
                # Symlink inputs into the test dir (relative, for portability).
                buffer = [(src_link, Path(src).absolute())]
                if label:
                    buffer.append((label_link, Path(label).absolute()))
                for link, orig in buffer:
                    if not link.exists():
                        orig_rel = os.path.relpath(orig, link.parent)
                        link.symlink_to(orig_rel)
                metric, top1_labels, top1_probs = exp.evaluate_classifier(
                    model, input=src_link, labels=label_link, **eval_args)
                log.info(metric.format(delim='\t'))
                test_dir.mkdir(parents=True, exist_ok=True)
                score_file = test_dir / f'{name}.score.tsv'
                score_file.write_text(metric.format(delim=','))
                out = '\n'.join(f'{l}\t{p:g}' for l, p in zip(top1_labels, top1_probs))
                out_file.write_text(out)
            except Exception as e:
                log.exception(f"Something went wrong with '{name}' test")
                err = test_dir / f'{name}.err'
                err.write_text(str(e))

    def run_translation_tests(self, exp=None, args=None):
        """Decode and score every dataset of tester.suite, optionally tuning
        decoder hyper-parameters first (tester.decoder.tune)."""
        exp = exp or self.exp
        args = args or exp.config['tester']
        suite: Dict[str, List] = args.get('suite')
        assert suite
        log.info(f"Found {len(suite)} suit :: {suite.keys()}")
        _, step = exp.get_last_saved_model()
        if 'decoder' not in args:
            args['decoder'] = {}
        dec_args: Dict = args['decoder']
        best_params = copy.deepcopy(dec_args)
        max_len = best_params.get('max_len', 50)
        batch_size = best_params.get('batch_size', 20_000)
        # TODO: this has grown to become messy (trying to make backward compatible, improve the logic here
        if 'tune' in dec_args and not dec_args['tune'].get('tuned'):
            tune_args: Dict = dec_args['tune']
            prep_args = exp.config['prep']
            if 'tune_src' not in tune_args:
                tune_args['tune_src'] = prep_args['valid_src']
            if 'tune_ref' not in tune_args:
                tune_args['tune_ref'] = prep_args.get('valid_ref', prep_args['valid_tgt'])
            best_params, tuner_args_ext = self.tune_decoder_params(
                exp=exp, max_len=max_len, batch_size=batch_size, **tune_args)
            log.info(f"tuner args = {tuner_args_ext}")
            log.info(f"Tuning complete: best_params: {best_params}")
            dec_args['tune'].update(tuner_args_ext)  # Update the config file with default args
            dec_args['tune']['tuned'] = True
        if 'tune' in best_params:
            del best_params['tune']
        log.info(f"params: {best_params}")
        beam_size = best_params.get('beam_size', 4)
        ensemble: int = best_params.pop('ensemble', 5)
        lp_alpha = best_params.get('lp_alpha', 0.0)
        # Shrink batch by beam size so beams fit in the same budget.
        eff_batch_size = batch_size // beam_size
        dec_args.update(dict(beam_size=beam_size, lp_alpha=lp_alpha, ensemble=ensemble,
                             max_len=max_len, batch_size=batch_size))
        exp.persist_state()  # update the config
        assert step > 0, 'looks like no model is saved or invalid experiment dir'
        test_dir = exp.work_dir / f'test_step{step}_beam{beam_size}_ens{ensemble}_lp{lp_alpha}'
        log.info(f"Test Dir = {test_dir}")
        test_dir.mkdir(parents=True, exist_ok=True)
        decoder = Decoder.new(exp, ensemble=ensemble)
        for name, data in suite.items():
            # noinspection PyBroadException
            src, ref = data, None
            out_file = None
            # Suite entries may be "src", [src, ref], or {src, ref, out}.
            if isinstance(data, list):
                src, ref = data[:2]
            elif isinstance(data, dict):
                src, ref = data['src'], data.get('ref')
                out_file = data.get('out')
            try:
                orig_src = Path(src).absolute()
                src_link = test_dir / f'{name}.src'
                ref_link = test_dir / f'{name}.ref'
                buffer = [(src_link, orig_src)]
                if ref:
                    orig_ref = Path(ref).absolute()
                    buffer.append((ref_link, orig_ref))
                for link, orig in buffer:
                    if not link.exists():
                        orig_rel = os.path.relpath(orig, link.parent)
                        link.symlink_to(orig_rel)
                out_file = test_dir / f'{name}.out.tsv' if not out_file else out_file
                out_file.parent.mkdir(parents=True, exist_ok=True)
                self.decode_eval_file(decoder, src_link, out_file, ref_link,
                                      batch_size=eff_batch_size, beam_size=beam_size,
                                      lp_alpha=lp_alpha, max_len=max_len)
            except Exception as e:
                log.exception(f"Something went wrong with '{name}' test")
                err = test_dir / f'{name}.err'
                err.write_text(str(e))

    def run(self, run_tests=True, debug=debug_mode):
        """Full pipeline: pre-checks, pre-process (global main rank only),
        train on all ranks, then run the test suite on the global main rank."""
        if not self.exp.read_only:
            # if not distr.is_main:
            #     log.clear_console() # console handler
            log.update_file_handler(str(self.exp.log_file))
        self.pre_checks()  # fail early, so TG can fix and restart
        if dtorch.is_global_main:
            self.exp.pre_process()
        dtorch.barrier()
        if not self.exp.read_only:
            self.exp.reload()  # with updated config and vocabs from global_main
        # train on all
        if debug:
            log.warning("<<<Anomaly detection enabled; this is very slow; use this only for debugging/hunting bugs>>>")
            with torch.autograd.detect_anomaly():
                self.exp.train()
        else:
            self.exp.train()
        dtorch.barrier()
        if run_tests:
            if self.exp.problem_type in self.tests_types:
                if dtorch.is_global_main:
                    self.exp.reload()  # if user changed config for tests while training
                    with torch.no_grad():
                        self.tests_types[self.exp.problem_type]()
            else:
                log.warning(f"{self.exp.problem_type} dont have test runner yet. "
                            f"Known runners: {self.tests_types}. Please fix me")
def parse_args():
    """Parse CLI arguments and return the constructed Experiment.

    Side effects: initializes the distributed helper (dtorch), optionally
    enables fp16, validates GPU availability when requested, and selects the
    experiment class from the config (`model_type`/`spark`).
    """
    parser = argparse.ArgumentParser(prog="rtg-pipe", description="RTG Pipeline CLI")
    parser.add_argument('-v', '--version', action='version', version=f'%(prog)s {__version__}')
    parser.add_argument("exp", metavar='EXP_DIR', help="Working directory of experiment", type=Path)
    parser.add_argument("conf", metavar='conf.yml', type=Path, nargs='?',
                        help="Config File. By default <work_dir>/conf.yml is used")
    parser.add_argument("-G", "--gpu-only", action="store_true", default=False,
                        help="Crash if no GPU is available")
    parser.add_argument("-fp16", "--fp16", action="store_true", default=False,
                        help="Float 16")
    # multi-gpu / multi-node
    parser.add_argument("--local_rank", "--local-rank", type=int, default=-1,
                        help="Multi-GPU - Local rank")
    parser.add_argument("--master-port", type=int, default=-1,
                        help="Master port (for multi-node SLURM jobs)")
    dtorch.setup()
    args = parser.parse_args()
    if args.fp16:
        assert torch.cuda.is_available(), "GPU required for fp16... exiting."
        dtorch.enable_fp16()
    if args.gpu_only:
        assert torch.cuda.is_available(), "No GPU found... exiting"
    if torch.cuda.is_available():
        for i in range(torch.cuda.device_count()):
            log.info(f'Cuda {i}: {torch.cuda.get_device_properties(i)}')
    conf_file: Path = args.conf if args.conf else args.exp / 'conf.yml'
    assert conf_file.exists(), f'NOT FOUND: {conf_file}'
    conf = load_conf(conf_file)
    ExpFactory = Experiment  # default
    if conf.get('model_type') == 'tfmcls':
        log.info("Classification experiment")
        from rtg.emb.tfmcls import ClassificationExperiment
        ExpFactory = ClassificationExperiment
    elif conf.get('spark', {}):
        log.info("Big experiment mode enabled; checking pyspark backend")
        try:
            import pyspark
            log.info("pyspark is available")
        # Fix: was a bare `except:`; only an import failure is expected here, and a
        # bare clause would also swallow KeyboardInterrupt/SystemExit before re-raising.
        except ImportError:
            log.warning("unable to import pyspark. Please do 'pip install pyspark' and run again")
            raise
        from rtg.big.exp import BigTranslationExperiment
        ExpFactory = BigTranslationExperiment
    read_only = not dtorch.is_global_main  # only main can modify experiment
    exp = ExpFactory(args.exp, config=conf_file, read_only=read_only)
    dtorch.barrier()
    return exp
def main():
    """Console entry point: build the experiment from CLI args and run the pipeline."""
    Pipeline(exp=parse_args()).run()


if __name__ == '__main__':
    main()
| 45.854369 | 119 | 0.583104 |
0ba5201628f1b259e9996c68323ad472b7f27484 | 590 | py | Python | train.py | xsir317/AlphaRenju | d5fdcf8d1442e4e43661a4cee88c95d5c25fd45a | [
"MIT"
] | 6 | 2019-02-22T17:47:15.000Z | 2020-08-28T18:46:05.000Z | train.py | xsir317/AlphaRenju | d5fdcf8d1442e4e43661a4cee88c95d5c25fd45a | [
"MIT"
] | null | null | null | train.py | xsir317/AlphaRenju | d5fdcf8d1442e4e43661a4cee88c95d5c25fd45a | [
"MIT"
] | null | null | null | from game import Game
#from policy_value_net import PolicyValueNet
from policy_value_net_residual import PolicyValueNet
from players import MCTSPlayer
from trainer import Trainer
#new policy network
#new Game
#set game player
#init_model = './renju'
init_model = './master'
policy_value_net = PolicyValueNet(model_file=init_model)
#new MCTS
player = MCTSPlayer(policy_value_net.policy_value_fn,5,1200,is_selfplay = 1)
game = Game(player,player)
trainer = Trainer(policy_value_net)
while True:
winner, game_data = game.do_play()
player.reset_player()
trainer.feed(game_data) | 24.583333 | 76 | 0.794915 |
4059a01ed76c0a3726374bae57cc2c88bad3268b | 894 | py | Python | flaskrestful/DjangoProject/App/migrations/0001_initial.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | flaskrestful/DjangoProject/App/migrations/0001_initial.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | flaskrestful/DjangoProject/App/migrations/0001_initial.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-08-16 06:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial (auto-generated) schema for App: creates Grade and Student tables."""

    initial = True

    # First migration of the app: no dependencies on other migrations.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Grade',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('g_name', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('s_name', models.CharField(max_length=16)),
                ('s_age', models.IntegerField(default=18)),
            ],
        ),
    ]
| 27.9375 | 114 | 0.564877 |
8425a615f4cd59999f2d82a6a5c3713161e0f61f | 2,974 | py | Python | test.py | mwizasimbeye11/Makumbi | 2cf6214c6dab830b7e87aa8558d2b138cda0f54a | [
"MIT"
] | null | null | null | test.py | mwizasimbeye11/Makumbi | 2cf6214c6dab830b7e87aa8558d2b138cda0f54a | [
"MIT"
] | null | null | null | test.py | mwizasimbeye11/Makumbi | 2cf6214c6dab830b7e87aa8558d2b138cda0f54a | [
"MIT"
] | 3 | 2016-05-18T10:47:57.000Z | 2017-08-31T14:52:58.000Z | from apixu.client import ApixuClient, ApixuException
import MySQLdb
api_key = 'GET YOU API KEY FROM APIXU.COM'
client = ApixuClient(api_key)
db = MySQLdb.connect("localhost", "root", "", "zambia_weather")
cursor = db.cursor()
# Town names array, helps to grab the data for each specific town.
towns_one = ['Chadiza', 'Chama', 'Chavuma', 'Chembe', 'Chibombo', 'Chiengi', 'Chililabombwe', 'Chilubi', 'Chingola', 'Chinsali']
towns_two = ['Chipata', 'Chirundu', 'Choma', 'Gwembe', 'Isoka', 'Kabwe', 'Kafue', 'Kalabo']
towns_three = ['Kalomo', 'Kaoma', 'Kapiri', 'Kasama', 'Kasempa', 'Kataba', 'Katete', 'Kawambwa', 'Kazembe']
towns_four = ['Kazungula', 'Kitwe', 'Livingstone', 'Luangwa', 'Luanshya', 'Lukulu', 'Lundazi']
towns_five = ['Lusaka', 'Maamba', 'Makeni', 'Mansa', 'Mazabuka', 'Mbala', 'Mbereshi', 'Milenge']
towns_six = ['Mkushi', 'Mongu', 'Monze', 'Mpika', 'Mporokoso', 'Mpulungu', 'Mufulira', 'Mumbwa', 'Muyombe']
towns_seven = ['Mwinilunga', 'Nchelenge', 'Ndola', 'Ngoma', 'Nkana', 'Pemba', 'Petauke', 'Samfya', 'Senanga']
towns_eight = ['Serenje', 'Sesheke', 'Shiwa', 'Ngandu', 'Siavonga', 'Sikalongo', 'Sinazongwe', 'Solwezi', 'Zambezi', 'Zimba']
towns = towns_one + towns_two + towns_three + towns_four + towns_five + towns_six + towns_seven + towns_eight
for x in towns:
print x
current = client.getCurrentWeather(q=x)
tables = """CREATE TABLE IF NOT EXISTS %s (
`id` int(11) NOT NULL AUTO_INCREMENT,
`cloud` int(5) DEFAULT NULL,
`condition_text` varchar(20) DEFAULT NULL,
`condition_code` int(5) DEFAULT NULL,
`icon` text,
`temperature` float DEFAULT NULL,
`humidity` int(5) DEFAULT NULL,
`wind_degree` int(5) DEFAULT NULL,
`wind_dir` varchar(5) DEFAULT NULL,
`wind_kph` float DEFAULT NULL,
`localtime` text,
`region` varchar(30) DEFAULT NULL,
`tz_id` varchar(50) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=1 ;""" % x
cursor = db.cursor()
cursor.execute(tables)
# Data from API
cloud = current['current']['cloud']
code = current['current']['condition']['code']
condition_icon = current['current']['condition']['icon']
condition_text = current['current']['condition']['text']
humidity = current['current']['humidity']
temperature = current['current']['temp_c']
wind_degree = current['current']['wind_degree']
wind_dir = current['current']['wind_dir']
wind_speed = current['current']['wind_kph']
localtime = current['location']['localtime']
region = current['location']['region']
tz_id = current['location']['tz_id']
sql = cursor.execute("INSERT INTO "+ x +" VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", (
0, cloud, condition_text, code, condition_icon, temperature, humidity, wind_degree, wind_dir, wind_speed, localtime,
region, tz_id))
db.commit()
db.close()
| 47.206349 | 128 | 0.63887 |
b33c6c5d123666f7fb7057103fd50165e0004c5d | 2,024 | gyp | Python | gyp/pdfviewer_lib.gyp | ecoal95/skia | e24d96fc38d8dc96eeeb4915cc21bf7b31178844 | [
"BSD-3-Clause"
] | 111 | 2015-01-13T22:01:50.000Z | 2021-06-10T15:32:48.000Z | gyp/pdfviewer_lib.gyp | ecoal95/skia | e24d96fc38d8dc96eeeb4915cc21bf7b31178844 | [
"BSD-3-Clause"
] | 129 | 2015-01-14T16:07:02.000Z | 2020-03-11T19:44:42.000Z | gyp/pdfviewer_lib.gyp | ecoal95/skia | e24d96fc38d8dc96eeeb4915cc21bf7b31178844 | [
"BSD-3-Clause"
] | 64 | 2015-01-14T16:45:39.000Z | 2021-09-08T11:16:05.000Z | # GYP file to build pdfviewer.
#
# To build on Linux:
# ./gyp_skia pdfviewer.gyp && make pdfviewer
#
{
'targets': [
{
'target_name': 'pdfviewer_lib',
'type': 'static_library',
'sources': [
# FIXME: Include directory is named "inc" (instead of "include") in
# order to not be considered the public API.
'../experimental/PdfViewer/inc/SkPdfContext.h',
'../experimental/PdfViewer/inc/SkPdfDiffEncoder.h',
'../experimental/PdfViewer/inc/SkPdfRenderer.h',
'../experimental/PdfViewer/inc/SkPdfTokenLooper.h',
'../experimental/PdfViewer/src/SkPdfContext.cpp',
'../experimental/PdfViewer/src/SkPdfRenderer.cpp',
'../experimental/PdfViewer/src/SkTDStackNester.h',
'../experimental/PdfViewer/src/SkPdfDiffEncoder.cpp',
'../experimental/PdfViewer/SkPdfGraphicsState.cpp',
'../experimental/PdfViewer/SkPdfFont.cpp',
'../experimental/PdfViewer/SkPdfReporter.cpp',
'../experimental/PdfViewer/SkPdfUtils.cpp',
#'../experimental/PdfViewer/SkPdfNYI.cpp',
'../experimental/PdfViewer/SkTrackDevice.cpp',
'../experimental/PdfViewer/SkTracker.cpp',
'../experimental/PdfViewer/pdfparser/native/SkPdfNativeObject.cpp',
'../experimental/PdfViewer/pdfparser/native/SkPdfNativeTokenizer.cpp',
'../experimental/PdfViewer/pdfparser/native/SkPdfNativeDoc.cpp',
'../experimental/PdfViewer/pdfparser/native/pdfapi/SkPdfMapper_autogen.cpp',
'../experimental/PdfViewer/pdfparser/native/pdfapi/SkPdfHeaders_autogen.cpp',
],
'include_dirs': [
'../experimental/PdfViewer',
'../experimental/PdfViewer/inc',
'../experimental/PdfViewer/src',
'../experimental/PdfViewer/pdfparser',
'../experimental/PdfViewer/pdfparser/native',
'../experimental/PdfViewer/pdfparser/native/pdfapi',
],
'dependencies': [
'skia_lib.gyp:skia_lib',
'skflate.gyp:skflate',
],
},
],
}
| 38.923077 | 85 | 0.647233 |
e8e126385a3325b54608c2b7ff872037fe2a302e | 9,768 | py | Python | docs/conf.py | KelSolaar/anytree | 56725db44f5cabb1ebcaf4f8e9bb3786e0e80002 | [
"Apache-2.0"
] | 1 | 2019-01-24T21:20:38.000Z | 2019-01-24T21:20:38.000Z | docs/conf.py | KelSolaar/anytree | 56725db44f5cabb1ebcaf4f8e9bb3786e0e80002 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | KelSolaar/anytree | 56725db44f5cabb1ebcaf4f8e9bb3786e0e80002 | [
"Apache-2.0"
] | 1 | 2019-07-29T06:45:30.000Z | 2019-07-29T06:45:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# anytree documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 26 19:57:13 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# Project identity reused by the Sphinx metadata assignments below.
name = "anytree"
version = "2.2.1"
author = 'c0fec0de'
description = "Python Tree Data"

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = name

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = version  # no-op self-assignment kept from the sphinx-quickstart template
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % name
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '%s.tex' % name, description,
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, name, description,
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, name, description,
author, name, description,
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/3', None),
'solib': ('http://readthedocs.commsolid.com/projects/solib/latest', None),
'gensolid': ('http://readthedocs.commsolid.com/projects/gensolid/latest', None),
}
intersphinx_cache_limit = 0
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
autoclass_content = 'both'
autodoc_member_order = 'bysource'
| 31.509677 | 84 | 0.71335 |
09d88d54f0a454776c4e1e3751322e6399384159 | 6,020 | py | Python | RSA_py/src/sieve.py | R3DDY97/crypto-py | ffc2ddb53ea9ed077fea746c92c63dc6f6d3e4df | [
"MIT"
] | 3 | 2018-06-18T15:12:23.000Z | 2020-09-10T16:02:13.000Z | RSA_py/src/sieve.py | R3DDY97/crypto-py | ffc2ddb53ea9ed077fea746c92c63dc6f6d3e4df | [
"MIT"
] | null | null | null | RSA_py/src/sieve.py | R3DDY97/crypto-py | ffc2ddb53ea9ed077fea746c92c63dc6f6d3e4df | [
"MIT"
] | null | null | null | from bisect import bisect
from array import array as _array
def _arange(a, b):
ar = _array('l', [0] * (b - a))
for i, e in enumerate(range(a, b)):
ar[i] = e
return ar
def as_int(n):
    """Convert the argument to a builtin integer.

    The return value is guaranteed to compare equal to the input;
    ValueError is raised when the input has a non-integral value.

    Examples
    ========
    >>> as_int(3.0)
    3
    >>> as_int(3.5)
    Traceback (most recent call last):
    ...
    ValueError: ... is not an integer
    """
    try:
        value = int(n)
        if value != n:
            # Lossy conversion (e.g. 3.5): funnel into the TypeError
            # handler so both failure modes raise the same ValueError.
            raise TypeError
    except TypeError:
        raise ValueError('%s is not an integer' % (n,))
    else:
        return value
class Sieve:
    """An infinite list of prime numbers, implemented as a dynamically
    growing sieve of Eratosthenes. When a lookup is requested involving
    an odd number that has not been sieved, the sieve is automatically
    extended up to that number.
    Examples
    ========
    >>> from sympy import sieve
    >>> sieve._reset() # this line for doctest only
    >>> 25 in sieve
    False
    >>> sieve._list
    array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23])
    """
    # data shared (and updated) by all Sieve instances
    _list = _array('l', [2, 3, 5, 7, 11, 13])
    def __repr__(self):
        return "<Sieve with %i primes sieved: 2, 3, 5, ... %i, %i>" % \
            (len(self._list), self._list[-2], self._list[-1])
    def _reset(self):
        """Return sieve to its initial state for testing purposes.
        """
        # keep only the six seed primes defined on the class
        self._list = self._list[:6]
    def extend(self, n):
        """Grow the sieve to cover all primes <= n (a real number).
        Examples
        ========
        >>> from sympy import sieve
        >>> sieve._reset() # this line for doctest only
        >>> sieve.extend(30)
        >>> sieve[10] == 29
        True
        """
        n = int(n)
        if n <= self._list[-1]:
            return
        # We need to sieve against all bases up to sqrt(n).
        # This is a recursive call that will do nothing if there are enough
        # known bases already.
        maxbase = int(n**0.5) + 1
        self.extend(maxbase)
        # Create a new sieve starting from sqrt(n)
        begin = self._list[-1] + 1
        newsieve = _arange(begin, n + 1)
        # Now eliminate all multiples of primes in [2, sqrt(n)]
        for p in self.primerange(2, maxbase):
            # Start counting at a multiple of p, offsetting
            # the index to account for the new sieve's base index
            startindex = (-begin) % p
            for i in range(startindex, len(newsieve), p):
                newsieve[i] = 0
        # Merge the sieves: composite slots were zeroed above, so any
        # non-zero entry left in newsieve is prime.
        self._list += _array('l', [x for x in newsieve if x])
    def extend_to_no(self, i):
        """Extend to include the ith prime number.
        i must be an integer.
        The list is extended by 50% if it is too short, so it is
        likely that it will be longer than requested.
        Examples
        ========
        >>> from sympy import sieve
        >>> sieve._reset() # this line for doctest only
        >>> sieve.extend_to_no(9)
        >>> sieve._list
        array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23])
        """
        i = as_int(i)
        while len(self._list) < i:
            # grow the sieved bound geometrically until enough primes exist
            self.extend(int(self._list[-1] * 1.5))
    def primerange(self, a, b):
        """Generate all prime numbers in the range [a, b).
        Examples
        ========
        >>> from sympy import sieve
        >>> print([i for i in sieve.primerange(7, 18)])
        [7, 11, 13, 17]
        """
        from sympy.functions.elementary.integers import ceiling
        # wrapping ceiling in int will raise an error if there was a problem
        # determining whether the expression was exactly an integer or not
        a = max(2, int(ceiling(a)))
        b = int(ceiling(b))
        if a >= b:
            return
        self.extend(b)
        # search() returns 1-based indices; start at the first prime >= a
        i = self.search(a)[1]
        maxi = len(self._list) + 1
        while i < maxi:
            p = self._list[i - 1]
            if p < b:
                yield p
                i += 1
            else:
                return
    def search(self, n):
        """Return the indices i, j of the primes that bound n.
        If n is prime then i == j.
        Although n can be an expression, if ceiling cannot convert
        it to an integer then an error will be raised.
        Examples
        ========
        >>> from sympy import sieve
        >>> sieve.search(25)
        (9, 10)
        >>> sieve.search(23)
        (9, 9)
        """
        from sympy.functions.elementary.integers import ceiling
        # wrapping ceiling in int will raise an error if there was a problem
        # determining whether the expression was exactly an integer or not
        test = int(ceiling(n))
        n = int(n)
        if n < 2:
            raise ValueError("n should be >= 2 but got: %s" % n)
        if n > self._list[-1]:
            self.extend(n)
        # bisect yields the 1-based index of the largest prime <= n
        b = bisect(self._list, n)
        if self._list[b - 1] == test:
            return b, b
        else:
            return b, b + 1
    def __contains__(self, n):
        try:
            n = as_int(n)
            assert n >= 2
        except (ValueError, AssertionError):
            return False
        if n % 2 == 0:
            # 2 is the only even prime; skip the search for other evens
            return n == 2
        a, b = self.search(n)
        return a == b
    def __getitem__(self, n):
        """Return the nth prime number"""
        # indexing is 1-based: sieve[1] == 2
        if isinstance(n, slice):
            self.extend_to_no(n.stop)
            return self._list[n.start - 1:n.stop - 1:n.step]
        else:
            n = as_int(n)
            self.extend_to_no(n)
            return self._list[n - 1]
# Generate a global object for repeated use in trial division etc.
# Note: Sieve._list is class-level state, so growth is shared by instances.
sieve = Sieve()
| 30.1 | 76 | 0.534219 |
6270c96f33633543019c935faa107f2684b44f10 | 283 | py | Python | libraries/botframework-streaming/botframework/streaming/payload_transport/__init__.py | andreikop/botbuilder-python | 5e073e0c68fcbdc558133bdbd59a02453e597abe | [
"MIT"
] | 388 | 2019-05-07T15:53:21.000Z | 2022-03-28T20:29:46.000Z | libraries/botframework-streaming/botframework/streaming/payload_transport/__init__.py | andreikop/botbuilder-python | 5e073e0c68fcbdc558133bdbd59a02453e597abe | [
"MIT"
] | 1,286 | 2019-05-07T23:38:19.000Z | 2022-03-31T10:44:16.000Z | libraries/botframework-streaming/botframework/streaming/payload_transport/__init__.py | andreikop/botbuilder-python | 5e073e0c68fcbdc558133bdbd59a02453e597abe | [
"MIT"
] | 168 | 2019-05-14T20:23:25.000Z | 2022-03-16T06:49:14.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .payload_receiver import PayloadReceiver
from .payload_sender import PayloadSender
from .send_packet import SendPacket
# Public names re-exported by the payload_transport package.
__all__ = ["PayloadReceiver", "PayloadSender", "SendPacket"]
| 25.727273 | 60 | 0.80212 |
96af0543ef6cf7f297b3102058d49371ebebbcad | 1,673 | py | Python | web/blueprints/helpers/finance.py | marcelb98/pycroft | 34cc59d9ab7fdc0c20b09b4851111048a9f64d90 | [
"Apache-2.0"
] | null | null | null | web/blueprints/helpers/finance.py | marcelb98/pycroft | 34cc59d9ab7fdc0c20b09b4851111048a9f64d90 | [
"Apache-2.0"
] | null | null | null | web/blueprints/helpers/finance.py | marcelb98/pycroft | 34cc59d9ab7fdc0c20b09b4851111048a9f64d90 | [
"Apache-2.0"
] | null | null | null | from pycroft.model.finance import Split, Transaction
def build_transactions_query(account, search=None, sort_by='valid_on', sort_order=None,
                             offset=None, limit=None, positive=None):
    """Build a query returning the Splits for a finance account

    :param Account account: The finance Account to filter by
    :param str search: Substring that must occur in the transaction
        description (matched case-insensitively)
    :param str sort_by: The column to sort by. Must be a column of
        :cls:`Transaction` or :cls:`Split`; unknown names fall back
        to ``'valid_on'``.
    :param str sort_order: Trigger descending sort order if the value
        is ``'desc'``. See also the effect of :attr:`positive`.
    :param int offset: Number of results to skip (``None`` for none)
    :param int limit: Maximum number of results (``None`` for no limit)
    :param bool positive: if positive is set to ``True``, only get
        splits with amount ≥ 0, and amount < 0 if ``False``. In the
        latter case, the effect of the :attr:`sort_order` parameter is
        being reversed.
    :returns: The prepared SQLAlchemy query
    :rtype: Query
    """
    query = Split.q.join(Transaction).filter(Split.account == account)
    # Whitelist sort_by against the model columns: it ends up in a raw
    # order_by() string, so it must never contain arbitrary user input.
    if not (sort_by in Transaction.__table__.columns
            or sort_by in Split.__table__.columns):
        sort_by = "valid_on"
    # Requesting only negative splits flips the sort direction
    # (documented behaviour of positive=False).
    descending = (sort_order == "desc") ^ (positive is False)
    ordering = sort_by + " desc" if descending else sort_by
    if search:
        query = query.filter(Transaction.description.ilike('%{}%'.format(search)))
    if positive is not None:
        if positive:
            query = query.filter(Split.amount >= 0)
        else:
            query = query.filter(Split.amount < 0)
    query = query.order_by(ordering).offset(offset).limit(limit)
    return query
| 37.177778 | 87 | 0.653915 |
01bd4ce3265e53942ef31574bba212c8a5d39d7a | 16,055 | py | Python | src/metarace/timy.py | ndf-zz/metarace-v1 | c57e5f804cdb1f1785e1a95f2fa2a5fcc1cfa3a9 | [
"MIT"
] | null | null | null | src/metarace/timy.py | ndf-zz/metarace-v1 | c57e5f804cdb1f1785e1a95f2fa2a5fcc1cfa3a9 | [
"MIT"
] | null | null | null | src/metarace/timy.py | ndf-zz/metarace-v1 | c57e5f804cdb1f1785e1a95f2fa2a5fcc1cfa3a9 | [
"MIT"
] | null | null | null |
"""Alge Timy I/O helper.
This module provides an interface to an Alge Timy connected
via serial port. Methods are provided to read timing events
as tod objects and to write commands to the Timy.
A calling thread creates a timy thread and configures it via the
public methods. Timing events are delivered via callback function.
Timing events are only returned if the corresponding channel
is armed. The channel is then de-armed automatically unless
the armlock has been set by the calling thread.
For example:
Calling thread Cmd Thread Timy
<- C0 1:23.4567
C0 not armed
response is None
arm(3) ->
C3 armed
<- C3 1:24.4551
C3 queued
C3 1:24.4551 <-
C3 dearmed
<- C3 1:24.4901
C3 not armed
response is None
When a calling thread sets the arming lock with timy.armlock(True),
a channel remains armed until explicitly dearmed by a calling thread.
Notes:
- ALL timing impulses correctly read from an attached
Timy will be logged by the command thread with the log
label 'TIMER', even when the channel is not armed.
- It is assumed that messages are received over the serial
connection in the same order as they are measured by
the Timy.
"""
import threading
import Queue
import serial
import logging
from metarace import sysconf
from metarace import tod
from metarace import strops
# System default timy serial port
DEFPORT = u'/dev/ttyS0'
ENCODING = u'cp437' # Timy serial interface encoding
# TIMY serial baudrate
TIMY_BAUD = 38400 # default baudrate
TIMY_CTSRTS = False # default hardware flow control
# thread queue commands
TCMDS = (u'EXIT', u'PORT', u'MSG', u'TRIG', u'RCLR')
# timing channel ids (0-9 map to Timy channel strings 'C0'..'C9')
CHAN_UNKNOWN = -1
CHAN_START = 0
CHAN_FINISH = 1
CHAN_PA = 2
CHAN_PB = 3
CHAN_200 = 4
CHAN_100 = 5
CHAN_AUX = 6
CHAN_7 = 7
CHAN_8 = 8
CHAN_INT = 9
# carriage return terminates each message from the Timy
CR = unichr(0x0d)
LOG = logging.getLogger(u'metarace.timy')
LOG.setLevel(logging.DEBUG)
# custom log level for timing impulses, between INFO (20) and WARNING (30)
TIMER_LOG_LEVEL = 25
logging.addLevelName(TIMER_LOG_LEVEL, u'TIMER')
def timy_checksum(msg):
    """Return the 8-bit additive checksum of the Timy message string."""
    # Note: Defer fix to py3 where read returns bytes
    total = sum(ord(ch) for ch in msg)
    return total & 0xff
def timy_getsum(chkstr):
    """Convert a Timy checksum string to an integer, or -1 if invalid."""
    try:
        return int(chkstr, 16)
    except Exception:
        # Malformed hex digits (or non-string input) yield the sentinel -1.
        return -1
def chan2id(chanstr=u'0'):
    """Return the integer channel id for the provided channel string."""
    cid = CHAN_UNKNOWN
    # Accept a 'Cn' style channel label, or anything int() can parse.
    if (isinstance(chanstr, basestring) and len(chanstr) > 1
            and chanstr[0].upper() == u'C' and chanstr[1].isdigit()):
        cid = int(chanstr[1])
    else:
        try:
            cid = int(chanstr)
        except Exception:
            pass
    # Clamp anything out of range to the unknown marker.
    if not CHAN_UNKNOWN <= cid <= CHAN_INT:
        cid = CHAN_UNKNOWN
    return cid
def id2chan(chanid=0):
    """Return the normalised channel string for the provided channel id."""
    if isinstance(chanid, int) and CHAN_START <= chanid <= CHAN_INT:
        return u'C' + unicode(chanid)
    # Out-of-range or non-integer ids render as the unknown channel.
    return u'C?'
class timy(threading.Thread):
    """Timy thread object class.
    The thread owns the serial connection: callers interact by queueing
    commands through the public methods, and armed timing impulses are
    delivered through the registered callback.
    """
    def __init__(self, port=None, name=u'timy'):
        """Construct timy thread object.
        Named parameters:
        port -- serial port
        name -- text identifier for attached unit
        """
        threading.Thread.__init__(self)
        self.__port = None
        self.__cqueue = Queue.Queue() # command queue
        self.__rdbuf = u''
        # per-channel arm flags for channels C0..C9
        self.__arms = [False, False, False, False, False,
                       False, False, False, False, False]
        self.__clearing = False
        self.__armlocked = False
        self.__chandelay = {} # filled in from sysconf
        self.__cb = self.__defcallback
        self.name = name
        self.error = False
        if port is not None:
            self.setport(port)
    def __defcallback(self, evt=None):
        """Default callback is a tod log entry."""
        LOG.debug(evt)
        return False
    def setcb(self, func=None):
        """Set or clear the event callback."""
        if func is not None:
            self.__cb = func
        else:
            self.__cb = self.__defcallback
    def printline(self, msg=u''):
        """Print msg to Timy printer, stripped and truncated."""
        # truncate to 32 characters for the DTP print command
        lmsg = msg[0:32]
        LOG.log(TIMER_LOG_LEVEL, lmsg)
        self.__cqueue.put_nowait((u'MSG', u'DTP' + lmsg + u'\r'))
    def linefeed(self):
        """Advance Timy printer by one line."""
        self.__cqueue.put_nowait((u'MSG', u'PRILF\r'))
    def clrmem(self):
        """Clear memory in attached Timy."""
        self.__cqueue.put_nowait((u'MSG', u'CLR\r'))
    def status(self):
        """Request status and current program."""
        self.__cqueue.put_nowait((u'MSG', u'NSF\r'))
        self.__cqueue.put_nowait((u'MSG', u'PROG?\r'))
    def dumpall(self):
        """Request a dump of all times to host."""
        self.__cqueue.put_nowait((u'MSG', u'RSM\r'))
    def delaytime(self, newdelay):
        """Update the timy hardware channel delays."""
        dt = tod.mktod(newdelay)
        if dt is not None:
            if dt > tod.ZERO and dt < tod.tod(u'99.99'):
                # [6:] drops the leading hh:mm: from the zero-padded
                # rawtime - assumes tod.rawtime format; TODO confirm
                nt = dt.rawtime(2, zeros=True)[6:]
                self.__cqueue.put_nowait((u'MSG', u'DTS' + nt + u'\r'))
                self.__cqueue.put_nowait((u'MSG', u'DTF' + nt + u'\r'))
            else:
                LOG.info(u'Ignoring invalid delay time: %s', dt.rawtime())
        else:
            LOG.info(u'Ignoring invalid delay time')
    def printer(self, enable=False):
        """Enable or disable printer."""
        cmd = u'0'
        if enable:
            cmd = u'1'
        self.__cqueue.put_nowait((u'MSG', u'PRINTER' + cmd + u'\r'))
    def printimp(self, doprint=True):
        """Enable or disable internal print of timing impulses."""
        cmd = u'1'
        if doprint:
            cmd = u'0'
        self.__cqueue.put_nowait((u'MSG', u'PRIIGN' + cmd + u'\r'))
    def keylock(self, setlock=True):
        """Set or clear the timy keypad lock function."""
        cmd = u'1'
        if not setlock:
            cmd = u'0'
        self.__cqueue.put_nowait((u'MSG', u'KL' + cmd + u'\r'))
    def write(self, msg=None):
        """Queue a raw command string to attached Timy."""
        # NOTE(review): the msg=None default would raise AttributeError on
        # rstrip(); callers are expected to always pass a string.
        self.__cqueue.put_nowait((u'MSG', msg.rstrip() + u'\r'))
    def exit(self, msg=None):
        """Request thread termination."""
        # NOTE(review): run() uses a local 'running' flag - this attribute
        # is not read by the thread loop; the EXIT command does the work.
        self.running = False
        self.__cqueue.put_nowait((u'EXIT', msg))
    def setport(self, device=None):
        """Request (re)opening port as specified.
        Device is passed unchanged to serial.Serial constructor.
        Call setport with no argument, None, or an empty string
        to close an open port or to run the timy thread with no
        external device.
        """
        self.__cqueue.put_nowait((u'PORT', device))
    def arm(self, channel=0):
        """Arm timing channel 0 - 8 for response through rqueue."""
        chan = chan2id(channel)
        LOG.debug(u'Arming channel %s', id2chan(chan))
        self.__arms[chan] = True;
    def dearm(self, channel=0):
        """Disarm timing channel 0 - 8 for response through rqueue."""
        chan = chan2id(channel)
        LOG.debug(u'De-arm channel %s', id2chan(chan))
        self.__arms[chan] = False;
    def armlock(self, lock=True):
        """Set or clear the arming lock - flag only."""
        self.__armlocked = bool(lock)
        LOG.debug(u'Armlock is now %s', self.__armlocked)
    def sane(self):
        """Initialise Timy to 'sane' values.
        Values set by sane():
        TIMIYINIT - initialise
        KL0 - keylock off
        CHK1 - enable "checksum"
        PRE4 - 10,000th sec precision
        RR0 - Round by 'cut'
        BE1 - Beep on
        DTS02.00 - Start delay 2.0
        DTF02.00 - Finish & intermediate delay 2.0
        EMU0 - Running time off
        PRINTER0 - Printer off
        PRIIGN1 - Don't print all impulses to receipt
        PRILF - Linefeed
        All commands are queued individually to the command thread
        so it may be necessary to use wait() to suspend the calling
        thread until all the commands are sent:
        t.start()
        t.sane()
        t.wait()
        Note: "sane" here comes from use at track meets with the
        metarace program. It may not always make sense eg, to
        have all channel delays set to 2 hundredths of a
        second, or to have the internal impulse print off
        by default.
        """
        for msg in [u'TIMYINIT', u'NSF', u'PROG?', u'KL0', u'CHK1', u'PRE4',
                    u'RR0', u'BE1', u'DTS02.00', u'DTF02.00', u'EMU0',
                    u'PRINTER0', u'PRIIGN1',
                    u'DTPMetarace ' + tod.now().meridian(), u'PRILF']:
            self.write(msg)
    def trig(self, impulse):
        """Queue a fake timing event."""
        # normalise the channel string before queueing
        impulse.chan = id2chan(chan2id(impulse.chan))
        self.__cqueue.put_nowait((u'TRIG', impulse))
    def wait(self):
        """Suspend caller until the command queue is empty."""
        self.__cqueue.join()
    def __parse_message(self, msg):
        """Return tod object from timing msg or None."""
        ret = None
        msg = msg.rstrip() # remove cr/lf if present
        tsum = 0
        csum = 0
        # A timing impulse record is 26 chars; 28 when the optional two
        # hex checksum digits are appended.
        if len(msg) == 28:
            # assume checksum present, grab it and truncate msg
            tsum = timy_getsum(msg[26:28])
            msg = msg[0:26]
            csum = timy_checksum(msg)
        if len(msg) == 26:
            # assume now msg is a timing impulse
            if tsum == csum:
                e = msg.split()
                if len(e) == 4:
                    cid = chan2id(e[1])
                    ret = tod.mktod(e[2])
                    if ret is not None:
                        if cid in self.__chandelay:
                            # note: ret might wrap over 24hr boundary
                            ret = ret - self.__chandelay[cid]
                        ret.index = e[0]
                        ret.chan = e[1]
                        ret.refid = u''
                        ret.source = self.name
                    else:
                        LOG.error(u'Invalid message: %s', msg)
                else:
                    LOG.error(u'Invalid message: %s', msg)
            else:
                LOG.error(u'Corrupt message: %s', msg)
                LOG.error(u'Checksum fail: 0x%02X != 0x%02X', tsum, csum)
        else:
            msg = msg.strip()
            if msg == u'CLR':
                self.__cqueue.put_nowait((u'RCLR', u''))
            LOG.debug(msg) # log std responses
        return ret
    def __proc_impulse(self, st):
        """Process a parsed tod impulse from the Timy.
        On reception of a timing channel message, the channel is
        compared against the list of armed channels. If the channel
        is armed, the callback is run.
        If arm lock is not set, the channel is then de-armed.
        """
        LOG.log(TIMER_LOG_LEVEL, st)
        channo = chan2id(st.chan)
        if channo != CHAN_UNKNOWN:
            if self.__arms[channo]:
                self.__cb(st)
                if not self.__armlocked:
                    self.__arms[channo] = False
            if st.index.isdigit():
                index = int(st.index)
                # request a memory clear once impulse indices exceed 2000
                if index > 2000 and not self.__clearing:
                    self.__clearing = True
                    self.clrmem()
                    LOG.debug(u'Auto clear memory')
        else:
            pass
        return False
    def __read(self):
        """Read messages from timy until a timeout condition."""
        ch = self.__port.read(1).decode(ENCODING, u'replace')
        # decode ok here - timy uses single-byte encoding
        mcnt = 0
        while ch != u'':
            if ch == CR:
                # Return ends the current 'message'
                self.__rdbuf += ch # include trailing <cr>
                t = self.__parse_message(self.__rdbuf)
                if t is not None:
                    self.__proc_impulse(t)
                self.__rdbuf = u''
                mcnt += 1
                if mcnt > 4: # break to allow write back
                    return
            else:
                self.__rdbuf += ch
            ch = self.__port.read(1).decode(ENCODING, u'replace')
    def run(self):
        running = True
        LOG.debug(u'Starting')
        # re-read serial port and delay config from sysconf
        baudrate = TIMY_BAUD
        if sysconf.has_option(u'timy', u'baudrate'):
            baudrate = strops.confopt_posint(sysconf.get(u'timy',
                                                         u'baudrate'), baudrate)
            LOG.debug(u'Set serial baudrate to: %d', baudrate)
        ctsrts = TIMY_CTSRTS
        if sysconf.has_option(u'timy', u'ctsrts'):
            ctsrts = strops.confopt_bool(sysconf.get(u'timy',
                                                     u'ctsrts'))
            LOG.debug(u'Set serial CTSRTS to: %s', unicode(ctsrts))
        if sysconf.has_option(u'timy', u'chandelay'):
            nd = sysconf.get(u'timy', u'chandelay')
            for cv in nd:
                c = chan2id(cv)
                if c != CHAN_UNKNOWN:
                    nv = tod.mktod(nd[cv])
                    self.__chandelay[c] = nv
                    LOG.debug(u'Set channel delay %s: %s', c, nv.rawtime(4))
        # main loop: drain any serial input, then handle one queued command
        while running:
            try:
                # Read phase
                if self.__port is not None:
                    self.__read()
                    m = self.__cqueue.get_nowait()
                else:
                    m = self.__cqueue.get()
                self.__cqueue.task_done()
                # Write phase
                if isinstance(m, tuple) and m[0] in TCMDS:
                    if m[0]==u'MSG':
                        if self.__port is not None and not self.error:
                            LOG.debug(u'Sending rawmsg: %s', repr(m[1]))
                            self.__port.write(m[1].encode(ENCODING,'replace'))
                    elif m[0] == u'TRIG':
                        if isinstance(m[1], tod.tod):
                            self.__proc_impulse(m[1])
                    elif m[0] == u'RCLR':
                        self.__clearing = False
                    elif m[0] == u'EXIT':
                        LOG.debug(u'Request to close: %s', m[1])
                        running = False
                    elif m[0] == u'PORT':
                        if self.__port is not None:
                            self.__port.close()
                            self.__port = None
                        if m[1] is not None and m[1] not in [
                                u'', u'NULL', u'None']:
                            LOG.debug(u'Re-Connect port: %s @ %d',
                                      m[1], baudrate)
                            self.__port = serial.Serial(m[1], baudrate,
                                                        rtscts=ctsrts,
                                                        timeout=0.2)
                            self.error = False
                        else:
                            LOG.debug(u'Not connected')
                            self.error = True
                    else:
                        pass
                else:
                    LOG.warning(u'Unknown message: %r', m)
            except Queue.Empty:
                pass
            except serial.SerialException as e:
                if self.__port is not None:
                    self.__port.close()
                    self.__port = None
                self.error = True
                LOG.error(u'Serial error: %s', e)
            except Exception as e:
                LOG.error(u'%s: %s', e.__class__.__name__, e)
                self.error = True
        if self.__port is not None:
            self.__port.close()
            self.__port = None
        LOG.info(u'Exiting')
| 34.087049 | 79 | 0.531423 |
3f018ca4588d1d46592a69028842aa87b8b0dfd1 | 654 | py | Python | Core Concepts/Data Preprocessing/PCA.py | WyckliffeAluga/data-chronicles | 5219fe9cdbafb9fd7be88727483952c4c13f2790 | [
"MIT"
] | null | null | null | Core Concepts/Data Preprocessing/PCA.py | WyckliffeAluga/data-chronicles | 5219fe9cdbafb9fd7be88727483952c4c13f2790 | [
"MIT"
] | null | null | null | Core Concepts/Data Preprocessing/PCA.py | WyckliffeAluga/data-chronicles | 5219fe9cdbafb9fd7be88727483952c4c13f2790 | [
"MIT"
] | 1 | 2021-02-09T12:22:55.000Z | 2021-02-09T12:22:55.000Z | from sklearn.decomposition import PCA
# Set up PCA and the X vector for dimensionality reduction
pca = PCA()
# NOTE(review): `wine` (DataFrame), `train_test_split`, `knn` and `y` are
# expected to be defined elsewhere (exercise-style snippet) - confirm
# before running this file standalone.
wine_X = wine.drop("Type", axis=1)
# Apply PCA to the wine dataset X vector
transformed_X = pca.fit_transform(wine_X)
# Look at the percentage of variance explained by the different components
print(pca.explained_variance_ratio_)
# Split the transformed X and the y labels into training and test sets
X_wine_train, X_wine_test, y_wine_train, y_wine_test = train_test_split(transformed_X,y)
# Fit knn to the training data
knn.fit(X_wine_train, y_wine_train)
# Score knn on the test data and print it out
knn.score(X_wine_test, y_wine_test)
| 31.142857 | 88 | 0.793578 |
55737fb35fda50bcf394d8d1ef4ddb6830fb9cff | 4,309 | py | Python | nabu/neuralnetworks/models/ed_decoders/rnn_decoder.py | AzizCode92/nabu | 768988ce4c6fc470f843174d6d7d5807880feb10 | [
"MIT"
] | 117 | 2017-02-10T13:23:23.000Z | 2022-02-20T05:31:04.000Z | nabu/neuralnetworks/models/ed_decoders/rnn_decoder.py | AzizCode92/nabu | 768988ce4c6fc470f843174d6d7d5807880feb10 | [
"MIT"
] | 56 | 2017-04-26T08:51:38.000Z | 2021-08-23T11:59:19.000Z | nabu/neuralnetworks/models/ed_decoders/rnn_decoder.py | AzizCode92/nabu | 768988ce4c6fc470f843174d6d7d5807880feb10 | [
"MIT"
] | 50 | 2017-02-06T21:57:40.000Z | 2021-05-14T23:03:07.000Z | '''@file rnn_decoder.py
contains the general recurrent decoder class'''
from abc import ABCMeta, abstractmethod
import tensorflow as tf
from nabu.neuralnetworks.models.ed_decoders import ed_decoder
class RNNDecoder(ed_decoder.EDDecoder):
'''a speller decoder for the LAS architecture'''
__metaclass__ = ABCMeta
def _decode(self, encoded, encoded_seq_length, targets, target_seq_length,
is_training):
'''
Create the variables and do the forward computation to decode an entire
sequence
Args:
encoded: the encoded inputs, this is a list of
[batch_size x ...] tensors
encoded_seq_length: the sequence lengths of the encoded inputs
as a list of [batch_size] vectors
targets: the targets used as decoder inputs as a list of
[batch_size x ...] tensors
target_seq_length: the sequence lengths of the targets
as a list of [batch_size] vectors
is_training: whether or not the network is in training mode
Returns:
- the output logits of the decoder as a list of
[batch_size x ...] tensors
- the logit sequence_lengths as a list of [batch_size] vectors
- the final state of the decoder as a possibly nested tupple
of [batch_size x ... ] tensors
'''
#get the batch size
batch_size = tf.shape(targets.values()[0])[0]
output_dim = self.output_dims.values()[0]
output_name = self.output_dims.keys()[0]
#prepend a sequence border label to the targets to get the encoder
#inputs
expanded_targets = tf.pad(targets.values()[0], [[0, 0], [1, 0]],
constant_values=output_dim-1)
#create the rnn cell
rnn_cell = self.create_cell(encoded, encoded_seq_length, is_training)
#create the embedding
embedding = lambda ids: tf.one_hot(
ids,
output_dim,
dtype=tf.float32)
#create the decoder helper
helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
inputs=embedding(expanded_targets),
sequence_length=target_seq_length.values()[0],
embedding=embedding,
sampling_probability=float(self.conf['sample_prob'])
)
#create the decoder
decoder = tf.contrib.seq2seq.BasicDecoder(
cell=rnn_cell,
helper=helper,
initial_state=rnn_cell.zero_state(batch_size, tf.float32)
)
#use the decoder
logits, state, logit_seq_length = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder,
impute_finished=True)
logits = logits.rnn_output
return (
{output_name: logits},
{output_name: logit_seq_length},
state)
@abstractmethod
def create_cell(self, encoded, encoded_seq_length, is_training):
'''create the rnn cell
Args:
encoded: the encoded sequences as a [batch_size x max_time x dim]
tensor that will be queried with attention
set to None if the rnn_cell should be created without the
attention part (for zero_state)
encoded_seq_length: the encoded sequence lengths as a [batch_size]
vector
is_training: bool whether or not the network is in training mode
Returns:
an RNNCell object'''
def zero_state(self, encoded_dim, batch_size):
'''get the decoder zero state
Args:
encoded_dim: the dimension of the encoded dict of
integers
batch size: the batch size as a scalar Tensor
Returns:
the decoder zero state as a possibly nested tupple
of [batch_size x ... ] tensors'''
encoded = {name:tf.zeros([batch_size, 0, encoded_dim[name]])
for name in encoded_dim}
rnn_cell = self.create_cell(
encoded,
tf.zeros([batch_size]),
False)
return rnn_cell.zero_state(batch_size, tf.float32)
def __getstate__(self):
    '''Return the picklable state of this object (its attribute dict).'''
    # vars(self) is the same object as self.__dict__.
    return vars(self)
| 33.664063 | 79 | 0.603852 |
c0b2683b036db8c1577b99f4aeb99f6c320b62c8 | 2,152 | py | Python | src/skmultiflow/_demos/_test_prequential.py | denisesato/scikit-multiflow | 3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95 | [
"BSD-3-Clause"
] | 663 | 2017-11-16T15:48:45.000Z | 2022-03-28T07:38:17.000Z | src/skmultiflow/_demos/_test_prequential.py | denisesato/scikit-multiflow | 3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95 | [
"BSD-3-Clause"
] | 293 | 2017-12-16T12:33:49.000Z | 2022-02-22T03:34:25.000Z | src/skmultiflow/_demos/_test_prequential.py | denisesato/scikit-multiflow | 3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95 | [
"BSD-3-Clause"
] | 201 | 2017-11-30T15:52:30.000Z | 2022-03-25T21:46:55.000Z | from sklearn.linear_model.passive_aggressive import PassiveAggressiveClassifier
from skmultiflow.core.pipeline import Pipeline
from skmultiflow.data.file_stream import FileStream
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
def demo(output_file=None, instances=40000):
    """ _test_prequential

    This demo shows how to produce a prequential evaluation.

    The first thing needed is a stream. For this case we use a file stream
    which gets its samples from the sea_big.csv file.

    Then we need to setup a classifier, which in this case is an instance
    of sklearn's PassiveAggressiveClassifier. Then, optionally we create a
    pipeline structure, initialized on that classifier.

    The evaluation is then run.

    Parameters
    ----------
    output_file: string
        The name of the csv output file

    instances: int
        The evaluation's max number of instances

    """
    # Setup the File Stream.
    # BUG FIX: this assignment was commented out while `stream` was still
    # passed to evaluator.evaluate() below, raising NameError at runtime.
    stream = FileStream("https://raw.githubusercontent.com/scikit-multiflow/streaming-datasets/"
                        "master/sea_big.csv")

    # Setup the classifier.
    classifier = PassiveAggressiveClassifier()

    # Setup a single-step pipeline wrapping the classifier.
    pipe = Pipeline([('Classifier', classifier)])

    # Setup the evaluator.
    evaluator = EvaluatePrequential(pretrain_size=200, max_samples=instances, batch_size=1, n_wait=100, max_time=1000,
                                    output_file=output_file, show_plot=True,
                                    metrics=['kappa', 'kappa_t', 'performance'])

    # Evaluate
    evaluator.evaluate(stream=stream, model=pipe)
if __name__ == '__main__':
    # Run the prequential-evaluation demo for 20,000 samples,
    # writing results to test_prequential.csv.
    demo('test_prequential.csv', 20000)
| 37.103448 | 118 | 0.685874 |
00524d8d816b2650e82d79bd2a76af3b7554d642 | 2,457 | py | Python | dlf/layers/l2_normalization.py | scheckmedia/dl-framework | 8fea39e166fda0ff8fa51696831bf5cb42f3ed10 | [
"Apache-2.0"
] | null | null | null | dlf/layers/l2_normalization.py | scheckmedia/dl-framework | 8fea39e166fda0ff8fa51696831bf5cb42f3ed10 | [
"Apache-2.0"
] | null | null | null | dlf/layers/l2_normalization.py | scheckmedia/dl-framework | 8fea39e166fda0ff8fa51696831bf5cb42f3ed10 | [
"Apache-2.0"
] | null | null | null | # borrowed from https://github.com/pierluigiferrari/ssd_keras/blob/master/keras_layers/keras_layer_L2Normalization.py
'''
A custom Keras layer to perform L2-normalization.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import InputSpec, Layer
class L2Normalization(Layer):
    '''
    Performs L2 normalization on the input tensor with a learnable scaling parameter
    as described in the paper "Parsenet: Looking Wider to See Better" (see references)
    and as used in the original SSD model.

    # Args:
        gamma_init (int): The initial scaling parameter. Defaults to 20 following the
            SSD paper.

    # Input shape:
        4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
        or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.

    # Returns:
        The scaled tensor. Same shape as the input tensor.

    # References:
        http://cs.unc.edu/~wliu/papers/parsenet.pdf
    '''

    def __init__(self, gamma_init=20, **kwargs):
        # Channels-last layout: normalize along the last axis.
        self.axis = 3
        # Initial value for the learnable per-channel scale.
        self.gamma_init = gamma_init
        super(L2Normalization, self).__init__(**kwargs)

    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        # One scale value per channel, all starting at gamma_init.
        gamma = self.gamma_init * np.ones((input_shape[self.axis],))
        self.gamma = tf.keras.backend.variable(
            gamma, name='{}_gamma'.format(self.name))
        # NOTE(review): appending to `trainable_weights` works on older
        # tf.keras versions where it is a plain list; on newer TF releases
        # it is a read-only property and `add_weight` would be required —
        # confirm against the pinned TF version.
        self.trainable_weights.append(self.gamma)
        super(L2Normalization, self).build(input_shape)

    def call(self, x, mask=None):
        # Unit-normalize along the channel axis, then rescale by the
        # learned per-channel gamma.
        output = tf.nn.l2_normalize(x, self.axis)
        return output * self.gamma

    def get_config(self):
        # Serialize gamma_init so the layer can be rebuilt from config.
        config = {
            'gamma_init': self.gamma_init
        }
        base_config = super(L2Normalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
4c74846c6783577cb99cdca994b3d0e383071302 | 1,079 | py | Python | tests/proprietary_sim_types_test.py | moellep/sirepo | 4fd8b88b3b95921d50e6b225c02a46c00da16a27 | [
"Apache-2.0"
] | null | null | null | tests/proprietary_sim_types_test.py | moellep/sirepo | 4fd8b88b3b95921d50e6b225c02a46c00da16a27 | [
"Apache-2.0"
] | null | null | null | tests/proprietary_sim_types_test.py | moellep/sirepo | 4fd8b88b3b95921d50e6b225c02a46c00da16a27 | [
"Apache-2.0"
] | 2 | 2020-10-27T20:01:23.000Z | 2020-11-06T23:35:05.000Z | # -*- coding: utf-8 -*-
u"""test proprietary_sim_types
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
import os
import pytest
def setup_module(module):
    """Pytest hook: enable the proprietary 'myapp' sim type via env config."""
    os.environ['SIREPO_FEATURE_CONFIG_PROPRIETARY_SIM_TYPES'] = 'myapp'
def test_myapp(auth_fc):
    """Exercise guest + email login flows for a proprietary sim type and
    verify that listing its simulations is forbidden (403)."""
    from pykern import pkunit
    from pykern.pkdebug import pkdlog, pkdexc, pkdp

    fc = auth_fc
    sim_type = fc.sr_sim_type

    # Log in as a guest, touch the sim data, then log out again.
    fc.sr_get('authGuestLogin', {'simulation_type': sim_type})
    fc.sr_sim_data()
    fc.sr_logout()

    # Start an email login, confirm it, and complete registration.
    resp = fc.sr_post(
        'authEmailLogin',
        {'email': 'a@b.c', 'simulationType': sim_type},
    )
    fc.sr_email_confirm(fc, resp)
    fc.sr_post(
        'authCompleteRegistration',
        {'displayName': 'abc', 'simulationType': sim_type},
    )

    # Listing simulations for the proprietary type must be forbidden.
    resp = fc.sr_post(
        'listSimulations',
        {'simulationType': sim_type},
        raw_response=True,
    )
    pkunit.pkeq(403, resp.status_code)
4a919dd8c8480c239df5304555afb1f2ce1c3a3d | 4,758 | py | Python | eth_tester/validation/outbound.py | onyb/eth-tester | 871932620a72862b814a57a9576fd3bbad5030d5 | [
"MIT"
] | null | null | null | eth_tester/validation/outbound.py | onyb/eth-tester | 871932620a72862b814a57a9576fd3bbad5030d5 | [
"MIT"
] | 8 | 2020-06-05T21:36:23.000Z | 2022-02-12T12:24:00.000Z | eth_tester/validation/outbound.py | onyb/eth-tester | 871932620a72862b814a57a9576fd3bbad5030d5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from eth_utils import (
is_canonical_address,
)
from eth_utils.toolz import (
partial,
)
from eth_tester.constants import (
UINT256_MAX,
UINT2048_MAX,
)
from eth_tester.exceptions import (
ValidationError,
)
from .common import (
if_not_null,
if_not_create_address,
validate_any,
validate_array,
validate_bytes,
validate_positive_integer,
validate_dict,
validate_uint256,
)
def validate_32_byte_string(value):
    """Validate that *value* is a bytestring of exactly 32 bytes."""
    validate_bytes(value)
    length = len(value)
    if length != 32:
        raise ValidationError(
            "Must be of length 32. Got: {0} of length {1}".format(value, length)
        )
# A block hash is simply a 32-byte string.
validate_block_hash = validate_32_byte_string
def validate_nonce(value):
    """Validate that *value* is an 8-byte nonce bytestring."""
    validate_bytes(value)
    if len(value) != 8:
        raise ValidationError(
            # Fixed typo in the error message: "lenght" -> "length".
            "Must be of length 8. Got: {0} of length {1}".format(value, len(value))
        )
def validate_logs_bloom(value):
    """Validate that *value* fits in a 2048-bit unsigned integer."""
    validate_positive_integer(value)
    if not value <= UINT2048_MAX:
        raise ValidationError("Value exceeds 2048 bit integer size: {0}".format(value))
def validate_canonical_address(value):
    """Validate that *value* is a canonical (20-byte) address."""
    validate_bytes(value)
    if is_canonical_address(value):
        return
    raise ValidationError("Value must be a 20 byte string")
def validate_log_entry_type(value):
    """Validate that *value* is one of the two log entry types."""
    allowed = ("pending", "mined")
    if value not in allowed:
        raise ValidationError("Log entry type must be one of 'pending' or 'mined'")
# Per-key validators applied to an outbound (formatted) log entry dict.
LOG_ENTRY_VALIDATORS = {
    "type": validate_log_entry_type,
    "log_index": validate_positive_integer,
    "transaction_index": if_not_null(validate_positive_integer),
    "transaction_hash": validate_32_byte_string,
    "block_hash": if_not_null(validate_32_byte_string),
    "block_number": if_not_null(validate_positive_integer),
    "address": validate_canonical_address,
    "data": validate_bytes,
    "topics": partial(validate_array, validator=validate_32_byte_string),
}
# Validate a whole log entry dict by running each key's validator.
validate_log_entry = partial(validate_dict, key_validators=LOG_ENTRY_VALIDATORS)
def validate_signature_v(value):
    """Validate the `v` component of a transaction signature."""
    validate_positive_integer(value)
    # Accept the legacy values 0/1/27/28, or values in [35, UINT256_MAX].
    if value in (0, 1, 27, 28) or 35 <= value <= UINT256_MAX:
        return
    raise ValidationError("The `v` portion of the signature must be 0, 1, 27, 28 or >= 35")
# Per-key validators applied to an outbound (formatted) transaction dict.
TRANSACTION_VALIDATORS = {
    "hash": validate_32_byte_string,
    "nonce": validate_uint256,
    "block_hash": if_not_null(validate_32_byte_string),
    "block_number": if_not_null(validate_positive_integer),
    "transaction_index": if_not_null(validate_positive_integer),
    "from": validate_canonical_address,
    "to": if_not_create_address(validate_canonical_address),
    "value": validate_uint256,
    "gas": validate_uint256,
    "gas_price": validate_uint256,
    "data": validate_bytes,
    "v": validate_signature_v,
    "r": validate_uint256,
    "s": validate_uint256,
}
# Validate a whole transaction dict by running each key's validator.
validate_transaction = partial(validate_dict, key_validators=TRANSACTION_VALIDATORS)
# Per-key validators applied to an outbound (formatted) receipt dict.
RECEIPT_VALIDATORS = {
    "transaction_hash": validate_32_byte_string,
    "transaction_index": if_not_null(validate_positive_integer),
    "block_number": if_not_null(validate_positive_integer),
    "block_hash": if_not_null(validate_32_byte_string),
    "cumulative_gas_used": validate_positive_integer,
    "gas_used": validate_positive_integer,
    "contract_address": if_not_null(validate_canonical_address),
    "logs": partial(validate_array, validator=validate_log_entry),
    "state_root": validate_bytes,
}
# Validate a whole receipt dict by running each key's validator.
validate_receipt = partial(validate_dict, key_validators=RECEIPT_VALIDATORS)
# Per-key validators applied to an outbound (formatted) block dict.
BLOCK_VALIDATORS = {
    "number": validate_positive_integer,
    "hash": validate_block_hash,
    "parent_hash": validate_block_hash,
    "nonce": validate_nonce,
    "sha3_uncles": validate_32_byte_string,
    "logs_bloom": validate_logs_bloom,
    "transactions_root": validate_32_byte_string,
    "receipts_root": validate_32_byte_string,
    "state_root": validate_32_byte_string,
    "miner": validate_canonical_address,
    "difficulty": validate_positive_integer,
    "total_difficulty": validate_positive_integer,
    "size": validate_positive_integer,
    "extra_data": validate_32_byte_string,
    "gas_limit": validate_positive_integer,
    "gas_used": validate_positive_integer,
    "timestamp": validate_positive_integer,
    # "transactions" may be either a list of hashes or a list of full
    # transaction dicts, so accept whichever validator passes.
    "transactions": partial(
        validate_any,
        validators=(
            partial(validate_array, validator=validate_32_byte_string),
            partial(validate_array, validator=validate_transaction),
        ),
    ),
    "uncles": partial(validate_array, validator=validate_32_byte_string),
}
# Validate a whole block dict by running each key's validator.
validate_block = partial(validate_dict, key_validators=BLOCK_VALIDATORS)

# An accounts listing is an array of canonical addresses.
validate_accounts = partial(validate_array, validator=validate_canonical_address)
| 29.7375 | 95 | 0.738546 |
70df7d1a00c870a5d7b839c1805469474ab1ed7f | 13,672 | py | Python | pipeline/Step2/AlexNet_torch/train.py | dyning/AlexNet-Prod | 54de9dfcf540997ff227bd92d0c7a73dc73c45aa | [
"Apache-2.0"
] | 17 | 2021-08-11T13:42:03.000Z | 2022-03-30T03:50:27.000Z | pipeline/Step2/AlexNet_torch/train.py | dyning/AlexNet-Prod | 54de9dfcf540997ff227bd92d0c7a73dc73c45aa | [
"Apache-2.0"
] | 11 | 2021-08-12T06:29:17.000Z | 2021-12-23T03:15:39.000Z | pipeline/Step2/AlexNet_torch/train.py | dyning/AlexNet-Prod | 54de9dfcf540997ff227bd92d0c7a73dc73c45aa | [
"Apache-2.0"
] | 17 | 2021-08-11T14:12:38.000Z | 2022-03-30T03:50:31.000Z | import datetime
import os
import time
import torch
import torch.utils.data
from torch import nn
import torchvision
import presets
import utils
try:
from apex import amp
except ImportError:
amp = None
import sys
sys.path.insert(0, ".")
import numpy as np
from reprod_log import ReprodLogger
def train_one_epoch(model,
                    criterion,
                    optimizer,
                    data_loader,
                    device,
                    epoch,
                    print_freq,
                    apex=False):
    """Train *model* for one epoch over *data_loader*.

    Logs loss, top-1/top-5 accuracy, learning rate and images/sec through
    a MetricLogger. When *apex* is True, backprop goes through NVIDIA apex
    AMP loss scaling.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter(
        'lr', utils.SmoothedValue(
            window_size=1, fmt='{value}'))
    metric_logger.add_meter(
        'img/s', utils.SmoothedValue(
            window_size=10, fmt='{value}'))

    header = 'Epoch: [{}]'.format(epoch)
    for image, target in metric_logger.log_every(data_loader, print_freq,
                                                 header):
        start_time = time.time()
        image, target = image.to(device), target.to(device)
        output = model(image)
        loss = criterion(output, target)

        optimizer.zero_grad()
        if apex:
            # Scale the loss to avoid fp16 gradient underflow.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()

        acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
        batch_size = image.shape[0]
        metric_logger.update(
            loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
        # Throughput for this batch.
        metric_logger.meters['img/s'].update(batch_size /
                                             (time.time() - start_time))
def evaluate(model, criterion, data_loader, device, print_freq=100):
    """Evaluate *model* on *data_loader*; return the global-average top-1
    accuracy (synchronized across processes in distributed mode)."""
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    with torch.no_grad():
        for image, target in metric_logger.log_every(data_loader, print_freq,
                                                     header):
            image = image.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            output = model(image)
            loss = criterion(output, target)

            acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
            # FIXME need to take into account that the datasets
            # could have been padded in distributed setup
            batch_size = image.shape[0]
            metric_logger.update(loss=loss.item())
            metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
            metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()

    print(' * Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}'.format(
        top1=metric_logger.acc1, top5=metric_logger.acc5))
    return metric_logger.acc1.global_avg
def _get_cache_path(filepath):
    """Map a dataset path to a stable per-path cache file under ~/.torch."""
    import hashlib

    # A sha1 of the path keeps the cache filename short and collision-safe.
    digest = hashlib.sha1(filepath.encode()).hexdigest()
    cache_path = os.path.join(
        "~", ".torch", "vision", "datasets", "imagefolder",
        digest[:10] + ".pt")
    return os.path.expanduser(cache_path)
def load_data(traindir, valdir, args):
    """Build train/val ImageFolder datasets (optionally cached) and samplers.

    Args:
        traindir: path of the ImageFolder-style training directory.
        valdir: path of the ImageFolder-style validation directory.
        args: parsed CLI args; uses args.model, args.cache_dataset,
            args.distributed and the optional auto_augment / random_erase
            attributes.

    Returns:
        (dataset, dataset_test, train_sampler, test_sampler)
    """
    # Data loading code
    print("Loading data")
    # inception_v3 expects 299x299 crops; everything else uses 224x224.
    resize_size, crop_size = (342, 299) if args.model == 'inception_v3' else (
        256, 224)

    print("Loading training data")
    st = time.time()
    cache_path = _get_cache_path(traindir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        print("Loading dataset_train from {}".format(cache_path))
        dataset, _ = torch.load(cache_path)
    else:
        auto_augment_policy = getattr(args, "auto_augment", None)
        random_erase_prob = getattr(args, "random_erase", 0.0)
        dataset = torchvision.datasets.ImageFolder(
            traindir,
            presets.ClassificationPresetTrain(
                crop_size=crop_size,
                auto_augment_policy=auto_augment_policy,
                random_erase_prob=random_erase_prob))
        if args.cache_dataset:
            print("Saving dataset_train to {}".format(cache_path))
            utils.mkdir(os.path.dirname(cache_path))
            # Store the source dir alongside the dataset for traceability.
            utils.save_on_master((dataset, traindir), cache_path)
    print("Took", time.time() - st)

    print("Loading validation data")
    cache_path = _get_cache_path(valdir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        print("Loading dataset_test from {}".format(cache_path))
        dataset_test, _ = torch.load(cache_path)
    else:
        dataset_test = torchvision.datasets.ImageFolder(
            valdir,
            presets.ClassificationPresetEval(
                crop_size=crop_size, resize_size=resize_size))
        if args.cache_dataset:
            print("Saving dataset_test to {}".format(cache_path))
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset_test, valdir), cache_path)

    print("Creating data loaders")
    if args.distributed:
        # Shard batches across processes in distributed mode.
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    return dataset, dataset_test, train_sampler, test_sampler
def main(args):
    """Entry point: build data, model and optimizer, then train or evaluate.

    Returns the top-1 accuracy when --test-only is given; otherwise trains
    for args.epochs (checkpointing each epoch) and returns None.
    """
    if args.apex and amp is None:
        raise RuntimeError(
            "Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
            "to enable mixed-precision training.")

    if args.output_dir:
        utils.mkdir(args.output_dir)

    # Sets up the process group / args.gpu when launched distributed.
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Fixed input sizes -> let cudnn pick the fastest conv algorithms.
    torch.backends.cudnn.benchmark = True

    train_dir = os.path.join(args.data_path, 'train')
    val_dir = os.path.join(args.data_path, 'val')
    dataset, dataset_test, train_sampler, test_sampler = load_data(
        train_dir, val_dir, args)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.workers,
        pin_memory=True)

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=args.batch_size,
        sampler=test_sampler,
        num_workers=args.workers,
        pin_memory=True)

    print("Creating model")
    model = torchvision.models.__dict__[args.model](pretrained=args.pretrained)
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    criterion = nn.CrossEntropyLoss()

    opt_name = args.opt.lower()
    if opt_name == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif opt_name == 'rmsprop':
        optimizer = torch.optim.RMSprop(
            model.parameters(),
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            eps=0.0316,
            alpha=0.9)
    else:
        raise RuntimeError(
            "Invalid optimizer {}. Only SGD and RMSprop are supported.".format(
                args.opt))

    if args.apex:
        # Wrap model/optimizer for apex mixed-precision training.
        model, optimizer = amp.initialize(
            model, optimizer, opt_level=args.apex_opt_level)

    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        # Keep a handle to the unwrapped model for checkpointing.
        model_without_ddp = model.module

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        # return top1 for record
        top1 = evaluate(model, criterion, data_loader_test, device=device)
        return top1

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards differently each epoch.
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, device,
                        epoch, args.print_freq, args.apex)
        lr_scheduler.step()
        evaluate(model, criterion, data_loader_test, device=device)
        if args.output_dir:
            checkpoint = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'args': args
            }
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
            utils.save_on_master(
                checkpoint, os.path.join(args.output_dir, 'checkpoint.pth'))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    # NOTE(review): nothing is returned on the training path, so the
    # __main__ block's `top1 = main(args)` receives None unless
    # --test-only is used — confirm intended usage.
def get_args_parser(add_help=True):
    """Build the argparse parser for classification training options."""
    import argparse
    parser = argparse.ArgumentParser(
        description='PyTorch Classification Training', add_help=add_help)

    # Data / model / device selection.
    parser.add_argument(
        '--data-path', default='/paddle/data/ILSVRC2012_torch', help='dataset')
    parser.add_argument('--model', default='alexnet', help='model')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=32, type=int)
    parser.add_argument(
        '--epochs',
        default=90,
        type=int,
        metavar='N',
        help='number of total epochs to run')
    parser.add_argument(
        '-j',
        '--workers',
        default=16,
        type=int,
        metavar='N',
        help='number of data loading workers (default: 16)')
    # Optimizer hyper-parameters.
    parser.add_argument('--opt', default='sgd', type=str, help='optimizer')
    parser.add_argument(
        '--lr', default=0.00125, type=float, help='initial learning rate')
    parser.add_argument(
        '--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument(
        '--wd',
        '--weight-decay',
        default=1e-4,
        type=float,
        metavar='W',
        help='weight decay (default: 1e-4)',
        dest='weight_decay')
    parser.add_argument(
        '--lr-step-size',
        default=30,
        type=int,
        help='decrease lr every step-size epochs')
    parser.add_argument(
        '--lr-gamma',
        default=0.1,
        type=float,
        help='decrease lr by a factor of lr-gamma')
    parser.add_argument(
        '--print-freq', default=10, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='.', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument(
        '--start-epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument(
        "--cache-dataset",
        dest="cache_dataset",
        help="Cache the datasets for quicker initialization. It also serializes the transforms",
        action="store_true", )
    parser.add_argument(
        "--sync-bn",
        dest="sync_bn",
        help="Use sync batch norm",
        action="store_true", )
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true", )
    parser.add_argument(
        "--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true", )
    parser.add_argument(
        '--auto-augment',
        default=None,
        help='auto augment policy (default: None)')
    parser.add_argument(
        '--random-erase',
        default=0.0,
        type=float,
        help='random erasing probability (default: 0.0)')

    # Mixed precision training parameters
    parser.add_argument(
        '--apex',
        action='store_true',
        help='Use apex for mixed precision training')
    parser.add_argument(
        '--apex-opt-level',
        default='O1',
        type=str,
        help='For apex mixed precision training'
        'O0 for FP32 training, O1 for mixed precision training.'
        'For further detail, see https://github.com/NVIDIA/apex/tree/master/examples/imagenet'
    )

    # distributed training parameters
    parser.add_argument(
        '--world-size',
        default=1,
        type=int,
        help='number of distributed processes')
    parser.add_argument(
        '--dist-url',
        default='env://',
        help='url used to set up distributed training')

    return parser
if __name__ == "__main__":
    args = get_args_parser().parse_args()
    # NOTE(review): main() only returns a value on the --test-only path;
    # otherwise top1 is None and np.array([None]) is logged — confirm.
    top1 = main(args)

    # Record the metric for reproducibility comparison via reprod_log.
    reprod_logger = ReprodLogger()
    reprod_logger.add("top1", np.array([top1]))
    reprod_logger.save("metric_torch.npy")
| 34.966752 | 97 | 0.61498 |
145fea4e5825a44d4d704f91d6d72733d49e0117 | 891 | py | Python | Tests/pyc/winforms_hw.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | 2 | 2019-09-21T22:22:30.000Z | 2020-05-09T12:45:51.000Z | Tests/pyc/winforms_hw.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | null | null | null | Tests/pyc/winforms_hw.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | 1 | 2019-09-18T05:37:46.000Z | 2019-09-18T05:37:46.000Z | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import clr

# Load the WinForms assembly so System.Windows.Forms becomes importable
# (IronPython-specific CLR bridge).
clr.AddReference("System.Windows.Forms")
from System.Windows.Forms import Form

# Show a minimal modal window to prove the compiled script runs.
form = Form(Text="(Compiled WinForms) Hello World")
form.ShowDialog()
| 40.5 | 97 | 0.607183 |
8fe6c074cdf41f2a76fe84e131a1a65425dbb98c | 44 | py | Python | growth/microscopy/__init__.py | sebastianbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | 1 | 2022-03-01T14:48:14.000Z | 2022-03-01T14:48:14.000Z | growth/microscopy/__init__.py | sbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | null | null | null | growth/microscopy/__init__.py | sbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | null | null | null | from .microscopy import SyntheticMicroscopy
| 22 | 43 | 0.886364 |
1ef6cc8db6187ede8a0bc813b50526b049b0ebe1 | 5,194 | py | Python | distracting_control/camera_test.py | TmacAaron/mydrq | 61c7f8cc893d999eb4147f8df4b23f860d8e9ffe | [
"MIT"
] | null | null | null | distracting_control/camera_test.py | TmacAaron/mydrq | 61c7f8cc893d999eb4147f8df4b23f860d8e9ffe | [
"MIT"
] | null | null | null | distracting_control/camera_test.py | TmacAaron/mydrq | 61c7f8cc893d999eb4147f8df4b23f860d8e9ffe | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for camera movement code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from dm_control import suite as dm_control_suite
from dm_control.suite import cartpole
from dm_control.suite.wrappers import pixels
import numpy as np
import camera
def get_camera_params(domain_name, scale, dynamic):
    """Build camera-distraction keyword args, all scaled by *scale*.

    When *dynamic* is False every velocity/noise term is zero, so the
    camera pose is perturbed once but does not move over time.
    """
    quarter_turn = np.pi / 2 * scale
    return {
        'vertical_delta': quarter_turn,
        'horizontal_delta': quarter_turn,
        # Limit camera to -90 / 90 degree rolls.
        'roll_delta': quarter_turn,
        'vel_std': .1 * scale if dynamic else 0.,
        'max_vel': .4 * scale if dynamic else 0.,
        'roll_std': np.pi / 300 * scale if dynamic else 0.,
        'max_roll_vel': np.pi / 50 * scale if dynamic else 0.,
        # Allow the camera to zoom in at most 50%.
        'max_zoom_in_percent': .5 * scale,
        # Allow the camera to zoom out at most 200%.
        'max_zoom_out_percent': 1.5 * scale,
        'limit_to_upper_quadrant': 'reacher' not in domain_name,
    }
def distraction_wrap(env, domain_name):
    """Wrap *env* with a zero-magnitude (static) dynamic camera distraction."""
    kwargs = get_camera_params(
        domain_name=domain_name, scale=0.0, dynamic=True)
    return camera.DistractingCameraEnv(env, camera_id=0, **kwargs)
class CameraTest(absltest.TestCase):
    """Tests for the camera distraction wrapper and its helper math."""

    def test_dynamic(self):
        """A dynamic camera env still renders frames of the expected shape."""
        camera_kwargs = get_camera_params(
            domain_name='cartpole', scale=0.1, dynamic=True)
        env = cartpole.swingup()
        env = camera.DistractingCameraEnv(env, camera_id=0, **camera_kwargs)
        env = pixels.Wrapper(env, render_kwargs={'camera_id': 0})

        action_spec = env.action_spec()
        time_step = env.reset()
        frames = []
        # Collect up to 10 frames by acting randomly.
        while not time_step.last() and len(frames) < 10:
            action = np.random.uniform(
                action_spec.minimum, action_spec.maximum, size=action_spec.shape)
            time_step = env.step(action)
            frames.append(time_step.observation['pixels'])
        self.assertEqual(frames[0].shape, (240, 320, 3))

    def test_get_lookat_mat(self):
        """The look-at rotation maps the camera forward axis onto the agent."""
        agent_pos = np.array([1., -3., 4.])
        cam_position = np.array([0., 0., 0.])
        mat = camera.get_lookat_xmat_no_roll(agent_pos, cam_position)
        # Normalize so the rotated forward vector can be compared directly.
        agent_pos = agent_pos / np.sqrt(np.sum(agent_pos**2.))
        start = np.array([0., 0., -1.])  # Cam starts looking down Z.
        out = np.dot(mat.reshape((3, 3)), start)
        self.assertTrue(np.isclose(np.max(np.abs(out - agent_pos)), 0.))

    def test_spherical_conversion(self):
        """cart2sphere and sphere2cart are inverses of each other."""
        cart = np.array([1.4, -2.8, 3.9])
        sphere = camera.cart2sphere(cart)
        cart2 = camera.sphere2cart(sphere)
        self.assertTrue(np.isclose(np.max(np.abs(cart2 - cart)), 0.))

    def test_envs_same(self):
        # Test that the camera augmentations with magnitude 0 gives the same results
        # as when no camera augmentations are used.
        render_kwargs = {'width': 84, 'height': 84, 'camera_id': 0}
        domain_and_task = [('cartpole', 'swingup'),
                           ('reacher', 'easy'),
                           ('finger', 'spin'),
                           ('cheetah', 'run'),
                           ('ball_in_cup', 'catch'),
                           ('walker', 'walk')]

        for (domain, task) in domain_and_task:
            seed = 42
            # Two unwrapped baselines plus one scale-0 distraction wrapper.
            envs = [('baseline',
                     pixels.Wrapper(
                         dm_control_suite.load(
                             domain, task, task_kwargs={'random': seed}),
                         render_kwargs=render_kwargs)),
                    ('no-wrapper',
                     pixels.Wrapper(
                         dm_control_suite.load(
                             domain, task, task_kwargs={'random': seed}),
                         render_kwargs=render_kwargs)),
                    ('w/-camera_kwargs',
                     pixels.Wrapper(
                         distraction_wrap(
                             dm_control_suite.load(
                                 domain, task, task_kwargs={'random': seed}), domain),
                         render_kwargs=render_kwargs))]

            frames = []
            for _, env in envs:
                # Identical RNG per env variant so action sequences match.
                random_state = np.random.RandomState(42)
                action_spec = env.action_spec()
                time_step = env.reset()
                frames.append([])
                while not time_step.last() and len(frames[-1]) < 20:
                    action = random_state.uniform(
                        action_spec.minimum, action_spec.maximum, size=action_spec.shape)
                    time_step = env.step(action)
                    frame = time_step.observation['pixels'][:, :, 0:3]
                    frames[-1].append(frame)

            frames_np = np.array(frames)
            # Every variant must render pixel-identical frames.
            for i in range(1, len(envs)):
                difference = np.mean(abs(frames_np[0] - frames_np[i]))
                self.assertEqual(difference, 0.)
if __name__ == '__main__':
    # Run the absl test runner over this module's test cases.
    absltest.main()
| 38.474074 | 80 | 0.633616 |
5e1d8ce57af74701fe0c3581f5e442db7a073017 | 5,574 | py | Python | script/spider/bedtime_poem_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
] | null | null | null | script/spider/bedtime_poem_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
] | null | null | null | script/spider/bedtime_poem_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import re
import os
import sys
import time
from util import Profile, write_poem, remove_tmp_all
# Root URL of the bedtime-poem site being scraped.
HOME_PAGE = 'https://bedtimepoem.com/'
def get_total_page_num():
    """Scrape the home page pager and return the total number of pages."""
    response = requests.get(HOME_PAGE)
    soup = BeautifulSoup(response.text, features='lxml')
    page_info = soup.find('small', class_='page-count').text
    # The counter reads like "1 / 42"; the last space-separated token is
    # the page total.
    return int(page_info.split(' ')[-1])
def read_poems(page_num):
    """Fetch one listing page and resolve every poem linked from it.

    Returns the list of poem URLs that failed to resolve, or None if the
    listing page itself could not be fetched.
    """
    failed_urls = []
    url = HOME_PAGE + 'page/' + str(page_num)
    response = requests.get(url)
    # BUG FIX: was `response.status_code is not 200` — identity comparison
    # against an int literal is unreliable/non-portable (SyntaxWarning on
    # Python 3.8+); use value inequality instead.
    if response.status_code != 200:
        print('http error: page=' + str(page_num))
        return
    soup = BeautifulSoup(response.text, features='lxml')
    posts = soup.find_all('div', class_='post-image fitvid')
    hrefs = [post.find('a').get('href') for post in posts]
    for href in hrefs:
        resolve_poem(href, failed_urls)
        # Be polite to the server between requests.
        time.sleep(0.2)
    return failed_urls
# Matches zero or more <p>...</p> groups (currently unused by resolve_content).
CONTENT_PATTERN = re.compile(r'(<p>(.*?)</p>)*')


def resolve_content(ele):
    '''
    Resolve the content of the poem
    '''
    # Convert paragraph tags to line breaks, drop closing tags and <br />,
    # then normalize newlines to \r\n.
    # NOTE(review): the final two replace() calls appear to substitute
    # special space characters (e.g. NBSP / full-width space) with a plain
    # space — confirm the literal characters in the original encoding.
    ele = ele.replace('<p>', '\n').replace('<p align="justify">', '\n').replace(
        '</p>', '').replace('<br />', '').strip().replace('\n', '\r\n').replace(' ', ' ').replace(' ', ' ')
    # print(ele)
    return ele
def resolve_title(ele):
    """Strip bold markup from a title and unwrap an <a> tag if present."""
    stripped = ele
    for tag in ('<strong>', '</strong>', '<b>', '</b>'):
        stripped = stripped.replace(tag, '')
    stripped = stripped.strip()
    # If the title is wrapped in an anchor, return only the anchor text.
    matches = re.findall(r'<a.*>(.*)</a>', stripped)
    return matches[0] if matches else stripped
# Patterns that locate the "作者 / [country] name" author line in a post.
AUTHOR_PATTERN = re.compile(r'<p( data-page="0")?>作者\s*/\s*\[.*?\](.*)</p>')
# Variant with an extra <br />-separated line after the author.
AUTHOR_PATTERN1 = re.compile(
    r'<p( data-page="0")?>作者\s*/\s*\[.*?\](.*)<br />\n(.*?)</p>')
# Variant where the author line is wrapped in a styled <span>.
AUTHOR_PATTERN2 = re.compile(
    r'<p><span.*?>作者\s*/\s*\[.*?\](.*)</span><br />\n(.*?)</p>')
# Loose fallback: any "作者 / [...]" fragment between tags.
AUTHOR_PATTERN3 = re.compile(
    r'>\s*作者\s*/\s*\[.*?\]\s*(.*?)<'
)
# print(AUTHOR_PATTERN3.findall('<p align="justify"> 作者 / [清] 纳兰性德</p>'))
def is_not_modern_chinese_poet(text):
    """Return the poet's name when the page credits a classical
    (dynasty-tagged) poet, or None when no such author line is found.

    Tries the AUTHOR_PATTERN variants from most to least specific; the first
    variant that matches wins.
    """
    # print(text)
    authors = AUTHOR_PATTERN.findall(text)
    if len(authors):
        author = authors[-1]
        if isinstance(author, tuple):
            # findall returns group tuples; the name is the last group
            author = author[-1]
        return author.strip()
    authors = AUTHOR_PATTERN1.findall(text)
    if not len(authors):
        authors = AUTHOR_PATTERN2.findall(text)
    # print(authors)
    if len(authors):
        author = authors[0]
        if isinstance(author, tuple):
            # second-to-last group is the author line (the last is the line
            # after the <br />)
            author = author[-2]
        else:
            # NOTE(review): this branch indexes the match *list* with [-2],
            # unlike the tuple branch above — looks inconsistent; confirm
            # intended behavior for single-group matches.
            author = authors[-2]
        return author.strip()
    authors = AUTHOR_PATTERN3.findall(text)
    if len(authors):
        return authors[0].strip()
    return None
# Only match poems writen by modern poets
# Three page layouts, tried in order by resolve_poem(): a title heading after
# a 题图 (cover image) credit, a plain <h2> title, or a styled <h2> with
# spans. resolve_poem() uses groups [0] (title), [1] (body) and [-1] (author).
POEM_PATTERN = re.compile(
    r'题图.*<h[12]>(.*)</h[12]>(.*)<p( align="justify")?( data-page="0")?>作者\s*/\s*(.*?)</p>', re.S)
POEM_PATTERN1 = re.compile(
    r'<h2>(.*?)</h2>(.*)<p( align="justify")?( data-page="0")?>作者\s*/\s*(.*?)</p>', re.S)
POEM_PATTERN2 = re.compile(
    r'<h2><a href=".*"><img.*></a><span style="color: #000000;">(.*?)</span></h2>(.*)<p><span style="color: #000000;">作者\s*/\s*(.*?)</span><br />.*?</p>', re.S
)
def resolve_poem(url, failed_urls):
    """Fetch one poem page and persist it unless it is by a classical poet.

    Args:
        url: absolute URL of the poem page.
        failed_urls: list mutated in place; *url* is appended when the page
            cannot be parsed by any POEM_PATTERN variant.

    Returns:
        True when a poem was successfully parsed and written, False otherwise.
    """
    response = requests.get(url)
    # BUGFIX: value comparison, not identity — `is not 200` is unreliable
    # for ints.
    if response.status_code != 200:
        print('http error: url=' + url)
        return False
    text = response.text
    bad_author = is_not_modern_chinese_poet(text)
    if bad_author:
        # BUGFIX: message was garbled ('not mofailed_urlsdern ...') by a
        # botched rename.
        print('not modern chinese poet: ' + bad_author)
        return False
    # try the page layouts from most to least specific
    poem_elements = POEM_PATTERN.findall(text)
    if not len(poem_elements):
        poem_elements = POEM_PATTERN1.findall(text)
    if not len(poem_elements):
        poem_elements = POEM_PATTERN2.findall(text)
    if len(poem_elements):
        poem = poem_elements[0]
        if len(poem) >= 3:
            title = resolve_title(poem[0])
            content = resolve_content(poem[1])
            author = poem[-1].split('<br />')[0].strip()
            # remove suffixes (ASCII and full-width parentheses/commas)
            author = author.split('(')[0].split(
                '(')[0].split(',')[0].split(',')[0]
            # a remaining separator dot or dynasty bracket means classical
            if '·' in author or '•' in author or '[' in author or '[' in author:
                print('not modern chinese poet: ' + author)
            else:
                print('PARSED: ' + url + ' ' + title + ' @ ' + author)
                write_poem(
                    Profile(href=url, author=author, title=title), content)
                return True
        else:
            print(len(poem))
            print(poem)
    else:
        print("Parsed failed: " + url)
        if url not in failed_urls:
            failed_urls.append(url)
    return False
def main():
    """Crawl every listing page, optionally resuming from ``argv[1]``.

    Per-page parse failures are appended to ``tmp/failed_urls.txt`` and the
    combined list is written once at the end via write_poem().
    """
    argv = sys.argv
    start_page = 1
    if len(argv) > 1:
        try:
            start_page = int(argv[1])
        except ValueError:
            print("Invalid start page, will start from 1: " + argv[1])
    page_total = get_total_page_num()
    print('page_total=' + str(page_total))
    if start_page == 1:
        # fresh run: clear any previous temporary output
        remove_tmp_all()
    all_failed_urls = []
    for page_num in range(start_page, page_total + 1):
        # BUGFIX: read_poems() may return None on HTTP errors; guard before
        # iterating so one bad page does not abort the crawl.
        failed_urls = read_poems(page_num) or []
        with open(os.path.join('tmp', 'failed_urls.txt'), 'a') as file:
            for f_url in failed_urls:
                file.write(f_url + '\r\n')
        print('Parsed page: ' + str(page_num))
        all_failed_urls.extend(failed_urls)
    # BUGFIX: summarise failures from *all* pages, not just the last one
    write_poem(Profile(author='failed', title='urls',
                       href=''), '\r\n'.join(all_failed_urls))
# Run the crawl only when executed as a script — an unconditional main()
# would trigger a full site crawl on import.
if __name__ == '__main__':
    main()
# read_poems(8)
# resolve_poem('https://bedtimepoem.com/archives/11990', [])
| 30.12973 | 159 | 0.569609 |
7da21f289e105624fe9d2c07905b06b95b6a1d99 | 6,328 | py | Python | pl_bolts/models/self_supervised/moco/transforms.py | yangyangkiki/pytorch-lightning-bolts | 01f1a936815262ec810551c56f5ac87198be7c3f | [
"Apache-2.0"
] | 2 | 2021-04-23T11:02:19.000Z | 2021-04-23T11:22:21.000Z | pl_bolts/models/self_supervised/moco/transforms.py | yangyangkiki/pytorch-lightning-bolts | 01f1a936815262ec810551c56f5ac87198be7c3f | [
"Apache-2.0"
] | 5 | 2020-11-28T20:49:22.000Z | 2021-01-09T13:50:22.000Z | pl_bolts/models/self_supervised/moco/transforms.py | yangyangkiki/pytorch-lightning-bolts | 01f1a936815262ec810551c56f5ac87198be7c3f | [
"Apache-2.0"
] | 1 | 2021-05-23T14:06:53.000Z | 2021-05-23T14:06:53.000Z | import random
from pl_bolts.transforms.dataset_normalizations import (
cifar10_normalization,
imagenet_normalization,
stl10_normalization,
)
from pl_bolts.utils import _PIL_AVAILABLE, _TORCHVISION_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _TORCHVISION_AVAILABLE:
from torchvision import transforms
else: # pragma: no cover
warn_missing_pkg('torchvision')
if _PIL_AVAILABLE:
from PIL import ImageFilter
else: # pragma: no cover
warn_missing_pkg('PIL', pypi_name='Pillow')
class Moco2TrainCIFAR10Transforms:
    """MoCo v2 training augmentations for CIFAR-10.

    Reference: https://arxiv.org/pdf/2003.04297.pdf
    """

    def __init__(self, height: int = 32):
        if not _TORCHVISION_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError('You want to use `transforms` from `torchvision` which is not installed yet.')

        color_jitter = transforms.RandomApply(
            [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)],  # not strengthened
            p=0.8,
        )
        augmentations = [
            transforms.RandomResizedCrop(height, scale=(0.2, 1.)),
            color_jitter,
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            cifar10_normalization(),
        ]
        self.train_transform = transforms.Compose(augmentations)

    def __call__(self, inp):
        """Return two independently augmented views (query, key) of *inp*."""
        return self.train_transform(inp), self.train_transform(inp)
class Moco2EvalCIFAR10Transforms:
    """MoCo v2 evaluation transforms for CIFAR-10.

    Reference: https://arxiv.org/pdf/2003.04297.pdf
    """

    def __init__(self, height: int = 32):
        if not _TORCHVISION_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError('You want to use `transforms` from `torchvision` which is not installed yet.')

        steps = [
            transforms.Resize(height + 12),
            transforms.CenterCrop(height),
            transforms.ToTensor(),
            cifar10_normalization(),
        ]
        self.test_transform = transforms.Compose(steps)

    def __call__(self, inp):
        """Return the (query, key) pair produced by the eval transform."""
        return self.test_transform(inp), self.test_transform(inp)
class Moco2TrainSTL10Transforms:
    """MoCo v2 training augmentations for STL-10.

    Reference: https://arxiv.org/pdf/2003.04297.pdf
    """

    def __init__(self, height: int = 64):
        if not _TORCHVISION_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError('You want to use `transforms` from `torchvision` which is not installed yet.')

        color_jitter = transforms.RandomApply(
            [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)],  # not strengthened
            p=0.8,
        )
        augmentations = [
            transforms.RandomResizedCrop(height, scale=(0.2, 1.)),
            color_jitter,
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            stl10_normalization(),
        ]
        self.train_transform = transforms.Compose(augmentations)

    def __call__(self, inp):
        """Return two independently augmented views (query, key) of *inp*."""
        return self.train_transform(inp), self.train_transform(inp)
class Moco2EvalSTL10Transforms:
    """MoCo v2 evaluation transforms for STL-10.

    Reference: https://arxiv.org/pdf/2003.04297.pdf
    """

    def __init__(self, height: int = 64):
        if not _TORCHVISION_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError('You want to use `transforms` from `torchvision` which is not installed yet.')

        steps = [
            transforms.Resize(height + 11),
            transforms.CenterCrop(height),
            transforms.ToTensor(),
            stl10_normalization(),
        ]
        self.test_augmentation = transforms.Compose(steps)

    def __call__(self, inp):
        """Return the (query, key) pair produced by the eval transform."""
        return self.test_augmentation(inp), self.test_augmentation(inp)
class Moco2TrainImagenetTransforms:
    """MoCo v2 training augmentations for ImageNet.

    Reference: https://arxiv.org/pdf/2003.04297.pdf
    """

    def __init__(self, height: int = 128):
        if not _TORCHVISION_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError('You want to use `transforms` from `torchvision` which is not installed yet.')

        color_jitter = transforms.RandomApply(
            [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)],  # not strengthened
            p=0.8,
        )
        augmentations = [
            transforms.RandomResizedCrop(height, scale=(0.2, 1.)),
            color_jitter,
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_normalization(),
        ]
        self.train_transform = transforms.Compose(augmentations)

    def __call__(self, inp):
        """Return two independently augmented views (query, key) of *inp*."""
        return self.train_transform(inp), self.train_transform(inp)
class Moco2EvalImagenetTransforms:
    """MoCo v2 evaluation transforms for ImageNet.

    Reference: https://arxiv.org/pdf/2003.04297.pdf
    """

    def __init__(self, height: int = 128):
        if not _TORCHVISION_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError('You want to use `transforms` from `torchvision` which is not installed yet.')

        steps = [
            transforms.Resize(height + 32),
            transforms.CenterCrop(height),
            transforms.ToTensor(),
            imagenet_normalization(),
        ]
        self.test_transform = transforms.Compose(steps)

    def __call__(self, inp):
        """Return the (query, key) pair produced by the eval transform."""
        return self.test_transform(inp), self.test_transform(inp)
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709

    The blur radius is drawn uniformly from the configured sigma range on
    every call.
    """

    def __init__(self, sigma=(0.1, 2.0)):
        if not _PIL_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError(
                'You want to use `Pillow` which is not installed yet, install it with `pip install Pillow`.'
            )
        self.sigma = sigma

    def __call__(self, x):
        low, high = self.sigma[0], self.sigma[1]
        radius = random.uniform(low, high)
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
| 31.326733 | 116 | 0.607143 |
68c1c0f86803458a8a0794cebd3188dd12246493 | 4,070 | py | Python | keystone/catalog/backends/templated.py | TampereTC/tre-smartcity-keystone | e2d0adc25165eef102c87d7991fb1a595680fda6 | [
"Apache-2.0"
] | null | null | null | keystone/catalog/backends/templated.py | TampereTC/tre-smartcity-keystone | e2d0adc25165eef102c87d7991fb1a595680fda6 | [
"Apache-2.0"
] | null | null | null | keystone/catalog/backends/templated.py | TampereTC/tre-smartcity-keystone | e2d0adc25165eef102c87d7991fb1a595680fda6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundationc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import six
from keystone.catalog.backends import kvs
from keystone.catalog import core
from keystone import config
from keystone import exception
from keystone.i18n import _LC
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
CONF = config.CONF
def parse_templates(template_lines):
    """Parse ``catalog.$REGION.$SERVICE.$key = $value`` lines into a dict.

    Args:
        template_lines: iterable of text lines (e.g. an open file).

    Returns:
        Nested dict ``{region: {service: {key: value}}}``. Lines without
        ``' = '`` or whose key does not start with ``catalog.`` are skipped;
        keys are expected to have the four dot-separated parts shown above.
    """
    o = {}
    for line in template_lines:
        if ' = ' not in line:
            continue
        # BUGFIX: maxsplit=1 so values that themselves contain ' = ' (e.g.
        # URLs with query strings) no longer raise ValueError.
        k, v = line.strip().split(' = ', 1)
        if not k.startswith('catalog.'):
            continue
        parts = k.split('.')
        region = parts[1]
        # NOTE(termie): object-store insists on having a dash
        service = parts[2].replace('_', '-')
        key = parts[3]
        # build the nested structure without the read-modify-write dance
        o.setdefault(region, {}).setdefault(service, {})[key] = v
    return o
class Catalog(kvs.Catalog):
    """A backend that generates endpoints for the Catalog based on templates.

    It is usually configured via config entries that look like:

      catalog.$REGION.$SERVICE.$key = $value

    and is stored in a similar looking hierarchy. Where a value can contain
    values to be interpolated by standard python string interpolation that look
    like (the % is replaced by a $ due to paste attempting to interpolate on
    its own:

      http://localhost:$(public_port)s/

    When expanding the template it will pass in a dict made up of the conf
    instance plus a few additional key-values, notably tenant_id and user_id.

    It does not care what the keys and values are but it is worth noting that
    keystone_compat will expect certain keys to be there so that it can munge
    them into the output format keystone expects. These keys are:

      name - the name of the service, most likely repeated for all services of
             the same type, across regions.

      adminURL - the url of the admin endpoint

      publicURL - the url of the public endpoint

      internalURL - the url of the internal endpoint

    """

    def __init__(self, templates=None):
        """Load templates from the argument or from the configured file."""
        super(Catalog, self).__init__()
        if templates:
            self.templates = templates
        else:
            # fall back to the file configured in [catalog] template_file,
            # searching the standard config dirs when the path doesn't exist
            template_file = CONF.catalog.template_file
            if not os.path.exists(template_file):
                template_file = CONF.find_file(template_file)
            self._load_templates(template_file)

    def _load_templates(self, template_file):
        """Parse *template_file* into self.templates; re-raise on IO failure."""
        try:
            self.templates = parse_templates(open(template_file))
        except IOError:
            LOG.critical(_LC('Unable to open template file %s'), template_file)
            raise

    def get_catalog(self, user_id, tenant_id, metadata=None):
        """Build the service catalog for a user/tenant.

        Returns {region: {service: {key: formatted_url}}}; a service whose
        template cannot be formatted is silently omitted (the failure is
        logged inside format_url).
        """
        # substitution values: every CONF option plus the caller's identity
        substitutions = dict(six.iteritems(CONF))
        substitutions.update({'tenant_id': tenant_id, 'user_id': user_id})

        catalog = {}
        for region, region_ref in six.iteritems(self.templates):
            catalog[region] = {}
            for service, service_ref in six.iteritems(region_ref):
                service_data = {}
                try:
                    for k, v in six.iteritems(service_ref):
                        service_data[k] = core.format_url(v, substitutions)
                except exception.MalformedEndpoint:
                    continue  # this failure is already logged in format_url()
                catalog[region][service] = service_data

        return catalog
| 32.56 | 79 | 0.659951 |
29ed962b252e4a4663cc08c92e2df0170a752bbc | 42,118 | py | Python | isoclassify/grid/classify.py | cosmicoder/isoclassify | 8cef35da50b7a1c6bb3e48c3e87dec737002474f | [
"MIT"
] | null | null | null | isoclassify/grid/classify.py | cosmicoder/isoclassify | 8cef35da50b7a1c6bb3e48c3e87dec737002474f | [
"MIT"
] | null | null | null | isoclassify/grid/classify.py | cosmicoder/isoclassify | 8cef35da50b7a1c6bb3e48c3e87dec737002474f | [
"MIT"
] | null | null | null | import copy
import time,pdb
import ephem
import pandas as pd
import numpy as np
from astropy.io import ascii
from itertools import product
from .pdf import * # part of isoclassify package (to do make explicit import)
from .priors import * # part of isoclassify package (to do make explicit import)
from .plot import * # part of isoclassify package (to do make explicit import)
class obsdata():
    """Observational inputs for the grid classifier.

    Every observable is paired with an uncertainty stored under the same
    name plus an ``e`` suffix (e.g. ``teff``/``teffe``); the value -99.0 is
    the sentinel for "not provided". The ``add*`` methods fill in groups of
    related observables.
    """

    def __init__(self):
        # astrometry, spectroscopy, luminosity, photometry and seismology
        # all start out unset (-99.0), together with their uncertainties
        for name in ('plx', 'teff', 'logg', 'feh', 'lum',
                     'bmag', 'vmag', 'btmag', 'vtmag', 'dmag',
                     'gmag', 'rmag', 'imag', 'zmag',
                     'jmag', 'hmag', 'kmag',
                     'gamag', 'bpmag', 'rpmag',
                     'numax', 'dnu'):
            setattr(self, name, -99.0)
            setattr(self, name + 'e', -99.0)

    def addspec(self, value, sigma):
        """Set spectroscopic Teff, logg, [Fe/H] and their uncertainties."""
        self.teff, self.logg, self.feh = value[0], value[1], value[2]
        self.teffe, self.logge, self.fehe = sigma[0], sigma[1], sigma[2]

    def addlum(self, value, sigma):
        """Set the luminosity and its uncertainty (one-element sequences)."""
        self.lum = value[0]
        self.lume = sigma[0]

    def addbv(self, value, sigma):
        """Set Johnson B, V magnitudes and their uncertainties."""
        self.bmag, self.vmag = value[0], value[1]
        self.bmage, self.vmage = sigma[0], sigma[1]

    def addbvt(self, value, sigma):
        """Set Tycho BT, VT magnitudes and their uncertainties."""
        self.btmag, self.vtmag = value[0], value[1]
        self.btmage, self.vtmage = sigma[0], sigma[1]

    def addgriz(self, value, sigma):
        """Set Sloan g, r, i, z magnitudes and their uncertainties."""
        self.gmag, self.rmag = value[0], value[1]
        self.imag, self.zmag = value[2], value[3]
        self.gmage, self.rmage = sigma[0], sigma[1]
        self.image, self.zmage = sigma[2], sigma[3]

    def addjhk(self, value, sigma):
        """Set 2MASS J, H, K magnitudes and their uncertainties."""
        self.jmag, self.hmag, self.kmag = value[0], value[1], value[2]
        self.jmage, self.hmage, self.kmage = sigma[0], sigma[1], sigma[2]

    def addgaia(self, value, sigma):
        """Set Gaia G, BP, RP magnitudes and their uncertainties."""
        self.gamag, self.bpmag, self.rpmag = value[0], value[1], value[2]
        self.gamage, self.bpmage, self.rpmage = sigma[0], sigma[1], sigma[2]

    def addplx(self, value, sigma):
        """Set the parallax and its uncertainty (scalars)."""
        self.plx = value
        self.plxe = sigma

    def adddmag(self, value, sigma):
        """Set the companion delta-magnitude and its uncertainty (scalars)."""
        self.dmag = value
        self.dmage = sigma

    def addseismo(self, value, sigma):
        """Set asteroseismic numax and dnu with their uncertainties."""
        self.numax, self.dnu = value[0], value[1]
        self.numaxe, self.dnue = sigma[0], sigma[1]

    def addcoords(self, value1, value2):
        """Set right ascension (value1) and declination (value2)."""
        self.ra = value1
        self.dec = value2
class resdata():
    """Output container for the grid classifier.

    For each derived quantity ``q`` there are five attributes, all
    initialised to 0.0: ``q`` (the value), ``qep``/``qem`` (presumably the
    plus/minus uncertainties — confirm against the writer in classify()),
    and ``qpx``/``qpy`` (presumably the posterior x/y samples).
    The ``*sec`` quantities refer to a secondary component.
    """

    def __init__(self):
        quantities = (
            'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age',
            'avs', 'dis',
            # secondary-component quantities
            'teffsec', 'radsec', 'loggsec', 'rhosec', 'masssec',
        )
        for base in quantities:
            for suffix in ('', 'ep', 'em', 'px', 'py'):
                setattr(self, base + suffix, 0.0)
class extinction():
    """Broadband extinction coefficients, expressed relative to A_V.

    Attribute names are 'a' + band letter (b, v, bt, vt, g, r, i, z,
    j, h, k, ga); values are the per-band coefficients used to redden
    model photometry.
    """

    def __init__(self):
        coefficients = {
            'ab': 1.3454449, 'av': 1.00,
            'abt': 1.3986523, 'avt': 1.0602271,
            'ag': 1.2348743, 'ar': 0.88343449,
            'ai': 0.68095687, 'az': 0.48308430,
            'aj': 0.28814896, 'ah': 0.18152716, 'ak': 0.11505195,
            # Gaia G shares the SDSS-g coefficient
            'aga': 1.2348743,
        }
        for band, value in coefficients.items():
            setattr(self, band, value)
def classify(input, model, dustmodel=0, plot=1, useav=-99.0, ext=-99.0, band=''):
"""
Run grid based classifier
Args:
input (object): input object
model (dict): dictionary of arrays
dustmodel (Optional[DataFrame]): extinction model
useav (float):
ext (float):
"""
## constants
gsun = 27420.010
numaxsun = 3090.0
dnusun = 135.1
teffsun = 5772.0
# bolometric correction error; kinda needs to be motivated better ...
bcerr = 0.03
## extinction coefficients
extfactors = ext
## class containing output results
result = resdata()
# calculate colors + errors:
bvcol = input.bmag - input.vmag
bvtcol = input.btmag - input.vtmag
grcol = input.gmag - input.rmag
ricol = input.rmag - input.imag
izcol = input.imag - input.zmag
gicol = input.gmag - input.imag
rzcol = input.rmag - input.zmag
gzcol = input.gmag - input.zmag
jhcol = input.jmag - input.hmag
hkcol = input.hmag - input.kmag
jkcol = input.jmag - input.kmag
bpgacol = input.bpmag - input.gamag
garpcol = input.gamag - input.rpmag
bprpcol = input.bpmag - input.rpmag
vjcol = input.vmag - input.jmag
vtjcol = input.vtmag - input.jmag
gjcol = input.gmag - input.jmag
rjcol = input.rmag - input.jmag
vkcol = input.vmag - input.kmag
vtkcol = input.vtmag - input.kmag
gkcol = input.gmag - input.kmag
rkcol = input.rmag - input.kmag
gajcol = input.gamag - input.jmag
gakcol = input.gamag - input.kmag
bvcole = np.sqrt(input.bmage**2 + input.vmage**2)
bvtcole = np.sqrt(input.btmage**2 + input.vtmage**2)
grcole = np.sqrt(input.gmage**2 + input.rmage**2)
ricole = np.sqrt(input.rmage**2 + input.image**2)
izcole = np.sqrt(input.image**2 + input.zmage**2)
gicole = np.sqrt(input.gmage**2 + input.image**2)
rzcole = np.sqrt(input.rmage**2 + input.zmage**2)
gzcole = np.sqrt(input.gmage**2 + input.zmage**2)
jhcole = np.sqrt(input.jmage**2 + input.hmage**2)
hkcole = np.sqrt(input.hmage**2 + input.kmage**2)
jkcole = np.sqrt(input.jmage**2 + input.kmage**2)
bpgacole = np.sqrt(input.bpmage**2 + input.gamage**2)
garpcole = np.sqrt(input.gamage**2 + input.rpmage**2)
bprpcole = np.sqrt(input.bpmage**2 + input.rpmage**2)
vjcole = np.sqrt(input.vmage**2 + input.jmage**2)
vtjcole = np.sqrt(input.vtmage**2 + input.jmage**2)
gjcole = np.sqrt(input.gmage**2 + input.jmage**2)
rjcole = np.sqrt(input.rmage**2 + input.jmage**2)
vkcole = np.sqrt(input.vmage**2 + input.kmage**2)
vtkcole = np.sqrt(input.vtmage**2 + input.kmage**2)
gkcole = np.sqrt(input.gmage**2 + input.kmage**2)
rkcole = np.sqrt(input.rmage**2 + input.kmage**2)
gajcole = np.sqrt(input.gamage**2 + input.jmage**2)
gakcole = np.sqrt(input.gamage**2 + input.kmage**2)
# Compute extra color error term based on underestimation of stellar teff errors with nominal 2% error floor:
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
gkexcole = compute_extra_gk_color_error(gkcol)
# Determine which gK error term is greater and use that one:
print("g - K error from photometry: ",gkcole)
print("g - K error from best-fit polynomial: ",gkexcole)
gkcole = max(gkcole,gkexcole)
print("Using g - K error: ",gkcole)
# apparent mag to use for distance estimation. set by "band" input
redmap = -99.0
if (getattr(input,band) > -99.):
redmap = getattr(input,band)
redmape = getattr(input,band+'e')
model_mabs = model[band]
# correct for companion
if (input.dmag != -99.):
dx=-0.4*input.dmag
dxe=-0.4*input.dmage
cor=2.5*np.log10(1.+10**dx)
redmap = redmap+cor
redmape = np.sqrt( redmape**2 + (dxe*2.5*10**dx/(1.+10**dx))**2)
# absolute magnitude
if (input.plx > -99.0):
mabs = -5.0 * np.log10(1.0 / input.plx) + redmap + 5.0
mabse = np.sqrt(
(-5.0 / (input.plx * np.log(10)))**2 * input.plxe**2
+ redmape**2 + bcerr**2)
# Also compute extra error term for M-dwarfs with K band mags only:
if (mabs > 4.0) and (input.kmag > -99.0):
print("M-dwarf with K band magnitude detected!")
mabseex = compute_extra_MK_error(mabs)
print("M_K from photometry: ",mabse)
print("M_K error from best-fit polynomial: ",mabseex)
mabse = np.sqrt(mabse**2 + mabseex**2)
print("After adding in quadrature, using M_K error: ",mabse)
else:
mabs = -99.0
mabse = -99.0
# pre-select model grid; first only using reddening-independent quantities
sig = 4.0
um = np.arange(0,len(model['teff']),1)
if (input.teff > -99.0):
ut=np.where((model['teff'] > input.teff-sig*input.teffe) & \
(model['teff'] < input.teff+sig*input.teffe))[0]
um=np.intersect1d(um,ut)
print('teff',len(um))
if (input.lum > -99.0):
ut=np.where((model['lum'] > input.lum-sig*input.lume) & \
(model['lum'] < input.lum+sig*input.lume))[0]
um=np.intersect1d(um,ut)
print('lum',len(um))
if (input.dnu > 0.0):
model_dnu = dnusun*model['fdnu']*np.sqrt(10**model['rho'])
ut = np.where(
(model_dnu > input.dnu - sig*input.dnue)
& (model_dnu < input.dnu + sig*input.dnue)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('dnu', len(um))
if (input.numax > 0.0):
model_numax = (numaxsun
* (10**model['logg']/gsun)
* (model['teff']/teffsun)**(-0.5))
ut = np.where(
(model_numax > input.numax - sig*input.numaxe)
& (model_numax < input.numax + sig*input.numaxe)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('numax', len(um))
if (input.logg > -99.0):
ut = np.where(
(model['logg'] > input.logg - sig*input.logge)
& (model['logg'] < input.logg + sig*input.logge)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if (input.feh > -99.0):
ut = np.where(
(model['feh'] > input.feh - sig*input.fehe)
& (model['feh'] < input.feh + sig*input.fehe)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('feh', len(um))
print('number of models used within non-phot obsconstraints:', len(um))
# bail if there are not enough good models
if (len(um) < 10):
return result
# add reddening
if (redmap > -99.0):
# if no reddening map is provided, add Av as a new variable
# and fit for it
if (isinstance(dustmodel,pd.DataFrame) == False):
avs = np.arange(-0.3,1.0,0.01)
# user-specified reddening
#if (useav > -99.0):
# avs = np.zeros(1) + useav
mod = reddening(model, um, avs, extfactors)
# otherwise, just redden each model according to the provided map
else:
mod = reddening_map(
model, model_mabs, redmap, dustmodel, um, input, extfactors, band
)
# photometry to use for distance
mod_mabs = mod[band]
um = np.arange(0,len(mod['teff']),1)
mod['dis'] = 10**((redmap - mod_mabs + 5.0)/5.0)
print('number of models incl reddening:',len(um))
else:
mod = model
# next, another model down-select based on reddening-dependent quantities
# only do this if no spec constraints are available
if (mabs > -99.0):
ut = np.where(
(mod_mabs > mabs - sig*mabse)
& (mod_mabs < mabs + sig*mabse)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if (input.teff == -99.0):
if ((input.bmag > -99.0) & (input.vmag > -99.0)):
ut=np.where(
(mod['bmag'] - mod['vmag'] > bvcol - sig*bvcole)
& (mod['bmag'] - mod['vmag'] < bvcol + sig*bvcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.btmag > -99.0) & (input.vtmag > -99.0)):
ut=np.where(
(mod['btmag'] - mod['vtmag'] > bvtcol - sig*bvtcole)
& (mod['btmag'] - mod['vtmag'] < bvtcol + sig*bvtcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.gmag > -99.0) & (input.rmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['rmag'] > grcol-sig*grcole)
& (mod['gmag'] - mod['rmag'] < grcol+sig*grcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.imag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['imag'] > ricol - sig*ricole)
& (mod['rmag'] - mod['imag'] < ricol + sig*ricole)
)
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.imag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['imag'] - mod['zmag'] > izcol - sig*izcole)
& (mod['imag'] - mod['zmag'] < izcol + sig*izcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.imag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['imag'] > gicol-sig*gicole)
& (mod['gmag'] - mod['imag'] < gicol+sig*gicole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['zmag'] > rzcol-sig*rzcole)
& (mod['rmag'] - mod['zmag'] < rzcol+sig*rzcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['zmag'] > gzcol-sig*gzcole)
& (mod['gmag'] - mod['zmag'] < gzcol+sig*gzcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.jmag > -99.0) & (input.hmag > -99.0)):
ut = np.where(
(mod['jmag'] - mod['hmag'] > jhcol - sig*jhcole)
& (mod['jmag'] - mod['hmag'] < jhcol + sig*jhcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.hmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['hmag'] - mod['kmag'] > hkcol - sig*hkcole)
& (mod['hmag'] - mod['kmag'] < hkcol + sig*hkcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.jmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['jmag'] - mod['kmag'] > jkcol - sig*jkcole)
& (mod['jmag'] - mod['kmag'] < jkcol + sig*jkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.bpmag > -99.0) & (input.gamag > -99.0)):
ut = np.where(
(mod['bpmag'] - mod['gamag'] > bpgacol - sig*bpgacole)
& (mod['bpmag'] - mod['gamag'] < bpgacol + sig*bpgacole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.rpmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['rpmag'] > garpcol - sig*garpcole)
& (mod['gamag'] - mod['rpmag'] < garpcol + sig*garpcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.bpmag > -99.0) & (input.rpmag > -99.0)):
ut = np.where(
(mod['bpmag'] - mod['rpmag'] > bprpcol - sig*bprpcole)
& (mod['bpmag'] - mod['rpmag'] < bprpcol + sig*bprpcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['vmag'] - mod['jmag'] > vjcol - sig*vjcole)
& (mod['vmag'] - mod['jmag'] < vjcol + sig*vjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vtmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['vtmag'] - mod['jmag'] > vtjcol - sig*vtjcole)
& (mod['vtmag'] - mod['jmag'] < vtjcol + sig*vtjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['jmag'] > gjcol - sig*gjcole)
& (mod['gmag'] - mod['jmag'] < gjcol + sig*gjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['jmag'] > rjcol - sig*rjcole)
& (mod['rmag'] - mod['jmag'] < rjcol + sig*rjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['vmag'] - mod['kmag'] > vkcol - sig*vkcole)
& (mod['vmag'] - mod['kmag'] < vkcol + sig*vkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vtmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['vtmag'] - mod['kmag'] > vtkcol - sig*vtkcole)
& (mod['vtmag'] - mod['kmag'] < vtkcol + sig*vtkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['kmag'] > gkcol - sig*gkcole)
& (mod['gmag'] - mod['kmag'] < gkcol + sig*gkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['kmag'] > rkcol - sig*rkcole)
& (mod['rmag'] - mod['kmag'] < rkcol + sig*rkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['jmag'] > gajcol - sig*gajcole)
& (mod['gamag'] - mod['jmag'] < gajcol + sig*gajcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['kmag'] > gakcol - sig*gakcole)
& (mod['gamag'] - mod['kmag'] < gakcol + sig*gakcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('number of models after phot constraints:',len(um))
print('----')
# bail if there are not enough good models
if (len(um) < 10):
return result
def gaussian(x, mu, sig):
return np.exp(-(x-mu)**2./(2.*sig**2.))
# likelihoods
if ((input.gmag > -99.0) & (input.rmag > -99.0)):
lh_gr = gaussian(grcol, mod['gmag'][um]-mod['rmag'][um], grcole)
else:
lh_gr = np.ones(len(um))
if ((input.rmag > -99.0) & (input.imag > -99.0)):
lh_ri = gaussian(ricol, mod['rmag'][um]-mod['imag'][um], ricole)
else:
lh_ri = np.ones(len(um))
if ((input.imag > -99.0) & (input.zmag > -99.0)):
lh_iz = gaussian(izcol, mod['imag'][um]-mod['zmag'][um], izcole)
else:
lh_iz = np.ones(len(um))
if ((input.gmag > -99.0) & (input.imag > -99.0)):
lh_gi = gaussian(gicol, mod['gmag'][um]-mod['imag'][um], gicole)
else:
lh_gi = np.ones(len(um))
if ((input.rmag > -99.0) & (input.zmag > -99.0)):
lh_rz = gaussian(rzcol, mod['rmag'][um]-mod['zmag'][um], rzcole)
else:
lh_rz = np.ones(len(um))
if ((input.gmag > -99.0) & (input.zmag > -99.0)):
lh_gz = gaussian(gzcol, mod['gmag'][um]-mod['zmag'][um], gzcole)
else:
lh_gz = np.ones(len(um))
if ((input.jmag > -99.0) & (input.hmag > -99.0)):
lh_jh = gaussian(jhcol, mod['jmag'][um]-mod['hmag'][um], jhcole)
else:
lh_jh = np.ones(len(um))
if ((input.hmag > -99.0) & (input.kmag > -99.0)):
lh_hk = gaussian(hkcol, mod['hmag'][um]-mod['kmag'][um], hkcole)
else:
lh_hk = np.ones(len(um))
if ((input.jmag > -99.0) & (input.kmag > -99.0)):
lh_jk = gaussian(jkcol, mod['jmag'][um]-mod['kmag'][um], jkcole)
else:
lh_jk = np.ones(len(um))
if ((input.bpmag > -99.0) & (input.gamag > -99.0)):
lh_bpga = gaussian(bpgacol, mod['bpmag'][um]-mod['gamag'][um], bpgacole)
else:
lh_bpga = np.ones(len(um))
if ((input.gamag > -99.0) & (input.rpmag > -99.0)):
lh_garp = gaussian(garpcol, mod['gamag'][um]-mod['rpmag'][um], garpcole)
else:
lh_garp = np.ones(len(um))
if ((input.bpmag > -99.0) & (input.rpmag > -99.0)):
lh_bprp = gaussian(bprpcol, mod['bpmag'][um]-mod['rpmag'][um], bprpcole)
else:
lh_bprp = np.ones(len(um))
if ((input.bmag > -99.0) & (input.vmag > -99.0)):
lh_bv = gaussian(bvcol, mod['bmag'][um]-mod['vmag'][um], bvcole)
else:
lh_bv = np.ones(len(um))
if ((input.btmag > -99.0) & (input.vtmag > -99.0)):
lh_bvt = gaussian(bvtcol, mod['btmag'][um]-mod['vtmag'][um], bvtcole)
else:
lh_bvt = np.ones(len(um))
if ((input.vmag > -99.0) & (input.jmag > -99.0)):
lh_vj = gaussian(vjcol, mod['vmag'][um]-mod['jmag'][um], vjcole)
else:
lh_vj = np.ones(len(um))
if ((input.vtmag > -99.0) & (input.jmag > -99.0)):
lh_vtj = gaussian(vtjcol, mod['vtmag'][um]-mod['jmag'][um], vtjcole)
else:
lh_vtj = np.ones(len(um))
if ((input.gmag > -99.0) & (input.jmag > -99.0)):
lh_gj = gaussian(gjcol, mod['gmag'][um]-mod['jmag'][um], gjcole)
else:
lh_gj = np.ones(len(um))
if ((input.rmag > -99.0) & (input.jmag > -99.0)):
lh_rj = gaussian(rjcol, mod['rmag'][um]-mod['jmag'][um], rjcole)
else:
lh_rj = np.ones(len(um))
if ((input.vmag > -99.0) & (input.kmag > -99.0)):
lh_vk = gaussian(vkcol, mod['vmag'][um]-mod['kmag'][um], vkcole)
else:
lh_vk = np.ones(len(um))
if ((input.vtmag > -99.0) & (input.kmag > -99.0)):
lh_vtk = gaussian(vtkcol, mod['vtmag'][um]-mod['kmag'][um], vtkcole)
else:
lh_vtk = np.ones(len(um))
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
lh_gk = gaussian(gkcol, mod['gmag'][um]-mod['kmag'][um], gkcole)
else:
lh_gk = np.ones(len(um))
if ((input.rmag > -99.0) & (input.kmag > -99.0)):
lh_rk = gaussian(rkcol, mod['rmag'][um]-mod['kmag'][um], rkcole)
else:
lh_rk = np.ones(len(um))
if ((input.gamag > -99.0) & (input.jmag > -99.0)):
lh_gaj = gaussian(gajcol, mod['gamag'][um]-mod['jmag'][um], gajcole)
else:
lh_gaj = np.ones(len(um))
if ((input.gamag > -99.0) & (input.kmag > -99.0)):
lh_gak = gaussian(gakcol, mod['gamag'][um]-mod['kmag'][um], gakcole)
else:
lh_gak = np.ones(len(um))
if (input.teff > -99):
lh_teff = gaussian(input.teff, mod['teff'][um], input.teffe)
else:
lh_teff = np.ones(len(um))
if (input.lum > -99):
lh_lum = gaussian(input.lum, mod['lum'][um], input.lume)
else:
lh_lum = np.ones(len(um))
if (input.logg > -99.0):
lh_logg = gaussian(input.logg, mod['logg'][um], input.logge)
else:
lh_logg = np.ones(len(um))
if (input.feh > -99.0):
lh_feh = gaussian(input.feh, mod['feh'][um], input.fehe)
else:
lh_feh = np.ones(len(um))
if (input.plx > -99.0):
# Compute most likely value of absolute magnitude:
mabsIndex = np.argmax(np.exp( (-1./(2.*input.plxe**2))*(input.plx-1./mod['dis'][um])**2))
# Only use downselected models based on input parameters:
downSelMagArr = mod_mabs[um]
# Compute the likelihood of the maximum magnitude given computed errors:
lh_mabs = gaussian(downSelMagArr[mabsIndex],mod_mabs[um],mabse)
else:
lh_mabs = np.ones(len(um))
if (input.dnu > 0.):
mod_dnu = dnusun*mod['fdnu']*np.sqrt(10**mod['rho'])
lh_dnu = np.exp( -(input.dnu-mod_dnu[um])**2.0 / (2.0*input.dnue**2.0))
else:
lh_dnu = np.ones(len(um))
if (input.numax > 0.):
mod_numax = (numaxsun
* (10**mod['logg']/gsun)
* (mod['teff']/teffsun)**(-0.5))
lh_numax = gaussian(input.numax,mod_numax[um],input.numaxe)
else:
lh_numax = np.ones(len(um))
tlh = (lh_gr*lh_ri*lh_iz*lh_gi*lh_rz*lh_gz*lh_jh*lh_hk*lh_jk*lh_bv*lh_bvt*lh_bpga*lh_garp*lh_bprp*
lh_vj*lh_vtj*lh_gj*lh_rj*lh_vk*lh_vtk*lh_gk*lh_rk*lh_gaj*lh_gak*
lh_teff*lh_logg*lh_feh*lh_mabs*lh_dnu*lh_numax*lh_lum)
# metallicity prior (only if no FeH input is given)
if (input.feh > -99.0):
fprior = np.ones(len(um))
else:
fprior = fehprior(mod['feh'][um])
# distance prior
if (input.plx > -99.0):
lscale = 1350.
dprior = ((mod['dis'][um]**2/(2.0*lscale**3.))
*np.exp(-mod['dis'][um]/lscale))
else:
dprior = np.ones(len(um))
# isochrone prior (weights)
tprior = mod['dage'][um]*mod['dmass'][um]*mod['dfeh'][um]
# posterior
prob = fprior*dprior*tprior*tlh
prob = prob/np.sum(prob)
if (isinstance(dustmodel,pd.DataFrame) == False):
names = ['teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age']
steps = [0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
fixes = [0, 1, 1, 0, 0, 1, 1, 0, 1]
if (redmap > -99.0):
names = [
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age',
'avs'
]
steps = [0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1]
if ((input.plx == -99.0) & (redmap > -99)):
names=[
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age',
'avs', 'dis'
]
steps=[0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1, 0]
#if ((input.plx == -99.0) & (map > -99) & (useav > -99.0)):
# names=['teff','logg','feh','rad','mass','rho','lum','age','dis']
# steps=[0.001,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
# fixes=[0,1,1,0,0,1,1,0,0]
else:
#names=['teff','logg','feh','rad','mass','rho','lum','age']
#steps=[0.001,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
#fixes=[0,1,1,0,0,1,1,0,1]
#if (input.plx == -99.0):
avstep=((np.max(mod['avs'][um])-np.min(mod['avs'][um]))/10.)
#pdb.set_trace()
names = [
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age', 'avs',
'dis'
]
steps=[0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, avstep, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1, 0]
# Provision figure
if plot:
plotinit()
ix = 1
iy = 2
npar = len(names)
for j in range(0,npar):
if fnmatch.fnmatch(names[j],'*lum*'):
lum=np.log10((mod['rad'][um]**2. * (mod['teff'][um]/5772.)**4.))
x, y, res, err1, err2 = getpdf(
lum, prob, name=names[j], step=steps[j], fixed=fixes[j],
dustmodel=dustmodel)
else:
if (len(np.unique(mod[names[j]][um])) > 1):
x, y, res, err1, err2 = getpdf(
mod[names[j]][um], prob, name=names[j], step=steps[j],
fixed=fixes[j],dustmodel=dustmodel
)
elif ((len(np.unique(mod[names[j]][um])) == 1) and (names[j] == 'avs')):
res = mod[names[j]][um[0]]
err1 = 0.0
err2 = 0.0
x = res
y = 1.0
else:
res = 0.0
err1 = 0.0
err2 = 0.0
print(names[j], res, err1, err2)
setattr(result, names[j], res)
setattr(result, names[j]+'ep', err1)
setattr(result, names[j]+'em', err2)
setattr(result, names[j]+'px', x)
setattr(result, names[j]+'py', y)
# Plot individual posteriors
if plot:
plotposterior(x, y, res, err1, err2, names, j, ix, iy)
ix += 2
iy += 2
# calculate posteriors for a secondary with a given delta_mag, assuming it has the same
# distance, age, and metallicity. to do this we'll interpolate the physical properties
# of the secondary given a delta_mag, and assign it the same posterior probabilities
# same procedure as used in Kraus+ 16
if (input.dmag > -99.):
print(' ')
print('calculating properties for secondary ...')
delta_k=input.dmag
delta_k_err=input.dmage
print('using dmag=',delta_k,'+/-',delta_k_err,' in ',band)
# interpolate across constant age and metallicity
feh_un=np.unique(mod['feh_init'][um])
age_un=np.unique(mod['age'][um])
#adding in the contrast error without sampling is tricky, because that uncertainty
# is not present in the primary posterior; instead, calculate the secondary
# posteriors 3 times for +/- contrast errors, and then add those in quadrature
# *explicitly assumes that the contrast errors are gaussian*
mds=[delta_k+delta_k_err,delta_k,delta_k-delta_k_err]
# the new model quantities for the secondary
mod_sec=np.zeros((5,3,len(prob)))
# Now reduce model to only those that match metallicity, age, and mass (must be less than max primary mass) conditions:
ufeh = np.in1d(model['feh_init'],feh_un) # Must match all potential primary initial metallicities
uage = np.in1d(model['age'],age_un) # Must match all potential primary ages
umass = np.where(model['mass'] < np.max(mod['mass'][um]))[0] # Must be less than max primary mass
ufa = np.where((ufeh == True) & (uage == True))[0] # Find intersection of age and feh
ufam = np.intersect1d(umass,ufa) # Find intersection of mass and ufa
modelMin = dict((k, model[k][ufam]) for k in model.keys()) # Define minimal model grid
# insanely inefficient triple loop follows
for s in range(0,len(mds)):
for r in range(0,len(feh_un)):
for k in range (0,len(age_un)):
# NB the next line uses model instead of mod, since the interpolation needs
# the full model grid rather than the pre-selected models returned by the
# reddening routine (which excludes secondary solutions). This may screw
# things up when trying to constrain reddening (i.e. dust="none")
ux=np.where((modelMin['feh_init'] == feh_un[r]) & (modelMin['age'] == age_un[k]))[0]
ux2=np.where((mod['feh_init'][um] == feh_un[r]) & (mod['age'][um] == age_un[k]))[0]
sr=np.argsort(modelMin[band][ux])
if ((len(ux) == 0) | (len(ux2) == 0)):
continue
mod_sec[0,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['teff'][ux[sr]])
mod_sec[1,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['logg'][ux[sr]])
mod_sec[2,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['rad'][ux[sr]])
mod_sec[3,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['mass'][ux[sr]])
mod_sec[4,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['rho'][ux[sr]])
# now get PDFs across all delta mags, add errors in quadrature
names = ['teff', 'logg', 'rad', 'mass', 'rho']
steps=[0.001, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 0, 0, 1]
ix = 1
iy = 2
npar = len(names)
for j in range(0,5):
x, y, res_1, err1_1, err2_1 = getpdf(mod_sec[j,0,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
xo, yo, res_2, err1_2, err2_2 = getpdf(mod_sec[j,1,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
x, y, res_3, err1_3, err2_3 = getpdf(mod_sec[j,2,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
finerr1=np.sqrt(err1_2**2 + (np.abs(res_2-res_1))**2)
finerr2=np.sqrt(err2_2**2 + (np.abs(res_2-res_3))**2)
print(names[j], res_2, finerr1, finerr2)
setattr(result, names[j]+'sec', res_2)
setattr(result, names[j]+'sec'+'ep', finerr1)
setattr(result, names[j]+'sec'+'em', finerr2)
setattr(result, names[j]+'sec'+'px', x)
setattr(result, names[j]+'sec'+'py', y)
# Plot individual posteriors
if plot:
plotposterior_sec(xo,yo, res_2, finerr1, finerr2, names, j, ix, iy)
ix += 2
iy += 2
# Plot HR diagrams
if plot:
plothrd(model,mod,um,input,mabs,mabse,ix,iy)
return result
# add extinction as a model parameter
def reddening(model, um, avs, extfactors):
    """Expand a pre-selected model grid over a set of extinction values.

    Each of the ``len(um)`` models selected from ``model`` is replicated once
    per extinction value in ``avs``. Broadband magnitudes are reddened by
    ``avs[i] * A_band / A_V`` using the band-to-Av coefficient ratios supplied
    in ``extfactors``; the non-magnitude model columns are copied through
    unchanged and the sampled extinction is stored in the ``avs`` column.

    Parameters
    ----------
    model : dict of arrays
        Full model grid, keyed by column name. Must contain the 14 ``*mag``
        columns used below plus the scalar physical columns.
    um : array of int
        Indices of the pre-selected models within ``model``.
    avs : array of float
        Extinction (A_V) values to sample.
    extfactors : dict
        Extinction coefficients keyed ``'a' + band`` (e.g. ``'aj'``), plus
        ``'av'`` used as the normalizer.

    Returns
    -------
    numpy structured array of length ``len(um) * len(avs)``. Columns that are
    never filled here (e.g. ``'dis'``) remain zero-initialized.
    """
    model2 = dict((k, model[k][um]) for k in model.keys())
    nmodels = len(model2['teff']) * len(avs)
    keys = [
        'dage', 'dmass', 'dfeh', 'teff', 'logg', 'feh', 'rad', 'mass',
        'rho', 'age', 'gmag', 'rmag', 'imag', 'zmag', 'jmag', 'hmag',
        'bmag', 'vmag', 'btmag', 'vtmag', 'bpmag', 'gamag', 'rpmag',
        'dis', 'kmag', 'avs', 'fdnu', 'feh_init'
    ]
    dtype = [(key, float) for key in keys]
    model3 = np.zeros(nmodels, dtype=dtype)
    # Loop-invariant lookups hoisted out of the per-Av loop.
    av = extfactors['av']
    scalar_keys = 'teff logg feh rad mass rho age feh_init dfeh dmass dage fdnu'.split()
    start = 0
    end = len(um)
    for i in range(0, len(avs)):
        ix = np.arange(start, end, 1)
        # NB: in reality, the model mags should also be Av-dependent;
        # hopefully a small effect!
        for c in 'b v g r i z j h k bt vt bp ga rp'.split():
            cmag = c + 'mag'
            model3[cmag][ix] = model2[cmag] + avs[i] * extfactors['a' + c] / av
        for key in scalar_keys:
            model3[key][ix] = model2[key]
        model3['avs'][ix] = avs[i]
        start = start + len(um)
        end = end + len(um)
        # (debug print of the loop index removed)
    return model3
# redden model given a reddening map
def reddening_map(model, model_mabs, redmap, dustmodel, um, input, extfactors,
                  band):
    """Redden model magnitudes using a line-of-sight dust map.

    Estimates a zero-reddening distance from the distance modulus in ``band``,
    interpolates E(B-V) from ``dustmodel`` at that distance, converts it to a
    band extinction via ``extfactors``, and recomputes the distance once.
    Returns a reddened copy of the model grid (all models, or only the
    pre-selected ``um`` subset as a structured array).

    NOTE(review): ``dustmodel`` is assumed to be a pandas DataFrame whose
    columns from index 2 onward encode distances in their names (chars 3+)
    and whose first row holds E(B-V) values -- confirm against the caller.
    """
    # Band keys are '<letter(s)>mag': a 4-char key has a 1-letter band
    # (e.g. 'jmag' -> 'j'), otherwise 2 letters (e.g. 'btmag' -> 'bt').
    if (len(band) == 4):
        bd = band[0:1]
    else:
        bd = band[0:2]
    # Convert equatorial (RA/Dec, degrees) to galactic coordinates.
    equ = ephem.Equatorial(
        input.ra*np.pi/180.0, input.dec*np.pi/180.0, epoch=ephem.J2000
    )
    gal = ephem.Galactic(equ)
    # NOTE(review): lon_deg/lat_deg are computed but never used below.
    lon_deg = gal.lon*180./np.pi
    lat_deg = gal.lat*180./np.pi
    # zero-reddening distance
    dis = 10**((redmap-model_mabs[um]+5)/5.)
    # iterate distance and map a few times
    # (currently a single iteration; range(0,1) runs the body once)
    for i in range(0,1):
        # Distance grid of the dust map, prefixed with 0 so interpolation
        # is anchored at zero distance / zero reddening.
        xp = np.concatenate(
            ([0.0],np.array(dustmodel.columns[2:].str[3:],dtype='float'))
        )
        fp = np.concatenate(([0.0],np.array(dustmodel.iloc[0][2:])))
        ebvs = np.interp(x=dis, xp=xp, fp = fp)
        # Extinction in the chosen band, then a refined distance that
        # accounts for it.
        ext_band = extfactors['a'+bd]*ebvs
        dis=10**((redmap-ext_band-model_mabs[um]+5)/5.)
    # if no models have been pre-selected (i.e. input is
    # photometry+parallax only), redden all models
    if (len(um) == len(model['teff'])):
        model3 = copy.deepcopy(model)
        for c in 'b v g r i z j h k bt vt bp ga rp'.split():
            cmag = c + 'mag'
            ac = 'a' + c
            av = extfactors['av']  # NOTE(review): assigned but unused here
            model3[cmag] = model[cmag] + extfactors[ac] * ebvs
        model3['dis'] = dis
        model3['avs'] = extfactors['av']*ebvs
        #pdb.set_trace()
    # if models have been pre-selected, extract and only redden those
    else:
        model2 = dict((k, model[k][um]) for k in model.keys())
        nmodels = len(model2['teff'])
        keys = [
            'dage', 'dmass', 'dfeh', 'teff', 'logg', 'feh', 'rad', 'mass',
            'rho', 'age', 'gmag', 'rmag', 'imag', 'zmag', 'jmag', 'hmag',
            'bmag', 'vmag', 'btmag','vtmag', 'bpmag', 'gamag', 'rpmag',
            'dis', 'kmag', 'avs', 'fdnu', 'feh_init'
        ]
        dtype = [(key, float) for key in keys]
        model3 = np.zeros(nmodels,dtype=dtype)
        for c in 'b v g r i z j h k bt vt bp ga rp'.split():
            cmag = c + 'mag'
            ac = 'a' + c
            av = extfactors['av']  # NOTE(review): assigned but unused here
            model3[cmag] = model2[cmag] + extfactors[ac] * ebvs
        model3['dis']=dis
        model3['avs']=extfactors['av']*ebvs
        # Copy non-magnitude physical columns through unchanged.
        keys = 'teff logg feh rad mass rho age feh_init dfeh dmass dage fdnu'.split()
        for key in keys:
            model3[key] = model2[key]
    return model3
########################### M-dwarf error computation and gK to 2% teff uncertainty computation:
def compute_extra_MK_error(abskmag):
    """Extra absolute-K-magnitude uncertainty from a 2.1% systematic.

    Evaluates a fixed polynomial (presumably the mass-vs-M_K relation, per
    the coefficient names -- confirm against its source) and its derivative
    at ``abskmag``, then converts a 2.1% fractional error in the polynomial
    quantity into a magnitude error via the local inverse slope:
    ``|0.021 * f(M_K) / f'(M_K)|``.
    """
    mass_coeffs = np.array([-1.218087354981032275e-04,3.202749540513295540e-03,
        -2.649332720970200630e-02,5.491458806424324990e-02,6.102330369026183476e-02,
        6.122397810371335014e-01])
    dmass_coeffs = np.array([-6.090436774905161376e-04,1.281099816205318216e-02,
        -7.947998162910602238e-02,1.098291761284864998e-01,6.102330369026183476e-02])
    value = np.polyval(mass_coeffs, abskmag)
    slope = np.polyval(dmass_coeffs, abskmag)
    return abs(0.021 * value / slope)
def compute_extra_gk_color_error(gk):
    """Extra (g-K) color uncertainty corresponding to a 2% Teff systematic.

    Evaluates a fixed high-order polynomial (Teff as a function of g-K
    color, per the section header above) and its derivative at ``gk``,
    then converts a 2% fractional Teff error into a color error via the
    local inverse slope: ``|0.02 * T(gk) / T'(gk)|``.
    """
    teff_coeffs = np.array([5.838899127633915245e-06,-4.579640759410575821e-04,
        1.591988911769273360e-02,-3.229622768514631148e-01,4.234782988549875782e+00,
        -3.752421323678526477e+01,2.279521336429464498e+02,-9.419602441779162518e+02,
        2.570487048729761227e+03,-4.396474893847861495e+03,4.553858427460818348e+03,
        -4.123317864249115701e+03,9.028586421378711748e+03])
    dteff_coeffs = np.array([7.006678953160697955e-05,-5.037604835351633566e-03,
        1.591988911769273429e-01,-2.906660491663167978e+00,3.387826390839900625e+01,
        -2.626694926574968463e+02,1.367712801857678642e+03,-4.709801220889581600e+03,
        1.028194819491904491e+04,-1.318942468154358357e+04,9.107716854921636696e+03,
        -4.123317864249115701e+03])
    teff = np.polyval(teff_coeffs, gk)
    slope = np.polyval(dteff_coeffs, gk)
    return abs(0.02 * teff / slope)
######################################### misc stuff
# calculate parallax for each model
def redden(redmap, mabs, gl, gb, dust):
    """Estimate a parallax from apparent/absolute magnitude with extinction.

    Starts from the zero-extinction log-distance implied by the distance
    modulus, performs one fixed-point iteration (evaluate the dust map at
    the current distance, convert E(B-V) to a band extinction, recompute
    the distance), and returns the reciprocal distance.

    Parameters
    ----------
    redmap : float
        Apparent magnitude in the reddening band.
    mabs : float
        Absolute magnitude in the same band.
    gl, gb : float
        Galactic coordinates, passed through to ``dust``.
    dust : callable
        ``dust(gl, gb, distance)`` returning E(B-V); distance is passed
        divided by 1000 (presumably pc -> kpc -- confirm against the map).

    Returns
    -------
    float
        ``1 / 10**log10(distance)`` -- the parallax if distance is in pc.

    Fixes: removed a leftover ``pdb.set_trace()`` breakpoint (``pdb`` is not
    imported, so it raised NameError at runtime) and the dead ``s_newd`` /
    ``plx`` / ``s_plx`` computations that only fed that breakpoint.
    """
    # zero-extinction log10(distance) from the distance modulus
    logd = (redmap-mabs+5.)/5.
    newd = logd
    # single refinement pass; loop kept so more iterations can be enabled
    for i in range(0,1):
        cur = 10**newd
        ebv = dust(gl,gb,cur/1000.)
        av = ebv*3.1          # R_V = 3.1 conversion E(B-V) -> A_V
        # A_V -> band extinction; hard-coded ratio (presumably A_J/A_V
        # given the variable name) -- TODO confirm
        aj = av*1.2348743
        newd = (redmap-mabs-aj+5.)/5.
    return 1./(10**newd)
def readinput(input):
    """Read the observables table from ``'input.txt'``.

    NOTE(review): the ``input`` argument is ignored (the original
    implementation immediately overwrote it with the parsed table); it is
    kept only for call-site compatibility.

    Returns a flat 28-element tuple:
    (ra, dec, bmag, bmage, vmag, vmage, gmag, gmage, rmag, rmage,
     imag, image, zmag, zmage, jmag, jmage, hmag, hmage, kmag, kmage,
     plx, plxe, teff, teffe, logg, logge, feh, fehe)
    """
    table = ascii.read('input.txt')
    # Row 0 holds (ra, dec); rows 1..13 hold (value, error) pairs in the
    # fixed order documented above.
    values = [table['col1'][0], table['col2'][0]]
    for row in range(1, 14):
        values.append(table['col1'][row])
        values.append(table['col2'][row])
    return tuple(values)
| 34.13128 | 138 | 0.521369 |
d6effdbf18bce7d80e01ff62b5864b1bc05f8aa6 | 1,870 | py | Python | conf.py | Chilipp/autosummary-issue | 04fdce809a8772dcfd5a34cc86340fb1eb405930 | [
"MIT"
] | null | null | null | conf.py | Chilipp/autosummary-issue | 04fdce809a8772dcfd5a34cc86340fb1eb405930 | [
"MIT"
] | null | null | null | conf.py | Chilipp/autosummary-issue | 04fdce809a8772dcfd5a34cc86340fb1eb405930 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the documentation root importable so autosummary can find modules
# that live next to this conf.py.
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'test'
copyright = '2021, Some guy'
author = 'Some guy'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Only the autosummary extension is enabled here.
extensions = [
    "sphinx.ext.autosummary",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 34.62963 | 79 | 0.660963 |
dda2fa83646793c1ab34e2eddcaabff9b78f21ce | 26,222 | py | Python | server/apps/streamalias/tests/test_stream_alias_tap_api.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/streamalias/tests/test_stream_alias_tap_api.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/streamalias/tests/test_stream_alias_tap_api.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | import datetime
import json
import dateutil.parser
from django.utils import timezone
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from apps.org.models import Org
from apps.physicaldevice.models import Device
from apps.project.models import Project
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.models import StreamData
from apps.streamevent.models import StreamEventData
from apps.utils.test_util import TestMixin
from ..models import *
class StreamAliasTapAPITestCase(TestMixin, APITestCase):
def setUp(self):
    """Build the shared fixture for the Stream Alias Tap API tests.

    Layout:
      - alias ``sa1`` lives in org ``o2`` (created by ``u2``); its taps
        point at streams ``s11``/``s12`` of project ``p1``:
        sat11 (+20s -> s11), sat12 (+60s -> s12), sat13 (+80s -> s11).
      - alias ``sa2`` lives in org ``o3`` (created by ``u1``); its taps
        point at streams ``s21``/``s22`` of project ``p2``:
        sat21 (+0s -> s21), sat22 (+50s -> s22).
      - every stream gets 11 data points at 10 s intervals, and 4 stream
        events are spread across s11/s12/s22.
    """
    # Users, orgs, and device templates come from the TestMixin helpers.
    self.usersTestSetup()
    self.orgTestSetup()
    self.deviceTemplateTestSetup()
    # Both projects share the same project template.
    self.p1.project_template = self.pt1
    self.p1.save()
    self.p2.project_template = self.pt1
    self.p2.save()
    # One variable per project.
    self.v1 = StreamVariable.objects.create_variable(
        name='Var A', project=self.p1, created_by=self.u2, lid=1,
    )
    self.v2 = StreamVariable.objects.create_variable(
        name='Var B', project=self.p2, created_by=self.u3, lid=2,
    )
    # Two devices per project, all using the same device template.
    self.pd11 = Device.objects.create_device(project=self.p1, label='d11', template=self.dt1, created_by=self.u2)
    self.pd12 = Device.objects.create_device(project=self.p1, label='d12', template=self.dt1, created_by=self.u2)
    self.pd21 = Device.objects.create_device(project=self.p2, label='d21', template=self.dt1, created_by=self.u3)
    self.pd22 = Device.objects.create_device(project=self.p2, label='d22', template=self.dt1, created_by=self.u3)
    # Alias in o2 and the two p1 streams its taps will reference.
    self.sa1 = StreamAlias.objects.create(
        name='some alias',
        org=self.o2,
        created_by=self.u2,
    )
    self.s11 = StreamId.objects.create_stream(
        project=self.p1,
        variable=self.v1,
        device=self.pd11,
        created_by=self.u2
    )
    self.s12 = StreamId.objects.create_stream(
        project=self.p1,
        variable=self.v1,
        device=self.pd12,
        created_by=self.u2
    )
    # Alias in o3 and the two p2 streams its taps will reference.
    self.sa2 = StreamAlias.objects.create(
        name='some other alias',
        org=self.o3,
        created_by=self.u1,
    )
    self.s21 = StreamId.objects.create_stream(
        project=self.p2,
        variable=self.v2,
        device=self.pd21,
        created_by=self.u3
    )
    self.s22 = StreamId.objects.create_stream(
        project=self.p2,
        variable=self.v2,
        device=self.pd22,
        created_by=self.u3
    )
    # Reference epoch for all timestamps in the fixture.
    self.dt = dateutil.parser.parse('2016-09-28T10:00:00Z')
    # Taps for sa1: s11 from +20s, s12 from +60s, back to s11 from +80s.
    self.sat11 = StreamAliasTap.objects.create(
        alias=self.sa1,
        timestamp=self.dt + datetime.timedelta(seconds=20),
        stream=self.s11,
        created_by=self.u2
    )
    self.sat12 = StreamAliasTap.objects.create(
        alias=self.sa1,
        timestamp=self.dt + datetime.timedelta(seconds=60),
        stream=self.s12,
        created_by=self.u2
    )
    self.sat13 = StreamAliasTap.objects.create(
        alias=self.sa1,
        timestamp=self.dt + datetime.timedelta(seconds=80),
        stream=self.s11,
        created_by=self.u2
    )
    # Taps for sa2: s21 from +0s, s22 from +50s.
    self.sat21 = StreamAliasTap.objects.create(
        alias=self.sa2,
        timestamp=self.dt,
        stream=self.s21,
        created_by=self.u3
    )
    self.sat22 = StreamAliasTap.objects.create(
        alias=self.sa2,
        timestamp=self.dt + datetime.timedelta(seconds=50),
        stream=self.s22,
        created_by=self.u3
    )
    # 11 data points per stream at 10 s intervals; values are
    # 100*stream_index + point_index so streams are distinguishable.
    for i, n in enumerate(('11', '12', '21', '22')):
        for p in range(0, 11):
            StreamData.objects.create(
                stream_slug=getattr(self, 's{}'.format(n)).slug,
                type='Num',
                timestamp=self.dt + p * datetime.timedelta(seconds=10),
                int_value= 100 * i + p
            )
    # Four events: two on s11 (+0s, +90s), one on s12 (+70s), one on s22 (+60s).
    StreamEventData.objects.create(
        timestamp=self.dt,
        device_timestamp=100,
        stream_slug=self.s11.slug,
        streamer_local_id=1
    )
    StreamEventData.objects.create(
        timestamp=self.dt + datetime.timedelta(seconds=70),
        device_timestamp=170,
        stream_slug=self.s12.slug,
        streamer_local_id=2
    )
    StreamEventData.objects.create(
        timestamp=self.dt + datetime.timedelta(seconds=90),
        device_timestamp=190,
        stream_slug=self.s11.slug,
        streamer_local_id=3
    )
    StreamEventData.objects.create(
        timestamp=self.dt + datetime.timedelta(seconds=60),
        device_timestamp=160,
        stream_slug=self.s22.slug,
        streamer_local_id=4
    )
def tearDown(self):
    """Remove the fixture: taps first, then aliases and streams, then the
    mixin-managed projects/orgs/users (child objects before parents)."""
    StreamAliasTap.objects.all().delete()
    StreamAlias.objects.all().delete()
    StreamId.objects.all().delete()
    self.projectTestTearDown()
    self.orgTestTearDown()
    self.userTestTearDown()
def testGetStreamAliasTap(self):
    """
    Ensure we can call GET on the Stream Alias Tap API.

    Covers: anonymous (401), staff list/detail (empty without ``?staff=1``,
    full with it), and org members seeing only the taps of aliases in
    their own orgs.
    """
    url = reverse('streamaliastap-list')
    # Anonymous requests are rejected.
    resp = self.client.get(url, format='json')
    self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
    # Staff has access to all if staff argument is provided
    ok = self.client.login(email='user1@foo.com', password='pass')
    self.assertTrue(ok)
    # Without ?staff=1, a staff user who is not a member sees nothing.
    resp = self.client.get(url, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    self.assertEqual(deserialized['count'], 0)
    resp = self.client.get(url, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    self.assertEqual(deserialized['count'], 0)
    # With ?staff=1, all 5 taps from the fixture are visible.
    resp = self.client.get(url+'?staff=1', format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    self.assertEqual(deserialized['count'], 5)
    detail_url1 = reverse('streamaliastap-detail', kwargs={'pk': self.sat11.id})
    detail_url2 = reverse('streamaliastap-detail', kwargs={'pk': self.sat21.id})
    # Detail views also require ?staff=1 for a non-member staff user.
    resp = self.client.get(detail_url1, format='json')
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    resp = self.client.get(detail_url2, format='json')
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    resp = self.client.get(detail_url1+'?staff=1', format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    # Serializer exposes alias/stream as slugs and the ISO timestamp.
    self.assertEqual(deserialized['id'], self.sat11.id)
    self.assertEqual(deserialized['alias'], self.sa1.slug)
    self.assertEqual(deserialized['timestamp'], '2016-09-28T10:00:20Z')
    self.assertEqual(deserialized['stream'], self.s11.slug)
    self.client.logout()
    # Normal users have access to their Stream Aliases only
    # u2 (member of o2) sees sa1's 3 taps but not sa2's.
    ok = self.client.login(email='user2@foo.com', password='pass')
    self.assertTrue(ok)
    resp = self.client.get(url, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    self.assertEqual(deserialized['count'], 3)
    resp = self.client.get(detail_url1, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    self.assertEqual(deserialized['id'], self.sat11.id)
    resp = self.client.get(detail_url2, format='json')
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    self.client.logout()
    # u3 (member of o3) sees sa2's 2 taps but not sa1's.
    ok = self.client.login(email='user3@foo.com', password='pass')
    self.assertTrue(ok)
    resp = self.client.get(url, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    self.assertEqual(deserialized['count'], 2)
    resp = self.client.get(detail_url1, format='json')
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    resp = self.client.get(detail_url2, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    deserialized = json.loads(resp.content.decode())
    self.assertEqual(deserialized['id'], self.sat21.id)
    self.client.logout()
def testPostStreamAliasTap(self):
    """
    Ensure we can call POST on the Stream Alias Tap API.

    Covers: anonymous (401), staff create, rejection of an alias/stream
    pair from different orgs (400), member create, and non-member
    rejection (403) both without and with a permission-stripped membership.
    """
    url = reverse('streamaliastap-list')
    # Fixture starts with 5 taps.
    self.assertEqual(StreamAliasTap.objects.all().count(), 5)
    # added by staff
    payload = {
        'alias': self.sa2.slug,
        'timestamp': self.dt + datetime.timedelta(seconds=70),
        'stream': self.s21.slug,
    }
    resp = self.client.post(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
    ok = self.client.login(email='user1@foo.com', password='pass')
    self.assertTrue(ok)
    resp = self.client.post(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    self.assertEqual(StreamAliasTap.objects.all().count(), 6)
    # The new tap records its payload and the requesting user.
    tap = StreamAliasTap.objects.all().last()
    self.assertEqual(tap.alias, self.sa2)
    self.assertEqual(tap.timestamp, self.dt + datetime.timedelta(seconds=70))
    self.assertEqual(tap.stream, self.s21)
    self.assertEqual(tap.created_by, self.u1)
    # inconsistent alias and stream
    # (sa1 is in o2 while s21 belongs to p2/o3's side of the fixture)
    payload = {
        'alias': self.sa1.slug,
        'timestamp': self.dt + datetime.timedelta(seconds=70),
        'stream': self.s21.slug,
    }
    resp = self.client.post(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(StreamAliasTap.objects.all().count(), 6)
    self.client.logout()
    # added by member
    payload = {
        'alias': self.sa2.slug,
        'timestamp': self.dt + datetime.timedelta(seconds=85),
        'stream': self.s22.slug,
    }
    ok = self.client.login(email='user3@foo.com', password='pass')
    self.assertTrue(ok)
    resp = self.client.post(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    self.assertEqual(StreamAliasTap.objects.all().count(), 7)
    tap = StreamAliasTap.objects.all().last()
    self.assertEqual(tap.alias, self.sa2)
    self.assertEqual(tap.timestamp, self.dt + datetime.timedelta(seconds=85))
    self.assertEqual(tap.stream, self.s22)
    self.assertEqual(tap.created_by, self.u3)
    self.client.logout()
    # non-member
    payload = {
        'alias': self.sa2.slug,
        'timestamp': self.dt + datetime.timedelta(seconds=85),
        'stream': self.s22.slug,
    }
    ok = self.client.login(email='user2@foo.com', password='pass')
    self.assertTrue(ok)
    resp = self.client.post(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
    self.assertEqual(StreamAliasTap.objects.all().count(), 7)
    # Even as a member, the request is refused when the
    # can_manage_stream_aliases permission is turned off.
    membership = self.o3.register_user(self.u2, role='m1')
    membership.permissions['can_manage_stream_aliases'] = False
    membership.save()
    resp = self.client.post(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
    self.assertEqual(StreamAliasTap.objects.all().count(), 7)
    self.client.logout()
def testPatchStreamAliasTap(self):
    """
    Ensure we can call PATCH on the Stream Alias Tap API.

    Covers: anonymous (401), staff patch via ``?staff=1``, member patch,
    cross-org alias/stream rejection, and non-member / permission-stripped
    behavior (404 then 403), plus stream-org consistency checks.
    """
    url = reverse('streamaliastap-detail', kwargs={'pk': self.sat11.id})
    # changed by staff
    payload = {
        'timestamp': self.dt,
    }
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
    ok = self.client.login(email='user1@foo.com', password='pass')
    self.assertTrue(ok)
    # Non-member staff user needs ?staff=1 to even see the tap.
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    resp = self.client.patch(url+'?staff=1', data=payload)
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    tap = StreamAliasTap.objects.get(pk=self.sat11.id)
    self.assertEqual(tap.timestamp, payload['timestamp'])
    self.client.logout()
    # changed by member
    # (model instances in the payload are serialized by the test client)
    payload = {
        'stream': self.s12,
    }
    ok = self.client.login(email='user2@foo.com', password='pass')
    self.assertTrue(ok)
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    tap = StreamAliasTap.objects.get(pk=self.sat11.id)
    self.assertEqual(tap.stream, payload['stream'])
    # alias and stream from forbidden org
    payload = {
        'alias': self.sa2.slug,
        'stream': self.s21.slug,
    }
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
    self.client.logout()
    # changed by non-member
    payload = {
        'timestamp': self.dt + datetime.timedelta(seconds=40),
    }
    ok = self.client.login(email='user3@foo.com', password='pass')
    self.assertTrue(ok)
    # Not a member of o2: the tap is invisible (404).
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    # Member of o2 but without the manage permission: visible yet forbidden.
    membership = self.o2.register_user(self.u3, role='m1')
    membership.permissions['can_manage_stream_aliases'] = False
    membership.save()
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
    # Restore the permission for the consistency checks below.
    membership.permissions['can_manage_stream_aliases'] = True
    membership.save()
    # with stream from other org
    payload = {
        'stream': self.s21,
    }
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    # with stream from same org
    payload = {
        'stream': self.s11,
    }
    resp = self.client.patch(url, data=payload)
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.client.logout()
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def testPutStreamAliasTap(self):
"""
Ensure we can call PUT on the Stream Alias Tap API.

Exercises, in order: anonymous (401), staff without ?staff=1 (404),
staff with ?staff=1 (200), org member (200), cross-org alias/stream
(403), non-member (404), member lacking the manage permission (403),
stream from another org (400) and stream from the same org (200).
"""
url = reverse('streamaliastap-detail', kwargs={'pk': self.sat11.id})
# changed by staff
# PUT requires the full representation: alias + stream + timestamp.
payload = {
'alias': self.sat11.alias.slug,
'stream': self.sat11.stream.slug,
'timestamp': self.dt,
}
# anonymous request is rejected
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
ok = self.client.login(email='user1@foo.com', password='pass')
self.assertTrue(ok)
# staff user without the ?staff=1 flag behaves like a plain outsider
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
resp = self.client.put(url+'?staff=1', data=payload)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# re-read from the DB to confirm the update was persisted
tap = StreamAliasTap.objects.get(pk=self.sat11.id)
self.assertEqual(tap.timestamp, payload['timestamp'])
self.client.logout()
# changed by member
payload = {
'alias': self.sat11.alias.slug,
'stream': self.s12.slug,
'timestamp': self.sat11.timestamp,
}
ok = self.client.login(email='user2@foo.com', password='pass')
self.assertTrue(ok)
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
tap = StreamAliasTap.objects.get(pk=self.sat11.id)
self.assertEqual(tap.stream.slug, payload['stream'])
# alias and stream from forbidden org
payload = {
'alias': self.sa2.slug,
'stream': self.s21.slug,
'timestamp': self.dt + datetime.timedelta(seconds=40),
}
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
self.client.logout()
# changed by non-member
payload = {
'alias': self.sat11.alias.slug,
'stream': self.sat11.stream.slug,
'timestamp': self.dt + datetime.timedelta(seconds=40),
}
ok = self.client.login(email='user3@foo.com', password='pass')
self.assertTrue(ok)
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
# becoming a member without the manage permission still yields 403
membership = self.o2.register_user(self.u3, role='m1')
membership.permissions['can_manage_stream_aliases'] = False
membership.save()
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
membership.permissions['can_manage_stream_aliases'] = True
membership.save()
# with stream from other org
payload = {
'alias': self.sat11.alias.slug,
'stream': self.s21.slug,
'timestamp': self.sat11.timestamp,
}
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
# with stream from same org
payload = {
'alias': self.sat11.alias.slug,
'stream': self.s11.slug,
'timestamp': self.sat11.timestamp,
}
resp = self.client.put(url, data=payload)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.client.logout()
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def testDeleteStreamAliasTap(self):
"""
Ensure we can call DELETE on the Stream Alias Tap API.

Verifies that deleting a tap removes ONLY the tap: every stanza
re-checks the counts of StreamAlias, StreamId, StreamVariable,
StreamData and StreamEventData to prove no cascade happened.
The deleted tap is recreated between sub-cases.
"""
url = reverse('streamaliastap-detail', kwargs={'pk': self.sat12.id})
# baseline object counts before any request
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 5)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 1)
# anonymous DELETE is rejected and nothing changes
resp = self.client.delete(url, format='json')
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 5)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 1)
# Staff can delete if staff argument is provided
ok = self.client.login(email='user1@foo.com', password='pass')
self.assertTrue(ok)
resp = self.client.delete(url, format='json')
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 5)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 1)
resp = self.client.delete(url+'?staff=1', format='json')
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
# only the tap count dropped (5 -> 4); everything else is intact
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 4)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 0)
self.client.logout()
# recreate the tap for the next sub-case
self.sat12 = StreamAliasTap.objects.create(
alias=self.sa1,
timestamp=self.dt + datetime.timedelta(seconds=60),
stream=self.s12,
created_by=self.u2
)
url = reverse('streamaliastap-detail', kwargs={'pk': self.sat12.id})
# Member can delete
ok = self.client.login(email='user2@foo.com', password='pass')
self.assertTrue(ok)
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 5)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 1)
resp = self.client.delete(url, format='json')
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 4)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 0)
self.client.logout()
# recreate the tap again for the non-member sub-case
self.sat12 = StreamAliasTap.objects.create(
alias=self.sa1,
timestamp=self.dt + datetime.timedelta(seconds=60),
stream=self.s12,
created_by=self.u2
)
url = reverse('streamaliastap-detail', kwargs={'pk': self.sat12.id})
# Non-Member can't delete
ok = self.client.login(email='user3@foo.com', password='pass')
self.assertTrue(ok)
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 5)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 1)
resp = self.client.delete(url, format='json')
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 5)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 1)
# Permissions are required to delete stream alias
membership = self.o2.register_user(self.u3, role='m1')
membership.permissions['can_manage_stream_aliases'] = False
membership.save()
resp = self.client.delete(url, format='json')
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(StreamAlias.objects.all().count(), 2)
self.assertEqual(StreamAliasTap.objects.all().count(), 5)
self.assertEqual(StreamId.objects.all().count(), 4)
self.assertEqual(StreamVariable.objects.all().count(), 4)
self.assertEqual(StreamData.objects.all().count(), 44)
self.assertEqual(StreamEventData.objects.all().count(), 4)
self.assertEqual(StreamAliasTap.objects.filter(pk=self.sat12.id).count(), 1)
self.client.logout()
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def testFilter(self):
"""
Filtering the tap list with ?target=<alias slug> returns only the
taps belonging to that alias; without it all visible taps appear.
"""
url = reverse('streamaliastap-list')
ok = self.client.login(email='user2@foo.com', password='pass')
self.assertTrue(ok)
# create a fresh alias with a single tap to filter on
sa3 = StreamAlias.objects.create(
name='new alias',
org=self.o2,
created_by=self.u2,
)
sat31 = StreamAliasTap.objects.create(
alias=sa3,
timestamp=self.dt,
stream=self.s11,
created_by=self.u2
)
# unfiltered: all 4 taps visible to this member
resp = self.client.get(url, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
deserialized = json.loads(resp.content.decode())
self.assertEqual(deserialized['count'], 4)
resp = self.client.get(url+'?target={}'.format(self.sa1.slug), format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
deserialized = json.loads(resp.content.decode())
self.assertEqual(deserialized['count'], 3)
# filtering by the new alias returns exactly the tap created above
resp = self.client.get(url+'?target={}'.format(sa3.slug), format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
deserialized = json.loads(resp.content.decode())
self.assertEqual(deserialized['count'], 1)
self.assertEqual(deserialized['results'][0]['alias'], sa3.slug)
self.assertEqual(deserialized['results'][0]['timestamp'], '2016-09-28T10:00:00Z')
self.assertEqual(deserialized['results'][0]['stream'], self.s11.slug)
self.client.logout()
| 39.372372 | 117 | 0.627107 |
730708d87b1f5d4d3790a3a9c51e468e3e6e76dc | 4,747 | py | Python | test_lpcnet.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | null | null | null | test_lpcnet.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | 1 | 2020-06-17T12:07:27.000Z | 2020-06-17T12:07:27.000Z | test_lpcnet.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
'''Copyright (c) 2018 Mozilla
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# LPCNet synthesis script: reads a binary feature file and writes 16-bit PCM
# samples produced frame-by-frame by the pretrained LPCNet decoder.
# NOTE(review): leading indentation of the loop bodies was lost in this dump;
# lines are reproduced verbatim.
import lpcnet
import sys
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from ulaw import ulaw2lin, lin2ulaw
import keras.backend as K
import h5py
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# cap GPU memory so several synthesis jobs can share one device
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
set_session(tf.Session(config=config))
model, enc, dec = lpcnet.new_lpcnet_model()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()
#argc = sys.argc
#if False and argc == 3:
# print("using official mode 55 features")
# feature_file = sys.argv[1]
# out_file = sys.argv[2]
# mode = "vocoder"
#else:
# CLI: <mode> <feature_file> <out_file>; mode selects 55-feature "vocoder"
# input or the 20-feature cepstral path (LPC recomputed from cepstrum).
mode = sys.argv[1]
feature_file = sys.argv[2]
out_file = sys.argv[3]
frame_size = 160
nb_frames = 1
nb_used_features = model.nb_used_features
if mode == "vocoder":
print("using official mode 55 features")
nb_features = 55
else:
from lib import LPCNet
nb_features = 20
features = np.fromfile(feature_file, dtype='float32')
features = np.resize(features, (-1, nb_features))
feature_chunk_size = features.shape[0]
pcm_chunk_size = frame_size*feature_chunk_size
features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
# pad 20-feature input up to the model's expected width; pitch params are
# copied into the tail slots and the cepstral band 18:36 is zeroed
if mode != "lpcnet":
extra_features = np.zeros((nb_frames, feature_chunk_size, nb_used_features - nb_features))
extra_features[:, :, -2:] = features[:, :, 18:20]
features = np.concatenate((features, extra_features), axis=-1)
features[:, :, 18:36] = 0
periods = (.1 + 50*features[:,:,36:37]+100).astype('int16')
model.load_weights('pretrained_model/lpcnet18_384_10_G16_120.h5')
order = 16
pcm = np.zeros((nb_frames*pcm_chunk_size, ))
fexc = np.zeros((1, 1, 2), dtype='float32')
iexc = np.zeros((1, 1, 1), dtype='int16')
state1 = np.zeros((1, model.rnn_units1), dtype='float32')
state2 = np.zeros((1, model.rnn_units2), dtype='float32')
mem = 0
coef = 0.85
fout = open(out_file, 'wb')
# skip the first order+1 samples: the LPC predictor needs a full history
skip = order + 1
for c in range(0, nb_frames):
cfeat = enc.predict([features[c:c+1, :, :nb_used_features], periods[c:c+1, :, :]])
for fr in range(0, feature_chunk_size):
print("====>", fr, feature_chunk_size)
f = c*feature_chunk_size + fr
if mode == "lpcnet":
a = features[c, fr, nb_features-order:]
else:
a = LPCNet.lpc_from_cepstrum(features[c, fr, :18])
for i in range(skip, frame_size):
# linear prediction from the last `order` output samples
pred = -sum(a*pcm[f*frame_size + i - 1:f*frame_size + i - order-1:-1])
fexc[0, 0, 1] = lin2ulaw(pred)
p, state1, state2 = dec.predict([fexc, iexc, cfeat[:, fr:fr+1, :], state1, state2])
#Lower the temperature for voiced frames to reduce noisiness
p *= np.power(p, np.maximum(0, 1.5*features[c, fr, 37] - .5))
p = p/(1e-18 + np.sum(p))
#Cut off the tail of the remaining distribution
p = np.maximum(p-0.002, 0).astype('float64')
p = p/(1e-8 + np.sum(p))
# sample the excitation, reconstruct the sample, write de-emphasized PCM
iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))
pcm[f*frame_size + i] = pred + ulaw2lin(iexc[0, 0, 0])
fexc[0, 0, 0] = lin2ulaw(pcm[f*frame_size + i])
mem = coef*mem + pcm[f*frame_size + i]
#print(mem)
np.array([np.round(mem)], dtype='int16').tofile(fout)
skip = 0
| 35.962121 | 112 | 0.684222 |
7274e0fae6b0446c210011a7a6f1abb3dcdcffac | 1,026 | py | Python | src/psiz/utils/expand_dim_repeat.py | greenfieldvision/psiz | 37068530a78e08792e827ee55cf55e627add115e | [
"Apache-2.0"
] | 21 | 2020-04-03T21:10:05.000Z | 2021-12-02T01:31:11.000Z | src/psiz/utils/expand_dim_repeat.py | greenfieldvision/psiz | 37068530a78e08792e827ee55cf55e627add115e | [
"Apache-2.0"
] | 14 | 2020-04-10T00:48:02.000Z | 2021-05-25T18:06:55.000Z | psiz/utils/expand_dim_repeat.py | rgerkin/psiz | d540738462b6436a08a472d5e349ca2b813e6d47 | [
"Apache-2.0"
] | 4 | 2020-10-13T16:46:14.000Z | 2021-11-10T00:08:47.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of utility functions.
Functions:
expand_dim_repeat: Repeat Tensor along a newly inserted axis.
"""
import tensorflow as tf
def expand_dim_repeat(x, n_repeat, axis=1):
    """Insert a new axis at *axis* and tile the tensor *n_repeat* times along it."""
    return tf.repeat(tf.expand_dims(x, axis=axis), n_repeat, axis=axis)
| 34.2 | 78 | 0.680312 |
5c0277649b402e4684e84337efa846db94cbb00b | 26,551 | py | Python | fixture/testhelpersm_search.py | IrinaSlobodchikova/marker | 72f981134fb025a94348cd2bc829fa8430a01372 | [
"Apache-2.0"
] | null | null | null | fixture/testhelpersm_search.py | IrinaSlobodchikova/marker | 72f981134fb025a94348cd2bc829fa8430a01372 | [
"Apache-2.0"
] | null | null | null | fixture/testhelpersm_search.py | IrinaSlobodchikova/marker | 72f981134fb025a94348cd2bc829fa8430a01372 | [
"Apache-2.0"
] | null | null | null | import re
from datetime import datetime
from random import randrange
import calendar
import time
from fixture.testhelpersm import testHelperSM
class testHelperSMSearch:
def __init__(self, app):
"""Store the application fixture; its ``wd`` attribute is the Selenium WebDriver."""
self.app = app
def current_date_time_day(self):
    """Return the current day of the month as a zero-padded string ("01".."31")."""
    return datetime.now().strftime("%d")
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def find_region(self):
"""Locate the region checkbox and the search button.

NOTE(review): this only calls find_element_by_xpath — no .click() —
so as written it performs no action; confirm whether clicks are missing.
"""
wd = self.app.wd
wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[2]/label")
wd.find_element_by_xpath("//form[@id='frmSearch']//button[.='Поиск']")
def find_region2(self, reg_name):
"""Open the region aggregate, search for *reg_name*, apply the filter and search."""
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@id='aggregatesPlaceholder']/table/tbody/tr/td[2]/div/div/div[1]/span[2]").click()
wd.find_element_by_xpath("//div[@id='mCSB_6_container']/div/ul/li[20]/label").click()
wd.find_element_by_id("aggSearchText").click()
wd.find_element_by_id("aggSearchText").clear()
wd.find_element_by_id("aggSearchText").send_keys("%s" % reg_name)
wd.find_element_by_id("aggSearch").click()
# toggle/select entries in the filtered result list (hard-coded rows 6 and 7)
wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/label").click()
wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/span[3]").click()
wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/label").click()
wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[7]/label").click()
wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
self.app.wait_smBlock(600)
self.press_search_button()
def find_torgovaya_ploschadka(self, name):
"""Filter by trading platform *name*; 'ЕЭТП' uses a different result-row xpath."""
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@id='additionalAggregatesPlaceholder']/table/tbody/tr/td[3]/div[2]/div/div[1]/span[2]").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_id("aggSearchText").click()
wd.find_element_by_id("aggSearchText").clear()
wd.find_element_by_id("aggSearchText").send_keys("%s" % name)
wd.find_element_by_id("aggSearch").click()
self.app.wait_sm_artefact_Block(10)
if name == "ЕЭТП":
wd.find_element_by_xpath("//div[@id='mCSB_12_container']/div/ul/li[2]/label").click()
else:
wd.find_element_by_css_selector("label.checkbox-lbl").click()
wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
def find_region3(self):
"""Tick a random region (row 1..23) in the region aggregate, apply, and search."""
wd = self.app.wd
self.app.wait_smBlock(600)
i = randrange(24)
wd.find_element_by_xpath("//div[@id='aggregatesPlaceholder']/table/tbody/tr[2]/td[1]/div/div/div[1]/span[2]").click()
self.app.wait_sm_artefact_Block(10)
if i > 0:
#element = wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i)
#ActionChains(wd).move_to_element(element).perform()
wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i).click()
else:
# randrange(24) can return 0 (no such row); fall back to row 2
i = 2
wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i).click()
wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
self.app.wait_smBlock(20)
self.press_search_button()
def find_in_container_number(self, range_container_numbers, container_number, i):
    """Tick checkbox row *i* inside scroll container *container_number*.

    Passing 0 for *i* picks a random row 1..3; passing 0 for
    *container_number* picks a random container 1..range_container_numbers-1.
    Returns the (possibly randomized/clamped) ``(i, ct)`` actually used.

    Improvements over the original: removed the dead local ``spicok``,
    collapsed the copy-pasted 8-branch elif chain for containers 3..10
    into one parameterized branch, and narrowed the bare ``except:``.
    """
    wd = self.app.wd
    self.app.wait_smBlock(600)
    if i == 0:
        i = randrange(1, 4, 1)
    if container_number == 0:
        ct = randrange(1, range_container_numbers, 1)
    else:
        ct = container_number
    self.expand_show_hide()
    if i > 0 and ct > 0:
        if ct == 1:
            # container 1 only has rows 1..2: click rows 1-2 as-is,
            # clamp row 3 down to 2, and (as before) do nothing for i > 3
            if i < 3:
                wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
            if i == 3:
                i = 2
                wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
        elif ct == 2:
            # container 2 sometimes renders under the mCSB_1 id; fall back
            try:
                wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[%s]/label" % str(i)).click()
            except Exception:
                wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
        elif 3 <= ct <= 10:
            # containers 3..10 all share the same xpath pattern
            wd.find_element_by_xpath("//div[@id='mCSB_%s_container']/ul/li[%s]/label" % (str(ct), str(i))).click()
    else:
        # invalid row/container: fall back to container 2, row 2
        i = 2
        wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[%s]/label" % str(i)).click()
    return i, ct
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def press_bottom_row(self, range_container_numbers, container_number, i):
"""Scroll container *container_number* down one step via its scrollbar arrow."""
wd = self.app.wd
self.app.wait_smBlock(600)
#wd.find_element_by_xpath("//div[@id='mCSB_6_scrollbar_vertical']/a[2]").click()
self.expand_show_hide()
ct = container_number
if i > 0 and ct > 0 and ct < range_container_numbers:
wd.find_element_by_xpath("//div[@id='mCSB_%s_scrollbar_vertical']/a[2]" % str(ct)).click()
# hard wait for the scroll animation to finish
time.sleep(4)
def check_615_can_be_selected(self, range_container_numbers, container_number, row_number):
"""Scroll (up to 19 times) until element 615 becomes visible.

NOTE(review): always returns True, even when the element never became
visible after 19 tries — callers cannot detect failure.
"""
tr = 1
if not self.is_615_visible(row_number):
while not self.is_615_visible(row_number) and tr < 20:
self.press_bottom_row(range_container_numbers, container_number, row_number)
tr = tr + 1
return True
def is_615_visible(self, i):
"""Return True if row *i* of container 6 can be clicked (clicked twice to undo)."""
self.app.wait_smBlock(600)
wd = self.app.wd
try:
wd.find_element_by_xpath("//div[@id='mCSB_6_container']/ul/li[%s]/label" % str(i)).click()
wd.find_element_by_xpath("//div[@id='mCSB_6_container']/ul/li[%s]/label" % str(i)).click()
return True
except:
return False
def expand_show_hide(self):
"""Expand the advanced-search panel via the 'Показать/скрыть' toggle if collapsed."""
wd = self.app.wd
if not self.is_sm_advSearch_is_displayed():
# the toggle link may appear once or twice depending on the page state
if len(wd.find_elements_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']")) < 2:
wd.find_element_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']").click()
else:
wd.find_element_by_xpath("//div[@id='advSearch']/div[2]/a").click()
def press_search_button(self):
"""Click the main search ('Поиск') button of the search form."""
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//form[@id='frmSearch']//button[.='Поиск']").click()
def is_sm_advSearch_is_displayed(self):
    """Return True if the advanced-search panel is expanded (CSS display == 'block').

    Bug fix: the original fell through and implicitly returned None when
    the property had any other value; now a real bool is always returned
    (backward compatible — callers only test truthiness).
    """
    try:
        display = self.app.wd.find_element_by_id("advSearchContent").value_of_css_property("display")
        return display == 'block'
    except Exception:
        # element missing / not rendered -> panel is not shown
        return False
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def find_zakazchik_for_purchases_list(self):
"""Filter the purchases list by a random customer matching 'администрация'."""
wd = self.app.wd
self.app.wait_smBlock(600)
i = randrange(24)
wd.find_element_by_xpath(
"//div[@id='aggregatesPlaceholder']/table/tbody/tr[1]/td[3]/div[2]/div/div[1]/span[2]").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_id("aggSearchText").click()
wd.find_element_by_id("aggSearchText").clear()
wd.find_element_by_id("aggSearchText").send_keys("администрация")
wd.find_element_by_id("aggSearch").click()
self.app.wait_sm_artefact_Block(10)
if i > 0:
wd.find_element_by_xpath("//div[@id='mCSB_12_container']/div/ul/li[%s]/label" % i).click()
else:
# randrange(24) can return 0 (no such row); fall back to row 2
i = 2
wd.find_element_by_xpath("//div[@id='mCSB_12_container']/div/ul/li[%s]/label" % i).click()
wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
self.app.wait_smBlock(600)
self.press_search_button()
# ! not work
def search_in_opened_container(self):
"""Open a random aggregate widget and apply its filter.

NOTE(review): marked '! not work' by the author — no item is selected
before the filter is applied (see the inline Russian TODO below).
"""
wd = self.app.wd
self.app.wait_smBlock(600)
self.expand_show_hide()
i = randrange(1, 24, 1)
c = len(wd.find_elements_by_css_selector("span.agg-widget_btn"))
ct = randrange(c)
wd.find_elements_by_css_selector("span.agg-widget_btn")[ct].click()
self.app.wait_sm_artefact_Block(10)
#найти как кликнуть на элементе
wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
self.app.wait_smBlock(600)
self.press_search_button()
def get_artef_parametrs(self, ct):
"""Read a parameter from the first row of aggregate container *ct*.

NOTE(review): the local ``results`` is computed but never used.
Falls back from span[3] to span[2] when the row has fewer spans.
"""
wd = self.app.wd
self.app.wait_smBlock(600)
for row in wd.find_elements_by_xpath("//div[@id='mCSB_%s_container']/ul/li[1]" % ct):
cells = row.find_elements_by_tag_name("span")
results = cells[0].find_element_by_tag_name("em").text
try:
parametr = cells[3].text
except:
parametr = cells[2].text
return parametr
def get_artef_param(self, ct):
"""Thin wrapper around get_artef_parametrs (the local ``wd`` is unused)."""
wd = self.app.wd
param = self.get_artef_parametrs(ct)
return param
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def get_table_parametrs(self, i, s):
"""Scrape customer/publication/supplier fields from results-table row *i*.

NOTE(review): cell_in_table_res_with_price returns (href, price, text),
so the first three names below are misleading — ``zakazchik_summ``
actually receives the href, ``href_zakazchik`` the text. Verify callers.
"""
wd = self.app.wd
self.app.wait_smBlock(600)
for row in wd.find_elements_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[%s]" % i):
cells = row.find_elements_by_tag_name("td")
zakazchik_summ, zakazchik, href_zakazchik = self.cell_in_table_res_with_price(cells, s)
try:
publication_name = cells[3].find_element_by_tag_name("a").text.rstrip()
except:
publication_name = 0
try:
href_publication = cells[3].find_element_by_tag_name("a").get_attribute("href")
except:
href_publication = 0
try:
poctavschik_summ = cells[4].find_element_by_css_selector("p.price").text.rstrip()
except:
poctavschik_summ = 0
try:
poctavschik = cells[4].find_element_by_tag_name("a").text.rstrip()
except:
poctavschik = 0
try:
href_poctavschik = cells[4].find_element_by_tag_name("a").get_attribute("href")
except:
href_poctavschik = 0
period = cells[6].text.rstrip()
return zakazchik_summ, zakazchik, href_zakazchik, publication_name, href_publication, poctavschik_summ, poctavschik, href_poctavschik, period
def get_table_parametrs2(self, i):
"""Scrape (href, price, text) of every cell 1..7 of results-table row *i*."""
wd = self.app.wd
self.app.wait_smBlock(600)
for row in wd.find_elements_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[%s]" % i):
cells = row.find_elements_by_tag_name("td")
cell1_href, cell1_price, cell1_text = self.cell_in_table_res_with_price(cells, 1)
cell2_href, cell2_price, cell2_text = self.cell_in_table_res_with_price(cells, 2)
cell3_href, cell3_price, cell3_text = self.cell_in_table_res_with_price(cells, 3)
cell4_href, cell4_price, cell4_text = self.cell_in_table_res_with_price(cells, 4)
cell5_href, cell5_price, cell5_text = self.cell_in_table_res_with_price(cells, 5)
cell6_href, cell6_price, cell6_text = self.cell_in_table_res_with_price(cells, 6)
cell7_href, cell7_price, cell7_text = self.cell_in_table_res_with_price(cells, 7)
return cell1_price, cell1_text, cell1_href, cell2_text, cell2_href, cell3_text, cell3_href, cell4_price, \
cell4_text, cell4_href, cell5_text, cell5_href, cell6_text, cell6_href, cell7_text, cell7_href
#первый параметр (номер строки, если 0 - случайный выбор), второй номер колонки в таблице
# (first parameter: row number, 0 = random choice; second: table column number)
def get_one_table_parametr(self, i, s):
"""Return (href, price, text) of column *s* in row *i*, retrying rows until a link is found.

NOTE(review): returns None implicitly when no link is found within
40 attempts — callers unpacking the tuple would raise TypeError.
"""
wd = self.app.wd
self.app.wait_smBlock(600)
td = 1
cell1_href = 0
i2 = 1
while cell1_href == 0 and td < 40:
# first 20 attempts: random (or the requested) row
if td <= 20:
if i == 0:
i1 = randrange(51)
else:
i1 = i
for row in wd.find_elements_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[%s]" % i1):
cells = row.find_elements_by_tag_name("td")
cell1_href, cell1_price, cell1_text = self.cell_in_table_res_with_price(cells, s)
td = td + 1
if cell1_href != 0 and td < 20:
return cell1_href, cell1_price, cell1_text
# attempts 21..39: scan rows sequentially from the top
if td > 20 and td < 40:
for row in wd.find_elements_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[%s]" % i2):
cells = row.find_elements_by_tag_name("td")
cell1_href, cell1_price, cell1_text = self.cell_in_table_res_with_price(cells, s)
td = td + 1
i2 = i2 + 1
if cell1_href != 0 and td < 20:
return cell1_href, cell1_price, cell1_text
def cell_in_table_res_with_price(self, cells, i):
"""Extract (href, price, text) from table cell *i*; each field is 0 if absent."""
try:
cell1_price = cells[i].find_element_by_css_selector("p.price").text.rstrip()
except:
cell1_price = 0
try:
cell1_text = cells[i].find_element_by_tag_name("a").text.rstrip()
except:
cell1_text = 0
try:
cell1_href = cells[i].find_element_by_tag_name("a").get_attribute("href")
except:
cell1_href = 0
return cell1_href, cell1_price, cell1_text
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def get_table_param(self, ct):
"""Wrapper around get_table_parametrs.

NOTE(review): get_table_parametrs takes (i, s) but is called here with a
single argument — this raises TypeError if ever executed. The local
``wd`` is also unused. Confirm whether this helper is dead code.
"""
wd = self.app.wd
param = self.get_table_parametrs(ct)
return param
def is_smresult_not_0(self):
"""Return True when the results counter reads something other than '0'."""
try:
text = self.get_total_results()
if text != '0':
return True
except:
return False
def check_results(self):
"""Return the total results count as a string, or '0' when there are none."""
self.app.wait_smBlock(900)
if self.is_smresult_not_0():
result = self.get_total_results()
return result
else:
return '0'
def get_total_results(self):
"""Read the results counter from the panel header, stripped of spaces.

The first 13 characters of the header (the fixed label) are dropped
before the numeric part is cleaned via clear_result.
"""
wd = self.app.wd
results = wd.find_element_by_xpath("//div[@class='panel_header']/h2").get_attribute("textContent")
#clear_result = wd.find_element_by_xpath("//div[@class='panel_header']/h2").get_attribute("textContent")[13:len(results)]
clear_result = results[13:len(results)]
return self.clear_result(clear_result)
def select_all_50(self):
"""Tick the 'select all items' checkbox, double-checking it really got set."""
wd = self.app.wd
wd.find_element_by_xpath("//label[@for='allItemsCb']").click()
if not wd.find_element_by_id("allItemsCb").is_selected():
wd.find_element_by_id("allItemsCb").click()
def clear_result(self, s):
    """Return *s* coerced to str with every space character removed."""
    return str(s).replace(" ", "")
def clear_spase_result(self, s):
    """Return *s* coerced to str with every space character stripped out."""
    return "".join(str(s).split(" "))
# NOTE(review): leading indentation was lost in this dump; lines kept verbatim.
def ensure_link_work(self):
"""Return the stripped text of the page's main <h1 class="clip"> header."""
wd = self.app.wd
header = wd.find_element_by_css_selector("h1.clip").text
return header.rstrip()
def ensure_link_type2_work(self):
"""Return the first 8 characters of the page's <h2> header."""
wd = self.app.wd
header = wd.find_element_by_css_selector("h2").text
return header[0:8]
def compare_company_name(self):
"""Return (card header minus its 10-char prefix, short-name cell) for comparison."""
wd = self.app.wd
self.app.wait_smBlock(600)
current_text_in_header2 = wd.find_element_by_xpath("//div[@class='card-hdr_title']/h1").text.rstrip()
current_text_in_header = current_text_in_header2[10:len(current_text_in_header2)]
short_name_text = wd.find_element_by_xpath("//div[@id='main']/div[1]/table/tbody/tr[2]/td[2]").text.rstrip()
return current_text_in_header, short_name_text
# доделать
# (to finish — author's TODO)
def compare_lot(self):
"""Scrape the lot card: header, name, period, prices and the customer/seller pair.

Price strings have their leading 4 characters (currency prefix) stripped.
"""
wd = self.app.wd
self.app.wait_smBlock(600)
current_text_in_header2 = wd.find_element_by_xpath("//div[@class='card-hdr_title']/h1").text.rstrip()
current_text_in_header = current_text_in_header2[10:len(current_text_in_header2)]
short_name_text = wd.find_element_by_xpath("//div[@id='main']/div[1]/table/tbody/tr[1]/td[2]").text.rstrip()
period = wd.find_element_by_xpath("//div[@id='main']/div[1]/table/tbody/tr[6]/td[2]").text.rstrip()
price2 = wd.find_element_by_xpath("//div[@id='main']/div[1]/table/tbody/tr[7]/td[2]").text.rstrip()
price = price2[4:len(price2)]
customer = wd.find_element_by_xpath("//div[@id='customersInfo']/div/table/tbody/tr/td[1]/a").text.rstrip()
customer_price2 = wd.find_element_by_xpath("//div[@id='customersInfo']/div/table/tbody/tr/td[2]/strong").text.rstrip()
customer_price = customer_price2[4:len(customer_price2)]
seller = wd.find_element_by_xpath("//div[@id='sellerInfo']/div/table/tbody/tr/td[1]/a").text.rstrip()
seller_price2 = wd.find_element_by_xpath("//div[@id='sellerInfo']/div/table/tbody/tr/td[2]/strong").text.rstrip()
seller_price = seller_price2[4:len(seller_price2)]
return current_text_in_header, short_name_text, period, price, customer, customer_price, seller, seller_price
def find_company_by_fio(self, s):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_id("SearchParams.PersonsSearchParams.FioTextSearch").send_keys(s)
def find_company_by_email(self, s):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_id("SearchParams.PersonsSearchParams.WebTextSearch").send_keys(s)
def find_company_by_phone(self, s):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_id("SearchParams.PersonsSearchParams.PhoneTextSearch").send_keys(s)
def find_company_by_name(self, s):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_id("SearchParams.PersonsSearchParams.TextSearch").send_keys(s)
def enter_text_in_seach_field(self, s):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_id("SearchParams.TextSearch").click()
wd.find_element_by_id("SearchParams.TextSearch").clear()
wd.find_element_by_id("SearchParams.TextSearch").send_keys(s)
def select_data_nachala(self, i, t):
    """Pick a start-date ("дата начала") period option in the search form.

    i -- visible label of the drop-down option.  When it is the custom
         period option ('Выбрать период...'), t chooses the range:
         t == 0 -> yesterday..today; t == 1 -> a random range in the month.
    """
    driver = self.app.wd
    self.app.wait_smBlock(600)
    driver.find_element_by_css_selector("i.fa.fa-chevron-down").click()
    driver.find_element_by_xpath("//form[@id='frmSearch']//label[.='%s']" % i).click()
    if i != 'Выбрать период...':
        return
    today = self.current_date_time_day()
    if t == 0:
        # yesterday .. today
        first, last = int(today) - 1, today
    elif t == 1:
        # an arbitrary range inside the current month
        first, last = randrange(1, int(today)), randrange(int(today), 28)
    else:
        return
    driver.find_element_by_id("SearchParams_ActivationDateFrom").click()
    driver.find_element_by_link_text("%s" % first).click()
    driver.find_element_by_id("SearchParams_ActivationDateTo").click()
    driver.find_element_by_link_text("%s" % last).click()
def select_data_okonchaniya(self, i):
    """Pick an end-date ("дата окончания") period option in the search form."""
    driver = self.app.wd
    self.app.wait_smBlock(600)
    driver.find_element_by_xpath(
        "//form[@id='frmSearch']/div/div[4]/div[1]/div[4]/div[2]/div/p/label/i").click()
    # Options with a fixed position in the drop-down list.
    fixed_items = {'Сегодня': 2, 'За весь период': 1}
    if i == 'Выбрать период...':
        first = randrange(1, 15)
        last = randrange(15, 28)
        driver.find_element_by_xpath(
            "//form[@id='frmSearch']/div/div[4]/div[1]/div[4]/div[2]/div/div/ul/li[8]/label").click()
        driver.find_element_by_id("SearchParams_ExpirationDateFrom").click()
        driver.find_element_by_link_text("%s" % first).click()
        driver.find_element_by_id("SearchParams_ExpirationDateTo").click()
        driver.find_element_by_link_text("%s" % last).click()
    elif i in fixed_items:
        driver.find_element_by_xpath(
            "//form[@id='frmSearch']/div/div[4]/div[1]/div[4]/div[2]/div/div/ul/li[%s]/label"
            % fixed_items[i]).click()
    else:
        driver.find_element_by_xpath("//form[@id='frmSearch']//label[.='%s']" % i).click()
def select_first_publish_date(self, i, t):
    """Pick the first-publication-date filter by 1-based menu index.

    i -- 1 = 'За весь период', 2 = 'Сегодня', 3 = 'Вчера',
    4 = 'Последние 7 дней', 5 = 'Текущий месяц', 6 = 'Прошлый месяц',
    7 = 'Текущий квартал', 8 = 'Прошлый квартал', 9 = 'Текущий год',
    10 = 'Прошлый год', 11 = 'Выбрать период...'.
    For i == 11, t == 0 picks yesterday..today and t == 1 a random range.
    """
    driver = self.app.wd
    self.app.wait_smBlock(600)
    driver.find_element_by_xpath(
        "//form[@id='frmSearch']/div/div[4]/div[1]/div[5]/div[2]/div/p/label/i").click()
    if i != 11:
        driver.find_element_by_xpath(
            "//form[@id='frmSearch']/div/div[4]/div[1]/div[5]/div[2]/div/div/ul/li[%s]/label" % i).click()
        return
    today = self.current_date_time_day()
    if t == 0:
        # yesterday .. today
        first, last = int(today) - 1, today
    elif t == 1:
        # an arbitrary range inside the current month
        first, last = randrange(1, int(today)), randrange(int(today), 28)
    else:
        return
    driver.find_element_by_xpath(
        "//form[@id='frmSearch']/div/div[4]/div[1]/div[5]/div[2]/div/div/ul/li[11]/label").click()
    driver.find_element_by_id("SearchParams_PublicationDateFrom").click()
    driver.find_element_by_link_text("%s" % first).click()
    driver.find_element_by_id("SearchParams_PublicationDateTo").click()
    driver.find_element_by_link_text("%s" % last).click()
def set_calendar_value(self, date):
    """Open the custom publication period and pick *date* in the previous
    month for both the 'from' and the 'to' calendar widget."""
    driver = self.app.wd
    self.app.wait_smBlock(600)
    driver.find_element_by_xpath(
        "//form[@id='frmSearch']/div/div[4]/div[1]/div[5]/div[2]/div/div/ul/li[11]/label").click()
    for picker_id in ("SearchParams_PublicationDateFrom", "SearchParams_PublicationDateTo"):
        driver.find_element_by_id(picker_id).click()
        # step one month back in the jQuery-UI date picker
        driver.find_element_by_css_selector("span.ui-icon.ui-icon-circle-triangle-w").click()
        driver.find_element_by_link_text("%s" % date).click()
def select_actual_only(self):
    """Tick the 'only actual' checkbox, forcing it on if the label click
    left it unselected."""
    driver = self.app.wd
    self.app.wait_smBlock(600)
    checkbox_id = "idSearchParams.OnlyActual"
    driver.find_element_by_xpath("//label[@for='%s']" % checkbox_id).click()
    if not driver.find_element_by_id(checkbox_id).is_selected():
        driver.find_element_by_id(checkbox_id).click()
def select_search_in_documents(self):
    # NOTE(review): this body is identical to select_actual_only() -- it clicks
    # the label for, and toggles, the 'idSearchParams.OnlyActual' checkbox and
    # never touches a "search in documents" control. Looks like a copy-paste
    # leftover; confirm the intended element id before relying on this method.
    wd = self.app.wd
    self.app.wait_smBlock(600)
    wd.find_element_by_xpath("//label[@for='idSearchParams.OnlyActual']").click()
    # Force the checkbox on if the label click toggled it off.
    if not wd.find_element_by_id("idSearchParams.OnlyActual").is_selected():
        wd.find_element_by_id("idSearchParams.OnlyActual").click()
def select_spetstorgi(self, i):
    """Choose the special-auctions ("спецторги") filter option.

    i -- 1: 'Не учитывать', 2: 'Только спецторги', 3: 'Исключить спецторги'.
    Any other value only prints a usage hint.
    """
    driver = self.app.wd
    self.app.wait_smBlock(600)
    driver.find_element_by_xpath(
        "//form[@id='frmSearch']/div/div[4]/div[1]/div[8]/div[2]/div/p/label/i").click()
    labels = {1: 'Не учитывать', 2: 'Только спецторги', 3: 'Исключить спецторги'}
    if i in labels:
        driver.find_element_by_xpath(
            "//form[@id='frmSearch']//label[.='%s']" % labels[i]).click()
    else:
        print("incorrect number of item in menu, possible value: 1 - Не учитывать, 2 - Только спецторги, "
              "3 - Исключить спецторги")
def random_value(self, start_num, end_num):
    """Return a random integer from [start_num, end_num) via randrange."""
    return randrange(start_num, end_num)
def get_res_by_ploschadka_for_one_day(self, name_ploschadki, date):
    """Prepare a one-day search for the given trading platform ("площадка").

    Sets both publication-date pickers to *date* (previous month -- see
    set_calendar_value) and fills in the platform name filter.
    The previously assigned-but-unused local ``wd = self.app.wd`` was
    removed; it was never read here.
    """
    self.app.wait_smBlock(600)
    self.set_calendar_value(date)
    self.find_torgovaya_ploschadka(name_ploschadki)
def get_minim_max_avg_value(self, name_ploschadki, day_in_month):
    """Run the platform search for each day 1 .. day_in_month-1 and return
    the check_results() value of the LAST day processed.

    NOTE(review): the original declared an accumulator list ``l`` that was
    never filled, so only the final day's result is returned -- if per-day
    min/max/avg was intended, results must be collected instead; confirm
    with the callers.  Returns None when day_in_month <= 1 (empty range;
    previously this raised NameError).
    """
    self.app.wait_smBlock(600)
    result = None
    for day in range(1, int(day_in_month)):
        self.get_res_by_ploschadka_for_one_day(name_ploschadki, day)
        self.press_search_button()
        result = self.app.testhelpersm.check_results()
    return result
def day_in_month(self):
    """Return the number of days in the current month.

    Fixes the original implementation, which passed the month as a *string*
    (and a hard-coded year 2010) to calendar.monthrange -- raising TypeError
    on every call -- and discarded the result, returning the month number
    instead of a day count.
    """
    now = datetime.now()
    return calendar.monthrange(now.year, now.month)[1]
| 45.231687 | 154 | 0.608 |
a18bd19273e19aa8f31154ddfc09bfaccb068a6f | 1,170 | py | Python | mobi/consts.py | TCastus/mobilite2-back | fc38d3cbed6ebd958c84b1f4f80db633695ab65e | [
"MIT"
] | 2 | 2021-02-17T18:37:25.000Z | 2021-03-04T05:47:06.000Z | mobi/consts.py | TCastus/mobilite2-back | fc38d3cbed6ebd958c84b1f4f80db633695ab65e | [
"MIT"
] | 24 | 2021-03-09T15:20:20.000Z | 2021-06-07T11:53:34.000Z | mobi/consts.py | TCastus/mobilite2-back | fc38d3cbed6ebd958c84b1f4f80db633695ab65e | [
"MIT"
] | 1 | 2021-02-23T15:31:28.000Z | 2021-02-23T15:31:28.000Z | CONTINENTS = (
("AS", "Asie"),
("AF", "Afrique"),
("AdN", "Amerique du Nord"),
("AdS", "Amerique du Sud"),
("EU", "Europe"),
("OC", "Oceanie"),
)
# The constants below are (stored value, human-readable label) pairs --
# presumably Django-style `choices` for model/form fields; confirm against
# the models that import them.

# INSA Lyon engineering departments (French curriculum names).
DEPARTEMENTINSA = (
    ("BB", "Biosciences Biochimie et Biotechnologie"),
    ("BIM", "Bioinformatique et Modélisation"),
    ("GCU", "Génie Civil et Urbanisme"),
    ("GE", "Génie Electrique"),
    ("GEN", "Génie Energétique et Environnement"),
    ("GI", "Génie Industriel"),
    ("GM", "Génie Mécanique"),
    ("IF", "Informatique"),
    ("SGM", "Science et Génie Matériaux"),
    ("TC", "Télécommunications, Services et Usages"),
)
# Mobility type: double degree or exchange.
MOBITYPE = (
    ('DD', 'Double Diplôme'),
    ('E', 'Echange'),
)
# Recurrence period of an expense or payment.
PERIOD = (
    ('Hebdo', 'Hebdomadaire'),
    ('Mensuel', 'Mensuel'),
    ('Trim', 'Trimestriel'),
    ('Sem', 'Semestriel'),
    ('An', 'Annuel')
)
# Demand level for a destination.
ACCESS = (
    ('High', 'Demande forte'),
    ('Medium', 'Demande normale'),
    ('Low', 'Demande faible')
)
# Academic semester(s) of the mobility.
SEMESTER = (
    ('4A-S1', '4A-S1'),
    ('4A-S2', '4A-S2'),
    ('5A-S1', '5A-S1'),
    ('5A-S2', '5A-S2'),
    ('4A', '4A'),
    ('5A', '5A')
)
# Language certification requirement.
LANGUAGES = (
    ('TOEIC', 'TOEIC'),
    ('INC', 'INCONNU'),
    ('AUCUN', 'AUCUN')
)
8e2e60ff484c92e2b9fdd07ab7c2b3fc4fec260e | 67,511 | py | Python | pyaedt/modules/SetupTemplates.py | pyansys/PyAEDT | 312d2d2a6c091dbae4272b6ce3ff489cdd21aa21 | [
"MIT"
] | 12 | 2021-07-01T06:35:12.000Z | 2021-09-22T15:53:07.000Z | pyaedt/modules/SetupTemplates.py | pyansys/PyAEDT | 312d2d2a6c091dbae4272b6ce3ff489cdd21aa21 | [
"MIT"
] | 111 | 2021-07-01T16:02:36.000Z | 2021-09-29T12:36:44.000Z | pyaedt/modules/SetupTemplates.py | pyansys/PyAEDT | 312d2d2a6c091dbae4272b6ce3ff489cdd21aa21 | [
"MIT"
] | 5 | 2021-07-09T14:24:59.000Z | 2021-09-07T12:42:03.000Z | from pyaedt.generic.general_methods import aedt_exception_handler
from pyaedt.generic.DataHandlers import _dict2arg
from collections import OrderedDict
# Building blocks shared by the setup templates below.  Each template is a
# list of (property name, default value) pairs; given the OrderedDict /
# _dict2arg imports at the top of the file, they are presumably converted
# into property dictionaries for AEDT setups -- confirm with the consumers.
meshlink = [("ImportMesh", False)]
autosweep = [("RangeType", "LinearStep"), ("RangeStart", "1GHz"), ("RangeEnd", "10GHz"), ("RangeStep", "1GHz")]
autosweeps = [("Sweep", autosweep)]
multifreq = [("1GHz", [0.02]), ("2GHz", [0.02]), ("5GHz", [0.02])]
sweepsbr = [("RangeType", "LinearStep"), ("RangeStart", "1GHz"), ("RangeEnd", "10GHz"), ("RangeStep", "1GHz")]
sweepssbr = [("Sweep", sweepsbr)]
muoption = [("MuNonLinearBH", True)]
transientelectrostatic = [("SaveField", True), ("Stop", "100s"), ("InitialStep", "0.01s"), ("MaxStep", "5s")]
# Default transient excitation settings for the HFSS transient solver.
transienthfss = [
    ("TimeProfile", "Broadband Pulse"),
    ("HfssFrequency", "1GHz"),
    ("MinFreq", "100MHz"),
    ("MaxFreq", "1GHz"),
    ("NumFreqsExtracted", 401),
    ("SweepMinFreq", "100MHz"),
    ("SweepMaxFreq", "1GHz"),
    ("UseAutoTermination", 1),
    ("SteadyStateCriteria", 0.01),
    ("UseMinimumDuration", 0),
    ("TerminateOnMaximum", 0),
]
# Template for the HFSS auto-adaptive driven setup.
HFSSDrivenAuto = [
    ("IsEnabled", True),
    ("MeshLink", meshlink),
    ("AutoSolverSetting", "Balanced"),
    ("Sweeps", autosweeps),
    ("SaveRadFieldsOnly", False),
    ("SaveAnyFields", True),
    ("Type", "Discrete"),
]
"""HFSS automatic setup properties and default values."""
# Template for the standard HFSS driven-modal/terminal setup.
HFSSDrivenDefault = [
    ("AdaptMultipleFreqs", False),
    ("MultipleAdaptiveFreqsSetup", multifreq),
    ("Frequency", "5GHz"),
    ("MaxDeltaS", 0.02),
    ("PortsOnly", False),
    ("UseMatrixConv", False),
    ("MaximumPasses", 6),
    ("MinimumPasses", 1),
    ("MinimumConvergedPasses", 1),
    ("PercentRefinement", 30),
    ("IsEnabled", True),
    ("MeshLink", meshlink),
    ("BasisOrder", 1),
    ("DoLambdaRefine", True),
    ("DoMaterialLambda", True),
    ("SetLambdaTarget", False),
    ("Target", 0.3333),
    ("UseMaxTetIncrease", False),
    ("PortAccuracy", 2),
    ("UseABCOnPort", False),
    ("SetPortMinMaxTri", False),
    ("UseDomains", False),
    ("UseIterativeSolver", False),
    ("SaveRadFieldsOnly", False),
    ("SaveAnyFields", True),
    ("IESolverType", "Auto"),
    ("LambdaTargetForIESolver", 0.15),
    ("UseDefaultLambdaTgtForIESolver", True),
    ("IE Solver Accuracy", "Balanced"),
]
"""HFSS driven properties and default values."""
# Template for the HFSS eigenmode setup.
HFSSEigen = [
    ("MinimumFrequency", "2GHz"),
    ("NumModes", 1),
    ("MaxDeltaFreq", 10),
    ("ConvergeOnRealFreq", False),
    ("MaximumPasses", 3),
    ("MinimumPasses", 1),
    ("MinimumConvergedPasses", 1),
    ("PercentRefinement", 30),
    ("IsEnabled", True),
    ("MeshLink", meshlink),
    ("BasisOrder", 1),
    ("DoLambdaRefine", True),
    ("DoMaterialLambda", True),
    ("SetLambdaTarget", False),
    ("Target", 0.2),
    ("UseMaxTetIncrease", False),
]
"""HFSS Eigenmode properties and default values."""
# Template for the HFSS transient setup; nests the transienthfss excitation
# defaults under the "Transient" key.
HFSSTransient = [
    ("Frequency", "5GHz"),
    ("MaxDeltaS", 0.02),
    ("MaximumPasses", 20),
    ("UseImplicitSolver", True),
    ("IsEnabled", True),
    ("MeshLink", meshlink),
    ("BasisOrder", -1),
    ("Transient", transienthfss),
]
"""HFSS transient setup properties and default values."""
# Template for the HFSS SBR+ (shooting-and-bouncing rays) setup.
HFSSSBR = [
    ("IsEnabled", True),
    ("MeshLink", meshlink),
    ("IsSbrRangeDoppler", False),
    ("RayDensityPerWavelength", 4),
    ("MaxNumberOfBounces", 5),
    ("RadiationSetup", ""),
    ("PTDUTDSimulationSettings", "None"),
    ("Sweeps", sweepssbr),
    ("ComputeFarFields", False),
]
"""HFSS SBR+ setup properties and default values."""
# Template for the Maxwell transient setup.
MaxwellTransient = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("NonlinearSolverResidual", "0.005"),
    ("ScalarPotential", "Second Order"),
    ("SmoothBHCurve", False),
    ("StopTime", "10000000ns"),
    ("TimeStep", "2000000ns"),
    ("OutputError", False),
    ("UseControlProgram", False),
    ("ControlProgramName", ""),
    ("ControlProgramArg", ""),
    ("CallCtrlProgAfterLastStep", False),
    ("FastReachSteadyState", False),
    ("AutoDetectSteadyState", False),
    ("IsGeneralTransient", True),
    ("IsHalfPeriodicTransient", False),
    ("SaveFieldsType", "None"),
    ("CacheSaveKind", "Count"),
    ("NumberSolveSteps", 1),
    ("RangeStart", "0s"),
    ("RangeEnd", "0.1s"),
]
"""Maxwell transient setup properties and default values."""
# Template for the Maxwell magnetostatic setup.
# NOTE: a duplicated ("PercentError", 1) entry was removed; the list is
# consumed as key/value pairs, where identical duplicate keys collapse to a
# single entry anyway.
Magnetostatic = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("MaximumPasses", 10),
    ("MinimumPasses", 2),
    ("MinimumConvergedPasses", 1),
    ("PercentRefinement", 30),
    ("SolveFieldOnly", False),
    ("PercentError", 1),
    ("SolveMatrixAtLast", True),
    ("UseIterativeSolver", False),
    ("RelativeResidual", 1e-06),
    ("NonLinearResidual", 0.001),
    ("SmoothBHCurve", False),
    ("MuOption", muoption),
]
"""Maxwell magnetostatic setup properties and default values."""
# Template for the Maxwell electrostatic setup.
# NOTE: a duplicated ("PercentError", 1) entry was removed; the list is
# consumed as key/value pairs, where identical duplicate keys collapse to a
# single entry anyway.
Electrostatic = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("MaximumPasses", 10),
    ("MinimumPasses", 2),
    ("MinimumConvergedPasses", 1),
    ("PercentRefinement", 30),
    ("SolveFieldOnly", False),
    ("PercentError", 1),
    ("SolveMatrixAtLast", True),
    ("UseIterativeSolver", False),
    ("RelativeResidual", 1e-06),
    ("NonLinearResidual", 0.001),
]
"""Maxwell electrostatic setup properties and default values."""
# Template for the Maxwell eddy-current setup.
# NOTE: a duplicated ("PercentError", 1) entry was removed; the list is
# consumed as key/value pairs, where identical duplicate keys collapse to a
# single entry anyway.
EddyCurrent = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("MaximumPasses", 6),
    ("MinimumPasses", 1),
    ("MinimumConvergedPasses", 1),
    ("PercentRefinement", 30),
    ("SolveFieldOnly", False),
    ("PercentError", 1),
    ("SolveMatrixAtLast", True),
    ("UseIterativeSolver", False),
    ("RelativeResidual", 1e-5),
    ("NonLinearResidual", 0.0001),
    ("SmoothBHCurve", False),
    ("Frequency", "60Hz"),
    ("HasSweepSetup", False),
    ("SweepSetupType", "LinearStep"),
    ("StartValue", "1e-08GHz"),
    ("StopValue", "1e-06GHz"),
    ("StepSize", "1e-08GHz"),
    ("UseHighOrderShapeFunc", False),
    ("UseMuLink", False),
]
"""Maxwell eddy current setup properties and default values."""
# Template for the Maxwell electric transient setup.
ElectricTransient = [
    # Fixed: was the 1-tuple ("Enabled",), which breaks any key/value-pair
    # consumption of this list; every sibling template uses ("Enabled", True).
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("Tolerance", 0.005),
    ("ComputePowerLoss", False),
    ("Data", transientelectrostatic),
    ("Initial Voltage", "0mV"),
]
"""Maxwell electric transient setup properties and default values."""
SteadyTemperatureAndFlow = [
("Enabled", True),
("Flow Regime", "Laminar"),
("Include Temperature", True),
("Include Flow", True),
("Include Gravity", False),
("Solution Initialization - X Velocity", "0m_per_sec"),
("Solution Initialization - Y Velocity", "0m_per_sec"),
("Solution Initialization - Z Velocity", "0m_per_sec"),
("Solution Initialization - Temperature", "AmbientTemp"),
("Solution Initialization - Turbulent Kinetic Energy", "1m2_per_s2"),
("Solution Initialization - Turbulent Dissipation Rate", "1m2_per_s3"),
("Solution Initialization - Specific Dissipation Rate", "1diss_per_s"),
("Convergence Criteria - Flow", "0.001"),
("Convergence Criteria - Energy", "1e-07"),
("Convergence Criteria - Turbulent Kinetic Energy", "0.001"),
("Convergence Criteria - Turbulent Dissipation Rate", "0.001"),
("Convergence Criteria - Specific Dissipation Rate", "0.001"),
("Convergence Criteria - Discrete Ordinates", "1e-06"),
("IsEnabled", False),
("Radiation Model", "Off"),
("Under-relaxation - Pressure", "0.7"),
("Under-relaxation - Momentum", "0.3"),
("Under-relaxation - Temperature", "1"),
("Under-relaxation - Turbulent Kinetic Energy", "0.8"),
("Under-relaxation - Turbulent Dissipation Rate", "0.8"),
("Under-relaxation - Specific Dissipation Rate", "0.8"),
("Discretization Scheme - Pressure", "Standard"),
("Discretization Scheme - Momentum", "First"),
("Discretization Scheme - Temperature", "Second"),
("Secondary Gradient", False),
("Discretization Scheme - Turbulent Kinetic Energy", "First"),
("Discretization Scheme - Turbulent Dissipation Rate", "First"),
("Discretization Scheme - Specific Dissipation Rate", "First"),
("Discretization Scheme - Discrete Ordinates", "First"),
("Linear Solver Type - Pressure", "V"),
("Linear Solver Type - Momentum", "flex"),
("Linear Solver Type - Temperature", "F"),
("Linear Solver Type - Turbulent Kinetic Energy", "flex"),
("Linear Solver Type - Turbulent Dissipation Rate", "flex"),
("Linear Solver Type - Specific Dissipation Rate", "flex"),
("Linear Solver Termination Criterion - Pressure", "0.1"),
("Linear Solver Termination Criterion - Momentum", "0.1"),
("Linear Solver Termination Criterion - Temperature", "0.1"),
("Linear Solver Termination Criterion - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Termination Criterion - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Termination Criterion - Specific Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Pressure", "0.1"),
("Linear Solver Residual Reduction Tolerance - Momentum", "0.1"),
("Linear Solver Residual Reduction Tolerance - Temperature", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Specific Dissipation Rate", "0.1"),
("Linear Solver Stabilization - Pressure", "None"),
("Linear Solver Stabilization - Temperature", "None"),
("Frozen Flow Simulation", False),
("Sequential Solve of Flow and Energy Equations", False),
("Convergence Criteria - Max Iterations", 100),
]
"""Icepack steady temperature and steady flow setup properties and default values."""
SteadyTemperatureOnly = [
("Enabled", True),
("Flow Regime", "Laminar"),
("Include Temperature", True),
("Include Gravity", False),
("Solution Initialization - X Velocity", "0m_per_sec"),
("Solution Initialization - Y Velocity", "0m_per_sec"),
("Solution Initialization - Z Velocity", "0m_per_sec"),
("Solution Initialization - Temperature", "AmbientTemp"),
("Solution Initialization - Turbulent Kinetic Energy", "1m2_per_s2"),
("Solution Initialization - Turbulent Dissipation Rate", "1m2_per_s3"),
("Solution Initialization - Specific Dissipation Rate", "1diss_per_s"),
("Convergence Criteria - Flow", "0.001"),
("Convergence Criteria - Energy", "1e-07"),
("Convergence Criteria - Turbulent Kinetic Energy", "0.001"),
("Convergence Criteria - Turbulent Dissipation Rate", "0.001"),
("Convergence Criteria - Specific Dissipation Rate", "0.001"),
("Convergence Criteria - Discrete Ordinates", "1e-06"),
("IsEnabled", False),
("Radiation Model", "Off"),
("Under-relaxation - Pressure", "0.7"),
("Under-relaxation - Momentum", "0.3"),
("Under-relaxation - Temperature", "1"),
("Under-relaxation - Turbulent Kinetic Energy", "0.8"),
("Under-relaxation - Turbulent Dissipation Rate", "0.8"),
("Under-relaxation - Specific Dissipation Rate", "0.8"),
("Discretization Scheme - Pressure", "Standard"),
("Discretization Scheme - Momentum", "First"),
("Discretization Scheme - Temperature", "Second"),
("Secondary Gradient", False),
("Discretization Scheme - Turbulent Kinetic Energy", "First"),
("Discretization Scheme - Turbulent Dissipation Rate", "First"),
("Discretization Scheme - Specific Dissipation Rate", "First"),
("Discretization Scheme - Discrete Ordinates", "First"),
("Linear Solver Type - Pressure", "V"),
("Linear Solver Type - Momentum", "flex"),
("Linear Solver Type - Temperature", "F"),
("Linear Solver Type - Turbulent Kinetic Energy", "flex"),
("Linear Solver Type - Turbulent Dissipation Rate", "flex"),
("Linear Solver Type - Specific Dissipation Rate", "flex"),
("Linear Solver Termination Criterion - Pressure", "0.1"),
("Linear Solver Termination Criterion - Momentum", "0.1"),
("Linear Solver Termination Criterion - Temperature", "0.1"),
("Linear Solver Termination Criterion - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Termination Criterion - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Termination Criterion - Specific Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Pressure", "0.1"),
("Linear Solver Residual Reduction Tolerance - Momentum", "0.1"),
("Linear Solver Residual Reduction Tolerance - Temperature", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Specific Dissipation Rate", "0.1"),
("Linear Solver Stabilization - Pressure", "None"),
("Linear Solver Stabilization - Temperature", "None"),
("Sequential Solve of Flow and Energy Equations", False),
("Convergence Criteria - Max Iterations", 100),
]
"""Icepack steady temperature setup properties and default values."""
SteadyFlowOnly = [
("Enabled", True),
("Flow Regime", "Laminar"),
("Include Flow", True),
("Include Gravity", False),
("Solution Initialization - X Velocity", "0m_per_sec"),
("Solution Initialization - Y Velocity", "0m_per_sec"),
("Solution Initialization - Z Velocity", "0m_per_sec"),
("Solution Initialization - Temperature", "AmbientTemp"),
("Solution Initialization - Turbulent Kinetic Energy", "1m2_per_s2"),
("Solution Initialization - Turbulent Dissipation Rate", "1m2_per_s3"),
("Solution Initialization - Specific Dissipation Rate", "1diss_per_s"),
("Convergence Criteria - Flow", "0.001"),
("Convergence Criteria - Energy", "1e-07"),
("Convergence Criteria - Turbulent Kinetic Energy", "0.001"),
("Convergence Criteria - Turbulent Dissipation Rate", "0.001"),
("Convergence Criteria - Specific Dissipation Rate", "0.001"),
("Convergence Criteria - Discrete Ordinates", "1e-06"),
("IsEnabled", False),
("Radiation Model", "Off"),
("Under-relaxation - Pressure", "0.7"),
("Under-relaxation - Momentum", "0.3"),
("Under-relaxation - Temperature", "1"),
("Under-relaxation - Turbulent Kinetic Energy", "0.8"),
("Under-relaxation - Turbulent Dissipation Rate", "0.8"),
("Under-relaxation - Specific Dissipation Rate", "0.8"),
("Discretization Scheme - Pressure", "Standard"),
("Discretization Scheme - Momentum", "First"),
("Discretization Scheme - Temperature", "First"),
("Secondary Gradient", False),
("Discretization Scheme - Turbulent Kinetic Energy", "First"),
("Discretization Scheme - Turbulent Dissipation Rate", "First"),
("Discretization Scheme - Specific Dissipation Rate", "First"),
("Discretization Scheme - Discrete Ordinates", "First"),
("Linear Solver Type - Pressure", "V"),
("Linear Solver Type - Momentum", "flex"),
("Linear Solver Type - Temperature", "F"),
("Linear Solver Type - Turbulent Kinetic Energy", "flex"),
("Linear Solver Type - Turbulent Dissipation Rate", "flex"),
("Linear Solver Type - Specific Dissipation Rate", "flex"),
("Linear Solver Termination Criterion - Pressure", "0.1"),
("Linear Solver Termination Criterion - Momentum", "0.1"),
("Linear Solver Termination Criterion - Temperature", "0.1"),
("Linear Solver Termination Criterion - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Termination Criterion - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Termination Criterion - Specific Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Pressure", "0.1"),
("Linear Solver Residual Reduction Tolerance - Momentum", "0.1"),
("Linear Solver Residual Reduction Tolerance - Temperature", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Specific Dissipation Rate", "0.1"),
("Linear Solver Stabilization - Pressure", "None"),
("Linear Solver Stabilization - Temperature", "None"),
("Frozen Flow Simulation", False),
("Sequential Solve of Flow and Energy Equations", False),
("Convergence Criteria - Max Iterations", 100),
]
"""Icepack steady flow setup properties and default values."""
# Q3D Extractor sub-templates: per-solution-type adaptive pass settings
# (conduction, multipole, DC, capacitance, AC) composed into Matrix.
Q3DCond = [("MaxPass", 10), ("MinPass", 1), ("MinConvPass", 1), ("PerError", 1), ("PerRefine", 30)]
Q3DMult = [("MaxPass", 1), ("MinPass", 1), ("MinConvPass", 1), ("PerError", 1), ("PerRefine", 30)]
Q3DDC = [("SolveResOnly", False), ("Cond", Q3DCond), ("Mult", Q3DMult)]
Q3DCap = [
    ("MaxPass", 10),
    ("MinPass", 1),
    ("MinConvPass", 1),
    ("PerError", 1),
    ("PerRefine", 30),
    ("AutoIncreaseSolutionOrder", True),
    ("SolutionOrder", "High"),
    ("Solver Type", "Iterative"),
]
Q3DAC = [("MaxPass", 10), ("MinPass", 1), ("MinConvPass", 1), ("PerError", 1), ("PerRefine", 30)]
Matrix = [
    ("AdaptiveFreq", "1GHz"),
    ("SaveFields", False),
    ("Enabled", True),
    ("Cap", Q3DCap),
    ("DC", Q3DDC),
    ("AC", Q3DAC),
]
"""Q3D Extractor setup properties and default values."""
# Shared empty defaults for Nexxim analyses.
OutputQuantities = []
NoiseOutputQuantities = []
# Default frequency sweep: 501 linear points from 1 GHz to 5 GHz.
SweepDefinition = [("Variable", "Freq"), ("Data", "LINC 1GHz 5GHz 501"), ("OffsetF1", False), ("Synchronize", 0)]
# Template for the Nexxim linear network (LNA) analysis.
NexximLNA = [
    ("DataBlockID", 16),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "LinearFrequency"),
    ("LinearFrequencyData", [False, 0.1, False, "", False]),
    ("SweepDefinition", SweepDefinition),
]
"""Nexxim linear network setup properties and default values."""
# Template for the Nexxim DC analysis.
NexximDC = [
    ("DataBlockID", 15),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "LinearFrequency"),
]
"""Nexxim DC setup properties and default values."""
# Template for the Nexxim transient analysis; TransientData is
# [time step, stop time].
NexximTransient = [
    ("DataBlockID", 10),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "LinearFrequency"),
    ("TransientData", ["0.1ns", "10ns"]),
    ("TransientNoiseData", [False, "", "", 0, 1, 0, False, 1]),
    ("TransientOtherData", ["default"]),
]
"""Nexxim transient setup properties and default values."""
# Template for the Nexxim QuickEye analysis.
NexximQuickEye = [
    ("DataBlockID", 28),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "QuickEyeAnalysis"),
    ("QuickEyeAnalysis", [False, "1e-9", False, "0", "", True]),
]
# Template for the Nexxim VerifEye (statistical eye) analysis.
NexximVerifEye = [
    ("DataBlockID", 27),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "VerifEyeAnalysis"),
    ("VerifEyeAnalysis", [False, "1e-9", False, "0", "", True]),
]
# Template for the Nexxim AMI analysis.
NexximAMI = [
    ("DataBlockID", 29),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "AMIAnalysis"),
    ("AMIAnalysis", [32, False, False]),
]
# Placeholder templates (no defaults defined yet) for the remaining
# Nexxim oscillator / harmonic-balance analyses.
NexximOscillatorRSF = []
NexximOscillator1T = []
NexximOscillatorNT = []
NexximHarmonicBalance1T = []
NexximHarmonicBalanceNT = []
# Template for the Nexxim/HSPICE system transient analysis.
NexximSystem = [
    ("DataBlockID", 32),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "HSPICETransient"),
    ("HSPICETransientData", ["0.1ns", "10ns"]),
    ("HSPICETransientOtherData", [3]),
]
NexximTVNoise = []
# Template for the HSPICE system frequency-domain analysis.
HSPICE = [
    ("DataBlockID", 30),
    ("OptionName", "(Default Options)"),
    ("AdditionalOptions", ""),
    ("AlterBlockName", ""),
    ("FilterText", ""),
    ("AnalysisEnabled", 1),
    ("OutputQuantities", OutputQuantities),
    ("NoiseOutputQuantities", NoiseOutputQuantities),
    ("Name", "SystemFDAnalysis"),
    ("SystemFDAnalysis", [False]),
]
# HFSS 3D Layout setup: top-level properties and solver advanced settings.
HFSS3DLayout_Properties = [("Enable", "true")]
HFSS3DLayout_AdvancedSettings = [
    ("AccuracyLevel", 2),
    ("GapPortCalibration", True),
    ("ReferenceLengthRatio", 0.25),
    ("RefineAreaRatio", 4),
    ("DRCOn", False),
    ("FastSolverOn", False),
    ("StartFastSolverAt", 3000),
    ("LoopTreeOn", True),
    ("SingularElementsOn", False),
    ("UseStaticPortSolver", False),
    ("UseThinMetalPortSolver", False),
    ("ComputeBothEvenAndOddCPWModes", False),
    ("ZeroMetalLayerThickness", 0),
    ("ThinDielectric", 0),
    ("UseShellElements", False),
    ("SVDHighCompression", False),
    ("NumProcessors", 1),
    ("UseHfssIterativeSolver", False),
    ("UseHfssMUMPSSolver", True),
    ("RelativeResidual", 1e-06),
    ("EnhancedLowFreqAccuracy", False),
    ("OrderBasis", -1),
    ("MaxDeltaZo", 2),
    ("UseRadBoundaryOnPorts", False),
    ("SetTrianglesForWavePort", False),
    ("MinTrianglesForWavePort", 100),
    ("MaxTrianglesForWavePort", 500),
    ("numprocessorsdistrib", 1),
    ("CausalMaterials", True),
    ("enabledsoforopti", True),
    ("usehfsssolvelicense", False),
    ("ExportAfterSolve", False),
    ("ExportDir", ""),
    ("CircuitSparamDefinition", False),
    ("CircuitIntegrationType", "FFT"),
    ("DesignType", "generic"),
    ("MeshingMethod", "Phi"),
    ("EnableDesignIntersectionCheck", True),
]
# HFSS 3D Layout: curve (arc) approximation settings for meshing.
HFSS3DLayout_CurveApproximation = [
    ("ArcAngle", "30deg"),
    ("StartAzimuth", "0deg"),
    ("UseError", False),
    ("Error", "0meter"),
    ("MaxPoints", 8),
    ("UnionPolys", True),
    ("Replace3DTriangles", True),
]
# HFSS 3D Layout: Q3D DC solver settings.
HFSS3DLayout_Q3D_DCSettings = [
    ("SolveResOnly", True),
    ("Cond", Q3DCond),
    ("Mult", Q3DMult),
    ("Solution Order", "Normal"),
]
# HFSS3DLayout_AdaptiveFrequencyData = [
#     ("AdaptiveFrequency", "5GHz"),
#     ("MaxDelta", "0.02"),
#     ("MaxPasses", 10),
#     ("Expressions", [])]
# Q2D adaptive-pass settings for the capacitance/conductance (CG) solution.
CGDataBlock = [
    ("MaxPass", 10),
    ("MinPass", 1),
    ("MinConvPass", 1),
    ("PerError", 1),
    ("PerRefine", 30),
    ("DataType", "CG"),
    ("Included", True),
    ("UseParamConv", True),
    ("UseLossyParamConv", False),
    ("PerErrorParamConv", 1),
    ("UseLossConv", True),
]
# Q2D adaptive-pass settings for the resistance/inductance (RL) solution.
# NOTE(review): DataType is "CG" here as well, which looks like a copy-paste
# from CGDataBlock -- "RL" would be expected; confirm against the consumer
# before changing, since the value is sent to AEDT as-is.
RLDataBlock = [
    ("MaxPass", 10),
    ("MinPass", 1),
    ("MinConvPass", 1),
    ("PerError", 1),
    ("PerRefine", 30),
    ("DataType", "CG"),
    ("Included", True),
    ("UseParamConv", True),
    ("UseLossyParamConv", False),
    ("PerErrorParamConv", 1),
    ("UseLossConv", True),
]
# Template for the Q2D "open" boundary setup.
Open = [
    ("AdaptiveFreq", "1GHz"),
    ("SaveFields", True),
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("CGDataBlock", CGDataBlock),
    ("RLDataBlock", RLDataBlock),
    ("CacheSaveKind", "Delta"),
    ("ConstantDelta", "0s"),
]
"""Q2D open setup properties and default values."""
# Template for the Q2D "close" boundary setup (same defaults as Open).
Close = [
    ("AdaptiveFreq", "1GHz"),
    ("SaveFields", True),
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("CGDataBlock", CGDataBlock),
    ("RLDataBlock", RLDataBlock),
    ("CacheSaveKind", "Delta"),
    ("ConstantDelta", "0s"),
]
"""Q2D close setup properties and default values."""
# Template for the Icepak transient setup solving temperature AND flow.
# NOTE(review): a handful of keys carry a trailing ":=" inside the key string
# ("IsEnabled:=", "Start Time:=", "Stop Time:=", "Time Step:=", "N Steps:="),
# unlike every other key in this file. This is inconsistent and may be a
# defect if the consumer appends ":=" to keys itself -- confirm against the
# argument-building code before changing, as these strings go to AEDT.
TransientTemperatureAndFlow = [
    ("Enabled", True),
    ("Flow Regime", "Laminar"),
    ("Include Temperature", True),
    ("Include Flow", True),
    ("Include Gravity", False),
    ("Include Solar", False),
    ("Solution Initialization - X Velocity", "0m_per_sec"),
    ("Solution Initialization - Y Velocity", "0m_per_sec"),
    ("Solution Initialization - Z Velocity", "0m_per_sec"),
    ("Solution Initialization - Temperature", "AmbientTemp"),
    ("Solution Initialization - Turbulent Kinetic Energy", "1m2_per_s2"),
    ("Solution Initialization - Turbulent Dissipation Rate", "1m2_per_s3"),
    ("Solution Initialization - Specific Dissipation Rate", "1diss_per_s"),
    ("Solution Initialization - Use Model Based Flow Initialization", False),
    ("Convergence Criteria - Flow", "0.001"),
    ("Convergence Criteria - Energy", "1e-07"),
    ("Convergence Criteria - Turbulent Kinetic Energy", "0.001"),
    ("Convergence Criteria - Turbulent Dissipation Rate", "0.001"),
    ("Convergence Criteria - Specific Dissipation Rate", "0.001"),
    ("Convergence Criteria - Discrete Ordinates", "1e-06"),
    ("IsEnabled:=", False),
    ("Radiation Model", "Off"),
    ("Solar Radiation Model", "Solar Radiation Calculator"),
    ("Solar Radiation - Scattering Fraction", "0"),
    ("Solar Radiation - Day", 1),
    ("Solar Radiation - Month", 1),
    ("Solar Radiation - Hours", 0),
    ("Solar Radiation - Minutes", 0),
    ("Solar Radiation - GMT", "0"),
    ("Solar Radiation - Latitude", "0"),
    ("Solar Radiation - Latitude Direction", "East"),
    ("Solar Radiation - Longitude", "0"),
    ("Solar Radiation - Longitude Direction", "North"),
    ("Solar Radiation - Ground Reflectance", "0"),
    ("Solar Radiation - Sunshine Fraction", "0"),
    ("Solar Radiation - North X", "0"),
    ("Solar Radiation - North Y", "0"),
    ("Solar Radiation - North Z", "1"),
    ("Under-relaxation - Pressure", "0.3"),
    ("Under-relaxation - Momentum", "0.7"),
    ("Under-relaxation - Temperature", "1"),
    ("Under-relaxation - Turbulent Kinetic Energy", "0.8"),
    ("Under-relaxation - Turbulent Dissipation Rate", "0.8"),
    ("Under-relaxation - Specific Dissipation Rate", "0.8"),
    ("Discretization Scheme - Pressure", "Standard"),
    ("Discretization Scheme - Momentum", "First"),
    ("Discretization Scheme - Temperature", "First"),
    ("Secondary Gradient", False),
    ("Discretization Scheme - Turbulent Kinetic Energy", "First"),
    ("Discretization Scheme - Turbulent Dissipation Rate", "First"),
    ("Discretization Scheme - Specific Dissipation Rate", "First"),
    ("Discretization Scheme - Discrete Ordinates", "First"),
    ("Linear Solver Type - Pressure", "V"),
    ("Linear Solver Type - Momentum", "flex"),
    ("Linear Solver Type - Temperature", "F"),
    ("Linear Solver Type - Turbulent Kinetic Energy", "flex"),
    ("Linear Solver Type - Turbulent Dissipation Rate", "flex"),
    ("Linear Solver Type - Specific Dissipation Rate", "flex"),
    ("Linear Solver Termination Criterion - Pressure", "0.1"),
    ("Linear Solver Termination Criterion - Momentum", "0.1"),
    ("Linear Solver Termination Criterion - Temperature", "0.1"),
    ("Linear Solver Termination Criterion - Turbulent Kinetic Energy", "0.1"),
    ("Linear Solver Termination Criterion - Turbulent Dissipation Rate", "0.1"),
    ("Linear Solver Termination Criterion - Specific Dissipation Rate", "0.1"),
    ("Linear Solver Residual Reduction Tolerance - Pressure", "0.1"),
    ("Linear Solver Residual Reduction Tolerance - Momentum", "0.1"),
    ("Linear Solver Residual Reduction Tolerance - Temperature", "0.1"),
    ("Linear Solver Residual Reduction Tolerance - Turbulent Kinetic Energy", "0.1"),
    ("Linear Solver Residual Reduction Tolerance - Turbulent Dissipation Rate", "0.1"),
    ("Linear Solver Residual Reduction Tolerance - Specific Dissipation Rate", "0.1"),
    ("Linear Solver Stabilization - Pressure", "None"),
    ("Linear Solver Stabilization - Temperature", "None"),
    ("Coupled pressure-velocity formulation", False),
    ("Frozen Flow Simulation", False),
    ("Start Time:=", "0s"),
    ("Stop Time:=", "20s"),
    ("Time Step:=", "1s"),
    ("Iterations per Time Step", 20),
    ("Import Start Time", False),
    ("Copy Fields From Source", False),
    ("SaveFieldsType", "Every N Steps"),
    ("N Steps:=", "10s"),
    ("Enable Control Program", False),
    ("Control Program Name", ""),
]
TransientTemperatureOnly = [
("Enabled", True),
("Flow Regime", "Laminar"),
("Include Temperature", True),
("Include Flow", False),
("Include Gravity", False),
("Include Solar", False),
("Solution Initialization - X Velocity", "0m_per_sec"),
("Solution Initialization - Y Velocity", "0m_per_sec"),
("Solution Initialization - Z Velocity", "0m_per_sec"),
("Solution Initialization - Temperature", "AmbientTemp"),
("Solution Initialization - Turbulent Kinetic Energy", "1m2_per_s2"),
("Solution Initialization - Turbulent Dissipation Rate", "1m2_per_s3"),
("Solution Initialization - Specific Dissipation Rate", "1diss_per_s"),
("Solution Initialization - Use Model Based Flow Initialization", False),
("Convergence Criteria - Flow", "0.001"),
("Convergence Criteria - Energy", "1e-07"),
("Convergence Criteria - Turbulent Kinetic Energy", "0.001"),
("Convergence Criteria - Turbulent Dissipation Rate", "0.001"),
("Convergence Criteria - Specific Dissipation Rate", "0.001"),
("Convergence Criteria - Discrete Ordinates", "1e-06"),
("IsEnabled:=", False),
("Radiation Model", "Off"),
("Solar Radiation Model", "Solar Radiation Calculator"),
("Solar Radiation - Scattering Fraction", "0"),
("Solar Radiation - Day", 1),
("Solar Radiation - Month", 1),
("Solar Radiation - Hours", 0),
("Solar Radiation - Minutes", 0),
("Solar Radiation - GMT", "0"),
("Solar Radiation - Latitude", "0"),
("Solar Radiation - Latitude Direction", "East"),
("Solar Radiation - Longitude", "0"),
("Solar Radiation - Longitude Direction", "North"),
("Solar Radiation - Ground Reflectance", "0"),
("Solar Radiation - Sunshine Fraction", "0"),
("Solar Radiation - North X", "0"),
("Solar Radiation - North Y", "0"),
("Solar Radiation - North Z", "1"),
("Under-relaxation - Pressure", "0.3"),
("Under-relaxation - Momentum", "0.7"),
("Under-relaxation - Temperature", "1"),
("Under-relaxation - Turbulent Kinetic Energy", "0.8"),
("Under-relaxation - Turbulent Dissipation Rate", "0.8"),
("Under-relaxation - Specific Dissipation Rate", "0.8"),
("Discretization Scheme - Pressure", "Standard"),
("Discretization Scheme - Momentum", "First"),
("Discretization Scheme - Temperature", "First"),
("Secondary Gradient", False),
("Discretization Scheme - Turbulent Kinetic Energy", "First"),
("Discretization Scheme - Turbulent Dissipation Rate", "First"),
("Discretization Scheme - Specific Dissipation Rate", "First"),
("Discretization Scheme - Discrete Ordinates", "First"),
("Linear Solver Type - Pressure", "V"),
("Linear Solver Type - Momentum", "flex"),
("Linear Solver Type - Temperature", "F"),
("Linear Solver Type - Turbulent Kinetic Energy", "flex"),
("Linear Solver Type - Turbulent Dissipation Rate", "flex"),
("Linear Solver Type - Specific Dissipation Rate", "flex"),
("Linear Solver Termination Criterion - Pressure", "0.1"),
("Linear Solver Termination Criterion - Momentum", "0.1"),
("Linear Solver Termination Criterion - Temperature", "0.1"),
("Linear Solver Termination Criterion - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Termination Criterion - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Termination Criterion - Specific Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Pressure", "0.1"),
("Linear Solver Residual Reduction Tolerance - Momentum", "0.1"),
("Linear Solver Residual Reduction Tolerance - Temperature", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Specific Dissipation Rate", "0.1"),
("Linear Solver Stabilization - Pressure", "None"),
("Linear Solver Stabilization - Temperature", "None"),
("Coupled pressure-velocity formulation", False),
("Frozen Flow Simulation", False),
("Start Time:=", "0s"),
("Stop Time:=", "20s"),
("Time Step:=", "1s"),
("Iterations per Time Step", 20),
("Import Start Time", False),
("Copy Fields From Source", False),
("SaveFieldsType", "Every N Steps"),
("N Steps:=", "10s"),
("Enable Control Program", False),
("Control Program Name", ""),
]
TransientFlowOnly = [
("Enabled", True),
("Flow Regime", "Laminar"),
("Include Temperature", False),
("Include Flow", True),
("Include Gravity", False),
("Include Solar", False),
("Solution Initialization - X Velocity", "0m_per_sec"),
("Solution Initialization - Y Velocity", "0m_per_sec"),
("Solution Initialization - Z Velocity", "0m_per_sec"),
("Solution Initialization - Temperature", "AmbientTemp"),
("Solution Initialization - Turbulent Kinetic Energy", "1m2_per_s2"),
("Solution Initialization - Turbulent Dissipation Rate", "1m2_per_s3"),
("Solution Initialization - Specific Dissipation Rate", "1diss_per_s"),
("Solution Initialization - Use Model Based Flow Initialization", False),
("Convergence Criteria - Flow", "0.001"),
("Convergence Criteria - Energy", "1e-07"),
("Convergence Criteria - Turbulent Kinetic Energy", "0.001"),
("Convergence Criteria - Turbulent Dissipation Rate", "0.001"),
("Convergence Criteria - Specific Dissipation Rate", "0.001"),
("Convergence Criteria - Discrete Ordinates", "1e-06"),
("IsEnabled:=", False),
("Radiation Model", "Off"),
("Solar Radiation Model", "Solar Radiation Calculator"),
("Solar Radiation - Scattering Fraction", "0"),
("Solar Radiation - Day", 1),
("Solar Radiation - Month", 1),
("Solar Radiation - Hours", 0),
("Solar Radiation - Minutes", 0),
("Solar Radiation - GMT", "0"),
("Solar Radiation - Latitude", "0"),
("Solar Radiation - Latitude Direction", "East"),
("Solar Radiation - Longitude", "0"),
("Solar Radiation - Longitude Direction", "North"),
("Solar Radiation - Ground Reflectance", "0"),
("Solar Radiation - Sunshine Fraction", "0"),
("Solar Radiation - North X", "0"),
("Solar Radiation - North Y", "0"),
("Solar Radiation - North Z", "1"),
("Under-relaxation - Pressure", "0.3"),
("Under-relaxation - Momentum", "0.7"),
("Under-relaxation - Temperature", "1"),
("Under-relaxation - Turbulent Kinetic Energy", "0.8"),
("Under-relaxation - Turbulent Dissipation Rate", "0.8"),
("Under-relaxation - Specific Dissipation Rate", "0.8"),
("Discretization Scheme - Pressure", "Standard"),
("Discretization Scheme - Momentum", "First"),
("Discretization Scheme - Temperature", "First"),
("Secondary Gradient", False),
("Discretization Scheme - Turbulent Kinetic Energy", "First"),
("Discretization Scheme - Turbulent Dissipation Rate", "First"),
("Discretization Scheme - Specific Dissipation Rate", "First"),
("Discretization Scheme - Discrete Ordinates", "First"),
("Linear Solver Type - Pressure", "V"),
("Linear Solver Type - Momentum", "flex"),
("Linear Solver Type - Temperature", "F"),
("Linear Solver Type - Turbulent Kinetic Energy", "flex"),
("Linear Solver Type - Turbulent Dissipation Rate", "flex"),
("Linear Solver Type - Specific Dissipation Rate", "flex"),
("Linear Solver Termination Criterion - Pressure", "0.1"),
("Linear Solver Termination Criterion - Momentum", "0.1"),
("Linear Solver Termination Criterion - Temperature", "0.1"),
("Linear Solver Termination Criterion - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Termination Criterion - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Termination Criterion - Specific Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Pressure", "0.1"),
("Linear Solver Residual Reduction Tolerance - Momentum", "0.1"),
("Linear Solver Residual Reduction Tolerance - Temperature", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Kinetic Energy", "0.1"),
("Linear Solver Residual Reduction Tolerance - Turbulent Dissipation Rate", "0.1"),
("Linear Solver Residual Reduction Tolerance - Specific Dissipation Rate", "0.1"),
("Linear Solver Stabilization - Pressure", "None"),
("Linear Solver Stabilization - Temperature", "None"),
("Coupled pressure-velocity formulation", False),
("Frozen Flow Simulation", False),
("Start Time:=", "0s"),
("Stop Time:=", "20s"),
("Time Step:=", "1s"),
("Iterations per Time Step", 20),
("Import Start Time", False),
("Copy Fields From Source", False),
("SaveFieldsType", "Every N Steps"),
("N Steps:=", "10s"),
("Enable Control Program", False),
("Control Program Name", ""),
]
def HFSS3DLayout_AdaptiveFrequencyData(freq):
    """Build the ``AdaptiveFrequencyData`` property list for an HFSS 3D Layout setup.

    Parameters
    ----------
    freq : str
        Adaptive frequency value with units, for example ``"5GHz"``
        (this is how the module-level data lists below call it).

    Returns
    -------
    list
        ``(name, value)`` tuples describing one adaptive-frequency entry
        with default convergence settings (2% max delta, 10 passes).
    """
    return [
        ("AdaptiveFrequency", freq),
        ("MaxDelta", "0.02"),
        ("MaxPasses", 10),
        ("Expressions", [], None),
    ]
# Default adaptive-frequency data lists for the three HFSS 3D Layout adaptive
# meshing modes.  Only the list matching "AdaptType" below is used at solve
# time, but all three are always present in the setup properties.
HFSS3DLayout_SingleFrequencyDataList = [("AdaptiveFrequencyData", HFSS3DLayout_AdaptiveFrequencyData("5GHz"))]
HFSS3DLayout_BroadbandFrequencyDataList = [
    ("AdaptiveFrequencyData", HFSS3DLayout_AdaptiveFrequencyData("5GHz")),
    ("AdaptiveFrequencyData", HFSS3DLayout_AdaptiveFrequencyData("10GHz")),
]
HFSS3DLayout_MultiFrequencyDataList = [
    ("AdaptiveFrequencyData", HFSS3DLayout_AdaptiveFrequencyData("2.5GHz")),
    ("AdaptiveFrequencyData", HFSS3DLayout_AdaptiveFrequencyData("5GHz")),
    ("AdaptiveFrequencyData", HFSS3DLayout_AdaptiveFrequencyData("10GHz")),
]
# Adaptive-meshing controls shared by the HFSS3DLayout template below.
HFSS3DLayout_AdaptiveSettings = [
    ("DoAdaptive", True),
    ("SaveFields", False),
    ("SaveRadFieldsOnly", False),
    ("MaxRefinePerPass", 30),
    ("MinPasses", 1),
    ("MinConvergedPasses", 1),
    ("AdaptType", "kSingle"),  # possible values are "kSingle", "kMultiFrequencies", "kBroadband"
    ("Basic", True),
    ("SingleFrequencyDataList", HFSS3DLayout_SingleFrequencyDataList),
    ("BroadbandFrequencyDataList", HFSS3DLayout_BroadbandFrequencyDataList),
    ("MultiFrequencyDataList", HFSS3DLayout_MultiFrequencyDataList),
]
# HFSS 3D Layout driven setup template.  Sub-lists (Properties,
# AdvancedSettings, CurveApproximation, Q3D_DCSettings) are defined earlier
# in this module.
HFSS3DLayout = [
    ("Properties", HFSS3DLayout_Properties),
    ("CustomSetup", False),
    ("SolveSetupType", "HFSS"),
    # Adaptive pass controls.
    ("PercentRefinementPerPass", 30),
    ("MinNumberOfPasses", 1),
    ("MinNumberOfConvergedPasses", 1),
    ("UseDefaultLambda", True),
    ("UseMaxRefinement", False),
    ("MaxRefinement", 1000000),
    ("SaveAdaptiveCurrents", False),
    ("SaveLastAdaptiveRadFields", False),
    ("ProdMajVerID", -1),
    ("ProjDesignSetup", ""),
    ("ProdMinVerID", -1),
    ("Refine", False),
    ("Frequency", "10GHz"),
    # Initial mesh refinement options.
    ("LambdaRefine", True),
    ("MeshSizeFactor", 1.5),
    ("QualityRefine", True),
    ("MinAngle", "15deg"),
    ("UniformityRefine", False),
    ("MaxRatio", 2),
    ("Smooth", False),
    ("SmoothingPasses", 5),
    ("UseEdgeMesh", False),
    ("UseEdgeMeshAbsLength", False),
    ("EdgeMeshRatio", 0.1),
    ("EdgeMeshAbsLength", "1000mm"),
    ("LayerProjectThickness", "0meter"),
    # Geometry defeaturing thresholds.
    ("UseDefeature", True),
    ("UseDefeatureAbsLength", False),
    ("DefeatureRatio", 1e-06),
    ("DefeatureAbsLength", "0mm"),
    ("InfArrayDimX", 0),
    ("InfArrayDimY", 0),
    ("InfArrayOrigX", 0),
    ("InfArrayOrigY", 0),
    ("InfArraySkew", 0),
    # Via modeling.
    ("ViaNumSides", 6),
    ("ViaMaterial", "copper"),
    ("Style25DVia", "Mesh"),
    ("Replace3DTriangles", True),
    ("LayerSnapTol", "1e-05"),
    ("ViaDensity", 0),
    ("HfssMesh", True),
    ("Q3dPostProc", False),
    ("UnitFactor", 1000),
    ("Verbose", False),
    ("NumberOfProcessors", 0),
    ("SmallVoidArea", -2e-09),
    ("HealingOption", 1),
    ("InclBBoxOption", 1),
    ("AuxBlock", []),
    ("DoAdaptive", True),
    ("Color", ["R:=", 0, "G:=", 0, "B:=", 0], None),  # TODO: create something smart for color arrays, like a class
    ("AdvancedSettings", HFSS3DLayout_AdvancedSettings),
    ("CurveApproximation", HFSS3DLayout_CurveApproximation),
    ("Q3D_DCSettings", HFSS3DLayout_Q3D_DCSettings),
    ("AdaptiveSettings", HFSS3DLayout_AdaptiveSettings),
]
"""HFSS 3D Layout setup properties and default values."""
# Shared empty default for the "SweepDataList" entry of the layout templates.
HFSS3DLayout_SweepDataList = []
# SIwave advanced signal-integrity settings.
# NOTE(review): two keys below carry a ":=" suffix while their siblings do
# not — looks inconsistent; confirm against the AEDT property names before
# normalizing.
HFSS3DLayout_SIWAdvancedSettings = [
    ("IncludeCoPlaneCoupling", True),
    ("IncludeInterPlaneCoupling:=", False),
    ("IncludeSplitPlaneCoupling:=", True),
    ("IncludeFringeCoupling", True),
    ("IncludeTraceCoupling", True),
    ("XtalkThreshold", "-34"),
    ("MaxCoupledLines", 12),
    ("MinVoidArea", "2mm2"),
    ("MinPadAreaToMesh", "1mm2"),
    ("MinPlaneAreaToMesh", "6.25e-6mm2"),
    ("SnapLengthThreshold", "2.5um"),
    ("MeshAutoMatic", True),
    ("MeshFrequency", "4GHz"),
    ("ReturnCurrentDistribution", False),
    ("IncludeVISources", False),
    ("IncludeInfGnd", False),
    ("InfGndLocation", "0mm"),
    ("PerformERC", False),
    ("IgnoreNonFunctionalPads", True),
]
# SIwave DC solve settings.
HFSS3DLayout_SIWDCSettings = [
    ("UseDCCustomSettings", False),
    ("PlotJV", True),
    ("ComputeInductance", False),
    ("ContactRadius", "0.1mm"),
    ("DCSliderPos", 1),
]
# SIwave DC mesh/adaptive-refinement settings.
HFSS3DLayout_SIWDCAdvancedSettings = [
    ("DcMinPlaneAreaToMesh", "0.25mm2"),
    ("DcMinVoidAreaToMesh", "0.01mm2"),
    ("MaxInitMeshEdgeLength", "2.5mm"),
    ("PerformAdaptiveRefinement", True),
    ("MaxNumPasses", 5),
    ("MinNumPasses", 1),
    ("PercentLocalRefinement", 20),
    ("EnergyError", 2),
    ("MeshBws", True),
    ("RefineBws", False),
    ("MeshVias", True),
    ("RefineVias", False),
    ("NumBwSides", 8),
    ("NumViaSides", 8),
]
# SIwave DC IR reporting settings.
HFSS3DLayout_SIWDCIRSettings = [
    # NOTE(review): machine-specific absolute path baked into the default —
    # should presumably be derived from the installed AEDT location; confirm.
    ("IcepakTempFile", "D:/Program Files/AnsysEM/AnsysEM21.2/Win64/"),
    ("SourceTermsToGround", []),
    ("ExportDCThermalData", False),
    ("ImportThermalData", False),
    ("FullDCReportPath", ""),
    ("ViaReportPath", ""),
    ("PerPinResPath", ""),
    ("DCReportConfigFile", ""),
    ("DCReportShowActiveDevices", False),
    ("PerPinUsePinFormat", False),
    ("UseLoopResForPerPin", False),
]
# Full SIwave simulation settings (includes the DC IR sub-list).
HFSS3DLayout_SimulationSettings = [
    ("Enabled", True),
    ("UseSISettings", True),
    ("UseCustomSettings", False),
    ("SISliderPos", 1),
    ("PISliderPos", 1),
    ("SIWAdvancedSettings", HFSS3DLayout_SIWAdvancedSettings),
    ("SIWDCSettings", HFSS3DLayout_SIWDCSettings),
    ("SIWDCAdvancedSettings", HFSS3DLayout_SIWDCAdvancedSettings),
    ("SIWDCIRSettings", HFSS3DLayout_SIWDCIRSettings),
]
# AC-only variant: identical except it omits the SIWDCIRSettings sub-list.
HFSS3DLayout_ACSimulationSettings = [
    ("Enabled", True),
    ("UseSISettings", True),
    ("UseCustomSettings", False),
    ("SISliderPos", 1),
    ("PISliderPos", 1),
    ("SIWAdvancedSettings", HFSS3DLayout_SIWAdvancedSettings),
    ("SIWDCSettings", HFSS3DLayout_SIWDCSettings),
    ("SIWDCAdvancedSettings", HFSS3DLayout_SIWDCAdvancedSettings),
]
# Fix: the type labels of these two templates were swapped.  The DC template
# carries HFSS3DLayout_SimulationSettings (which includes SIWDCIRSettings),
# so it must be labeled "SIwaveDCIR"/"kSIwaveDCIR"; the AC template carries
# HFSS3DLayout_ACSimulationSettings (no DC IR settings), so it must be
# labeled "SIwave"/"kSIwave".
SiwaveDC3DLayout = [
    ("Properties", HFSS3DLayout_Properties),
    ("CustomSetup", False),
    ("SolveSetupType", "SIwaveDCIR"),
    ("Color", ["R:=", 0, "G:=", 0, "B:=", 0]),
    ("Position", 0),
    ("SimSetupType", "kSIwaveDCIR"),
    ("SimulationSettings", HFSS3DLayout_SimulationSettings),
    ("SweepDataList", HFSS3DLayout_SweepDataList),
]
"""SIwave DC IR setup properties and default values."""
SiwaveAC3DLayout = [
    ("Properties", HFSS3DLayout_Properties),
    ("CustomSetup", False),
    ("SolveSetupType", "SIwave"),
    ("Position", 0),
    ("SimSetupType", "kSIwave"),
    ("SimulationSettings", HFSS3DLayout_ACSimulationSettings),
    ("SweepDataList", HFSS3DLayout_SweepDataList),
]
"""SIwave AC setup properties and default values."""
HFSS3DLayout_LNASimulationSettings = [
("Enabled", True),
("GroupDelay", False),
("Perturbation", 0.1),
("Noise", False),
("Skip_DC", False),
("AdditionalOptions", ""),
("BaseOptionName", "(Default Options)"),
("FilterText", ""),
]
LNA_Sweep = [
("DataId", "Sweep0"),
("Properties", HFSS3DLayout_Properties),
("Sweep", SweepDefinition),
("SolutionID", -1),
]
HFSS3DLayout_LNAData = [("LNA Sweep 1", LNA_Sweep)]
LNA3DLayout = [
("Properties", HFSS3DLayout_Properties),
("CustomSetup", False),
("SolveSetupType", "LNA"),
("Position", 0),
("SimSetupType", "kLNA"),
("SimulationSettings", HFSS3DLayout_LNASimulationSettings),
("SweepDataList", HFSS3DLayout_SweepDataList),
("Data", HFSS3DLayout_LNAData),
]
# Mechanical solver templates; ``meshlink`` is defined earlier in this module.
MechTerm = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("Solver", "Program Controlled"),
    ("Stepping", "Program Controlled"),
]
"""Mechanical thermal setup properties and default values."""
MechModal = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("Max Modes", 6),
    ("Limit Search", True),
    ("Range Max", "100MHz"),
    ("Range Min", "0Hz"),
    ("Solver", "Program Controlled"),
]
"""Mechanical modal setup properties and default values."""
MechStructural = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("Solver", "Program Controlled"),
    ("Stepping", "Program Controlled"),
]
"""Mechanical structural setup properties and default values."""
# TODO complete the list of templates for other Solvers
# Generic rotating machine (RMxprt-style) template.
GRM = [
    ("Enabled", True),
    ("MeshLink", meshlink),
    ("RatedOutputPower", "1W"),
    ("RatedVoltage", "208V"),
    ("RatedSpeed", "3600rpm"),
    ("OperatingTemperature", "75cel"),
    ("OperationType", "Motor"),
    ("LoadType", "ConstPower"),
    ("RatedPowerFactor", "0.8"),
    ("Frequency", "60Hz"),
    ("CapacitivePowerFactor", False),
]
# Placeholder template (no properties yet; see TODO above).
TR = []
class SweepHFSS(object):
    """Initializes, creates, and updates sweeps in HFSS.

    Parameters
    ----------
    oanalysis :
        AEDT analysis module used to insert and edit the sweep.
    setupname : str
        Name of the setup.
    sweepname : str
        Name of the sweep.
    sweeptype : str, optional
        Type of the sweep. Options are ``"Fast"``, ``"Interpolating"``,
        and ``"Discrete"``. The default is ``"Interpolating"``.
    props : dict, optional
        Dictionary of the properties. The default is ``None``, in which case
        the default properties are retrieved.
    """

    def __init__(self, oanalysis, setupname, sweepname, sweeptype="Interpolating", props=None):
        self.oanalysis = oanalysis
        self.setupname = setupname
        self.name = sweepname
        self.props = {}
        if props:
            self.props = props
        else:
            # Default sweep: 401-point linear count from 2.5 GHz to 7.5 GHz
            # with the standard interpolating-sweep convergence options.
            self.props["Type"] = sweeptype
            self.props["IsEnabled"] = True
            self.props["RangeType"] = "LinearCount"
            self.props["RangeStart"] = "2.5GHz"
            self.props["RangeEnd"] = "7.5GHz"
            self.props["SaveSingleField"] = False
            self.props["RangeCount"] = 401
            self.props["RangeStep"] = "1MHz"
            self.props["RangeSamples"] = 11
            self.props["SaveFields"] = True
            self.props["SaveRadFields"] = True
            self.props["GenerateFieldsForAllFreqs"] = False
            self.props["InterpTolerance"] = 0.5
            self.props["InterpMaxSolns"] = 250
            self.props["InterpMinSolns"] = 0
            self.props["InterpMinSubranges"] = 1
            self.props["InterpUseS"] = True
            self.props["InterpUsePortImped"] = False
            self.props["InterpUsePropConst"] = True
            self.props["UseDerivativeConvergence"] = False
            self.props["InterpDerivTolerance"] = 0.2
            self.props["EnforcePassivity"] = True
            self.props["UseFullBasis"] = True
            self.props["PassivityErrorTolerance"] = 0.0001
            self.props["EnforceCausality"] = False
            self.props["UseQ3DForDCSolve"] = False
            self.props["SMatrixOnlySolveMode"] = "Auto"
            self.props["SMatrixOnlySolveAbove"] = "1MHz"
            self.props["SweepRanges"] = {"Subrange": []}

    @aedt_exception_handler
    def add_subrange(self, rangetype, start, end=None, count=None, unit="GHz", save_single_fields=False):
        """Add a subrange to the sweep.

        Parameters
        ----------
        rangetype : str
            Type of the subrange. Options are ``"LinearCount"``,
            ``"LinearStep"``, ``"LogScale"`` and ``"SinglePoints"``.
        start : float
            Starting frequency.
        end : float, optional
            Stopping frequency. Required for ``rangetype="LinearCount"|"LinearStep"|"LogScale"``.
        count : int or float, optional
            Frequency count or frequency step. Required for ``rangetype="LinearCount"|"LinearStep"|"LogScale"``.
        unit : str, optional
            Unit of the frequency. For example, ``"MHz`` or ``"GHz"``. The default is ``"GHz"``.
        save_single_fields : bool, optional
            Whether to save the fields of the single point. The default is ``False``.
            Used only for ``rangetype="SinglePoints"``.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.

        Raises
        ------
        AttributeError
            If ``rangetype`` is not one of the supported options, or if a
            required ``end``/``count`` argument is missing.

        Examples
        --------
        Create a setup in an HFSS design and add multiple sweep ranges.

        >>> setup = hfss.create_setup(setupname="MySetup")
        >>> sweep = setup.add_sweep()
        >>> sweep.change_type("Interpolating")
        >>> sweep.change_range("LinearStep", 1.1, 2.1, 0.4, "GHz")
        >>> sweep.add_subrange("LinearCount", 1, 1.5, 5, "MHz")
        >>> sweep.add_subrange("LogScale", 1, 3, 10, "GHz")
        """
        if rangetype in ("LinearCount", "LinearStep", "LogScale"):
            if not end or not count:
                raise AttributeError("Parameters 'end' and 'count' must be present.")
        # "subrange" avoids shadowing the ``range`` builtin.
        subrange = {}
        subrange["RangeType"] = rangetype
        subrange["RangeStart"] = str(start) + unit
        if rangetype == "LinearCount":
            subrange["RangeEnd"] = str(end) + unit
            subrange["RangeCount"] = count
        elif rangetype == "LinearStep":
            subrange["RangeEnd"] = str(end) + unit
            subrange["RangeStep"] = str(count) + unit
        elif rangetype == "LogScale":
            subrange["RangeEnd"] = str(end) + unit
            # AEDT expects the top-level count here; "RangeSamples" carries
            # the per-decade sample count.
            subrange["RangeCount"] = self.props["RangeCount"]
            subrange["RangeSamples"] = count
        elif rangetype == "SinglePoints":
            subrange["RangeEnd"] = str(start) + unit
            subrange["SaveSingleField"] = save_single_fields
        else:
            # Fail fast instead of silently appending an incomplete range
            # (consistent with SweepHFSS3DLayout.add_subrange).
            raise AttributeError(
                'Allowed rangetype are "LinearCount", "LinearStep", "LogScale" and "SinglePoints".'
            )
        self.props["SweepRanges"]["Subrange"].append(subrange)
        return True

    @aedt_exception_handler
    def create(self):
        """Create a sweep.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self.oanalysis.InsertFrequencySweep(self.setupname, self._get_args())
        return True

    @aedt_exception_handler
    def update(self):
        """Update a sweep.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self.oanalysis.EditFrequencySweep(self.setupname, self.name, self._get_args())
        return True

    @aedt_exception_handler
    def _get_args(self, props=None):
        """Retrieve arguments in AEDT ``["NAME:...", ...]`` form.

        Parameters
        ----------
        props : dict, optional
            Dictionary of the properties. The default is ``None``, in which
            case the default properties are retrieved.

        Returns
        -------
        list
            Argument list built from the properties.
        """
        if props is None:
            props = self.props
        arg = ["NAME:" + self.name]
        _dict2arg(props, arg)
        return arg
class SweepHFSS3DLayout(object):
    """Initializes, creates, and updates sweeps in HFSS 3D Layout.

    Parameters
    ----------
    oanalysis :
        AEDT analysis module used to add and edit the sweep.
    setupname : str
        Name of the setup.
    sweepname : str
        Name of the sweep.
    sweeptype : str, optional
        Type of the sweep. Options are ``"Interpolating"`` and ``"Discrete"``. The default is ``"Interpolating"``.
    save_fields : bool, optional
        Whether to save the fields. The default is ``True``.
    props : dict, optional
        Dictionary of the properties. The default is ``None``, in which
        case the default properties are retrieved.
    """

    def __init__(
        self,
        oanalysis,
        setupname,
        sweepname,
        sweeptype="Interpolating",
        save_fields=True,
        props=None,
    ):
        self.oanalysis = oanalysis
        self.setupname = setupname
        self.name = sweepname
        self.props = {}
        if props:
            self.props = props
        else:
            # Default: 1 Hz to 20 GHz linear sweep with 50 MHz step.
            self.props["Properties"] = OrderedDict({"Enable": True})
            self.props["Sweeps"] = OrderedDict(
                {"Variable": "Sweep 1", "Data": "LIN 1Hz 20GHz 0.05GHz", "OffsetF1": False, "Synchronize": 0}
            )
            self.props["GenerateSurfaceCurrent"] = save_fields
            self.props["SaveRadFieldsOnly"] = False
            # "FastSweep" True maps to the interpolating sweep.
            if sweeptype == "Interpolating":
                self.props["FastSweep"] = True
            elif sweeptype == "Discrete":
                self.props["FastSweep"] = False
            else:
                raise AttributeError("Allowed sweeptype options are 'Interpolating' and 'Discrete'.")
            # self.props["SaveSingleField"] = False
            self.props["ZoSelected"] = False
            self.props["SAbsError"] = 0.005
            self.props["ZoPercentError"] = 1
            self.props["GenerateStateSpace"] = False
            self.props["EnforcePassivity"] = False
            self.props["PassivityTolerance"] = 0.0001
            self.props["UseQ3DForDC"] = False
            self.props["ResimulateDC"] = False
            self.props["MaxSolutions"] = 250
            self.props["InterpUseSMatrix"] = True
            self.props["InterpUsePortImpedance"] = True
            self.props["InterpUsePropConst"] = True
            self.props["InterpUseFullBasis"] = True
            self.props["AdvDCExtrapolation"] = False
            self.props["MinSolvedFreq"] = "0.01GHz"
            self.props["CustomFrequencyString"] = ""
            self.props["AllEntries"] = False
            self.props["AllDiagEntries"] = False
            self.props["AllOffDiagEntries"] = False
            self.props["MagMinThreshold"] = 0.01

    @staticmethod
    def _compose_range(rangetype, start, end, count, unit):
        """Build the AEDT frequency-range string for ``rangetype``.

        Shared by :meth:`add_subrange` and :meth:`change_range` so the two
        methods cannot drift apart. Raises ``AttributeError`` for an
        unsupported ``rangetype``.
        """
        if rangetype == "LinearCount":
            return "LINC " + str(start) + unit + " " + str(end) + unit + " " + str(count)
        if rangetype == "LinearStep":
            return "LIN " + str(start) + unit + " " + str(end) + unit + " " + str(count) + unit
        if rangetype == "LogScale":
            return "DEC " + str(start) + unit + " " + str(end) + unit + " " + str(count) + unit
        if rangetype == "SinglePoint":
            return str(start) + unit
        raise AttributeError('Allowed rangetype are "LinearCount", "SinglePoint", "LinearStep", and "LogScale".')

    @aedt_exception_handler
    def change_type(self, sweeptype):
        """Change the type of the sweep.

        Parameters
        ----------
        sweeptype : str
            Type of the sweep. Options are ``"Interpolating"`` and ``"Discrete"``.
            The default is ``"Interpolating"``.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        if sweeptype == "Interpolating":
            self.props["FastSweep"] = True
        elif sweeptype == "Discrete":
            self.props["FastSweep"] = False
        else:
            raise AttributeError("Allowed sweeptype options are 'Interpolating' and 'Discrete'.")
        return self.update()

    @aedt_exception_handler
    def set_save_fields(self, save_fields, save_rad_fields=False):
        """Choose whether the fields are saved.

        Parameters
        ----------
        save_fields : bool
            Whether to save the fields.
        save_rad_fields : bool, optional
            Whether to save the radiating fields. The default is ``False``.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self.props["GenerateSurfaceCurrent"] = save_fields
        self.props["SaveRadFieldsOnly"] = save_rad_fields
        return self.update()

    @aedt_exception_handler
    def add_subrange(self, rangetype, start, end=None, count=None, unit="GHz"):
        """Add a subrange to the sweep.

        Parameters
        ----------
        rangetype : str
            Type of the subrange. Options are ``"LinearCount"``, ``"SinglePoint"``,
            ``"LinearStep"``, and ``"LogScale"``.
        start : float
            Starting frequency.
        end : float, optional
            Stopping frequency.
            Mandatory for ``"LinearCount"``, ``"LinearStep"``, and ``"LogScale"``.
        count : int or float, optional
            Frequency count or frequency step.
            Mandatory for ``"LinearCount"``, ``"LinearStep"``, and ``"LogScale"``.
        unit : str
            Unit of the frequency. For example, ``"MHz`` or ``"GHz"``. The default is ``"GHz"``.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        # "SinglePoint" entries are only meaningful in a discrete sweep.
        if rangetype == "SinglePoint" and self.props["FastSweep"]:
            raise AttributeError("'SinglePoint is allowed only when sweeptype is 'Discrete'.'")
        if rangetype in ("LinearCount", "LinearStep", "LogScale"):
            if not end or not count:
                raise AttributeError("Parameters 'end' and 'count' must be present.")
        # Append to the existing range string, space-separated.
        self.props["Sweeps"]["Data"] += " " + self._compose_range(rangetype, start, end, count, unit)
        return self.update()

    @aedt_exception_handler
    def change_range(self, rangetype, start, end=None, count=None, unit="GHz"):
        """Change the range of the sweep.

        Parameters
        ----------
        rangetype : str
            Type of the subrange. Options are ``"LinearCount"``, ``"SinglePoint"``,
            ``"LinearStep"``, and ``"LogScale"``.
        start : float
            Starting frequency.
        end : float, optional
            Stopping frequency.
            Mandatory for ``"LinearCount"``, ``"LinearStep"``, and ``"LogScale"``.
        count : int or float, optional
            Frequency count or frequency step.
            Mandatory for ``"LinearCount"``, ``"LinearStep"``, and ``"LogScale"``.
        unit : str, optional
            Unit of the frequency. For example, ``"MHz`` or ``"GHz"``. The default is ``"GHz"``.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        # Replace the whole range string with the new definition.
        self.props["Sweeps"]["Data"] = self._compose_range(rangetype, start, end, count, unit)
        return self.update()

    @aedt_exception_handler
    def create(self):
        """Create a sweep.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self.oanalysis.AddSweep(self.setupname, self._get_args())
        return True

    @aedt_exception_handler
    def update(self):
        """Update the sweep.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self.oanalysis.EditSweep(self.setupname, self.name, self._get_args())
        return True

    @aedt_exception_handler
    def _get_args(self, props=None):
        """Retrieve properties in AEDT ``["NAME:...", ...]`` form.

        Parameters
        ----------
        props : dict, optional
            Dictionary of the properties. The default is ``None``, in which case
            the default properties are retrieved.

        Returns
        -------
        list
            Argument list built from the properties.
        """
        if props is None:
            props = self.props
        arg = ["NAME:" + self.name]
        _dict2arg(props, arg)
        return arg
class SweepQ3D(object):
    """Initializes, creates, and updates sweeps in Q3D.

    Parameters
    ----------
    oanalysis :
        AEDT analysis module used to insert and edit the sweep.
    setupname : str
        Name of the setup.
    sweepname : str
        Name of the sweep.
    sweeptype : str, optional
        Type of the sweep. Options are ``"Fast"``, ``"Interpolating"``,
        and ``"Discrete"``. The default is ``"Interpolating"``.
    props : dict, optional
        Dictionary of the properties. The default is ``None``, in which case
        the default properties are retrieved.
    """

    def __init__(self, oanalysis, setupname, sweepname, sweeptype="Interpolating", props=None):
        self.oanalysis = oanalysis
        self.setupname = setupname
        self.name = sweepname
        self.props = {}
        if props:
            self.props = props
        else:
            self.props["Type"] = sweeptype
            if sweeptype == "Discrete":
                # NOTE(review): lowercase "isenabled" differs from the
                # "IsEnabled" key used in the branch below — kept as-is;
                # confirm against the Q3D API before normalizing.
                self.props["isenabled"] = True
                self.props["RangeType"] = "LinearCount"
                self.props["RangeStart"] = "2.5GHz"
                self.props["RangeStep"] = "1GHz"
                self.props["RangeEnd"] = "7.5GHz"
                self.props["SaveSingleField"] = False
                self.props["RangeSamples"] = 3
                self.props["RangeCount"] = 401
                self.props["SaveFields"] = False
                self.props["SaveRadFields"] = False
                self.props["SweepRanges"] = []
            else:
                # Fast/Interpolating defaults: 1-20 GHz in 1 GHz steps.
                self.props["IsEnabled"] = True
                self.props["RangeType"] = "LinearStep"
                self.props["RangeStart"] = "1GHz"
                self.props["RangeStep"] = "1GHz"
                self.props["RangeEnd"] = "20GHz"
                self.props["SaveFields"] = False
                self.props["SaveRadFields"] = False
                self.props["InterpTolerance"] = 0.5
                self.props["InterpMaxSolns"] = 50
                self.props["InterpMinSolns"] = 0
                self.props["InterpMinSubranges"] = 1

    @aedt_exception_handler
    def add_subrange(self, type, start, end, count):
        """Add a subrange to the sweep.

        Parameters
        ----------
        type : str
            Type of the subrange. Options are ``"LinearCount"``,
            ``"LinearStep"``, and ``"LogScale"``.
            (The parameter name shadows the builtin but is kept for
            backward compatibility with keyword callers.)
        start : float
            Starting frequency.
        end : float
            Stopping frequency.
        count : int or float
            Frequency count or frequency step.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        # "sweep_range" avoids shadowing the ``range`` builtin.
        sweep_range = {}
        sweep_range["RangeType"] = type
        sweep_range["RangeStart"] = start
        sweep_range["RangeEnd"] = end
        if type == "LinearCount":
            sweep_range["RangeCount"] = count
        elif type == "LinearStep":
            sweep_range["RangeStep"] = count
        elif type == "LogScale":
            # NOTE(review): "RangeCount" is only set when sweeptype is
            # "Discrete"; in interpolating mode this raises KeyError —
            # confirm intended usage before changing.
            sweep_range["RangeCount"] = self.props["RangeCount"]
            sweep_range["RangeSamples"] = count
        self.props["SweepRanges"].append(sweep_range)
        return True

    @aedt_exception_handler
    def create(self):
        """Create a sweep.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self.oanalysis.InsertSweep(self.setupname, self._get_args())
        return True

    @aedt_exception_handler
    def update(self):
        """Update the sweep.

        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self.oanalysis.EditSweep(self.setupname, self.name, self._get_args())
        return True

    @aedt_exception_handler
    def _get_args(self, props=None):
        """Retrieve properties in AEDT ``["NAME:...", ...]`` form.

        Parameters
        ----------
        props : dict, optional
            Dictionary of the properties. The default is ``None``, in which case
            the default properties are retrieved.

        Returns
        -------
        list
            Argument list built from the properties.
        """
        if props is None:
            props = self.props
        arg = ["NAME:" + self.name]
        _dict2arg(props, arg)
        return arg
class SetupKeys(object):
    """Provides setup keys.

    Maps the integer setup-type identifiers used by AEDT to the
    corresponding setup template objects (``SetupTemplates``) and to the
    AEDT setup-type name strings (``SetupNames``).  ``SetupNames`` is a
    list indexed by the same integers used as keys in ``SetupTemplates``.
    """

    # Template object for each numeric setup type.
    # NOTE(review): keys 8 and 9 both map to ``Electrostatic`` even though
    # SetupNames[9] is "ElectroDCConduction" — presumably key 9 should use
    # a DC-conduction template; verify against the AEDT setup-type table.
    SetupTemplates = {
        0: HFSSDrivenAuto,
        1: HFSSDrivenDefault,
        2: HFSSEigen,
        3: HFSSTransient,
        4: HFSSSBR,
        5: MaxwellTransient,
        6: Magnetostatic,
        7: EddyCurrent,
        8: Electrostatic,
        9: Electrostatic,
        10: ElectricTransient,
        11: SteadyTemperatureAndFlow,
        12: SteadyTemperatureOnly,
        13: SteadyFlowOnly,
        14: Matrix,
        15: NexximLNA,
        16: NexximDC,
        17: NexximTransient,
        18: NexximQuickEye,
        19: NexximVerifEye,
        20: NexximAMI,
        21: NexximOscillatorRSF,
        22: NexximOscillator1T,
        23: NexximOscillatorNT,
        24: NexximHarmonicBalance1T,
        25: NexximHarmonicBalanceNT,
        26: NexximSystem,
        27: NexximTVNoise,
        28: HSPICE,
        29: HFSS3DLayout,
        30: Open,
        31: Close,
        32: MechTerm,
        33: MechModal,
        34: GRM,
        35: TR,
        36: TransientTemperatureAndFlow,
        37: TransientTemperatureOnly,
        38: TransientFlowOnly,
        39: MechStructural,
        40: SiwaveDC3DLayout,
        41: SiwaveAC3DLayout,
        42: LNA3DLayout,
    }

    # AEDT setup-type name string for each numeric setup type; the list
    # index corresponds to the key in ``SetupTemplates`` above.  Several
    # distinct templates share the same AEDT name (e.g. indices 1 and 4
    # are both "HFSSDriven", 11-13 are all "IcepakSteadyState").
    SetupNames = [
        "HFSSDrivenAuto",
        "HFSSDriven",
        "HFSSEigen",
        "HFSSTransient",
        "HFSSDriven",
        "Transient",
        "Magnetostatic",
        "EddyCurrent",
        "Electrostatic",
        "ElectroDCConduction",
        "ElectroDCConduction",
        "IcepakSteadyState",
        "IcepakSteadyState",
        "IcepakSteadyState",
        "Matrix",
        "NexximLNA",
        "NexximDC",
        "NexximTransient",
        "NexximQuickEye",
        "NexximVerifEye",
        "NexximAMI",
        "NexximOscillatorRSF",
        "NexximOscillator1T",
        "NexximOscillatorNT",
        "NexximHarmonicBalance1T",
        "NexximHarmonicBalanceNT",
        "NexximSystem",
        "NexximTVNoise",
        "HSPICE",
        "HFSS3DLayout",
        "2DMatrix",
        "2DMatrix",
        "MechThermal",
        "MechModal",
        "GRM",
        "TR",
        "IcepakTransient",
        "IcepakTransient",
        "IcepakTransient",
        "MechStructural",
        "SiwaveDC3DLayout",
        "SiwaveAC3DLayout",
        "LNA3DLayout",
    ]
| 36.770697 | 117 | 0.609752 |
4970204346bb09b20f4d486beffa816ff028cf1f | 3,645 | py | Python | tests/models/programdb/environment/conftest.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 4 | 2018-08-26T09:11:36.000Z | 2019-05-24T12:01:02.000Z | tests/models/programdb/environment/conftest.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 52 | 2018-08-24T12:51:22.000Z | 2020-12-28T04:59:42.000Z | tests/models/programdb/environment/conftest.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 1 | 2018-10-11T07:57:55.000Z | 2018-10-11T07:57:55.000Z | # -*- coding: utf-8 -*-
#
# tests.models.programdb.environment.conftest.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""The RAMSTK Environment module test fixtures."""
# Third Party Imports
import pytest
from pubsub import pub
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKEnvironmentRecord
from ramstk.models.dbtables import RAMSTKEnvironmentTable
from tests import MockDAO
@pytest.fixture
def mock_dao(monkeypatch):
    """Create a mock database table of three environment records.

    All three records share identical field values except for their
    ``environment_id`` (1, 2, 3).

    The ``monkeypatch`` fixture argument is currently unused but kept to
    preserve the fixture signature.
    """

    def _build_record(environment_id):
        # One-line purpose: build a default environment record with the
        # given ID; every other field is the shared default value.
        _environment = RAMSTKEnvironmentRecord()
        _environment.revision_id = 1
        _environment.mission_id = 1
        _environment.mission_phase_id = 1
        _environment.environment_id = environment_id
        _environment.name = "Condition Name"
        _environment.units = "Units"
        _environment.minimum = 0.0
        _environment.maximum = 0.0
        _environment.mean = 0.0
        _environment.variance = 0.0
        _environment.ramp_rate = 0.0
        _environment.low_dwell_time = 0.0
        _environment.high_dwell_time = 0.0
        return _environment

    dao = MockDAO()
    dao.table = [_build_record(_id) for _id in (1, 2, 3)]

    yield dao
@pytest.fixture(scope="function")
def test_attributes():
    """Create a dict of Environment attributes."""
    _attributes = dict(
        revision_id=1,
        mission_id=1,
        mission_phase_id=1,
        environment_id=1,
        name="Condition Name",
        units="Units",
        minimum=0.0,
        maximum=0.0,
        mean=0.0,
        variance=0.0,
        ramp_rate=0.0,
        low_dwell_time=0.0,
        high_dwell_time=0.0,
    )
    yield _attributes
@pytest.fixture(scope="class")
def test_table_model():
    """Get a table model instance for each test function."""
    # Create the device under test (dut) and connect to the database.
    dut = RAMSTKEnvironmentTable()

    yield dut

    # Unsubscribe from pypubsub topics, in the same order as before.
    for _listener, _topic in (
        (dut.do_get_attributes, "request_get_environment_attributes"),
        (dut.do_set_attributes, "request_set_environment_attributes"),
        (dut.do_set_attributes, "wvw_editing_environment"),
        (dut.do_update, "request_update_environment"),
        (dut.do_select_all, "selected_revision"),
        (dut.do_get_tree, "request_get_environment_tree"),
        (dut.do_delete, "request_delete_environment"),
        (dut.do_insert, "request_insert_environment"),
    ):
        pub.unsubscribe(_listener, _topic)

    # Delete the device under test.
    del dut
| 31.153846 | 88 | 0.707545 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.