hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
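The listing above is the column schema of this dump: provenance fields (hexsha, repo paths/names, licenses, star/issue/fork event timestamps), the raw file content, and a battery of qsc_* quality signals. A minimal sketch of how such a dump could be loaded and filtered, assuming the rows live in a local Parquet file (the file name "code_rows.parquet" and the use of pandas are illustrative assumptions, not stated anywhere in this dump):

import pandas as pd

# Hypothetical file name; the dump does not say where the rows are stored.
df = pd.read_parquet("code_rows.parquet")

# Echo the schema, mirroring the field/type listing above.
for name, dtype in df.dtypes.items():
    print(f"{name}: {dtype}")

# Example filter: Python files with little 10-gram duplication.
mask = (df["lang"] == "Python") & \
       (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.1)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())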
f3f055d2d43051b7e1d0b360516d537f45dd36e6
591
py
Python
setup.py
goosechooser/file-manip-toolkit
2a1a14b3e53b63ae2224e62d1a2184249e4af440
[ "MIT" ]
null
null
null
setup.py
goosechooser/file-manip-toolkit
2a1a14b3e53b63ae2224e62d1a2184249e4af440
[ "MIT" ]
3
2017-04-05T08:08:42.000Z
2017-04-05T09:58:10.000Z
setup.py
goosechooser/file-manip-toolkit
2a1a14b3e53b63ae2224e62d1a2184249e4af440
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages

with open('README.md') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='file-manip-toolkit',
    version='1.1',
    description='collection of tools for low level binary manipulations of files',
    long_description=readme,
    author='M B',
    author_email='dont@me',
    license=license,
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'unfman=file_manip_toolkit.unfman.cli:main',
            'eswap=file_manip_toolkit.eswap.cli:main'],
    }
)
24.625
82
0.641286
75
591
4.92
0.6
0.073171
0.130081
0
0
0
0
0
0
0
0
0.004396
0.230118
591
23
83
25.695652
0.806593
0
0
0
0
0
0.34687
0.135364
0
0
0
0
0
1
0
false
0
0.05
0
0.05
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3f25f5d972972cc7fc6131be78ccaf5e519a99c
6,603
py
Python
src/pyfinlab/backtesting.py
vishalbelsare/pyfinlab
63c0d3f639e4cdae19096a2069967fdf7f66d8ab
[ "BSD-3-Clause" ]
8
2021-07-19T21:08:26.000Z
2021-12-04T22:10:01.000Z
src/pyfinlab/backtesting.py
vishalbelsare/pyfinlab
63c0d3f639e4cdae19096a2069967fdf7f66d8ab
[ "BSD-3-Clause" ]
null
null
null
src/pyfinlab/backtesting.py
vishalbelsare/pyfinlab
63c0d3f639e4cdae19096a2069967fdf7f66d8ab
[ "BSD-3-Clause" ]
4
2021-07-19T21:08:37.000Z
2021-10-05T19:34:30.000Z
import bt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from pyfinlab import data_api as api

"""
These functions backtest the efficient frontier portfolios.
"""

class OrderedWeights(bt.Algo):
    def __init__(self, weights):
        self.target_weights = weights

    def __call__(self, target):
        target.temp['weights'] = dict(zip(target.temp['selected'], self.target_weights))
        return True

def backtest_parameters(portfolio, weightings, prices):
    """
    Creates Backtest object combining Strategy object with price data.

    :param portfolio: (int) Choose any portfolio from 1-20.
    :param weightings: (pd.DataFrame) Weightings for efficient frontier portfolios.
    :param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset.
    :return: (obj) Backtest object combining Strategy object with price data.
    """
    target_weights = weightings[portfolio]
    target_weights = target_weights[target_weights!=0].to_frame()
    tickers = list(target_weights.index)
    weights_dict = target_weights.to_dict().get(portfolio)
    prices_df = prices[tickers]
    strategy = bt.Strategy('{}'.format(portfolio),
                           [bt.algos.RunQuarterly(),
                            bt.algos.SelectAll(tickers),
                            OrderedWeights(list(weights_dict.values())),
                            bt.algos.Rebalance()])
    return bt.Backtest(strategy, prices_df)

def compile_backtests(weightings, prices):
    """
    Compiles multiple backtest objects.

    :param weightings: (pd.DataFrame) Weightings for efficient frontier portfolios.
    :param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset.
    :return: (list) List of Backtest objects, one for each efficient frontier portfolio.
    """
    backtests = []
    for backtest in list(weightings.columns):
        backtests.append(backtest_parameters(backtest, weightings, prices))
    return backtests

def benchmark_strategy(benchmark_ticker='SPY'):
    """
    Creates a Strategy object for the benchmark ticker.

    :param benchmark_ticker: (str) Optional, benchmark ticker. Defaults to 'SPY'.
    :return: (obj) Strategy object assigned to the benchmark.
    """
    return bt.Strategy(
        benchmark_ticker,
        algos = [bt.algos.RunQuarterly(),
                 bt.algos.SelectAll(),
                 bt.algos.SelectThese([benchmark_ticker]),
                 bt.algos.WeighEqually(),
                 bt.algos.Rebalance()],
    )

def benchmark_backtest(benchmark_ticker, start_date, end_date, api_source):
    """
    Creates Backtest object combining Strategy object with price data from the benchmark.

    :param benchmark_ticker: (str) Optional, benchmark ticker. Defaults to 'SPY'.
    :param start_date: (str) Start date of requested time series. Must be in 'YYYY-MM-DD' (i.e. '2021-06-21')
                       if api_source is yfinance. Must be in 'MM/DD/YYYY' (i.e. '2021-06-21') format if
                       api_source is bloomberg.
    :param end_date: (str) End date of requested time series. Must be in 'YYYY-MM-DD' (i.e. '2021-06-21')
                     if api_source is yfinance. Must be in 'MM/DD/YYYY' (i.e. '2021-06-21') format if
                     api_source is bloomberg.
    :param api_source: (str) API source to pull data from. Choose from 'yfinance' or 'bloomberg'. Default is yfinance.
    :return: (obj) Backtest object combining Strategy object with price data.
    """
    benchmark_prices = api.price_history([benchmark_ticker], start_date, end_date, api_source)
    benchmark_prices.columns = [benchmark_ticker]
    benchmark_name = api.name(api_source, benchmark_ticker)
    return bt.Backtest(benchmark_strategy(benchmark_ticker), benchmark_prices)

def run_backtest(backtests, benchmark):
    """
    Runs the backtest.

    :param backtests: (list) List of Backtest objects, one for each efficient frontier portfolio.
    :param benchmark: (list) Backtest object for the benchmark_strategy.
    :return: (obj) Result object containing backtest results.
    """
    np.seterr(divide='ignore')
    return bt.run(
        backtests[0], backtests[1], backtests[2], backtests[3], backtests[4],
        backtests[5], backtests[6], backtests[7], backtests[8], backtests[9],
        backtests[10], backtests[11], backtests[12], backtests[13], backtests[14],
        backtests[15], backtests[16], backtests[17], backtests[18], backtests[19],
        benchmark
    )

def linechart(Results, title='Backtest Results', figsize=(15, 9), save=False, show=True, colormap='jet'):
    """
    Plots the performance for all efficient frontier portfolios.

    :param Results: (object) Results object from bt.backtest.Result(*backtests). Refer to the following documentation
                    https://pmorissette.github.io/bt/bt.html?highlight=display#bt.backtest.Result
    :param title: (str) Optional, used to name image file if saved. Defaults to 'backtests'.
    :param figsize: (float, float) Optional, multiple by which to multiply the maximum weighting constraints at the
                    ticker level. Defaults to (15, 9).
    :param save: (bool) Optional, width, height in inches. Defaults to False.
    :param show: (bool) Optional, displays plot. Defaults to True.
    :param colormap: (str or matplotlib colormap object) Colormap to select colors from. If string, load colormap
                     with that name from matplotlib. Defaults to 'jet'.
    :return: (fig) Plot of performance for all efficient frontier portfolios.
    """
    plot = Results.plot(title=title, figsize=figsize, colormap=colormap)
    fig = plot.get_figure()
    plt.legend(loc="upper left")
    if save == True:
        plt.savefig(
            '../charts/linechart_{}.png'.format(datetime.today().strftime('%m-%d-%Y')),
            bbox_inches='tight')
    if show == False:
        plt.close()

def backtest_timeseries(Results, freq='d'):
    """
    Plots the performance for all efficient frontier portfolios.

    :param Results: (object) Results object from bt.backtest.Result(*backtests). Refer to the following documentation
                    https://pmorissette.github.io/bt/bt.html?highlight=display#bt.backtest.Result
    :param freq: (str) Data frequency used for display purposes. Refer to pandas docs for valid freq strings.
    :return: (pd.DataFrame) Time series of each portfolio's value over time according to the backtest Results object.
    """
    return Results._get_series(freq).drop_duplicates().iloc[1:]
46.174825
129
0.681508
827
6,603
5.360339
0.269649
0.043988
0.036544
0.027972
0.369501
0.369501
0.343785
0.343785
0.325739
0.300023
0
0.014085
0.215054
6,603
142
130
46.5
0.841212
0.520521
0
0
0
0
0.033322
0.00912
0
0
0
0
0
1
0.15
false
0
0.1
0
0.383333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3f2c852678fa5d35b8d2a46acf9b6507ca1bf3d
2,318
py
Python
sent/sentiment.py
RA-VI-RUS/senti
bccfdfe7033be263d34258b44f6527c39d857719
[ "MIT" ]
1
2016-03-25T01:57:41.000Z
2016-03-25T01:57:41.000Z
sent/sentiment.py
RA-VI-RUS/senti
bccfdfe7033be263d34258b44f6527c39d857719
[ "MIT" ]
null
null
null
sent/sentiment.py
RA-VI-RUS/senti
bccfdfe7033be263d34258b44f6527c39d857719
[ "MIT" ]
null
null
null
""" Sentiment prediction module """ import nltk import numpy as np from cPickle import load def get_word_index_array(words, word2index): u""" Transform the words into list of int(word index) Note: Unknown words are dropped >>> words = [u"I", u"love", u"you", u"RANDOM STUFF"] >>> word2index = {u"I": 0, u"love": 1, u"you": 2} >>> get_word_index_array(words, word2index) [0, 1, 2] """ return [word2index[w] for w in words if word2index.get(w) is not None # filter out those unknown ] def pad_sents(sents, padding_token_index): """ Pad the sents(in word index form) into same length so they can form a matrix # 15447 >>> sents = [[1,2,3], [1,2], [1,2,3,4,5]] >>> pad_sents(sents, padding_token_index = -1) [[1, 2, 3, -1, -1], [1, 2, -1, -1, -1], [1, 2, 3, 4, 5]] """ max_len_sent = max(sents, key = lambda sent: len(sent)) max_len = len(max_len_sent) get_padding = lambda sent: [padding_token_index] * (max_len - len(sent)) padded_sents = [(sent + get_padding(sent)) for sent in sents] return padded_sents WORD2INDEX = load(open("sent/data/twitter.pkl"))[3] PADDING_INDEX = WORD2INDEX[u"<PADDING>"] from param_util import load_dcnn_model_params from dcnn import DCNN params = load_dcnn_model_params("sent/models/filter_widths=8,6,,batch_size=10,,ks=20,8,,fold=1,1,,conv_layer_n=2,,ebd_dm=48,,l2_regs=1e-06,1e-06,1e-06,0.0001,,dr=0.5,0.5,,nkerns=7,12.pkl") MODEL = DCNN(params) def sentiment_scores_of_sents(sents): """ Predict the sentiment positive scores for a bunch of sentences >>> sentiment_scores_of_sents([u'simultaneously heart breaking and very funny , the last kiss is really all about performances .', u'( u ) stupid .']) array([ 0.78528505, 0.0455901 ]) """ word_indices = [get_word_index_array(nltk.word_tokenize(sent), WORD2INDEX) for sent in sents] x = np.asarray( pad_sents(word_indices, PADDING_INDEX), dtype = np.int32 ) scores = MODEL._p_y_given_x(x) return scores[:, 1] # return `positiveness` def sentiment_score(sent): """simple wrapper around the more general case""" return sentiment_scores_of_sents([sent])[0]
29.341772
188
0.633305
353
2,318
3.988669
0.399433
0.009943
0.008523
0.036222
0.09517
0.088068
0
0
0
0
0
0.056614
0.230371
2,318
78
189
29.717949
0.732623
0.362381
0
0.058824
0
0.029412
0.134757
0.12813
0
0
0
0
0
1
0.117647
false
0
0.147059
0
0.382353
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3f5715946eab9fedb592e1f0e7140af27f37fbd
2,362
py
Python
scripts/results-processing/generate_cmc_curves.py
Dou-Yu-xuan/pykinship
f81f6667fa08a08fe726736d05476168b2a3e2f0
[ "MIT" ]
12
2020-02-19T02:50:49.000Z
2022-03-31T19:39:35.000Z
scripts/results-processing/generate_cmc_curves.py
Dou-Yu-xuan/pykinship
f81f6667fa08a08fe726736d05476168b2a3e2f0
[ "MIT" ]
68
2020-03-23T00:07:28.000Z
2022-03-28T10:02:16.000Z
scripts/results-processing/generate_cmc_curves.py
Dou-Yu-xuan/pykinship
f81f6667fa08a08fe726736d05476168b2a3e2f0
[ "MIT" ]
3
2020-02-11T19:07:08.000Z
2020-11-04T18:48:00.000Z
from pathlib import Path

import numpy as np
import pandas as pd
import torch
from matplotlib import pyplot as plt

from src.tools.metrics import evaluate


def load_dataframes(din, wildcard="*fusion*.csv"):
    data = {}
    for f in din.glob(wildcard):
        print(f)
        ref = "-".join(f.with_name("").name.split("_")[-3:]).replace("-fusion", "")
        print(ref)
        data[ref] = pd.read_csv(f)


if __name__ == "__main__":
    dir_results = Path(
        Path.home().joinpath(
            "Dropbox/FIW_Video/results/search_retrieval_evaluation/results/image_based"
        )
    )
    df_list = load_dataframes(dir_results)

    result = np.load(
        dir_results.joinpath("gallery_probe_features.npy"), allow_pickle=True
    )

    gallery_feature = torch.FloatTensor(result.item().get("gallery_f"))
    gallery_label = result.item().get("gallery_label")
    print("gallery size:", gallery_feature.size(), gallery_label.shape)

    query_feature = torch.FloatTensor(result.item().get("query_f"))
    query_label = result.item().get("query_label")
    print("query size:", query_feature.size(), query_label.shape)

    query_feature = query_feature.squeeze(1)
    gallery_feature = gallery_feature.squeeze(1)

    ## query-gallery
    CMC = torch.IntTensor(gallery_label.shape[0]).zero_()
    ap = 0.0
    all_scores = []
    all_predicts = []
    for i in range(query_label.shape[0]):
        scores, predicts, (ap_tmp, CMC_tmp) = evaluate(
            query_feature[i], query_label[i], gallery_feature, gallery_label
        )
        all_scores.append(scores.squeeze())
        all_predicts.append(predicts)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp

    CMC = CMC.float()
    CMC = CMC / query_label.shape[0]  # average CMC
    print("Rank@1:%f Rank@5:%f Rank@10:%f" % (CMC[0], CMC[4], CMC[9]))
    print("Rank@10:%f Rank@20:%f Rank@50:%f" % (CMC[9], CMC[19], CMC[49]))
    print("mAP:%f" % (ap / query_label.shape[0]))

    # save all_scores to npy
    predict_result = {
        "score": np.asarray(all_scores),
        "predict": np.asarray(all_predicts),
    }
    np.save("predict_result.npy", predict_result)

    CMC = CMC.numpy()
    fig, ax = plt.subplots()
    plt.plot(CMC)
    ax.set(xscale="log")
    plt.xlim(0, 1000)
    plt.show()
    fig.savefig("CMC_result.png")
29.525
87
0.629551
324
2,362
4.388889
0.351852
0.049226
0.036568
0.033755
0.050633
0.050633
0
0
0
0
0
0.018448
0.219729
2,362
79
88
29.898734
0.75312
0.020322
0
0
0
0
0.132958
0.042876
0
0
0
0
0
1
0.016129
false
0
0.096774
0
0.112903
0.112903
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3f694c253c50689ab808a6179d10da6fe162e10
1,615
py
Python
calendarium/templatetags/calendarium_tags.py
Reston/django-calendarium
7b12b1d6845002378bcd18d91f8ce9c46ef6c9d8
[ "MIT" ]
null
null
null
calendarium/templatetags/calendarium_tags.py
Reston/django-calendarium
7b12b1d6845002378bcd18d91f8ce9c46ef6c9d8
[ "MIT" ]
null
null
null
calendarium/templatetags/calendarium_tags.py
Reston/django-calendarium
7b12b1d6845002378bcd18d91f8ce9c46ef6c9d8
[ "MIT" ]
1
2019-02-21T16:47:58.000Z
2019-02-21T16:47:58.000Z
"""Templatetags for the ``calendarium`` project.""" try: from django.core.urlresolvers import reverse except ImportError: # >= django 2.0 from django.urls import reverse from django import template from django.utils.timezone import datetime, now, timedelta, utc from ..models import Event, EventCategory register = template.Library() register_tag = register.assignment_tag if hasattr( register, 'assignment_tag') else register.simple_tag @register.filter def get_week_URL(date, day=0): """ Returns the week view URL for a given date. :param date: A date instance. :param day: Day number in a month. """ if day < 1: day = 1 date = datetime(year=date.year, month=date.month, day=day, tzinfo=utc) return reverse( 'calendarium:calendar_week', kwargs={ 'year': date.isocalendar()[0], 'week': date.isocalendar()[1]}) def _get_upcoming_events(amount=5, category=None): if not isinstance(category, EventCategory): category = None return Event.objects.get_occurrences( now(), now() + timedelta(days=356), category)[:amount] @register.inclusion_tag('calendarium/upcoming_events.html') def render_upcoming_events(event_amount=5, category=None): """Template tag to render a list of upcoming events.""" return { 'occurrences': _get_upcoming_events( amount=event_amount, category=category), } @register_tag def get_upcoming_events(amount=5, category=None): """Returns a list of upcoming events.""" return _get_upcoming_events(amount=amount, category=category)
29.363636
74
0.693498
204
1,615
5.362745
0.372549
0.102377
0.062157
0.084095
0.120658
0.120658
0.071298
0.071298
0
0
0
0.01
0.195046
1,615
54
75
29.907407
0.831538
0.157895
0
0
0
0
0.068182
0.043182
0
0
0
0
0
1
0.117647
false
0
0.176471
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3f87273a6ab084e7b2486de9cda1728e1c1a45f
9,266
py
Python
objectModel/Python/tests/cdm/resolution_guidance/common_test.py
CBA-Consult/CDM
892bceac7a15167c85342cc1c61d7ecdf5f1b78d
[ "CC-BY-4.0", "MIT" ]
1
2020-10-17T14:07:55.000Z
2020-10-17T14:07:55.000Z
objectModel/Python/tests/cdm/resolution_guidance/common_test.py
CBA-Consult/CDM
892bceac7a15167c85342cc1c61d7ecdf5f1b78d
[ "CC-BY-4.0", "MIT" ]
5
2021-07-05T15:32:15.000Z
2022-01-04T16:51:11.000Z
objectModel/Python/tests/cdm/resolution_guidance/common_test.py
lukeenterprise/CDM
96c1da8b4bafe132eaee7243d9b7c6e42e87fc18
[ "CC-BY-4.0", "MIT" ]
1
2021-09-24T16:51:04.000Z
2021-09-24T16:51:04.000Z
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.

import os
import unittest

from typing import TYPE_CHECKING

from cdm.enums import CdmStatusLevel
from cdm.objectmodel import CdmCorpusDefinition
from cdm.storage import LocalAdapter
from cdm.utilities import ResolveOptions, AttributeResolutionDirectiveSet

from tests.common import TestHelper
from tests.utilities.object_validator import ObjectValidator

if TYPE_CHECKING:
    from cdm.objectmodel import CdmEntityDefinition
    from tests.utilities.object_validator import AttributeContextExpectedValue, AttributeExpectedValue


class CommonTest(unittest.TestCase):
    # The path of the SchemaDocs project.
    schema_docs_path = TestHelper.get_schema_docs_root()

    # The test's data path.
    tests_subpath = os.path.join('Cdm', 'ResolutionGuidance')

    async def run_test_with_values(
            self,
            test_name: str,
            source_entity_name: str,
            expected_context_default: 'AttributeContextExpectedValue',
            expected_context_normalized: 'AttributeContextExpectedValue',
            expected_context_reference_only: 'AttributeContextExpectedValue',
            expected_context_structured: 'AttributeContextExpectedValue',
            expected_context_normalized_structured: 'AttributeContextExpectedValue',
            expected_context_reference_only_normalized: 'AttributeContextExpectedValue',
            expected_context_reference_only_structured: 'AttributeContextExpectedValue',
            expected_context_reference_only_normalized_structured: 'AttributeContextExpectedValue',
            expected_default: 'List[AttributeExpectedValue]',
            expected_normalized: 'List[AttributeExpectedValue]',
            expected_reference_only: 'List[AttributeExpectedValue]',
            expected_structured: 'List[AttributeExpectedValue]',
            expected_normalized_structured: 'List[AttributeExpectedValue]',
            expected_reference_only_normalized: 'List[AttributeExpectedValue]',
            expected_reference_only_structured: 'List[AttributeExpectedValue]',
            expected_reference_only_normalized_structured: 'List[AttributeExpectedValue]'
    ) -> None:
        """This method runs the tests with a set expected attributes & attribute context values and validated the actual result."""
        try:
            test_input_path = TestHelper.get_input_folder_path(self.tests_subpath, test_name)

            corpus = CdmCorpusDefinition()
            corpus.ctx.report_at_level = CdmStatusLevel.WARNING
            corpus.storage.mount('localInput', LocalAdapter(test_input_path))
            corpus.storage.mount('cdm', LocalAdapter(self.schema_docs_path))
            corpus.storage.default_namespace = 'localInput'

            src_entity_def = await corpus.fetch_object_async('localInput:/{}.cdm.json/{}'.format(source_entity_name, source_entity_name))
            self.assertTrue(src_entity_def is not None)

            res_opt = ResolveOptions(wrt_doc=src_entity_def.in_document)

            resolved_entity_def = None  # type: CdmEntityDefinition
            output_entity_name = ''
            output_entity_file_name = ''
            entity_file_name = ''

            if expected_context_default and expected_default:
                entity_file_name = 'default'
                res_opt.directives = AttributeResolutionDirectiveSet(set())
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_default, expected_default, resolved_entity_def)

            if expected_context_normalized and expected_normalized:
                entity_file_name = 'normalized'
                res_opt.directives = AttributeResolutionDirectiveSet(set({'normalized'}))
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_normalized, expected_normalized, resolved_entity_def)

            if expected_context_reference_only and expected_reference_only:
                entity_file_name = 'referenceOnly'
                res_opt.directives = AttributeResolutionDirectiveSet(set({'referenceOnly'}))
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_reference_only, expected_reference_only, resolved_entity_def)

            if expected_context_structured and expected_structured:
                entity_file_name = 'structured'
                res_opt.directives = AttributeResolutionDirectiveSet(set({'structured'}))
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_structured, expected_structured, resolved_entity_def)

            if expected_context_normalized_structured and expected_normalized_structured:
                entity_file_name = 'normalized_structured'
                res_opt.directives = AttributeResolutionDirectiveSet(set({'normalized', 'structured'}))
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_normalized_structured, expected_normalized_structured, resolved_entity_def)

            if expected_context_reference_only_normalized and expected_reference_only_normalized:
                entity_file_name = 'referenceOnly_normalized'
                res_opt.directives = AttributeResolutionDirectiveSet(set({'referenceOnly', 'normalized'}))
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_reference_only_normalized, expected_reference_only_normalized, resolved_entity_def)

            if expected_context_reference_only_structured and expected_reference_only_structured:
                entity_file_name = 'referenceOnly_structured'
                res_opt.directives = AttributeResolutionDirectiveSet(set({'referenceOnly', 'structured'}))
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_reference_only_structured, expected_reference_only_structured, resolved_entity_def)

            if expected_context_reference_only_normalized_structured and expected_reference_only_normalized_structured:
                entity_file_name = 'referenceOnly_normalized_structured'
                res_opt.directives = AttributeResolutionDirectiveSet(set({'referenceOnly', 'normalized', 'structured'}))
                output_entity_name = '{}_Resolved_{}'.format(source_entity_name, entity_file_name)
                output_entity_file_name = '{}.cdm.json'.format(output_entity_name)
                resolved_entity_def = await src_entity_def.create_resolved_entity_async(output_entity_name, res_opt)
                self.validate_output_with_values(expected_context_reference_only_normalized_structured, expected_reference_only_normalized_structured, resolved_entity_def)
        except Exception as e:
            self.fail(e)

    def validate_output_with_values(self, expected_context: 'AttributeContextExpectedValue', expected_attributes: 'List[AttributeExpectedValue]', actual_resolved_entity_def: 'CdmEntityDefinition') -> None:
        """Runs validation to test actual output vs expected output for attributes collection vs attribute context."""
        ObjectValidator.validate_attributes_collection(self, expected_attributes, actual_resolved_entity_def.attributes)
        ObjectValidator.validate_attribute_context(self, expected_context, actual_resolved_entity_def.attribute_context)
66.661871
205
0.745521
969
9,266
6.660475
0.136223
0.055779
0.056399
0.059498
0.60753
0.545708
0.458475
0.353889
0.353889
0.336226
0
0
0.186272
9,266
139
206
66.661871
0.855968
0.036693
0
0.216216
0
0
0.122584
0.073118
0
0
0
0
0.009009
1
0.009009
false
0
0.099099
0
0.135135
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3fe9cadd102e32ca06c472d5c44ec2934c88eb6
4,592
py
Python
research/cv/PGAN/src/optimizer.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/cv/PGAN/src/optimizer.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/cv/PGAN/src/optimizer.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gnet define"""
from mindspore import ops
from mindspore import nn
from mindspore.ops import constexpr
import mindspore
import numpy as np


@constexpr
def generate_tensor(batch_size):
    """generate_tensor

    Returns:
        output.
    """
    np_array = np.random.randn(batch_size, 1, 1, 1)
    return mindspore.Tensor(np_array, mindspore.float32)


class GradientWithInput(nn.Cell):
    """GradientWithInput"""
    def __init__(self, discrimator):
        super(GradientWithInput, self).__init__()
        self.reduce_sum = ops.ReduceSum()
        self.discrimator = discrimator

    def construct(self, interpolates, alpha):
        """GradientWithInput

        Returns:
            output.
        """
        decisionInterpolate = self.discrimator(interpolates, alpha)
        decisionInterpolate = self.reduce_sum(decisionInterpolate, 0)
        return decisionInterpolate


class WGANGPGradientPenalty(nn.Cell):
    """WGANGPGradientPenalty"""
    def __init__(self, discrimator, lambdaGP=10):
        super(WGANGPGradientPenalty, self).__init__()
        self.reduce_sum = ops.ReduceSum()
        self.reduce_sum_keep_dim = ops.ReduceSum(keep_dims=True)
        self.sqrt = ops.Sqrt()
        self.discrimator = discrimator
        self.gradientWithInput = GradientWithInput(discrimator)
        self.lambdaGP = mindspore.Tensor(lambdaGP, mindspore.float32)
        self.gradient_op = ops.GradOperation()

    def construct(self, input_x, fake, input_alpha):
        """WGANGPGradientPenalty

        Returns:
            output.
        """
        batch_size = input_x.shape[0]
        alpha = generate_tensor(batch_size)
        alpha = alpha.expand_as(input_x)
        interpolates = alpha * input_x + ((1 - alpha) * fake)
        gradient = self.gradient_op(self.gradientWithInput)(interpolates, input_alpha)
        gradient = ops.reshape(gradient, (batch_size, -1))
        gradient = self.sqrt(self.reduce_sum(gradient * gradient, 1))
        gradient_penalty = self.reduce_sum_keep_dim((gradient - 1.0) ** 2) * self.lambdaGP
        return gradient_penalty


class AllLossD(nn.Cell):
    """AllLossD"""
    def __init__(self, netD):
        super(AllLossD, self).__init__()
        self.netD = netD
        self.wGANGPGradientPenalty = WGANGPGradientPenalty(self.netD)
        self.reduce_sum = ops.ReduceSum()
        self.epsilonLoss = EpsilonLoss(0.001)
        self.scalr_summary = ops.ScalarSummary()
        self.summary = ops.TensorSummary()

    def construct(self, real, fake, alpha):
        """AllLossD

        Returns:
            output.
        """
        predict_real = self.netD(real, alpha)
        loss_real = -self.reduce_sum(predict_real, 0)
        predict_fake = self.netD(fake, alpha)
        loss_fake = self.reduce_sum(predict_fake, 0)
        lossD_Epsilon = self.epsilonLoss(predict_real)
        lossD_Grad = self.wGANGPGradientPenalty(real, fake, alpha)
        all_loss = loss_real + loss_fake + lossD_Grad + lossD_Epsilon
        return all_loss


class AllLossG(nn.Cell):
    """AllLossG"""
    def __init__(self, netG, netD):
        super(AllLossG, self).__init__()
        self.netG = netG
        self.netD = netD
        self.reduce_sum = ops.ReduceSum()

    def construct(self, inputNoise, alpha):
        """AllLossG

        Returns:
            output.
        """
        fake = self.netG(inputNoise, alpha)
        predict_fake = self.netD(fake, alpha)
        loss_fake = -self.reduce_sum(predict_fake, 0)
        return loss_fake


class EpsilonLoss(nn.Cell):
    """EpsilonLoss"""
    def __init__(self, epsilonD):
        super(EpsilonLoss, self).__init__()
        self.reduce_sum = ops.ReduceSum()
        self.epsilonD = mindspore.Tensor(epsilonD, mindspore.float32)

    def construct(self, predRealD):
        """EpsilonLoss

        Returns:
            output.
        """
        return self.reduce_sum(predRealD ** 2) * self.epsilonD
31.027027
90
0.647213
509
4,592
5.644401
0.275049
0.045249
0.058824
0.027845
0.115211
0.10268
0.0811
0.0811
0.042464
0.042464
0
0.009989
0.236934
4,592
147
91
31.238095
0.809932
0.204486
0
0.148649
0
0
0
0
0
0
0
0
0
1
0.148649
false
0
0.067568
0
0.364865
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3fec431861c0ae27a970571bbb718d20a7f68e6
5,774
py
Python
lda-training.py
Bekyilma/Personalized-Visual-Art-Recommendation
60049ffe20302c6f510dff1dc297848e2208ec94
[ "MIT" ]
6
2020-11-30T13:34:53.000Z
2022-01-30T16:45:34.000Z
lda-training.py
Bekyilma/Personalized-Visual-Art-Recommendation
60049ffe20302c6f510dff1dc297848e2208ec94
[ "MIT" ]
null
null
null
lda-training.py
Bekyilma/Personalized-Visual-Art-Recommendation
60049ffe20302c6f510dff1dc297848e2208ec94
[ "MIT" ]
2
2020-11-30T05:38:15.000Z
2021-08-30T20:38:57.000Z
import pandas as pd
import numpy as np
from gensim.models.wrappers import LdaMallet
from sklearn.metrics.pairwise import cosine_similarity
from gensim.corpora import Dictionary
from gensim import corpora
import pickle
import os

"""This class trains the Latent Dirichlet Allocation (LDA) Model on painting description corpus.,
we want to compare the paintings by computing a similarity measure : cosine similarity"""

class LdaTraining:
    path_to_mallet_bin = "/resources/mallet-2.0.6/bin/mallet"  #path has to be absolute
    os.environ['MALLET_HOME'] = "/resources/mallet-2.0.6/"  #path has to be absolute
    path_save_score = 'resources/datasets/'
    path_save_outputs = 'resources/matrices/lda/'
    path_save_model = 'resources/models/'
    path_to_listwords = 'resources/datasets/preprocessed/list_words.txt'
    path_to_dict = 'resources/datasets/preprocessed/dict'
    path_to_corpus = 'resources/datasets/preprocessed/corpus'
    painting_df = pd.read_csv('resources/datasets/ng-dataset.csv')

    def __init__(self, num_topics):
        self.num_topics = num_topics

    def load_list_words(self, path_to_listwords):
        """Load the list of words"""
        with open(path_to_listwords, "rb") as fp:  # Unpickling
            list_words = pickle.load(fp)
        return list_words

    def load_dictionary(self, path_to_dict):
        """Load the dictionary"""
        dictionary = Dictionary.load(path_to_dict)
        return dictionary

    def load_corpus(self, path_to_corpus):
        """Load the corpus"""
        corpus = corpora.MmCorpus(path_to_corpus)
        return corpus

    def LdaModel(self, num_topics, corpus, dictionary):
        """Create a LDA topic model

        Input: num_topics: number of topics for the model
               corpus: gensim corpus
               ditionary: gensim dictionary
        Output: lda_model: a topic model using Latent Dirichlet Allocation (LDA)
        """
        lda_model = LdaMallet(mallet_path=self.path_to_mallet_bin, num_topics=num_topics,
                              corpus=corpus, id2word=dictionary, random_seed=123)
        return lda_model

    def transform_output(self, lda_model, corpus):
        """Transform the topic document matrix into an ordered array of topic distribution

        Input: lda_model: LDA model
               corpus: gensim corpus
        Output: lda_model: a topic model using Latent Dirichlet Allocation (LDA)
        """
        topdoc_mat = lda_model[corpus]
        topdoc_sorted = self.sort_tuples(topdoc_mat)
        lda_output = np.asarray(topdoc_sorted)
        return lda_output

    def sort_tuples(self, topdoc_mat):
        """Sort the tuples (topic, distribution) in a numeric ascending order and drop the topic index
        [(3,0.02), (1, 0.1), (2,0.03), ...] => [(1, 0.1), (2, 0.03), (3,0.02), ...] => [0.1, 0.03, 0.02]

        Input: topdoc_mat: matrix topic distribution / document
        Output: sorted tuples with index removed
        """
        # Reordering the topics in ascending order (0,1,2,3...)
        # so we can compare them using a similarity metrics
        for i in range(len(topdoc_mat)):
            topdoc_mat[i] = sorted(topdoc_mat[i], key=lambda tup: (tup[0], tup[1]))
            for j in range(len(topdoc_mat[i])):
                topdoc_mat[i][j] = topdoc_mat[i][j][1]
        return topdoc_mat

    def save_output(self, lda_output, path_save_outputs):
        np.save(path_save_outputs+'lda-output', lda_output)

    def save_cosine(self, cos_mat, path_save_outputs):
        np.save(path_save_outputs+'cosine-mat', cos_mat)

    def save_pairwise_score(self, painting_df, cos_mat, path_save_score):
        list_tuples = []
        for i, list_score in enumerate(cos_mat):
            for k, score in enumerate(list_score):
                list_tuples.append((i, k, score))
        sim_df = pd.DataFrame(list_tuples).rename(columns={0: 'painting_1', 1: 'painting_2', 2: 'score'})
        sim_df['painting_1'] = sim_df['painting_1'].apply(lambda x: painting_df.iloc[x].painting_id)
        sim_df['painting_2'] = sim_df['painting_2'].apply(lambda x: painting_df.iloc[x].painting_id)
        sim_df = sim_df.loc[sim_df['painting_1'] != sim_df['painting_2']]
        #sim_df.to_csv(path_save_score+'lda-scores')
        sim_df.to_csv('C:/Users/aghenda/Documents/Datasets/lda-scores.csv')

    def main(self):
        list_words = self.load_list_words(self.path_to_listwords)
        dictionary = self.load_dictionary(self.path_to_dict)
        corpus = self.load_corpus(self.path_to_corpus)
        lda_model = self.LdaModel(self.num_topics, corpus, dictionary)
        lda_model.save(self.path_save_model+'lda.model')
        lda_output = self.transform_output(lda_model, corpus)
        self.save_output(lda_output, self.path_save_outputs)
        cos_mat = cosine_similarity(lda_output)
        self.save_cosine(cos_mat, self.path_save_outputs)
        self.save_pairwise_score(self.painting_df, cos_mat, self.path_save_score)

if __name__=='__main__':
    lda = LdaTraining(10)
    lda.main()
41.84058
145
0.59283
711
5,774
4.566807
0.220816
0.02587
0.032338
0.02587
0.253157
0.209424
0.142901
0.10656
0.061595
0.061595
0
0.015175
0.315206
5,774
137
146
42.145985
0.806019
0.177347
0
0
0
0
0.106061
0.0662
0
0
0
0
0
1
0.152778
false
0
0.111111
0
0.472222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f3fff2b8f3f509e2dc362a994e0cf8b4578c863c
2,130
py
Python
scripts/convert_logos.py
TechLoaf/ServerMappings
bf3a00533514c3d684a039e797debaa4432bff8f
[ "MIT" ]
37
2021-11-10T22:34:26.000Z
2022-03-28T21:21:06.000Z
scripts/convert_logos.py
TechLoaf/ServerMappings
bf3a00533514c3d684a039e797debaa4432bff8f
[ "MIT" ]
55
2021-11-15T05:54:09.000Z
2022-03-30T15:19:21.000Z
scripts/convert_logos.py
TechLoaf/ServerMappings
bf3a00533514c3d684a039e797debaa4432bff8f
[ "MIT" ]
301
2021-11-12T04:22:34.000Z
2022-03-31T09:46:04.000Z
import os
import argparse
import json

import webptools

# Grant permissions to Webptools
webptools.grant_permission()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--servers', required=True, type=str)
    parser.add_argument('--servers_logos_source', required=True, type=str)
    parser.add_argument('--servers_logos_output', required=True, type=str)
    parser.add_argument('--sizes', nargs='+', type=int, default=[256])
    parser.add_argument('--lossless', default=False, action='store_true')
    args = parser.parse_args()

    # Load server mappings JSON
    servers = {}
    with open(args.servers) as servers_file:
        servers = json.load(servers_file)

    print(f'Converting {len(servers)} server logos.')

    # Create server logos output directory
    os.makedirs(args.servers_logos_output, exist_ok=True)

    for server in servers:
        server_id = server['id']
        server_name = server['name']

        logo_path = f'{args.servers_logos_source}/{server_id}.png'

        # Base 512 Size
        convert_and_resize(
            logo_path,
            f'{args.servers_logos_output}/{server_id}.webp',
            lossless=args.lossless
        )

        # Size-based destination name
        for size in args.sizes:
            convert_and_resize(
                logo_path,
                f'{args.servers_logos_output}/{server_id}-{size}.webp',
                lossless=args.lossless,
                size=size,
            )

        print(f'Successfully converted {server_name}\'s logo.')

    print(f'Sucessfully converted {len(servers)} server logos.')


# Utility to convert and resize images
def convert_and_resize(source, destination, lossless=False, size=512):
    options = [
        f'-metadata none',
        f'-resize {size} {size}'
    ]

    if lossless:
        options.append('-lossless')

    output = webptools.cwebp(
        input_image=source,
        output_image=destination,
        option=' '.join(options)
    )

    if output.get('exit_code'):
        raise OSError(f'Failed to run Webptools ({source})')


if __name__ == '__main__':
    main()
28.4
74
0.63615
251
2,130
5.203187
0.354582
0.05513
0.065084
0.05513
0.24732
0.204441
0.185299
0.157734
0.157734
0.084227
0
0.005601
0.24554
2,130
75
75
28.4
0.807094
0.080751
0
0.076923
0
0
0.228996
0.093238
0
0
0
0
0
1
0.038462
false
0
0.076923
0
0.115385
0.057692
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d019390c83aab6ce4e1c381cc60c695d75c0f7c
5,396
py
Python
__init__.py
raulbarbosa1996/CreateService
61a5419819274d4c1c461b2e93ddbc34d5398112
[ "Apache-2.0" ]
null
null
null
__init__.py
raulbarbosa1996/CreateService
61a5419819274d4c1c461b2e93ddbc34d5398112
[ "Apache-2.0" ]
null
null
null
__init__.py
raulbarbosa1996/CreateService
61a5419819274d4c1c461b2e93ddbc34d5398112
[ "Apache-2.0" ]
null
null
null
from mycroft import MycroftSkill, intent_file_handler
from mycroft.skills.context import adds_context, removes_context
from mycroft import intent_handler
from adapt.intent import IntentBuilder
import json
import requests
import base64


class Createservice(MycroftSkill):
    def __init__(self):
        MycroftSkill.__init__(self)
        self.s_messages = True
        self.d={}

    def initialize(self):
        def on_utterance(message):
            self.audio=message.data['signal']
            decode_string = base64.b64decode(self.audio.encode('utf-8'))
            send=base64.b64encode(decode_string).decode('ascii')
            self.d['audio']=json.dumps(send)
            self.d['tag'] = 'CreateService'

        self.add_event('recognizer_loop:utterance', on_utterance)

    @intent_handler('createservice.intent')
    @adds_context('NameContext')
    def handle_createservice(self, message):
        self.name=""
        self.hosts=[]
        self.internet=False
        self.performance=[]
        response = requests.post('http://localhost:5550/sr/identification', json=self.d)
        res=response.json()
        id_=res['id']
        name_user=res['user_name']
        if(id_==1):
            self.speak('Sure '+name_user+'. What is the name of the service?',expect_response=True)
        elif(id_==0):
            self.speak(name_user+", you dont have permissions for that")
        else:
            self.speak("User not recognize")

    @intent_handler(IntentBuilder('NameServiceIntent').require("Type").require('NameContext').build())
    @adds_context('HostsContext')
    def handle_name_service(self, message):
        utterance = message.data.get('Type')
        self.name=utterance
        self.log.info(utterance)
        self.speak('Sure. Please name the machines that will have access to the service',expect_response=True)

    @intent_handler(IntentBuilder('HostsIntent').require('HostsContext').build())
    @adds_context('InternetAccessContext')
    def handle_hosts_service(self, message):
        utterance = message.data.get('utterance')
        res = [int(i) for i in utterance.split() if i.isdigit()]
        res=[str(x) for x in res]
        self.log.info(res)
        self.hosts=res
        self.speak('Regarding internet access, does the service need internet access?',expect_response=True)

    @intent_handler(IntentBuilder('YesInternetIntent').require("Yes").require('InternetAccessContext').build())
    @adds_context('PerformanceContext')
    @removes_context('InternetAccessContext')
    def handle_yes_internet_access(self, message):
        self.internet=True
        self.log.info(self.internet)
        self.speak('One last question, do you want to define the performance of the service?',expect_response=True)

    @intent_handler(IntentBuilder('NoInternetIntent').require("No").require('InternetAccessContext').build())
    @adds_context('PerformanceContext')
    @removes_context('InternetAccessContext')
    def handle_no_internet_access(self, message):
        self.internet=False
        self.log.info(self.internet)
        self.speak('One last question, do you want to define the performance of the service?',expect_response=True)

    @intent_handler(IntentBuilder('YesPerformanceIntent').require("Yes").require('PerformanceContext').build())
    @removes_context('NameContext')
    @removes_context('HostsContext')
    @removes_context('InternetAccessContext')
    def handle_yes_performance(self, message):
        utterance = message.data.get('utterance')
        res = [int(i) for i in utterance.split() if i.isdigit()]
        self.performance=res[0]
        self.log.info(res)
        self.speak('Thanks for the information, wait a bit while I implement the service')
        json_={"IntentType": "CreateService","Intent_Target": "Service","Intent_State": "new intent","Conditions": [{"Policy": "CreateService","Constraints": [{ "Domains":[{"Name": self.name,"Bool": self.internet,"Acess": self.hosts,"Performance": self.performance}]}]}]}
        json_ = json.dumps(json_, indent = 4)
        self.log.info(json_)
        response = requests.post('http://localhost:5500/sr/intents', json=json_)
        self.log.info(response.text)
        dictFromServer = response.json()

    @intent_handler(IntentBuilder('NoPerformanceIntent').require("No").require('PerformanceContext').build())
    @removes_context('NameContext')
    @removes_context('HostsContext')
    @removes_context('InternetAccessContext')
    def handle_no_performance(self, message):
        self.performance=0
        self.log.info(self.performance)
        self.speak('Thanks for the information, wait a bit while I implement the service')
        json_={"IntentType": "CreateService","Intent_Target": "Service","Intent_State": "new intent","Conditions": [{"Policy": "CreateService","Constraints": [{ "Domains":[{"Name": self.name,"Bool": self.internet,"Acess": self.hosts,"Performance": self.performance}]}]}]}
        json_ = json.dumps(json_, indent = 4)
        self.log.info(json_)
        response = requests.post('http://localhost:5500/sr/intents', json=json_)
        self.log.info(response.text)
        dictFromServer = response.json()


def create_skill():
    return Createservice()
40.878788
271
0.662528
599
5,396
5.831386
0.248748
0.02004
0.031492
0.052963
0.569997
0.551388
0.506155
0.492413
0.476954
0.476954
0
0.006743
0.202928
5,396
131
272
41.19084
0.805394
0
0
0.363636
0
0
0.265987
0.031881
0
0
0
0
0
1
0.111111
false
0
0.070707
0.010101
0.20202
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d0269097886931199b82c35d6059adcc53136ef
2,076
py
Python
Public/utils.py
zouning68/ner-demo
ffdbf95fd0354766bd2f882ecb02d55a9b14b74d
[ "Apache-2.0" ]
2
2020-12-27T06:17:28.000Z
2020-12-27T06:17:33.000Z
Public/utils.py
zouning68/ner-demo
ffdbf95fd0354766bd2f882ecb02d55a9b14b74d
[ "Apache-2.0" ]
null
null
null
Public/utils.py
zouning68/ner-demo
ffdbf95fd0354766bd2f882ecb02d55a9b14b74d
[ "Apache-2.0" ]
null
null
null
import logging
import keras
import os
from Public.path import path_log_dir


def create_log(path, stream=False):
    """
    Get a logger object
    :param path: path of the log file
    :param stream: whether to also log to the console
                   False: do not log to the console
                   True: log to the console (default)
    :return: logger object
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S')

    if stream:
        # set up console (CMD) logging
        sh = logging.StreamHandler()
        sh.setFormatter(fmt)
        sh.setLevel(logging.DEBUG)
        logger.addHandler(sh)

    # set up file logging
    fh = logging.FileHandler(path, encoding='utf-8')
    fh.setFormatter(fmt)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    return logger


class TrainHistory(keras.callbacks.Callback):
    def __init__(self, log=None, model_name=None):
        super(TrainHistory, self).__init__()
        if not log:
            path = os.path.join(path_log_dir, 'callback.log')
            log = create_log(path=path, stream=False)
        self.log = log
        self.model_name = model_name
        self.epoch = 0
        self.info = []

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch = epoch
        message = f"begin epoch: {self.epoch}"
        self.log.info(message)

    def on_epoch_end(self, epoch, logs={}):
        message = f'end epoch: {epoch} loss:{logs["loss"]} val_loss:{logs["val_loss"]} acc:{logs["crf_viterbi_accuracy"]} val_acc:{logs["val_crf_viterbi_accuracy"]}'
        self.log.info(message)
        dict = {
            'model_name': self.model_name,
            'epoch': self.epoch+1,
            'loss': logs["loss"],
            'acc': logs['crf_viterbi_accuracy'],
            'val_loss': logs["val_loss"],
            'val_acc': logs['val_crf_viterbi_accuracy']
        }
        self.info.append(dict)

    def on_batch_end(self, batch, logs={}):
        message = f'{self.model_name} epoch: {self.epoch} batch:{batch} loss:{logs["loss"]} acc:{logs["crf_viterbi_accuracy"]}'
        self.log.info(message)
30.529412
165
0.601638
260
2,076
4.634615
0.303846
0.052282
0.074689
0.044813
0.301245
0.217427
0.172614
0.119502
0
0
0
0.001944
0.256744
2,076
67
166
30.985075
0.779002
0.067437
0
0.06383
0
0.042553
0.23393
0.084826
0
0
0
0
0
1
0.106383
false
0
0.085106
0
0.234043
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d060b5da617547297f6d4722b0d799b84785cb9
9,726
py
Python
appengine/findit/waterfall/trigger_base_swarming_task_pipeline.py
mithro/chromium-infra
d27ac0b230bedae4bc968515b02927cf9e17c2b7
[ "BSD-3-Clause" ]
null
null
null
appengine/findit/waterfall/trigger_base_swarming_task_pipeline.py
mithro/chromium-infra
d27ac0b230bedae4bc968515b02927cf9e17c2b7
[ "BSD-3-Clause" ]
null
null
null
appengine/findit/waterfall/trigger_base_swarming_task_pipeline.py
mithro/chromium-infra
d27ac0b230bedae4bc968515b02927cf9e17c2b7
[ "BSD-3-Clause" ]
null
null
null
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import copy
import logging
import time

from common.pipeline_wrapper import BasePipeline
from gae_libs.http.http_client_appengine import HttpClientAppengine
from libs import time_util
from model import analysis_status
from waterfall import monitoring
from waterfall import swarming_util
from waterfall import waterfall_config


class TriggerBaseSwarmingTaskPipeline(BasePipeline):  # pragma: no cover.
  """A pipeline to trigger a Swarming task to re-run selected tests of a step.

  This pipeline only supports test steps that run on Swarming and support the
  gtest filter.
  """

  def _GetSwarmingTaskName(self, ref_task_id):  # pragma: no cover.
    return 'findit/ref_task_id/%s/%s' % (
        ref_task_id, time_util.GetUTCNow().strftime('%Y-%m-%d %H:%M:%S %f'))

  def _CreateNewSwarmingTaskRequest(self, ref_task_id, ref_request,
                                    master_name, builder_name, build_number,
                                    step_name, tests, iterations):
    """Returns a SwarmingTaskRequest instance to run the given tests only."""
    # Make a copy of the referred request and drop or overwrite some fields.
    new_request = copy.deepcopy(ref_request)
    new_request.name = self._GetSwarmingTaskName(ref_task_id)
    new_request.parent_task_id = ''
    new_request.user = ''

    # To force a fresh re-run and ignore cached result of any equivalent run.
    new_request.idempotent = False

    # Set the gtest_filter to run the given tests only.
    # Remove existing test filter first.
    new_request.extra_args = [
        a for a in new_request.extra_args
        if (not a.startswith('--gtest_filter') and
            not a.startswith('--test-launcher-filter-file'))
    ]
    new_request.extra_args.append('--gtest_filter=%s' % ':'.join(tests))

    # On Android, --gtest_repeat is only supported for gtest, but not for other
    # test types. E.g. instrumentation tests currently support it via
    # --test-repeat.
    #
    # Here we blindly treat all tests on Android as gtest, and let other test
    # types fail out, because it is hard to distinguish them programmatically
    # while the majority is gtest.
    #
    # https://crbug.com/669632 tracks the effort to unify the command switches
    # of the Android test runner that are used here.
    new_request.extra_args.append('--gtest_repeat=%s' % iterations)
    ref_os = swarming_util.GetTagValue(ref_request.tags, 'os') or ''
    if ref_os.lower() == 'android':  # Workaround. pragma: no cover.
      new_request.extra_args.append('--num_retries=0')
    else:
      new_request.extra_args.append('--test-launcher-retry-limit=0')

    # Also rerun disabled tests. Scenario: the test was disabled before Findit
    # runs any analysis. One possible case:
    #   1. A gtest became flaky on CQ, but Findit was not automatically
    #      triggered to run any analysis because:
    #      * the test is not flaky enough
    #      * chromium-try-flakes has filed/updated too many bugs
    #   2. The test got disabled, but no culprit was identified.
    #   3. Some developer starts the investigation and requests Findit to
    #      analyze the flaky test.
    #   4. Findit picks the latest Waterfall build of the matching configuration
    #      for the CQ build in which the flaky test is found.
    #   5. In the picked Waterfall build, the test is already disabled.
    #
    # Note: test runner on Android ignores this flag because it is not supported
    # yet even though it exists.
    new_request.extra_args.append('--gtest_also_run_disabled_tests')

    # Remove the env setting for sharding.
    sharding_settings = ['GTEST_SHARD_INDEX', 'GTEST_TOTAL_SHARDS']
    new_request.env = [
        e for e in new_request.env if e['key'] not in sharding_settings
    ]

    # Reset tags for searching and monitoring.
    ref_name = swarming_util.GetTagValue(ref_request.tags, 'name')
    new_request.tags = []
    new_request.tags.append('ref_master:%s' % master_name)
    new_request.tags.append('ref_buildername:%s' % builder_name)
    new_request.tags.append('ref_buildnumber:%s' % build_number)
    new_request.tags.append('ref_stepname:%s' % step_name)
    new_request.tags.append('ref_task_id:%s' % ref_task_id)
    new_request.tags.append('ref_name:%s' % ref_name)

    # Add additional tags.
    for tag in self._GetAdditionalTags():
      new_request.tags.append(tag)

    return new_request

  def _GetAdditionalTags(self):
    """Returns additional tags for the Swarming task."""
    return []

  def _GetArgs(self, master_name, builder_name, build_number, step_name,
               tests):
    # Returns an array you can pass into _GetSwarmingTask, _CreateSwarmingTask,
    # _NeedANewSwarmingTask as the arguments.
    # Should be overwritten in child method.
    raise NotImplementedError(
        '_GetArgs should be implemented in child class')

  def _GetSwarmingTask(self):
    # Get the appropriate kind of Swarming Task (Wf or Flake).
    # Should be overwritten in child method.
    raise NotImplementedError(
        '_GetSwarmingTask should be implemented in child class')

  def _CreateSwarmingTask(self):
    # Create the appropriate kind of Swarming Task (Wf or Flake)
    # Should be overwritten in child method.
    raise NotImplementedError(
        '_CreateSwarmingTask should be implemented in child class')

  def _OnTaskTriggered(self):
    """A hook function called after the Swarming task is actually triggered."""
    pass

  def _NeedANewSwarmingTask(self, *args):
    swarming_task = self._GetSwarmingTask(*args)
    if not swarming_task:
      swarming_task = self._CreateSwarmingTask(*args)
      swarming_task.status = analysis_status.PENDING
      swarming_task.put()
      return True
    else:
      # TODO(http://crbug.com/585676): Rerun the Swarming task if it runs into
      # unexpected infra errors.
      return False

  def _GetSwarmingTaskId(self, *args):
    swarming_settings = waterfall_config.GetSwarmingSettings()
    wait_seconds = swarming_settings.get('get_swarming_task_id_wait_seconds')
    timeout_seconds = swarming_settings.get(
        'get_swarming_task_id_timeout_seconds')
    deadline = time.time() + timeout_seconds

    while time.time() < deadline:
      swarming_task = self._GetSwarmingTask(*args)
      if not swarming_task:  # pragma: no cover. Pipeline will retry.
        raise Exception('Swarming task was deleted unexpectedly!')

      if swarming_task.task_id:
        return swarming_task.task_id

      # Wait for the existing pipeline to start the Swarming task.
      time.sleep(wait_seconds)

    raise Exception('Time out!')  # pragma: no cover. Pipeline will retry.

  def _GetIterationsToRerun(self):
    # How many times we want to run the swarming rerun
    # By default, it's what's in wf_config
    raise NotImplementedError(
        '_GetIterationsToRerun should be implemented in child class')

  # Arguments number differs from overridden method - pylint: disable=W0221
  def run(self, master_name, builder_name, build_number, step_name, tests):
    """Triggers a new Swarming task to run the given tests.

    Args:
      master_name (str): The master name.
      builder_name (str): The builder name.
      build_number (str): The build number.
      step_name (str): The failed test step name.
      tests (list): A list of test cases, eg: ['suite1.test1', 'suite2.testw2']

    Returns:
      task_id (str): The new Swarming task that re-run the given tests.
    """
    call_args = self._GetArgs(master_name, builder_name, build_number,
                              step_name, tests)

    # Check if a new Swarming Task is really needed.
    if not self._NeedANewSwarmingTask(*call_args):
      return self._GetSwarmingTaskId(*call_args)

    assert tests

    http_client = HttpClientAppengine()

    # 0. Retrieve existing Swarming task ids for the given step.
    swarming_task_items = swarming_util.ListSwarmingTasksDataByTags(
        master_name, builder_name, build_number, http_client,
        {'stepname': step_name})
    if len(swarming_task_items) < 1:
      monitoring.swarming_tasks.increment(
          {'operation': 'refer', 'category': 'copy-settings-and-parameters'})
      raise Exception('No Swarming task was run at %s, %s, %s' % (
          master_name, builder_name, build_number))
    ref_task_id = swarming_task_items[0]['task_id']

    # 1. Retrieve Swarming task parameters from a given Swarming task id.
    ref_request = swarming_util.GetSwarmingTaskRequest(
        ref_task_id, http_client)

    # 2. Update/Overwrite parameters for the re-run.
    iterations_to_rerun = self._GetIterationsToRerun()
    new_request = self._CreateNewSwarmingTaskRequest(
        ref_task_id, ref_request, master_name, builder_name, build_number,
        step_name, tests, iterations_to_rerun)

    # 3. Trigger a new Swarming task to re-run the failed tests.
    task_id, error = swarming_util.TriggerSwarmingTask(new_request,
                                                       http_client)

    # Update swarming task info.
    swarming_task = self._GetSwarmingTask(*call_args)
    swarming_task.task_id = task_id
    swarming_task.parameters['tests'] = tests
    swarming_task.parameters['iterations_to_rerun'] = iterations_to_rerun
    swarming_task.parameters['ref_name'] = swarming_util.GetTagValue(
        new_request.tags, 'ref_name')
    if error:
      swarming_task.error = error
    else:
      logging.info('A Swarming task was triggered:%s', task_id)
    swarming_task.put()

    # Call the hook function after the task is triggered.
    self._OnTaskTriggered()

    return task_id
40.024691
80
0.709336
1,293
9,726
5.153906
0.264501
0.070228
0.013505
0.02521
0.230642
0.191627
0.125
0.109694
0.088385
0.066026
0
0.004816
0.210056
9,726
242
81
40.190083
0.862554
0.369217
0
0.104
0
0
0.139694
0.038129
0
0
0
0.004132
0.008
1
0.088
false
0.008
0.08
0.008
0.24
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d0a6c7c39afb1420e793306938adb7daf4564ab
3,253
py
Python
pipeline.py
AngelFA04/newspapers_pipeline_scraping
283cf7fdea283ad17a5db419bdd271ab1504feae
[ "MIT" ]
null
null
null
pipeline.py
AngelFA04/newspapers_pipeline_scraping
283cf7fdea283ad17a5db419bdd271ab1504feae
[ "MIT" ]
null
null
null
pipeline.py
AngelFA04/newspapers_pipeline_scraping
283cf7fdea283ad17a5db419bdd271ab1504feae
[ "MIT" ]
null
null
null
import logging
logging.basicConfig(level=logging.INFO)
import subprocess
import os
import shutil
import re

from extract.common import config_dict as config

logger = logging.getLogger(__name__)

# List of all the news websites stored in extract/config.yaml
news_sites_uids = [site for site in config()['news_sites']]


def main():
    _extract()
    _transform()
    _load()


def _extract():
    logger.info('Starting extract process')
    for news_site_uid in news_sites_uids:
        # Execute the extraction for every news site.
        # subprocess.run(['python', 'main.py', news_site_uid], cwd='./extract')
        os.system(f'python ./extract/main.py {news_site_uid} {news_site_uid}')

        # Move the .csv/.json file generated by the scraper to the
        # 'transform' directory.
        r = re.compile(r'.*\.(csv|json)')
        try:
            source = list(filter(r.match, os.listdir(path='./extract')))[0]
            if source.endswith('.json'):
                shutil.move(f'./extract/{source}',
                            f'./transform/{news_site_uid}_.json')
            elif source.endswith('.csv'):
                shutil.move(f'./extract/{source}',
                            f'./transform/{news_site_uid}_.csv')
        except IndexError:
            logger.warning(
                f'There is no csv or json file associated with {news_site_uid}')


def _transform():
    logger.info('Starting transform process')
    # Infer the extension ('csv' or 'json') of the files waiting in ./transform.
    extension = re.search(r'(\.csv|\.json)',
                          str(os.listdir(path='./transform'))).group(1)[1:]
    for news_site_uid in news_sites_uids:
        try:
            # Execute main.py to clean the data and create clean data files.
            subprocess.run(['python', 'main.py', f'{news_site_uid}_.{extension}'],
                           cwd='./transform')
            # Remove the dirty data file.
            os.remove(f'./transform/{news_site_uid}_.{extension}')
            # Move the clean data file into the 'load' directory.
            shutil.move(f'./transform/clean_{news_site_uid}_.csv',
                        f'./load/{news_site_uid}_.csv')
        except OSError:
            logger.warning(
                f'There is no csv file associated with {news_site_uid} '
                'in the "transform" directory')


def _load():
    logger.info('Starting load process')
    for news_site_uid in news_sites_uids:
        try:
            # Execute 'main.py' to load the data into a SQLite database.
            subprocess.run(['python', 'main.py', f'{news_site_uid}_.csv'],
                           cwd='./load')
            # Remove the csv file.
            os.remove(f'./load/{news_site_uid}_.csv')
        except OSError:
            logger.warning(
                f'There is no csv file associated with {news_site_uid} '
                'in the "load" directory')


if __name__ == "__main__":
    main()
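
For context, the config() call above is assumed to deserialize extract/config.yaml into a mapping whose top-level 'news_sites' keys are the site uids. A hypothetical shape (the uids and URLs are invented for illustration):

# Assumed shape of the object returned by config(); only the 'news_sites'
# keys are actually consumed by the pipeline above.
example_config = {
    'news_sites': {
        'eluniversal': {'url': 'https://www.eluniversal.com.mx'},
        'elpais': {'url': 'https://elpais.com'},
    }
}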
36.144444
111
0.609284
431
3,253
4.392111
0.229698
0.088748
0.122029
0.05177
0.464871
0.39197
0.352879
0.329107
0.313788
0.19757
0
0.002463
0.251153
3,253
89
112
36.550562
0.774631
0.197049
0
0.211538
0
0
0.341054
0.114217
0
0
0
0
0
1
0.076923
false
0.019231
0.115385
0
0.192308
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d0b6747f2e0ed70b58670bd13ac67dc8b1e6354
1,149
py
Python
setup.py
newnativeabq/mendeley-python-sdk
bbdcb3520ac23d56566407bbe3f976a097dcfa45
[ "Apache-2.0" ]
103
2015-01-12T00:40:51.000Z
2022-03-29T07:02:06.000Z
setup.py
mnpopcenter/mendeley-python-sdk
bffd2ed5945a47f67be54049b1bac812a4bc7dfc
[ "Apache-2.0" ]
26
2015-01-10T04:08:41.000Z
2021-02-05T16:31:37.000Z
setup.py
mnpopcenter/mendeley-python-sdk
bffd2ed5945a47f67be54049b1bac812a4bc7dfc
[ "Apache-2.0" ]
43
2015-03-04T18:11:06.000Z
2022-03-13T02:33:34.000Z
from setuptools import setup

__version__ = None
with open('mendeley/version.py') as f:
    exec(f.read())

setup(
    name='mendeley',
    version=__version__,
    packages=['mendeley', 'mendeley.models', 'mendeley.resources'],
    url='http://dev.mendeley.com',
    license='Apache',
    author='Mendeley',
    author_email='api@mendeley.com',
    description='Python SDK for the Mendeley API',

    install_requires=[
        'arrow==0.5.0',
        'future==0.14.3',
        'memoized-property==1.0.2',
        'requests==2.5.1',
        'requests-oauthlib==0.4.2',
        'oauthlib==0.7.2'
    ],

    tests_require=[
        'pytest==2.6.4',
        'vcrpy==1.2.0'
    ],

    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ]
)
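
The exec(f.read()) idiom above picks up __version__ without importing the package before its dependencies are installed; it assumes mendeley/version.py is essentially a one-line assignment, roughly:

# Hypothetical contents of mendeley/version.py; the exact version string is
# an illustrative assumption.
__version__ = '0.3.2'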
26.72093
70
0.582245
124
1,149
5.306452
0.596774
0.086626
0.113982
0
0
0
0
0
0
0
0
0.032787
0.256745
1,149
42
71
27.357143
0.737705
0
0
0.054054
0
0
0.536989
0.060923
0
0
0
0
0
1
0
false
0
0.027027
0
0.027027
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d0bebe5d9c8a55be30fdd8295e43dbdffe078b9
14,002
py
Python
Chapter-11/collections/ansible_collections/kubernetes/core/plugins/module_utils/copy.py
PacktPublishing/Ansible-for-Real-life-Automation
35c0d92ea08a5dbf3bea749e1971cffabd5e6de4
[ "MIT" ]
7
2021-11-16T04:05:42.000Z
2022-02-19T21:14:29.000Z
Chapter-11/collections/ansible_collections/kubernetes/core/plugins/module_utils/copy.py
PacktPublishing/Ansible-for-Real-life-Automation
35c0d92ea08a5dbf3bea749e1971cffabd5e6de4
[ "MIT" ]
1
2022-03-12T02:25:26.000Z
2022-03-12T02:25:26.000Z
Chapter-11/collections/ansible_collections/kubernetes/core/plugins/module_utils/copy.py
PacktPublishing/Ansible-for-Real-life-Automation
35c0d92ea08a5dbf3bea749e1971cffabd5e6de4
[ "MIT" ]
1
2022-03-01T05:43:07.000Z
2022-03-01T05:43:07.000Z
# Copyright [2021] [Red Hat, Inc.]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import, division, print_function

__metaclass__ = type

import os
from tempfile import TemporaryFile, NamedTemporaryFile
from select import select
from abc import ABCMeta, abstractmethod
import tarfile

# from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
from ansible.module_utils._text import to_native

try:
    from kubernetes.client.api import core_v1_api
    from kubernetes.stream import stream
    from kubernetes.stream.ws_client import (
        STDOUT_CHANNEL,
        STDERR_CHANNEL,
        ERROR_CHANNEL,
        ABNF,
    )
except ImportError:
    pass

try:
    import yaml
except ImportError:
    # ImportError is managed by the common module already.
    pass


class K8SCopy(metaclass=ABCMeta):
    def __init__(self, module, client):
        self.client = client
        self.module = module
        self.api_instance = core_v1_api.CoreV1Api(client.client)

        self.local_path = module.params.get("local_path")
        self.name = module.params.get("pod")
        self.namespace = module.params.get("namespace")
        self.remote_path = module.params.get("remote_path")
        self.content = module.params.get("content")
        self.no_preserve = module.params.get("no_preserve")

        self.container_arg = {}
        if module.params.get("container"):
            self.container_arg["container"] = module.params.get("container")

    @abstractmethod
    def run(self):
        pass


class K8SCopyFromPod(K8SCopy):
    """
    Copy files/directory from Pod into local filesystem
    """

    def __init__(self, module, client):
        super(K8SCopyFromPod, self).__init__(module, client)
        self.is_remote_path_dir = None
        self.files_to_copy = list()

    def list_remote_files(self):
        """
        This method will check if the remote path is a dir or file
        if it is a directory the file list will be updated accordingly
        """
        try:
            find_cmd = ["find", self.remote_path, "-type", "f", "-name", "*"]
            response = stream(
                self.api_instance.connect_get_namespaced_pod_exec,
                self.name,
                self.namespace,
                command=find_cmd,
                stdout=True,
                stderr=True,
                stdin=False,
                tty=False,
                _preload_content=False,
                **self.container_arg
            )
        except Exception as e:
            self.module.fail_json(
                msg="Failed to execute on pod {0}/{1} due to : {2}".format(
                    self.namespace, self.name, to_native(e)
                )
            )
        stderr = []
        while response.is_open():
            response.update(timeout=1)
            if response.peek_stdout():
                self.files_to_copy.extend(
                    response.read_stdout().rstrip("\n").split("\n")
                )
            if response.peek_stderr():
                err = response.read_stderr()
                if "No such file or directory" in err:
                    self.module.fail_json(
                        msg="{0} does not exist in remote pod filesystem".format(
                            self.remote_path
                        )
                    )
                stderr.append(err)
        error = response.read_channel(ERROR_CHANNEL)
        response.close()
        error = yaml.safe_load(error)
        if error["status"] != "Success":
            self.module.fail_json(
                msg="Failed to execute on Pod due to: {0}".format(error)
            )

    def read(self):
        self.stdout = None
        self.stderr = None

        if self.response.is_open():
            if not self.response.sock.connected:
                self.response._connected = False
            else:
                ret, out, err = select((self.response.sock.sock,), (), (), 0)
                if ret:
                    code, frame = self.response.sock.recv_data_frame(True)
                    if code == ABNF.OPCODE_CLOSE:
                        self.response._connected = False
                    elif (
                        code in (ABNF.OPCODE_BINARY, ABNF.OPCODE_TEXT)
                        and len(frame.data) > 1
                    ):
                        channel = frame.data[0]
                        content = frame.data[1:]
                        if content:
                            if channel == STDOUT_CHANNEL:
                                self.stdout = content
                            elif channel == STDERR_CHANNEL:
                                self.stderr = content.decode("utf-8", "replace")

    def copy(self):
        is_remote_path_dir = (
            len(self.files_to_copy) > 1 or self.files_to_copy[0] != self.remote_path
        )
        relpath_start = self.remote_path
        if is_remote_path_dir and os.path.isdir(self.local_path):
            relpath_start = os.path.dirname(self.remote_path)

        for remote_file in self.files_to_copy:
            dest_file = self.local_path
            if is_remote_path_dir:
                dest_file = os.path.join(
                    self.local_path,
                    os.path.relpath(remote_file, start=relpath_start),
                )
                # create directory to copy file in
                os.makedirs(os.path.dirname(dest_file), exist_ok=True)

            pod_command = ["cat", remote_file]
            self.response = stream(
                self.api_instance.connect_get_namespaced_pod_exec,
                self.name,
                self.namespace,
                command=pod_command,
                stderr=True,
                stdin=True,
                stdout=True,
                tty=False,
                _preload_content=False,
                **self.container_arg
            )
            errors = []
            with open(dest_file, "wb") as fh:
                while self.response._connected:
                    self.read()
                    if self.stdout:
                        fh.write(self.stdout)
                    if self.stderr:
                        errors.append(self.stderr)
            if errors:
                self.module.fail_json(
                    msg="Failed to copy file from Pod: {0}".format("".join(errors))
                )
        self.module.exit_json(
            changed=True,
            result="{0} successfully copied locally into {1}".format(
                self.remote_path, self.local_path
            ),
        )

    def run(self):
        self.list_remote_files()
        if self.files_to_copy == []:
            self.module.exit_json(
                changed=False,
                warning="No file found from directory '{0}' into remote Pod.".format(
                    self.remote_path
                ),
            )
        self.copy()


class K8SCopyToPod(K8SCopy):
    """
    Copy files/directory from local filesystem into remote Pod
    """

    def __init__(self, module, client):
        super(K8SCopyToPod, self).__init__(module, client)
        self.files_to_copy = list()

    def run_from_pod(self, command):
        response = stream(
            self.api_instance.connect_get_namespaced_pod_exec,
            self.name,
            self.namespace,
            command=command,
            stderr=True,
            stdin=False,
            stdout=True,
            tty=False,
            _preload_content=False,
            **self.container_arg
        )
        errors = []
        while response.is_open():
            response.update(timeout=1)
            if response.peek_stderr():
                errors.append(response.read_stderr())
        response.close()
        err = response.read_channel(ERROR_CHANNEL)
        err = yaml.safe_load(err)
        response.close()
        if err["status"] != "Success":
            self.module.fail_json(
                msg="Failed to run {0} on Pod.".format(command), errors=errors
            )

    def is_remote_path_dir(self):
        pod_command = ["test", "-d", self.remote_path]
        response = stream(
            self.api_instance.connect_get_namespaced_pod_exec,
            self.name,
            self.namespace,
            command=pod_command,
            stdout=True,
            stderr=True,
            stdin=False,
            tty=False,
            _preload_content=False,
            **self.container_arg
        )
        while response.is_open():
            response.update(timeout=1)
        err = response.read_channel(ERROR_CHANNEL)
        err = yaml.safe_load(err)
        response.close()
        if err["status"] == "Success":
            return True
        return False

    def close_temp_file(self):
        if self.named_temp_file:
            self.named_temp_file.close()

    def run(self):
        # remove trailing slash from destination path
        dest_file = self.remote_path.rstrip("/")
        src_file = self.local_path
        self.named_temp_file = None
        if self.content:
            self.named_temp_file = NamedTemporaryFile(mode="w")
            self.named_temp_file.write(self.content)
            self.named_temp_file.flush()
            src_file = self.named_temp_file.name
        else:
            if not os.path.exists(self.local_path):
                self.module.fail_json(
                    msg="{0} does not exist in local filesystem".format(self.local_path)
                )
            if not os.access(self.local_path, os.R_OK):
                self.module.fail_json(msg="{0} not readable".format(self.local_path))

        if self.is_remote_path_dir():
            if self.content:
                self.module.fail_json(
                    msg="When content is specified, remote path should not be an existing directory"
                )
            else:
                dest_file = os.path.join(dest_file, os.path.basename(src_file))

        if self.no_preserve:
            tar_command = [
                "tar",
                "--no-same-permissions",
                "--no-same-owner",
                "-xmf",
                "-",
            ]
        else:
            tar_command = ["tar", "-xmf", "-"]

        if dest_file.startswith("/"):
            tar_command.extend(["-C", "/"])

        response = stream(
            self.api_instance.connect_get_namespaced_pod_exec,
            self.name,
            self.namespace,
            command=tar_command,
            stderr=True,
            stdin=True,
            stdout=True,
            tty=False,
            _preload_content=False,
            **self.container_arg
        )
        with TemporaryFile() as tar_buffer:
            with tarfile.open(fileobj=tar_buffer, mode="w") as tar:
                tar.add(src_file, dest_file)
            tar_buffer.seek(0)
            commands = []
            # push command in chunk mode
            size = 1024 * 1024
            while True:
                data = tar_buffer.read(size)
                if not data:
                    break
                commands.append(data)

            stderr, stdout = [], []
            while response.is_open():
                if response.peek_stdout():
                    stdout.append(response.read_stdout().rstrip("\n"))
                if response.peek_stderr():
                    stderr.append(response.read_stderr().rstrip("\n"))
                if commands:
                    cmd = commands.pop(0)
                    response.write_stdin(cmd)
                else:
                    break
            response.close()
            if stderr:
                self.close_temp_file()
                self.module.fail_json(
                    command=tar_command,
                    msg="Failed to copy local file/directory into Pod due to: {0}".format(
                        "".join(stderr)
                    ),
                )
        self.close_temp_file()
        if self.content:
            self.module.exit_json(
                changed=True,
                result="Content successfully copied into {0} on remote Pod".format(
                    self.remote_path
                ),
            )
        self.module.exit_json(
            changed=True,
            result="{0} successfully copied into remote Pod into {1}".format(
                self.local_path, self.remote_path
            ),
        )


def check_pod(k8s_ansible_mixin, module):
    resource = k8s_ansible_mixin.find_resource("Pod", None, True)
    namespace = module.params.get("namespace")
    name = module.params.get("pod")
    container = module.params.get("container")

    def _fail(exc):
        arg = {}
        if hasattr(exc, "body"):
            msg = "Namespace={0} Kind=Pod Name={1}: Failed requested object: {2}".format(
                namespace, name, exc.body
            )
        else:
            msg = to_native(exc)
        for attr in ["status", "reason"]:
            if hasattr(exc, attr):
                arg[attr] = getattr(exc, attr)
        module.fail_json(msg=msg, **arg)

    try:
        result = resource.get(name=name, namespace=namespace)
        containers = [
            c["name"] for c in result.to_dict()["status"]["containerStatuses"]
        ]
        if container and container not in containers:
            module.fail_json(msg="Pod has no container {0}".format(container))
        return containers
    except Exception as exc:
        _fail(exc)
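
A sketch of how these classes are typically dispatched from an Ansible module entry point. The 'state' parameter name, the mixin's client attribute, and the function name are assumptions inferred from K8SCopy.__init__ and check_pod, not a verbatim copy of the collection's k8s_cp module.

def execute_module(module, k8s_ansible_mixin):
    # Validate the target pod/container first.
    check_pod(k8s_ansible_mixin, module)
    client = k8s_ansible_mixin.client  # assumed attribute on the mixin
    # Hypothetical dispatch on an assumed 'state' parameter.
    if module.params.get('state') == 'from_pod':
        K8SCopyFromPod(module, client).run()
    else:
        K8SCopyToPod(module, client).run()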
34.318627
100
0.536638
1,515
14,002
4.782178
0.188119
0.028986
0.023188
0.023464
0.349758
0.244168
0.213803
0.196411
0.190752
0.184955
0
0.006787
0.368662
14,002
407
101
34.402948
0.812783
0.074418
0
0.384164
0
0
0.07414
0.00163
0
0
0
0
0
1
0.041056
false
0.008798
0.038123
0
0.096774
0.002933
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d0d8400dda85593833348d99b940ce42c8b67f1
10,002
py
Python
src/mist/api/networks/models.py
cc-daveloper/mist.io_mist.api
d3f9b8d478f23bf811c0bc6d3078e512aa975f86
[ "Apache-2.0" ]
1
2019-04-10T11:37:25.000Z
2019-04-10T11:37:25.000Z
src/mist/api/networks/models.py
d-mo/mist.api
d3f9b8d478f23bf811c0bc6d3078e512aa975f86
[ "Apache-2.0" ]
3
2021-04-07T23:15:17.000Z
2021-09-23T23:21:45.000Z
src/mist/api/networks/models.py
cc-daveloper/mist.io_mist.api
d3f9b8d478f23bf811c0bc6d3078e512aa975f86
[ "Apache-2.0" ]
null
null
null
import re
import uuid

import netaddr
import mongoengine as me

from mist.api.exceptions import RequiredParameterMissingError

from mist.api.clouds.models import Cloud
from mist.api.clouds.models import CLOUDS

from mist.api.networks.controllers import SubnetController
from mist.api.networks.controllers import NetworkController


# Automatically populated mappings of all Network and Subnet subclasses,
# keyed by their provider name.
NETWORKS, SUBNETS = {}, {}


def _populate_class_mapping(mapping, class_suffix, base_class):
    """Populates a dict that matches a provider name with its model class."""
    for key, value in globals().items():
        if key.endswith(class_suffix) and key != class_suffix:
            if issubclass(value, base_class) and value is not base_class:
                for provider, cls in CLOUDS.items():
                    if key.replace(class_suffix, '') in repr(cls):
                        mapping[provider] = value


class Network(me.Document):
    """The basic Network model.

    This class is only meant to be used as a basic class for cloud-specific
    `Network` subclasses.

    `Network` contains all common, provider-independent fields and handlers.
    """

    id = me.StringField(primary_key=True, default=lambda: uuid.uuid4().hex)
    cloud = me.ReferenceField(Cloud, required=True)
    network_id = me.StringField()  # required=True)
    name = me.StringField()
    cidr = me.StringField()
    description = me.StringField()
    extra = me.DictField()  # The `extra` dictionary returned by libcloud.

    meta = {
        'allow_inheritance': True,
        'collection': 'networks',
        'indexes': [
            {
                'fields': ['cloud', 'network_id'],
                'sparse': False,
                'unique': True,
                'cls': False,
            },
        ],
    }

    def __init__(self, *args, **kwargs):
        super(Network, self).__init__(*args, **kwargs)
        # Set `ctl` attribute.
        self.ctl = NetworkController(self)
        # Calculate and store network type specific fields.
        self._network_specific_fields = [field for field in type(self)._fields
                                         if field not in Network._fields]

    @classmethod
    def add(cls, cloud, cidr=None, name='', description='', id='', **kwargs):
        """Add a Network.

        This is a class method, meaning that it is meant to be called on the
        class itself and not on an instance of the class.

        You're not meant to be calling this directly, but on a network
        subclass instead like this:

            network = AmazonNetwork.add(cloud=cloud, name='Ec2Network')

        :param cloud: the Cloud on which the network is going to be created.
        :param cidr:
        :param name: the name to be assigned to the new network.
        :param description: an optional description.
        :param id: a custom object id, passed in case of a migration.
        :param kwargs: the kwargs to be passed to the corresponding
            controller.
        """
        assert isinstance(cloud, Cloud)
        network = cls(cloud=cloud, cidr=cidr, name=name,
                      description=description)
        if id:
            network.id = id
        network.ctl.create(**kwargs)
        return network

    def clean(self):
        """Checks the CIDR to determine if it maps to a valid IPv4 network."""
        if self.cidr:
            try:
                netaddr.cidr_to_glob(self.cidr)
            except (TypeError, netaddr.AddrFormatError) as err:
                raise me.ValidationError(err)

    def as_dict(self):
        """Returns the API representation of the `Network` object."""
        net_dict = {
            'id': self.id,
            'cloud': self.cloud.id,
            'network_id': self.network_id,
            'name': self.name,
            'cidr': self.cidr,
            'description': self.description,
            'extra': self.extra,
        }
        net_dict.update(
            {key: getattr(self, key) for key in self._network_specific_fields}
        )
        return net_dict

    def __str__(self):
        return '%s "%s" (%s)' % (self.__class__.__name__, self.name, self.id)


class AmazonNetwork(Network):
    instance_tenancy = me.StringField(default='default',
                                      choices=('default', 'private'))

    def clean(self):
        """Extended validation for EC2 Networks to ensure CIDR assignment."""
        if not self.cidr:
            raise me.ValidationError('Missing IPv4 range in CIDR notation')
        super(AmazonNetwork, self).clean()


class GoogleNetwork(Network):
    mode = me.StringField(default='legacy',
                          choices=('legacy', 'auto', 'custom'))

    def clean(self):
        """Custom validation for GCE Networks.

        GCE enforces:

            - Regex constraints on network names.
            - CIDR assignment only if `legacy` mode has been selected.
        """
        if self.mode == 'legacy':
            super(GoogleNetwork, self).clean()
        elif self.cidr is not None:
            raise me.ValidationError('CIDR cannot be set for modes other than '
                                     '"legacy" - Current mode: %s' % self.mode)
        if not re.match('^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$', self.name):
            raise me.ValidationError('A **lowercase** name must be specified')


class OpenStackNetwork(Network):
    shared = me.BooleanField(default=False)
    admin_state_up = me.BooleanField(default=True)
    router_external = me.BooleanField(default=False)


class Subnet(me.Document):
    """The basic Subnet model.

    This class is only meant to be used as a basic class for cloud-specific
    `Subnet` subclasses.

    `Subnet` contains all common, provider-independent fields and handlers.
    """

    id = me.StringField(primary_key=True, default=lambda: uuid.uuid4().hex)
    network = me.ReferenceField('Network', required=True,
                                reverse_delete_rule=me.CASCADE)
    subnet_id = me.StringField()
    name = me.StringField()
    cidr = me.StringField(required=True)
    description = me.StringField()
    extra = me.DictField()  # The `extra` dictionary returned by libcloud.

    meta = {
        'allow_inheritance': True,
        'collection': 'subnets',
        'indexes': [
            {
                'fields': ['network', 'subnet_id'],
                'sparse': False,
                'unique': True,
                'cls': False,
            },
        ],
    }

    def __init__(self, *args, **kwargs):
        super(Subnet, self).__init__(*args, **kwargs)
        # Set `ctl` attribute.
        self.ctl = SubnetController(self)
        # Calculate and store subnet type specific fields.
        self._subnet_specific_fields = [field for field in type(self)._fields
                                        if field not in Subnet._fields]

    @classmethod
    def add(cls, network, cidr, name='', description='', id='', **kwargs):
        """Add a Subnet.

        This is a class method, meaning that it is meant to be called on the
        class itself and not on an instance of the class.

        You're not meant to be calling this directly, but on a subnet
        subclass instead like this:

            subnet = AmazonSubnet.add(network=network, name='Ec2Subnet',
                                      cidr='172.31.10.0/24')

        :param network: the Network on which the subnet is going to be
            created.
        :param cidr: the CIDR to be assigned to the new subnet.
        :param name: the name to be assigned to the new subnet.
        :param description: an optional description.
        :param id: a custom object id, passed in case of a migration.
        :param kwargs: the kwargs to be passed to the corresponding
            controller.
        """
        assert isinstance(network, Network)
        if not cidr:
            raise RequiredParameterMissingError('cidr')
        subnet = cls(network=network, cidr=cidr, name=name,
                     description=description)
        if id:
            subnet.id = id
        subnet.ctl.create(**kwargs)
        return subnet

    def clean(self):
        """Checks the CIDR to determine if it maps to a valid IPv4 network."""
        try:
            netaddr.cidr_to_glob(self.cidr)
        except (TypeError, netaddr.AddrFormatError) as err:
            raise me.ValidationError(err)

    def as_dict(self):
        """Returns the API representation of the `Subnet` object."""
        subnet_dict = {
            'id': self.id,
            'cloud': self.network.cloud.id,
            'network': self.network.id,
            'subnet_id': self.subnet_id,
            'name': self.name,
            'cidr': self.cidr,
            'description': self.description,
            'extra': self.extra,
        }
        subnet_dict.update(
            {key: getattr(self, key) for key in self._subnet_specific_fields}
        )
        return subnet_dict

    def __str__(self):
        return '%s "%s" (%s)' % (self.__class__.__name__, self.name, self.id)


class AmazonSubnet(Subnet):
    availability_zone = me.StringField(required=True)


class GoogleSubnet(Subnet):
    region = me.StringField(required=True)

    def clean(self):
        """Extended validation for GCE Subnets."""
        if not re.match('^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$', self.name):
            raise me.ValidationError('A **lowercase** name must be specified')
        super(GoogleSubnet, self).clean()


class OpenStackSubnet(Subnet):
    gateway_ip = me.StringField()
    ip_version = me.IntField(default=4)
    enable_dhcp = me.BooleanField(default=True)
    dns_nameservers = me.ListField(default=lambda: [])
    allocation_pools = me.ListField(default=lambda: [])


_populate_class_mapping(NETWORKS, 'Network', Network)
_populate_class_mapping(SUBNETS, 'Subnet', Subnet)
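
Given the _populate_class_mapping calls at the end, callers can resolve the provider-specific model class from a provider name. A short hypothetical usage (the 'ec2' provider key and the cloud document are illustrative assumptions):

network_cls = NETWORKS['ec2']  # assumed provider key -> AmazonNetwork
network = network_cls.add(cloud=cloud, cidr='10.0.0.0/16', name='Ec2Network')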
34.371134
79
0.59778
1,164
10,002
5.043814
0.212199
0.033214
0.009198
0.017033
0.519843
0.510986
0.449668
0.44047
0.420712
0.407086
0
0.004692
0.296841
10,002
290
80
34.489655
0.830087
0.264747
0
0.370588
0
0.011765
0.085225
0.011135
0
0
0
0
0.011765
1
0.082353
false
0
0.052941
0.011765
0.382353
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d0e0a77100ddff32d6eea302bdb6b7fa790b104
2,518
py
Python
test/std/database_test.py
cybergarage/uecho-py
6b0dc72b9c3770d79b812bad75ea201c820b089a
[ "Apache-2.0" ]
null
null
null
test/std/database_test.py
cybergarage/uecho-py
6b0dc72b9c3770d79b812bad75ea201c820b089a
[ "Apache-2.0" ]
null
null
null
test/std/database_test.py
cybergarage/uecho-py
6b0dc72b9c3770d79b812bad75ea201c820b089a
[ "Apache-2.0" ]
null
null
null
# Copyright (C) 2021 Satoshi Konno. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from uecho import Property
from uecho.std import Database


def test_manufacture_database():
    db = Database()

    expecteds = [
        ["Panasonic", 0x00000B],
        ["Panasonic", bytes([0x00, 0x00, 0x0B])],
        ["Panasonic", bytearray([0x00, 0x00, 0x0B])],
        ["Sharp", 0x000005],
        ["Sharp", bytes([0x00, 0x00, 0x05])],
        ["Sharp", bytearray([0x00, 0x00, 0x05])],
    ]

    for expected in expecteds:
        man = db.get_manufacturer(expected[1])
        assert man
        assert man.name.startswith(expected[0])


def test_object_database():
    db = Database()

    obj = db.get_object((0x00, 0x01))
    assert (obj)

    expecteds = [
        [0x80, Property.REQUIRED, Property.OPTIONAL, Property.REQUIRED],
        [0xB0, Property.OPTIONAL, Property.OPTIONAL, Property.PROHIBITED],
        [0xB1, Property.REQUIRED, Property.PROHIBITED, Property.REQUIRED],
        [0xBF, Property.PROHIBITED, Property.OPTIONAL, Property.PROHIBITED],
    ]

    assert obj.get_property(0x00) is None
    for expected in expecteds:
        prop = obj.get_property(expected[0])
        assert prop
        assert prop.get_attribute(Property.GET) == expected[1]
        assert prop.get_attribute(Property.SET) == expected[2]
        assert prop.get_attribute(Property.ANNO) == expected[3]


def test_mra_object_database():
    db = Database()

    obj = db.get_object((0x02, 0x91))
    assert (obj)

    expecteds = [
        [0x80, Property.REQUIRED, Property.REQUIRED, Property.REQUIRED],
        [0xB0, Property.OPTIONAL, Property.OPTIONAL, Property.OPTIONAL],
    ]

    assert obj.get_property(0x00) is None
    for expected in expecteds:
        prop = obj.get_property(expected[0])
        assert prop
        assert prop.get_attribute(Property.GET) == expected[1]
        assert prop.get_attribute(Property.SET) == expected[2]
        assert prop.get_attribute(Property.ANNO) == expected[3]
32.701299
76
0.67355
311
2,518
5.389068
0.363344
0.047733
0.085919
0.078759
0.429594
0.429594
0.429594
0.374702
0.257757
0.257757
0
0.048558
0.214853
2,518
76
77
33.131579
0.799191
0.227562
0
0.479167
0
0
0.021762
0
0
0
0.058031
0
0.291667
1
0.0625
false
0
0.041667
0
0.104167
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d1132aa2e29241cd896c2e57d8bd511396c582a
1,207
py
Python
ml-agents/mlagents/trainers/tests/torch/test_layers.py
J-Travnik/ml-agents
c392380ab32bd762536a83501483dd5e7d1898c8
[ "Apache-2.0" ]
null
null
null
ml-agents/mlagents/trainers/tests/torch/test_layers.py
J-Travnik/ml-agents
c392380ab32bd762536a83501483dd5e7d1898c8
[ "Apache-2.0" ]
null
null
null
ml-agents/mlagents/trainers/tests/torch/test_layers.py
J-Travnik/ml-agents
c392380ab32bd762536a83501483dd5e7d1898c8
[ "Apache-2.0" ]
null
null
null
import torch

from mlagents.trainers.torch.layers import (
    Swish,
    linear_layer,
    lstm_layer,
    Initialization,
)


def test_swish():
    layer = Swish()
    input_tensor = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    target_tensor = torch.mul(input_tensor, torch.sigmoid(input_tensor))
    assert torch.all(torch.eq(layer(input_tensor), target_tensor))


def test_initialization_layer():
    torch.manual_seed(0)
    # Test Zero
    layer = linear_layer(
        3, 4, kernel_init=Initialization.Zero, bias_init=Initialization.Zero
    )
    assert torch.all(torch.eq(layer.weight.data, torch.zeros_like(layer.weight.data)))
    assert torch.all(torch.eq(layer.bias.data, torch.zeros_like(layer.bias.data)))


def test_lstm_layer():
    torch.manual_seed(0)
    # Test zero for LSTM
    layer = lstm_layer(
        4, 4, kernel_init=Initialization.Zero, bias_init=Initialization.Zero
    )
    for name, param in layer.named_parameters():
        if "weight" in name:
            assert torch.all(torch.eq(param.data, torch.zeros_like(param.data)))
        elif "bias" in name:
            assert torch.all(
                torch.eq(param.data[4:8], torch.ones_like(param.data[4:8]))
            )
29.439024
86
0.664457
168
1,207
4.619048
0.285714
0.070876
0.090206
0.122423
0.469072
0.409794
0.309278
0.234536
0.234536
0
0
0.016807
0.211268
1,207
40
87
30.175
0.798319
0.023198
0
0.064516
0
0
0.008503
0
0
0
0
0
0.16129
1
0.096774
false
0
0.064516
0
0.16129
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d12b9597693069db63e21ca3c42a4f99c2c9aa0
11,562
py
Python
discordbot.py
ootomo2680/discordpy-startup
4df5f728a2b2338ac2a4a146b22787c87b4b9aae
[ "MIT" ]
null
null
null
discordbot.py
ootomo2680/discordpy-startup
4df5f728a2b2338ac2a4a146b22787c87b4b9aae
[ "MIT" ]
null
null
null
discordbot.py
ootomo2680/discordpy-startup
4df5f728a2b2338ac2a4a146b22787c87b4b9aae
[ "MIT" ]
null
null
null
import discord
from discord.ext import commands
import os
import random
import cv2
import numpy as np
import re
import pandas as pd
import codecs as cd

# NOTE: the bot token is committed in plain text here; in practice it should
# be loaded from an environment variable or a config file kept out of the repo.
token = "Njg1MTgxMTU3NTc0MzExOTc0.XmE6rg.5Gyfl0WZSsVa8UEw14qmLinQpyg"
prefix = '$'
client = discord.Client()

helpmessage = (
    '```\n'
    '0.ヘルプ:\n'
    ' $help コマンド一覧を表示\n'
    '1.モミール:\n'
    ' $dm{N} 無作為に選ばれたコスト{N}のクリーチャー(NEO, サイキック, 禁断を含む)を表示\n'
    ' $gr{N} 無作為に選ばれたGRクリーチャーを{N}枚表示({N}は省略可)\n'
    ' $st{N} 無作為に選ばれた「S・トリガー」を持つ呪文を{N}枚表示({N}は省略可)\n'
    ' $rule デュエマモミールのルール(暫定)を表示\n'
    '2.TRPG:\n'
    ' ${N}d{M} {M}面ダイスを{N}回振る\n'
    ' $fumble ファンブル表を振る\n'
    ' $hencho 変調表を振る\n'
    ' $kanjo 感情表を振る\n'
    ' $scene シーン表を振る\n'
    ' $senjo 戦場表を振る\n'
    '```\n'
)


def dice(dice_size):
    # randint's upper bound is exclusive, so add 1 to include the die's top
    # face (the original read randint(1, dice_size), which could never roll
    # the maximum and made the 2d6 scene table's result 12 unreachable).
    num = np.random.randint(1, int(dice_size) + 1)
    return num


def simple_dice(dice_size, dice_num):
    dice_val = np.array([], dtype=np.int64)
    for i in range(dice_num):
        dice_val = np.append(dice_val, dice(dice_size))
    return dice_val


def imread(filename, flags=cv2.IMREAD_COLOR, dtype=np.uint8):
    try:
        n = np.fromfile(filename, dtype)
        img = cv2.imdecode(n, flags)
        return img
    except Exception as e:
        print(e)
        return None


def hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
    h_min = min(im.shape[0] for im in im_list)
    im_list_resize = [cv2.resize(im,
                                 (int(im.shape[1] * h_min / im.shape[0]), h_min),
                                 interpolation=interpolation)
                      for im in im_list]
    return cv2.hconcat(im_list_resize)


def dmomir(arg):
    """Show a randomly chosen creature of cost <arg> (including NEO, Psychic and Forbidden)."""
    # with cd.open(str(arg)+'.csv', "r", "Shift-JIS", "ignore") as csv_file:
    #     df = pd.read_table(csv_file, delimiter=",",
    #                        names=["name","type","img","civ","pow","cost","race","abl"])
    df = pd.read_csv(str(arg)+'.csv', encoding='utf_8_sig')
    s = df.sample()
    print(s)
    name = str(s['name'].values[0])
    typ = str(s['type'].values[0])
    img = str(s['img'].values[0])
    civ = str(s['civ'].values[0])
    cost = str(s['cost'].values[0])
    power = str(s['pow'].values[0])
    race = str(s['race'].values[0])
    abl = s['abl'].values[0]
    info = '{0} [{1}] ({2}) {3}\n{4} -- {5}\n```{6}```'.format(
        name, civ, cost, typ, race, power, abl)
    return info, img


def trigger():
    """Show a randomly chosen spell with "S-Trigger"."""
    df = pd.read_csv('st.csv', encoding='utf_8_sig')
    s = df.sample()
    print(s)
    name = str(s['name'].values[0])
    img = str(s['img'].values[0])
    civ = str(s['civ'].values[0])
    cost = str(s['cost'].values[0])
    race = str(s['race'].values[0])
    abl = s['abl'].values[0]
    info = '{0} [{1}] ({2})\n```{3}```'.format(name, civ, cost, abl)
    return info, img


def gr():
    """Show a randomly chosen GR creature."""
    df = pd.read_csv('gr.csv', encoding='utf_8_sig')
    s = df.sample()
    print(s)
    name = str(s['name'].values[0])
    typ = str(s['type'].values[0])
    img = str(s['img'].values[0])
    civ = str(s['civ'].values[0])
    cost = str(s['cost'].values[0])
    power = str(s['pow'].values[0])
    race = str(s['race'].values[0])
    abl = s['abl'].values[0]
    info = '{0} [{1}] ({2}) {3}\n{4} -- {5}\n```{6}```'.format(
        name, civ, cost, typ, race, power, abl)
    return info, img


def rule():
    """Show the (provisional) rules of Duel Masters Momir."""
    return ("```"
            "■プレイヤーは自分のメインステップ中に一度,カードを1枚捨て,マナゾーンのカードを好きな数タップしてもよい。\n"
            " そうしたら,コストがそれと同じ数の進化でないクリーチャーを無作為に選び,コストを支払ったものとして召喚する。\n"
            " このようにしてバトルゾーンに出たサイキック・クリーチャーを裏返すことはできない。\n"
            "■プレイヤーがGR召喚をする時,かわりにすべてのGRクリーチャーから無作為に選び,召喚する(GR召喚として扱う)。\n"
            "■バトルゾーンのクリーチャーがゲーム中にバトルゾーン以外のゾーンに行った場合,消滅する。これはルール上の処理として行う。\n"
            "■手札と山札とマナゾーンと墓地とシールドゾーンにあるカードのコストと効果とカードタイプと名前は無視される(コストを参照する場合は0とする)。\n"
            "■ゲーム開始時,山札の上から5枚をシールドとして置く時,かわりに3枚(←要調整)をシールドとして置く。\n"
            " ただし、シールドゾーンにあるカードを手札に加える時、かわりに無作為に選ばれたS・トリガーを持つ呪文として扱ってもよい。\n"
            "```"
            )


def rush():
    """Pick a card at random."""
    path = './rush'
    dirs = os.listdir(path)
    fl = random.choice(dirs)
    image = '{0}/{1}'.format(path, fl)
    return image


def kanjo():
    """Roll on the emotion table."""
    i = random.randrange(1, 7, 1)
    num_to_kanjo = {1: "1: 共感/不信",
                    2: "2: 友情/怒り",
                    3: "3: 愛情/妬み",
                    4: "4: 忠誠/侮蔑",
                    5: "5: 憧憬/劣等感",
                    6: "6: 狂信/殺意"}
    val = num_to_kanjo[i]
    return val


def senjo():
    """Roll on the battlefield table (effects follow each entry)."""
    i = random.randrange(1, 7, 1)
    num_to_senjo = {1: "1: 平地\n特になし",
                    2: "2: 水中\n回避判定-2",
                    3: "3: 高所\nファンブル時接近戦ダメージ1点",
                    4: "4: 悪天候\n攻撃忍法の間合+1",
                    5: "5: 雑踏\nファンブル値+1",
                    6: "6: 極地\nラウンド終了時GMは1D6を振る。戦闘開始時からの経過ラウンド以下の目が出たとき接近戦ダメージを1点受ける。この戦場から脱落したときランダムに変調を受ける)"}
    val = num_to_senjo[i]
    return val


def hencho():
    """Roll on the affliction table."""
    i = random.randrange(1, 7, 1)
    num_to_hencho = {1: "1: 故障\n忍具が使用不能になる(累積しない)\n各サイクル終了時《絡繰術》で判定し成功で解除",
                     2: "2: マヒ\n修得している特技一つをランダムに選び,使用不能にする(特技の数だけ累積)\n各サイクル終了時《身体操術》で判定し成功ですべて解除",
                     3: "3: 重傷\n命中・情報・感情判定を行うたび接近戦ダメージ1点(累積しない)\n各サイクル終了時《生存術》で判定し成功で解除",
                     4: "4: 行方不明\nメインフェイズ中自分以外のシーンに登場不可(累積しない)\n各サイクル終了時《経済力》で判定し成功で解除",
                     5: "5: 忘却\n獲得している【感情】一つをランダムに選び,持っていないものとして扱う(【感情】の数だけ累積)\n各サイクル終了時《記憶術》で判定し成功ですべて解除",
                     6: "6: 呪い\n修得している忍法一つをランダムに選び,修得していないものとして扱う(忍法の数だけ累積)\n各サイクル終了時《呪術》で判定し成功ですべて解除"}
    val = num_to_hencho[i]
    return val


def fumble():
    """Roll on the fumble table."""
    i = random.randrange(1, 7, 1)
    num_to_kanjo = {1: "1: 何か調子がおかしい。そのサイクルの間、すべての行為判定にマイナス1の修正がつく。",
                    2: "2: しまった! 好きな忍具を1つ失ってしまう。",
                    3: "3: 情報が漏れる! このゲームであなたが獲得した【秘密】は、他のキャラクター全員の知るところとなる。",
                    4: "4: 油断した! 術の制御に失敗し、好きな【生命力】を1点失う。",
                    5: "5: 敵の陰謀か? 罠にかかり、ランダムに選んだ変調1つを受ける。変調は、変調表で決定すること。",
                    6: "6: ふう。危ないところだった。特に何も起こらない。"}
    val = num_to_kanjo[i]
    return val


def scene():
    """Roll on the scene table."""
    x = np.sum(simple_dice(6, 2))
    if x == 2:
        val = "2: 血の匂いがあたりに充満している。何者かの戦いがあった気配。いや?まだ戦いは続いているのだろうか?"
    elif x == 3:
        val = "3: これは……夢か? もう終わったはずの過去。しかし、それを忘れることはできない。"
    elif x == 4:
        val = "4: 眼下に広がる街並みを眺める。ここからなら街を一望できるが……。"
    elif x == 5:
        val = "5: 世界の終わりのような暗黒。暗闇の中、お前達は密やかに囁く。"
    elif x == 6:
        val = "6: 優しい時間が過ぎていく。影の世界のことを忘れてしまいそうだ。"
    elif x == 7:
        val = "7: 清廉な気配が漂う森の中。鳥の囀りや、そよ風が樹々を通り過ぎる音が聞こえる。"
    elif x == 8:
        val = "8: 凄まじい人混み。喧噪。影の世界のことを知らない無邪気な人々の手柄話や無駄話が騒がしい。"
    elif x == 9:
        val = "9: 強い雨が降り出す。人々は、軒を求めて、大慌てて駆けだしていく。"
    elif x == 10:
        val = "10: 大きな風が吹き荒ぶ。髪の毛や衣服が大きく揺れる。何かが起こりそうな予感……"
    elif x == 11:
        val = "11: 酔っぱらいの怒号。客引きたちの呼び声。女たちの嬌声。いつもの繁華街の一幕だが。"
    elif x == 12:
        val = "12: 太陽の微笑みがあなたを包み込む。影の世界の住人には、あまりにまぶしすぎる。"
    return val


@client.event
async def on_ready():
    print('Logged in')
    print('-----')


@client.event
async def on_message(message):
    # Only react to messages that start with the command prefix.
    if message.content.startswith(prefix):
        # Ignore messages sent by the bot itself.
        if client.user != message.author:
            msg = message.content.lstrip(prefix)
            # NOTE: str.lstrip strips a *set* of characters, not a prefix;
            # the calls below work only because the arguments are digits.
            # dmomir
            if msg.startswith('dm'):
                info = msg.lstrip('dm ')
                if info.isdecimal():
                    data = dmomir(info)
                    await message.channel.send(
                        message.author.mention + '\n' + re.sub(r'\\n', '\n', data[0]),
                        file=discord.File(data[1]))
            elif msg.startswith('st'):
                info = msg.lstrip('st ')
                if info == '':
                    data = trigger()
                    await message.channel.send(
                        message.author.mention + '\n' + re.sub(r'\\n', '\n', data[0]),
                        file=discord.File(data[1]))
                elif info.isdecimal():
                    data = trigger()
                    s = data[0]
                    im1 = imread(data[1])
                    for i in range(int(info) - 1):
                        data = trigger()
                        s = s + data[0]
                        im2 = imread(data[1])
                        im1 = hconcat_resize_min([im1, im2])
                    cv2.imwrite('data/triggers.jpg', im1)
                    await message.channel.send(
                        message.author.mention + '\n' + re.sub(r'\\n', '\n', s),
                        file=discord.File('data/triggers.jpg'))
            elif msg.startswith('gr'):
                info = msg.lstrip('gr ')
                if info == '':
                    data = gr()
                    await message.channel.send(
                        message.author.mention + '\n' + re.sub(r'\\n', '\n', data[0]),
                        file=discord.File(data[1]))
                elif info.isdecimal():
                    data = gr()
                    s = data[0]
                    im1 = imread(data[1])
                    for i in range(int(info) - 1):
                        data = gr()
                        s = s + data[0]
                        im2 = imread(data[1])
                        im1 = hconcat_resize_min([im1, im2])
                    cv2.imwrite('data/grs.jpg', im1)
                    await message.channel.send(
                        message.author.mention + '\n' + re.sub(r'\\n', '\n', s),
                        file=discord.File('data/grs.jpg'))
            elif msg.startswith('rule'):
                await message.channel.send(rule())
            elif msg.startswith('rush'):
                info = msg.lstrip('rush ')
                if info == '':
                    await message.channel.send(message.author.mention,
                                               file=discord.File(rush()))
                elif info.isdecimal():
                    im1 = cv2.imread(rush())
                    for i in range(int(info) - 1):
                        im2 = cv2.imread(rush())
                        im1 = hconcat_resize_min([im1, im2])
                    cv2.imwrite('data/rushs.jpg', im1)
                    await message.channel.send(message.author.mention,
                                               file=discord.File('data/rushs.jpg'))
            # trpg
            elif msg.startswith('kanjo'):
                await message.channel.send(message.author.mention + ' ' + kanjo())
            elif msg.startswith('senjo'):
                await message.channel.send(message.author.mention + ' ' + senjo())
            elif msg.startswith('hencho'):
                await message.channel.send(message.author.mention + ' ' + hencho())
            elif msg.startswith('fumble'):
                await message.channel.send(message.author.mention + ' ' + fumble())
            elif msg.startswith('scene'):
                await message.channel.send(scene())
            # help
            elif msg.startswith('help'):
                await message.channel.send(helpmessage)
            # dice
            else:
                info = re.split(r'\D+', message.content)
                print(info)
                if info:
                    if info[1].isdecimal() and info[2].isdecimal():
                        dice_num = int(info[1])
                        dice_size = int(info[2])
                        val = simple_dice(dice_size, dice_num)
                        await message.channel.send(
                            message.author.mention + ' ' + str(dice_num) + 'd'
                            + str(dice_size) + ': ' + str(val) + ' = '
                            + '**[' + str(np.sum(val)) + ']**')


client.run(token)
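
One editorial caveat on the file above: the bot token is committed in plain text. A common alternative (a suggestion, not part of the original) is to read it from the environment; the variable name below is an assumption:

import os
token = os.environ.get('DISCORD_TOKEN', '')  # assumed variable name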
38.798658
180
0.527504
1,410
11,562
4.293617
0.243972
0.025438
0.047076
0.056987
0.366535
0.330525
0.31665
0.277998
0.260159
0.252725
0
0.029081
0.30704
11,562
297
181
38.929293
0.722791
0.034077
0
0.304878
0
0.01626
0.245956
0.15154
0
0
0
0
0
1
0.056911
false
0
0.036585
0
0.154472
0.028455
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d16d0a9149a82836ebda93b94a662f0aa549423
1,488
py
Python
process_msg.py
oudoubleyang/Dragalia
01e327aed55bdf585b573862c63d013860b32444
[ "MIT" ]
1
2019-07-04T06:40:56.000Z
2019-07-04T06:40:56.000Z
process_msg.py
oudoubleyang/Dragalia
01e327aed55bdf585b573862c63d013860b32444
[ "MIT" ]
1
2020-01-01T19:04:56.000Z
2020-01-01T19:04:58.000Z
process_msg.py
oudoubleyang/Dragalia
01e327aed55bdf585b573862c63d013860b32444
[ "MIT" ]
null
null
null
from session import dra
from info import self_id

special_ids = [
    100,
    1000,
    10000,
    100000,
    1000000
]
for i in special_ids.copy():
    for j in range(1, 10):
        special_ids.append(i*j)
special_ids.extend([114514, 1919, 810, 1919810])

done = ['好了', '可以']
thanks = ['\u8C22', '\u5C04']
extension = {
    'image': {
        'good': '.webp',
        'bad': ['.jpg', '.bmp']
    },
    # audio
    'video': {
        'good': '.mp4',
        'bad': ['.avi', '.rm']
    },
}


def process_id(chat_id, message_id):
    if message_id in special_ids:
        dra.send_message(chat_id, f'祝贺本群第**{message_id}**条消息达成! 🎉',
                         parse_mode='Markdown')
        if dra.get_chat_member(chat_id, self_id).can_pin_messages:
            dra.pin_chat_message(chat_id, message_id, disable_notification=True)
        return True


def process_keyword(message):
    text = message.text
    if message.caption and not text:
        text = message.caption
    for word in done:
        if f'我{word}' in text:
            return message.reply(f'我也{word}')
    for word in thanks:
        if f'已经{word}了' in text or f'我{word}了' in text:
            return message.reply(f'我也{word}了')
    return None


def process_msg(client, message):
    text = message.text
    chat_id = message.chat.id
    message_id = message.message_id
    process_id(chat_id, message_id)
    if message.caption and not text:
        text = message.caption
    if text:
        return process_keyword(message)
    else:
        return None
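
For clarity on the milestone logic above: after the loop, special_ids holds every multiple j * 10**k for j in 1..9 and k in 2..6 (with duplicates for j == 1), plus the meme constants. A quick sanity check:

if __name__ == '__main__':
    # Expect 100, 200, ..., 900, 1000, ..., 9000000, plus 810, 1919, 114514,
    # 1919810.
    print(sorted(set(special_ids)))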
24
90
0.611559
213
1,488
4.117371
0.394366
0.047891
0.074116
0.068415
0.286203
0.236032
0.236032
0.236032
0.100342
0
0
0.049774
0.257392
1,488
61
91
24.393443
0.742986
0.00336
0
0.163265
0
0
0.095881
0.018231
0
0
0
0
0
1
0.061224
false
0
0.040816
0
0.22449
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d1aa146a37c0c8ebc3fe202336c4bbf48972e99
8,086
py
Python
vframe/vframe/settings/vframe_cfg.py
kant/vframe
28e49ca62d9036a78a25b26eb0fb7e3cf8c79031
[ "MIT" ]
1
2021-04-18T10:42:10.000Z
2021-04-18T10:42:10.000Z
vframe/vframe/settings/vframe_cfg.py
vframeio/_vframe_v0_archived
28e49ca62d9036a78a25b26eb0fb7e3cf8c79031
[ "MIT" ]
null
null
null
vframe/vframe/settings/vframe_cfg.py
vframeio/_vframe_v0_archived
28e49ca62d9036a78a25b26eb0fb7e3cf8c79031
[ "MIT" ]
null
null
null
import os
from os.path import join
import logging
import collections

import cv2 as cv

from vframe.settings import types
from vframe.models.video_item import VideoQuality
from vframe.utils import click_utils

# -----------------------------------------------------------------------------
# Enum lists used for custom Click Params
# -----------------------------------------------------------------------------
ClassifyNetVar = click_utils.ParamVar(types.ClassifyNet)
DetectorNetVar = click_utils.ParamVar(types.DetectorNet)
PyTorchNetVar = click_utils.ParamVar(types.PyTorchNet)
SceneTextNetVar = click_utils.ParamVar(types.SceneTextNet)
FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet)
SearchParamVar = click_utils.ParamVar(types.SearchParam)
ClientRecordVar = click_utils.ParamVar(types.ClientRecord)
MetadataTreeVar = click_utils.ParamVar(types.MetadataTree)
ImageSizeVar = click_utils.ParamVar(types.ImageSize)
VideoQualityVar = click_utils.ParamVar(types.VideoQuality)
DataStoreVar = click_utils.ParamVar(types.DataStore)
FileExtVar = click_utils.ParamVar(types.FileExt)
KeyframeMetadataVar = click_utils.ParamVar(types.KeyframeMetadata)
MediaRecordVar = click_utils.ParamVar(types.MediaRecord)
VerifiedVar = click_utils.ParamVar(types.Verified)
MediaFormatVar = click_utils.ParamVar(types.MediaFormat)
MetadataVar = click_utils.ParamVar(types.Metadata)
LogLevelVar = click_utils.ParamVar(types.LogLevel)
ActionVar = click_utils.ParamVar(types.Action)

# data_store
DATA_STORE = '/data_store_hdd/'
DIR_DATASETS = join(DATA_STORE, 'datasets')
DIR_APPS = join(DATA_STORE, 'apps')
DIR_APP_VFRAME = join(DIR_APPS, 'vframe')
DIR_APP_SA = join(DIR_APPS, 'syrianarchive')
DIR_MODELS_VFRAME = join(DIR_APP_VFRAME, 'models')
DIR_MODELS_SA = join(DIR_APP_SA, 'models')

# Frameworks
DIR_MODELS_OPENCV = join(DIR_MODELS_VFRAME, 'caffe')
DIR_MODELS_CAFFE = join(DIR_MODELS_VFRAME, 'caffe')
DIR_MODELS_DARKNET = join(DIR_MODELS_VFRAME, 'darknet')
DIR_MODELS_DARKNET_PJREDDIE = join(DIR_MODELS_DARKNET, 'pjreddie')
DIR_MODELS_DARKNET_VFRAME = join(DIR_MODELS_DARKNET, 'vframe')
DIR_MODELS_PYTORCH = join(DIR_MODELS_VFRAME, 'pytorch')
DIR_MODELS_TORCH = join(DIR_MODELS_VFRAME, 'torch')
DIR_MODELS_MXNET = join(DIR_MODELS_VFRAME, 'mxnet')
DIR_MODELS_TF = join(DIR_MODELS_VFRAME, 'tensorflow')
DIR_MODELS_DLIB = join(DIR_MODELS_VFRAME, 'dlib')
DIR_MODELS_DLIB_CNN = join(DIR_MODELS_DLIB, 'mmod_human_face_detector.dat')
DIR_MODELS_DLIB_5PT = join(DIR_MODELS_DLIB, 'shape_predictor_5_face_landmarks.dat')
DIR_MODELS_DLIB_68PT = join(DIR_MODELS_DLIB, 'shape_predictor_68_face_landmarks.dat')

# Test images
DIR_TEST_IMAGES = join(DIR_APP_SA, 'test', 'images')

# -----------------------------------------------------------------------------
# Drawing, GUI settings
# -----------------------------------------------------------------------------
DIR_ASSETS = join(DIR_APP_VFRAME, 'assets')
FP_FONT = join(DIR_ASSETS, 'font')

# -----------------------------------------------------------------------------
# click chair settings
# -----------------------------------------------------------------------------
DIR_COMMANDS_PROCESSOR_CHAIR = 'vframe/commands/'
DIR_COMMANDS_PROCESSOR_VCAT = 'vcat/commands/'
DIR_COMMANDS_PROCESSOR_ADMIN = 'admin/commands'

# -----------------------------------------------------------------------------
# Sugarcube dates
# Dates the snapshots are made
# -----------------------------------------------------------------------------
SUGARCUBE_DATES = ['20180611']

# -----------------------------------------------------------------------------
# Filesystem settings
# hash trees enforce a maximum number of directories per directory
# -----------------------------------------------------------------------------
ZERO_PADDING = 6  # padding for enumerated image filenames
# FRAME_NAME_ZERO_PADDING = 6  # is this active??
CKPT_ZERO_PADDING = 9
HASH_TREE_DEPTH = 3
HASH_BRANCH_SIZE = 3

# -----------------------------------------------------------------------------
# Logging options exposed for custom click Params
# -----------------------------------------------------------------------------
LOGGER_NAME = 'vframe'
LOGLEVELS = {
    types.LogLevel.DEBUG: logging.DEBUG,
    types.LogLevel.INFO: logging.INFO,
    types.LogLevel.WARN: logging.WARN,
    types.LogLevel.ERROR: logging.ERROR,
    types.LogLevel.CRITICAL: logging.CRITICAL
}
LOGLEVEL_OPT_DEFAULT = types.LogLevel.DEBUG.name
# LOGFILE_FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
# LOGFILE_FORMAT = "%(levelname)s:%(name)s: %(message)s"
# LOGFILE_FORMAT = "%(levelname)s: %(message)s"
# LOGFILE_FORMAT = "%(filename)s:%(lineno)s %(funcName)s() %(message)s"

# colored logs
"""
black, red, green, yellow, blue, purple, cyan and white.
{color}, fg_{color}, bg_{color}: Foreground and background colors.
bold, bold_{color}, fg_bold_{color}, bg_bold_{color}: Bold/bright colors.
reset: Clear all formatting (both foreground and background colors).
"""
LOGFILE_FORMAT = "%(log_color)s%(levelname)-8s%(reset)s %(cyan)s%(filename)s:%(lineno)s:%(bold_cyan)s%(funcName)s() %(reset)s%(message)s"

# -----------------------------------------------------------------------------
# Media formats accepted by VFRAME
# -----------------------------------------------------------------------------
VALID_MEDIA_EXTS = {
    types.MediaFormat.VIDEO: ['mp4', 'mov', 'avi'],
    types.MediaFormat.PHOTO: ['jpg', 'jpeg', 'png']
}

# -----------------------------------------------------------------------------
# Image size for web images
# -----------------------------------------------------------------------------
# order here is used for efficient image-pyramid resizing
IMAGE_SIZES = collections.OrderedDict()
IMAGE_SIZES[types.ImageSize.THUMB] = 160
IMAGE_SIZES[types.ImageSize.SMALL] = 320
IMAGE_SIZES[types.ImageSize.MEDIUM] = 640
IMAGE_SIZES[types.ImageSize.LARGE] = 1280

IMAGE_SIZE_LABELS = collections.OrderedDict()
IMAGE_SIZE_LABELS[types.ImageSize.THUMB] = 'th'
IMAGE_SIZE_LABELS[types.ImageSize.SMALL] = 'sm'
IMAGE_SIZE_LABELS[types.ImageSize.MEDIUM] = 'md'
IMAGE_SIZE_LABELS[types.ImageSize.LARGE] = 'lg'

DEFAULT_SIZE_LABEL_FEAT_EXTRACT = IMAGE_SIZE_LABELS[types.ImageSize.MEDIUM]

JPG_SAVE_QUALITY = 75
KEYFRAME_EXT = 'jpg'

# Define video quality metrics (w, h, fps, sec)
VIDEO_QUALITY = collections.OrderedDict()
VIDEO_QUALITY[types.VideoQuality.POOR] = VideoQuality(160, 90, 12, 2)
VIDEO_QUALITY[types.VideoQuality.LOW] = VideoQuality(320, 180, 12, 2)
VIDEO_QUALITY[types.VideoQuality.MEDIUM] = VideoQuality(640, 360, 12, 2)
VIDEO_QUALITY[types.VideoQuality.HIGH] = VideoQuality(1280, 720, 12, 2)  # HD Ready
VIDEO_QUALITY[types.VideoQuality.HD] = VideoQuality(1920, 1080, 24, 2)  # Full HD

# -----------------------------------------------------------------------------
# OpenCV backend and target
# used for optimizing DNN inference speeds
# requires OpenCV >= 3.4.2
# -----------------------------------------------------------------------------
OPENCV_DNN_BACKENDS = {
    types.CVBackend.DEFAULT: cv.dnn.DNN_BACKEND_DEFAULT,
    types.CVBackend.HALIDE: cv.dnn.DNN_BACKEND_HALIDE,
    types.CVBackend.INFER_ENGINE: cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,
    types.CVBackend.OPENCV: cv.dnn.DNN_BACKEND_OPENCV
}
OPENCV_DNN_TARGETS = {
    types.CVTarget.CPU: cv.dnn.DNN_TARGET_CPU,
    types.CVTarget.OPENCL: cv.dnn.DNN_TARGET_OPENCL,
    types.CVTarget.OPENCL_FP16: cv.dnn.DNN_TARGET_OPENCL_FP16,
    types.CVTarget.MYRIAD: cv.dnn.DNN_TARGET_MYRIAD
}
OPENCV_BACKEND_DEFAULT = types.CVBackend.OPENCV
OPENCV_TARGET_DEFAULT = types.CVTarget.OPENCL_FP16

# -----------------------------------------------------------------------------
# Minimum keyframe extraction video attributes
# -----------------------------------------------------------------------------
KEYFRAME_MIN_WIDTH = 640
# KEYFRAME_MIN_WIDTH = 480  # some verified videos are this small, ignore for now
KEYFRAME_MIN_HEIGHT = 320
KEYFRAME_MIN_FPS = 10
KEYFRAME_MIN_FRAMES = 90

# -----------------------------------------------------------------------------
# Defaults
# -----------------------------------------------------------------------------
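
A sketch of how the backend/target tables above are typically consumed when configuring an OpenCV DNN; `net` is assumed to be a cv.dnn network loaded elsewhere (e.g. via cv.dnn.readNet), not something this settings module provides.

# Hypothetical usage of OPENCV_DNN_BACKENDS / OPENCV_DNN_TARGETS:
net = cv.dnn.readNet('model.weights', 'model.cfg')  # assumed model files
net.setPreferableBackend(OPENCV_DNN_BACKENDS[OPENCV_BACKEND_DEFAULT])
net.setPreferableTarget(OPENCV_DNN_TARGETS[OPENCV_TARGET_DEFAULT])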
41.045685
137
0.621692
881
8,086
5.447219
0.324631
0.052511
0.071265
0.091061
0.107314
0.08085
0.027089
0
0
0
0
0.014253
0.080262
8,086
196
138
41.255102
0.631034
0.338734
0
0
0
0.009174
0.089314
0.039274
0
0
0
0
0
1
0
false
0
0.073395
0
0.073395
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d1f4b2c6fa4351760dc466834cf6bd3fbffaa60
10,040
py
Python
project/firewall/mainFirewall.py
flyflyinit/GUI-admin-tool
1fa97393ee2a39a65f5b7bbe95eb5b5f04bc6ad6
[ "MIT" ]
3
2020-06-15T18:57:41.000Z
2020-09-28T14:30:36.000Z
project/firewall/mainFirewall.py
flyflyinit/GUI-admin-tool
1fa97393ee2a39a65f5b7bbe95eb5b5f04bc6ad6
[ "MIT" ]
null
null
null
project/firewall/mainFirewall.py
flyflyinit/GUI-admin-tool
1fa97393ee2a39a65f5b7bbe95eb5b5f04bc6ad6
[ "MIT" ]
1
2020-06-06T23:34:16.000Z
2020-06-06T23:34:16.000Z
import qtmodern.styles
import qtmodern.windows
from project.firewall.configFirewall import CreateFwWindow, EditFwWindow, DeleteFwWindow
from project.firewall.firewallScripts import firewallGlobalInfo, setDefaultZone, defaultZone
from project.firewall.tableFirewall import *
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
from project.firewall.tableFirewall import listservices, listZoneModified, listports

firewallIsrunnig = True


def getContentFirewall(self):
    """Build the firewall tab: table, buttons and scroll container."""
    self.gridFw = QGridLayout()
    createTableFw(self, firewallIsrunnig)
    createFwButtons(self, firewallIsrunnig)
    self.groupBox = QGroupBox()
    self.containerFw = QVBoxLayout()
    self.containerFw.addLayout(self.gridFw)
    self.containerFw.addLayout(self.hboxbtn)
    self.containerFw.addWidget(self.tableFw)
    self.containerFw.addStretch()
    self.groupBox.setLayout(self.containerFw)
    self.scroll = QScrollArea()
    self.scroll.setFixedWidth(1150)
    self.scroll.setWidget(self.groupBox)
    self.bottomRightLayout.addWidget(self.scroll)


def createFwButtons(self, firewallIsrunnig):
    self.hboxbtn = QHBoxLayout()
    try:
        defaultzone = defaultZone()[0]
        self.defaultZone = QLabel(f"DEFAULT ZONE : {defaultzone}")
        self.defaultZone.move(10, 10)
    except IndexError:
        QMessageBox.critical(self, 'error', 'Please install firewalld or start the service')
        self.defaultZone = QLabel("FIREWALLD SERVICE IS NOT RUNNING")
        firewallIsrunnig = False  # note: only affects this function's local copy
    self.defaultZone.setStyleSheet("color: #303a46;font: bold 14px;")
    self.addBtn = QPushButton('Add')
    self.editBtn = QPushButton('Edit')
    self.deleteBtn = QPushButton('Delete')
    self.addBtn.setFixedHeight(30)
    self.addBtn.setFixedWidth(120)
    self.editBtn.setFixedHeight(30)
    self.editBtn.setFixedWidth(120)
    self.deleteBtn.setFixedHeight(30)
    self.deleteBtn.setFixedWidth(120)
    self.addBtn.clicked.connect(lambda: createUsersWindow(self, self.dic4))
    self.addBtn.setStyleSheet("color: #ecf0f1; background-color: #2ecc71 ; border: 0px solid #2c3e50")
    self.editBtn.clicked.connect(lambda: editFWWindow(self))
    self.editBtn.setStyleSheet("color: #ecf0f1; background-color: #34495e ; border: 0px solid #2c3e50")
    self.deleteBtn.clicked.connect(lambda: deleteFwWindow(self, self.dic4))
    self.deleteBtn.setStyleSheet("color: #ecf0f1; background-color: #e74c3c; border: 0px solid #2c3e50")
    self.hboxbtn.addWidget(self.defaultZone)
    self.hboxbtn.addStretch()
    self.hboxbtn.addStretch()
    self.hboxbtn.addStretch()
    self.hboxbtn.addStretch()
    if firewallIsrunnig:
        self.hboxbtn.addWidget(self.addBtn)
        self.hboxbtn.addWidget(self.editBtn)
        self.hboxbtn.addWidget(self.deleteBtn)


def createTableFw(self, firewallIsrunnig):
    if firewallIsrunnig:
        self.tableFw = QTableWidget()
        self.tableFw.setRowCount(0)
        self.tableFw.setColumnCount(6)
        self.tableFw.setFixedHeight(570)
        self.tableFw.setFixedWidth(1130)
        self.tableFw.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.tableFw.setHorizontalHeaderItem(0, QTableWidgetItem("Zone"))
        self.tableFw.setHorizontalHeaderItem(1, QTableWidgetItem("Interface"))
        self.tableFw.setHorizontalHeaderItem(2, QTableWidgetItem("Services"))
        self.tableFw.setHorizontalHeaderItem(3, QTableWidgetItem("Ports"))
        self.tableFw.setHorizontalHeaderItem(4, QTableWidgetItem("Set default"))
        self.tableFw.setHorizontalHeaderItem(5, QTableWidgetItem("Select"))
        self.tableFw.setEditTriggers(QAbstractItemView.NoEditTriggers)
        showmyfwlist(self)


class SelectCellInTableNet(QWidget):
    """Checkbox cell used in the 'Select' column."""
    def __init__(self, parent=None):
        super(SelectCellInTableNet, self).__init__(parent)
        self.isSelected = False
        self.hbox = QHBoxLayout()
        self.checkb = QCheckBox(self)
        self.checkb.stateChanged.connect(self.checkBoxChangedAction)
        self.hbox.addStretch()
        self.hbox.addWidget(self.checkb)
        self.hbox.addStretch()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.setSpacing(8)
        self.setLayout(self.hbox)

    def checkBoxChangedAction(self, state):
        self.isSelected = (QtCore.Qt.Checked == state)


class SetDefaultZone(QWidget):
    """'Set' button cell that makes its zone the default zone."""
    def __init__(self, zone, parent=None):
        super(SetDefaultZone, self).__init__(parent)
        self.zone = zone
        self.hbox = QHBoxLayout()
        self.showmoreBtn = QPushButton('Set')
        self.showmoreBtn.clicked.connect(self.showmoreBtnClicked)
        self.hbox.addWidget(self.showmoreBtn)
        self.hbox.addStretch()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.setSpacing(8)
        self.setLayout(self.hbox)

    def showmoreBtnClicked(self):
        index = str(self.zone)
        try:
            setDefaultZone(index)
        except Exception:
            QMessageBox.critical(self, 'warning', f'\n cannot set {index} as the default zone')
        else:
            QMessageBox.information(self, 'Services', f'\n {index} has been set as the default zone')


class ServiceTableFw(QWidget):
    """'more' button cell listing the services enabled in a zone."""
    def __init__(self, username, parent=None):
        super(ServiceTableFw, self).__init__(parent)
        self.username = username  # holds the zone name
        self.hbox = QHBoxLayout()
        self.showmoreBtn = QPushButton('more')
        self.showmoreBtn.clicked.connect(self.showmoreBtnClicked)
        self.hbox.addWidget(self.showmoreBtn)
        self.hbox.addStretch()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.setSpacing(8)
        self.setLayout(self.hbox)

    def showmoreBtnClicked(self):
        index = str(self.username)
        outputString = ' '.join(str(i) for i in listservices(index))
        QMessageBox.information(self, 'Services', f'\n Services enabled in {index} zone are:\n {outputString}')


class PortsTableFw(QWidget):
    """'more' button cell listing the ports added to a zone."""
    def __init__(self, username, parent=None):
        super(PortsTableFw, self).__init__(parent)
        self.username = username  # holds the zone name
        self.hbox = QHBoxLayout()
        self.showmoreBtn = QPushButton('more')
        self.showmoreBtn.clicked.connect(self.showmoreBtnClicked)
        self.hbox.addWidget(self.showmoreBtn)
        self.hbox.addStretch()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.setSpacing(8)
        self.setLayout(self.hbox)

    def showmoreBtnClicked(self):
        index = str(self.username)
        outputString = ' '.join(str(i) for i in listports(index))
        QMessageBox.information(self, 'Ports', f'\n Ports added in {index} zone are:\n {outputString}')


class interfaceTableFw(QWidget):
    """'more' button cell listing the interfaces added to a zone."""
    def __init__(self, zone, parent=None):
        super(interfaceTableFw, self).__init__(parent)
        self.zone = zone
        self.hbox = QHBoxLayout()
        self.showmoreBtn = QPushButton('more')
        self.showmoreBtn.clicked.connect(self.showmoreBtnClicked)
        self.hbox.addWidget(self.showmoreBtn)
        self.hbox.addStretch()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.setSpacing(8)
        self.setLayout(self.hbox)

    def showmoreBtnClicked(self):
        index = str(self.zone)
        outputString = ' '.join(str(i) for i in listinterfaces(index))
        QMessageBox.information(self, 'Interfaces', f'\n Interfaces added in {index} zone are:\n {outputString}')


def showmyfwlist(self):
    self.list_of_fw = listZoneModified()
    self.dic = {}    # zone -> SetDefaultZone widget
    self.dic1 = {}   # zone -> ServiceTableFw widget
    self.dic2 = {}   # zone -> PortsTableFw widget
    self.dic3 = {}   # zone -> interfaceTableFw widget
    self.dic4 = {}   # zone -> SelectCellInTableNet widget
    for i in self.list_of_fw:
        self.rowPosition = self.tableFw.rowCount()
        self.tableFw.insertRow(self.rowPosition)
        self.tableFw.setItem(self.rowPosition, 0, QTableWidgetItem(i[0]))
        self.dic3[i[0]] = interfaceTableFw(i[0])
        self.dic[i[0]] = SetDefaultZone(i[0])
        self.dic1[i[0]] = ServiceTableFw(i[0])
        self.dic2[i[0]] = PortsTableFw(i[0])
        self.dic4[i[0]] = SelectCellInTableNet()
        self.tableFw.setCellWidget(self.rowPosition, 4, self.dic[i[0]])
        self.tableFw.setCellWidget(self.rowPosition, 2, self.dic1[i[0]])
        self.tableFw.setCellWidget(self.rowPosition, 3, self.dic2[i[0]])
        self.tableFw.setCellWidget(self.rowPosition, 1, self.dic3[i[0]])
        self.tableFw.setCellWidget(self.rowPosition, 5, self.dic4[i[0]])


def createUsersWindow(self, d):
    list_users_to_edit = [i for i in d if d[i].isSelected]
    if len(list_users_to_edit) != 1:
        # no zone (or several zones) selected: open the create window empty
        user = [""]
        self.secondwindow = CreateFwWindow(user)
        self.sw = qtmodern.windows.ModernWindow(self.secondwindow)
        self.sw.show()
    else:
        for user in self.list_of_fw:
            if user[0] == list_users_to_edit[0]:
                self.secondwindow = CreateFwWindow(user)
                self.sw = qtmodern.windows.ModernWindow(self.secondwindow)
                self.sw.show()
            else:
                continue


def editFWWindow(self):
    self.secondwindow = EditFwWindow()
    self.sw = qtmodern.windows.ModernWindow(self.secondwindow)
    self.sw.show()


def deleteFwWindow(self, d):
    list_users_to_edit = [i for i in d if d[i].isSelected]
    if len(list_users_to_edit) != 1:
        QMessageBox.warning(self, 'warning', 'Please select just one zone')
    else:
        for user in self.list_of_fw:
            if user[0] == list_users_to_edit[0]:
                self.secondwindow = DeleteFwWindow(user)
                self.sw = qtmodern.windows.ModernWindow(self.secondwindow)
                self.sw.show()
            else:
                continue
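A minimal, hypothetical wiring sketch (not part of this file): a host widget that supplies the bottomRightLayout attribute the helpers above expect. The FirewallPanel name is invented; on a machine without firewalld the createFwButtons path shows the error dialog instead of the action buttons.

# Hypothetical host widget for the module-level helpers above.
class FirewallPanel(QWidget):
    def __init__(self):
        super().__init__()
        self.bottomRightLayout = QVBoxLayout(self)  # layout the helpers populate
        getContentFirewall(self)  # builds the table, buttons and scroll area

if __name__ == '__main__':
    app = QApplication([])
    panel = FirewallPanel()
    panel.show()
    app.exec_()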
36.245487
114
0.668327
1,091
10,040
6.078827
0.183318
0.037394
0.004524
0.022618
0.45763
0.40576
0.398824
0.358866
0.335344
0.322678
0
0.018971
0.217729
10,040
276
115
36.376812
0.825439
0
0
0.452174
0
0
0.075
0
0
0
0
0
0
1
0.073913
false
0
0.034783
0
0.130435
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d20163867ce78900f3085c931841cc9a71d2294
20,397
py
Python
piCamMovecpu.py
pootle/piCameraWeb
3422a21ed62d0a231d6d26bd5c59914f57ebe1a6
[ "Unlicense" ]
1
2019-05-04T10:34:23.000Z
2019-05-04T10:34:23.000Z
piCamMovecpu.py
pootle/piCameraWeb
3422a21ed62d0a231d6d26bd5c59914f57ebe1a6
[ "Unlicense" ]
1
2019-04-01T08:16:05.000Z
2019-04-01T08:16:05.000Z
piCamMovecpu.py
pootle/piCameraWeb
3422a21ed62d0a231d6d26bd5c59914f57ebe1a6
[ "Unlicense" ]
null
null
null
#!/usr/bin/python3
import threading, queue, time
import picamera.array as picamarray, numpy, pathlib
import numpy.ma as nma
import png, io
from pootlestuff import watchables as wv


class piCamCPU(wv.watchablesmart):
    """
    A base class for things that want to analyse images in detail, for movement detection,
    exposure adjustment or anything else.

    It uses picamera to resize frames (to reduce processing load and reduce noise), pulls
    out each frame and passes it to an analyser.
    """
    def __init__(self, statusvals, wabledefs, startbtnvals, loglevel=wv.loglvls.INFO, **kwargs):
        assert hasattr(self, 'monitor')
        super().__init__(wabledefs=[
            ('status',        wv.enumWatch,  statusvals[0],   False, {'vlist': statusvals}),
            ('startstopbtn',  wv.enumWatch,  startbtnvals[0], False, {'vlist': startbtnvals}),
            ('autostart',     wv.enumWatch,  'off',           True,  {'vlist': ('off', 'on')}),
            ('width',         wv.intWatch,   128,             True,  {'minv': 8, 'maxv': 800}),
            ('height',        wv.intWatch,   96,              True,  {'minv': 6, 'maxv': 600}),
            ('lastactive',    wv.floatWatch, float('nan'),    False),
            ('imagemode',     wv.enumWatch,  'rgb',           True,  {'vlist': ('rgb', 'yuv')}),
            ('imagechannel',  wv.enumWatch,  '0',             True,  {'vlist': ('0', '1', '2', '*')}),
            ('skippedcount',  wv.intWatch,   0,               False),
            ('analysedcount', wv.intWatch,   0,               False),
        ] + wabledefs, **kwargs)
        self.agentclass = self.app.agentclass
        self.monthread = None
        self.procthread = None
        self.loglevel = loglevel
        if self.autostart.getIndex() == 1:
            self.startstopbtn.setIndex(1, wv.myagents.app)
            self.running = True
            self.monthread = threading.Thread(name=type(self).__name__, target=self.monitor,
                                              kwargs={'startdelay': 2.5})
            self.monthread.start()
        self.startstopbtn.addNotify(self.do_startstop, wv.myagents.user)

    def do_startstop(self, watched, agent, newValue, oldValue):
        """
        called when the user clicks the start / stop button; to start running detection,
        run up a thread on the 'monitor' function of this object
        """
        btnstate = watched.getIndex()
        if self.monthread is None and btnstate == 1:
            self.running = True
            self.monthread = threading.Thread(name=type(self).__name__, target=self.monitor)
            self.monthread.start()
        elif self.monthread is not None and btnstate == 0:
            self.running = False
        else:
            self.log(wv.loglvls.WARN, 'inconsistent move detection states: running is %s and button was %s' % (
                    self.running, oldValue))

    def preparearray(self):
        """
        prepares / updates a numpy array or masked array dependent on various variables
        """
        nshape = [self.height.getValue(), self.width.getValue()]
        if self.imagechannel.getIndex() == 3:
            nshape.append(3)  # fix: original read 'nspage.append(3)', a typo
        return numpy.empty(shape=nshape, dtype=numpy.int16)

    def monitor(self, startdelay=0):
        """
        This function coordinates cpu based movement detection. It runs in its own thread,
        within the loop of a picamera.capture_sequence call, until self.running is set False.
        buffercycle (a generator) runs in a loop to process each frame.

        It also starts another thread to analyse successive frames from the camera. That
        thread uses a threadsafe queue (which only ever has 1 entry) to trigger analysis;
        if analysis is still running when the next frame arrives, the older frame is discarded.
        """
        if startdelay > 0:
            time.sleep(startdelay)
        self.status.setIndex(1, self.agentclass.app)
        self.lastactive.setValue(time.time(), self.agentclass.app)
        picam = self.app.startCamera()
        resize = ((self.width.getValue() + 31) // 32 * 32, (self.height.getValue() + 15) // 16 * 16)
        self.freebuffs = queue.Queue()
        arraytype = picamarray.PiRGBArray if self.imagemode.getValue() == 'rgb' else picamarray.PiYUVArray
        for i in range(3):
            self.freebuffs.put(arraytype(picam, size=resize))
        self.camerabuff = None                      # the buffer currently being filled
        self.pendingbuffs = queue.Queue(maxsize=1)  # buffers we want to analyse - restricted to 1 - just using threadsafeness
        splitter_port = self.app._getSplitterPort(type(self).__name__)
        self.log(wv.loglvls.INFO, 'cpu move detect using port %d and image size %s' % (splitter_port, resize))
        time.sleep(.1)
        self.condition = None                       # used to trigger detection overlay streaming
        self.analthread = threading.Thread(name='cpuanalyse', target=self.analysethread)
        self.analthread.start()
        picam.capture_sequence(self.buffercycle(),
                               format='rgb' if self.imagemode.getValue() == 'rgb' else 'yuv',
                               resize=resize, splitter_port=splitter_port, use_video_port=True)
        self.camerabuff = None
        self.pendingbuffs = None
        self.freebuffs = None
        self.app._releaseSplitterPort(type(self).__name__, splitter_port)
        self.lastactive.setValue(time.time(), self.agentclass.app)
        self.monthread = None
        self.analthread.join()
        self.analthread = None
        self.status.setIndex(0, self.agentclass.app)

    def buffercycle(self):
        """
        This generator function is used by picamera.capture_sequence to yield buffers to
        capture_sequence. A small pool of buffers is used, and each time it runs round the
        loop it records the last filled buffer so the analyse thread can pick up the latest
        frame whenever it is ready.
        """
        try:
            while self.running:
                try:
                    nextbuff = self.freebuffs.get_nowait()
                except queue.Empty:
                    nextbuff = None
                if nextbuff is None:
                    self.overruns.increment(agent=self.agentclass.app)
                    time.sleep(.2)
                    try:
                        nextbuff = self.freebuffs.get_nowait()
                    except queue.Empty:
                        # fix: the original raised StopIteration here, which PEP 479 turns
                        # into a RuntimeError inside a generator; log and return instead
                        self.log(wv.loglvls.ERROR, 'irrecoverable buffer overflow')
                        return
                prevbuff = self.camerabuff
                self.camerabuff = nextbuff
                if prevbuff is not None:
                    try:
                        expiredbuff = self.pendingbuffs.get_nowait()
                        expiredbuff.truncate(0)
                        self.freebuffs.put(expiredbuff)
                        self.skippedcount.increment(agent=self.agentclass.app)
                    except queue.Empty:
                        pass
                    self.pendingbuffs.put_nowait(prevbuff)
                yield nextbuff
        except:
            self.log(wv.loglvls.DEBUG, 'move detect thread problem!', exc_info=True)

    def analysethread(self):
        prevbuff = None
        clocktimestart = time.time()
        cputimestart = time.process_time()  # fix: time.clock() was removed in Python 3.8
        busytime = 0
        busystart = time.time()
        tick5 = busystart + 5
        logpal = None
        logpng = None
        detstreamcount = 0
        channel = self.imagechannel.getIndex()
        workarray = None
        while self.running:
            try:
                busytime += time.time() - busystart
                thisbuff = self.pendingbuffs.get(block=True, timeout=2)
                busystart = time.time()
            except queue.Empty:
                thisbuff = None
            if thisbuff is not None:
                thisbuff, prevbuff, workarray = self.analysebuff(thisbuff, prevbuff, workarray, channel)
                prevbuff = thisbuff
            if time.time() > tick5:
                elapsed = time.time() - clocktimestart
                self.analcpu.setValue(100 * (time.process_time() - cputimestart) / elapsed, self.agentclass.app)
                self.analbusy.setValue(100 * busytime / elapsed, self.agentclass.app)
                tick5 += 5
        if self.condition:
            try:
                self.condition.notify_all()  # release clients one last time
            except:
                pass
            self.condition = None


class MoveDetectCPU(piCamCPU):
    """
    This class analyses successive frames and looks for significant change, setting its
    'triggered' watchable True when movement is detected. This remains True until no frame
    has detected movement for 'latchtime'. Anything wanting to be triggered can poll or
    set a notification on this watchable.

    The code uses picamera to resize the frames (which happens in the GPU) to a (typically)
    much smaller size for analysis in this thread.

    Initially this class just sets up a bunch of variables that control and monitor this
    functionality. When detection is active, it runs a monitor thread to drive the camera
    and grab frames, and a further thread to actually analyse the frames.

    The monitor thread creates a small number of buffers and uses picamera.capture_sequence
    to run the camera; the capture_sequence call does not return until an external event
    causes capture_sequence to stop.

    The buffers are allocated and managed by the member function buffercycle, which is
    called from within picamera.capture_sequence. 'buffercycle' uses 'yield' to give a free
    buffer back to the camera, and places the buffer just filled where the analysis thread
    can pick it up. If there was already a buffer waiting for analysis, that expired buffer
    is returned to the free list and replaced by the more recent buffer; once the analysis
    thread has grabbed a buffer, it returns it to the free list when it has dealt with it.

    Starting with the second buffer, the analysis thread picks 1 channel from the buffer
    and compares it with the previous frame to check for differences.
    """
    def __init__(self, statusvals=('off', 'watching', 'triggered'),
                 startbtnvals=('start watching', 'stop watching'), **kwargs):
        """
        initialisation just sets up the vars used.
        """
        super().__init__(statusvals=statusvals, startbtnvals=startbtnvals, wabledefs=[
            ('triggercount',  wv.intWatch,    0,                  False),
            ('lasttrigger',   wv.floatWatch,  float('nan'),       False),
            ('cellthresh',    wv.intWatch,    22,                 True, {'minv': 1, 'maxv': 255}),
            ('celltrigcount', wv.intWatch,    100,                True, {'minv': 1}),
            ('latchtime',     wv.floatWatch,  4,                  True),
            ('maskfold',      wv.folderWatch, '~/camfiles/masks', True),
            ('maskfile',      wv.textWatch,   '-off-',            True),
            ('overruns',      wv.intWatch,    0,                  False),
            ('analbusy',      wv.floatWatch,  0,                  False),
            ('analcpu',       wv.floatWatch,  0,                  False),
        ], **kwargs)
        self.running = False

    def fetchmasksize(self):
        """
        called from the web server to retrieve info about the mask in preparation for editing
        """
        rr = {'width': self.width.getValue(),
              'height': self.height.getValue(),
              }
        return rr

    def savemask(self, pathinf, name, mask):
        """
        called from the webserver when the user saves a mask after editing
        """
        mfile = (self.maskfold.getFolder() / name).with_suffix('.png')
        print('savemask (%3d/%3d) to %s (%s): ' % (len(mask[0]), len(mask), name, mfile))
        pw = png.Writer(len(mask[0]), len(mask), greyscale=True, bitdepth=1)
        with mfile.open('wb') as fff:
            pw.write(fff, mask)
        return {'resp': 200, 'rdata': {'message': 'saved to %s' % mfile}}

    def checkmask(self, var, agent, newValue, oldValue):
        pass

    def preparearray(self):
        """
        prepares / updates a numpy array or masked array dependent on various variables
        """
        # the masked-array path below is currently disabled; note it still refers to
        # 'dataarray', a leftover from an earlier version of this method
        if True:
            return super().preparearray()
        if self.maskfile.getValue() == '-off-':
            return dataarray
        else:
            mfile = (self.maskfold.getValue() / self.maskfile.getValue()).with_suffix('.png')
            if mfile.is_file():
                with mfile.open('rb') as mfo:
                    mwidth, mheight, mrows, minfo = png.Reader(file=mfo).read()
                    rowdat = [m for m in mrows]
                if mwidth == self.width.getValue() and mheight == self.height.getValue():
                    if minfo['planes'] == 1 and minfo['bitdepth'] == 1:
                        mask = numpy.array(rowdat, dtype=numpy.bool_)
                        self.log(wv.loglvls.INFO, 'mask updated from %s %d of %d masked' % (
                                str(mfile), len(numpy.nonzero(mask)[0]), mask.shape[0] * mask.shape[1]))
                        return nma.masked_array(data=dataarray, mask=mask)
                    else:
                        self.log(wv.loglvls.INFO, 'mask file has %d planes and bitdepth %d: should be 1 and 1' % (
                                minfo['planes'], minfo['bitdepth']))
                else:
                    self.log(wv.loglvls.INFO, 'mask image is wrong size - expected (%3d/%3d), file has (%3d/%3d)' % (
                            self['width'].getValue(), self['height'].getValue(), mwidth, mheight))
            else:
                self.log(wv.loglvls.INFO, 'unable to get maskfile %s' % str(mfile))
        return dataarray

    def analysebuff(self, thisbuff, prevbuff, workarray, channel):
        # NOTE: logpal / logpng state originally lived in analysethread; they are
        # initialised here so this extracted method does not hit UnboundLocalError
        logpal = None
        logpng = None
        if prevbuff is None:
            workarray = self.preparearray()
        else:
            logthresh = self.cellthresh.getValue()
            if channel == 3:
                numpy.copyto(workarray, thisbuff.array)
                workarray -= prevbuff.array
            else:
                numpy.copyto(workarray, thisbuff.array[:, :, channel])
                workarray -= prevbuff.array[:, :, channel]
            numpy.absolute(workarray, workarray)
            cthresh = self.cellthresh.getValue()
            if logthresh != cthresh:
                logthresh = cthresh
                logpal = None
            hits = (workarray >= logthresh).nonzero()
            trig = len(hits[0]) >= self.celltrigcount.getValue()
            if trig:
                if self.status.getIndex() < 2:
                    self.triggercount.increment(agent=self.agentclass.app)
                self.status.setIndex(2, agent=self.agentclass.app)
                self.lasttrigger.setValue(time.time(), agent=self.agentclass.app)
            else:
                if self.status.getIndex() > 1 and time.time() > (self.lasttrigger.getValue() + self.latchtime.getValue()):
                    self.status.setIndex(1, agent=self.agentclass.app)
            if self.condition is not None:  # check if we're streaming the detection overlay
                if self.laststreamactive + 5 < time.time():  # client(s) all gone - stop the stream
                    print('clients gone - shut detect stream')
                    self.condition = None
                    logpal = None
                    streaming = None
                    logpng = None
                else:
                    if logpal is None:
                        logpal = makepalette(logthresh)
                    streamimg = io.BytesIO()
                    arshape = workarray.shape
                    if logpng is None:
                        logpng = png.Writer(arshape[1], arshape[0], palette=logpal)
                    detimg = workarray.filled(fill_value=0) if hasattr(workarray, 'filled') else workarray
                    if trig and detoverlay['xbase'] is not None:  # check if we want a blob
                        xb = detoverlay['xbase']
                        yb = detoverlay['ybase']
                        if abs(xb) < self.width.getValue() and abs(yb) < self.height.getValue():
                            if xb < 0:
                                xe = min((-1, xb + detoverlay['xsize']))
                            else:
                                xe = min((255, xb + detoverlay['xsize']))
                            if yb < 0:
                                ye = min((-1, yb + detoverlay['ysize']))
                            else:
                                ye = min((255, yb + detoverlay['ysize']))
                            detimg[yb:ye, xb:xe] = 255
                    logpng.write(streamimg, detimg.tolist())
                    with self.condition:
                        self.frame = streamimg.getvalue()
                        self.condition.notify_all()
                    if False:  # disabled debug snapshot of each overlay frame
                        sfp = pathlib.Path('~/Pictures').expanduser() / ('b%04d.png' % self['triggercount'].getValue())
                        with sfp.open('wb') as sfpf:
                            sfpf.write(self.frame)
            prevbuff.truncate(0)
            self.freebuffs.put(prevbuff)
        return thisbuff, prevbuff, workarray

    def getStream(self):
        """
        When we get a stream request from the web server, check if already running.

        This is called by an http handler request thread. The http thread (there can be
        several) then loops calling nextframe.
        """
        if self.running:
            if self.condition is None:
                print('make detect stream')
                self.condition = threading.Condition()
                self.laststreamactive = time.time()
            else:
                print('returning existing detect stream')
            return self
        else:
            print('aborting detect stream')
            raise StopIteration()

    def nextframe(self):
        """
        The http handler thread calls this to get each successive frame.

        It waits for a new frame to arrive, then updates the lastactive timestamp
        and returns the frame.
        """
        if self.running and self.condition:
            with self.condition:
                self.condition.wait()
            self.laststreamactive = time.time()
            return self.frame, 'image/png', len(self.frame)
        else:
            raise StopIteration()


detpalette = (
    (0.5,  (  0,   0,   0,   0)),   # totally transparent black below 1/2 threshold
    (0.75, ( 60,  60, 140, 160)),   # medium blue for 1/2 to 3/4 of threshold
    (1,    ( 75,  75, 255, 200)),   # brighter and more opaque blue up to threshold
    (1.5,  ( 75, 255,  75, 200)),   # green just above threshold
    (2,    ( 60, 150,  69, 160)),   # then paler green
    (400,  (255,   0,   0, 139)),   # then a red wash
)

detoverlay = {           # overlay a blob when triggered
    'xbase': -4,         # set 'xbase' to None to suppress the blob; +ve insets from left, -ve from right
    'xsize': 4,          # size must be +ve
    'ybase': 4,
    'ysize': 4,
    'palette': (255, 100, 100, 255),
}


def makepalette(thresh):
    """
    Prepares a palette to use with the difference data to make a png image.

    The diff values are all in range 0..255, so a simple palette highlights them:
        below cellthreshold * 0.5  : transparent black
        below cellthreshold * 0.75 : medium blue
        below cellthreshold        : bright blue
        below cellthreshold * 1.5  : green
        below cellthreshold * 2    : paler green
        beyond that                : red wash
    The actual colours and transparency are set in a table, so they can easily be changed.
    """
    colourno = 0
    palette = []
    for tfact, pal in detpalette:
        nextmax = tfact * thresh
        while colourno < nextmax:
            palette.append(pal)
            colourno += 1
            if colourno > 254:
                palette.append(detoverlay['palette'])
                return palette
    while colourno < 255:
        palette.append(pal)
        colourno += 1
    palette.append(detoverlay['palette'])
    return palette
49.149398
186
0.566897
2,272
20,397
5.06118
0.24956
0.015827
0.017741
0.011131
0.115053
0.066441
0.04957
0.0447
0.036525
0.028002
0
0.017405
0.335245
20,397
414
187
49.268116
0.830666
0.22974
0
0.247557
0
0
0.070595
0
0
0
0
0
0.003257
1
0.04886
false
0.009772
0.016287
0
0.110749
0.016287
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d220e80a6442156d54665ed445cceb1c31a96d2
6,950
py
Python
IslanderDataPreprocessing/datacorrelation.py
Islanderrobotics/IslanderDataPreprocessing
a8de863fe0d7d855da7d51c3e06c6fd7360ae9b2
[ "MIT" ]
null
null
null
IslanderDataPreprocessing/datacorrelation.py
Islanderrobotics/IslanderDataPreprocessing
a8de863fe0d7d855da7d51c3e06c6fd7360ae9b2
[ "MIT" ]
null
null
null
IslanderDataPreprocessing/datacorrelation.py
Islanderrobotics/IslanderDataPreprocessing
a8de863fe0d7d855da7d51c3e06c6fd7360ae9b2
[ "MIT" ]
null
null
null
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from PyQt5 import QtWidgets
import sys
from .DataVisulization import DataVisulization


class DataCorrelation:
    '''This module lets you view the correlation values of your dataset, helping you prevent simple errors.

    DataCorrelation(df = pandas dataframe)
    df: the dataset you would like to evaluate

    Correlationmatrix(): the method you call to view which columns have correlation relationships
    LookingAtCorr(): the method where you actually make the changes to your dataset; returns a pandas dataframe
    Check(): calls both Correlationmatrix and LookingAtCorr for you; also returns a pandas dataframe
    '''
    def __init__(self, df):
        self.df = df
        self.copy = self.ByeByeText(self.df)
        self.high_corr = {}
        self.corr = []

    def ByeByeText(self, df):
        # drop non-numeric (object) columns before computing correlations
        copy = df.copy()
        for i in copy.columns:
            if copy[i].dtype == "object":
                copy.drop(columns=i, inplace=True)
        return copy

    def FindingScreenSize_(self):
        app = QtWidgets.QApplication(sys.argv)
        screen = app.primaryScreen()
        size = screen.size()
        screensize = (size.width() / 95 - 2, size.height() / 96 - 2)
        return screensize

    def Correlationmatrix(self):
        '''Correlationmatrix(): the method you call to view which columns have correlation relationships'''
        for i in self.copy.columns:
            for j in self.copy.columns:
                if i == j:
                    pass
                else:
                    corr = self.copy[i].corr(self.copy[j])
                    if corr >= 0.5 or corr <= -0.5:
                        if i not in self.high_corr.keys():
                            self.high_corr[i] = []
                        self.high_corr[i].extend([j, corr])
                        self.corr.append(corr)
        print("these are the correlation values for each column")
        count = 0
        for i in self.high_corr.keys():
            print(f"{count}:{i},{self.high_corr[i]}")
            count += 1

    def LookingAtCorr(self):
        '''LookingAtCorr(): the method where you actually make the changes to your dataset; returns a pandas dataframe'''
        print("with the values you see above, do you wish to see a scatter matrix of them?")
        choice = input("enter yes if you do")
        if choice.upper() == "YES":
            column = []
            matrix = []
            for i in self.copy.columns:
                column.append(i)
            print(column)
            while choice.upper() != "Q":
                count = 0
                for i in column:
                    print(f"{count}:{i}")
                    count += 1
                try:
                    index = int(input("enter the corresponding number of each column that you would like to see"))
                except ValueError:
                    print("seems you picked an option that was not available")
                    continue  # fix: skip this round instead of reusing a stale (or unset) index
                matrix.append(column[index])
                choice = input("enter q to view the scatter matrix")
                column.pop(index)
            scatter_matrix(self.copy[matrix], figsize=self.FindingScreenSize_())
            plt.show()
            choice = input("enter yes if there is a plot you would like to view in more depth")
            if choice.upper() == "YES":
                for i in self.copy.columns:
                    print(i)
                x = input("enter the name of the column you would like on the x axis")
                y = input("enter the name of the column you would like on the y axis")
                single_plot = DataVisulization(data=self.copy, type_of_plot="scatter",
                                               column_values_for_x=x, column_values_for_y=y)
                single_plot.driver()

    def combine(self):
        copy_of_copy = self.copy.copy()
        drop = []
        choice = input("enter yes to combine some of the columns")
        if choice.upper() == "YES":
            column = []
            for i in self.copy.columns:
                column.append(i)
            while choice.upper() != "Q":
                count = 0
                for i in column:
                    print(f"{count}:{i}")
                    count += 1
                while True:
                    try:
                        numerator = int(input("enter the number of the corresponding column you would like the numerator to be"))
                        if numerator < len(column):  # fix: valid indexes run 0..len-1
                            break
                        else:
                            print("please enter one of the numbers you see on the screen")
                    except ValueError:
                        print("please enter a number")
                while True:
                    try:
                        denominator = int(input("enter the number of the corresponding column you would like the denominator to be"))
                        if denominator < len(column):
                            break
                        else:
                            print("please enter one of the numbers you see on the screen")
                    except ValueError:
                        print("please enter a number")
                name_of_new_column = input("enter what you would like the name of the new column to be")
                self.copy[name_of_new_column] = self.copy[column[numerator]] / self.copy[column[denominator]]
                drop.append(column[numerator])
                drop.append(column[denominator])
                choice = input("enter q if that is all the columns you would like to combine")
            self.copy.drop(columns=drop, inplace=True)
            choice = input("enter yes if you would like to view the new correlation matrix scores")
            if choice.upper() == "YES":
                self.high_corr.clear()
                self.corr.clear()
                self.Correlationmatrix()
                print("what do you think of those scores?")
            choice = input("enter yes if you would like to keep these new scores"
                           " or enter nothing to revert them back to the original")
            if choice.upper() != "YES":
                self.copy = copy_of_copy
            elif choice.upper() == "YES":
                self.df = self.copy
        return self.df

    def Check(self):
        '''Check(): calls both Correlationmatrix and LookingAtCorr for you; also returns a pandas dataframe'''
        self.Correlationmatrix()
        self.LookingAtCorr()
        return self.df
44.551282
139
0.543453
832
6,950
4.497596
0.207933
0.04062
0.035275
0.037413
0.415019
0.361304
0.343666
0.343666
0.343666
0.306788
0
0.003887
0.370647
6,950
155
140
44.83871
0.851623
0.146906
0
0.344
0
0
0.215979
0.005326
0
0
0
0
0
1
0.056
false
0.008
0.04
0
0.136
0.104
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d235624dd54cd2de5b4269e1d88562233f30c47
3,460
py
Python
data/process_data.py
Lydiafz/Udacity_DisasterPipeline
5673530e187c679c87569e826c131c256d0b6ed3
[ "FTL", "CNRI-Python", "blessing" ]
null
null
null
data/process_data.py
Lydiafz/Udacity_DisasterPipeline
5673530e187c679c87569e826c131c256d0b6ed3
[ "FTL", "CNRI-Python", "blessing" ]
null
null
null
data/process_data.py
Lydiafz/Udacity_DisasterPipeline
5673530e187c679c87569e826c131c256d0b6ed3
[ "FTL", "CNRI-Python", "blessing" ]
null
null
null
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine


def load_data(messages_filepath, categories_filepath):
    '''
    Load the message and category .csv data files

    Input: the filepaths of the two .csv files
    Output: the two data frames
    '''
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    return messages, categories


def clean_data(messages, categories):
    '''
    Merge and clean the dataframes for the downstream ML model

    Input: the message and category dataframes
    Output: the final clean, merged dataframe
    '''
    df = messages.merge(categories, how='outer', on=['id'])
    # create a dataframe of the 36 individual category columns
    categories = df['categories'].str.split(";", expand=True)
    # select the first row of the categories dataframe
    row = categories.iloc[0]
    # use this row to extract a list of new column names for categories:
    # apply a lambda that takes everything up to the second-to-last character of each string
    category_colnames = row.map(lambda x: x[:-2])
    # rename the columns of `categories`
    categories.columns = category_colnames
    # convert category values to just numbers 0 or 1
    for column in categories:
        # set each value to be the last character of the string
        categories[column] = categories[column].astype(str).str[-1]
        # convert column from string to numeric
        categories[column] = categories[column].map(lambda x: int(x))
    # drop the original categories column from `df`
    df = df.drop(['categories'], axis=1)
    # concatenate the original dataframe with the new `categories` dataframe
    df = pd.concat([df, categories], axis=1)
    # drop duplicates
    df = df.drop_duplicates()
    # replace label 2 in the 'related' category with 1
    # (fix: the original np.where(..., 1, 0) also zeroed every existing 1)
    df['related'] = np.where(df['related'] == 2, 1, df['related'])
    return df


def save_data(df, database_filename):
    '''
    Save the data into a sql table

    Input: the dataframe to be saved and the filepath for the database
    '''
    engine = create_engine('sqlite:///' + database_filename, echo=False)
    df.to_sql('DisasterData', engine, if_exists='replace', index=False)


def main():
    if len(sys.argv) == 4:
        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
        print('step1: Loading data...\n    MESSAGES: {}\n    CATEGORIES: {}'
              .format(messages_filepath, categories_filepath))
        messages, categories = load_data(messages_filepath, categories_filepath)
        print('step2: Cleaning data...')
        df = clean_data(messages, categories)
        print('step3: Saving data...\n    DATABASE: {}'.format(database_filepath))
        save_data(df, database_filepath)
        print('step4: Cleaned data saved to database!')
    else:
        print('Warning: Please provide the filepaths of the messages and categories '
              'datasets as the first and second argument respectively, as '
              'well as the filepath of the database to save the cleaned data '
              'to as the third argument. \n\nExample: python process_data.py '
              'disaster_messages.csv disaster_categories.csv '
              'DisasterResponse.db')


if __name__ == '__main__':
    main()
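The ETL helpers can also be driven directly, bypassing main(); a sketch using the file names from the script's own usage message (assumed to exist in the working directory):

messages, categories = load_data('disaster_messages.csv', 'disaster_categories.csv')
df = clean_data(messages, categories)
save_data(df, 'DisasterResponse.db')   # writes the 'DisasterData' table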
38.444444
87
0.656936
442
3,460
5.049774
0.357466
0.013441
0.058244
0.060932
0.037634
0.037634
0
0
0
0
0
0.007737
0.25289
3,460
90
88
38.444444
0.855706
0.292197
0
0
0
0
0.243539
0.019273
0
0
0
0
0
1
0.088889
false
0
0.088889
0
0.222222
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d26941eb40fda35e6cbb0aab9a6f55f70706908
2,438
py
Python
dgp/cli.py
weihaosky/dgp
b221534ea5515d03dce5c29dc6e60c1eee129785
[ "MIT" ]
1
2021-05-14T09:16:58.000Z
2021-05-14T09:16:58.000Z
dgp/cli.py
weihaosky/dgp
b221534ea5515d03dce5c29dc6e60c1eee129785
[ "MIT" ]
null
null
null
dgp/cli.py
weihaosky/dgp
b221534ea5515d03dce5c29dc6e60c1eee129785
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# Copyright 2019-2020 Toyota Research Institute. All rights reserved.
"""DGP command line interface
"""
import glob
import itertools
import os
import sys
from multiprocessing import Pool, cpu_count

import click

from dgp.proto.dataset_pb2 import SceneDataset
from dgp.utils.aws import (convert_uri_to_bucket_path, parallel_upload_s3_objects)
from dgp.utils.dataset_conversion import MergeSceneDatasetGen
from dgp.utils.protobuf import open_pbobject


@click.group()
@click.version_option()
def cli():
    pass


@cli.command(name="upload-scenes")
@click.option(
    "--scene-dataset-json",
    required=True,
    help="Path to a local scene dataset .json file i.e. /mnt/scene_dataset_v1.2.json"
)
@click.option(
    "--s3-dst-dir",
    required=True,
    help="Prefix for uploaded scenes"
)
def upload_scenes(scene_dataset_json, s3_dst_dir):
    """Parallelized upload for scenes from a scene dataset JSON.

    NOTE: This tool only verifies the presence of a scene, not the validity of any of its contents.
    """
    bucket_name, s3_base_path = convert_uri_to_bucket_path(s3_dst_dir)
    dataset = open_pbobject(scene_dataset_json, SceneDataset)
    local_dataset_root = os.path.dirname(os.path.abspath(scene_dataset_json))
    if not dataset:
        print('Failed to parse dataset artifacts {}'.format(scene_dataset_json))
        sys.exit(0)

    scene_dirs = []
    for split in dataset.scene_splits.keys():
        scene_dirs.extend([
            os.path.join(local_dataset_root, os.path.dirname(filename))
            for filename in dataset.scene_splits[split].filenames
        ])

    # Make sure the scenes exist
    with Pool(cpu_count()) as proc:
        file_list = list(itertools.chain.from_iterable(proc.map(_get_scene_files, scene_dirs)))

    # Upload the scene JSON, too.
    file_list += [scene_dataset_json]
    print("Creating file manifest for S3 for {} files".format(len(file_list)))
    s3_file_list = [os.path.join(s3_base_path, os.path.relpath(_f, local_dataset_root)) for _f in file_list]
    print("Done. Uploading to S3.")
    parallel_upload_s3_objects(file_list, s3_file_list, bucket_name)


def _get_scene_files(scene):
    assert os.path.exists(scene), "Scene {} doesn't exist".format(scene)
    scene_files = glob.glob(os.path.join(scene, "**"), recursive=True)
    return [_f for _f in scene_files if os.path.isfile(_f)]


if __name__ == '__main__':
    cli()
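A sketch of exercising the click group programmatically with click's test runner (the JSON path comes from the option's own help text; the bucket URI is invented, and a real run would attempt the S3 upload):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, [
    'upload-scenes',
    '--scene-dataset-json', '/mnt/scene_dataset_v1.2.json',
    '--s3-dst-dir', 's3://my-bucket/scenes/',
])
print(result.output)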
32.945946
108
0.720263
356
2,438
4.691011
0.384831
0.064671
0.076647
0.021557
0.082635
0.034731
0
0
0
0
0
0.011426
0.174323
2,438
73
109
33.39726
0.818182
0.132075
0
0.04
0
0.02
0.132409
0.013384
0
0
0
0
0.02
1
0.06
false
0.02
0.2
0
0.28
0.06
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d28289012166b79e101e010e38b5f25d78c2a26
2,124
py
Python
app/game.py
d4glushko/task_puzzle_15
ca308e2f07dfbe8aac50dbf3110443a274199018
[ "MIT" ]
null
null
null
app/game.py
d4glushko/task_puzzle_15
ca308e2f07dfbe8aac50dbf3110443a274199018
[ "MIT" ]
null
null
null
app/game.py
d4glushko/task_puzzle_15
ca308e2f07dfbe8aac50dbf3110443a274199018
[ "MIT" ]
null
null
null
import curses

from app.environment import PuzzleEnvironment, PuzzleEnvironmentSettings, PuzzleAction
from app.views import TerminalView, AbstractView
from app.inputs import TerminalInput, AbstractInput


class CursesKeysWrapper:
    Q = 113
    ESC = 27
    R = 114
    W = 119
    KEY_UP = curses.KEY_UP
    D = 100
    KEY_RIGHT = curses.KEY_RIGHT
    S = 115
    KEY_DOWN = curses.KEY_DOWN
    A = 97
    KEY_LEFT = curses.KEY_LEFT


class PuzzleGame:
    def __init__(self, window, env_settings: PuzzleEnvironmentSettings, debug: bool):
        self.max_value = env_settings.cols_number * env_settings.rows_number - 1
        self.puzzle_env: PuzzleEnvironment = PuzzleEnvironment(env_settings)
        self.view: AbstractView = TerminalView(window, debug)
        self.input: AbstractInput = TerminalInput(window)

    def start(self):
        self.__reset_game()
        key = None
        while True:
            env_state = self.puzzle_env.get_state()
            is_completed = self.puzzle_env.is_completed()
            self.view.render_screen(self.step, env_state, key, self.max_value, is_completed)
            key = self.input.get_ch()

            if key == CursesKeysWrapper.Q or key == CursesKeysWrapper.ESC:
                break

            if key == CursesKeysWrapper.R:
                self.__reset_game()
                continue

            if is_completed:
                continue

            if key == CursesKeysWrapper.W or key == CursesKeysWrapper.KEY_UP:
                action = PuzzleAction.UP
            elif key == CursesKeysWrapper.D or key == CursesKeysWrapper.KEY_RIGHT:
                action = PuzzleAction.RIGHT
            elif key == CursesKeysWrapper.S or key == CursesKeysWrapper.KEY_DOWN:
                action = PuzzleAction.DOWN
            elif key == CursesKeysWrapper.A or key == CursesKeysWrapper.KEY_LEFT:
                action = PuzzleAction.LEFT
            else:
                continue

            self.step += 1
            self.puzzle_env.act(action)

    def __reset_game(self):
        self.puzzle_env.setup()
        self.step = 0
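A hypothetical entry point for the class above; PuzzleEnvironmentSettings is assumed here to accept cols_number and rows_number keywords, mirroring the attributes read in __init__:

import curses

def main(window):
    settings = PuzzleEnvironmentSettings(cols_number=4, rows_number=4)  # assumed signature
    PuzzleGame(window, settings, debug=False).start()

if __name__ == '__main__':
    curses.wrapper(main)  # sets up and tears down the curses screen safely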
32.181818
92
0.618644
231
2,124
5.484848
0.333333
0.173639
0.051302
0.078927
0
0
0
0
0
0
0
0.014976
0.30838
2,124
65
93
32.676923
0.847515
0
0
0.096154
0
0
0
0
0
0
0
0
0
1
0.057692
false
0
0.076923
0
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d2b6494460f181733e9edcda853c431b0c53cb5
443
py
Python
template_mathing_6methods.py
dmeseguerw/ImageDetection
fd18fa81514a9745a5e3138c360b0d878f2a6606
[ "MIT", "Unlicense" ]
null
null
null
template_mathing_6methods.py
dmeseguerw/ImageDetection
fd18fa81514a9745a5e3138c360b0d878f2a6606
[ "MIT", "Unlicense" ]
null
null
null
template_mathing_6methods.py
dmeseguerw/ImageDetection
fd18fa81514a9745a5e3138c360b0d878f2a6606
[ "MIT", "Unlicense" ]
null
null
null
import cv2
import numpy as np

img_rgb = cv2.imread('mainimage.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('template.jpg', 0)
w, h = template.shape[::-1]

res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 255, 255), 1)

cv2.imshow('Det', img_rgb)
cv2.waitKey()
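The file name mentions six methods, though only TM_CCOEFF_NORMED is used above. A sketch trying each of OpenCV's six comparison modes on the same image/template pair, reporting the single best match per mode:

methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR,
           cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]
for m in methods:
    res = cv2.matchTemplate(img_gray, template, m)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # for the SQDIFF variants the best match is the minimum; otherwise the maximum
    top_left = min_loc if m in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED) else max_loc
    print(m, top_left)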
27.6875
70
0.693002
78
443
3.820513
0.487179
0.080537
0.090604
0
0
0
0
0
0
0
0
0.066838
0.121896
443
16
71
27.6875
0.699229
0
0
0
0
0
0.063063
0
0
0
0
0
0
1
0
false
0
0.153846
0
0.153846
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d2befe5ea7c58a4f8bc6f563a72998b31c89000
7,607
py
Python
line_class.py
jackiele07/CarND-Advanced-Lane-Lines
0ee629fbaff6db42b6cf12c90c06e0b5fabbde40
[ "MIT" ]
null
null
null
line_class.py
jackiele07/CarND-Advanced-Lane-Lines
0ee629fbaff6db42b6cf12c90c06e0b5fabbde40
[ "MIT" ]
null
null
null
line_class.py
jackiele07/CarND-Advanced-Lane-Lines
0ee629fbaff6db42b6cf12c90c06e0b5fabbde40
[ "MIT" ]
null
null
null
import numpy as np
import cv2
from matplotlib import pyplot as plt


class window():
    def __init__(self, nwindow, margin, mpixel, centroid):
        self.nwindow = nwindow
        self.margin = margin
        self.mpixel = mpixel
        self.centroid = centroid

    # Function to return the points within the boundary of window n
    # -- Input:
    #    1. window_no: number of the window - should be < nwindow
    #    2. binary_wrapped: binary image in which to detect points within the boundary
    #       (the window spans self.centroid +- margin in x)
    # -- Output:
    #    1. point_inds: array of all detected points
    #    2. window_coordinate: window coordinates, to draw a rectangle if required
    #    3/4. nonzerox, nonzeroy: x/y positions of all nonzero pixels in the image
    def get_points_within_boundary(self, window_no, binary_wrapped):
        if window_no < self.nwindow:
            # calculate window size (fix: np.int is deprecated, plain int works the same here)
            window_height = int(binary_wrapped.shape[0] // self.nwindow)
            # calculate window coordinates
            win_y_low = int(binary_wrapped.shape[0] - (window_no + 1) * window_height)
            win_y_high = int(binary_wrapped.shape[0] - window_no * window_height)
            win_x_low = int(self.centroid - self.margin)
            win_x_high = int(self.centroid + self.margin)
            window_coordinate = [(win_x_low, win_y_low), (win_x_high, win_y_high)]
            # identify the x and y positions of all nonzero pixels in the image
            nonzero = binary_wrapped.nonzero()
            nonzeroy = np.array(nonzero[0])
            nonzerox = np.array(nonzero[1])
            # get the indexes of points within the boundary
            point_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_x_low) & (nonzerox < win_x_high)).nonzero()[0]
            # get the count of points within the boundary
            ncount = len(point_inds)
            # update the centroid of the window
            if ncount > self.mpixel:
                self.centroid = np.mean(nonzerox[point_inds])
        else:
            # fix: original read print("Invalid window_no %d", (widown_no)), a typo
            # plus printf-style misuse; note the return below still fails in this branch
            print("Invalid window_no %d" % window_no)
        return point_inds, window_coordinate, nonzerox, nonzeroy

    # Function to draw the rectangle and overlay it on an image for a sanity check
    def draw_rectangle(self, window_coordinate, overlay_image):
        # draw the window on the visualization image
        window_coordinate = tuple(window_coordinate)
        print(window_coordinate)
        output = np.copy(overlay_image)
        cv2.rectangle(output, window_coordinate[0], window_coordinate[1], (0, 255, 0), 2)
        return output


class Line():
    def __init__(self, nwindow, margin, mpixel, centroid, us_line_long, us_line_wide):
        self.us_line_wide = us_line_wide
        self.us_line_long = us_line_long
        self.has_data = False
        # was the line detected in the last iteration?
        self.detected = False
        # average x values of the fitted line over the last n iterations
        self.bestx = None
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = np.array([0, 0, 0], dtype='float')
        # polynomial coefficients for the most recent fit
        self.current_fit = []
        self.current_fit_cr = []
        # radius of curvature of the line in some units
        self.radius_of_curvature = []
        self.radius_of_curvature_value = 0
        # distance in meters of vehicle center from the line
        self.line_base_pos = 0
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x values for detected line pixels
        self.allx = None
        # y values for detected line pixels
        self.ally = None
        # window used for the running integration
        self.window = window(nwindow, margin, mpixel, centroid)
        self.count = 0
        self.sliding_window = []
        self.epsilon = float(4 * (10 ** (-3)))
        self.epsilon2 = float(100)
        self.epsilon3 = float(500)

    def update_curvature(self):
        self.radius_of_curvature_value = np.average(self.radius_of_curvature)

    def update_polycoeff(self, binary_warped):
        lane_point_inds = []
        ym_per_pix = self.us_line_long / 720
        xm_per_pix = self.us_line_wide / 640
        debug_image = np.copy(binary_warped)
        debug = []
        for no in range(self.window.nwindow):
            point_inds, window_coordinate, nonzerox, nonzeroy = \
                self.window.get_points_within_boundary(no, binary_warped)
            self.sliding_window.append(window_coordinate)
            lane_point_inds.append(point_inds)
        # append all the x & y to this Line
        ycal = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
        if len(lane_point_inds):
            # TODO: need to add validation before updating lines
            try:
                lane_point_inds = np.concatenate(lane_point_inds)
            except ValueError:
                pass
            if len(nonzerox[lane_point_inds]) & len(nonzeroy[lane_point_inds]):
                self.allx = nonzerox[lane_point_inds]
                self.ally = nonzeroy[lane_point_inds]
                var_current_fit = np.polyfit(self.ally, self.allx, 2)
                self.current_fit_cr = np.polyfit((np.array(self.ally) * ym_per_pix),
                                                 (np.array(self.allx) * xm_per_pix), 2)
                self.detected = True
            else:
                self.detected = False
        if self.detected == True:
            if self.has_data == True:
                self.diffs = np.absolute(var_current_fit - self.best_fit)
                if (self.diffs[0] < self.epsilon) & (self.diffs[1] < self.epsilon2):
                    if self.diffs[2] > self.epsilon3:
                        var_current_fit[2] = self.best_fit[2]
                    if self.count == 10:
                        self.count = 0
                    if len(self.current_fit) > self.count:
                        self.current_fit.pop(self.count)
                    self.current_fit.insert(self.count, var_current_fit)
                    self.best_fit = np.average(self.current_fit, axis=0)
                    self.radius_of_curvature.append(
                        ((1 + (2 * self.current_fit_cr[0] * max(self.ally) * ym_per_pix
                               + self.current_fit_cr[1]) ** 2) ** 1.5)
                        / np.absolute(2 * self.current_fit_cr[0]))
                else:
                    self.detected = False
            else:
                self.current_fit.insert(self.count, np.polyfit(self.ally, self.allx, 2))
                self.best_fit = self.current_fit[-1]
                self.radius_of_curvature.append(
                    ((1 + (2 * self.current_fit_cr[0] * max(self.ally) * ym_per_pix
                           + self.current_fit_cr[1]) ** 2) ** 1.5)
                    / np.absolute(2 * self.current_fit_cr[0]))
            self.count = self.count + 1
            self.has_data = True
        try:
            self.bestx = self.best_fit[0] * ycal ** 2 + self.best_fit[1] * ycal + self.best_fit[2]
            self.line_base_pos = self.bestx[-1]
        except TypeError:
            # avoids an error if `left` and `right_fit` are still none or incorrect
            print('The function failed to fit a line!')
            self.bestx = 1 * ycal ** 2 + 1 * ycal
        print("+++++++++++", file=open("debug.txt", "a"))
        print(self.diffs, file=open("debug.txt", "a"))
        print("----", file=open("debug.txt", "a"))
        print(self.best_fit, file=open("debug.txt", "a"))
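An illustrative driving loop for the class above (a real binary warped lane image is assumed; a sparse random array stands in here, so the fitted values are meaningless):

binary_warped = (np.random.rand(720, 1280) > 0.99).astype(np.uint8)
left_line = Line(nwindow=9, margin=100, mpixel=50, centroid=320,
                 us_line_long=30, us_line_wide=3.7)   # ~30 m of lane, 3.7 m wide
left_line.update_polycoeff(binary_warped)
left_line.update_curvature()
print(left_line.radius_of_curvature_value)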
45.825301
190
0.593269
988
7,607
4.37247
0.20749
0.043981
0.048611
0.02963
0.271065
0.204398
0.121296
0.080556
0.052315
0.052315
0
0.018192
0.306297
7,607
165
191
46.10303
0.800455
0.180492
0
0.115044
0
0
0.0192
0
0
0
0
0.006061
0
1
0.053097
false
0.00885
0.026549
0
0.115044
0.061947
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d2dc51a6684f0eee600755a8aa65da621a005ab
550
py
Python
Learning/8.2-recursion.py
DishantIsrani/Python-Learning
f810fd64adeecd34fd2d95182f6be2bdfb4f9ac6
[ "MIT" ]
null
null
null
Learning/8.2-recursion.py
DishantIsrani/Python-Learning
f810fd64adeecd34fd2d95182f6be2bdfb4f9ac6
[ "MIT" ]
null
null
null
Learning/8.2-recursion.py
DishantIsrani/Python-Learning
f810fd64adeecd34fd2d95182f6be2bdfb4f9ac6
[ "MIT" ]
null
null
null
# ITERATIVE FUNCTION FOR FACTORIAL # def factorial_iterative(n): # fact=1 # for b in range(1, n+1): # fact=fact*b # return fact # a = int(input("enter the number you want the factorial of: ")) # print(f"the factorial of {a} is {factorial_iterative(a)}") # RECURSIVE FUNCTION FOR FACTORIAL def factorial_recursive(n): if n==1 or n==0: return 1 return n * factorial_recursive(n-1) a = int(input("enter the number you want the factorial of: ")) print(f"the factorial of {a} is {factorial_recursive(a)}")
20.37037
64
0.652727
86
550
4.116279
0.325581
0.135593
0.158192
0.129944
0.610169
0.429379
0.429379
0.429379
0.429379
0.429379
0
0.016393
0.223636
550
26
65
21.153846
0.812646
0.529091
0
0
0
0
0.378601
0.098765
0
0
0
0
0
1
0.166667
false
0
0
0
0.5
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d2e03e276bae7527bce92f21abff29448cd794b
5,943
py
Python
futaba/cogs/welcome/role_reapplication.py
Hoffs/futaba
4d07c421c4229c81ddd42da1a49594b8cf11832d
[ "MIT" ]
23
2018-09-17T09:06:27.000Z
2021-05-27T15:21:37.000Z
futaba/cogs/welcome/role_reapplication.py
Hoffs/futaba
4d07c421c4229c81ddd42da1a49594b8cf11832d
[ "MIT" ]
257
2018-08-18T21:27:54.000Z
2020-12-29T23:27:10.000Z
futaba/cogs/welcome/role_reapplication.py
Hoffs/futaba
4d07c421c4229c81ddd42da1a49594b8cf11832d
[ "MIT" ]
22
2018-09-09T09:03:13.000Z
2021-11-09T03:34:34.000Z
#
# cogs/welcome/role_reapplication.py
#
# futaba - A Discord Mod bot for the Programming server
# Copyright (c) 2017-2020 Jake Richardson, Ammon Smith, jackylam5
#
# futaba is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#

"""
Handling to reapply roles when the member rejoins the guild.
"""

import asyncio
import logging
from collections import deque, namedtuple

import discord
from discord.ext import commands

from futaba.converters import UserConv
from futaba.utils import user_discrim
from ..abc import AbstractCog

logger = logging.getLogger(__name__)

FakeMember = namedtuple("FakeMember", ("name", "id", "guild"))

__all__ = ["RoleReapplication"]


class RoleReapplication(AbstractCog):
    __slots__ = ("journal", "lock", "recent_updates")

    def __init__(self, bot):
        super().__init__(bot)
        self.journal = bot.get_broadcaster("/roles")
        self.lock = asyncio.Lock()
        self.recent_updates = deque(maxlen=20)

    async def bg_setup(self):
        """
        Update all of the members' saved roles.

        Since this task can be very slow with several thousand members, it runs in
        the background and delays itself to avoid clogging the bot. However, this
        will degrade reapply-role performance until it's finished.
        """
        async with self.lock:
            with self.bot.sql.transaction():
                for i, member in enumerate(self.bot.get_all_members()):
                    self.bot.sql.roles.update_saved_roles(member)
                    if i % 20 == 0:
                        await asyncio.sleep(0.2)

    def setup(self):
        logger.info("Running member role update in background")
        self.bot.loop.create_task(self.bg_setup())

    async def member_update(self, before, after):
        if before.roles == after.roles:
            return

        entry = (before, after)
        if entry in self.recent_updates:
            return
        else:
            self.recent_updates.append(entry)

        special_roles = self.bot.sql.settings.get_special_roles(after.guild)
        if special_roles.guest_role in after.roles:
            return

        await self.save_roles(after)

    def get_reapply_roles(self, guild):
        logger.debug(
            "Getting possible reapplication roles for guild '%s' (%d)",
            guild.name,
            guild.id,
        )
        reapply_roles = self.bot.sql.settings.get_reapply_roles(guild)
        can_reapply = list(reapply_roles)

        special_roles = self.bot.sql.settings.get_special_roles(guild)
        if special_roles.mute_role is not None:
            can_reapply.append(special_roles.mute_role)
        if special_roles.jail_role is not None:
            can_reapply.append(special_roles.jail_role)

        if "SelfAssignableRoles" in self.bot.cogs:
            can_reapply.extend(self.bot.sql.roles.get_assignable_roles(guild))

        return can_reapply

    def get_roles_to_reapply(self, member):
        roles = self.bot.sql.roles.get_saved_roles(member)
        if not roles:
            logger.debug("No roles to reapply, user is new")
            return None

        can_reapply = self.get_reapply_roles(member.guild)
        return list(filter(lambda r: r in can_reapply, roles))

    @commands.guild_only()
    @commands.command(name="savedroles", aliases=["saveroles", "userroles", "uroles"])
    async def saved_roles(self, ctx, user: UserConv = None):
        """
        Returns all roles that would be reapplied when a given user rejoins.
        """
        if user is None:
            member = ctx.author
            mention = ctx.author.mention
        else:
            member = FakeMember(id=user.id, name=user.name, guild=ctx.guild)
            mention = user.mention

        roles = self.get_roles_to_reapply(member)
        if roles:
            roles.sort(key=lambda r: r.position, reverse=True)
            role_list = " ".join(role.mention for role in roles)
            sep = "\n\n" if len(roles) > 3 else " "

            embed = discord.Embed(colour=discord.Colour.dark_teal())
            embed.title = "\N{MILITARY MEDAL} Roles which would be applied on join"
            embed.description = f"{mention}:{sep}{role_list}"
        else:
            embed = discord.Embed(colour=discord.Colour.dark_purple())
            embed.description = f"No roles are saved for {mention}."

        await ctx.send(embed=embed)

    async def reapply_roles(self, member):
        roles = self.get_roles_to_reapply(member)
        if roles is None:
            return None

        logger.info(
            "Reapplying roles to member '%s' (%d): [%s]",
            member.name,
            member.id,
            ", ".join(role.name for role in roles),
        )
        await member.add_roles(
            *roles, reason="Automatically reapplying roles", atomic=True
        )

        content = (
            f"Reapplied roles to {member.mention}: {', '.join(f'`{role.name}`' for role in roles)}"
            if roles
            else f"Reapplied no roles to {member.mention}"
        )
        self.journal.send(
            "reapply", member.guild, content, member=member, roles=roles, icon="role"
        )
        return roles

    async def save_roles(self, member):
        logger.info(
            "Member '%s' (%d) updated roles in '%s' (%d)",
            member.name,
            member.id,
            member.guild.name,
            member.guild.id,
        )

        async with self.lock:
            with self.bot.sql.transaction():
                self.bot.sql.roles.update_saved_roles(member)

        content = f"Saved updated roles for {user_discrim(member)}"
        self.journal.send("save", member.guild, content, member=member, icon="save")
33.576271
99
0.621235
751
5,943
4.793609
0.283622
0.025278
0.025
0.016667
0.183889
0.157222
0.137778
0.137778
0.092778
0
0
0.003969
0.27932
5,943
176
100
33.767045
0.836563
0.079085
0
0.184874
0
0.008403
0.132913
0.014001
0
0
0
0
0
1
0.033613
false
0
0.067227
0
0.184874
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d2e1a90ddfac3c804d234d5ca9b2888a866f064
9,292
py
Python
app/common/util.py
openstreetmap-polska/gugik2osm
60ebd0660ed55e3b3db0f034486a607975c5eb45
[ "MIT" ]
18
2020-01-31T11:19:28.000Z
2022-01-05T08:58:51.000Z
app/common/util.py
openstreetmap-polska/gugik2osm
60ebd0660ed55e3b3db0f034486a607975c5eb45
[ "MIT" ]
103
2019-12-11T11:35:01.000Z
2022-03-01T21:10:50.000Z
app/common/util.py
openstreetmap-polska/gugik2osm
60ebd0660ed55e3b3db0f034486a607975c5eb45
[ "MIT" ]
6
2020-12-06T14:53:43.000Z
2021-04-18T18:09:29.000Z
import logging
from dataclasses import dataclass
from typing import List, Union, Dict, Any, Tuple

from lxml import etree


@dataclass
class Feature:
    id: str
    tags: Dict[str, Any]
    geojson_geometry: Dict[str, Any]


def to_geojson_dict(features: List[Feature]) -> Dict[str, Any]:
    results = {
        'type': 'FeatureCollection',
        'features': [
            {
                'type': 'Feature',
                'geometry': feature.geojson_geometry,
                'properties': {
                    'id': feature.id,
                    'tags': feature.tags
                }
            }
            for feature in features
        ]
    }
    return results


@dataclass
class InputPoint:
    tags: Dict[str, Any]
    latitude: float
    longitude: float


@dataclass
class InputLine:
    tags: Dict[str, Any]
    list_of_coordinate_pairs: List[Tuple[float, float]]


@dataclass
class InputPolygon:
    tags: Dict[str, Any]
    outer_ring: List[Tuple[float, float]]
    inner_rings: List[List[Tuple[float, float]]]


@dataclass
class InputMultiPolygon:
    tags: Dict[str, Any]
    outer_rings: List[List[Tuple[float, float]]]
    inner_rings: List[List[Tuple[float, float]]]


@dataclass
class Node:
    id: int
    tags: Dict[str, Any]
    latitude: float
    longitude: float

    def as_xml_element(self) -> etree.Element:
        tags_elements = [etree.Element('tag', k=key, v=str(value)) for key, value in self.tags.items()]
        node_element = etree.Element('node', id=str(self.id), lat=str(self.latitude), lon=str(self.longitude))
        for elem in tags_elements:
            node_element.append(elem)
        return node_element


@dataclass
class Way:
    id: int
    tags: Dict[str, Any]
    node_ids: List[int]

    def as_xml_element(self) -> etree.Element:
        tags_elements = [etree.Element('tag', k=key, v=str(value)) for key, value in self.tags.items()]
        node_id_elements = [etree.Element('nd', ref=str(node_id)) for node_id in self.node_ids]
        way_element = etree.Element('way', id=str(self.id))
        for elem in node_id_elements:
            way_element.append(elem)
        for elem in tags_elements:
            way_element.append(elem)
        return way_element


@dataclass
class RelationMember:
    type: str
    id: int
    role: str

    def as_xml_element(self) -> etree.Element:
        return etree.Element('member', type=self.type, ref=str(self.id), role=self.role)


@dataclass
class Relation:
    id: int
    tags: Dict[str, Any]
    members: List[RelationMember]

    def as_xml_element(self) -> etree.Element:
        tags_elements = [etree.Element('tag', k=key, v=str(value)) for key, value in self.tags.items()]
        member_elements = [member.as_xml_element() for member in self.members]
        relation_element = etree.Element('relation', id=str(self.id))
        for elem in member_elements:
            relation_element.append(elem)
        for elem in tags_elements:
            relation_element.append(elem)
        return relation_element


class DecreasingSequence:
    def __init__(self, starting_value: int = 0, step: int = -1):
        self.value = starting_value
        if step == 0:
            raise ValueError('Step cannot be equal zero.')
        if step > 0:
            logging.warning('DecreasingSequence given step greater than zero. '
                            'Sequence will increase instead of decreasing.')
        self.step = step

    def next_value(self):
        self.value += self.step
        return self.value


def trim_coordinates(lat: float, lon: float) -> Tuple[float, float]:
    return round(lat, 7), round(lon, 7)


def input_feature_factory(geom_type: str, **kwargs) -> Union[InputPoint, InputLine, InputPolygon]:
    if geom_type == 'POINT':
        feature = InputPoint(tags=kwargs['tags'], latitude=kwargs['latitude'], longitude=kwargs['longitude'])
    elif geom_type == 'LINESTRING':
        feature = InputLine(tags=kwargs['tags'], list_of_coordinate_pairs=kwargs['list_of_coordinate_pairs'])
    elif geom_type == 'POLYGON':
        feature = InputPolygon(tags=kwargs['tags'], outer_ring=kwargs['outer_ring'], inner_rings=kwargs['inner_rings'])
    elif geom_type == 'MULTIPOLYGON':
        feature = InputMultiPolygon(tags=kwargs['tags'], outer_rings=kwargs['outer_rings'],
                                    inner_rings=kwargs['inner_rings'])
    else:
        raise AttributeError(f'Geometry type: {geom_type} currently not supported.')
    return feature


def convert_to_osm_style_objects(
    list_of_features: List[Union[InputPoint, InputLine, InputPolygon, InputMultiPolygon]]
) -> Tuple[List[Node], List[Way], List[Relation]]:
    """Method converts input features (points, lines, polygons) into OSM style objects (nodes, ways, relations)."""
    id_generator = DecreasingSequence()
    list_of_nodes: List[Node] = []
    node_dict: Dict[Tuple[float, float], Node] = {}
    list_of_ways: List[Way] = []
    list_of_relations: List[Relation] = []

    def create_way(list_of_coordinates: List[Tuple[float, float]], tags: Dict[str, Any]) -> int:
        node_ids = []
        for coordinates in list_of_coordinates:
            lat_lon_tuple = trim_coordinates(*coordinates)
            if node_dict.get(lat_lon_tuple):
                node_id = node_dict[lat_lon_tuple].id
            else:
                new_node = Node(id_generator.next_value(), {}, *lat_lon_tuple)
                node_id = new_node.id
                node_dict[lat_lon_tuple] = new_node
                list_of_nodes.append(new_node)
            node_ids.append(node_id)
        w = Way(id_generator.next_value(), tags, node_ids)
        list_of_ways.append(w)
        return w.id

    expected_classes = [InputPoint, InputLine, InputPolygon, InputMultiPolygon]
    for feature in list_of_features:
        if isinstance(feature, InputPoint):
            lat, lon = trim_coordinates(feature.latitude, feature.longitude)
            if node_dict.get((lat, lon)):
                existing_node = node_dict[(lat, lon)]
                existing_node.tags = {**existing_node.tags, **feature.tags}
                logging.warning(f'Node with coordinates {lat}, {lon} already exists in dictionary. Merging tags.')
                continue
            n = Node(id_generator.next_value(), feature.tags, lat, lon)
            node_dict[(lat, lon)] = n
            list_of_nodes.append(n)
        elif isinstance(feature, InputLine):
            create_way(feature.list_of_coordinate_pairs, feature.tags)
        elif isinstance(feature, InputPolygon):
            if len(feature.inner_rings) == 0:
                # create way
                create_way(feature.outer_ring, feature.tags)
            else:
                # create a relation
                outer_id = create_way(feature.outer_ring, dict())
                inner_ids = [create_way(ring, dict()) for ring in feature.inner_rings]
                members = [RelationMember('way', outer_id, 'outer')] + \
                          [RelationMember('way', i, 'inner') for i in inner_ids]
                relation_tags = {**feature.tags, 'type': 'multipolygon'}
                r = Relation(id_generator.next_value(), relation_tags, members)
                list_of_relations.append(r)
        elif isinstance(feature, InputMultiPolygon):
            outer_ids = [create_way(ring, dict()) for ring in feature.outer_rings]
            inner_ids = [create_way(ring, dict()) for ring in feature.inner_rings]
            members = [RelationMember('way', i, 'outer') for i in outer_ids] + \
                      [RelationMember('way', i, 'inner') for i in inner_ids]
            relation_tags = {**feature.tags, 'type': 'multipolygon'}
            r = Relation(id_generator.next_value(), relation_tags, members)
            list_of_relations.append(r)
        else:
            raise ValueError(f'Feature is not one of expected types: {type(feature)}. Expected one of: {expected_classes}')

    return list_of_nodes, list_of_ways, list_of_relations


def create_osm_xml(list_of_features: List[Union[InputPoint, InputLine, InputPolygon]]) -> etree.Element:
    """Method builds an OSM XML tree (an 'osm' root element) from the given input features."""
    root = etree.Element('osm', version='0.6')
    list_of_nodes, list_of_ways, list_of_relations = convert_to_osm_style_objects(list_of_features)
    for node in list_of_nodes:
        root.append(node.as_xml_element())
    for way in list_of_ways:
        root.append(way.as_xml_element())
    for relation in list_of_relations:
        root.append(relation.as_xml_element())
    return root


def bbox_to_geojson_geometry(bbox: Tuple[float, float, float, float]) -> Dict[str, Any]:
    return {
        "type": "Polygon",
        "coordinates": [
            [
                [bbox[0], bbox[1]],
                [bbox[2], bbox[1]],
                [bbox[2], bbox[3]],
                [bbox[0], bbox[3]],
                [bbox[0], bbox[1]]
            ]
        ]
    }


def create_geojson_dict(list_of_geometries: List[dict], list_of_properties: List[dict]) -> dict:
    response_dict = {
        'type': 'FeatureCollection',
        'features': [
            {
                'type': 'Feature',
                'geometry': geom,
                'properties': {
                    **properties
                }
            }
            for geom, properties in zip(list_of_geometries, list_of_properties)
        ]
    }
    return response_dict
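A small end-to-end sketch using the factory and XML builder above (coordinates and tags invented):

point = input_feature_factory('POINT', tags={'amenity': 'bench'},
                              latitude=52.23, longitude=21.01)
polygon = input_feature_factory('POLYGON', tags={'building': 'yes'},
                                outer_ring=[(52.0, 21.0), (52.0, 21.1),
                                            (52.1, 21.1), (52.0, 21.0)],
                                inner_rings=[])
root = create_osm_xml([point, polygon])
print(etree.tostring(root, pretty_print=True).decode())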
34.287823
133
0.62344
1,136
9,292
4.898768
0.138204
0.033423
0.021563
0.022642
0.35867
0.296316
0.256963
0.23522
0.17628
0.156873
0
0.002768
0.2613
9,292
270
134
34.414815
0.807984
0.015497
0
0.2891
0
0
0.0787
0.002627
0
0
0
0
0
1
0.066351
false
0
0.018957
0.014218
0.322275
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d30e5200b0df52892d77fbbb3bc4df9f13ca5c1
477
py
Python
Infra/Library/Uteis.py
buenex/Desafio-Nilo
64a4452e847020d54eb4b0d5ab7bbc86c1afa546
[ "MIT" ]
null
null
null
Infra/Library/Uteis.py
buenex/Desafio-Nilo
64a4452e847020d54eb4b0d5ab7bbc86c1afa546
[ "MIT" ]
null
null
null
Infra/Library/Uteis.py
buenex/Desafio-Nilo
64a4452e847020d54eb4b0d5ab7bbc86c1afa546
[ "MIT" ]
null
null
null
import random


class Uteis():
    def gerar_nome(self, length):
        # build a random name of the requested length from mixed-case letters
        letters = "AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz"
        result = ""
        for n in range(int(length)):
            result += letters[random.randint(0, len(letters) - 1)]
        return result

    def gerar_email(self):
        # random 8-letter local part plus a random domain from the list
        domains = ["teste", "mail", "outmail", "gmail"]
        result = self.gerar_nome(8) + "@" + domains[random.randint(0, len(domains) - 1)] + ".com"
        return result
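Example use of the helpers above (output is random on every run; the shown values only illustrate the shape of the result):

uteis = Uteis()
print(uteis.gerar_nome(8))    # e.g. 'QkTzAeLm'
print(uteis.gerar_email())    # e.g. 'XyZaBcDe@gmail.com'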
31.8
87
0.616352
52
477
5.596154
0.576923
0.054983
0.09622
0.116838
0
0
0
0
0
0
0
0.013812
0.24109
477
15
88
31.8
0.790055
0
0
0.166667
0
0
0.16318
0.108787
0
0
0
0
0
1
0.166667
false
0
0.083333
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d3a6948bd5e57b6f976e39bbd2a75b6e3d5c0bc
25,302
py
Python
src/olympia/blocklist/tests/test_views.py
jpetto/olympia
f4e9badac9634657068dfbd4733ab5d17798e3f6
[ "BSD-3-Clause" ]
null
null
null
src/olympia/blocklist/tests/test_views.py
jpetto/olympia
f4e9badac9634657068dfbd4733ab5d17798e3f6
[ "BSD-3-Clause" ]
null
null
null
src/olympia/blocklist/tests/test_views.py
jpetto/olympia
f4e9badac9634657068dfbd4733ab5d17798e3f6
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import base64 from datetime import datetime from xml.dom import minidom from django.conf import settings from django.core.cache import cache from nose.tools import eq_, ok_ from olympia import amo from olympia.amo.tests import TestCase from olympia.amo.urlresolvers import reverse from olympia.blocklist.models import ( BlocklistApp, BlocklistCA, BlocklistDetail, BlocklistGfx, BlocklistItem, BlocklistIssuerCert, BlocklistPlugin, BlocklistPref) base_xml = """ <?xml version="1.0"?> <blocklist xmlns="http://www.mozilla.org/2006/addons-blocklist"> </blocklist> """ class XMLAssertsMixin(object): def assertOptional(self, obj, field, xml_field): """Make sure that if the field isn't filled in, it's not in the XML.""" # Save the initial value. initial = getattr(obj, field) try: # If not set, the field isn't in the XML. obj.update(**{field: ''}) eq_(self.dom(self.fx4_url).getElementsByTagName(xml_field), []) # If set, it's in the XML. obj.update(**{field: 'foobar'}) element = self.dom(self.fx4_url).getElementsByTagName(xml_field)[0] eq_(element.firstChild.nodeValue, 'foobar') finally: obj.update(**{field: initial}) def assertAttribute(self, obj, field, tag, attr_name): # Save the initial value. initial = getattr(obj, field) try: # If set, it's in the XML. obj.update(**{field: 'foobar'}) element = self.dom(self.fx4_url).getElementsByTagName(tag)[0] eq_(element.getAttribute(attr_name), 'foobar') finally: obj.update(**{field: initial}) def assertEscaped(self, obj, field): """Make sure that the field content is XML escaped.""" obj.update(**{field: 'http://example.com/?foo=<bar>&baz=crux'}) r = self.client.get(self.fx4_url) assert 'http://example.com/?foo=&lt;bar&gt;&amp;baz=crux' in r.content class BlocklistViewTest(TestCase): def setUp(self): super(BlocklistViewTest, self).setUp() self.fx4_url = reverse('blocklist', args=[3, amo.FIREFOX.guid, '4.0']) self.fx2_url = reverse('blocklist', args=[2, amo.FIREFOX.guid, '2.0']) self.tb4_url = reverse('blocklist', args=[3, amo.THUNDERBIRD.guid, '4.0']) self.mobile_url = reverse('blocklist', args=[2, amo.MOBILE.guid, '.9']) cache.clear() self.details = BlocklistDetail.objects.create() def create_blplugin(self, app_guid=None, app_min=None, app_max=None, *args, **kw): plugin = BlocklistPlugin.objects.create(*args, **kw) app = BlocklistApp.objects.create(blplugin=plugin, guid=app_guid, min=app_min, max=app_max) return plugin, app def normalize(self, s): return '\n'.join(x.strip() for x in s.split()) def eq_(self, x, y): return eq_(self.normalize(x), self.normalize(y)) def dom(self, url): r = self.client.get(url) return minidom.parseString(r.content) class BlocklistItemTest(XMLAssertsMixin, BlocklistViewTest): def setUp(self): super(BlocklistItemTest, self).setUp() self.item = BlocklistItem.objects.create(guid='guid@addon.com', details=self.details) self.pref = BlocklistPref.objects.create(blitem=self.item, pref='foo.bar') self.app = BlocklistApp.objects.create(blitem=self.item, guid=amo.FIREFOX.guid) def stupid_unicode_test(self): junk = u'\xc2\x80\x15\xc2\x80\xc3' url = reverse('blocklist', args=[3, amo.FIREFOX.guid, junk]) # Just make sure it doesn't fail. 
eq_(self.client.get(url).status_code, 200) def test_content_type(self): response = self.client.get(self.fx4_url) eq_(response['Content-Type'], 'text/xml') def test_empty_string_goes_null_on_save(self): b = BlocklistItem(guid='guid', min='', max='', os='') b.save() assert b.min is None assert b.max is None assert b.os is None def test_lastupdate(self): def eq(a, b): eq_(a, b.replace(microsecond=0)) def find_lastupdate(): bl = self.dom(self.fx4_url).getElementsByTagName('blocklist')[0] t = int(bl.getAttribute('lastupdate')) / 1000 return datetime.fromtimestamp(t) eq(find_lastupdate(), self.item.created) self.item.save() eq(find_lastupdate(), self.item.modified) plugin, app = self.create_blplugin(app_guid=amo.FIREFOX.guid) eq(find_lastupdate(), plugin.created) plugin.save() eq(find_lastupdate(), plugin.modified) gfx = BlocklistGfx.objects.create(guid=amo.FIREFOX.guid) eq(find_lastupdate(), gfx.created) gfx.save() eq(find_lastupdate(), gfx.modified) assert (self.item.created != self.item.modified != plugin.created != plugin.modified != gfx.created != gfx.modified) def test_no_items(self): self.item.delete() dom = self.dom(self.fx4_url) children = dom.getElementsByTagName('blocklist')[0].childNodes # There are only text nodes. assert all(e.nodeType == 3 for e in children) def test_existing_user_cookie(self): self.client.cookies[settings.BLOCKLIST_COOKIE] = 'adfadf' self.client.get(self.fx4_url) eq_(self.client.cookies[settings.BLOCKLIST_COOKIE].value, 'adfadf') def test_url_params(self): eq_(self.client.get(self.fx4_url).status_code, 200) eq_(self.client.get(self.fx2_url).status_code, 200) # We ignore trailing url parameters. eq_(self.client.get(self.fx4_url + 'other/junk/').status_code, 200) def test_app_guid(self): # There's one item for Firefox. r = self.client.get(self.fx4_url) eq_(r.status_code, 200) eq_(len(r.context['items']), 1) # There are no items for mobile. r = self.client.get(self.mobile_url) eq_(r.status_code, 200) eq_(len(r.context['items']), 0) # Without the app constraint we see the item. self.app.delete() r = self.client.get(self.mobile_url) eq_(r.status_code, 200) eq_(len(r.context['items']), 1) def test_item_guid(self): items = self.dom(self.fx4_url).getElementsByTagName('emItem') eq_(len(items), 1) eq_(items[0].getAttribute('id'), 'guid@addon.com') def test_block_id(self): item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] eq_(item.getAttribute('blockID'), 'i' + str(self.details.id)) def test_item_os(self): item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] assert 'os' not in item.attributes.keys() self.item.update(os='win,mac') item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] eq_(item.getAttribute('os'), 'win,mac') def test_item_pref(self): self.item.update(severity=2) eq_(len(self.vr()), 1) item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] prefs = item.getElementsByTagName('prefs') pref = prefs[0].getElementsByTagName('pref') eq_(pref[0].firstChild.nodeValue, self.pref.pref) def test_item_severity(self): self.item.update(severity=2) eq_(len(self.vr()), 1) item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] vrange = item.getElementsByTagName('versionRange') eq_(vrange[0].getAttribute('severity'), '2') def test_item_severity_zero(self): # Don't show severity if severity==0. 
self.item.update(severity=0, min='0.1') eq_(len(self.vr()), 1) item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] vrange = item.getElementsByTagName('versionRange') eq_(vrange[0].getAttribute('minVersion'), '0.1') assert not vrange[0].hasAttribute('severity') def vr(self): item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] return item.getElementsByTagName('versionRange') def test_item_version_range(self): self.item.update(min='0.1') eq_(len(self.vr()), 1) eq_(self.vr()[0].attributes.keys(), ['minVersion']) eq_(self.vr()[0].getAttribute('minVersion'), '0.1') self.item.update(max='0.2') keys = self.vr()[0].attributes.keys() eq_(len(keys), 2) ok_('minVersion' in keys) ok_('maxVersion' in keys) eq_(self.vr()[0].getAttribute('minVersion'), '0.1') eq_(self.vr()[0].getAttribute('maxVersion'), '0.2') def test_item_multiple_version_range(self): # There should be two <versionRange>s under one <emItem>. self.item.update(min='0.1', max='0.2') BlocklistItem.objects.create(guid=self.item.guid, severity=3) item = self.dom(self.fx4_url).getElementsByTagName('emItem') eq_(len(item), 1) vr = item[0].getElementsByTagName('versionRange') eq_(len(vr), 2) eq_(vr[0].getAttribute('minVersion'), '0.1') eq_(vr[0].getAttribute('maxVersion'), '0.2') eq_(vr[1].getAttribute('severity'), '3') def test_item_target_app(self): app = self.app self.app.delete() self.item.update(severity=2) version_range = self.vr()[0] eq_(version_range.getElementsByTagName('targetApplication'), []) app.save() version_range = self.vr()[0] target_app = version_range.getElementsByTagName('targetApplication') eq_(len(target_app), 1) eq_(target_app[0].getAttribute('id'), amo.FIREFOX.guid) app.update(min='0.1', max='*') version_range = self.vr()[0] target_app = version_range.getElementsByTagName('targetApplication') eq_(target_app[0].getAttribute('id'), amo.FIREFOX.guid) tvr = target_app[0].getElementsByTagName('versionRange') eq_(tvr[0].getAttribute('minVersion'), '0.1') eq_(tvr[0].getAttribute('maxVersion'), '*') def test_item_multiple_apps(self): # Make sure all <targetApplication>s go under the same <versionRange>. self.app.update(min='0.1', max='0.2') BlocklistApp.objects.create(guid=amo.FIREFOX.guid, blitem=self.item, min='3.0', max='3.1') version_range = self.vr()[0] apps = version_range.getElementsByTagName('targetApplication') eq_(len(apps), 2) eq_(apps[0].getAttribute('id'), amo.FIREFOX.guid) vr = apps[0].getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('minVersion'), '0.1') eq_(vr.getAttribute('maxVersion'), '0.2') eq_(apps[1].getAttribute('id'), amo.FIREFOX.guid) vr = apps[1].getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('minVersion'), '3.0') eq_(vr.getAttribute('maxVersion'), '3.1') def test_item_empty_version_range(self): # No version_range without an app, min, max, or severity. self.app.delete() self.item.update(min=None, max=None, severity=None) eq_(len(self.vr()), 0) def test_item_empty_target_app(self): # No empty <targetApplication>. 
self.item.update(severity=1) self.app.delete() eq_(self.dom(self.fx4_url).getElementsByTagName('targetApplication'), []) def test_item_target_empty_version_range(self): app = self.dom(self.fx4_url).getElementsByTagName('targetApplication') eq_(app[0].getElementsByTagName('versionRange'), []) def test_name(self): self.assertAttribute(self.item, field='name', tag='emItem', attr_name='name') def test_creator(self): self.assertAttribute(self.item, field='creator', tag='emItem', attr_name='creator') def test_homepage_url(self): self.assertAttribute(self.item, field='homepage_url', tag='emItem', attr_name='homepageURL') def test_update_url(self): self.assertAttribute(self.item, field='update_url', tag='emItem', attr_name='updateURL') def test_urls_escaped(self): self.assertEscaped(self.item, 'homepage_url') self.assertEscaped(self.item, 'update_url') class BlocklistPluginTest(XMLAssertsMixin, BlocklistViewTest): def setUp(self): super(BlocklistPluginTest, self).setUp() self.plugin, self.app = self.create_blplugin(app_guid=amo.FIREFOX.guid, details=self.details) def test_no_plugins(self): dom = BlocklistViewTest.dom(self, self.mobile_url) children = dom.getElementsByTagName('blocklist')[0].childNodes # There are only text nodes. assert all(e.nodeType == 3 for e in children) def dom(self, url=None): url = url or self.fx4_url r = self.client.get(url) d = minidom.parseString(r.content) return d.getElementsByTagName('pluginItem')[0] def test_plugin_empty(self): self.app.delete() eq_(self.dom().attributes.keys(), ['blockID']) eq_(self.dom().getElementsByTagName('match'), []) eq_(self.dom().getElementsByTagName('versionRange'), []) def test_block_id(self): item = self.dom(self.fx4_url) eq_(item.getAttribute('blockID'), 'p' + str(self.details.id)) def test_plugin_os(self): self.plugin.update(os='win') eq_(sorted(self.dom().attributes.keys()), ['blockID', 'os']) eq_(self.dom().getAttribute('os'), 'win') def test_plugin_xpcomabi(self): self.plugin.update(xpcomabi='win') eq_(sorted(self.dom().attributes.keys()), ['blockID', 'xpcomabi']) eq_(self.dom().getAttribute('xpcomabi'), 'win') def test_plugin_name(self): self.plugin.update(name='flash') match = self.dom().getElementsByTagName('match') eq_(len(match), 1) eq_(dict(match[0].attributes.items()), {'name': 'name', 'exp': 'flash'}) def test_plugin_description(self): self.plugin.update(description='flash') match = self.dom().getElementsByTagName('match') eq_(len(match), 1) eq_(dict(match[0].attributes.items()), {'name': 'description', 'exp': 'flash'}) def test_plugin_filename(self): self.plugin.update(filename='flash') match = self.dom().getElementsByTagName('match') eq_(len(match), 1) eq_(dict(match[0].attributes.items()), {'name': 'filename', 'exp': 'flash'}) def test_plugin_severity(self): self.plugin.update(severity=2) v = self.dom().getElementsByTagName('versionRange')[0] eq_(v.getAttribute('severity'), '2') def test_plugin_severity_zero(self): self.plugin.update(severity=0) v = self.dom().getElementsByTagName('versionRange')[0] eq_(v.getAttribute('severity'), '0') def test_plugin_no_target_app(self): self.plugin.update(severity=1, min='1', max='2') self.app.delete() vr = self.dom().getElementsByTagName('versionRange')[0] eq_(vr.getElementsByTagName('targetApplication'), [], 'There should not be a <targetApplication> if there was no app') eq_(vr.getAttribute('severity'), '1') eq_(vr.getAttribute('minVersion'), '1') eq_(vr.getAttribute('maxVersion'), '2') def test_plugin_with_target_app(self): self.plugin.update(severity=1) 
self.app.update(guid=amo.FIREFOX.guid, min='1', max='2') vr = self.dom().getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('severity'), '1') assert not vr.getAttribute('vulnerabilitystatus') app = vr.getElementsByTagName('targetApplication')[0] eq_(app.getAttribute('id'), amo.FIREFOX.guid) vr = app.getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('minVersion'), '1') eq_(vr.getAttribute('maxVersion'), '2') def test_plugin_with_multiple_target_apps(self): self.plugin.update(severity=1, min='5', max='6') self.app.update(guid=amo.FIREFOX.guid, min='1', max='2') BlocklistApp.objects.create(guid=amo.THUNDERBIRD.guid, min='3', max='4', blplugin=self.plugin) vr = self.dom().getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('severity'), '1') eq_(vr.getAttribute('minVersion'), '5') eq_(vr.getAttribute('maxVersion'), '6') assert not vr.getAttribute('vulnerabilitystatus') app = vr.getElementsByTagName('targetApplication')[0] eq_(app.getAttribute('id'), amo.FIREFOX.guid) vr = app.getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('minVersion'), '1') eq_(vr.getAttribute('maxVersion'), '2') vr = self.dom(self.tb4_url).getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('severity'), '1') eq_(vr.getAttribute('minVersion'), '5') eq_(vr.getAttribute('maxVersion'), '6') assert not vr.getAttribute('vulnerabilitystatus') app = vr.getElementsByTagName('targetApplication')[0] eq_(app.getAttribute('id'), amo.THUNDERBIRD.guid) vr = app.getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('minVersion'), '3') eq_(vr.getAttribute('maxVersion'), '4') def test_plugin_with_target_app_with_vulnerability(self): self.plugin.update(severity=0, vulnerability_status=2) self.app.update(guid=amo.FIREFOX.guid, min='1', max='2') vr = self.dom().getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('severity'), '0') eq_(vr.getAttribute('vulnerabilitystatus'), '2') app = vr.getElementsByTagName('targetApplication')[0] eq_(app.getAttribute('id'), amo.FIREFOX.guid) vr = app.getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('minVersion'), '1') eq_(vr.getAttribute('maxVersion'), '2') def test_plugin_with_severity_only(self): self.plugin.update(severity=1) self.app.delete() vr = self.dom().getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('severity'), '1') assert not vr.getAttribute('vulnerabilitystatus') eq_(vr.getAttribute('minVersion'), '') eq_(vr.getAttribute('maxVersion'), '') eq_(vr.getElementsByTagName('targetApplication'), [], 'There should not be a <targetApplication> if there was no app') def test_plugin_without_severity_and_with_vulnerability(self): self.plugin.update(severity=0, vulnerability_status=1) self.app.delete() vr = self.dom().getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('severity'), '0') eq_(vr.getAttribute('vulnerabilitystatus'), '1') eq_(vr.getAttribute('minVersion'), '') eq_(vr.getAttribute('maxVersion'), '') def test_plugin_without_severity_and_with_vulnerability_and_minmax(self): self.plugin.update(severity=0, vulnerability_status=1, min='2.0', max='3.0') self.app.delete() vr = self.dom().getElementsByTagName('versionRange')[0] eq_(vr.getAttribute('severity'), '0') eq_(vr.getAttribute('vulnerabilitystatus'), '1') eq_(vr.getAttribute('minVersion'), '2.0') eq_(vr.getAttribute('maxVersion'), '3.0') def test_plugin_apiver_lt_3(self): self.plugin.update(severity='2') # No min & max so the app matches. 
e = self.dom(self.fx2_url).getElementsByTagName('versionRange')[0] eq_(e.getAttribute('severity'), '2') eq_(e.getElementsByTagName('targetApplication'), []) # The app version is not in range. self.app.update(min='3.0', max='4.0') self.assertRaises(IndexError, self.dom, self.fx2_url) # The app is back in range. self.app.update(min='1.1') e = self.dom(self.fx2_url).getElementsByTagName('versionRange')[0] eq_(e.getAttribute('severity'), '2') eq_(e.getElementsByTagName('targetApplication'), []) def test_info_url(self): self.assertOptional(self.plugin, 'info_url', 'infoURL') self.assertEscaped(self.plugin, 'info_url') class BlocklistGfxTest(BlocklistViewTest): def setUp(self): super(BlocklistGfxTest, self).setUp() self.gfx = BlocklistGfx.objects.create( guid=amo.FIREFOX.guid, os='os', vendor='vendor', devices='x y z', feature='feature', feature_status='status', details=self.details, driver_version='version', driver_version_max='version max', driver_version_comparator='compare', hardware='giant_robot') def test_no_gfx(self): dom = self.dom(self.mobile_url) children = dom.getElementsByTagName('blocklist')[0].childNodes # There are only text nodes. assert all(e.nodeType == 3 for e in children) def test_gfx(self): r = self.client.get(self.fx4_url) dom = minidom.parseString(r.content) gfx = dom.getElementsByTagName('gfxBlacklistEntry')[0] def find(e): return gfx.getElementsByTagName(e)[0].childNodes[0].wholeText assert find('os') == self.gfx.os assert find('feature') == self.gfx.feature assert find('vendor') == self.gfx.vendor assert find('featureStatus') == self.gfx.feature_status assert find('driverVersion') == self.gfx.driver_version assert find('driverVersionMax') == self.gfx.driver_version_max expected_version_comparator = self.gfx.driver_version_comparator assert find('driverVersionComparator') == expected_version_comparator assert find('hardware') == self.gfx.hardware devices = gfx.getElementsByTagName('devices')[0] for device, val in zip(devices.getElementsByTagName('device'), self.gfx.devices.split(' ')): assert device.childNodes[0].wholeText == val def test_empty_devices(self): self.gfx.devices = None self.gfx.save() r = self.client.get(self.fx4_url) self.assertNotContains(r, '<devices>') def test_no_empty_nodes(self): self.gfx.update(os=None, vendor=None, devices=None, feature=None, feature_status=None, driver_version=None, driver_version_max=None, driver_version_comparator=None, hardware=None) r = self.client.get(self.fx4_url) self.assertNotContains(r, '<os>') self.assertNotContains(r, '<vendor>') self.assertNotContains(r, '<devices>') self.assertNotContains(r, '<feature>') self.assertNotContains(r, '<featureStatus>') self.assertNotContains(r, '<driverVersion>') self.assertNotContains(r, '<driverVersionMax>') self.assertNotContains(r, '<driverVersionComparator>') self.assertNotContains(r, '<hardware>') def test_block_id(self): item = (self.dom(self.fx4_url) .getElementsByTagName('gfxBlacklistEntry')[0]) eq_(item.getAttribute('blockID'), 'g' + str(self.details.id)) class BlocklistCATest(BlocklistViewTest): def setUp(self): super(BlocklistCATest, self).setUp() self.ca = BlocklistCA.objects.create(data=u'Ètå…, ≥•≤') def test_ca(self): r = self.client.get(self.fx4_url) dom = minidom.parseString(r.content) ca = dom.getElementsByTagName('caBlocklistEntry')[0] eq_(base64.b64decode(ca.childNodes[0].toxml()), 'Ètå…, ≥•≤') class BlocklistIssuerCertTest(BlocklistViewTest): def setUp(self): super(BlocklistIssuerCertTest, self).setUp() self.issuerCertBlock = BlocklistIssuerCert.objects.create( 
issuer='testissuer', serial='testserial', details=BlocklistDetail.objects.create(name='one')) self.issuerCertBlock2 = BlocklistIssuerCert.objects.create( issuer='anothertestissuer', serial='anothertestserial', details=BlocklistDetail.objects.create(name='two')) def test_extant_nodes(self): r = self.client.get(self.fx4_url) dom = minidom.parseString(r.content) certItem = dom.getElementsByTagName('certItem')[0] eq_(certItem.getAttribute('issuerName'), self.issuerCertBlock.issuer) serialNode = dom.getElementsByTagName('serialNumber')[0] serialNumber = serialNode.childNodes[0].wholeText eq_(serialNumber, self.issuerCertBlock.serial) certItem = dom.getElementsByTagName('certItem')[1] eq_(certItem.getAttribute('issuerName'), self.issuerCertBlock2.issuer) serialNode = dom.getElementsByTagName('serialNumber')[1] serialNumber = serialNode.childNodes[0].wholeText eq_(serialNumber, self.issuerCertBlock2.serial)
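The test module above leans heavily on xml.dom.minidom lookups; a self-contained sketch of that pattern, with a sample XML document invented here:

from xml.dom import minidom

xml = '<blocklist><emItem id="guid@addon.com" blockID="i1"/></blocklist>'
dom = minidom.parseString(xml)
item = dom.getElementsByTagName('emItem')[0]
assert item.getAttribute('id') == 'guid@addon.com'
assert item.getAttribute('blockID') == 'i1'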
40.41853
79
0.624891
2,968
25,302
5.198787
0.109501
0.023137
0.036293
0.016332
0.551977
0.490149
0.421646
0.378483
0.324498
0.302981
0
0.016569
0.229547
25,302
625
80
40.4832
0.774341
0.034266
0
0.326489
0
0
0.111298
0.00295
0
0
0
0
0.098563
1
0.143737
false
0
0.020534
0.00616
0.195072
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d3cd024737891057712136d2e705890110f9afe
4,896
py
Python
tools/generate_pseudo_label.py
Jmq14/FCOS
5b9b7c2757584b323545988838d020f5b2b9f002
[ "BSD-2-Clause" ]
null
null
null
tools/generate_pseudo_label.py
Jmq14/FCOS
5b9b7c2757584b323545988838d020f5b2b9f002
[ "BSD-2-Clause" ]
null
null
null
tools/generate_pseudo_label.py
Jmq14/FCOS
5b9b7c2757584b323545988838d020f5b2b9f002
[ "BSD-2-Clause" ]
1
2020-04-14T07:19:16.000Z
2020-04-14T07:19:16.000Z
import os
import numpy as np
from pycocotools.coco import COCO
import cv2
from tqdm import tqdm
import argparse
import json
import torch

from fcos_core.structures.bounding_box import BoxList
from fcos_core.structures.boxlist_ops import boxlist_iou


def generate_pseudo_label_with_confidence_score(boxes, image_id, score_thre):
    scores = boxes.get_field("scores")
    _, idx = scores.sort(0, descending=True)
    if isinstance(score_thre, float):
        keep = torch.nonzero(scores >= score_thre).squeeze(1)
    else:
        labels = boxes.get_field("labels")
        keep = torch.nonzero(scores >= score_thre[labels]).squeeze(1)
    return idx[:len(keep)]


def parse_predictions():
    pass


def new_annotation_json(pseudo_labels, img_id, ann_id):
    labels = pseudo_labels.get_field("labels").tolist()
    boxes = pseudo_labels.convert("xywh").bbox
    annos = []
    for box, c in zip(boxes, labels):
        annos.append({
            "id": ann_id,
            "image_id": img_id,
            "category_id": c,
            "bbox": box.tolist(),
            "segmentation": [[0., 0.]],
            "area": float(box[2] * box[3]),
            "iscrowd": 0,
            "ispseudo": True,
        })
        ann_id = ann_id + 1
    return annos, ann_id


def main(args):
    annFile = 'datasets/coco/annotations/instances_train2017_0.5.json'
    coco = COCO(annFile)
    with open(annFile, 'r') as f:
        result_json = json.load(f)
    annos_json = result_json['annotations']
    # anno_id = max([ann['id'] for ann in annos_json]) + 1

    output_dir = os.path.join(args.predictions, 'coco_2017_train_partial')
    image_ids = torch.load(os.path.join(output_dir, 'image_ids.pth'))
    predictions = torch.load(os.path.join(output_dir, 'predictions.pth'))
    anno_id = max(torch.load(os.path.join(output_dir, 'box_ids.pth'))) + 1
    imgIds = sorted(coco.getImgIds())

    threshold = args.confidence
    # threshold = torch.tensor([-1.0, 0.46633365750312805, 0.4409848749637604, 0.47267603874206543, 0.4707889258861542, 0.5220812559127808, 0.5358721613883972, 0.5226702690124512, 0.45160290598869324])
    iou_threshold = 0.5
    cpu_device = torch.device("cpu")
    partial_box_num = 0

    N = len(image_ids)
    for i in tqdm(range(N)):
        im_idx = image_ids[i]
        bbox = predictions[i]
        imginfo = coco.loadImgs(imgIds[im_idx])[0]
        image_width = imginfo['width']
        image_height = imginfo['height']

        # load annotations
        partial_anns = coco.loadAnns(coco.getAnnIds(imgIds=(imgIds[im_idx],)))
        # full_anns = coco_full.loadAnns(coco_full.getAnnIds(imgIds=(imgIds[im_idx],), catIds=catIds))
        partial_boxes = [obj["bbox"] for obj in partial_anns]
        partial_boxes_ids = set([obj["id"] for obj in partial_anns])
        partial_boxes = torch.as_tensor(partial_boxes).reshape(-1, 4)  # guard against no boxes
        partial_boxes = BoxList(partial_boxes, (image_width, image_height), mode="xywh").convert("xyxy")
        partial_box_num += len(partial_boxes_ids)

        # get predictions
        bbox = bbox.resize((image_width, image_height))
        bbox = bbox.to(cpu_device)

        # generate pseudo labels
        idx = generate_pseudo_label_with_confidence_score(bbox, im_idx, threshold)
        if len(idx) > 0:
            pseudo_labels = bbox[idx]
            scores = pseudo_labels.get_field("scores").tolist()
            # compute iou
            overlaps = boxlist_iou(partial_boxes, pseudo_labels)
            matched_id = [True] * len(pseudo_labels)
            # remove predictions for partial labels
            for i in range(len(partial_boxes)):
                matched = np.argmax(overlaps[i])
                if overlaps[i, matched] >= iou_threshold:
                    matched_id[matched] = False
            pseudo_labels = pseudo_labels[matched_id]
            # print(num, len(pseudo_labels))
            pseudo_annos, anno_id = new_annotation_json(pseudo_labels, imgIds[im_idx], anno_id)
            annos_json.extend(pseudo_annos)

    print('confidence threshold: {}'.format(threshold))
    result_json['annotations'] = annos_json
    with open(args.annotation, 'w') as f:
        json.dump(result_json, f)
    print(partial_box_num, len(result_json['annotations']))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--predictions",
                        help="prediction directory path. e.g output/stage1/",
                        type=str, default="/home/mengqinj/capstone/output/stage1/")
    parser.add_argument("--annotation",
                        help="output annotation path. e.g instances_train_2017.json",
                        type=str, default="instances_train_2017.json")
    parser.add_argument("--confidence",
                        help="confidence score threshold",
                        type=float, default=0.5)
    args = parser.parse_args()
    main(args)
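A standalone sketch of the confidence-threshold filtering used by generate_pseudo_label_with_confidence_score above, with made-up scores:

import torch

scores = torch.tensor([0.9, 0.3, 0.7, 0.1])
score_thre = 0.5
# torch.nonzero on a 1-D mask returns shape (n, 1); squeeze(1) flattens it
keep = torch.nonzero(scores >= score_thre).squeeze(1)
print(keep.tolist())  # [0, 2] -- indices of detections above the threshold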
35.478261
201
0.646446
621
4,896
4.872786
0.268921
0.047588
0.013219
0.014871
0.130205
0.093853
0.048249
0
0
0
0
0.04852
0.233864
4,896
138
202
35.478261
0.758198
0.102328
0
0
0
0
0.118358
0.037628
0
0
0
0
0
1
0.040816
false
0.010204
0.102041
0
0.163265
0.020408
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d3d92d541909aa34760d1f3e45a73e1d7ed39b8
6,562
py
Python
Learn_matplotlib.py
maufia/MyPyCourse
5818182992f93745bee4904768442e99837e6d61
[ "MIT" ]
null
null
null
Learn_matplotlib.py
maufia/MyPyCourse
5818182992f93745bee4904768442e99837e6d61
[ "MIT" ]
null
null
null
Learn_matplotlib.py
maufia/MyPyCourse
5818182992f93745bee4904768442e99837e6d61
[ "MIT" ]
null
null
null
"""Learn matplotlib""" import os import easygui as eg import csv import matplotlib.pyplot as plt TITLE = """Learn - Matplotlib """ def select_file() -> str: """Use EasyGUI to select a function""" current_directory = os.path.join(os.getcwd(), 'Data') selected_file = eg.fileopenbox(title=f'{TITLE}: Open a file', default=os.path.join(current_directory, ".."), filetypes="*.txt") print(f"Selected file: {os.path.basename(selected_file)}") print(f"In directory: {os.path.dirname(selected_file)}") return selected_file def read_file(data: dict) -> dict: """ Read the file. Save the original into the input data dictionary. :param data: dictionary for passing data :return: a data structure with data >>> data= {'orig': {'filename': os.path.join('Data', 'trends_cupcakes.csv')}} >>> read_file(data)['orig']['Name time-line'] 'Month' >>> read_file(data)['orig']['Name values'] 'Cupcake: (Worldwide)' """ data['orig']['x'] = [] data['orig']['values'] = [] with open(data['orig']['filename']) as csvfile: for row_count, row in enumerate(csv.reader(csvfile, delimiter=',')): if not row: continue if row_count == 0: data['orig']['category'] = row[0] continue if row_count == 2: data['orig']['Name x-line'] = row[0] data['orig']['Name values'] = row[1] else: data['orig']['x'].append(row[0]) try: data['orig']['values'].append(int(row[1])) except ValueError: data['orig']['values'].append(0) return data def display_original_data(data: dict) -> True: """ Display data :param data: data set to display needs 'time' and 'values' :return: True >>> data= {'orig': {'filename': os.path.join('Data', 'trends_cupcakes.csv')}} >>> data = read_file(data) >>> display_original_data(data) True """ time = range(len(data['orig']['x'])) # open figure fig = plt.figure() fig.suptitle(f"Analysis of searches for {data['orig']['Name values']}") # set up 2 subplots ax = fig.subplots(nrows=2, ncols=1) # first subplot ax[0].plot(time, data['orig']['values'], '.b') ax[0].set(xlabel='time [months]', ylabel='Number of searches', title='Searches per number of months') ax[0].grid() # second plot ax[1].plot(time, data['orig']['values'], '.r') ax[1].set_xlabel('time [months]') ax[1].set_ylabel('Number of searches') ax[1].set_title('Monthly searches') ax[1].grid() # proper axis and ticks labels = ax[1].get_xticklabels() for cnt, xtick in enumerate(ax[1].get_xticks()[0:-2]): labels[cnt] = data['orig']['x'][int(xtick)] ax[1].set_xticklabels(labels) # Rotate the ticks for tick in ax[1].get_xticklabels(): tick.set_rotation(55) # sort out fig.subplots_adjust(bottom=0.2, top=0.8, hspace=0.6) plt.show() return True def display_per_nation(data: dict) -> True: all_countries = data['orig']['x'] all_values = data['orig']['values'] max_size = 70 pie_labels, pie_sizes = [f'Rest of the World'], [0] for cnt, value in enumerate(all_values): if value >= max_size: pie_labels.append(all_countries[cnt]) pie_sizes.append(value) else: pie_sizes[0] += value explode = [0.0 for _ in pie_sizes] maximum_size: int = pie_sizes.index(max(pie_sizes)) explode[maximum_size] = .3 fig1, ax1 = plt.subplots() fig1.suptitle(f"Analysis of searches for {data['orig']['Name values']}") ax1.pie(pie_sizes, labels=pie_labels, explode=explode, shadow=True, autopct='%1.1f%%') # ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. 
fig2, ax2 = plt.subplots() fig2.suptitle(f"Analysis of searches for {data['orig']['Name values']}") ax2.barh(pie_labels, pie_sizes) ax2.set_yticks(range(len(pie_labels))) ax2.set_yticklabels(pie_labels) ax2.invert_yaxis() # labels read top-to-bottom ax2.set_xlabel('Searches') fig2.subplots_adjust(left=0.2, right=1.0) plt.show() return True def select_operation(data) -> True: """""" all_choices = {'Monthly trend': display_original_data, 'National data': display_per_nation} # Use Gui to select a choice choice: str = eg.buttonbox(msg="Select what to display", title=TITLE, choices=list(all_choices.keys()), image=os.path.join('Images', 'qm.png')) assert choice in all_choices, show_error_message('The choice is not available') # This is the clever bit!! Run the choice as a function all_choices[choice](data) return True def message_box(message: str) -> True: message += "\n\nFor resources see: https://matplotlib.org/gallery " eg.msgbox(title=TITLE, msg=message, ok_button='OK', image=os.path.join('Images', 'Learn.png')) return True def show_error_message(error_string="Error!") -> True: """ A message box can be used for alert of error, success, etc return: True """ eg.msgbox(title="Learn_EasyGUI: Example Error", image=os.path.join('Images', 'failure.gif'), msg=error_string) return True def main() -> True: # Initialse the data set data = {'orig': {'filename': ""}, 'analysis': {}} message_box("First select a data file") data['orig']['filename'] = select_file() data = read_file(data) select_operation(data) return True # -------------------------------------------------- if __name__ == "__main__": import click @click.group(help=TITLE) def cli(): pass @cli.command('run', help='Run full program') def cli_run(): main() @cli.command('test', help='Test csv testing') def cli_test(): import doctest failures_count, test_count = doctest.testmod(verbose=False) assert failures_count == 0, 'Test failure... bailing out' print(f'All {test_count} tests passed') @cli.command('disp', help='display short cut') def cli_disp(): data = {'orig': {'filename': os.path.join('Data', 'geoMap_cupcakes.csv')}} data = read_file(data) display_per_nation(data) cli(obj={})
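A standalone sketch of the "explode the largest slice" pie-chart idiom used in display_per_nation, with invented sizes and labels:

import matplotlib.pyplot as plt

sizes = [300, 120, 70]
labels = ['Rest of the World', 'US', 'UK']
explode = [0.0] * len(sizes)
explode[sizes.index(max(sizes))] = 0.3  # pull the largest slice out of the pie
fig, ax = plt.subplots()
ax.pie(sizes, labels=labels, explode=explode, shadow=True, autopct='%1.1f%%')
plt.show()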
31.247619
86
0.585949
853
6,562
4.390387
0.28136
0.053405
0.021362
0.024032
0.136716
0.086782
0.086782
0.063551
0.063551
0.063551
0
0.012454
0.253581
6,562
209
87
31.397129
0.752144
0.161384
0
0.106061
0
0
0.18857
0.0121
0
0
0
0
0.015152
1
0.090909
false
0.015152
0.045455
0
0.19697
0.022727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d3f8a189f427be39fe163edc538d242e89521a0
2,970
py
Python
docker/dstat/plugins/dstat_nfsstat4.py
hzy9819/GreenPlum_WooKongDB
9dca9b3bcd15f29b2a0136acc818064222220059
[ "PostgreSQL", "Apache-2.0" ]
34
2021-01-18T14:25:24.000Z
2021-06-05T03:21:10.000Z
docker/dstat/plugins/dstat_nfsstat4.py
hzy9819/GreenPlum_WooKongDB
9dca9b3bcd15f29b2a0136acc818064222220059
[ "PostgreSQL", "Apache-2.0" ]
null
null
null
docker/dstat/plugins/dstat_nfsstat4.py
hzy9819/GreenPlum_WooKongDB
9dca9b3bcd15f29b2a0136acc818064222220059
[ "PostgreSQL", "Apache-2.0" ]
2
2021-04-20T20:11:08.000Z
2021-06-02T02:56:16.000Z
### Author: Adam Michel <elfurbe@furbism.com>
### Based on work by: Dag Wieers <dag@wieers.com>

class dstat_plugin(dstat):
    def __init__(self):
        self.name = 'nfs4 client'
        # this vars/nick pair is the ones I considered relevant. Any set of the full list would work.
        self.vars = ('read', 'write', 'readdir', 'commit', 'getattr', 'create', 'link', 'remove')
        self.nick = ('read', 'writ', 'rdir', 'cmmt', 'gatr', 'crt', 'link', 'rmv')
        # this is every possible variable if you're into that
        #self.vars = ("read", "write", "commit", "open", "open_conf", "open_noat", "open_dgrd", "close",
        #             "setattr", "fsinfo", "renew", "setclntid", "confirm", "lock", "lockt", "locku",
        #             "access", "getattr", "lookup", "lookup_root", "remove", "rename", "link", "symlink",
        #             "create", "pathconf", "statfs", "readlink", "readdir", "server_caps", "delegreturn",
        #             "getacl", "setacl", "fs_locations", "rel_lkowner", "secinfo")
        # these are terrible shortnames for every possible variable
        #self.nick = ("read", "writ", "comt", "open", "opnc", "opnn", "opnd", "clse", "seta", "fnfo",
        #             "renw", "stcd", "cnfm", "lock", "lckt", "lcku", "accs", "gatr", "lkup", "lkp_r",
        #             "rem", "ren", "lnk", "slnk", "crte", "pthc", "stfs", "rdlk", "rdir", "scps", "delr",
        #             "gacl", "sacl", "fslo", "relo", "seco")
        self.type = 'd'
        self.width = 5
        self.scale = 1000
        self.open('/proc/net/rpc/nfs')

    def check(self):
        # other NFS modules had this, so I left it. It seems to work.
        info(1, 'Module %s is still experimental.' % self.filename)

    def extract(self):
        # list of fields from nfsstat, in order of output from cat /proc/net/rpc/nfs
        nfs4_names = ("version", "fieldcount", "null", "read", "write", "commit", "open", "open_conf",
                      "open_noat", "open_dgrd", "close", "setattr", "fsinfo", "renew", "setclntid",
                      "confirm", "lock", "lockt", "locku", "access", "getattr", "lookup", "lookup_root",
                      "remove", "rename", "link", "symlink", "create", "pathconf", "statfs", "readlink",
                      "readdir", "server_caps", "delegreturn", "getacl", "setacl", "fs_locations",
                      "rel_lkowner", "secinfo")
        for line in self.splitlines():
            fields = line.split()
            if fields[0] == "proc4":
                # just grab NFSv4 stats
                assert int(fields[1]) == len(fields[2:]), (
                    "reported field count (%d) does not match actual field count (%d)"
                    % (int(fields[1]), len(fields[2:])))
                for var in self.vars:
                    self.set2[var] = fields[nfs4_names.index(var)]
        for name in self.vars:
            self.val[name] = (int(self.set2[name]) - int(self.set1[name])) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)

# vim:ts=4:sw=4:et
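A self-contained sketch of the same /proc/net/rpc/nfs parsing idea outside of dstat, with a shortened proc4 line invented for illustration:

sample = "proc4 3 0 10 4"  # hypothetical: fieldcount=3, then null=0, read=10, write=4
nfs4_names = ("version", "fieldcount", "null", "read", "write")
fields = sample.split()
if fields[0] == "proc4":
    assert int(fields[1]) == len(fields[2:])
    counters = {name: int(v) for name, v in zip(nfs4_names[2:], fields[2:])}
    print(counters)  # {'null': 0, 'read': 10, 'write': 4}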
56.037736
162
0.545455
359
2,970
4.451253
0.554318
0.020025
0.015019
0.021277
0.330413
0.330413
0.305382
0.305382
0.305382
0.305382
0
0.011374
0.259933
2,970
52
163
57.115385
0.715651
0.412458
0
0
0
0
0.277003
0
0
0
0
0
0.035714
1
0.107143
false
0
0
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d4017e7261ae361b154fa97a6f870b3295280f2
1,873
py
Python
astropyp/instruments/decam/pipeline.py
fred3m/astropyp
414c9e6d84da2604c6466b2046827d8b1988edab
[ "BSD-3-Clause" ]
8
2016-04-28T22:19:22.000Z
2022-03-14T04:22:00.000Z
astropyp/instruments/decam/pipeline.py
fred3m/astropyp
414c9e6d84da2604c6466b2046827d8b1988edab
[ "BSD-3-Clause" ]
null
null
null
astropyp/instruments/decam/pipeline.py
fred3m/astropyp
414c9e6d84da2604c6466b2046827d8b1988edab
[ "BSD-3-Clause" ]
null
null
null
import datapyp
import logging
import warnings
import os

logger = logging.getLogger(__name__)


class DecamPipeError(Exception):
    pass


class Pipeline(datapyp.core.Pipeline):
    def __init__(self, **kwargs):
        from datapyp.utils import get_bool
        # Make sure that the user included a dictionary of paths to initialize the pipeline
        if 'paths' not in kwargs:
            raise DecamPipeError(
                "You must initialize a pipeline with the following paths: 'temp'")
        if ('stacks' not in kwargs['paths'] or
                'config' not in kwargs['paths'] or
                'log' not in kwargs['paths'] or
                'decam' not in kwargs['paths']):
            warnings.warn(
                "It is recommended to initialize a Pipeline with "
                "'log', 'stacks', 'config', 'decam' paths")
        # Check for the decam file index
        if 'connection' not in kwargs:
            warnings.warn("If you do not set an 'idx_connect_str' parts of the Pipeline may not work")
        else:
            idx_connect_str = kwargs['connection']
            if idx_connect_str.startswith('sqlite'):
                if not os.path.isfile(idx_connect_str[10:]):
                    logger.info('path %s', idx_connect_str[10:])
                    if 'create_idx' in kwargs:
                        if not kwargs['create_idx']:
                            raise DecamPipeError("Unable to locate DECam file index")
                    else:
                        if not get_bool(
                                "DECam file index does not exist, create it now? ('y'/'n')"):
                            raise DecamPipeError("Unable to locate DECam file index")
                    import astropyp.index as index
                    # Assumption: the image search root is the pipeline's 'decam' path
                    img_path = kwargs['paths']['decam']
                    recursive = get_bool(
                        "Search '{0}' recursively for images? ('y'/'n')".format(img_path))
                    index.build(img_path, idx_connect_str, True, recursive, True)
        datapyp.core.Pipeline.__init__(self, **kwargs)
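A minimal standalone sketch of the required-keys validation pattern used above, assuming only the key names taken from the warning message:

import warnings

RECOMMENDED_PATHS = ('log', 'stacks', 'config', 'decam')

def check_paths(paths: dict) -> None:
    # Warn about any recommended path keys that are missing
    missing = [key for key in RECOMMENDED_PATHS if key not in paths]
    if missing:
        warnings.warn("Missing recommended paths: {}".format(', '.join(missing)))

check_paths({'log': '/tmp/log', 'decam': '/data/decam'})  # warns about 'stacks' and 'config'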
49.289474
102
0.555259
216
1,873
4.731481
0.412037
0.054795
0.064579
0.062622
0.142857
0.09002
0.09002
0.09002
0
0
0
0.004105
0.349706
1,873
38
103
49.289474
0.834975
0.059797
0
0.117647
0
0
0.283115
0
0
0
0
0
0
1
0.029412
false
0.029412
0.147059
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d409d33559b4a6519981780018a2f54e1281d04
3,487
py
Python
powerlaw.py
AlexanderDavid/Powerlaw-Highway-Env
e3e3b6277e0a75e4dcbc7988a9cb144137328d22
[ "MIT" ]
null
null
null
powerlaw.py
AlexanderDavid/Powerlaw-Highway-Env
e3e3b6277e0a75e4dcbc7988a9cb144137328d22
[ "MIT" ]
null
null
null
powerlaw.py
AlexanderDavid/Powerlaw-Highway-Env
e3e3b6277e0a75e4dcbc7988a9cb144137328d22
[ "MIT" ]
null
null
null
import gym
import highway_env
from agent import Agent
import pandas as pd
import numpy as np

env = gym.make("highway-v0")
done = False

# Notes
# Action space between 0 and 4 inclusive
# 0 is merge left
# 1 is do nothing
# 2 is merge right
# 3 is speed up
# 4 is slow down
#
## Obs space is a 5x5 matrix with values between -1 and 1
## This represents a matrix with the labels:
## presence, x, y, vx, vy: Ego Vehicle
## presence, x, y, vx, vy: VEHICLE 1
## presence, x, y, vx, vy: VEHICLE 2
## presence, x, y, vx, vy: VEHICLE 3
##
## X increases over time
## Y = 0 in top line
## Y = 4 in next line
## Y = 8 in next lane
## Y = 12 in bottom lane

next_step = 1
while not env.vehicle.crashed:
    obs, _, _, _ = env.step(next_step)
    # print(pd.DataFrame.from_records([env.vehicle.to_dict()])["x", "y", "vx", "vy"])
    ego_dict = env.vehicle.to_dict()
    ego_agent = Agent(
        np.array([ego_dict["x"], ego_dict["y"] / 4]),
        np.array([ego_dict["x"] + 100, ego_dict["y"] / 4]),
        50, 50, 5,
        np.array([ego_dict["vx"], ego_dict["vy"] / 4]),
    )
    print(f"Ego (x, y): {ego_agent.pos[0], ego_agent.pos[1], ego_agent.vel[0], ego_agent.vel[1]}")
    # print(f"Ego (lane, lane_index): {env.vehicle.lane, env.vehicle.lane_index}")

    neighbors = []
    for vehicle in env.road.close_vehicles_to(
        env.vehicle, env.PERCEPTION_DISTANCE, see_behind=True
    ):
        adj_dict = vehicle.to_dict()
        neighbors.append(
            Agent(
                np.array([adj_dict["x"], adj_dict["y"] / 4]),
                np.array([adj_dict["x"] + 100, adj_dict["y"] / 4]),
                50, 50, 5,
                np.array([adj_dict["vx"], adj_dict["vy"] / 4]),
            )
        )
        print(f"Neighbor (x, y): {neighbors[-1].pos[0], neighbors[-1].pos[1], neighbors[-1].vel[0], neighbors[-1].vel[1], ego_agent.time_to_collision(neighbors[-1])}")

    # Add agents so the ego doesn't merge off of the edge of the lane
    neighbors.append(
        Agent(
            np.array([-1, ego_dict["y"] / 4]),
            np.array([adj_dict["x"] + 100, adj_dict["y"] / 4]),
            50, 50, 5,
            np.array([ego_dict["vx"], 0.5]),
        )
    )
    neighbors.append(
        Agent(
            np.array([5, ego_dict["y"] / 4]),
            np.array([adj_dict["x"] + 100, adj_dict["y"] / 4]),
            50, 50, 5,
            np.array([ego_dict["vx"], -0.5]),
        )
    )

    delta_v = ego_agent.computeForces(neighbors)
    print(delta_v)

    # If the X instruction is larger
    # If the X instruction is positive
    # if abs(delta_v[0]) == delta_v[0]:
    #     print("Speed up")
    # else:
    #     print("Slow down")

    lane_epsilon = 0.0125
    move_epsilon = 0.01

    def how_close(x):
        return abs(round(x) - x), round(x)

    laneness = how_close(ego_agent.pos[1])
    can_change = False
    if laneness[1] in [0, 1, 2, 3] and lane_epsilon > laneness[0]:
        can_change = True

    if can_change and abs(delta_v[1]) > move_epsilon:
        if abs(delta_v[1]) == delta_v[1]:
            print("Merge down")
            next_step = 2
        else:
            print("Merge up")
            next_step = 0
    else:
        if abs(delta_v[0]) == delta_v[0]:
            print("Speed up")
            next_step = 3
        else:
            print("Slow down")
            next_step = 4

    env.render()
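A standalone sketch of the how_close lane-snapping test above, showing what the epsilon check accepts and rejects:

def how_close(x):
    # distance to the nearest integer lane index, plus that index
    return abs(round(x) - x), round(x)

lane_epsilon = 0.0125
for pos in (1.003, 1.3):
    dist, lane = how_close(pos)
    print(pos, lane, dist < lane_epsilon)  # 1.003 -> lane 1, True; 1.3 -> lane 1, False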
28.120968
167
0.53341
517
3,487
3.462282
0.237911
0.046927
0.026816
0.01676
0.297765
0.184916
0.140782
0.140782
0.140782
0.140782
0
0.045056
0.31259
3,487
123
168
28.349594
0.70171
0.241468
0
0.296296
0
0.024691
0.116788
0.049174
0
0
0
0
0
1
0.012346
false
0
0.061728
0.012346
0.08642
0.08642
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d41975bab1b82a3f84cbeb994a57f4874792563
849
py
Python
terraform/module/lambda/src/timestream_data_writer.py
Jimon-s/terraform-example-timestream
f24b3d5feb1d497374c52bff64308a296a01d158
[ "MIT" ]
1
2021-09-12T08:54:48.000Z
2021-09-12T08:54:48.000Z
terraform/module/lambda/src/timestream_data_writer.py
Jimon-s/terraform-example-timestream
f24b3d5feb1d497374c52bff64308a296a01d158
[ "MIT" ]
null
null
null
terraform/module/lambda/src/timestream_data_writer.py
Jimon-s/terraform-example-timestream
f24b3d5feb1d497374c52bff64308a296a01d158
[ "MIT" ]
null
null
null
from typing import List


class TimeStreamDataWriter:
    def __init__(self, client) -> None:
        self.client = client

    def write_records(self, database_name: str, table_name: str,
                      records: List[dict], common_attributes: List[dict] = None):
        if self.client is None:
            raise Exception('client is not set')
        if common_attributes is None:
            response = self.client.write_records(
                DatabaseName=database_name,
                TableName=table_name,
                Records=records
            )
            return response
        else:
            response = self.client.write_records(
                DatabaseName=database_name,
                TableName=table_name,
                CommonAttributes=common_attributes,
                Records=records
            )
            return response
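A hedged usage sketch, assuming the class above is in scope, valid AWS credentials are configured, and a Timestream database/table already exist; the names and region here are hypothetical:

import boto3
import time

client = boto3.client('timestream-write', region_name='us-east-1')
writer = TimeStreamDataWriter(client)
record = {
    'Dimensions': [{'Name': 'host', 'Value': 'web-01'}],
    'MeasureName': 'cpu_utilization',
    'MeasureValue': '13.5',
    'MeasureValueType': 'DOUBLE',
    'Time': str(int(time.time() * 1000)),  # milliseconds since the epoch
}
writer.write_records('exampleDatabase', 'exampleTable', [record])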
30.321429
125
0.580683
82
849
5.817073
0.390244
0.104822
0.075472
0.096436
0.301887
0.301887
0.301887
0.301887
0.301887
0.301887
0
0
0.353357
849
27
126
31.444444
0.868852
0
0
0.454545
0
0
0.020024
0
0
0
0
0
0
1
0.090909
false
0
0.045455
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d43fd7c79d6110719d20a77a2cbf996accb638e
4,015
py
Python
zodiacy/cli.py
greenify/zodiacy
faf46a10b9b70869cb4caca02027921f1418cfcf
[ "MIT" ]
1
2015-10-16T10:24:53.000Z
2015-10-16T10:24:53.000Z
zodiacy/cli.py
greenify/zodiacy
faf46a10b9b70869cb4caca02027921f1418cfcf
[ "MIT" ]
null
null
null
zodiacy/cli.py
greenify/zodiacy
faf46a10b9b70869cb4caca02027921f1418cfcf
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
from os import path

from .wrapper import wrap_calls, wrap_corpus

import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)

"""generate_horoscope.py: Generates horoscopes based on provided corpuses"""

__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"


def restricted_weight(x, max_range=1.0):
    x = float(x)
    if x < 0.0 or x > max_range:
        raise argparse.ArgumentTypeError(
            "%r not in range [0.0, %.2f]" % (x, max_range))
    return x


here = path.abspath(path.dirname(__file__))

_parser = argparse.ArgumentParser(description="Awesome horoscope generator")
_parser.add_argument('-d', '--debug', dest='debug',
                     help='show debug logs', action='store_true')
_parser.add_argument('-a', '--database', dest='database',
                     default=path.join(here, 'data', 'zodiac.sqlite'),
                     help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign',
                     help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword',
                     help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold',
                     help='minimum count of horoscopes for the given filters',
                     type=int, default=10)
_parser.add_argument('-o', '--order', dest='order', choices=range(1, 20),
                     help='order of the used markov chain', type=int, default=4)
_parser.add_argument('--order-emissions', dest='order_emissions', choices=range(1, 20),
                     help='max. order to look back at prev. emissions (HMM)',
                     type=int, default=1)
_parser.add_argument('-n', '--horoscopes', dest='nr_horoscopes', choices=range(1, 11),
                     help='number of horoscopes', type=int, default=1)
_parser.add_argument('-c', '--synonyms-generation', dest='use_synonyms_generation',
                     help='additionally use synonyms of keywords for generation',
                     action='store_true')
_parser.add_argument('-m', '--markov_type', dest='markov_type',
                     choices=('markov', 'hmm', 'hmm_past'),
                     help='Markov type to use (default: markov)', default="markov")
_parser.add_argument('--prob-hmm-states', dest='prob_hmm_states', type=restricted_weight,
                     help='When using previous states and emissions, weight for the previous states',
                     default=0.5)
_parser.add_argument('--prob-hmm-emissions', dest='prob_hmm_emissions', type=restricted_weight,
                     help='When using previous states and emissions, weight for the previous emissions',
                     default=0.5)
_parser.add_argument('-y', '--synonyms-emission', dest='use_synonyms_emission',
                     help='use synonyms on emissions', action='store_true')
_parser.add_argument('--prob-syn-emissions', dest='prob_synonyms_emission',
                     type=restricted_weight, help='probability to emit synonyms', default=0.3)
_parser.add_argument('--list-keywords', dest='list_keywords', action='store_true',
                     help='show all available keywords')
_parser.add_argument('-r', '--random-keyword', dest='random_keyword', action='store_true',
                     help='select keyword randomly (weighted on occurrence)')
_parser.add_argument('--ratings', dest='use_ratings', action='store_true',
                     help='weight states according to ratings')
_parser.add_argument('--moon', dest='use_moon', action='store_true',
                     help='Use current moon phase for the keyword selection')


def main():
    args = vars(_parser.parse_args())
    with sqlite3.connect(args["database"]) as conn:
        if args["list_keywords"]:
            for row in wrap_corpus(conn, **args).list_keywords():
                print("%-4s%s" % (row[1], row[0]))
        else:
            res = wrap_calls(conn, **args)
            print("\n".join(res))


if __name__ == '__main__':
    main()
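A standalone sketch of the custom argparse type used above (restricted_weight), showing how an out-of-range value is rejected:

import argparse

def restricted_weight(x, max_range=1.0):
    x = float(x)
    if x < 0.0 or x > max_range:
        raise argparse.ArgumentTypeError("%r not in range [0.0, %.2f]" % (x, max_range))
    return x

p = argparse.ArgumentParser()
p.add_argument('--weight', type=restricted_weight, default=0.5)
print(p.parse_args(['--weight', '0.3']).weight)  # 0.3
# p.parse_args(['--weight', '1.5']) exits with: argument --weight: 1.5 not in range [0.0, 1.00]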
46.149425
104
0.644583
499
4,015
4.985972
0.330661
0.065113
0.12299
0.030547
0.198955
0.148714
0.089228
0.063505
0.063505
0.063505
0
0.01198
0.209963
4,015
86
105
46.686047
0.772383
0.009215
0
0.030769
0
0
0.358955
0.022291
0
0
0
0
0
1
0.030769
false
0
0.076923
0
0.123077
0.030769
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d4642e8f398da24a0527a67fb597113262e14dc
732
py
Python
ioloop-futures/future_done_callback.py
psuresh39/async-design-patterns
f514edaf2b11ecf34b5b8dc2f237b869aa4ff1b9
[ "Apache-2.0" ]
3
2021-02-25T22:20:07.000Z
2021-07-02T09:43:07.000Z
ioloop-futures/future_done_callback.py
psuresh39/async-design-patterns
f514edaf2b11ecf34b5b8dc2f237b869aa4ff1b9
[ "Apache-2.0" ]
null
null
null
ioloop-futures/future_done_callback.py
psuresh39/async-design-patterns
f514edaf2b11ecf34b5b8dc2f237b869aa4ff1b9
[ "Apache-2.0" ]
2
2021-01-27T08:44:31.000Z
2021-05-31T16:36:34.000Z
__author__ = 'psuresh'

import asyncio


@asyncio.coroutine
def slow_operation(future):
    print("inside task")
    yield from asyncio.sleep(1)
    print("task done")
    future.set_result('Future is done!')


def got_result(future):
    print("inside callback")
    print(future.result())
    loop.stop()


loop = asyncio.get_event_loop()
future = asyncio.Future()

print("future initialized")
print("task scheduled")

asyncio.ensure_future(slow_operation(future))
future.add_done_callback(got_result)

try:
    loop.run_forever()
finally:
    loop.close()

# Output
# ------------------------------
# python future_done_callback.py
# future initialized
# task scheduled
# inside task
# task done
# inside callback
# Future is done!
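The @asyncio.coroutine / yield from style above is deprecated (and removed in Python 3.11); a sketch of the same done-callback pattern in modern async/await form:

import asyncio

async def slow_operation(future):
    await asyncio.sleep(1)
    future.set_result('Future is done!')

def got_result(future):
    print(future.result())

async def main():
    loop = asyncio.get_running_loop()
    future = loop.create_future()
    future.add_done_callback(got_result)
    await slow_operation(future)
    await asyncio.sleep(0)  # yield to the loop so the done-callback runs

asyncio.run(main())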
18.3
45
0.695355
91
732
5.406593
0.417582
0.067073
0.077236
0
0
0
0
0
0
0
0
0.001608
0.150273
732
39
46
18.769231
0.789389
0.213115
0
0
0
0
0.157244
0
0
0
0
0
0
1
0.090909
false
0
0.045455
0
0.136364
0.272727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d4a0bcb9c1a2d9b83eb8672bd2a88bd3b493c65
6,555
py
Python
resources.py
cozhiv/tokenauthentication
c6fec21134d55177b99b23dfe21a89d23eda8394
[ "MIT" ]
null
null
null
resources.py
cozhiv/tokenauthentication
c6fec21134d55177b99b23dfe21a89d23eda8394
[ "MIT" ]
null
null
null
resources.py
cozhiv/tokenauthentication
c6fec21134d55177b99b23dfe21a89d23eda8394
[ "MIT" ]
null
null
null
from flask_restful import Resource, reqparse
from models import UserModel, RevokedTokenModel, PortfolioModel
from flask_jwt_extended import (create_access_token, create_refresh_token,
                                jwt_required, jwt_refresh_token_required,
                                get_jwt_identity, get_raw_jwt, get_jwt_claims)
import json
from flask import request

parser = reqparse.RequestParser()


class UserRegistration(Resource):
    def post(self):
        parser.add_argument('username', help='This field cannot be blank', required=True)
        parser.add_argument('password', help='This field cannot be blank', required=True)
        data = parser.parse_args()

        if UserModel.find_by_username(data['username']):
            return {'message': 'User {} already exists'.format(data['username'])}

        new_user = UserModel(
            username=data['username'],
            password=UserModel.generate_hash(data['password'])
        )
        try:
            new_user.save_to_db()
            access_token = create_access_token(identity=data['username'])
            refresh_token = create_refresh_token(identity=data['username'])
            return {
                'message': 'User {} was created'.format(data['username']),
                'access_token': access_token,
                'refresh_token': refresh_token
            }
        except:
            return {'message': 'Something went wrong'}, 500


class UserLogin(Resource):
    def post(self):
        parser.add_argument('username', help='This field cannot be blank', required=True)
        parser.add_argument('password', help='This field cannot be blank', required=True)
        data = parser.parse_args()
        current_user = UserModel.find_by_username(data['username'])
        if not current_user:
            return {'message': 'User {} doesn\'t exist'.format(data['username'])}

        if UserModel.verify_hash(data['password'], current_user.password):
            access_token = create_access_token(identity=data['username'])
            refresh_token = create_refresh_token(identity=data['username'])
            return {
                'message': 'Logged in as {}'.format(current_user.username),
                'access_token': access_token,
                'refresh_token': refresh_token
            }
        else:
            return {'message': 'Wrong credentials'}


class UserLogoutAccess(Resource):
    @jwt_required
    def post(self):
        jti = get_raw_jwt()['jti']
        try:
            revoked_token = RevokedTokenModel(jti=jti)
            revoked_token.add()
            return {'message': 'Access token has been revoked'}
        except:
            return {'message': 'Something went wrong'}, 500


class UserLogoutRefresh(Resource):
    @jwt_refresh_token_required
    def post(self):
        jti = get_raw_jwt()['jti']
        try:
            revoked_token = RevokedTokenModel(jti=jti)
            revoked_token.add()
            return {'message': 'Refresh token has been revoked'}
        except:
            return {'message': 'Something went wrong'}, 500


class TokenRefresh(Resource):
    @jwt_refresh_token_required
    def post(self):
        current_user = get_jwt_identity()
        print(current_user)
        access_token = create_access_token(identity=current_user)
        return {'access_token': access_token}


class AllUsers(Resource):
    def get(self):
        return UserModel.return_all()

    def delete(self):
        return UserModel.delete_all()


class SecretResource(Resource):
    @jwt_required
    def get(self):
        return {'answer': 42}


class Mirror(Resource):
    @jwt_required
    def post(self):
        data = parser.parse_args()
        print(data)
        return {'you': json.dumps(data)}


class Portfolio(Resource):
    @jwt_required
    def post(self):
        # try:
        parser.add_argument('portfolio', help='This field cannot be blank', required=True)
        data = parser.parse_args()
        name = data['portfolio']
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        new_portfolio = PortfolioModel(name=name)
        current_user_model.add_portfolio(new_portfolio)
        new_portfolio.add_data()
        current_user_model.add_data()
        new_portfolio.commit()
        return {"message": "{0} created {1}".format(current_user, name)}
        # except:
        #     return {"message": "Something went wrong"}

    @jwt_required
    def get(self):
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        porto = []
        for i in current_user_model.portfolios:
            porto.append(i.name)
        return {"user": current_user, "portfolios": porto}


class PortfolioSpecific(Resource):
    @jwt_required
    def get(self, id):
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        return {"portfolio": current_user_model.portfolios[id + 1].name}  # index out of range exception possible

    @jwt_required
    def put(self, id):
        # parser.add_argument('portfolio', help='This field cannot be blank', required=True)
        # data = parser.parse_args()
        data = request.get_json(silent=True)
        new_name = data['portfolio']
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        new_portfolio = PortfolioModel(name=new_name)
        old_name = current_user_model.portfolios[id + 1].name
        current_user_model.portfolios[id + 1] = new_portfolio
        current_user_model.add_data()
        new_portfolio.add_data()
        new_portfolio.commit()
        return {"message": "Portfolio {} has been changed to {}".format(old_name, new_name)}

    @jwt_required
    def delete(self, id):
        # parser.add_argument('portfolio', help='This field cannot be blank', required=True)
        # data = parser.parse_args()
        data = request.get_json(silent=True)
        delete_name = data['portfolio']
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        return {"message": "{}'s Portfolio {} wasn't deleted because this functionality is still not implemented".format(current_user, delete_name)}


class TestRest(Resource):
    def post(self):
        dada = request.get_json()
        print(dada)
        return {"whatvar": dada}
35.625
163
0.633562
736
6,555
5.400815
0.179348
0.088553
0.048302
0.033459
0.613836
0.604277
0.528553
0.471698
0.444025
0.416855
0
0.003309
0.262243
6,555
183
164
35.819672
0.818652
0.049428
0
0.517241
0
0
0.133901
0
0
0
0
0
0
1
0.103448
false
0.027586
0.034483
0.02069
0.358621
0.02069
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d4a1b0bb0e6cd8b58de78b8042c2fe98a20bb35
1,164
py
Python
src/data/get_raw_data.py
oscarv17/titanic-disaster-project
b3663c6e02ca2796dd982e0b6fe624968a935963
[ "MIT" ]
null
null
null
src/data/get_raw_data.py
oscarv17/titanic-disaster-project
b3663c6e02ca2796dd982e0b6fe624968a935963
[ "MIT" ]
null
null
null
src/data/get_raw_data.py
oscarv17/titanic-disaster-project
b3663c6e02ca2796dd982e0b6fe624968a935963
[ "MIT" ]
null
null
null
import os
import kaggle
from dotenv import find_dotenv, load_dotenv
import logging

# setting credentials (assigning os.environ directly; `os.system('set ...')`
# only affects the short-lived child shell, not this process)
os.environ['KAGGLE_USERNAME'] = os.environ.get('kaggle_username', '')
os.environ['KAGGLE_KEY'] = os.environ.get('kaggle_key', '')


# function to extract the data
def extractData(path):
    os.system('kaggle datasets download mhouellemont/titanic -f train.csv -p %s' % path)
    os.system('kaggle datasets download mhouellemont/titanic -f test.csv -p %s' % path)


def main(project_dir):
    logger = logging.getLogger(__name__)
    logger.info('Getting raw data')
    # set data paths
    raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
    # extract the data
    extractData(raw_data_path)
    logger.info('Data downloaded')


if __name__ == '__main__':
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
    # set up logger
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format)
    # loading dotenv
    dotenv = find_dotenv()
    load_dotenv(dotenv)
    main(project_dir)
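A hedged alternative to the os.system calls above, invoking the same kaggle CLI through subprocess with an explicit argument list (assumes the kaggle CLI is installed and authenticated):

import subprocess

def download_dataset_file(dataset: str, filename: str, dest: str) -> None:
    # Building the argv list avoids shell-string interpolation entirely
    subprocess.run(
        ['kaggle', 'datasets', 'download', dataset, '-f', filename, '-p', dest],
        check=True,
    )

download_dataset_file('mhouellemont/titanic', 'train.csv', 'data/raw')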
29.846154
87
0.671821
155
1,164
4.832258
0.380645
0.042724
0.056075
0.053405
0.186916
0.144192
0.144192
0.144192
0.144192
0
0
0
0.207904
1,164
38
88
30.631579
0.812364
0.093643
0
0
0
0
0.284158
0
0
0
0
0
0
1
0.090909
false
0
0.181818
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d4b25fdee1ba1da4de6a0ee18903a26769fc38d
4,031
py
Python
4. Data Pipelines with Airflow/dags/sparkify_dend_dag.py
moni2096/Data-Engineering-Nanodegree---Udacity
6202a535ebc5ff95921ce56d37f8116e3e961a3b
[ "MIT" ]
4
2021-07-02T06:17:53.000Z
2022-01-31T19:54:20.000Z
4. Data Pipelines with Airflow/dags/sparkify_dend_dag.py
moni2096/Data-Engineering-Nanodegree-Udacity
6202a535ebc5ff95921ce56d37f8116e3e961a3b
[ "MIT" ]
null
null
null
4. Data Pipelines with Airflow/dags/sparkify_dend_dag.py
moni2096/Data-Engineering-Nanodegree-Udacity
6202a535ebc5ff95921ce56d37f8116e3e961a3b
[ "MIT" ]
null
null
null
from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import (StageToRedshiftOperator, LoadFactOperator,
                               LoadDimensionOperator, DataQualityOperator)
from helpers import SqlQueries

default_args = {
    'owner': 'udacity',
    'depends_on_past': False,
    'start_date': datetime(2021, 7, 1),
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    'catchup': False,
    'data_quality_checks': [
        {'check_sql': "SELECT COUNT(*) FROM users WHERE userid is null", 'expected_result': 0},
        {'check_sql': "SELECT COUNT(*) FROM songs WHERE songid is null", 'expected_result': 0},
        {'check_sql': "SELECT COUNT(*) FROM artists WHERE artistid is null", 'expected_result': 0},
        {'check_sql': "SELECT COUNT(*) FROM time WHERE start_time is null", 'expected_result': 0},
        {'check_sql': "SELECT COUNT(*) FROM songplays WHERE userid is null", 'expected_result': 0}
    ]
}

dag = DAG('udac_example_dag',
          default_args=default_args,
          description='Load and transform data in Redshift with Airflow',
          schedule_interval='0 * * * *'
          )

start_operator = DummyOperator(task_id='Begin_execution', dag=dag)

stage_events_to_redshift = StageToRedshiftOperator(
    task_id="Stage_events",
    redshift_conn_id="redshift",
    aws_credentials_id="aws_credentials",
    table="staging_events",
    s3_bucket="udacity-dend",
    s3_key="log_data",
    json_path="s3://udacity-dend/log_json_path.json",
    dag=dag
)

stage_songs_to_redshift = StageToRedshiftOperator(
    task_id="Stage_songs",
    redshift_conn_id="redshift",
    aws_credentials_id="aws_credentials",
    table="staging_songs",
    s3_bucket="udacity-dend",
    s3_key="song_data",
    json_path="auto",
    dag=dag
)

load_songplays_table = LoadFactOperator(
    task_id="Load_songplays_fact_table",
    redshift_conn_id="redshift",
    table="songplays",
    sql_query=SqlQueries.songplay_table_insert,
    dag=dag
)

load_user_dimension_table = LoadDimensionOperator(
    task_id="Load_user_dim_table",
    redshift_conn_id="redshift",
    table="users",
    sql_query=SqlQueries.user_table_insert,
    truncate_table=True,
    dag=dag
)

load_song_dimension_table = LoadDimensionOperator(
    task_id="Load_song_dim_table",
    redshift_conn_id="redshift",
    table="songs",
    sql_query=SqlQueries.song_table_insert,
    truncate_table=True,
    dag=dag
)

load_artist_dimension_table = LoadDimensionOperator(
    task_id="Load_artist_dim_table",
    redshift_conn_id="redshift",
    table="artists",
    sql_query=SqlQueries.artist_table_insert,
    truncate_table=True,
    dag=dag
)

load_time_dimension_table = LoadDimensionOperator(
    task_id="Load_time_dim_table",
    redshift_conn_id="redshift",
    table="time",
    sql_query=SqlQueries.time_table_insert,
    truncate_table=True,
    dag=dag
)

run_quality_checks = DataQualityOperator(
    task_id="Run_data_quality_checks",
    redshift_conn_id="redshift",
    dq_checks=default_args['data_quality_checks'],
    dag=dag
)

end_operator = DummyOperator(task_id="End_execution", dag=dag)

# Load the staging tables
start_operator >> stage_events_to_redshift
start_operator >> stage_songs_to_redshift

# Load the songplays fact table
stage_events_to_redshift >> load_songplays_table
stage_songs_to_redshift >> load_songplays_table

# Load the dimension tables
load_songplays_table >> load_song_dimension_table
load_songplays_table >> load_user_dimension_table
load_songplays_table >> load_artist_dimension_table
load_songplays_table >> load_time_dimension_table

# Run the quality checks
load_song_dimension_table >> run_quality_checks
load_user_dimension_table >> run_quality_checks
load_artist_dimension_table >> run_quality_checks
load_time_dimension_table >> run_quality_checks

# End execution
run_quality_checks >> end_operator
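Airflow's bitshift operators also accept lists of tasks, so the fan-out/fan-in wiring at the end of the DAG can be expressed more compactly. An equivalent sketch, assuming the same task objects as above:

    start_operator >> [stage_events_to_redshift, stage_songs_to_redshift]
    [stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table
    load_songplays_table >> [load_song_dimension_table, load_user_dimension_table,
                             load_artist_dimension_table, load_time_dimension_table]
    [load_song_dimension_table, load_user_dimension_table,
     load_artist_dimension_table, load_time_dimension_table] >> run_quality_checks >> end_operator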
30.537879
110
0.73034
500
4,031
5.492
0.206
0.066278
0.040787
0.064093
0.50874
0.44756
0.231245
0.152221
0.110706
0.110706
0
0.005711
0.174646
4,031
132
111
30.537879
0.819657
0.028281
0
0.224299
0
0
0.242331
0.02684
0
0
0
0
0
1
0
false
0
0.056075
0
0.056075
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d4cf8e51d0fbaf6857ed11d10bfb09f5a4b1db4
3,151
py
Python
Mnist_conv.py
yashchandak/TensorFlow-fun
d2ec9c6eb52c5d92f417c62f99bc3e9385d43f0d
[ "MIT" ]
null
null
null
Mnist_conv.py
yashchandak/TensorFlow-fun
d2ec9c6eb52c5d92f417c62f99bc3e9385d43f0d
[ "MIT" ]
null
null
null
Mnist_conv.py
yashchandak/TensorFlow-fun
d2ec9c6eb52c5d92f417c62f99bc3e9385d43f0d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Mon Jun  6 15:11:12 2016

@author: yash
"""

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.InteractiveSession()

"""
Convolutional Neural Net
"""

def weight_variable(shape):
    # return initialised weight variable
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # returns initialised bias variables
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # returns result of convolving x with W
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # returns pooled values from x
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def validate(inp, out, accuracy, i=-1):
    # validating at ith step and logging progress
    train_accuracy = accuracy.eval(feed_dict={x: inp, y_: out, keep_prob: 1.0})
    print("step %d, training accuracy %g" % (i, train_accuracy))

def variable_summaries(var, name):
    mean = tf.reduce_mean(var)
    tf.scalar_summary('mean/' + name, mean)
    tf.scalar_summary('max/' + name, tf.reduce_max(var))
    tf.histogram_summary(name, var)

x = tf.placeholder(tf.float32, shape=[None, 784])   # Input
tf.image_summary('input', x, 10)
y_ = tf.placeholder(tf.float32, shape=[None, 10])   # Expected outcome
x_image = tf.reshape(x, [-1, 28, 28, 1])            # reshape input vector

# First convolution layer with 32 filters of size 5x5
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second convolutional layer with 64 filters of size 5x5
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Fully connected layer with 1024 hidden units
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])  # Flatten the result of convolution
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropouts
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# final classifier
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# loss function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess.run(tf.initialize_all_variables())
for i in range(500):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        validate(batch[0], batch[1], accuracy, i)
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})

validate(mnist.test.images, mnist.test.labels, accuracy)
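The `7 * 7 * 64` flatten size follows from the two SAME-padded stride-2 max-pools: the 28x28 input shrinks to 14x14 and then 7x7, with 64 channels after the second convolution. A quick standalone check of that arithmetic (plain Python, names are illustrative only):

    def pooled_size(side, n_pools, pool_stride=2):
        # SAME padding with stride 2 halves the side length, rounding up
        for _ in range(n_pools):
            side = (side + pool_stride - 1) // pool_stride
        return side

    assert pooled_size(28, 2) == 7                       # 28 -> 14 -> 7
    assert pooled_size(28, 2) ** 2 * 64 == 7 * 7 * 64 == 3136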
31.828283
89
0.698508
516
3,151
4.079457
0.348837
0.013302
0.014252
0.031354
0.070309
0.029454
0
0
0
0
0
0.062074
0.161536
3,151
99
90
31.828283
0.734671
0.161853
0
0.036364
0
0
0.023589
0
0
0
0
0
0
1
0.109091
false
0
0.036364
0.036364
0.218182
0.018182
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d4de2f63adc59698f65d7e1665d7fdff8be3785
5,036
py
Python
models.py
AnselCmy/MetaR
47897ef0268b2c6c00e211be26a983d201e54565
[ "Apache-2.0" ]
84
2019-09-17T03:21:30.000Z
2022-03-18T12:28:59.000Z
models.py
zjukg/MetaR
47897ef0268b2c6c00e211be26a983d201e54565
[ "Apache-2.0" ]
4
2019-09-16T06:30:04.000Z
2022-01-02T12:26:03.000Z
models.py
zjukg/MetaR
47897ef0268b2c6c00e211be26a983d201e54565
[ "Apache-2.0" ]
10
2019-09-24T01:23:18.000Z
2021-08-09T03:00:00.000Z
from embedding import *
from collections import OrderedDict
import torch


class RelationMetaLearner(nn.Module):
    def __init__(self, few, embed_size=100, num_hidden1=500, num_hidden2=200, out_size=100, dropout_p=0.5):
        super(RelationMetaLearner, self).__init__()
        self.embed_size = embed_size
        self.few = few
        self.out_size = out_size
        self.rel_fc1 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(2 * embed_size, num_hidden1)),
            ('bn', nn.BatchNorm1d(few)),
            ('relu', nn.LeakyReLU()),
            ('drop', nn.Dropout(p=dropout_p)),
        ]))
        self.rel_fc2 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(num_hidden1, num_hidden2)),
            ('bn', nn.BatchNorm1d(few)),
            ('relu', nn.LeakyReLU()),
            ('drop', nn.Dropout(p=dropout_p)),
        ]))
        self.rel_fc3 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(num_hidden2, out_size)),
            ('bn', nn.BatchNorm1d(few)),
        ]))
        nn.init.xavier_normal_(self.rel_fc1.fc.weight)
        nn.init.xavier_normal_(self.rel_fc2.fc.weight)
        nn.init.xavier_normal_(self.rel_fc3.fc.weight)

    def forward(self, inputs):
        size = inputs.shape
        x = inputs.contiguous().view(size[0], size[1], -1)
        x = self.rel_fc1(x)
        x = self.rel_fc2(x)
        x = self.rel_fc3(x)
        x = torch.mean(x, 1)
        return x.view(size[0], 1, 1, self.out_size)


class EmbeddingLearner(nn.Module):
    def __init__(self):
        super(EmbeddingLearner, self).__init__()

    def forward(self, h, t, r, pos_num):
        score = -torch.norm(h + r - t, 2, -1).squeeze(2)
        p_score = score[:, :pos_num]
        n_score = score[:, pos_num:]
        return p_score, n_score


class MetaR(nn.Module):
    def __init__(self, dataset, parameter):
        super(MetaR, self).__init__()
        self.device = parameter['device']
        self.beta = parameter['beta']
        self.dropout_p = parameter['dropout_p']
        self.embed_dim = parameter['embed_dim']
        self.margin = parameter['margin']
        self.abla = parameter['ablation']
        self.embedding = Embedding(dataset, parameter)

        if parameter['dataset'] == 'Wiki-One':
            self.relation_learner = RelationMetaLearner(parameter['few'], embed_size=50, num_hidden1=250,
                                                        num_hidden2=100, out_size=50, dropout_p=self.dropout_p)
        elif parameter['dataset'] == 'NELL-One':
            self.relation_learner = RelationMetaLearner(parameter['few'], embed_size=100, num_hidden1=500,
                                                        num_hidden2=200, out_size=100, dropout_p=self.dropout_p)
        self.embedding_learner = EmbeddingLearner()
        self.loss_func = nn.MarginRankingLoss(self.margin)
        self.rel_q_sharing = dict()

    def split_concat(self, positive, negative):
        pos_neg_e1 = torch.cat([positive[:, :, 0, :], negative[:, :, 0, :]], 1).unsqueeze(2)
        pos_neg_e2 = torch.cat([positive[:, :, 1, :], negative[:, :, 1, :]], 1).unsqueeze(2)
        return pos_neg_e1, pos_neg_e2

    def forward(self, task, iseval=False, curr_rel=''):
        # transfer task string into embedding
        support, support_negative, query, negative = [self.embedding(t) for t in task]

        few = support.shape[1]              # num of few
        num_sn = support_negative.shape[1]  # num of support negative
        num_q = query.shape[1]              # num of query
        num_n = negative.shape[1]           # num of query negative

        rel = self.relation_learner(support)
        rel.retain_grad()

        # relation for support
        rel_s = rel.expand(-1, few + num_sn, -1, -1)

        # because in test and dev step, the same relation uses the same support,
        # there is no need to repeat the step of relation-meta learning
        if iseval and curr_rel != '' and curr_rel in self.rel_q_sharing.keys():
            rel_q = self.rel_q_sharing[curr_rel]
        else:
            if not self.abla:
                # split on e1/e2 and concat on pos/neg
                sup_neg_e1, sup_neg_e2 = self.split_concat(support, support_negative)

                p_score, n_score = self.embedding_learner(sup_neg_e1, sup_neg_e2, rel_s, few)

                y = torch.Tensor([1]).to(self.device)
                self.zero_grad()
                loss = self.loss_func(p_score, n_score, y)
                loss.backward(retain_graph=True)

                grad_meta = rel.grad
                rel_q = rel - self.beta * grad_meta
            else:
                rel_q = rel

            self.rel_q_sharing[curr_rel] = rel_q

        rel_q = rel_q.expand(-1, num_q + num_n, -1, -1)

        que_neg_e1, que_neg_e2 = self.split_concat(query, negative)  # [bs, nq+nn, 1, es]
        p_score, n_score = self.embedding_learner(que_neg_e1, que_neg_e2, rel_q, num_q)

        return p_score, n_score
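The `nn.MarginRankingLoss(margin)` used for the support-set adaptation computes mean(max(0, -y * (x1 - x2) + margin)); with y = 1 it penalizes every case where the positive score does not exceed the negative score by at least the margin. A minimal standalone illustration (values are made up):

    import torch
    import torch.nn as nn

    loss_func = nn.MarginRankingLoss(1.0)     # margin = 1.0
    p_score = torch.tensor([0.9, 0.2])        # scores of positive triples
    n_score = torch.tensor([0.1, 0.3])        # scores of negative triples
    y = torch.ones_like(p_score)              # y = 1: p_score should rank higher
    loss = loss_func(p_score, n_score, y)     # mean of max(0, -(p - n) + 1)
    print(loss)                               # tensor(0.6500) = (0.2 + 1.1) / 2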
39.653543
112
0.578832
657
5,036
4.193303
0.217656
0.033031
0.026134
0.021779
0.339746
0.262069
0.201815
0.15245
0.128494
0.08784
0
0.029121
0.297657
5,036
126
113
39.968254
0.749788
0.060763
0
0.14433
0
0
0.022467
0
0
0
0
0
0
1
0.072165
false
0
0.030928
0
0.175258
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d5363097e402538bc42aa0b70cfd1c02f3ca6fb
2,526
py
Python
tests/apps/courses/test_models_subject.py
sampaccoud/richie
3d222aedab0636a84011dced568c5dcd48fc5b15
[ "MIT" ]
null
null
null
tests/apps/courses/test_models_subject.py
sampaccoud/richie
3d222aedab0636a84011dced568c5dcd48fc5b15
[ "MIT" ]
null
null
null
tests/apps/courses/test_models_subject.py
sampaccoud/richie
3d222aedab0636a84011dced568c5dcd48fc5b15
[ "MIT" ]
null
null
null
""" Unit tests for the Subject model """ from django.test import TestCase from cms.api import create_page from richie.apps.courses.factories import CourseFactory, SubjectFactory from richie.apps.courses.models import Subject class SubjectTestCase(TestCase): """ Unit test suite to validate the behavior of the Subject model """ def test_subject_str(self): """ The string representation should be built with the title of the related page. Only 1 query to the associated page should be generated. """ page = create_page("Art", "courses/cms/subject_detail.html", "en") subject = SubjectFactory(extended_object=page) with self.assertNumQueries(1): self.assertEqual(str(subject), "Subject: Art") def test_subject_courses_copied_when_publishing(self): """ When publishing a subject, the links to draft courses on the draft version of the subject should be copied (clear then add) to the published version. Links to published courses should not be copied as they are redundant and not up-to-date. """ # Create draft courses course1, course2 = CourseFactory.create_batch(2) # Create a draft subject draft_subject = SubjectFactory(with_courses=[course1, course2]) # Publish course1 course1.extended_object.publish("en") course1.refresh_from_db() # The draft subject should see all courses and propose a custom filter to easily access # the draft versions self.assertEqual( set(draft_subject.courses.all()), {course1, course1.public_extension, course2}, ) self.assertEqual(set(draft_subject.courses.drafts()), {course1, course2}) # Publish the subject and check that the courses are copied draft_subject.extended_object.publish("en") published_subject = Subject.objects.get( extended_object__publisher_is_draft=False ) self.assertEqual(set(published_subject.courses.all()), {course1, course2}) # When publishing, the courses that are obsolete should be cleared draft_subject.courses.remove(course2) self.assertEqual(set(published_subject.courses.all()), {course1, course2}) # courses on the published subject are only cleared after publishing the draft page draft_subject.extended_object.publish("en") self.assertEqual(set(published_subject.courses.all()), {course1})
38.272727
95
0.684481
307
2,526
5.521173
0.335505
0.056637
0.053097
0.056637
0.183481
0.183481
0.098525
0.098525
0.068437
0
0
0.010881
0.235946
2,526
65
96
38.861538
0.867358
0.332542
0
0.137931
0
0
0.034221
0.019645
0
0
0
0
0.241379
1
0.068966
false
0
0.137931
0
0.241379
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d53e547415669e075a1146807e89c0b079587f0
2,935
py
Python
Scripts/main.py
MainDuelo/Python-Tkinter-and-SQLite
7f69780ce9c1c8ebe807197448030aed94c3a082
[ "MIT" ]
null
null
null
Scripts/main.py
MainDuelo/Python-Tkinter-and-SQLite
7f69780ce9c1c8ebe807197448030aed94c3a082
[ "MIT" ]
null
null
null
Scripts/main.py
MainDuelo/Python-Tkinter-and-SQLite
7f69780ce9c1c8ebe807197448030aed94c3a082
[ "MIT" ]
null
null
null
from Scripts.bank.bankController import BankController
from tkinter import ttk, Tk, Button, Label, END
from tkinter.scrolledtext import ScrolledText
from Scripts.support.textManipulation import TextManipulation

MAROON = "#800000"
WHITE = "#FFFFFF"
VALUES = "values"


class Main:
    def __init__(self):
        BankController.create_database(None)
        global window
        global text_description
        global combo

        window = Tk()
        window.config(background="#ADD8E6")
        window.title("INFOS")
        window.geometry("900x500+100+10")

        button_newInfo = Button(window, width=10, text="New Info", command=self.buttonNewInfo,
                                background="#3CB371", foreground=WHITE,
                                activebackground=MAROON, activeforeground=WHITE)
        button_newInfo.place(x=150, y=50)

        button_update = Button(window, width=10, text="Update", command=self.buttonUpdate,
                               background="#1E90FF", foreground="#F0F8FF",
                               activebackground=MAROON, activeforeground=WHITE)
        button_update.place(x=630, y=50)

        button_delete = Button(window, width=10, text="Delete", command=self.buttonDelete,
                               background="#FF6347", foreground="#F0F8FF",
                               activebackground=MAROON, activeforeground=WHITE)
        button_delete.place(x=730, y=50)

        button_selectInfo = Button(window, width=10, text="Confirm", command=self.buttonSelectInfo,
                                   activebackground=MAROON, activeforeground=WHITE)
        button_selectInfo.place(x=530, y=50)

        button_exit = Button(window, width=10, text="Quit", command=self.buttonExit,
                             background="#696969", foreground="#FFFFFF",
                             activebackground=MAROON, activeforeground=WHITE)
        button_exit.place(x=820, y=0)

        text_description = ScrolledText(window, width=106, height=21)
        text_description.place(x=10, y=150)

        combo = ttk.Combobox(window, width=40, height=34)
        combo.place(x=250, y=50)
        combo[VALUES] = (BankController.getNames(None))

        label_description = Label(window, text="DESCRIPTION", width=10, height=1, background="#ADD8E6")
        label_description.place(x=400, y=120)

        label_infos = Label(window, text="INFOS", width=10, height=1, background="#ADD8E6")
        label_infos.place(x=350, y=25)

        window.mainloop()

    def buttonNewInfo(self):
        BankController.insert(None, combo.get(),
                              TextManipulation.formatText(text_description.get(0.0, END)))
        combo[VALUES] = (BankController.getNames(None))

    def buttonUpdate(self):
        BankController.update(None, combo.get(), text_description.get(0.0, END))

    def buttonDelete(self):
        BankController.delete(None, combo.get())
        combo[VALUES] = (BankController.getNames(None))

    def buttonSelectInfo(self):
        text_description.delete('0.0', '100.0')
        text_description.insert(END, BankController.getDescription(None, combo.get()))

    def buttonExit(self):
        BankController.close(None)
        window.destroy()


gui = Main()
40.763889
183
0.696763
339
2,935
5.955752
0.283186
0.026746
0.0421
0.047053
0.309559
0.161466
0.099059
0
0
0
0
0.05124
0.175468
2,935
72
184
40.763889
0.783058
0
0
0.057692
0
0
0.055858
0
0
0
0
0
0
1
0.115385
false
0
0.076923
0
0.211538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d55e3cd7ef5d22859ae8f2c2273a0f46b67eba4
1,651
py
Python
python-verilog/examples_py3/faulted_sqrt/learn_sql/create_table.py
vhnatyk/vlsistuff
0981097bd19a0c482728dcc5048a3615ac9a9a90
[ "MIT" ]
26
2018-03-17T18:14:22.000Z
2022-03-14T07:23:13.000Z
python-verilog/examples_py3/faulted_sqrt/learn_sql/create_table.py
psumesh/vlsistuff
1fe64b093d0581d99c7d826b74c31b8655fa0b31
[ "MIT" ]
1
2019-10-16T10:31:11.000Z
2019-10-17T04:14:53.000Z
python-verilog/examples_py3/faulted_sqrt/learn_sql/create_table.py
psumesh/vlsistuff
1fe64b093d0581d99c7d826b74c31b8655fa0b31
[ "MIT" ]
7
2018-07-16T07:51:25.000Z
2022-02-15T14:22:54.000Z
#! /usr/bin/env python3

import os, sys, string
import sqlite3
from sqlite3 import Error


def create_connection(path):
    connection = None
    try:
        connection = sqlite3.connect(path)
        print("Connection to SQLite DB successful")
    except Error as e:
        print(f"The error '{e}' occurred")
    return connection


def execute_query(connection, query):
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        connection.commit()
        print("Query executed successfully")
    except Error as e:
        print(f"The error '{e}' occurred")


create_faults_table = """
CREATE TABLE IF NOT EXISTS faults (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  name TEXT NOT NULL,
  width INTEGER,
  position INTEGER,
  discovered TEXT,
  notdiscovered TEXT
);
"""


def execute_read_query(connection, query):
    cursor = connection.cursor()
    result = None
    try:
        cursor.execute(query)
        result = cursor.fetchall()
        return result
    except Error as e:
        print(f"The error '{e}' occurred")


def main():
    Db = create_connection('faultsBase.sql')
    execute_query(Db, create_faults_table)
    Fname = sys.argv[1]
    File = open(Fname)
    while 1:
        line = File.readline()
        if line == '':
            return
        wrds = line.split()
        if len(wrds) == 0:
            pass
        elif wrds[0] == 'register':
            Query = ("INSERT INTO faults (name,width,position,discovered,notdiscovered) "
                     "VALUES ( '%s', %s, %s, 'false', 'false')" % (wrds[1], wrds[2], wrds[3]))
            print(Query)
            execute_query(Db, Query)


if __name__ == '__main__':
    main()
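Building the INSERT with `%` string interpolation works for trusted input, but sqlite3 also supports `?` placeholders, which sidestep quoting and escaping problems entirely. A sketch of the same insert with parameters (`insert_fault` is a hypothetical helper, assuming the `faults` schema above):

    def insert_fault(connection, name, width, position):
        # '?' placeholders let sqlite3 handle quoting and escaping
        connection.execute(
            "INSERT INTO faults (name, width, position, discovered, notdiscovered) "
            "VALUES (?, ?, ?, 'false', 'false')",
            (name, width, position),
        )
        connection.commit()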
22.930556
158
0.615385
198
1,651
5.035354
0.409091
0.060181
0.039117
0.042126
0.195587
0.195587
0.111334
0.111334
0.111334
0.111334
0
0.009106
0.268322
1,651
71
159
23.253521
0.816225
0.013325
0
0.240741
0
0.018519
0.275184
0.028256
0
0
0
0
0
1
0.074074
false
0.018519
0.055556
0
0.166667
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d568baa19ca1079cc5a15acbd9559b0d736935c
1,836
py
Python
cq_editor/icons.py
possibilities/CQ-editor
dc950180b365ae39840f6787c8f5a061492734ed
[ "Apache-2.0" ]
351
2018-06-08T14:36:35.000Z
2022-03-29T22:03:04.000Z
cq_editor/icons.py
possibilities/CQ-editor
dc950180b365ae39840f6787c8f5a061492734ed
[ "Apache-2.0" ]
315
2018-06-08T14:35:08.000Z
2022-03-31T15:45:27.000Z
cq_editor/icons.py
possibilities/CQ-editor
dc950180b365ae39840f6787c8f5a061492734ed
[ "Apache-2.0" ]
71
2018-06-19T02:00:24.000Z
2022-03-25T08:55:02.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 25 14:47:10 2018

@author: adam
"""

from PyQt5.QtGui import QIcon

from . import icons_res

_icons = {
    'app': QIcon(":/images/icons/cadquery_logo_dark.svg")
}

import qtawesome as qta

_icons_specs = {
    'new': (('fa.file-o',), {}),
    'open': (('fa.folder-open-o',), {}),
    # borrowed from spyder-ide
    'autoreload': [('fa.repeat', 'fa.clock-o'),
                   {'options': [{'scale_factor': 0.75, 'offset': (-0.1, -0.1)},
                                {'scale_factor': 0.5, 'offset': (0.25, 0.25)}]}],
    'save': (('fa.save',), {}),
    'save_as': (('fa.save', 'fa.pencil'),
                {'options': [{'scale_factor': 1},
                             {'scale_factor': 0.8, 'offset': (0.2, 0.2)}]}),
    'run': (('fa.play',), {}),
    'delete': (('fa.trash',), {}),
    'delete-many': (('fa.trash', 'fa.trash'),
                    {'options': [{'scale_factor': 0.8, 'offset': (0.2, 0.2), 'color': 'gray'},
                                 {'scale_factor': 0.8}]}),
    'help': (('fa.life-ring',), {}),
    'about': (('fa.info',), {}),
    'preferences': (('fa.cogs',), {}),
    'inspect': (('fa.cubes', 'fa.search'),
                {'options': [{'scale_factor': 0.8, 'offset': (0, 0), 'color': 'gray'}, {}]}),
    'screenshot': (('fa.camera',), {}),
    'screenshot-save': (('fa.save', 'fa.camera'),
                        {'options': [{'scale_factor': 0.8},
                                     {'scale_factor': 0.8, 'offset': (.2, .2)}]})
}


def icon(name):
    if name in _icons:
        return _icons[name]

    args, kwargs = _icons_specs[name]
    return qta.icon(*args, **kwargs)
31.118644
158
0.421024
196
1,836
3.841837
0.433673
0.131474
0.12749
0.103586
0.158035
0.106242
0.106242
0.061089
0.061089
0
0
0.044971
0.333878
1,836
59
159
31.118644
0.570728
0.064815
0
0.162791
0
0
0.294496
0.021663
0
0
0
0
0
1
0.023256
false
0
0.069767
0
0.139535
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d5780d158839c24f720b62513828f2502747533
493
py
Python
Exercicios/ex100.py
mauriciozago/CursoPython3
cbcff9ebfd4d5f5e3a32a369dac8521c6758bfe5
[ "MIT" ]
null
null
null
Exercicios/ex100.py
mauriciozago/CursoPython3
cbcff9ebfd4d5f5e3a32a369dac8521c6758bfe5
[ "MIT" ]
null
null
null
Exercicios/ex100.py
mauriciozago/CursoPython3
cbcff9ebfd4d5f5e3a32a369dac8521c6758bfe5
[ "MIT" ]
null
null
null
from random import randint
from time import sleep


def sorteia(lista):
    print('Sorteando 5 valores da lista:', end=' ')
    for num in range(0, 5):
        lista.append(randint(1, 10))
        sleep(0.5)
        print(lista[num], end=' ')
    print('PRONTO!')


def somaPar(lista):
    soma = 0
    for valor in lista:
        if valor % 2 == 0:
            soma += valor
    print(f'Somando os valores pares de {lista}, temos {soma}')


numeros = list()
sorteia(numeros)
somaPar(numeros)
19.72
63
0.596349
69
493
4.26087
0.536232
0.013605
0
0
0
0
0
0
0
0
0
0.030641
0.271805
493
24
64
20.541667
0.788301
0
0
0
0
0
0.176471
0
0
0
0
0
0
1
0.111111
false
0
0.111111
0
0.222222
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d5878f65e5314a4e23460be695252643785c2ed
4,728
py
Python
python/fate_client/flow_client/flow_cli/utils/cli_args.py
kakasu/FATE
cfc61ef268154e08a9e7125c047c318c5e5eb42a
[ "Apache-2.0" ]
2
2020-11-21T11:25:08.000Z
2020-11-21T11:25:11.000Z
python/fate_client/flow_client/flow_cli/utils/cli_args.py
TroubleMaker1994/FATE
23ad848bcc7ae7f304a376d3f46f4af26872c8a2
[ "Apache-2.0" ]
null
null
null
python/fate_client/flow_client/flow_cli/utils/cli_args.py
TroubleMaker1994/FATE
23ad848bcc7ae7f304a376d3f46f4af26872c8a2
[ "Apache-2.0" ]
1
2021-02-03T08:23:42.000Z
2021-02-03T08:23:42.000Z
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import click

CONF_PATH = click.option("-c", "--conf-path", type=click.Path(exists=True), required=True,
                         help="Configuration file path.")
DSL_PATH = click.option("-d", "--dsl-path", type=click.Path(exists=True),
                        help="Domain-specific language(DSL) file path. If the type of job is 'predict', "
                             "you can leave this feature blank, or you can provide a valid dsl file to "
                             "replace the one that is automatically generated by fate.")
LIMIT = click.option("-l", "--limit", type=click.INT, default=10,
                     help="LIMIT flag constrains the number of records to return. (default: 10)")
JOBID = click.option("-j", "--job-id", type=click.STRING, help="A valid job id.")
JOBID_REQUIRED = click.option("-j", "--job-id", type=click.STRING, required=True,
                              help="A valid job id.")

role_choices_list = ["local", "guest", "arbiter", "host"]
ROLE = click.option("-r", "--role", type=click.Choice(role_choices_list), metavar="TEXT",
                    help="Role name. Users can choose one from {} and {}.".format(
                        ",".join(role_choices_list[:-1]), role_choices_list[-1]))
ROLE_REQUIRED = click.option("-r", "--role", type=click.Choice(role_choices_list), required=True,
                             metavar="TEXT",
                             help="Role name. Users can choose one from {} and {}.".format(
                                 ",".join(role_choices_list[:-1]), role_choices_list[-1]))

PARTYID = click.option("-p", "--party-id", type=click.STRING, help="A valid party id.")
PARTYID_REQUIRED = click.option("-p", "--party-id", type=click.STRING, required=True,
                                help="A valid party id.")
COMPONENT_NAME = click.option("-cpn", "--component-name", type=click.STRING,
                              help="A valid component name.")
COMPONENT_NAME_REQUIRED = click.option("-cpn", "--component-name", type=click.STRING, required=True,
                                       help="A valid component name.")

status_choices_list = ["complete", "failed", "running", "waiting", "timeout", "canceled",
                       "partial", "deleted"]
STATUS = click.option("-s", "--status", type=click.Choice(status_choices_list), metavar="TEXT",
                      help="Job status. Users can choose one from {} and {}.".format(
                          ", ".join(status_choices_list[:-1]), status_choices_list[-1]))

OUTPUT_PATH_REQUIRED = click.option("-o", "--output-path", type=click.Path(exists=False), required=True,
                                    help="User specifies output directory path.")
OUTPUT_PATH = click.option("-o", "--output-path", type=click.Path(exists=False),
                           help="User specifies output directory path.")

NAMESPACE = click.option("-n", "--namespace", type=click.STRING, help="Namespace.")
TABLE_NAME = click.option("-t", "--table-name", type=click.STRING, help="Table name.")
NAMESPACE_REQUIRED = click.option("-n", "--namespace", type=click.STRING, required=True,
                                  help="Namespace.")
TABLE_NAME_REQUIRED = click.option("-t", "--table-name", type=click.STRING, required=True,
                                   help="Table name.")

TAG_NAME_REQUIRED = click.option("-t", "--tag-name", type=click.STRING, required=True,
                                 help="The name of tag.")
TAG_DESCRIPTION = click.option("-d", "--tag-desc", type=click.STRING,
                               help="The description of tag. Note that if there are some whitespaces "
                                    "in description, please make sure the description text is "
                                    "enclosed in double quotation marks.")

MODEL_VERSION = click.option("-m", "--model_version", type=click.STRING, help="Model version.")
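Each of these module-level constants is the decorator returned by `click.option(...)`, so CLI commands can reuse them by stacking. A hypothetical command (`query_job` is not part of the original file) showing how they compose:

    import click

    @click.command()
    @JOBID_REQUIRED
    @ROLE
    def query_job(job_id, role):
        """Query a FATE job by id (illustrative only)."""
        click.echo(f"querying job {job_id} as role {role}")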
51.956044
124
0.572758
555
4,728
4.803604
0.302703
0.086647
0.073143
0.049887
0.436984
0.391973
0.334959
0.28132
0.165041
0.135784
0
0.00536
0.289763
4,728
90
125
52.533333
0.788565
0.123519
0
0.313725
0
0
0.292949
0
0
0
0
0
0
1
0
false
0
0.019608
0
0.019608
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d595897f47c1cc37b47f1c81df0318c37ce2e88
5,210
py
Python
lightning_transformers/task/nlp/masked_language_modeling/data.py
maksym-taranukhin/lightning-transformers
aa7202657973b5b65c3c36eb745621043859ebc4
[ "Apache-2.0" ]
null
null
null
lightning_transformers/task/nlp/masked_language_modeling/data.py
maksym-taranukhin/lightning-transformers
aa7202657973b5b65c3c36eb745621043859ebc4
[ "Apache-2.0" ]
null
null
null
lightning_transformers/task/nlp/masked_language_modeling/data.py
maksym-taranukhin/lightning-transformers
aa7202657973b5b65c3c36eb745621043859ebc4
[ "Apache-2.0" ]
null
null
null
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable, Optional, Union

from datasets import Dataset
from transformers import DataCollatorForLanguageModeling, DataCollatorForWholeWordMask, PreTrainedTokenizerBase

from lightning_transformers.core.nlp import HFDataModule
from lightning_transformers.task.nlp.masked_language_modeling.config import MaskedLanguageModelingDataConfig


class MaskedLanguageModelingDataModule(HFDataModule):
    """
    Defines ``LightningDataModule`` for Language Modeling Datasets.

    Args:
        *args: ``HFDataModule`` specific arguments.
        cfg: Contains data specific parameters when processing/loading the dataset
            (Default ``MaskedLanguageModelingDataConfig``)
        **kwargs: ``HFDataModule`` specific arguments.
    """

    cfg: MaskedLanguageModelingDataConfig

    def __init__(
        self, *args, cfg: MaskedLanguageModelingDataConfig = MaskedLanguageModelingDataConfig(), **kwargs
    ) -> None:
        super().__init__(*args, cfg=cfg, **kwargs)

    def process_data(self, dataset: Dataset, stage: Optional[str] = None) -> Dataset:
        column_names = dataset["train" if stage == "fit" else "validation"].column_names
        text_column_name = "text" if "text" in column_names else column_names[0]

        tokenize_function = partial(
            self.tokenize_function,
            tokenizer=self.tokenizer,
            text_column_name=text_column_name,
            line_by_line=self.cfg.line_by_line,
            padding=self.cfg.padding,
            max_length=self.cfg.max_length,
        )

        dataset = dataset.map(
            tokenize_function,
            batched=True,
            num_proc=self.cfg.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=self.cfg.load_from_cache_file,
        )

        if not self.cfg.line_by_line:
            convert_to_features = partial(
                self.convert_to_features,
                max_seq_length=self.cfg.max_length,
            )
            dataset = dataset.map(
                convert_to_features,
                batched=True,
                num_proc=self.cfg.preprocessing_num_workers,
                load_from_cache_file=self.cfg.load_from_cache_file,
            )

        return dataset

    @staticmethod
    def tokenize_function(
        examples,
        tokenizer: Union[PreTrainedTokenizerBase],
        text_column_name: str = None,
        line_by_line: bool = False,
        padding: Union[str, bool] = "max_length",
        max_length: int = 128,
    ):
        if line_by_line:
            examples[text_column_name] = [
                line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
            ]
            return tokenizer(
                examples[text_column_name],
                padding=padding,
                truncation=True,
                max_length=max_length,
                # We use this option because DataCollatorForLanguageModeling (see below) is more
                # efficient when it receives the `special_tokens_mask`.
                return_special_tokens_mask=True,
            )
        else:
            # Otherwise, we tokenize every text, then concatenate them together before splitting
            # them in smaller parts. We use `return_special_tokens_mask=True` because
            # DataCollatorForLanguageModeling (see below) is more efficient when it receives the
            # `special_tokens_mask`.
            return tokenizer(examples[text_column_name], return_special_tokens_mask=True)

    @staticmethod
    def convert_to_features(examples, max_seq_length: int, **kwargs):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of
        # this drop, you can customize this part to your needs.
        total_length = (total_length // max_seq_length) * max_seq_length
        # Split by chunks of max_len.
        result = {
            k: [t[i:i + max_seq_length] for i in range(0, total_length, max_seq_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    @property
    def collate_fn(self) -> Callable:
        if self.cfg.wwm:
            return DataCollatorForWholeWordMask(self.tokenizer, mlm_probability=self.cfg.mlm_probability)
        else:
            return DataCollatorForLanguageModeling(self.tokenizer, mlm_probability=self.cfg.mlm_probability)
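The `convert_to_features` chunking above drops whatever tail does not fill a complete `max_seq_length` block. A quick standalone check of that arithmetic (plain Python, no tokenizer required; `chunk` is illustrative only):

    def chunk(tokens, max_seq_length):
        total = (len(tokens) // max_seq_length) * max_seq_length   # drop the remainder
        return [tokens[i:i + max_seq_length] for i in range(0, total, max_seq_length)]

    assert chunk(list(range(10)), 4) == [[0, 1, 2, 3], [4, 5, 6, 7]]   # tokens 8 and 9 are dropped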
41.68
119
0.668906
592
5,210
5.684122
0.342905
0.024963
0.033284
0.020208
0.230906
0.184844
0.164636
0.164636
0.112927
0.084398
0
0.002844
0.257582
5,210
124
120
42.016129
0.867115
0.278311
0
0.15
0
0
0.009722
0
0
0
0
0
0
1
0.0625
false
0
0.075
0
0.2375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d5b404b1dc5e6d856457623444459cb2d318391
297
py
Python
lc/1461_CheckIfAStringContainsAllBinaryCodesOfSizeK.py
xiangshiyin/coding-challenge
a75a644b96dec1b6c7146b952ca4333263f0a461
[ "Apache-2.0" ]
null
null
null
lc/1461_CheckIfAStringContainsAllBinaryCodesOfSizeK.py
xiangshiyin/coding-challenge
a75a644b96dec1b6c7146b952ca4333263f0a461
[ "Apache-2.0" ]
null
null
null
lc/1461_CheckIfAStringContainsAllBinaryCodesOfSizeK.py
xiangshiyin/coding-challenge
a75a644b96dec1b6c7146b952ca4333263f0a461
[ "Apache-2.0" ]
null
null
null
class Solution:
    def hasAllCodes(self, s: str, k: int) -> bool:
        seen = set()
        i = 0
        n = len(s)
        while i <= n - k:
            if s[i:i + k] not in seen:
                seen.add(s[i:i + k])
            i += 1
        return len(seen) == 2 ** k
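The solution slides a length-k window across s and collects the distinct substrings; s contains every binary code of size k exactly when 2**k distinct windows were seen, giving O(n * k) time and O(2**k * k) space. A quick sanity check:

    sol = Solution()
    assert sol.hasAllCodes("00110110", 2) is True   # windows: 00, 01, 11, 10
    assert sol.hasAllCodes("0110", 2) is False      # "00" never appears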
21.214286
50
0.363636
42
297
2.571429
0.571429
0.037037
0.055556
0.074074
0
0
0
0
0
0
0
0.020134
0.498317
297
14
51
21.214286
0.704698
0
0
0
0
0
0
0
0
0
0
0
0
1
0.1
false
0
0
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d5ee2879ab66f8685eefe4e79bc72d5182956c0
8,313
py
Python
pythonFiles/arcgis_Script.py
huangysh/ASCA_Cluster
3f7ff5df514cbe48730ba0634abe7f9726d3b98e
[ "MIT" ]
null
null
null
pythonFiles/arcgis_Script.py
huangysh/ASCA_Cluster
3f7ff5df514cbe48730ba0634abe7f9726d3b98e
[ "MIT" ]
null
null
null
pythonFiles/arcgis_Script.py
huangysh/ASCA_Cluster
3f7ff5df514cbe48730ba0634abe7f9726d3b98e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# **********************************************************************************************************************
# MIT License
# Copyright (c) 2020 School of Environmental Science and Engineering, Shanghai Jiao Tong University
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
# This file is part of the ASCA Algorithm; it is used for spatial point clustering analysis. This model contains
# three main parts: point trend analysis, point cluster analysis, and spatial visualization.
#
# Author: Yuansheng Huang
# Date: 2020-06-18
# Version: V 1.2

# Literature
# ==========
# Yuansheng Huang, Peng Li, Yiliang He: To centralize or to decentralize? A systematic framework for optimizing
# rural wastewater treatment investment
# Clark and Evans, 1954; Gao, 2013
# **********************************************************************************************************************

# general import
import gc
import os
import sys

from ASCA_Functions import *  # NOTE: arcpy and all helper functions below come in via this star import

pythonScriptPath = r"...\RuST\ASCA_Cluster\pythonFiles"

gc.disable()
pythonPath = os.getcwd()
sys.path.append(pythonPath)
sys.path.append(pythonScriptPath)

# ======================================================================================================================
# Inputs from the ArcGIS tool dialog
# ======================================================================================================================
arcpy.env.overwriteOutput = True
buildings = sys.argv[1]     # Building shape file
studyArea = sys.argv[2]
obstacleFile = sys.argv[3]  # optional parameter
num = int(sys.argv[4])      # pipe length constraint [meters]; optional parameter
outputFolder = sys.argv[5]  # folder setting
outputFile = outputFolder + "/" + "Cluster.shp"
addFiledFile = outputFile   # sys.argv[5] + ".shp"

# ----------------------------------------------------------------------------------------------------------------------
# Determine the spatial distribution pattern of the point set
# ----------------------------------------------------------------------------------------------------------------------
pointList, spatialRef = readSpatialPoint(buildings)  # read spatial points and the input file's spatial reference
distanceList = getNearestDistance(pointList)
area = float(readArea(studyArea))                    # read the surface area of the study region
index, _ = NNI(pointList, distanceList, area)
triangleVertexIndex, triangleVertexCoordinate = getDelaunayTriangle(pointList)  # TODO: verify unused parameters

# Report the distribution trend of the point set
arcpy.AddMessage("\n")
arcpy.AddMessage("************************************")
arcpy.AddMessage("Points spatial cluster analysis was successfully calculated!!")
arcpy.AddMessage("NNI index : " + str(index))
arcpy.AddMessage("************************************")

# Start the cluster analysis of the point set
arcpy.AddMessage("\n")
arcpy.AddMessage("====================================")
arcpy.AddMessage("Ready for cluster module...")
arcpy.AddMessage("====================================")
arcpy.AddMessage("\n")

_, edgeList = getEdgeLength(triangleVertexIndex, triangleVertexCoordinate)

if index >= 1:  # points are uniformly (NNI > 1) or randomly (NNI = 1) distributed
    arcpy.AddMessage("Random distribution OR Uniform distribution (NNI >= 1)")
    arcpy.AddMessage("Skip cluster analysis module!!!" + "\n" + "Perform Obstacle and Restriction analysis!!!")

    # obstacle
    if len(obstacleFile) > 1:
        obstacleList = readObstacle(obstacleFile)
        reachableEdge = getReachableEdge(edgeList, obstacleList, pointList)
        indexList_O = aggregation(reachableEdge)
        mark_O = "O"
        cluster(pointList, indexList_O, mark_O)
        arcpy.AddMessage("Unreachable edges were deleted!!!")
    else:
        reachableEdge = edgeList[:]
        pointList = [i + ["O0"] for i in pointList]
        arcpy.AddMessage("No obstacles!!!")

    # restrict
    if num > 0:
        unrestrictedEdge = deleteRestrictionEdge(reachableEdge, num)
        indexList_C = aggregation(unrestrictedEdge)
        mark_C = "C"
        cluster(pointList, indexList_C, mark_C)
        arcpy.AddMessage("Restricted edges were deleted!!!")
    else:
        unrestrictedEdge = reachableEdge[:]
        pointList = [i + i[-1] for i in pointList]
        arcpy.AddMessage("No Length restriction!!!")

    createShapeFile(pointList, spatialRef, outputFile)
    addMarkerFields(addFiledFile, pointList)

    arcpy.AddMessage("-----" + "Spatial Cluster Model successfully performed!" + "-----")

elif index < 1:  # points are clustered
    arcpy.AddMessage("Spatial points is aggregated, perform cluster analysis Module!!!")

    # obstacle
    if len(obstacleFile) > 1:
        obstacleList = readObstacle(obstacleFile)
        reachableEdge = getReachableEdge(edgeList, obstacleList, pointList)
        indexList_O = aggregation(reachableEdge)
        mark_O = "O"
        cluster(pointList, indexList_O, mark_O)  # return marked pointList
        arcpy.AddMessage("Unreachable edges were deleted!!!" + "\n")
    else:
        reachableEdge = edgeList[:]
        pointList = [i + ["O0"] for i in pointList]
        arcpy.AddMessage("No obstacles!!!" + "\n")

    # global long edge  todo: check
    globalEdgeMean, globalEdgeVariation = getGlobalEdgeStatistic(reachableEdge)
    firstOrderEdges, _ = getFirstOrderEdges(pointList, reachableEdge)
    firstOrderEdgesMean = getFirstOrderEdgesMean(firstOrderEdges)
    globalCutValueList = getGlobalCutValue(globalEdgeMean, globalEdgeVariation, firstOrderEdgesMean)
    globalOtherEdgeList = getGlobalOtherEdge(reachableEdge, globalCutValueList)
    indexListG = aggregation(globalOtherEdgeList)
    markG = "G"
    cluster(pointList, indexListG, markG)
    arcpy.AddMessage("Global long edges were deleted !!!" + "\n")

    # local long edge
    subgraphVertexList, subgraphEdgeList = getSubgraphEdge(pointList, globalOtherEdgeList, indexListG)
    subgraphSecondOrderEdgeMean = getSecondOrderEdges(subgraphVertexList, subgraphEdgeList)
    subgraphMeanVariation = getSubgraphEdgeStatistic(subgraphVertexList, subgraphEdgeList)
    subgraphLocalCutValueList = getLocalCutValue(subgraphMeanVariation, subgraphSecondOrderEdgeMean)
    localOtherEdge = getLocalOtherEdge(globalOtherEdgeList, subgraphLocalCutValueList)
    indexListL = aggregation(localOtherEdge)
    markL = "L"
    cluster(pointList, indexListL, markL)
    arcpy.AddMessage("Local long edges were deleted !!!" + "\n")

    # restrict
    if num > 0:
        unrestrictedEdge = deleteRestrictionEdge(localOtherEdge, num)
        indexList_C = aggregation(unrestrictedEdge)
        mark_C = "C"
        cluster(pointList, indexList_C, mark_C)
        arcpy.AddMessage("Restricted edges were deleted!!!" + "\n")
    else:
        unrestrictedEdge = localOtherEdge[:]
        pointList = [i + i[-1] for i in pointList]
        arcpy.AddMessage("No Length restriction!!!" + "\n")

    arcpy.AddMessage("pointList:")
    arcpy.AddMessage(str(pointList))

    createShapeFile(pointList, spatialRef, outputFile)
    addMarkerFields(addFiledFile, pointList)

    D = list(set([i[7] for i in pointList]))  # local long edge
    LIST = [len(pointList), index, len(D)]
    output = outputFolder + "/" + "Cluster"
    name = "Output"
    outputWriteToTxt(output, name, LIST, pointList)

    arcpy.AddMessage("-----" + "Spatial Cluster Model successfully performed!" + "-----")

arcpy.AddMessage("\n")
41.358209
120
0.634428
775
8,313
6.778065
0.396129
0.079954
0.036551
0.014278
0.285741
0.276604
0.241196
0.194936
0.170569
0.170569
0
0.006156
0.15975
8,313
200
121
41.565
0.745884
0.337303
0
0.389381
0
0
0.169389
0.032483
0
0
0
0.005
0
1
0
false
0
0.035398
0
0.035398
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d5f3bebf3edb62a00a49fe6236b8ebde098e6fa
19,903
py
Python
examples/example_topop_tb_v4_analysis_roi.py
qiancao/BoneBox
0d10dac7c93f16f0643bebc62c63be2f4bd099f6
[ "BSD-3-Clause" ]
1
2022-03-11T20:49:19.000Z
2022-03-11T20:49:19.000Z
examples/example_topop_tb_v4_analysis_roi.py
qiancao/BoneBox
0d10dac7c93f16f0643bebc62c63be2f4bd099f6
[ "BSD-3-Clause" ]
null
null
null
examples/example_topop_tb_v4_analysis_roi.py
qiancao/BoneBox
0d10dac7c93f16f0643bebc62c63be2f4bd099f6
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Aug 20 21:29:32 2021 @author: qcao Analysis code for example_topop_tb_v3.py Parses and cleans load-driven phantoms. Computes Radiomic signatures. Compares with BvTv. Compare with ROIs """ # FEA and BoneBox Imports import os import sys sys.path.append('../') # use bonebox from source without having to install/build from bonebox.phantoms.TrabeculaeVoronoi import * from bonebox.FEA.fea import * import numpy as np import matplotlib.pyplot as plt from matplotlib import colors import vtk from pyvistaqt import BackgroundPlotter from skimage.morphology import ball, closing, binary_dilation, binary_closing import pyvista as pv pv.set_plot_theme("document") # For PyRadiomics import logging import six import SimpleITK as sitk import radiomics from radiomics import featureextractor from radiomics import firstorder, getTestCase, glcm, glrlm, glszm, imageoperations, shape volumeShape = (100,100,100) def ind2dir(ss,uu): # converts ss and uu to output directories saveNameAppend = "_phantom_ss_"+str(ss)+"_uu_"+str(uu) return "/data/BoneBox-out/topopt/lazy_v3_sweep/randstate_"+str(ss)+saveNameAppend+"/" def getBVFandE(ss,uu): # Parse output directory given series and Ul index BVF = np.nan elasticModulus = np.nan out_dir = ind2dir(ss,uu) if os.path.exists(out_dir): if os.path.exists(out_dir+"bvf7.npy"): BVF = np.load(out_dir+"bvf7.npy") if os.path.exists(out_dir+"elasticModulus7.npy"): elasticModulus = np.load(out_dir+"elasticModulus7.npy") return BVF, elasticModulus def getVolume(ss,uu): # Parse output directory and get volume volume = np.zeros(volumeShape) volume[:] = np.nan out_dir = ind2dir(ss,uu) if os.path.exists(out_dir): if os.path.exists(out_dir+"volume_8.npy"): volume = np.load(out_dir+"volume_8.npy") return volume def computeWaveletFeatures(image, mask, featureFunc=glcm.RadiomicsGLCM): """ featureFunc: firstorder.RadiomicsFirstOrder glcm.RadiomicsGLCM """ featureNames = [] featureVals = [] for decompositionImage, decompositionName, inputKwargs in imageoperations.getWaveletImage(image, mask): waveletFirstOrderFeaturs = featureFunc(decompositionImage, mask, **inputKwargs) waveletFirstOrderFeaturs.enableAllFeatures() results = waveletFirstOrderFeaturs.execute() print('Calculated firstorder features with wavelet ', decompositionName) for (key, val) in six.iteritems(results): waveletFeatureName = '%s_%s' % (str(decompositionName), key) print(' ', waveletFeatureName, ':', val) featureNames.append(waveletFeatureName) featureVals.append(val) return featureNames, np.array(featureVals) def calculate_fid(act1, act2): # calculate mean and covariance statistics mu1, sigma1 = act1.mean(axis=0), cov(act1, rowvar=False) mu2, sigma2 = act2.mean(axis=0), cov(act2, rowvar=False) # calculate sum squared difference between means ssdiff = numpy.sum((mu1 - mu2)**2.0) # calculate sqrt of product between cov covmean = sqrtm(sigma1.dot(sigma2)) # check and correct imaginary numbers from sqrt if iscomplexobj(covmean): covmean = covmean.real # calculate score fid = ssdiff + trace(sigma1 + sigma2 - 2.0 * covmean) return fid if __name__ == "__main__": save_dir = "/data/BoneBox-out/topopt/lazy_v3_sweep/" # Generate N phantom series, 3 resorption intensities per series Nseries = 400 Nresorption = 3 # Create array of BVFs and ElasticModuli bvfs = np.zeros((Nseries, Nresorption)) Es = np.zeros((Nseries, Nresorption)) # Array of random Uls (between 0.1 and 0.25), should be same as in example script. 
randStateUls = 3012 Ulmin = 0.1 Ulmax = 0.25 Uls = sampleUniformZeroOne(((Nseries,Nresorption)), randState=randStateUls)*(Ulmax-Ulmin) + Ulmin # Retrieve BVF and ElasticModulus for ss in range(Nseries): for uu in range(Nresorption): bvfs[ss,uu], Es[ss,uu] = getBVFandE(ss,uu) inds = np.invert(np.isnan(bvfs)) inds_nz = np.nonzero(inds) # Correlation Coefficients def linearFit(xx, yy): # r2 with radiomics # returns fit x, fit y, rs mfit, bfit = np.polyfit(xx, yy, 1) rs = np.corrcoef(xx, yy)[0,1]**2 mi, ma = np.min(xx), np.max(yy) xxx = np.array([mi, ma]) yyy = mfit*xxx + bfit return xxx, yyy, rs def reject_outliers(data, m=2): ind = abs(data - np.mean(data)) < m * np.std(data) return ind, data[ind] # Correlation Coefficients def linearFitRejectOutliers(xx, yy): # r2 with radiomics # returns fit x, fit y, rs ind, yy = reject_outliers(yy, m=2) xx = xx[ind] mfit, bfit = np.polyfit(xx, yy, 1) rs = np.corrcoef(xx, yy)[0,1]**2 mi, ma = np.min(xx), np.max(yy) xxx = np.array([mi, ma]) yyy = mfit*xxx + bfit return xxx, yyy, rs # Correlation Coefficients def polyFitRejectOutliers(xx, yy, order = 2): # r2 with radiomics # returns fit x, fit y, rs ind, yy = reject_outliers(yy, m=2) xx = xx[ind] p = np.polyfit(xx, yy, 1) yyf = np.polyval(p,xx) rs = np.corrcoef(yy, yyf)[0,1]**2 mi, ma = np.min(xx), np.max(yy) xxx = np.array([mi, ma]) return np.sort(xx), np.sort(yyf), rs # Plot BVF and Elastic Modulus vs Uls fig, ax1 = plt.subplots() xx, yy, rs1 = linearFitRejectOutliers(Uls[inds].flatten(), bvfs[inds].flatten()) ax1.plot(Uls[inds].flatten(), bvfs[inds].flatten(),'ko') ax1.plot(xx, yy, 'k-') ax1.set_ylim(0.16,0.28) ax1.set_xlabel("Resorption Threshold $U_l$") ax1.set_ylabel("BVF") ax1.grid("major") ax1.set_xlim(0.1,0.25) xx, yy, rs2 = linearFitRejectOutliers(Uls[inds].flatten(), Es[inds].flatten()) ax2 = ax1.twinx() ax2.plot(Uls[inds].flatten(), Es[inds].flatten(),'rv') ax2.plot(xx, yy, 'r--') ax2.set_ylabel("Elastic Modulus $E$",color='r') ax2.set_ylim(0,10e7) ax2.tick_params(axis ='y', labelcolor = 'r') plt.savefig(save_dir+"BVF_Es_vs_Ul.png") print("BVF vs Ul: r2="+str(rs1)) print("Es vs Ul: r2="+str(rs2)) # np.corrcoef(bvfs[inds], Es[inds]) # np.corrcoef(bvfs[inds], Uls[inds]) # # np.corrcoef(Es[inds], Uls[inds]) # Plot Es vs BVF fig, ax1 = plt.subplots() xx, yy, rs3 = polyFitRejectOutliers(bvfs[inds].flatten(), Es[inds].flatten()) ax1.plot(bvfs[inds].flatten(), Es[inds].flatten(),'ko') ax1.plot(xx, yy, 'k-') ax1.set_ylim(0,3e7) ax1.set_xlim(0.16,0.28) ax1.set_xlabel("BVF") ax1.set_ylabel("Elastic Modulus $E$") ax1.grid("major") plt.savefig(save_dir+"Es_vs_BVF.png") print("Es vs BVF: r2="+str(rs3)) #% Look at radiomics features # Initialize array of features features = np.zeros((Nseries, Nresorption, 93)) features[:] = np.nan # Define settings for signature calculation # These are currently set equal to the respective default values settings = {} settings['binWidth'] = 25 settings['resampledPixelSpacing'] = None # [3,3,3] is an example for defining resampling (voxels with size 3x3x3mm) settings['interpolator'] = sitk.sitkBSpline settings['imageType'] = ['original','wavelet'] # Initialize feature extractor extractor = featureextractor.RadiomicsFeatureExtractor(**settings) extractor.enableImageTypeByName("Wavelet") # extractor.disableAllImageTypes() # extractor.enableImageTypeByName(imageType="Original") # extractor.enableImageTypeByName(imageType="Wavelet") # extractor.enableFeatureClassByName("glcm") # Test extraction pipeline on one volume ss = 0; uu = 0 volume = getVolume(ss,uu).astype(int)*255 
volumeSITK = sitk.GetImageFromArray(volume) maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int)) wvltFeatureNames, wvltFeatures = computeWaveletFeatures(volumeSITK, maskSITK) featureVectorOriginal = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original") volumeSITKWavelets = radiomics.imageoperations.getWaveletImage(volumeSITK, maskSITK) featureVectorWavelet = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="wavelet") featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original") #% computeFeatures = False if computeFeatures: wvltFeatures = np.zeros((Nseries, Nresorption, 192)) wvltFeatures[:] = np.nan # Extract volume and compute features for ss in range(Nseries): for uu in range(Nresorption): if inds[ss,uu]: volume = getVolume(ss,uu).astype(int)*255 volumeSITK = sitk.GetImageFromArray(volume) maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int)) featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original") featureVectorArray = np.array([featureVector[featureName].item() for featureName in featureVector.keys()]) features[ss,uu,:] = featureVectorArray wvltFeatureNames, wvltFeatures[ss,uu,:] = computeWaveletFeatures(volumeSITK, maskSITK) # Reshape feature matrices featuresReshaped = features.reshape((-1,93), order='F') wvltFeaturesReshaped = wvltFeatures.reshape((-1,192), order='F') indsReshaped = inds.reshape((-1,), order='F') featuresReshaped = featuresReshaped[indsReshaped,:] wvltFeaturesReshaped = wvltFeaturesReshaped[indsReshaped,:] # Save feature vectors np.save(save_dir+"features",features) np.save(save_dir+"featuresReshaped",featuresReshaped) np.save(save_dir+"wvltFeaturesReshaped",wvltFeaturesReshaped) #%% Radiomic Features of ROIs plt.close('all') import nrrd import glob def readROI(filename): roiBone, header = nrrd.read(filename) roiBone[roiBone==255] = 1 # units for this is volume return roiBone roi_dir = "/data/BoneBox/data/rois/" Nrois = len(glob.glob(roi_dir+"isodata_*_roi_*.nrrd")) featuresROI = np.zeros((Nrois,93)) for ind in range(Nrois): print(ind) fn = glob.glob(roi_dir+"isodata_*_roi_"+str(ind)+".nrrd")[0] roiBone = readROI(fn) volume = roiBone.astype(int)*255 # Take ROI center volume = volume[50:150,50:150,50:150] volumeSITK = sitk.GetImageFromArray(volume) maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int)) featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original") featureVectorArray = np.array([featureVector[featureName].item() for featureName in featureVector.keys()]) featuresROI[ind,:] = featureVectorArray # wvltFeatureNames, wvltFeatures[ss,uu,:] = computeWaveletFeatures(volumeSITK, maskSITK) np.save(save_dir+"featuresROI",featuresROI) #%% featureNames = list(featureVector.keys()) import seaborn as sns import pandas as pd sns.set_theme(style="whitegrid") featuresReshaped = np.load(save_dir+"featuresReshaped.npy") featuresROI = np.load(save_dir+"featuresROI.npy") featuresAll = np.vstack((featuresReshaped,featuresROI)) sourceList = [] for ii in range(200): sourceList.append("Phantom") for ii in range(208): sourceList.append("L1 Spine") df = pd.DataFrame(data = featuresAll, columns = featureNames) df["source"] = sourceList df["all"] = "" fig_dir = save_dir+"comparison_with_rois/" if not os.path.exists(fig_dir): os.mkdir(fig_dir) # Draw a nested violinplot and split the violins for easier comparison for ind in range(93): fig, ax = plt.subplots(figsize=(5,10)) sns.violinplot(data=df, x="all", 
y=featureNames[ind], hue="source", split=True, inner="quart", linewidth=1) sns.despine(left=True) plt.savefig(fig_dir+"fig_"+str(ind)+"_"+featureNames[ind]) plt.close("all") #%% Komogorov-smirnov test from scipy.stats import ks_2samp kss = np.zeros(93) ps = np.zeros(93) for ind in range(93): kss[ind], ps[ind] = scipy.stats.ks_2samp(featuresReshaped[:,ind], featuresROI[:,ind]) #%% Prep data for regressor # # Extract Feature Names # featureNames = list(featureVector.keys()) # indsReshaped = inds.reshape((-1,), order='F') # features = np.load(save_dir+"features.npy") # featuresReshaped = np.load(save_dir+"featuresReshaped.npy") # wvltFeaturesReshaped = np.load(save_dir+"wvltFeaturesReshaped.npy") # EsReshaped = Es.reshape((-1,), order='F')[indsReshaped] # bvfsReshaped = bvfs.reshape((-1,), order='F')[indsReshaped] # # combine BVF with wavelet GLCM features # # features_norm = np.concatenate((bvfsReshaped[:,None],wvltFeaturesReshaped),axis=1) # featuresReshaped # Feature Vector # features_norm = np.concatenate((bvfsReshaped[:,None],featuresReshaped),axis=1) # featuresReshaped # Feature Vector # features_norm -= np.mean(features_norm,axis=0) # center on mean # features_norm /= np.std(features_norm,axis=0) # scale to standard deviation # features_norm[np.isnan(features_norm)] = 0 # # features_norm_names = ["BVF"]+wvltFeatureNames # features_norm_names = ["BVF"]+featureNames # roi_vm_mean = EsReshaped # Label # # Reject pathologic outliers in the dataset # ii, roi_vm_mean = reject_outliers(roi_vm_mean, m=1) # features_norm = features_norm[ii,:] # bvfsReshaped = bvfsReshaped[ii] # Ntrain = 110 # Training Testing Split # #% Feature selection # from sklearn.feature_selection import SelectKBest, VarianceThreshold # from sklearn.feature_selection import chi2, f_classif, f_regression # # # features_norm = SelectKBest(f_regression, k=20).fit_transform(features_norm, roi_vm_mean) # # features_norm = VarianceThreshold(0.95).fit_transform(features_norm) # # print(features_norm.shape) # #% # ytrain = roi_vm_mean[:Ntrain] # ytest = roi_vm_mean[Ntrain:] # Xtrain1 = features_norm[:Ntrain,:] # Xtrain2 = bvfsReshaped[:Ntrain].reshape(-1,1) # Xtest1 = features_norm[Ntrain:,:] # Xtest2 = bvfsReshaped[Ntrain:].reshape(-1,1) # # from xgboost import XGBRegressor # # from sklearn.model_selection import cross_val_score # # scores = cross_val_score(XGBRegressor(objective='reg:squarederror'), Xtrain1, ytrain, scoring='neg_mean_squared_error') # #%% Radiomics + Random Forestocu # plt.close('all') # import random # randState = 123 # random.seed(randState) # # non-linear without feature selection # from sklearn.ensemble import RandomForestRegressor # from sklearn.model_selection import GridSearchCV # param_grid = [ # {'max_depth': [2,4,8,16,32,64], # 16 # 'max_leaf_nodes': [2,4,8,16,32,64], # 8 # 'n_estimators': [10,50,100,150,200]} # 50 # ] # # param_grid = [ # # {'max_depth': [2,4,8,16], # 16 # # 'max_leaf_nodes': [2,4,8,16], # 8 # # 'n_estimators': [10,50,100]} # 50 # # ] # rfr = GridSearchCV( # RandomForestRegressor(random_state = randState), # param_grid, cv = 5, # scoring = 'explained_variance', # n_jobs=-1 # ) # rfr2 = GridSearchCV( # RandomForestRegressor(random_state = randState), # param_grid, cv = 5, # scoring = 'explained_variance', # n_jobs=-1 # ) # # Fit with full set of features. # grid_result = rfr.fit(Xtrain1, ytrain) # yTrain_fit_rfr = rfr.predict(Xtest1) # print("Best estimator for BvF+radiomics...") # rfr.best_estimator_ # # Fit with BVTV only. 
# grid_result2 = rfr2.fit(Xtrain2, ytrain) # yTrain_fit_rfr2 = rfr2.predict(Xtest2) # print("Best estimator for BVF...") # rfr2.best_estimator_ # # r2 with radiomics # mfit, bfit = np.polyfit(ytest, yTrain_fit_rfr, 1) # rs = np.corrcoef(roi_vm_mean[Ntrain:], yTrain_fit_rfr)[0,1]**2 # print("BVF+Radiomics rs:"+str(rs)) # # r2 with BVFS # mfit2, bfit2 = np.polyfit(roi_vm_mean[Ntrain:], yTrain_fit_rfr2, 1) # rs2 = np.corrcoef(roi_vm_mean[Ntrain:], yTrain_fit_rfr2)[0,1]**2 # print("BVF rs:"+str(rs2)) # plt.figure() # plt.plot(roi_vm_mean[Ntrain:],yTrain_fit_rfr2,'bv') # plt.plot(roi_vm_mean[Ntrain:], mfit2*roi_vm_mean[Ntrain:] + bfit2, "b--") # plt.plot(roi_vm_mean[Ntrain:],yTrain_fit_rfr,'ko') # plt.plot(roi_vm_mean[Ntrain:], mfit*roi_vm_mean[Ntrain:] + bfit, "k-") # plt.xlabel("$\mu$FE Elastic Modulus") # plt.ylabel("Predicted Elastic Modulus") # plt.savefig(save_dir+"Elastic Modulus Predicted vs True.png") # plt.close("all") # # Plot feature importance # importances = rfr.best_estimator_.feature_importances_ # indices = np.argsort(importances)[::-1] # std = np.std([tree.feature_importances_ for tree in rfr.best_estimator_], axis = 0) # plt.figure() # plt.title('Feature importances') # plt.barh(range(20), importances[indices[0:20]], yerr = std[indices[0:20]], align = 'center',log=True) # plt.yticks(range(20), list(features_norm_names[i] for i in indices[0:20] ), rotation=0) # plt.gca().invert_yaxis() # plt.show() # plt.subplots_adjust(left=0.7,bottom=0.1, right=0.8, top=0.9) # plt.savefig(save_dir+"Feature Importances.png") # plt.close("all") # #%% Changes in radiomic signature with remodeling # # Retrieve indices of samples with continuous Uls # indsUl = (np.sum(inds,axis=1) == 3) # indsUlnz = np.nonzero(indsUl)[0] # featuresCrop = features[indsUlnz,:,:] # UlsCrop = Uls[indsUlnz,:] # plt.figure() #%% # grid_result = rfr.fit(features_norm[:Ntrain,:], roi_vm_mean[:Ntrain]) # yTest_fit_rfr = rfr.predict(features_norm[Ntrain:]) # # sns.set(font_scale=1) # mfit, bfit = np.polyfit(roi_vm_mean[Ntrain:], yTest_fit_rfr, 1) # pr2 = np.corrcoef(roi_vm_mean[Ntrain:], yTest_fit_rfr)[0,1]**2 # print(pr2) # plt.figure() # plt.plot(roi_vm_mean[Ntrain:],yTest_fit_rfr,'ko') # plt.plot(roi_vm_mean[Ntrain:], mfit*roi_vm_mean[Ntrain:] + bfit, "b--") # importances = rfr.best_estimator_.feature_importances_ # indices = np.argsort(importances)[::-1] # std = np.std([tree.feature_importances_ for tree in rfr.best_estimator_], axis = 0) # plt.figure() # plt.title('Feature importances') # plt.barh(range(20), importances[indices[0:20]], yerr = std[indices[0:20]], align = 'center',log=True) # plt.yticks(range(20), list(featureNames[i] for i in indices[0:20] ), rotation=0) # plt.gca().invert_yaxis() # plt.show() # plt.subplots_adjust(left=0.7,bottom=0.1, right=0.8, top=0.9)
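The script above ends with a Kolmogorov-Smirnov comparison between phantom-derived and ROI-derived radiomic features. A minimal, self-contained sketch of that comparison on synthetic data (the array names and distributions below are invented stand-ins, not the script's own variables):

import numpy as np
from scipy.stats import ks_2samp

rng = np.random.default_rng(0)
phantom_feature = rng.normal(loc=0.0, scale=1.0, size=200)  # stand-in for one column of featuresReshaped
roi_feature = rng.normal(loc=0.2, scale=1.1, size=208)      # stand-in for one column of featuresROI

# Two-sample KS test: small p suggests the two feature distributions differ
statistic, p_value = ks_2samp(phantom_feature, roi_feature)
print(f"KS statistic = {statistic:.3f}, p = {p_value:.3g}")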
33.848639
127
0.631061
2,392
19,903
5.146739
0.231605
0.021444
0.015352
0.020713
0.354967
0.309967
0.26456
0.2328
0.190155
0.190155
0
0.026779
0.240115
19,903
588
128
33.848639
0.787226
0.395418
0
0.204348
0
0
0.065108
0.01309
0
0
0
0
0
1
0.043478
false
0
0.095652
0
0.182609
0.026087
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d61966b535b9419b168bfc49be236b95f338598
1,711
py
Python
getKeypoints.py
franzqueissner/mimic-detection
9dc49cf57baaa7da8bb1d8eee0efe00b57384fca
[ "MIT" ]
null
null
null
getKeypoints.py
franzqueissner/mimic-detection
9dc49cf57baaa7da8bb1d8eee0efe00b57384fca
[ "MIT" ]
null
null
null
getKeypoints.py
franzqueissner/mimic-detection
9dc49cf57baaa7da8bb1d8eee0efe00b57384fca
[ "MIT" ]
null
null
null
import os
import threading
import json
from classes import Keypoint


def wait_for_frames():
    while not os.path.isdir('keypoints/run0'):
        print("waiting for frame dir, pls start openpose")
    print("dir detected!")
    while len(os.listdir("keypoints/run0")) == 0:
        print("dir empty, waiting for frames")
    print("frame detected!")


def get_frames():
    return os.listdir("keypoints/run0")


def collect_keypoints(frame):
    os.chdir("keypoints/run0")
    # read json file
    with open(frame, 'r') as frame:
        data=frame.read()
    # parse file
    obj = json.loads(data)
    # get raw keypoints list
    if not obj['people']:
        os.chdir("../..")
        print("No people found")
        return []
    else:
        keypoints_face_raw = obj['people'][0]['face_keypoints_2d']
    os.chdir("../..")
    print("collected keypoints successfully!")
    return keypoints_face_raw


def filter_keypoints(keypoints_face_raw):
    # filter different values
    x_positions = keypoints_face_raw[::3]
    y_positions = keypoints_face_raw[1::3]
    c_values = keypoints_face_raw[2::3]
    # generate keypoints list
    keypoints_face = []
    for (x, y, c) in zip(x_positions, y_positions, c_values):
        keypoints_face.append(Keypoint(x, y, c))
    # filter different features
    keypoints_eyebrows = keypoints_face[17:27]
    keypoints_nosebridge = keypoints_face[27:31]
    keypoints_nostrils = keypoints_face[31:36]
    keypoints_eyes = keypoints_face[36:48]
    keypoints_mouth = keypoints_face[48:68]
    # log
    print("filtered keypoints successfully!")
    return keypoints_eyebrows, keypoints_nosebridge, keypoints_nostrils, keypoints_eyes, keypoints_mouth
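A hedged driver sketch, not part of getKeypoints.py, showing how its helpers are meant to chain together; the pacing value and the assumption that OpenPose keeps writing per-frame JSON into keypoints/run0 are additions here.

import time

import getKeypoints

getKeypoints.wait_for_frames()
for frame in sorted(getKeypoints.get_frames()):
    raw = getKeypoints.collect_keypoints(frame)
    if raw:
        # filter_keypoints splits the raw list into facial regions
        eyebrows, nosebridge, nostrils, eyes, mouth = getKeypoints.filter_keypoints(raw)
        print(f"{frame}: {len(mouth)} mouth keypoints")
    time.sleep(0.1)  # crude pacing between frames; adjust to the capture rate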
29
104
0.675044
219
1,711
5.082192
0.374429
0.151842
0.086253
0.039533
0
0
0
0
0
0
0
0.023704
0.210988
1,711
58
105
29.5
0.800741
0.073641
0
0.04878
0
0
0.172808
0
0
0
0
0
0
1
0.097561
false
0
0.097561
0.02439
0.292683
0.170732
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6d635c6db89e149bfd386a8f61c701f4329339cc
6,311
py
Python
locations/spiders/dickeys_barbecue_pit.py
davidchiles/alltheplaces
6f35f6cd652e7462107ead0a77f322caff198653
[ "MIT" ]
297
2017-12-07T01:29:14.000Z
2022-03-29T06:58:01.000Z
locations/spiders/dickeys_barbecue_pit.py
davidchiles/alltheplaces
6f35f6cd652e7462107ead0a77f322caff198653
[ "MIT" ]
2,770
2017-11-28T04:20:21.000Z
2022-03-31T11:29:16.000Z
locations/spiders/dickeys_barbecue_pit.py
davidchiles/alltheplaces
6f35f6cd652e7462107ead0a77f322caff198653
[ "MIT" ]
111
2017-11-27T21:40:02.000Z
2022-01-22T01:21:52.000Z
import scrapy import re from urllib.parse import urlparse from locations.hours import OpeningHours from locations.items import GeojsonPointItem ALL_DAYS = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su'] class DickeysBarbecuePitSpider(scrapy.Spider): name = "dickeys_barbecue_pit" item_attributes = { 'brand': "Dickey's Barbecue Pit", 'brand_wikidata': "Q19880747" } allowed_domains = ["dickeys.com"] download_delay = 0.5 start_urls = ( "https://www.dickeys.com/location/search-by-state", ) def parse(self, response): directory_links = response.xpath('//a[@class="state-links"]/@href').extract() for link in directory_links: yield scrapy.Request( response.urljoin(link), callback=self.parse ) regex_phone_prefix = re.compile(r'^\s*Telephone\:\s*(.+)$') all_restaurants = response.xpath('//*[@itemtype="http://schema.org/Restaurant"]') for restaurant in all_restaurants: properties = { "name": restaurant.xpath('.//*[@itemprop="name"]/text()').get(), "addr_full": restaurant.xpath('.//*[@itemprop="streetAddress"]/text()').get(), "city": restaurant.xpath('.//*[@itemprop="addressLocality"]/text()').get(), "state": restaurant.xpath('.//*[@itemprop="addressRegion"]/text()').get(), "postcode": restaurant.xpath('.//*[@itemprop="postalCode"]/text()').get(), "phone": restaurant.xpath('.//a[starts-with(text(), "Telephone:")]/text()').get(), "website": response.url } # URLs with details of all restaurants in a given city look like: # '/location/search-by-city/<num>/<city-name>', where: # # <num> appears to be a number associated with the state containing the city # <city-name> is the name of the city. # # Strip off the '/location/search-by-city' prefix, then append the street address we found for each # restaurant. Use this as the unique ID of the restaurant in the crawl, as no other # reliable ID seems to appear in the data. ref = urlparse(response.url).path.split('/', maxsplit=3)[3] properties['ref'] = '_'.join([ref, properties['addr_full']]) # If phone has a 'Telephone: ' prefix, strip it away. match_phone = re.search(regex_phone_prefix, properties['phone']) if match_phone: properties['phone'] = match_phone.groups()[0] # Some fields may have leading/trailing space. We've seen that city often has both # trailing comma and space. for key in properties: properties[key] = properties[key].strip(', ') opening_hours = self.parse_hours(restaurant) if opening_hours: properties["opening_hours"] = opening_hours yield GeojsonPointItem(**properties) def parse_hours(self, restaurant_item): opening_hours = OpeningHours() opening_hours_str = restaurant_item.xpath('.//*[@itemprop="openingHours"]/@content').get() if opening_hours_str: regex = re.compile(r'(.+)\:\s*(\d{1,2}:\d{2} [A|P][M])\s*-\s*(\d{1,2}:\d{2} [A|P][M])', flags=re.IGNORECASE) # Opening hours specifications may look like either of the following: # # Open Daily: 11:00 AM - 9:00 PM # Mon-Thur: 11:00 AM - 8:00 PM,Fri-Sat: 11:00 AM - 9:00 PM,Sunday: 11:00 AM - 8:00 PM # for hours in opening_hours_str.split(','): hours = hours.strip() match = re.search(regex, hours) if match: day_range = match.group(1) open_time = match.group(2) close_time = match.group(3) if close_time == "00:00": close_time = "23:59" for day in self.get_days(day_range): opening_hours.add_range( day, open_time, close_time, time_format="%I:%M %p" ) return opening_hours.as_opening_hours() def get_days(self, day_range_str): day_range_str = day_range_str.casefold() # Most stores list same opening hours for every day of the week: "Open Daily: ..." 
if day_range_str == 'open daily': return ALL_DAYS # A few store list different hours for different days: else: start_and_end = day_range_str.split('-') # Handle cases like the "Sunday" in "Sunday: ..." if len(start_and_end) == 1: return [self.day_mapping(start_and_end[0])] # Handle cases like the "Mon-Thur" in "Mon-Thur: ..." elif len(start_and_end) == 2: try: start_index = ALL_DAYS.index(self.day_mapping(start_and_end[0])) end_index = ALL_DAYS.index(self.day_mapping(start_and_end[1])) if start_index < end_index: return ALL_DAYS[start_index:end_index+1] except ValueError: # In case we extract an unexpected string as a day of the week return None # In case the day/day range doesn't look like what we expect else: return None def day_mapping(self, day_str): day_str = day_str.casefold() if 'sunday'.startswith(day_str, ): return 'Su' elif 'monday'.startswith(day_str): return 'Mo' elif 'tuesday'.startswith(day_str): return 'Tu' elif 'wednesday'.startswith(day_str): return 'We' elif 'thursday'.startswith(day_str): return 'Th' elif 'friday'.startswith(day_str): return 'Fr' elif 'saturday'.startswith(day_str): return 'Sa' return None
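The trickiest part of this spider is turning strings like "Mon-Thur" into lists of day codes. A small hedged check of that logic, assuming the alltheplaces project layout so the locations package imports resolve:

from locations.spiders.dickeys_barbecue_pit import DickeysBarbecuePitSpider

spider = DickeysBarbecuePitSpider()
print(spider.get_days("open daily"))  # ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
print(spider.get_days("Mon-Thur"))    # ['Mo', 'Tu', 'We', 'Th']
print(spider.get_days("Sunday"))      # ['Su']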
41.519737
121
0.534464
717
6,311
4.566248
0.320781
0.047648
0.034209
0.047037
0.06292
0.06292
0.039401
0.031155
0.031155
0.025657
0
0.015463
0.344161
6,311
151
122
41.794702
0.77555
0.18238
0
0.049505
0
0.009901
0.149448
0.074423
0
0
0
0
0
1
0.039604
false
0
0.049505
0
0.287129
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed8cc5798db544fbe8ee797bd37f85e2f59ad788
8,156
py
Python
facePose.py
cyndi088/head-pose-estimation-face-landmark
f4ef5b977800cc8c0c54dae8b86d21f616ecb38b
[ "MIT" ]
null
null
null
facePose.py
cyndi088/head-pose-estimation-face-landmark
f4ef5b977800cc8c0c54dae8b86d21f616ecb38b
[ "MIT" ]
null
null
null
facePose.py
cyndi088/head-pose-estimation-face-landmark
f4ef5b977800cc8c0c54dae8b86d21f616ecb38b
[ "MIT" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- # pylint: disable=C0103 # pylint: disable=E1101 import os import numpy as np import cv2 import caffe def retifyxxyy(img, xxyy): """ let xxyy within image size img: image xxyy: left, right, top, bottom return modified xxyy """ img_height, img_width = img.shape[:2] xxyy = retifyxxyysize(img_height, img_width, xxyy) return xxyy def retifyxxyysize(img_height, img_width, xxyy): """return xxyy within image region img_height: img_width: xxyy: return xxyy """ xxyy[0] = max(xxyy[0], 0) xxyy[1] = max(xxyy[1], 0) xxyy[2] = max(xxyy[2], 0) xxyy[3] = max(xxyy[3], 0) xxyy[0] = min(xxyy[0], img_width) xxyy[1] = min(xxyy[1], img_width) xxyy[2] = min(xxyy[2], img_height) xxyy[3] = min(xxyy[3], img_height) return xxyy def getCutSize(xxyy, left, right, top, bottom): #left, right, top, and bottom u""" xxyy: left: right: top: bottom: left, right, top, bottom are ratio. The return value is a region with a margin. """ box_width = xxyy[1] - xxyy[0] box_height = xxyy[3] - xxyy[2] cut_size = np.zeros((4)) cut_size[0] = xxyy[0] + left * box_width cut_size[1] = xxyy[1] + (right - 1) * box_width cut_size[2] = xxyy[2] + top * box_height cut_size[3] = xxyy[3] + (bottom-1) * box_height return cut_size def dets2xxyys(dets): """ In this module xxyy = [left, right, top, bottom] """ xxyys = np.zeros((len(dets), 4)) for i, d in enumerate(dets): xxyys[i, 0] = d.left() xxyys[i, 1] = d.right() xxyys[i, 2] = d.top() xxyys[i, 3] = d.bottom() return xxyys class FacePosePredictor(object): """ A face Pose Predcitor using pre-trained caffe model. The orignal code was modified to class version. https://github.com/guozhongluo/head-pose-estimation-and-face-landmark Example: posePredictor = facePose.FacePosePredictor() predictpoints, landmarks, headposes = posePredictor.predict(frameCopy, np.array([[left, right, top, bottom]])) """ def __init__(self): self.M_left = -0.15 self.M_right = +1.15 self.M_top = -0.10 self.M_bottom = +1.25 self.vgg_height = 224 self.vgg_width = 224 vgg_point_MODEL_FILE = 'model/deploy.prototxt' vgg_point_PRETRAINED = 'model/68point_dlib_with_pose.caffemodel' mean_filename = 'model/VGG_mean.binaryproto' self.vgg_point_net = caffe.Net(vgg_point_MODEL_FILE, vgg_point_PRETRAINED, caffe.TEST) caffe.set_mode_cpu() # caffe.set_mode_gpu() # caffe.set_device(0) proto_data = open(mean_filename, "rb").read() a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data) self.mean = caffe.io.blobproto_to_array(a)[0] def predict(self, colorImage, xxyys): """ predcit pitch yaw, roll for each rectangle. 
colorImage: xxyys: list of rectangle return predictpoints: 68 point landmarks: predictposes: pitch yaw roll """ def getRGBTestPart(img, xxyy, left, right, top, bottom, asHeight, asWidth): """return face image as float32 returned image size width, height """ largexxyy = getCutSize(xxyy, left, right, top, bottom) retixxyy = retifyxxyy(img, largexxyy) retixxyy = [int(round(x)) for x in retixxyy] face = img[retixxyy[2]:retixxyy[3], retixxyy[0]:retixxyy[1], :] face = cv2.resize(face, (asHeight, asWidth), interpolation=cv2.INTER_AREA) face = face.astype('float32') return face pointNum = 68 faceNum = xxyys.shape[0] faces = np.zeros((1, 3, self.vgg_height, self.vgg_width)) predictpoints = np.zeros((faceNum, pointNum*2)) predictposes = np.zeros((faceNum, 3)) imgsize = colorImage.shape[:2] TotalSize = np.zeros((faceNum, 2)) normalface = np.zeros(self.mean.shape) for i in range(0, faceNum): TotalSize[i] = imgsize colorface = getRGBTestPart(colorImage, xxyys[i], self.M_left, self.M_right, self.M_top, self.M_bottom, self.vgg_height, self.vgg_width) normalface[0] = colorface[:, :, 0] normalface[1] = colorface[:, :, 1] normalface[2] = colorface[:, :, 2] normalface = normalface - self.mean faces[0] = normalface data4DL = np.zeros([faces.shape[0], 1, 1, 1]) self.vgg_point_net.set_input_arrays(faces.astype(np.float32), data4DL.astype(np.float32)) self.vgg_point_net.forward() predictpoints[i] = self.vgg_point_net.blobs['68point'].data[0] predictposes[i] = 50 * self.vgg_point_net.blobs['poselayer'].data predictpoints = predictpoints * self.vgg_height/2 + self.vgg_width/2 landmarks = self.batchRecoverPart(predictpoints, xxyys, TotalSize, self.M_left, self.M_right, self.M_top, self.M_bottom, self.vgg_height, self.vgg_width) return predictpoints, landmarks, predictposes def batchRecoverPart(self, predictPoint, totalxxyy, totalSize, left, right, top, bottom, height, width): def recover_coordinate(largetxxyy, landmarks, width, height): point = np.zeros(np.shape(landmarks)) cut_width = largetxxyy[1] - largetxxyy[0] cut_height = largetxxyy[3] - largetxxyy[2] scale_x = cut_width*1.0/width scale_y = cut_height*1.0/height point[0::2] = [float(j * scale_x + largetxxyy[0]) for j in landmarks[0::2]] point[1::2] = [float(j * scale_y + largetxxyy[2]) for j in landmarks[1::2]] return point def recoverPart(point, xxyy, left, right, top, bottom, img_height, img_width, height, width): largexxyy = getCutSize(xxyy, left, right, top, bottom) retixxyy = retifyxxyysize(img_height, img_width, largexxyy) recover = recover_coordinate(retixxyy, point, height, width) recover = recover.astype('float32') return recover recoverPoint = np.zeros(predictPoint.shape) for i in range(0, predictPoint.shape[0]): recoverPoint[i] = recoverPart(predictPoint[i], totalxxyy[i], left, right, top, bottom, totalSize[i, 0], totalSize[i, 1], height, width) return recoverPoint def predict1(self, colorImage, xxyy): """ predcit pitch yaw, roll for single rectangle. 
colorImage: xxyy: single rectangle return value predictposes[0, :] : pitch, yaw, roll """ predictpoints, landmarks, predictposes = self.predict(colorImage, np.array([xxyy])) return predictpoints[0], landmarks[0], predictposes[0, :] def roundByD(angle, delta): """round angle by delta angle: delta: >>> roundByD(8, 10) 10.0 >>> roundByD(-9.5, 10) -10.0 """ return delta*round(angle/float(delta)) def getPyrStr(pitch, yaw, roll): """ pitch: yaw: roll: """ pitchDelta = 5 yawDelta = 5 rollDelta = 10 pyrDir = "P_%+03d_Y_%+03d_R_%+03d" % (roundByD(pitch, pitchDelta), roundByD(yaw, yawDelta), roundByD(roll, rollDelta)) return pyrDir def getPyStr(pitch, yaw): """ pitch: yaw: """ pitchDelta = 5 yawDelta = 5 rollDelta = 10 pyrDir = "P_%+03d_Y_%+03d" % (roundByD(pitch, pitchDelta), roundByD(yaw, yawDelta)) return pyrDir def getPyrDir(outDir, pitch, yaw, roll): """ pitch: yaw: roll: """ pyrDir = os.path.join(outDir, getPyrStr(pitch, yaw, roll)) if not os.path.isdir(pyrDir): os.makedirs(pyrDir) return pyrDir def getPyDir(outDir, pitch, yaw): """ pitch: yaw: roll: """ pitchDelta = 5 yawDelta = 5 rollDelta = 10 pyrDir = "P_%+03d_Y_%+03d" % (roundByD(pitch, pitchDelta), roundByD(yaw, yawDelta)) pyrDir = os.path.join(outDir, pyrDir) if not os.path.isdir(pyrDir): os.makedirs(pyrDir) return pyrDir
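A usage sketch adapted from the FacePosePredictor class docstring above; the image path and face rectangle are invented, and the pre-trained caffe model files must exist under model/ as hard-coded in __init__.

import cv2
import facePose

predictor = facePose.FacePosePredictor()
frame = cv2.imread("face.jpg")               # hypothetical test image containing one face
xxyy = [100, 300, 80, 320]                   # hypothetical [left, right, top, bottom] face box
points, landmarks, (pitch, yaw, roll) = predictor.predict1(frame, xxyy)
print(facePose.getPyrStr(pitch, yaw, roll))  # e.g. "P_+05_Y_-10_R_+00"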
29.128571
161
0.608877
1,037
8,156
4.678881
0.2054
0.021641
0.032152
0.044518
0.237634
0.186727
0.165087
0.149423
0.096661
0.096661
0
0.031469
0.26361
8,156
279
162
29.232975
0.77639
0.166135
0
0.169118
0
0
0.026627
0.016973
0
0
0
0
0
1
0.117647
false
0
0.029412
0
0.264706
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed9343bfb5b3ee8263500da7447ed1563a0c1cf8
10,469
py
Python
tutorials/05-dcr/plot_fwd_2_dcr2d.py
ElliotCheung/simpeg
ce5bde154179ca63798a62a12787a7ec3535472c
[ "MIT" ]
1
2022-02-18T16:31:27.000Z
2022-02-18T16:31:27.000Z
tutorials/05-dcr/plot_fwd_2_dcr2d.py
ElliotCheung/simpeg
ce5bde154179ca63798a62a12787a7ec3535472c
[ "MIT" ]
null
null
null
tutorials/05-dcr/plot_fwd_2_dcr2d.py
ElliotCheung/simpeg
ce5bde154179ca63798a62a12787a7ec3535472c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ DC Resistivity Forward Simulation in 2.5D ========================================= Here we use the module *SimPEG.electromagnetics.static.resistivity* to predict DC resistivity data and plot using a pseudosection. In this tutorial, we focus on the following: - How to define the survey - How to define the forward simulation - How to predict normalized voltage data for a synthetic conductivity model - How to include surface topography - The units of the model and resulting data """ ######################################################################### # Import modules # -------------- # from discretize import TreeMesh from discretize.utils import mkvc, refine_tree_xyz from SimPEG.utils import model_builder, surface2ind_topo from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc from SimPEG import maps, data from SimPEG.electromagnetics.static import resistivity as dc from SimPEG.electromagnetics.static.utils.static_utils import ( generate_dcip_sources_line, apparent_resistivity_from_voltage, plot_pseudosection, ) import os import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.colors import LogNorm try: from pymatsolver import Pardiso as Solver except ImportError: from SimPEG import SolverLU as Solver write_output = False mpl.rcParams.update({"font.size": 16}) # sphinx_gallery_thumbnail_number = 3 ############################################################### # Defining Topography # ------------------- # # Here we define surface topography as an (N, 3) numpy array. Topography could # also be loaded from a file. In our case, our survey takes place within a set # of valleys that run North-South. # x_topo, y_topo = np.meshgrid( np.linspace(-3000, 3000, 601), np.linspace(-3000, 3000, 101) ) z_topo = 40.0 * np.sin(2 * np.pi * x_topo / 800) - 40.0 x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo) topo_xyz = np.c_[x_topo, y_topo, z_topo] # Create 2D topography. Since our 3D topography only changes in the x direction, # it is easy to define the 2D topography projected along the survey line. For # arbitrary topography and for an arbitrary survey orientation, the user must # define the 2D topography along the survey line. topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0) ##################################################################### # Create Dipole-Dipole Survey # --------------------------- # # Here we define a single EW survey line that uses a dipole-dipole configuration. # For the source, we must define the AB electrode locations. For the receivers # we must define the MN electrode locations. Instead of creating the survey # from scratch (see 1D example), we will use the *generat_dcip_survey_line* utility. # # Define survey line parameters survey_type = "dipole-dipole" dimension_type = "2D" data_type = "volt" end_locations = np.r_[-400.0, 400.0] station_separation = 40.0 num_rx_per_src = 10 # Generate source list for DC survey line source_list = generate_dcip_sources_line( survey_type, data_type, dimension_type, end_locations, topo_2d, num_rx_per_src, station_separation, ) # Define survey survey = dc.survey.Survey(source_list, survey_type=survey_type) ############################################################### # Create Tree Mesh # ------------------ # # Here, we create the Tree mesh that will be used to predict DC data. 
# dh = 4 # base cell width dom_width_x = 3200.0 # domain width x dom_width_z = 2400.0 # domain width z nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0))) # num. base cells x nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0))) # num. base cells z # Define the base mesh hx = [(dh, nbcx)] hz = [(dh, nbcz)] mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography mesh = refine_tree_xyz( mesh, topo_xyz[:, [0, 2]], octree_levels=[0, 0, 4, 4], method="surface", finalize=False, ) # Mesh refinement near transmitters and receivers. First we need to obtain the # set of unique electrode locations. electrode_locations = np.c_[ survey.locations_a, survey.locations_b, survey.locations_m, survey.locations_n, ] unique_locations = np.unique( np.reshape(electrode_locations, (4 * survey.nD, 2)), axis=0 ) mesh = refine_tree_xyz( mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False ) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] mesh = refine_tree_xyz( mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False ) mesh.finalize() ############################################################### # Create Conductivity Model and Mapping for Tree Mesh # ----------------------------------------------------- # # It is important that electrodes are not modeled as being in the air. Even if the # electrodes are properly located along surface topography, they may lie above # the discretized topography. This step is carried out to ensure all electrodes # lie on the discretized surface. # # Define conductivity model in S/m (or resistivity model in Ohm m) air_conductivity = 1e-8 background_conductivity = 1e-2 conductor_conductivity = 1e-1 resistor_conductivity = 1e-3 # Find active cells in forward modeling (cell below surface) ind_active = surface2ind_topo(mesh, topo_xyz[:, [0, 2]]) # Define mapping from model to active cells nC = int(ind_active.sum()) conductivity_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity) # Define model conductivity_model = background_conductivity * np.ones(nC) ind_conductor = model_builder.getIndicesSphere(np.r_[-120.0, -160.0], 60.0, mesh.gridCC) ind_conductor = ind_conductor[ind_active] conductivity_model[ind_conductor] = conductor_conductivity ind_resistor = model_builder.getIndicesSphere(np.r_[120.0, -100.0], 60.0, mesh.gridCC) ind_resistor = ind_resistor[ind_active] conductivity_model[ind_resistor] = resistor_conductivity # Plot Conductivity Model fig = plt.figure(figsize=(9, 4)) plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) norm = LogNorm(vmin=1e-3, vmax=1e-1) ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7]) mesh.plot_image( plotting_map * conductivity_model, ax=ax1, grid=False, pcolor_opts={"norm": norm} ) ax1.set_xlim(-600, 600) ax1.set_ylim(-600, 0) ax1.set_title("Conductivity Model") ax1.set_xlabel("x (m)") ax1.set_ylabel("z (m)") ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7]) cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical") cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12) plt.show() ############################################################### # Project Survey to Discretized Topography # ---------------------------------------- # # It is important that electrodes are not model as being in the air. Even if the # electrodes are properly located along surface topography, they may lie above # the discretized topography. 
This step is carried out to ensure all electrodes # like on the discretized surface. # survey.drape_electrodes_on_topography(mesh, ind_active, option="top") ####################################################################### # Predict DC Resistivity Data # --------------------------- # # Here we predict DC resistivity data. If the keyword argument *sigmaMap* is # defined, the simulation will expect a conductivity model. If the keyword # argument *rhoMap* is defined, the simulation will expect a resistivity model. # simulation = dc.simulation_2d.Simulation2DNodal( mesh, survey=survey, sigmaMap=conductivity_map, solver=Solver ) # Predict the data by running the simulation. The data are the raw voltage in # units of volts. dpred = simulation.dpred(conductivity_model) ####################################################################### # Plotting in Pseudo-Section # -------------------------- # # Here, we demonstrate how to plot 2D data in pseudo-section. # First, we plot the voltages in pseudo-section as a scatter plot. This # allows us to visualize the pseudo-sensitivity locations for our survey. # Next, we plot the apparent conductivities in pseudo-section as a filled # contour plot. # # Plot voltages pseudo-section fig = plt.figure(figsize=(12, 5)) ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78]) plot_pseudosection( survey, dobs=np.abs(dpred), plot_type="scatter", ax=ax1, scale="log", cbar_label="V/A", scatter_opts={"cmap": mpl.cm.viridis}, ) ax1.set_title("Normalized Voltages") plt.show() # Get apparent conductivities from volts and survey geometry apparent_conductivities = 1 / apparent_resistivity_from_voltage(survey, dpred) # Plot apparent conductivity pseudo-section fig = plt.figure(figsize=(12, 5)) ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78]) plot_pseudosection( survey, dobs=apparent_conductivities, plot_type="contourf", ax=ax1, scale="log", cbar_label="S/m", mask_topography=True, contourf_opts={"levels": 20, "cmap": mpl.cm.viridis}, ) ax1.set_title("Apparent Conductivity") plt.show() ####################################################################### # Optional: Write out dpred # ------------------------- # # Write DC resistivity data, topography and true model # if write_output: dir_path = os.path.dirname(__file__).split(os.path.sep) dir_path.extend(["outputs"]) dir_path = os.path.sep.join(dir_path) + os.path.sep if not os.path.exists(dir_path): os.mkdir(dir_path) # Add 10% Gaussian noise to each datum np.random.seed(225) std = 0.05 * np.abs(dpred) dc_noise = std * np.random.rand(len(dpred)) dobs = dpred + dc_noise # Create a survey with the original electrode locations # and not the shifted ones # Generate source list for DC survey line source_list = generate_dcip_sources_line( survey_type, data_type, dimension_type, end_locations, topo_xyz, num_rx_per_src, station_separation, ) survey_original = dc.survey.Survey(source_list) # Write out data at their original electrode locations (not shifted) data_obj = data.Data(survey_original, dobs=dobs, standard_deviation=std) fname = dir_path + "dc_data.obs" write_dcip2d_ubc(fname, data_obj, "volt", "dobs") fname = dir_path + "topo_xyz.txt" np.savetxt(fname, topo_xyz, fmt="%.4e")
31.250746
88
0.66845
1,468
10,469
4.628747
0.270436
0.02752
0.010007
0.006475
0.230022
0.189845
0.15482
0.117145
0.1039
0.1039
0
0.028097
0.153501
10,469
334
89
31.344311
0.738659
0.385042
0
0.192771
0
0
0.039455
0
0
0
0
0
0
1
0
false
0
0.090361
0
0.090361
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed936cafecf6046c66b2300745cd962b854537e7
23,944
py
Python
Feature_Selection.py
ksegaba/ML-Pipeline
cd3914563ccd2e2eb863a55e7fe774108280ed47
[ "MIT" ]
12
2019-09-30T21:17:40.000Z
2022-02-11T00:22:52.000Z
Feature_Selection.py
DanyelleJhang/ML-Pipeline
78073fd1004f831c4efdd05e0f1eb78c8bae4fcb
[ "MIT" ]
6
2021-08-03T14:29:16.000Z
2021-11-17T22:39:13.000Z
Feature_Selection.py
DanyelleJhang/ML-Pipeline
78073fd1004f831c4efdd05e0f1eb78c8bae4fcb
[ "MIT" ]
17
2017-05-22T21:03:42.000Z
2022-03-01T15:06:29.000Z
""" PURPOSE: Run feature selection mettestd available from sci-kit learn on a given dataframe Must set path to Miniconda in HPC: export PATH=/mnt/testme/azodichr/miniconda3/bin:$PATH INPUT: -df Feature file for ML. If class/Y values are in a separate file use -df for features and -df2 for class/Y -alg Feature selection mettestd to use - Chi2 need: -n - RandomForest need: -n, -type - Enrichment using Fisher's Exact (for classification with binary feats only) need: -p (default=0.05) - LASSO need: -p, -type, n - Bayesian LASSO (bl) need: -n - Elastic Net (EN) need: -n -p (default=0.5, proportion of L1 to L2 penalty) - Relief (https://github.com/EpistasisLab/scikit-rebate) (currently for regression only) need: -n, - BayesA regression (for regression only) need: -n - rrBLUP (i.e. mixed.solve) (for regression only) need: -n - Random need: -n OPTIONAL INPUT: -n Number(s) of features you would like to keep (required for chi2, RF, relief, bayesA) Example: -n 10 or -n 10,50,100 -test File with list of intances to testldout from feature selection -save Save name for list of features selected. Will automatically append _n to the name Default: df_F_n or df_f_cvJobNum_n -cl_train Since only RF works for multi-class problems, use cl_train to give a list of what classes you want to include (Default = 'all') If binary, first label = positive class. -sep Set seperator for input data (Default = '\t') -df2 File with class information. Use only if df contains the features but not the classes * Need to specifiy what column in df2 is y using -y_name -y_name Name of the column to predict (Default = Class) -drop_na T/F to drop rows with NAs -feat File containing the features you want to use from the df (one feature per line) Default: all (i.e. everything in the dataframe given). -type r = regression, c = classification (required for LASSO and RF) -p Parameter value for LASSO, EN, or Fisher's Exact Test. Fishers: pvalue cut off (Default = 0.05) EN: Ratio of L1 to L2 regularization (larger = fewer features selected) LASSO: If type = r: need alpha value, try 0.01, 0.001. (larger = fewer features selected) LASSO: If type = c: need C which controls the sparcity, try 0.01, 0.1 (smaller = fewer features selected) -pos String for what codes for the positive example (i.e. UUN) Default = 1 -neg String for what codes for the negative example (i.e. NNN) Default = 0 -cvs To run feat. sel. withing cross-validation scheme provide a CVs matrix and -JobNum CVs maxtrix: rows = instances, columns = CV replicates, value are the CV-fold each instance belongs to. -scores T/F to output scores/coefficients for each feature (Not available for: FET, LASSO, or Chi2) OUTPUT: -df_alg.txt New dataframe with columns only from feature selection AUTtestR: Christina Azodi REVISIONS: Written 8/16/2016 Added relief algorithm 10/22/2017 Added BayesA algorithm 3/23/2018 """ import pandas as pd import numpy as np import subprocess as sp import sys, os, time start_time = time.time() def SaveTopFeats(top, save_name): try: top.remove('Class') except: pass out = open(save_name, 'w') for f in top: out.write(f + '\n') def DecisionTree(df, n, TYPE, save_name, SCORES): """Feature selection using DecisionTree on the wtestle dataframe Feature importance from the Random Forest Classifier is the Gini importance (i.e. the normalized total reduction of the criterion for the decendent nodes compared to the parent node brought by that feature across all trees.) 
""" from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from math import sqrt X_all = df.drop('Class', axis=1).values Y_all = df.loc[:, 'Class'].values fean_num_feat_sel = len(list(df.columns.values)[1:]) if TYPE.lower() == 'c': feat_sel_forest = RandomForestClassifier(criterion='entropy', max_features= round(sqrt(fean_num_feat_sel)), n_estimators=500, n_jobs=1) elif TYPE.lower() == 'r': Y_all = Y_all.astype('float') feat_sel_forest = RandomForestRegressor(max_features= round(sqrt(fean_num_feat_sel)), n_estimators=500, n_jobs=1) else: print('Need to specify -type r/c (regression/classification)') exit() print("=====* Running decision tree based feature selection *=====") #Train the model & derive importance scores feat_sel_forest = feat_sel_forest.fit(X_all, Y_all) importances = feat_sel_forest.feature_importances_ # Sort importance scores and keep top n feat_names = list(df.columns.values)[1:] temp_imp = pd.DataFrame(importances, columns = ["imp"], index=feat_names) indices = np.argsort(importances)[::-1] if SCORES.lower() != 'f': save_scores = save_name + '_RFScores.txt' temp_imp.to_csv(save_scores) for n_size in n: indices_keep = indices[0:int(n_size)] fixed_index = [] # Translate keep indices into the indices in the df for i in indices_keep: new_i = i + 1 fixed_index.append(new_i) fixed_index = [0] + fixed_index good = [df.columns[i] for i in fixed_index] print("Features selected using DecisionTree feature selection: %s" % str(good.remove('Class'))) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(good, save_name2) def Chi2(df, n, save_name): """Feature selection using Chi2 on the wtestle dataframe. Chi2 measures the dependence between stochastic variables, this mettestd weeds out features that are most likely to be independent of class""" from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 from sklearn.feature_selection import mutual_info_classif print('This function might not be working right now.... 
Bug Christina if you need it!') X_all = df.drop('Class', axis=1).values Y_all = df.loc[:, 'Class'].values #Y_all = Y_all.astype('int') print(Y_all) # Set selection to chi2 with n to keep for n_size in n: # Set selection to chi2 with n to keep ch2 = SelectKBest(chi2, k=n) ch2.fit_transform(X_all, Y_all) index = ch2.get_support(indices=True) # Translate keep indices into the indices in the df fixed_index = [] for i in index: new_i = i + 1 fixed_index.append(new_i) fixed_index = [0] + fixed_index good = [df.columns[i] for i in fixed_index] print("Features selected using DecisionTree feature selection: %s" % str(good)) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(good, save_name2) def Relief(df, n, n_jobs, save_name, SCORES): """Feature selection using Relief on the wtestle dataframe.""" from skrebate import ReliefF X_all = df.drop('Class', axis=1).values Y_all = df.loc[:, 'Class'].values Y_all = Y_all.astype('int') feature_names = list(df) feature_names.remove('Class') print("=====* Running relief/rebase based feature selection *=====") # Set selection to relief fs = ReliefF(n_jobs = int(n_jobs)) fs.fit(X_all, Y_all) imp = pd.DataFrame(fs.feature_importances_, index = feature_names, columns = ['relief_imp']) imp_top = imp.sort_values(by='relief_imp', ascending=False) if SCORES.lower() != 'f': save_scores = save_name + '_ReliefScores.txt' imp_top.to_csv(save_scores) for n_size in n: keep = imp_top.index.values[0:int(n_size)] print("Features selected using Relief from rebase: %s" % str(keep)) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(keep, save_name2) def L1(df, PARAMETER, TYPE, save_name): """Apply a linear model with a L1 penalty and select features wtest's coefficients aren't shrunk to zero. Unlike Chi2, this mettestd accounts for the effect of all of the other features when determining if a feature is a good predictor. For a regression problem, it uses linear_model.Lasso For a classification problem, it uses svm.LinearSVC """ from sklearn.feature_selection import SelectFromModel from sklearn.svm import LinearSVC from sklearn.linear_model import Lasso X_all = df.drop('Class', axis=1).values Y_all = df.loc[:, 'Class'].values Y_all = Y_all.astype('int') if TYPE == 'c' or TYPE == 'classification': estimator = LinearSVC(C = PARAMETER, penalty='l1', dual=False).fit(X_all, Y_all) elif TYPE == 'r' or TYPE == 'regression': estimator = Lasso(alpha = PARAMETER).fit(X_all, Y_all) print("=====* Running L1/LASSO based feature selection *=====") model = SelectFromModel(estimator, prefit=True) keep = model.get_support([]) X_new = model.transform(X_all) feat_names = np.array(list(df)[1:]) good = feat_names[keep] print("Features selected using LASSO: %s" % str(good)) print('\nNumber of features selected using LASSO (sparcity parameter = %s): %i' % (str(PARAMETER), X_new.shape[1])) save_name2 = save_name SaveTopFeats(good, save_name2) def EN(df, PARAMETER, n, save_name, SCORES): """Apply Elastic-Net based feature selection. 
Can tune l1:l2 (penalty:zero) ratio (Default = 0.5)""" from sklearn.linear_model import ElasticNet X_all = df.drop('Class', axis=1).values Y_all = df.loc[:, 'Class'].values feature_names = list(df) feature_names.remove('Class') enet = ElasticNet(alpha=0.5, l1_ratio=PARAMETER, fit_intercept=True, positive=False).fit(X_all, Y_all) imp = pd.DataFrame(enet.coef_, index = feature_names, columns = ['EN_coef']) imp = imp.abs() imp_top = imp.sort_values(by='EN_coef', ascending=False) # Count the number of coefficients that were not zero non_zero = imp_top[imp_top > 0 ].count() if SCORES.lower() != 'f': save_scores = save_name + '_ENScores.txt' imp_top.to_csv(save_scores) for n_size in n: keep = imp_top.index.values[0:int(n_size)] if int(n_size) > int(non_zero): print("!!!!!!!!!!!WARNING!!!!!!!!!!!! ONLY %i FEATURES HAD A NON-ZERO COEFFICIENT." % non_zero) print('!!!!!!!!!!!WARNING!!!!!!!!!!!! THIS LIST CONTAINS SOME FEATURES THAT HAD A COEF = 0') print("Features selected using Elastic-Net with l1 ratio = %0.6f: %s..." % (PARAMETER, str(keep[:5]))) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(keep, save_name2) print("Note that using a l1 ratio = %.6f, there were %i non-zero features" % (PARAMETER, non_zero)) def BayesA(df_use, n, save_name, SCORES): """ Use BayesA from BGLR package to select features with largest abs(coefficients) """ cwd = os.getcwd() temp_name = 'temp_' + save_name df_use.to_csv(temp_name) # Write temp Rscript tmpR=open("%s_BayA.R" % temp_name,"w") tmpR.write('library(BGLR)\n') tmpR.write("setwd('%s')\n" % cwd) tmpR.write("df <- read.csv('%s', sep=',', header=TRUE, row.names=1)\n" % temp_name) tmpR.write("Y <- df[, 'Class']\n") tmpR.write("X <- df[, !colnames(df) %in% c('Class')]\n") tmpR.write("X=scale(X)\n") tmpR.write("ETA=list(list(X=X,model='BayesA'))\n") tmpR.write("fm=BGLR(y=Y,ETA=ETA,verbose=FALSE, nIter=12000,burnIn=2000)\n") tmpR.write("coef <- abs(fm$ETA[[1]]$b)\n") tmpR.write("coef_df <- as.data.frame(coef)\n") tmpR.write("write.table(coef_df, file='%s', sep=',', row.names=TRUE, quote=FALSE)\n" % (temp_name + '_BayAScores.txt')) tmpR.close() print('Running bayesA model from BGLR inplemented in R.') process = sp.Popen('module load R && export R_LIBS_USER=~/R/library && R CMD BATCH %s_BayA.R' % temp_name, shell=True) process.wait() coefs = pd.read_csv(temp_name + '_BayAScores.txt', sep = ',') coefs['coef_abs'] = coefs.coef.abs() coefs_top = coefs.sort_values(by='coef_abs', ascending=False) if SCORES.lower() == 'f': os.system("rm %s_BayAScores.txt" % temp_name) else: os.system("mv %s_BayAScores.txt %s_BayAScores.txt" % (temp_name, save_name)) os.system("rm %s %s_BayA.R varE.dat mu.dat ETA_1_ScaleBayesA.dat ETA_1_lambda.dat" % (temp_name, temp_name)) for n_size in n: keep = coefs_top.index.values[0:int(n_size)] print("Top %s features selected using BayesA from BGLR: %s" % (str(n_size), str(keep))) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(keep, save_name2) def BLASSO(df_use, n, save_name, SCORES): """ Use BayesA from BGLR package to select features with largest abs(coefficients) """ cwd = os.getcwd() temp_name = 'temp_' + save_name df_use.to_csv(temp_name) # Write temp Rscript tmpR=open("%s_BL.R" % temp_name,"w") tmpR.write('library(BGLR)\n') tmpR.write("setwd('%s')\n" % cwd) tmpR.write("df <- read.csv('%s', sep=',', header=TRUE, row.names=1)\n" % temp_name) tmpR.write("Y <- df[, 'Class']\n") tmpR.write("X <- df[, !colnames(df) %in% c('Class')]\n") tmpR.write("ETA=list(list(X=X,model='BL'))\n") tmpR.write("fm=BGLR(y=Y,ETA=ETA,verbose=FALSE, 
nIter=12000,burnIn=2000)\n") tmpR.write("coef <- abs(fm$ETA[[1]]$b)\n") tmpR.write("coef_df <- as.data.frame(coef)\n") tmpR.write("write.table(coef_df, file='%s', sep=',', row.names=TRUE, quote=FALSE)\n" % (temp_name + '_BLScores.txt')) tmpR.close() print('Running bayesA model from BGLR inplemented in R.') process = sp.Popen('module load R && export R_LIBS_USER=~/R/library && R CMD BATCH %s_BL.R' % temp_name, shell=True) process.wait() coefs = pd.read_csv(temp_name + '_BLScores.txt', sep = ',') coefs['coef_abs'] = coefs.coef.abs() coefs_top = coefs.sort_values(by='coef_abs', ascending=False) if SCORES.lower() == 'f': os.system("rm %s_BLScores.txt" % temp_name) else: os.system("mv %s_BLScores.txt %s_BLScores.txt" % (temp_name, save_name)) os.system("rm %s %s_rrB.R varE.dat mu.dat ETA_1_ScaleBL.dat ETA_1_lambda.dat" % (temp_name, temp_name, temp_name)) for n_size in n: keep = coefs_top.index.values[0:int(n_size)] print("Top %s features selected using BL from BGLR: %s" % (str(n_size), str(keep))) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(keep, save_name2) def rrBLUP(df_use, n, save_name, SCORES): """ Use BayesA from BGLR package to select features with largest abs(coefficients) """ cwd = os.getcwd() temp_name = 'temp_' + save_name df_use.to_csv(temp_name) # Write temp Rscript tmpR=open("%s_rrB.R" % temp_name,"w") tmpR.write("setwd('%s')\n" % cwd) tmpR.write('library(rrBLUP)\n') tmpR.write("df <- read.csv('%s', sep=',', header=TRUE, row.names=1)\n" % temp_name) tmpR.write("Y <- df[, 'Class']\n") tmpR.write("X <- df[, !colnames(df) %in% c('Class')]\n") tmpR.write("mod <- mixed.solve(Y, Z=X, K=NULL, SE=FALSE, return.Hinv=FALSE)\n") tmpR.write("coef <- mod$u\n") tmpR.write("coef_df <- as.data.frame(coef)\n") tmpR.write("write.table(coef_df, file='%s', sep=',', row.names=TRUE, quote=FALSE)\n" % (temp_name + '_rrBScores.txt')) tmpR.close() print('Running rrBLUP using mixed.solve in R.') process = sp.Popen('module load R && export R_LIBS_USER=~/R/library && R CMD BATCH %s_rrB.R' % temp_name, shell=True) process.wait() coefs = pd.read_csv(temp_name + '_rrBScores.txt', sep = ',') coefs['coef_abs'] = coefs.coef.abs() coefs_top = coefs.sort_values(by='coef_abs', ascending=False) if SCORES.lower() == 'f': os.system("rm %s_rrBScores.txt" % temp_name) else: os.system("mv %s_rrBScores.txt %s_rrBScores.txt" % (temp_name, save_name)) os.system("rm %s %s_rrB.R varE.dat mu.dat ETA_1_ScalerrB.dat" % (temp_name, temp_name)) for n_size in n: keep = coefs_top.index.values[0:int(n_size)] print("Top %s features selected using mixed.solve in R (similar to rrBLUP): %s" % (str(n_size), str(keep))) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(keep, save_name2) def FET(df, PARAMETER, pos, neg, save_name): """Use Fisher's Exact Test to look for enriched features""" from scipy.stats import fisher_exact kmers = list(df) kmers.remove(CL) enriched = [CL] print("=====* Running enrichement based feature selection *=====") for k in kmers: temp = df.groupby([CL, k]).size().reset_index(name="Count") try: TP = temp.loc[(temp[CL] == pos) & (temp[k] == 1), 'Count'].iloc[0] except: TP = 0 try: TN = temp.loc[(temp[CL] == neg) & (temp[k] == 0), 'Count'].iloc[0] except: TN = 0 try: FP = temp.loc[(temp[CL] == neg) & (temp[k] == 1), 'Count'].iloc[0] except: FP = 0 try: FN = temp.loc[(temp[CL] == pos) & (temp[k] == 0), 'Count'].iloc[0] except: FN = 0 oddsratio,pvalue = fisher_exact([[TP,FN],[FP,TN]],alternative='greater') if pvalue <= PARAMETER: enriched.append(k) save_name2 = save_name SaveTopFeats(enriched, 
save_name2) def Random(df, n, save_name): """Randomly select n features""" from random import sample feat_names = list(df) feat_names.remove('Class') for n_size in n: rand_feats = sample(feat_names, int(n_size)) save_name2 = save_name + "_" + str(n_size) SaveTopFeats(rand_feats, save_name2) if __name__ == "__main__": #Default parameters FEAT = 'all' #Features to include from dataframe. Default = all (i.e. don't remove any from the given dataframe) neg = 0 #Default value for negative class = 0 pos = 1 #Default value for positive class = 1 save_list = 'false' p = 0.05 CL = 'Class' TYPE = 'c' n_jobs = 1 CVs, REPS = 'pass', 1 SEP = '\t' SAVE, DF2 = 'default', 'None' UNKNOWN = 'unk' y_name = 'Class' test = '' cl_train = '' drop_na = 'f' NA = 'na' SCORES = 'f' N = 10 for i in range (1,len(sys.argv),2): if sys.argv[i].lower() == "-df": DF = sys.argv[i+1] if sys.argv[i].lower() == "-df2": DF2 = sys.argv[i+1] if sys.argv[i].lower() == "-sep": SEP = sys.argv[i+1] if sys.argv[i].lower() == '-save': SAVE = sys.argv[i+1] if sys.argv[i].lower() == '-alg': alg = sys.argv[i+1] if alg.lower() == 'en': PARAMETER = 0.5 if sys.argv[i].lower() == '-n': N = sys.argv[i+1] if sys.argv[i].lower() == '-n_jobs': n_jobs = int(sys.argv[i+1]) if sys.argv[i].lower() == '-feat': FEAT = sys.argv[i+1] if sys.argv[i].lower() == '-cl_train': cl_train = sys.argv[i+1] if sys.argv[i].lower() == '-p': PARAMETER = float(sys.argv[i+1]) if sys.argv[i].lower() == '-type': TYPE = sys.argv[i+1] if sys.argv[i].lower() == '-y_name': y_name = sys.argv[i+1] if sys.argv[i].lower() == '-pos': pos = sys.argv[i+1] if sys.argv[i].lower() == '-neg': neg = sys.argv[i+1] if sys.argv[i].lower() == '-cvs': CVs = sys.argv[i+1] if sys.argv[i].lower() == '-jobnum': jobNum = sys.argv[i+1] if sys.argv[i].lower() == '-test': test = sys.argv[i+1] if sys.argv[i].lower() == '-drop_na': drop_na = sys.argv[i+1] if sys.argv[i].lower() == '-scores': SCORES = sys.argv[i+1] if len(sys.argv) <= 1: print(__doc__) exit() #Load feature matrix and save feature names df = pd.read_csv(DF, sep=SEP, index_col = 0) # If features and class info are in separate files, merge them: if DF2 != 'None': start_dim = df.shape df_class = pd.read_csv(DF2, sep=SEP, index_col = 0) df = pd.concat([df_class[y_name], df], axis=1, join='inner') print('Merging the feature & class dataframes changed the dimensions from %s to %s (instance, features).' % (str(start_dim), str(df.shape))) # Specify class column - default = Class if y_name != 'Class': df = df.rename(columns = {y_name:'Class'}) # Check for Nas if df.isnull().values.any() == True: if drop_na.lower() == 't' or drop_na.lower() == 'true': start_dim = df.shape df = df.dropna(axis=0) print('Dropping rows with NA values changed the dimensions from %s to %s.' % (str(start_dim), str(df.shape))) else: print(df.columns[df.isnull().any()].tolist()) print('There are Na values in your dataframe.\n Impute them or add -drop_na True to remove rows with nas') quit() # Drop instances in testld out set if provided if test !='': print('Removing testldout instances...') with open(test) as test_file: test_instances = test_file.read().splitlines() try: df = df.drop(test_instances) except: print('Trying converting instance names to int') test_instances = [int(i) for i in test_instances] df = df.drop(test_instances) # Drop instances that aren't in the listed classes (i.e. 
make binary matrix) if cl_train !='': start_dim = df.shape use_classes = cl_train.strip().split(',') df = df[df['Class'].isin(use_classes)] print('Dropping instances that are not in %s, changed dimensions from %s to %s (instance, features).' % (str(use_classes), str(start_dim), str(df.shape))) df = df[df['Class'] != UNKNOWN] #Recode class as 1 for positive and 0 for negative if TYPE.lower() == 'c': if cl_train != '': use_classes = cl_train.strip().split(',') pos = use_classes[0] neg = use_classes[1] df["Class"] = df["Class"].replace(pos, 1) df["Class"] = df["Class"].replace(neg, 0) # If requesting multiple n, convert to list try: N = N.strip().split(',') except: N = [N] #If 'features to keep' list given, remove columns not in that list if FEAT != 'all': with open(FEAT) as f: features = f.read().splitlines() features = ['Class'] + features df = df.loc[:,features] print('\nSnapshot of data:') print(df.iloc[:6, :5]) # Run feature selection df_use = df.copy() # Run FS within a cross-validation scheme if CVs != 'pass': print("Working on cv_%s" % str(jobNum)) cv_folds = pd.read_csv(CVs, sep=',', index_col=0) cv = cv_folds['cv_' + str(jobNum)] df_use['Class'][cv==5] = 'unk' if SAVE != 'default': save_name = SAVE else: try: save_name = DF.split("/")[-1] + "_" + y_name + '_'+ alg + '_cv' + str(jobNum) except: save_name = DF.split("/")[-1] + "_" + alg if alg.lower() == "randomforest" or alg.lower() == "rf": DecisionTree(df_use, N, TYPE, save_name, SCORES) elif alg.lower() == "chi2" or alg.lower() == "c2": Chi2(df_use, N, save_name) elif alg.lower() == "l1" or alg.lower() == "lasso": if SAVE == 'default': save_name = save_name + '_' + str(PARAMETER) L1(df_use, PARAMETER, TYPE, save_name) elif alg.lower() == "en" or alg.lower() == "elasticnet": if SAVE == 'default': save_name = save_name + '_' + str(PARAMETER) EN(df_use, PARAMETER, N, save_name, SCORES) elif alg.lower() == "relief" or alg.lower() == "rebate" or alg.lower() == "rl": Relief(df_use, N, n_jobs, save_name, SCORES) elif alg.lower() == "bayesa" or alg.lower() == "ba": BayesA(df_use, N, save_name, SCORES) elif alg.lower() == "blasso" or alg.lower() == "bl": BLASSO(df_use, N, save_name, SCORES) elif alg.lower() == "rrblup" or alg.lower() == "rrb": rrBLUP(df_use, N, save_name, SCORES) elif alg.lower() == "fisher" or alg.lower() == "fet" or alg.lower() == 'enrich': if SAVE == 'default': save_name = save_name + '_' + str(PARAMETER) FET(df_use, PARAMETER, pos, neg, save_name) elif alg.lower() == "random" or alg.lower() == "rand" or alg.lower() == 'ran': Random(df_use, N, save_name) run_time = time.time() - start_time print('Run time (sec):' + str(round(run_time,2))) print('Done!')
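The module above is driven entirely by command-line flags parsed in its __main__ block; a hedged invocation sketch using only flags documented in its docstring (the input file name is made up):

import subprocess

subprocess.run(
    [
        "python", "Feature_Selection.py",
        "-df", "my_features.txt",  # hypothetical tab-separated feature table with a Class column
        "-alg", "randomforest",    # RandomForest-based selection (the script also accepts "rf")
        "-type", "c",              # classification problem
        "-n", "10,50",             # keep the top 10 and top 50 features
    ],
    check=True,
)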
36.169184
144
0.636527
3,667
23,944
4.029452
0.145623
0.027612
0.020574
0.012859
0.458311
0.398078
0.366337
0.342921
0.308541
0.253181
0
0.012953
0.213248
23,944
661
145
36.223903
0.771419
0.247244
0
0.356808
0
0.025822
0.231826
0.023897
0
0
0
0
0
1
0.025822
false
0.007042
0.049296
0
0.075117
0.08216
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed95341fd58164725063ce6ee238cb6800234854
6,815
py
Python
analyses/seasonality_paper_st/comparisons/specific.py
akuhnregnier/wildfire-analysis
a04deada145cec864051d2fb15aec1a53a0246b9
[ "MIT" ]
null
null
null
analyses/seasonality_paper_st/comparisons/specific.py
akuhnregnier/wildfire-analysis
a04deada145cec864051d2fb15aec1a53a0246b9
[ "MIT" ]
null
null
null
analyses/seasonality_paper_st/comparisons/specific.py
akuhnregnier/wildfire-analysis
a04deada145cec864051d2fb15aec1a53a0246b9
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import sys import warnings from pathlib import Path PROJECT_DIR = Path(__file__).resolve().parent if sys.path[0] != str(PROJECT_DIR.parent): sys.path.insert(0, str(PROJECT_DIR.parent)) warnings.filterwarnings( "ignore", category=FutureWarning, module="sklearn.utils.deprecation" ) from common import * from common import _sci_format warnings.filterwarnings( "always", category=FutureWarning, module="sklearn.utils.deprecation" ) figure_saver = PaperFigureSaver( directories=Path("~") / "tmp" / PROJECT_DIR.parent.name / PROJECT_DIR.name, debug=True, ) map_figure_saver = figure_saver(**map_figure_saver_kwargs) for fig_saver in (figure_saver, map_figure_saver): fig_saver.experiment = PROJECT_DIR.name memory = get_memory("__".join((PROJECT_DIR.parent.name, PROJECT_DIR.name)), verbose=100) CACHE_DIR = Path(DATA_DIR) / ".pickle" / PROJECT_DIR.parent.name / PROJECT_DIR.name def single_ax_multi_ale_1d( ax, feature_data, feature, bins=20, xlabel=None, ylabel=None, title=None, n_jobs=8, verbose=False, ): quantile_list = [] ale_list = [] for experiment, single_experiment_data in zip( tqdm( feature_data["experiment"], desc="Calculating feature ALEs", disable=not verbose, ), feature_data["single_experiment_data"], ): cache = SimpleCache( f"{experiment}_{feature}_ale_{bins}", cache_dir=CACHE_DIR / "ale", verbose=10 if verbose else 0, ) try: quantiles, ale = cache.load() except NoCachedDataError: model = single_experiment_data["model"] model.n_jobs = n_jobs X_train = single_experiment_data["X_train"] with parallel_backend("threading", n_jobs=n_jobs): quantiles, ale = first_order_ale_quant( model.predict, X_train, feature, bins=bins ) cache.save((quantiles, ale)) quantile_list.append(quantiles) ale_list.append(ale) # Construct quantiles from the individual quantiles, minimising the amount of interpolation. combined_quantiles = np.vstack([quantiles[None] for quantiles in quantile_list]) final_quantiles = np.mean(combined_quantiles, axis=0) mod_quantiles = np.arange(len(quantiles)) for plot_kwargs, quantiles, ale in zip( feature_data["plot_kwargs"], quantile_list, ale_list ): # Interpolate each of the quantiles relative to the accumulated final quantiles. ax.plot( np.interp(quantiles, final_quantiles, mod_quantiles), ale, **{"marker": "o", "ms": 3, **plot_kwargs}, ) ax.set_xticks(mod_quantiles[::2]) ax.set_xticklabels( [ t if t != "0.0e+0" else "0" for t in _sci_format(final_quantiles[::2], scilim=0) ] ) ax.xaxis.set_tick_params(rotation=18) ax.grid(alpha=0.4, linestyle="--") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_title(title) def multi_model_ale_1d( feature_name, experiment_data, experiment_plot_kwargs, lags=(0, 1, 3, 6, 9), bins=20, title=None, n_jobs=8, verbose=False, figure_saver=None, single_figsize=(5.4, 1.5), legend_bbox=(0.5, 0.5), fig=None, axes=None, legend=True, legend_labels=None, ): assert set(experiment_data) == set(experiment_plot_kwargs) plotted_experiments = set() # Compile data for later plotting. comp_data = {} for lag in tqdm(lags, desc="Lags", disable=not verbose): if lag: feature = f"{feature_name} {-lag} Month" else: feature = feature_name feature_data = defaultdict(list) experiment_count = 0 for experiment, single_experiment_data in experiment_data.items(): # Skip experiments that do not contain this feature. if feature not in single_experiment_data["X_train"]: continue experiment_count += 1 plotted_experiments.add(experiment) # Data required to calculate the ALEs. 
feature_data["experiment"].append(experiment) feature_data["single_experiment_data"].append(single_experiment_data) feature_data["plot_kwargs"].append(experiment_plot_kwargs[experiment]) if experiment_count <= 1: # We need at least two models for a comparison. continue comp_data[feature] = feature_data n_plots = len(comp_data) n_cols = 2 n_rows = math.ceil(n_plots / n_cols) if fig is None and axes is None: fig, axes = plt.subplots( n_rows, n_cols, figsize=np.array(single_figsize) * np.array([n_cols, n_rows]), ) elif fig is not None and axes is not None: pass else: raise ValueError("Either both or none of fig and axes need to be given.") # Disable unused axes. if len(axes.flatten()) > n_plots: for ax in axes.flatten()[-(len(axes.flatten()) - n_plots) :]: ax.axis("off") for ax, feature, feature_data in zip(axes.flatten(), comp_data, comp_data.values()): single_ax_multi_ale_1d( ax, feature_data=feature_data, feature=feature, bins=bins, xlabel=add_units(shorten_features(feature).replace(fill_name(""), "")), n_jobs=n_jobs, verbose=verbose, ) @ticker.FuncFormatter def major_formatter(x, pos): t = np.format_float_scientific(x, precision=1, unique=False, exp_digits=1) if t == "0.0e+0": return "0" elif ".0" in t: return t.replace(".0", "") return t for ax in axes.flatten()[:n_plots]: ax.yaxis.set_major_formatter(major_formatter) for row_axes in axes: row_axes[0].set_ylabel("ALE (BA)") fig.tight_layout() lines = [] labels = [] for experiment in sort_experiments(plotted_experiments): lines.append(Line2D([0], [0], **experiment_plot_kwargs[experiment])) labels.append(experiment_plot_kwargs[experiment]["label"]) labels = labels if legend_labels is None else legend_labels if legend: fig.legend( lines, labels, loc="center", bbox_to_anchor=legend_bbox, ncol=len(labels) if len(labels) <= 6 else 6, ) if figure_saver is not None: figure_saver.save_figure( fig, f"{shorten_features(feature_name).replace(' ', '_').lower()}_ale_comp", sub_directory="ale_comp", )
29.375
96
0.612619
840
6,815
4.738095
0.282143
0.033166
0.040201
0.015075
0.195477
0.101005
0.058291
0.019095
0.019095
0
0
0.01263
0.279677
6,815
231
97
29.502165
0.798126
0.055613
0
0.10989
0
0
0.071417
0.029563
0
0
0
0
0.005495
1
0.016484
false
0.005495
0.027473
0
0.06044
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed96aca008acbd291e61b7b834d23df210a0de3f
722
py
Python
scripts/bbann_script/rewrite.py
PwzXxm/BBAnn
2dafce027599b3cdf84070248467294dca2a1042
[ "MIT" ]
11
2021-11-01T06:49:30.000Z
2022-02-25T08:09:21.000Z
scripts/bbann_script/rewrite.py
PwzXxm/BBAnn
2dafce027599b3cdf84070248467294dca2a1042
[ "MIT" ]
null
null
null
scripts/bbann_script/rewrite.py
PwzXxm/BBAnn
2dafce027599b3cdf84070248467294dca2a1042
[ "MIT" ]
5
2021-11-04T02:18:41.000Z
2022-03-17T04:13:07.000Z
#!/usr/bin/python3
import sys

# int() instead of eval(): the argument is a plain column count.
column_num = int(sys.argv[1])
print("ARGUMENT column_num: ", column_num)

file_name = "tana_res.txt"

records = {}
with open(file_name) as f:
    while True:
        line = f.readline()  # Line 1: log file name
        if not line:
            break
        print(line.strip())
        key = f.readline().strip()  # Line 2: the key == nprobe, refine_nprobe
        print("key: {0}".format(key))
        new_record = []
        for x in range(column_num):  # Line 3-n
            new_record.append(f.readline().strip())
        records[key] = new_record

sorted_records = sorted(records.items())

outfile = "form.txt"
with open(outfile, 'w') as f:
    for kv in sorted_records:
        f.write(kv[0] + " ")
        for val in kv[1]:
            f.write(val)
            f.write(' ')
        f.write('\n')
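A hedged sketch of the record layout the script expects, inferred purely from the readline() sequence above; the file contents and the column count are invented for illustration, since the real tana_res.txt is not part of this record.

# Hypothetical tana_res.txt fragment (one record; values invented):
#   run_a.log            <- line 1: log file name (echoed, not stored)
#   8,16                 <- line 2: the key (nprobe, refine_nprobe)
#   0.913                <- lines 3..(2 + column_num): one value per line
#   1250
#
# Invocation sketch for the two-value layout above:
#   python3 rewrite.py 2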
22.5625
71
0.65097
119
722
3.840336
0.462185
0.078775
0.061269
0
0
0
0
0
0
0
0
0.013445
0.1759
722
31
72
23.290323
0.754622
0.123269
0
0
0
0
0.08744
0
0
0
0
0
0
1
0
false
0
0.038462
0
0.038462
0.115385
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed96b143353b2e72a3e901bf774af07ab594b2aa
2,743
py
Python
kloppy/tests/test_datafactory.py
ThomasSeidl/kloppy
ca59bb2aa3b54b08a50d35e2ed2dd3c2f56cdded
[ "BSD-3-Clause" ]
176
2020-04-24T09:12:05.000Z
2022-03-27T07:03:44.000Z
kloppy/tests/test_datafactory.py
ThomasSeidl/kloppy
ca59bb2aa3b54b08a50d35e2ed2dd3c2f56cdded
[ "BSD-3-Clause" ]
95
2020-04-24T18:37:36.000Z
2022-03-23T21:59:10.000Z
kloppy/tests/test_datafactory.py
ThomasSeidl/kloppy
ca59bb2aa3b54b08a50d35e2ed2dd3c2f56cdded
[ "BSD-3-Clause" ]
39
2020-05-08T21:45:26.000Z
2022-03-19T09:29:41.000Z
import os

from kloppy import DatafactorySerializer
from kloppy.domain import (
    AttackingDirection,
    Ground,
    Orientation,
    Period,
    Point,
    Provider,
    SetPieceType,
)
from kloppy.domain.models.common import DatasetType


class TestDatafactory:
    def test_correct_deserialization(self):
        base_dir = os.path.dirname(__file__)

        serializer = DatafactorySerializer()

        with open(
            f"{base_dir}/files/datafactory_events.json", "r"
        ) as event_data:
            dataset = serializer.deserialize(
                inputs={"event_data": event_data},
                options={"coordinate_system": Provider.DATAFACTORY},
            )

        assert dataset.metadata.provider == Provider.DATAFACTORY
        assert dataset.dataset_type == DatasetType.EVENT
        assert len(dataset.events) == 1027
        assert len(dataset.metadata.periods) == 2
        assert dataset.events[10].ball_owning_team == dataset.metadata.teams[1]
        assert dataset.events[23].ball_owning_team == dataset.metadata.teams[0]
        assert dataset.metadata.orientation == Orientation.HOME_TEAM
        assert dataset.metadata.teams[0].name == "Team A"
        assert dataset.metadata.teams[0].ground == Ground.HOME
        assert dataset.metadata.teams[1].name == "Team B"
        assert dataset.metadata.teams[1].ground == Ground.AWAY

        player = dataset.metadata.teams[0].players[0]
        assert player.player_id == "38804"
        assert player.jersey_no == 1
        assert str(player) == "Daniel Bold"
        assert player.position is None  # not set
        assert player.starting

        assert dataset.metadata.periods[0] == Period(
            id=1,
            start_timestamp=0,
            end_timestamp=2912,
            attacking_direction=AttackingDirection.HOME_AWAY,
        )
        assert dataset.metadata.periods[1] == Period(
            id=2,
            start_timestamp=2700,
            end_timestamp=5710,
            attacking_direction=AttackingDirection.AWAY_HOME,
        )

        assert dataset.events[0].coordinates == Point(0.01, 0.01)

        # Check the qualifiers
        assert dataset.events[0].qualifiers[0].value == SetPieceType.KICK_OFF
        assert dataset.events[412].qualifiers[0].value == SetPieceType.THROW_IN

    def test_correct_normalized_deserialization(self):
        base_dir = os.path.dirname(__file__)

        serializer = DatafactorySerializer()

        with open(
            f"{base_dir}/files/datafactory_events.json", "r"
        ) as event_data:
            dataset = serializer.deserialize(
                inputs={"event_data": event_data},
            )

        assert dataset.events[0].coordinates == Point(0.505, 0.505)
33.864198
79
0.638717
297
2,743
5.750842
0.343434
0.114169
0.098361
0.04918
0.357143
0.293911
0.254098
0.210773
0.210773
0.210773
0
0.030617
0.261757
2,743
80
80
34.2875
0.81284
0.010208
0
0.215385
0
0
0.054204
0.029499
0
0
0
0
0.338462
1
0.030769
false
0
0.061538
0
0.107692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed96c2f783024bd964e139219b7d2d0bf6f1f219
2,308
py
Python
models/BiLSTM_MHATT.py
shiqiuwang/shiqiuwang-N2NCause
a6cdb702b000b62b29ccdbd74bfbb666420124f1
[ "MIT" ]
null
null
null
models/BiLSTM_MHATT.py
shiqiuwang/shiqiuwang-N2NCause
a6cdb702b000b62b29ccdbd74bfbb666420124f1
[ "MIT" ]
null
null
null
models/BiLSTM_MHATT.py
shiqiuwang/shiqiuwang-N2NCause
a6cdb702b000b62b29ccdbd74bfbb666420124f1
[ "MIT" ]
null
null
null
import tensorflow as tf

from layers.attention import stacked_multihead_attention
from layers.recurrent import rnn_layer
from layers.similarity import manhattan_similarity
from models.base_model import BaseSiameseNet


class LSTMATTBasedSiameseNet(BaseSiameseNet):
    def __init__(
        self,
        max_sequence_len,
        vocabulary_size,
        main_cfg,
        model_cfg,
    ):
        BaseSiameseNet.__init__(
            self,
            max_sequence_len,
            vocabulary_size,
            main_cfg,
            model_cfg,
        )

    def siamese_layer(
        self,
        sequence_len,
        model_cfg,
    ):
        # print(model_cfg)
        hidden_size = model_cfg['PARAMS'].getint('hidden_size')
        cell_type = model_cfg['PARAMS'].get('cell_type')
        num_blocks = model_cfg['PARAMS'].getint('num_blocks')
        num_heads = model_cfg['PARAMS'].getint('num_heads')
        use_residual = model_cfg['PARAMS'].getboolean('use_residual')
        dropout_rate = model_cfg['PARAMS'].getfloat('dropout_rate')

        BiLSTM_sen1 = rnn_layer(
            embedded_x=self.embedded_x1,
            hidden_size=hidden_size,
            bidirectional=False,
            cell_type=cell_type,
        )
        BiLSTM_sen2 = rnn_layer(
            embedded_x=self.embedded_x2,
            hidden_size=hidden_size,
            bidirectional=False,
            cell_type=cell_type,
            reuse=True,
        )

        ATT_out1, self.debug_vars['attentions_x1'] = stacked_multihead_attention(
            BiLSTM_sen1,
            num_blocks=num_blocks,
            num_heads=num_heads,
            use_residual=use_residual,
            is_training=self.is_training,
            dropout_rate=dropout_rate,
        )
        ATT_out2, self.debug_vars['attentions_x2'] = stacked_multihead_attention(
            BiLSTM_sen2,
            num_blocks=num_blocks,
            num_heads=num_heads,
            use_residual=use_residual,
            is_training=self.is_training,
            dropout_rate=dropout_rate,
            reuse=True,
        )

        out1 = tf.reduce_mean(ATT_out1, axis=1)
        out2 = tf.reduce_mean(ATT_out2, axis=1)

        return manhattan_similarity(out1, out2), out1, out2
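A minimal sketch of the `model_cfg` shape the accessors above imply. The `.getint`/`.getboolean`/`.getfloat` calls match the configparser section-proxy API, so the config plausibly comes from an INI file; the section values below are illustrative, not the project's actual defaults.

# Illustrative config; only the key names are taken from the code above.
import configparser

model_cfg = configparser.ConfigParser()
model_cfg["PARAMS"] = {
    "hidden_size": "128",
    "cell_type": "LSTM",
    "num_blocks": "2",
    "num_heads": "4",
    "use_residual": "yes",
    "dropout_rate": "0.1",
}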
30.368421
81
0.596187
246
2,308
5.195122
0.288618
0.062598
0.065728
0.046948
0.405321
0.369327
0.323944
0.323944
0.323944
0.323944
0
0.012862
0.326257
2,308
75
82
30.773333
0.809003
0.006932
0
0.5
0
0
0.054561
0
0
0
0
0
0
1
0.03125
false
0
0.078125
0
0.140625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed992d81dea8a5144be3f93a447f968a5f7b383d
2,923
py
Python
ddesigner/conditional.py
Ball-Man/python-ddesigner
2e1522e28389fe6e2d7b40f8877732563d3dd368
[ "MIT" ]
1
2021-08-17T10:40:48.000Z
2021-08-17T10:40:48.000Z
ddesigner/conditional.py
Ball-Man/python-ddesigner
2e1522e28389fe6e2d7b40f8877732563d3dd368
[ "MIT" ]
null
null
null
ddesigner/conditional.py
Ball-Man/python-ddesigner
2e1522e28389fe6e2d7b40f8877732563d3dd368
[ "MIT" ]
null
null
null
"""Module containing the logic for an arithmetic parser. Lark is used as a parser generator. """ from typing import Mapping import operator import lark # Default syntax for arithmetic expressions ARITHM_EXPRESSIONS_SYNTAX = """ ?start: or ?or: and | or "||" and -> or_ | or "or" and -> or_ ?and: comparison | and "&&" comparison -> and_ | and "and" comparison -> and_ ?comparison: sum | comparison "<" sum -> lt | comparison ">" sum -> gt | comparison "<=" sum -> lte | comparison ">=" sum -> gte | comparison "==" sum -> eq | comparison "!=" sum -> neq ?sum: product | sum "+" product -> add | sum "-" product -> sub ?product: not | product "*" not -> mul | product "/" not -> div | product "//" not -> floordiv ?not: atom | "!"atom -> not_ | "not" atom -> not_ ?atom: NUMBER -> number | "-" atom -> neg | "False" -> false | "True" -> true | NAME -> var | STRING -> string | "(" or ")" %import common.ESCAPED_STRING -> STRING %import common.CNAME -> NAME %import common.NUMBER %import common.WS_INLINE %ignore WS_INLINE """ @lark.v_args(inline=True) class ArithmExpressionTransformer(lark.Transformer): """Transformer used to calculate the result of arithmetic exp.""" add = operator.add sub = operator.sub mul = operator.mul div = operator.truediv floordiv = operator.floordiv neg = operator.neg lt = operator.lt gt = operator.gt lte = operator.le gte = operator.ge eq = operator.eq neq = operator.ne def __init__(self, variables): self.variables = variables def and_(self, x, y): """Boolean and. For simplicity, an easy implementation of boolean operators which will NOT provide shot circuiting. """ return x and y def or_(self, x, y): """Boolean or. For simplicity, an easy implementation of boolean operators which will NOT provide shot circuiting. """ return x or y not_ = operator.not_ number = float def string(self, string: str): return string.strip('"') def true(self): return True def false(self): return False def var(self, name): return self.variables[name] # Default parser for arithmetic expressions (using the default syntax) ARITHM_EXPRESSIONS_PARSER = lark.Lark(ARITHM_EXPRESSIONS_SYNTAX) def arithm_expression_evaluate( expression: str, variables: Mapping, parser: lark.Lark = ARITHM_EXPRESSIONS_PARSER, transformer: lark.Transformer = ArithmExpressionTransformer): """Return the value of the given expression. Free variable names will be calculated using the 'variables' mapping. """ return transformer(variables).transform(parser.parse(expression))
23.384
70
0.609648
331
2,923
5.299094
0.305136
0.051881
0.011973
0.010262
0.152794
0.103763
0.103763
0.103763
0.103763
0.103763
0
0
0.280192
2,923
124
71
23.572581
0.83365
0.205953
0
0
0
0
0.441256
0.009417
0
0
0
0
0
1
0.103896
false
0
0.090909
0.051948
0.480519
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ed9cb27df1647e3b076c465c218bb03a9d9b60ef
20,065
py
Python
source.py
Shailendram1990/COVID19_mobility
70dc3d05313b233229ea5f8d1c4c1b0dffe44e33
[ "MIT" ]
1
2020-07-21T16:11:51.000Z
2020-07-21T16:11:51.000Z
source.py
Shailendram1990/COVID19_mobility
70dc3d05313b233229ea5f8d1c4c1b0dffe44e33
[ "MIT" ]
null
null
null
source.py
Shailendram1990/COVID19_mobility
70dc3d05313b233229ea5f8d1c4c1b0dffe44e33
[ "MIT" ]
null
null
null
""" This script loads Google and Apple Mobility reports, builds cleaned reports in different formats and builds merged files from both sources. Original data: - Google Community Mobility reports: https://www.google.com/covid19/mobility/ - Apple Mobility Trends reports: https://www.apple.com/covid19/mobility """ import os import datetime import requests import urllib.request import time from bs4 import BeautifulSoup import re import json import pandas as pd def get_google_link(): '''Get link of Google Community Mobility report file Returns: link (str): link of Google Community report file ''' # get webpage source url = 'https://www.google.com/covid19/mobility/' response = requests.get(url) soup = BeautifulSoup(response.text, "html.parser") csv_tag = soup.find('a', {"class": "icon-link"}) link = csv_tag['href'] return link def download_google_report(directory="google_reports"): '''Download Google Community Mobility report in CSV format Args: directory: directory to which CSV report will be downloaded Returns: new_files (bool): flag indicating whether or not new files have been downloaded ''' new_files = False # create directory if it don't exist if not os.path.exists(directory): os.makedirs(directory) # download CSV file link = get_google_link() file_name = "Global_Mobility_Report.csv" path = os.path.join(directory, file_name) if not os.path.isfile(path): new_files = True urllib.request.urlretrieve(link, path) else: path_new = os.path.join(directory, file_name + "_new") urllib.request.urlretrieve(link, path_new) if os.path.getsize(path) == os.path.getsize(path_new): os.remove(path_new) else: new_files = True os.remove(path) os.rename(path_new, path) if not new_files: print('Google: No updates') else: print('Google: Update available') return new_files def build_google_report( source=os.path.join("google_reports", "Global_Mobility_Report.csv"), report_type="regions"): '''Build cleaned Google report for the worldwide or for some country (currently only for the US) Args: source: location of the raw Google CSV report report_type: two options available: "regions" - report for the worldwide, "US" - report for the US Returns: google (DataFrame): generated Google report ''' google = pd.read_csv(source, low_memory=False) google.columns = google.columns.str.replace( r'_percent_change_from_baseline', '') google.columns = google.columns.str.replace(r'_', ' ') google = google.rename(columns={'country region': 'country'}) if report_type == "regions": google = google[google['sub region 2'].isnull()] google = google.rename(columns={'sub region 1': 'region'}) google = google.loc[:, ['country', 'region', 'date', 'retail and recreation', 'grocery and pharmacy', 'parks', 'transit stations', 'workplaces', 'residential']] google['region'].fillna('Total', inplace=True) elif report_type == "US": google = google[(google['country'] == "United States")] google = google.rename( columns={ 'sub region 1': 'state', 'sub region 2': 'county'}) google = google.loc[:, ['state', 'county', 'date', 'retail and recreation', 'grocery and pharmacy', 'parks', 'transit stations', 'workplaces', 'residential']] google['state'].fillna('Total', inplace=True) google['county'].fillna('Total', inplace=True) return google def get_apple_link(): '''Get link of Apple Mobility Trends report file Returns: link (str): link of Apple Mobility Trends report file ''' # get link via API json_link = "https://covid19-static.cdn-apple.com/covid19-mobility-data/current/v3/index.json" with urllib.request.urlopen(json_link) as url: json_data = 
json.loads(url.read().decode()) link = "https://covid19-static.cdn-apple.com" + \ json_data['basePath'] + json_data['regions']['en-us']['csvPath'] return link def download_apple_report(directory="apple_reports"): '''Download Apple Mobility Trends report in CSV Args: directory: directory to which CSV report will be downloaded Returns: new_files (bool): flag indicating whether or not a new file has been downloaded ''' new_files = False if not os.path.exists(directory): os.makedirs(directory) link = get_apple_link() file_name = "applemobilitytrends.csv" path = os.path.join(directory, file_name) if not os.path.isfile(path): new_files = True urllib.request.urlretrieve(link, path) else: path_new = os.path.join(directory, file_name + "_new") urllib.request.urlretrieve(link, path_new) if os.path.getsize(path) == os.path.getsize(path_new): os.remove(path_new) else: new_files = True os.remove(path) os.rename(path_new, path) if not new_files: print('Apple: No updates') else: print('Apple: Update available') return new_files def build_apple_report( source=os.path.join( 'apple_reports', "applemobilitytrends.csv"), report_type="regions"): '''Build cleaned Apple report (transform dates from columns to rows, add country names for subregions and cities) for the worldwide or for some country (currently only for the US) Args: source: location of the raw Apple CSV report destination: destination file path report_type: two options available: "regions" - report for the worldwide, "US" - report for the US Returns: apple (DataFrame): generated Apple report ''' apple = pd.read_csv(source) apple = apple.drop(columns=['alternative_name']) apple['country'] = apple.apply( lambda x: x['region'] if x['geo_type'] == 'country/region' else x['country'], axis=1) if report_type == 'regions': apple = apple[apple.geo_type != 'county'] apple['sub-region'] = apple.apply(lambda x: 'Total' if x['geo_type'] == 'country/region' else ( x['region'] if x['geo_type'] == 'sub-region' else x['sub-region']), axis=1) apple['subregion_and_city'] = apple.apply( lambda x: 'Total' if x['geo_type'] == 'country/region' else x['region'], axis=1) apple = apple.drop(columns=['region']) apple['sub-region'] = apple['sub-region'].fillna( apple['subregion_and_city']) apple = apple.melt( id_vars=[ 'geo_type', 'subregion_and_city', 'sub-region', 'transportation_type', 'country'], var_name='date') apple['value'] = apple['value'] - 100 apple = apple.pivot_table( index=[ "geo_type", "subregion_and_city", "sub-region", "date", "country"], columns='transportation_type').reset_index() apple.columns = [t + (v if v != "value" else "") for v, t in apple.columns] apple = apple.loc[:, ['country', 'sub-region', 'subregion_and_city', 'geo_type', 'date', 'driving', 'transit', 'walking']] apple = apple.sort_values(by=['country', 'sub-region', 'subregion_and_city', 'date']).reset_index(drop=True) elif report_type == "US": apple = apple[apple.country == "United States"].drop(columns=[ 'country']) apple['sub-region'] = apple['sub-region'].fillna( apple['region']).replace({"United States": "Total"}) apple['region'] = apple.apply(lambda x: x['region'] if ( x['geo_type'] == 'city' or x['geo_type'] == 'county') else 'Total', axis=1) apple = apple.rename( columns={ 'sub-region': 'state', 'region': 'county_and_city'}) apple = apple.melt( id_vars=[ 'geo_type', 'state', 'county_and_city', 'transportation_type'], var_name='date') apple['value'] = apple['value'] - 100 apple = apple.pivot_table( index=[ 'geo_type', 'state', 'county_and_city', 'date'], 
columns='transportation_type').reset_index() apple.columns = [t + (v if v != "value" else "") for v, t in apple.columns] apple = apple.loc[:, ['state', 'county_and_city', 'geo_type', 'date', 'driving', 'transit', 'walking']] apple = apple.sort_values( by=['state', 'county_and_city', 'geo_type', 'date']).reset_index(drop=True) return apple def check_waze_report(countries_source = os.path.join("waze_reports", "Waze_Country-Level_Data.csv"), cities_source = os.path.join("waze_reports", "Waze_City-Level_Data.csv"), report_source = os.path.join("waze_reports", "waze_mobility.csv")): '''Checks if new raw Waze CSV reports have been added. Args: countries_source: location of the raw Waze country-level CSV report cities_source: location of the raw Waze city-level CSV report report_source: location of the Waze CSV report generated by build_waze_report function (if available) Returns: new_files (bool): flag indicating whether or not new raw Waze CSV reports have been added ''' if not os.path.isfile(report_source): new_files = True else: # check by a number of rows in files with open(countries_source) as f: country_rows = sum(1 for line in f) with open(cities_source) as f: cities_rows = sum(1 for line in f) with open(report_source) as f: report_rows = sum(1 for line in f) new_files = country_rows + cities_rows != report_rows + 1 if not new_files: print('Waze: No updates') else: print('Waze: Update available') return new_files def build_waze_report(countries_source=os.path.join("waze_reports", "Waze_Country-Level_Data.csv"), cities_source=os.path.join("waze_reports", "Waze_City-Level_Data.csv")): '''Build cleaned Waze report (transform dates from string to date format, merge country&city-level data, add geo_type column) Args: countries_source: location of the raw Waze country-level CSV report cities_source: location of the raw Waze city-level CSV report Returns: waze (DataFrame): generated Waze report ''' waze_countries = pd.read_csv(countries_source, parse_dates=['Date']) waze_cities = pd.read_csv(cities_source, parse_dates=['Date']) waze_countries['City'] = 'Total' waze_countries['geo_type'] = 'country' waze_cities['geo_type'] = 'city' waze = waze_countries.append(waze_cities) waze = waze.rename(columns={'Country':'country', 'City':'city', 'Date':'date', '% Change In Waze Driven Miles/KMs':'driving_waze'}) waze['driving_waze'] = waze['driving_waze'] * 100 waze['date'] = waze['date'].dt.date waze = waze.loc[:,['country', 'city','geo_type','date', 'driving_waze']] waze = waze.sort_values(by=['country', 'city', 'geo_type', 'date']).reset_index(drop=True) return waze def build_summary_report(apple_source, google_source, report_type="regions"): '''Build a merged report from Google and Apple data Args: apple_source: location of the CSV report generated by build_apple_report function google_source: location of the CSV report generated by build_google_report function report_type: two options available: "regions" - report for the worldwide, "US" - report for the US Returns: summary (DataFrame): merged report from Google and Apple data ''' apple = pd.read_csv(apple_source, low_memory=False) google = pd.read_csv(google_source, low_memory=False) summary = pd.DataFrame() # build report for regions if report_type == "regions": apple = apple.rename(columns={'subregion_and_city': 'region'}) apple = apple.loc[:, ['country', 'region', 'date', 'driving', 'transit', 'walking']] # get matching table for converting Apple countries and subregions to # Google names country_AtoG_file = os.path.join( 'auxiliary_data', 
'country_Apple_to_Google.csv') subregions_AtoG_file = os.path.join( 'auxiliary_data', 'subregions_Apple_to_Google.csv') if os.path.isfile(country_AtoG_file): country_AtoG = pd.read_csv(country_AtoG_file, index_col=0) else: country_AtoG = None if os.path.isfile(subregions_AtoG_file): subregions_AtoG = pd.read_csv(subregions_AtoG_file, index_col=0) else: subregions_AtoG = None # convert Apple countries and subregions to Google names apple['country'] = apple.apply(lambda x: country_AtoG.loc[x['country'], 'country_google'] if ( country_AtoG is not None and x['country'] in country_AtoG.index) else x['country'], axis=1) apple['region'] = apple.apply(lambda x: subregions_AtoG.loc[x['region'], 'subregion_Google'] if ( subregions_AtoG is not None and x['region'] in subregions_AtoG.index) else x['region'], axis=1) # merge reports apple = apple.set_index(['country', 'region', 'date']) google = google.set_index(['country', 'region', 'date']) summary = google.join(apple, how='outer') summary = summary.reset_index(level=['country', 'region', 'date']) elif report_type == "US": apple = apple.loc[:, ['state', 'county_and_city', 'date', 'driving', 'transit', 'walking']] apple.loc[apple.state == 'Washington DC', 'state'] = 'District of Columbia' apple.loc[apple.county_and_city == 'Washington DC', 'county_and_city'] = 'Total' google = google.rename(columns={'county': 'county_and_city'}) # merge reports apple = apple.set_index(['state', 'county_and_city', 'date']) google = google.set_index(['state', 'county_and_city', 'date']) summary = google.join(apple, how='outer') summary = summary.reset_index( level=['state', 'county_and_city', 'date']) return summary def run(): """Run parse flow and build reports""" # process Google reports new_files_status_google = download_google_report() if new_files_status_google: # build reports google_world = build_google_report() google_US = build_google_report(report_type="US") # write reports to CSV and Excel google_world.to_csv(os.path.join("google_reports", "mobility_report_countries.csv"), index=False) google_world.to_excel(os.path.join("google_reports", "mobility_report_countries.xlsx"), index=False, sheet_name='Data', engine = 'xlsxwriter') google_US.to_csv(os.path.join("google_reports", "mobility_report_US.csv"), index=False) google_US.to_excel(os.path.join("google_reports", "mobility_report_US.xlsx"), index=False, sheet_name='Data', engine = 'xlsxwriter') # process Apple reports new_files_status_apple = download_apple_report() if new_files_status_apple: # build reports apple_world = build_apple_report() apple_US = build_apple_report(report_type="US") # write reports to CSV and Excel apple_world.to_csv(os.path.join("apple_reports", "apple_mobility_report.csv"), index=False) apple_world.to_excel(os.path.join("apple_reports", "apple_mobility_report.xlsx"), index=False, sheet_name='Data', engine = 'xlsxwriter') apple_US.to_csv(os.path.join("apple_reports", "apple_mobility_report_US.csv"), index=False) apple_US.to_excel(os.path.join("apple_reports", "apple_mobility_report_US.xlsx"), index=False, sheet_name='Data', engine = 'xlsxwriter') # process Waze reports new_files_status_waze = check_waze_report() if new_files_status_waze: # build report waze = build_waze_report() # write report to CSV and Excel waze.to_csv(os.path.join("waze_reports", "waze_mobility.csv"), index=False) waze.to_excel(os.path.join("waze_reports", "waze_mobility.xlsx"), index=False, sheet_name='Data', engine='xlsxwriter') # build summary reports if new_files_status_apple or new_files_status_google: 
print("Merging reports...") summary_regions = build_summary_report(os.path.join("apple_reports","apple_mobility_report.csv"), os.path.join("google_reports", "mobility_report_countries.csv")) summary_US = build_summary_report(os.path.join("apple_reports", "apple_mobility_report_US.csv"), os.path.join("google_reports", "mobility_report_US.csv"), 'US') summary_countries = summary_regions[summary_regions['region']=='Total'].drop(columns=['region']) print('Writing merged reports to files...') summary_regions.to_csv(os.path.join("summary_reports", "summary_report_regions.csv"), index=False) summary_regions.to_excel(os.path.join("summary_reports", "summary_report_regions.xlsx"), index=False, sheet_name='Data', engine = 'xlsxwriter') summary_US.to_csv(os.path.join("summary_reports", "summary_report_US.csv"), index=False) summary_US.to_excel(os.path.join("summary_reports", "summary_report_US.xlsx"), index=False, sheet_name='Data', engine = 'xlsxwriter') summary_countries.to_csv(os.path.join("summary_reports", "summary_report_countries.csv"), index=False) summary_countries.to_excel(os.path.join("summary_reports", "summary_report_countries.xlsx"), index=False, sheet_name='Data', engine = 'xlsxwriter') if __name__ == '__main__': run()
42.782516
140
0.579018
2,303
20,065
4.854103
0.111594
0.023616
0.02952
0.011629
0.607747
0.541014
0.488147
0.41086
0.380267
0.312819
0
0.002805
0.306952
20,065
468
141
42.873932
0.801093
0.170596
0
0.384858
0
0.003155
0.228871
0.048013
0
0
0
0
0
1
0.031546
false
0
0.028391
0
0.088328
0.025237
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eda08d58045fdf93c52e02e54d88ae92b393de36
2,130
py
Python
cogs/lyrics.py
minihut/leafy-bot
b9c12b18f8a6ba8409ced5fe352421623bbffcee
[ "MIT" ]
12
2021-01-19T05:47:03.000Z
2022-01-14T12:51:33.000Z
cogs/lyrics.py
minihut/leafy-bot
b9c12b18f8a6ba8409ced5fe352421623bbffcee
[ "MIT" ]
1
2021-02-22T12:08:10.000Z
2021-02-22T12:08:10.000Z
cogs/lyrics.py
minihut/leafy-bot
b9c12b18f8a6ba8409ced5fe352421623bbffcee
[ "MIT" ]
12
2021-01-17T07:31:34.000Z
2021-05-17T14:01:07.000Z
import discord
import requests
from discord.ext import commands
from discord.ext.commands import BucketType, cooldown


class Lyrics(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        print("Lyrics cog loaded successfully")

    @commands.command(aliases=["lyrics"], description="Shows the lyrics of given song")
    @cooldown(1, 30, BucketType.user)
    async def ly(self, ctx, *, lyrics):
        if lyrics is None:
            await ctx.send("You forgot lyrics")
        else:
            words = "+".join(lyrics.split(" "))
            print(words)
            URL = f"https://some-random-api.ml/lyrics?title={words}"

            def check_valid_status_code(request):
                if request.status_code == 200:
                    return request.json()
                return False

            def get_song():
                request = requests.get(URL)
                data = check_valid_status_code(request)
                return data

            song = get_song()
            if not song:
                await ctx.channel.send("Couldn't get lyrics from API. Try again later.")
            else:
                music = song["lyrics"]
                ti = song["title"]
                au = song["author"]
                embed = discord.Embed(
                    timestamp=ctx.message.created_at,
                    title="Title: Song",  # Embed's keyword is lowercase "title"
                    color=0xFF0000,
                )
                embed.add_field(name=f"Title: {ti}", value=f"Author: {au}")
                # Step must match the slice length (1024 = embed field limit);
                # a larger step would silently drop part of each block.
                chunks = [music[i : i + 1024] for i in range(0, len(music), 1024)]
                for chunk in chunks:
                    embed.add_field(name="\u200b", value=chunk, inline=False)
                # embed.add_field(name='Song', value=f'{music}', inline=True)
                embed.set_footer(
                    text=f"Requested By: {ctx.author.name}",
                    icon_url=f"{ctx.author.avatar_url}",
                )
                await ctx.send(embed=embed)


def setup(client):
    client.add_cog(Lyrics(client))
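A loading sketch: the module ends with the `setup(client)` hook that discord.py's extension loader calls, so the cog is typically registered like this (assuming a `commands.Bot` instance named `bot`, which is not part of this file).

# Illustrative; the dotted path matches this record's cogs/lyrics.py location.
bot.load_extension("cogs.lyrics")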
31.323529
88
0.528638
239
2,130
4.619247
0.456067
0.021739
0.035326
0.046196
0.048913
0
0
0
0
0
0
0.01685
0.359155
2,130
67
89
31.791045
0.791941
0.02723
0
0.04
0
0
0.139614
0.011111
0
0
0.003865
0
0
1
0.08
false
0
0.08
0
0.24
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eda1be23822efe6556205accc545c9d894a8431d
2,761
py
Python
model_train.py
Sanatramesh/PCamNet
7238a87584ffec26336ae2034ec5723d8a035dca
[ "BSD-3-Clause" ]
null
null
null
model_train.py
Sanatramesh/PCamNet
7238a87584ffec26336ae2034ec5723d8a035dca
[ "BSD-3-Clause" ]
null
null
null
model_train.py
Sanatramesh/PCamNet
7238a87584ffec26336ae2034ec5723d8a035dca
[ "BSD-3-Clause" ]
null
null
null
import time
import pickle

import numpy as np
from copy import deepcopy


class ModelTraining:
    def __init__(self, model, data_loader, batch_size=10, epochs=20,
                 model_ckpt_file='model/PCamNet'):
        self.model = model
        self.data_loader = data_loader  # List of tuple: (left_cam, right_cam, disp_map) filenames
        self.batch_size = batch_size
        self.num_epochs = epochs
        self.num_batch = len(self.data_loader)
        self.model_ckpt_file = model_ckpt_file
        self.train_stats = []

    def train_model(self):
        print('Training Model: %s ... ' % self.model.get_name())

        # train model for one epoch - call fn model.train_batch(data, label) for each batch
        for epoch in range(self.num_epochs):
            training_loss = 0.0
            training_count = 0
            validation_loss = 0.0
            validation_count = 0
            validation_predict = []
            true_labels = []

            t1 = time.time()
            for batch_data, batch_labels in self.data_loader:
                if training_count <= 0.8 * self.num_batch:
                    training_loss += self.model.train_batch(batch_data, batch_labels)
                    training_count += 1
                else:
                    validation_loss += self.model.compute_loss(batch_data, batch_labels)
                    # validation_predict.append(self.model.forward_pass(batch_data).numpy())
                    validation_count += 1
                    # true_labels.append(deepcopy(batch_labels).numpy())
            t2 = time.time()

            self.train_stats.append([epoch, training_loss.numpy(), training_count,
                                     validation_loss.numpy(), validation_count,
                                     validation_predict, true_labels, t2 - t1])

            print()
            print('epoch: %4d train loss: %20.6f val loss: %20.6f' %
                  (epoch, training_loss / training_count,
                   validation_loss / validation_count))
            print('epoch time:', np.round(t2 - t1, 2), 's')
            print('time for completion:',
                  np.round((t2 - t1) * (self.num_epochs - epoch - 1) / 60, 2), 'm')
            print('')

            self.model.save_model(self.model_ckpt_file + '.pth')

        pickle.dump(self.train_stats, open(self.model_ckpt_file + '_stats.pkl', 'wb'))
        print('Training Model: %s ... Complete' % self.model.get_name())
        print('Saving stats into model/stats.pkl')
        return 0

    def get_model(self):
        return self.model

    def set_model_save(self, filename):
        self.model_ckpt_file = filename
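A hypothetical usage sketch. ModelTraining only assumes a model object exposing get_name/train_batch/compute_loss/save_model and an iterable loader yielding (batch_data, batch_labels) pairs; "model" and "loader" below are placeholders for those project objects, which are not part of this file.

# Roughly the first 80% of batches train the model, the remainder are
# used for validation, per the training loop above.
trainer = ModelTraining(model, loader, batch_size=10, epochs=20,
                        model_ckpt_file='model/PCamNet')
trainer.train_model()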
37.821918
108
0.561391
321
2,761
4.582555
0.277259
0.079538
0.053025
0.046227
0
0
0
0
0
0
0
0.019189
0.33937
2,761
72
109
38.347222
0.787281
0.094893
0
0.039216
0
0
0.080994
0
0
0
0
0
0
1
0.078431
false
0
0.078431
0.019608
0.215686
0.156863
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eda47ef3a198b2afd2d1ca2fa747773c857b5cf8
3,745
py
Python
aiohttp_pydantic/oas/docstring_parser.py
HerrMuellerluedenscheid/aiohttp-pydantic
87b4487cc46213a3248807825e2e3e71413fa543
[ "MIT" ]
42
2020-11-18T16:14:45.000Z
2022-03-21T09:18:48.000Z
aiohttp_pydantic/oas/docstring_parser.py
HerrMuellerluedenscheid/aiohttp-pydantic
87b4487cc46213a3248807825e2e3e71413fa543
[ "MIT" ]
26
2020-11-15T08:27:09.000Z
2022-03-04T15:26:20.000Z
aiohttp_pydantic/oas/docstring_parser.py
HerrMuellerluedenscheid/aiohttp-pydantic
87b4487cc46213a3248807825e2e3e71413fa543
[ "MIT" ]
11
2020-11-24T22:13:35.000Z
2021-10-02T19:56:26.000Z
""" Utility to extract extra OAS description from docstring. """ import re import textwrap from typing import Dict, List class LinesIterator: def __init__(self, lines: str): self._lines = lines.splitlines() self._i = -1 def next_line(self) -> str: if self._i == len(self._lines) - 1: raise StopIteration from None self._i += 1 return self._lines[self._i] def rewind(self) -> str: if self._i == -1: raise StopIteration from None self._i -= 1 return self._lines[self._i] def __iter__(self): return self def __next__(self): return self.next_line() def _i_extract_block(lines: LinesIterator): """ Iter the line within an indented block and dedent them. """ # Go to the first not empty or not white space line. try: line = next(lines) except StopIteration: return # No block to extract. while line.strip() == "": try: line = next(lines) except StopIteration: return indent = re.fullmatch("( *).*", line).groups()[0] indentation = len(indent) start_of_other_block = re.compile(f" {{0,{indentation}}}[^ ].*") yield line[indentation:] # Yield lines until the indentation is the same or is greater than # the first block line. try: line = next(lines) except StopIteration: return while not start_of_other_block.fullmatch(line): yield line[indentation:] try: line = next(lines) except StopIteration: return lines.rewind() def _dedent_under_first_line(text: str) -> str: """ Apply textwrap.dedent ignoring the first line. """ lines = text.splitlines() other_lines = "\n".join(lines[1:]) if other_lines: return f"{lines[0]}\n{textwrap.dedent(other_lines)}" return text def status_code(docstring: str) -> Dict[int, str]: """ Extract the "Status Code:" block of the docstring. """ iterator = LinesIterator(docstring) for line in iterator: if re.fullmatch("status\\s+codes?\\s*:", line, re.IGNORECASE): iterator.rewind() blocks = [] lines = [] i_block = _i_extract_block(iterator) next(i_block) for line_of_block in i_block: if re.search("^\\s*\\d{3}\\s*:", line_of_block): if lines: blocks.append("\n".join(lines)) lines = [] lines.append(line_of_block) if lines: blocks.append("\n".join(lines)) return { int(status.strip()): _dedent_under_first_line(desc.strip()) for status, desc in (block.split(":", 1) for block in blocks) } return {} def tags(docstring: str) -> List[str]: """ Extract the "Tags:" block of the docstring. """ iterator = LinesIterator(docstring) for line in iterator: if re.fullmatch("tags\\s*:.*", line, re.IGNORECASE): iterator.rewind() lines = " ".join(_i_extract_block(iterator)) return [" ".join(e.split()) for e in re.split("[,;]", lines.split(":")[1])] return [] def operation(docstring: str) -> str: """ Extract all docstring except the "Status Code:" block. """ lines = LinesIterator(docstring) ret = [] for line in lines: if re.fullmatch("status\\s+codes?\\s*:|tags\\s*:.*", line, re.IGNORECASE): lines.rewind() for _ in _i_extract_block(lines): pass else: ret.append(line) return ("\n".join(ret)).strip()
27.335766
87
0.558344
442
3,745
4.58371
0.223982
0.017275
0.011846
0.031589
0.333169
0.307009
0.2769
0.217177
0.172754
0.172754
0
0.004673
0.314286
3,745
136
88
27.536765
0.784268
0.125234
0
0.387097
0
0
0.053605
0.036677
0
0
0
0
0
1
0.107527
false
0.010753
0.032258
0.021505
0.311828
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eda80428d8aaafe609a0be6935df873454da8b92
10,224
py
Python
tests/test_objects.py
kipyin/phanpy
f66fb1b181aeec6183bb03bd748e6ed535496a54
[ "MIT" ]
null
null
null
tests/test_objects.py
kipyin/phanpy
f66fb1b181aeec6183bb03bd748e6ed535496a54
[ "MIT" ]
null
null
null
tests/test_objects.py
kipyin/phanpy
f66fb1b181aeec6183bb03bd748e6ed535496a54
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import os, sys

file_path = os.path.dirname(os.path.abspath(__file__))
root_path = file_path.replace('/phanpy/tests', '')
sys.path.append(root_path) if root_path not in sys.path else None

import numpy as np

from phanpy.core.objects import Status, Item, Move, Pokemon, Trainer


class TestItems():

    def test_no_item(self):
        item = Item(0)
        assert item.name == 'no-item'
        assert item.category_id == 23
        assert item.fling.power == 0
        assert item.fling.effect_id == 0
        assert item.fling.effect_name == 'no-effect'
        assert list(item.flags.id.values) == []

    def test_flags_id_and_name_map(self):
        item = Item(1)
        assert sorted(item.flags.id.values) == sorted([1, 2, 4, 5])
        assert sorted(item.flags.name.values) == sorted(
            ['countable', 'consumable', 'usable-in-battle', 'holdable'])

    def test_instantiate_item_with_undefined_fling_effects(self):
        item = Item(1)
        assert item.name == 'master-ball'
        assert item.category_id == 34
        assert item.fling.power == 0
        assert item.fling.effect_id == 0
        assert item.fling.effect_name == 'no-effect'

    def test_instantiate_item_with_defined_fling_effects(self):
        item = Item(126)
        assert item.name == 'cheri-berry'
        assert item.category_id == 3
        assert item.fling.power == 10
        assert item.fling.effect_id == 3
        assert item.fling.effect_name == 'berry-effect'
        assert item.flags.id.values == [7]


class TestStatusInstantiation():

    def test_declare_a_status_by_id_from_the_table(self):
        assert Status(5).name[0] == 'poison'

    def test_declare_a_status_with_a_timer(self):
        assert Status(5, 5).duration[0] == 5

    def test_declare_a_status_by_name_from_the_table(self):
        assert Status('poison').id[0] == 5

    def test_declare_a_custom_status(self):
        trick_room = Status('trick-room', 5)
        assert trick_room.id[0] >= 100000
        assert trick_room.duration[0] == 5
        assert trick_room.volatile[0] == True

    def test_status_volatility(self):
        assert Status(20).volatile[0] == True
        assert Status(0).volatile[0] == False


@pytest.fixture(scope='function')
def setUpStatus():
    poison = Status(5)
    burn = Status('burn')
    confused = Status('confused', 5)
    disabled = Status('disabled', 4)
    yield poison, burn, confused, disabled


class TestStatusAddition():

    def test_add_two_non_volatile(self, setUpStatus):
        poison, burn, __, __ = setUpStatus
        non_volatile = poison + burn
        # burn should override poison
        assert non_volatile.name == ['burn']
        # Check if the original ones are mutated or not.
        assert poison.name == ['poison']
        assert burn.name == ['burn']

    def test_add_a_volatile_to_non_volatile(self, setUpStatus):
        poison, __, confused, __ = setUpStatus
        mixed = poison + confused
        assert sorted(mixed.name) == sorted(['poison', 'confused'])
        assert sorted(mixed.duration) == sorted([float('inf'), 5])

    def test_add_two_volatile_statuses(self, setUpStatus):
        __, __, confused, disabled = setUpStatus
        volatile = confused + disabled
        assert sorted(volatile.name) == sorted(['confused', 'disabled'])
        assert sorted(volatile.duration) == sorted([5, 4])

    def test_add_a_status_to_a_mixed(self, setUpStatus):
        poison, __, confused, disabled = setUpStatus
        volatile = confused + disabled
        mixed2 = volatile + poison
        assert sorted(mixed2.name) == sorted(['confused', 'disabled', 'poison'])
        assert sorted(mixed2.volatile) == sorted([True, True, False])

    def test_add_multiple_in_one_line(self, setUpStatus):
        __, burn, confused, disabled = setUpStatus
        mixed = burn + confused + disabled
        assert sorted(mixed.name) == sorted(['burn', 'confused', 'disabled'])


class TestStatusMethods():

    def test_remove_an_existing_status_by_name(self, setUpStatus):
        poison, __, confused, disabled = setUpStatus
        combined = poison + confused + disabled
        combined.remove('poison')
        assert sorted(combined.name) == sorted(['confused', 'disabled'])
        assert sorted(combined.duration) == sorted([5, 4])
        assert sorted(combined.volatile) == sorted([True, True])

    def test_remove_an_existing_status_by_id(self, setUpStatus):
        poison, __, confused, disabled = setUpStatus
        combined = poison + confused + disabled
        combined.remove(5)
        assert sorted(combined.name) == sorted(['confused', 'disabled'])
        assert sorted(combined.duration) == sorted([5, 4])
        assert sorted(combined.volatile) == sorted([True, True])

    def test_remove_the_only_status(self):
        poison = Status(5)
        poison.remove('poison')
        assert poison.id == np.array([0])
        assert poison.name == np.array(['normal'])
        assert poison.duration == np.array([float('inf')])
        assert poison.volatile == np.array([False])

    def test_remove_a_non_existing_status(self):
        poison = Status(5)
        with pytest.raises(KeyError):
            poison.remove('burn')

    def test_reduce_duration_by_1(self, setUpStatus):
        __, burn, confused, disabled = setUpStatus
        mixed = burn + confused + disabled
        mixed.reduce()
        assert sorted(mixed.duration) == sorted([float('inf'), 4, 3])

    def test_reduce_the_duration_by_1_where_the_duration_was_1(self):
        burn = Status('burn', 1)
        confused = Status('confused', 5)
        mixed = burn + confused
        mixed.reduce()
        assert mixed.name == ['confused']
        assert mixed.duration == [4]
        assert mixed.volatile == [True]


@pytest.fixture(scope='function')
def setUpPokemon():
    p = Pokemon(10001)
    yield p


class TestPokemon():

    def test_id_over_10000(self, setUpPokemon):
        p = setUpPokemon
        assert p.name == 'deoxys-attack'

    def test_types_single(self, setUpPokemon):
        p = setUpPokemon
        assert p.types == [14]

    def test_types_double(self):
        p = Pokemon(10004)
        assert p.types == [7, 5]

    def test_effort_values_sum(self, setUpPokemon):
        p = setUpPokemon
        assert sum(p.ev.values) == 510

    def test_nature_id_assignment(self, setUpPokemon):
        p = setUpPokemon
        p.set_nature(18)  # lax nature, decrease 5, increase 3.
        assert p.nature.id == 18

    def test_set_nature_by_name(self, setUpPokemon):
        p = setUpPokemon
        p.set_nature('lax')  # 18
        assert p.nature.id == 18

    def test_nature_modifier(self, setUpPokemon):
        p = setUpPokemon
        p.set_nature(18)
        assert p.nature_modifier.defense == 1.1
        assert p.nature_modifier.specialDefense == 0.9

    def test_set_iv(self, setUpPokemon):
        p = setUpPokemon
        p.set_iv([31. for x in range(6)])
        assert p.iv.defense == 31.

    def test_set_ev(self, setUpPokemon):
        p = setUpPokemon
        p.set_ev([31. for x in range(6)])
        assert p.ev.defense == 31.

    def test_calculated_stats(self):
        """Using the example on
        https://bulbapedia.bulbagarden.net/wiki/Statistic#Determination_of_stats
        """
        p = Pokemon('garchomp', 78)
        p.set_nature('adamant')
        p.set_iv([24, 12, 30, 16, 23, 5])
        p.set_ev([74, 190, 91, 48, 84, 23])
        expected = [289, 278, 193, 135, 171, 171]
        for i in range(6):
            assert p.stats[i] == expected[i]

    def test_stage_factor_changes_when_stage_changes(self, setUpPokemon):
        p = setUpPokemon
        p.stage.attack += 3  # the factor should be multiplied by 2.5
        assert p.stage_factor.attack == 2.5

    def test_current_stats_change_when_factors_change(self, setUpPokemon):
        p = setUpPokemon
        p.stage.attack += 3
        assert p.current.attack == np.floor(p.stats.attack * 2.5)

    def test_add_received_damage(self, setUpPokemon):
        p = setUpPokemon
        p.history.damage.appendleft(288)
        p.history.damage.appendleft(199)
        assert p.history.damage[0] == 199

    def test_add_stage(self, setUpPokemon):
        p = setUpPokemon
        p.history.stage += 5
        assert p.history.stage == 5

    def test_set_moves(self, setUpPokemon):
        p = setUpPokemon
        first_4_moves = [Move(x) for x in range(1, 5)]
        p.moves = first_4_moves
        for i in range(4):
            assert p.moves[i].name == first_4_moves[i].name

    def test_set_pp_and_power(self, setUpPokemon):
        p = setUpPokemon
        p.moves[0] = Move(33)  # tackle, power 40, pp 35, accuracy 100.
        p.moves[0].pp -= 4
        p.moves[0].power *= 2
        assert p.moves[0].pp == 31
        assert p.moves[0].power == 80
        assert p.moves[0].power != Move(33).power

    def test_holding_item303_changes_critical_stage(self, setUpPokemon):
        p = setUpPokemon
        p.item = Item(303)
        assert p.stage.critical == 1.

    def test_dual_abilities_successfully_initiated(self):
        p = Pokemon(19)
        assert p.ability in [50, 62]

    def test_single_ability_successfully_initiated(self):
        p = Pokemon(1)
        assert p.ability == 65

    def test_reset_current_stats(self, setUpPokemon):
        p = setUpPokemon
        p.stage += 3
        p.reset_current()
        assert p.current.attack == p.stats.attack

    def test_two_pokemons_are_equal(self, setUpPokemon):
        p = setUpPokemon
        q = Pokemon(10001)
        assert p != q
        q.set_iv(p.iv.values)
        q.unique_id = p.unique_id
        assert p == q

    def test_trainer_set_pokemon(self):
        t = Trainer('Satoshi')
        t.set_pokemon(3, Pokemon(10005))
        assert t.party(3).name == 'wormadam-trash'

    def test_set_trainers_pokemons_moves(self):
        t = Trainer('Satoshi')
        t.set_pokemon(3, Pokemon(10001))
        t.party(1).moves[1] = Move(33)
        assert t.party(1).moves[1].name == 'tackle'
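These are plain pytest tests (function-scoped fixtures plus assert-based test classes), so the standard collection applies; a typical invocation from the repository root might be:

    pytest tests/test_objects.py -q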
33.742574
83
0.624902
1,306
10,224
4.698315
0.196784
0.049055
0.047099
0.080346
0.416069
0.297099
0.204368
0.169492
0.135593
0.123207
0
0.033847
0.257336
10,224
302
84
33.854305
0.774266
0.032179
0
0.253219
0
0
0.041257
0
0
0
0
0
0.334764
1
0.193133
false
0
0.017167
0
0.23176
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eda9ec61c9c268e74679c06d29ce37757b47bb1d
5,743
py
Python
Modules/Dependency/Metadata_Interpreter.py
dobedobedo/Parrot_Sequoia_Image_Handler
e8d44d88006cf1f4e597aac1523c6f4458534e5b
[ "MIT" ]
6
2018-06-27T10:13:29.000Z
2020-05-11T03:00:10.000Z
Modules/Dependency/Metadata_Interpreter.py
dobedobedo/Parrot_Sequoia_Image_Handler
e8d44d88006cf1f4e597aac1523c6f4458534e5b
[ "MIT" ]
null
null
null
Modules/Dependency/Metadata_Interpreter.py
dobedobedo/Parrot_Sequoia_Image_Handler
e8d44d88006cf1f4e597aac1523c6f4458534e5b
[ "MIT" ]
3
2017-09-25T12:46:38.000Z
2021-06-15T15:57:50.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 16:08:19 2017

@author: uqytu1
"""

import math
import numpy as np
import urllib.request
import json
import base64
import struct
import datetime
import pytz


def GetLonLat(Metadata):
    Position = Metadata['GPSPosition'].split(',')
    Latitude = Position[0].split()
    Lat = eval(Latitude[0]) + \
        eval(Latitude[2].strip("'"))/60.0 + \
        eval(Latitude[3].strip('"'))/3600.0
    if Latitude[4] == 'S':
        Lat = math.copysign(Lat, -1)
    else:
        Lat = math.copysign(Lat, 1)

    Longitude = Position[1].split()
    Lon = eval(Longitude[0]) + \
        eval(Longitude[2].strip("'"))/60.0 + \
        eval(Longitude[3].strip('"'))/3600.0
    if Longitude[4] == 'W':
        Lon = math.copysign(Lon, -1)
    else:
        Lon = math.copysign(Lon, 1)

    return Lon, Lat


def GetAltitude(Metadata):
    return eval(Metadata['GPSAltitude'].split()[0])


def GetRollPitchYaw(Metadata):
    roll = eval(Metadata['Roll'])
    pitch = eval(Metadata['Pitch'])
    yaw = eval(Metadata['Yaw'])
    if yaw < 0:
        yaw = yaw + 360
    return roll, pitch, yaw


def GetTime(Metadata):
    Time = datetime.datetime.strptime(Metadata['SubSecDateTimeOriginal'],
                                      "%Y:%m:%d %H:%M:%S.%f")
    Time_UTC = pytz.utc.localize(Time, is_dst=False)
    return Time_UTC


def GetGPSTime(Metadata):
    Time = datetime.datetime.strptime(Metadata['GPSTimeStamp'], "%H:%M:%S.%f")
    Time_UTC = pytz.utc.localize(Time, is_dst=False)
    return Time_UTC.time()


def GetTimefromStart(Metadata):
    Time = datetime.datetime.strptime(Metadata['SubSecCreateDate'],
                                      "%Y:%m:%d %H:%M:%S.%f")
    Time_UTC = pytz.utc.localize(Time, is_dst=False)
    duration = datetime.timedelta(hours=Time_UTC.hour,
                                  minutes=Time_UTC.minute,
                                  seconds=Time_UTC.second,
                                  microseconds=Time_UTC.microsecond)
    return duration


def GetTimeOffset(Metadata):
    GPSTime = GetGPSTime(Metadata)
    ImageTime = GetTime(Metadata).time()
    offset = datetime.timedelta(
        hours=GPSTime.hour - ImageTime.hour,
        minutes=GPSTime.minute - ImageTime.minute,
        seconds=GPSTime.second - ImageTime.second,
        microseconds=GPSTime.microsecond - ImageTime.microsecond)
    return offset


def GetPrincipalPoint(Metadata, sensor_size):
    # Parse the comma-separated "cx,cy" pair into floats (the original
    # called .strip() on the split list, which cannot work).
    cx, cy = (float(v.strip()) for v in Metadata['PrincipalPoint'].split(','))
    w = eval(Metadata['ImageWidth'])
    h = eval(Metadata['ImageHeight'])
    # Note that Sequoia's origin is at lower left instead of top left
    CP = np.array([[w*cx/sensor_size[0]], [h*(cy/sensor_size[1])]])
    return CP


def GetFisheyeAffineMatrix(Metadata):
    # Parse the four comma-separated coefficients C, D, E, F.
    CDEF = [float(v) for v in Metadata['FisheyeAffineMatrix'].split(',')]
    FisheyeAffineMatrix = np.array([[CDEF[0], CDEF[1]], [CDEF[2], CDEF[3]]])
    return FisheyeAffineMatrix


def GetFisheyePolynomial(Metadata):
    # Parse each comma-separated coefficient individually.
    return [float(v) for v in Metadata['FisheyePolynomial'].split(',')]


def GetElevation(Metadata):
    Lon, Lat = GetLonLat(Metadata)
    # Retrieve Elevation from Google Map API
    # Note: 2500 queries per day for free users
    Elevation_Base_URL = 'http://maps.googleapis.com/maps/api/elevation/json?'
    URL_Params = 'locations={Lat},{Lon}&sensor={Bool}'.format(
        Lat=Lat, Lon=Lon, Bool='false')
    url = Elevation_Base_URL + URL_Params
    with urllib.request.urlopen(url) as f:
        response = json.loads(f.read().decode())
    result = response['results'][0]
    elevation = result['elevation']
    return elevation


def GetSunIrradiance(Metadata):
    encoded = Metadata['IrradianceList']
    # decode the string
    data = base64.standard_b64decode(encoded)
    # ensure that there's enough data QHHHfff
    assert len(data) % 28 == 0
    # determine how many datasets there are
    count = len(data) // 28
    # unpack the data as uint64, uint16, uint16, uint16, uint16, float, float, float
    result = []
    for i in range(count):
        index = 28 * i
        s = struct.unpack('<QHHHHfff', data[index:index + 28])
        result.append(s)

    CreateTime = GetTimefromStart(Metadata)
    timestamp = []
    for measurement in result:
        q, r = divmod(measurement[0], 1000000)
        timestamp.append(abs(datetime.timedelta(seconds=q, microseconds=r) - CreateTime))

    TargetIndex = timestamp.index(min(timestamp))
    count = result[TargetIndex][1]
    gain = result[TargetIndex][3]
    exposuretime = result[TargetIndex][4]
    Irradiance = count / (gain * exposuretime)
    return Irradiance


def GetPowerCoefficients(Metadata):
    powers = Metadata['VignettingPolynomial2DName']
    coefficients = Metadata['VignettingPolynomial2D']
    power_items = powers.split(',')
    coefficient_items = coefficients.split(',')
    powers_coefficients = list()
    for i in range(0, len(power_items), 2):
        powers_coefficients.append((int(power_items[i]),
                                    int(power_items[i+1]),
                                    float(coefficient_items[int(i/2)])))
    return powers_coefficients


def GetSensorModelCoef(Metadata):
    Coefs = Metadata['SensorModel'].split(',')
    return (float(Coefs[0].strip('.')),
            float(Coefs[1].strip('.')),
            float(Coefs[2].strip('.')))


def GetExposureTime(Metadata):
    ExposureTime = Metadata['ExposureTime'].split('/')
    if len(ExposureTime) > 1:
        ExpinSec = float(ExposureTime[0]) / float(ExposureTime[1])
    else:
        ExpinSec = float(ExposureTime[0])
    return ExpinSec


def GetISO(Metadata):
    return int(Metadata['ISO'])


def GetFNumber(Metadata):
    return float(Metadata['FNumber'])
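A hypothetical usage sketch. The dict below mimics the exiftool-style tag strings the parsers above expect, with invented coordinate values; real Sequoia metadata would normally come from the image files themselves.

# Illustrative metadata; only the key names match the module's expectations.
meta = {
    "GPSPosition": "27 deg 28' 14.6\" S, 153 deg 1' 40.3\" E",
    "GPSAltitude": "57.3 m",
}
lon, lat = GetLonLat(meta)
print(round(lon, 4), round(lat, 4))  # 153.0279 -27.4707 (degrees + min/60 + sec/3600, signed by hemisphere)
print(GetAltitude(meta))             # 57.3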
34.596386
97
0.632596
661
5,743
5.449319
0.323752
0.029983
0.016657
0.02332
0.114381
0.078845
0.042199
0.042199
0.042199
0.042199
0
0.024803
0.227756
5,743
166
98
34.596386
0.787373
0.072262
0
0.046875
0
0
0.081686
0.019763
0
0
0
0
0.007813
1
0.132813
false
0
0.0625
0.03125
0.328125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edabc77fd3da28138cd10a06ef81ba6b153764ea
453
py
Python
lectures/5_Image_Analysis/combine_color_image.py
jagar2/Summer_2020_MAT-395-495_Scientific-Data-Analysis-and-Computing
e4b831460bddd34e7ad1d8888327c8d85b80e35e
[ "BSD-3-Clause" ]
1
2021-11-10T15:34:37.000Z
2021-11-10T15:34:37.000Z
lectures/5_Image_Analysis/combine_color_image.py
jagar2/Summer_2020_MAT-395-495_Scientific-Data-Analysis-and-Computing
e4b831460bddd34e7ad1d8888327c8d85b80e35e
[ "BSD-3-Clause" ]
null
null
null
lectures/5_Image_Analysis/combine_color_image.py
jagar2/Summer_2020_MAT-395-495_Scientific-Data-Analysis-and-Computing
e4b831460bddd34e7ad1d8888327c8d85b80e35e
[ "BSD-3-Clause" ]
3
2020-08-06T15:11:50.000Z
2022-01-05T20:21:09.000Z
import numpy as np               # needed for np.zeros/np.stack below
import matplotlib.pyplot as plt  # needed for plt.subplots/plt.imshow below
from skimage import draw

red = np.zeros((300, 300))
green = np.zeros((300, 300))
blue = np.zeros((300, 300))

# draw.circle was removed in scikit-image 0.19; on newer versions use
# draw.disk((row, col), radius) instead.
r, c = draw.circle(100, 100, 100)
red[r, c] = 1

r, c = draw.circle(100, 200, 100)
green[r, c] = 1

r, c = draw.circle(200, 150, 100)
blue[r, c] = 1

f, axes = plt.subplots(1, 3)
for (ax, channel) in zip(axes, [red, green, blue]):
    ax.imshow(channel, cmap='gray')
    ax.axis('off')

# Stack the channels into an (H, W, 3) RGB image; the overlaps mix additively.
plt.imshow(np.stack([red, green, blue], axis=2))
19.695652
51
0.600442
84
453
3.238095
0.404762
0.044118
0.110294
0.143382
0.176471
0.110294
0.110294
0
0
0
0
0.138587
0.187638
453
22
52
20.590909
0.600543
0
0
0
0
0
0.015487
0
0
0
0
0
0
1
0
false
0
0.066667
0
0.066667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb02ecbb069aa18674ef4e8555933c211f6074c
1,398
py
Python
chapters/chapter_3/NN_yelp/main.py
Penguin-Run/PyTorchBook
a310246ffed33d53a70cd7f2fd971f1626dcbebf
[ "Apache-2.0" ]
null
null
null
chapters/chapter_3/NN_yelp/main.py
Penguin-Run/PyTorchBook
a310246ffed33d53a70cd7f2fd971f1626dcbebf
[ "Apache-2.0" ]
null
null
null
chapters/chapter_3/NN_yelp/main.py
Penguin-Run/PyTorchBook
a310246ffed33d53a70cd7f2fd971f1626dcbebf
[ "Apache-2.0" ]
null
null
null
from .training.ReviewClassifier import ReviewClassifier
from .data_managing.Dataset import ReviewDataset
from .training.hyperparameters import args
from .testing import compute_loss_acc as loss_acc
from .testing import predict_rating as predict
from .testing import analizing as analyze

if __name__ == '__main__':
    # creating dataset
    if args.reload_from_files:
        # training from a checkpoint
        print("Loading dataset and vectorizer")
        dataset = ReviewDataset.load_dataset_and_load_vectorizer(
            args.review_csv, args.vectorizer_file)
    else:
        print("Loading dataset and creating vectorizer")
        # create dataset and vectorizer
        dataset = ReviewDataset.load_dataset_and_make_vectorizer(args.review_csv)
        dataset.save_vectorizer(args.vectorizer_file)

    vectorizer = dataset.get_vectorizer()
    classifier = ReviewClassifier(num_features=len(vectorizer.review_vocab))

    # computing loss and accuracy of model
    loss_acc.compute(classifier, args, dataset)

    # testing model on real data
    test_review = "не бери грех на душу"  # Russian: "don't take a sin upon your soul"
    classifier = classifier.cpu()
    prediction = predict.predict_rating(test_review, classifier, vectorizer,
                                        decision_threshold=0.5)
    print("{} -> {}".format(test_review, prediction))

    analyze.influencial_words(classifier, vectorizer)
41.117647
100
0.724607
158
1,398
6.170886
0.424051
0.051282
0.052308
0.045128
0.110769
0.110769
0.110769
0.110769
0
0
0
0.001803
0.206724
1,398
33
101
42.363636
0.877367
0.098712
0
0
0
0
0.083732
0
0
0
0
0
0
1
0
false
0
0.26087
0
0.26087
0.130435
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb0ab27e8375216bb7fe46df1fbcdeb336314c4
6,573
py
Python
setup.py
hpleva/ai4materials
5b5548f4fbfd4751cd1f9d57cedaa1e1d7ca04b2
[ "Apache-2.0" ]
null
null
null
setup.py
hpleva/ai4materials
5b5548f4fbfd4751cd1f9d57cedaa1e1d7ca04b2
[ "Apache-2.0" ]
null
null
null
setup.py
hpleva/ai4materials
5b5548f4fbfd4751cd1f9d57cedaa1e1d7ca04b2
[ "Apache-2.0" ]
null
null
null
from setuptools import setup, find_packages, Extension
# To use a consistent encoding
from codecs import open
# Other stuff
import sys, os, fileinput

import versioneer

here = os.path.dirname(os.path.realpath(__file__))


def main():
    # Start package setup
    # Get the long description from the README file
    with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = f.read()

    setup(
        # template at https://github.com/pypa/sampleproject/blob/master/setup.py
        name='ai4materials',

        # Versions should comply with PEP440. For a discussion on single-sourcing
        # the version across setup.py and the project code, see
        # https://packaging.python.org/en/latest/single_source_version.html
        # version=get_property('__version__'),
        version="0.1",

        description='Data-analytics modeling of materials science data',
        long_description=long_description,
        zip_safe=True,

        # The project's main homepage.
        url='https://github.com/angeloziletti/ai4materials',

        # Author details
        author='Ziletti, Angelo and Leitherer, Andreas',
        author_email='angelo.ziletti@gmail.com, andreas.leitherer@gmail.com',

        # Choose your license
        license='Apache License 2.0',

        # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
        classifiers=[
            # How mature is this project? Common values are
            #   3 - Alpha
            #   4 - Beta
            #   5 - Production/Stable
            'Development Status :: 3 - Alpha',

            # Indicate who your project is intended for
            'Intended Audience :: Science/Research',
            'Topic :: Physics :: Materials science :: Machine learning :: Deep learning :: Data analytics',

            # Pick your license as you wish (should match "license" above)
            'License :: Apache License 2.0',

            # Specify the Python versions you support here. In particular, ensure
            # that you indicate whether you support Python 2, Python 3 or both.
            # 'Programming Language :: Python :: 2.7',
            # 'Programming Language :: Python :: 3.5',
            # 'Programming Language :: Python :: 3.6',
            # 'Programming Language :: Python :: 3.7'
        ],

        # What does your project relate to?
        keywords='Data analytics of materials science data.',

        # You can just specify the packages manually here if your project is
        # simple. Or you can use find_packages().
        packages=['ai4materials', 'ai4materials.dataprocessing',
                  'ai4materials.descriptors', 'ai4materials.interpretation',
                  'ai4materials.visualization', 'ai4materials.models',
                  'ai4materials.utils', 'ai4materials.external'],
        # packages=find_packages(include=['ai4materials']),
        package_dir={'ai4materials': 'ai4materials'},

        # Alternatively, if you want to distribute just a my_module.py, uncomment
        # this:
        # py_modules=["my_module"],

        # List run-time dependencies here. These will be installed by pip when
        # your project is installed. For an analysis of "install_requires" vs pip's
        # requirements files see:
        # https://packaging.python.org/en/latest/requirements.html
        install_requires=['ase>=3.19.0', 'tensorflow==1.13.1', 'keras==2.2.4',
                          'scikit-learn>=0.17.1', 'pint', 'future',
                          'pandas>=0.25.0', 'enum34', 'pymatgen>=2020.3.13',
                          'keras-tqdm', 'seaborn', 'paramiko', 'scipy',
                          'nose>=1.0', 'numpy', 'h5py<=2.9.0', 'cython>=0.19',
                          'Jinja2', 'progressbar'],
        # 'ase==3.15.0',  # neighbors list does not work for ase 3.16
        # 'scikit-learn >=0.17.1', 'tensorflow==1.8.0', 'pint', 'future', 'pandas',
        # 'bokeh',
        # 'enum34', 'pymatgen', 'keras==1.2.0', 'pillow>=2.7.0', 'mendeleev', 'keras-tqdm',
        # 'seaborn', 'paramiko', 'scipy', 'nose>=1.0', 'sqlalchemy', 'theano==0.9.0',
        # 'numpy', 'h5py', 'cython>=0.19', 'pyshtools', 'Jinja2'],
        # 'bokeh==0.11.0',
        # 'multiprocessing',
        # , 'asap3'],  # 'mayavi', 'weave'],
        # setup_requires=['nomadcore', 'atomic_data'],

        # List additional groups of dependencies here (e.g. development
        # dependencies). You can install these using the following syntax,
        # for example:
        # $ pip install -e .[dev,test]
        extras_require={
            # 'dev': ['check-manifest'],
            'test': ['pytest', 'coverage'],
        },

        # https://mike.zwobble.org/2013/05/adding-git-or-hg-or-svn-dependencies-in-setup-py/
        # add atomic_data and nomadcore
        dependency_links=['https://github.com/libAtoms/QUIP',
                          'https://github.com/FXIhub/condor.git'],

        # If there are data files included in your packages that need to be
        # installed, specify them here. If using Python 2.6 or less, then these
        # have to be included in MANIFEST.in as well.
        package_data={
            'ai4materials': ['descriptors/descriptors.nomadmetainfo.json',
                             'data/nn_models/*.h5', 'data/nn_models/*.json',
                             'utils/units.txt', 'utils/constants.txt',
                             'data/PROTOTYPES/*/*/*.in',
                             'data/training_data/*.pkl',
                             'data/training_data/*.json']},

        # Although 'package_data' is the preferred approach, in some case you may
        # need to place data files outside of your packages. See:
        # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
        # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
        # data_files=[('my_data', ['data/data_file'])],

        # To provide executable scripts, use entry points in preference to the
        # "scripts" keyword. Entry points provide cross-platform support and allow
        # pip to create the appropriate form of executable for the target platform.
        # entry_points={
        #     'console_scripts': [
        #         'condor=condor.scripts.condor_script:main',
        #     ],
        # },
        # test_suite = "condor.tests.test_all",

        project_urls={  # Optional
            'Bug Reports': 'https://gitlab.com/ai4materials/issues',
            'Source': 'https://gitlab.com/ai4materials/',
        },
    )


# Run main function by default
if __name__ == "__main__":
    main()
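The manually enumerated packages list above can silently drift as subpackages are added or removed; a sketch of the find_packages() alternative hinted at by the commented-out line, assuming every subpackage ships an __init__.py (the exclude pattern is illustrative, not taken from the project):

from setuptools import find_packages

# Equivalent to the manual list for the layout shown above; 'exclude'
# keeps any test subpackages out of the built distribution.
packages = find_packages(include=['ai4materials', 'ai4materials.*'],
                         exclude=['*.tests', '*.tests.*'])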
42.681818
126
0.597596
764
6,573
5.068063
0.434555
0.015496
0.014463
0.020145
0.043388
0.035641
0.035641
0.018079
0
0
0
0.026536
0.27187
6,573
153
127
42.960784
0.782491
0.499772
0
0
0
0
0.387539
0.097508
0
0
0
0
0
1
0.020408
false
0
0.081633
0
0.102041
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb1c338921c46604a227fc5ad3a3537657d82d7
1,074
py
Python
zeekofile/_controllers/blog/permapage.py
cdarlint/zeekofile
e5c999f0adfa1837c255b856eb030fb6838b0ea1
[ "MIT" ]
1
2022-02-20T08:02:00.000Z
2022-02-20T08:02:00.000Z
zeekofile/_controllers/blog/permapage.py
cdarlint/zeekofile
e5c999f0adfa1837c255b856eb030fb6838b0ea1
[ "MIT" ]
1
2021-07-23T19:45:58.000Z
2021-07-23T19:45:58.000Z
zeekofile/_controllers/blog/permapage.py
cdarlint/zeekofile
e5c999f0adfa1837c255b856eb030fb6838b0ea1
[ "MIT" ]
null
null
null
from zeekofile.cache import zf

import re

blog = zf.config.controllers.blog


def run():
    write_permapages()


def write_permapages():
    "Write blog posts to their permalink locations"
    site_re = re.compile(zf.config.site.url, re.IGNORECASE)
    num_posts = len(blog.posts)
    for i, post in enumerate(blog.posts):
        if post.permalink:
            path = site_re.sub("", post.permalink)
            blog.logger.info("Writing permapage for post: {0}".format(path))
        else:
            # Permalinks MUST be specified. No permalink, no page.
            blog.logger.info("Post has no permalink: {0}".format(post.title))
            continue
        env = {
            "post": post,
            "posts": blog.posts
        }
        # Find the next and previous posts chronologically
        if i < num_posts - 1:
            env['prev_post'] = blog.posts[i + 1]
        if i > 0:
            env['next_post'] = blog.posts[i - 1]
        zf.writer.materialize_template(
            "/blog/permapage.mako",
            zf.util.path_join(path, "index.html"), env)
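The prev/next wiring above assumes blog.posts is ordered newest-first (index 0 is the most recent post), so the chronologically previous post sits at the next higher index. A toy check of that index arithmetic:

# Toy verification of the prev/next indexing used by write_permapages,
# under the assumption that posts are sorted newest-first.
posts = ["2024-03", "2024-02", "2024-01"]  # newest first
i = 1  # the middle post
prev_post = posts[i + 1] if i < len(posts) - 1 else None  # older  -> "2024-01"
next_post = posts[i - 1] if i > 0 else None               # newer  -> "2024-03"
assert (prev_post, next_post) == ("2024-01", "2024-03")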
28.263158
83
0.587523
138
1,074
4.5
0.471014
0.086957
0.045089
0.045089
0.048309
0
0
0
0
0
0
0.007916
0.294227
1,074
37
84
29.027027
0.811346
0.13594
0
0
0
0
0.16358
0
0
0
0
0
0
1
0.076923
false
0
0.076923
0
0.153846
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb2a9429239ad3822ac8af03b000d236f86beda
31,643
py
Python
python/nsc/nsc_instcal_sexdaophot.py
dnidever/noaosourcecatalog
bdd22e53da3ebb6e6c79d8cbe9e375562b09cfeb
[ "MIT" ]
4
2017-05-23T20:57:33.000Z
2018-01-30T22:51:42.000Z
python/nsc/nsc_instcal_sexdaophot.py
dnidever/noaosourcecatalog
bdd22e53da3ebb6e6c79d8cbe9e375562b09cfeb
[ "MIT" ]
null
null
null
python/nsc/nsc_instcal_sexdaophot.py
dnidever/noaosourcecatalog
bdd22e53da3ebb6e6c79d8cbe9e375562b09cfeb
[ "MIT" ]
1
2021-07-15T03:06:22.000Z
2021-07-15T03:06:22.000Z
#!/usr/bin/env python # # NSC_INSTCAL_SEXDAOPHOT.PY -- Run SExtractor and DAOPHOT on an exposure # from __future__ import print_function __authors__ = 'David Nidever <dnidever@noao.edu>' __version__ = '20180819' # yyyymmdd import os import sys import numpy as np import warnings from astropy.io import fits from astropy.wcs import WCS from astropy.utils.exceptions import AstropyWarning from astropy.table import Table, Column import time import shutil import re import subprocess import glob import logging import socket #from scipy.signal import convolve2d from scipy.ndimage.filters import convolve import astropy.stats import struct from utils import * from phot import * # Ignore these warnings, it's a bug warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") # Get NSC directories def getnscdirs(version=None): # Host hostname = socket.gethostname() host = hostname.split('.')[0] # Version verdir = "" if version is not None: verdir = version if version.endswith('/') else version+"/" # on thing/hulk use if (host == "thing") | (host == "hulk"): basedir = "/dl1/users/dnidever/nsc/instcal/"+verdir tmproot = "/d0/dnidever/nsc/instcal/"+verdir+"tmp/" # on gp09 use if (host == "gp09") | (host == "gp08") | (host == "gp07") | (host == "gp06") | (host == "gp05"): basedir = "/net/dl1/users/dnidever/nsc/instcal/"+verdir tmproot = "/data0/dnidever/nsc/instcal/"+verdir+"tmp/" return basedir,tmproot # Class to represent an exposure to process class Exposure: # Initialize Exposure object def __init__(self,fluxfile,wtfile,maskfile,nscversion="t3a"): # Check that the files exist if os.path.exists(fluxfile) is False: print(fluxfile+" NOT found") return if os.path.exists(wtfile) is False: print(wtfile+" NOT found") return if os.path.exists(maskfile) is False: print(maskfile+" NOT found") return # Setting up the object properties self.origfluxfile = fluxfile self.origwtfile = wtfile self.origmaskfile = maskfile self.fluxfile = None # working files in temp dir self.wtfile = None # working files in temp dir self.maskfile = None # working files in temp dir base = os.path.basename(fluxfile) base = os.path.splitext(os.path.splitext(base)[0])[0] self.base = base self.nscversion = nscversion self.logfile = base+".log" self.logger = None self.origdir = None self.wdir = None # the temporary working directory self.outdir = None self.chip = None # Get instrument head0 = fits.getheader(fluxfile,0) if head0["DTINSTRU"] == 'mosaic3': self.instrument = 'k4m' elif head0["DTINSTRU"] == '90prime': self.instrument = 'ksb' elif head0["DTINSTRU"] == 'decam': self.instrument = 'c4d' else: print("Cannot determine instrument type") return # Get number of extensions hdulist = fits.open(fluxfile) nhdu = len(hdulist) hdulist.close() self.nexten = nhdu # Get night dateobs = head0.get("DATE-OBS") night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10] self.night = night # Output directory basedir,tmpdir = getnscdirs(nscversion) self.outdir = basedir+self.instrument+"/"+self.night+"/"+self.base+"/" # Setup def setup(self): basedir,tmproot = getnscdirs(self.nscversion) # Prepare temporary directory tmpcntr = 1#L tmpdir = tmproot+self.base+"."+str(tmpcntr) while (os.path.exists(tmpdir)): tmpcntr = tmpcntr+1 tmpdir = tmproot+self.base+"."+str(tmpcntr) if tmpcntr > 20: print("Temporary Directory counter getting too high. 
Exiting") sys.exit() os.mkdir(tmpdir) origdir = os.getcwd() self.origdir = origdir os.chdir(tmpdir) self.wdir = tmpdir # Set up logging to screen and logfile logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s") rootLogger = logging.getLogger() # file handler fileHandler = logging.FileHandler(self.logfile) fileHandler.setFormatter(logFormatter) rootLogger.addHandler(fileHandler) # console/screen handler consoleHandler = logging.StreamHandler() consoleHandler.setFormatter(logFormatter) rootLogger.addHandler(consoleHandler) rootLogger.setLevel(logging.NOTSET) self.logger = rootLogger self.logger.info("Setting up in temporary directory "+tmpdir) self.logger.info("Starting logfile at "+self.logfile) # Copy over images from zeus1:/mss fluxfile = "bigflux.fits.fz" wtfile = "bigwt.fits.fz" maskfile = "bigmask.fits.fz" self.logger.info("Copying InstCal images from mass store archive") shutil.copyfile(basedir+os.path.basename(self.origfluxfile),tmpdir+"/"+os.path.basename(self.origfluxfile)) self.logger.info(" "+self.origfluxfile) if (os.path.basename(self.origfluxfile) != fluxfile): os.symlink(os.path.basename(self.origfluxfile),fluxfile) shutil.copyfile(basedir+os.path.basename(self.origwtfile),tmpdir+"/"+os.path.basename(self.origwtfile)) self.logger.info(" "+self.origwtfile) if (os.path.basename(self.origwtfile) != wtfile): os.symlink(os.path.basename(self.origwtfile),wtfile) shutil.copyfile(basedir+os.path.basename(self.origmaskfile),tmpdir+"/"+os.path.basename(self.origmaskfile)) self.logger.info(" "+self.origmaskfile) if (os.path.basename(self.origmaskfile) != maskfile): os.symlink(os.path.basename(self.origmaskfile),maskfile) # Set local working filenames self.fluxfile = fluxfile self.wtfile = wtfile self.maskfile = maskfile # Make final output directory if not os.path.exists(self.outdir): os.makedirs(self.outdir) # will make multiple levels of directories if necessary self.logger.info("Making output directory: "+self.outdir) # Load chip def loadchip(self,extension,fluxfile="flux.fits",wtfile="wt.fits",maskfile="mask.fits"): # Load the data self.logger.info(" Loading chip "+str(extension)) # Check that the working files set by "setup" if (self.fluxfile is None) | (self.wtfile is None) | (self.maskfile is None): self.logger.warning("Local working filenames not set. 
Make sure to run setup() first") return try: flux,fhead = fits.getdata(self.fluxfile,extension,header=True) fhead0 = fits.getheader(self.fluxfile,0) # add PDU info fhead.extend(fhead0,unique=True) wt,whead = fits.getdata(self.wtfile,extension,header=True) mask,mhead = fits.getdata(self.maskfile,extension,header=True) except: self.logger.error("No extension "+str(extension)) return # Write the data to the appropriate files if os.path.exists(fluxfile): os.remove(fluxfile) fits.writeto(fluxfile,flux,header=fhead,output_verify='warn') if os.path.exists(wtfile): os.remove(wtfile) fits.writeto(wtfile,wt,header=whead,output_verify='warn') if os.path.exists(maskfile): os.remove(maskfile) fits.writeto(maskfile,mask,header=mhead,output_verify='warn') # Create the chip object self.chip = Chip(fluxfile,wtfile,maskfile,self.base) self.chip.bigextension = extension self.chip.nscversion = self.nscversion self.chip.outdir = self.outdir # Add logger information self.chip.logger = self.logger # Process all chips def process(self): self.logger.info("-------------------------------------------------") self.logger.info("Processing ALL extension images") self.logger.info("-------------------------------------------------") # LOOP through the HDUs/chips #---------------------------- for i in range(1,self.nexten): t0 = time.time() self.logger.info(" ") self.logger.info("=== Processing subimage "+str(i)+" ===") # Load the chip self.loadchip(i) self.logger.info("CCDNUM = "+str(self.chip.ccdnum)) # Process it self.chip.process() # Clean up self.chip.cleanup() self.logger.info("dt = "+str(time.time()-t0)+" seconds") # Teardown def teardown(self): # Delete files and temporary directory self.logger.info("Deleting files and temporary directory.") # Move the final log file shutil.move(self.logfile,self.outdir+self.base+".log") # Delete temporary files and directory tmpfiles = glob.glob("*") for f in tmpfiles: os.remove(f) os.rmdir(self.wdir) # CD back to original directory os.chdir(self.origdir) # RUN all steps to process this exposure def run(self): self.setup() self.process() self.teardown() # Class to represent a single chip of an exposure class Chip: def __init__(self,fluxfile,wtfile,maskfile,bigbase): self.fluxfile = fluxfile self.wtfile = wtfile self.maskfile = maskfile self.bigbase = bigbase self.bigextension = None base = os.path.basename(fluxfile) base = os.path.splitext(os.path.splitext(base)[0])[0] self.dir = os.path.abspath(os.path.dirname(fluxfile)) self.base = base self.meta = makemeta(header=fits.getheader(fluxfile,0)) self.sexfile = self.dir+"/"+self.base+"_sex.fits" self.daofile = self.dir+"/"+self.base+"_dao.fits" self.sexcatfile = None self.sexcat = None self.seeing = None self.apcorr = None # Internal hidden variables self._rdnoise = None self._gain = None self._ccdnum = None self._pixscale = None self._saturate = None self._wcs = None self._exptime = None self._instrument = None self._plver = None self._cpfwhm = None self._daomaglim = None # set by daoaperphot() self._sexmaglim = None # set by runsex() # Logger self.logger = None def __repr__(self): return "Chip object" @property def rdnoise(self): # We have it already, just return it if self._rdnoise is not None: return self._rdnoise # Can't get rdnoise, no header yet if self.meta is None: self.logger.warning("Cannot get RDNOISE, no header yet") return None # Check DECam style rdnoise if "RDNOISEA" in self.meta.keys(): rdnoisea = self.meta["RDNOISEA"] rdnoiseb = self.meta["RDNOISEB"] rdnoise = (rdnoisea+rdnoiseb)*0.5 self._rdnoise = rdnoise return 
self._rdnoise # Get rdnoise from the header for name in ['RDNOISE','READNOIS','ENOISE']: # We have this key, set _rndoise and return if name in self.meta.keys(): self._rdnoise = self.meta[name] return self._rdnoise self.logger.warning('No RDNOISE found') return None @property def gain(self): # We have it already, just return it if self._gain is not None: return self._gain try: gainmap = { 'c4d': lambda x: 0.5*(x.get('gaina')+x.get('gainb')), 'k4m': lambda x: x.get('gain'), 'ksb': lambda x: [1.3,1.5,1.4,1.4][ccdnum-1] } # bok gain in HDU0, use list here gain = gainmap[self.instrument](self.meta) except: gainmap_avg = { 'c4d': 3.9845419, 'k4m': 1.8575, 'ksb': 1.4} gain = gainmap_avg[self.instrument] self._gain = gain return self._gain ## Can't get gain, no header yet #if self.meta is None: # print("Cannot get GAIN, no header yet") ## Get rdnoise from the header #for name in ['GAIN','EGAIN']: # # We have this key, set _gain and return # if self.meta.has_key(name): # self._gain = self.meta[name] # return self._gain #print('No GAIN found') #return None @property def ccdnum(self): # We have it already, just return it if self._ccdnum is not None: return self._ccdnum # Can't get ccdnum, no header yet if self.meta is None: self.logger.warning("Cannot get CCDNUM, no header yet") return None # Get ccdnum from the header # We have this key, set _rndoise and return if 'CCDNUM' in self.meta.keys(): self._ccdnum = self.meta['CCDNUM'] return self._ccdnum self.logger.warning('No CCDNUM found') return None @property def pixscale(self): # We have it already, just return it if self._pixscale is not None: return self._pixscale pixmap = { 'c4d': 0.27, 'k4m': 0.258, 'ksb': 0.45 } try: pixscale = pixmap[self.instrument] self._pixscale = pixscale return self._pixscale except: self._pixscale = np.max(np.abs(self.wcs.pixel_scale_matrix)) return self._pixscale @property def saturate(self): # We have it already, just return it if self._saturate is not None: return self._saturate # Can't get saturate, no header yet if self.meta is None: self.logger.warning("Cannot get SATURATE, no header yet") return None # Get saturate from the header # We have this key, set _saturate and return if 'SATURATE' in self.meta.keys(): self._saturate = self.meta['SATURATE'] return self._saturate self.logger.warning('No SATURATE found') return None @property def wcs(self): # We have it already, just return it if self._wcs is not None: return self._wcs # Can't get wcs, no header yet if self.meta is None: self.logger.warning("Cannot get WCS, no header yet") return None try: self._wcs = WCS(self.meta) return self._wcs except: self.logger.warning("Problem with WCS") return None @property def exptime(self): # We have it already, just return it if self._exptime is not None: return self._exptime # Can't get exptime, no header yet if self.meta is None: self.logger.warning("Cannot get EXPTIME, no header yet") return None # Get rdnoise from the header # We have this key, set _rndoise and return if 'EXPTIME' in self.meta.keys(): self._exptime = self.meta['EXPTIME'] return self._exptime print('No EXPTIME found') return None @property def instrument(self): # We have it already, just return it if self._instrument is not None: return self._instrument # Can't get instrument, no header yet if self.meta is None: self.logger.warning("Cannot get INSTRUMENT, no header yet") return None # instrument, c4d, k4m or ksb # DTINSTRU = 'mosaic3 ' # DTTELESC = 'kp4m ' # Bok 90Prime data has if self.meta.get("DTINSTRU") == 'mosaic3': self._instrument = 'k4m' return 
self._instrument elif self.meta.get("DTINSTRU") == '90prime': self._instrument = 'ksb' return self._instrument else: self._instrument = 'c4d' return self._instrument @property def plver(self): # We have it already, just return it if self._plver is not None: return self._plver # Can't get plver, no header yet if self.meta is None: self.logger.warning("Cannot get PLVER, no header yet") return None plver = self.meta.get('PLVER') if plver is None: self._plver = 'V1.0' self._plver = plver return self._plver @property def cpfwhm(self): # We have it already, just return it if self._cpfwhm is not None: return self._cpfwhm # Can't get fwhm, no header yet if self.meta is None: self.logger.warning("Cannot get CPFWHM, no header yet") return None # FWHM values are ONLY in the extension headers cpfwhm_map = { 'c4d': 1.5 if self.meta.get('FWHM') is None else self.meta.get('FWHM')*0.27, 'k4m': 1.5 if self.meta.get('SEEING1') is None else self.meta.get('SEEING1'), 'ksb': 1.5 if self.meta.get('SEEING1') is None else self.meta.get('SEEING1') } cpfwhm = cpfwhm_map[self.instrument] self._cpfwhm = cpfwhm return self._cpfwhm @property def maglim(self): # We have it already, just return it if self._daomaglim is not None: return self._daomaglim if self._sexmaglim is not None: return self._sexmaglim self.logger.warning('Maglim not set yet') return None # Write SE catalog in DAO format def sextodao(self,cat=None,outfile=None,format="coo"): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] if outfile is None: outfile=daobase+".coo" if cat is None: cat=self.sexcat sextodao(self.sexcat,self.meta,outfile=outfile,format=format,logger=self.logger) # Run Source Extractor #--------------------- def runsex(self,outfile=None): basedir, tmpdir = getnscdirs(self.nscversion) configdir = basedir+"config/" sexcatfile = "flux_sex.cat.fits" sexcat, maglim = runsex(self.fluxfile,self.wtfile,self.maskfile,self.meta,sexcatfile,configdir,logger=self.logger) self.sexcat = sexcatfile self.sexcat = sexcat self._sexmaglim = maglim # Set the FWHM as well fwhm = sexfwhm(sexcat,logger=self.logger) self.meta['FWHM'] = fwhm # Determine FWHM using SE catalog #-------------------------------- def sexfwhm(self): self.seeing = sexfwhm(self.sexcat) return self.seeing # Pick PSF candidates using SE catalog #------------------------------------- def sexpickpsf(self,nstars=100): base = os.path.basename(self.sexfile) base = os.path.splitext(os.path.splitext(base)[0])[0] fwhm = self.sexfwhm() if self.seeing is None else self.seeing psfcat = sexpickpsf(self.sexcat,fwhm,self.meta,base+".lst",nstars=nstars,logger=self.logger) # Make DAOPHOT option files #-------------------------- #def mkopt(self,**kwargs): def mkopt(self): base = os.path.basename(self.daofile) base = os.path.splitext(os.path.splitext(base)[0])[0] #mkopt(base,self.meta,logger=self.logger,**kwargs) mkopt(base,self.meta,logger=self.logger) # Make image ready for DAOPHOT def mkdaoim(self): mkdaoim(self.fluxfile,self.wtfile,self.maskfile,self.meta,self.daofile,logger=self.logger) # DAOPHOT detection #---------------------- def daofind(self): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] cat = daofind(self.daofile,outfile=daobase+".coo",logger=self.logger) # DAOPHOT aperture photometry #---------------------------- def daoaperphot(self): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] apcat, maglim = 
daoaperphot(self.daofile,daobase+".coo",outfile=daobase+".ap",logger=self.logger) self._daomaglim = maglim # Pick PSF stars using DAOPHOT #----------------------------- def daopickpsf(self,maglim=None,nstars=100): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] if maglim is None: maglim=self.maglim psfcat = daopickpsf(self.daofile,daobase+".ap",maglim,daobase+".lst",nstars,logger=self.logger) # Run DAOPHOT PSF #------------------- def daopsf(self,verbose=False): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] psfcat = daopsf(self.daofile,daobase+".lst",outfile=daobase+".psf",verbose=verbose,logger=self.logger) # Subtract neighbors of PSF stars #-------------------------------- def subpsfnei(self): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] psfcat = subpsfnei(self.daofile,daobase+".lst",daobase+".nei",daobase+"a.fits",logger=self.logger) # Create DAOPHOT PSF #------------------- def createpsf(self,listfile=None,apfile=None,doiter=True,maxiter=5,minstars=6,subneighbors=True,verbose=False): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] createpsf(daobase+".fits",daobase+".ap",daobase+".lst",meta=self.meta,logger=self.logger) # Run ALLSTAR #------------- def allstar(self,psffile=None,apfile=None,subfile=None): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] alscat = allstar(daobase+".fits",daobase+".psf",daobase+".ap",outfile=daobase+".als",meta=self.meta,logger=self.logger) # Get aperture correction #------------------------ def getapcor(self): daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] apcorr = apcor(daobase+"a.fits",daobase+".lst",daobase+".psf",self.meta,optfile=daobase+'.opt',alsoptfile=daobase+".als.opt",logger=self.logger) self.apcorr = apcorr self.meta['apcor'] = (apcorr,"Aperture correction in mags") # Combine SE and DAOPHOT catalogs #-------------------------------- def finalcat(self,outfile=None,both=True,sexdetect=True): # both Only keep sources that have BOTH SE and ALLSTAR information # sexdetect SE catalog was used for DAOPHOT detection list self.logger.info("-- Creating final combined catalog --") daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] if outfile is None: outfile=self.base+".cat.fits" # Check that we have the SE and ALS information if (self.sexcat is None) | (os.path.exists(daobase+".als") is None): self.logger.warning("SE catalog or ALS catalog NOT found") return # Load ALS catalog als = Table(daoread(daobase+".als")) nals = len(als) # Apply aperture correction if self.apcorr is None: self.logger.error("No aperture correction available") return als['MAG'] -= self.apcorr # Just add columns to the SE catalog ncat = len(self.sexcat) newcat = self.sexcat.copy() alsnames = ['X','Y','MAG','ERR','SKY','ITER','CHI','SHARP'] newnames = ['XPSF','YPSF','MAGPSF','ERRPSF','SKY','ITER','CHI','SHARP','RAPSF','DECPSF'] newtypes = ['float64','float64','float','float','float','float','float','float','float64','float64'] nan = float('nan') newvals = [nan, nan, nan, nan ,nan, nan, nan, nan, nan, nan] # DAOPHOT detection list used, need ALS ID if not sexdetect: alsnames = ['ID']+alsnames newnames = ['ALSID']+newnames newtypes = ['int32']+newtypes newvals = [-1]+newvals newcols = [] for n,t,v in 
zip(newnames,newtypes,newvals): col = Column(name=n,length=ncat,dtype=t) col[:] = v newcols.append(col) newcat.add_columns(newcols) # Match up with IDs if SE list used by DAOPHOT if sexdetect: mid, ind1, ind2 = np.intersect1d(newcat["NUMBER"],als["ID"],return_indices=True) for id1,id2 in zip(newnames,alsnames): newcat[id1][ind1] = als[id2][ind2] # Only keep sources that have SE+ALLSTAR information # trim out ones that don't have ALS if (both is True) & (nals<ncat): newcat = newcat[ind1] # Match up with coordinates, DAOPHOT detection list used else: print("Need to match up with coordinates") # Only keep sources that have SE+ALLSTAR information # trim out ones that don't have ALS if (both is True) & (nals<ncat): newcat = newcat[ind1] # Add RA, DEC r,d = self.wcs.all_pix2world(newcat["XPSF"],newcat["YPSF"],1) newcat['RAPSF'] = r newcat['DECPSF'] = d # Write to file self.logger.info("Final catalog = "+outfile) fits.writeto(outfile,None,self.meta,overwrite=True) # meta in PDU header # append the table in extension 1 hdulist = fits.open(outfile) hdu = fits.table_to_hdu(newcat) hdulist.append(hdu) hdulist.writeto(outfile,overwrite=True) hdulist.close() #newcat.write(outfile,overwrite=True) #fits.append(outfile,0,self.meta) # meta is header of 2nd extension # Process a single chip #---------------------- def process(self): self.runsex() self.logger.info("-- Getting ready to run DAOPHOT --") self.mkopt() self.mkdaoim() #self.daodetect() # Create DAOPHOT-style coo file # Need to use SE positions self.sextodao(outfile="flux_dao.coo") self.daoaperphot() self.daopickpsf() self.createpsf() self.allstar() self.getapcor() self.finalcat() # Do I need to rerun daoaperphot to get aperture # photometry at the FINAL allstar positions?? # Is there a way to reduce the number of iterations needed to create the PSF? # what do the ?, * mean anyway? # maybe just remove the worse 10% of stars or something # Put all of the daophot-running into separate function (maybe separate module) # same for sextractor # Maybe make my own xmatch function that does one-to-one matching # Clean up the files #-------------------- def cleanup(self): self.logger.info("Copying final files to output directory "+self.outdir) base = os.path.basename(self.fluxfile) base = os.path.splitext(os.path.splitext(base)[0])[0] daobase = os.path.basename(self.daofile) daobase = os.path.splitext(os.path.splitext(daobase)[0])[0] # Copy the files we want to keep # final combined catalog, logs outcatfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".fits" if os.path.exists(outcatfile): os.remove(outcatfile) shutil.copyfile("flux.cat.fits",outcatfile) # Copy DAOPHOT opt files outoptfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".opt" if os.path.exists(outoptfile): os.remove(outoptfile) shutil.copyfile(daobase+".opt",outoptfile) outalsoptfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".als.opt" if os.path.exists(outalsoptfile): os.remove(outalsoptfile) shutil.copyfile(daobase+".als.opt",outalsoptfile) # Copy DAOPHOT PSF star list outlstfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".psf.lst" if os.path.exists(outlstfile): os.remove(outlstfile) shutil.copyfile(daobase+".lst",outlstfile) # Copy DAOPHOT PSF file outpsffile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".psf" if os.path.exists(outpsffile): os.remove(outpsffile) shutil.copyfile(daobase+".psf",outpsffile) # Copy DAOPHOT .apers file?? 
# copy Allstar PSF subtracted file to output dir outsubfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+"s.fits" if os.path.exists(outsubfile): os.remove(outsubfile) shutil.copyfile(daobase+"s.fits",outsubfile) # Copy SE config file outconfigfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".sex.config" if os.path.exists(outconfigfile): os.remove(outconfigfile) shutil.copyfile("default.config",outconfigfile) # Combine all the log files logfiles = glob.glob(base+"*.log") loglines = [] for logfil in logfiles: loglines += ["==> "+logfil+" <==\n"] f = open(logfil,'r') lines = f.readlines() f.close() loglines += lines loglines += ["\n"] f = open(base+".logs","w") f.writelines("".join(loglines)) f.close() outlogfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".logs" if os.path.exists(outlogfile): os.remove(outlogfile) shutil.copyfile(base+".logs",outlogfile) # Delete temporary directory/files self.logger.info(" Cleaning up") files1 = glob.glob("flux*") files2 = glob.glob("default*") files = files1+files2+["flux.fits","wt.fits","mask.fits","daophot.opt","allstar.opt"] for f in files: if os.path.exists(f): os.remove(f) # Main command-line program if __name__ == "__main__": # Version verdir = "" if len(sys.argv) > 4: version = sys.argv[4] verdir = version if version.endswith('/') else version+"/" else: version = None # Get NSC directories basedir, tmpdir = getnscdirs(version) # Make sure the directories exist if not os.path.exists(basedir): os.makedirs(basedir) if not os.path.exists(tmpdir): os.makedirs(tmpdir) t0 = time.time() print(sys.argv) # Not enough inputs n = len(sys.argv) if n < 4: print("Syntax - nsc_instcal_sexdaophot.py fluxfile wtfile maskfile version") sys.exit() # File names fluxfile = sys.argv[1] wtfile = sys.argv[2] maskfile = sys.argv[3] # Check that the files exist if os.path.exists(fluxfile) is False: print(fluxfile+" file NOT FOUND") sys.exit() if os.path.exists(wtfile) is False: print(wtfile+" file NOT FOUND") sys.exit() if os.path.exists(maskfile) is False: print(maskfile+" file NOT FOUND") sys.exit() # Create the Exposure object exp = Exposure(fluxfile,wtfile,maskfile,nscversion=version) # Run exp.run() print("Total time = "+str(time.time()-t0)+" seconds")
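One idiom worth calling out from this file: the base name of a compressed FITS file such as flux.fits.fz is recovered with two rounds of os.path.splitext, a pattern repeated throughout the Exposure and Chip classes. A small self-contained sketch of that helper:

import os

def strip_fits_fz(path):
    # Two rounds of splitext peel ".fz" and then ".fits", matching the
    # base-name handling used in Exposure.__init__ and Chip.__init__.
    base = os.path.basename(path)
    return os.path.splitext(os.path.splitext(base)[0])[0]

assert strip_fits_fz("/mss/bigflux.fits.fz") == "bigflux"
assert strip_fits_fz("flux.fits") == "flux"  # single extension also works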
38.078219
152
0.591821
3,815
31,643
4.872608
0.155701
0.027436
0.0241
0.025176
0.32729
0.24337
0.213029
0.171661
0.153801
0.137447
0
0.009331
0.278608
31,643
830
153
38.124096
0.805012
0.170211
0
0.253076
0
0
0.099095
0.009361
0
0
0
0
0
1
0.065026
false
0
0.036907
0.001757
0.195079
0.024605
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb8c9e216e31864005a5218bc360deec4e30ce5
270
py
Python
update-readme.py
jutge-org/j3-logos
f47dfc84e8a2a9f987fdb22c432b6a52893fe294
[ "Apache-2.0" ]
1
2020-12-29T12:19:23.000Z
2020-12-29T12:19:23.000Z
update-readme.py
jutge-org/j3-logos
f47dfc84e8a2a9f987fdb22c432b6a52893fe294
[ "Apache-2.0" ]
null
null
null
update-readme.py
jutge-org/j3-logos
f47dfc84e8a2a9f987fdb22c432b6a52893fe294
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3

import glob

text = '''
# Logos for Jutge.org

'''

for png in sorted(glob.glob('*.png')):
    text += '''- %s\n\n <a href='%s'><img src='%s' height='200'></a>\n\n''' % (png, png, png)

with open('README.md', 'w') as file:
    file.write(text)
16.875
94
0.548148
46
270
3.217391
0.652174
0.027027
0
0
0
0
0
0
0
0
0
0.0181
0.181481
270
15
95
18
0.651584
0.077778
0
0
0
0.125
0.391129
0.084677
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb9060cb2e91122e01e0d89597323dc92dbbaa6
14,950
py
Python
adpkd_segmentation/datasets/datasets.py
kurtteichman/adpkd-segmentation-pytorch
20faedfd77aaa26cadfbe636092db3da0f257940
[ "MIT" ]
5
2021-07-09T16:10:56.000Z
2022-03-23T10:22:16.000Z
adpkd_segmentation/datasets/datasets.py
kurtteichman/adpkd-segmentation-pytorch
20faedfd77aaa26cadfbe636092db3da0f257940
[ "MIT" ]
3
2021-06-23T02:47:42.000Z
2022-02-04T22:43:27.000Z
adpkd_segmentation/datasets/datasets.py
aksg87/adpkd-segmentation-pytorch
9a22e06ab905bca456c978f3b40ea427499ccf7d
[ "MIT" ]
2
2021-06-05T22:19:29.000Z
2022-03-13T20:50:13.000Z
import json import numpy as np import torch from pathlib import Path import pandas as pd import pydicom from ast import literal_eval from adpkd_segmentation.data.data_utils import ( get_labeled, get_y_Path, int16_to_uint8, make_dcmdicts, path_2dcm_int16, path_2label, TKV_update, ) from adpkd_segmentation.data.data_utils import ( KIDNEY_PIXELS, STUDY_TKV, VOXEL_VOLUME, ) from adpkd_segmentation.datasets.filters import PatientFiltering class SegmentationDataset(torch.utils.data.Dataset): """Some information about SegmentationDataset""" def __init__( self, label2mask, dcm2attribs, patient2dcm, patient_IDS=None, augmentation=None, smp_preprocessing=None, normalization=None, output_idx=False, attrib_types=None, ): super().__init__() self.label2mask = label2mask self.dcm2attribs = dcm2attribs self.pt2dcm = patient2dcm self.patient_IDS = patient_IDS self.augmentation = augmentation self.smp_preprocessing = smp_preprocessing self.normalization = normalization self.output_idx = output_idx self.attrib_types = attrib_types # store some attributes as PyTorch tensors if self.attrib_types is None: self.attrib_types = { STUDY_TKV: "float32", KIDNEY_PIXELS: "float32", VOXEL_VOLUME: "float32", } self.patients = list(patient2dcm.keys()) # kept for compatibility with previous experiments # following patient order in patient_IDS if patient_IDS is not None: self.patients = patient_IDS self.dcm_paths = [] for p in self.patients: self.dcm_paths.extend(patient2dcm[p]) self.label_paths = [get_y_Path(dcm) for dcm in self.dcm_paths] # study_id to TKV and TKV for each dcm self.studies, self.dcm2attribs = TKV_update(dcm2attribs) # storring attrib types as tensors self.tensor_dict = self.prepare_tensor_dict(self.attrib_types) def __getitem__(self, index): if isinstance(index, slice): return [self[ii] for ii in range(*index.indices(len(self)))] # numpy int16, (H, W) im_path = self.dcm_paths[index] image = path_2dcm_int16(im_path) # image local scaling by default to convert to uint8 if self.normalization is None: image = int16_to_uint8(image) else: image = self.normalization(image, self.dcm2attribs[im_path]) label = path_2label(self.label_paths[index]) # numpy uint8, one hot encoded (C, H, W) mask = self.label2mask(label[np.newaxis, ...]) if self.augmentation is not None: # requires (H, W, C) or (H, W) mask = mask.transpose(1, 2, 0) sample = self.augmentation(image=image, mask=mask) image, mask = sample["image"], sample["mask"] # get back to (C, H, W) mask = mask.transpose(2, 0, 1) # convert to float image = (image / 255).astype(np.float32) mask = mask.astype(np.float32) # smp preprocessing requires (H, W, 3) if self.smp_preprocessing is not None: image = np.repeat(image[..., np.newaxis], 3, axis=-1) image = self.smp_preprocessing(image).astype(np.float32) # get back to (3, H, W) image = image.transpose(2, 0, 1) else: # stack image to (3, H, W) image = np.repeat(image[np.newaxis, ...], 3, axis=0) if self.output_idx: return image, mask, index return image, mask def __len__(self): return len(self.dcm_paths) def get_verbose(self, index): """returns more details than __getitem__() Args: index (int): index in dataset Returns: tuple: sample, dcm_path, attributes dict """ sample = self[index] dcm_path = self.dcm_paths[index] attribs = self.dcm2attribs[dcm_path] return sample, dcm_path, attribs def get_extra_dict(self, batch_of_idx): return {k: v[batch_of_idx] for k, v in self.tensor_dict.items()} def prepare_tensor_dict(self, attrib_types): tensor_dict = {} for k, v in attrib_types.items(): tensor_dict[k] = torch.zeros( 
self.__len__(), dtype=getattr(torch, v) ) for idx, _ in enumerate(self): dcm_path = self.dcm_paths[idx] attribs = self.dcm2attribs[dcm_path] for k, v in tensor_dict.items(): v[idx] = attribs[k] return tensor_dict class DatasetGetter: """Create SegmentationDataset""" def __init__( self, splitter, splitter_key, label2mask, augmentation=None, smp_preprocessing=None, filters=None, normalization=None, output_idx=False, attrib_types=None, ): super().__init__() self.splitter = splitter self.splitter_key = splitter_key self.label2mask = label2mask self.augmentation = augmentation self.smp_preprocessing = smp_preprocessing self.filters = filters self.normalization = normalization self.output_idx = output_idx self.attrib_types = attrib_types dcms_paths = sorted(get_labeled()) print( "The number of images before splitting and filtering: {}".format( len(dcms_paths) ) ) dcm2attribs, patient2dcm = make_dcmdicts(tuple(dcms_paths)) if filters is not None: dcm2attribs, patient2dcm = filters(dcm2attribs, patient2dcm) self.all_patient_IDS = list(patient2dcm.keys()) # train, val, or test self.patient_IDS = self.splitter(self.all_patient_IDS)[ self.splitter_key ] patient_filter = PatientFiltering(self.patient_IDS) self.dcm2attribs, self.patient2dcm = patient_filter( dcm2attribs, patient2dcm ) if self.normalization is not None: self.normalization.update_dcm2attribs(self.dcm2attribs) def __call__(self): return SegmentationDataset( label2mask=self.label2mask, dcm2attribs=self.dcm2attribs, patient2dcm=self.patient2dcm, patient_IDS=self.patient_IDS, augmentation=self.augmentation, smp_preprocessing=self.smp_preprocessing, normalization=self.normalization, output_idx=self.output_idx, attrib_types=self.attrib_types, ) class JsonDatasetGetter: """Get the dataset from a prepared patient ID split""" def __init__( self, json_path, splitter_key, label2mask, augmentation=None, smp_preprocessing=None, normalization=None, output_idx=False, attrib_types=None, ): super().__init__() self.label2mask = label2mask self.augmentation = augmentation self.smp_preprocessing = smp_preprocessing self.normalization = normalization self.output_idx = output_idx self.attrib_types = attrib_types dcms_paths = sorted(get_labeled()) print( "The number of images before splitting and filtering: {}".format( len(dcms_paths) ) ) dcm2attribs, patient2dcm = make_dcmdicts(tuple(dcms_paths)) print("Loading ", json_path) with open(json_path, "r") as f: dataset_split = json.load(f) self.patient_IDS = dataset_split[splitter_key] # filter info dicts to correpsond to patient_IDS patient_filter = PatientFiltering(self.patient_IDS) self.dcm2attribs, self.patient2dcm = patient_filter( dcm2attribs, patient2dcm ) if self.normalization is not None: self.normalization.update_dcm2attribs(self.dcm2attribs) def __call__(self): return SegmentationDataset( label2mask=self.label2mask, dcm2attribs=self.dcm2attribs, patient2dcm=self.patient2dcm, patient_IDS=self.patient_IDS, augmentation=self.augmentation, smp_preprocessing=self.smp_preprocessing, normalization=self.normalization, output_idx=self.output_idx, attrib_types=self.attrib_types, ) class InferenceDataset(torch.utils.data.Dataset): """Some information about SegmentationDataset""" def __init__( self, dcm2attribs, patient2dcm, augmentation=None, smp_preprocessing=None, normalization=None, output_idx=False, attrib_types=None, ): super().__init__() self.dcm2attribs = dcm2attribs self.pt2dcm = patient2dcm self.augmentation = augmentation self.smp_preprocessing = smp_preprocessing self.normalization = normalization 
self.output_idx = output_idx self.attrib_types = attrib_types self.patients = list(patient2dcm.keys()) self.dcm_paths = [] for p in self.patients: self.dcm_paths.extend(patient2dcm[p]) # Sorts Studies by Z axis studies = [ pydicom.dcmread(path).SeriesDescription for path in self.dcm_paths ] folders = [path.parent.name for path in self.dcm_paths] patients = [pydicom.dcmread(path).PatientID for path in self.dcm_paths] x_dims = [pydicom.dcmread(path).Rows for path in self.dcm_paths] y_dims = [pydicom.dcmread(path).Columns for path in self.dcm_paths] z_pos = [ literal_eval(str(pydicom.dcmread(path).ImagePositionPatient))[2] for path in self.dcm_paths ] acc_nums = [ pydicom.dcmread(path).AccessionNumber for path in self.dcm_paths ] ser_nums = [ pydicom.dcmread(path).SeriesNumber for path in self.dcm_paths ] data = { "dcm_paths": self.dcm_paths, "folders": folders, "studies": studies, "patients": patients, "x_dims": x_dims, "y_dims": y_dims, "z_pos": z_pos, "acc_nums": acc_nums, "ser_nums": ser_nums, } group_keys = [ "folders", "studies", "patients", "x_dims", "y_dims", "acc_nums", "ser_nums", ] dataset = pd.DataFrame.from_dict(data) dataset["slice_pos"] = "" grouped_dataset = dataset.groupby(group_keys) for (name, group) in grouped_dataset: sort_key = "z_pos" # handle missing slice position with filename if group[sort_key].isna().any(): sort_key = "dcm_paths" zs = list(group[sort_key]) sorted_idxs = np.argsort(zs) slice_map = { zs[idx]: pos for idx, pos in zip(sorted_idxs, range(len(zs))) } zs_slice_pos = group[sort_key].map(slice_map) for i in group.index: dataset.at[i, "slice_pos"] = zs_slice_pos.get(i) grouped_dataset = dataset.groupby(group_keys) for (name, group) in grouped_dataset: group.sort_values(by="slice_pos", inplace=True) self.df = dataset self.dcm_paths = list(dataset["dcm_paths"]) def __getitem__(self, index): if isinstance(index, slice): return [self[ii] for ii in range(*index.indices(len(self)))] # numpy int16, (H, W) im_path = self.dcm_paths[index] image = path_2dcm_int16(im_path) # image local scaling by default to convert to uint8 if self.normalization is None: image = int16_to_uint8(image) else: image = self.normalization(image, self.dcm2attribs[im_path]) if self.augmentation is not None: sample = self.augmentation(image=image) image = sample["image"] # convert to float image = (image / 255).astype(np.float32) # smp preprocessing requires (H, W, 3) if self.smp_preprocessing is not None: image = np.repeat(image[..., np.newaxis], 3, axis=-1) image = self.smp_preprocessing(image).astype(np.float32) # get back to (3, H, W) image = image.transpose(2, 0, 1) else: # stack image to (3, H, W) image = np.repeat(image[np.newaxis, ...], 3, axis=0) if self.output_idx: return image, index return image def __len__(self): return len(self.dcm_paths) def get_verbose(self, index): """returns more details than __getitem__() Args: index (int): index in dataset Returns: tuple: sample, dcm_path, attributes dict """ sample = self[index] dcm_path = self.dcm_paths[index] attribs = self.dcm2attribs[dcm_path] return sample, dcm_path, attribs class InferenceDatasetGetter: """Get the dataset from a prepared patient ID split""" def __init__( self, inference_path, augmentation=None, smp_preprocessing=None, normalization=None, output_idx=False, attrib_types=None, ): super().__init__() self.augmentation = augmentation self.smp_preprocessing = smp_preprocessing self.normalization = normalization self.output_idx = output_idx self.attrib_types = attrib_types self.inference_path = Path(inference_path) all_paths 
= set(self.inference_path.glob("**/*")) dcms_paths = [] for path in all_paths: if path.is_file(): try: pydicom.filereader.dcmread(path, stop_before_pixels=True) dcms_paths.append(path) except pydicom.errors.InvalidDicomError: continue self.dcm2attribs, self.patient2dcm = make_dcmdicts( tuple(dcms_paths), label_status=False, WCM=False ) if self.normalization is not None: self.normalization.update_dcm2attribs(self.dcm2attribs) def __call__(self): return InferenceDataset( dcm2attribs=self.dcm2attribs, patient2dcm=self.patient2dcm, augmentation=self.augmentation, smp_preprocessing=self.smp_preprocessing, normalization=self.normalization, output_idx=self.output_idx, attrib_types=self.attrib_types, )
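Both dataset classes above support slicing in __getitem__ by expanding the slice with slice.indices, which clamps start, stop, and step to the dataset length. A toy dataset demonstrating the same branch:

# Toy stand-in for SegmentationDataset/InferenceDataset showing how the
# slice branch in __getitem__ expands a slice into per-item lookups.
class ToyDataset:
    def __init__(self, items):
        self.items = items

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        if isinstance(index, slice):
            # slice.indices(n) returns (start, stop, step) clamped to n
            return [self[i] for i in range(*index.indices(len(self)))]
        return self.items[index]

ds = ToyDataset(list("abcde"))
assert ds[1:4] == ["b", "c", "d"]
assert ds[::2] == ["a", "c", "e"]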
30.572597
79
0.596321
1,649
14,950
5.189206
0.149788
0.050485
0.030852
0.014725
0.660395
0.636087
0.592264
0.572397
0.563282
0.555218
0
0.014673
0.316187
14,950
488
80
30.635246
0.822361
0.083679
0
0.563889
0
0
0.02375
0
0
0
0
0
0
1
0.044444
false
0
0.027778
0.016667
0.127778
0.008333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb94af127b7b6cb84e9109091abb7f212cbe179
947
py
Python
main.py
Arcxdd/Kali-Docker
a59bfdc24dde3e8105762c6e44bd5a6115afd44d
[ "Unlicense" ]
null
null
null
main.py
Arcxdd/Kali-Docker
a59bfdc24dde3e8105762c6e44bd5a6115afd44d
[ "Unlicense" ]
null
null
null
main.py
Arcxdd/Kali-Docker
a59bfdc24dde3e8105762c6e44bd5a6115afd44d
[ "Unlicense" ]
null
null
null
import os
import sys

from halo import Halo

spinner = Halo(text='Please wait...', spinner='dots')


def main():
    """Main program"""
    portsToExpose = str(input('Ports to expose [Default: 22 for SSH]: '))
    print("Installing...\n")
    spinner.start()
    os.system("docker pull kalilinux/kali-rolling")
    if portsToExpose != "":
        os.system(
            f"docker run -dt --name kali-vm -p {portsToExpose}:{portsToExpose} -i kalilinux/kali-rolling")
    else:
        os.system(
            "docker run -dt --name kali-vm -p 22:22 -i kalilinux/kali-rolling")
    spinner.stop()
    os.system("docker exec -it kali-vm bash")


def run():
    """Program entry point"""
    try:
        main()
    except KeyboardInterrupt:
        print("\n\nExiting...")
        try:
            os._exit(1)
        except SystemExit:
            sys.exit(1)


if __name__ == "__main__":
    run()
22.547619
107
0.549102
110
947
4.645455
0.5
0.062622
0.082192
0.058708
0.086106
0.086106
0.086106
0
0
0
0
0.012195
0.307286
947
41
108
23.097561
0.766768
0.033791
0
0.142857
0
0.071429
0.359212
0.112399
0
0
0
0
0
1
0.071429
false
0
0.107143
0
0.178571
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edb9a342dd84b94be341c6a7b6d981951da4877d
1,098
py
Python
.vscode/compilers/pycompile.py
croghostrider/Loxone-Recovery
cb47a6fd8a685e5995f11e61f3a6e0126fb19828
[ "MIT" ]
1
2022-03-20T22:27:45.000Z
2022-03-20T22:27:45.000Z
.vscode/compilers/pycompile.py
croghostrider/Loxone-Recovery
cb47a6fd8a685e5995f11e61f3a6e0126fb19828
[ "MIT" ]
null
null
null
.vscode/compilers/pycompile.py
croghostrider/Loxone-Recovery
cb47a6fd8a685e5995f11e61f3a6e0126fb19828
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import os
import shutil
import subprocess
import sys

# ARGS
fileDirname = sys.argv[1]
fileBasename = sys.argv[2]
workspaceFolder = sys.argv[3]

# TRANSFORMATION
relativeFileDirname = fileDirname[len(workspaceFolder)+1:]
fileBasenameNoExtension = "".join(fileBasename.rsplit(".py", 1))
distpath = os.path.join(workspaceFolder, "output", relativeFileDirname)


# COMMAND GENERATOR
def construct():
    COMMAND = [
        "pyinstaller",
        "--onefile",
        "--clean",
        "--noconsole",
        "--distpath", distpath,
    ]
    for file in os.listdir(fileDirname):
        if file.endswith(".ico"):
            COMMAND.append("--icon")
            COMMAND.append(os.path.join(fileDirname, file))
            break
    COMMAND.append(fileBasename)
    return COMMAND


# CLEANER
def clean():
    path = os.path.join(fileDirname, "__pycache__")
    shutil.rmtree(path)
    path = os.path.join(fileDirname, "build")
    shutil.rmtree(path)


if __name__ == "__main__":
    COMMAND = construct()
    subprocess.check_call(COMMAND, cwd=fileDirname)
    clean()
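A hypothetical invocation of the script above, mirroring the sys.argv contract it expects (argv[1] = file directory, argv[2] = file base name, argv[3] = workspace root); the paths here are made up for illustration:

import subprocess
import sys

# Run the compiler script the way a VS Code task would pass its variables.
subprocess.check_call([
    sys.executable, ".vscode/compilers/pycompile.py",
    "/home/user/proj/tools",   # fileDirname (hypothetical)
    "app.py",                  # fileBasename (hypothetical)
    "/home/user/proj",         # workspaceFolder (hypothetical)
])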
23.361702
71
0.652095
114
1,098
6.166667
0.482456
0.034139
0.056899
0.089616
0.071124
0
0
0
0
0
0
0.006936
0.212204
1,098
46
72
23.869565
0.80578
0.06102
0
0.057143
0
0
0.088694
0
0
0
0
0
0
1
0.057143
false
0
0.114286
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edba4dad99f149168f42524be29efbe1763f78a1
1,430
py
Python
white-head-mountain/pcdn/mainpcdn.py
jiangwenfan/pythonScripts
c9004944f162af575e111522f98d4de4f59885e6
[ "Apache-2.0" ]
null
null
null
white-head-mountain/pcdn/mainpcdn.py
jiangwenfan/pythonScripts
c9004944f162af575e111522f98d4de4f59885e6
[ "Apache-2.0" ]
null
null
null
white-head-mountain/pcdn/mainpcdn.py
jiangwenfan/pythonScripts
c9004944f162af575e111522f98d4de4f59885e6
[ "Apache-2.0" ]
null
null
null
from hostNameHandle import hostNameHandle
from gethostList import get_ips
from getwechat import getProxyInfo
from gethostNameIp import getIps
from getFrequency import getFrequency
from getDownloadAccount import getInfo
from sendMessage import sendMessage

hostName = input("主机名:")  # prompt: "Hostname:"
type = str(input("1 \"频繁掉线\" or 2 \"接口不在线\": "))  # 1 "frequent disconnects" or 2 "interface offline"

nodeName = hostNameHandle(hostName)
# print(nodeName)
hostList = get_ips(nodeName)
# print(hostList)
hoststr = " \n ".join(hostList)  # hostname string, newline-separated
# print(hoststr)
hostipList = getIps(hoststr)  # list of host IPs
hostipStr = "\n".join(hostipList)

proxyInfoDic = getProxyInfo(nodeName)
wecharGroup = proxyInfoDic["wechat_group_name"]  # WeChat group
# print(proxyInfoDic)
nameNode = proxyInfoDic['name']
wxgroup = proxyInfoDic['wechat_group_name']
# print(hostipList)


def show(messageHead, messageBody):
    print("\n\t 节点名:\n\t\t" + messageHead.strip("\n"))  # "Node name:"
    print("\t 微信群:" + wxgroup)  # "WeChat group:"
    message = messageHead + "\n" + messageBody
    # print(message)
    sendMessage(message, wxgroup)


def getres():
    with open("/home/jwf/pfdx/result.txt", 'r') as f:
        return f.read()


if type == "1":
    getFrequency(hostipStr)  # print the statistics
    messageHead = nameNode + " 频繁掉线,麻烦处理一下!\n"  # "... frequent disconnects, please look into it!"
    messageBody = getres()
    show(messageHead, messageBody)
elif type == "2":
    data = getInfo(hostipStr)
    messageHead = nameNode + " 账号拨不上,麻烦处理一下!\n"  # "... account cannot dial up, please look into it!"
    messageBody = data
    show(messageHead, messageBody)
else:
    print("invalid input")
25.087719
53
0.723776
168
1,430
6.125
0.440476
0.043732
0.075802
0.052478
0
0
0
0
0
0
0
0.003276
0.146154
1,430
56
54
25.535714
0.839476
0.083916
0
0.052632
0
0
0.133655
0.020129
0
0
0
0
0
1
0.052632
false
0
0.184211
0
0.263158
0.078947
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edbce3d11b39d90338cc61b5efce850628014657
2,041
py
Python
automol/reac/_instab.py
snelliott/automol
d1f7d51c1bbe06ba7569ea7c75304618cebee198
[ "Apache-2.0" ]
2
2021-03-01T14:23:25.000Z
2021-11-28T19:17:08.000Z
automol/reac/_instab.py
snelliott/automol
d1f7d51c1bbe06ba7569ea7c75304618cebee198
[ "Apache-2.0" ]
1
2021-02-12T21:02:22.000Z
2021-02-12T21:35:33.000Z
automol/reac/_instab.py
snelliott/automol
d1f7d51c1bbe06ba7569ea7c75304618cebee198
[ "Apache-2.0" ]
6
2020-12-12T18:41:13.000Z
2021-11-11T20:12:14.000Z
""" Build unstable products """ from phydat import instab_fgrps import automol.graph from automol.reac._util import rxn_objs_from_zmatrix import automol.geom import automol.inchi import automol.zmat from automol.graph import radical_dissociation_prods from automol.graph import radical_group_dct def instability_product_zmas(zma): """ Determine if the species has look for functional group attachments that could cause molecule instabilities """ disconn_zmas = () for gra in instability_product_graphs(automol.zmat.graph(zma)): ich = automol.graph.inchi(gra) geo_tmp = automol.inchi.geometry(ich) zma = automol.geom.zmatrix(geo_tmp) disconn_zmas += (zma,) return disconn_zmas def instability_product_graphs(gra): """ Determine if the species has look for functional group attachments that could cause molecule instabilities """ # Build graphs for the detection scheme rad_grp_dct = radical_group_dct(gra) # Check for instability causing functional groups prd_gras = () for atm, grps in rad_grp_dct.items(): if atm in instab_fgrps.DCT: fgrps, prds = instab_fgrps.DCT[atm] for grp in grps: grp_ich = automol.graph.inchi(grp) if grp_ich in fgrps: # If instability found, determine prod of the instability prd_ich = prds[fgrps.index(grp_ich)] prd_geo = automol.inchi.geometry(prd_ich) prd_gra = automol.geom.graph(prd_geo) prd_gras = radical_dissociation_prods( gra, prd_gra) break return prd_gras def instability_transformation(conn_zma, disconn_zmas): """ Build the reaction objects for an instability """ zrxn_objs = rxn_objs_from_zmatrix( [conn_zma], disconn_zmas, indexing='zma') if zrxn_objs: zrxn, zma, _, _ = zrxn_objs[0] else: zrxn, zma = None, None return zrxn, zma
30.462687
79
0.654581
257
2,041
4.984436
0.307393
0.046838
0.017174
0.028103
0.188915
0.143638
0.143638
0.143638
0.143638
0.143638
0
0.000676
0.275355
2,041
66
80
30.924242
0.86545
0.211171
0
0
0
0
0.001929
0
0
0
0
0
0
1
0.075
false
0
0.2
0
0.35
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edbecc8726126a9bb26d9234cf7d31303aa4e928
6,012
py
Python
tests/test_select.py
vail130/norm
01a16d6c73c2c6fff92430ca2ca745b295de9a3a
[ "MIT" ]
null
null
null
tests/test_select.py
vail130/norm
01a16d6c73c2c6fff92430ca2ca745b295de9a3a
[ "MIT" ]
1
2016-02-10T00:43:15.000Z
2016-02-10T01:14:37.000Z
tests/test_select.py
vail130/norm
01a16d6c73c2c6fff92430ca2ca745b295de9a3a
[ "MIT" ]
1
2021-03-12T23:21:02.000Z
2021-03-12T23:21:02.000Z
from __future__ import absolute_import, unicode_literals import unittest from mason import Param, ANY, SELECT, COUNT, SUM, AND, OR, Table, NUMERIC, DATE, COALESCE, CASE class TheSelectClass(unittest.TestCase): def test_returns_string_for_select_query(self): purchases = Table('purchases') users = Table('users') user_id = Param('user_id') start = Param('start') end = Param('end') query = str( SELECT(purchases.id, purchases.product_name, NUMERIC(purchases.product_price, 10, 2), DATE(purchases.datetime_purchased)) .FROM(purchases) .INNER_JOIN(users.ON(purchases.purchaser_id == users.user_id)) .WHERE(AND(purchases.datetime_purchased.BETWEEN(start).AND(end), OR(purchases.purchaser_id == user_id, purchases.purchaser_id.IS_NULL))) .ORDER_BY(purchases.datetime_purchased.ASC) .LIMIT(10) .OFFSET(10) ) expected_query = '\n'.join([ "SELECT purchases.id, purchases.product_name, " "(purchases.product_price)::NUMERIC(10, 2), (purchases.datetime_purchased)::DATE", "FROM purchases", "INNER JOIN users ON purchases.purchaser_id = users.user_id", "WHERE purchases.datetime_purchased BETWEEN %(start)s AND %(end)s " "AND (purchases.purchaser_id = %(user_id)s OR purchases.purchaser_id IS NULL)", "ORDER BY purchases.datetime_purchased ASC", "LIMIT 10", "OFFSET 10", ]) self.assertEqual(query, expected_query) def test_returns_string_for_select_query_grouping(self): purchases = Table('purchases') start = Param('start') end = Param('end') min_category_sum = Param('min_category_sum') num_purchases = COUNT(purchases).AS('num_purchases') category_percent = (SUM( CASE.WHEN(purchases.is_valid) .THEN(COALESCE(purchases.product_price, 0)) .ELSE(0).END ) / 100.0).AS('category_percent') category_sum = SUM(COALESCE(purchases.product_price, 0)).AS('category_sum') query = str( SELECT(purchases.category, category_percent, num_purchases) .FROM(purchases) .WHERE(purchases.datetime_purchased.BETWEEN(start).AND(end)) .GROUP_BY(purchases.category) .HAVING(category_sum > min_category_sum) ) expected_query = '\n'.join([ ("SELECT purchases.category, " "(SUM(CASE WHEN purchases.is_valid " "THEN COALESCE(purchases.product_price, 0) ELSE 0 END) / 100.0) AS category_percent, " "COUNT(*) AS num_purchases"), "FROM purchases", "WHERE purchases.datetime_purchased BETWEEN %(start)s AND %(end)s", "GROUP BY purchases.category", "HAVING category_sum > %(min_category_sum)s", ]) self.assertEqual(query, expected_query) def test_returns_string_for_select_query_with_subqueries(self): purchases = Table('purchases') num_purchases = COUNT(purchases).AS('num_purchases') grouped_purchases = ( SELECT(purchases.category.AS('category'), num_purchases) .FROM(purchases) .GROUP_BY(purchases.category) .AS('grouped_purchases') ) products = Table('products') num_products = COUNT(products).AS('num_products') grouped_products = ( SELECT(products.category.AS('category'), num_products) .FROM(products) .GROUP_BY(products.category) .AS('grouped_products') ) categories_param = Param('categories') categories_table = Table('categories') query = str( SELECT(grouped_purchases.category, grouped_purchases.num_purchases, grouped_products.num_products) .FROM(grouped_purchases) .INNER_JOIN(grouped_products.ON(grouped_purchases.category == grouped_products.category)) .WHERE(AND(grouped_purchases.category == ANY(categories_param), grouped_purchases.category.IN(SELECT(categories_table.category).FROM(categories_table)))) ) expected_query = '\n'.join([ "SELECT grouped_purchases.category, grouped_purchases.num_purchases, grouped_products.num_products", "FROM (", "\tSELECT purchases.category 
AS category, COUNT(*) AS num_purchases", "\tFROM purchases", "\tGROUP BY purchases.category", ") AS grouped_purchases", "INNER JOIN (", "\tSELECT products.category AS category, COUNT(*) AS num_products", "\tFROM products", "\tGROUP BY products.category", ") AS grouped_products ON grouped_purchases.category = grouped_products.category", "WHERE grouped_purchases.category = ANY(%(categories)s) " "AND grouped_purchases.category IN (", "\tSELECT categories.category", "\tFROM categories", ")", ]) self.assertEqual(query, expected_query) def test_returns_string_for_select_query_with_joins(self): table = Table('table') query = str( SELECT('*') .FROM(table) .LEFT_OUTER_JOIN(table) .RIGHT_OUTER_JOIN(table) .FULL_OUTER_JOIN(table) .OUTER_JOIN(table) .LIMIT(10) ) expected_query = '\n'.join([ "SELECT *", "FROM table", "LEFT OUTER JOIN table", "RIGHT OUTER JOIN table", "FULL OUTER JOIN table", "OUTER JOIN table", "LIMIT 10", ]) self.assertEqual(query, expected_query)
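The tests above exercise a fluent builder: each clause method appends to internal state and returns self, and str() renders the accumulated clauses joined by newlines. A toy illustration of that pattern (not mason's actual implementation):

# Toy fluent SQL builder demonstrating the chaining/rendering contract
# that the assertions above rely on.
class ToySelect:
    def __init__(self, *cols):
        self.clauses = ["SELECT " + ", ".join(cols)]

    def FROM(self, table):
        self.clauses.append("FROM " + table)
        return self  # returning self is what makes the chaining work

    def LIMIT(self, n):
        self.clauses.append("LIMIT %d" % n)
        return self

    def __str__(self):
        return "\n".join(self.clauses)

assert str(ToySelect("*").FROM("table").LIMIT(10)) == "SELECT *\nFROM table\nLIMIT 10"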
39.552632
116
0.585329
605
6,012
5.583471
0.147107
0.085554
0.061575
0.023683
0.657194
0.605684
0.483422
0.434873
0.434873
0.434873
0
0.007438
0.30672
6,012
151
117
39.81457
0.803023
0
0
0.276923
0
0.007692
0.268796
0.07851
0
0
0
0
0.030769
1
0.030769
false
0
0.023077
0
0.061538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edc2a9f255a5bcdbe6de9a345b01330bea716cf9
37,250
py
Python
gridwxcomp/calc_bias_ratios.py
DRI-WSWUP/grid-et-bias
91998b5827a8069563394b797b253e33c546765f
[ "Apache-2.0" ]
13
2019-04-02T20:21:34.000Z
2022-01-26T22:45:04.000Z
gridwxcomp/calc_bias_ratios.py
DRI-WSWUP/grid-et-bias
91998b5827a8069563394b797b253e33c546765f
[ "Apache-2.0" ]
20
2019-02-27T22:40:13.000Z
2021-05-28T03:06:48.000Z
gridwxcomp/calc_bias_ratios.py
DRI-WSWUP/gridwxcomp
91998b5827a8069563394b797b253e33c546765f
[ "Apache-2.0" ]
6
2019-04-02T17:28:31.000Z
2022-01-29T14:07:25.000Z
# -*- coding: utf-8 -*- """ Calculate monthly bias ratios of variables from climate station to overlapping gridMET (or other gridded dataset) cells. Input file for this module must first be created by running :mod:`gridwxcomp.prep_input` followed by :mod:`gridwxcomp.download_gridmet_opendap`. Attributes: GRIDMET_STATION_VARS (:obj:`dict`): mapping dictionary with gridMET variable names as keys and station variable names as values. Used to determine which station variable to calculate bias ratios according to the given gridMET variable. Default values:: GRIDMET_STATION_VARS = { 'u2_ms' : 'ws_2m (m/s)', 'tmin_c' : 'TMin (C)', 'tmax_c' : 'TMax (C)', 'srad_wm2' : 'Rs (w/m2)', 'ea_kpa' : 'Vapor Pres (kPa)', 'prcp_mm' : 'Precip (mm)', 'etr_mm' : 'ETr (mm)', 'eto_mm' : 'ETo (mm)' } Note: The module attribute ``GRIDMET_STATION_VARS`` can be manually adjusted, if ``gridwxcomp`` is installed in editable mode or used as scripts from the root directory. New pairs of station-to-grid variables can then be made or removed to efficiently use :mod:`gridwxcomp` on station data that was **not** created by `PyWeatherQAQC <https://github.com/WSWUP/pyWeatherQAQC>`_. Otherwise, the same can be achieved by the ``var_dict`` or ``grid_var`` and ``station_var`` arguments to :func:`calc_bias_ratios`. """ import os import calendar import argparse import warnings import pandas as pd import numpy as np # allows for CL script usage if gridwxcomp not installed try: from .util import parse_yr_filter except: from util import parse_yr_filter # keys = gridMET variable name # values = climate station variable name GRIDMET_STATION_VARS = { 'u2_ms' : 'ws_2m (m/s)', 'tmin_c' : 'TMin (C)', 'tmax_c' : 'TMax (C)', 'srad_wm2' : 'Rs (w/m2)', 'ea_kpa' : 'Vapor Pres (kPa)', 'prcp_mm' : 'Precip (mm)', 'etr_mm' : 'ETr (mm)', 'eto_mm' : 'ETo (mm)' } OPJ = os.path.join def main(input_file_path, out_dir, method='long_term_mean', grid_id_name='GRIDMET_ID', grid_var='etr_mm', station_var=None, station_date_name='date', grid_date_name='date', grid_ID=None, day_limit=10, years='all', comp=True): """ Calculate monthly bias ratios between station climate and gridMET cells that correspond with each other geographically. Saves data to CSV files in the given output directory. If run later with new station data, bias ratios for new stations will be appended to existing output summary CSV. Arguments: input_file_path (str): path to input CSV file containing paired station/gridMET metadata. This file is created by running :mod:`gridwxcomp.prep_input` followed by :mod:`gridwxcomp.download_gridmet_opendap`. out_dir (str): path to directory to save CSV files with monthly bias ratios of etr. Keyword Arguments: method (str): default 'long_term_mean'. How to calculate mean station to grid ratios, currently two options 'long_term_mean' takes the mean of all dates for the station variable that fall in a time periods, e.g. the month of January, to the mean of all paired January dates in the gridded product. The other option is 'mean_of_annual' which calculates ratios, for each time period if enough paired days exist, the ratio of sums for each year in the record and then takes the mean of the annual ratios. This method is always used to calculate standard deviation and coefficient of variation of ratios which describe interannual variation of ratios. grid_var (str): default 'etr_mm'. Grid climate variable to calculate bias ratios. station_var (str): default None. Climate station variable to use to calculate bias ratios. 
If None, look up using ``grid_var`` as a key to :attr:`GRIDMET_STATION_VARS` dictionary found as a module attribute to :mod:`gridwxcomp.calc_bias_ratios`. grid_ID (int): default None. Grid ID (int cell identifier) to only calculate bias ratios for a single gridcell. day_limit (int): default 10. Threshold number of days in month of missing data, if less exclude month from calculations. years (int or str): default 'all'. Years to use for calculations e.g. 2000-2005 or 2011. comp (bool): default True. Flag to save a "comprehensive" summary output CSV file that contains station metadata and statistics in addition to the mean monthly ratios. Returns: None Examples: From the command line interface, .. code-block:: sh $ # for all gridMET cells in input file for gridMET var "etr_mm" (default) $ python calc_bias_ratios.py -i merged_input.csv -o monthly_ratios $ # for all gridMET cells in input file for gridMET var "eto_mm" $ python calc_bias_ratios.py -i merged_input.csv -o monthly_ratios -gv eto_mm $ # for a specific gridMET cell ID for "etr_mm" $ python calc_bias_ratios.py -i merged_input.csv -o monthly_ratios -id 509011 $ # to exclude any months with less than 15 days of data $ python calc_bias_ratios.py -i merged_input.csv -o monthly_ratios -d 15 It is also possible for the user to define their own station variable name if, for example, they are using station data that was **not** created by `PyWeatherQAQC <https://github.com/WSWUP/pyWeatherQAQC>`_. Let's say our station time series has ETo named as 'EO' then use the ``[-sv, --station-var]`` and ``[-gv, --grid-var]`` options .. code-block:: sh $ python calc_bias_ratios.py -i merged_input.csv -o monthly_ratios -sv EO -gv eto_mm This will produce two CSV files in ``out_dir`` named "eto_mm_summary_all_yrs.csv" and "eto_mm_summary_comp_all_yrs.csv". If the ``[-y, --years]`` option is assigned, e.g. as '2010', then the output CSVs will have '2010' suffix, i.e. 'eto_mm_summary_comp_2010.csv' For use within Python see :func:`calc_bias_ratios`. Note: If ``[-gv, --grid-var]`` command line option or ``grid_var`` keyword argument is given but the station variable is left as default (None), the corresponding station variable is looked up from the mapping dictionary in :mod:`gridwxcomp.calc_bias_ratios` named ``GRIDMET_STATION_VARS``. To efficiently use climate data that was **not** created by `PyWeatherQAQC <https://github.com/WSWUP/pyWeatherQAQC>`_ which is where the default names are derived you can manually adjust ``GRIDMET_STATION_VARS`` near the top of the :mod:`gridwxcomp.calc_bias_ratios` submodule file. Alternatively, the gridMET and station variable names may be explicitly passed as command line or function arguments. """ # calculate monthly bias ratios and save to CSV files calc_bias_ratios( input_file_path, out_dir, method=method, grid_id_name=grid_id_name, grid_var=grid_var, station_var=station_var, station_date_name=station_date_name, grid_date_name=grid_date_name, grid_ID=grid_ID, day_limit=day_limit, comp=comp ) def _save_output(out_df, comp_out_df, out_dir, grid_ID, var_name, yrs): """ Save short summary file or overwrite existing data for a single climate station. Arguments: out_df (:class:`pandas.DataFrame`): data containing short summary info, mainly mean monthly bias ratios for a single climate station to save. comp_out_df (:class:`pandas.DataFrame`, bool): either a single row dataframe with comprehensive summary data or False (default). depends on ``comp`` argument to :func:`calc_bias_ratios`. 
If :class:`pandas.DataFrame` is passed then save or update existing file. out_dir (str): path to directory to save or update summary data for monthly bias ratios. grid_ID (int, optional): depends on ``grid_ID`` argument passed to :func:`calc_bias_ratios`. If not None (default) then save summary files for stations that correspond with the given gridMET ID with the suffix "_X" where X is the gridMET ID value. var_name (str): name of gridMET variable that is being processed. yrs (str): years used to calc ratios, save to out files as suffix. Returns: None """ def __save_update(out_df, out_file): """ Helper function that is reused to save or update both short and long summary files one station or row at a time. Saves station ratio data by appending to existing file or overwriting data for a station if it was previously calculated. `out_df` is a single row from the ratio results table representing data for a single climate station-gridcell pair. """ # if short file exists add/overwrite row for station if os.path.isfile(out_file): existing_df = pd.read_csv(out_file, index_col='STATION_ID') if not out_df.index.values[0] in existing_df.index.values: out_df = pd.concat([existing_df, out_df], sort=False) out_df.to_csv(out_file, na_rep=-999, index=True) # overwrite if station is in existing, could change to # allow for duplicates if values are different else: existing_df.loc[out_df.index.values[0], :] =\ out_df.loc[out_df.index.values[0]] existing_df.to_csv(out_file, na_rep=-999, index=True) else: out_df.to_csv(out_file, na_rep=-999, index=True) # save or update short and comprehensive summary files if not os.path.isdir(out_dir): os.mkdir(out_dir) # save/update short summary file or update existing with new station if not grid_ID: out_file = OPJ( out_dir, '{v}_summary_{y}.csv'.format(v=var_name, y=yrs) ) else: out_file = OPJ( out_dir, '{v}_summary_grid_{g}_{y}.csv'.format( v=var_name, g=grid_ID, y=yrs) ) __save_update(out_df, out_file) # if comprehensive summary is requested save/update if isinstance(comp_out_df, pd.DataFrame): if not grid_ID: comp_out_file = OPJ( out_dir, '{v}_summary_comp_{y}.csv'.format(v=var_name, y=yrs) ) else: comp_out_file = OPJ( out_dir, '{v}_summary_comp_{g}_{y}.csv'.format( v=var_name, g=grid_ID, y=yrs) ) __save_update(comp_out_df, comp_out_file) def calc_bias_ratios(input_path, out_dir, method='long_term_mean', grid_id_name='GRIDMET_ID', grid_var='etr_mm', station_var=None, var_dict=None, station_date_name='date', grid_date_name='date', grid_ID=None, day_limit=10, years='all', comp=True): """ Read input CSV file and calculate mean monthly bias ratios between station to corresponding grid cells for all station and grid pairs, optionally calculate ratios for a single gridcell. Arguments: input_path (str): path to input CSV file with matching station climate and grid metadata. This file is created by running :func:`gridwxcomp.prep_input` followed by :func:`gridwxcomp.download_gridmet_opendap`. out_dir (str): path to directory to save CSV files with monthly bias ratios of etr. Keyword Arguments: method (str): default 'long_term_mean'. How to calculate mean station to grid ratios, currently two options 'long_term_mean' takes the mean of all dates for the station variable that fall in a time periods, e.g. the month of January, to the mean of all paired January dates in the gridded product. 
The other option is 'mean_of_annual' which calculates ratios, for each time period if enough paired days exist, the ratio of sums for each year in the record and then takes the mean of the annual ratios. This method is always used to calculate standard deviation and coefficient of variation of ratios which describe interannual variation of ratios. grid_id_name (str): default 'GRIDMET_ID'. Name of index/cell identifier for gridded dataset, only change if supplying user grid data. grid_var (str): default 'etr_mm'. Grid climate variable to calculate bias ratios. station_var (str): default None. Climate station variable to use to calculate bias ratios. If None, look up using ``grid_var`` as a key to :attr:`GRIDMET_STATION_VARS` dictionary found as a module attribute to :mod:`gridwxcomp.calc_bias_ratios`. var_dict (dict): default None. Dictionary that maps grid variable names to station variable names to overide gridMET and PyWeatherQaQc defaules used by :attr:`GRIDMET_STATION_VARS`. grid_ID (int): default None. Grid ID (int cell identifier) to only calculate bias ratios for a single gridcell. day_limit (int): default 10. Threshold number of days in month of missing data, if less exclude month from calculations. Ignored when ``method='long_term_mean'``. years (int or str): default 'all'. Years to use for calculations e.g. 2000-2005 or 2011. comp (bool): default True. Flag to save a "comprehensive" summary output CSV file that contains station metadata and statistics in addition to the mean monthly ratios. Returns: None Examples: To use within Python for observed ET, >>> from gridwxcomp import calc_bias_ratios >>> input_path = 'merged_input.csv' >>> out_dir = 'monthly_ratios' >>> grid_variable = 'eto_mm' >>> calc_bias_ratios(input_path, out_dir, grid_var=grid_variable) To use custom station data, give the keyword argument ``station_var``, e.g. if we had climate daily time series data for precipitation with the column named "p" then, >>> calc_bias_ratios(input_path, out_dir, grid_var='prcp_mm', >>> station_var='p') This results in two CSV files in ``out_dir`` named "prcp_mm_summary_all_yrs.csv" and "prcp_mm_summary_comp_all_yrs.csv". Raises: FileNotFoundError: if input file is invalid or not found. KeyError: if the input file does not contain file paths to the climate station and grid time series files. This occurs if, for example, the :mod:`gridwxcomp.prep_input` and/or :mod:`gridwxcomp.download_gridmet_opendap` scripts have not been run first (if using gridMET data). Also raised if the given ``grid_var``, ``station_var``, or values of ``var_dict`` kwargs are invalid. ValueError: if the ``method`` kwarg is invalid. Note: Growing season and summer periods over which ratios are calculated are defined as April through October and June through August respectively. Note: If an existing summary file contains a climate station that is being reprocessed its monthly bias ratios and other data will be overwritten. Also, to proceed with spatial analysis scripts, the comprehensive summary file must be produced using this function first. If ``grid_var`` keyword argument is given but the ``station_var`` is left as default (None), the corresponding station variable is looked up from the mapping dictionary in :mod:`calc_bias_ratios.py` named :attr:`GRIDMET_STATION_VARS`. 
To use climate data that was **not** created by `pyWeatherQAQC <https://github.com/WSWUP/pyWeatherQAQC>`_ for station data and/or gridded data other than gridMET, which is where the default names are derived, the grid and station variable names need to be explicitly passed as function arguments. """ # ignore np runtime warnings due to calcs with nans, div by 0 np.seterr(divide='ignore', invalid='ignore') # specific for standard deviation of nans std_warning = "Degrees of freedom <= 0 for slice" warnings.filterwarnings("ignore", message=std_warning) method_options = ('long_term_mean','mean_of_annual') if method not in method_options: raise ValueError('{} is not a valid method, use one of: {}'.format( method, method_options) ) if var_dict is None: var_dict = GRIDMET_STATION_VARS if not var_dict.get(grid_var, None): print( 'Valid grid variable names:\n', '\n'.join([i for i in var_dict.keys()]), '\n' ) err_msg = 'Invalid grid variable name {}'.format(grid_var) raise KeyError(err_msg) if not os.path.isdir(out_dir): print('{} does not exist, creating directory'.format(out_dir)) os.mkdir(out_dir) if not os.path.isfile(input_path): raise FileNotFoundError('Input CSV file given was invalid or not found') input_df = pd.read_csv(input_path) # get matching station variable name if not station_var: station_var = var_dict.get(grid_var) # If only calculating ratios for a single cell, change console message if grid_ID: single_grid_cell_msg = f'For grid cell ID: {grid_ID}.' else: single_grid_cell_msg = '' print( f'Calculating ratios between climate station variable: {station_var}' f'\nand grid variable: {grid_var} using the "{method.replace("_"," ")}"' f' method. {single_grid_cell_msg}' ) # loop through each station and calculate monthly ratio for index, row in input_df.iterrows(): if not 'STATION_FILE_PATH' in row or not 'GRID_FILE_PATH' in row: raise KeyError('Missing station and/or grid file paths in '+\ 'input file. 
Run prep_input.py followed '+\ 'by download_gridmet_opendap.py first.') # if only doing a single grid cell check for matching ID if grid_ID and int(grid_ID) != row[grid_id_name]: continue # load station and grid time series files try: # if time series not from PyWeatherQaQc, CSV with 'date' column if not row.STATION_FILE_PATH.endswith('.xlsx'): station_df = pd.read_csv( row.STATION_FILE_PATH, parse_dates=True, index_col=station_date_name ) station_df.index = station_df.index.date # for joining # if excel file, assume PyWeatherQaQc format else: station_df = pd.read_excel( row.STATION_FILE_PATH, sheet_name='Corrected Data', parse_dates=True, index_col=0 ) except: print('Time series file for station: ', row.STATION_ID, 'was not found, skipping.') continue if not station_var in station_df.columns: err_msg = '{v} not found in the station file: {p}'.\ format(v=station_var, p=row.STATION_FILE_PATH) raise KeyError(err_msg) print( '\nCalculating {v} bias ratios for station:'.format(v=grid_var), row.STATION_ID ) grid_df = pd.read_csv(row.GRID_FILE_PATH, parse_dates=True, index_col=grid_date_name) # merge both datasets drop missing days result = pd.concat( [ station_df[station_var], grid_df[grid_var] ], axis=1 ) result = result.reindex(grid_df.index) result.dropna(inplace=True) # make datetime index result.index = pd.to_datetime(result.index) # apply year filter result, years_str = parse_yr_filter(result, years, row.STATION_ID) # for calculating ratios with long-term means later orig = result.copy() # monthly sums and day counts for each year result = result.groupby([result.index.year, result.index.month])\ .agg(['sum','mean','count']) result.index.set_names(['year', 'month'], inplace=True) # remove totals with less than XX days result = result[result[grid_var,'count']>=day_limit] # calc mean growing season and June to August ratios with month sums if grid_var in ('tmin_c','tmax_c'): grow_season = result.loc[ result.index.get_level_values('month').isin([4,5,6,7,8,9,10]),\ (station_var)]['mean'].mean() - result.loc[ result.index.get_level_values('month').isin( [4,5,6,7,8,9,10]),(grid_var)]['mean'].mean() june_to_aug = result.loc[ result.index.get_level_values('month').isin( [6,7,8]), (station_var)]['mean'].mean()\ -result.loc[result.index.get_level_values('month')\ .isin([6,7,8]), (grid_var)]['mean'].mean() ann_months = list(range(1,13)) annual = result.loc[ result.index.get_level_values('month').isin(ann_months),\ (station_var)]['mean'].mean() - result.loc[ result.index.get_level_values('month').isin(ann_months),\ (grid_var)]['mean'].mean() else: grow_season = result.loc[ result.index.get_level_values('month').isin([4,5,6,7,8,9,10]),\ (station_var)]['sum'].sum() / result.loc[ result.index.get_level_values('month').isin( [4,5,6,7,8,9,10]),(grid_var)]['sum'].sum() june_to_aug = result.loc[ result.index.get_level_values('month').isin( [6,7,8]), (station_var)]['sum'].sum()\ /result.loc[result.index.get_level_values('month')\ .isin([6,7,8]), (grid_var)]['sum'].sum() ann_months = list(range(1,13)) annual = result.loc[ result.index.get_level_values('month').isin(ann_months),\ (station_var)]['sum'].sum() / result.loc[ result.index.get_level_values('month').isin(ann_months),\ (grid_var)]['sum'].sum() ratio = pd.DataFrame(columns = ['ratio', 'count']) # ratio of monthly sums for each year if grid_var in ('tmin_c','tmax_c'): ratio['ratio']=\ (result[station_var,'mean'])-(result[grid_var,'mean']) else: ratio['ratio']=(result[station_var,'sum'])/(result[grid_var,'sum']) # monthly counts and stddev ratio['count'] 
= result.loc[:,(grid_var,'count')] if result.empty: print(f'WARNING: no data for site: {row.STATION_ID}, skipping') continue # rebuild Index DateTime ratio['year'] = ratio.index.get_level_values('year').values.astype(int) ratio['month']=ratio.index.get_level_values('month').values.astype(int) ratio.index = pd.to_datetime( ratio.year*10000+ratio.month*100+15,format='%Y%m%d' ) # useful to know how many years were used in addition to day counts start_year = ratio.year.min() end_year = ratio.year.max() counts = ratio.groupby(ratio.index.month).sum()['count'] # get standard deviation of each years' monthly mean ratio stdev = { month: np.std( ratio.loc[ratio.month.isin([month]), 'ratio'].values ) for month in ann_months } stdev = pd.Series(stdev, name='stdev') # mean of monthly means of all years, can change to median or other meth final_ratio = ratio.groupby(ratio.index.month).mean() final_ratio.drop(['year', 'month'], axis=1, inplace=True) final_ratio['count'] = counts final_ratio['stdev'] = stdev final_ratio['cv'] = stdev / final_ratio['ratio'] # calc mean growing season, June through August, ann stdev grow_season_std = np.std( ratio.loc[ratio.month.isin([4,5,6,7,8,9,10]), 'ratio'].values ) june_to_aug_std = np.std( ratio.loc[ratio.month.isin([6,7,8]), 'ratio'].values ) annual_std = np.std( ratio.loc[ratio.month.isin(ann_months), 'ratio'].values ) # get month abbreviations in a column and drop index values for m in final_ratio.index: final_ratio.loc[m,'month'] = calendar.month_abbr[m] # restructure as a row with station index months = final_ratio.month.values final_ratio = final_ratio.T final_ratio.columns = months final_ratio.drop('month', inplace=True) # add monthy means and counts into single row dataframe ratio_cols = [c + '_mean' for c in final_ratio.columns] count_cols = [c + '_count' for c in final_ratio.columns] stddev_cols = [c + '_stdev' for c in final_ratio.columns] coef_var_cols = [c + '_cv' for c in final_ratio.columns] # combine all monthly stats out_cols = ratio_cols + count_cols + stddev_cols + coef_var_cols final_ratio = pd.concat([ final_ratio.loc['ratio'], final_ratio.loc['count'], final_ratio.loc['stdev'], final_ratio.loc['cv'] ]) final_ratio.index = out_cols # transpose so that each station is one row in final output final_ratio = final_ratio.to_frame().T # assign non-monthly stats, growing season, annual, june-aug final_ratio['growseason_mean'] = grow_season final_ratio['summer_mean'] = june_to_aug final_ratio['annual_mean'] = annual # day counts for all years in non monthly periods final_ratio['growseason_count'] =\ counts.loc[counts.index.isin([4,5,6,7,8,9,10])].sum() final_ratio['summer_count'] =\ counts.loc[counts.index.isin([6,7,8])].sum() final_ratio['annual_count'] =\ counts.loc[counts.index.isin(ann_months)].sum() # assign stdev, coef. var. final_ratio['growseason_stdev'] = grow_season_std final_ratio['summer_stdev'] = june_to_aug_std final_ratio['annual_stdev'] = annual_std # coefficient of variation final_ratio['growseason_cv'] = grow_season_std / grow_season final_ratio['summer_cv'] = june_to_aug_std / june_to_aug final_ratio['annual_cv'] = annual_std / annual # start and end years for interpreting annual CV, stdev... 
final_ratio['start_year'] = start_year final_ratio['end_year'] = end_year # round numerical data before adding string metadata for v in final_ratio: if '_mean' or '_stdev' or '_cv' in v: final_ratio[v] = final_ratio[v].astype(float).round(3) else: final_ratio[v] = final_ratio[v].astype(float).round(0) # set station ID as index final_ratio['STATION_ID'] = row.STATION_ID final_ratio.set_index('STATION_ID', inplace=True) out = final_ratio.copy() out.drop(count_cols+stddev_cols+coef_var_cols, axis=1, inplace=True) # save grid ID for merging with input table, merge other metadata final_ratio[grid_id_name] = row[grid_id_name] final_ratio = final_ratio.merge(input_df, on=grid_id_name) # if more than one site in same gridcell- will have multiple rows # after merge, select the one for the current station if final_ratio.shape[0] > 1: final_ratio=final_ratio.loc[final_ratio.STATION_ID==row.STATION_ID] final_ratio.reset_index(inplace=True) # for slicing with .at[0] # long term mean station to mean grid ratio calc as opposed to mean of # annual ratios- default less bias potential if method == 'long_term_mean': month_means = orig.groupby(orig.index.month).mean() month_means['month'] = month_means.index for m in month_means.index: month_means.loc[m,'month'] = f'{calendar.month_abbr[m]}_mean' month_means.set_index('month', inplace=True) if grid_var in ('tmin_c','tmax_c'): month_means['ratios'] =\ month_means[station_var] - month_means[grid_var] else: month_means['ratios'] =\ month_means[station_var] / month_means[grid_var] long_term = month_means.drop([station_var, grid_var],1).T # non-monthly periods long-term mean to mean ratios grow_season = orig.loc[orig.index.month.isin([4,5,6,7,8,9,10])] summer_season = orig.loc[orig.index.month.isin([6,7,8])] if grid_var in ('tmin_c','tmax_c'): long_term['growseason_mean'] =\ grow_season[station_var].mean()-grow_season[grid_var].mean() long_term['summer_mean'] =\ summer_season[station_var].mean()-summer_season[grid_var].mean() long_term['annual_mean'] =\ orig[station_var].mean() - orig[grid_var].mean() else: long_term['growseason_mean'] =\ grow_season[station_var].mean() / grow_season[grid_var].mean() long_term['summer_mean'] =\ summer_season[station_var].mean()/summer_season[grid_var].mean() long_term['annual_mean'] =\ orig[station_var].mean() / orig[grid_var].mean() # overwrite only mean ratios (keep stats from mean of annual ratios) overwrite = long_term.columns.intersection(final_ratio.columns) #return long_term, overwrite, final_ratio final_ratio[overwrite] = long_term[overwrite].values out[overwrite] = long_term[overwrite].values final_ratio['ratio_method'] = method # round numeric columns final_ratio = final_ratio.round({ 'LAT': 10, 'LON': 10, 'ELEV_M': 0, 'ELEV_FT': 0, 'STATION_LAT': 10, 'STATION_LON': 10, 'STATION_ELEV_M': 0 }) # check if day counts for non-monthly periods are too low, if assign na grow_thresh = 65 sum_thresh = 35 ann_thresh = 125 if final_ratio.at[0,'summer_count'] < sum_thresh: print('WARNING: less than:', sum_thresh, 'days in summer period', '\nfor station:',row.STATION_ID,'assigning -999 for all stats') cols = [col for col in final_ratio.columns if 'summer_' in col and '_count' not in col] final_ratio.loc[:,cols] = np.nan if final_ratio.at[0,'growseason_count'] < grow_thresh: print('WARNING: less than:',grow_thresh,'days in growing season', '\nfor station:',row.STATION_ID,'assigning -999 for all stats') cols = [col for col in final_ratio.columns if 'growseason_' in col and '_count' not in col] final_ratio.loc[:,cols] = np.nan if 
final_ratio.at[0,'annual_count'] < ann_thresh: print('WARNING: less than:',ann_thresh,'days in annual period', '\nfor station:',row.STATION_ID,'assigning -999 for all stats') cols = [col for col in final_ratio.columns if 'annual_' in col and '_count' not in col] final_ratio.loc[:,cols] = np.nan if comp: out[grid_id_name] = row[grid_id_name] out[grid_id_name] = final_ratio[grid_id_name].unique() # build comprehensive output summary comp_out = final_ratio comp_out.set_index('STATION_ID', inplace=True) # no longer need grid ID in short summary out.drop(columns=grid_id_name, inplace=True) # if comp False else: comp_out = comp # save output depending on options _save_output(out, comp_out, out_dir, grid_ID, grid_var, years_str) print( '\nSummary file(s) for bias ratios saved to: \n', os.path.abspath(out_dir) ) def arg_parse(): """ Command line usage of calc_bias_ratios.py which calculates monthly bias ratios between station climate and grid cells that correspond with each other geographically. Saves data to CSV files in the given output directory. If run later with new station data, bias ratios for new stations will be appended to existing output summary CSV. """ parser = argparse.ArgumentParser( description=arg_parse.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) optional = parser._action_groups.pop() # optionals listed second required = parser.add_argument_group('required arguments') required.add_argument( '-i', '--input', metavar='PATH', required=True, help='Input CSV file of merged climate/grid data that '+\ 'was created by running prep_input.py and '+\ 'download_gridmet_opendap.py') required.add_argument( '-o', '--out', metavar='PATH', required=True, help='Output directory to save CSV files containing bias ratios') optional.add_argument('-meth', '--method', metavar='', required=False, default='long_term_mean', help='ratio calc method "long_term_mean" or'+\ '"mean_of_annual"') optional.add_argument('-gin', '--grid-id-name', metavar='', required=False, default='GRIDMET_ID', help='Name of gridcell identifier if not using '+\ 'gridMET grid') optional.add_argument( '-y', '--years', metavar='', required=False, default='all', help='Years to use, single or range e.g. 
2018 or 1995-2010') optional.add_argument( '-gv', '--grid-var', metavar='', required=False, default='etr_mm', help='Grid variable name for bias ratio calculation') optional.add_argument( '-sv', '--station-var', metavar='', required=False, default=None, help='Station variable name for bias ratio calculation') optional.add_argument( '-sdn', '--station-date-name', metavar='',required=False,default='date', help='Date column name in station time series files if not using '+\ 'gridMET.') optional.add_argument( '-gdn', '--grid-date-name', metavar='', required=False, default='date', help='Date column name in grid time series files if not using gridMET.') optional.add_argument( '-id', '--grid-id', metavar='', required=False, default=None, help='Optional grid ID to calculate bias ratios for a single '+\ 'gridcell') optional.add_argument('-d', '--day-limit', metavar='', required=False, default=10, help='Number of days of valid data per month to '+\ 'include it in bias correction calculation.') optional.add_argument('-c', '--comprehensive', required=False, default=True, action='store_false', dest='comprehensive', help='Flag, if given, to NOT save comprehensive summary file with '+\ 'extra metadata and statistics with the suffix "_comp"') # parser.add_argument( # '--debug', default=logging.INFO, const=logging.DEBUG, # help='Debug level logging', action="store_const", dest="loglevel") parser._action_groups.append(optional)# to avoid optionals listed first args = parser.parse_args() return args if __name__ == '__main__': args = arg_parse() main( input_file_path=args.input, out_dir=args.out, method=args.method, grid_id_name=args.grid_id_name, grid_var=args.grid_var, station_var=args.station_var, station_date_name=args.station_date_name, grid_date_name=args.grid_date_name, grid_ID=args.grid_id, day_limit=args.day_limit, years=args.years, comp=args.comprehensive )
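The module docstring above already gives a doctest-style example of the Python API; a minimal script-level sketch of the same call, assuming merged_input.csv was produced by prep_input and download_gridmet_opendap as described there, would be:

# Minimal sketch of the Python API documented in the docstrings above;
# the input file and output directory names are placeholders.
from gridwxcomp import calc_bias_ratios

calc_bias_ratios(
    'merged_input.csv',       # created by prep_input + download_gridmet_opendap
    'monthly_ratios',         # output directory for the summary CSVs
    grid_var='eto_mm',        # station_var is looked up in GRIDMET_STATION_VARS
    method='long_term_mean',  # or 'mean_of_annual'
    years='all',
)
# Produces eto_mm_summary_all_yrs.csv and eto_mm_summary_comp_all_yrs.csv in monthly_ratios/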
46.330846
96
0.61694
4,961
37,250
4.456964
0.123161
0.032563
0.012663
0.01203
0.451088
0.40066
0.36362
0.333318
0.32061
0.29474
0
0.008443
0.287785
37,250
803
97
46.388543
0.824991
0.423624
0
0.225058
0
0
0.164078
0.010111
0
0
0
0
0
1
0.011601
false
0
0.018561
0
0.032483
0.023202
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edc4f04d08129c6528ed7f0c20d812230e0c3895
1,843
py
Python
wiiload/upload.py
fossabot/async-wiiload
a511ffe5646c2bd101a9e0ae064f6b3d35497fd3
[ "Apache-2.0" ]
null
null
null
wiiload/upload.py
fossabot/async-wiiload
a511ffe5646c2bd101a9e0ae064f6b3d35497fd3
[ "Apache-2.0" ]
1
2020-11-18T18:38:49.000Z
2020-11-18T18:38:49.000Z
wiiload/upload.py
fossabot/async-wiiload
a511ffe5646c2bd101a9e0ae064f6b3d35497fd3
[ "Apache-2.0" ]
1
2020-11-18T18:38:03.000Z
2020-11-18T18:38:03.000Z
import asyncio
import os
import struct
import zlib
from os import PathLike
from typing import List

WIILOAD_VERSION_MAJOR = 0
WIILOAD_VERSION_MINOR = 5


async def upload_bytes(dol: bytes, argv: List[str], host: str, port: int = 4299):
    """
    Uploads a file to a Wii.

    :param dol: The bytes of a file to upload to wii.
    :param argv: Arguments to send to wii. The first value is usually the name of the file.
    :param host: The Wii's hostname/ip
    :param port: The port that Homebrew Channel is listening at.
    :return:
    """
    c_data = zlib.compress(dol, 6)

    args = b"\x00".join(arg.encode("utf-8") for arg in argv)
    args += b"\x00"

    reader, writer = await asyncio.open_connection(host, port)

    writer.write(b"HAXX")
    writer.write(struct.pack("B", WIILOAD_VERSION_MAJOR))  # one byte, unsigned
    writer.write(struct.pack("B", WIILOAD_VERSION_MINOR))  # one byte, unsigned
    writer.write(struct.pack(">H", len(args)))    # bigendian, 2 bytes, unsigned
    writer.write(struct.pack(">L", len(c_data)))  # bigendian, 4 bytes, unsigned
    writer.write(struct.pack(">L", len(dol)))     # bigendian, 4 bytes, unsigned

    writer.write(c_data)
    writer.write(args)

    await writer.drain()
    writer.close()
    await writer.wait_closed()


async def upload_file(path: PathLike, argv: List[str], host: str, port: int = 4299):
    """
    Reads a file from disk and uploads it to a Wii.

    :param path: Path to a file to be uploaded.
    :param argv: Extra arguments to send to wii, after the name of the file.
    :param host: The Wii's hostname/ip
    :param port: The port that Homebrew Channel is listening at.
    :return:
    """
    with open(path, "rb") as f:
        dol = f.read()
    args = [os.path.basename(path)]
    args.extend(argv)
    return await upload_bytes(dol, args, host, port)
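A short usage sketch for the two coroutines above; the package import path, Wii address, and .dol filename are assumptions, not part of the original file:

import asyncio

from wiiload.upload import upload_file  # assumed package layout matching this file's repo path

async def send():
    # Sends boot.dol to the Homebrew Channel listener on its default port 4299;
    # upload_file prepends the file's basename to argv before delegating to upload_bytes.
    await upload_file("boot.dol", [], "192.168.1.100")

asyncio.run(send())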
31.237288
91
0.669018
287
1,843
4.240418
0.351916
0.072309
0.069844
0.086278
0.459326
0.405094
0.36894
0.267872
0.157765
0.157765
0
0.013122
0.214324
1,843
58
92
31.775862
0.827348
0.067282
0
0
0
0
0.024501
0
0
0
0
0
0
1
0
false
0
0.2
0
0.233333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edc7c2097af55e9aaf7fe9d4a5593d76f55f2e37
899
py
Python
WDCData/StockPankouDay.py
wangdecheng/QAStrategy
d970242ea61cff2f1a6f69545dc7f65e8efd1672
[ "MIT" ]
null
null
null
WDCData/StockPankouDay.py
wangdecheng/QAStrategy
d970242ea61cff2f1a6f69545dc7f65e8efd1672
[ "MIT" ]
null
null
null
WDCData/StockPankouDay.py
wangdecheng/QAStrategy
d970242ea61cff2f1a6f69545dc7f65e8efd1672
[ "MIT" ]
null
null
null
import pandas as pd
from QUANTAXIS.QAUtil import (
    DATABASE
)

_table = DATABASE.stock_pankou_day
date = '2021-11-30'  # use the last day: rows are bulk-inserted, so a value here proves the record exists


def exists(code, field='turn'):
    data = _table.find_one({'code': code, 'date': date})
    if data is None:
        return False
    if data.get(field) is None:
        return False
    return True


def exists_shizhi(code):
    return exists(code, 'shiZhi')


def query_fundamentals(codes, date):
    query_condition = {
        'date': date,
        'code': {
            '$in': codes
        }
    }
    item_cursor = _table.find(query_condition)
    items_from_collection = [item for item in item_cursor]
    df_data = pd.DataFrame(items_from_collection).drop(['_id'], axis=1)
    return df_data


if __name__ == "__main__":
    # print(exists_shizhi('300522'))
    # print(exists('300522'))
    print(query_fundamentals(['603501', '603986'], '2018-01-10'))
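A hedged sketch of how the existence checks above might be used to skip codes already bulk-inserted for the hard-coded date; the import path and the code list are hypothetical:

# Hypothetical caller, assuming the module above is importable as WDCData.StockPankouDay.
from WDCData.StockPankouDay import exists, exists_shizhi

codes = ['300522', '603501', '603986']                        # placeholder universe
missing_turn = [c for c in codes if not exists(c)]            # no 'turn' field stored yet
missing_shizhi = [c for c in codes if not exists_shizhi(c)]   # no 'shiZhi' field stored yet
print(missing_turn, missing_shizhi)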
24.972222
70
0.648498
117
899
4.735043
0.512821
0.032491
0.043321
0.061372
0
0
0
0
0
0
0
0.058239
0.216908
899
36
71
24.972222
0.728693
0.082314
0
0.071429
0
0
0.087591
0
0
0
0
0
0
1
0.107143
false
0
0.071429
0.035714
0.357143
0.035714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edc957ed816ee1c41b0420f5364024791ce07016
576
py
Python
Registradora/Caixa registradora.py
gabrielsoaresg/Projetos-Python
7f05a000c30a03fb9fbdb0f493e0a996ef7258f1
[ "MIT" ]
null
null
null
Registradora/Caixa registradora.py
gabrielsoaresg/Projetos-Python
7f05a000c30a03fb9fbdb0f493e0a996ef7258f1
[ "MIT" ]
null
null
null
Registradora/Caixa registradora.py
gabrielsoaresg/Projetos-Python
7f05a000c30a03fb9fbdb0f493e0a996ef7258f1
[ "MIT" ]
null
null
null
print("\033[1m=-=" * 15) print("\033[1;32mLojas Tabajara\033[m". center(51)) print("=-=\033[m" * 15) cont = 1 somaP = 0 while True: p = float(input(f"Produto {cont}: R$ ")) cont += 1 somaP += p if p == 0: break print(f"\033[1;32mTotal: R${somaP:.2f}\033[m") pagamento = float(input("\033[1mDinheiro: R$ \033[m")) while pagamento < somaP: pagamento = float(input("\033[1mDinheiro: R$ \033[m")) troco = pagamento - somaP if pagamento != somaP: print(f"\033[1;34mTroco: R$ {troco:.2f}\033[m") elif troco == 0: print("\033[1;31mNão há volta!")
27.428571
58
0.597222
92
576
3.73913
0.358696
0.069767
0.052326
0.05814
0.215116
0.215116
0.215116
0.215116
0
0
0
0.142251
0.182292
576
20
59
28.8
0.58811
0
0
0.1
0
0
0.375
0
0
0
0
0
0
1
0
false
0
0
0
0
0.3
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edcad0e725dcaee58a1b72be7bb3b88e8f32af90
3,196
py
Python
webserver.py
Ductapemaster/raspi_datalogger
7c5b54a6a7617fef816aa1410069fb755f167d13
[ "MIT" ]
null
null
null
webserver.py
Ductapemaster/raspi_datalogger
7c5b54a6a7617fef816aa1410069fb755f167d13
[ "MIT" ]
null
null
null
webserver.py
Ductapemaster/raspi_datalogger
7c5b54a6a7617fef816aa1410069fb755f167d13
[ "MIT" ]
null
null
null
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
import json
from datetime import datetime
from influxdb import InfluxDBClient

import secrets
import settings

influx_client = InfluxDBClient(secrets.influx_database_server,
                               secrets.influx_database_port,
                               secrets.influx_username,
                               secrets.influx_password,
                               database=settings.influx_database_name)

# Flask setup
app = Flask(__name__)
Bootstrap(app)


@app.route("/")
def main():
    return render_template("graph_base.html",
                           title="Plots",
                           plots=[
                               {'content_title': "Temperature (deg C)", 'measurement_type': "temperature"},
                               {'content_title': "Humidity (%)", 'measurement_type': "humidity"},
                               {'content_title': "Pressure (hPa)", 'measurement_type': "pressure"},
                               {'content_title': "CO2 (ppm)", 'measurement_type': "co2"},
                           ])


@app.route("/data")
def data():
    mtype = str(request.args.get('type'))
    start_utc = datetime.fromtimestamp(int(request.args.get('start')) / 1000.)
    end_utc = datetime.fromtimestamp(int(request.args.get('end')) / 1000.)

    try:
        query = "SELECT value FROM {} \
                 WHERE time >= \'{}\' AND time <= \'{}\' \
                 tz('America/Los_Angeles');".format(mtype, start_utc, end_utc)
        print(query)
        measurements = influx_client.query(query, epoch='u')
    except Exception as e:
        print("Influx fetch error: {}".format(e))
        measurements = []

    json_data = {
        'cols': [{
            'id': 'Timestamp',
            'label': 'Timestamp',
            'type': 'date',
        }, {
            'id': mtype,
            'label': "{} ({})".format(mtype, settings.units[mtype] if mtype in settings.units.keys() else "unitless"),
            'type': 'number',
        }],
        'rows': [],
    }

    for m in measurements.get_points():
        ts = datetime.fromtimestamp(m['time'] / 1000000)
        time_str = "Date({},{},{},{},{},{},{})".format(
            ts.year, ts.month - 1, ts.day, ts.hour, ts.minute, ts.second,
            int(ts.microsecond / 1000.),
        )
        row = {'c': [
            {'v': time_str},
            {'v': m['value']},
        ]}
        json_data['rows'].append(row)

    json_data = json.dumps(json_data)
    return json_data


if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=False)
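The /data route above expects a measurement type plus start/end as millisecond epoch timestamps and returns a Google-Charts-style DataTable as JSON. A hedged client-side sketch (the server address, default Flask port, and 24-hour window are placeholders):

# Hypothetical client for the /data endpoint above.
import time
import requests

end_ms = int(time.time() * 1000)
start_ms = end_ms - 24 * 60 * 60 * 1000  # last 24 hours

resp = requests.get(
    "http://localhost:5000/data",
    params={"type": "temperature", "start": start_ms, "end": end_ms},
)
table = resp.json()  # {'cols': [...], 'rows': [{'c': [{'v': 'Date(...)'}, {'v': value}]}, ...]}
print(len(table["rows"]), "points")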
31.333333
122
0.431164
261
3,196
5.099617
0.425287
0.030053
0.031555
0.040571
0.061608
0.061608
0.061608
0
0
0
0
0.01464
0.444305
3,196
101
123
31.643564
0.734797
0.003442
0
0
0
0
0.130735
0.008171
0
0
0
0
0
1
0.023256
false
0.011628
0.081395
0.011628
0.127907
0.023256
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edcce2b8aaea1ebf0e1d0a125ecc6f07e55bf3eb
3,284
py
Python
db_wrapper/tests/sqlalchemy_test.py
Alecyrus/Ares
228c602f41d9ad1cfdc9f9bc25964b6bcc9d746b
[ "MIT" ]
2
2017-04-01T07:05:23.000Z
2017-09-09T02:19:50.000Z
db_wrapper/tests/sqlalchemy_test.py
Alecyrus/Ares
228c602f41d9ad1cfdc9f9bc25964b6bcc9d746b
[ "MIT" ]
null
null
null
db_wrapper/tests/sqlalchemy_test.py
Alecyrus/Ares
228c602f41d9ad1cfdc9f9bc25964b6bcc9d746b
[ "MIT" ]
null
null
null
import sys, os.path
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# from brett_sqlalchemy import *
# Cannot perform a relative import when its parent module is not loaded,
# so I have to change the system's import path.
from main import *

import unittest
from sqlalchemy.ext.declarative import declarative_base
import os
import json

Base = declarative_base()

sql_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'db_config.json')
sql_config = open(sql_config_path, 'r')
sql_config = json.load(sql_config)

connection_string = sql_config['server'] + '://' + sql_config['user'] + ':' + sql_config['password'] + '@' + sql_config['address'] + '/' + sql_config['database'] + sql_config['extra']
mode = sql_config['mode']

echo = False
# if mode == 'develop' or 'debug':
#     echo = True

provider = ConnectionProvider(connection_string, echo=echo)
configure_provider(provider)
provider.start()
engine = provider._engine


class ModelA(Base, TimeStampMixin, SoftDeleteMixin):
    __tablename__ = 'test_model_a'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50))


class ModelB(Base, TimeStampMixin, SoftDeleteMixin):
    __tablename__ = 'test_model_b'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50), unique=True)


metadata = Base.metadata
metadata.create_all(engine)


class TestSession(unittest.TestCase):
    # @classmethod
    # def setupClass(cls):
    #     metadata = Base.metadata
    #     metadata.create_all(engine)
    #
    # def test_normal_session(self):
    #
    #     @db()
    #     def add_one(name, session):
    #         instance_a = ModelA(name=name)
    #         session.add(instance_a)
    #
    #     @db()
    #     def query_one(name, session):
    #         instance_a = session.query(ModelA).filter_by(name=name).first()
    #         if hasattr(instance_a, 'name'):
    #             return instance_a.name
    #         return None
    #
    #     add_one('Eureka')
    #     name = query_one('Eureka')
    #     self.assertEqual(name, 'Eureka')

    def test_nest_transaction_(self):

        @db
        def insert_multi_success(session, outer):
            insert_one('Spring', outer=outer)
            insert_one('Summer', outer=outer)
            insert_one('Autumn', outer=outer)

        @db
        def insert_multi_fail(session, outer):
            insert_one('Winter', outer=outer)
            insert_one('Winter', outer=outer)
            insert_one('Peace', outer=outer)

        @db
        def insert_one(name, session, outer):
            instance_a = ModelB(name=name)
            session.add(instance_a)

        @db
        def query_one(name, session, outer):
            instance_a = session.query(ModelB).filter_by(name=name).first()
            if hasattr(instance_a, 'name'):
                return instance_a.name
            return None

        insert_multi_success()
        spring = query_one('Spring')
        summer = query_one('Summer')
        autumn = query_one('Autumn')
        self.assertEqual(spring, 'Spring')
        self.assertEqual(summer, 'Summer')
        self.assertEqual(autumn, 'Autumn')

        insert_multi_fail()
        winter = query_one('Winter')
        self.assertFalse(winter)
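The @db decorator, ConnectionProvider, and the mixins come from main, which is not part of this file. Purely as an illustrative sketch (not the project's implementation), a session-injecting decorator compatible with the calls in the test could look like the following: an outer session is reused for nested calls, and a failure in the outermost unit of work is rolled back rather than re-raised, since the test calls insert_multi_fail() without expecting an exception.

# Illustrative only -- NOT the decorator defined in `main`. `Session` stands in for a
# sessionmaker that the real module would bind to the provider's engine.
import functools
from sqlalchemy.orm import sessionmaker

Session = sessionmaker()  # real usage: Session.configure(bind=engine)

def db(func):
    @functools.wraps(func)
    def wrapper(*args, session=None, outer=None, **kwargs):
        if outer is not None:
            # Nested call: run inside the caller's session so the whole nest is one transaction.
            return func(*args, session=outer, outer=outer, **kwargs)
        own = Session()
        try:
            result = func(*args, session=own, outer=own, **kwargs)
            own.commit()    # commit only at the outermost level
            return result
        except Exception:
            own.rollback()  # e.g. the duplicate 'Winter' insert: the whole nest is discarded
            return None
        finally:
            own.close()
    return wrapper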
30.691589
184
0.636419
389
3,284
5.1491
0.303342
0.053919
0.041937
0.037943
0.400399
0.363455
0.291563
0.218672
0.18672
0.18672
0
0.001606
0.241474
3,284
106
185
30.981132
0.802489
0.221985
0
0.133333
0
0
0.066561
0
0
0
0
0
0.066667
1
0.083333
false
0.016667
0.1
0
0.366667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edce3a2737d93538f5eca2f4d5c2a1e9ad86dba9
31,641
py
Python
web/indexData/demandSrc.py
BennyJane/career-planning-info
406b41dc2913fce8623609efe4885abd9474cb6d
[ "MIT" ]
1
2021-01-11T01:37:18.000Z
2021-01-11T01:37:18.000Z
web/indexData/demandSrc.py
BennyJane/career-planning-info
406b41dc2913fce8623609efe4885abd9474cb6d
[ "MIT" ]
null
null
null
web/indexData/demandSrc.py
BennyJane/career-planning-info
406b41dc2913fce8623609efe4885abd9474cb6d
[ "MIT" ]
null
null
null
# 整理各个阶段的技能要求 """ 分大类: Python(语言类): 数据库: 服务器: 其他: """ fourth_chart = {'first': [ {'demands': ['精通Python语言', '熟悉Python多进程应用开发', '熟练掌握至少一门PythonWeb开发框架(Tornado、Django、Flask等)', '熟练使用mysql,redis,mongodb', '熟悉Linux、分布式、微服务、高性能Web服务开发、有一定的系统架构设计能力者优先', '拥有良好的代码习惯,结构清晰,命名规范,逻辑性强', ], 'salary': '10-12K', 'site': '北京'}, {'demands': ['精通python/shell编程,有良好的编程习惯,能够实现高效、可靠的代码', '负责服务端业务开发', '对软件工程深入理解,熟悉面向对象的分析和设计技术,熟悉linux平台', '有搜索引擎开发能力有限', '此岗位为外派到360的岗位项目稳定', '数据蜘蛛系统开发', '有政府项目开发背景', '具有良好的沟通能力和独立解决问题的能力,有积极进取的创新精神', '具有搜索相关/大并发/高性能系统开发经验,或具有深厚的算法基础理论知识者优先'], 'salary': '10-14K', 'site': '北京'}, {'demands': [ '具有机器/深度学习框架的开发经验,如sklearn、xgboost、TensorFlow、PyTorch等', '了解图像领域的常⻅任务及其发展脉络' '赋能客户使用OCRSDK完成自动建模和模型上线', '负责区域内OCR项目支持工作,包括POC项目、交付项目', '负责带领客户进行建模方案设计、项目实施、并进行项目管理,确保达成项目目标', '了解常⽤的图像处理技术,掌握opencv、pillow等图像处理库', '积极主动,能够在有限资源下追求最好的结果', '具有一年以上python开发及测试经验,熟练掌握numpy等常用python库', '熟悉linux开发环境,会使用常见的shell命令', '熟悉CLI,docker,git工具链', '了解C/S架构,了解restfulapi规范,http协议', '计算机或相关专业,了解常用数据结构和算法', '在GitHub上有1k+star的开源项目', ], 'salary': '8-10K', 'site': '北京'}, {'demands': [ '编写自动化测试用例', '熟悉pep8规范,有良好的编程风格', '至少一年及以上Python开发经验,有良好的编程能力', '熟练使用SQL,熟悉PostgreSQL、MySQL等数据库', '参与产品需求分析和评审', '负责模型设计、详细设计和接口设计与评审', '编码实现功能需求,修复Bug缺陷和代码评审' ], 'salary': '9-14K', 'site': '上海'}, {'demands': ['练操作Linux系统(熟练使用Linux命令)', '熟练使用VNC等远程访问工具(最好能对芯片电路板有一定了解)', '熟练使用Python,BAT等脚本语言', '熟练使用JIAR和HSD等Bug管理工具'], 'salary': '8-12K·13薪', 'site': '上海'}, {'demands': ['从事搜狗客户端产品的统计管理系统的设计和开发', '熟练掌握python脚本语言,使用过Django前端框架', '熟练使用HTML/CSS/Javascript/jquery/bootstrap等前端技术,代码符合W3C标准、兼容主流浏览器', '参与现有产品的设计和讨论', '熟悉Linux环境,熟悉SQL语句'], 'salary': '4-7K', 'site': '北京'}, {'demands': [ '良好的交流表达能力,能够配合客户部同事梳理解决疑难问题.', '有较好的协作能力和团队精神', '熟练使用shell或者Python进行服务器的维护,和自动化脚本的开发,有系统架构设计者优先.', '精通熟练python开发,使用过Django,Redis,Mysql,3年以上经历', '业界领先,全额社保,年终奖,六险一金', '专科及以上学历,软件工程相关专业,数据结构等基础扎实,有编码洁癖,很好的面向对象思维.', '负责公司socialCRM产品的开发' ], 'salary': '10-15K·13薪', 'site': '北京'}, {'demands': [ '熟练掌握python编程熟悉python相关web框架,如Django,Flask等', '硕士及以上,计算机', '编写并执行测试用例,准确跟踪模块的缺陷或问题', '熟悉Linux系统操作,可独立完成小型python项目的开发', '熟练运用测试方法及工具对模块及接口性能进行测试,准确发现性能短板', '熟练使用python性能测试工具与http接口测试工具,如lineprofiler,apachebench等', '加分项:具有python项目开发经验,或具有python自动化测试经验者优先', '支持项目python方面的相关开发工作' ], 'salary': '2-4K', 'site': '北京'}, {'demands': [ '工作积极主动,认真细致,有独立思考意识', '熟悉网络安全产品优先考虑', '熟悉linux开发环境,熟悉常用shell命令', '扎实的python基础,熟悉python常用库的使用至少连续一年python开发经验有良好的编码习惯', '熟悉django开发框架,了解restapi设计模式', 'python开发实习生,应届毕业生,初级开发工程师', '熟悉mysql数据库使用,熟悉至少一种缓存数据库使用', '良好的沟通能力,能与团队成员有效沟通' ], 'salary': '9-12K', 'site': '北京'}, {'demands': [ '有2年以上Python开发经验,能独立完成需求', '熟练掌握Python语言,熟悉python数据分析类库,有numpy', '熟悉python的RestfulAPI常用框架,及Pythonweb框架如:Django', '拥有良好的代码编写风格,要求结构清晰,命名规范,逻辑性强,了解技术文档编写规范具有良好的学习能力', '熟悉Linux系统,熟悉常用爬虫框架,如Scrapy,webmagic等', '熟悉数据库mysql' ], 'salary': '10-15K', 'site': '北京'}, {'demands': [ '负责分布式的网络爬虫应用,实现大规模文本,进行多平台信息的抓取和分析', '图像数据的抓取、抽取,去重、分类、垃圾过滤、解析入库等工作', '负责基于电商风控引擎大数据平台批量自动生成数据分析报告', '支持数据分析' ], 'salary': '8-13K', 'site': '西安'}], 'second': [ {'demands': ['4熟悉mysql等关系数据库的使用', '1参与公司产品后台服务的设计、开发、优化等研发工作,保证产品的质量和开发进度', '1-3年以上python开发经验或其他语言经验丰富,基础扎实,有意转python开发者亦可', '研究新兴技术,对产品进行持续优化', '熟悉pythonweb框架,django、tornado、flask等任意一种', '熟悉linux,git的使用'], 'salary': '15-25K', 'site': '北京'}, {'demands': ['3年以上python开发经验', '熟悉Linux指令,git管理', '熟悉mySQL', 'python多线程,多进程,会flask框架', '有大数据和机器学习工作经验的优先考虑', '华为大数据项目,AI领域,非外派,软通总部办公'], 'salary': '12-20K', 'site': '北京'}, {'demands': ['参与公司相关产品的架构优化,性能优化及其模块的技术实现', '计算机相关专业毕业3年以上Python开发经验', '负责所属模块的前后端开发', '良好的代码风格和开发习惯', '熟悉常见的存储方案,包括MySQL', '性格积极乐观,诚信,较好的沟通能力,文档整理能力', 
'良好的问题理解能力,能够理解以及处理复杂逻辑', '具备强烈的进取心', '熟悉常见的Web框架,包括Django,Tornado,Flask等', '负责公司智能管理信息化系统模块的功能规划', '熟悉主流前端框架,了解React', '具备OA系统'], 'salary': '20-40K·16薪', 'site': '上海'}, {'demands': ['精通异步网络编程,多线程编程,HTTP协议,异步框架Async,集群与负载均衡,消息中间件等技术', '5年以上Python开发经验', '根据产品需求,进行系统设计和编码,持续对系统架构进行改造和优化', '熟练使用Linux操作系统,具有基础服务维护能力', '理解并能应用RESTFul规范,具备较强的编程能力和良好的编码风格、结构清晰,命名规范,逻辑性强,代码冗余率低', '根据需求制定技术方案,项目计划以及开发,学习研究,并将之应用到工作当中', '熟练使用Mysql,Redis、PostgreSQL等', '负责公司OCR模块接口的功能开发和代码维护,能独立完成子系统和功能模块开发,编写完整的接口文档', '熟悉常用Web服务器软件如ApacheNginx等', '熟悉Python编程语言,熟悉flaskdjangotornado其中至少一种Web开发框', '熟悉网页抓取原理及技术,能够总结分析不同网站,网页的结构特点及规律'], 'salary': '20-25K·13薪', 'site': '上海'}, {'demands': ['熟悉Mysql的使用,优化,及常用的非关系型数据库', '有运维,部署相关操作经验', '参与公司项目的设计和开发', '熟练使用Python进行开发,至少熟悉一种后端框架,Django或者Flask,有Django项目开发者优先', '参与服务端性能的调优和迭代', '熟练使用异步,消息队列', '有docker使用经验', '了解前端技术', '了解Linux环境开发,熟练使用常用命令'], 'salary': '15-25K·13薪', 'site': '上海'}, {'demands': ['熟悉Linux系统和网络知识,扎实的基本功,熟悉设计模式', '熟悉Python者优先,包括但不限于:sklearn,多线程,matplotlib等', '了解或希望从事深度学习、机器学习方向相关工作者优先', '有良好的问题解决能力,难题公关经验,能主动推动项目的进展并达成项目目标'], 'salary': '11-16K·13薪', 'site': '北京'}, {'demands': ['熟悉操作系统和网络知识', '熟悉Docker,Kubernetes等容器技术', '熟练掌握Python开发', '负责开源堡垒机Jumpserver(https://github.com/jumpserver/jumpserver/)的开发工作', '了解angular,阅读过开源代码,活跃于开源社区者优先', '熟悉LinuxShell编程', '有良好的数据结构和算法基础', '有丰富的Django开发经验', '熟悉jQuery'], 'salary': '20-30K', 'site': '北京'}, {'demands': ['(3)熟悉Linux平台,熟悉Linux操作系统运维和Linux系统管理,维护,使用', '(1)熟悉python语言', '(2)5年以上的软件开发经验', '熟练掌握python', '有大型互联网IT系统的项目开发', '主要技术要点:python、mysql数据库、docker、git、云平台网络', '熟练掌握微服务架构,熟悉容器编程', '熟悉云平台网络技术,包括物理网络(交换机', '(4)熟悉数据库表结构设计和使用,有分布式系统及异步任务系统开发经验者优先', '岗位要求说明', '有软件工程意识,熟悉面向对象编程的思想', '(5)在弹性计算、存储、数据库、网络、中间件、安全、操作系统、虚拟化、基础设施(数据中心/网络/服务器)等任一领域有一定理解'], 'salary': '12-19K', 'site': '北京'}, {'demands': ['熟悉Linux、Git、Markdown等常用开发环境及协作形式,熟悉项目文档管理,有相关项目经验者优先', '熟练使用Mysql/PostgreSQL,熟悉Nosql数据库如Redis等,有hadoop经验更佳', '有独立分析和解决问题的能力,有良好的团队合作精神和沟通能力', '编写简洁、高效、清晰和可测试的代码,并通过单元测试、集成测试和CodeReview等保障代码质量', '熟练掌握Flask、Django等Web框架,熟悉SQLAlchemy等ORM框架,及Pandas等数据处理框架', '-岗位职责:', '-任职要求:', '参与后台服务的设计、开发、优化、维护等研发工作'], 'salary': '15-25K', 'site': '北京'}, {'demands': ['精通分布式文件存储系统原理,有分布式存储系统运维开发经验优先,有seafile产品运维开发经验更佳', '熟悉Linux操作系统', '熟悉java开发更佳,有实际生产部署经验更佳', '负责研发运维工具,提升运维效率', '熟悉Python语言', '了解c\\\\c++语言,能够读懂代码更佳,熟练掌握vuereacthtmlcssjs更佳', '熟悉分布式文件存储系统原理有企业网盘运维经验', '帮助部署代码,承担一定的python代码开发', '负责大文件&文件云平台(企业网盘)的运维,保证高可用', '有一定的源码阅读经验,能够快速学习心的技术,能够阅读英文文档'], 'salary': '15-25K', 'site': '上海'}, {'demands': ['熟悉MySQL等关系型数据库,熟悉SQL语言', '有基本的计算机基础理论知识,熟悉常用数据机构与算法', '能够熟练使用Linux作为日常开发环境', '思路清晰、主动性强,有缜密的逻辑思维能力,快速的英文阅读能力及学习能力,良好的沟通能力', '对TCP/IP,HTTP等常用协议有基本的了解,能完整叙述一次HTTP请求从客户端到服务器端所经过的各个环节', '根据项目组需求,对已有的网站增加新的功能,bugfix等', '熟悉ES6,有Vue', '能熟练使用Python进行Web程序相关开发,掌握Django等相关框架'], 'salary': '13-15K', 'site': '上海'}, {'demands': ['熟练掌握Python后端开发,掌握unitest测试框架,有测试代码开发经验优先;', '熟练掌握数据结构', '负责Python自动化开发', '负责指导业务团队自动化脚本开发,涉及UI界面类自动化', '熟悉Linux操作系统,TCPIP协议', '构建自动化脚本开发支撑库,提升自动化脚本开发效率'], 'salary': '12-23K', 'site': '上海'}, {'demands': ['负责后台系统的开发', '精通Python,掌握Django框架,熟悉djangorestframework', '优化HDFS,Impala,Hive的读写性能', '熟悉MySQL或Oracle数据库', '要求有海量数据处理经验', '对历史数据进行清洗,转换', '有其他Web后台使用经验或其他语言如Go,Java优先', '优化系统在海量数据下运行速度', '熟悉Hadoop生态,掌握HDFS、Hive、Impala使用方式优先'], 'salary': '20-40K', 'site': '上海'}, {'demands': ['负责运营后台的网站前后端维护及功能实现', '协助负责游戏服务端功能实现,性能优化等', '熟悉django,html,css', '熟悉多种数据库技术,包括MySQL,Redis,MongoDB等', '熟悉python及相关的库,熟悉linux环境', '具备服务器性能及代码性能优化能力,具备高并发流量下的请求处理经验', '保证服务端的稳定性', '相关的工具开发,相关文档的书写'], 'salary': '12-24K·13薪', 'site': '北京'}, {'demands': 
['熟悉NoSQL技术,如Redis/MongoDB等,有Mongokit/MongoEngine使用经验尤佳', '熟悉TypeScript/Rust/Go等', '有自己的开源Python开发项目', '负责业务规则引擎和ApiEngine的研发', '主要工作内容:', '负责同城智能协作配送SaaS产品PaaS系统开发和维护工作', '二年以上Python完整项目开发经验', '负责基于k8s的基础服务系统开发', '有激情爱学习愿意提升,愿意以创业心态做事', 'Flask/Django/Tornado/Gevent/Nsq/ZeroRPC/ZeroMQ以上技术至少有2项以上的开发应用经验', '展示自己的Github项目', '趣钱包等金融产品的研发'], 'salary': '11-20K', 'site': '北京'}, {'demands': ['熟练掌握Python,Django/Tonado,MySQL/PostgreSQL,Redis等组件', '独立思考,有产品意识,能提出系统改善和产品优化者优先', '深入了解TCP/UDP协议,进程间通讯编程,熟悉各类服务器架构并能合理的应用', '负责在线大流量高并发系统设计和性能调优', '3年以上开发项目经验,至少3年的Python开发经验,有异步编程开发经验', '【岗位职责】', '了解前端相关技术,VUE', '负责高质量的设计和编码工作,承担重点', '负责系统的技术方案', '【任职要求】', '熟悉大数据相关工具ES者优先', '具有良好的数据结构和算法基础,具备扎实的编程能力', '负责体育比分产品后台系统和接口的研发,形成规范化的软件代码和单元测试文档', '根据产品规划,制定后台系统的开发'], 'salary': '20-25K·14薪', 'site': '北京'}, {'demands': ['熟悉一种或多种数据库者优先,如Oracle', '有一年及以上Python开发经验', '有linux操作系统'], 'salary': '12-24K', 'site': '北京'}, {'demands': ['设计和开发基于KVM虚拟机的高可用(HA)功能,保证用户业务的连续性', '设计和开发大规模集群的异步任务调度中心,提供高并发且稳定的调度功能', '撰写细致的设计文档,并对其他同事的代码进行审查', '具有2年或以上软件开发工作经验', '具有Web后台开发经验', '计算引擎研发工程师', '设计和开发基于KVM、VMware、Docker/Kubernetes等虚拟化和容器化技术的统一管理平台', '有Kubernetes的开发经验或代码贡献者', '有大规模集群系统的开发经验,对软件高可用,高并发有深刻的理解', '具有基于以下至少一种软件开发的经验,包括MySQL、MongoDB、Redis、Cassandra、ElasticSearch、ZooKeeper等', '代码风格干净简洁,具有极高的软件质量标准', '热爱编程,具有以下任一种语言的扎实的编程经验:Python、Golang、Java', '有基于KVM、Qemu、Libvirt、Openstack等项目开发的经验', '与测试、产品、售前和售后部门密切配合,不断提升产品质量和竞争力', '具有独立完成复杂功能的经历,能够完成完备的设计文档,清晰的接口定义,能够有效的进行任务拆分', '设计和实现健壮、清晰的RESTfulAPI', '熟悉Linux环境编程,理解I/O模型、事件模型、协程、线程池等基本概念,具有异步网络编程经验'], 'salary': '15-30K·14薪', 'site': '北京'}, {'demands': ['负责公司产品的功能开发', '能够按照规范化流程进行项目实施,能够做好项目代码质量的管控', '熟悉Flask', '熟练掌握Python开发语言', '配合完成编制项目实施计划,协调资源并按计划推进项目实施工作,按时按质交付', '熟悉主流Windows/Linux攻防技巧者优先', '熟悉信息安全产品,有信息安全行业工作经验者优先', '乐于分享,积极参加技术讨论交流会,活跃团队内技术氛围'], 'salary': '15-20K', 'site': '北京'}, {'demands': ['有大数据和机器学习工作经验的优先考虑', '熟悉mySQL,redis,elasticsearch,http,websocket,webhook', '熟悉Linux指令,git管理', 'python多线程,多进程,flask框架'], 'salary': '12-20K', 'site': '北京'}, {'demands': ['参与相关业务', '熟练使用Redis', 'jinjasqlalchemy熟练使用', '负责产品后端设计', 'Mysql熟练使用并且有大数量处理经验', '有优秀的解决问题的能力,有很强的学习能力', '2年以上Python开发经验,具有实际项目开发经验,承担过核心开发任务', '根据产品需求和技术演进,制定技术方案,项目计划,并制定相应方案执行'], 'salary': '15-25K', 'site': '北京'}, {'demands': ['负责分析业务领域比较复杂的问题,根据业务需求选择技术解决方案', '对系统及平台进行完善维护,包括功能改进、系统优化及技术支持等', '熟悉SOA架构,具备HTTP,TCP等网络服务端开发经验', '编写符合规范的功能结构定义、需求说明、开发设计等技术文档', '有成为全栈的潜力和强大的自我学习能力希望成为进可攻前,退可守线的全栈大牛', '能够独立完成业务设计和相关技术实现,分析并解决开发过程中的问题', '熟悉Nginx、Apache、Tomcat、HAproxy等', '熟悉Docker容器技术和OpenStack架构者优先', '精通Python语言,两年及以上Python开发经验,能解决实际开发中遇到的问题', '精通Django、Tornado、Flask等主流开发框架(之一)', '熟练操作Linux,能进行脚本编程和系统维护', '掌握MySql、MongoDB、Redis等数据库的使用,同时熟悉NoSQL类型数据库者优先', '思路清晰,具备服务意识和良好的沟通能力、理解能力和团队合作能力', '有良好的编程习惯,包括好的设计文档,单元测试,代码审查,适应敏捷开发模式', '参与平台项目的设计与开发工作,包括需求分析、系统设计、编码和单元测试等工作', '能够独立完成详细设计及编码、进行代码审查'], 'salary': '15-25K', 'site': '上海'}, {'demands': ['具备网络安全产品研发经验', '熟悉kvm和Docker容器技术,了解kvm和Docker技术框架,熟练使用kvm和Docker命令', '熟悉常用的数据库和缓存组件,包括但不限于Mysql、Redis、Mongo等', '需熟悉各类Linux操作系统,具备独立数据库设计并且调优的能力', '熟练使用Python常用框架Django、Tornado、Flask,熟悉RestfulAPI', '其他研发负责人安排的任务', '精通Python开发,并具有3年以上Python开发经验', '熟悉unix/linux环境开发,熟悉使用常用的shell命令', '产品研发及研发任务完成'], 'salary': '15-30K·14薪', 'site': '北京'}, {'demands': ['精通Python语言,代码风格良好,3年以上python开发经验;', '熟悉面向对象的软件设计及设计模式;', '熟悉Git工作流,可以和团队协同工作熟悉Linux下开发', '有openstack开发经验优先', '扎实的计算机基础,熟练掌握数据结构、算法、计算机网络、操作系统等基础知识', '精通数据模型的设计优化及常用DB的调优;', '精通常见的PythonWeb框架,包括但不限于Django'], 'salary': '13-25K', 'site': '北京'}, {'demands': ['负责服务、API开发', '负责Python后端服务开发,解决业务逻辑和数据产品相关业务', 
'熟悉NoSQL技术,如Redis/MongoDB等,能够独立地合理设计数据库结构,有Mongokit/MongoEngine使用经验尤佳;', '有独立开发项目,上线维护及架构能力', '业务规则引擎和ApiEngine的研发', '熟悉Pandas,对大数据数理统计建模有相关经验者优先', '有较强的数据建模能力,逻辑思维能力以及业务理解能力;', '有自己的开源Python开发项目', '参加业务线上运维,故障应急处理', '精通Linux操作系统、熟悉掌握Linux下常用命令,有Shell编程能力;', '展示自己的Github项目'], 'salary': '15-24K', 'site': '北京'}, {'demands': ['熟悉python语言,有2年使用经验,熟悉常用的web框架如flask', '熟练使用Linux命令', '熟练使用git等代码管理工具', '可适应出差', '基于Python开发WebService,包括设计、代码编写、单元测试、bug解决等', '熟悉c/c++编程优先', '有OCR、人脸识别开发经验优先', '熟悉HTML,CSS,Javascript等前端开发'], 'salary': '13-18K·13薪', 'site': '北京'}, {'demands': ['熟练掌握Django开发框架,熟悉其他Python开发框架者优先', '配合其他研发人员完成模块研发', '根据解决方案进行指定模块研发,并对其进行优化及维护', '3年以上Python开发工作经验,具有良好的编程风格,具备后端性能最优化和安全最大化的知识及能力,具有大型网站项目开发经验者优先', '优化现有产品的功能和体验细节', '【岗位职责】', '负责公司数据产品设计和代码实现', '计算机相关专业,专科以上国家正规院校毕业', '熟悉数据库,精通SQL和数据结构,具有MySQL等关系型数据库开发经验,有其他数据库PostgreSQL/MongoDB工作经验者优先', '熟悉Linux/Unix基本操作,能够编写shell脚本'], 'salary': '15-30K', 'site': '上海'}, {'demands': ['与后端开发人员合作,完成公司B端前端业务系统的研发', '配合项目经理完成相关任务目标', '计算机及相关专业,5年以上python开发经验,有完整项目开发经验或企业级产品开发经验优先,熟悉开发工作流程', '丰富的PyQt开发GUI经验', '有Python调用后端RestfulAPI的项目经验', '有Python读写USB串口的项目经验;', '有Python绘制动态曲线的经验,熟练使用matplotlib或其他类库绘制动态曲线', '负责相关技术实现文档的撰写', '与项目组的其他成员合作,能承受短期交付的压力', '分析并解决软件开发过程中的问题'], 'salary': '15-25K', 'site': '北京'}, {'demands': ['熟练掌握数据结构及算法', '有实际工程落地以及部署经验者优先', '收集整理客户反馈意见、产品实施过程问题,汇总成文档并进行问题追踪处理', '熟练掌握Python开发(必须)', '对客户进行产品使用培训、指导、问题解答和技术支持', '为客户快速接入OCR产品和服务,包括实施开发、数据对接、调试(需要会写Python)', '参与测试流程,并对产品提出问题与看法'], 'salary': '15-25K·14薪', 'site': '上海'}, {'demands': ['熟悉Web开发相关技术栈,Web框架', '本岗位负责公司AI平台的研发,通过提升AI平台的能力,提高公司AI解决方案的交付能力', '熟练掌握Python编程语言,及Python相关的WEB开发框架,比如Django', '2年以上的Python开发经验,同时掌握一门静态语言如C/C++/JAVA优先', '参与公司内部AI相关的创新工具的研发', '熟悉基本数据结构与常用算法了解机器学习相关算法优先', '熟悉MySQL,Redis,Mango数据库', '有Docker', '了解消息中间件的原理和开发(例如RabbitMQ)', '有分布式系统开发使用经验者优先', '对工作热情积极'], 'salary': '15-20K·14薪', 'site': '北京'}, {'demands': ['熟练使用python编程了解Django、tornado、Flask等常用的Web框架开发有一定的Git、VIM使用经验', '参与技术方案讨论和技术调研', '负责相关工具开发和相关文档的编写', '良好的沟通与表达能力、思路清晰,较强的动手能力与逻辑分析能力', '热爱工作,有良好的学习能力,能够接受新鲜事务,对新技术及新模块可以快速掌握运用', '负责服务器端后台设计、功能开发及维护', '负责服务器性能的优化,保证服务端稳定安全的运行', '熟练使用Linux系统,对算法、数据结构有一定了解', '产品上线后,保障运营平台的稳定,解决相关技术问题', '熟练掌握MySQL/Memcached/ElasticSerch等常用存储技术(数据库系统)'], 'salary': '20-40K·14薪', 'site': '北京'}, {'demands': ['五年以上Python开发经验,熟悉Python运行环境、运行原理,有JAVA开发经验优先', '持续对系统架构进行改造和优化', '按照要求编写设计文档', '熟悉敏捷开发,编写高质量的代码,构建可重复使用的代码以及公共库', '任职要求', '有自动化运维经验优先,比如CMDB,发布系统,配置中心,调度系统,监控系统,工单系统等', '能够及时快速处理服务器的各种突发问题', '职位描述(主要职责)', '根据产品功能模块设计,编写核心代码,并确保开发质量与进度'], 'salary': '18-29K', 'site': '北京'}, {'demands': ['熟悉python其他扩展模块', '参与量化平台', '精通SQL编写,熟练使用至少一种关系型数据库', '熟练掌握python,掌握numpy', '精通python语言,有扎实的编码能力,良好的编码习惯和学习能力', '有解决复杂的反爬限制实践经验', '完成需求', '根据产品或项目的要求'], 'salary': '17-26K·13薪', 'site': '上海'}, {'demands': ['岗位要求:熟悉Linux系统使用、配置,具备Shell、Python、Java、C中一种或多种编码能力工作地点:北京/西安', 'Python开发测试工程师'], 'salary': '15-30K', 'site': '北京'}, {'demands': ['可以按照人天或月薪支付费用皆可', '英文口语流利', '精通flask框架', '上海急需1名英文好的Python开发工程师,属于外派性质', '精通MySQL或者postgreaql,docker,', '大专及以上学历,至少3年以上Python开发经验'], 'salary': '13-15K', 'site': '上海'}, {'demands': ['参与产品沟通,需求分析,方案设计,程序开发,运营优化,架构设计,做到系统可控', '2年及以上Python实际项目经验,精通Web开发框架如Flask/Django', '熟悉Docker,Kubernetes,DevOps,ETL/ELT,ELK等相关技术加分', '良好的沟通能力加分,优秀的解决问题能力加分,不对个人技术路线设限', '海量数据大访问量的内存与存储系统', '精通MySQL或其他关系型数据库,精通Redis/MongoDB熟悉Nginx配置', '扎实的面向对象编程思想,模块化编程的思维,应用性能优化等方面问题的分析和解决能力', '负责移动互联网游戏服务器端PythonWebService开发,为全球用户提供可靠服务', '除Python外,至少还熟悉Perl/Java/C#中的一门编程语言,有实际编写经验'], 'salary': '11-22K', 'site': '北京'}, {'demands': ['分析重点行业客户的信息资料,构建客户资产信息知识图谱', '熟悉甲方行业客户的软/硬件信息者优先', '负责收集', 
'计算机', '熟练掌握Python/C/C++语言', '掌握Elasticsearch/MongoDB数据库的使用', '熟悉爬虫框架,能够从相关网站提取信息和进行数据分析', '建设情报分析平台,根据需求输出行业化情报信息'], 'salary': '12-20K', 'site': '北京'}, {'demands': ['熟悉Python的高级特性,具备良好的编码习惯,深入理解各种设计模式和应用场景', '负责并持续优化算法和AI底层模块封装的研发(类型:图像提取、图像匹配、图像内容分析、图像设计质量等)', '掌握Django,Tornado,Flask等一种主流框架,深入理解框架实现原理及特性', '搭建基于WEB内部机器学习训练平台,完成算法效率的提升和基础能力沉淀', '理解算法与应用之间的封装和整合,负责将模块进行产品化的架构设计、维护和研发', '熟悉Internet常用协议,如HTTP、TCP/IP、熟悉RESTful规范', '掌握至少一种关系型数据库,了解Docker运维知识', '数学功底和建模能力强,熟悉机器学习领域的常见原理以及AI适用场景、优点、缺点以及弥补办法', '有一定外文文献的阅读能力,对计算机视觉、计算机图形学有极大兴趣者优先'], 'salary': '20-30K·13薪', 'site': '上海'}, {'demands': ['了解C/C++编译、打包、发布流程,或有相关经验', '代码风格,符合Python官方规范,并且能轻松通过静态代码检查', 'python开发基本要求:', '了解测试基本概念,熟练使用一种测试框架', '有分布式编程经验', '有基于容器的集群管理、部署、运维经验', '适应能力强,热爱编程,勤奋努力', '理解Web编程,熟练使用至少一种框架', '有CI/CD经验', '理解容器技术,熟练使用DockerCLI,能开发Dockerfile', '熟练使用异步、并发编程', '了解Python生态,会打包发布Python软件', '熟练使用至少一种Linux发行版,能开发简单Shell脚本', '有开源项目经验', '理解CLI编程,熟练使用参数解析、日志打印、网络请求等基本操作'], 'salary': '11-21K', 'site': '上海'}, {'demands': ['熟练掌握mysql/redis数据库使用,熟悉linux操作系统及shell编程,熟悉git的使用', '熟悉微服务开发模式,了解docker\\k8s,具有良好的编码规范意识', '有熟悉的开发框架,具备服务化开发能力', '【岗位职责】', '负责子系统架构、模块化设计、分析,以及核心代码编写,提升软件性能、可靠性、可维护性、可扩展性', '熟练Python开发', '承担公有云、私有云、SDN/NFV、IOT等运营、运维系统中相关模块的分析、设计、开发工作'], 'salary': '15-30K·14薪', 'site': '杭州'}, {'demands': ['项目名称:华为大脑大型搜索引擎项目', '知识图谱数据采集爬虫开发', '负责DC模块的开发与部署', '具备较强的安全意识和一定安全风险控制能力', '保存数据到MongoDB', '使用coufluent-kafka模块替代kafka-python模块,解决分发时send的处理瓶颈', '该搜索引擎类似google搜索,通过用户搜索的query解析得到用户可能想要找到的网站、数据及相关信息', '设计爬虫策略和防屏蔽规则,解决字体干扰', '熟悉Python语言,能够熟练使用python的web框架设计网站,例如django等', '提升处理能力到每秒10w条数据,分发效率为每秒1000w条', '熟悉多线程', '分析网页结构', '有较强的逻辑思维能力,对技术有强烈的兴趣,具有良好的学习能力', '参与通用爬虫系统中爬虫模块的开发与维护', '熟悉Linux/Unix类开发环境,熟悉其常用命令的使用;熟悉MySQL,Redis,MongoDB', '采用多线程', '搜索引擎搜索引擎开发', '采用多进程的方式提高DC模块的数据处理能力', '工作主动性强,能承受一定的工作压力', '负责智能搜索中股票和汇率的后端开发与部署内容', '有过分布式任务调度管理(celery)的经验,可熟练编写高并发程序', '团队现有PYTHON工程师50人+后端及爬虫岗位都招聘', '消费kafka中上游解析的网页数据,通过不同的策略和规则过滤分发给网页', '具有良好的工作态度和职业道德', '具有较强的表达能力,能主动沟通'], 'salary': '13-26K', 'site': '深圳'}, {'demands': ['有测试平台,测试工具开发经验者优先', '精通python编程,有多个实际产品或项目开发经验,2年以上python应用经验', '熟悉自动化框架开发者优先'], 'salary': '15-30K', 'site': '杭州'}, {'demands': ['熟练Python或JAVA或Go语言进行编程', '良好的沟通、协调、学习、创新能力,对解决具有挑战性问题充满激情', '具有三年以上软件开发经验', '熟悉Linux,Docker,MySQL、Redis等开源系统应用', '持有华为开发者学院相关证书,有优先录取资格,详情见\xa0华为开发者学院\xa0' '人才计划(https://developer.huawei.com/consumer/cn/training/landings/kg97brca)', '具有相关产品开发和设计经验优先'], 'salary': '12-24K', 'site': '深圳'}, {'demands': ['熟练掌握python语言、熟悉python和shell脚本', '熟悉常用的数据结构和算法,mysql数据库使用和网络编程', '善于交流和表达,有良好团队合作精神'], 'salary': '16-20K', 'site': '深圳'}, {'demands': ['有参与大数据系统开发过程,包括参与需求设计、设计评审、编写测试方案和测试用例,搭建测试环境,进行测试结果和线上反馈等', '华为OD招聘', '具备一定的编程能力或者代码阅读能力,会python编程语言,熟悉django,flask,vue', '能够根据项目需求设计,开发测试工具提升测试效率'], 'salary': '13-16K·15薪', 'site': '东莞'}, {'demands': ['有数据分析,数据挖掘,文本挖掘相关经验', '熟悉mysql,memcache,redis等技术', '熟悉面向对象编程,具有良好的代码编程习惯', '熟练使用各类深度学习框架,具有Linux下GPU多节点多卡平台架构经验', '熟练掌握Python,代码能力扎实', '熟悉django/tornado/flask等一种或多种框架', '有一定的分布式、高并发系统开发或维护经验'], 'salary': '14-28K', 'site': '深圳'}, {'demands': ['熟悉Python定时任务框架:APScheduler,celery', '3,熟悉多线程编程,linux/flask/diango/tornado/redis/', '熟悉python开发框架:Scrapyshell'], 'salary': '11-22K·13薪', 'site': '深圳'}], 'third': [ {'demands': ['能够根据业务发展,对技术架构不断调整,持续优化', '熟练掌握JavaScript,熟练掌握至少一种常用前端框架,如React、AngularJs、Vue', '有扎实的计算机基础,有良好的团队合作能力,善于沟通,并具备独立解决问题的能力', '了解计算机视觉,机器学习或深度学习者优先', '熟练掌握数据的缓存和存储方案以及相关的开源组件,如MySQL,Redis,MongoDB等', '参与内部数据和测试平台的设计、开发与实现', '结合需求设计实现安全、稳定、易维护、易用的后台系统', '熟悉linux环境,熟练掌握Python开发', '熟悉至少一种主流Pythonweb框架的使用,如Tornado,Flask,Django等'], 'salary': '25-40K·14薪', 
'site': '北京'}, {'demands': ['能够独立主导研发组系统级别的需求开发', '岗位描述:运维自动化平台开发和一些软件的软件的二次开发', '熟练使用mysql命令及相关工具、熟悉linux基本操作命令', '精通使用django或其他web框架、熟悉tornado更佳', '熟悉各种Web前端技术,包括HTML5、XML、Ajax、CSS、Javascript、JQuery等', '扎实的计算机系统、算法、数据结构基础', '5年以上python开发经验'], 'salary': '25-40K·16薪', 'site': '北京'}, {'demands': ['精通python语言,有扎实的编码能力,深入理解任一python框架', '熟悉python扩展模块', '根据开发进度和任务分配,完成相应模块的开发、编程、测试任务,解决关键问题和寻找技术方案', '系统需求分析与设计,核心模块的开发工作', '熟练使用至少一种数据库(MySQL/PostgreSQL/Redis/MongoDB)', '有独立分析和解决问题的能力,有良好的团队合作精神'], 'salary': '24-35K', 'site': '上海'}, {'demands': ['服务端的基础架构优化及升级,不断提升代码质量、可扩展性和可维护性', '有良好的编码习惯,能做到结构清晰,命名规范', '有良好的产品意识,能够根据业务持续输出产品优化思路', '熟悉在Unix/Linux平台上的Python服务端编程有软件项目开发、迭代和实施经验;了解并使用过一种Pythonweb开发框架(Django、Flask等)', '参与公司产品后端的需求分析分解,设计及开发;', '熟悉Linux操作系统,能够进行日常服务的测试部署', '具备数据系统的规划设计及调优能力,熟悉常见关系数据库和非关系数据库'], 'salary': '25-50K', 'site': '北京'}, {'demands': ['负责Python技术的相关产品规划', '研究并跟踪IT前沿技术', '熟悉Python/C++/Java/Go任意一种语言,Django/Flask等任意Web框架的一种或者多种;', '掌握web页面交互流程,熟悉Javascript调试技术;', '熟悉常用算法和数据结构;', '熟悉MySQL数据库操作,熟悉SQL语句编写和性能调优;', '善于沟通,自学能力强,有强烈的求知欲,业务敏感,有独立解决分析', '搭建系统开发环境,完成系统框架和核心代码的实现,负责解决开发过程中的技术问题'], 'salary': '25-50K·13薪', 'site': '上海'}, {'demands': ['75%工作量是开发,25%工作量是运维,开发维护基础设施,构建云环境中的监控', '最好有云环境经验,熟悉ansible', '不限制计算机语言,python为佳'], 'salary': '30-50K·14薪', 'site': '上海'}, {'demands': ['熟悉Python,熟悉MySQL,熟悉ES;', '具备良好的沟通能力和团队协作精神,较强的学习能力和逻辑分析能力', '良好的计算机基础,良好的数据结构和算法基础;', '负责地图道路情报运营平台的开发'], 'salary': '25-50K', 'site': '北京'}, {'demands': ['熟悉常用数据库设计和开发,熟悉互联网各种类型数据交互模式', '具有python开发经验,掌握网络爬虫开发原理,有通用网站爬取及解析的经验', '熟悉敏捷开发流,具备良好的沟通能力和团队协作能力', '熟悉JavaWeb开发,SpringMVC.MyBatis等常用框架,了解IOC', '掌握HTML,JavaScript,H5,精通常见的反爬虫技术如文本混淆反爬虫', '进行分析和预测合规和安全的现状和发展趋势', '独立开发合规与安全态势感知平台:通过收集等级保护,合规,数据隐私,信息安全,代码泄漏,网络扫描等数据,', '熟悉RESTFULAPI开发和接口调用', '精通scrapy爬虫框架,对分布式爬虫'], 'salary': '25-35K', 'site': '上海'}, {'demands': ['熟练掌握pandas、numpy,熟悉数据处理', '主导重大项目的架构设计和核心模块设计及开发', '审核开发工程师系统设计和代码质量,制定后端技术规范和开发规范', '熟悉Linux系统,有Linux系统进行操作开发经验的优先', '具备扎实的开发水平和技术管理水平,能够管理中小型(10-20人)技术团队', '熟悉Django、Flask等框架,且对一个以上框架有深入理解,能对框架进行优化者优先', '熟悉互联网常用中间件原理,理解使用场景,对中间件有深入理解和实践者优先', '负责应用架构设计、把控及开发', '熟悉大规模Web应用开发,有一定的性能优化和系统安全的实践有大型互联网公司Web开发和性能调优经验优先', '熟悉MySQL数据库,掌握数据库优化者优先', '了解HTTP,TCP/IP等常用internet协议,熟悉Restful规范', '有全栈开发基础或经验者优先', '主导技术难题攻关,重构系统,保证高性能处理和系统的稳定性'], 'salary': '25-40K', 'site': '上海'}, {'demands': ['具有规范的流程思维意识,与公司产品其他部分联动,定义接口设计并跟进完成', '熟练使用python及linux环境,redis', '有以下经验之一者优先:1)熟练sql或hivesql者优先2)熟了解pythonspark者优先3)有项目部署经验者优先', '参与公司产品后端的需求分析分解,设计及开发,保证产品的稳定性及性能', '协助处理服务端开发中的常见问题', '具有良好的学习能力,有复杂应用的架构设计和研发者优先', '具有压力下工作的能力'], 'salary': '25-30K', 'site': '北京'}]}
43.703039
104
0.483708
1,870
31,641
8.197326
0.687701
0.014874
0.031378
0.003914
0.043773
0.030465
0.003653
0.003653
0
0
0
0.019421
0.389747
31,641
723
105
43.763485
0.773163
0.001391
0
0.150281
0
0.001404
0.51507
0.352751
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edce4f4141a5bb74893123cda4abba91dc993dbf
2,861
py
Python
Heap.py
13472889991/DataStructures-Algorithms
3eb219460f0f8108bb3c07c4de5544df412e189e
[ "MIT" ]
null
null
null
Heap.py
13472889991/DataStructures-Algorithms
3eb219460f0f8108bb3c07c4de5544df412e189e
[ "MIT" ]
null
null
null
Heap.py
13472889991/DataStructures-Algorithms
3eb219460f0f8108bb3c07c4de5544df412e189e
[ "MIT" ]
null
null
null
class Heap():
    def __init__(self, lst):
        self.lst = lst

    def __str__(self):
        return str(self.lst)

    # Returns left child index of node, runs in O(1) time. Returns None if D.N.E.
    def left(self, index):
        index += 1
        if 2 * index >= len(self.lst) + 1:
            return None
        return (2 * index - 1)

    # Returns right child index of node, runs in O(1) time. Returns None if D.N.E.
    def right(self, index):
        index += 1
        if 2 * index >= len(self.lst):
            return None
        return (2 * index)

    # Returns parent index of the node, runs in O(1) time. Returns None if D.N.E.
    def parent(self, index):
        if index == 0:
            return None
        index += 1
        return (index // 2 - 1)

    # Given a key, finds index of key. Runs in O(n) time, where n is the size of the heap.
    # Returns None if D.N.E.
    def find(self, key):
        for counter, value in enumerate(self.lst):
            if key == value:
                return counter
        return None

    # Appends a key to the end of the heap.
    def append(self, key):
        self.lst.append(key)

    # Changes the value at an index to a new key. (The original bounds check was
    # inverted and would have raised IndexError whenever it fired.)
    def change(self, index, key):
        if index < len(self.lst):
            self.lst[index] = key

    # max_heapify "fixes" the max heap at the index by swapping it with the
    # largest of its children, then recursing so the displaced key sifts all the
    # way down. (The original did a single swap only, which does not restore the
    # heap property.)
    def max_heapify(self, index):
        largest = index
        l = self.left(index)
        r = self.right(index)
        if l is not None and self.lst[index] < self.lst[l]:
            largest = l
        if r is not None and self.lst[largest] < self.lst[r]:
            largest = r
        if largest != index:
            self.lst[largest], self.lst[index] = self.lst[index], self.lst[largest]
            self.max_heapify(largest)

    def extract_max(self):
        self.lst[0], self.lst[len(self.lst) - 1] = self.lst[len(self.lst) - 1], self.lst[0]
        output = self.lst.pop()
        self.build_max_heap()
        return output

    # Builds a max heap bottom-up. Internal nodes must be heapified from the last
    # parent down to the root; the original looped upward from 0, which can leave
    # the maximum stranded below the root.
    def build_max_heap(self):
        size = len(self.lst)
        for i in range(size // 2 - 1, -1, -1):
            self.max_heapify(i)

    # heap_sort prints all elements in descending order by repeated extraction.
    def heap_sort(self):
        self.build_max_heap()
        for i in range(len(self.lst)):
            print(self.extract_max())

    # Mirror of max_heapify for min heaps. (The original referenced an undefined
    # name `largest` here; it should compare against `smallest`.)
    def min_heapify(self, index):
        smallest = index
        l = self.left(index)
        r = self.right(index)
        if l is not None and self.lst[index] > self.lst[l]:
            smallest = l
        if r is not None and self.lst[smallest] > self.lst[r]:
            smallest = r
        if smallest != index:
            self.lst[smallest], self.lst[index] = self.lst[index], self.lst[smallest]
            self.min_heapify(smallest)
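A minimal usage sketch of the Heap class above; the sample list is illustrative and not part of the source file:

# Usage sketch for the Heap class above (sample data is illustrative).
h = Heap([3, 1, 4, 1, 5, 9, 2, 6])
h.build_max_heap()        # arrange the list into a valid max heap
print(h)                  # the root (index 0) now holds the maximum, 9
print(h.extract_max())    # 9; removes the maximum and re-heapifies
h.heap_sort()             # prints the remaining elements in descending order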
31.43956
133
0.527787
409
2,861
3.638142
0.183374
0.159946
0.047043
0.037634
0.403898
0.335349
0.303091
0.290323
0.261425
0.261425
0
0.013646
0.359664
2,861
90
134
31.788889
0.798581
0.184201
0
0.19403
0
0
0
0
0
0
0
0
0
1
0.19403
false
0
0
0.014925
0.358209
0.014925
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edd3649e914776cccbee89a84d07e5264b3936d4
3,062
py
Python
locust_exporter.py
nobusugi246/locust-exporter
3d7511f61ee46da857eef1b9f305a92f820ab615
[ "MIT" ]
null
null
null
locust_exporter.py
nobusugi246/locust-exporter
3d7511f61ee46da857eef1b9f305a92f820ab615
[ "MIT" ]
null
null
null
locust_exporter.py
nobusugi246/locust-exporter
3d7511f61ee46da857eef1b9f305a92f820ab615
[ "MIT" ]
null
null
null
#!/usr/bin/python
from prometheus_client import start_http_server, Metric, REGISTRY
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily
import json
import requests
import sys
import time


class LocustCollector(object):
    def __init__(self, ep):
        self._ep = ep

    def collect(self):
        # Fetch the JSON from locust
        url = f'http://{self._ep}/stats/requests'
        try:
            response = requests.get(url).content.decode('Utf-8')
        except requests.exceptions.ConnectionError:
            print("Failed to connect to Locust:", url)
            exit(2)

        response = json.loads(response)

        yield GaugeMetricFamily('locust_user_count', 'Swarmed users',
                                value=response['user_count'])

        for err in response['errors']:
            metric = GaugeMetricFamily('locust_errors', 'Locust requests errors',
                                       labels=['path', 'method'])
            # 'occurences' (sic) is the key as it appears in Locust's stats JSON
            metric.add_metric([str(err['name']), str(err['method'])],
                              value=err['occurences'])
            yield metric

        if 'slave_count' in response:
            yield GaugeMetricFamily('locust_slave_count', 'Locust number of slaves',
                                    value=response['slave_count'])

        yield GaugeMetricFamily('locust_fail_ratio', 'Locust failure ratio',
                                value=response['fail_ratio'])

        metric = GaugeMetricFamily('locust_state', 'State of the locust swarm',
                                   labels=['state'])
        metric.add_metric([str(response['state'])], 1)
        yield metric

        yield GaugeMetricFamily('locust_current_response_time_percentile_50',
                                'Locust current_response_time_percentile_50',
                                value=response['current_response_time_percentile_50'])
        yield GaugeMetricFamily('locust_current_response_time_percentile_95',
                                'Locust current_response_time_percentile_95',
                                value=response['current_response_time_percentile_95'])

        stats_metrics_gauge = ['avg_content_length', 'avg_response_time', 'current_rps',
                               'max_response_time', 'median_response_time',
                               'min_response_time']
        stats_metrics_count = ['num_failures', 'num_requests']

        for mtr in stats_metrics_gauge:
            metric = GaugeMetricFamily('locust_requests_' + mtr, 'locust requests ' + mtr,
                                       labels=['path', 'method'])
            for stat in response['stats']:
                if 'Total' not in stat['name']:
                    metric.add_metric([str(stat['name']), str(stat['method'])], stat[mtr])
            yield metric

        for mtr in stats_metrics_count:
            metric = CounterMetricFamily('locust_requests_' + mtr, 'locust requests ' + mtr,
                                         labels=['path', 'method'])
            for stat in response['stats']:
                if 'Total' not in stat['name']:
                    metric.add_metric([str(stat['name']), str(stat['method'])], stat[mtr])
            yield metric


if __name__ == '__main__':
    # Usage: locust_exporter.py <port> <locust_host:port>
    if len(sys.argv) != 3:
        print('Usage: locust_exporter.py <port> <locust_host:port>')
        exit(1)
    else:
        try:
            start_http_server(int(sys.argv[1]))
            REGISTRY.register(LocustCollector(str(sys.argv[2])))
            print("Connecting to locust on: " + sys.argv[2])
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            exit(0)
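A sketch of launching and scraping the exporter above programmatically. The ports and host are illustrative assumptions: it presumes a Locust web UI reachable at localhost:8089 and a free local port 9646; scraping /metrics triggers LocustCollector.collect(), which will exit if Locust is unreachable.

# Hypothetical launch-and-scrape sketch for the exporter above.
# Assumes Locust's web UI is at localhost:8089 and port 9646 is free.
import time
import requests
from prometheus_client import start_http_server, REGISTRY

start_http_server(9646)                          # serve /metrics on :9646
REGISTRY.register(LocustCollector("localhost:8089"))
time.sleep(1)                                    # give the HTTP server a moment
print(requests.get("http://localhost:9646/metrics").text[:200])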
42.527778
174
0.693991
379
3,062
5.366755
0.30343
0.058997
0.056047
0.085546
0.342183
0.320551
0.240905
0.184857
0.146509
0.146509
0
0.008709
0.175049
3,062
71
175
43.126761
0.796516
0.031025
0
0.214286
0
0
0.307355
0.075574
0
0
0
0
0
1
0.035714
false
0
0.107143
0
0.160714
0.053571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edd3a35df965b681fd71b0b10561268de5b79d93
2,213
py
Python
checkio/Hermit/Hexagon Spiral/test_hexagon_spiral.py
KenMercusLai/checkio
c7702221e1bc0b0b30425859ffa6c09722949d65
[ "MIT" ]
39
2015-02-09T13:24:12.000Z
2019-05-16T17:51:19.000Z
checkio/Hermit/Hexagon Spiral/test_hexagon_spiral.py
KenMercusLai/checkio
c7702221e1bc0b0b30425859ffa6c09722949d65
[ "MIT" ]
1
2019-10-21T16:18:14.000Z
2019-10-21T16:18:14.000Z
checkio/Hermit/Hexagon Spiral/test_hexagon_spiral.py
KenMercusLai/checkio
c7702221e1bc0b0b30425859ffa6c09722949d65
[ "MIT" ]
22
2015-01-30T18:00:05.000Z
2021-05-22T02:57:23.000Z
import unittest

from hexagon_spiral import hex_spiral


class Tests(unittest.TestCase):
    TESTS = {
        "Basics": [
            {"input": [2, 9], "answer": 1, "explanation": 2},
            {"input": [9, 2], "answer": 1, "explanation": 2},
            {"input": [6, 19], "answer": 2, "explanation": 7},
            {"input": [5, 11], "answer": 3, "explanation": 1},
            {"input": [13, 15], "answer": 2, "explanation": 14},
            {"input": [11, 17], "answer": 4, "explanation": 1},
            {"input": [6, 4], "answer": 2, "explanation": 1},
            {"input": [42, 13], "answer": 5, "explanation": 4},
            {"input": [66, 81], "answer": 10, "explanation": 1},
            {"input": [76, 65], "answer": 10, "explanation": 7},
            {"input": [84, 78], "answer": 6, "explanation": 15},
            {"input": [92, 62], "answer": 1, "explanation": 0},
            {"input": [100, 1], "answer": 6, "explanation": 0},
            {"input": [200, 202], "answer": 2, "explanation": 0},
        ],
        "Extra": [
            {"input": [2, 8], "answer": 1, "explanation": 0},
            {"input": [9, 1], "answer": 2, "explanation": 2},
            {"input": [16, 19], "answer": 3, "explanation": 7},
            {"input": [55, 11], "answer": 6, "explanation": 17},
            {"input": [11, 15], "answer": 4, "explanation": 1},
            {"input": [21, 17], "answer": 4, "explanation": 6},
            {"input": [41, 13], "answer": 6, "explanation": 1},
            {"input": [77, 81], "answer": 4, "explanation": 79},
            {"input": [55, 65], "answer": 8, "explanation": 32},
            {"input": [92, 32], "answer": 8, "explanation": 0},
            {"input": [101, 1], "answer": 6, "explanation": 0},
            {"input": [300, 302], "answer": 2, "explanation": 0},
            {"input": [999, 998], "answer": 1, "explanation": 0},
            {"input": [84, 68], "answer": 10, "explanation": 37},
        ],
    }

    def test_Basics(self):
        for i in self.TESTS['Basics']:
            assert hex_spiral(*i['input']) == i['answer']

    def test_Extra(self):
        for i in self.TESTS['Extra']:
            assert hex_spiral(*i['input']) == i['answer']
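These tests run under the standard runner (python -m unittest test_hexagon_spiral). A quick spot check against the first "Basics" case looks like the sketch below; hex_spiral itself is not defined in this file, so the import is assumed to resolve:

# Spot check mirroring TESTS["Basics"][0]; assumes hexagon_spiral is importable.
from hexagon_spiral import hex_spiral

assert hex_spiral(2, 9) == 1   # expected answer taken from the test table above
print("ok")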
45.163265
65
0.456846
245
2,213
4.102041
0.261224
0.095522
0.118408
0.056716
0.310448
0.143284
0.055721
0
0
0
0
0.110323
0.299593
2,213
48
66
46.104167
0.538065
0
0
0.093023
0
0
0.298238
0
0
0
0
0
0.046512
1
0.046512
false
0
0.046512
0
0.139535
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edd44fd69a54eee9f7ca6b87c411e44a3378064a
6,995
py
Python
examples/py/tencent_road_map.py
KarcyLee/folium
c0fa6c217842f973037dc7ea8e871855069057f7
[ "MIT" ]
null
null
null
examples/py/tencent_road_map.py
KarcyLee/folium
c0fa6c217842f973037dc7ea8e871855069057f7
[ "MIT" ]
null
null
null
examples/py/tencent_road_map.py
KarcyLee/folium
c0fa6c217842f973037dc7ea8e871855069057f7
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tencent map demo sketches

from __future__ import print_function

import random

import folium
from folium.features import DivIcon
from folium.plugins import MarkerCluster, RotatedMarker, PolyLineTextPath, DirectedLine


def tencent_marker(out_dir="../../out"):
    """
    Tencent map: place markers.
    :param out_dir:
    :return:
    """
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
    folium.Marker(location=[31.35742, 120.94784],
                  icon=folium.Icon(color='blue', icon='ok-sign')
                  ).add_to(map_osm)
    folium.Marker(location=[31.32, 120.63],
                  icon=folium.Icon(color='red', icon='info-sign')
                  ).add_to(map_osm)
    file_path = "{}/tencent_roadmap.html".format(out_dir)
    map_osm.save(file_path)


def tencent_polyline(out_dir="../../out"):
    """Tencent map: polygon/polyline."""
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
    locs = [[31.387113, 120.929393],
            [31.364861, 120.609265],
            [31.226864, 120.511118],
            [31.269273, 120.750024],
            [31.264751, 120.598769],
            [31.299345, 120.742185],
            ]
    for loc in locs:
        folium.Marker(loc).add_to(map_osm)
    folium.PolyLine(
        locs,
        fill_color='high',
        fill=True,
        fill_opacity=0.6,
        stroke=False).add_to(map_osm)
    file_path = "{}/tencent_polyline.html".format(out_dir)
    map_osm.save(file_path)


def tencent_marker_with_number(out_dir="../../out"):
    """Tencent map: numbers drawn on markers."""
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, subdomains="012")
    locs = [[31.387113, 120.929393],
            [31.364861, 120.609265],
            [31.226864, 120.511118],
            [31.269273, 120.750024],
            [31.264751, 120.598769],
            [31.299345, 120.742185],
            ]
    for loc in locs:
        value = random.randint(0, 100)
        # text label
        folium.Marker(loc,
                      icon=DivIcon(icon_size=(150, 36),
                                   icon_anchor=(7, 20),
                                   html='<div style="font-size: 18pt; color : black">{}</div>'.format(value),
                                   )
                      ).add_to(map_osm)
        # circle
        map_osm.add_child(folium.CircleMarker(loc, radius=20))
    file_path = "{}/tencent_marker_with_number.html".format(out_dir)
    map_osm.save(file_path)


def tencent_marker_cluster(out_dir="../../out"):
    """Tencent map: marker cluster."""
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, subdomains="012")
    locs = [[31.387113, 120.929393],
            [31.364861, 120.609265],
            [31.226864, 120.511118],
            [31.269273, 120.750024],
            [31.264751, 120.598769],
            [31.299345, 120.742185],
            ]
    marker_cluster = MarkerCluster().add_to(map_osm)
    for loc in locs:
        folium.Marker(loc,
                      popup='Add popup text here.',
                      ).add_to(marker_cluster)
    file_path = "{}/tencent_marker_cluster.html".format(out_dir)
    map_osm.save(file_path)


def gaode_arrow(out_dir="../../out"):
    """
    Gaode map: arrow marker.
    :param out_dir:
    :return:
    """
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, tiles="Gaode")
    folium.RegularPolygonMarker(location=(31.25, 120.742185),
                                fill_color='red',
                                number_of_sides=3,
                                radius=10,
                                rotation=0).add_to(map_osm)
    file_path = "{}/gaode_arrow.html".format(out_dir)
    map_osm.save(file_path)
    return


def tencent_hexagon_with_number(out_dir="../../out"):
    """Tencent map: hexagon cluster with numbers."""
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, subdomains="012")
    locs = [[31.387113, 120.929393],
            [31.364861, 120.609265],
            [31.226864, 120.511118],
            [31.269273, 120.750024],
            [31.264751, 120.598769],
            [31.299345, 120.742185],
            ]
    marker_cluster = MarkerCluster().add_to(map_osm)
    for loc in locs:
        folium.Marker(loc,
                      popup='Add popup text here.',
                      ).add_to(marker_cluster)
    file_path = "{}/tencent_hexagon_with_number.html".format(out_dir)
    map_osm.save(file_path)


def tencent_hello(out_dir="../../out"):
    """Tencent map: hello-world demo with markers and a polyline."""
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
    locs = [[31.387113, 120.929393],
            [31.364861, 120.609265],
            [31.226864, 120.511118],
            [31.269273, 120.750024],
            [31.264751, 120.598769],
            [31.299345, 120.742185],
            ]
    for loc in locs:
        folium.Marker(loc).add_to(map_osm)
    folium.PolyLine(
        locs,
        fill_color='high',
        fill=True,
        fill_opacity=0.6,
        stroke=False).add_to(map_osm)
    file_path = "{}/tencent_hello.html".format(out_dir)
    map_osm.save(file_path)


def tencent_rotated_marker(out_dir="../../out"):
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
    locs = [[31.387113, 120.929393],
            [31.364861, 120.609265],
            [31.226864, 120.511118],
            [31.269273, 120.750024],
            [31.264751, 120.598769],
            [31.299345, 120.742185],
            ]
    for loc in locs:
        RotatedMarker(loc,
                      rotation_angle=45
                      ).add_to(map_osm)
    folium.PolyLine(
        locs,
        fill_color='high',
        fill=True,
        fill_opacity=0.6,
        stroke=False).add_to(map_osm)
    file_path = "{}/tencent_rotated_marker.html".format(out_dir)
    map_osm.save(file_path)


def tencent_directed_line(out_dir="../../out"):
    map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
    lines = [
        ([31.387113, 120.929393], [31.364861, 120.609265]),
        ([31.269273, 120.750024], [31.226864, 120.511118]),
    ]
    for line in lines:
        # _line = folium.PolyLine(
        #     line,
        #     weight=10,
        #     color='#8EE9FF'
        # ).add_to(map_osm)
        #
        # attr = {'fill': 'red'}
        # attr = {'font-weight': 'bold', 'font-size': '24', 'fill': 'red'}
        #
        # PolyLineTextPath(_line,
        #                  # text='\u25BA',  # triangle
        #                  # text='\u2708',  # airplane
        #                  text='►',
        #                  repeat=False,
        #                  offset=6,
        #                  center=True,
        #                  orientation=0,
        #                  attributes=attr
        #                  ).add_to(map_osm)
        _line = DirectedLine(
            src=line[0],
            dst=line[1],
            weight=10,
            color='#8EE9FF'
        ).add_to(map_osm)
        # _line = DirectedLine(
        #     src=lines[0][0],
        #     dst=lines[0][1],
        #     weight=10,
        #     # color='#8EE9FF'
        #     color='black'
        # ).add_to(map_osm)

    file_path = "{}/tencent_directed_line.html".format(out_dir)
    map_osm.save(file_path)


if __name__ == "__main__":
    print()
    tencent_marker()
    tencent_polyline()
    tencent_marker_with_number()
    tencent_marker_cluster()
    tencent_hexagon_with_number()
    gaode_arrow()
    tencent_hello()
    tencent_rotated_marker()
    tencent_directed_line()
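A minimal driver sketch for the helpers above. RotatedMarker, DirectedLine, and the "Gaode" tile set come from this folium fork, so only the plain-folium helpers are called here; the output directory is an illustrative assumption:

# Illustrative driver for the helpers above; writes HTML maps into ./out.
import os

out = "./out"
os.makedirs(out, exist_ok=True)
tencent_marker(out_dir=out)      # two plain folium markers
tencent_polyline(out_dir=out)    # markers plus a polyline
print(os.listdir(out))           # e.g. ['tencent_roadmap.html', 'tencent_polyline.html']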
30.814978
109
0.567262
883
6,995
4.288788
0.175538
0.055453
0.0338
0.046475
0.667283
0.639556
0.625825
0.599155
0.583311
0.555849
0
0.173725
0.274196
6,995
226
110
30.951327
0.571991
0.116798
0
0.522727
0
0
0.079927
0.037245
0
0
0
0
0
1
0.068182
false
0
0.037879
0
0.113636
0.015152
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edd4a7a68b6a7e9dfa514b2b3b2ebe8065ec98d0
19,804
py
Python
explainaboard/tasks/cws/eval_spec.py
Shadowlized/ExplainaBoard
45f1c27468e528f8f88df4b01e10253ba96d3a9b
[ "MIT" ]
255
2021-04-14T11:21:35.000Z
2022-03-27T01:59:05.000Z
explainaboard/tasks/cws/eval_spec.py
ROGERDJQ/ExplainaBoard
4a2a5aeb9cce33198aa748252f9206c6391a695c
[ "MIT" ]
128
2021-04-15T14:30:13.000Z
2022-03-31T18:22:40.000Z
explainaboard/tasks/cws/eval_spec.py
ROGERDJQ/ExplainaBoard
4a2a5aeb9cce33198aa748252f9206c6391a695c
[ "MIT" ]
27
2021-04-14T15:20:22.000Z
2022-03-28T07:21:54.000Z
from random import choices

import explainaboard.error_analysis as ea
import numpy
import pickle
import codecs
import os


def read_data(corpus_type, fn, column_no=-1, delimiter=' '):
    print('corpus_type', corpus_type)
    word_sequences = list()
    tag_sequences = list()
    total_word_sequences = list()
    total_tag_sequences = list()
    with codecs.open(fn, 'r', 'utf-8') as f:
        lines = f.readlines()
        curr_words = list()
        curr_tags = list()
        for k in range(len(lines)):
            line = lines[k].strip()
            if len(line) == 0 or line.startswith('-DOCSTART-'):  # new sentence or new document
                if len(curr_words) > 0:
                    word_sequences.append(curr_words)
                    tag_sequences.append(curr_tags)
                    curr_words = list()
                    curr_tags = list()
                continue

            strings = line.split(delimiter)
            word = strings[0].strip()
            tag = strings[column_no].strip()  # by default, we take the last tag
            # tag='B-'+tag
            tag = tag + "-W"
            curr_words.append(word)
            curr_tags.append(tag)
            total_word_sequences.append(word)
            total_tag_sequences.append(tag)
            if k == len(lines) - 1:
                word_sequences.append(curr_words)
                tag_sequences.append(curr_tags)
    # if verbose:
    #     print('Loading from %s: %d samples, %d words.' % (fn, len(word_sequences), get_words_num(word_sequences)))
    # return word_sequences, tag_sequences
    return total_word_sequences, total_tag_sequences, word_sequences, tag_sequences


def get_aspect_value(test_word_sequences, test_true_tag_sequences, test_word_sequences_sent,
                     test_true_tag_sequences_sent, dict_precomputed_path, dict_aspect_func):

    def get_sentential_value(test_true_tag_sequences_sent, test_word_sequences_sent):
        eDen = []
        sentLen = []
        for i, test_sent in enumerate(test_true_tag_sequences_sent):
            pred_chunks = set(ea.get_chunks(test_sent))
            num_entityToken = 0
            for pred_chunk in pred_chunks:
                idx_start = pred_chunk[1]
                idx_end = pred_chunk[2]
                num_entityToken += idx_end - idx_start

            # introduce the entity token density in sentence ...
            eDen.append(float(num_entityToken) / len(test_sent))

            # introduce the sentence length in sentence ...
            sentLen.append(len(test_sent))

        return eDen, sentLen

    dict_precomputed_model = {}
    for aspect, path in dict_precomputed_path.items():
        print("path:\t" + path)
        if ea.os.path.exists(path):
            print('load the hard dictionary of entity span in test set...')
            fread = open(path, 'rb')
            dict_precomputed_model[aspect] = pickle.load(fread)
        else:
            raise ValueError("can not load hard dictionary" + aspect + "\t" + path)

    dict_span2aspect_val = {}
    for aspect, fun in dict_aspect_func.items():
        dict_span2aspect_val[aspect] = {}

    eDen_list, sentLen_list = get_sentential_value(test_true_tag_sequences_sent,
                                                   test_word_sequences_sent)

    dict_pos2sid = ea.get_pos2sentid(test_word_sequences_sent)
    dict_ap2rp = ea.get_token_position(test_word_sequences_sent)
    all_chunks = ea.get_chunks(test_true_tag_sequences)

    dict_span2sid = {}
    dict_chunkid2span = {}
    for span_info in all_chunks:
        # print(span_info)
        # span_type = span_info[0].lower()
        # print(span_type)
        idx_start = span_info[1]
        idx_end = span_info[2]
        span_cnt = ''.join(test_word_sequences[idx_start:idx_end]).lower()
        # print(span_cnt.encode("utf-8").decode("utf-8"))
        span_cnt = span_cnt.encode("gbk", "ignore").decode("gbk", "ignore")
        # print(sys.getdefaultencoding())
        span_type = ''.join(test_true_tag_sequences[idx_start:idx_end])
        span_pos = str(idx_start) + "|||" + str(idx_end) + "|||" + span_type
        if len(span_type) != (idx_end - idx_start):
            print(idx_start, idx_end)
            print(span_info)
            print(span_type + "\t" + span_cnt)
            print("--------------")
        # print(span_pos)
        # print(span_info)
        # print(span_cnt)
        span_length = idx_end - idx_start

        # span_token_list = test_word_sequences[idx_start:idx_end]
        # span_token_pos_list = [str(pos) + "|||" + span_type for pos in range(idx_start, idx_end)]
        # print(span_token_pos_list)

        span_sentid = dict_pos2sid[idx_start]
        sLen = float(sentLen_list[span_sentid])
        dict_span2sid[span_pos] = span_sentid

        text_sample = "".join(test_word_sequences_sent[span_sentid])
        dict_chunkid2span[span_pos] = span_cnt + "|||" + text_sample

        # Sentence Length: sLen
        aspect = "sLen"
        if aspect in dict_aspect_func.keys():
            dict_span2aspect_val[aspect][span_pos] = sLen

        # Entity Length: eLen
        aspect = "eLen"
        if aspect in dict_aspect_func.keys():
            dict_span2aspect_val[aspect][span_pos] = float(span_length)

        # Tag: tag
        aspect = "tag"
        if aspect in dict_aspect_func.keys():
            dict_span2aspect_val[aspect][span_pos] = span_type

    # print(dict_span2aspect_val)
    return dict_span2aspect_val, dict_span2sid, dict_chunkid2span


def evaluate(task_type="ner", analysis_type="single", systems=[], dataset_name='dataset_name',
             model_name='model_name', output_filename="./output.json", is_print_ci=False,
             is_print_case=False, is_print_ece=False):
    path_text = systems[0] if analysis_type == "single" else ""
    path_comb_output = "model_name" + "/" + path_text.split("/")[-1]

    dict_aspect_func, dict_precomputed_path, obj_json = ea.load_task_conf(
        task_dir=os.path.dirname(__file__))

    list_text_sent, list_text_token = ea.read_single_column(path_text, 0)
    list_true_tags_sent, list_true_tags_token = ea.read_single_column(path_text, 1)
    list_pred_tags_sent, list_pred_tags_token = ea.read_single_column(path_text, 2)

    dict_span2aspect_val, dict_span2sid, dict_chunkid2span = get_aspect_value(
        list_text_token, list_true_tags_token, list_text_sent, list_true_tags_sent,
        dict_precomputed_path, dict_aspect_func)

    dict_span2aspect_val_pred, dict_span2sid_pred, dict_chunkid2span_pred = get_aspect_value(
        list_text_token, list_pred_tags_token, list_text_sent, list_pred_tags_sent,
        dict_precomputed_path, dict_aspect_func)

    holistic_performance = ea.f1(list_true_tags_sent, list_pred_tags_sent)["f1"]

    confidence_low_overall, confidence_up_overall = 0, 0
    if is_print_ci:
        confidence_low_overall, confidence_up_overall = compute_confidence_interval_f1(
            dict_span2sid.keys(), dict_span2sid_pred.keys(),
            dict_span2sid, dict_span2sid_pred, n_times=10)

    print("confidence_low_overall:\t", confidence_low_overall)
    print("confidence_up_overall:\t", confidence_up_overall)

    print("------------------ Holistic Result")
    print(holistic_performance)
    # print(f1(list_true_tags_token, list_pred_tags_token)["f1"])

    dict_bucket2span = {}
    dict_bucket2span_pred = {}
    dict_bucket2f1 = {}
    aspect_names = []
    error_case_list = []

    for aspect, func in dict_aspect_func.items():
        # print(aspect, dict_span2aspect_val[aspect])
        dict_bucket2span[aspect] = ea.select_bucketing_func(func[0], func[1],
                                                            dict_span2aspect_val[aspect])
        # print(aspect, dict_bucket2span[aspect])
        # exit()
        dict_bucket2span_pred[aspect] = ea.bucket_attribute_specified_bucket_interval(
            dict_span2aspect_val_pred[aspect], dict_bucket2span[aspect].keys())
        dict_bucket2f1[aspect], error_case_list = get_bucket_f1(
            dict_bucket2span[aspect], dict_bucket2span_pred[aspect],
            dict_span2sid, dict_span2sid_pred,
            dict_chunkid2span, dict_chunkid2span_pred,
            list_true_tags_token, list_pred_tags_token,
            is_print_ci, is_print_case)
        aspect_names.append(aspect)
    print("aspect_names: ", aspect_names)

    # for v in error_case_list:
    #     print(v)

    print("------------------ Breakdown Performance")
    for aspect in dict_aspect_func.keys():
        ea.print_dict(dict_bucket2f1[aspect], aspect)
    print("")

    # Calculate databias w.r.t numeric attributes
    dict_aspect2bias = {}
    for aspect, aspect2Val in dict_span2aspect_val.items():
        if type(list(aspect2Val.values())[0]) != type("string"):
            dict_aspect2bias[aspect] = numpy.average(list(aspect2Val.values()))

    print("------------------ Dataset Bias")
    for k, v in dict_aspect2bias.items():
        print(k + ":\t" + str(v))
    print("")

    dict_fine_grained = {}
    for aspect, metadata in dict_bucket2f1.items():
        dict_fine_grained[aspect] = []
        for bucket_name, v in metadata.items():
            # print("---------debug--bucket name old---")
            # print(bucket_name)
            bucket_name = ea.beautify_interval(bucket_name)
            # print("---------debug--bucket name new---")
            # print(bucket_name)

            # bucket_value = format(v[0]*100, '.4g')
            bucket_value = format(float(v[0]) * 100, '.4g')
            n_sample = v[1]
            confidence_low = format(float(v[2]) * 100, '.4g')
            confidence_up = format(float(v[3]) * 100, '.4g')
            error_entity_list = v[4]

            # instantiation
            dict_fine_grained[aspect].append({"bucket_name": bucket_name,
                                              "bucket_value": bucket_value,
                                              "num": n_sample,
                                              "confidence_low": confidence_low,
                                              "confidence_up": confidence_up,
                                              "bucket_error_case": error_entity_list[
                                                  0:int(len(error_entity_list) / 10)]})

    obj_json["task"] = task_type
    obj_json["data"]["name"] = dataset_name
    obj_json["data"]["language"] = "Chinese"
    obj_json["data"]["bias"] = dict_aspect2bias

    obj_json["model"]["name"] = model_name
    obj_json["model"]["results"]["overall"]["performance"] = holistic_performance
    obj_json["model"]["results"]["overall"]["confidence_low"] = confidence_low_overall
    obj_json["model"]["results"]["overall"]["confidence_up"] = confidence_up_overall
    obj_json["model"]["results"]["fine_grained"] = dict_fine_grained

    # Save error cases: overall
    obj_json["model"]["results"]["overall"]["error_case"] = error_case_list[
        0:int(len(error_case_list) / 10)]

    ea.save_json(obj_json, output_filename)


def compute_confidence_interval_f1(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred,
                                   n_times=1000):
    n_data = len(dict_span2sid)
    sample_rate = ea.get_sample_rate(n_data)
    n_sampling = int(n_data * sample_rate)
    print("sample_rate:\t", sample_rate)
    print("n_sampling:\t", n_sampling)

    dict_sid2span_salient = {}
    for span in spans_true:
        # print(span)
        if len(span.split("|||")) != 3:
            break
        sid = dict_span2sid[span]
        if sid in dict_sid2span_salient.keys():
            dict_sid2span_salient[sid].append(span)
        else:
            dict_sid2span_salient[sid] = [span]

    dict_sid2span_salient_pred = {}
    for span in spans_pred:
        sid = dict_span2sid_pred[span]
        if sid in dict_sid2span_salient_pred.keys():
            dict_sid2span_salient_pred[sid].append(span)
        else:
            dict_sid2span_salient_pred[sid] = [span]

    performance_list = []
    confidence_low, confidence_up = 0, 0
    for i in range(n_times):
        sample_index_list = choices(range(n_data), k=n_sampling)

        true_label_bootstrap_list = []
        pred_label_bootstrap_list = []
        for ind, sid in enumerate(sample_index_list):
            if sid in dict_sid2span_salient.keys():
                true_label_list = dict_sid2span_salient[sid]
                true_label_list_revised = [true_label + "|||" + str(ind)
                                           for true_label in true_label_list]
                true_label_bootstrap_list += true_label_list_revised

            if sid in dict_sid2span_salient_pred.keys():
                pred_label_list = dict_sid2span_salient_pred[sid]
                pred_label_list_revised = [pred_label + "|||" + str(ind)
                                           for pred_label in pred_label_list]
                pred_label_bootstrap_list += pred_label_list_revised

        f1, p, r = ea.evaluate_chunk_level(pred_label_bootstrap_list,
                                           true_label_bootstrap_list)
        performance_list.append(f1)

    if n_times != 1000:
        confidence_low, confidence_up = ea.mean_confidence_interval(performance_list)
    else:
        performance_list.sort()
        confidence_low = performance_list[24]
        confidence_up = performance_list[974]

    # print("\n")
    # print("confidence_low:\t", confidence_low)
    # print("confidence_up:\t", confidence_up)
    return confidence_low, confidence_up


def get_error_case_segmentation(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2span_sent,
                                dict_chunkid2span_sent_pred, list_true_tags_token,
                                list_pred_tags_token):
    error_case_list = []

    for pos, tag in dict_pos2tag.items():
        true_label = tag
        pred_label = ""
        # print(dict_chunkid2span_sent.keys())
        if pos + "|||" + tag not in dict_chunkid2span_sent.keys():
            continue
        span_sentence = dict_chunkid2span_sent[pos + "|||" + tag]

        if pos in dict_pos2tag_pred.keys():
            pred_label = dict_pos2tag_pred[pos]
            if true_label == pred_label:
                continue
            # print(pos + "\t" + true_label + "\t" + pred_label)
        else:
            start = int(pos.split("|||")[0])
            end = int(pos.split("|||")[1])
            pred_label = "".join(list_pred_tags_token[start:end])
            # print(pred_label)
        error_case = span_sentence + "|||" + true_label + "|||" + pred_label
        error_case_list.append(error_case)

    for pos, tag in dict_pos2tag_pred.items():
        true_label = ""
        pred_label = tag
        if pos + "|||" + tag not in dict_chunkid2span_sent_pred.keys():
            continue
        span_sentence = dict_chunkid2span_sent_pred[pos + "|||" + tag]

        if pos in dict_pos2tag.keys():
            true_label = dict_pos2tag[pos]
            if true_label == pred_label:
                continue
        else:
            start = int(pos.split("|||")[0])
            end = int(pos.split("|||")[1])
            true_label = "".join(list_true_tags_token[start:end])
        error_case = span_sentence + "|||" + true_label + "|||" + pred_label
        error_case_list.append(error_case)

    # for v in error_case_list:
    #     print(len(error_case_list))
    #     print(v)
    # print(error_case_list)
    return error_case_list


def get_bucket_f1(dict_bucket2span, dict_bucket2span_pred, dict_span2sid, dict_span2sid_pred,
                  dict_chunkid2span, dict_chunkid2span_pred, list_true_tags_token,
                  list_pred_tags_token, is_print_ci, is_print_case):
    dict_bucket2f1 = {}

    # predict: 2_3 -> NER
    dict_pos2tag_pred = {}
    if is_print_case:
        for k_bucket_eval, spans_pred in dict_bucket2span_pred.items():
            for span_pred in spans_pred:
                pos_pred = "|||".join(span_pred.split("|||")[0:2])
                tag_pred = span_pred.split("|||")[-1]
                dict_pos2tag_pred[pos_pred] = tag_pred

    # true: 2_3 -> NER
    dict_pos2tag = {}
    if is_print_case:
        for k_bucket_eval, spans in dict_bucket2span.items():
            for span in spans:
                pos = "|||".join(span.split("|||")[0:2])
                tag = span.split("|||")[-1]
                dict_pos2tag[pos] = tag

    error_case_list = []
    if is_print_case:
        error_case_list = get_error_case_segmentation(dict_pos2tag, dict_pos2tag_pred,
                                                      dict_chunkid2span, dict_chunkid2span_pred,
                                                      list_true_tags_token, list_pred_tags_token)
        # print(len(error_case_list))
        # print(error_case_list)

    for bucket_interval, spans_true in dict_bucket2span.items():
        spans_pred = []
        if bucket_interval not in dict_bucket2span_pred.keys():
            raise ValueError("Predict Label Bucketing Errors")
        else:
            spans_pred = dict_bucket2span_pred[bucket_interval]

        confidence_low, confidence_up = 0, 0
        if is_print_ci:
            confidence_low, confidence_up = compute_confidence_interval_f1(
                spans_true, spans_pred, dict_span2sid, dict_span2sid_pred)

        confidence_low = format(confidence_low, '.3g')
        confidence_up = format(confidence_up, '.3g')

        f1, p, r = ea.evaluate_chunk_level(spans_pred, spans_true)

        error_entity_list = []
        if is_print_case:
            for span_true in spans_true:
                if span_true not in spans_pred:
                    # print(span_true)
                    pos_true = "|||".join(span_true.split("|||")[0:2])
                    tag_true = span_true.split("|||")[-1]

                    if pos_true in dict_pos2tag_pred.keys():
                        tag_pred = dict_pos2tag_pred[pos_true]
                        if tag_pred != tag_true:
                            error_entity_list.append(dict_chunkid2span[span_true] + "|||"
                                                     + tag_true + "|||"
                                                     + dict_pos2tag_pred[pos_true])
                    else:
                        start = int(pos_true.split("|||")[0])
                        end = int(pos_true.split("|||")[1])
                        pred_label = "".join(list_pred_tags_token[start:end])
                        error_entity_list.append(dict_chunkid2span[span_true] + "|||"
                                                 + tag_true + "|||" + pred_label)

        dict_bucket2f1[bucket_interval] = [f1, len(spans_true), confidence_low,
                                           confidence_up, error_entity_list]

        # if bucket_interval[0] == 1.0:
        #     print("debug-f1:", f1)
        #     print(spans_pred[0:20])
        #     print(spans_true[0:20])

    # print("dict_bucket2f1: ", dict_bucket2f1)
    return ea.sort_dict(dict_bucket2f1), error_case_list
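A hedged invocation sketch for evaluate() above. The TSV path, dataset name, and model name are hypothetical placeholders, not part of the source; the call also assumes explainaboard's bundled CWS task config is present so ea.load_task_conf succeeds:

# Hypothetical call to evaluate() above. "./cws_output.tsv" is an assumed
# three-column file (token, true tag, predicted tag); names are placeholders.
evaluate(task_type="cws",
         analysis_type="single",
         systems=["./cws_output.tsv"],
         dataset_name="my-cws-dataset",
         model_name="my-cws-model",
         output_filename="./output.json",
         is_print_ci=False,
         is_print_case=False)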
42.13617
175
0.588063
2,317
19,804
4.640052
0.106603
0.013952
0.020556
0.014231
0.386383
0.311413
0.261929
0.183704
0.144545
0.135057
0
0.017886
0.305494
19,804
470
176
42.13617
0.763778
0.097657
0
0.179811
0
0
0.047501
0.002751
0.003155
0
0
0
0
1
0.022082
false
0
0.018927
0
0.059937
0.094637
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edd644d6be6dcd949445336e892644d72635cc12
476
py
Python
ci/kubetest/test_posthog_hpa_enabled.py
momentumdash/charts-clickhouse
9f8ef82f11db186810fcf98dd789ff4b0c1eed95
[ "MIT" ]
null
null
null
ci/kubetest/test_posthog_hpa_enabled.py
momentumdash/charts-clickhouse
9f8ef82f11db186810fcf98dd789ff4b0c1eed95
[ "MIT" ]
null
null
null
ci/kubetest/test_posthog_hpa_enabled.py
momentumdash/charts-clickhouse
9f8ef82f11db186810fcf98dd789ff4b0c1eed95
[ "MIT" ]
null
null
null
import pytest

from helpers.utils import cleanup_k8s, helm_install, wait_for_pods_to_be_ready

HELM_INSTALL_CMD = """
helm upgrade \
    --install \
    -f ../../ci/values/kubetest/test_posthog_hpa_enabled.yaml \
    --timeout 30m \
    --create-namespace \
    --namespace posthog \
    posthog ../../charts/posthog \
    --wait-for-jobs \
    --wait
"""


def test_helm_install(kube):
    cleanup_k8s()
    helm_install(HELM_INSTALL_CMD)
    wait_for_pods_to_be_ready(kube)
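The kube fixture above is injected by the pytest-kubetest plugin, so the test is normally driven through pytest. The same helm command can also be exercised directly, as in this hedged sketch; it assumes helm is on PATH and that the relative chart paths in HELM_INSTALL_CMD resolve from the working directory:

# Hedged sketch: run the same helm command outside pytest.
import subprocess

result = subprocess.run(["bash", "-c", HELM_INSTALL_CMD],
                        capture_output=True, text=True)
print(result.returncode)        # 0 on a successful install/upgrade
print(result.stdout[-500:])     # tail of helm's output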
21.636364
78
0.691176
63
476
4.857143
0.539683
0.179739
0.091503
0.137255
0.130719
0.130719
0
0
0
0
0
0.010283
0.182773
476
21
79
22.666667
0.77635
0
0
0
0
0
0.493697
0.113445
0
0
0
0
0
1
0.058824
false
0
0.117647
0
0.176471
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edd7c9fc4d4c13cce5799a53fe7796fa6ce013ec
1,379
py
Python
python/biograph/variants/add_ref_test.py
spiralgenetics/biograph
33c78278ce673e885f38435384f9578bfbf9cdb8
[ "BSD-2-Clause" ]
16
2021-07-14T23:32:31.000Z
2022-03-24T16:25:15.000Z
python/biograph/variants/add_ref_test.py
spiralgenetics/biograph
33c78278ce673e885f38435384f9578bfbf9cdb8
[ "BSD-2-Clause" ]
9
2021-07-20T20:39:47.000Z
2021-09-16T20:57:59.000Z
python/biograph/variants/add_ref_test.py
spiralgenetics/biograph
33c78278ce673e885f38435384f9578bfbf9cdb8
[ "BSD-2-Clause" ]
9
2021-07-15T19:38:35.000Z
2022-01-31T19:24:56.000Z
# pylint: disable=missing-docstring
from __future__ import print_function

import unittest

import biograph
import biograph.variants as bgexvar


class ReadCovTestCases(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.bg = biograph.BioGraph("datasets/lambdaToyData/benchmark/father_lambda.bg")
        cls.seqset = cls.bg.seqset
        cls.rm = cls.bg.open_readmap()
        cls.ref = biograph.Reference("datasets/lambdaToyData/benchmark/ref_lambda")
        cls.reflen = cls.ref.scaffold_lens['lambda']

    def test_add_ref(self):
        asms = bgexvar.add_ref_assemblies(self.ref, "lambda", [], whole_ref=True, max_len=100)
        asms = list(asms)
        self.assertEqual(len(asms), (self.reflen // 100) + 1)
        seq = biograph.Sequence()
        for a in asms:
            seq += a.seq
        self.assertEqual(len(seq), self.reflen)

        rc_asms = bgexvar.add_ref_assemblies(self.ref, "lambda", [], whole_ref=True,
                                             max_len=100, rev_comp=True)
        rc_asms = list(rc_asms)
        self.assertEqual(len(rc_asms), (self.reflen // 100) + 1)
        rc_seq = biograph.Sequence()
        for a in rc_asms:
            rc_seq += a.seq
        self.assertEqual(len(rc_seq), self.reflen)
        self.assertEqual(seq, rc_seq.rev_comp())


if __name__ == '__main__':
    unittest.main(verbosity=2)
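The core invariant tested above is that max_len-sized reference assemblies concatenate back to the full reference. Since biograph and its toy datasets are not generally available, here is a self-contained analogy with plain strings, purely illustrative and not the library's API:

# Self-contained analogy of the invariant tested above: splitting a sequence
# into max_len-sized chunks and re-joining them reproduces the original.
ref = "ACGT" * 1000          # stand-in for the lambda reference sequence
max_len = 100
chunks = [ref[i:i + max_len] for i in range(0, len(ref), max_len)]
assert len(chunks) == (len(ref) // max_len) + (1 if len(ref) % max_len else 0)
assert "".join(chunks) == ref
print("chunking invariant holds")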
33.634146
97
0.636693
175
1,379
4.8
0.354286
0.089286
0.085714
0.040476
0.307143
0.264286
0.145238
0.145238
0.145238
0.145238
0
0.014409
0.245105
1,379
40
98
34.475
0.792507
0.02393
0
0
0
0
0.087798
0.068452
0
0
0
0
0.16129
1
0.064516
false
0
0.129032
0
0.225806
0.032258
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
edd7dcbe7e77249b8bc05cc82a6e9d24f74df2a3
924
py
Python
tools/trading/quotes.py
renoneto/swing_trading
1d176d7e42bca6028efcb1869ec648824c535fe1
[ "MIT" ]
8
2020-06-19T11:23:44.000Z
2022-02-11T00:52:29.000Z
tools/trading/quotes.py
renoneto/swing_trading
1d176d7e42bca6028efcb1869ec648824c535fe1
[ "MIT" ]
5
2020-05-16T18:14:24.000Z
2021-12-13T20:40:25.000Z
tools/trading/quotes.py
renoneto/swing_trading
1d176d7e42bca6028efcb1869ec648824c535fe1
[ "MIT" ]
2
2020-05-16T23:31:04.000Z
2021-06-06T18:40:01.000Z
import requests


def get_quotes(access_token, my_client, symbols):
    """
    Function to get quotes of a list of stocks
    """
    # Convert list to string
    str_symbols = ','.join(symbols)

    # define our headers
    header = {'Authorization': "Bearer {}".format(access_token),
              "Content-Type": "application/json"}

    # define the endpoint for quotes (the original comment, copy-pasted from a
    # Saved Orders example, mentioned an account ID this endpoint does not use)
    endpoint = 'https://api.tdameritrade.com/v1/marketdata/quotes'

    # payload with symbols
    payload = {'symbol': str_symbols, 'apikey': my_client.key}

    # make a GET request with query parameters (the original comment said POST,
    # but the call below uses GET)
    content = requests.get(url=endpoint, headers=header, params=payload)

    # json response
    json_response = content.json()

    # quote symbols
    quote_symbols = list(json_response.keys())

    return json_response, quote_symbols
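A hedged usage sketch for get_quotes above. The token and API key are placeholders, and my_client is faked with SimpleNamespace because the function only reads its .key attribute; a real call needs valid TD Ameritrade credentials and network access:

# Hypothetical call to get_quotes above; credentials are placeholders.
from types import SimpleNamespace

my_client = SimpleNamespace(key="YOUR_TDA_API_KEY")
quotes, symbols = get_quotes("YOUR_ACCESS_TOKEN", my_client, ["AAPL", "MSFT"])
print(symbols)   # the ticker keys present in the JSON response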
28
76
0.643939
111
924
5.252252
0.603604
0.082333
0
0
0
0
0
0
0
0
0
0.001458
0.257576
924
32
77
28.875
0.848397
0.28355
0
0
0
0
0.175
0
0
0
0
0
0
1
0.071429
false
0
0.071429
0
0.214286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0