hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
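The fields above describe one columnar (Parquet-style) record layout: per-file Git metadata (hexsha, size, repo paths and names under the max_stars/max_issues/max_forks prefixes), the raw file content, and two banks of quality signals (the float-valued *_quality_signal columns and their int-valued counterparts). As a quick sanity check, a shard can be loaded and its dtypes compared against this listing. A minimal sketch, assuming pandas with a Parquet engine installed; the file name data.parquet is a placeholder, not a real shard name:

import pandas as pd

# Hypothetical shard path; any file exposing the schema above would work.
df = pd.read_parquet("data.parquet")

# dtypes should mirror the field/dtype listing (columns typed `null` above
# usually surface as object columns holding None).
print(df.dtypes)

# A few of the repository-metadata columns for the first rows:
print(df[["hexsha", "size", "ext", "lang", "max_stars_count"]].head())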
hexsha: f13af7308893eb6dd4208f6a9361b6902679648a
size: 433
ext: py
lang: Python
max_stars_repo_path: pyxel_book/sample02.py
max_stars_repo_name: scnsh/pyxelGames
max_stars_repo_head_hexsha: 058a4c0ad0b421cffc5746bfed2a6eddb3d85949
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pyxel_book/sample02.py
max_issues_repo_name: scnsh/pyxelGames
max_issues_repo_head_hexsha: 058a4c0ad0b421cffc5746bfed2a6eddb3d85949
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pyxel_book/sample02.py
max_forks_repo_name: scnsh/pyxelGames
max_forks_repo_head_hexsha: 058a4c0ad0b421cffc5746bfed2a6eddb3d85949
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
    import pyxel

    WIDTH = 128
    HEIGHT = 128
    IMG_NO = 0

    class App:
        my_x = 0
        my_y = 0

        def __init__(self):
            pyxel.init(WIDTH, HEIGHT)
            pyxel.load("mychara.pyxres")
            pyxel.run(self.update, self.draw)

        def update(self):
            self.my_x = pyxel.mouse_x
            self.my_y = pyxel.mouse_y

        def draw(self):
            pyxel.cls(7)
            pyxel.blt(self.my_x, self.my_y, IMG_NO, 0, 0, 16, 16, 0)

    App()
avg_line_length: 16.037037
max_line_length: 64
alphanum_fraction: 0.575058
qsc_code_num_words_quality_signal: 71
qsc_code_num_chars_quality_signal: 433
qsc_code_mean_word_length_quality_signal: 3.309859
qsc_code_frac_words_unique_quality_signal: 0.380282
qsc_code_frac_chars_top_2grams_quality_signal: 0.102128
qsc_code_frac_chars_top_3grams_quality_signal: 0.051064
qsc_code_frac_chars_top_4grams_quality_signal: 0.068085
qsc_code_frac_chars_dupe_5grams_quality_signal: 0
qsc_code_frac_chars_dupe_6grams_quality_signal: 0
qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0
qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.056106
qsc_code_frac_chars_whitespace_quality_signal: 0.300231
qsc_code_size_file_byte_quality_signal: 433
qsc_code_num_lines_quality_signal: 26
qsc_code_num_chars_line_max_quality_signal: 65
qsc_code_num_chars_line_mean_quality_signal: 16.653846
qsc_code_frac_chars_alphabet_quality_signal: 0.719472
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.032333
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.166667
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.055556
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.388889
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 1
hits: 0
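This first row is small enough to check the basic statistics by hand. The exact definitions behind avg_line_length, max_line_length, and alphanum_fraction are not documented here, so the following is a plausible reconstruction rather than the dataset's confirmed recipe; for this row it is at least consistent: 433 characters over 27 physical lines gives 16.037037, and alphanum_fraction = 0.575058 corresponds to 249 of 433 characters being alphanumeric.

def basic_stats(content: str) -> dict:
    # Assumed definitions -- not confirmed by any dataset documentation.
    lines = content.splitlines()
    return {
        "size": len(content),
        "avg_line_length": len(content) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }

Note the off-by-one pairs within the row itself: max_line_length is 64 while qsc_code_num_chars_line_max_quality_signal is 65, and avg_line_length implies 27 lines (433 / 16.037037) while qsc_code_num_lines_quality_signal is 26, which suggests the quality signals count the line-terminating newline where the top-level statistics do not.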
hexsha: f13af8576792323da6a71722fc4b7ac0926a65e0
size: 588
ext: py
lang: Python
max_stars_repo_path: purchase_order/views.py
max_stars_repo_name: Um9i/ERPv
max_stars_repo_head_hexsha: f4bf8d801ad8f800107c92625123c1e59b439c40
max_stars_repo_licenses: ["MIT"]
max_stars_count: 8
max_stars_repo_stars_event_min_datetime: 2019-04-13T14:08:55.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-06T10:53:44.000Z
max_issues_repo_path: purchase_order/views.py
max_issues_repo_name: Um9i/ERPv
max_issues_repo_head_hexsha: f4bf8d801ad8f800107c92625123c1e59b439c40
max_issues_repo_licenses: ["MIT"]
max_issues_count: 16
max_issues_repo_issues_event_min_datetime: 2019-03-31T20:22:21.000Z
max_issues_repo_issues_event_max_datetime: 2022-01-15T11:43:57.000Z
max_forks_repo_path: purchase_order/views.py
max_forks_repo_name: Um9i/ERPv
max_forks_repo_head_hexsha: f4bf8d801ad8f800107c92625123c1e59b439c40
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
    from .models import PurchaseOrder, PurchaseOrderLine
    from .serializers import PurchaseOrderSerializer, PurchaseOrderLineSerializer
    from rest_framework import viewsets, permissions

    class PurchaseOrderViewSet(viewsets.ModelViewSet):
        queryset = PurchaseOrder.objects.all()
        serializer_class = PurchaseOrderSerializer
        permission_classes = [permissions.IsAuthenticated]

    class PurchaseOrderLineViewSet(viewsets.ModelViewSet):
        queryset = PurchaseOrderLine.objects.all()
        serializer_class = PurchaseOrderLineSerializer
        permission_classes = [permissions.IsAuthenticated]
avg_line_length: 36.75
max_line_length: 77
alphanum_fraction: 0.833333
qsc_code_num_words_quality_signal: 46
qsc_code_num_chars_quality_signal: 588
qsc_code_mean_word_length_quality_signal: 10.543478
qsc_code_frac_words_unique_quality_signal: 0.5
qsc_code_frac_chars_top_2grams_quality_signal: 0.082474
qsc_code_frac_chars_top_3grams_quality_signal: 0.115464
qsc_code_frac_chars_top_4grams_quality_signal: 0.103093
qsc_code_frac_chars_dupe_5grams_quality_signal: 0
qsc_code_frac_chars_dupe_6grams_quality_signal: 0
qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0
qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0
qsc_code_frac_chars_whitespace_quality_signal: 0.110544
qsc_code_size_file_byte_quality_signal: 588
qsc_code_num_lines_quality_signal: 15
qsc_code_num_chars_line_max_quality_signal: 78
qsc_code_num_chars_line_mean_quality_signal: 39.2
qsc_code_frac_chars_alphabet_quality_signal: 0.927342
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.181818
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.272727
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 1
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 1
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 1
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 2
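Because each row carries its quality signals inline, corpus filtering reduces to a plain dataframe predicate. The sketch below is illustrative only: the thresholds are invented for demonstration and are not the filters actually used to produce this data.

import pandas as pd

df = pd.read_parquet("data.parquet")  # hypothetical shard, as before

# Invented thresholds over a few qsc_* signals:
mask = (
    (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.25)       # mostly letters
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.9)  # not dominated by repetition
    & ~df["qsc_codepython_cate_var_zero_quality_signal"]             # defines at least one variable
)
print(f"kept {mask.sum()} of {len(df)} rows")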
hexsha: f13b60a1da860ddec297fecdd34c2372b3ccd802
size: 15253
ext: py
lang: Python
max_stars_repo_path: code/python/lib/mg_viz/stats_large.py
max_stars_repo_name: alguru/metagenemark-2
max_stars_repo_head_hexsha: 3389a6bb306acea87ab3ccb63c47281aadafb273
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: code/python/lib/mg_viz/stats_large.py
max_issues_repo_name: alguru/metagenemark-2
max_issues_repo_head_hexsha: 3389a6bb306acea87ab3ccb63c47281aadafb273
max_issues_repo_licenses: ["MIT"]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2022-03-30T10:18:15.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-30T10:18:15.000Z
max_forks_repo_path: code/python/lib/mg_viz/stats_large.py
max_forks_repo_name: gatech-genemark/MetaGeneMark-2-exp
max_forks_repo_head_hexsha: 99f8cf091911b9200af97e722543ad84a247770f
max_forks_repo_licenses: ["MIT"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-04-06T12:43:12.000Z
max_forks_repo_forks_event_max_datetime: 2021-04-06T12:43:12.000Z
content:
# Author: Karl Gemayel # Created: 8/5/20, 8:25 AM import logging import math import os from textwrap import wrap import pandas as pd from typing import * import seaborn import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter from mg_general import Environment from mg_io.general import load_obj, save_obj from mg_viz.colormap import ColorMap as CM from mg_general.general import next_name, get_value from mg_stats.small import _helper_join_reference_and_tidy_data, prl_join_reference_and_tidy_data from mg_viz.general import set_size from mg_viz.shelf import number_formatter, update_tool_names_to_full logger = logging.getLogger(__name__) def case_insensitive_match(df, col, value): # type: (pd.DataFrame, str, str) -> pd.Series return df[col].apply(lambda x: x.lower()) == value.lower() def plot_gc_stats_side_by_side(env, df_tidy, columns, tool_order, reference, **kwargs): col_to_ylim = get_value(kwargs, "col_to_ylim", dict()) col_wrap = get_value(kwargs, "col_wrap", len(columns)) num_rows = math.ceil(len(columns) / float(col_wrap)) wrap_val = get_value(kwargs, "wrap_val", None) figsize = get_value(kwargs, "figsize", (8 * col_wrap, 6 * num_rows)) col_x = get_value(kwargs, "col_x", "Genome GC") col_x_text = get_value(kwargs, "col_x", "GC") legend_cols = get_value(kwargs, "legend_cols", len(tool_order)) legend_pos = get_value(kwargs, "legend_pos", "bottom") fig, axes = plt.subplots(num_rows, col_wrap, figsize=figsize) reg_kws = {"lowess": True, "scatter_kws": {"s": 0.1, "alpha": 0.3}, "line_kws": {"linewidth": 1}} from collections import abc axes_unr = axes if not isinstance(axes, abc.Iterable): axes = [axes] else: axes = axes.ravel() ax = None i = j = 0 fontsize="small" for ax, col in zip(axes, columns): for t in tool_order: if t.lower() == reference.lower(): continue df_curr = df_tidy[case_insensitive_match(df_tidy, "Tool", t)] seaborn.regplot( df_curr[col_x], df_curr[col], label=t, color=CM.get_map("tools")[t.lower()], **reg_kws, ax=ax ) if col in col_to_ylim: ax.set_ylim(*col_to_ylim[col]) if max(df_curr[col]) > 2000: ax.yaxis.set_major_formatter(FuncFormatter(number_formatter)) if i != num_rows - 1: ax.set_xlabel("") else: ax.set_xlabel(col_x_text, fontsize=fontsize) if wrap_val: col_text = "\n".join(wrap(col, wrap_val, break_long_words=False)) else: col_text = col ax.set_ylabel(col_text, wrap=True, fontsize=fontsize) ax.tick_params(labelsize=fontsize, length=2) j += 1 if j == col_wrap: i += 1 j = 0 if ax is not None: if legend_pos == "bottom": fig.subplots_adjust(bottom=0.2) else: fig.subplots_adjust(right=0.8) handles, labels = ax.get_legend_handles_labels() # labels = [{ # "mgm": "MGM", # "mgm2": "MGM2", # "mga": "MGA", # "mprodigal": "MProdigal", # "fgs": "FGS", # "gms2": "GMS2", # "prodigal": "Prodigal" # }[l.lower()] for l in labels] labels = update_tool_names_to_full(labels) if legend_pos == "bottom" or True: leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=legend_cols, bbox_transform=fig.transFigure, frameon=False, fontsize="xx-small") else: leg = fig.legend(handles, labels, bbox_to_anchor=(1.05, 0.5), loc='center left', frameon=False, fontsize=18) for lh in leg.legendHandles: lh.set_alpha(1) lh.set_sizes([18] * (len(tool_order))) if num_rows > 1: for i in range(col_wrap): fig.align_ylabels(axes_unr[:,i]) if legend_pos == "bottom" or True: if num_rows == 1: fig.tight_layout(rect=[0,0.05,1,1]) else: fig.tight_layout(rect=[0,0.1,1,1]) # else: # fig.tight_layout(rect=[0, 0, 1, 1]) fig.savefig(next_name(env["pd-work"]), 
bbox_extra_artists=(leg,)) #bbox_inches='tight' plt.show() def reorder_pivot_by_tool(df_pivoted, tool_order): # type: (pd.DataFrame, List[str]) -> pd.DataFrame return df_pivoted.reorder_levels([1, 0], 1)[ [x.upper() for x in tool_order]].reorder_levels( [1, 0], 1 ).sort_index(1, 0, sort_remaining=False) def stats_large_3p_reference(env, df_tidy, reference, **kwargs): # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique())) # if reference not in tool_order: # tool_order = [reference] + tool_order # number of genes per clade df_grouped = df_tidy.groupby(["Clade", "Tool"], as_index=False).sum() df_grouped["Sensitivity"] = df_grouped["Number of Found"] / df_grouped["Number in Reference"] df_grouped["Specificity"] = df_grouped["Number of Found"] / df_grouped["Number of Predictions"] # df_pivoted = reorder_pivot_by_tool( # df_grouped.pivot(index=["Clade", "Number in Reference"], columns="Tool", values=["Sensitivity", "Specificity"]), tool_order # ) df_pivoted = reorder_pivot_by_tool(df_grouped.pivot_table( index=["Clade", "Number in Reference"], columns="Tool", values=["Sensitivity", "Specificity"]).reset_index( level=1), tool_order) df_pivoted.to_csv( next_name(env["pd-work"], ext="csv") ) def stats_large_5p_overall(env, df_tidy, reference, **kwargs): # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique())) # if reference not in tool_order: # tool_order = [reference] + tool_order # number of genes per clade df_grouped = df_tidy.groupby(["Clade", "Tool"], as_index=False).sum() df_grouped["Error Rate"] = df_grouped["Number of Error"] / df_grouped["Number of Found"] df_pivoted = reorder_pivot_by_tool(df_grouped.pivot_table( index=["Clade", "Number in Reference"], columns="Tool", values=["Error Rate"]).reset_index( level=1), tool_order) df_pivoted.to_csv( next_name(env["pd-work"], ext="csv") ) def viz_stats_large_3p_sn_sp(env, df_tidy, reference, **kwargs): # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique())) df_tidy["3' FN Error Rate"] = 1- df_tidy["Sensitivity"] df_tidy["3' FP Error Rate"] = 1 - df_tidy["Specificity"] plot_gc_stats_side_by_side( env, df_tidy, ["Sensitivity", "Specificity", "Number of Found", "Number of Predictions"], tool_order, reference, col_wrap=2, wrap_val=10, figsize=set_size(433.62001, subplots=(2,2), legend="bottom"), col_to_ylim={"Specificity": (0.5, 1), "Sensitivity": (0.5, 1)} ) plot_gc_stats_side_by_side( env, df_tidy, ["3' FN Error Rate", "3' FP Error Rate"], tool_order, reference, col_wrap=2, wrap_val=10, figsize=set_size(433.62001, subplots=(1, 2), legend="bottom"), col_to_ylim={"3' FN Error Rate": (0, 0.2), "3' FP Error Rate": (0, 0.2)}, legend_cols = math.ceil(len(tool_order)), legend_pos="right" ) plot_gc_stats_side_by_side( env, df_tidy, ["Sensitivity", "Specificity"], tool_order, reference, col_wrap=2, wrap_val=10, figsize=set_size(433.62001, subplots=(1, 2), legend="bottom"), col_to_ylim={"Sensitivity": (0.8, 1), "Specificity": (0.8, 1)}, legend_cols=math.ceil(len(tool_order)), legend_pos="right" ) def stats_large_3p_predictions_vs_found(env, df_tidy, reference, **kwargs): # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique())) plot_gc_stats_side_by_side( env, df_tidy, ["Number of Predictions", "Number of Found", 
"Specificity"], tool_order, reference, col_to_ylim={"Specificity": (0.5, 1), "Sensitivity": (0.5, 1)} ) def viz_stats_large_5p_error_vs_sensitivity(env, df_tidy, reference, **kwargs): # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique())) df_tidy["Gene Start Error Rate"] = df_tidy["Number of Error"] / df_tidy["Number of Found"] # FIXME: compute before df_tidy["1 - Sensitivity"] = 1 - df_tidy["Sensitivity"] df_tidy["3' FN Error Rate"] = 1 - df_tidy["Sensitivity"] plot_gc_stats_side_by_side( env, df_tidy, ["Gene Start Error Rate", "1 - Sensitivity"], tool_order, reference, col_wrap=2, wrap_val=15, figsize=set_size("thesis", subplots=(1, 2), legend="bottom"), col_to_ylim={"Specificity": (0.5, 1), "Gene Start Error Rate": (0, 0.3), "1 - Sensitivity": (0, 0.15)} ) df_tidy["Gene 5' Error Rate"] = df_tidy["Gene Start Error Rate"] plot_gc_stats_side_by_side( env, df_tidy, ["Gene 5' Error Rate", "3' FN Error Rate"], tool_order, reference, col_wrap=2, wrap_val=10, figsize=set_size("thesis", subplots=(1, 2), legend="bottom"), col_to_ylim={"Specificity": (0.5, 1), "Gene 5' Error Rate": (0, 0.3), "3' FN Error Rate": (0, 0.15)} ) print(df_tidy.groupby("Tool", as_index=False).mean().to_csv(index=False)) print(df_tidy.groupby("Tool", as_index=False).sum().to_csv(index=False)) def viz_stats_large_5p_error_vs_gc_by_clade(env, df_tidy, reference, **kwargs): # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique())) df_tidy["Error Rate"] = df_tidy["Number of Error"] / df_tidy["Number of Found"] clades_sorted = sorted(df_tidy["Clade"].unique()) num_clades = len(clades_sorted) num_rows = 2 subplots=(num_rows, math.ceil(num_clades/ float(num_rows))) figsize = set_size("thesis", subplots=subplots,legend="bottom", titles=True) col_x = "Genome GC" col_x_text = "GC" fig, axes = plt.subplots(subplots[0], subplots[1], figsize=figsize, sharex="all", sharey="all") reg_kws = {"lowess": True, "scatter_kws": {"s": 0.1, "alpha": 0.3}, "line_kws": {"linewidth": 1}} from collections import abc axes_unr = axes if not isinstance(axes, abc.Iterable): axes = [axes] else: axes = axes.ravel() ax = None fontsize = "xx-small" counter = 0 for ax, col in zip(axes, clades_sorted): for t in tool_order: if t.lower() == reference.lower(): continue df_curr = df_tidy[case_insensitive_match(df_tidy, "Tool", t)] df_curr = df_curr[df_curr["Clade"] == col] seaborn.regplot( df_curr[col_x], df_curr["Error Rate"], label=t, color=CM.get_map("tools")[t.lower()], **reg_kws, ax=ax ) # if col in col_to_ylim: # ax.set_ylim(*col_to_ylim[col]) if max(df_curr["Error Rate"]) > 2000: ax.yaxis.set_major_formatter(FuncFormatter(number_formatter)) ax.set_xlabel(col_x_text, fontsize=fontsize) ax.set_title(col, fontsize=fontsize) ax.set_ylabel("Error Rate", wrap=True, fontsize=fontsize) ax.tick_params(labelsize=fontsize, length=2) if counter == 0: ax.set_ylabel("Error Rate", wrap=True, fontsize=fontsize) else: ax.set_ylabel("") if ax is not None: fig.subplots_adjust(bottom=0.2) handles, labels = ax.get_legend_handles_labels() # labels = [{ # "mgm": "MGM", # "mgm2": "MGM2", # "mga": "MGA", # "mprodigal": "MProdigal", # "fgs": "FGS", # "gms2": "GMS2", # "prodigal": "Prodigal" # }[l.lower()] for l in labels] labels = update_tool_names_to_full(labels) leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=len(tool_order), bbox_transform=fig.transFigure, 
frameon=False, fontsize=fontsize) for lh in leg.legendHandles: lh.set_alpha(1) lh.set_sizes([18] * (len(tool_order))) # if num_rows > 1: # for i in range(): # fig.align_ylabels(axes_unr[:, i]) if num_rows == 1: fig.tight_layout(rect=[0, 0.05, 1, ]) else: fig.tight_layout(rect=[0, 0.1, 1, 1]) fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,)) # bbox_inches='tight' plt.show() def viz_stats_large_3p(env, df_per_gene, tools, list_ref, **kwargs): pf_checkpoint = get_value(kwargs, "pf_checkpoint", None) if not pf_checkpoint or not os.path.isfile(pf_checkpoint): reference, df_tidy = prl_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref) if pf_checkpoint: save_obj([reference, df_tidy], pf_checkpoint) else: reference, df_tidy = load_obj(pf_checkpoint) # Reference stats df_tidy.loc[df_tidy["Tool"] == "MGM2_AUTO", "Tool"] = "MGM2" reference = reference.replace("MGM2_AUTO", "MGM2") tools = tools.copy() for i in range(len(tools)): if tools[i].upper() == "MGM2_AUTO": tools[i] = "MGM2" stats_large_3p_reference(env, df_tidy, reference, tool_order=tools) # Number of Predictions versus number of found stats_large_3p_predictions_vs_found(env, df_tidy, reference, tool_order=tools) # Number of Sensitivity and specificity viz_stats_large_3p_sn_sp(env, df_tidy, reference, tool_order=tools) def viz_stats_large_5p(env, df_per_gene, tools, list_ref, **kwargs): pf_checkpoint = get_value(kwargs, "pf_checkpoint", None) if not pf_checkpoint or not os.path.isfile(pf_checkpoint): reference, df_tidy = prl_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref) if pf_checkpoint: save_obj([reference, df_tidy], pf_checkpoint) else: reference, df_tidy = load_obj(pf_checkpoint) df_tidy.loc[df_tidy["Tool"] == "MGM2_AUTO", "Tool"] = "MGM2" reference = reference.replace("MGM2_AUTO", "MGM2") tools = tools.copy() for i in range(len(tools)): if tools[i].upper() == "MGM2_AUTO": tools[i] = "MGM2" stats_large_5p_overall(env, df_tidy, reference, tool_order=tools) # Number of found vs number of 5' error viz_stats_large_5p_error_vs_sensitivity(env, df_tidy, reference, tool_order=tools) viz_stats_large_5p_error_vs_gc_by_clade(env, df_tidy, reference, tool_order=tools)
avg_line_length: 38.811705
max_line_length: 133
alphanum_fraction: 0.61719
qsc_code_num_words_quality_signal: 2133
qsc_code_num_chars_quality_signal: 15253
qsc_code_mean_word_length_quality_signal: 4.159869
qsc_code_frac_words_unique_quality_signal: 0.124707
qsc_code_frac_chars_top_2grams_quality_signal: 0.040573
qsc_code_frac_chars_top_3grams_quality_signal: 0.019272
qsc_code_frac_chars_top_4grams_quality_signal: 0.024344
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.767272
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.730982
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.697847
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.669109
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.623803
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.577708
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.021841
qsc_code_frac_chars_whitespace_quality_signal: 0.243559
qsc_code_size_file_byte_quality_signal: 15253
qsc_code_num_lines_quality_signal: 392
qsc_code_num_chars_line_max_quality_signal: 134
qsc_code_num_chars_line_mean_quality_signal: 38.910714
qsc_code_frac_chars_alphabet_quality_signal: 0.747183
qsc_code_frac_chars_comments_quality_signal: 0.11342
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.498099
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.113355
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0.002551
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.041825
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.068441
qsc_codepython_frac_lines_simplefunc_quality_signal: 0.007605
qsc_codepython_score_lines_no_logic_quality_signal: 0.117871
qsc_codepython_frac_lines_print_quality_signal: 0.007605
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 1
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 1
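This row is the first with non-zero qsc_code_frac_chars_dupe_*grams_quality_signal values, falling from 0.767272 at n=5 to 0.577708 at n=10. Signals named this way usually follow a Gopher-style repetition measure: the fraction of characters that sit inside a word n-gram occurring more than once, which would explain the decreasing pattern, since every duplicated 10-gram also contains duplicated 5-grams. A sketch under that assumption; the dataset's exact recipe is not given, and this version counts overlapping duplicates once per n-gram, so it only approximates any official definition:

from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    # Assumed Gopher-style definition: fraction of word characters contained
    # in n-grams that appear more than once in the document.
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    dupe_chars = sum(sum(map(len, ng)) for ng in ngrams if counts[ng] > 1)
    total_chars = sum(map(len, words))
    return dupe_chars / total_chars if total_chars else 0.0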
hexsha: f13b704fd8d0df2530e8a9f2950ab903d8a850f5
size: 44784
ext: py
lang: Python
max_stars_repo_path: Heavylifting.py
max_stars_repo_name: Dnshbbu/RulesView-Backend
max_stars_repo_head_hexsha: 09d47e600f9a3e815b5c60241817ce4d98e7ba7b
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Heavylifting.py
max_issues_repo_name: Dnshbbu/RulesView-Backend
max_issues_repo_head_hexsha: 09d47e600f9a3e815b5c60241817ce4d98e7ba7b
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Heavylifting.py
max_forks_repo_name: Dnshbbu/RulesView-Backend
max_forks_repo_head_hexsha: 09d47e600f9a3e815b5c60241817ce4d98e7ba7b
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from flask import Flask, jsonify, abort, make_response from flask_restful import Api, Resource, reqparse, fields, marshal, abort from py2neo import * import json from flask_cors import CORS from ipaddress import * import werkzeug import os from netaddr import * import re import random import time import CSVSplit_generalised_v3 import logging import RawRuleslist import configparser import sqlite3 #config object to pull the password from conf file config = configparser.ConfigParser() config.read('conf/creds.ini') # UPLOAD_FOLDER = 'uploads/' UPLOAD_FOLDER = config.get('uploads', 'UPLOAD_FOLDER') db_location=config.get('sqliteDB', 'database_folder') # Gets or creates a logger logger = logging.getLogger(__name__) # set log level logger.setLevel(logging.INFO) dirLogFolder = config.get('logs', 'LOGS_FOLDER') # Create target Directory if don't exist if not os.path.exists(dirLogFolder): os.mkdir(dirLogFolder) print("[*] Directory \'"+dirLogFolder+"\' Created ") else: print("[*] Directory \'"+dirLogFolder+"\' already exists") # define file handler and set formatter LOG_FILE = config.get('logs', 'LOGS_FOLDER')+'\\sample.log' file_handler = logging.FileHandler(LOG_FILE) formatter = logging.Formatter( '%(asctime)s | %(levelname)s | %(name)s | %(funcName)s | :%(lineno)s | %(message)s', datefmt='%y-%m-%d %H:%M:%S') file_handler.setFormatter(formatter) # %(filename)s:%(lineno)s - %(funcName)20s() # add file handler to logger logger.addHandler(file_handler) def updateriskconfig(onecolumn,twocolumns,threecolumns): config.set("riskconfigAny","onecolumn",onecolumn) config.set("riskconfigAny","twocolumns",twocolumns) config.set("riskconfigAny","threecolumns",threecolumns) with open('conf/creds.ini', 'w') as configfile: config.write(configfile) return('updated') def retrieveriskconfig(): onecolumn= config.get('riskconfigAny', 'onecolumn') twocolumns= config.get('riskconfigAny', 'twocolumns') threecolumns= config.get('riskconfigAny', 'threecolumns') insecureproto= config.get('riskconfigAny', 'insecureriskvalue') itoeriskvalue= config.get('riskconfigAny', 'itoeriskvalue') etoiriskvalue= config.get('riskconfigAny', 'etoiriskvalue') return(onecolumn,twocolumns,threecolumns,insecureproto,itoeriskvalue,etoiriskvalue) def segregateIandE(db_name): table_name="netobj" allrows = RawRuleslist.ReadSqlitenetobj(db_name,table_name) for x in allrows: idvalue = x['Name'] ipvalue=x['IPv4'] mask=x['Mask'] riskvalue='yes' if x['Mask']!='NA': print('NA is not there') cip = ipvalue+"/"+mask #ipnetwork=IPNetwork[cip] ipnetwork=IPNetwork(cip) ip=ipnetwork if "-" in ipvalue: print("- is there") ipranges = ipvalue.split('-') iprange =IPRange(ipranges[0].strip(),ipranges[1].strip()) ip=iprange else: print('NA is there') #ip=IPAddress[ipvalue] ip = IPAddress(ipvalue) #print(ip_address(var_121).is_private) if ip.is_private(): column_name="Internal" riskvalue="\'yes\'" id_column="Name" idvalue = "\'"+idvalue+"\'" RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue) else: column_name="External" riskvalue="\'yes\'" id_column="Name" idvalue = "\'"+idvalue+"\'" RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue) def segregateIntExtConn(db_name,table_name): try: allrows = RawRuleslist.ReadSqlite(db_name,table_name) sqlite_file = db_location+"\\\\"+db_name+'.db' value="\'yes\'" # Connecting to the database file conn = sqlite3.connect(sqlite_file) conn.row_factory = lambda cursor, row: row[0] c = conn.cursor() tablename2 = "netobj" colname2="External" #query to get rows 
which has External=yes c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename2, cn=colname2,val=value)) queryresult2 =c.fetchall() logger.info("queryresults") for x in allrows: idvalue=x['No'] if x['Action']=="Accept": individualsource1 = str(x['Source']).split(';') for xy in individualsource1: if xy in queryresult2: individualdestination1 = str(x['Destination']).split(';') for xz in individualdestination1: if xz in queryresult2: column_name="ExttoExt" riskvalue="\'yes\'" id_column="No" # idvalue = x['Name'] # idvalue = "\'"+idvalue+"\'" table_name=table_name RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue) break else: column_name="ExttoInt" riskvalue="\'yes\'" id_column="No" # idvalue = "\'"+idvalue+"\'" table_name=table_name RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue) else: individualdestination1 = str(x['Destination']).split(';') for xz in individualdestination1: if xz in queryresult2: column_name="InttoExt" riskvalue="\'yes\'" id_column="No" # idvalue = x['Name'] # idvalue = "\'"+idvalue+"\'" table_name=table_name RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue) else: column_name="InttoInt" riskvalue="\'yes\'" id_column="No" # idvalue = "\'"+idvalue+"\'" table_name=table_name RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue) except Exception as e: logger.exception("%s", e) def riskcalculator(db_name,table_name): try: segregateIandE(db_name) segregateIntExtConn(db_name,table_name) riskcalculator_parked(db_name,table_name) return { 'data': '', 'message': 'Risk updated!', 'status': 'success' } except Exception as e: logger.exception("%s", e) # class HeavyLifting(): def riskcalculator_parked(db_name,table_name): try: allrows = RawRuleslist.ReadSqlite(db_name,table_name) # if source, destination or service has any fields onecolumn= config.get('riskconfigAny', 'onecolumn') twocolumns= config.get('riskconfigAny', 'twocolumns') threecolumns= config.get('riskconfigAny', 'threecolumns') insecureriskvalue= config.get('riskconfigAny', 'insecureriskvalue') itoeriskvalue= config.get('riskconfigAny', 'itoeriskvalue') etoiriskvalue= config.get('riskconfigAny', 'etoiriskvalue') id_column = "No" sqlite_file = db_location+"\\\\"+db_name+'.db' value="\'yes\'" # Connecting to the database file conn = sqlite3.connect(sqlite_file) conn.row_factory = lambda cursor, row: row[0] c = conn.cursor() tablename = "services" colname="Insecure" #query to get rows which has insecure=yes c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname,val=value)) queryresult1 =c.fetchall() colname="ItoE" #query to get rows which has InttoExt=yes c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname,val=value)) queryresult2 =c.fetchall() colname="EtoI" #query to get rows which has ExttoInt=yes c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname,val=value)) queryresult3 =c.fetchall() for x in allrows: riskvalue = 0 riskreason = "" idvalue=x['No'] id_column = "No" #Any in columns- Risk assignment if x['Action']=="Accept": if (x['Source']=="Any" and x['Destination']=="Any" and x['Service']=="Any"): riskvalue=riskvalue+int(threecolumns) riskreason = riskreason+"1,-,"+"All three columns have Any "+","+str(threecolumns)+";" elif ((x['Source']=="Any" and x['Destination']=="Any") or (x['Destination']=="Any" and x['Service']=="Any") or ( x['Service']=="Any" and 
x['Source']=="Any")): riskvalue=riskvalue+int(twocolumns) riskreason = riskreason+"1,-,"+"Two columns have Any "+","+str(twocolumns)+";" elif (x['Source']=="Any" or x['Destination']=="Any" or x['Service']=="Any"): riskvalue=riskvalue+int(onecolumn) riskreason = riskreason+"1,-,"+"One column has Any"+","+str(onecolumn)+";" if x['Action']=="Accept": individualservice = str(x['Service']).split(';') '''Insecure protocols- Risk assignment''' for xy in individualservice: if xy in queryresult1: riskvalue=riskvalue+int(insecureriskvalue) riskreason = riskreason+"2"+","+xy+","+"Insecure proto"+","+str(insecureriskvalue)+";" column_name = "Risk" logger.info(riskreason) logger.info(riskvalue) RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue) column_name = "RiskReason" riskreason="\'"+riskreason+"\'" RawRuleslist.UpdateTable(db_name, table_name, column_name,riskreason, id_column, idvalue) colname="InttoExt" queryresult11 = RawRuleslist.ReadSqlitewSelected(db_name,table_name,colname) for x in queryresult11: riskvalue = x['Risk'] riskreason = x['RiskReason'] idvalue=x['No'] id_column = "No" #riskreason = riskreason.replace("'", "") if x['Action']=="Accept": individualservice = str(x['Service']).split(';') '''Internal to External connections- Risk assignment''' for xy in individualservice: if xy not in queryresult2: riskvalue=riskvalue+int(itoeriskvalue) riskreason = riskreason+"3"+","+xy+","+"Int to Ext conn - non approved"+","+str(itoeriskvalue)+";" column_name = "Risk" RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue) column_name = "RiskReason" riskreason="\'"+riskreason+"\'" RawRuleslist.UpdateTable(db_name, table_name, column_name,riskreason, id_column, idvalue) colname="ExttoInt" queryresult12 = RawRuleslist.ReadSqlitewSelected(db_name,table_name,colname) for x in queryresult12: id_column = "No" idvalue=x['No'] riskvalue = x['Risk'] riskreason = x['RiskReason'] #riskreason = riskreason.replace("'", "") if x['Action']=="Accept": individualservice = str(x['Service']).split(';') logger.info(individualservice) '''External to Internal connections- Risk assignment''' for xy in individualservice: if xy not in queryresult3: riskvalue=riskvalue+int(etoiriskvalue) riskreason = riskreason+"4"+","+xy+","+"Ext to Int conn - non approved"+","+str(etoiriskvalue)+";" column_name = "Risk" RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue) column_name = "RiskReason" riskreason="\'"+riskreason+"\'" RawRuleslist.UpdateTable(db_name, table_name, column_name,riskreason, id_column, idvalue) return { 'data': '', 'message': 'Risk updated!', 'status': 'success' } except Exception as e: logger.exception("%s", e) return { 'data': '', 'message': 'Some error occured', 'status': 'error' } def getselectrules(statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph2.run(statement).data() print(output) output1 = [] if (output==[]): finalgrouping =[] GrpNodes =[] logger.error("Error: Neo4j didnt return any output for the query") message = "Error: Neo4j didnt return any output for the query" status = 'error' print(message) else: output1.append(output) finalgrouping, GrpNodes = FinalGroupingv2(output1) message = "Query completed successfully" status = 'success' print(message) return(output1, finalgrouping, GrpNodes, message, status) def uploadwithcustquery(statement): user=config.get('neo4j', 'user') 
password=config.get('neo4j', 'passwd') graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph2.run(statement).stats() return(output) def getfwrulesneo4j( statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph2.run(statement).data() RawRuleslist.InsertTable(output) return (rules) def defaultrules( statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph2.run(statement).data() output1 = [] output1.append(output) finalgrouping, GrpNodes = FinalGroupingv2(output1) return(output1, finalgrouping, GrpNodes) def custquery( statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph.run(statement).data() id_Node = [] NoDup_id_Node = [] Created_rels = [] for rel in output: mi = re.compile("\([_]*(?P<grouping>[0-9]*)\)") mi_rels = re.compile("\)(?P<grouping>.*?)\(") mi_rels_only_name = re.compile("\).*?\[\:(?P<grouping>\w+)\s\{") tomatch = rel['r'] m = mi.findall(str(tomatch)) mi_rels_data = mi_rels.findall(str(tomatch)) mi_rels_data_only_name = mi_rels_only_name.findall(str(tomatch)) print("================mi_nodes_data==============") print(m) print("================mi_rels_data==============") # print(mi_rels_data) for one in m: id_rels = {} intone = int(one) ab = graph.nodes.get(intone) id_rels['id'] = one id_rels['id_prop'] = ab id_Node.append(id_rels) # print (ab) for x in id_Node: if x not in NoDup_id_Node: NoDup_id_Node.append(x) xx = 0 yy = 1 for i in mi_rels_data: if ">" in i: create_rel = {} # print("forward") print(i) for id in NoDup_id_Node: if m[xx] == id['id']: create_rel['s'] = id['id_prop'] # for id in NoDup_id_Node: if m[yy] == id['id']: create_rel['d'] = id['id_prop'] create_rel['r'] = mi_rels_data_only_name[xx] print("source: "+m[xx]+" destination: "+m[yy]) print(create_rel) Created_rels.append(create_rel) print( "======================Created_rels===========================") print(Created_rels) print( "======================Created_rels===========================") # break else: # print("backward") create_rel = {} print(i) for id in NoDup_id_Node: if m[yy] == id['id']: create_rel['s'] = id['id_prop'] # Source # for id in NoDup_id_Node: if m[xx] == id['id']: create_rel['d'] = id['id_prop'] # Destination # create_rel['r']="(_"+m[xx]+")"+i+"(_"+m[yy]+")" # print(i['name']) create_rel['r'] = mi_rels_data_only_name[xx] # create_rel['r']="(_"+m[xx]+")"+i+"(_"+m[yy]+")" print("source: "+m[yy]+" destination: "+m[xx]) print(create_rel) Created_rels.append(create_rel) print( "======================Created_rels===========================") print(Created_rels) print( "======================Created_rels===========================") # break xx += 1 yy += 1 print("=!@#====== Create rels ==========!@#=") print(Created_rels) output1 = [] output1.append(Created_rels) finalgrouping = FinalGrouping(output1) # print("################################################################3") # print(finalgrouping) return(output1, finalgrouping) def allRels(statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph.run(statement).data() return (output) def allGroups(statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 
'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph.run(statement).data() return (output) def CreateGroup( statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph.run(statement).data() return output def check( statement, checkip): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) output = graph.run(statement).data() Node = [] NameAndIP = [] NoDupNode = [] MatchNetwork = [] MatchNodes = [] print("Printing Output") print(output) for item in output: nodes_source = {} name_ip = {} nodes_source['Name'] = item['n']['Name'] nodes_source['IPAddress'] = item['n']['IPAddress'] nodes_source['Mask'] = item['n']['Mask'] nodes_source['Comments'] = item['n']['Comments'] Node.append(nodes_source) # & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0") if (item['n']['Mask'] != "NA"): cip = item['n']['IPAddress']+"/"+item['n']['Mask'] name_ip['Name'] = item['n']['Name'] name_ip['IPAddress'] = item['n']['IPAddress'] name_ip['Comments'] = item['n']['Comments'] name_ip['Network'] = cip NameAndIP.append(name_ip) # & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0") if (item['n']['Mask'] == "NA"): name_ip['Name'] = item['n']['Name'] name_ip['Network'] = item['n']['IPAddress'] name_ip['Comments'] = item['n']['Comments'] NameAndIP.append(name_ip) # tocheck_ip="194.127.24.66" # tocheck_ip="10.197.167.96" tocheck_ip = checkip # print("Printing tocheck_ip") # print(ip_network(tocheck_ip,strict=False)) # print("Printing NameAndIP") # print(NameAndIP) # print(checkip) MatchRel = [] for y in NameAndIP: if ("-" in y['Network']): # if m.group('IP_start')=="0.0.0.0" and m.group('IP_end')=="255.255.255.255": if y['Network'] == "0.0.0.0 - 255.255.255.255": MatchNetwork.append(y) else: ip_range_to_match = y['Network'] m = re.search( "^(?P<IP_start>.\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+\-\s+(?P<IP_end>.\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$", ip_range_to_match) iprange = IPRange(m.group('IP_start'), m.group('IP_end')) if tocheck_ip in iprange: MatchNetwork.append(y) else: # if (tocheck_ip == y['Network']): # MatchNetwork.append(y) # if not for strict=false, typeerror will be raised as "with hostbits set"! 
ab = ip_network(y['Network'], strict=False) if (IPv4Address(tocheck_ip) in IPv4Network(ab)): # print(y) #print(y['Name']) MatchNetwork.append(y) print("Printing MatchNetwork") print(MatchNetwork) for z in MatchNetwork: graph_z = Graph(password="myneo2") # statement="MATCH (s:Hosts {Name:'"+z['Name']+"'})-[r]-(d:Hosts) RETURN s,d,r" statement1 = "MATCH (s:Hosts)-[r]->(d:Hosts) WHERE s.Name='" + \ z['Name']+"' RETURN s,d,r" statement2 = "MATCH (s:Hosts)-[r]->(d:Hosts) WHERE d.Name='" + \ z['Name']+"' RETURN s,d,r" # MATCH p=(s:Hosts)-[r:"+searchterm+"]->(d:Hosts) RETURN s as source,d as target,r as service LIMIT 5 #to search print(statement1) output1 = graph_z.run(statement1).data() print(len(output1)) print(output1) print(statement2) output2 = graph_z.run(statement2).data() print(len(output2)) print(output2) print("Printing matched nodes relationships") # MatchNodes.append if output1 != []: MatchRel.append(output1) if output2 != []: MatchRel.append(output2) # out={} # out={"MatchNetwork":MatchNetwork} print("=====================================================================================") print(MatchRel) finalgrouping = FinalGrouping(MatchRel) print("finalgroupingtest1") print(finalgrouping) return(MatchRel, finalgrouping) def Convert_to_IP_Network( output): Node = [] NoDupNode = [] for item1 in output: for item in item1: nodes_source = {} nodes_target = {} name_ip_host = {} name_ip_net = {} # Assign the name of the node to ID nodes_source['Name'] = item['s']['Name'] nodes_source['IPAddress'] = item['s']['IPAddress'] nodes_source['Mask'] = item['s']['Mask'] nodes_source['Comments'] = item['s']['Comments'] if (item['s']['Mask'] == "NA"): nodes_source['Network'] = item['s']['IPAddress'] # & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0") if (item['s']['Mask'] != "NA"): cip = item['s']['IPAddress']+"/"+item['s']['Mask'] nodes_source['Network'] = cip # ab=ip_network(nodes_source['Network'],strict=False) # nodes_source['Network']=ab nodes_target['Name'] = item['d']['Name'] nodes_target['IPAddress'] = item['d']['IPAddress'] nodes_target['Mask'] = item['d']['Mask'] nodes_target['Comments'] = item['d']['Comments'] if (item['d']['Mask'] == "NA"): nodes_target['Network'] = item['d']['IPAddress'] # & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0") if (item['d']['Mask'] != "NA"): cip = item['d']['IPAddress']+"/"+item['d']['Mask'] nodes_target['Network'] = cip cd = ip_network(nodes_target['Network'], strict=False) nodes_target['Network'] = cd Node.append(nodes_source) Node.append(nodes_target) for x in Node: if x not in NoDupNode: NoDupNode.append(x) return NoDupNode def FinalGrouping( finalarray): print("Printing final array") NameAndIP = Convert_to_IP_Network(finalarray) print(NameAndIP) ParentChild = [] NoDupParentChild = [] graph = Graph(password="myneo2") statement = "MERGE (d:Groups) RETURN d" # fetch the source, target and relationship details Grouping = graph.run(statement).data() number_of_colors = len(Grouping) Groups = [] for rot in range(number_of_colors): grp = {} grp['Name'] = Grouping[rot]['d']['Name'] grp['IPAddress'] = Grouping[rot]['d']['IPAddress'] grp['color'] = Grouping[rot]['d']['color'] Groups.append(grp) print(Groups) for y in NameAndIP: for Group in Groups: ab = ip_network(y['Network'], strict=False) print(IPv4Network(ab)) print(IPv4Network(Group['IPAddress'])) c = ip_network(IPv4Network(ab), strict=False) d = ip_network(IPv4Network(Group['IPAddress']), strict=False) # if IPv4Network(ab) in 
IPv4Network(Group['d']['IPAddress']): if c.subnet_of(d): par_child = {} print(IPv4Network(ab)) print(IPv4Network(Group['IPAddress'])) # par_child="sdsadsa" par_child['ChildName'] = y['Name'] par_child['ParentName'] = Group['Name'] par_child['Parent_IP'] = Group['IPAddress'] par_child['color'] = Group['color'] ParentChild.append(par_child) for x in ParentChild: if x not in NoDupParentChild: NoDupParentChild.append(x) print("==============Printing NoDupParentChild===============") return (NoDupParentChild) def FinalGroupingv2( finalarray): try: print("================Printing final array==================") NameAndIP = Convert_to_IP_Network(finalarray) # print(NameAndIP) ParentChild = [] NoDupParentChild = [] user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) # graph = Graph(password="myneo2") statement = "MERGE (d:Groups) RETURN d" # fetch the source, target and relationship details Grouping = graph.run(statement).data() number_of_colors = len(Grouping) Groups = [] AllIPNetwork = [] for rot in range(number_of_colors): grp = {} grp['Name'] = Grouping[rot]['d']['Name'] grp['IPAddress'] = Grouping[rot]['d']['IPAddress'] grp['color'] = Grouping[rot]['d']['color'] Groups.append(grp) # if not for strict=false, typeerror will be raised as "with hostbits set"! ab = ip_network(Grouping[rot]['d']['IPAddress'], strict=False) AllIPNetwork.append(ab) arrangedones = ArrangeNodesv2(Grouping) #arrangedones = self.ArrangeNodes(AllIPNetwork) print("===================== Printing the arranged ones =============") print(arrangedones) print("===================== Printed the arranged ones =============") GrpParChd = [] # this is to access the pair {depth0:[xx]} for evry in arrangedones: print(evry) # key values # this is to access the array in values of key/value pairs for evry2 in arrangedones[evry]: print(evry2) x = len(evry2)-1 print(x) # no of elements in values array while (x >= 0): grpparchild = {} if (x == 0): y = x print(x, evry2[x], y, evry2[y]) print(evry2[x], " is subnet of ", evry2[y]) grpparchild[evry2[x]] = evry2[y] GrpParChd.append(grpparchild) else: y = x-1 print(x, evry2[x], y, evry2[y]) while (y >= 0): if (evry2[x].subnet_of(evry2[y])): print(evry2[x], " is subnet of ", evry2[y]) grpparchild[evry2[x]] = evry2[y] GrpParChd.append(grpparchild) break y -= 1 x -= 1 print("========== Parent Child pair in Groups=======") print(GrpParChd) GrpNodes = [] for Group in Groups: d = ip_network(Group['IPAddress'], strict=False) grp_item = {} # print("========== Printing only keys in Groups=======") # print(k) for evrypair in GrpParChd: for k, v in evrypair.items(): # for k,v in list(a.items(): if(d == k): print("&&&&&&&&&&&& Comparing &&&&&&&&&&&&") print(d, k, v) for grpk in Group.keys(): grp_item[grpk] = Group[grpk] grp_item['id'] = Group['Name'] grp_item['isgrp'] = "true" print( "!!!!!!!!!!!!!!!! 
Key-value pairs so far !!!!!!!!!!!!!!!!11") print(grp_item) for Grouppar in Groups: print( "*****************All values from groups********************") print(Grouppar) d_par = ip_network( Grouppar['IPAddress'], strict=False) if(d_par == v): print( "*****************Entered into matched parent group********************") print(d_par, v) grp_item['parent'] = Grouppar['Name'] GrpNodes.append(grp_item) print( "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Nodes format for groups^^^^^^^^^^^^^^^^") logger.info("GrpNodes:") logger.info(GrpNodes) print(GrpNodes) print("========== Parent Child pair in Groups (reverse)=======") #GrpParChdreverse = GrpParChd.reverse() print(list(reversed(GrpParChd))) print("========== Print Groups=======") print(Groups) print("========== All IP Network=======") print(AllIPNetwork) ip_list_sorted = sorted(AllIPNetwork) SortedReversedAllIPNetwork = list(reversed(ip_list_sorted)) print("================Printing SortedReversedAllIPNetwork===============") print(SortedReversedAllIPNetwork) for y in NameAndIP: ab = ip_network(y['Network'], strict=False) c = ip_network(IPv4Network(ab), strict=False) for matchsortedIpnetwork in SortedReversedAllIPNetwork: e = ip_network(IPv4Network( matchsortedIpnetwork), strict=False) if c.subnet_of(e): # print(c,e) i = 0 while(i < len(Groups)): #d = ip_network(IPv4Network(Groups[i]['IPAddress']), strict=False) d = ip_network( Groups[i]['IPAddress'], strict=False) #print(d, " ; ",e) if (d == e): print(d, e, i) print(Groups[i]['Name'], Groups[i]['color']) par_child = {} par_child['ChildName'] = y['Name'] par_child['ParentName'] = Groups[i]['Name'] par_child['Parent_IP'] = Groups[i]['IPAddress'] par_child['color'] = Groups[i]['color'] ParentChild.append(par_child) i += 1 # node_any = {} # node_any['Name'] = "Any" # node_any['color'] = "#ffff80" # ParentChild.append(node_any) for x in ParentChild: if x not in NoDupParentChild: NoDupParentChild.append(x) print("==============Printing NoDupParentChild===============") print(NoDupParentChild) return (NoDupParentChild, GrpNodes) print("========== End of new attempt=======") except Exception as e: logger.exception("%s", e) def groupheirarchy( statement): try: user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) Grouping = graph.run(statement).data() number_of_colors = len(Grouping) Groups = [] AllIPNetwork = [] for rot in range(number_of_colors): grp = {} grp['Name'] = Grouping[rot]['d']['Name'] grp['IPAddress'] = Grouping[rot]['d']['IPAddress'] grp['color'] = Grouping[rot]['d']['color'] Groups.append(grp) # if not for strict=false, typeerror will be raised as "with hostbits set"! 
ab = ip_network(Grouping[rot]['d']['IPAddress'], strict=False) AllIPNetwork.append(ab) arrangedones = ArrangeNodesv2(Grouping) GrpParChd = [] # this is to access the pair {depth0:[xx]} for evry in arrangedones: print(evry) # key values # this is to access the array in values of key/value pairs for evry2 in arrangedones[evry]: print(evry2) x = len(evry2)-1 print(x) # no of elements in values array while (x >= 0): grpparchild = {} if (x == 0): y = x print(x, evry2[x], y, evry2[y]) print(evry2[x], " is subnet of ", evry2[y]) grpparchild[evry2[x]] = evry2[y] GrpParChd.append(grpparchild) else: y = x-1 print(x, evry2[x], y, evry2[y]) while (y >= 0): if (evry2[x].subnet_of(evry2[y])): print(evry2[x], " is subnet of ", evry2[y]) grpparchild[evry2[x]] = evry2[y] GrpParChd.append(grpparchild) break y -= 1 x -= 1 print("========== Parent Child pair in Groups=======") print(GrpParChd) GrpNodes = [] for Group in Groups: d = ip_network(Group['IPAddress'], strict=False) grp_item = {} # print("========== Printing only keys in Groups=======") # print(k) for evrypair in GrpParChd: for k, v in evrypair.items(): # for k,v in list(a.items(): if(d == k): print("&&&&&&&&&&&& Comparing &&&&&&&&&&&&") print(d, k, v) for grpk in Group.keys(): grp_item[grpk] = Group[grpk] grp_item['id'] = Group['Name'] grp_item['label'] = Group['Name'] +" ("+ Group['IPAddress']+")" grp_item['isgrp'] = "true" print( "!!!!!!!!!!!!!!!! Key-value pairs so far !!!!!!!!!!!!!!!!11") print(grp_item) for Grouppar in Groups: print( "*****************All values from groups********************") print(Grouppar) d_par = ip_network( Grouppar['IPAddress'], strict=False) if(d_par == v): print( "*****************Entered into matched parent group********************") print(d_par, v) # grp_item['parent'] = Grouppar['Name'] GrpNodes.append(grp_item) print( "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Nodes format for groups^^^^^^^^^^^^^^^^") print(GrpNodes) Node = [] NoDupNode = [] for eve in GrpNodes: print("+++++++++++++++++++ Printing every nodes in groups+++++++++++") print(eve) nodes_datawrapper_grp = {} nodes_datawrapper_grp['data'] = eve Node.append(nodes_datawrapper_grp) for x in Node: if x not in NoDupNode: NoDupNode.append(x) print("========== Parent Child pair in Groups (reverse)=======") Link = [] for evrypair in GrpParChd: for k, v in evrypair.items(): # for k,v in list(a.items(): if (k!=v): links_datawrapper = {} grp_link = {} for Group in Groups: d = ip_network(Group['IPAddress'], strict=False) if (d==k): grp_link['target'] = Group['Name'] if (d==v): grp_link['source'] = Group['Name'] # Group_Link.append(grp_link) links_datawrapper['data'] = grp_link Link.append(links_datawrapper) print("========================== Built the LInk ==================") print(Link) print("========== Print Groups=======") print(Groups) print("========== All IP Network=======") print(AllIPNetwork) ip_list_sorted = sorted(AllIPNetwork) SortedReversedAllIPNetwork = list(reversed(ip_list_sorted)) print("================Printing SortedReversedAllIPNetwork===============") print(SortedReversedAllIPNetwork) print("==============Printing NoDupParentChild===============") print (NoDupNode, Link) return (NoDupNode, Link) print("========== End of new attempt=======") except Exception as e: logger.exception("%s", e) def ArrangeNodesv2( newlist): ax = ip_network('10.0.0.0/8', strict=False) b = ip_network('192.168.4.0/25', strict=False) c = ip_network('192.168.9.0/25', strict=False) # 192.168.10.0/22 is considered as 192.168.8.0/22 #CHECKTHIS d = ip_network('10.0.0.0/8', 
strict=False) e = ip_network('192.168.9.0/26', strict=False) f = ip_network('192.168.9.0/24', strict=False) #newlist = self.GetAllNodes(statement) ip_list = newlist print("===========================printing the incoming groups array===========================") print(ip_list) AllIPNetwork = [] for rotate in ip_list: # if not for strict=false, typeerror will be raised as "with hostbits set"! ab = ip_network(rotate['d']['IPAddress'], strict=False) AllIPNetwork.append(ab) ip_list_sorted = sorted(AllIPNetwork) print("===========================printing the SORTED incoming groups array===========================") print(ip_list_sorted) x = 0 y = 0 z = 0 a = dict() depth_z = [] firstentry = 1 firstfirstentry = 1 index = x while y < len(ip_list_sorted): ipx = ip_network((ip_list_sorted[x]), strict=False) #this uses ipaddress module ipy = ip_network((ip_list_sorted[y]), strict=False) ipindex = ip_network((ip_list_sorted[index]), strict=False) print("ipindex: ", ipindex, " ipx: ", ipx, " ipy: ", ipy) if ipy.subnet_of(ipx): print("it is a subnet") depth_z.append(ip_list_sorted[y]) if y == len(ip_list_sorted)-1: a["depth_"+str(z)] = [] a["depth_"+str(z)].append(depth_z) z += 1 # x=y # print(x) if not ipy.subnet_of(ipx): if ipy.subnet_of(ipindex): print("it is not a subnet") x = y print(x) if not ipy.subnet_of(ipindex): a["depth_"+str(z)] = [] a["depth_"+str(z)].append(depth_z) z += 1 depth_z = [] print( "it is not a subnet of ipx and ipindex, so adding a new entry to the depth_z") depth_z.append(ip_list_sorted[y]) index = y x = y if y == len(ip_list_sorted)-1: a["depth_"+str(z)] = [] a["depth_"+str(z)].append(depth_z) z += 1 y += 1 print("===================== a ====") print(a) return (a) def GetAllNodes( statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) # graph = Graph(password="myneo2") output = graph.run(statement).data() Node = [] NameAndIP = [] NoDupNode = [] AllIPNetwork = [] MatchNodes = [] print("========================================================================================================") print(output) for item in output: nodes_source = {} name_ip = {} nodes_source['Name'] = item['n']['Name'] nodes_source['IPAddress'] = item['n']['IPAddress'] nodes_source['Mask'] = item['n']['Mask'] nodes_source['Comments'] = item['n']['Comments'] Node.append(nodes_source) # & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0") if (item['n']['Mask'] != "NA"): cip = item['n']['IPAddress']+"/"+item['n']['Mask'] name_ip['Name'] = item['n']['Name'] name_ip['Network'] = cip NameAndIP.append(name_ip) for y in NameAndIP: # if not for strict=false, typeerror will be raised as "with hostbits set"! ab = ip_network(y['Network'], strict=False) AllIPNetwork.append(ab) AllIPNetwork.sort() out = {} out = {"NameAndIP": AllIPNetwork} # newhelo ="helo" # return(AllIPNetwork) return(out) def GetRelationshipFromNeo4jv3( statement): user=config.get('neo4j', 'user') password=config.get('neo4j', 'passwd') graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password)) # graph = Graph(password="myneo2") # fetch the source, target and relationship details print(statement) output = graph.run(statement).data() output1 = [] output1.append(output) finalgrouping, GrpNodes = FinalGroupingv2(output1) # print("################################################################3") # print(finalgrouping) return(output1, finalgrouping, GrpNodes)
avg_line_length: 40.345946
max_line_length: 174
alphanum_fraction: 0.49741
qsc_code_num_words_quality_signal: 4469
qsc_code_num_chars_quality_signal: 44784
qsc_code_mean_word_length_quality_signal: 4.882524
qsc_code_frac_words_unique_quality_signal: 0.104274
qsc_code_frac_chars_top_2grams_quality_signal: 0.022686
qsc_code_frac_chars_top_3grams_quality_signal: 0.025023
qsc_code_frac_chars_top_4grams_quality_signal: 0.015811
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.634693
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.59583
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.572869
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.539459
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.521265
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.498671
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.0155
qsc_code_frac_chars_whitespace_quality_signal: 0.335901
qsc_code_size_file_byte_quality_signal: 44784
qsc_code_num_lines_quality_signal: 1109
qsc_code_num_chars_line_max_quality_signal: 175
qsc_code_num_chars_line_mean_quality_signal: 40.382326
qsc_code_frac_chars_alphabet_quality_signal: 0.718167
qsc_code_frac_chars_comments_quality_signal: 0.093404
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.600454
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0.00227
qsc_code_frac_chars_string_length_quality_signal: 0.161986
qsc_code_frac_chars_long_word_length_quality_signal: 0.039203
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.024972
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0.031782
qsc_codepython_frac_lines_import_quality_signal: 0.019296
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.057889
qsc_codepython_frac_lines_print_quality_signal: 0.148695
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 1
hits: 0
hexsha: f13ba3b7c19b139da995ed439ad5c64bd1341cb8
size: 3020
ext: py
lang: Python
max_stars_repo_path: TestSlice6.py
max_stars_repo_name: TecnicoSSof/Software-Security
max_stars_repo_head_hexsha: 9c31f5f59a1d1c21c2c9876d09bbbd9823d96357
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: TestSlice6.py
max_issues_repo_name: TecnicoSSof/Software-Security
max_issues_repo_head_hexsha: 9c31f5f59a1d1c21c2c9876d09bbbd9823d96357
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: TestSlice6.py
max_forks_repo_name: TecnicoSSof/Software-Security
max_forks_repo_head_hexsha: 9c31f5f59a1d1c21c2c9876d09bbbd9823d96357
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
    import os
    import unittest
    from searcher.Vulnerability import Vulnerability
    from searcher.Searcher import Searcher
    import json
    from static_analyzer import file_get_contents

    class TestSlice6(unittest.TestCase):
        def test_rules(self):
            parsed_snippet = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/slice6.json"))
            parsed_rules = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/rules.json"))
            output = open(os.getcwd() + "/tests/slice6/slice6_rules.out", "r")
            vulnerabilities = Vulnerability.build_vulnerabilities(parsed_rules)
            s = Searcher(parsed_snippet['body'], vulnerabilities)
            self.assertEqual(s.get_vulnerabilities_str(), output.read(), "Should be equal")
            output.close()

        def test_rules2(self):
            parsed_snippet = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/slice6.json"))
            parsed_rules = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/rules2.json"))
            output = open(os.getcwd() + "/tests/slice6/slice6_rules2.out", "r")
            vulnerabilities = Vulnerability.build_vulnerabilities(parsed_rules)
            s = Searcher(parsed_snippet['body'], vulnerabilities)
            self.assertEqual(s.get_vulnerabilities_str(), output.read(), "Should be equal")
            output.close()

        def test_rules3(self):
            parsed_snippet = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/slice6.json"))
            parsed_rules = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/rules3.json"))
            output = open(os.getcwd() + "/tests/slice6/slice6_rules3.out", "r")
            vulnerabilities = Vulnerability.build_vulnerabilities(parsed_rules)
            s = Searcher(parsed_snippet['body'], vulnerabilities)
            self.assertEqual(s.get_vulnerabilities_str(), output.read(), "Should be equal")
            output.close()

        def test_rulesNoVuln(self):
            parsed_snippet = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/slice6.json"))
            parsed_rules = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/rulesNoVuln.json"))
            output = open(os.getcwd() + "/tests/slice6/slice6_rulesNoVuln.out", "r")
            vulnerabilities = Vulnerability.build_vulnerabilities(parsed_rules)
            s = Searcher(parsed_snippet['body'], vulnerabilities)
            self.assertEqual(s.get_vulnerabilities_str(), output.read(), "Should be equal")
            output.close()

        def test_rulesSanit(self):
            parsed_snippet = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/slice6.json"))
            parsed_rules = json.loads(file_get_contents(os.getcwd() + "/tests/slice6/rulesSanit.json"))
            output = open(os.getcwd() + "/tests/slice6/slice6_rulesSanit.out", "r")
            vulnerabilities = Vulnerability.build_vulnerabilities(parsed_rules)
            s = Searcher(parsed_snippet['body'], vulnerabilities)
            self.assertEqual(s.get_vulnerabilities_str(), output.read(), "Should be equal")
            output.close()

    if __name__ == '__main__':
        unittest.main()
avg_line_length: 50.333333
max_line_length: 100
alphanum_fraction: 0.700993
qsc_code_num_words_quality_signal: 357
qsc_code_num_chars_quality_signal: 3020
qsc_code_mean_word_length_quality_signal: 5.717087
qsc_code_frac_words_unique_quality_signal: 0.131653
qsc_code_frac_chars_top_2grams_quality_signal: 0.058795
qsc_code_frac_chars_top_3grams_quality_signal: 0.095541
qsc_code_frac_chars_top_4grams_quality_signal: 0.139637
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.841744
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.841744
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.841744
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.841744
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.746203
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.746203
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.012668
qsc_code_frac_chars_whitespace_quality_signal: 0.163576
qsc_code_size_file_byte_quality_signal: 3020
qsc_code_num_lines_quality_signal: 59
qsc_code_num_chars_line_max_quality_signal: 101
qsc_code_num_chars_line_mean_quality_signal: 51.186441
qsc_code_frac_chars_alphabet_quality_signal: 0.795329
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.510204
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.175166
qsc_code_frac_chars_long_word_length_quality_signal: 0.139404
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0.102041
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.102041
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.122449
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.244898
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 1
qsc_code_frac_chars_dupe_6grams: 1
qsc_code_frac_chars_dupe_7grams: 1
qsc_code_frac_chars_dupe_8grams: 1
qsc_code_frac_chars_dupe_9grams: 1
qsc_code_frac_chars_dupe_10grams: 1
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 6
f13c81c52509028ac8d8c36865997afd83cbf32b
367
py
Python
src/final_exam/q_survey/histogram_main.py
acc-cosc-1336/cosc-1336-spring-2018-jjmareck
7abfd79cb9a63192c965f828a185ccd981820bae
[ "MIT" ]
null
null
null
src/final_exam/q_survey/histogram_main.py
acc-cosc-1336/cosc-1336-spring-2018-jjmareck
7abfd79cb9a63192c965f828a185ccd981820bae
[ "MIT" ]
null
null
null
src/final_exam/q_survey/histogram_main.py
acc-cosc-1336/cosc-1336-spring-2018-jjmareck
7abfd79cb9a63192c965f828a185ccd981820bae
[ "MIT" ]
null
null
null
from histogram import display_histogram


def main():
    # Read three survey response lines and display each as a histogram.
    infile = open('survey.dat', 'r')
    line1 = infile.readline().split()
    line2 = infile.readline().split()
    line3 = infile.readline().split()
    infile.close()
    display_histogram(line1)
    print('')
    display_histogram(line2)
    print('')
    display_histogram(line3)


main()
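The display_histogram helper imported above is not part of this record; a minimal sketch consistent with how main() calls it (a list of numeric strings, one bar per entry) could be:

# Hypothetical stand-in for histogram.display_histogram; the real module may differ.
def display_histogram(values):
    for v in values:
        print('*' * int(v))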
19.315789
40
0.615804
38
367
5.842105
0.473684
0.288288
0.256757
0
0
0
0
0
0
0
0
0.021583
0.242507
367
18
41
20.388889
0.776978
0
0
0.153846
0
0
0.031519
0
0
0
0
0
0
1
0.076923
false
0
0.076923
0
0.153846
0.153846
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
f13d9608f48b0c24994ffc9597596e461e4ed4e2
1,632
py
Python
mysite/home/models.py
Rudancy/My_Business
f2c5f51e697214a2699b40d37f206307d19af84b
[ "MIT" ]
null
null
null
mysite/home/models.py
Rudancy/My_Business
f2c5f51e697214a2699b40d37f206307d19af84b
[ "MIT" ]
null
null
null
mysite/home/models.py
Rudancy/My_Business
f2c5f51e697214a2699b40d37f206307d19af84b
[ "MIT" ]
null
null
null
from django.db import models


# Create your models here.
class home_page(models.Model):
    masthead_image = models.ImageField(upload_to='static/images', default='', blank=True)
    masthead = models.CharField(max_length=30, default='', blank=True)
    description = models.TextField(max_length=200, default='', blank=True)
    icon_1_header = models.CharField(max_length=30, default='', blank=True)
    icon_1_description = models.TextField(max_length=30, default='', blank=True)
    icon_2_header = models.CharField(max_length=30, default='', blank=True)
    icon_2_description = models.TextField(max_length=30, default='', blank=True)
    icon_3_header = models.CharField(max_length=30, default='', blank=True)
    icon_3_description = models.TextField(max_length=30, default='', blank=True)
    showcase_1_header = models.CharField(max_length=30, default='', blank=True)
    showcase_1_image = models.ImageField(upload_to='static/images', default='', blank=True)
    showcase_1_description = models.TextField(max_length=700, default='', blank=True)
    showcase_2_header = models.CharField(max_length=30, default='', blank=True)
    showcase_2_image = models.ImageField(upload_to='static/images', default='', blank=True)
    showcase_2_description = models.TextField(max_length=700, default='', blank=True)
    showcase_3_header = models.CharField(max_length=30, default='', blank=True)
    showcase_3_image = models.ImageField(upload_to='static/images', default='', blank=True)
    showcase_3_description = models.TextField(max_length=700, default='', blank=True)

    def __str__(self):
        return self.masthead
58.285714
91
0.741422
217
1,632
5.327189
0.184332
0.186851
0.249135
0.155709
0.900519
0.851211
0.847751
0.847751
0.801903
0.709343
0
0.032844
0.123162
1,632
27
92
60.444444
0.774983
0.014706
0
0
0
0
0.032379
0
0
0
0
0
0
1
0.045455
false
0
0.045455
0.045455
1
0
0
0
0
null
0
1
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
8
f13dfa24c8f93e4de3743cfda3f5e390d7c85332
2,613
py
Python
falconRaspberry/vehicle/engine.py
omarsy/Falcon
ef762e4ec2affa2873e30d8b5d59d8897d422597
[ "MIT" ]
1
2019-04-21T20:41:04.000Z
2019-04-21T20:41:04.000Z
falconRaspberry/vehicle/engine.py
MessasKouseila/falcon
8b6ba22797877d7c7613c772a2a1f91d234f7dec
[ "MIT" ]
null
null
null
falconRaspberry/vehicle/engine.py
MessasKouseila/falcon
8b6ba22797877d7c7613c772a2a1f91d234f7dec
[ "MIT" ]
null
null
null
import RPi.GPIO as GPIO


class Engine:
    FREQUENCY = 100
    BOOT_FEQUENCY = 10

    def __init__(self, high, mid, low):
        self.high = high
        self.mid = mid
        self.low = low
        self.pwmHigh = None
        self.pwmMid = None
        self.pwmLow = None

    def getHigh(self):
        return self.high

    def getMid(self):
        return self.mid

    def getLow(self):
        return self.low

    def makeOut(self, pin):
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(pin, GPIO.OUT)

    def makeHighOut(self):
        self.makeOut(self.high)

    def makeMidOut(self):
        self.makeOut(self.mid)

    def makeLowOut(self):
        self.pwmLow = self.makeOut(self.low)

    def makeAllOut(self):
        self.makeHighOut()
        self.makeMidOut()
        self.makeLowOut()

    def enableOutPin(self, pin):
        GPIO.setmode(GPIO.BOARD)
        self.makeOut(pin)
        GPIO.output(pin, GPIO.HIGH)
        return GPIO.PWM(pin, Engine.FREQUENCY)

    def disableOutPin(self, pin):
        GPIO.setmode(GPIO.BOARD)
        self.makeOut(pin)
        GPIO.output(pin, GPIO.LOW)

    def stopMid(self):
        if self.pwmMid is not None:
            self.pwmMid.stop()
            self.pwmMid = None

    def stopHigh(self):
        if self.pwmHigh is not None:
            self.pwmHigh.stop()
            self.pwmHigh = None

    def stopLow(self):
        if self.pwmLow is not None:
            self.pwmLow.stop()
            self.pwmLow = None

    def enableOutHigh(self):
        self.stopHigh()
        self.pwmHigh = self.enableOutPin(self.high)

    def enableOutMid(self):
        self.stopMid()
        self.pwmMid = self.enableOutPin(self.mid)

    def enableOutLow(self):
        self.stopLow()
        self.pwmLow = self.enableOutPin(self.low)

    def disableOutHigh(self):
        self.stopHigh()
        self.disableOutPin(self.high)

    def disableOutMid(self):
        self.stopMid()
        self.disableOutPin(self.mid)

    def disableOutLow(self):
        self.stopLow()
        self.disableOutPin(self.low)

    def accelerate(self, frequency=None):
        if frequency is not None:
            self.pwmHigh.ChangeDutyCycle(frequency)
            self.pwmLow.ChangeDutyCycle(frequency)
        else:
            self.enableOutHigh()
            self.enableOutLow()
            self.disableOutMid()
            self.pwmHigh.start(Engine.BOOT_FEQUENCY)
            self.pwmLow.start(Engine.BOOT_FEQUENCY)

    def reverse(self, frequency=None):
        if frequency is not None:
            self.pwmHigh.ChangeDutyCycle(frequency)
            self.pwmMid.ChangeDutyCycle(frequency)
        else:
            self.enableOutHigh()
            self.enableOutMid()
            self.disableOutLow()
            self.pwmHigh.start(Engine.BOOT_FEQUENCY)
            self.pwmMid.start(Engine.BOOT_FEQUENCY)

    def brake(self):
        self.enableOutHigh()
        self.enableOutMid()
        self.enableOutLow()
        self.pwmHigh.start(Engine.FREQUENCY)
        self.pwmMid.start(Engine.FREQUENCY)
        self.pwmLow.start(Engine.FREQUENCY)

    def disableEngine(self):
        self.disableOutHigh()
        self.disableOutMid()
        self.disableOutLow()
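A short driving sequence for the class above; the pin numbers are illustrative assumptions, and the sketch only runs on a Raspberry Pi with RPi.GPIO available:

engine = Engine(high=11, mid=13, low=15)  # hypothetical BOARD pin numbers
engine.makeAllOut()
engine.accelerate()      # enables high/low pins and starts PWM at BOOT_FEQUENCY
engine.accelerate(50)    # then ramps the duty cycle to 50%
engine.brake()
engine.disableEngine()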
24.885714
45
0.722924
339
2,613
5.545723
0.168142
0.051064
0.023404
0.048936
0.3
0.244149
0.17766
0.137234
0.137234
0.137234
0
0.002248
0.148871
2,613
104
46
25.125
0.843076
0
0
0.36
0
0
0
0
0
0
0
0
0
1
0.24
false
0
0.01
0.03
0.32
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
f13e7b6e0e1bf6512e5b87efce7ea8384bfd3c4d
587
py
Python
dtt/kubectl/pod.py
ymizushi/dtt
a7be7466bcda9644594394ab8c16e794f514b15c
[ "MIT" ]
3
2019-09-28T02:01:17.000Z
2020-05-23T06:27:57.000Z
dtt/kubectl/pod.py
ymizushi/dtt
a7be7466bcda9644594394ab8c16e794f514b15c
[ "MIT" ]
11
2019-07-26T12:30:47.000Z
2019-08-06T13:45:10.000Z
dtt/kubectl/pod.py
ymizushi/dtt
a7be7466bcda9644594394ab8c16e794f514b15c
[ "MIT" ]
null
null
null
class Pods:
    def __init__(self, pods):
        self._index = 0
        self._pods = pods.items
        self._metadata = pods.metadata

    @property
    def index(self):
        return self._index

    @property
    def current_pod(self):
        return self._pods[self._index]

    @property
    def list(self):
        return self._pods

    def set_index(self, index):
        self._index = index

    def add_index(self):
        if self._index + 1 < len(self._pods):
            self._index += 1

    def sub_index(self):
        if 0 <= self._index - 1:
            self._index -= 1
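Pods expects an object with .items and .metadata, matching what the official kubernetes Python client returns; feeding it that way is an assumption about the surrounding dtt code, not part of this record:

from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()
pods = Pods(v1.list_pod_for_all_namespaces())  # V1PodList has .items and .metadata
pods.add_index()                               # cursor moves, clamped to len(pods.list)
print(pods.current_pod.metadata.name)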
24.458333
45
0.565588
75
587
4.146667
0.266667
0.26045
0.128617
0.163987
0
0
0
0
0
0
0
0.015267
0.330494
587
23
46
25.521739
0.776081
0
0
0.136364
0
0
0
0
0
0
0
0
0
1
0.318182
false
0
0
0.136364
0.5
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
f140e6306cd4b9c5bc97fbd6d7c36b197a7b90cf
3,158
py
Python
apps/reports/views.py
AnimeshRy/gymrocket
c15aed03607f57a7fb01facb826f82d77e0332b7
[ "MIT" ]
4
2021-03-15T12:08:51.000Z
2022-03-30T14:48:16.000Z
apps/reports/views.py
AnimeshRy/gymrocket
c15aed03607f57a7fb01facb826f82d77e0332b7
[ "MIT" ]
null
null
null
apps/reports/views.py
AnimeshRy/gymrocket
c15aed03607f57a7fb01facb826f82d77e0332b7
[ "MIT" ]
null
null
null
from django.shortcuts import render, redirect
from django.http import HttpResponse
import csv
from apps.members.models import Member
from .models import GenerateReportForm
from django.db.models import Q
from django.contrib.auth.decorators import login_required


def export_all(user_obj):
    # Generate a users.csv file with the report data produced by the query
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="users.csv"'
    writer = csv.writer(response)
    writer.writerow(['First name', 'Last name', 'DOB', 'Mobile',
                     'Admission Date', 'Subscription Type', 'Batch'])
    # print(user_obj)
    members = user_obj.values_list('first_name', 'last_name', 'dob', 'mobile_number',
                                   'admitted_on', 'subscription_type', 'batch')
    # print(members)
    for user in members:
        writer.writerow(user)
    return response


@login_required
# Export a single user's data in CSV format; called from the user profile view
def export_single(request, pk):
    member = Member.objects.filter(pk=pk)
    return export_all(member)


@login_required
def reports(request):
    """
    Generate reports according to year, month and batch
    Year Range = 2020 - Current Year + 5 (Changed in Model)
    Month Range - 1 - 12
    Batch - Mor, Eve & Both
    export_all function is used to generate reports in .CSV (Excel Formats)
    """
    if request.method == 'POST':
        form = GenerateReportForm(request.POST)
        if form.is_valid():
            if request.POST.get('month') and request.POST.get('year') and request.POST.get('batch'):
                query = Q(
                    registration_date__month=request.POST.get('month'),
                    registration_date__year=request.POST.get('year'),
                    batch=request.POST.get('batch')
                )
            elif request.POST.get('month') and request.POST.get('year'):
                query = Q(
                    registration_date__month=request.POST.get('month'),
                    registration_date__year=request.POST.get('year')
                )
            elif request.POST.get('month') and request.POST.get('batch'):
                query = Q(
                    registration_date__month=request.POST.get('month'),
                    batch=request.POST.get('batch')
                )
            elif request.POST.get('year') and request.POST.get('batch'):
                query = Q(
                    registration_date__year=request.POST.get('year'),
                    batch=request.POST.get('batch')
                )
            else:
                query = Q(
                    registration_date__year=request.POST.get('year'),
                )
            users = Member.objects.filter(query)
            if 'export' in request.POST:
                return export_all(users)
            context = {
                'users': users,
                'form': form,
            }
            return render(request, 'reports/export.html', context)
    else:
        form = GenerateReportForm()
    return render(request, 'reports/export.html', {'form': form})
38.512195
100
0.58993
353
3,158
5.164306
0.294618
0.126714
0.145913
0.069117
0.380143
0.380143
0.312123
0.312123
0.312123
0.223807
0
0.003617
0.299557
3,158
81
101
38.987654
0.820524
0.121596
0
0.285714
1
0
0.123952
0
0
0
0
0
0
1
0.047619
false
0
0.111111
0
0.238095
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
f1418f6a18565d695a2a58d62afc3f91de80db3a
401
py
Python
setup.py
ysglh/evolute
ea868e5d04e6bb59760a9b6dec709303637b9f10
[ "MIT" ]
174
2018-08-15T21:48:30.000Z
2022-03-13T01:34:48.000Z
setup.py
ysglh/evolute
ea868e5d04e6bb59760a9b6dec709303637b9f10
[ "MIT" ]
null
null
null
setup.py
ysglh/evolute
ea868e5d04e6bb59760a9b6dec709303637b9f10
[ "MIT" ]
27
2018-05-16T16:25:36.000Z
2021-11-02T20:51:38.000Z
from setuptools import setup, find_packages

setup(
    name='evolute',
    version='0.9.0',
    packages=find_packages(),
    url='https://github.com/csxeba/evolute.git',
    license='MIT',
    author='Csaba Gór',
    author_email='csxeba@gmail.com',
    description='Evolutionary algorithm toolbox',
    long_description=open("Readme.md").read(),
    long_description_content_type='text/markdown'
)
26.733333
49
0.698254
49
401
5.571429
0.755102
0.087912
0
0
0
0
0
0
0
0
0
0.008824
0.15212
401
14
50
28.642857
0.794118
0
0
0
0
0
0.321696
0
0
0
0
0
0
1
0
true
0
0.076923
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
f141c618f7f5e93e7eb5c8ee22854c6063df694c
4,044
py
Python
grabNdays.py
brianvan555/Stock
f0115ef0773153cd8df361556e398935cdd1bdde
[ "BSD-2-Clause" ]
null
null
null
grabNdays.py
brianvan555/Stock
f0115ef0773153cd8df361556e398935cdd1bdde
[ "BSD-2-Clause" ]
null
null
null
grabNdays.py
brianvan555/Stock
f0115ef0773153cd8df361556e398935cdd1bdde
[ "BSD-2-Clause" ]
null
null
null
# %%
import datetime
import time
import pandas as pd
from io import StringIO
from function_grab import grab_price
import numpy as np
import warnings
import random

data = {}
n_days = 120
date = datetime.datetime.now()
fail_count = 0
allow_continuous_fail_count = 15  # the longest consecutive market closure in the last five years was 12 days

while len(data) < n_days:
    print('parsing', date)
    # scrape the data with grab_price
    try:
        # fetch the data
        data[date.date()] = grab_price(date)
        print('success!')
        fail_count = 0
    except:
        # holidays have no data to scrape
        print('fail! check the date is holiday')
        fail_count += 1
        if fail_count == allow_continuous_fail_count:
            raise
            break  # unreachable: the raise above exits first
    # step back one day
    date -= datetime.timedelta(days=1)
    time.sleep(random.randint(5, 10))

updown = pd.DataFrame({k: d['漲跌(+/-)'] for k, d in data.items()})
# print(updown)

# %%
trade_n = pd.DataFrame({k: d['成交股數'] for k, d in data.items()})
i = 0
while i <= trade_n.shape[0] - 1:
    trade_n.iloc[i] = trade_n.iloc[i].str.replace(',', '')
    trade_n.iloc[i] = round(trade_n.iloc[i].astype(float) / 1000)
    i += 1
# print(trade_n)

PEratio = pd.DataFrame({k: d['本益比'] for k, d in data.items()})
i = 0
while i <= PEratio.shape[0] - 1:
    PEratio.iloc[i] = PEratio.iloc[i].str.replace(',', '')
    i += 1

updown.to_excel('grab120days_updown.xlsx')
trade_n.to_excel('grab120days_traden.xlsx')
PEratio.to_excel('grab120days_PE.xlsx')
# print(trade_n)
# print(PEratio)

close = pd.DataFrame({k: d['收盤價'] for k, d in data.items()})  # type=string
# print(close)
# print(close.shape)

# %%
# MA5
i = 0
MA5 = []
close = close.replace('--', np.NaN)
while i <= close.shape[0] - 1:
    close.iloc[i] = close.iloc[i].str.replace(',', '')
    mean = np.nanmean(close.iloc[i, 0:5].astype(float))
    # print(mean)
    MA5.append([close.index[i], mean])
    i = i + 1
# First build an empty list of computed values and ticker codes, load the list into a
# DataFrame, set one column as the index, and merge on matching indexes; unmatched
# entries become NaN.
MA5 = pd.DataFrame(MA5, columns=['證券代號', 'MA5']).set_index('證券代號')
close = pd.merge(close, MA5, how='outer', left_index=True, right_index=True)
# The dtype must be converted to float (the same type as the computed means); otherwise
# the later merges store NaN because of the type mismatch.
close = close.astype(float)  # convert everything to float

# MA20
i = 0
MA20 = []
# print(close.shape)  # df.shape = (n_rows, n_columns)
# Rows without a closing price hold np.nan, so np.nanmean below simply skips them.
# close = close.replace('--', np.NaN)  # replace only matches on full-string equality
while i <= close.shape[0] - 1:
    # Use iloc for positional indexing in a DataFrame; loc indexes by label.
    # print(type(close.iloc[i, 1]))
    # The thousands separators can only be stripped with str.replace on the row.
    # close.iloc[i] = close.iloc[i].str.replace(',', '')
    # print(close.iloc[i, 0:3])
    # astype only treats the data as another type temporarily; it does not change the
    # data in place unless the result is assigned back.
    mean = np.nanmean(close.iloc[i, 0:20])
    # print(type(close.iloc[i, 1]))
    """for j in range(3):
        close.iloc[i, j] = close.iloc[i, j].replace(',', '')
        sum = sum + float(close.iloc[i, j])
        print(sum)
    mean = sum/3"""
    # print(mean)
    MA20.append([close.index[i], mean])
    i = i + 1
# Build the list, load it into a DataFrame, set the ticker column as the index, and
# merge on matching indexes; unmatched entries become NaN.
MA20 = pd.DataFrame(MA20, columns=['證券代號', 'MA20']).set_index('證券代號')
close = pd.merge(close, MA20, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)

# MA60
i = 0
MA60 = []
while i <= close.shape[0] - 1:
    mean = np.nanmean(close.iloc[i, 0:60])
    MA60.append([close.index[i], mean])
    i = i + 1
MA60 = pd.DataFrame(MA60, columns=['證券代號', 'MA60']).set_index('證券代號')
close = pd.merge(close, MA60, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)

# %%
# MA120
i = 0
MA120 = []
while i <= close.shape[0] - 1:
    mean = np.nanmean(close.iloc[i, 0:120])
    MA120.append([close.index[i], mean])
    i = i + 1
MA120 = pd.DataFrame(MA120, columns=['證券代號', 'MA120']).set_index('證券代號')
close = pd.merge(close, MA120, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)
# print(close)
close.to_excel('stock120.xlsx')
29.955556
79
0.629327
575
4,044
4.347826
0.229565
0.04
0.056
0.022
0.3884
0.3708
0.3276
0.262
0.2048
0.2048
0
0.039531
0.199308
4,044
134
80
30.179104
0.732551
0.203759
0
0.294118
0
0
0.074254
0.015961
0
0
0
0
0
1
0
false
0
0.094118
0
0.094118
0.070588
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14245be4031d7387f3b61db91e9fdbcd0da8f5b
1,811
py
Python
CONTENT/Resources/guides/__UNSORTED/297_serialize_and_deserialize_binary_tree/serialize_and_desialize_binary_tree.py
impastasyndrome/DS-ALGO-OFFICIAL
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
[ "Apache-2.0" ]
13
2021-03-11T00:25:22.000Z
2022-03-19T00:19:23.000Z
CONTENT/Resources/guides/__UNSORTED/297_serialize_and_deserialize_binary_tree/serialize_and_desialize_binary_tree.py
impastasyndrome/DS-ALGO-OFFICIAL
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
[ "Apache-2.0" ]
162
2021-03-09T01:52:11.000Z
2022-03-12T01:09:07.000Z
CONTENT/Resources/guides/__UNSORTED/297_serialize_and_deserialize_binary_tree/serialize_and_desialize_binary_tree.py
impastasyndrome/DS-ALGO-OFFICIAL
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
[ "Apache-2.0" ]
12
2021-04-26T19:43:01.000Z
2022-01-31T08:36:29.000Z
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

import json


class Codec:
    def serialize(self, root):
        """Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str
        """
        if not root:
            return "null"
        return (
            "["
            + str(root.val)
            + ","
            + self.serialize(root.left)
            + ","
            + self.serialize(root.right)
            + "]"
        )

    def getSection(self, data):
        brackets = 1
        idx = 1
        while brackets != 0:
            if data[idx] == "[":
                brackets += 1
            elif data[idx] == "]":
                brackets -= 1
            idx += 1
        return data[:idx], idx

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode
        """
        if data == "null":
            return None
        first_comma = data.index(",")
        val = int(data[1:first_comma])
        data = data[first_comma + 1:]
        if data[0] == "[":
            leftsection, last = self.getSection(data)
            left = self.deserialize(leftsection)
        else:
            last = 4
            left = None
        data = data[last + 1:]
        if data[0] == "[":
            rightsection, _ = self.getSection(data)
            right = self.deserialize(rightsection)
        else:
            right = None
        node = TreeNode(val)
        node.left = left
        node.right = right
        return node


# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
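A quick round-trip check of the codec, using the TreeNode shape from the commented-out definition at the top of the file:

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


codec = Codec()
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
s = codec.serialize(root)  # "[1,[2,null,null],[3,null,null]]"
assert codec.serialize(codec.deserialize(s)) == s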
24.146667
60
0.47101
184
1,811
4.592391
0.331522
0.028402
0.040237
0.030769
0
0
0
0
0
0
0
0.011321
0.414688
1,811
74
61
24.472973
0.785849
0.227499
0
0.130435
0
0
0.012938
0
0
0
0
0
0
1
0.065217
false
0
0.021739
0
0.217391
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14272e90c89cf6a167f3389d2119044ece31836
1,832
py
Python
lhc/collections/interval_map.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
lhc/collections/interval_map.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
lhc/collections/interval_map.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
from collections import defaultdict

from lhc.interval import IntervalBinner


class IntervalMap(object):
    def __init__(self, key_value_pairs=None):
        self.len = 0
        self.binner = IntervalBinner()
        self.bins = defaultdict(list)
        self.values = defaultdict(list)
        if key_value_pairs is not None:
            for key, value in key_value_pairs:
                self[key] = value

    def __len__(self):
        return self.len

    def __iter__(self):
        for bin in self.bins.values():
            for item in bin:
                yield item

    def __contains__(self, item):
        bins = self.binner.get_overlapping_bins(item)
        for fr, to in bins:
            for bin in range(fr, to + 1):
                for set_interval in self.bins[bin]:
                    if set_interval == item:
                        return True
        return False

    def __setitem__(self, key, value):
        self.len += 1
        bin = self.binner.get_bin(key)
        self.bins[bin].append(key)
        self.values[bin].append(value)

    def __getitem__(self, item):
        bins = self.binner.get_overlapping_bins(item)
        for fr, to in bins:
            for bin in range(fr, to + 1):
                for i, set_interval in enumerate(self.bins[bin]):
                    if set_interval.overlaps(item):
                        yield self.values[bin][i]

    def iterkeys(self):
        for bin in self.bins.values():
            for item in bin:
                yield item

    def itervalues(self):
        for bin in self.values.values():
            for value in bin:
                yield value

    def iteritems(self):
        for keys, values in zip(iter(self.bins.items()), iter(self.values.items())):
            for key, value in zip(keys, values):
                yield key, value
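A usage sketch; it assumes lhc.interval also exposes an Interval(start, stop) type with the overlaps()/equality semantics the map relies on, which is not shown in this record:

from lhc.interval import Interval  # assumed companion type

m = IntervalMap()
m[Interval(0, 10)] = 'a'
m[Interval(5, 20)] = 'b'
print(list(m[Interval(7, 8)]))  # values whose key intervals overlap the query
print(Interval(0, 10) in m)     # True: exact key match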
30.032787
84
0.556223
231
1,832
4.242424
0.238095
0.065306
0.040816
0.036735
0.326531
0.310204
0.261224
0.261224
0.261224
0.261224
0
0.003376
0.353166
1,832
60
85
30.533333
0.823629
0
0
0.244898
0
0
0
0
0
0
0
0
0
1
0.183673
false
0
0.040816
0.020408
0.306122
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f142be6f64ce84f706ae93f6e0a3bfff599295dc
3,031
py
Python
control_layer/uuv_control_utils/scripts/apply_body_wrench.py
shubhamkorde/AnahitaPlus
0fc99ad774640c8dc8572ffb58d10fa18bb1a4b1
[ "BSD-3-Clause" ]
2
2020-09-21T19:45:07.000Z
2020-09-22T15:46:45.000Z
control_layer/uuv_control_utils/scripts/apply_body_wrench.py
shubhamkorde/AnahitaPlus
0fc99ad774640c8dc8572ffb58d10fa18bb1a4b1
[ "BSD-3-Clause" ]
2
2019-06-13T10:58:38.000Z
2019-09-24T14:09:05.000Z
control_layer/uuv_control_utils/scripts/apply_body_wrench.py
shubhamkorde/AnahitaPlus
0fc99ad774640c8dc8572ffb58d10fa18bb1a4b1
[ "BSD-3-Clause" ]
11
2019-12-05T05:18:10.000Z
2020-04-06T13:01:39.000Z
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import sys

from gazebo_msgs.srv import ApplyBodyWrench
from geometry_msgs.msg import Point, Wrench, Vector3

if __name__ == '__main__':
    print('Apply programmed perturbation to vehicle', rospy.get_namespace())

    rospy.init_node('set_body_wrench')

    if rospy.is_shutdown():
        print('ROS master not running!')
        sys.exit(-1)

    starting_time = 0.0
    if rospy.has_param('~starting_time'):
        starting_time = rospy.get_param('~starting_time')

    print('Starting time= %fs' % starting_time)

    duration = 0.0
    if rospy.has_param('~duration'):
        duration = rospy.get_param('~duration')

    if duration == 0.0:
        print('Duration not set, leaving node...')
        sys.exit(-1)

    print('Duration [s]=', ('Inf.' if duration < 0 else duration))

    force = [0, 0, 0]
    if rospy.has_param('~force'):
        force = rospy.get_param('~force')
        print(force)
        if len(force) != 3:
            raise rospy.ROSException('Invalid force vector')

    print('Force [N]=', force)

    torque = [0, 0, 0]
    if rospy.has_param('~torque'):
        torque = rospy.get_param('~torque')
        if len(torque) != 3:
            raise rospy.ROSException('Invalid torque vector')

    print('Torque [N]=', torque)

    try:
        rospy.wait_for_service('/gazebo/apply_body_wrench', timeout=10)
    except rospy.ROSException:
        print('Service not available! Closing node...')
        sys.exit(-1)

    try:
        apply_wrench = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)
    except rospy.ServiceException as e:
        print('Service call failed, error=', e)
        sys.exit(-1)

    ns = rospy.get_namespace().replace('/', '')
    body_name = '%s/base_link' % ns

    if starting_time >= 0:
        rate = rospy.Rate(100)
        while rospy.get_time() < starting_time:
            rate.sleep()

    wrench = Wrench()
    wrench.force = Vector3(*force)
    wrench.torque = Vector3(*torque)
    success = apply_wrench(
        body_name,
        'world',
        Point(0, 0, 0),
        wrench,
        rospy.Time().now(),
        rospy.Duration(duration))

    if success:
        print('Body wrench perturbation applied!')
        print('\tFrame: ', body_name)
        print('\tDuration [s]: ', duration)
        print('\tForce [N]: ', force)
        print('\tTorque [Nm]: ', torque)
    else:
        print('Failed!')
29.427184
87
0.635104
390
3,031
4.823077
0.394872
0.009569
0.017012
0.019139
0.069112
0.037214
0.019139
0
0
0
0
0.017075
0.246453
3,031
102
88
29.715686
0.80648
0.200924
0
0.088235
0
0
0.213544
0.020773
0
0
0
0
0
0
null
null
0
0.058824
null
null
0.235294
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
f1432fbe7e4b709efdff7049c6c10eeddba93d8f
11,788
py
Python
bin/consolidate-preempted-logs.py
phyletica/ecoevolity-experiments
bb16e34c4c7495feaa68653df98d5fbead93cf44
[ "CC-BY-4.0" ]
null
null
null
bin/consolidate-preempted-logs.py
phyletica/ecoevolity-experiments
bb16e34c4c7495feaa68653df98d5fbead93cf44
[ "CC-BY-4.0" ]
null
null
null
bin/consolidate-preempted-logs.py
phyletica/ecoevolity-experiments
bb16e34c4c7495feaa68653df98d5fbead93cf44
[ "CC-BY-4.0" ]
null
null
null
#! /usr/bin/env python import sys import os import re import glob import argparse import project_util batch_number_pattern = re.compile(r'batch(?P<batch_number>\d+)') sim_number_pattern = re.compile(r'-sim-(?P<sim_number>\d+)-') run_number_pattern = re.compile(r'-run-(?P<sim_number>\d+)\.log') def line_count(path): count = 0 with open(path) as stream: for line in stream: count += 1 return count def get_run_number(log_path): run_number_matches = run_number_pattern.findall(log_path) assert len(run_number_matches) == 1 run_number_str = run_number_matches[0] return int(run_number_str) def consolidate_preempted_logs( target_run_number = 1, number_of_samples = 1501, batch_dir_name = None): number_of_lines = number_of_samples + 1 val_sim_dirs = glob.glob(os.path.join(project_util.VAL_DIR, '0*')) for val_sim_dir in sorted(val_sim_dirs): sim_name = os.path.basename(val_sim_dir) batch_dirs = glob.glob(os.path.join(val_sim_dir, "batch*")) for batch_dir in sorted(batch_dirs): if batch_dir_name and (os.path.basename(batch_dir) != batch_dir_name): sys.stderr.write("Skipping {0}\n".format(batch_dir)) continue batch_number_matches = batch_number_pattern.findall(batch_dir) assert len(batch_number_matches) == 1 batch_number_str = batch_number_matches[0] batch_number = int(batch_number_str) sh_paths = glob.glob(os.path.join(batch_dir, "*simcoevolity-sim-*-config-run-{0}-qsub.sh".format( target_run_number))) if not sh_paths: sys.stderr.write("WARNING: No qsub files found for\n" " Simulation: {0}\n" " Batch: {1}\n" " Target run: {2}\n Skipping!!\n".format( sim_name, batch_number, target_run_number)) continue for sh_path in sorted(sh_paths): posterior_path = sh_path.replace( "-run-{0}-qsub.sh".format(target_run_number), "-state-run-{0}.log".format(target_run_number)) if not os.path.exists(posterior_path): sys.stderr.write("WARNING: Missing log: {0}\n".format(posterior_path)) sys.stdout.write("{0}\n".format(sh_path)) continue sim_number_matches = sim_number_pattern.findall(posterior_path) assert len(sim_number_matches) == 1 sim_number_str = sim_number_matches[0] sim_number = int(sim_number_str) posterior_file = os.path.basename(posterior_path) prefix = posterior_file.split("-sim-")[0] gp = os.path.join(batch_dir, "{0}-sim-{1}-config-state-run-{2}.log*".format( prefix, sim_number_str, target_run_number)) target_state_log_paths = glob.glob(gp) assert (len(target_state_log_paths) == 1), ( "Multiple matches to {0!r}".format(gp)) target_state_log_path = target_state_log_paths[0] gp = os.path.join(batch_dir, "{0}-sim-{1}-config-operator-run-{2}.log*".format( prefix, sim_number_str, target_run_number)) target_op_log_paths = glob.glob(gp) assert (len(target_op_log_paths) == 1), ( "Multiple matches to {0!r}".format(gp)) target_op_log_path = target_op_log_paths[0] state_log_path_pattern = os.path.join(batch_dir, "{0}-sim-{1}-config-state-run-*.log*".format( prefix, sim_number_str)) state_log_paths = glob.glob(state_log_path_pattern) op_log_path_pattern = os.path.join(batch_dir, "{0}-sim-{1}-config-operator-run-*.log*".format( prefix, sim_number_str)) op_log_paths = glob.glob(op_log_path_pattern) assert (len(state_log_paths) == len(op_log_paths)), ( "{0} matches for {1!r} and {2} for {3!r}".format( len(state_log_paths), state_log_path_pattern, len(op_log_paths), op_log_path_pattern)) assert (target_state_log_path in state_log_paths), ( "Target {0!r} not in matches".format( target_state_log_path)) assert (target_op_log_path in op_log_paths), ( "Target {0!r} not in matches".format( target_op_log_path)) run_numbers = 
sorted(get_run_number(p) for p in state_log_paths) assert (run_numbers == sorted(get_run_number(p) for p in op_log_paths)) extra_run_numbers = [rn for rn in run_numbers if rn > target_run_number] if len(extra_run_numbers) < 1: if line_count(target_state_log_path) != number_of_lines: sys.stderr.write( "WARNING: Target log is incomplete, but there are no extra runs\n" " Simulation: {0}\n" " Batch: {1}\n" " Rep: {2}\n" " Target run: {3}\n Skipping!!\n".format( sim_name, batch_number, sim_number, target_run_number)) sys.stdout.write("{0}\n".format(sh_path)) continue else: if line_count(target_state_log_path) >= number_of_lines: sys.stderr.write( "WARNING: Target log is complete, but there are extra runs\n" " Simulation: {0}\n" " Batch: {1}\n" " Rep: {2}\n" " Target run: {3}\n Skipping!!\n".format( sim_name, batch_number, sim_number, target_run_number)) sys.stdout.write("{0}\n".format(sh_path)) continue completed_run_number = extra_run_numbers.pop(-1) completed_state_log_pattern = os.path.join(batch_dir, "{0}-sim-{1}-config-state-run-{2}.log*".format( prefix, sim_number_str, completed_run_number)) completed_state_log_paths = glob.glob(completed_state_log_pattern) assert (len(completed_state_log_paths) == 1), ( "Multiple matches to complete state log {0!r}".format( completed_state_log_pattern)) completed_state_log_path = completed_state_log_paths[0] completed_op_log_pattern = os.path.join(batch_dir, "{0}-sim-{1}-config-operator-run-{2}.log*".format( prefix, sim_number_str, completed_run_number)) completed_op_log_paths = glob.glob(completed_op_log_pattern) assert (len(completed_op_log_paths) == 1), ( "Multiple matches to complete op log {0!r}".format( completed_state_log_pattern)) completed_op_log_path = completed_op_log_paths[0] if line_count(completed_state_log_path) != number_of_lines: sys.stderr.write( "WARNING: could not find completed log for\n" " Simulation: {0}\n" " Batch: {1}\n" " Rep: {2}\n" " Target run: {3}\n Skipping!!\n".format( sim_name, batch_number, sim_number, target_run_number)) sys.stdout.write("{0}\n".format(sh_path)) continue os.rename(completed_state_log_path, target_state_log_path) os.rename(completed_op_log_path, target_op_log_path) for n in extra_run_numbers: sp = os.path.join(batch_dir, "{0}-sim-{1}-config-state-run-{2}.log*".format( prefix, sim_number_str, n)) state_purge_paths = glob.glob(sp) assert (len(state_purge_paths) == 1), ( "Multiple matches to incomplete state log {0!r}".format( sp)) state_purge_path = state_purge_paths[0] op = os.path.join(batch_dir, "{0}-sim-{1}-config-operator-run-{2}.log*".format( prefix, sim_number_str, n)) op_purge_paths = glob.glob(op) assert (len(op_purge_paths) == 1), ( "Multiple matches to incomplete op log {0!r}".format( op)) op_purge_path = op_purge_paths[0] os.remove(state_purge_path) os.remove(op_purge_path) def main_cli(argv = sys.argv): parser = argparse.ArgumentParser() parser.add_argument('-r', '--run-number', action = 'store', type = int, default = 1, help = 'Target run number for consolidation.') parser.add_argument('-n', '--number-of-samples', action = 'store', type = int, default = 1501, help = ('Number of MCMC samples that should be found in the ' 'completed log file of each analysis.')) parser.add_argument('-b', '--batch-dir', action = 'store', type = str, default = None, help = ('Batch directory name.')) if argv == sys.argv: args = parser.parse_args() else: args = parser.parse_args(argv) consolidate_preempted_logs( target_run_number = args.run_number, number_of_samples = args.number_of_samples, batch_dir_name = 
args.batch_dir) if __name__ == "__main__": main_cli()
49.529412
98
0.462335
1,220
11,788
4.160656
0.112295
0.045705
0.038416
0.026596
0.514972
0.439322
0.391056
0.349291
0.312254
0.287431
0
0.014264
0.446895
11,788
237
99
49.738397
0.764264
0.001781
0
0.384615
0
0
0.136325
0.036206
0
0
0
0
0.058824
1
0.0181
false
0
0.027149
0
0.054299
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f144647a79fb401204bd68db25162da1d4a3b39a
303
py
Python
web/util/dateutil.py
weerapatbook/studentmonitor
82d3f5f3ce123b447ba4e4930765319734eab223
[ "Apache-2.0" ]
null
null
null
web/util/dateutil.py
weerapatbook/studentmonitor
82d3f5f3ce123b447ba4e4930765319734eab223
[ "Apache-2.0" ]
4
2020-02-12T00:58:14.000Z
2021-06-10T21:43:33.000Z
web/util/dateutil.py
weerapatbook/studentmonitor
82d3f5f3ce123b447ba4e4930765319734eab223
[ "Apache-2.0" ]
null
null
null
import datetime


class DateUtil(object):
    @classmethod
    def convertDateToString(cls, date):
        print(type(date))
        value = ''
        try:
            value = date.strftime('%d/%m/%Y')
        except Exception as ex:
            print(ex)
            value = date
        return value
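Example calls against the class above; a real date formats cleanly, while anything without strftime falls into the except branch and is returned unchanged:

print(DateUtil.convertDateToString(datetime.date(2020, 12, 25)))  # -> '25/12/2020'
print(DateUtil.convertDateToString('not a date'))                 # -> 'not a date'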
21.642857
45
0.528053
31
303
5.16129
0.741935
0.1125
0
0
0
0
0
0
0
0
0
0
0.369637
303
13
46
23.307692
0.837696
0
0
0
0
0
0.02649
0
0
0
0
0
0
1
0.083333
false
0
0.083333
0
0.333333
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14465b2943d9c01982ead5e862581f5c9a9e84c
356
py
Python
django_traceback/models.py
andrewp-as-is/django-traceback.py
286094e329e0395e7f40a5e77216c2d03b0fb385
[ "Unlicense" ]
1
2020-10-03T06:07:35.000Z
2020-10-03T06:07:35.000Z
django_traceback/models.py
andrewp-as-is/django-traceback.py
286094e329e0395e7f40a5e77216c2d03b0fb385
[ "Unlicense" ]
null
null
null
django_traceback/models.py
andrewp-as-is/django-traceback.py
286094e329e0395e7f40a5e77216c2d03b0fb385
[ "Unlicense" ]
1
2020-10-03T06:07:39.000Z
2020-10-03T06:07:39.000Z
from django.db import models


class Traceback(models.Model):
    type = models.TextField()
    value = models.TextField()
    traceback = models.TextField()
    path = models.TextField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = 'django_traceback'
        ordering = ['-created_at']
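One way such a model could be filled from an exception handler; the handler context is an assumption, only the model itself comes from this record:

import sys
import traceback as tb_module

def log_current_exception(request_path=None):
    # Call from inside an except block to persist the active exception.
    etype, value, _ = sys.exc_info()
    Traceback.objects.create(
        type=etype.__name__,
        value=str(value),
        traceback=tb_module.format_exc(),
        path=request_path,
    )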
23.733333
56
0.685393
42
356
5.666667
0.571429
0.252101
0
0
0
0
0
0
0
0
0
0
0.205056
356
14
57
25.428571
0.840989
0
0
0
0
0
0.075843
0
0
0
0
0
0
1
0
false
0
0.1
0
0.8
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
f1448f06373972137c6c53b889b91b947ea395f4
752
py
Python
corehq/apps/ota/migrations/0007_update_blob_paths.py
dimagilg/commcare-hq
ea1786238eae556bb7f1cbd8d2460171af1b619c
[ "BSD-3-Clause" ]
471
2015-01-10T02:55:01.000Z
2022-03-29T18:07:18.000Z
corehq/apps/ota/migrations/0007_update_blob_paths.py
dimagilg/commcare-hq
ea1786238eae556bb7f1cbd8d2460171af1b619c
[ "BSD-3-Clause" ]
14,354
2015-01-01T07:38:23.000Z
2022-03-31T20:55:14.000Z
corehq/apps/ota/migrations/0007_update_blob_paths.py
dimagilg/commcare-hq
ea1786238eae556bb7f1cbd8d2460171af1b619c
[ "BSD-3-Clause" ]
175
2015-01-06T07:16:47.000Z
2022-03-29T13:27:01.000Z
# Generated by Django 1.11.14 on 2018-08-03 13:24
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('ota', '0006_one_reinstall_measure'),
    ]

    operations = [
        migrations.RunSQL(
            # This migration is not reversible because blobs created
            # since the migration will no longer be accessible after
            # reversing because the old blob db would use the wrong path.
            #
            # '_default' is the bucket name from the old blob db API.
            """
            UPDATE ota_demouserrestore
            SET restore_blob_id = '_default/' || restore_blob_id
            WHERE restore_blob_id NOT LIKE '_default/%'
            """
        ),
    ]
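The comment above explains why no reverse SQL is supplied; where an explicitly irreversible migration is not acceptable, Django's RunSQL also accepts a reverse_sql argument. A no-op reverse is sketched here as an alternative, not as what this project chose:

migrations.RunSQL(
    "UPDATE ota_demouserrestore ...",    # forward SQL as above (elided)
    reverse_sql=migrations.RunSQL.noop,  # lets a backwards migrate pass without undoing
)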
28.923077
73
0.595745
87
752
5
0.678161
0.075862
0.089655
0.055172
0
0
0
0
0
0
0
0.041833
0.332447
752
25
74
30.08
0.824701
0.363032
0
0
1
0
0.098976
0.088737
0
0
0
0
0
1
0
false
0
0.111111
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
f14589148cdcd887c0aef6b10b5a56f95ad59841
1,389
py
Python
my_UpNDown/UpNDown_env.py
yifjiang/UCB-review
e5c96c0cf1977012edb7d6ea02ac5362e766980e
[ "MIT" ]
6
2018-05-01T14:25:37.000Z
2021-07-19T15:36:57.000Z
my_UpNDown/UpNDown_env.py
yifjiang/UCB-review
e5c96c0cf1977012edb7d6ea02ac5362e766980e
[ "MIT" ]
null
null
null
my_UpNDown/UpNDown_env.py
yifjiang/UCB-review
e5c96c0cf1977012edb7d6ea02ac5362e766980e
[ "MIT" ]
null
null
null
from gym.envs.atari.atari_env import AtariEnv
from gym import spaces
import numpy as np


class my_UpNDownEnv(AtariEnv):
    def __init__(self):
        super(my_UpNDownEnv, self).\
            __init__(game='up_n_down',
                     obs_type='image',
                     frameskip=1,
                     repeat_action_probability=0.25)
        self.observation_space = spaces.Box(low=0, high=255, shape=(840, 160, 3))

    def _step(self, action):
        ob_list = np.zeros((840, 160, 3))
        reward_sum = 0
        for i in range(4):
            observation, reward, done, info = \
                super(my_UpNDownEnv, self)._step(action)
            # print(reward)
            # print(info)
            # print(observation.shape)
            # print(ob_list.shape)
            ob_list[i * 210:(i + 1) * 210, :, :] = observation
            # if i == 0:
            #     ob_list = observation
            # else:
            #     ob_list = np.append(ob_list, observation, axis=0)
            reward_sum += reward
            if done:
                if i != 3:
                    for j in range(i + 1, 4):
                        ob_list[j * 210:(j + 1) * 210, :, :] = observation
                        # ob_list = np.append(ob_list, observation, axis=0)
                        reward_sum += reward
                break
        return ob_list, reward_sum / 4, done, info
        # return observation, reward_sum/4, done, info
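A typical interaction loop for the wrapper, assuming the old gym (pre-0.26) API with _step that this class targets:

env = my_UpNDownEnv()
obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env._step(env.action_space.sample())
    print(obs.shape, reward, done)  # (840, 160, 3): four stacked 210x160 frames
    if done:
        break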
38.583333
109
0.521238
170
1,389
4.052941
0.376471
0.087083
0.034833
0.060958
0.20029
0.148041
0.148041
0.148041
0.148041
0.148041
0
0.052332
0.367171
1,389
35
110
39.685714
0.731513
0.196544
0
0.086957
0
0
0.012658
0
0
0
0
0
0
1
0.086957
false
0
0.130435
0
0.304348
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14691bf0dfdd285e473f195a1a0b4f5ae79472f
3,813
py
Python
src/anima_site.py
TheNetAdmin/AniMaid
54e6b593f49561d7bfd08e117675f69ad059132a
[ "MIT" ]
null
null
null
src/anima_site.py
TheNetAdmin/AniMaid
54e6b593f49561d7bfd08e117675f69ad059132a
[ "MIT" ]
4
2021-04-01T00:36:06.000Z
2021-12-11T02:08:49.000Z
src/anima_site.py
TheNetAdmin/AniMaid
54e6b593f49561d7bfd08e117675f69ad059132a
[ "MIT" ]
null
null
null
import logging import re import requests from dateutil.parser import parse as parse_time from json import JSONDecodeError class site: def __init__(self): pass def parse_team(self, url: str) -> dict: pass class bangumi_moe_site(site): def __init__(self): self.logger = logging.getLogger("animaid.bangumi_moe_site") def parse_team(self, url: str) -> dict: if url.startswith("https") and "torrent" not in url: raise Exception( f'This is not a torrent url, as "torrent" is not part of the url. Click the anima title and use new page\'s url (should have "torrent" in it).' ) torrent_id = url.split("/")[-1] search_url = f"https://bangumi.moe/api/v2/torrent/{torrent_id}" response = requests.get(url=search_url).json() if "team" not in response.keys() or "_id" not in response["team"]: raise Exception( f"This record does not have a valid team info, " f"try another anima record from the same team." ) team_info = response["team"] team_name = team_info["name"] team_id = team_info["_id"] print(f"The following team info is found:") print(f" team name: {team_name}") print(f" team id: {team_id}") filename = response["content"][0][0] print(f" filename: {filename}") auto_alias = re.findall(r"\[[\w\s-]+\]", filename)[0] if auto_alias: team_alias = auto_alias.replace("[", "").replace("]", "").replace(" ", "_") print(f" team alias:{team_alias}") else: print(f"Please give this team a unique alias in English,") team_alias = input(f"Input the team alias:") team_alias = team_alias.strip() team = { "_id": team_alias, "name": team_name, "alias": team_alias, "source": [ { "site": "bangumi_moe", "team_id": team_id, "last_update": parse_time("2000").isoformat(), } ], } return team def _search(self, url, ignore_properties=["introduction"]): try: res = requests.get(url=url).json() except JSONDecodeError as e: self.logger.error(f"Anima site request is invalid, url: {url}") raise Exception(f"Anima site request is invalid, url: {url}") try: res["torrents"] = sorted( res["torrents"], key=lambda x: parse_time(x["publish_time"]), reverse=True, ) except KeyError as e: self.logger.error(f"Invalid response {res}") raise e for t in res["torrents"]: for i in ignore_properties: del t[i] if len(res) == 0: raise Exception( f"No data responded, something is wrong with the request to bangumi.moe, url: {url}", extra={"info": {"url": url}}, ) return res def search_by_team(self, team, page, ignore_properties=["introduction"]): url = f'https://bangumi.moe/api/v2/torrent/team/{team["team_id"]}?p={page+1}&LIMIT=500' return self._search(url, ignore_properties) def searcy_by_tag(self, tag, page, ignore_properties=["introduction"]): url = f"https://bangumi.moe/api/v2/torrent/search?query=`{tag}`&p={page+1}&LIMIT=500" return self._search(url, ignore_properties) def search_by_torrent(self, torrent_id): url = f"https://bangumi.moe/api/v2/torrent/{torrent_id}" res = requests.get(url=url).json() if len(res) == 0: return None return res
35.635514
159
0.552583
475
3,813
4.296842
0.284211
0.039686
0.034297
0.031357
0.250367
0.250367
0.208721
0.183244
0.151886
0.151886
0
0.008481
0.319696
3,813
106
160
35.971698
0.778335
0
0
0.208791
0
0.032967
0.275636
0.006294
0
0
0
0
0
1
0.087912
false
0.021978
0.054945
0
0.230769
0.065934
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14762183446dcbbb15ac368035ea80f2d697fd9
1,099
py
Python
test/fail_testcases.py
cmorterud/flask-eztest
3d0827c64e09c7787ec00f62ef8a94bb153ae9a5
[ "MIT" ]
1
2018-06-22T04:56:14.000Z
2018-06-22T04:56:14.000Z
test/fail_testcases.py
cmorterud/flask-eztest
3d0827c64e09c7787ec00f62ef8a94bb153ae9a5
[ "MIT" ]
null
null
null
test/fail_testcases.py
cmorterud/flask-eztest
3d0827c64e09c7787ec00f62ef8a94bb153ae9a5
[ "MIT" ]
2
2018-08-30T08:56:17.000Z
2019-05-08T15:20:26.000Z
from flaskeztest import EZTestCase
from flaskeztest.exceptions import FixtureDoesNotExistError, EztestidNotInFixture


class FailTC1(EZTestCase):
    FIXTURE = "twousers"

    def runTest(self):
        self.navigate_to_endpoint('index_two')
        try:
            self.assert_full_fixture_exists()
            self.fail("Should have failed assert full fixture exists")
        except AssertionError:
            pass


class AssertEleExistsThatWasntLoadedByFixture(EZTestCase):
    FIXTURE = "oneuser"

    def runTest(self):
        self.navigate_to_endpoint('index_one')
        try:
            self.assert_ele_exists('User.lastname')
            self.fail("Should have raised User.lastname is not an eztestid in fixture")
        except EztestidNotInFixture:
            pass


class AttemptToLoadAFixtureThatDoesntExist(EZTestCase):
    FIXTURE = "Invalid"

    def setUp(self):
        pass

    def runTest(self):
        try:
            EZTestCase.setUp(self)
            self.fail("Should not have gotten passed load_fixture")
        except FixtureDoesNotExistError:
            pass
26.166667
87
0.66515
107
1,099
6.719626
0.448598
0.070932
0.058414
0.05007
0.114047
0.114047
0.114047
0.114047
0
0
0
0.001239
0.265696
1,099
41
88
26.804878
0.889715
0
0
0.333333
0
0
0.183971
0
0
0
0
0
0.166667
1
0.133333
false
0.166667
0.066667
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
f147c644059fdd73863ea4fdd99a9a3c68be91b0
3,227
py
Python
datareduction/TestLinearRegression.py
andresmasegosa/PRML-CoreSets
fb768debb15e3ff6f5b65b7224915a41c1493f3d
[ "MIT" ]
null
null
null
datareduction/TestLinearRegression.py
andresmasegosa/PRML-CoreSets
fb768debb15e3ff6f5b65b7224915a41c1493f3d
[ "MIT" ]
null
null
null
datareduction/TestLinearRegression.py
andresmasegosa/PRML-CoreSets
fb768debb15e3ff6f5b65b7224915a41c1493f3d
[ "MIT" ]
null
null
null
import inferpy as inf import matplotlib.animation as animation import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import KMeans from datareduction.variational_linear_regressor_DR import VariationalLinearRegressor_DR from prml.rv import VariationalGaussianMixture from prml.features import PolynomialFeatures from prml.linear import ( VariationalLinearRegressor, VariationalLogisticRegressor ) np.random.seed(1234) N=10000 K=50 D=10 # def create_toy_data(func, sample_size, std, domain=[0, 1]): # x = np.linspace(domain[0], domain[1], sample_size) # np.random.shuffle(x) # t = func(x) + np.random.normal(scale=std, size=x.shape) # return x, t # # def cubic(x): # return x * (x - 5) * (x + 5) # # x_train, y_train = create_toy_data(cubic, N, 10., [-5, 5]) # x = np.linspace(-5, 5, 100) # y = cubic(x) X_train=np.ones((N,D+1)) X_train[0:int(N/2),:] = inf.models.Normal(0,1,dim = D+1).sample(int(N/2)) X_train[int(N/2):N,:] = inf.models.Normal(10,1,dim = D+1).sample(int(N/2)) w = np.random.rand(D+1) y_train = X_train@w.T X=np.ones((N,D+1)) X[0:int(N/2),:] = inf.models.Normal(0,1,dim = D+1).sample(int(N/2)) X[int(N/2):N,:] = inf.models.Normal(10,1,dim = D+1).sample(int(N/2)) y = X@w.T #feature = PolynomialFeatures(degree=D) #X_train = feature.transform(x_train) #X = feature.transform(x) vlr = VariationalLinearRegressor(beta=0.01) vlr.fit(X_train, y_train) y_mean, y_std = vlr.predict(X, return_std=True) # plt.scatter(x_train, y_train, s=100, facecolor="none", edgecolor="b") # plt.plot(x, y, c="g", label="$\sin(2\pi x)$") # plt.plot(x, y_mean, c="r", label="prediction") # plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color="pink") # plt.legend() # plt.show() normal = inf.models.Normal(y_mean,y_std) l = normal.log_prob(y) print(np.sum(l)) y_repeated = np.repeat(np.expand_dims(y_train,axis=1),X_train.shape[1],axis=1) XY_train = np.multiply(X_train,y_repeated) # np.multiply(np.expand_dims(X_train,axis=2),np.expand_dims(X_train,axis=1))[1] == np.matmul(np.expand_dims(X_train[1],axis=1), np.expand_dims(X_train[1],axis=1).T) XX_train = np.multiply(np.expand_dims(X_train,axis=2),np.expand_dims(X_train,axis=1)) XX_train = XX_train.reshape((XX_train.shape[0],-1)) XJoin_train = np.concatenate((XY_train,XX_train),axis=1) kmeans = KMeans(n_clusters=K, random_state=0).fit(XJoin_train) weights = np.asarray([sum(kmeans.labels_==x) for x in range(0, K)]) clusters_centers = np.multiply(kmeans.cluster_centers_,np.repeat(weights.reshape(K,1),kmeans.cluster_centers_.shape[1],axis=1)) clusters_sum = np.sum(clusters_centers,axis=0) X_dr = {'XY': clusters_sum[0:(D+1)],'XX': clusters_sum[(D+1):(D+1)+(D+1)*(D+1)].reshape((D+1,D+1))} vlr_dr = VariationalLinearRegressor_DR(beta=0.01) vlr_dr.fit(X_dr) y_mean_dr, y_std_dr = vlr_dr.predict(X, return_std=True) # plt.scatter(x_train, y_train, s=100, facecolor="none", edgecolor="b") # plt.plot(x, y, c="g", label="$\sin(2\pi x)$") # plt.plot(x, y_mean, c="r", label="prediction") # plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color="pink") # plt.legend() # plt.show() normal_dr = inf.models.Normal(y_mean_dr,y_std_dr) l_dr = normal_dr.log_prob(y) print(np.sum(l_dr))
31.330097
164
0.704059
606
3,227
3.580858
0.20297
0.04977
0.018433
0.024885
0.384793
0.368664
0.343779
0.326267
0.304147
0.304147
0
0.035294
0.104431
3,227
102
165
31.637255
0.715571
0.356678
0
0
0
0
0.001954
0
0
0
0
0
0
1
0
false
0
0.191489
0
0.191489
0.042553
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14b4723145eab9e4983e1e31682b70ef51514f9
38,348
py
Python
tests/test_worldpop.py
mcarans/hdxscraper-worldpop
dcfd73df5af2fffb927a6ad39129f744a6f5debb
[ "MIT" ]
1
2017-09-02T15:07:43.000Z
2017-09-02T15:07:43.000Z
tests/test_worldpop.py
mcarans/hdxscraper-worldpop
dcfd73df5af2fffb927a6ad39129f744a6f5debb
[ "MIT" ]
1
2021-09-21T15:44:59.000Z
2021-09-22T22:47:42.000Z
tests/test_worldpop.py
mcarans/hdxscraper-worldpop
dcfd73df5af2fffb927a6ad39129f744a6f5debb
[ "MIT" ]
null
null
null
#!/usr/bin/python """ Unit tests for worldpop. """ from os.path import join import pytest from hdx.data.vocabulary import Vocabulary from hdx.hdx_configuration import Configuration from hdx.hdx_locations import Locations from hdx.location.country import Country from worldpop import ( generate_datasets_and_showcases, get_countriesdata, get_indicators_metadata, ) class TestWorldPop: indicators_metadata = [ { "alias": "pop", "name": "Population", "title": "Population", "desc": "WorldPop produces different types of gridded population count datasets...", }, { "alias": "births", "name": "Births", "title": "Births", "desc": "The health and survival of women and their new-born babies in low income countries is a key public health priority...", }, { "alias": "pregnancies", "name": "Pregnancies", "title": "Pregnancies", "desc": "The health and survival of women and their new-born babies in low income countries is a key public health priority...", }, { "alias": "age_structures", "name": "Age and sex structures", "title": "Age and sex structures", "desc": "Age and sex structures: WorldPop produces different types of gridded population count datasets...", }, ] countriesdata = { "AUS": { "pop": { "wpgp": ["http://papa/getJSON/pop/wpgp?iso3=AUS"], "wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=AUS"], } }, "BRA": { "pop": { "wpgp": ["http://papa/getJSON/pop/wpgp?iso3=BRA"], "wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=BRA"], } }, "CAN": { "pop": { "wpgp": ["http://papa/getJSON/pop/wpgp?iso3=CAN"], "wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=CAN"], } }, "RUS": { "pop": { "wpgp": ["http://papa/getJSON/pop/wpgp?iso3=RUS"], "wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=RUS"], } }, "World": { "pop": { "wpgp1km": [ "http://papa/getJSON/pop/wpgp1km?id=24776", "http://papa/getJSON/pop/wpgp1km?id=24777", ] } }, "ZWE": { "pop": { "wpgp": ["http://papa/getJSON/pop/wpgp?iso3=ZWE"], "wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=ZWE"], } }, } wpgpdata = [ {"id": "1325", "iso3": "AUS"}, {"id": "1326", "iso3": "RUS"}, {"id": "1327", "iso3": "BRA"}, {"id": "1328", "iso3": "CAN"}, {"id": "1482", "iso3": "ZWE"}, ] wpgpunadjdata = [ {"id": "13251", "iso3": "AUS"}, {"id": "13261", "iso3": "RUS"}, {"id": "13271", "iso3": "BRA"}, {"id": "13281", "iso3": "CAN"}, {"id": "14821", "iso3": "ZWE"}, ] metadata = [ { "id": "1482", "title": "The spatial distribution of population in 2000, Zimbabwe", "desc": "Estimated total number of people per grid-cell.", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2000", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/1482/zwe_ppp_wpgp_2000_Image.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=1482", }, { "id": "1731", "title": "The spatial distribution of population in 2001, 
Zimbabwe", "desc": "Estimated total number of people per grid-cell.", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2001", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/1731/zwe_ppp_wpgp_2001_Image.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=1731", }, { "id": "3474", "title": "The spatial distribution of population in 2008, Zimbabwe", "desc": "Estimated total number of people per grid-cell.", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2008", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/3474/zwe_ppp_wpgp_2008_Image.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=3474", }, { "id": "4711", "title": "The spatial distribution of population in 2013, Zimbabwe", "desc": "Estimated total number of people per grid-cell.", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2013", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/4711/zwe_ppp_wpgp_2013_Image.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=4711", }, { "id": "6205", "title": "The spatial distribution of population in 2019, Zimbabwe", "desc": "Estimated total number of people per grid-cell.", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2019", "citation": "WorldPop", "data_file": 
"GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/6205/zwe_ppp_wpgp_2019_Image.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=6205", }, { "id": "6454", "title": "The spatial distribution of population in 2020, Zimbabwe", "desc": "Estimated total number of people per grid-cell.", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2020", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/6454/zwe_ppp_wpgp_2020_Image.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=6454", }, ] metadataunadj = [ { "id": "14821", "title": "The spatial distribution of population in 2000 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "desc": "Estimated total number of people per grid-cell. UNAdj", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2000", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000_UNadj.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/1482/zwe_ppp_wpgp_2000_Image_UNadj.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=14821", }, { "id": "17311", "title": "The spatial distribution of population in 2001 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "desc": "Estimated total number of people per grid-cell. 
UNAdj", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2001", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001_UNadj.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/1731/zwe_ppp_wpgp_2001_Image_UNadj.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=17311", }, { "id": "34741", "title": "The spatial distribution of population in 2008 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "desc": "Estimated total number of people per grid-cell. UNAdj", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2008", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008_UNadj.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/3474/zwe_ppp_wpgp_2008_Image_UNadj.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=34741", }, { "id": "47111", "title": "The spatial distribution of population in 2013 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "desc": "Estimated total number of people per grid-cell. 
UNAdj", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2013", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013_UNadj.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/4711/zwe_ppp_wpgp_2013_Image_UNadj.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=47111", }, { "id": "62051", "title": "The spatial distribution of population in 2019 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "desc": "Estimated total number of people per grid-cell. UNAdj", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2019", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019_UNadj.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/6205/zwe_ppp_wpgp_2019_Image_UNadj.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=62051", }, { "id": "64541", "title": "The spatial distribution of population in 2020 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "desc": "Estimated total number of people per grid-cell. 
UNAdj", "doi": "10.5258/SOTON/WP00645", "date": "2018-11-01", "popyear": "2020", "citation": "WorldPop", "data_file": "GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "geotiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )", "gtype": "Population", "continent": "Africa", "country": "Zimbabwe", "iso3": "ZWE", "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020_UNadj.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/6454/zwe_ppp_wpgp_2020_Image_UNadj.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=64541", }, ] wpgp1kmdata = [{"id": "24776"}, {"id": "24777"}] metadata_24777 = { "id": "24777", "title": "The spatial distribution of population in 2020", "desc": "Estimated total number of people per grid-cell...\r\n", "doi": "10.5258/SOTON/WP00647", "date": "0018-02-01", "popyear": "2020", "citation": "WorldPop...\r\n", "data_file": "GIS/Population/Global_2000_2020/2020/0_Mosaicked/ppp_2020_1km_Aggregated.tif", "file_img": "world_ppp_wpgp_2020_Image.png", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "tiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Global mosaics 2000-2020", "gtype": "Population", "continent": "World", "country": None, "iso3": None, "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/0_Mosaicked/ppp_2020_1km_Aggregated.tif" ], "url_img": "", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=24777", } metadata_24776 = { "id": "24776", "title": "The spatial distribution of population in 2019", "desc": "Estimated total number of people per grid-cell...\r\n", "doi": "10.5258/SOTON/WP00647", "date": "2018-11-01", "popyear": "2019", "citation": "WorldPop...\r\n", "data_file": "GIS/Population/Global_2000_2020/2019/0_Mosaicked/ppp_2019_1km_Aggregated.tif", "file_img": "world_ppp_wpgp_2019_Image.png", "archive": "N", "public": "Y", "source": "WorldPop, University of Southampton, UK", "data_format": "tiff", "author_email": "wp@worldpop.uk", "author_name": "WorldPop", "maintainer_name": "WorldPop", "maintainer_email": "wp@worldpop.uk", "project": "Population", "category": "Global mosaics 2000-2020", "gtype": "Population", "continent": "World", "country": None, "iso3": None, "files": [ "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/0_Mosaicked/ppp_2019_1km_Aggregated.tif" ], "url_img": "https://www.worldpop.org/tabs/gdata/img/24776/world_ppp_wpgp_2019_Image.png", "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org", "license": "https://www.worldpop.org/data/licence.txt", "url_summary": "https://www.worldpop.org/geodata/summary?id=24776", } @pytest.fixture(scope="function") def configuration(self): Configuration._create( hdx_read_only=True, user_agent="test", project_config_yaml=join("tests", "config", 
"project_configuration.yml"), ) Locations.set_validlocations( [{"name": "zwe", "title": "Zimbabwe"}, {"name": "world", "title": "World"}] ) Country.countriesdata(use_live=False) Vocabulary._tags_dict = True Vocabulary._approved_vocabulary = { "tags": [{"name": "population"}, {"name": "geodata"}], "id": "4e61d464-4943-4e97-973a-84673c1aaa87", "name": "approved", } return Configuration.read() @pytest.fixture(scope="function") def downloader(self): class Download: url = None @classmethod def download(cls, url): cls.url = url @classmethod def get_json(cls): if cls.url == "http://lala/getJSON/": return {"data": TestWorldPop.indicators_metadata} elif cls.url == "http://papa/getJSON/pop/wpgp": return {"data": TestWorldPop.wpgpdata} elif cls.url == "http://papa/getJSON/pop/wpgpunadj": return {"data": TestWorldPop.wpgpunadjdata} elif cls.url == "http://papa/getJSON/pop/wpgp1km": return {"data": TestWorldPop.wpgp1kmdata} elif cls.url == "http://papa/getJSON/pop/wpgp?iso3=ZWE": return {"data": TestWorldPop.metadata} elif cls.url == "http://papa/getJSON/pop/wpgpunadj?iso3=ZWE": return {"data": TestWorldPop.metadataunadj} elif cls.url == "http://papa/getJSON/pop/wpgp1km?id=24776": return {"data": TestWorldPop.metadata_24776} elif cls.url == "http://papa/getJSON/pop/wpgp1km?id=24777": return {"data": TestWorldPop.metadata_24777} @staticmethod def get_text(): return ( "The WorldPop project aims to provide an open access archive of spatial " "demographic datasets ... at creativecommons.org." ) return Download() def test_get_indicators_metadata(self, configuration, downloader): indicators = configuration["indicators"] indicators_metadata = get_indicators_metadata( "http://lala/getJSON/", downloader, indicators ) assert "pop" in indicators_metadata.keys() assert sorted( list(indicators_metadata.values()), key=lambda k: k["alias"] ) == sorted(TestWorldPop.indicators_metadata, key=lambda k: k["alias"]) def test_get_countriesdata(self, configuration, downloader): indicators = configuration["indicators"] cutdownindicators = {"pop": indicators["pop"]} countriesdata, countries = get_countriesdata( "http://papa/getJSON/", downloader, cutdownindicators ) assert countriesdata == TestWorldPop.countriesdata assert countries == [ {"iso3": "AUS"}, {"iso3": "BRA"}, {"iso3": "CAN"}, {"iso3": "RUS"}, {"iso3": "ZWE"}, {"iso3": "World"}, ] def test_generate_datasets_and_showcases(self, configuration, downloader): indicators_metadata = {"pop": TestWorldPop.indicators_metadata[0]} countryiso = "World" countrydata = TestWorldPop.countriesdata[countryiso] datasets, showcases = generate_datasets_and_showcases( downloader, countryiso, indicators_metadata, countrydata ) dataset = datasets[0] assert dataset == { "name": "worldpop-population-for-world", "title": "World - Population", "notes": "WorldPop produces different types of gridded population count datasets... \nData for earlier dates is available directly from WorldPop. \n \nWorldPop...\r\n", "methodology": "Other", "methodology_other": "Estimated total number of people per grid-cell...\r\n", "dataset_source": "WorldPop, University of Southampton, UK", "license_id": "hdx-other", "license_other": "The WorldPop project aims to provide an open access archive of spatial demographic datasets ... 
at creativecommons.org.", "private": False, "maintainer": "37023db4-a571-4f28-8d1f-15f0353586af", "owner_org": "3f077dff-1d05-484d-a7c2-4cb620f22689", "data_update_frequency": "365", "subnational": "1", "groups": [{"name": "world"}], "tags": [ { "name": "population", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, { "name": "geodata", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, ], "dataset_date": "[2019-01-01T00:00:00 TO 2020-12-31T00:00:00]", } resources = dataset.get_resources() assert resources == [ { "name": "ppp_2020_1km_Aggregated.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/0_Mosaicked/ppp_2020_1km_Aggregated.tif", "description": "The spatial distribution of population in 2020", "resource_type": "api", "url_type": "api", }, { "name": "ppp_2019_1km_Aggregated.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/0_Mosaicked/ppp_2019_1km_Aggregated.tif", "description": "The spatial distribution of population in 2019", "resource_type": "api", "url_type": "api", }, ] showcase = next(iter(showcases.values()))[0] assert showcase == { "name": "worldpop-population-for-world-showcase", "title": "WorldPop World Population Summary Page", "notes": "Summary for Global mosaics 2000-2020 - World", "url": "https://www.worldpop.org/geodata/summary?id=24777", "image_url": "https://www.worldpop.org/tabs/gdata/img/24776/world_ppp_wpgp_2019_Image.png", "tags": [ { "name": "population", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, { "name": "geodata", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, ], } countryiso = "ZWE" countrydata = TestWorldPop.countriesdata[countryiso] datasets, showcases = generate_datasets_and_showcases( downloader, countryiso, indicators_metadata, countrydata ) dataset = datasets[0] assert dataset == { "name": "worldpop-population-for-zimbabwe", "title": "Zimbabwe - Population", "notes": "WorldPop produces different types of gridded population count datasets... \nData for earlier dates is available directly from WorldPop. \n \nWorldPop", "methodology": "Other", "methodology_other": "Estimated total number of people per grid-cell. UNAdj", "dataset_source": "WorldPop, University of Southampton, UK", "license_id": "hdx-other", "license_other": "The WorldPop project aims to provide an open access archive of spatial demographic datasets ... 
at creativecommons.org.", "private": False, "maintainer": "37023db4-a571-4f28-8d1f-15f0353586af", "owner_org": "3f077dff-1d05-484d-a7c2-4cb620f22689", "data_update_frequency": "365", "subnational": "1", "groups": [{"name": "zwe"}], "tags": [ { "name": "population", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, { "name": "geodata", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, ], "dataset_date": "[2000-01-01T00:00:00 TO 2020-12-31T00:00:00]", } resources = dataset.get_resources() assert resources == [ { "name": "zwe_ppp_2020.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif", "description": "The spatial distribution of population in 2020, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2020_UNadj.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020_UNadj.tif", "description": "The spatial distribution of population in 2020 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2019.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif", "description": "The spatial distribution of population in 2019, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2019_UNadj.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019_UNadj.tif", "description": "The spatial distribution of population in 2019 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2013.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif", "description": "The spatial distribution of population in 2013, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2013_UNadj.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013_UNadj.tif", "description": "The spatial distribution of population in 2013 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2008.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif", "description": "The spatial distribution of population in 2008, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2008_UNadj.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008_UNadj.tif", "description": "The spatial distribution of population in 2008 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2001.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif", "description": "The spatial distribution of population in 2001, Zimbabwe", "resource_type": "api", "url_type": "api", }, { "name": "zwe_ppp_2001_UNadj.tif", "format": "geotiff", "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001_UNadj.tif", "description": "The spatial distribution of population in 2001 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe", 
"resource_type": "api", "url_type": "api", } ] showcase = next(iter(showcases.values()))[0] assert showcase == { "name": "worldpop-population-for-zimbabwe-showcase", "title": "WorldPop Zimbabwe Population Summary Page", "notes": "Summary for Individual countries 2000-2020 ( 100m resolution ) - Zimbabwe", "url": "https://www.worldpop.org/geodata/summary?id=6454", "image_url": "https://www.worldpop.org/tabs/gdata/img/6454/zwe_ppp_wpgp_2020_Image.png", "tags": [ { "name": "population", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, { "name": "geodata", "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87", }, ], }
45.81601
183
0.541541
3,858
38,348
5.250648
0.075687
0.046157
0.040776
0.042208
0.881177
0.871254
0.859555
0.841734
0.795577
0.769067
0
0.078749
0.311229
38,348
836
184
45.870813
0.688184
0.001069
0
0.544895
0
0.03936
0.541125
0.112982
0
0
0
0
0.0123
1
0.00984
false
0
0.00861
0.00123
0.04551
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f14b7fd1ac391cc7123c9a3508a739a92e9a94f9
1,277
py
Python
fast/day02/day02.py
JerryDot/advent-2021-py
c3eb44b5989c276de0b5e721d3b8dfa1698796f1
[ "MIT" ]
null
null
null
fast/day02/day02.py
JerryDot/advent-2021-py
c3eb44b5989c276de0b5e721d3b8dfa1698796f1
[ "MIT" ]
null
null
null
fast/day02/day02.py
JerryDot/advent-2021-py
c3eb44b5989c276de0b5e721d3b8dfa1698796f1
[ "MIT" ]
null
null
null
from typing import Iterable, List, Tuple

"""
----------> (1,0)
|
|
|
|
v (0,1)
"""


def parse_input() -> List[Tuple[int, int]]:
    with open('day02.txt', 'rb') as f:
        INPUT = map(lambda x: x.strip(), map(lambda x: x.decode("utf-8"), f.readlines()))
    moves = []
    for entry in INPUT:
        direction, size = entry.split()[0], int(entry.split()[1])
        if direction == "forward":
            moves.append((size, 0))
        elif direction == "backward":
            moves.append((-size, 0))
        elif direction == "down":
            moves.append((0, size))
        elif direction == "up":
            moves.append((0, -size))
        else:
            raise Exception("This should not occur")
    return moves


def part_one(p_input: List[Tuple[int, int]]) -> int:
    position = [0, 0]
    for move in p_input:
        position[0] += move[0]
        position[1] += move[1]
    return position[0] * position[1]


def part_two(p_input: List[Tuple[int, int]]) -> int:
    position = [0, 0]
    aim = 0
    for move in p_input:
        position[0] += move[0]
        position[1] += move[0] * aim
        aim += move[1]
    return position[0] * position[1]


if __name__ == "__main__":
    print(part_one(parse_input()))
    print(part_two(parse_input()))
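A quick, hedged sanity check of the two scoring rules above, using a tiny made-up input ("forward 5", "down 3", "forward 5") rather than the real day02.txt:

# Illustrative only: hand-built moves, bypassing parse_input().
moves = [(5, 0), (0, 3), (5, 0)]
assert part_one(moves) == 10 * 3    # x = 10, depth = 3
assert part_two(moves) == 10 * 15   # x = 10, aim-based depth = 5 * 3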
22.803571
89
0.539546
172
1,277
3.895349
0.348837
0.080597
0.059701
0.076119
0.425373
0.395522
0.308955
0.228358
0.228358
0.228358
0
0.033806
0.281911
1,277
55
90
23.218182
0.696838
0
0
0.228571
0
0
0.053485
0
0
0
0
0
0
1
0.085714
false
0
0.028571
0
0.2
0.057143
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14c0cd89a3a51b3ae10b34f33f09e21ce1188ac
1,300
py
Python
a.py
sarthak7838/Music-Recommender-Web-App
cda0d1a73915495b05ef9b094fad509f34afc2b6
[ "MIT" ]
null
null
null
a.py
sarthak7838/Music-Recommender-Web-App
cda0d1a73915495b05ef9b094fad509f34afc2b6
[ "MIT" ]
null
null
null
a.py
sarthak7838/Music-Recommender-Web-App
cda0d1a73915495b05ef9b094fad509f34afc2b6
[ "MIT" ]
1
2021-12-03T20:51:50.000Z
2021-12-03T20:51:50.000Z
import requests as req
from bs4 import BeautifulSoup
import os
import os.path
import shutil

try:
    r1 = req.get("https://gaana.com/playlist/gaana-dj-best-of-badshah")
    c1 = r1.content
    print(r1)  # was print(r): 'r' was never defined; the response object is r1
    soup1 = BeautifulSoup(c1, "html.parser")
    F1 = soup1.find_all("a", {"class": "sng_c "})
    # print(F1)
    namelist = []
    # count = 0
    for i in F1:
        print(i.text)
        x = i.text
        l = x.split()
        name = ""
        url = "https://www.youtube.com/results?search_query="
        for j in l:
            url = url + j + "+"
            name = name + j + "_"
        # print(":", url)
        url = url[:-1]
        name = name[:-1]
        namelist.append(name)
        # print(url, name)
        r2 = req.get(url)
        c2 = r2.content
        soup2 = BeautifulSoup(c2, "html.parser")
        # F2 = soup2.find_all("a", {"class": "yt-simple-endpoint"})
        # print(F2[0]['href'])
        # print(F2)
        # F2 = soup2.find_all('a', href=True)
        # print(link[40]['href'])
        F2 = soup2.findAll(attrs={'class': 'yt-uix-tile-link'})
        link = "https://www.youtube.com" + F2[0]['href']
        command = "youtube-dl --extract-audio --audio-format mp3 " + link + " -o " + name + ".mp3"
        os.system(command)
        # count = count + 1
except req.exceptions.RequestException as e:
    print(e)

'''
for n in namelist:
    if os.path.isfile(n):
        n = n + ".mp3"
        src = n
        dst = 'data/romance/{}'.format(n)
        shutil.move(src, dst)

filelist = os.listdir('.')
for f in filelist:
    os.remove(f)
'''
19.402985
82
0.636923
211
1,300
3.895735
0.445498
0.025547
0.029197
0.03163
0.036496
0
0
0
0
0
0
0.031732
0.151538
1,300
66
83
19.69697
0.713509
0.151538
0
0
0
0
0.26
0
0
0
0
0
0
1
0
false
0
0.151515
0
0.151515
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f14c3a534b6e2ff42a0b00e89103520a3c797b9c
638
py
Python
interest/migrations/0002_auto_20210523_2111.py
ianpierreg/recroom
86c21332ab533ea6aaf7b4a3428f18ba2c4d1ebe
[ "MIT" ]
null
null
null
interest/migrations/0002_auto_20210523_2111.py
ianpierreg/recroom
86c21332ab533ea6aaf7b4a3428f18ba2c4d1ebe
[ "MIT" ]
4
2021-05-02T01:14:59.000Z
2022-02-13T17:58:36.000Z
interest/migrations/0002_auto_20210523_2111.py
ianpierreg/recroom
86c21332ab533ea6aaf7b4a3428f18ba2c4d1ebe
[ "MIT" ]
null
null
null
# Generated by Django 3.1.7 on 2021-05-23 21:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('interest', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='interest',
            name='name',
            field=models.CharField(max_length=100, verbose_name='Interesse'),
        ),
        migrations.AlterField(
            model_name='interesttype',
            name='description',
            field=models.CharField(blank=True, max_length=400, null=True, verbose_name='Descrição do tipo de interesse'),
        ),
    ]
26.583333
121
0.60815
67
638
5.686567
0.656716
0.104987
0.131234
0.152231
0
0
0
0
0
0
0
0.054113
0.275862
638
23
122
27.73913
0.770563
0.070533
0
0.235294
1
0
0.159052
0
0
0
0
0
0
1
0
false
0
0.058824
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
f14cca8920c13babdb5f450b2a03d4a66aa045bd
897
py
Python
utils.py
TrueNobility303/image-classification-CIFAR10
e0200d9b4d4f6ceaf058177abebd3f6510aebd9a
[ "MIT" ]
2
2021-06-10T16:19:50.000Z
2021-06-16T10:55:14.000Z
utils.py
TrueNobility303/image-classification-CIFAR10
e0200d9b4d4f6ceaf058177abebd3f6510aebd9a
[ "MIT" ]
null
null
null
utils.py
TrueNobility303/image-classification-CIFAR10
e0200d9b4d4f6ceaf058177abebd3f6510aebd9a
[ "MIT" ]
null
null
null
import numpy as np
import torchvision
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from config import device
import torch.nn as nn
import torch.optim as optim
import tqdm
from torch.nn import functional as F


# Take only a subset of a dataset
class PartialDataset(Dataset):
    def __init__(self, dataset, n_items):
        self.dataset = dataset
        self.n_items = n_items

    def __getitem__(self, index):
        return self.dataset.__getitem__(index)

    def __len__(self):
        return min(self.n_items, len(self.dataset))


# Set random seeds so the code is reproducible
def set_random_seeds(seed_value=0):
    torch.cuda.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
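A minimal usage sketch for PartialDataset above (not from the original repo; the dataset root, subset size, and batch size are illustrative assumptions):

# Hypothetical usage: train on only the first 1000 CIFAR-10 samples.
transform = transforms.ToTensor()
full_train = torchvision.datasets.CIFAR10(root='./data', train=True,
                                          download=True, transform=transform)
small_train = PartialDataset(full_train, n_items=1000)  # __len__ caps at 1000
loader = DataLoader(small_train, batch_size=64, shuffle=True)
set_random_seeds(0)  # note: seeds CUDA only; add torch.manual_seed(0) for CPU ops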
27.181818
51
0.769231
128
897
5.171875
0.40625
0.066465
0.069486
0.054381
0
0
0
0
0
0
0
0.001328
0.160535
897
33
52
27.181818
0.877822
0.023411
0
0.148148
0
0
0
0
0
0
0
0
0
1
0.148148
false
0
0.518519
0.074074
0.777778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
f14e4386fd51245a77e8a6b909a18b755412d9be
2,068
py
Python
setup.py
xguse/holographer
d389655772afc5a5c4e458c763d59c25a8508f18
[ "MIT" ]
null
null
null
setup.py
xguse/holographer
d389655772afc5a5c4e458c763d59c25a8508f18
[ "MIT" ]
null
null
null
setup.py
xguse/holographer
d389655772afc5a5c4e458c763d59c25a8508f18
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages

with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

requirements = [
    'Click>=6.0',
    'logzero',
    # TODO: put package requirements here
]

setup_requirements = [
    'pytest-runner',
    # TODO(xguse): put setup requirements (distutils extensions, etc.) here
]

test_requirements = [
    'pytest',
    # TODO: put package test requirements here
]

setup(
    name='holographer',
    version='0.0.2',
    description="Holographer copies a filesystem object to a storage location and creates in its place a symlinked decoy pointing to the stored target. Think of when you run out of HDD space and need to move things to free up space but do not want to break everything that may expect to find your target in its old location.",
    long_description=readme + '\n\n' + history,
    author="Gus Dunn",
    author_email='w.gus.dunn@gmail.com',
    url='https://github.com/xguse/holographer',
    packages=find_packages('src'),
    package_dir={"": "src"},
    entry_points={
        'console_scripts': [
            'holo=holographer.cli.main:run'
        ]
    },
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='holographer',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
30.865672
325
0.648936
250
2,068
5.28
0.552
0.100758
0.132576
0.078788
0.040909
0
0
0
0
0
0
0.011853
0.224855
2,068
66
326
31.333333
0.811603
0.100097
0
0
0
0.019231
0.491626
0.015667
0
0
0
0.015152
0
1
0
false
0
0.019231
0
0.019231
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
f150530daa19fd258ee7528230b71a750194f14d
1,041
py
Python
data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_DIVISIBLE_4_STRING_INTEGERS.py
mxl1n/CodeGen
e5101dd5c5e9c3720c70c80f78b18f13e118335a
[ "MIT" ]
241
2021-07-20T08:35:20.000Z
2022-03-31T02:39:08.000Z
data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_DIVISIBLE_4_STRING_INTEGERS.py
mxl1n/CodeGen
e5101dd5c5e9c3720c70c80f78b18f13e118335a
[ "MIT" ]
49
2021-07-22T23:18:42.000Z
2022-03-24T09:15:26.000Z
data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_DIVISIBLE_4_STRING_INTEGERS.py
mxl1n/CodeGen
e5101dd5c5e9c3720c70c80f78b18f13e118335a
[ "MIT" ]
71
2021-07-21T05:17:52.000Z
2022-03-29T23:49:28.000Z
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(s):
    n = len(s)
    count = 0
    for i in range(0, n, 1):
        if s[i] == '4' or s[i] == '8' or s[i] == '0':
            count += 1
    for i in range(0, n - 1, 1):
        h = (ord(s[i]) - ord('0')) * 10 + (ord(s[i + 1]) - ord('0'))
        if h % 4 == 0:
            count = count + i + 1
    return count


#TOFILL

if __name__ == '__main__':
    param = [
        ('Qaq',),
        ('9400761825850',),
        ('0011001111',),
        ('lasWqrLRq',),
        ('5662',),
        ('110',),
        (' tOYKf',),
        ('6536991235305',),
        ('11111',),
        ('uZftT iDHcYiCt',),
    ]
    n_success = 0
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success += 1
    print("#Results: %i, %i" % (n_success, len(param)))
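f_gold counts substrings divisible by 4 using the fact that divisibility by 4 depends only on the last two digits: every substring of length >= 2 ending at position i+1 is divisible by 4 exactly when the two-digit number s[i]s[i+1] is, contributing i+1 substrings. A worked check (my example, not part of the harness):

# "124": the single digit '4' gives 1; pair 12 at i=0 adds 0+1; pair 24 at i=1 adds 1+1.
# Total 4, matching the divisible substrings "4", "12", "24", "124".
assert f_gold("124") == 4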
26.692308
90
0.480307
137
1,041
3.525547
0.481752
0.020704
0.020704
0.045549
0.057971
0.057971
0.057971
0
0
0
0
0.104348
0.337176
1,041
39
91
26.692308
0.595652
0.177714
0
0
0
0
0.128386
0
0
0
0
0
0
1
0.034483
false
0
0
0
0.068966
0.034483
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f156bf8245bfd78557645c9f4c7188cd9870a78f
846
py
Python
IPython/frontend/html/notebook/fabfile.py
3kwa/ipython
a5922fd39ed4b2067d64b285125278a850bb129f
[ "BSD-3-Clause-Clear" ]
1
2022-03-13T23:06:43.000Z
2022-03-13T23:06:43.000Z
IPython/frontend/html/notebook/fabfile.py
3kwa/ipython
a5922fd39ed4b2067d64b285125278a850bb129f
[ "BSD-3-Clause-Clear" ]
null
null
null
IPython/frontend/html/notebook/fabfile.py
3kwa/ipython
a5922fd39ed4b2067d64b285125278a850bb129f
[ "BSD-3-Clause-Clear" ]
null
null
null
""" fabfile to prepare the notebook """ from fabric.api import local,lcd from fabric.utils import abort import os static_dir = 'static' components_dir = os.path.join(static_dir,'components') def test_component(name): if not os.path.exists(os.path.join(components_dir,name)): components() def components(): """install components with bower""" with lcd(static_dir): local('bower install') def css(minify=True): """generate the css from less files""" test_component('bootstrap') test_component('less.js') if minify not in ['True', 'False', True, False]: abort('minify must be Boolean') minify = (minify in ['True',True]) min_flag= '-x' if minify is True else '' with lcd(static_dir): local('lessc {min_flag} less/style.less css/style.min.css'.format(min_flag=min_flag))
27.290323
93
0.674941
121
846
4.61157
0.413223
0.064516
0.035842
0.057348
0.075269
0
0
0
0
0
0
0
0.187943
846
30
94
28.2
0.812227
0.111111
0
0.1
0
0
0.180082
0
0
0
0
0
0
1
0.15
false
0
0.15
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f15902568327420d51680a127a7bed4e1c5d6fac
2,028
py
Python
qchem/tests/test_molecular_hamiltonian.py
ryanlevy/pennylane
fb03b09d17267ebd0b9050432f9eeb84b5dff200
[ "Apache-2.0" ]
3
2021-02-22T18:30:55.000Z
2021-02-23T10:54:58.000Z
qchem/tests/test_molecular_hamiltonian.py
ryanlevy/pennylane
fb03b09d17267ebd0b9050432f9eeb84b5dff200
[ "Apache-2.0" ]
null
null
null
qchem/tests/test_molecular_hamiltonian.py
ryanlevy/pennylane
fb03b09d17267ebd0b9050432f9eeb84b5dff200
[ "Apache-2.0" ]
1
2021-03-27T09:03:15.000Z
2021-03-27T09:03:15.000Z
import os

import pytest

from pennylane import qchem
from pennylane.vqe import Hamiltonian

import numpy as np

symbols = ["C", "C", "N", "H", "H", "H", "H", "H"]
coordinates = np.array(
    [
        0.68219113, -0.85415621, -1.04123909,
        -1.34926445, 0.23621577, 0.61794044,
        1.29068294, 0.25133357, 1.40784596,
        0.83525895, -2.88939124, -1.16974047,
        1.26989596, 0.19275206, -2.69852891,
        -2.57758643, -1.05824663, 1.61949529,
        -2.17129532, 2.04090421, 0.11338357,
        2.06547065, 2.00877887, 1.20186581,
    ]
)


@pytest.mark.parametrize(
    ("charge", "mult", "package", "nact_els", "nact_orbs", "mapping"),
    [
        (0, 1, "psi4", 2, 2, "jordan_WIGNER"),
        (1, 2, "pyscf", 3, 4, "BRAVYI_kitaev"),
        (-1, 2, "pyscf", 1, 2, "jordan_WIGNER"),
        (2, 1, "psi4", 2, 2, "BRAVYI_kitaev"),
    ],
)
def test_building_hamiltonian(
    charge,
    mult,
    package,
    nact_els,
    nact_orbs,
    mapping,
    psi4_support,
    requires_babel,
    tmpdir,
):
    r"""Test that the generated Hamiltonian `built_hamiltonian` is an instance of
    the PennyLane Hamiltonian class and the correctness of the total number of
    qubits required to run the quantum simulation. The latter is tested for
    different values of the molecule's charge and for active spaces with
    different size"""

    if package == "psi4" and not psi4_support:
        pytest.skip("Skipped, no Psi4 support")

    built_hamiltonian, qubits = qchem.molecular_hamiltonian(
        symbols,
        coordinates,
        charge=charge,
        mult=mult,
        package=package,
        active_electrons=nact_els,
        active_orbitals=nact_orbs,
        mapping=mapping,
        outpath=tmpdir.strpath,
    )

    assert isinstance(built_hamiltonian, Hamiltonian)
    assert qubits == 2 * nact_orbs
22.043478
95
0.569527
233
2,028
4.862661
0.476395
0.007061
0.007944
0.007061
0.068844
0.068844
0.068844
0.068844
0
0
0
0.172688
0.317554
2,028
91
96
22.285714
0.645954
0.149901
0
0
0
0
0.086166
0
0
0
0
0
0.025974
1
0.012987
false
0
0.064935
0
0.077922
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f1595406821dd1cc1d957a18aeecca6864b3bc11
1,106
py
Python
lims_dashboard/utils.py
alneberg/lims_dashboard
7a833a7d8e9548fb5012dad814e6c9b807454fe7
[ "MIT" ]
null
null
null
lims_dashboard/utils.py
alneberg/lims_dashboard
7a833a7d8e9548fb5012dad814e6c9b807454fe7
[ "MIT" ]
null
null
null
lims_dashboard/utils.py
alneberg/lims_dashboard
7a833a7d8e9548fb5012dad814e6c9b807454fe7
[ "MIT" ]
null
null
null
import os
import subprocess
import traceback
import logging

logger = logging.getLogger("lims_dashboard")


def run_script(app, name, options):
    cwd = os.getcwd()
    os.chdir('{0}/uploads'.format(app.root_path))
    conf_obj = app.config['my_scripts'][name]
    command = [':']
    if conf_obj['type'] == 'python':
        try:
            python_exec = conf_obj['python_exec']
        except KeyError:
            # No python exec specified in script conf
            python_exec = app.config['python_exec']
        command = [python_exec,
                   os.path.join(app.config['SCRIPT_FOLDER'], app.config['my_scripts'][name]['script'])]
    command.extend(options.split())
    logger.info("About to run command: {}".format(" ".join(command)))
    try:
        handle = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = handle.communicate()
        returncode = handle.returncode
    except Exception:
        returncode = -1
        out = "Running the command: {}".format(" ".join(command))
        err = traceback.format_exc()
    os.chdir(cwd)
    return returncode, out, err
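run_script() reads several keys from the Flask app config; a hedged reconstruction of that shape, derived from the keys the function accesses (the script name and paths are made up for illustration):

# Hypothetical config consumed by run_script(app, 'qc_report', '--fast').
example_config = {
    'python_exec': '/usr/bin/python3',   # default interpreter (illustrative path)
    'SCRIPT_FOLDER': '/opt/lims/scripts',
    'my_scripts': {
        'qc_report': {
            'type': 'python',
            'script': 'qc_report.py',
            # 'python_exec': '/opt/envs/qc/bin/python',  # optional per-script override
        },
    },
}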
30.722222
115
0.644665
134
1,106
5.208955
0.455224
0.08596
0.031519
0.051576
0.063037
0
0
0
0
0
0
0.002304
0.21519
1,106
35
116
31.6
0.801843
0.035262
0
0.071429
0
0
0.137218
0
0
0
0
0
0
1
0.035714
false
0
0.142857
0
0.214286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f15a182c95aa4785e3fe9ddf7f1191498aa88c8a
5,152
py
Python
chat/indico_chat/models/chatrooms.py
aristofanischionis/indico-plugins
abd67d8f5173aa0e2d80613cf8bef415e332d773
[ "MIT" ]
null
null
null
chat/indico_chat/models/chatrooms.py
aristofanischionis/indico-plugins
abd67d8f5173aa0e2d80613cf8bef415e332d773
[ "MIT" ]
null
null
null
chat/indico_chat/models/chatrooms.py
aristofanischionis/indico-plugins
abd67d8f5173aa0e2d80613cf8bef415e332d773
[ "MIT" ]
null
null
null
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2019 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.

from __future__ import unicode_literals

from indico.core.db.sqlalchemy import UTCDateTime, db
from indico.util.date_time import now_utc
from indico.util.string import return_ascii

from indico_chat.xmpp import delete_room


class Chatroom(db.Model):
    __tablename__ = 'chatrooms'
    __table_args__ = (db.UniqueConstraint('jid_node', 'custom_server'),
                      {'schema': 'plugin_chat'})

    #: Chatroom ID
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: Node of the chatroom's JID (the part before `@domain`)
    jid_node = db.Column(
        db.String,
        nullable=False
    )
    #: Name of the chatroom
    name = db.Column(
        db.String,
        nullable=False
    )
    #: Description of the chatroom
    description = db.Column(
        db.Text,
        nullable=False,
        default=''
    )
    #: Password to join the room
    password = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    #: Custom Jabber MUC server hostname
    custom_server = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    #: ID of the creator
    created_by_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        index=True,
        nullable=False
    )
    #: Creation timestamp of the chatroom
    created_dt = db.Column(
        UTCDateTime,
        nullable=False,
        default=now_utc
    )
    #: Modification timestamp of the chatroom
    modified_dt = db.Column(
        UTCDateTime
    )
    #: The user who created the chatroom
    created_by_user = db.relationship(
        'User',
        lazy=True,
        backref=db.backref(
            'chatrooms',
            lazy='dynamic'
        )
    )

    @property
    def locator(self):
        return {'chatroom_id': self.id}

    @property
    def server(self):
        """The server name of the chatroom.

        Usually the default one unless a custom one is set.
        """
        from indico_chat.plugin import ChatPlugin
        return self.custom_server or ChatPlugin.settings.get('muc_server')

    @property
    def jid(self):
        return '{}@{}'.format(self.jid_node, self.server)

    @return_ascii
    def __repr__(self):
        server = self.server
        if self.custom_server:
            server = '!' + server
        return '<Chatroom({}, {}, {}, {})>'.format(self.id, self.name, self.jid_node, server)


class ChatroomEventAssociation(db.Model):
    __tablename__ = 'chatroom_events'
    __table_args__ = {'schema': 'plugin_chat'}

    #: ID of the event
    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        primary_key=True,
        index=True,
        autoincrement=False
    )
    #: ID of the chatroom
    chatroom_id = db.Column(
        db.Integer,
        db.ForeignKey('plugin_chat.chatrooms.id'),
        primary_key=True,
        index=True
    )
    #: If the chatroom should be hidden on the event page
    hidden = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: If the password should be visible on the event page
    show_password = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: The associated :class:`Chatroom`
    chatroom = db.relationship(
        'Chatroom',
        lazy=False,
        backref=db.backref('events', cascade='all, delete-orphan')
    )
    #: The associated event
    event = db.relationship(
        'Event',
        lazy=True,
        backref=db.backref(
            'chatroom_associations',
            lazy='dynamic'
        )
    )

    @property
    def locator(self):
        return dict(self.chatroom.locator, confId=self.event_id)

    @return_ascii
    def __repr__(self):
        return '<ChatroomEventAssociation({}, {})>'.format(self.event_id, self.chatroom)

    @classmethod
    def find_for_event(cls, event, include_hidden=False, **kwargs):
        """Returns a Query that retrieves the chatrooms for an event

        :param event: an indico event (with a numeric ID)
        :param include_hidden: if hidden chatrooms should be included, too
        :param kwargs: extra kwargs to pass to ``find()``
        """
        query = cls.find(event_id=event.id, **kwargs)
        if not include_hidden:
            query = query.filter(~cls.hidden)
        return query

    def delete(self, reason=''):
        """Deletes the event chatroom and if necessary the chatroom, too.

        :param reason: reason for the deletion
        :return: True if the associated chatroom was also deleted, otherwise False
        """
        db.session.delete(self)
        db.session.flush()
        if not self.chatroom.events:
            db.session.delete(self.chatroom)
            db.session.flush()
            delete_room(self.chatroom, reason)
            return True
        return False
26.694301
93
0.60132
598
5,152
5.053512
0.279264
0.034414
0.0364
0.015884
0.180675
0.143944
0.108206
0.027796
0
0
0
0.002218
0.299884
5,152
192
94
26.833333
0.835597
0.240101
0
0.343284
0
0
0.080369
0.019499
0
0
0
0
0
1
0.059701
false
0.014925
0.044776
0.029851
0.335821
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f15d8051b598f82bae1097c948afd6ba6185c62e
69
py
Python
test_settings.py
praekelt/django-richcomments
e1b2e123bf46135fd2bdf8fa810e4995e641db72
[ "BSD-3-Clause" ]
2
2015-01-22T19:16:06.000Z
2015-04-28T19:12:45.000Z
test_settings.py
praekelt/django-richcomments
e1b2e123bf46135fd2bdf8fa810e4995e641db72
[ "BSD-3-Clause" ]
null
null
null
test_settings.py
praekelt/django-richcomments
e1b2e123bf46135fd2bdf8fa810e4995e641db72
[ "BSD-3-Clause" ]
null
null
null
DATABASE_ENGINE = 'sqlite3'

INSTALLED_APPS = [
    'richcomments'
]
11.5
27
0.695652
6
69
7.666667
1
0
0
0
0
0
0
0
0
0
0
0.017857
0.188406
69
5
28
13.8
0.803571
0
0
0
0
0
0.275362
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
f15fcf355f967e1d7ebc0044368b9e8988697682
1,272
py
Python
fiasco_api/expenses/models.py
xelnod/fiasco_backend
edeca8cac8c7b1a1cc53051d4443cc2996eba37c
[ "MIT" ]
null
null
null
fiasco_api/expenses/models.py
xelnod/fiasco_backend
edeca8cac8c7b1a1cc53051d4443cc2996eba37c
[ "MIT" ]
null
null
null
fiasco_api/expenses/models.py
xelnod/fiasco_backend
edeca8cac8c7b1a1cc53051d4443cc2996eba37c
[ "MIT" ]
null
null
null
from django.db import models
from django.conf import settings

from core.mixins import Trackable


class ExpenseProto(models.Model):
    tags = models.ManyToManyField('tags.Tag')
    kit = models.ForeignKey('categories.Kit', on_delete=models.SET_NULL, blank=True, null=True)
    title = models.CharField(max_length=255)
    channel = models.ForeignKey('channels.Channel', on_delete=models.CASCADE)
    comment = models.TextField(blank=True, null=True)
    amount = models.IntegerField(default=0)

    @property
    def cat(self):
        return self.kit.cat


class Expense(Trackable, ExpenseProto):
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    is_fulfilled = models.BooleanField(default=True)
    money_stored = models.BooleanField(default=False)
    ongoing_origin = models.ForeignKey('expenses.OngoingExpense', null=True, blank=True,
                                       default=None, on_delete=models.SET_NULL)


class OngoingExpenseScope(models.IntegerChoices):
    MONTH = 0, 'Month'
    YEAR = 1, 'Year'


class OngoingExpense(Trackable, ExpenseProto):
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    scope = models.IntegerField(
        choices=OngoingExpenseScope.choices,
        default=OngoingExpenseScope.MONTH,
    )
34.378378
129
0.749214
151
1,272
6.211921
0.437086
0.085288
0.074627
0.067164
0.223881
0.179104
0.179104
0.179104
0.179104
0.179104
0
0.005525
0.146226
1,272
36
130
35.333333
0.858195
0
0
0.074074
0
0
0.055031
0.018082
0
0
0
0
0
1
0.037037
false
0
0.111111
0.037037
0.851852
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
f1619cbdb55c21eebfd2ca8844452a484636f5b9
775
py
Python
photos/migrations/0001_initial.py
erastus-1/Gallery
67a8d61d6d017cb8a78462678b54da782150d2c9
[ "MIT" ]
null
null
null
photos/migrations/0001_initial.py
erastus-1/Gallery
67a8d61d6d017cb8a78462678b54da782150d2c9
[ "MIT" ]
null
null
null
photos/migrations/0001_initial.py
erastus-1/Gallery
67a8d61d6d017cb8a78462678b54da782150d2c9
[ "MIT" ]
null
null
null
# Generated by Django 3.1.5 on 2021-01-09 06:50

import cloudinary.models
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('imageDescription', models.CharField(max_length=450)),
                ('image_url', cloudinary.models.CloudinaryField(blank=True, max_length=255, verbose_name='image')),
                ('date_uploaded', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
29.807692
115
0.609032
81
775
5.691358
0.654321
0.058568
0.078091
0.104121
0
0
0
0
0
0
0
0.042179
0.265806
775
25
116
31
0.768014
0.058065
0
0
1
0
0.078297
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
f1630ee487947a7bff056c1c6a14584627e28798
2,832
py
Python
circle_core/web/api/replication_masters.py
glucoseinc/CircleCore
577f814ce2944efb6e5997f3d7838c71ce9aea6a
[ "MIT" ]
3
2019-01-11T04:30:18.000Z
2019-01-11T04:31:18.000Z
circle_core/web/api/replication_masters.py
glucoseinc/CircleCore
577f814ce2944efb6e5997f3d7838c71ce9aea6a
[ "MIT" ]
16
2018-11-21T11:47:18.000Z
2021-09-01T03:52:35.000Z
circle_core/web/api/replication_masters.py
glucoseinc/CircleCore
577f814ce2944efb6e5997f3d7838c71ce9aea6a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

"""Implementation of the shared (replication) master API."""

# community module
from flask import abort, request
import sqlalchemy.exc

# project module
from circle_core.models import MetaDataSession, ReplicationMaster

from .api import api
from .utils import respond_failure, respond_success
from ..utils import (oauth_require_read_schema_scope, oauth_require_write_schema_scope)


@api.route('/replication_masters/', methods=['GET', 'POST'])
def api_repliction_masters():
    """CRUD for all ReplicationMasters."""
    if request.method == 'GET':
        return _get_replication_masters()
    elif request.method == 'POST':
        return _post_replication_masters()
    abort(405)


@oauth_require_read_schema_scope
def _get_replication_masters():
    """Fetch information on all ReplicationMasters.

    :return: information on all ReplicationMasters
    :rtype: Response
    """
    # TODO: eager loading
    replication_masters = [obj.to_json() for obj in ReplicationMaster.query]
    return respond_success(replicationMasters=replication_masters)


@oauth_require_write_schema_scope
def _post_replication_masters():
    """Create a ReplicationMaster.

    :return: information on the created ReplicationMaster
    :rtype: Response
    """
    data = request.json

    try:
        with MetaDataSession.begin():
            replication_master = ReplicationMaster(endpoint_url=data['endpointUrl'],)
            MetaDataSession.add(replication_master)
    except sqlalchemy.exc.IntegrityError:
        return respond_failure('このURLは既に登録されています')  # "This URL is already registered"

    return respond_success(replicationMaster=replication_master.to_json())


@api.route('/replication_masters/<int:replication_master_id>', methods=['GET', 'DELETE'])
def api_replication_master(replication_master_id):
    """CRUD for a single ReplicationMaster."""
    repmaster = ReplicationMaster.query.get(replication_master_id)
    if not repmaster:
        return respond_failure('Replication Master not found.', _status=404)

    if request.method == 'GET':
        return _get_replication_master(repmaster)
    elif request.method == 'DELETE':
        return _delete_replication_master(repmaster)
    abort(405)


@oauth_require_read_schema_scope
def _get_replication_master(replication_master):
    """Fetch information on a ReplicationMaster.

    :param ReplicationMaster replication_master: the ReplicationMaster to fetch
    :return: information on the ReplicationMaster
    :rtype: Response
    """
    return respond_success(replicationMaster=replication_master.to_json())


@oauth_require_write_schema_scope
def _delete_replication_master(replication_master):
    """Delete a ReplicationMaster.

    :param ReplicationMaster replication_master: the ReplicationMaster to delete
    :return: information on the ReplicationMaster
    :rtype: Response
    """
    with MetaDataSession.begin():
        MetaDataSession.delete(replication_master)

    return respond_success(replicationMaster=replication_master.to_json())
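A hedged client-side sketch of the endpoints above; the base URL, token, endpoint URL, and response envelope are assumptions, not confirmed by this file:

import requests

BASE = 'http://localhost:5000/replication_masters/'   # hypothetical host
headers = {'Authorization': 'Bearer <token>'}         # OAuth schema scopes required

r = requests.post(BASE, json={'endpointUrl': 'ws://master.example/replication'}, headers=headers)
master = r.json()['replicationMaster']                # assumes respond_success() echoes its kwargs
requests.get(BASE + str(master['id']), headers=headers)     # fetch one
requests.delete(BASE + str(master['id']), headers=headers)  # delete it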
29.5
89
0.754237
281
2,832
7.291815
0.309609
0.157638
0.082967
0.032211
0.232796
0.205954
0.175695
0.138604
0.050756
0.050756
0
0.004168
0.152895
2,832
95
90
29.810526
0.849937
0.205862
0
0.288889
0
0
0.073023
0.032093
0
0
0
0.010526
0
1
0.133333
false
0
0.133333
0
0.488889
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
f163e4eec92c5b9acccead12e1775a3ab901332a
532
py
Python
src/Blog/assets.py
sadmanbd/wagtailblog
adeccb29826200cb1bedc658a0f1c57c2f705d0e
[ "MIT" ]
1
2020-04-20T05:38:01.000Z
2020-04-20T05:38:01.000Z
src/Blog/assets.py
sadmanbd/wagtailblog
adeccb29826200cb1bedc658a0f1c57c2f705d0e
[ "MIT" ]
8
2020-02-11T21:41:52.000Z
2022-01-13T00:33:02.000Z
src/Blog/assets.py
sadmanbd/wagtailblog
adeccb29826200cb1bedc658a0f1c57c2f705d0e
[ "MIT" ]
null
null
null
from django.conf import settings
from django_assets import Bundle, register
from webassets.filter import get_filter

libsass = get_filter("libsass", style="compressed")

css_libs = Bundle(
    settings.BASE_DIR + "/assets/styles/css/libs/normalize.css",
    filters="cssutils",
    output="css/libs.css"
)

css_custom = Bundle(
    settings.BASE_DIR + "/assets/styles/sass/base.sass",
    filters=libsass,
    output="css/style.css",
    depends="/**/*.sass",
)

register("css_libs", css_libs)
register("css_custom", css_custom)
21.28
64
0.716165
70
532
5.285714
0.371429
0.094595
0.086486
0.113514
0.178378
0.178378
0
0
0
0
0
0
0.140977
532
24
65
22.166667
0.809628
0
0
0
0
0
0.270677
0.12406
0
0
0
0
0
1
0
false
0
0.176471
0
0.176471
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f165f215e0f29ecdf542caf1a3d19feab64687a9
2,864
py
Python
lib/tools/job_launcher.py
anonconda/RTDmaker
ac2a070ba26933194aa13041e1fdf92cbc39f201
[ "MIT" ]
1
2021-04-13T18:30:14.000Z
2021-04-13T18:30:14.000Z
lib/tools/job_launcher.py
anonconda/RTDmaker
ac2a070ba26933194aa13041e1fdf92cbc39f201
[ "MIT" ]
null
null
null
lib/tools/job_launcher.py
anonconda/RTDmaker
ac2a070ba26933194aa13041e1fdf92cbc39f201
[ "MIT" ]
null
null
null
import os
import sys
import time
import multiprocessing
from subprocess import Popen
from multiprocessing.pool import ThreadPool


def work(command, logfile, job_id, tot):
    line_info = f'\n{time.asctime()} Starting Job {job_id} (out of {tot})'
    line_command = f'\n{time.asctime()} Job {job_id} command: {command}\n'
    with open(logfile, "a+") as fh:
        fh.write(line_info)
        fh.write(line_command)
    try:
        process = Popen(command, shell=True)
        process.wait()
        # If the output of the process needs further processing/parsing, it can be done here
        # Source: https://stackoverflow.com/questions/26774781/
        # python-multiple-subprocess-with-a-pool-queue-recover-output-as-soon-as-one-finis
    except Exception as e:
        line_error = f'\n{time.asctime()} Error while executing Job {job_id}:\n'
        print(line_error)
        with open(logfile, "a+") as fh:
            fh.write(line_error)
            fh.write(str(e) + "\n")

    line_end = f'\n{time.asctime()} Job {job_id} completed!\n'
    print(line_end)
    with open(logfile, "a+") as fh:
        fh.write(line_end)


def launch_jobs(commands_list, logfile=None, n_jobs=None, core_proportion=(1, 3), max_cores=8, log_dir=None):
    if not log_dir:
        log_dir = os.getcwd()

    # Create a log file to track the completed jobs
    if not logfile:
        logfile = os.path.join(log_dir, f"{time.asctime().replace(' ', '_')}_jobs_logfile.txt")

    # Attach ID to the jobs to track their execution
    indexed_commands = [(i + 1, command) for i, command in enumerate(commands_list)]
    tot = len(indexed_commands)

    if n_jobs:
        if n_jobs > multiprocessing.cpu_count():
            sys.exit(f"The system does not posses that many cores. It must be {multiprocessing.cpu_count()} or less.")
        else:
            n_cores = n_jobs
    else:
        # Use a predetermined proportion of available total cores (ex: one third (1/3) of the available cores)
        n_cores = int((multiprocessing.cpu_count() / core_proportion[1]) * core_proportion[0])

    # Limit the maximum number of cores to use at a time
    if n_cores > max_cores:
        n_cores = max_cores

    line_start = f'{time.asctime()} Launching {tot} jobs, using {n_cores} cores\n'
    print(line_start)
    with open(logfile, "a+") as fh:
        fh.write(line_start)

    # Launch "n" number of jobs at a time; whenever a job is finish, launch a new one
    # The number of jobs is determined by the number of available/selected cores to use
    tp = ThreadPool(n_cores)
    for (job_id, command) in indexed_commands:
        tp.apply_async(work, (command, logfile, job_id, tot, ))
    tp.close()
    tp.join()

    line_end = f'\n{time.asctime()} All Jobs completed! ({tot})\n'
    print(line_end)
    with open(logfile, "a+") as fh:
        fh.write(line_end)
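A hedged usage sketch for launch_jobs() above; the commands and log directory are made up for illustration:

# Run three shell commands, at most two at a time, logging to /tmp.
if __name__ == '__main__':
    commands = [
        'gzip -k sample1.fastq',
        'gzip -k sample2.fastq',
        'gzip -k sample3.fastq',
    ]
    launch_jobs(commands, n_jobs=2, log_dir='/tmp')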
34.506024
118
0.651885
432
2,864
4.19213
0.331019
0.019326
0.036444
0.035892
0.170072
0.170072
0.126449
0.103258
0.103258
0.051905
0
0.007273
0.231844
2,864
82
119
34.926829
0.815909
0.217877
0
0.203704
0
0
0.212108
0.034081
0
0
0
0
0
1
0.037037
false
0
0.111111
0
0.148148
0.074074
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f1677483d30c15fa4a7bd2f5195ec1678c9aa75b
2,803
py
Python
1c_GT_Computer_Networks/Project-3/Topology.py
yevheniyc/Python
262842477793d65c2b382ca810867fd24a415576
[ "MIT" ]
4
2016-08-28T03:21:36.000Z
2021-01-19T01:59:17.000Z
1c_GT_Computer_Networks/Project-3/Topology.py
yevheniyc/Python
262842477793d65c2b382ca810867fd24a415576
[ "MIT" ]
null
null
null
1c_GT_Computer_Networks/Project-3/Topology.py
yevheniyc/Python
262842477793d65c2b382ca810867fd24a415576
[ "MIT" ]
1
2018-05-20T12:33:23.000Z
2018-05-20T12:33:23.000Z
# Assignment 3 for OMS6250
#
# Defines a Topology, which is a collection of Nodes. Students should not
# modify this file. This is NOT a topology like the ones defined in Mininet projects.
#
# Copyright 2015 Sean Donovan

from DistanceVector import *


class Topology(object):
    def __init__(self, conf_file):
        ''' Initializes the topology. Called from outside of DistanceVector.py '''
        self.topodict = {}
        self.nodes = []
        self.topo_from_conf_file(conf_file)

    def topo_from_conf_file(self, conf_file):
        ''' This creates all the nodes in the Topology from the configuration
            file passed into __init__(). Can throw an exception if there is a
            problem with the config file. '''
        try:
            conf = __import__(conf_file)
            for key in conf.topo.keys():
                new_node = DistanceVector(key, self, conf.topo[key])
                self.nodes.append(new_node)
                self.topodict[key] = new_node
        except:
            print "error importing conf_file " + conf_file
            raise

        self.verify_topo()

    def verify_topo(self):
        ''' Once the topology is imported, we verify the topology to make sure
            it is actually valid. '''
        print self.topodict
        for node in self.nodes:
            try:
                node.verify_neighbors()
            except:
                print "error with neighbors of " + node.name
                raise

    def run_topo(self):
        ''' This is where most of the action happens. First, we have to "prime
            the pump" and send to each neighbor that they are connected. Next,
            in a loop, we go through all of the nodes in the topology running
            their instances of Bellman-Ford, passing and receiving messages,
            until there are no further messages to service. Each loop, print
            out the distances after the loop instance. After the full loop,
            check to see if we're finished (all queues are empty). '''
        # Prime the pump
        for node in self.nodes:
            node.send_initial_messages()

        done = False
        while done == False:
            for node in self.nodes:
                node.process_BF()
                node.log_distances()
            # Done with a round. Now, we call finish_round() which writes out
            # each entry in log_distances(). By default, this will print out in
            # alphabetical order, which you can turn off so the logfile
            # matches what is printed during log_distances().
            finish_round()
            done = True
            for node in self.nodes:
                if len(node) != 0:
                    done = False
                    break
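topo_from_conf_file() imports a plain Python module by name and reads a `topo` dict from it; a hedged sketch of such a module follows (the per-node neighbor format is whatever DistanceVector expects, so the weighted dict below is an assumption):

# conf_sample.py -- hypothetical config module, loaded via Topology('conf_sample').
# Keys are node names; each value is handed to DistanceVector(name, topology, value).
topo = {
    'A': {'B': 1, 'C': 5},
    'B': {'A': 1, 'C': 2},
    'C': {'A': 5, 'B': 2},
}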
35.0375
86
0.585801
354
2,803
4.528249
0.440678
0.039925
0.022458
0.032439
0.076107
0.027449
0
0
0
0
0
0.005495
0.350696
2,803
79
87
35.481013
0.875275
0.16518
0
0.307692
0
0
0.034868
0
0
0
0
0
0
0
null
null
0
0.076923
null
null
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
f168e53e39bdfc41294178fc34531c937c9a2329
250
py
Python
code2/day13/demo03.py
picktsh/python
0f758dcdf9eee3580d8f6e2241ef557b6320ef54
[ "MIT" ]
1
2019-12-31T16:44:06.000Z
2019-12-31T16:44:06.000Z
code2/day13/demo03.py
picktsh/python
0f758dcdf9eee3580d8f6e2241ef557b6320ef54
[ "MIT" ]
null
null
null
code2/day13/demo03.py
picktsh/python
0f758dcdf9eee3580d8f6e2241ef557b6320ef54
[ "MIT" ]
1
2022-01-13T10:32:22.000Z
2022-01-13T10:32:22.000Z
# How to define these fields in items.py
import scrapy  # import scrapy


class DoubanItem(scrapy.Item):  # define a DoubanItem class that inherits from scrapy.Item
    title = scrapy.Field()    # data field for the book title
    publish = scrapy.Field()  # data field for the publication info
    score = scrapy.Field()    # data field for the rating
17.857143
37
0.68
24
250
7.083333
0.708333
0.194118
0
0
0
0
0
0
0
0
0
0
0.22
250
13
38
19.230769
0.871795
0.364
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.2
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
f1698183cab47657356ca5d40e4c3268c220a1ca
2,388
py
Python
malware.py
c0zzy/semi-supervised-ann
5b458774388d79f64b19ca696afb0d42ed403731
[ "MIT" ]
null
null
null
malware.py
c0zzy/semi-supervised-ann
5b458774388d79f64b19ca696afb0d42ed403731
[ "MIT" ]
null
null
null
malware.py
c0zzy/semi-supervised-ann
5b458774388d79f64b19ca696afb0d42ed403731
[ "MIT" ]
null
null
null
import copy

from alg.semi_supervised import SemiSupervised
from lib.ops import evaluate
from lib.utils import load_avast_weeks_pca, parse_arguments

args = parse_arguments()

dataset_avast_pca = {
    'name': 'avast_pca',
    'input_size': 128,
    'num_classes': 5,
    'mlp_arch': [96, 64, 32],
    'visualise_funcs': ['losses']
}

dataset_avast_pca_binary = copy.deepcopy(dataset_avast_pca)
dataset_avast_pca_binary['num_classes'] = 2

ds = dataset_avast_pca_binary if args.binary else dataset_avast_pca

if args.m:
    # can't load matplotlib in metacentrum
    ds['visualise_funcs'] = []


def train(run_nmb, x, y, x_un=None, y_un=None, x_test=None, y_test=None):
    semi_sup = SemiSupervised(
        dataset_params=ds,
        num_epoch=100,
        ssl_method=args.method,
        options=args.options,
        hyper_par=args.hyper_par,
        save_results=False,
        imbalanced=False,
        should_evaluate=True
    )
    semi_sup.set_train_data(x, y)
    if x_test is not None and y_test is not None:
        semi_sup.set_test_data(x_test, y_test)
    if x_un is not None:
        semi_sup.set_unsupervised_data(x_un)
    else:
        semi_sup.split_sup_unsup(args.ratio)
    semi_sup.prepare_train_test_data()
    model = semi_sup.train(run_nmb)
    return model


def train_eval(run_nmb):
    x, y, x_test, y_test = load_avast_weeks_pca(args.train_weeks, 10000 + 5000, 5000, args.binary)
    print('y_test: ', y_test[:10])
    model = train(run_nmb, x, y, x_test=x_test, y_test=y_test)
    test_acc = evaluate(model, ds['num_classes'], x_test, y_test, hot=False)
    return test_acc


def main():
    with open(args.out_path + 'results.out', 'w') as f:
        print('weeks:', args.train_weeks, file=f)
        print('ratio:', args.ratio, file=f)
        print('ssl:', args.method, file=f)
        print(args.hyper_par, file=f)
        print(args.options, file=f)

    runs = args.runs
    accuracies = []
    for i in range(runs):
        acc = train_eval(i)
        print('Final test acc:', acc)
        with open(args.out_path + 'results.out', 'a') as f:
            print(i, acc, file=f)
        accuracies.append(acc)

    with open(args.out_path + 'results.out', 'a') as f:
        print('min', min(accuracies), file=f)
        print('max', max(accuracies), file=f)
        print('avg', sum(accuracies) / runs, file=f)


if __name__ == '__main__':
    main()
27.767442
98
0.652429
366
2,388
3.986339
0.300546
0.030843
0.061686
0.027416
0.132968
0.132968
0.07608
0.056203
0.056203
0.056203
0
0.015625
0.222781
2,388
85
99
28.094118
0.770474
0.015075
0
0.030769
0
0
0.081702
0
0
0
0
0
0
1
0.046154
false
0
0.061538
0
0.138462
0.169231
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f16a774d5548b032544d49f97e7b259834c08fa6
580
py
Python
one cmd creator/defaults.py
JustRedTTG/one-command-block-creator
81f57da5be04d6b19332b7b0da2db6b2cee088a1
[ "MIT" ]
null
null
null
one cmd creator/defaults.py
JustRedTTG/one-command-block-creator
81f57da5be04d6b19332b7b0da2db6b2cee088a1
[ "MIT" ]
null
null
null
one cmd creator/defaults.py
JustRedTTG/one-command-block-creator
81f57da5be04d6b19332b7b0da2db6b2cee088a1
[ "MIT" ]
null
null
null
functions = {
    'default-start': """@minecraft:activator_rail\n""",
    'default-end': """setblock ~ ~1 ~ minecraft:chain_command_block[facing=up]{auto:1,Command:"fill ~ ~ ~ ~ ~-2 ~ air"}
setblock ~ ~ ~ minecraft:command_block[facing=up]{auto:1,Command:"kill @e[type=minecraft:command_block_minecart,distance=..2]"}\n""",
    'default-fullend': """setblock ~ ~1 ~ minecraft:chain_command_block[facing=up]{auto:1,Command:"fill ~ ~ ~ ~ ~-3 ~ air"}
setblock ~ ~ ~ minecraft:command_block[facing=up]{auto:1,Command:"kill @e[type=minecraft:command_block_minecart,distance=..2]"}\n"""
}
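A hedged sketch of how these defaults might be assembled into an output; the concatenation logic is an assumption, since the creator script that consumes this dict is not in this record.

# Hypothetical assembly: start marker, user commands, then the chosen footer.
user_commands = 'say one-command contraption loaded\n'
one_command = functions['default-start'] + user_commands + functions['default-end']
print(one_command)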
72.5
133
0.687931
77
580
5.038961
0.337662
0.185567
0.185567
0.206186
0.819588
0.819588
0.819588
0.819588
0.819588
0.819588
0
0.018975
0.091379
580
7
134
82.857143
0.717268
0
0
0
0
0.571429
0.896552
0.646552
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
1
0
0
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
12
f16aaf2fc0ac983255c89745ef68d2c5983e2b39
593
py
Python
phishfry/remediation_result.py
krayzpipes/phishfry
6da69548ae6970e2bc773d112509a7c94ce0e265
[ "Apache-2.0" ]
null
null
null
phishfry/remediation_result.py
krayzpipes/phishfry
6da69548ae6970e2bc773d112509a7c94ce0e265
[ "Apache-2.0" ]
3
2019-12-31T00:12:12.000Z
2020-01-03T13:43:30.000Z
phishfry/remediation_result.py
krayzpipes/phishfry
6da69548ae6970e2bc773d112509a7c94ce0e265
[ "Apache-2.0" ]
2
2019-02-12T21:01:20.000Z
2019-04-22T17:41:46.000Z
import logging

log = logging.getLogger(__name__)


class RemediationResult(object):
    def __init__(self, address, message_id, mailbox_type, action, success=True, message=None):
        self.address = address
        self.message_id = message_id
        self.mailbox_type = mailbox_type
        self.success = success
        self.message = message
        self.owner = None
        self.members = []
        self.forwards = []
        self.action = action

    def result(self, message, success=False):
        log.info(message)
        self.success = success
        self.message = message
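A minimal usage sketch; the address, message id, and mailbox type below are illustrative values only.

# Record a remediation attempt, then mark it failed with a reason.
res = RemediationResult('user@example.com', '<msg-id@example.com>', 'Mailbox', 'remove')
res.result('message not found in mailbox')  # logs the message and sets success=False
print(res.success, res.message)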
28.238095
94
0.640809
66
593
5.545455
0.393939
0.120219
0.098361
0.120219
0.196721
0.196721
0
0
0
0
0
0
0.269815
593
20
95
29.65
0.845266
0
0
0.235294
0
0
0
0
0
0
0
0
0
1
0.117647
false
0
0.058824
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f16b40ca452b4c54cacfb36dfaca7d833512d61a
10,452
py
Python
psyIP.py
chocosobo/psypy
ebaa9e59a70117acf3182f86d7ab7fc3c40338c3
[ "MIT" ]
6
2015-07-24T05:08:52.000Z
2017-12-08T04:39:22.000Z
psyIP.py
chocosobo/psypy
ebaa9e59a70117acf3182f86d7ab7fc3c40338c3
[ "MIT" ]
1
2017-08-23T21:40:17.000Z
2017-09-27T00:32:55.000Z
psyIP.py
chocosobo/psypy
ebaa9e59a70117acf3182f86d7ab7fc3c40338c3
[ "MIT" ]
7
2019-09-14T18:16:05.000Z
2021-11-26T15:34:26.000Z
import math as m # All functions expect base SI units for any arguments given # DBT - Dry bulb temperature - Degrees Rankine, R # DPT - Dew point temperature - Degress Rankine, R # H - Specific enthalpy - British thermal unit per pound mass, # Btu/lbm # P - Atmospheric pressure - Pounds force per square inch, psi # Pw - Water vapor partial pressure - Pounds force per square inch, psi # RH - Relative humidity - Decimal (i.e. not a percentage) # V - Specific volume - Cubic feet per pound mass, ft^3/lbm # W - Humidity ratio - pounds mass per pound mass, lbm/lbm # WBT - Wet bulb temperature - Degrees Rankine, R # Minimum dry bulb temperature Min_DBT=491.67 # Maximum dry bulb temperature Max_DBT=851.67 # Convergence tolerance TOL=0.0000005 def __DBT_H_RH_P(H, RH, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_RH_P(DBTa, RH, P)-__W_DBT_H(DBTa, H) y=__W_DBT_RH_P(DBT, RH, P)-__W_DBT_H(DBT, H) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_H_V_P(H, V, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_V_P(DBTa, V, P)-__W_DBT_H(DBTa, H) y=__W_DBT_V_P(DBT, V, P)-__W_DBT_H(DBT, H) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_H_W(H, W): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=W-__W_DBT_H(DBTa, H) y=W-__W_DBT_H(DBT, H) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_H_WBT_P(H, WBT, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_WBT_P(DBTa, WBT, P)-__W_DBT_H(DBTa, H) y=__W_DBT_WBT_P(DBT, WBT, P)-__W_DBT_H(DBT, H) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_RH_V_P(RH, V, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_RH_P(DBTa, RH, P)-__W_DBT_V_P(DBTa, V, P) y=__W_DBT_RH_P(DBT, RH, P)-__W_DBT_V_P(DBT, V, P) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_RH_W_P(RH, W, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_RH_P(DBTa, RH, P)-W y=__W_DBT_RH_P(DBT, RH, P)-W if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_RH_WBT_P(RH, WBT, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_WBT_P(DBTa, WBT, P)-__W_DBT_RH_P(DBTa, RH, P) y=__W_DBT_WBT_P(DBT, WBT, P)-__W_DBT_RH_P(DBT, RH, P) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_V_W_P(V, W, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=W-__W_DBT_V_P(DBTa, V, P) y=W-__W_DBT_V_P(DBT, V, P) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_V_WBT_P(V, WBT, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_WBT_P(DBTa, WBT, P)-__W_DBT_V_P(DBTa, V, P) y=__W_DBT_WBT_P(DBT, WBT, P)-__W_DBT_V_P(DBT, V, P) if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT def __DBT_W_WBT_P(W, WBT, P): [DBTa, DBTb]=[Min_DBT, Max_DBT] DBT=(DBTa+DBTb)/2 while DBTb-DBTa>TOL: ya=__W_DBT_WBT_P(DBTa, WBT, P)-W y=__W_DBT_WBT_P(DBT, WBT, P)-W if __is_positive(y)==__is_positive(ya): DBTa=DBT else: DBTb=DBT DBT=(DBTa+DBTb)/2 return DBT # ASHRAE 2009 Chapter 1 Equation 39 def __DPT_Pw(Pw): Pw=Pw 
C14=100.45 C15=33.193 C16=2.319 C17=0.17074 C18=1.2063 a=m.log(Pw) return (C14+C15*a+C16*a**2+C17*a**3+C18*Pw**0.1984)+459.67 # ASHRAE 2009 Chapter 1 Equation 32 def __H_DBT_W(DBT, W): if __valid_DBT(DBT): DBT=DBT-459.67 return 0.240*DBT+W*(1061+0.444*DBT) def __is_positive(x): if x>0: return True else: return False # ASHRAE 2009 Chapter 1 Equation 22 def __Pw_W_P(W, P): return W*P/(W+0.621945) # ASHRAE 2009 Chapter 1 Equation 6 def __Pws(DBT): if __valid_DBT(DBT): C8=-1.0440397*10**4 C9=-1.1294650*10**1 C10=-2.7022355*10**-2 C11=1.2890360*10**-5 C12=-2.4780681*10**-9 C13=6.5459673 return m.exp(C8/DBT+C9+C10*DBT+C11*DBT**2+C12*DBT**3+C13*m.log(DBT)) def state(prop1, prop1val, prop2, prop2val,P): if prop1==prop2: print("Properties must be independent.") return prop=["DBT","WBT","RH","W","V","H"] if prop1 not in prop or prop2 not in prop: print("Valid property must be given.") return prop1i=prop.index(prop1) prop2i=prop.index(prop2) if prop1i<prop2i: cd1=prop1 cd1val=prop1val cd2=prop2 cd2val=prop2val else: cd1=prop2 cd1val=prop2val cd2=prop1 cd2val=prop1val if cd1=="DBT": DBT=cd1val if cd2=="WBT": WBT=cd2val W=__W_DBT_WBT_P(DBT, WBT, P) H=__H_DBT_W(DBT, W) RH=__RH_DBT_W_P(DBT, W, P) V=__V_DBT_W_P(DBT, W, P) elif cd2=="RH": RH=cd2val W=__W_DBT_RH_P(DBT, RH, P) H=__H_DBT_W(DBT, W) V=__V_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd2=="W": W=cd2val H=__H_DBT_W(DBT, W) RH=__RH_DBT_W_P(DBT, W, P) V=__V_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd2=="V": V=cd2val W=__W_DBT_V_P(DBT, V, P) H=__H_DBT_W(DBT, W) RH=__RH_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd2=="H": H=cd2val W=__W_DBT_H(DBT, H) RH=__RH_DBT_W_P(DBT, W, P) V=__V_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd1=="WBT": WBT=cd1val if cd2=="RH": RH=cd2val DBT=__DBT_RH_WBT_P(RH, WBT, P) W=__W_DBT_RH_P(DBT, RH, P) H=__H_DBT_W(DBT, W) V=__V_DBT_W_P(DBT, W, P) elif cd2=="W": W=cd2val DBT=__DBT_W_WBT_P(W, WBT, P) H=__H_DBT_W(DBT, W) RH=__RH_DBT_W_P(DBT, W, P) V=__V_DBT_W_P(DBT, W, P) elif cd2=="V": V=cd2val DBT=__DBT_V_WBT_P(V, WBT, P) W=__W_DBT_V_P(DBT, V, P) H=__H_DBT_W(DBT, W) RH=__RH_DBT_W_P(DBT, W, P) elif cd2=="H": H=cd2val DBT=__DBT_H_WBT_P(H, WBT, P) W=__W_DBT_H(DBT, H) RH=__RH_DBT_W_P(DBT, W, P) V=__V_DBT_W_P(DBT, W, P) elif cd1=="RH": RH=cd1val if cd2=="W": W=cd2val DBT=__DBT_RH_W_P(RH, W, P) H=__H_DBT_W(DBT, W) V=__V_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd2=="V": V=cd2val DBT=__DBT_RH_V_P(RH, V, P) W=__W_DBT_RH_P(DBT, RH, P) H=__H_DBT_W(DBT, W) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd2=="H": H=cd2val DBT=__DBT_H_RH_P(H, RH, P) W=__W_DBT_RH_P(DBT, RH, P) V=__V_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd1=="W": W=cd1val if cd2=="V": V=cd2val DBT=__DBT_V_W_P(V, W, P) H=__H_DBT_W(DBT, W) RH=__RH_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd2=="H": H=cd2val DBT=__DBT_H_W(H, W) RH=__RH_DBT_W_P(DBT, W, P) V=__V_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) elif cd1=="V": V=cd1val H=cd2val DBT=__DBT_H_V_P(H, V, P) W=__W_DBT_V_P(DBT, V, P) RH=__RH_DBT_W_P(DBT, W, P) WBT=__WBT_DBT_W_P(DBT, W, P) return [DBT, H, RH, V, W, WBT] # ASHRAE 2009 Chapter 1 Equation 22 and Equation 24 def __RH_DBT_W_P(DBT, W, P): if __valid_DBT(DBT): return W*P/((0.621945+W)*__Pws(DBT)) # ASHRAE 2009 Chapter 1 Equation 28 def __V_DBT_W_P(DBT, W, P): if __valid_DBT(DBT): return 0.370486*DBT*(1+1.607858*W)/P # ASHRAE 2009 Chapter 1 Equation 32 def __W_DBT_H(DBT, H): if __valid_DBT(DBT): DBT=DBT-459.67 return (H-0.240*DBT)/(1061+0.444*DBT) # ASHRAE 2009 Chapter 1 Equation 22 and Equation 24 
def __W_DBT_RH_P(DBT, RH, P): if __valid_DBT(DBT): Pw=RH*__Pws(DBT) return 0.621945*Pw/(P-Pw) # ASHRAE 2009 Chapter 1 Equation 28 def __W_DBT_V_P(DBT, V, P): if __valid_DBT(DBT): return (P*V-0.370486*DBT)/(1.607858*0.370486*DBT) # ASHRAE 2009 Chapter 1 Equation 35 def __W_DBT_WBT_P(DBT, WBT, P): if __valid_DBT(DBT): DBT=DBT-459.67 WBT=WBT-459.67 return ((1093-0.556*WBT)*__W_DBT_RH_P(WBT+459.67,1,P)-0.240*(DBT-WBT))/\ (1093+0.444*DBT-WBT) # ASHRAE 2009 Chapter 1 Equation 35 def __WBT_DBT_W_P(DBT, W, P): if __valid_DBT(DBT): WBTa=__DPT_Pw(__Pw_W_P(W, P)) WBTb=DBT WBT=(WBTa+WBTb)/2 while WBTb-WBTa>TOL: Ws=__W_DBT_WBT_P(DBT, WBT, P) if W>Ws: WBTa=WBT else: WBTb=WBT WBT=(WBTa+WBTb)/2 return WBT def __valid_DBT(DBT): if Min_DBT<=DBT<=Max_DBT: return True else: return False
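A hedged usage sketch, assuming this file is importable as psyIP. The inputs are illustrative: 529.67 R is 70 degF, 0.5 is 50% relative humidity, and 14.696 psi is standard sea-level pressure.

import psyIP

# state() fixes the remaining properties from two independent ones plus pressure,
# returning [DBT, H, RH, V, W, WBT] in the module's IP units.
DBT, H, RH, V, W, WBT = psyIP.state('DBT', 529.67, 'RH', 0.5, 14.696)
print('humidity ratio W:', W, 'lbm/lbm')
print('wet bulb temperature WBT:', WBT, 'R')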
28.557377
80
0.540088
1,809
10,452
2.74848
0.096186
0.07321
0.066372
0.053097
0.707965
0.671158
0.648632
0.559936
0.540426
0.509453
0
0.065667
0.329793
10,452
365
81
28.635616
0.644111
0.116437
0
0.592357
0
0
0.01075
0
0
0
0
0
0
1
0.076433
false
0
0.003185
0.003185
0.16879
0.006369
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f16b53621b6f62b6b1717b1dbba16b70894bb004
391
py
Python
newday2/App/hello.py
greatfirsty/hellopython
f12aacf36b8f208d6c5622ffd6b4c1927f37b45a
[ "Apache-2.0" ]
1
2019-05-04T01:25:43.000Z
2019-05-04T01:25:43.000Z
newday2/App/hello.py
greatfirsty/hellopython
f12aacf36b8f208d6c5622ffd6b4c1927f37b45a
[ "Apache-2.0" ]
null
null
null
newday2/App/hello.py
greatfirsty/hellopython
f12aacf36b8f208d6c5622ffd6b4c1927f37b45a
[ "Apache-2.0" ]
null
null
null
import time


def total_time(fun):
    def f():
        before_time = time.time()
        fun()
        current_time = time.time()
        t_time = current_time - before_time
        print(t_time)
    return f


@total_time
def add():
    time.sleep(1)
    return 3


# Attach a decorator to the function to measure its running time
@total_time
def sub():
    print('slept badly')
    time.sleep(2)
    print('so annoying')
    return 10


if __name__ == '__main__':
    sub()
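Note that the wrapper f() discards fun()'s return value, so the decorated add() and sub() both return None. A hedged variant (not in the source) that forwards arguments and preserves the result:

def total_time_keep_result(fun):
    def f(*args, **kwargs):
        before = time.time()
        result = fun(*args, **kwargs)  # call through and keep the result
        print(time.time() - before)    # elapsed seconds
        return result
    return f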
15.64
39
0.598465
56
391
3.875
0.446429
0.147465
0.110599
0
0
0
0
0
0
0
0
0.017544
0.2711
391
24
40
16.291667
0.74386
0.033248
0
0.095238
0
0
0.034574
0
0
0
0
0
0
1
0.190476
false
0
0.047619
0
0.380952
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f16cd046c1d5bc8c482c76596f8d465b56eb49c5
667
py
Python
too_simple_server/__main__.py
outcatcher/Simple-Mock-Webserver
315bccca9dea9e821bb0a02878187058cd1fe38f
[ "MIT" ]
null
null
null
too_simple_server/__main__.py
outcatcher/Simple-Mock-Webserver
315bccca9dea9e821bb0a02878187058cd1fe38f
[ "MIT" ]
3
2019-09-04T13:22:46.000Z
2019-09-13T09:35:51.000Z
too_simple_server/__main__.py
opentelekomcloud-infra/Simple-Mock-Webserver
315bccca9dea9e821bb0a02878187058cd1fe38f
[ "MIT" ]
null
null
null
"""Run server as module""" from argparse import ArgumentParser from too_simple_server.configuration import DEFAULT_CFG_PATH from too_simple_server.run import main AGP = ArgumentParser(description="Mock server with simple DB interactions") AGP.add_argument("--debug", action="store_true", default=None) AGP.add_argument("--config", help=f"Configuration file to be used, '{DEFAULT_CFG_PATH}' by default", default=DEFAULT_CFG_PATH) AGP.add_argument("--no-wsgi", action="store_true", default=False) AGP.add_argument("action", default="start", choices=["start", "stop"]) ARGS = AGP.parse_args() main(ARGS.action, ARGS.debug, ARGS.config, ARGS.no_wsgi)
41.6875
100
0.758621
95
667
5.136842
0.463158
0.04918
0.114754
0.077869
0
0
0
0
0
0
0
0
0.106447
667
15
101
44.466667
0.818792
0.029985
0
0
0
0
0.25741
0
0
0
0
0
0
1
0
false
0
0.272727
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f16d85b67524c6dca5cdb704cfed6566472ca5d7
10,239
py
Python
tests/test_vmtkScripts/test_vmtksurfacecelldatatopointdata.py
ramtingh/vmtk
4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3
[ "Apache-2.0" ]
null
null
null
tests/test_vmtkScripts/test_vmtksurfacecelldatatopointdata.py
ramtingh/vmtk
4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3
[ "Apache-2.0" ]
null
null
null
tests/test_vmtkScripts/test_vmtksurfacecelldatatopointdata.py
ramtingh/vmtk
4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3
[ "Apache-2.0" ]
1
2019-06-18T23:41:11.000Z
2019-06-18T23:41:11.000Z
## Program: VMTK ## Language: Python ## Date: February 12, 2018 ## Version: 1.4 ## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved. ## See LICENSE file for details. ## This software is distributed WITHOUT ANY WARRANTY; without even ## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. See the above copyright notices for more information. ## Note: this code was contributed by ## Richard Izzo (Github @rlizzo) ## University at Buffalo import pytest import vmtk.vmtksurfacecelldatatopointdata as celltopoint from vtk.numpy_interface import dataset_adapter as dsa import numpy as np @pytest.fixture(scope='module') def centerline_pointdata(aorta_centerline_branches): ctp = celltopoint.vmtkSurfaceCellDataToPointData() ctp.Surface = aorta_centerline_branches ctp.Execute() return ctp.Surface @pytest.mark.parametrize("expectedKey",[ ('CenterlineIds'), ('TractIds'), ('Blanking'), ('GroupIds') ]) def test_expected_cell_data_keys(centerline_pointdata, expectedKey): wp = dsa.WrapDataObject(centerline_pointdata) assert expectedKey in wp.CellData.keys() @pytest.mark.parametrize("expectedKey",[ ('MaximumInscribedSphereRadius'), ('EdgeArray'), ('EdgePCoordArray'), ('CenterlineIds'), ('TractIds'), ('Blanking'), ('GroupIds') ]) def test_expected_point_data_keys(centerline_pointdata, expectedKey): wp = dsa.WrapDataObject(centerline_pointdata) assert expectedKey in wp.PointData.keys() def test_number_of_cell_data_keys_is_4(centerline_pointdata): wp = dsa.WrapDataObject(centerline_pointdata) assert len(wp.CellData.keys()) == 4 def test_number_of_point_data_keys_is_7(centerline_pointdata): wp = dsa.WrapDataObject(centerline_pointdata) assert len(wp.PointData.keys()) == 7 def test_expected_number_of_points_in_output(centerline_pointdata): wp = dsa.WrapDataObject(centerline_pointdata) assert wp.Points.shape == (417, 3) def test_blanking_array_is_correct(centerline_pointdata): wp = dsa.WrapDataObject(centerline_pointdata) expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) assert np.allclose(wp.PointData.GetArray('Blanking'), expectedOutput) == True def test_centerlineids_array_is_correct(centerline_pointdata): wp = dsa.WrapDataObject(centerline_pointdata) expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) assert np.allclose(wp.PointData.GetArray('CenterlineIds'), expectedOutput) == True def test_groupids_array_is_correct(centerline_pointdata): wp = dsa.WrapDataObject(centerline_pointdata) expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]) assert np.allclose(wp.PointData.GetArray('GroupIds'), expectedOutput) == True def test_tractids_array_is_correct(centerline_pointdata): wp = dsa.WrapDataObject(centerline_pointdata) expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) assert np.allclose(wp.PointData.GetArray('TractIds'), expectedOutput) == True
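These are pytest tests; the aorta_centerline_branches fixture is not defined in this file and presumably comes from the suite's conftest.py. Assuming the vmtk test environment and its test data are set up, a run would look like `pytest tests/test_vmtkScripts/test_vmtksurfacecelldatatopointdata.py`.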
61.680723
97
0.377381
2,001
10,239
1.896552
0.058971
0.463241
0.688538
0.909618
0.704348
0.704348
0.667457
0.640053
0.623452
0.623452
0
0.288653
0.430218
10,239
165
98
62.054545
0.361844
0.046977
0
0.6875
0
0
0.019624
0.002877
0
0
0
0
0.070313
1
0.078125
false
0
0.03125
0
0.117188
0
0
0
1
null
1
1
1
0
1
0
0
0
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
f1729eaefd848cb322cd91bf8c15356b94b4a070
3,038
py
Python
SIM_utils/SIM.py
facebookresearch/Project_FARSI
12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4
[ "MIT" ]
14
2021-06-01T16:45:19.000Z
2022-03-08T20:07:00.000Z
SIM_utils/SIM.py
facebookresearch/Project_FARSI
12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4
[ "MIT" ]
null
null
null
SIM_utils/SIM.py
facebookresearch/Project_FARSI
12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4
[ "MIT" ]
3
2021-08-05T16:37:47.000Z
2022-01-06T00:25:49.000Z
#Copyright (c) Facebook, Inc. and its affiliates.
#This source code is licensed under the MIT license found in the
#LICENSE file in the root directory of this source tree.

from SIM_utils.components.perf_sim import *
from SIM_utils.components.pow_sim import *
#from OSSIM_utils.components.pow_knob_sim import *
from design_utils.design import *
from settings import config


# This module is our top level simulator containing all simulators (perf, and pow simulator)
class OSASimulator:
    def __init__(self, dp, database, pk_dp=""):
        self.time_elapsed = 0  # time elapsed from the beginning of the simulation
        self.dp = dp  # design point to simulate
        self.perf_sim = PerformanceSimulator(self.dp)  # performance simulator instance
        self.pow_sim = PowerSimulator(self.dp)  # power simulator instance
        self.database = database
        if config.simulation_method == "power_knobs":
            self.pk_dp = pk_dp
            #self.knob_change_sim = PowerKnobSimulator(self.dp, self.pk_dp, self.database)
        self.completion_time = -1  # time passed for the simulation to complete
        self.program_status = "idle"
        self.cur_tick_time = self.next_tick_time = 0  # current tick time

    # ------------------------------
    # Functionality:
    #   whether the simulation should terminate
    # ------------------------------
    def terminate(self, program_status):
        if config.termination_mode == "workload_completion":
            return program_status == "done"
        elif config.termination_mode == "time_budget_reahced":
            return self.time_elapsed >= config.time_budge
        else:
            return False

    # ------------------------------
    # Functionality:
    #   ticking the simulation. Note that the tick time varies depending on what is
    #   (dynamically) happening in the system
    # ------------------------------
    def tick(self):
        self.cur_tick_time = self.next_tick_time

    # ------------------------------
    # Functionality
    #   progress the simulation for clock_time forward
    # ------------------------------
    def step(self, clock_time):
        self.next_tick_time, self.program_status = self.perf_sim.simulate(clock_time)

    # ------------------------------
    # Functionality:
    #   simulation
    # ------------------------------
    def simulate(self):
        while not self.terminate(self.program_status):
            self.tick()
            self.step(self.cur_tick_time)

        if config.use_cacti:
            self.dp.correct_power_area_with_cacti(self.database)

        # collect all the stats upon completion of simulation
        self.dp.collect_dp_stats(self.database)
        if config.simulation_method == "power_knobs":
            self.knob_change_sim.launch()
        self.completion_time = self.next_tick_time
        self.dp.set_serial_design_time(self.perf_sim.serial_latency)
        self.dp.set_par_speedup(self.perf_sim.serial_latency / self.completion_time)
        return self.dp
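A hedged driver sketch; dp and database stand for Project_FARSI design-point and database objects built elsewhere in the project, so the names here are placeholders.

# Hypothetical driver: dp and database must be valid Project_FARSI objects.
sim = OSASimulator(dp, database)
simulated_dp = sim.simulate()
print('completion time:', sim.completion_time)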
40.506667
114
0.629032
360
3,038
5.097222
0.35
0.032698
0.023978
0.034877
0.140599
0.140599
0.083924
0.083924
0
0
0
0.001272
0.223502
3,038
75
115
40.506667
0.7766
0.38183
0
0.05
0
0
0.036777
0
0
0
0
0
0
1
0.125
false
0
0.1
0
0.35
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f1743c6ed7aff9eadd1ebcb064f885173f6e5cef
5,992
py
Python
netbox_graphql/tests/ipam/tests_vlans.py
ninech/django-netbox-graphql
8383570bdf3a8ce8d9d912c5b8f7b053b31c7363
[ "MIT" ]
17
2017-08-17T02:38:09.000Z
2022-01-05T15:36:20.000Z
netbox_graphql/tests/ipam/tests_vlans.py
ninech/django-netbox-graphql
8383570bdf3a8ce8d9d912c5b8f7b053b31c7363
[ "MIT" ]
2
2017-09-13T14:53:56.000Z
2018-02-08T14:06:54.000Z
netbox_graphql/tests/ipam/tests_vlans.py
ninech/django-netbox-graphql
8383570bdf3a8ce8d9d912c5b8f7b053b31c7363
[ "MIT" ]
2
2020-03-04T11:51:10.000Z
2021-03-11T19:24:37.000Z
from string import Template from graphene.test import Client from django.test import TestCase from ipam.models import VLAN from netbox_graphql.schema import schema from netbox_graphql.tests.utils import obj_to_global_id from netbox_graphql.tests.factories.ipam_factories import VLANFactory, RoleFactory from netbox_graphql.tests.factories.tenant_factories import TenantFactory class CreateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.tenant = TenantFactory() cls.role = RoleFactory() cls.query = Template(''' mutation{ newVlan(input: { tenant: "$tenantId", role: "$roleId", vid: 2, name: "New Vlan"}) { vlan{ name vid tenant{ name } role{ name } } } } ''').substitute(tenantId=obj_to_global_id(cls.tenant), roleId=obj_to_global_id(cls.role)) def test_creating_returns_no_error(self): result = schema.execute(self.query) assert not result.errors def test_creating_returns_data(self): expected = {'newVlan': {'vlan': {'name': 'New Vlan', 'vid': 2, 'tenant': {'name': self.tenant.name}, 'role': {'name': self.role.name} }}} result = schema.execute(self.query) self.assertEquals(result.data, expected) def test_creating_creates_it(self): oldCount = VLAN.objects.all().count() schema.execute(self.query) self.assertEquals(VLAN.objects.all().count(), oldCount + 1) class QueryMultipleTestCase(TestCase): @classmethod def setUpTestData(cls): cls.first = VLANFactory() cls.second = VLANFactory() cls.query = ''' { vlans { edges { node { id } } } } ''' def test_querying_all_returns_no_error(self): result = schema.execute(self.query) assert not result.errors def test_querying_all_returns_two_results(self): result = schema.execute(self.query) self.assertEquals(len(result.data['vlans']['edges']), 2) class QuerySingleTestCase(TestCase): @classmethod def setUpTestData(cls): cls.first = VLANFactory() cls.second = VLANFactory() cls.query = Template(''' { vlans(id: "$id") { edges { node { name vid tenant { name } role { name } } } } } ''').substitute(id=obj_to_global_id(cls.second)) def test_querying_single_returns_no_error(self): result = schema.execute(self.query) assert not result.errors def test_querying_single_returns_result(self): result = schema.execute(self.query) self.assertEquals(len(result.data['vlans']['edges']), 1) def test_querying_single_returns_expected_result(self): result = schema.execute(self.query) expected = {'vlans': {'edges': [ {'node': {'name': self.second.name, 'vid': self.second.vid, 'tenant': {'name': self.second.tenant.name}, 'role': {'name': self.second.role.name}}} ]}} self.assertEquals(result.data, expected) class UpdateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.first = VLANFactory() cls.tenant = TenantFactory() cls.query = Template(''' mutation{ updateVlan(input: { id: "$id", vid: 10, name: "New Name", tenant: "$tenantId"}) { vlan{ name vid tenant { name } } } } ''').substitute(id=obj_to_global_id(cls.first), tenantId=obj_to_global_id(cls.tenant)) def test_updating_returns_no_error(self): result = schema.execute(self.query) assert not result.errors def test_updating_doesnt_change_count(self): oldCount = VLAN.objects.all().count() schema.execute(self.query) self.assertEquals(VLAN.objects.all().count(), oldCount) def test_updating_returns_updated_data(self): expected = {'updateVlan': {'vlan': {'name': 'New Name', 'vid': 10, 'tenant': {'name': self.tenant.name}}}} result = schema.execute(self.query) self.assertEquals(result.data, expected) def test_updating_alters_data(self): schema.execute(self.query) vlan = VLAN.objects.get(id=self.first.id) 
self.assertEquals(vlan.name, 'New Name') self.assertEquals(vlan.vid, 10) self.assertEquals(vlan.tenant.name, self.tenant.name) class DeleteTestCase(TestCase): @classmethod def setUpTestData(cls): cls.first = VLANFactory() cls.query = Template(''' mutation{ deleteVlan(input: { id:"$id"}) { vlan{ id } } } ''').substitute(id=obj_to_global_id(cls.first)) def test_deleting_returns_no_error(self): result = schema.execute(self.query) assert not result.errors def test_deleting_removes_a_type(self): oldCount = VLAN.objects.all().count() schema.execute(self.query) self.assertEquals(VLAN.objects.all().count(), oldCount - 1)
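These Django TestCase classes need a configured NetBox/Django environment; under the project's test settings they would typically be run with something like `python manage.py test netbox_graphql` — the exact invocation is an assumption, not part of this record.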
30.571429
99
0.524866
570
5,992
5.370175
0.159649
0.032016
0.077752
0.100621
0.614832
0.508657
0.495263
0.442992
0.40967
0.370467
0
0.003165
0.367323
5,992
195
100
30.728205
0.804273
0
0
0.451807
0
0.012048
0.245995
0
0
0
0
0
0.096386
1
0.114458
false
0
0.048193
0
0.192771
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f175d2c9f715e287b74b3835941c2fefef41cc0e
36,264
py
Python
ncdiff/src/yang/ncdiff/gnmi.py
tahigash/yang
f74d4549421a4ca3b9bc9ee46194d9c3810fd32a
[ "Apache-2.0" ]
null
null
null
ncdiff/src/yang/ncdiff/gnmi.py
tahigash/yang
f74d4549421a4ca3b9bc9ee46194d9c3810fd32a
[ "Apache-2.0" ]
null
null
null
ncdiff/src/yang/ncdiff/gnmi.py
tahigash/yang
f74d4549421a4ca3b9bc9ee46194d9c3810fd32a
[ "Apache-2.0" ]
null
null
null
import re import json import logging from lxml import etree from copy import deepcopy from xmljson import Parker from ncclient import xml_ from xml.etree import ElementTree from collections import OrderedDict, defaultdict from .errors import ModelError from .composer import Tag, Composer from .calculator import BaseCalculator from .proto.gnmi.gnmi_pb2 import PathElem, Path, SetRequest, TypedValue, Update # create a logger for this module logger = logging.getLogger(__name__) nc_url = xml_.BASE_NS_1_0 config_tag = '{' + nc_url + '}config' ns_spec = { 'legacy': { 'path': Tag.JSON_PREFIX, 'val_name': Tag.JSON_NAME, 'val_val': Tag.JSON_PREFIX, }, 'rfc7951': { 'path': Tag.JSON_NAME, 'val_name': Tag.JSON_NAME, 'val_val': Tag.JSON_NAME, }, 'openconfig': { 'path': Tag.JSON_NAME, 'val_name': Tag.JSON_NAME, 'val_val': Tag.JSON_NAME, }, '': { 'path': Tag.JSON_NAME, 'val_name': Tag.JSON_NAME, 'val_val': Tag.JSON_NAME, }, } def _tostring(value): '''_tostring Convert value to XML compatible string. ''' if value is True: return 'true' elif value is False: return 'false' elif value is None: return None else: return str(value) def _fromstring(value): '''_fromstring Convert XML string value to None, boolean, int or float. ''' if not value: return None std_value = value.strip().lower() if std_value == 'true': return 'true' elif std_value == 'false': return 'false' # try: # return int(std_value) # except ValueError: # pass # try: # return float(std_value) # except ValueError: # pass return value class gNMIParser(object): '''gNMIParser A parser to convert a gNMI GetResponse to an lxml Element object. gNMI specification can be found at https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md Attributes ---------- ele : `Element` An lxml Element object which is the root of the config tree. config_nodes : `list` A list of config nodes. Each config node is an Element node in the config tree, which is corresponding to one 'update' in the gNMI GetResponse. xpaths : `list` A list of strings. Each string is an xpath of an Element node in the config tree, which is corresponding to one 'update' in the gNMI GetResponse. 
''' def __init__(self, device, gnmi_get_reply): self.device = device self.reply = gnmi_get_reply self._config_nodes = None self._ele = None self._convert_tag = defaultdict(dict) self._prefix_to_name = {i[1]: i[0] for i in self.device.namespaces if i[1] is not None} self._prefix_to_url = {i[1]: i[2] for i in self.device.namespaces if i[1] is not None} @property def ele(self): if self._ele is None: self._ele = self.config_nodes.ele return self._ele @property def config_nodes(self): if self._config_nodes is None: self._config_nodes = self.get_config_nodes() return self._config_nodes @property def xpaths(self): xpaths = [] if len(self.config_nodes) > 0 and len(self.config_nodes[0]) > 0: if len(self.config_nodes[0]) > 1: xpaths.append(self.device.get_xpath(self.config_nodes[0][0], type=Tag.LXML_XPATH, instance=False)) else: xpaths.append(self.device.get_xpath(self.config_nodes[0][0], type=Tag.LXML_XPATH, instance=True)) return xpaths def parse_value(self, origin, value, tag): def convert_xml_to_lxml(xml_element, lxml_parent=None, default_ns=''): ns_name, tag = self.convert_tag(default_ns, xml_element.tag, src=ns_spec[origin]['val_name'], dst=Tag.LXML_ETREE) val_name_ns_tuple = self.convert_ns(ns_name, src=ns_spec[origin]['val_name'][0]) nsmap = {None: val_name_ns_tuple[Tag.NAMESPACE]} val_name_ns = val_name_ns_tuple[ns_spec[origin]['val_val'][0]] if xml_element.text is not None: ns_val, text = self.convert_tag(val_name_ns, xml_element.text, src=ns_spec[origin]['val_val'], dst=Tag.JSON_PREFIX) if ns_val != val_name_ns: v_v_ns = self.convert_ns(ns_val, src=ns_spec[origin]['val_val'][0]) v_v_prefix = v_v_ns[Tag.PREFIX] v_v_url = v_v_ns[Tag.NAMESPACE] nsmap[v_v_prefix] = v_v_url if lxml_parent is None: lxml_element = etree.Element(tag, nsmap=nsmap) else: lxml_element = etree.SubElement(lxml_parent, tag, nsmap=nsmap) if xml_element.text is not None: lxml_element.text = text for xml_child in xml_element: convert_xml_to_lxml(xml_child, lxml_parent=lxml_element, default_ns=ns_name) return lxml_element n, t = self.convert_tag('', tag, src=Tag.LXML_ETREE, dst=ns_spec[origin]['val_name']) json_val_str = '{{"{}": {}}}'.format(t, value.json_ietf_val.decode()) json_data = json.loads(json_val_str, object_pairs_hook=OrderedDict) pk = Parker(xml_tostring=_tostring, element=ElementTree.Element) return [convert_xml_to_lxml(i) for i in pk.etree(json_data)] @staticmethod def parse_tag(tag): ret = re.search('^{(.+)}(.+)$', tag) if ret: return ret.group(1), ret.group(2) else: raise ModelError("tag '{}' does not have URL info" \ .format(tag)) def convert_tag(self, default_ns, tag, src=Tag.LXML_ETREE, dst=Tag.YTOOL): if src == Tag.JSON_NAME and dst == Tag.LXML_ETREE: if default_ns not in self._convert_tag or \ tag not in self._convert_tag[default_ns]: self._convert_tag[default_ns][tag] = \ self.device.convert_tag(default_ns, tag, src=src, dst=dst) return self._convert_tag[default_ns][tag] else: return self.device.convert_tag(default_ns, tag, src=src, dst=dst) def convert_ns(self, ns, src=Tag.NAME): entries = [i for i in self.device.namespaces if i[src] == ns] c = len(entries) if c == 0: raise ConfigError("{} '{}' does not exist in device attribute " \ "'namespaces'" \ .format(Tag.STR[src], ns)) if c > 1: raise ModelError("device has more than one {} '{}': {}" \ .format(Tag.STR[src], ns, entries)) return entries[0] def get_config_nodes(self): '''get_config_nodes High-level api: get_config_nodes returns a list of config nodes. 
Each config node is an Element node in the config tree, which is corresponding to one 'update' in the gNMI GetResponse. Returns ------- list A list of config nodes. Config A Config object. ''' from .config import Config config = Config(self.device, config=None) for notification in self.reply.notification: updates = [] for update in notification.update: config += self.build_config_node(Config(self.device, config=None), notification.prefix, update.path, update.val) return config def get_schema_node(self, parent_schema_node, tag, origin=''): def is_parent(node1, node2): ancestors = {id(a): a for a in node2.iterancestors()} ids_1 = set([id(a) for a in node1.iterancestors()]) ids_2 = set([id(a) for a in node2.iterancestors()]) if not ids_1 < ids_2: return False for i in ids_2 - ids_1: if ancestors[i] is not node1 and \ ancestors[i].attrib['type'] != 'choice' and \ ancestors[i].attrib['type'] != 'case': return False return True def get_root(tag): if origin == 'openconfig' or origin == '': models = [m for m in self.device.models_loaded if m[:10] == 'openconfig'] else: models = self.device.models_loaded roots = {} for m in models: root = get_child(tag, parent=self.device.models[m].tree) if root is not None: roots[m] = root if len(roots) == 1: return list(roots.values())[0] elif len(roots) > 1: if origin == 'openconfig' or origin == '': tag = self.parse_tag(tag)[1] raise ModelError("more than one models have root with tag " \ "'{}': {}" \ .format(tag, ', '.join(roots.keys()))) else: return None def get_child(tag, parent): if origin == 'openconfig' or origin == '': children = [i for i in parent.iterdescendants() \ if self.parse_tag(i.tag)[1] == tag and \ i.get('type') != 'choice' and \ i.get('type') != 'case' and \ is_parent(parent, i)] else: children = [i for i in parent.iterdescendants() \ if i.tag == tag and \ i.get('type') != 'choice' and \ i.get('type') != 'case' and \ is_parent(parent, i)] if len(children) == 1: return children[0] elif len(children) > 1: if parent.getparent() is None: raise ModelError("model {} has more than one root with " \ "tag '{}'" \ .format(parent.tag, tag)) else: raise ModelError("node {} has more than one child with " \ "tag '{}'" \ .format(self.device.get_xpath(parent), tag)) else: return None # search roots if parent_schema_node is None: child = get_root(tag) if child is None: raise ConfigError("root '{}' cannot be found in loaded models" \ .format(tag)) else: return child # search from a parent child = get_child(tag, parent_schema_node) if child is None: raise ConfigError("node {} does not have child with tag '{}'" \ .format(self.device.get_xpath(parent_schema_node), tag)) else: return child def build_config_node_per_elem(self, origin, parent_config_node, path_elem, value=None): def cleanup_and_append(origin, parent_config_node, child_schema_node, value): for n in parent_config_node.findall(child_schema_node.tag): parent_config_node.remove(n) for n in self.parse_value(origin, value, child_schema_node.tag): parent_config_node.append(n) return None if parent_config_node.tag == config_tag: parent_schema_node = None parent_ns = '' else: parent_schema_node = self.device.get_schema_node(parent_config_node) parent_url, parent_tag_name = self.parse_tag(parent_config_node.tag) parent_ns_tuple = self.convert_ns(parent_url, src=Tag.LXML_ETREE[0]) parent_ns = parent_ns_tuple[ns_spec[origin]['path'][0]] if origin == 'openconfig' or origin == '': child_schema_node = self.get_schema_node(parent_schema_node, path_elem.name, origin=origin) else: child_ns, child_tag = 
self.convert_tag(parent_ns, path_elem.name, src=ns_spec[origin]['path'], dst=Tag.LXML_ETREE) child_schema_node = self.get_schema_node(parent_schema_node, child_tag, origin=origin) type = child_schema_node.get('type') if type == 'leaf' or type == 'leaf-list': if value is None: raise ConfigError("node {} does not have value" \ .format(self.device.get_xpath(child_schema_node))) else: return cleanup_and_append(origin, parent_config_node, child_schema_node, value) elif type == 'container': if value is None: match = parent_config_node.find(child_schema_node.tag) if match is not None: return match else: return self.subelement(origin, parent_config_node, child_schema_node.tag) else: return cleanup_and_append(origin, parent_config_node, child_schema_node, value) elif type == 'list': if value is None: instance = self.find_instance(origin, parent_config_node, child_schema_node, path_elem.key) if instance is not None: return instance else: return self.subelement(origin, parent_config_node, child_schema_node.tag, key=path_elem.key) else: return cleanup_and_append(origin, parent_config_node, child_schema_node, value) else: raise ModelError("type of node {} is unknown: '{}'" \ .format(self.device.get_xpath(parent_schema_node), type)) def build_config_node(self, config, prefix, path, value): from .config import Config config_node = config.ele absolute_path = list(prefix.elem) + list(path.elem) for index, elem in enumerate(absolute_path): if index == len(path.elem) - 1: config_saved = Config(self.device, config=deepcopy(config.ele)) config_node = self.build_config_node_per_elem(path.origin, config_node, elem, value=value) return config_saved + config else: config_node = self.build_config_node_per_elem(path.origin, config_node, elem) def find_instance(self, origin, parent_config_node, child_schema_node, key): def find_key(config_node, key_tag, key_text): match = config_node.find(key_tag) if match is None: return False if match.text != key_text: return False return True def find_keys(config_node, key_tuple): for key_tag, nsmap, key_text in key_tuple: if not find_key(config_node, key_tag, key_text): return False return True keys = child_schema_node.get('key').split() if len(keys) != len(key): raise ConfigError("node {} has {} keys in Path object, but the " \ "schema node requires {} keys: {}" \ .format(self.device.get_xpath(child_schema_node), len(key), len(keys), ', '.join(keys))) key_tuple = self.parse_key(origin, child_schema_node.tag, key) for key_tag, nsmap, text in key_tuple: url, tag_name = self.parse_tag(key_tag) if tag_name not in keys: raise ConfigError("node {} does not have key {}" \ .format(self.device.get_xpath(child_schema_node), key_tag)) for child in parent_config_node.findall(child_schema_node.tag): if find_keys(child, key_tuple): return child return None def get_prefix(self, text): if text is None: return '', None m = re.search('^(.*):(.*)$', text) if m: if m.group(1) in self._prefix_to_name: return m.group(1), m.group(2) else: return '', text else: return '', text def parse_key(self, origin, tag, key): url, tag_name = self.parse_tag(tag) text_ns_tuple = self.convert_ns(url, src=Tag.NAMESPACE) default_ns = text_ns_tuple[ns_spec[origin]['path'][0]] ret = [] for k, v in key.items(): tag_ns, key_tag = self.convert_tag(default_ns, k, src=ns_spec[origin]['path'], dst=Tag.LXML_ETREE) text_ns, text = self.convert_tag(tag_ns, v, src=ns_spec[origin]['path'], dst=Tag.XPATH) text_ns_tuple = self.convert_ns(tag_ns, src=ns_spec[origin]['path'][0]) nsmap = {None: text_ns_tuple[Tag.NAMESPACE]} if text_ns 
!= tag_ns: text_ns_tuple = self.convert_ns(text_ns, src=ns_spec[origin]['path'][0]) nsmap[text_ns_tuple[Tag.PREFIX]] = text_ns_tuple[Tag.NAMESPACE] ret.append((key_tag, nsmap, text)) return ret def subelement(self, origin, parent, tag, key={}): url, tag_name = self.parse_tag(tag) e = etree.SubElement(parent, tag, nsmap={None: url}) default_ns_tuple = self.convert_ns(url, src=Tag.NAMESPACE) default_ns = default_ns_tuple[ns_spec[origin]['path'][0]] if key: for key_tag, nsmap, text in self.parse_key(origin, tag, key): e_child = etree.SubElement(e, key_tag, nsmap=nsmap) e_child.text = text return e class gNMIComposer(Composer): '''gNMIComposer A composer to convert an lxml Element object to gNMI JSON format. gNMI adopts RFC 7951 when encoding data. One gNMIComposer instance abstracts a config node in config tree. ''' def __init__(self, *args, **kwargs): super(gNMIComposer, self).__init__(*args, **kwargs) self._url_to_prefix = {i[2]: i[1] for i in self.device.namespaces if i[1] is not None} def get_json(self, instance=True, origin='openconfig'): '''get_json High-level api: get_json returns json_val of the config node. Parameters ---------- instance : `bool` True if only one instance of list or leaf-list is required. False if all instances of list or leaf-list are needed. Returns ------- str A string in JSON format. ''' def get_json_instance(node): pk = Parker(xml_fromstring=_fromstring, dict_type=OrderedDict) default_ns = {} for item in node.iter(): parents = [p for p in node.iter() if item in p] if parents and id(parents[0]) in default_ns: ns, tag = self.device.convert_tag(default_ns[id(parents[0])], item.tag, dst=ns_spec[origin]['val_name']) else: ns, tag = self.device.convert_tag('', item.tag, dst=ns_spec[origin]['val_name']) default_ns[id(item)] = ns item.tag = tag if item.text: text = self.device.convert_tag(self._url_to_prefix[ns], item.text, src=Tag.JSON_PREFIX, dst=ns_spec[origin]['val_val'])[1] item.text = text return pk.data(node) def convert_node(node): # lxml.etree does not allow tag name like oc-if:enable # so it is converted to xml.etree.ElementTree string = etree.tostring(node, encoding='unicode', pretty_print=False) return ElementTree.fromstring(string) if instance: return json.dumps(get_json_instance(convert_node(self.node))) else: nodes = [n for n in self.node.getparent().iterchildren(tag=self.node.tag)] if len(nodes) > 1: return json.dumps([get_json_instance(convert_node(n)) for n in nodes]) else: return json.dumps(get_json_instance(convert_node(nodes[0]))) def get_path(self, instance=True, origin='openconfig'): '''get_path High-level api: get_path returns gNMI path object of the config node. Note that gNMI Path can specify list instance but cannot specify leaf-list instance. Parameters ---------- instance : `bool` True if the gNMI Path object refers to only one instance of a list. False if the gNMI Path object refers to all instances of a list. Returns ------- Path An object of gNMI Path class. 
''' def get_name(node, default_ns): if origin == 'openconfig' or origin == '': return gNMIParser.parse_tag(node.tag) else: return self.device.convert_tag(default_ns, node.tag, src=Tag.LXML_ETREE, dst=ns_spec[origin]['path']) def get_keys(node, default_ns): keys = Composer(self.device, node).keys ret = {} for key in keys: if origin=='openconfig' or origin == '': key_ns, key_val = gNMIParser.parse_tag(key) else: key_ns, key_val = self.device.convert_tag(default_ns, key, src=Tag.LXML_ETREE, dst=ns_spec[origin]['path']) ns_tuple = self.convert_ns(key_ns, src=Tag.NAMESPACE) val_ns, val_val = self.device.convert_tag(ns_tuple[Tag.PREFIX], node.find(key).text, src=Tag.XPATH, dst=ns_spec[origin]['path']) ret[key_val] = val_val return ret def get_pathelem(node, default_ns): ns, name = get_name(node, default_ns) schema_node = self.device.get_schema_node(node) if schema_node.get('type') == 'list' and \ (node != self.node or instance): return ns, PathElem(name=name, key=get_keys(node, ns)) else: return ns, PathElem(name=name) nodes = list(reversed(list(self.node.iterancestors())))[1:] + \ [self.node] path_elems = [] default_ns = '' for node in nodes: default_ns, path_elem = get_pathelem(node, default_ns) path_elems.append(path_elem) return Path(elem=path_elems, origin=origin) def convert_ns(self, ns, src=Tag.NAME): entries = [i for i in self.device.namespaces if i[src] == ns] c = len(entries) if c == 0: raise ConfigError("{} '{}' does not exist in device attribute " \ "'namespaces'" \ .format(Tag.STR[src], ns)) if c > 1: raise ModelError("device has more than one {} '{}': {}" \ .format(Tag.STR[src], ns, entries)) return entries[0] class gNMICalculator(BaseCalculator): '''gNMICalculator A gNMI calculator to do subtraction and addition. A subtraction is to compute the delta between two Config instances in a form of gNMI SetRequest. An addition is to apply one gNMI SetRequest to a Config instance (TBD). Attributes ---------- sub : `SetRequest` A gNMI SetRequest which can achieve a transition from one config, i.e., self.etree2, to another config, i.e., self.etree1. ''' @property def sub(self): deletes, replaces, updates = self.node_sub(self.etree1, self.etree2) return SetRequest(prefix=None, delete=deletes, replace=replaces, update=updates) def node_sub(self, node_self, node_other): '''node_sub High-level api: Compute the delta of two config nodes. This method is recursive, assuming two config nodes are different. Parameters ---------- node_self : `Element` A config node in the destination config that is being processed. node_self cannot be a leaf node. node_other : `Element` A config node in the source config that is being processed. Returns ------- tuple There are three elements in the tuple: a list of gNMI Path instances that need to be deleted, a list of gNMI Update instances for replacement purpose, and a list of gNMI Update instances for merging purpose. 
''' paths_delete = [] updates_replace = [] updates_update = [] done_list = [] # if a leaf-list node, delete the leaf-list totally # if a list node, by default delete the list instance # if a list node and delete_whole=True, delete the list totally def generate_delete(node, instance=True): paths_delete.append(gNMIComposer(self.device, node) \ .get_path(instance=instance)) # if a leaf-list node, replace the leaf-list totally # if a list node, replace the list totally def generate_replace(node, instance=True): n = gNMIComposer(self.device, node) json_value = n.get_json(instance=instance).encode() value = TypedValue(json_val=json_value) path = n.get_path(instance=instance) updates_replace.append(Update(path=path, val=value)) # if a leaf-list node, update the leaf-list totally # if a list node, by default update the list instance # if a list node and update_whole=True, update the list totally def generate_update(node, instance=True): n = gNMIComposer(self.device, node) json_value = n.get_json(instance=instance).encode() value = TypedValue(json_val=json_value) path = n.get_path(instance=instance) updates_update.append(Update(path=path, val=value)) # the leaf-list value sequence under node_self is different from the one # under node_other def leaf_list_seq_is_different(tag): if [i.text for i in node_self.iterchildren(tag=tag)] == \ [i.text for i in node_other.iterchildren(tag=tag)]: return False else: return True # the leaf-list value set under node_self is different from the one # under node_other def leaf_list_set_is_different(tag): s_list = [i.text for i in node_self.iterchildren(tag=tag)] o_list = [i.text for i in node_other.iterchildren(tag=tag)] if set(s_list) == set(o_list): return False else: return True # the leaf-list or list under node_self is empty def list_is_empty(tag): if [i for i in node_self.iterchildren(tag=tag)]: return False else: return True # the sequence of list instances under node_self is different from the # one under node_other def list_seq_is_different(tag): s_list = [i for i in node_self.iterchildren(tag=tag)] o_list = [i for i in node_other.iterchildren(tag=tag)] if [self.device.get_xpath(n) for n in s_list] == \ [self.device.get_xpath(n) for n in o_list]: return False else: return True # all list instances under node_self have peers under node_other, and # the sequence of list instances under node_self that have peers under # node_other is same as the sequence of list instances under node_other def list_seq_is_inclusive(tag): s_list = [i for i in node_self.iterchildren(tag=tag)] o_list = [i for i in node_other.iterchildren(tag=tag)] s_seq = [self.device.get_xpath(n) for n in s_list] o_seq = [self.device.get_xpath(n) for n in o_list] if set(s_seq) <= set(o_seq) and \ [i for i in s_seq if i in o_seq] == o_seq: return True else: return False in_s_not_in_o, in_o_not_in_s, in_s_and_in_o = \ self._group_kids(node_self, node_other) for child_s in in_s_not_in_o: schema_node = self.device.get_schema_node(child_s) if schema_node.get('type') == 'leaf': generate_update(child_s) elif schema_node.get('type') == 'leaf-list': if child_s.tag not in done_list: generate_replace(child_s, instance=False) done_list.append(child_s.tag) elif schema_node.get('type') == 'container': generate_update(child_s) elif schema_node.get('type') == 'list': if schema_node.get('ordered-by') == 'user': if child_s.tag not in done_list: generate_replace(child_s, instance=False) done_list.append(child_s.tag) else: generate_update(child_s, instance=True) for child_o in in_o_not_in_s: schema_node = 
self.device.get_schema_node(child_o) if schema_node.get('type') == 'leaf': generate_delete(child_o) elif schema_node.get('type') == 'leaf-list': if child_o.tag not in done_list: child_s = node_self.find(child_o.tag) if child_s is None: generate_delete(child_o, instance=False) else: generate_replace(child_s, instance=False) done_list.append(child_o.tag) elif schema_node.get('type') == 'container': generate_delete(child_o) elif schema_node.get('type') == 'list': if schema_node.get('ordered-by') == 'user': if list_seq_is_inclusive(child_o.tag): generate_delete(child_o, instance=True) else: if child_o.tag not in done_list: generate_replace(child_o, instance=False) done_list.append(child_o.tag) else: if list_is_empty(child_o.tag): if child_o.tag not in done_list: generate_delete(child_o, instance=False) done_list.append(child_o.tag) else: generate_delete(child_o, instance=True) for child_s, child_o in in_s_and_in_o: schema_node = self.device.get_schema_node(child_s) if schema_node.get('type') == 'leaf': if child_s.text != child_o.text: generate_update(child_s) elif schema_node.get('type') == 'leaf-list': if child_s.tag not in done_list: if schema_node.get('ordered-by') == 'user': if leaf_list_seq_is_different(child_s.tag): generate_replace(child_s, instance=False) else: if leaf_list_set_is_different(child_s.tag): generate_replace(child_s, instance=False) done_list.append(child_s.tag) elif schema_node.get('type') == 'container': if BaseCalculator(self.device, child_s, child_o).ne: d, r, u = self.node_sub(child_s, child_o) paths_delete += d updates_replace += r updates_update += u elif schema_node.get('type') == 'list': if schema_node.get('ordered-by') == 'user': if list_seq_is_different(child_s.tag): if child_s.tag not in done_list: generate_replace(child_s, instance=False) done_list.append(child_s.tag) else: if BaseCalculator(self.device, child_s, child_o).ne: d, r, u = self.node_sub(child_s, child_o) paths_delete += d updates_replace += r updates_update += u else: if BaseCalculator(self.device, child_s, child_o).ne: d, r, u = self.node_sub(child_s, child_o) paths_delete += d updates_replace += r updates_update += u return (paths_delete, updates_replace, updates_update)
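A hedged sketch of the intended flow; device, gnmi_get_reply, and the two Config objects are ncdiff objects assumed to be constructed elsewhere, and the attribute names follow the classes above.

# Hypothetical: parse a gNMI GetResponse into a Config, then diff two configs.
parser = gNMIParser(device, gnmi_get_reply)       # model device + gNMI GetResponse
running = parser.config_nodes                     # Config assembled from the reply's updates
calc = gNMICalculator(device, config1.ele, config2.ele)
set_request = calc.sub                            # gNMI SetRequest moving config2 -> config1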
41.875289
90
0.506866
4,144
36,264
4.230936
0.079151
0.033651
0.013688
0.013574
0.530029
0.445788
0.386357
0.352706
0.296127
0.251697
0
0.003807
0.405995
36,264
865
91
41.923699
0.81013
0.116259
0
0.415241
0
0
0.039971
0
0
0
0
0
0
1
0.068429
false
0
0.023328
0
0.209953
0.001555
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f177e6a7009b15c338ed0becd47586ed6aaec891
3,927
py
Python
tournament/tournament.py
ross-schlie/exercism-Python-track
4db690d11377377fc6f5cef6422da497272bbb31
[ "MIT" ]
null
null
null
tournament/tournament.py
ross-schlie/exercism-Python-track
4db690d11377377fc6f5cef6422da497272bbb31
[ "MIT" ]
null
null
null
tournament/tournament.py
ross-schlie/exercism-Python-track
4db690d11377377fc6f5cef6422da497272bbb31
[ "MIT" ]
null
null
null
"""exercism tournament module.""" class Team: def __init__(self, name): """A Team in a football tournament. Keeps track of a teams matches, wins, draws, losses and points. Parameters ---------- arg1 : string Name of the Team. """ self.name = name self.matches = 0 self.wins = 0 self.draws = 0 self.losses = 0 self.points = 0 def __repr__(self): # return self.name.ljust(31) + f'| # {self.matches} | # {self.wins} | # {self.draws} | # {self.losses} | # {self.points}' return repr((self.name, self.points)) def __eq__(self, other): return self.name == other.name def __lt__(self, other): return self.points < other.points def __le__(self, other): return self.points <= other.points def __eq__(self, other): return self.points == other.points def __ne__(self, other): return self.points != other.points def __gt__(self, other): return self.points > other.points def __ge__(self, other): return self.points >= other.points def win(self): """The team Won! Updates the teams matches, wins and points.""" self.matches += 1 self.wins += 1 self.points += 3 def loss(self): """The team Lost! Updates the teams matches and losses.""" self.matches += 1 self.losses += 1 def draw(self): """The team drew! Updates the teams draws, matches and points.""" self.matches += 1 self.draws += 1 self.points += 1 class Tourney: def __init__(self): """A football Tournament that tracks teams that take part.""" self.teams = {} def _get_team(self, name): if name not in self.teams: self.teams[name] = Team(name) return self.teams[name] def get_teams(self): """A Dictionary of the teams in the Tournament""" return self.teams def parse_contest(self, rows): """Determine the teams and their results in a Tournament. Parse lines in text (rows) to find the teams and their results in a Tournament. """ for line in rows: matchresult = line.split(';') team1 = self._get_team(matchresult[0]) team2 = self._get_team(matchresult[1]) if matchresult[2] == 'win': team1.win() team2.loss() elif matchresult[2] == 'loss': team1.loss() team2.win() elif matchresult[2] == 'draw': team1.draw() team2.draw() #else: # ???? def tally(rows): """Tally the results of a small football competition. Parameters ---------- arg1 : list A list of the results of matches in the competition Example: "Allegoric Alaskans;Blithering Badgers;win" Returns ------ string The results of the Tournament. # containing the lines in the file Test says to make a file... so create and send it's contents? """ tournament = Tourney() tournament.parse_contest(rows) sortedTeams = sorted(tournament.get_teams().values(), key=lambda team: (-team.points, team.name)) f = open('tournament_results.txt', 'w') f.write('Team'.ljust(31) + '| MP | W | D | L | P\n') for team in sortedTeams: f.write(team.name.ljust(31) + f'| {team.matches} ' + f'| {team.wins} ' + f'| {team.draws} ' + f'| {team.losses} ' + f'| {team.points} \n') f.close() # ... f = open('tournament_results.txt', 'r') buffer = [] for line in f: buffer.append(line.rstrip()) f.close() return buffer
26.18
73
0.531958
464
3,927
4.390086
0.25
0.054001
0.051546
0.065292
0.215022
0.190476
0.16593
0.150221
0
0
0
0.013209
0.344538
3,927
149
74
26.355705
0.778166
0.276292
0
0.088608
0
0
0.064916
0.01651
0
0
0
0
0
1
0.21519
false
0
0
0.101266
0.379747
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
f17945798753686fc982268f9cb799a7cdccba0f
1,816
py
Python
extensions/prefix_handler.py
Elfein7Night/ElfBot
5967b2b87569badab3ad60d2ecaa139d86bbb093
[ "MIT" ]
null
null
null
extensions/prefix_handler.py
Elfein7Night/ElfBot
5967b2b87569badab3ad60d2ecaa139d86bbb093
[ "MIT" ]
null
null
null
extensions/prefix_handler.py
Elfein7Night/ElfBot
5967b2b87569badab3ad60d2ecaa139d86bbb093
[ "MIT" ]
null
null
null
import logging

from discord import Message
from discord.ext.commands import Cog, Context, command, has_permissions, Bot

from utils.utils import log_event, db, get_dict
from extensions.extension_templates import DatabaseHandler

DEFAULT_PREFIX = '?'
PREFIXES_DB_KEY = 'prefixes_for_servers'


class PrefixDBHandler(DatabaseHandler):

    # On First Joining Server
    @Cog.listener()
    async def on_guild_join(self, guild: Context.guild):
        self.set_value_for_server(guild_id=guild.id, value=DEFAULT_PREFIX)
        log_event(f'Joined the server: {guild.name} - {guild.id}')

    @command(brief="Change the bot's prefix for this server")
    @has_permissions(administrator=True)
    async def pf(self, ctx: Context, prefix):
        self.set_value_for_server(guild_id=ctx.guild.id, value=prefix)
        message = f"set '{prefix}' as the prefix for the server '{ctx.guild}'"
        log_event(message)
        await ctx.send(f'{ctx.author.mention} {message}')


############################
#      STATIC METHODS      #
############################

def get_prefix_for_guild(guild_id: int):
    prefixes_raw_dict = db.get(PREFIXES_DB_KEY)
    if prefixes_raw_dict is not None:
        try:
            return get_dict(prefixes_raw_dict)[str(guild_id)]
        except KeyError:
            log_event(f"Failed trying to fetch prefix for server id {guild_id}",
                      logging.CRITICAL)
            return DEFAULT_PREFIX
    log_event("Error Fetching prefixes DB", logging.CRITICAL)
    return DEFAULT_PREFIX


# bot is passed by default by the API but not needed for this function.
def get_prefix(_: Bot, message: Message):
    return get_prefix_for_guild(message.guild.id)


# expected function for the outside call to 'load_extension()'
def setup(_bot):
    _bot.add_cog(PrefixDBHandler(_bot, PREFIXES_DB_KEY))
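A minimal wiring sketch, assuming a discord.py version contemporary with this module (the token variable is hypothetical); `command_prefix` accepts exactly the `(bot, message)` callable defined above:

from discord.ext.commands import Bot
from extensions.prefix_handler import get_prefix

bot = Bot(command_prefix=get_prefix)
bot.load_extension('extensions.prefix_handler')  # invokes setup() above
bot.run(TOKEN)  # hypothetical token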
34.923077
98
0.701542
250
1,816
4.88
0.372
0.051639
0.031967
0.02459
0.127049
0.045902
0.045902
0
0
0
0
0
0.180066
1,816
51
99
35.607843
0.819342
0.101872
0
0.060606
0
0
0.172721
0
0
0
0
0
0
1
0.090909
false
0
0.151515
0.030303
0.393939
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f17afddf883e4061f0668498c50ef2b1d40f471f
190
py
Python
app/gws/server/spool.py
ewie/gbd-websuite
6f2814c7bb64d11cb5a0deec712df751718fb3e1
[ "Apache-2.0" ]
null
null
null
app/gws/server/spool.py
ewie/gbd-websuite
6f2814c7bb64d11cb5a0deec712df751718fb3e1
[ "Apache-2.0" ]
null
null
null
app/gws/server/spool.py
ewie/gbd-websuite
6f2814c7bb64d11cb5a0deec712df751718fb3e1
[ "Apache-2.0" ]
null
null
null
import gws
import importlib


def add(job):
    # the 'uwsgi' module is only importable when running inside the
    # uWSGI server, so import it lazily here
    uwsgi = importlib.import_module('uwsgi')
    gws.log.info("SPOOLING", job.uid)
    d = {b'job_uid': gws.as_bytes(job.uid)}
    uwsgi.spool(d)
19
44
0.663158
30
190
4.1
0.566667
0.146341
0
0
0
0
0
0
0
0
0
0
0.178947
190
9
45
21.111111
0.788462
0
0
0
0
0
0.105263
0
0
0
0
0
0
1
0.142857
false
0
0.428571
0
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
f17b300d0d9a14ab776efbe26b4bf7268c4f862a
4,116
py
Python
snakeskin/config.py
healthverity/snakeskin-fabric
31ba7fa5a71445eba76f89723c998d603704e0f9
[ "Apache-2.0" ]
5
2019-08-08T17:16:02.000Z
2021-05-15T07:28:31.000Z
snakeskin/config.py
healthverity/snakeskin-fabric
31ba7fa5a71445eba76f89723c998d603704e0f9
[ "Apache-2.0" ]
4
2019-08-20T15:07:12.000Z
2020-07-31T17:50:51.000Z
snakeskin/config.py
healthverity/snakeskin-fabric
31ba7fa5a71445eba76f89723c998d603704e0f9
[ "Apache-2.0" ]
2
2019-08-20T15:22:48.000Z
2019-12-17T19:38:55.000Z
""" Blockchain configuration """ import os import json from dataclasses import dataclass, field, replace from typing import List, Mapping, Optional import yaml import dacite from .models import Peer, Channel, User, Orderer, ChaincodeSpec from .models.gateway import Gateway from .constants import ChaincodeLanguage @dataclass() class GatewayConfig: """ A gateway config object """ channel: str requestor: str endorsing_peers: List[str] = field(default_factory=list) orderers: List[str] = field(default_factory=list) chaincode: Optional[str] = None @dataclass() class BlockchainConfig: """ A gateway for accessing the blockchain """ @classmethod def from_file(cls, file_path: str): """ Loads gateway config from a static file """ ext = os.path.splitext(file_path)[1] with open(file_path) as inf: if ext == '.json': return cls.from_dict(json.load(inf)) if ext in {'.yaml', '.yml'}: return cls.from_dict(yaml.load(inf, Loader=yaml.SafeLoader)) raise ValueError( f'Unrecognized file extension for file {file_path}' ) @classmethod def from_dict(cls, value: dict): """ Creates a gateway config from a dictionary """ return dacite.from_dict(cls, value, config=dacite.Config( type_hooks={ ChaincodeLanguage: ChaincodeLanguage } )) peers: Mapping[str, Peer] = field(default_factory=dict) orderers: Mapping[str, Orderer] = field(default_factory=dict) users: Mapping[str, User] = field(default_factory=dict) chaincodes: Mapping[str, ChaincodeSpec] = field(default_factory=dict) gateways: Mapping[str, GatewayConfig] = field(default_factory=dict) def __post_init__(self): # Set names to be the mapping key for all entities that weren't # provided names self.peers = { name: replace(peer, name=peer.name or name) for name, peer in self.peers.items() } self.orderers = { name: replace(orderer, name=orderer.name or name) for name, orderer in self.orderers.items() } self.users = { name: replace(user, name=user.name or name) for name, user in self.users.items() } self.chaincodes = { name: replace(chaincode, name=chaincode.name or name) for name, chaincode in self.chaincodes.items() } def get_gateway(self, name: str): """ Gets a gateway using the config name """ if name not in self.gateways: raise KeyError(f'No gateway defined with name "{name}"') config = self.gateways[name] return Gateway( endorsing_peers=[ self.get_peer(peer) for peer in config.endorsing_peers ], chaincode=self.get_chaincode(config.chaincode) if config.chaincode else None, requestor=self.get_user(config.requestor), orderers=[ self.get_orderer(orderer) for orderer in config.orderers ], channel=Channel(name=config.channel) ) def get_peer(self, name: str): """ Gets a peer using the config name """ if not name in self.peers: raise KeyError(f'No peer defined with name "{name}"') return self.peers[name] def get_orderer(self, name: str): """ Gets a orderer using the config name """ if not name in self.orderers: raise KeyError(f'No orderer defined with name "{name}"') return self.orderers[name] def get_user(self, name: str): """ Gets a user using the config name """ if not name in self.users: raise KeyError(f'No user defined with name "{name}"') return self.users[name] def get_chaincode(self, name: str): """ Gets a chaincode spec using the config name """ if not name in self.chaincodes: raise KeyError(f'No chaincode defined with name "{name}"') return self.chaincodes[name]
34.3
89
0.617104
502
4,116
4.992032
0.207171
0.021548
0.053073
0.04589
0.189944
0.122905
0.052674
0.052674
0.052674
0
0
0.00034
0.2845
4,116
119
90
34.588235
0.850594
0.107629
0
0.067416
0
0
0.067388
0
0
0
0
0
0
1
0.089888
false
0
0.101124
0
0.41573
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f17d0d49e5fc9b163d91e5df674590e6e4cad918
1,570
py
Python
quantifier.py
daibiaoxuwu/cnn-text-classification-tf
595008c44acef7fa4dd45d1d943f0b8314e6b635
[ "Apache-2.0" ]
null
null
null
quantifier.py
daibiaoxuwu/cnn-text-classification-tf
595008c44acef7fa4dd45d1d943f0b8314e6b635
[ "Apache-2.0" ]
null
null
null
quantifier.py
daibiaoxuwu/cnn-text-classification-tf
595008c44acef7fa4dd45d1d943f0b8314e6b635
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import nltk

from papersmith.editor.issue import Issue


def check(content):
    cno = ['many', 'few', 'a few', 'a number of', 'the number of',
           'numbers of', 'a quantity of', 'quantities of', 'a good many',
           'a great many', 'a large number of', 'a great number of',
           'scores of', 'dozens of']
    uno = ['much', 'little', 'a little', 'huge amounts of',
           'a great amount of', 'a large amount of', 'a great deal of',
           'a large deal of', 'a plenty of', 'a good supply of',
           'a piece of', 'a bit of', 'an item of', 'an article of',
           'a bottle of', 'a cup of', 'a drop of', 'a glass of']
    uncountable_nouns = eval(
        open('papersmith/editor/grammar/uncountable_nouns.txt').read())
    issues = []
    w = ''
    i = 0
    # scan character by character, accumulating the current word in w
    # (a while loop is used because the original for loop could not
    # advance i past the sentence it had just consumed)
    while i < len(content):
        c = content[i]
        if ('A' <= c <= 'Z') or ('a' <= c <= 'z') or c == "'":
            w += c
            i += 1
            continue
        if w == "'" or not w:
            w = ''
            i += 1
            continue
        if w in ('many', 'few'):
            pos = i
            sentence = ''
            # collect the remainder of the sentence after the quantifier
            while i < len(content) and content[i] not in '.!?,:;':
                sentence += content[i]
                i += 1
            t = nltk.word_tokenize(sentence)
            l = nltk.pos_tag(t)
            for word, tag in l:
                if tag == 'NN' and word in uncountable_nouns:
                    if w == 'many':
                        issues.append(Issue(2, 1, [pos - 4], [pos], 'much', 0))
                    else:
                        issues.append(Issue(2, 1, [pos - 3], [pos], 'little', 0))
                    break
        w = ''
        i += 1
    return issues
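A minimal usage sketch, assuming nltk's tokenizer/tagger data and the uncountable-nouns list are available (the sentence is made up):

issues = check("There are many equipment in the lab.")
for issue in issues:
    print(issue)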
35.681818
264
0.564331
256
1,570
3.441406
0.339844
0.051078
0.079455
0.068104
0.118048
0.118048
0.068104
0.068104
0.068104
0.068104
0
0.022296
0.228662
1,570
43
265
36.511628
0.705202
0.013376
0
0.142857
0
0
0.276665
0.030381
0
0
0
0
0
1
0.028571
false
0
0.057143
0
0.114286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f17d6fabf9fa920276d161547a20c9812814326f
4,038
py
Python
doc/ansible/__init__.py
jansenm/ansible-jenkins-roles
73330c53d197b5161d2b46dd27aa609e03902485
[ "BSD-3-Clause" ]
10
2015-12-30T17:49:19.000Z
2019-12-16T15:58:11.000Z
doc/ansible/__init__.py
jansenm/ansible-jenkins-roles
73330c53d197b5161d2b46dd27aa609e03902485
[ "BSD-3-Clause" ]
null
null
null
doc/ansible/__init__.py
jansenm/ansible-jenkins-roles
73330c53d197b5161d2b46dd27aa609e03902485
[ "BSD-3-Clause" ]
8
2016-10-21T04:13:14.000Z
2020-11-10T19:48:06.000Z
import docutils
import docutils.nodes
import docutils.parsers.rst
import docutils.parsers.rst.directives
import sphinx.addnodes
import sphinx.application
import sphinx.directives
import sphinx.domains
import sphinx.environment
import sphinx.locale
import sphinx.roles
import sphinx.util.compat
import sphinx.util.docfields
import sphinx.util.nodes


class AnsibleRoleRole(sphinx.roles.XRefRole):

    def process_link(self, env, refnode, has_explicit_title, title, target):
        return title, target


class AnsibleRoleDirective(sphinx.directives.ObjectDescription):
    # :BUG: Something is wrong (just a test)
    required_arguments = 1

    doc_field_types = [
        sphinx.util.docfields.GroupedField(
            'default',
            label=sphinx.locale.l_('Defaults'),
            names=('default',)
        ),
        sphinx.util.docfields.Field(
            'dependency',
            label=sphinx.locale.l_('Dependencies'),
            names=('dependency', 'depend'),
            rolename='role',
            bodyrolename='role'
        ),
        sphinx.util.docfields.TypedField(
            'parameter',
            label=sphinx.locale.l_('Parameters'),
            names=('param', 'parameter', 'arg', 'argument'),
            typerolename='role',
            typenames=('type',)
        ),
        sphinx.util.docfields.Field(
            'become',
            label=sphinx.locale.l_('Uses become'),
            names=('become',)
        )
    ]

    option_spec = {
        'noindex': docutils.parsers.rst.directives.flag
    }

    has_content = True

    def handle_signature(self, sig: str,
                         signode: sphinx.addnodes.desc_signature):
        (ns, _, rolename) = sig.rpartition('/')
        signode += sphinx.addnodes.desc_annotation('role', 'Role ')
        signode += sphinx.addnodes.desc_addname(ns, "{ns} ".format(ns=ns))
        signode += sphinx.addnodes.desc_name(rolename, rolename)
        return 'role-' + sig

    def add_target_and_index(self, name, sig, signode):
        targetname = name
        signode['ids'].append(targetname)
        self.env.domaindata['ansible']['roles'][name] = (self.env.docname, name)
        self.state.document.note_explicit_target(signode)


class AnsibleDomain(sphinx.domains.Domain):
    """Ansible domain"""
    name = "ansible"
    label = "Ansible"

    object_types = {
        'role': sphinx.domains.ObjType(sphinx.locale.l_('role'), 'role')
    }
    directives = {
        'role': AnsibleRoleDirective
    }
    roles = {
        'role': AnsibleRoleRole()
    }
    initial_data = {
        "roles": {}
    }

    def clear_doc(self, doc):
        # iterate over a copy; deleting from the dict being iterated
        # raises a RuntimeError, and the stored tuples are (docname, name)
        for name in list(self.data['roles']):
            if doc == self.data['roles'][name][0]:
                del self.data['roles'][name]

    def get_objects(self):
        for docname, name in self.data['roles'].values():
            yield name, name, 'role', docname, 'role-' + name, 1

    def resolve_xref(self, env, fromdocname, builder, type, target, node,
                     contnode):
        if type == "role":
            for (docname, name) in self.data['roles'].values():
                if name == target:
                    return sphinx.util.nodes.make_refnode(
                        builder, fromdocname, docname, name, contnode
                    )
        return None

    def resolve_any_xref(self, env, fromdocname, builder, type, target, node,
                         contnode):
        return []


def setup(app: sphinx.application.Sphinx):
    """Initialize the sphinx extension for ansible."""
    app.add_domain(AnsibleDomain)
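A minimal registration sketch for conf.py, assuming this module is importable as `ansible` on the Sphinx path (Sphinx invokes the module-level setup() above when the extension is listed):

# conf.py (hypothetical project)
extensions = [
    'ansible',  # assumption: this package is on sys.path
]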
28.041667
82
0.567112
398
4,038
5.668342
0.326633
0.053191
0.04211
0.031915
0.089096
0.080674
0.080674
0.080674
0.049645
0.049645
0
0.001078
0.311045
4,038
143
83
28.237762
0.809849
0.053739
0
0.085714
0
0
0.074698
0
0
0
0
0
0
1
0.07619
false
0
0.133333
0.009524
0.371429
0.047619
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f17fd326827766f921f6b7614a4cdc197d55723b
2,068
py
Python
odps/pai/nodes/transform_nodes.py
ZZHGit/aliyun-odps-python-sdk
e1c39378863ec7a1947487acab38125ac77f178e
[ "Apache-2.0" ]
null
null
null
odps/pai/nodes/transform_nodes.py
ZZHGit/aliyun-odps-python-sdk
e1c39378863ec7a1947487acab38125ac77f178e
[ "Apache-2.0" ]
null
null
null
odps/pai/nodes/transform_nodes.py
ZZHGit/aliyun-odps-python-sdk
e1c39378863ec7a1947487acab38125ac77f178e
[ "Apache-2.0" ]
1
2019-09-18T05:35:29.000Z
2019-09-18T05:35:29.000Z
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from ..core.dag import BaseDagNode, DagEndpointType
from ..nodes.exporters import get_input_table_name, get_input_partitions, \
    get_output_table_name, get_output_table_partitions


class SplitNode(BaseDagNode):
    def __init__(self, percentage):
        super(SplitNode, self).__init__("split")
        self.marshal({
            "parameters": {
                "fraction": percentage,
            },
            "inputs": [(1, "input", DagEndpointType.DATA)],
            "outputs": [(1, "output1", DagEndpointType.DATA),
                        (2, "output2", DagEndpointType.DATA)]
        })
        self.add_exporter("inputTableName",
                          lambda context: get_input_table_name(context, self, "input"))
        self.add_exporter("inputTablePartitions",
                          lambda context: get_input_partitions(context, self, "input"))
        self.add_exporter("output1TableName",
                          lambda context: get_output_table_name(context, self, "output1"))
        self.add_exporter("output1TablePartition",
                          lambda context: get_output_table_partitions(context, self, "output1"))
        self.add_exporter("output2TableName",
                          lambda context: get_output_table_name(context, self, "output2"))
        self.add_exporter("output2TablePartition",
                          lambda context: get_output_table_partitions(context, self, "output2"))
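A minimal construction sketch (how the node is attached to a PAI DAG depends on the surrounding SDK and is not shown here):

# Splits the input data 80/20 across output1/output2.
node = SplitNode(0.8)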
50.439024
121
0.729207
256
2,068
5.726563
0.453125
0.040928
0.057299
0.060027
0.195089
0.195089
0.122783
0.122783
0
0
0
0.010545
0.174565
2,068
40
122
51.7
0.848272
0.37234
0
0
0
0
0.157031
0.032813
0
0
0
0
0
1
0.052632
false
0
0.105263
0
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f17fd5450289443b6d91376201627bb8e37f188a
12,302
py
Python
kk/api/views.py
icw82/django-kk
b84c41ceb9f6de4fc94ef4b353583bfec70d6fed
[ "MIT" ]
null
null
null
kk/api/views.py
icw82/django-kk
b84c41ceb9f6de4fc94ef4b353583bfec70d6fed
[ "MIT" ]
null
null
null
kk/api/views.py
icw82/django-kk
b84c41ceb9f6de4fc94ef4b353583bfec70d6fed
[ "MIT" ]
null
null
null
import re
import datetime
import time
import urllib
import json
from inspect import ismethod

from django.views import generic
from django.utils import timezone
from django.utils.decorators import classonlymethod
from django.db.models import (
    Model, Manager,
    AutoField, CharField
)
from django.db.models.query import QuerySet
from django.db.models.fields.files import ImageFieldFile
from django.core.exceptions import FieldDoesNotExist
from django.http import (HttpResponse, HttpResponseNotFound,
                         HttpResponseForbidden)

from .. import utils

DEFAULT_PAGE_SIZE = 200
DEFAULT_PAGE_SIZE_MAX = 200


class Stream:
    user = None
    query = None
    params = None  # from path

    count = None
    skip = None

    keys = [
        ('count', int, DEFAULT_PAGE_SIZE),
        ('skip', int, 0)
    ]

    def __init__(self, HttpRequest, params):
        self.user = HttpRequest.user
        self.query = HttpRequest.GET
        self.params = params

        self.client_params = {}
        for key, type, default in self.keys:
            value = HttpRequest.GET.get(key, default)
            self.client_params[key] = type(value)


def convertToJSON(method):
    def wrapper(self, HttpRequest, **kwargs):
        stream = method(self, HttpRequest, **kwargs)

        if isinstance(stream.data, HttpResponse):
            return stream.data

        if stream.data is None:
            return HttpResponseNotFound()

        response = HttpResponse(
            json.dumps(stream.data, ensure_ascii=False, sort_keys=True),
            content_type='application/json'
        )

        if hasattr(stream, 'total'):
            response['Total'] = stream.total
        if hasattr(stream, 'skip'):
            response['Skip'] = stream.skip
        if hasattr(stream, 'count'):
            response['Count'] = stream.count

        return response
    return wrapper


class Base(generic.View):
    model = None
    key = 'id'
    # data = None

    page_size = DEFAULT_PAGE_SIZE
    page_size_max = DEFAULT_PAGE_SIZE_MAX

    @classonlymethod
    def as_api(self, **kwargs):
        self.dynamic_filters = {}

        if len(kwargs) > 0:
            if 'filters' in kwargs:
                # the view receives new filters from urls.py (.as_api())
                filters = kwargs['filters']
                for key in filters:
                    value = filters[key]
                    if type(value) == tuple and len(value) == 2:
                        self.dynamic_filters.update({key: value})

        return self.as_view()

    available_filters = []
    dynamic_filters = {}

    def getFilters(self, stream):
        filters = {}

        try:
            field = self.model._meta.get_field('status')
            filters.update({'status': True})
        except FieldDoesNotExist:
            pass

        # stream.user.is_staff or stream.user.is_superuser
        try:
            field = self.model._meta.get_field('pub_date')
            filters.update({'pub_date__lt': timezone.now()})
        except FieldDoesNotExist:
            pass

        if len(self.dynamic_filters) > 0:
            for key in self.dynamic_filters:
                param_key = self.dynamic_filters[key][0]
                param_class = self.dynamic_filters[key][1]
                if param_key in stream.params:
                    value = param_class(stream.params[param_key])
                    filters[key] = value

        # for filter in self.available_filters:
        #     if stream.query.__contains__(filter[0]):
        #         self.filters[filter[1]] = stream.query.__getitem__(filter[0])

        return filters

    orders = []
    available_orders = []

    # data
    def getResource(self, HttpRequest, params):
        stream = Stream(HttpRequest, params)

        input_key, field_key = self.getKeys(self.key)

        try:
            query = {field_key: stream.params[input_key]}
            stream.response = self.model.objects.get(**query)
        except self.model.DoesNotExist:
            stream.response = HttpResponseNotFound()

        return stream

    def getCollection(self, HttpRequest, params):
        stream = Stream(HttpRequest, params)
        stream.response = self.model.objects

        # validation and everything else

        # filters
        filters = self.getFilters(stream)
        stream.response = stream.response.filter(**filters)

        # sorting
        if len(self.orders) > 0:
            stream.response = stream.response.order_by(*self.orders)

        # pagination
        stream.total = stream.response.count()
        stream.skip = stream.client_params['skip']
        stream.count = stream.client_params['count']

        if stream.skip > stream.total:
            stream.skip = stream.total
        elif stream.skip < 0:
            stream.skip = 0

        if stream.count < 0:
            stream.count = 0
        elif stream.count > DEFAULT_PAGE_SIZE_MAX:
            stream.count = DEFAULT_PAGE_SIZE_MAX

        stream.response = stream.response[
            stream.skip:stream.skip + stream.count
        ]

        return stream

    def getKeys(self, string):
        if ' as ' in string:
            orig, view = string.split(' as ')
        else:
            orig = view = string
        return orig, view

    # NOTE: is the prefix actually used?
    def export__resource(self, resource, schema=None, prefix=None):
        data = {}

        if type(schema) != tuple:
            schema = self.scheme

        def get_attr_by_path(resource, path):
            key = path.pop(0)

            if ismethod(resource):
                resource = resource()

            if resource and hasattr(resource, key):
                value = getattr(resource, key)
            else:
                value = None

            if len(path) == 0:
                if isinstance(value, datetime.date):
                    return value.isoformat()
                if isinstance(value, datetime.time):
                    return value.isoformat()
                if isinstance(value, datetime.datetime):
                    return value.isoformat()
                if ismethod(value):
                    return value()
                return value
            else:
                return get_attr_by_path(value, path)

        for item in schema:
            if type(item) == str:
                orig, view = self.getKeys(item)
                if prefix:
                    orig = prefix + orig
                path = orig.split('.')
                data[view] = get_attr_by_path(resource, path)
            elif type(item) == tuple:
                orig, view = self.getKeys(item[0])
                keys = item[1]
                model = getattr(resource, orig)

                if isinstance(model, Manager):
                    resources = model.all()
                    if type(keys) == tuple:
                        data[view] = self.export(resources, schema=keys)
                    elif type(keys) == str:
                        data[view] = [i[keys] for i in self.export(
                            resources, schema=(keys,))]
                    else:
                        print('*** Manager: incorrect type:', type(keys))
                elif isinstance(model, Model):
                    if type(keys) == str:
                        keys = (keys,)
                    if type(keys) == tuple:
                        data[view] = self.export(model, schema=keys)
                    else:
                        print('*** Model: incorrect type:', type(keys))
                else:
                    # `model` is neither a Manager nor a Model instance
                    print('*** Not a Manager! ***', model)
            else:
                print('Unexpected schema item')

        return data

    # convert the data for output
    def export(self, response, schema=None):
        if isinstance(response, QuerySet):
            if not self.scheme:
                return None
            data = []

            # FIXME: second-level QUERIES are NOT filtered
            # filters = {}
            #
            # for filter_key in self.__instance['filters']:
            #     filter = {filter_key: self.__instance['filters'][filter_key]}
            #     try:
            #         field = response.model._meta.get_field(filter_key)
            #         filters.update(filter)
            #     except FieldDoesNotExist:
            #         pass
            #
            # response = response.filter(**filters)

            for resource in response:
                item = self.export__resource(resource, schema)
                if item is not None:
                    data.append(item)
        elif isinstance(response, Model):
            if not self.scheme:
                return None
            data = self.export__resource(response, schema)
        else:
            return None

        return data

    def get_exported_resource(self, HttpRequest, params):
        stream = self.getResource(HttpRequest, params)
        stream.data = self.export(stream.response)
        return stream

    def get_exported_collection(self, HttpRequest, params):
        stream = self.getCollection(HttpRequest, params)
        stream.data = self.export(stream.response)
        return stream


class ResourceMixin:

    def head(self, HttpRequest, **kwargs):
        '''Resource header'''
        pass

    @convertToJSON
    def get(self, HttpRequest, **kwargs):
        '''Resource'''
        stream = self.get_exported_resource(HttpRequest, kwargs)
        return stream

    def post(self, HttpRequest, *args, **kwargs):
        '''Modify the resource'''
        pass

    def delete(self, HttpRequest, **kwargs):
        '''Delete the resource'''
        pass


class CollectionMixin:

    def head(self, HttpRequest, **kwargs):
        '''Collection header'''
        pass

    @convertToJSON
    def get(self, HttpRequest, **kwargs):
        '''Collection'''
        stream = self.get_exported_collection(HttpRequest, kwargs)
        return stream

    def post(self, HttpRequest, *args, **kwargs):
        '''New resource in the collection'''
        pass


from django.urls import path


class Scheme:
    pass


class API:
    model = None
    name = None
    scheme = []
    # filters = {}
    # order = []

    def __init__(self, Model):
        self.model = Model
        self.name = Model.KK.name_plural
        self.scheme = Model.KK.scheme

        class Mixin(Base):
            model = self.model
            scheme = self.scheme

        class ResourceView(Mixin, ResourceMixin):
            pass

        class CollectionView(Mixin, CollectionMixin):
            pass

        self.Mixin = Mixin
        self.ResourceView = ResourceView
        self.CollectionView = CollectionView

    def getUrlPatterns(self):
        patterns = []
        patterns.append(
            path('{}/<int:id>/'.format(self.name), self.ResourceView.as_api())
        )
        patterns.append(
            path('{}/'.format(self.name), self.CollectionView.as_api())
        )
        return patterns
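A minimal wiring sketch for urls.py, assuming a model that carries the expected KK metadata (`Article`, its KK inner class, and the import path are hypothetical):

from kk.api.views import API
from .models import Article  # hypothetical model with a KK inner class

api = API(Article)
urlpatterns = api.getUrlPatterns()  # /<name_plural>/ and /<name_plural>/<int:id>/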
27.037363
78
0.520728
1,173
12,302
5.356351
0.179881
0.033424
0.016712
0.011459
0.172847
0.138469
0.120325
0.049976
0.039472
0.039472
0
0.003121
0.374898
12,302
454
79
27.096916
0.813914
0.139327
0
0.236111
0
0
0.021966
0
0
0
0
0.002203
0
1
0.076389
false
0.034722
0.055556
0
0.298611
0.013889
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f180d4f03f7e3782891a070c90168ee6769dbd4b
2,393
py
Python
boardfarm/devices/authenticated_telnet.py
nickberry17/boardfarm
80f24fc97eff9a987250a6334b76eff08e001189
[ "BSD-3-Clause-Clear" ]
17
2018-04-19T08:35:47.000Z
2021-11-01T01:38:33.000Z
boardfarm/devices/authenticated_telnet.py
nickberry17/boardfarm
80f24fc97eff9a987250a6334b76eff08e001189
[ "BSD-3-Clause-Clear" ]
190
2018-04-19T07:00:18.000Z
2022-02-11T01:42:51.000Z
boardfarm/devices/authenticated_telnet.py
nickberry17/boardfarm
80f24fc97eff9a987250a6334b76eff08e001189
[ "BSD-3-Clause-Clear" ]
30
2018-04-12T01:49:21.000Z
2022-02-11T14:53:19.000Z
import pexpect

import boardfarm.config as config
from boardfarm.lib.bft_pexpect_helper import bft_pexpect_helper


class AuthenticatedTelnetConnection:
    """Allow authenticated telnet sessions to be established with a \
    unit's serial ports by OpenGear server.

    If a board is connected serially to a OpenGear terminal server,
    this class can be used to connect to the board.
    """

    def __init__(self, device=None, conn_cmd=None, **kwargs):
        """Initialize the class instance to open a pexpect session.

        :param device: device to connect, defaults to None
        :type device: object
        :param conn_cmd: conn_cmd to connect to device, defaults to None
        :type conn_cmd: string
        :param ``**kwargs``: args to be used
        :type ``**kwargs``: dict
        """
        self.device = device
        self.conn_cmd = conn_cmd
        self.device.conn_cmd = conn_cmd
        if not config.ldap:
            raise Exception("Please, provide ldap credentials in env variables")
        self.username, self.password = config.ldap.split(";")

    def connect(self):
        """Connect to the board/station using telnet.

        This method spawns a pexpect session with the telnet command.
        The telnet port must be as per the ser2net configuration file
        in order to connect to serial ports of the board.

        :raises: Exception Board is in use (connection refused).
        """
        if "telnet" not in self.conn_cmd:
            raise Exception(
                "Telnet connection string is not found. Check inventory server or ams.json"
            )

        bft_pexpect_helper.spawn.__init__(
            self.device, command="/bin/bash", args=["-c", self.conn_cmd]
        )

        try:
            self.device.expect(["login:"])
            self.device.sendline(self.username)
            self.device.expect(["Password:"])
            self.device.setecho(False)
            self.device.sendline(self.password)
            self.device.setecho(True)
            self.device.expect(["OpenGear Serial Server"])
        except pexpect.EOF:
            # must come before the generic handler: pexpect.EOF is a
            # subclass of Exception, so the original ordering made this
            # branch unreachable
            raise Exception("Board is in use (connection refused).")
        except Exception:
            raise

    def close(self):
        """Close the connection."""
        try:
            self.sendcontrol("]")
            self.sendline("q")
        finally:
            super().close()
34.681159
92
0.617635
290
2,393
5.013793
0.37931
0.075653
0.033012
0.028886
0.05227
0.05227
0.05227
0
0
0
0
0.000591
0.292938
2,393
68
93
35.191176
0.858747
0.33013
0
0.054054
0
0
0.147039
0
0
0
0
0
0
1
0.081081
false
0.081081
0.081081
0
0.189189
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
f182e3c76127620cdb609d77542bf57fc763d905
3,616
py
Python
Scripts/generateOTTagsModule.py
justvanrossum/fontgoggles
0054f17d3c82938f62468c0cdb7d90b68c76d396
[ "Apache-2.0" ]
308
2020-02-17T10:08:17.000Z
2022-03-27T19:47:49.000Z
Scripts/generateOTTagsModule.py
justvanrossum/fontgoggles
0054f17d3c82938f62468c0cdb7d90b68c76d396
[ "Apache-2.0" ]
147
2020-02-17T10:03:56.000Z
2022-03-28T17:37:09.000Z
Scripts/generateOTTagsModule.py
justvanrossum/fontgoggles
0054f17d3c82938f62468c0cdb7d90b68c76d396
[ "Apache-2.0" ]
41
2020-02-25T16:15:51.000Z
2022-03-12T00:02:11.000Z
import os
import re


def parse(data):
    start = data.find("<tbody>")
    end = data.find("</tbody>")
    data = data[start+7:end]
    for chunk in re.findall(r"<tr>.+?</tr>", data, re.DOTALL):
        fields = re.findall(r"<td>(.+?)</td>", chunk, re.DOTALL)
        parsedFields = []
        for field in fields:
            m = re.search(r'href="(.+?)"', field)
            if m is not None and m.group(1) != "#foot":
                parsedFields.append(m.group(1))
            tagParts = field.split("&#39;")
            if len(tagParts) >= 2:
                parsedFields.append(tagParts[1].replace("&nbsp;", " "))
            else:
                parsedFields.append(field)
        if parsedFields:
            yield parsedFields


def formatFeatures(data, baseURL):
    print("features = {")
    print("    # tag, friendly name, documentation URL")
    for link, tag, friendlyName in data:
        if tag == 'cv01':
            tags = [f"cv{i:02d}" for i in range(1, 100)]
        else:
            tags = [tag]
        for tag in tags:
            print(f"    {tag!r}: ({friendlyName!r}, {baseURL+link!r}),")
    print("}")


def formatScripts(data):
    print("scripts = {")
    print("    # tag, friendly name")
    duplicates = {}
    for i, (friendlyName, tag) in enumerate(data):
        if tag in duplicates:
            duplicates[tag] = duplicates[tag] + ", " + friendlyName
            data[i] = (None, None)  # skip
        else:
            duplicates[tag] = friendlyName
    for _, tag in data:
        if tag is None:
            continue
        friendlyName = duplicates[tag]
        print(f"    {tag!r}: {friendlyName!r},")
    print("}")


def formatLanguages(data):
    print("languages = {")
    print("    # tag, friendly name, ISO 639 IDs (if applicable)")
    for friendlyName, *fields in data:
        tag = fields[0]
        if len(tag) < 4:
            tag += (4 - len(tag)) * " "
        assert len(tag) == 4, tag
        if len(fields) > 1:
            assert len(fields) == 2
            isoCodes = [isoCode.strip() for isoCode in fields[1].split(",")]
        else:
            isoCodes = []
        t = (friendlyName,) + tuple(isoCodes)
        print(f"    {tag!r}: {t},")
    print("}")


# https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist
# https://docs.microsoft.com/en-us/typography/opentype/spec/scripttags
# https://docs.microsoft.com/en-us/typography/opentype/spec/languagetags

if __name__ == "__main__":
    import sys
    import time

    baseURL = "https://docs.microsoft.com/en-us/typography/opentype/spec/"

    if len(sys.argv) > 1:
        with open(sys.argv[1]) as f:
            html = f.read()
        pages = [html]
    else:
        import urllib.request
        pages = []
        print(f"# Generated by {os.path.basename(__file__)}")
        print("# Scraped from:")
        for page in ["featurelist", "scripttags", "languagetags"]:
            url = baseURL + page
            print(f"# {url}")
            with urllib.request.urlopen(url) as fp:
                html = fp.read().decode("utf-8", errors="replace")
            pages.append(html)
        print()
        print()
        print("__all__ = ['features', 'scripts', 'languages']")
        print()

    for html in pages:
        print()
        parsed = list(parse(html))
        if "<title>Registered features" in html:
            formatFeatures(parsed, baseURL)
        elif "<title>Script tags" in html:
            formatScripts(parsed)
        elif "<title>Language system tags" in html:
            formatLanguages(parsed)
        else:
            assert 0, "huh."
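A usage sketch based on the `__main__` block above: with no arguments the script scrapes the three Microsoft spec pages and writes a Python module to stdout; with one argument it parses a saved HTML file instead (the file names below are hypothetical):

# python generateOTTagsModule.py > opentype_tags.py
# python generateOTTagsModule.py saved_featurelist.html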
32
76
0.535675
412
3,616
4.660194
0.315534
0.015625
0.0375
0.04375
0.121875
0.121875
0.097917
0.097917
0.097917
0
0
0.011665
0.3125
3,616
112
77
32.285714
0.76066
0.059181
0
0.134021
0
0
0.188402
0.008243
0
0
0
0
0.030928
1
0.041237
false
0
0.051546
0
0.092784
0.206186
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f182fcd36ab8f4942153a6e6391bae9fda5d3a4a
241
py
Python
core/test.py
tianyaqu/guess-your-song
3655ca2091b49c5934235c3fd8dd4442eed200d6
[ "MIT" ]
15
2015-08-01T01:57:14.000Z
2021-10-30T10:09:19.000Z
core/test.py
tianyaqu/guess-your-song
3655ca2091b49c5934235c3fd8dd4442eed200d6
[ "MIT" ]
null
null
null
core/test.py
tianyaqu/guess-your-song
3655ca2091b49c5934235c3fd8dd4442eed200d6
[ "MIT" ]
6
2016-04-10T14:49:15.000Z
2020-03-09T04:15:41.000Z
from melody_feature import *

if __name__ == '__main__':
    file = 'alphaville-forever_young.mid'
    for k, note in note_from_midi_test(file):
        print(k)
        name = 'forever_youngy' + str(k) + '.txt'
        vector_to_file(note, name)
30.125
47
0.655602
34
241
4.176471
0.705882
0
0
0
0
0
0
0
0
0
0
0
0.228216
241
8
48
30.125
0.763441
0
0
0
0
0
0.223141
0.115702
0
0
0
0
0
0
null
null
0
0.142857
null
null
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
f183e8d9598657f18e0da5ccceacb5083a04c145
4,000
py
Python
tests/test_request_logging_middleware.py
quoth/fastapi-cloud-logging
680f5ab6d90a6d06d1037845a23f355cc6d3b0fe
[ "MIT" ]
null
null
null
tests/test_request_logging_middleware.py
quoth/fastapi-cloud-logging
680f5ab6d90a6d06d1037845a23f355cc6d3b0fe
[ "MIT" ]
null
null
null
tests/test_request_logging_middleware.py
quoth/fastapi-cloud-logging
680f5ab6d90a6d06d1037845a23f355cc6d3b0fe
[ "MIT" ]
null
null
null
import pytest
from fastapi import Request
from pytest_mock import MockerFixture
from starlette.datastructures import Headers

from fastapi_cloud_logging.request_logging_middleware import (
    _FASTAPI_REQUEST_CONTEXT,
    RequestLoggingMiddleware,
)


@pytest.fixture
def middleware(mocker: MockerFixture) -> RequestLoggingMiddleware:
    return RequestLoggingMiddleware(app=mocker.Mock(), dispatch=mocker.Mock())


def test__set_request_context(middleware: RequestLoggingMiddleware):
    request = Request(
        {
            "type": "http",
            "method": "GET",
            "root_path": "https://example.com/",
            "path": "",
            "headers": Headers({}).raw,
            "client": ("127.0.0.1", 80),
        }
    )
    middleware.set_request_context(request=request)
    request_context = _FASTAPI_REQUEST_CONTEXT.get()
    assert request_context is not None
    assert request_context.protocol == "https"
    assert request_context.request_method == "GET"
    assert request_context.remote_ip == "127.0.0.1"


@pytest.mark.parametrize(
    (
        "example_request, http_method, protocol, ip_address, content_length,"
        "url, user_agent, referer, trace_content"
    ),
    [
        (
            Request(
                {
                    "type": "http",
                    "method": "GET",
                    "root_path": "https://example.com/",
                    "path": "",
                    "headers": Headers({"X-Forwarded-For": "192.168.0.1"}).raw,
                    "client": ("127.0.0.1", 80),
                }
            ),
            "GET",
            "https",
            "192.168.0.1",
            None,
            "https://example.com/",
            None,
            None,
            None,
        ),
        (
            Request(
                {
                    "type": "http",
                    "method": "POST",
                    "root_path": "https://example.com/",
                    "path": "",
                    "headers": Headers(
                        {"User-Agent": "curl 7.79.1", "X-Forwarded-For": "192.168.0.1"}
                    ).raw,
                    "client": ("127.0.0.1", 80),
                }
            ),
            "POST",
            "https",
            "192.168.0.1",
            None,
            "https://example.com/",
            "curl 7.79.1",
            None,
            None,
        ),
        (
            Request(
                {
                    "type": "http",
                    "method": "POST",
                    "root_path": "https://example.com/",
                    "path": "",
                    "headers": Headers(
                        {
                            "User-Agent": "curl 7.79.1",
                            "X-Forwarded-For": "192.168.0.1",
                            "X-Cloud-Trace-Context": "105445aa7843bc8bf206b12000100000/1;o=1",
                        }
                    ).raw,
                    "client": ("127.0.0.1", 80),
                }
            ),
            "POST",
            "https",
            "192.168.0.1",
            None,
            "https://example.com/",
            "curl 7.79.1",
            None,
            "105445aa7843bc8bf206b12000100000/1;o=1",
        ),
    ],
)
def test__parse_request(
    middleware: RequestLoggingMiddleware,
    example_request: Request,
    http_method,
    protocol,
    ip_address,
    content_length,
    url,
    user_agent,
    referer,
    trace_content,
):
    request_context = middleware._parse_request(example_request)
    assert request_context.request_method == http_method
    assert request_context.protocol == protocol
    assert request_context.content_length == content_length
    assert request_context.request_url == url
    assert request_context.remote_ip == ip_address
    assert request_context.user_agent == user_agent
    assert request_context.referer == referer
    assert request_context.cloud_trace_content == trace_content
30.075188
94
0.48975
357
4,000
5.29972
0.187675
0.133192
0.12685
0.02537
0.450846
0.386364
0.386364
0.377378
0.377378
0.360465
0
0.063544
0.38625
4,000
132
95
30.30303
0.707128
0
0
0.448
0
0
0.19275
0.02425
0
0
0
0
0.096
1
0.024
false
0
0.04
0.008
0.072
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f1848a6a488cda88f054bb7571aceea97a0b3d14
690
py
Python
spam-filter/NaiveBayesClassifier.py
sevmardi/ml-projects
0eb218c77cda61285cfcf599599ff28a8a8deba7
[ "MIT" ]
null
null
null
spam-filter/NaiveBayesClassifier.py
sevmardi/ml-projects
0eb218c77cda61285cfcf599599ff28a8a8deba7
[ "MIT" ]
7
2020-06-06T01:26:08.000Z
2022-02-10T11:26:58.000Z
spam-filter/NaiveBayesClassifier.py
sevmardi/ml-projects
0eb218c77cda61285cfcf599599ff28a8a8deba7
[ "MIT" ]
null
null
null
import MailSpamFilter


class NaiveBayesClassifier:

    def __init__(self, k=0.5):
        self.k = k
        self.words_probs = []

    def train(self, training_set):
        # count spam and non-spam messages
        num_spams = len(
            [is_spam for message, is_spam in training_set if is_spam])
        num_non_spams = len(training_set) - num_spams

        # run training data through our "pipeline"
        word_counts = MailSpamFilter.count_words(training_set)
        self.words_probs = MailSpamFilter.word_probabilities(
            word_counts, num_spams, num_non_spams, self.k)

    def classify(self, message):
        # the bare name `spam_probability` was undefined here; like the
        # other helpers, it lives in the MailSpamFilter module
        return MailSpamFilter.spam_probability(self.words_probs, message)
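A minimal usage sketch; the (message, is_spam) pairs are made up, and the helpers are assumed to live in MailSpamFilter:

classifier = NaiveBayesClassifier(k=0.5)
classifier.train([("win money now", True), ("meeting at noon", False)])
score = classifier.classify("win a prize")  # spam probability estimate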
31.363636
70
0.671014
89
690
4.910112
0.438202
0.100687
0.09611
0
0
0
0
0
0
0
0
0.003876
0.252174
690
21
71
32.857143
0.843023
0.105797
0
0
0
0
0
0
0
0
0
0
0
1
0.214286
false
0
0.071429
0.071429
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
f18699f6bd3e257501ff398db9ec35282969ef93
311
py
Python
app/middleware/python/ram_usage.py
anhquoctran/node-rpi-monsys
5c0e7c5b4b7bf6de966dfb6ffe32bd283fb8fb7a
[ "MIT" ]
null
null
null
app/middleware/python/ram_usage.py
anhquoctran/node-rpi-monsys
5c0e7c5b4b7bf6de966dfb6ffe32bd283fb8fb7a
[ "MIT" ]
null
null
null
app/middleware/python/ram_usage.py
anhquoctran/node-rpi-monsys
5c0e7c5b4b7bf6de966dfb6ffe32bd283fb8fb7a
[ "MIT" ]
null
null
null
#!/usr/bin/env python
import psutil
import sys
import time


# Print the virtual memory usage percentage once per second
def get_memory_percentage():
    while True:
        r = psutil.virtual_memory().percent
        print(r)
        sys.stdout.flush()
        time.sleep(1)


if __name__ == '__main__':
    get_memory_percentage()
19.4375
43
0.665595
42
311
4.619048
0.714286
0.134021
0.195876
0
0
0
0
0
0
0
0
0.004202
0.234727
311
15
44
20.733333
0.810924
0.186495
0
0
0
0
0.031873
0
0
0
0
0
0
1
0.090909
false
0
0.272727
0
0.363636
0.090909
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
f186dbe8c5a6f32bd57f511982e06c3f8a0977c9
1,055
py
Python
api/tests/snapshots/snap_test_api.py
City-of-Helsinki/notification-service-api
1c4c6d900de5f2ce9ce9becb6774d10892084ede
[ "MIT" ]
null
null
null
api/tests/snapshots/snap_test_api.py
City-of-Helsinki/notification-service-api
1c4c6d900de5f2ce9ce9becb6774d10892084ede
[ "MIT" ]
17
2020-07-07T12:08:34.000Z
2021-06-10T20:26:20.000Z
api/tests/snapshots/snap_test_api.py
City-of-Helsinki/notification-service-api
1c4c6d900de5f2ce9ce9becb6774d10892084ede
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals

from snapshottest import Snapshot

snapshots = Snapshot()

snapshots["test_send_sms 1"] = {
    "errors": [],
    "messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
    "warnings": [],
}

snapshots["test_webhook_delivery_log 1"] = {
    "errors": [],
    "messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
    "warnings": [],
}

snapshots["test_webhook_delivery_log 2"] = {
    "errors": [],
    "messages": {
        "+358461231231": {
            "billingref": "Palvelutarjotin",
            "destination": "+358461231231",
            "sender": "hel.fi",
            "smscount": "1",
            "status": "DELIVERED",
            "statustime": "2020-07-21T09:18:00Z",
        }
    },
    "warnings": [],
}

snapshots["test_get_delivery_log 1"] = {
    "errors": [],
    "messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
    "warnings": [],
}
25.731707
87
0.563033
88
1,055
6.568182
0.522727
0.089965
0.179931
0.140138
0.484429
0.484429
0.484429
0.484429
0.484429
0.484429
0
0.144608
0.22654
1,055
40
88
26.375
0.563725
0.058768
0
0.34375
0
0
0.456566
0.071717
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f18777bf37a356b77a08bfd81442ba10cd669ef2
2,552
py
Python
metrics.py
alexjercan/normals-estimation
6f18f9248b7c01842f0d4ba57cde5227a893f506
[ "MIT" ]
null
null
null
metrics.py
alexjercan/normals-estimation
6f18f9248b7c01842f0d4ba57cde5227a893f506
[ "MIT" ]
null
null
null
metrics.py
alexjercan/normals-estimation
6f18f9248b7c01842f0d4ba57cde5227a893f506
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
#
# Developed by Alex Jercan <jercan_alex27@yahoo.com>
#
# References:
# - https://github.com/XinJCheng/CSPN/blob/b3e487bdcdcd8a63333656e69b3268698e543181/cspn_pytorch/utils.py#L19
# - https://web.eecs.umich.edu/~fouhey/2016/evalSN/evalSN.html
#

from math import radians

import torch
import torch.nn.functional as F


class MetricFunction():
    def __init__(self, batch_size) -> None:
        self.batch_size = batch_size
        self.total_size = 0
        self.error_sum = {}
        self.error_avg = {}

    def evaluate(self, predictions, targets):
        normal_p = predictions
        normal_gt = targets

        error_val = evaluate_error_normal(normal_p, normal_gt)
        self.total_size += self.batch_size
        self.error_avg = avg_error(self.error_sum, error_val,
                                   self.total_size, self.batch_size)
        return self.error_avg

    def show(self):
        error = self.error_avg
        format_str = ('======NORMALS=======\n'
                      'MSE=%.4f\tRMSE=%.4f\tMAE=%.4f\tMME=%.4f\n'
                      'TANGLE11.25=%.4f\tTANGLE22.5=%.4f\tTANGLE30.0=%.4f')
        return format_str % (error['N_MSE'], error['N_RMSE'], error['N_MAE'],
                             error['N_MME'], error['N_TANGLE11.25'],
                             error['N_TANGLE22.5'], error['N_TANGLE30.0'])


def evaluate_error_normal(pred_normal, gt_normal):
    error = {}
    eps = 1e-7

    pred_normal = F.normalize(pred_normal, p=2, dim=1)
    gt_normal = F.normalize(gt_normal, p=2, dim=1)

    dot_product = torch.mul(pred_normal, gt_normal).sum(dim=1)
    angular_error = torch.acos(torch.clamp(dot_product, -1 + eps, 1 - eps))

    error['N_MSE'] = torch.mean(torch.mul(angular_error, angular_error))
    error['N_RMSE'] = torch.sqrt(error['N_MSE'])
    error['N_MAE'] = torch.mean(angular_error)
    error['N_MME'] = torch.median(angular_error)
    error['N_TANGLE11.25'] = torch.mean((angular_error <= radians(11.25)).float())
    error['N_TANGLE22.5'] = torch.mean((angular_error <= radians(22.5)).float())
    error['N_TANGLE30.0'] = torch.mean((angular_error <= radians(30.0)).float())

    return error


# average the error over all batches seen so far
def avg_error(error_sum, error_val, total_size, batch_size):
    error_avg = {}
    for item, value in error_val.items():
        error_sum[item] = error_sum.get(item, 0) + value * batch_size
        error_avg[item] = error_sum[item] / float(total_size)
    return error_avg


def print_single_error(epoch, loss, error):
    format_str = '%s\nEpoch: %d, loss=%s\n%s\n'
    print(format_str % ('eval_avg_error', epoch, loss, error))
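A minimal usage sketch: evaluating one batch of predicted vs. ground-truth normal maps (the random tensors stand in for real data of shape (batch, 3, H, W)):

import torch

metric = MetricFunction(batch_size=4)
pred = torch.randn(4, 3, 8, 8)  # hypothetical predicted normals
gt = torch.randn(4, 3, 8, 8)    # hypothetical ground truth
metric.evaluate(pred, gt)
print(metric.show())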
35.444444
138
0.653605
367
2,552
4.310627
0.318801
0.05689
0.03287
0.053097
0.120101
0.03287
0
0
0
0
0
0.046027
0.191223
2,552
72
139
35.444444
0.720446
0.104232
0
0
0
0.022222
0.121265
0.049649
0
0
0
0
0
1
0.133333
false
0
0.066667
0
0.311111
0.044444
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f18793cae2b85ec27dc63fa7682a9e35d2982e31
102
py
Python
test/test_parse.py
aagnone3/edit-learn
1e033e3e358510f5400f1c7cc5687cafdcef1a00
[ "Apache-2.0" ]
null
null
null
test/test_parse.py
aagnone3/edit-learn
1e033e3e358510f5400f1c7cc5687cafdcef1a00
[ "Apache-2.0" ]
2
2018-06-17T21:16:37.000Z
2018-06-17T23:38:31.000Z
test/test_parse.py
aagnone3/edit-learn
1e033e3e358510f5400f1c7cc5687cafdcef1a00
[ "Apache-2.0" ]
null
null
null
import numpy

import ielearn
from ielearn import extract, predict, util


def test_123():
    assert True
12.75
42
0.784314
15
102
5.266667
0.8
0
0
0
0
0
0
0
0
0
0
0.035294
0.166667
102
7
43
14.571429
0.894118
0
0
0
0
0
0
0
0
0
0
0
0.2
1
0.2
true
0
0.6
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f188a72c45fc5d13f8ee8845ece06a14fed39304
2,088
py
Python
pyfarm/models/statistics/task_count.py
guidow/pyfarm-master
d41c8f1eb5bfefb8400d400bcecadf197bcfb80a
[ "Apache-2.0" ]
null
null
null
pyfarm/models/statistics/task_count.py
guidow/pyfarm-master
d41c8f1eb5bfefb8400d400bcecadf197bcfb80a
[ "Apache-2.0" ]
null
null
null
pyfarm/models/statistics/task_count.py
guidow/pyfarm-master
d41c8f1eb5bfefb8400d400bcecadf197bcfb80a
[ "Apache-2.0" ]
null
null
null
# No shebang line, this module is meant to be imported
#
# Copyright 2015 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
TaskCount Model
===============

Model describing the number of tasks in a given queue in a given state
at a point in time
"""

from datetime import datetime

from pyfarm.master.application import db
from pyfarm.master.config import config
from pyfarm.models.core.types import id_column


class TaskCount(db.Model):
    __bind_key__ = 'statistics'
    __tablename__ = config.get("table_statistics_task_count")

    id = id_column(db.Integer)

    counted_time = db.Column(
        db.DateTime, nullable=False, default=datetime.utcnow,
        doc="The point in time at which these counts were done")

    # No foreign key reference, because this table is stored in a separate db.
    # Code reading it will have to check for referential integrity manually.
    job_queue_id = db.Column(
        db.Integer, nullable=True,
        doc="ID of the jobqueue these stats refer to")

    total_queued = db.Column(
        db.Integer, nullable=False,
        doc="Number of queued tasks at `counted_time`")

    total_running = db.Column(
        db.Integer, nullable=False,
        doc="Number of running tasks at `counted_time`")

    total_done = db.Column(
        db.Integer, nullable=False,
        doc="Number of done tasks at `counted_time`")

    total_failed = db.Column(
        db.Integer, nullable=False,
        doc="Number of failed tasks at `counted_time`")
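A hedged sketch of recording one sample, assuming the usual Flask-SQLAlchemy session exposed by `db` (the queue id and counts are made up):

# Hypothetical: persist one statistics sample for queue 7.
sample = TaskCount(job_queue_id=7, total_queued=12, total_running=3,
                   total_done=40, total_failed=1)
db.session.add(sample)
db.session.commit()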
29.408451
77
0.690134
295
2,088
4.8
0.474576
0.039548
0.063559
0.060028
0.182203
0.115819
0.115819
0.115819
0.115819
0
0
0.004957
0.227011
2,088
70
78
29.828571
0.872367
0.429119
0
0.30303
0
0
0.243151
0.023116
0
0
0
0
0
1
0
false
0
0.121212
0
0.424242
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f189b87dcc3283704fc6b747b9d51de15714a10a
2,817
py
Python
malware prediction.py
Highcourtdurai/AI-prediction
bea4c19c0c9bf3432be6cf61f9a4992d3070746c
[ "Apache-2.0" ]
null
null
null
malware prediction.py
Highcourtdurai/AI-prediction
bea4c19c0c9bf3432be6cf61f9a4992d3070746c
[ "Apache-2.0" ]
null
null
null
malware prediction.py
Highcourtdurai/AI-prediction
bea4c19c0c9bf3432be6cf61f9a4992d3070746c
[ "Apache-2.0" ]
null
null
null
import warnings
warnings.filterwarnings('ignore')

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# from sklearn.svm import SVC
# from sklearn.model_selection import KFold
from sklearn import preprocessing

data = pd.read_csv('android.csv')
print(data.shape)
data = data.sample(frac=1).reset_index(drop=True)
print(data.head())

sns.countplot(x='malware', data=data)

# over-sampling the minority class
target_count = data.malware.value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])

count_class_0, count_class_1 = data.malware.value_counts()
df_class_0 = data[data['malware'] == 0]
df_class_1 = data[data['malware'] == 1]
df_class_1_over = df_class_1.sample(count_class_0, replace=True)
df_test_over = pd.concat([df_class_0, df_class_1_over], axis=0)
print(df_test_over.shape)
sns.countplot(x='malware', data=df_test_over)

X = df_test_over.iloc[:, df_test_over.columns != 'malware']
Y = df_test_over.iloc[:, df_test_over.columns == 'malware']
print(X.head())
print(Y.head())

from sklearn.utils import shuffle
X, Y = shuffle(X, Y)
print(X.head())
X = X.drop(columns='name')
print(X.head())
print(Y.head())

from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
bestfeatures = SelectKBest(score_func=chi2, k=10)
fit = bestfeatures.fit(X, Y)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns, dfscores], axis=1)
featureScores.columns = ['Specs', 'Score']
featureScores.nlargest(10, 'Score')

from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
model.fit(X, Y)
print(model.feature_importances_)
# use the inbuilt feature_importances_ of tree-based classifiers
# plot a graph of feature importances for better visualization
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(10).plot(kind='barh')
plt.show()

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.2, random_state=0)
print(X_train.shape)
print(X_train.head())
print(y_train.head())

from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train)
y_pred = tree.predict(X_test)
print(y_pred)
model2 = metrics.accuracy_score(y_test, y_pred)
print(model2)
cnf_matrix = confusion_matrix(y_test, y_pred)
labels = [0, 1]
sns.heatmap(cnf_matrix, annot=True, cmap="YlGnBu", fmt=".3f",
            xticklabels=labels, yticklabels=labels)
plt.show()
23.475
102
0.746539
417
2,817
4.848921
0.280576
0.059842
0.034619
0.017804
0.176558
0.068249
0.068249
0.068249
0.037587
0
0
0.013952
0.134895
2,817
119
103
23.672269
0.815757
0.071707
0
0.15942
0
0
0.043024
0
0
0
0
0
0
1
0
false
0
0.275362
0
0.275362
0.231884
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f18ade7811c36ebc2634566008604890a3cd1f7b
3,147
py
Python
_unittests/ut_ipythonhelper/test_ipythonhelper.py
janjagusch/pyquickhelper
d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b
[ "MIT" ]
18
2015-11-10T08:09:23.000Z
2022-02-16T11:46:45.000Z
_unittests/ut_ipythonhelper/test_ipythonhelper.py
janjagusch/pyquickhelper
d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b
[ "MIT" ]
321
2015-06-14T21:34:28.000Z
2021-11-28T17:10:03.000Z
_unittests/ut_ipythonhelper/test_ipythonhelper.py
janjagusch/pyquickhelper
d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b
[ "MIT" ]
10
2015-06-20T01:35:00.000Z
2022-01-19T15:54:32.000Z
""" @brief test log(time=2s) """ import sys import os import unittest from pyquickhelper.pycode import ExtTestCase from pyquickhelper.ipythonhelper import AutoCompletion, AutoCompletionFile, MagicCommandParser, MagicClassWithHelpers, open_html_form class TestAutoCompletion(ExtTestCase): def test_completion(self): root = AutoCompletion() cl = root._add("name", "TestAutoCompletion") cl._add("method", "test_completion") cl._add("method2", "test_completion") cl = root._add("name2", "TestAutoCompletion2") cl._add("method3", "test_completion") s = (str # unicode# (root)) self.assertIn(" | |- method2", s) ls = len(root) self.assertEqual(ls, 6) def test_completion_file(self): fold = os.path.abspath(os.path.split(__file__)[0]) fold = os.path.join(fold, "..", "..", "src") this = AutoCompletionFile(fold) ls = len(this) self.assertGreater(ls, 30) def test_html_form(self): params = {"parA": "valueA", "parB": "valueB"} title = 'unit_test_title' key_save = 'jjj' raw = open_html_form(params, title, key_save, raw=True) self.assertGreater(len(raw), 1) def test_eval(self): params = {"x": 3, "y": 4} cl = MagicCommandParser(prog="test_command") res = cl.eval("x+y", params) self.assertEqual(res, 7) def test_parse(self): parser = MagicCommandParser(prog="test_command", description='display the first lines of a text file') typstr = str # unicode# parser.add_argument('f', type=typstr, help='filename') parser.add_argument( '-n', '--n', type=typstr, default=10, help='number of lines to display') parser.add_argument( '-e', '--encoding', default="utf8", help='file encoding') params = {"x": 3, "y": 4} res = parser.parse_cmd('this.py -n x+y', context=params) self.assertNotEmpty(res) r = parser.format_help() self.assertIn("usage: test_command", r) self.assertEqual(res.n, 7) def test_class_magic(self): cl = MagicClassWithHelpers() self.assertEmpty(cl.Context) def call_MagicCommandParser(): return MagicCommandParser(prog="parser_unittest") pa = cl.get_parser(call_MagicCommandParser, name="parser_unittest") typstr = str # unicode# pa.add_argument('f', type=typstr, help='filename') pa.add_argument('-n', '--n', type=typstr, default=10, help='number of lines to display') pa.add_argument('-e', '--encoding', default="utf8", help='file encoding') self.assertNotEmpty(pa) cl.add_context({"x": 3, "y": 4}) self.assertEqual(cl.Context, {"x": 3, "y": 4}) res = cl.get_args('this.py -n x+y', pa) if res.n != 7: raise Exception("res.n == {0}\nres={1}".format(res.n, res)) if __name__ == "__main__": unittest.main()
34.582418
133
0.57674
362
3,147
4.861878
0.328729
0.023864
0.006818
0.009091
0.195455
0.157955
0.157955
0.119318
0.119318
0.065909
0
0.013239
0.279949
3,147
90
134
34.966667
0.76346
0.017159
0
0.133333
0
0
0.158442
0
0
0
0
0
0.146667
1
0.093333
false
0
0.066667
0.013333
0.186667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f18bd24ea799cc66f95a6e5ba34ff5211de2b387
30
py
Python
time_execution/__init__.py
snelis/py-timeexecution
f08bf6b9c5307a50b3ee1190f79bf74dc920f8da
[ "Apache-2.0" ]
null
null
null
time_execution/__init__.py
snelis/py-timeexecution
f08bf6b9c5307a50b3ee1190f79bf74dc920f8da
[ "Apache-2.0" ]
null
null
null
time_execution/__init__.py
snelis/py-timeexecution
f08bf6b9c5307a50b3ee1190f79bf74dc920f8da
[ "Apache-2.0" ]
null
null
null
from .time_execution import *
15
29
0.8
4
30
5.75
1
0
0
0
0
0
0
0
0
0
0
0
0.133333
30
1
30
30
0.884615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f18f7a49dc45e6932d4b5a8e61c3228a5317f294
1,129
py
Python
tests/unit/lib/bundle_cli_test.py
kl-chou/codalab-worksheets
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
[ "Apache-2.0" ]
236
2015-12-29T22:50:03.000Z
2022-03-28T21:12:34.000Z
tests/unit/lib/bundle_cli_test.py
kl-chou/codalab-worksheets
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
[ "Apache-2.0" ]
2,628
2015-12-27T09:45:13.000Z
2022-03-30T16:18:25.000Z
tests/unit/lib/bundle_cli_test.py
kl-chou/codalab-worksheets
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
[ "Apache-2.0" ]
87
2015-12-30T01:36:46.000Z
2022-03-08T15:21:30.000Z
import unittest

from codalab.lib.bundle_cli import BundleCLI


class BundleCliTest(unittest.TestCase):
    def setUp(self) -> None:
        self.bundle_cli = BundleCLI

    def tearDown(self) -> None:
        del self.bundle_cli

    def test_collapse_bare_command_empty_args(self):
        argv = ['cl', 'run', '---', 'echo', '']
        expected_result = ['cl', 'run', "echo ''"]
        actual_result = self.bundle_cli.collapse_bare_command(argv)
        self.assertEqual(actual_result, expected_result)

    def test_collapse_bare_command_non_empty_str_args(self):
        argv = ['cl', 'run', '---', 'echo', 'hello']
        expected_result = ['cl', 'run', "echo hello"]
        actual_result = self.bundle_cli.collapse_bare_command(argv)
        self.assertEqual(actual_result, expected_result)

    def test_collapse_bare_command_non_empty_str_args_with_escaped_char(self):
        argv = ['cl', 'run', '---', 'echo', 'hello world!']
        expected_result = ['cl', 'run', "echo 'hello world!'"]
        actual_result = self.bundle_cli.collapse_bare_command(argv)
        self.assertEqual(actual_result, expected_result)
38.931034
78
0.667848
138
1,129
5.130435
0.275362
0.076271
0.161017
0.079096
0.747175
0.663842
0.492938
0.492938
0.492938
0.492938
0
0
0.195748
1,129
28
79
40.321429
0.779736
0
0
0.272727
0
0
0.092117
0
0
0
0
0
0.136364
1
0.227273
false
0
0.090909
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
f18f9e7f23115fa084cf6bfe20683d0f105bb79e
116
py
Python
src/bgpfu/io.py
bgpfu/bgpfu
6dcb236914d49ab8fb595d8a6d300f36ecf1e152
[ "Apache-2.0" ]
12
2017-08-18T14:39:43.000Z
2021-11-21T16:50:45.000Z
src/bgpfu/io.py
bgpfu/bgpfu
6dcb236914d49ab8fb595d8a6d300f36ecf1e152
[ "Apache-2.0" ]
17
2017-04-03T22:51:30.000Z
2021-06-17T12:48:58.000Z
src/bgpfu/io.py
bgpfu/bgpfu
6dcb236914d49ab8fb595d8a6d300f36ecf1e152
[ "Apache-2.0" ]
2
2017-04-04T18:25:22.000Z
2019-07-29T08:36:38.000Z
# import to namespace
from gevent import select, socket  # noqa
from gevent.queue import Empty, Full, Queue  # noqa
29
51
0.758621
17
116
5.176471
0.647059
0.227273
0
0
0
0
0
0
0
0
0
0
0.181034
116
3
52
38.666667
0.926316
0.25
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f1902cfe2ebb699e8ddbec6e13ef11d2cb8b9555
9,563
py
Python
pysnmp/ZYXEL-CLUSTER-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/ZYXEL-CLUSTER-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/ZYXEL-CLUSTER-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
#
# PySNMP MIB module ZYXEL-CLUSTER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-CLUSTER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:43:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Unsigned32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, TimeTicks, Gauge32, NotificationType, Integer32, IpAddress, MibIdentifier, ModuleIdentity, iso, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Unsigned32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "TimeTicks", "Gauge32", "NotificationType", "Integer32", "IpAddress", "MibIdentifier", "ModuleIdentity", "iso", "Counter32")
DisplayString, TextualConvention, MacAddress, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "MacAddress", "RowStatus")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
zyxelCluster = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14))
if mibBuilder.loadTexts: zyxelCluster.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelCluster.setOrganization('Enterprise Solution ZyXEL')
zyxelClusterSetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1))
zyxelClusterStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2))
zyxelClusterManager = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1))
zyClusterManagerMaxNumberOfManagers = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterManagerMaxNumberOfManagers.setStatus('current')
zyxelClusterManagerTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2), )
if mibBuilder.loadTexts: zyxelClusterManagerTable.setStatus('current')
zyxelClusterManagerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterManagerVid"))
if mibBuilder.loadTexts: zyxelClusterManagerEntry.setStatus('current')
zyClusterManagerVid = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: zyClusterManagerVid.setStatus('current')
zyClusterManagerName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyClusterManagerName.setStatus('current')
zyClusterManagerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: zyClusterManagerRowStatus.setStatus('current')
zyxelClusterMembers = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2))
zyClusterMemberMaxNumberOfMembers = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterMemberMaxNumberOfMembers.setStatus('current')
zyxelClusterMemberTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2), )
if mibBuilder.loadTexts: zyxelClusterMemberTable.setStatus('current')
zyxelClusterMemberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterMemberMacAddress"))
if mibBuilder.loadTexts: zyxelClusterMemberEntry.setStatus('current')
zyClusterMemberMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 1), MacAddress())
if mibBuilder.loadTexts: zyClusterMemberMacAddress.setStatus('current')
zyClusterMemberName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterMemberName.setStatus('current')
zyClusterMemberModel = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterMemberModel.setStatus('current')
zyClusterMemberPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyClusterMemberPassword.setStatus('current')
zyClusterMemberRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: zyClusterMemberRowStatus.setStatus('current')
zyxelClusterCandidate = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1))
zyxelClusterCandidateTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1), )
if mibBuilder.loadTexts: zyxelClusterCandidateTable.setStatus('current')
zyxelClusterCandidateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterCandidateMacAddress"))
if mibBuilder.loadTexts: zyxelClusterCandidateEntry.setStatus('current')
zyClusterCandidateMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: zyClusterCandidateMacAddress.setStatus('current')
zyClusterCandidateName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterCandidateName.setStatus('current')
zyClusterCandidateModel = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterCandidateModel.setStatus('current')
zyClusterRole = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("none", 0), ("manager", 1), ("member", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterRole.setStatus('current')
zyClusterInfoManager = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoManager.setStatus('current')
zyxelClusterInfoMemberTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4), )
if mibBuilder.loadTexts: zyxelClusterInfoMemberTable.setStatus('current')
zyxelClusterInfoMemberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterInfoMemberMacAddress"))
if mibBuilder.loadTexts: zyxelClusterInfoMemberEntry.setStatus('current')
zyClusterInfoMemberMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 1), MacAddress())
if mibBuilder.loadTexts: zyClusterInfoMemberMacAddress.setStatus('current')
zyClusterInfoMemberName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoMemberName.setStatus('current')
zyClusterInfoMemberModel = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoMemberModel.setStatus('current')
zyClusterInfoMemberStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("error", 0), ("online", 1), ("offline", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoMemberStatus.setStatus('current')
mibBuilder.exportSymbols("ZYXEL-CLUSTER-MIB", zyClusterManagerMaxNumberOfManagers=zyClusterManagerMaxNumberOfManagers, zyxelClusterCandidateTable=zyxelClusterCandidateTable, zyxelClusterInfoMemberEntry=zyxelClusterInfoMemberEntry, zyClusterMemberModel=zyClusterMemberModel, zyxelClusterMemberEntry=zyxelClusterMemberEntry, zyClusterManagerVid=zyClusterManagerVid, zyClusterCandidateModel=zyClusterCandidateModel, zyClusterMemberRowStatus=zyClusterMemberRowStatus, zyxelClusterStatus=zyxelClusterStatus, zyClusterMemberMaxNumberOfMembers=zyClusterMemberMaxNumberOfMembers, zyClusterManagerName=zyClusterManagerName, zyxelClusterSetup=zyxelClusterSetup, zyClusterMemberPassword=zyClusterMemberPassword, zyxelClusterMembers=zyxelClusterMembers, zyClusterMemberMacAddress=zyClusterMemberMacAddress, zyClusterInfoManager=zyClusterInfoManager, zyClusterInfoMemberName=zyClusterInfoMemberName, zyClusterInfoMemberStatus=zyClusterInfoMemberStatus, zyClusterCandidateMacAddress=zyClusterCandidateMacAddress, zyClusterRole=zyClusterRole, zyxelClusterManagerTable=zyxelClusterManagerTable, zyxelClusterManager=zyxelClusterManager, zyClusterManagerRowStatus=zyClusterManagerRowStatus, zyClusterInfoMemberMacAddress=zyClusterInfoMemberMacAddress, zyxelCluster=zyxelCluster, zyClusterCandidateName=zyClusterCandidateName, PYSNMP_MODULE_ID=zyxelCluster, zyxelClusterCandidateEntry=zyxelClusterCandidateEntry, zyxelClusterCandidate=zyxelClusterCandidate, zyxelClusterInfoMemberTable=zyxelClusterInfoMemberTable, zyClusterMemberName=zyClusterMemberName, zyxelClusterManagerEntry=zyxelClusterManagerEntry, zyxelClusterMemberTable=zyxelClusterMemberTable, zyClusterInfoMemberModel=zyClusterInfoMemberModel)
122.602564
1,687
0.768901
1,059
9,563
6.941454
0.144476
0.010339
0.013468
0.017957
0.367025
0.339954
0.278466
0.249218
0.204326
0.204326
0
0.082206
0.087943
9,563
77
1,688
124.194805
0.760605
0.034299
0
0
0
0
0.113917
0.013657
0
0
0
0
0
1
0
false
0.042857
0.1
0
0.1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
74ac7e93339846ec0bad689ce1665d3b13a2fec4
97
py
Python
config.py
interactionlab/android-notification-drawers
30c40282f57c3c57b5d906997426b23147b87277
[ "MIT" ]
2
2020-09-11T15:39:39.000Z
2020-10-19T16:14:27.000Z
config.py
interactionlab/android-notification-drawers
30c40282f57c3c57b5d906997426b23147b87277
[ "MIT" ]
null
null
null
config.py
interactionlab/android-notification-drawers
30c40282f57c3c57b5d906997426b23147b87277
[ "MIT" ]
null
null
null
NUM_CORES = 8

PATH_VALID_DEVICES = '/path/to/valid.pkl.gz'
PATH_DEVICES_DIR = '/path/to/devices/'
32.333333
44
0.752577
17
97
4
0.588235
0.176471
0
0
0
0
0
0
0
0
0
0.011236
0.082474
97
3
45
32.333333
0.752809
0
0
0
0
0
0.387755
0.214286
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
74adb9d1b6713668874e8ffff93d9e51a052d41f
114
py
Python
main.py
panzihan/comic-spider
2566ed8f725c11fa3f87d7b53da479a530a1bc75
[ "Apache-2.0" ]
null
null
null
main.py
panzihan/comic-spider
2566ed8f725c11fa3f87d7b53da479a530a1bc75
[ "Apache-2.0" ]
null
null
null
main.py
panzihan/comic-spider
2566ed8f725c11fa3f87d7b53da479a530a1bc75
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from scrapy import cmdline

cmdline.execute("scrapy crawl dmzj".split())
22.8
44
0.692982
16
114
4.9375
0.875
0
0
0
0
0
0
0
0
0
0
0.009901
0.114035
114
5
44
22.8
0.772277
0.333333
0
0
0
0
0.226667
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
74aed0a79651f3031398772075cf7ed206fd3979
1,752
py
Python
hypersand/plot.py
deverte/HyperSand
8e1fa4db68689ec9fe108ecc4759a221122a9a80
[ "MIT" ]
1
2020-01-31T15:55:01.000Z
2020-01-31T15:55:01.000Z
hypersand/plot.py
deverte/HyperSand
8e1fa4db68689ec9fe108ecc4759a221122a9a80
[ "MIT" ]
null
null
null
hypersand/plot.py
deverte/HyperSand
8e1fa4db68689ec9fe108ecc4759a221122a9a80
[ "MIT" ]
1
2020-06-24T23:59:54.000Z
2020-06-24T23:59:54.000Z
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

"""
Plotting of graphs.
"""


def plt2d(data, types):
    """
    Builds a two-dimensional plot from an arbitrary number of tables.

    Parameters:
        data - a list of pandas.DataFrame data tables. Each table must have
            two columns: the X axis and the Y axis. Type - list.
        types - a list of plot types. Values: "plot" - points connected by a
            straight line, "scatter" - points only.
    """
    # Set up the axes the plots will be drawn on
    ax = plt.figure().gca()
    # Draw each plot according to its type
    for i in range(len(types)):
        element = data[i]
        keys = element.keys()
        if types[i] == "plot":
            ax.plot(element[keys[0]], element[keys[1]])
        if types[i] == "scatter":
            ax.scatter(element[keys[0]], element[keys[1]])
    plt.show()


def plt3d(data, types):
    """
    Builds a three-dimensional plot from an arbitrary number of tables.

    Parameters:
        data - a list of pandas.DataFrame data tables. Each table must have
            three columns: the X, Y and Z axes. Type - list.
        types - a list of plot types. Values: "plot" - points connected by a
            straight line, "scatter" - points only.
    """
    # Set up the axes the plots will be drawn on.
    # (The original used gca(projection='3d'), which was removed in recent
    # matplotlib; add_subplot(projection='3d') is the equivalent call.)
    ax = plt.figure().add_subplot(projection='3d')
    # Draw each plot according to its type
    for i in range(len(types)):
        element = data[i]
        keys = element.keys()
        if types[i] == "plot":
            ax.plot(element[keys[0]], element[keys[1]], element[keys[2]])
        if types[i] == "scatter":
            ax.scatter(element[keys[0]], element[keys[1]], element[keys[2]])
    plt.show()
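A minimal usage sketch for the module above (the DataFrame contents are invented for illustration; the functions only assume the column order described in the docstrings):

```python
import pandas as pd

df = pd.DataFrame({"x": [0, 1, 2, 3], "y": [0, 1, 4, 9]})
plt2d([df], ["plot"])                   # one line plot of y against x
plt2d([df, df], ["plot", "scatter"])    # overlay two render styles
```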
33.692308
77
0.625571
231
1,752
4.74026
0.341991
0.120548
0.047489
0.069406
0.796347
0.796347
0.796347
0.796347
0.796347
0.774429
0
0.011477
0.253995
1,752
51
78
34.352941
0.82632
0.462329
0
0.521739
0
0
0.028402
0
0
0
0
0
0
1
0.086957
false
0
0.130435
0
0.217391
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
74af94c08770ecd85e6abea27c4f5fa08a581c3a
1,479
py
Python
dcm2lungwin.py
hbuck1996/CT-Window-adjustment
fc5387d42cb4f4299a074aad7aff43cf396578d3
[ "MIT" ]
1
2021-11-05T08:37:09.000Z
2021-11-05T08:37:09.000Z
dcm2lungwin.py
hbuck1996/CT-Window-adjustment
fc5387d42cb4f4299a074aad7aff43cf396578d3
[ "MIT" ]
null
null
null
dcm2lungwin.py
hbuck1996/CT-Window-adjustment
fc5387d42cb4f4299a074aad7aff43cf396578d3
[ "MIT" ]
null
null
null
# -*- coding=utf-8 -*-
import pydicom
import os
import numpy
from os.path import splitext
import PIL.Image as Image


def getfile(file):
    dcm = pydicom.dcmread(file)
    img2 = dcm.pixel_array * dcm.RescaleSlope + dcm.RescaleIntercept
    return img2


def get_window_size(window_type):
    if window_type == 'lung':  # lung window
        center = -600
        width = 1200
    elif window_type == 'Mediastinal':  # mediastinal window
        center = 40
        width = 400
    return center, width


# Adjust the window width/level of a CT image.
def setDicomWinWidthWinCenter(img_data, window_type):
    img_temp = img_data
    rows = len(img_temp)
    cols = len(img_temp[0])
    center, width = get_window_size(window_type)
    img_temp.flags.writeable = True
    # Renamed from min/max to avoid shadowing the built-ins.
    min_value = (2 * center - width) / 2.0 + 0.5
    max_value = (2 * center + width) / 2.0 + 0.5
    dFactor = 255.0 / (max_value - min_value)
    for i in numpy.arange(rows):
        for j in numpy.arange(cols):
            img_temp[i, j] = int((img_temp[i, j] - min_value) * dFactor)
    min_index = img_temp < 0
    img_temp[min_index] = 0
    max_index = img_temp > 255
    img_temp[max_index] = 255
    return img_temp


pathin = 'dcmin/'
pathout = 'dcmout/'
for root, dirs, files in os.walk(pathin):
    for i in range(len(files)):
        filename = files[i]
        im = getfile(pathin + filename)
        im1 = setDicomWinWidthWinCenter(im, 'lung')
        dcm_img = Image.fromarray(im1)
        dcm_img = dcm_img.convert('L')
        output = splitext(files[i])[0] + "." + "png"
        dcm_img.save(pathout + output)
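The per-pixel double loop above is very slow for full CT slices. A vectorized sketch of the same windowing (not from the original script; note `max_value - min_value` reduces to `width`, so the scale factor is `255 / width`, and clipping then truncating gives the same result as the loop's per-pixel `int()` plus clamping):

```python
import numpy as np

def set_window_vectorized(img_data, window_type):
    center, width = get_window_size(window_type)
    low = (2 * center - width) / 2.0 + 0.5
    scaled = (img_data - low) * (255.0 / width)
    return np.clip(scaled, 0, 255).astype(np.uint8)
```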
26.890909
68
0.634212
213
1,479
4.258216
0.389671
0.084895
0.028666
0.041896
0.085998
0.035281
0.035281
0
0
0
0
0.03664
0.243408
1,479
54
69
27.388889
0.773905
0.024341
0
0
0
0
0.025748
0
0
0
0
0
0
1
0.066667
false
0
0.111111
0
0.244444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74afe6d7dc2aacf9fcf663af7925235779355a78
1,413
py
Python
setup.py
legale/prepack
8ad82fb252bc7e985175b2b9966aa7c0d217f72c
[ "MIT" ]
null
null
null
setup.py
legale/prepack
8ad82fb252bc7e985175b2b9966aa7c0d217f72c
[ "MIT" ]
null
null
null
setup.py
legale/prepack
8ad82fb252bc7e985175b2b9966aa7c0d217f72c
[ "MIT" ]
null
null
null
from setuptools import setup

setup(name='prepack',
      version='0.4.2',
      description='Python excel based data preparation library',
      long_description="Library for preparing data for analysis. "
                       "Allows you to load and easily filter many same structure csv or xls, xlsx files. "
                       "Allows matching tables by incomplete row matching over the shortest Levenshtein "
                       "distance, just like Pandas df.merge()",
      url='http://github.com/legale/prepack',
      author='rumi',
      author_email='legale.legale@gmail.com',
      license='MIT',
      packages=['prepack'],
      zip_safe=False,
      install_requires=['numpy', 'pandas', 'python-levenshtein', 'xlrd'],
      keywords=['xls', 'excel', 'parser', 'pandas', 'data preparation'],
      classifiers=[
          'Operating System :: OS Independent',
          'Development Status :: 3 - Alpha',  # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
          'Intended Audience :: Developers',  # Define that your audience are developers
          'Intended Audience :: End Users/Desktop',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'License :: OSI Approved :: MIT License',  # Again, pick a license
          'Programming Language :: Python :: 3',
      ],
      )
48.724138
114
0.604388
152
1,413
5.592105
0.743421
0.035294
0
0
0
0
0
0
0
0
0
0.007859
0.279547
1,413
29
115
48.724138
0.827112
0.115357
0
0
0
0
0.560545
0.018444
0
0
0
0
0
1
0
true
0
0.038462
0
0.038462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
74b08192a0962af59339ae5fc75dfd53de367b84
1,600
py
Python
utils/ansi_text.py
hkkwok/MachOTool
469c0fd06199356fcc6d68809c7ba15a12eac1fd
[ "Apache-2.0" ]
12
2016-01-08T22:35:14.000Z
2019-07-29T11:50:41.000Z
utils/ansi_text.py
uvbs/MachOTool
469c0fd06199356fcc6d68809c7ba15a12eac1fd
[ "Apache-2.0" ]
2
2015-12-10T21:28:04.000Z
2019-10-15T10:05:19.000Z
utils/ansi_text.py
uvbs/MachOTool
469c0fd06199356fcc6d68809c7ba15a12eac1fd
[ "Apache-2.0" ]
6
2016-10-10T05:29:41.000Z
2019-10-15T09:59:17.000Z
class AnsiText(object):
    ENABLE_COLOR = True
    COLORS = {'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
              'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37}
    BOLD = 1
    UNDERLINE = 4

    def __init__(self, text, **kwargs):
        self.text = text
        self.color = None
        self.bold = False
        self.underline = False
        if 'color' in kwargs:
            color = kwargs['color']
            if color not in self.COLORS:
                raise IndexError('unknown color %s' % color)
            self.color = color
        if 'bold' in kwargs:
            value = kwargs['bold']
            if not isinstance(value, bool):
                raise TypeError('bold must be a bool')
            self.bold = value
        if 'underline' in kwargs:
            value = kwargs['underline']
            if not isinstance(value, bool):
                raise TypeError('underline must be a bool')
            self.underline = value

    def __repr__(self):
        esc = '\x1b['
        output = str(self.text)
        if not self.ENABLE_COLOR:
            return output
        ansi_codes = list()
        if self.bold:
            ansi_codes.append(self.BOLD)
        if self.color is not None:
            assert self.color in self.COLORS
            ansi_codes.append(self.COLORS[self.color])
        if self.underline:
            ansi_codes.append(self.UNDERLINE)
        output = esc + ';'.join([str(x) for x in ansi_codes]) + 'm' + output + esc + '0m'
        return output
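A short usage sketch for the class above (printing relies on the class's `__repr__`, which Python also uses for `str()` when no `__str__` is defined):

```python
print(AnsiText('error', color='red', bold=True))
print(AnsiText('heading', color='cyan', underline=True))

AnsiText.ENABLE_COLOR = False  # e.g. when stdout is not a TTY
print(AnsiText('error', color='red'))  # falls back to plain text
```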
30.769231
89
0.5025
180
1,600
4.383333
0.361111
0.057034
0.057034
0.072243
0.134347
0.096324
0.096324
0
0
0
0
0.020555
0.391875
1,600
51
90
31.372549
0.790339
0
0
0.085106
0
0
0.089375
0
0
0
0
0
0.021277
1
0.042553
false
0
0
0
0.191489
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74b16a53c0b40e421151135394463431087aac11
600
py
Python
groupy/exc.py
zorkian/groupy
c5d112a936e246cba1af14738a7201dfd1257069
[ "Apache-2.0" ]
null
null
null
groupy/exc.py
zorkian/groupy
c5d112a936e246cba1af14738a7201dfd1257069
[ "Apache-2.0" ]
null
null
null
groupy/exc.py
zorkian/groupy
c5d112a936e246cba1af14738a7201dfd1257069
[ "Apache-2.0" ]
null
null
null
class Error(Exception):
    pass


class BackendError(Error):
    def __init__(self, message, server):
        self.message = message
        self.server = server

    def __str__(self):
        # The original applied the "%" operator to a "{}"-style template,
        # which raises TypeError at format time; use str.format instead.
        return "({}:{}) - {}".format(
            self.server.hostname,
            self.server.port,
            self.message
        )


class BackendConnectionError(BackendError):
    pass


class BackendIntegrityError(BackendError):
    pass


class TimeTravelNotAllowed(BackendError):
    pass


class BackendMaxDriftError(BackendError):
    pass


class ResourceError(Error):
    pass


class ResourceNotFound(ResourceError):
    pass
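A quick sketch of the exception in use (the server object here is a hypothetical stand-in; the class only requires `.hostname` and `.port` attributes):

```python
class _FakeServer:  # hypothetical stand-in for a real backend server object
    hostname = 'db1.internal'
    port = 5432

err = BackendConnectionError('connection reset', _FakeServer())
print(err)  # (db1.internal:5432) - connection reset
```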
15.789474
64
0.67
54
600
7.296296
0.37037
0.137056
0.213198
0
0
0
0
0
0
0
0
0
0.233333
600
37
65
16.216216
0.856522
0
0
0.318182
0
0
0.02
0
0
0
0
0
0
1
0.090909
false
0.318182
0
0.045455
0.5
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
74b378062d50de8d6fa4de92edd93991772320d6
330
py
Python
Python/Learn Python The Hard Way/ex32.py
Vayne-Lover/Effective
05f0a08bec8eb112fdb4e7a489d0e33bc81522ff
[ "MIT" ]
null
null
null
Python/Learn Python The Hard Way/ex32.py
Vayne-Lover/Effective
05f0a08bec8eb112fdb4e7a489d0e33bc81522ff
[ "MIT" ]
null
null
null
Python/Learn Python The Hard Way/ex32.py
Vayne-Lover/Effective
05f0a08bec8eb112fdb4e7a489d0e33bc81522ff
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']

for i in the_count:
    print(i)

for j in fruits:
    print(j)

for k in change:
    print(k)

element = []
for i in range(6):
    element.append(i)

for e in element:
    print(e)
13.2
51
0.587879
56
330
3.428571
0.535714
0.083333
0.0625
0
0
0
0
0
0
0
0
0.03861
0.215152
330
24
52
13.75
0.702703
0.063636
0
0
0
0
0.149837
0
0
0
0
0
0
1
0
false
0
0
0
0
0.285714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74b3f7e3888622737e8eff19f6ae59cf2977b401
678
py
Python
agr0_sh0p/sendemail/sg_verify.py
Mikhail-Kushnerev/agr0-sh0p
b0bea7aef7166ae04691a6b575658a1fb16543ca
[ "BSD-3-Clause" ]
null
null
null
agr0_sh0p/sendemail/sg_verify.py
Mikhail-Kushnerev/agr0-sh0p
b0bea7aef7166ae04691a6b575658a1fb16543ca
[ "BSD-3-Clause" ]
null
null
null
agr0_sh0p/sendemail/sg_verify.py
Mikhail-Kushnerev/agr0-sh0p
b0bea7aef7166ae04691a6b575658a1fb16543ca
[ "BSD-3-Clause" ]
null
null
null
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail

APIKEY = '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'

message = Mail(
    from_email='muxa2k11@gmail.com',
    to_emails='mikushnerev@stud.etu.ru',
    subject='Sending with Twilio SendGrid is Fun',
    html_content='<strong>and easy to do anywhere, even with Python</strong>'
)
try:
    sg = SendGridAPIClient(APIKEY)
    response = sg.send(message)
    print(response.status_code)
    print(response.body)
    print(response.headers)
except Exception as e:
    # Exception objects have no .message attribute in Python 3;
    # print the exception itself instead.
    print(e)
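Hardcoding the API key as above leaks it with the source. A safer sketch, using the `SENDGRID_API_KEY` environment variable that SendGrid's own documentation conventionally uses (the script already imports `os` but never used it):

```python
import os
from sendgrid import SendGridAPIClient

sg = SendGridAPIClient(os.environ['SENDGRID_API_KEY'])
```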
28.25
80
0.756637
85
678
5.976471
0.647059
0.076772
0
0
0
0
0
0
0
0
0
0.02931
0.144543
678
23
81
29.478261
0.846552
0.110619
0
0
0
0
0.338333
0.153333
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74b48488924a2302ea18239cc158782bc978ef6c
2,324
py
Python
setup.py
rhys-newbury/swift
60154b0441c468e3c15225c6820158d52ade98bf
[ "MIT" ]
25
2020-10-09T05:20:21.000Z
2022-02-27T19:28:14.000Z
setup.py
rhys-newbury/swift
60154b0441c468e3c15225c6820158d52ade98bf
[ "MIT" ]
19
2020-10-28T22:56:55.000Z
2022-03-23T11:26:41.000Z
setup.py
rhys-newbury/swift
60154b0441c468e3c15225c6820158d52ade98bf
[ "MIT" ]
8
2021-03-02T16:13:46.000Z
2021-12-22T10:50:35.000Z
from setuptools import setup, find_packages, Extension
from os import path
import os

# fmt: off
import pip

pip.main(['install', 'numpy>=1.18.0'])
import numpy
# fmt: on

here = path.abspath(path.dirname(__file__))

req = ["numpy>=1.18.0", "spatialgeometry>=0.2.0", "websockets"]

# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()


def package_files(directory):
    paths = []
    for (pathhere, _, filenames) in os.walk(directory):
        for filename in filenames:
            paths.append(os.path.join("..", pathhere, filename))
    return paths


extra_folders = [
    "swift/out",
    "swift/core",
]

extra_files = []
for extra_folder in extra_folders:
    extra_files += package_files(extra_folder)

phys = Extension(
    "phys",
    sources=["./swift/core/phys.c"],
    include_dirs=["./swift/core/", numpy.get_include()],
)

setup(
    name="swift-sim",
    version="0.10.0",
    description="A Python/Javascript Visualiser",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jhavl/swift",
    author="Jesse Haviland",
    license="MIT",
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        "Development Status :: 3 - Alpha",
        # Indicate who your project is intended for
        "Intended Audience :: Developers",
        # Pick your license as you wish (should match "license" above)
        "License :: OSI Approved :: MIT License",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    python_requires=">=3.6",
    keywords="python robotics robotics-toolbox kinematics dynamics"
    " motion-planning trajectory-generation jacobian hessian"
    " control simulation robot-manipulator mobile-robot",
    packages=find_packages(exclude=["tests", "examples"]),
    package_data={"swift": extra_files},
    # include_package_data=True,
    ext_modules=[phys],
    install_requires=req,
)
29.794872
77
0.654905
285
2,324
5.235088
0.533333
0.050268
0.067024
0.069705
0
0
0
0
0
0
0
0.017486
0.212565
2,324
77
78
30.181818
0.797814
0.160499
0
0
0
0
0.345023
0.022176
0
0
0
0
0
1
0.018182
false
0
0.090909
0
0.127273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74b4dd2e89725f56b4afa9fad54d5b059478cdab
1,669
py
Python
exception_reports/decorators.py
CircleUp/exception-reports
283d1700020c8d2558cbb2ac2a398409de31f442
[ "BSD-3-Clause" ]
1
2018-01-05T06:53:11.000Z
2018-01-05T06:53:11.000Z
exception_reports/decorators.py
CircleUp/exception-reports
283d1700020c8d2558cbb2ac2a398409de31f442
[ "BSD-3-Clause" ]
6
2018-01-05T14:40:03.000Z
2019-04-10T22:42:55.000Z
exception_reports/decorators.py
CircleUp/exception-reports
283d1700020c8d2558cbb2ac2a398409de31f442
[ "BSD-3-Clause" ]
null
null
null
import sys

from decorator import decorator

from exception_reports.reporter import append_to_exception_message, create_exception_report
from exception_reports.storages import LocalErrorStorage


def exception_report(storage_backend=LocalErrorStorage(), output_format="html", data_processor=None):
    """Decorator for creating detailed exception reports for thrown exceptions

    Usage:
        @exception_report()
        def foobar(text):
            raise Exception("bad things!!")

        foobar('hi')

    Output:
        Exception: bad things!! [report:/tmp/python-error-reports/2018-01-05_06:15:56.218190+00:00_0773698470164da3b2c427d8832dac13.html]

    S3 Usage:
        @exception_report()
        def foobar(text):
            raise Exception("bad things!!")

        foobar('hi')
    """

    def _exception_reports(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            exc_type, exc_value, tb = sys.exc_info()
            report_location = create_exception_report(
                exc_type, exc_value, tb, output_format,
                storage_backend=storage_backend, data_processor=data_processor)
            e = append_to_exception_message(e, tb, f"[report:{report_location}]")
            setattr(e, "report", report_location)
            # We want to raise the original exception:
            # 1) with a modified message containing the report location
            # 2) with the original traceback
            # 3) without it showing an extra chained exception because of
            #    this handling (`from None` accomplishes this)
            raise e from None

    return decorator(_exception_reports)
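A runnable sketch of the decorator in use, mirroring the docstring's example; the `report` attribute is set by the `setattr` call above:

```python
from exception_reports.decorators import exception_report

@exception_report()
def foobar(text):
    raise Exception('bad things!!')

try:
    foobar('hi')
except Exception as e:
    print(e.report)  # location of the generated HTML report
```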
31.490566
157
0.672259
194
1,669
5.592784
0.448454
0.073733
0.04977
0.04424
0.149309
0.117972
0.117972
0.117972
0.117972
0.117972
0
0.041237
0.244458
1,669
52
158
32.096154
0.819191
0.41462
0
0
0
0
0.03956
0.028571
0
0
0
0
0
1
0.133333
false
0
0.266667
0
0.533333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
74b7830f4dda15ada9843cfeaccea42f032c1f19
1,408
py
Python
E9/contact_manager/contact/forms.py
wendy006/Web-Dev-Course
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
[ "MIT" ]
null
null
null
E9/contact_manager/contact/forms.py
wendy006/Web-Dev-Course
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
[ "MIT" ]
null
null
null
E9/contact_manager/contact/forms.py
wendy006/Web-Dev-Course
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
[ "MIT" ]
null
null
null
from django import forms

# class RenewBookForm(forms.Form):
#     renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default 3).")

# class NameForm(forms.Form):
#     your_name = forms.CharField(label='Your name', max_length=100)

# class UserForm(forms.Form):
#     username = forms.CharField(label="用户名", max_length=128)  # label: "username"
#     password = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput)  # label: "password"


class edit_person_form(forms.Form):
    # class Meta:
    #     model = User
    #     fields = []
    first_name = forms.CharField(label="first_name", max_length=200, required=True)
    last_name = forms.CharField(label="last_name", max_length=200, required=True)
    email = forms.CharField(label="email", max_length=200, required=True)
    phone_number = forms.CharField(label="phone number", max_length=200, required=True)
    notes = forms.CharField(label="notes", max_length=200, required=True)


class add_person_form(forms.Form):
    first_name = forms.CharField(label="first_name", max_length=200, required=True)
    last_name = forms.CharField(label="last_name", max_length=200, required=True)
    email = forms.CharField(label="email", max_length=200, required=True)
    phone_number = forms.CharField(label="phone number", max_length=200, required=True)
    notes = forms.CharField(label="notes", max_length=200, required=True)
36.102564
99
0.715199
190
1,408
5.142105
0.284211
0.186285
0.252815
0.204708
0.595701
0.595701
0.595701
0.595701
0.595701
0.595701
0
0.034396
0.153409
1,408
38
100
37.052632
0.785235
0.314631
0
0.769231
0
0
0.088328
0
0
0
0
0
0
1
0
false
0
0.076923
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
74ba731ff054eb15ac2f02bf27cba0343a1c4050
1,510
py
Python
src/opera/parser/tosca/__init__.py
sstanovnik/xopera-opera
06031d37268913c6ba6dbc30ec6b4acb3a17dc5a
[ "Apache-2.0" ]
null
null
null
src/opera/parser/tosca/__init__.py
sstanovnik/xopera-opera
06031d37268913c6ba6dbc30ec6b4acb3a17dc5a
[ "Apache-2.0" ]
null
null
null
src/opera/parser/tosca/__init__.py
sstanovnik/xopera-opera
06031d37268913c6ba6dbc30ec6b4acb3a17dc5a
[ "Apache-2.0" ]
null
null
null
from pathlib import PurePath
import importlib

from opera import stdlib
from opera.error import ParseError
from opera.parser import yaml

SUPPORTED_VERSIONS = dict(
    tosca_simple_yaml_1_3="v_1_3",
)


def load(base_path, template_name):
    with (base_path / template_name).open() as input_fd:
        input_yaml = yaml.load(input_fd, str(template_name))
    if not isinstance(input_yaml.value, dict):
        # The original referenced an undefined yaml_node here; the parsed
        # document is input_yaml.
        raise ParseError(
            "Top level structure should be a map.", input_yaml.loc,
        )

    tosca_version = _get_tosca_version(input_yaml)
    parser = _get_parser(tosca_version)

    stdlib_yaml = stdlib.load(tosca_version)
    service = parser.parse(stdlib_yaml, base_path, PurePath("STDLIB"))
    service.merge(parser.parse(input_yaml, base_path, PurePath()))
    service.visit("resolve_path", base_path)
    service.visit("resolve_reference", service)

    return service


def _get_parser(tosca_version):
    return importlib.import_module(".v_1_3", __name__).Parser


def _get_tosca_version(input_yaml):
    for k, v in input_yaml.value.items():
        if k.value == "tosca_definitions_version":
            try:
                return SUPPORTED_VERSIONS[v.value]
            except (TypeError, KeyError):
                raise ParseError(
                    "Invalid TOSCA version. Available: {}.".format(
                        ", ".join(SUPPORTED_VERSIONS.keys()),
                    ),
                    v.loc,
                )
    raise ParseError("Missing TOSCA version", input_yaml.loc)
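A hedged usage sketch for `load` (the directory and file names are placeholders; `base_path` just has to support the `/` operator and `.open()`, e.g. a `pathlib.Path`):

```python
from pathlib import Path

# 'my-project/service.yaml' is assumed to be a TOSCA Simple Profile 1.3 file.
service = load(Path('my-project'), 'service.yaml')
```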
29.607843
70
0.662914
187
1,510
5.064171
0.374332
0.101373
0.053854
0.066526
0.050686
0
0
0
0
0
0
0.005226
0.239735
1,510
50
71
30.2
0.819686
0
0
0.054054
0
0
0.110596
0.016556
0
0
0
0
0
1
0.081081
false
0
0.162162
0.027027
0.324324
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74bab9faf0636dab82e5b1e9553de1e5cede8ce6
18,100
py
Python
project/upload/views.py
beijbom/coralnet
c3f4a44eeb60cb41a079329a0068dc8b34096e89
[ "BSD-2-Clause" ]
31
2019-12-08T14:22:52.000Z
2021-12-27T04:58:12.000Z
project/upload/views.py
beijbom/coralnet
c3f4a44eeb60cb41a079329a0068dc8b34096e89
[ "BSD-2-Clause" ]
193
2019-12-07T23:27:43.000Z
2022-03-05T08:05:46.000Z
project/upload/views.py
beijbom/coralnet
c3f4a44eeb60cb41a079329a0068dc8b34096e89
[ "BSD-2-Clause" ]
null
null
null
import json
from datetime import timedelta

from django.conf import settings
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils.timezone import now

from accounts.utils import get_imported_user
from annotations.model_utils import AnnotationAreaUtils
from annotations.models import Annotation
from images.forms import MetadataForm
from images.model_utils import PointGen
from images.models import Source, Metadata, Image, Point
from images.utils import metadata_obj_to_dict, get_aux_labels, \
    metadata_field_names_to_labels
from lib.decorators import source_permission_required, source_labelset_required
from lib.exceptions import FileProcessError
from lib.forms import get_one_form_error
from lib.utils import filesize_display
from visualization.forms import ImageSpecifyByIdForm
from .forms import (
    CPCImportForm, CSVImportForm, ImageUploadForm, ImageUploadFrontendForm)
from .utils import (
    annotations_cpcs_to_dict, annotations_csv_to_dict, annotations_preview,
    find_dupe_image, metadata_csv_to_dict, metadata_preview,
    upload_image_process)
import vision_backend.tasks as backend_tasks


@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_portal(request, source_id):
    """
    Page which points to the pages for the three different upload types.
    """
    if request.method == 'POST':
        if request.POST.get('images'):
            return HttpResponseRedirect(
                reverse('upload_images', args=[source_id]))
        if request.POST.get('metadata'):
            return HttpResponseRedirect(
                reverse('upload_metadata', args=[source_id]))
        if request.POST.get('annotations_cpc'):
            return HttpResponseRedirect(
                reverse('upload_annotations_cpc', args=[source_id]))
        if request.POST.get('annotations_csv'):
            return HttpResponseRedirect(
                reverse('upload_annotations_csv', args=[source_id]))

    source = get_object_or_404(Source, id=source_id)
    return render(request, 'upload/upload_portal.html', {
        'source': source,
    })


@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_images(request, source_id):
    """
    Upload images to a source. This view is for the non-Ajax frontend.
    """
    source = get_object_or_404(Source, id=source_id)

    images_form = ImageUploadFrontendForm()
    proceed_to_manage_metadata_form = ImageSpecifyByIdForm(source=source)

    auto_generate_points_message = (
        "We will generate points for the images you upload.\n"
        "Your Source's point generation settings: {pointgen}\n"
        "Your Source's annotation area settings: {annoarea}").format(
            pointgen=PointGen.db_to_readable_format(
                source.default_point_generation_method),
            annoarea=AnnotationAreaUtils.db_format_to_display(
                source.image_annotation_area),
        )

    return render(request, 'upload/upload_images.html', {
        'source': source,
        'images_form': images_form,
        'proceed_to_manage_metadata_form': proceed_to_manage_metadata_form,
        'auto_generate_points_message': auto_generate_points_message,
        'image_upload_max_file_size': filesize_display(
            settings.IMAGE_UPLOAD_MAX_FILE_SIZE),
    })


@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_images_preview_ajax(request, source_id):
    """
    Preview the images that are about to be uploaded.
    Check to see if there's any problems with the filenames or file sizes.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))

    source = get_object_or_404(Source, id=source_id)

    file_info_list = json.loads(request.POST.get('file_info'))

    statuses = []
    for file_info in file_info_list:
        dupe_image = find_dupe_image(source, file_info['filename'])
        if dupe_image:
            statuses.append(dict(
                error="Image with this name already exists",
                url=reverse('image_detail', args=[dupe_image.id]),
            ))
        elif file_info['size'] > settings.IMAGE_UPLOAD_MAX_FILE_SIZE:
            statuses.append(dict(
                error="Exceeds size limit of {limit}".format(
                    limit=filesize_display(
                        settings.IMAGE_UPLOAD_MAX_FILE_SIZE))
            ))
        else:
            statuses.append(dict(
                ok=True,
            ))

    return JsonResponse(dict(
        statuses=statuses,
    ))


@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_images_ajax(request, source_id):
    """
    After the "Start upload" button is clicked, this view is entered once
    for each image file. This view saves the image to the database
    and media storage.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))

    source = get_object_or_404(Source, id=source_id)

    # Retrieve image related fields
    image_form = ImageUploadForm(request.POST, request.FILES)

    # Check for validity of the file (filetype and non-corruptness) and
    # the options forms.
    if not image_form.is_valid():
        # Examples of errors: filetype is not an image,
        # file is corrupt, file is empty, etc.
        return JsonResponse(dict(
            error=get_one_form_error(image_form),
        ))

    img = upload_image_process(
        image_file=image_form.cleaned_data['file'],
        image_name=image_form.cleaned_data['name'],
        source=source,
        current_user=request.user,
    )

    backend_tasks.submit_features.apply_async(
        args=[img.id],
        eta=(now() + timedelta(minutes=1)),
    )

    return JsonResponse(dict(
        success=True,
        link=reverse('image_detail', args=[img.id]),
        image_id=img.id,
    ))


@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_metadata(request, source_id):
    """
    Set image metadata by uploading a CSV file containing the metadata.
    This view is for the non-Ajax frontend.
    """
    source = get_object_or_404(Source, id=source_id)

    csv_import_form = CSVImportForm()

    return render(request, 'upload/upload_metadata.html', {
        'source': source,
        'csv_import_form': csv_import_form,
        'field_labels': metadata_field_names_to_labels(source).values(),
        'aux_field_labels': get_aux_labels(source),
    })


@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_metadata_preview_ajax(request, source_id):
    """
    Set image metadata by uploading a CSV file containing the metadata.

    This view takes the CSV file, processes it, saves the processed metadata
    to the session, and returns a preview table of the metadata to be saved.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))

    source = get_object_or_404(Source, id=source_id)

    csv_import_form = CSVImportForm(request.POST, request.FILES)
    if not csv_import_form.is_valid():
        return JsonResponse(dict(
            error=csv_import_form.errors['csv_file'][0],
        ))

    try:
        # Dict of (metadata ids -> dicts of (column name -> value))
        csv_metadata = metadata_csv_to_dict(
            csv_import_form.get_csv_stream(), source)
    except FileProcessError as error:
        return JsonResponse(dict(
            error=str(error),
        ))

    preview_table, preview_details = \
        metadata_preview(csv_metadata, source)

    request.session['csv_metadata'] = csv_metadata

    return JsonResponse(dict(
        success=True,
        previewTable=preview_table,
        previewDetails=preview_details,
    ))


@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_metadata_ajax(request, source_id):
    """
    Set image metadata by uploading a CSV file containing the metadata.

    This view gets the metadata that was previously saved to the session
    by the upload-preview view. Then it saves the metadata to the database.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))

    source = get_object_or_404(Source, id=source_id)

    csv_metadata = request.session.pop('csv_metadata', None)
    if not csv_metadata:
        return JsonResponse(dict(
            error=(
                "We couldn't find the expected data in your session."
                " Please try loading this page again. If the problem persists,"
                " let us know on the forum."
            ),
        ))

    for metadata_id, csv_metadata_for_image in csv_metadata.items():
        metadata = Metadata.objects.get(pk=metadata_id, image__source=source)
        new_metadata_dict = metadata_obj_to_dict(metadata)
        new_metadata_dict.update(csv_metadata_for_image)

        metadata_form = MetadataForm(
            new_metadata_dict, instance=metadata, source=source)

        # We already validated previously, so this SHOULD be valid.
        if not metadata_form.is_valid():
            raise ValueError("Metadata became invalid for some reason.")

        metadata_form.save()

    return JsonResponse(dict(
        success=True,
    ))


@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_csv(request, source_id):
    source = get_object_or_404(Source, id=source_id)

    csv_import_form = CSVImportForm()

    return render(request, 'upload/upload_annotations_csv.html', {
        'source': source,
        'csv_import_form': csv_import_form,
    })


@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_csv_preview_ajax(request, source_id):
    """
    Add points/annotations to images by uploading a CSV file.

    This view takes the CSV file, processes it, saves the processed data
    to the session, and returns a preview table of the data to be saved.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))

    source = get_object_or_404(Source, id=source_id)

    csv_import_form = CSVImportForm(request.POST, request.FILES)
    if not csv_import_form.is_valid():
        return JsonResponse(dict(
            error=csv_import_form.errors['csv_file'][0],
        ))

    try:
        csv_annotations = annotations_csv_to_dict(
            csv_import_form.get_csv_stream(), source)
    except FileProcessError as error:
        return JsonResponse(dict(
            error=str(error),
        ))

    preview_table, preview_details = \
        annotations_preview(csv_annotations, source)

    request.session['uploaded_annotations'] = csv_annotations

    return JsonResponse(dict(
        success=True,
        previewTable=preview_table,
        previewDetails=preview_details,
    ))


@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_cpc(request, source_id):
    source = get_object_or_404(Source, id=source_id)

    cpc_import_form = CPCImportForm(source)

    return render(request, 'upload/upload_annotations_cpc.html', {
        'source': source,
        'cpc_import_form': cpc_import_form,
    })


@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_cpc_preview_ajax(request, source_id):
    """
    Add points/annotations to images by uploading Coral Point Count files.

    This view takes multiple .cpc files, processes them, saves the processed
    data to the session, and returns a preview table of the data to be saved.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))

    source = get_object_or_404(Source, id=source_id)

    cpc_import_form = CPCImportForm(source, request.POST, request.FILES)
    if not cpc_import_form.is_valid():
        return JsonResponse(dict(
            error=cpc_import_form.errors['cpc_files'][0],
        ))

    try:
        cpc_info = annotations_cpcs_to_dict(
            cpc_import_form.get_cpc_names_and_streams(), source,
            cpc_import_form.cleaned_data['plus_notes'])
    except FileProcessError as error:
        return JsonResponse(dict(
            error=str(error),
        ))

    preview_table, preview_details = \
        annotations_preview(cpc_info['annotations'], source)

    request.session['uploaded_annotations'] = cpc_info['annotations']
    request.session['cpc_info'] = cpc_info

    return JsonResponse(dict(
        success=True,
        previewTable=preview_table,
        previewDetails=preview_details,
    ))


@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_ajax(request, source_id):
    """
    This view gets the annotation data that was previously saved to the
    session by upload-preview-csv or upload-preview-cpc. Then it saves the
    data to the database, while deleting all previous points/annotations
    for the images involved.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))

    source = get_object_or_404(Source, id=source_id)

    uploaded_annotations = request.session.pop('uploaded_annotations', None)
    if not uploaded_annotations:
        return JsonResponse(dict(
            error=(
                "We couldn't find the expected data in your session."
                " Please try loading this page again. If the problem persists,"
                " let us know on the forum."
            ),
        ))

    cpc_info = request.session.pop('cpc_info', None)

    for image_id, annotations_for_image in uploaded_annotations.items():
        img = Image.objects.get(pk=image_id, source=source)

        # Delete previous annotations and points for this image.
        # Calling delete() on these querysets is more efficient
        # than calling delete() on each of the individual objects.
        Annotation.objects.filter(image=img).delete()
        Point.objects.filter(image=img).delete()

        # Create new points and annotations.
        new_points = []
        new_annotations = []

        for num, point_dict in enumerate(annotations_for_image, 1):
            # Create a Point.
            point = Point(
                row=point_dict['row'], column=point_dict['column'],
                point_number=num, image=img)
            new_points.append(point)
        # Save to DB with an efficient bulk operation.
        Point.objects.bulk_create(new_points)

        for num, point_dict in enumerate(annotations_for_image, 1):
            # Create an Annotation if a label is specified.
            if 'label' in point_dict:
                label_obj = source.labelset.get_global_by_code(
                    point_dict['label'])
                # TODO: Django 1.10 can set database IDs on newly created
                # objects, so re-fetching the points may not be needed:
                # https://docs.djangoproject.com/en/dev/releases/1.10/#database-backends
                new_annotations.append(Annotation(
                    point=Point.objects.get(point_number=num, image=img),
                    image=img, source=source, label=label_obj,
                    user=get_imported_user()))

        # Do NOT bulk-create the annotations so that the versioning signals
        # (for annotation history) do not get bypassed. Create them one by one.
        for annotation in new_annotations:
            annotation.save()

        # Update relevant image/metadata fields.
        img.point_generation_method = PointGen.args_to_db_format(
            point_generation_type=PointGen.Types.IMPORTED,
            imported_number_of_points=len(new_points)
        )

        if cpc_info:
            # We uploaded annotations as CPC. Save contents for future CPC
            # exports.
            # Note: Since cpc_info went through session serialization,
            # dicts with integer keys have had their keys stringified.
            img.cpc_content = cpc_info['cpc_contents'][str(img.pk)]
            img.cpc_filename = cpc_info['cpc_filenames'][str(img.pk)]
        else:
            # We uploaded CSV. Any CPC we had saved previously no longer has
            # the correct point positions, so we'll just discard the CPC.
            img.cpc_content = ''
            img.cpc_filename = ''
        img.save()

        img.metadata.annotation_area = AnnotationAreaUtils.IMPORTED_STR
        img.metadata.save()

        # Submit job with 1 hour delay to allow the view and thus DB
        # transaction to conclude before jobs are submitted.
        # Details: https://github.com/beijbom/coralnet-system/issues/31.
        backend_tasks.reset_features.apply_async(
            args=[img.id], eta=now() + timedelta(hours=1))

    if cpc_info:
        # We uploaded annotations as CPC. Save some info for future CPC
        # exports.
        source.cpce_code_filepath = cpc_info['code_filepath']
        source.cpce_image_dir = cpc_info['image_dir']
        source.save()

    return JsonResponse(dict(
        success=True,
    ))
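A hedged sketch of exercising the preview endpoint from a Django test. The URL name `upload_images_preview_ajax` is an assumption (the urlconf is not shown above), but the `file_info` payload shape matches what the view reads:

```python
import json
from django.test import Client
from django.urls import reverse

client = Client()
source_id = 1  # placeholder; an existing Source the test user can edit
resp = client.post(
    reverse('upload_images_preview_ajax', args=[source_id]),  # assumed URL name
    data={'file_info': json.dumps([{'filename': 'img1.png', 'size': 1024}])},
)
statuses = resp.json()['statuses']  # one status dict per submitted file
```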
35.559921
88
0.673149
2,231
18,100
5.241147
0.161811
0.038998
0.043274
0.036945
0.499786
0.46515
0.430343
0.427948
0.40366
0.383477
0
0.003998
0.23989
18,100
508
89
35.629921
0.845908
0.172376
0
0.510324
0
0
0.126374
0.018596
0
0
0
0.001969
0
1
0.035398
false
0
0.135693
0
0.265487
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74bb9cc1b76d05a1ca8fad8227db7488f5dc77ca
3,846
py
Python
medgen/annotate/variant.py
text2gene/medgen
e2327bdf8f2c9e35da177fbbd0d217790c0ae361
[ "Apache-2.0" ]
null
null
null
medgen/annotate/variant.py
text2gene/medgen
e2327bdf8f2c9e35da177fbbd0d217790c0ae361
[ "Apache-2.0" ]
null
null
null
medgen/annotate/variant.py
text2gene/medgen
e2327bdf8f2c9e35da177fbbd0d217790c0ae361
[ "Apache-2.0" ]
null
null
null
""" Variant-level annotation functions requiring ClinvarDB and Metapub (NCBI/eutils). """ import requests, json, urllib from metapub.text_mining import is_pmcid, is_ncbi_bookID from metapub.pubmedcentral import get_pmid_for_otherid from ..db.clinvar import ClinVarDB from ..log import log ########################################################################################## # # Functions # ########################################################################################## def _clinvar_variant_accession(hgvs_text): """ See ClinVar FAQ http://www.ncbi.nlm.nih.gov/clinvar/docs/faq/#accs :param hgvs_text: c.DNA :return: RCVAccession "Reference ClinVar Accession" """ try: return ClinVarDB().accession_for_hgvs_text(str(hgvs_text)) except Exception as err: log.debug("no clinvar accession for variant hgvs_text %s " % hgvs_text) def _clinvar_variant_allele_id(hgvs_text): """ Get the unique AlleleID :param hgvs_text: c.DNA :return: AlleleID """ try: return ClinVarDB().allele_id_for_hgvs_text(hgvs_text) except Exception as err: log.debug('no clinvar AlleleID for variant hgvs_text %s ' % hgvs_text) def _clinvar_variant_variation_id(hgvs_text): """ Get the unique VariationID :param hgvs_text: c.DNA :return: VariationID """ try: return ClinVarDB().variation_id_for_hgvs_text(hgvs_text) except Exception as err: log.debug('no clinvar VariationID for variant hgvs_text %s ' % hgvs_text) def _clinvar_variant2pubmed(hgvs_text): """ Get PMID for clinvar variants using the AlleleID key. Keep GeneReviews book references (NKBxxxx) without argument. ONE EXPENSIVE LOOKUP HERE: If the citation_source is PubMedCentral, first convert responses to PMID. :param hgvs_text: c.DNA :return: set(PMIDs and possibly also NBK ids) """ pubmeds = [] citations = ClinVarDB().var_citations(hgvs_text) if citations: for cite in citations: some_id = cite['citation_id'] if is_ncbi_bookID(some_id): # Todo: convert? drop?? pubmeds.append(some_id) elif is_pmcid(some_id): try: pmid = get_pmid_for_otherid(some_id) if pmid is not None: log.debug('found PubMedCentral PMCID %s, converted to PMID %s ', some_id, str(pmid)) pubmeds.append(pmid) else: log.debug('PMID not found for PMCID %s; discarding.', some_id) except Exception as err: log.debug('error converting PMCID %s: %r', some_id, err) elif cite['citation_source'] == 'PubMed': pubmeds.append(some_id) #return set([int(entry) for entry in pubmeds]) return set(pubmeds) def clinvar2pmid_with_accessions(hgvs_list): ret = [] citations = ClinVarDB().var_citations(hgvs_list) if citations: for cite in citations: article_id = cite['citation_id'] if is_ncbi_bookID(article_id): pmid = article_id else: pmid = article_id if cite['citation_source'] == 'PubMed' else get_pmid_for_otherid(article_id) if pmid: ret.append({"hgvs_text": cite['HGVS'], "pmid": pmid, "accession": cite['RCVaccession']}) return ret ########################################################################################## # # API # ########################################################################################## ClinvarAccession = _clinvar_variant_accession ClinvarAlleleID = _clinvar_variant_allele_id ClinvarPubmeds = _clinvar_variant2pubmed ClinvarVariationID = _clinvar_variant_variation_id
34.035398
110
0.583983
428
3,846
5.016355
0.287383
0.081975
0.018631
0.026083
0.296227
0.264555
0.161155
0.161155
0.133209
0.115976
0
0.00103
0.24285
3,846
112
111
34.339286
0.736264
0.191368
0
0.275862
0
0
0.137158
0
0
0
0
0.008929
0
1
0.086207
false
0
0.086207
0
0.258621
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74bbb875400925fd99ab218e4498bdd041de47bc
6,049
py
Python
microdf/custom_taxes.py
MaxGhenis/taxcalc-helpers
85d2739d1c96882424cb51ef1806c9e51f88c085
[ "MIT" ]
6
2019-06-26T14:37:49.000Z
2020-08-10T22:26:34.000Z
microdf/custom_taxes.py
MaxGhenis/taxcalc-helpers
85d2739d1c96882424cb51ef1806c9e51f88c085
[ "MIT" ]
94
2019-06-22T14:57:58.000Z
2020-09-08T16:35:38.000Z
microdf/custom_taxes.py
MaxGhenis/taxcalc-helpers
85d2739d1c96882424cb51ef1806c9e51f88c085
[ "MIT" ]
6
2020-09-08T18:29:36.000Z
2021-04-01T18:31:42.000Z
""" Functions and data for estimating taxes outside the income tax system. Examples include value added tax, financial transaction tax, and carbon tax. """ import microdf as mdf import numpy as np import pandas as pd # Source: # https://www.taxpolicycenter.org/briefing-book/who-would-bear-burden-vat VAT_INCIDENCE = pd.Series( index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9], data=[3.9, 3.9, 3.6, 3.6, 3.6, 3.6, 3.6, 3.4, 3.4, 3.2, 2.8, 2.5, 2.5], ) VAT_INCIDENCE /= 100 # Source: Table 5 in # https://www.treasury.gov/resource-center/tax-policy/tax-analysis/Documents/WP-115.pdf CARBON_TAX_INCIDENCE = pd.Series( index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9], data=[0.8, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.8, 1.8, 1.8, 1.6, 1.4, 0.7], ) CARBON_TAX_INCIDENCE /= 100 # Source: Figure 1 in # https://www.taxpolicycenter.org/sites/default/files/alfresco/publication-pdfs/2000587-financial-transaction-taxes.pdf FTT_INCIDENCE = pd.Series( index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9], data=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.3, 0.4, 0.8, 1.0], ) FTT_INCIDENCE /= 100 def add_custom_tax( df, segment_income, w, base_income, incidence, name, total=None, ratio=None, verbose=True, ): """Add a custom tax based on incidence analysis driven by percentiles. :param df: DataFrame. :param segment_income: Income measure used to segment tax units into quantiles. :param w: Weight used to segment into quantiles (either s006 or XTOT_m). :param base_income: Income measure by which incidence is multiplied to estimate liability. :param incidence: pandas Series indexed on the floor of an income percentile, with values for the tax rate. :param name: Name of the column to add. :param total: Total amount the tax should generate. If not provided, liabilities are calculated only based on the incidence schedule. (Default value = None) :param ratio: Ratio to adjust the tax by, compared to the original tax. This acts as a multiplier for the incidence argument. (Default value = None) :param verbose: Whether to print the tax adjustment factor if needed. Defaults to True. :returns: Nothing. Adds the column name to df representing the tax liability. df is also sorted by segment_income. """ if ratio is not None: incidence = incidence * ratio assert total is None, "ratio and total cannot both be provided." df.sort_values(segment_income, inplace=True) income_percentile = 100 * df[w].cumsum() / df[w].sum() tu_incidence = incidence.iloc[ pd.cut( income_percentile, # Add a right endpoint. Should be 100 but sometimes a decimal # gets added. bins=incidence.index.tolist() + [101], labels=False, ) ].values df[name] = np.maximum(0, tu_incidence * df[base_income]) if total is not None: initial_total = mdf.weighted_sum(df, name, "s006") if verbose: print( "Multiplying tax by " + str(round(total / initial_total, 2)) + "." ) df[name] *= total / initial_total def add_vat( df, segment_income="tpc_eci", w="XTOT_m", base_income="aftertax_income", incidence=VAT_INCIDENCE, name="vat", **kwargs ): """Add value added tax based on incidence estimate from Tax Policy Center. :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income. :param Other: arguments: Args to add_custom_tax with VAT defaults. :param segment_income: Default value = "tpc_eci") :param w: Default value = "XTOT_m") :param base_income: Default value = "aftertax_income") :param incidence: Default value = VAT_INCIDENCE) :param name: Default value = "vat") :param **kwargs: Other arguments passed to add_custom_tax(). 
:returns: Nothing. Adds vat to df. df is also sorted by tpc_eci. """ add_custom_tax( df, segment_income, w, base_income, incidence, name, **kwargs ) def add_carbon_tax( df, segment_income="tpc_eci", w="XTOT_m", base_income="aftertax_income", incidence=CARBON_TAX_INCIDENCE, name="carbon_tax", **kwargs ): """Add carbon tax based on incidence estimate from the US Treasury Department. :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income. :param Other: arguments: Args to add_custom_tax with carbon tax defaults. :param segment_income: Default value = "tpc_eci") :param w: Default value = "XTOT_m") :param base_income: Default value = "aftertax_income") :param incidence: Default value = CARBON_TAX_INCIDENCE) :param name: Default value = "carbon_tax") :param **kwargs: Other arguments passed to add_custom_tax(). :returns: Nothing. Adds carbon_tax to df. df is also sorted by tpc_eci. """ add_custom_tax( df, segment_income, w, base_income, incidence, name, **kwargs ) def add_ftt( df, segment_income="tpc_eci", w="XTOT_m", base_income="aftertax_income", incidence=FTT_INCIDENCE, name="ftt", **kwargs ): """Add financial transaction tax based on incidence estimate from Tax Policy Center. :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income. :param Other: arguments: Args to add_custom_tax with FTT defaults. :param segment_income: Default value = "tpc_eci") :param w: Default value = "XTOT_m") :param base_income: Default value = "aftertax_income") :param incidence: Default value = FTT_INCIDENCE) :param name: Default value = "ftt") :param **kwargs: Other arguments passed to add_custom_tax(). :returns: Nothing. Adds ftt to df. df is also sorted by tpc_eci. """ add_custom_tax( df, segment_income, w, base_income, incidence, name, **kwargs )
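A minimal sketch of the VAT helper on a toy DataFrame, assuming add_vat is re-exported at the microdf package level (otherwise import it from microdf.custom_taxes); the column names follow the docstring defaults:

import pandas as pd
import microdf as mdf

# Three toy tax units with the columns the VAT defaults expect.
df = pd.DataFrame({
    "tpc_eci": [10_000.0, 50_000.0, 250_000.0],    # income used to rank units
    "XTOT_m": [1.0, 2.0, 1.5],                     # weight used for quantiles
    "aftertax_income": [9_000.0, 40_000.0, 180_000.0],
    "s006": [100.0, 100.0, 100.0],                 # record weight used for totals
})

mdf.add_vat(df)  # adds a "vat" column from the TPC incidence schedule
print(df[["tpc_eci", "vat"]])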
33.41989
119
0.65515
886
6,049
4.351016
0.226862
0.052918
0.031128
0.02179
0.473411
0.43917
0.431128
0.428794
0.428794
0.42594
0
0.042073
0.237725
6,049
180
120
33.605556
0.793971
0.551661
0
0.333333
0
0
0.066966
0
0
0
0
0
0.011494
1
0.045977
false
0
0.034483
0
0.08046
0.011494
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74bd2bcc47cfe5b8be22decac84de616312a386c
9,067
py
Python
system_c/scripts/combine_two_systems.py
hkayesh/depend_clean
1d4bfdaf9a4d323582ab36e3ec0f9b4f2faae851
[ "Apache-2.0" ]
null
null
null
system_c/scripts/combine_two_systems.py
hkayesh/depend_clean
1d4bfdaf9a4d323582ab36e3ec0f9b4f2faae851
[ "Apache-2.0" ]
null
null
null
system_c/scripts/combine_two_systems.py
hkayesh/depend_clean
1d4bfdaf9a4d323582ab36e3ec0f9b4f2faae851
[ "Apache-2.0" ]
null
null
null
from utilities import Utilities
# from comment_level_evaluation import CommentLevelEvaluation
import operator


class CombineSystems:

    def __init__(self):
        self.utilities = Utilities()
        self.storage_path = 'comment-level-datasets-2/'
        # self.storage_path = 'r-combine-outputs/'
        # self.random_states = [111, 122, 133, 144, 155]
        self.categories = ['environment', 'waiting time', 'staff attitude professionalism',
                           'care quality', 'other']

    def is_valid_asp_from_from_system_a(self, aspect, confidence_value, thresholds):
        is_valid = False
        # thresholds = {'environment': 0.6,
        #               'waiting time': 0.5,
        #               'staff attitude and professionalism': 0.5,
        #               'care quality': 0.4,
        #               'other': 0.7,
        #               }
        aspects = thresholds.keys()
        if aspect in aspects and float(confidence_value) >= thresholds[aspect]:
            is_valid = True
        return is_valid

    def is_valid_asp_from_from_system_b(self, aspect, confidence_value, thresholds):
        is_valid = False
        # thresholds = {'environment': 0.1,
        #               'waiting time': 0.8,
        #               'staff attitude and professionalism': 0.1,
        #               'care quality': 0.1,
        #               'other': 0.1
        #               }
        aspects = thresholds.keys()
        if aspect in aspects and float(confidence_value) >= thresholds[aspect]:
            is_valid = True
        return is_valid

    def apply_dictionaries(self, comment):
        food_lexicon = ['food', 'canteen', 'canten', 'coffee', 'cofee', 'coffe', 'tea', 'drink', 'drinks']
        parking_lexicon = ['car park', 'car-park', 'carpark', 'parking', 'bicycle']
        aspects = []
        all_words = self.utilities.get_lemma(comment)
        lemmatized_words = all_words.values()
        for word in food_lexicon:
            if word in lemmatized_words:
                aspects.append('food')
                break
        for word in parking_lexicon:
            if word in lemmatized_words:
                aspects.append('parking')
                break
        return aspects

    def combine_by_dynamic_threshold(self, file_a_path, file_b_path, output_file_path,
                                     thresholds_a, thresholds_b, evaluation=False):
        file_a = self.utilities.read_from_csv(file_a_path)
        file_b = self.utilities.read_from_csv(file_b_path)

        output = []
        for row_a, row_b in zip(file_a, file_b):
            comment = row_a[0]
            aspects = []

            # remove comment from the first column
            del row_a[0]
            del row_b[0]

            for a, b in zip(row_a, row_b):
                if not a and not b and a in self.categories:
                    break

                # union with threshold
                if a is not None:
                    asp_threshold = a.rsplit(' ', 1)[0]
                    sentiment = a.rsplit(' ', 1)[1]
                    aspect_a = asp_threshold.rsplit(' ', 1)[0]
                    asp_snt = aspect_a + " " + sentiment
                    if not any(aspect_a in asp for asp in aspects):
                        confidence_value_a = asp_threshold.rsplit(' ', 1)[1]
                        is_valid = self.is_valid_asp_from_from_system_a(aspect_a, confidence_value_a, thresholds_a)
                        if is_valid:
                            aspects.append(asp_snt)

                if b is not None:
                    aspect_b = b.rsplit(' ', 1)[0]
                    if aspect_b in self.categories and not any(aspect_b in asp for asp in aspects):
                        confidence_value_b = b.rsplit(' ', 1)[1]
                        is_valid = self.is_valid_asp_from_from_system_b(aspect_b, confidence_value_b, thresholds_b)
                        if is_valid:
                            aspects.append(aspect_b)

            # Apply food and parking dictionaries
            # TURN OFF THIS SNIPPET BEFORE EVALUATION
            if evaluation is False:
                asps_from_dictionaries = self.apply_dictionaries(comment)
                if len(asps_from_dictionaries) > 0:
                    # if only environment, then replace with food/parking
                    if len(aspects) == 1 and aspects[0] == 'environment':
                        aspects = asps_from_dictionaries
                    else:
                        aspects = aspects + asps_from_dictionaries

            if len(aspects) < 1:
                # aspects = ['other']
                aspects = ['other negative']

            output.append([comment] + aspects)

        self.utilities.save_list_as_csv(output, output_file_path)

    def combine_by_static_threshold(self, file_a_path, file_b_path, threshold_a, threshold_b, output_file_path):
        file_a = self.utilities.read_from_csv(file_a_path)
        file_b = self.utilities.read_from_csv(file_b_path)

        output = []
        for row_a, row_b in zip(file_a, file_b):
            comment = row_a[0]
            aspects = []

            # remove comment from the first column
            del row_a[0]
            del row_b[0]

            for a, b in zip(row_a, row_b):
                if not a and not b and a in self.categories:
                    break

                # union with threshold
                if a and a.rsplit(' ', 1)[0] not in aspects and float(a.rsplit(' ', 1)[1]) >= threshold_a:
                    aspects.append(a.rsplit(' ', 1)[0])
                if b and b.rsplit(' ', 1)[0] in self.categories and b.rsplit(' ', 1)[0] not in aspects \
                        and float(b.rsplit(' ', 1)[1]) >= threshold_b:
                    aspects.append(b.rsplit(' ', 1)[0])

            # Apply food and parking dictionaries
            # asps_from_dictionaries = self.apply_dictionaries(comment)
            # if len(asps_from_dictionaries) > 0:
            #     aspects = aspects + asps_from_dictionaries

            if len(aspects) < 1:
                aspects = ['other']

            output.append([comment] + aspects)

        self.utilities.save_list_as_csv(output, output_file_path)

    def extract_top_comments(self, data_file, output_file_path):
        rows = self.utilities.read_from_csv(data_file)

        envs = {}
        wts = {}
        saaps = {}
        cqs = {}
        ots = {}
        for row in rows:
            comment = row[0]
            del row[0]  # remove the comment column
            for item in row:
                # if there is a sentiment token, remove it
                if any(snt_cat in item for snt_cat in self.utilities.sentiment_classes):
                    item = item.rsplit(' ', 1)[0]
                if item and item.rsplit(' ', 1)[0] == 'environment':
                    envs[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'waiting time':
                    wts[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'staff attitude and professionalism':
                    saaps[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'care quality':
                    cqs[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'other':
                    ots[comment] = float(item.rsplit(' ', 1)[1])

        # sort comments by the descending order of confidence values
        sorted_envs = [comment_data[0] for comment_data in sorted(envs.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_wts = [comment_data[0] for comment_data in sorted(wts.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_saaps = [comment_data[0] for comment_data in sorted(saaps.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_cqs = [comment_data[0] for comment_data in sorted(cqs.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_ots = [comment_data[0] for comment_data in sorted(ots.items(), key=operator.itemgetter(1), reverse=True)]

        # prepare output to save
        output = [['Environment', 'Waiting time', 'Staff attitude and professionalism', 'Care quality', 'Other']]
        top = 5
        for i in range(0, top):
            comments = []
            try:
                comments.append(sorted_envs[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_wts[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_saaps[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_cqs[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_ots[i])
            except IndexError:
                comments.append(None)
            output.append(comments)

        self.utilities.save_list_as_csv(output, output_file_path)
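A hedged sketch of driving the combiner; the CSV paths below are hypothetical, and Utilities must be importable with its read_from_csv/save_list_as_csv helpers:

# Hypothetical input/output paths; each input row is a comment followed by
# "aspect confidence" cells (system A also appends a sentiment token).
cs = CombineSystems()
cs.combine_by_static_threshold(
    'system_a_output.csv',
    'system_b_output.csv',
    threshold_a=0.5,
    threshold_b=0.5,
    output_file_path='combined_output.csv',
)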
38.419492
149
0.545715
1,062
9,067
4.46516
0.149718
0.035428
0.023619
0.015183
0.614509
0.548503
0.536061
0.51202
0.381274
0.381274
0
0.01872
0.351936
9,067
235
150
38.582979
0.788291
0.122532
0
0.412162
0
0
0.050366
0.003156
0
0
0
0
0
1
0.047297
false
0
0.013514
0
0.087838
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74bdf4ce11cc1cf0cef84ffadb758b63eccf4b5b
603
py
Python
python/ops/lekcija 24/program01.py
jasarsoft/examples
d6fddfcb8c50c31fbfe170a3edd2b6c07890f13e
[ "MIT" ]
null
null
null
python/ops/lekcija 24/program01.py
jasarsoft/examples
d6fddfcb8c50c31fbfe170a3edd2b6c07890f13e
[ "MIT" ]
null
null
null
python/ops/lekcija 24/program01.py
jasarsoft/examples
d6fddfcb8c50c31fbfe170a3edd2b6c07890f13e
[ "MIT" ]
null
null
null
import json

student = {
    "ime": "Milan",
    "prezime": "Tair",
    "indeks": 2008213514,
    "ispiti": [
        {
            "predmet": "Programiranje 1",
            "datum": "2008-01-05",
            "ocjena": 10
        },
        {
            "predmet": "Informatika",
            "datum": "2008-01-05",
            "ocjena": 10
        }
    ]
}

studentString = str(student)
print(studentString)

studentString = json.dumps(student)
print(studentString)

datoteka = open("student.json", "w")
datoteka.write(studentString)
datoteka.close()
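The same write, sketched with a context manager and json.dump (reusing the student dict above), so the file handle is closed even if the write fails:

import json

with open("student.json", "w") as f:
    json.dump(student, f, indent=4)  # indent is optional pretty-printing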
20.1
44
0.480929
48
603
6.041667
0.604167
0.062069
0.075862
0.089655
0.144828
0.144828
0
0
0
0
0
0.081794
0.371476
603
29
45
20.793103
0.683377
0
0
0.24
0
0
0.208955
0
0
0
0
0
0
1
0
false
0
0.04
0
0.04
0.08
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74be7f88c0d0fd7163d55362678d0731ca6c0782
1,066
py
Python
___Python/Carsten/p11_Excel/m01_Excel_einlesen.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
___Python/Carsten/p11_Excel/m01_Excel_einlesen.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
___Python/Carsten/p11_Excel/m01_Excel_einlesen.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
import pandas as pd
import xlrd
import xlsxwriter
from p01_kennenlernen import meinebibliothek

df = pd.read_excel("O:\___Python\personen.xlsx")  # import from Excel into Python, dates arrive as Timestamp
print(df)
print()

df1 = pd.to_datetime(df["Geburtsdatum"])  # convert Timestamp to datetime
print(df1)
print()

alter = []
for geburtstag in df1:  # use the previously built age calculation
    alter.append(meinebibliothek.alter(geburtstag))

durchschnittsalter = sum(alter) / len(alter)  # compute the average age
print("Durchschnittsalter ", durchschnittsalter)
print()

df["Alter"] = alter  # add the computed age to the table read from Excel
print(df)

writer = pd.ExcelWriter("O:\___Python\personen_bearbeitet.xlsx", engine="xlsxwriter")  # create an Excel writer using XlsxWriter
df.to_excel(writer, sheet_name='Sheet1')  # convert the dataframe into an XlsxWriter Excel object
writer.save()  # close the pandas Excel writer and export the Excel document
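In pandas 2.0 the save() method was removed from ExcelWriter; the export above can be sketched with a context manager instead, reusing df from the script:

import pandas as pd

# The writer is closed (and the file written) when the block exits.
with pd.ExcelWriter("O:\___Python\personen_bearbeitet.xlsx", engine="xlsxwriter") as writer:
    df.to_excel(writer, sheet_name="Sheet1")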
34.387097
135
0.766417
132
1,066
6.098485
0.537879
0.026087
0.037267
0
0
0
0
0
0
0
0
0.006579
0.144465
1,066
30
136
35.533333
0.876096
0.401501
0
0.238095
0
0
0.191987
0.105175
0
0
0
0
0
1
0
false
0
0.190476
0
0.190476
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74bf45dcb4a434eeeff395649e73785dadc6d3a6
97
py
Python
tests/_support/configs/yaml/tasks.py
tyewang/invoke
e40b3ef3b8e9a9b275b2964c65e2ce878fec4349
[ "BSD-2-Clause" ]
null
null
null
tests/_support/configs/yaml/tasks.py
tyewang/invoke
e40b3ef3b8e9a9b275b2964c65e2ce878fec4349
[ "BSD-2-Clause" ]
null
null
null
tests/_support/configs/yaml/tasks.py
tyewang/invoke
e40b3ef3b8e9a9b275b2964c65e2ce878fec4349
[ "BSD-2-Clause" ]
null
null
null
from spec import eq_

from invoke import ctask


@ctask
def mytask(c):
    eq_(c.hooray, 'yaml')
10.777778
25
0.690722
16
97
4.0625
0.6875
0
0
0
0
0
0
0
0
0
0
0
0.206186
97
8
26
12.125
0.844156
0
0
0
0
0
0.041237
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
74bf6f3b5de8a5799156e1034c8878c020d100c2
7,769
py
Python
vmz_interface/extractor/build_lmdb.py
fksato/vmz_interface
985e7129f4bf266a6226dbc2b7e108dafc8b917a
[ "Apache-2.0" ]
null
null
null
vmz_interface/extractor/build_lmdb.py
fksato/vmz_interface
985e7129f4bf266a6226dbc2b7e108dafc8b917a
[ "Apache-2.0" ]
null
null
null
vmz_interface/extractor/build_lmdb.py
fksato/vmz_interface
985e7129f4bf266a6226dbc2b7e108dafc8b917a
[ "Apache-2.0" ]
null
null
null
import os
import csv

import numpy as np
import pandas as pd
import pickle as pk

from glob import glob
from math import ceil

from vmz_interface.data.db_video_create import create_video_db


class VideoDBBuilder:

    def __init__(self, stimulus_id, lmdb_path, temporal_depth, fpv=75, video_strt_offset=15,
                 clips_overlap=0, batch_size=4, gpu_count=2, max_num_records=6e4, min_records_factor=1,
                 allow_mkdir=False, *args, **kwargs):
        if not os.path.isdir(lmdb_path):
            if allow_mkdir:
                os.mkdir(lmdb_path)
            else:
                raise Exception(f'please make sure {lmdb_path} is a valid directory')
        self._stim_id = stimulus_id
        self._lmdb_path = lmdb_path
        self.num_frames_per_clips = temporal_depth
        self.BATCH_SIZE = batch_size
        self.GPU_CNT = gpu_count
        self.gpus = []
        self.MAX_RECORDS = max_num_records  # 60K max number of records per lmdb (arbitrarily chosen)
        self.MIN_RECORDS_MULT = min_records_factor  # used to make sure the last file is not too large (arbitrarily chosen)
        self.fpv = fpv
        self.video_start_offset = video_strt_offset
        self.clips_overlap = clips_overlap
        self.list_lmdb_meta = []
        self.units = 1
        self.video_lmdb_paths = None
        self.uneven_db = True
        self.gpu_batch_combo = None
        self.clips_dir = f'{stimulus_id}_{self.num_frames_per_clips}_{self.clips_overlap}'
        self.clips_lmdb_data_path = f'{self._lmdb_path}/{self.clips_dir}'
        if not os.path.isdir(self.clips_lmdb_data_path):
            if allow_mkdir:
                os.mkdir(self.clips_lmdb_data_path)
            else:
                raise Exception(f'please make sure {self.clips_lmdb_data_path} is a valid directory')

    def make_from_paths(self, stimuli_paths):
        self.video_paths = stimuli_paths
        self.vid_cnt = len(self.video_paths)
        lmdb_metas = glob(f'{self.clips_lmdb_data_path}/lmdb_meta_*.csv')
        # make existence check:
        if len(lmdb_metas) > 0:
            vid_list = set(self.video_paths)
            created_metas = set()
            for i in range(len(lmdb_metas)):
                with open(f'{self.clips_lmdb_data_path}/lmdb_meta_{i}.csv') as f:
                    df = pd.read_csv(f)
                    created_metas.update(set(df['org_video'].unique()))
            if created_metas == vid_list:
                self.video_lmdb_paths = glob(f'{self.clips_lmdb_data_path}/lmdb_*_db')
            else:
                raise Exception(f'Stimulus id {self._stim_id} does not match the videos in the LMDB')
        else:
            if not self.write_lmdb_meta():
                raise Exception('writing stimulus lmdb metas failed')
            else:
                self._create_video_dbs()

    def write_lmdb_meta(self):
        num_clips, start_frms = self._start_frames()
        db_starts, db_strides = self._records_per_meta(num_clips)
        file_strides = [int(i / num_clips) for i in db_strides]
        file_starts = [int(i / num_clips) for i in db_starts]
        sub_paths = [self.video_paths[offset:offset + stride]
                     for offset, stride in zip(file_starts, file_strides)]
        write_data = [[[data[i]
                        , 0  # labels is None?
                        # hacs_action_dict is not defined in this module, so the label defaults to 0:
                        # , hacs_action_dict[os.path.basename(os.path.dirname(data[i]))]
                        , start_frms[clip_idx]
                        , num_clips * i + clip_idx + db_starts[idx]]
                       for i in range(len(data)) for clip_idx in range(num_clips)]
                      for idx, data in enumerate(sub_paths)]
        self.uneven_db = False
        if len(file_strides) > 0:
            self.uneven_db = file_strides[-1] == file_strides[0]
        # self.units = num_clips
        return self._write_lmdb_meta(write_data)

    def _write_lmdb_meta(self, write_data):
        for group, group_paths in enumerate(write_data):
            with open(f'{self.clips_lmdb_data_path}/lmdb_meta_{group}.csv', 'w') as f:
                writer = csv.writer(f)
                writer.writerow(['org_video', 'label', 'start_frm', 'video_id'])
                writer.writerows(group_paths)
            self.list_lmdb_meta.append(f'{self.clips_lmdb_data_path}/lmdb_meta_{group}.csv')
        return True

    def _start_frames(self):
        '''
        calculate how many examples given CLIPs type:
            FULL: number of clips per video == 1
            CLIPs_ONE: each clip strides by 1, overlapping 15 frames between adjacent CLIPs
            CLIPs_TEN: overlapping 10 frames between adjacent CLIPs

        num_clips = ceil( (total_frames_per_video - temporal_depth - offset) / clips_stride ) + 1

        given num_clips per video, calculate frame starts for videos:
            start_frm[0] = (total_frames_per_video - temporal_depth) - stride * (num_clips - 1)
            start_frm[i] = start_frm[i-1] + 6
        '''
        video_width = (self.fpv - self.video_start_offset)  # 60
        clips_stride = (self.num_frames_per_clips - self.clips_overlap)
        num_CLIPS = ceil((video_width - self.num_frames_per_clips) / clips_stride) + 1
        initial_frame = (self.fpv - self.num_frames_per_clips) - (num_CLIPS - 1) * clips_stride
        start_frms = [initial_frame + i * clips_stride for i in range(num_CLIPS)]
        assert all(start_frms[i] > 0 for i in range(len(start_frms)))
        assert any(start_frms[i] <= self.video_start_offset for i in range(len(start_frms)))
        return num_CLIPS, start_frms

    def _records_per_meta(self, num_clips):
        """
        Caffe2 video models do not pad batched data.
        This utility function distributes batched data into an even number of record files,
        each a multiple of NUM_GPU and BATCH_SIZE.
        The remainder is added to a final meta file with a minimum of
        total video remainder * MIN_RECORDS_MULT records.

        returns a list of where in the video_paths list lmdb should begin creating a DB
        and a list of how many videos in the list it should consume
        """
        total_num_records = num_clips * self.vid_cnt
        div_criteria = num_clips * self.BATCH_SIZE * self.GPU_CNT  # extract_features requires the record count to divide evenly
        # start with 1 file:
        num_files = 1
        # files_rem = int(total_num_records % num_files)
        records_per_file = int(total_num_records / num_files)
        if records_per_file > self.MAX_RECORDS:
            # files_rem = int(total_num_records % self.MAX_RECORDS)
            num_files = int(total_num_records / self.MAX_RECORDS)
            records_per_file = int(total_num_records / num_files)
        rem_per_file = int(records_per_file % div_criteria)
        records_per_file = records_per_file - rem_per_file
        file_starts = [int(records_per_file * i) for i in range(0, num_files)]
        file_strides = [int(records_per_file) for i in range(num_files)]
        rem_total = total_num_records - num_files * records_per_file
        temp_rem = rem_total
        if rem_total > div_criteria * self.MIN_RECORDS_MULT:
            temp_rem = int(rem_total % div_criteria)
            extra_file = rem_total - temp_rem
            file_starts.append(int(extra_file + file_starts[-1]))
            file_strides.append(int(extra_file))
            num_files += 1
        if temp_rem > 0:
            file_starts.append(int(file_starts[-1] + file_strides[-1]))
            file_strides.append(int(temp_rem))
            self._get_gpu_batch_combo(temp_rem)
        assert all(file_starts[i] % div_criteria == 0 for i in range(1, num_files))
        assert total_num_records - file_starts[-1] == temp_rem
        assert sum(file_strides) == total_num_records
        return file_starts, file_strides

    def _get_gpu_batch_combo(self, file_remainder):
        gpu_check = int(file_remainder % self.GPU_CNT)
        batch_check = int(file_remainder % self.BATCH_SIZE)
        if gpu_check == 0:  # the remainder divides across GPUs regardless of the batch remainder
            self.gpu_batch_combo = [self.GPU_CNT, 1]
        elif gpu_check != 0 and batch_check == 0:
            self.gpu_batch_combo = [1, self.BATCH_SIZE]
        else:
            self.gpu_batch_combo = [1, 1]

    def _create_video_dbs(self):
        """
        create_video_db(
            args.list_file,
            args.output_file,
            args.use_list,
            args.use_video_id,
            args.use_start_frame,
            args.num_epochs
        )
        """
        use_list = 1
        use_video_id = 1
        use_start_frame = 1
        list_lmdb_output = [f'{self.clips_lmdb_data_path}/lmdb_{i}_db' for i in range(len(self.list_lmdb_meta))]
        for i in range(len(self.list_lmdb_meta)):
            create_video_db(list_file=self.list_lmdb_meta[i], output_file=list_lmdb_output[i],
                            use_list=use_list, use_video_id=use_video_id, use_start_frame=use_start_frame)
        self.video_lmdb_paths = list_lmdb_output
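A hedged sketch of the intended call sequence; the stimulus id, paths, and clip geometry below are made up, and building the LMDBs still requires the Caffe2 video backend behind create_video_db:

# Hypothetical paths and parameters, chosen so the start-frame asserts hold
# (fpv=75, offset=15, depth=32, overlap=0 gives 2 clips per video).
builder = VideoDBBuilder(
    stimulus_id='stim_demo',
    lmdb_path='/data/lmdb',
    temporal_depth=32,
    fpv=75,
    allow_mkdir=True,
)
builder.make_from_paths(['/data/videos/a.mp4', '/data/videos/b.mp4'])
print(builder.video_lmdb_paths)  # one lmdb_*_db path per meta file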
35.313636
164
0.740378
1,282
7,769
4.153666
0.170047
0.02554
0.013521
0.031925
0.281315
0.174648
0.150423
0.107793
0.049202
0.022911
0
0.008913
0.16244
7,769
219
165
35.474886
0.809436
0.197838
0
0.072464
0
0
0.098614
0.062037
0
0
0
0
0.036232
1
0.057971
false
0
0.057971
0
0.152174
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74c04ac95cf162be1b2cc7dbd708d0513446fe97
13,038
py
Python
tests/test_pyoidc_facade.py
Titotix/Flask-pyoidc
87b8059617304ec634d7b6d3f5f649c40d9cdb4c
[ "Apache-2.0" ]
64
2017-01-31T09:08:15.000Z
2021-12-21T21:05:45.000Z
tests/test_pyoidc_facade.py
Titotix/Flask-pyoidc
87b8059617304ec634d7b6d3f5f649c40d9cdb4c
[ "Apache-2.0" ]
99
2017-02-08T22:38:54.000Z
2022-03-31T22:03:27.000Z
tests/test_pyoidc_facade.py
Titotix/Flask-pyoidc
87b8059617304ec634d7b6d3f5f649c40d9cdb4c
[ "Apache-2.0" ]
33
2017-02-09T18:19:51.000Z
2021-12-24T17:48:52.000Z
import time
import base64

import pytest
import responses

from oic.oic import AuthorizationResponse, AccessTokenResponse, TokenErrorResponse, OpenIDSchema, \
    AuthorizationErrorResponse
from urllib.parse import parse_qsl, urlparse

from flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata, ProviderMetadata, \
    ClientRegistrationInfo
from flask_pyoidc.pyoidc_facade import PyoidcFacade, _ClientAuthentication
from .util import signed_id_token

REDIRECT_URI = 'https://rp.example.com/redirect_uri'


class TestPyoidcFacade(object):
    PROVIDER_BASEURL = 'https://op.example.com'
    PROVIDER_METADATA = ProviderMetadata(PROVIDER_BASEURL,
                                         PROVIDER_BASEURL + '/auth',
                                         PROVIDER_BASEURL + '/jwks')
    CLIENT_METADATA = ClientMetadata('client1', 'secret1')

    def test_registered_client_metadata_is_forwarded_to_pyoidc(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_metadata=self.CLIENT_METADATA)
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert facade._client.registration_response

    def test_no_registered_client_metadata_is_handled(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_registration_info=ClientRegistrationInfo())
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert not facade._client.registration_response

    def test_is_registered(self):
        unregistered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                             client_registration_info=ClientRegistrationInfo())
        registered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                           client_metadata=self.CLIENT_METADATA)
        assert PyoidcFacade(unregistered, REDIRECT_URI).is_registered() is False
        assert PyoidcFacade(registered, REDIRECT_URI).is_registered() is True

    @responses.activate
    def test_register(self):
        registration_endpoint = self.PROVIDER_BASEURL + '/register'
        responses.add(responses.POST, registration_endpoint, json=self.CLIENT_METADATA.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(registration_endpoint=registration_endpoint)
        unregistered = ProviderConfiguration(provider_metadata=provider_metadata,
                                             client_registration_info=ClientRegistrationInfo())
        facade = PyoidcFacade(unregistered, REDIRECT_URI)
        facade.register()
        assert facade.is_registered() is True

    def test_authentication_request(self):
        extra_user_auth_params = {'foo': 'bar', 'abc': 'xyz'}
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_metadata=self.CLIENT_METADATA,
                                       auth_request_params=extra_user_auth_params)

        state = 'test_state'
        nonce = 'test_nonce'

        facade = PyoidcFacade(config, REDIRECT_URI)
        extra_lib_auth_params = {'foo': 'baz', 'qwe': 'rty'}
        auth_request = facade.authentication_request(state, nonce, extra_lib_auth_params)

        expected_auth_params = {
            'scope': 'openid',
            'response_type': 'code',
            'client_id': self.CLIENT_METADATA['client_id'],
            'redirect_uri': REDIRECT_URI,
            'state': state,
            'nonce': nonce
        }
        expected_auth_params.update(extra_user_auth_params)
        expected_auth_params.update(extra_lib_auth_params)

        assert auth_request.to_dict() == expected_auth_params

    def test_parse_authentication_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        auth_code = 'auth_code-1234'
        state = 'state-1234'
        auth_response = AuthorizationResponse(**{'state': state, 'code': auth_code})
        parsed_auth_response = facade.parse_authentication_response(auth_response.to_dict())
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response.to_dict() == auth_response.to_dict()

    def test_parse_authentication_response_handles_error_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        error_response = AuthorizationErrorResponse(**{'error': 'invalid_request', 'state': 'state-1234'})
        parsed_auth_response = facade.parse_authentication_response(error_response)
        assert isinstance(parsed_auth_response, AuthorizationErrorResponse)
        assert parsed_auth_response.to_dict() == error_response.to_dict()

    @responses.activate
    def test_parse_authentication_response_preserves_id_token_jwt(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        state = 'state-1234'
        now = int(time.time())
        id_token, id_token_signing_key = signed_id_token({
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_sub',
            'aud': 'client1',
            'exp': now + 1,
            'iat': now
        })
        responses.add(responses.GET, self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})
        auth_response = AuthorizationResponse(**{'state': state, 'id_token': id_token})
        parsed_auth_response = facade.parse_authentication_response(auth_response)
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response['state'] == state
        assert parsed_auth_response['id_token_jwt'] == id_token

    @pytest.mark.parametrize('request_func,expected_token_request', [
        (
            lambda facade: facade.exchange_authorization_code('auth-code'),
            {
                'grant_type': 'authorization_code',
                'code': 'auth-code',
                'redirect_uri': REDIRECT_URI
            }
        ),
        (
            lambda facade: facade.refresh_token('refresh-token'),
            {
                'grant_type': 'refresh_token',
                'refresh_token': 'refresh-token',
                'redirect_uri': REDIRECT_URI
            }
        )
    ])
    @responses.activate
    def test_token_request(self, request_func, expected_token_request):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        now = int(time.time())
        id_token_claims = {
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_user',
            'aud': [self.CLIENT_METADATA['client_id']],
            'exp': now + 1,
            'iat': now,
            'nonce': 'test_nonce'
        }
        id_token_jwt, id_token_signing_key = signed_id_token(id_token_claims)
        token_response = AccessTokenResponse(access_token='test_access_token',
                                             token_type='Bearer',
                                             id_token=id_token_jwt)
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        responses.add(responses.GET, self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})
        token_response = request_func(facade)
        assert isinstance(token_response, AccessTokenResponse)
        expected_token_response = token_response.to_dict()
        expected_token_response['id_token'] = id_token_claims
        expected_token_response['id_token_jwt'] = id_token_jwt
        assert token_response.to_dict() == expected_token_response

        token_request = dict(parse_qsl(responses.calls[0].request.body))
        assert token_request == expected_token_request

    @responses.activate
    def test_token_request_handles_error_response(self):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        token_response = TokenErrorResponse(error='invalid_request', error_description='test error description')
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict(), status=400)
        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') == token_response

    def test_token_request_handles_missing_provider_token_endpoint(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') is None

    @pytest.mark.parametrize('userinfo_http_method', [
        'GET',
        'POST'
    ])
    @responses.activate
    def test_configurable_userinfo_endpoint_method_is_used(self, userinfo_http_method):
        userinfo_endpoint = self.PROVIDER_BASEURL + '/userinfo'
        userinfo_response = OpenIDSchema(sub='user1')
        responses.add(userinfo_http_method, userinfo_endpoint, json=userinfo_response.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=userinfo_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=userinfo_http_method),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') == userinfo_response

    def test_no_userinfo_request_is_made_if_no_userinfo_http_method_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=None),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None

    def test_no_userinfo_request_is_made_if_no_userinfo_endpoint_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None

    def test_no_userinfo_request_is_made_if_no_access_token(self):
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=self.PROVIDER_BASEURL + '/userinfo')
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request(None) is None


class TestClientAuthentication(object):
    CLIENT_ID = 'client1'
    CLIENT_SECRET = 'secret1'

    @property
    def basic_auth(self):
        credentials = '{}:{}'.format(self.CLIENT_ID, self.CLIENT_SECRET)
        return 'Basic {}'.format(base64.urlsafe_b64encode(credentials.encode('utf-8')).decode('utf-8'))

    @pytest.fixture(autouse=True)
    def setup(self):
        self.client_auth = _ClientAuthentication(self.CLIENT_ID, self.CLIENT_SECRET)

    def test_client_secret_basic(self):
        request = {}
        headers = self.client_auth('client_secret_basic', request)
        assert headers == {'Authorization': self.basic_auth}
        assert request == {}

    def test_client_secret_post(self):
        request = {}
        headers = self.client_auth('client_secret_post', request)
        assert headers is None
        assert request == {'client_id': self.CLIENT_ID, 'client_secret': self.CLIENT_SECRET}

    def test_defaults_to_client_secret_basic(self):
        assert self.client_auth('invalid_client_auth_method', {}) == self.client_auth('client_secret_basic', {})
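For readers unfamiliar with the responses library these tests lean on, a minimal standalone sketch of the mocking pattern (the URL and payload here are arbitrary):

import requests
import responses

@responses.activate
def fetch_token():
    # Calls made inside the decorated function are served by registered mocks.
    responses.add(responses.POST, 'https://op.example.com/token',
                  json={'access_token': 'test'}, status=200)
    return requests.post('https://op.example.com/token').json()

assert fetch_token() == {'access_token': 'test'}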
49.763359
118
0.649409
1,262
13,038
6.335182
0.12916
0.094059
0.052533
0.056035
0.596623
0.51257
0.430394
0.397498
0.353846
0.329581
0
0.004615
0.268753
13,038
261
119
49.954023
0.833963
0
0
0.321429
0
0
0.071253
0.004679
0
0
0
0
0.120536
1
0.089286
false
0
0.040179
0
0.165179
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74c236bbb346149e0b60b81086ce4a68adb25d83
4,260
py
Python
INIT.py
db260179/mullion
3ed599f624ab308f5a6ffe4c51aa372252430aa1
[ "WTFPL" ]
null
null
null
INIT.py
db260179/mullion
3ed599f624ab308f5a6ffe4c51aa372252430aa1
[ "WTFPL" ]
null
null
null
INIT.py
db260179/mullion
3ed599f624ab308f5a6ffe4c51aa372252430aa1
[ "WTFPL" ]
null
null
null
# Thanks to zecoxao and flatz <3

import struct
from binascii import unhexlify as uhx
from binascii import hexlify as hx

from Crypto.Cipher import AES
from Crypto.Hash import SHA, HMAC, CMAC

import os
import sys

EID1KEYS = [
    '88228B0F92C4C36AF097F1FE948D27CE',
    '5794BC8C2131B1E3E7EC61EF14C32EB5',
]

INITKEYS = [
    '48FF6BFA9C172C6E14AE444419CAF676'
]

ZEROS128 = ['00000000000000000000000000000000']


def aes_decrypt_cbc(key, iv, input):
    return AES.new(key, AES.MODE_CBC, iv).decrypt(input)


def aes_decrypt_ecb(key, input):
    return AES.new(key, AES.MODE_ECB).decrypt(input)


def aes_encrypt_cbc(key, iv, input):
    return AES.new(key, AES.MODE_CBC, iv).encrypt(input)


def main(argc, argv):
    with open(sys.argv[1], 'rb') as f:
        data = f.read()

    data1 = data[0x2A0:0x2B0]
    data2 = data[0x2B0:0x2C0]
    data3 = data[0x2C0:0x2D0]
    data4 = data[0x2D0:0x2E0]
    data5 = data[0x2E0:0x300]
    data6 = data[0x300:0x320]
    data7 = data[0x320:0x340]
    data8 = data[0x340:0x360]

    eid1 = data[0x10:0x290]
    hash = data[0x290:0x2A0]

    # Verify the stored hash against a CMAC over the EID1 blob.
    cmac1 = CMAC.new(uhx(EID1KEYS[0]), ciphermod=AES)
    cmac1.update(eid1)
    print(hx(hash))
    print(cmac1.hexdigest())

    sexy = aes_decrypt_cbc(uhx(EID1KEYS[0]), uhx(ZEROS128[0]), eid1)
    keyseed = sexy[:0x10]

    # Chain the key seed through repeated AES-CBC encryptions to derive per-section keys.
    pck1 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), keyseed)
    pck2 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck1)
    pck3 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck2)
    pck4 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck3)
    pck5 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck4)
    pck6 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck5)
    pck7 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck6)
    pck8 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck7)
    pck9 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck8)
    pck10 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck9)
    pck11 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck10)
    pck12 = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), pck11)

    data1_stage1 = aes_decrypt_ecb(pck1, data1)
    data2_stage1 = aes_decrypt_ecb(pck2, data2)
    data3_stage1 = aes_decrypt_ecb(pck3, data3)
    data4_stage1 = aes_decrypt_ecb(pck4, data4)

    hash1 = data5[0x10:]
    body1 = data5[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body1)
    print(hx(hash1))
    print(cmac1.hexdigest())

    hash2 = data6[0x10:]
    body2 = data6[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body2)
    print(hx(hash2))
    print(cmac1.hexdigest())

    hash3 = data7[0x10:]
    body3 = data7[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body3)
    print(hx(hash3))
    print(cmac1.hexdigest())

    hash4 = data8[0x10:]
    body4 = data8[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body4)
    print(hx(hash4))
    print(cmac1.hexdigest())

    data5_stage1 = aes_decrypt_ecb(pck1, body1)
    data6_stage1 = aes_decrypt_ecb(pck1, body2)
    data7_stage1 = aes_decrypt_ecb(pck1, body3)
    data8_stage1 = aes_decrypt_ecb(pck1, body4)

    with open(sys.argv[1] + '.eid1.dec.bin', 'wb') as g:
        g.write(sexy)
    with open(sys.argv[1] + '.init.dec.bin', 'wb') as g:
        g.write(data1_stage1 + data2_stage1 + data3_stage1 + data4_stage1
                + data5_stage1 + data6_stage1 + data7_stage1 + data8_stage1)


if __name__ == '__main__':
    main(len(sys.argv), sys.argv)
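A quick self-contained round trip through the CBC helpers' PyCryptodome API; the key, zero IV, and plaintext below are arbitrary test values, not console keys:

from Crypto.Cipher import AES

key = bytes.fromhex('000102030405060708090A0B0C0D0E0F')
iv = bytes(16)                    # all-zero IV, as the script uses
pt = b'sixteen byte msg'          # one AES block of plaintext

ct = AES.new(key, AES.MODE_CBC, iv).encrypt(pt)
assert AES.new(key, AES.MODE_CBC, iv).decrypt(ct) == pt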
38.035714
129
0.559155
502
4,260
4.595618
0.219124
0.05635
0.073255
0.073255
0.398786
0.328132
0.328132
0.301691
0.301691
0.034677
0
0.135986
0.321596
4,260
112
130
38.035714
0.662284
0.007042
0
0.1
0
0
0.040797
0.031083
0
0
0.032783
0
0
1
0.044444
false
0
0.077778
0.033333
0.155556
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74c33b8ce76256cd1afd8372053d4ddff47c2092
1,078
py
Python
src/SortingSim/stick.py
berkayaslan/Sorting_Simulation
16cfcd404063b060191dab244025012271edacd8
[ "MIT" ]
2
2020-01-26T09:42:03.000Z
2020-05-26T13:57:02.000Z
src/SortingSim/stick.py
berkayaslan/Sorting_Simulation
16cfcd404063b060191dab244025012271edacd8
[ "MIT" ]
null
null
null
src/SortingSim/stick.py
berkayaslan/Sorting_Simulation
16cfcd404063b060191dab244025012271edacd8
[ "MIT" ]
null
null
null
class Stick:
    def __init__(self, length=None, location=None):
        self._length = length
        self._loc = location
        self._id = id(self)
        self._color = None
        # fixme: load the data via the setter methods!

    @property
    def length(self):
        return self._length

    @property
    def location(self):
        return self._loc

    @property
    def o_id(self):
        return self._id

    @property
    def color(self):
        return self._color

    @length.setter
    def length(self, value):
        self._length = value

    @location.setter
    def location(self, location):
        self._loc = location

    @color.setter
    def color(self, value: tuple):
        if value[0] <= 255 and value[1] <= 255 and value[2] <= 255 \
                and value[0] >= 0 and value[1] >= 0 and value[2] >= 0:
            self._color = value
        else:
            self._color = (255, 255, 255)


if __name__ == "__main__":
    stick = Stick()
    stick.length = 120  # assign through the property setters
    stick.location = 0
    print(stick.length, stick.location)
19.962963
70
0.569573
132
1,078
4.462121
0.257576
0.067912
0.095076
0
0
0
0
0
0
0
0
0.04235
0.320965
1,078
53
71
20.339623
0.762295
0.048237
0
0.166667
0
0
0.007813
0
0
0
0
0.018868
0
1
0.222222
false
0
0
0.111111
0.361111
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
0
0
0
3