text string | size int64 | token_count int64 |
|---|---|---|
import pyxel
WIDTH = 128   # window width in pixels
HEIGHT = 128  # window height in pixels
IMG_NO = 0    # pyxel image bank that holds the sprite sheet
class App:
    """Minimal pyxel application: a 16x16 sprite follows the mouse cursor."""

    # Sprite position before the first update tick runs.
    my_x = 0
    my_y = 0

    def __init__(self):
        # Open the window, load the sprite resource, then enter the game loop
        # (pyxel.run blocks until the window is closed).
        pyxel.init(WIDTH, HEIGHT)
        pyxel.load("mychara.pyxres")
        pyxel.run(self.update, self.draw)

    def update(self):
        # Track the mouse cursor each frame.
        self.my_x, self.my_y = pyxel.mouse_x, pyxel.mouse_y

    def draw(self):
        # White background, then blit the sprite; color 0 is the transparency key.
        pyxel.cls(7)
        pyxel.blt(self.my_x, self.my_y, IMG_NO, 0, 0, 16, 16, 0)


App()
| 433 | 210 |
from .models import PurchaseOrder, PurchaseOrderLine
from .serializers import PurchaseOrderSerializer, PurchaseOrderLineSerializer
from rest_framework import viewsets, permissions
class PurchaseOrderViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for PurchaseOrder; requires an authenticated user."""
    queryset = PurchaseOrder.objects.all()
    serializer_class = PurchaseOrderSerializer
    permission_classes = [permissions.IsAuthenticated]
class PurchaseOrderLineViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for PurchaseOrderLine; requires an authenticated user."""
    queryset = PurchaseOrderLine.objects.all()
    serializer_class = PurchaseOrderLineSerializer
    permission_classes = [permissions.IsAuthenticated]
| 588 | 155 |
# Author: Karl Gemayel
# Created: 8/5/20, 8:25 AM
import logging
import math
import os
from textwrap import wrap
import pandas as pd
from typing import *
import seaborn
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from mg_general import Environment
from mg_io.general import load_obj, save_obj
from mg_viz.colormap import ColorMap as CM
from mg_general.general import next_name, get_value
from mg_stats.small import _helper_join_reference_and_tidy_data, prl_join_reference_and_tidy_data
from mg_viz.general import set_size
from mg_viz.shelf import number_formatter, update_tool_names_to_full
logger = logging.getLogger(__name__)
def case_insensitive_match(df, col, value):
    # type: (pd.DataFrame, str, str) -> pd.Series
    """Return a boolean mask of rows where df[col] equals value, ignoring case."""
    # .str.lower() is vectorized (and NaN-safe) unlike the former
    # .apply(lambda x: x.lower()), which raised on non-string cells.
    return df[col].str.lower() == value.lower()
def plot_gc_stats_side_by_side(env, df_tidy, columns, tool_order, reference, **kwargs):
    # type: (Environment, pd.DataFrame, List[str], List[str], str, Dict[str, Any]) -> None
    """Plot one lowess-regression panel per metric column, with GC on the x-axis.

    Each tool in tool_order (except the reference) contributes one regression
    line per panel. The figure is saved under env["pd-work"] and also shown.

    Keyword arguments:
        col_to_ylim: dict mapping column name -> (ymin, ymax)
        col_wrap: panels per row (default: all columns on one row)
        wrap_val: wrap width for y-axis labels (default: no wrapping)
        figsize: figure size (default scales with the grid)
        col_x: dataframe column used for the x-axis (default "Genome GC")
        col_x_text: x-axis label text (default "GC")
        legend_cols: number of legend columns
        legend_pos: "bottom" or "right" (bottom layout is currently forced)
    """
    col_to_ylim = get_value(kwargs, "col_to_ylim", dict())
    col_wrap = get_value(kwargs, "col_wrap", len(columns))
    num_rows = math.ceil(len(columns) / float(col_wrap))
    wrap_val = get_value(kwargs, "wrap_val", None)
    figsize = get_value(kwargs, "figsize", (8 * col_wrap, 6 * num_rows))
    col_x = get_value(kwargs, "col_x", "Genome GC")
    # FIX: previously read the "col_x" key here, so passing col_x also
    # silently overwrote the axis label text.
    col_x_text = get_value(kwargs, "col_x_text", "GC")
    legend_cols = get_value(kwargs, "legend_cols", len(tool_order))
    legend_pos = get_value(kwargs, "legend_pos", "bottom")
    fig, axes = plt.subplots(num_rows, col_wrap, figsize=figsize)
    reg_kws = {"lowess": True, "scatter_kws": {"s": 0.1, "alpha": 0.3},
               "line_kws": {"linewidth": 1}}
    from collections import abc
    axes_unr = axes  # keep the unraveled grid for align_ylabels below
    if not isinstance(axes, abc.Iterable):
        axes = [axes]
    else:
        axes = axes.ravel()
    ax = None
    i = j = 0  # (row, column) position of the current panel
    fontsize = "small"
    for ax, col in zip(axes, columns):
        for t in tool_order:
            if t.lower() == reference.lower():
                continue  # never plot the reference against itself
            df_curr = df_tidy[case_insensitive_match(df_tidy, "Tool", t)]
            seaborn.regplot(
                df_curr[col_x], df_curr[col], label=t, color=CM.get_map("tools")[t.lower()],
                **reg_kws, ax=ax
            )
            if col in col_to_ylim:
                ax.set_ylim(*col_to_ylim[col])
            # Large counts get compact thousands-style tick formatting.
            if max(df_curr[col]) > 2000:
                ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
        # Only the bottom row keeps an x-axis label.
        if i != num_rows - 1:
            ax.set_xlabel("")
        else:
            ax.set_xlabel(col_x_text, fontsize=fontsize)
        if wrap_val:
            col_text = "\n".join(wrap(col, wrap_val, break_long_words=False))
        else:
            col_text = col
        ax.set_ylabel(col_text, wrap=True, fontsize=fontsize)
        ax.tick_params(labelsize=fontsize, length=2)
        j += 1
        if j == col_wrap:
            i += 1
            j = 0
    if ax is not None:
        if legend_pos == "bottom":
            fig.subplots_adjust(bottom=0.2)
        else:
            fig.subplots_adjust(right=0.8)
        handles, labels = ax.get_legend_handles_labels()
        labels = update_tool_names_to_full(labels)
        # NOTE: the right-side legend variant was disabled with an
        # `or True` guard; the bottom legend is always used.
        leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=legend_cols,
                         bbox_transform=fig.transFigure, frameon=False,
                         fontsize="xx-small")
        for lh in leg.legendHandles:
            lh.set_alpha(1)
            lh.set_sizes([18] * (len(tool_order)))
    if num_rows > 1:
        for i in range(col_wrap):
            fig.align_ylabels(axes_unr[:, i])
    # Leave room at the bottom for the figure-level legend.
    if num_rows == 1:
        fig.tight_layout(rect=[0, 0.05, 1, 1])
    else:
        fig.tight_layout(rect=[0, 0.1, 1, 1])
    fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,))  # bbox_inches='tight'
    plt.show()
def reorder_pivot_by_tool(df_pivoted, tool_order):
    # type: (pd.DataFrame, List[str]) -> pd.DataFrame
    """Reorder a two-level (value, tool) column pivot so tools follow tool_order.

    The tool level is temporarily promoted to the outer column level, columns
    are selected in tool_order (upper-cased), the original level order is
    restored, and columns are regrouped by value while keeping the tool order.
    """
    # Keyword axis/level arguments replace the old positional form, which was
    # deprecated in pandas 1.1 and removed in pandas 2.0.
    by_tool = df_pivoted.reorder_levels([1, 0], axis=1)
    selected = by_tool[[x.upper() for x in tool_order]]
    return selected.reorder_levels([1, 0], axis=1).sort_index(
        axis=1, level=0, sort_remaining=False)
def stats_large_3p_reference(env, df_tidy, reference, **kwargs):
    # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None
    """Write per-clade 3' sensitivity/specificity as a CSV under env["pd-work"].

    Gene counts are summed per (Clade, Tool) before the rates are derived,
    then the table is pivoted to one row per clade with one (metric, tool)
    column pair per tool.
    """
    tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique()))
    # if reference not in tool_order:
    #     tool_order = [reference] + tool_order
    # number of genes per clade
    df_grouped = df_tidy.groupby(["Clade", "Tool"], as_index=False).sum()
    # 3' level: Found/Reference = sensitivity, Found/Predictions = specificity.
    df_grouped["Sensitivity"] = df_grouped["Number of Found"] / df_grouped["Number in Reference"]
    df_grouped["Specificity"] = df_grouped["Number of Found"] / df_grouped["Number of Predictions"]
    # df_pivoted = reorder_pivot_by_tool(
    #     df_grouped.pivot(index=["Clade", "Number in Reference"], columns="Tool", values=["Sensitivity", "Specificity"]), tool_order
    # )
    # pivot_table + reset_index(level=1) keeps "Number in Reference" as a
    # plain column in the written CSV.
    df_pivoted = reorder_pivot_by_tool(df_grouped.pivot_table(
        index=["Clade", "Number in Reference"], columns="Tool",
        values=["Sensitivity", "Specificity"]).reset_index(
        level=1),
        tool_order)
    df_pivoted.to_csv(
        next_name(env["pd-work"], ext="csv")
    )
def stats_large_5p_overall(env, df_tidy, reference, **kwargs):
    # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None
    """Write the per-clade 5' (gene-start) error rate as a CSV under env["pd-work"].

    Counts are summed per (Clade, Tool); Error Rate = errors / found genes.
    """
    tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique()))
    # if reference not in tool_order:
    #     tool_order = [reference] + tool_order
    # number of genes per clade
    df_grouped = df_tidy.groupby(["Clade", "Tool"], as_index=False).sum()
    df_grouped["Error Rate"] = df_grouped["Number of Error"] / df_grouped["Number of Found"]
    # One row per clade, one Error Rate column per tool (tool order enforced).
    df_pivoted = reorder_pivot_by_tool(df_grouped.pivot_table(
        index=["Clade", "Number in Reference"], columns="Tool",
        values=["Error Rate"]).reset_index(
        level=1),
        tool_order)
    df_pivoted.to_csv(
        next_name(env["pd-work"], ext="csv")
    )
def viz_stats_large_3p_sn_sp(env, df_tidy, reference, **kwargs):
    # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None
    """Render the three side-by-side GC plots for 3' sensitivity/specificity.

    Adds derived 3' FN/FP error-rate columns to df_tidy (mutating the
    caller's frame), then draws three figure variants with different panel
    selections, y-limits and legend layouts.
    """
    tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique()))
    df_tidy["3' FN Error Rate"] = 1- df_tidy["Sensitivity"]
    df_tidy["3' FP Error Rate"] = 1 - df_tidy["Specificity"]
    # 2x2 grid: rates plus raw prediction counts.
    plot_gc_stats_side_by_side(
        env, df_tidy, ["Sensitivity", "Specificity", "Number of Found", "Number of Predictions"],
        tool_order, reference, col_wrap=2, wrap_val=10, figsize=set_size(433.62001, subplots=(2,2), legend="bottom"),
        col_to_ylim={"Specificity": (0.5, 1), "Sensitivity": (0.5, 1)}
    )
    # 1x2 grid: the two error rates only.
    plot_gc_stats_side_by_side(
        env, df_tidy, ["3' FN Error Rate", "3' FP Error Rate"],
        tool_order, reference, col_wrap=2, wrap_val=10, figsize=set_size(433.62001, subplots=(1, 2), legend="bottom"),
        col_to_ylim={"3' FN Error Rate": (0, 0.2), "3' FP Error Rate": (0, 0.2)},
        legend_cols = math.ceil(len(tool_order)), legend_pos="right"
    )
    # 1x2 grid: zoomed-in sensitivity/specificity.
    plot_gc_stats_side_by_side(
        env, df_tidy, ["Sensitivity", "Specificity"],
        tool_order, reference, col_wrap=2, wrap_val=10, figsize=set_size(433.62001, subplots=(1, 2), legend="bottom"),
        col_to_ylim={"Sensitivity": (0.8, 1), "Specificity": (0.8, 1)},
        legend_cols=math.ceil(len(tool_order)), legend_pos="right"
    )
def stats_large_3p_predictions_vs_found(env, df_tidy, reference, **kwargs):
    # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None
    """Plot prediction counts vs. found-gene counts (plus specificity) over GC."""
    tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique()))
    plot_gc_stats_side_by_side(
        env, df_tidy, ["Number of Predictions", "Number of Found", "Specificity"], tool_order, reference,
        col_to_ylim={"Specificity": (0.5, 1), "Sensitivity": (0.5, 1)}
    )
def viz_stats_large_5p_error_vs_sensitivity(env, df_tidy, reference, **kwargs):
    # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None
    """Plot the 5' (gene-start) error rate next to the 3' miss rate, versus GC.

    Mutates df_tidy by adding the derived rate columns, renders the same panel
    pair twice with alternative label wording, and prints per-tool mean/sum
    summaries to stdout as CSV.
    """
    tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique()))
    df_tidy["Gene Start Error Rate"] = df_tidy["Number of Error"] / df_tidy["Number of Found"]  # FIXME: compute before
    df_tidy["1 - Sensitivity"] = 1 - df_tidy["Sensitivity"]
    df_tidy["3' FN Error Rate"] = 1 - df_tidy["Sensitivity"]
    plot_gc_stats_side_by_side(
        env, df_tidy, ["Gene Start Error Rate", "1 - Sensitivity"], tool_order, reference,
        col_wrap=2, wrap_val=15, figsize=set_size("thesis", subplots=(1, 2), legend="bottom"),
        col_to_ylim={"Specificity": (0.5, 1), "Gene Start Error Rate": (0, 0.3), "1 - Sensitivity": (0, 0.15)}
    )
    # Same data re-plotted with the alternative "5'" label wording.
    df_tidy["Gene 5' Error Rate"] = df_tidy["Gene Start Error Rate"]
    plot_gc_stats_side_by_side(
        env, df_tidy, ["Gene 5' Error Rate", "3' FN Error Rate"], tool_order, reference,
        col_wrap=2, wrap_val=10, figsize=set_size("thesis", subplots=(1, 2), legend="bottom"),
        col_to_ylim={"Specificity": (0.5, 1), "Gene 5' Error Rate": (0, 0.3), "3' FN Error Rate": (0, 0.15)}
    )
    # Console summaries of the per-tool averages and totals.
    print(df_tidy.groupby("Tool", as_index=False).mean().to_csv(index=False))
    print(df_tidy.groupby("Tool", as_index=False).sum().to_csv(index=False))
def viz_stats_large_5p_error_vs_gc_by_clade(env, df_tidy, reference, **kwargs):
    # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> None
    """Plot the 5' error rate versus genome GC, one panel per clade.

    Each panel holds one lowess regression line per tool (the reference is
    skipped). The figure is saved under env["pd-work"] and also shown.
    Mutates df_tidy by adding the "Error Rate" column.
    """
    tool_order = get_value(kwargs, "tool_order", sorted(df_tidy["Tool"].unique()))
    df_tidy["Error Rate"] = df_tidy["Number of Error"] / df_tidy["Number of Found"]
    clades_sorted = sorted(df_tidy["Clade"].unique())
    num_clades = len(clades_sorted)
    num_rows = 2
    subplots = (num_rows, math.ceil(num_clades / float(num_rows)))
    figsize = set_size("thesis", subplots=subplots, legend="bottom", titles=True)
    col_x = "Genome GC"
    col_x_text = "GC"
    fig, axes = plt.subplots(subplots[0], subplots[1], figsize=figsize, sharex="all", sharey="all")
    reg_kws = {"lowess": True, "scatter_kws": {"s": 0.1, "alpha": 0.3},
               "line_kws": {"linewidth": 1}}
    from collections import abc
    axes_unr = axes
    if not isinstance(axes, abc.Iterable):
        axes = [axes]
    else:
        axes = axes.ravel()
    ax = None
    fontsize = "xx-small"
    counter = 0
    for ax, col in zip(axes, clades_sorted):
        for t in tool_order:
            if t.lower() == reference.lower():
                continue  # never plot the reference against itself
            df_curr = df_tidy[case_insensitive_match(df_tidy, "Tool", t)]
            df_curr = df_curr[df_curr["Clade"] == col]
            seaborn.regplot(
                df_curr[col_x], df_curr["Error Rate"], label=t, color=CM.get_map("tools")[t.lower()],
                **reg_kws, ax=ax
            )
            if max(df_curr["Error Rate"]) > 2000:
                ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
        ax.set_xlabel(col_x_text, fontsize=fontsize)
        ax.set_title(col, fontsize=fontsize)
        ax.set_ylabel("Error Rate", wrap=True, fontsize=fontsize)
        ax.tick_params(labelsize=fontsize, length=2)
        # NOTE(review): `counter` is never incremented, so every panel keeps
        # its y-label; if only the first panel should be labeled, increment it
        # at the end of this loop — confirm intent before changing visuals.
        if counter == 0:
            ax.set_ylabel("Error Rate", wrap=True, fontsize=fontsize)
        else:
            ax.set_ylabel("")
    if ax is not None:
        fig.subplots_adjust(bottom=0.2)  # leave space for the figure legend
        handles, labels = ax.get_legend_handles_labels()
        labels = update_tool_names_to_full(labels)
        leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=len(tool_order),
                         bbox_transform=fig.transFigure, frameon=False,
                         fontsize=fontsize)
        for lh in leg.legendHandles:
            lh.set_alpha(1)
            lh.set_sizes([18] * (len(tool_order)))
    # FIX: the single-row branch passed rect=[0, 0.05, 1, ] (3 elements);
    # tight_layout requires a 4-element (left, bottom, right, top) rect and
    # raised on unpacking.
    if num_rows == 1:
        fig.tight_layout(rect=[0, 0.05, 1, 1])
    else:
        fig.tight_layout(rect=[0, 0.1, 1, 1])
    fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,))  # bbox_inches='tight'
    plt.show()
def viz_stats_large_3p(env, df_per_gene, tools, list_ref, **kwargs):
    """Run all 3'-end (gene stop) statistics and visualizations.

    Tidy data is either loaded from an optional checkpoint file or computed
    from df_per_gene (and then checkpointed). The "MGM2_AUTO" tool label is
    normalized to "MGM2" before the individual reports are generated.
    """
    pf_checkpoint = get_value(kwargs, "pf_checkpoint", None)
    if pf_checkpoint and os.path.isfile(pf_checkpoint):
        # Reuse previously computed tidy data.
        reference, df_tidy = load_obj(pf_checkpoint)
    else:
        reference, df_tidy = prl_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref)
        if pf_checkpoint:
            save_obj([reference, df_tidy], pf_checkpoint)

    # Normalize the auto-selected MGM2 label everywhere it can appear.
    df_tidy.loc[df_tidy["Tool"] == "MGM2_AUTO", "Tool"] = "MGM2"
    reference = reference.replace("MGM2_AUTO", "MGM2")
    tools = ["MGM2" if t.upper() == "MGM2_AUTO" else t for t in tools]

    # Reference stats
    stats_large_3p_reference(env, df_tidy, reference, tool_order=tools)
    # Number of predictions versus number found
    stats_large_3p_predictions_vs_found(env, df_tidy, reference, tool_order=tools)
    # Sensitivity and specificity
    viz_stats_large_3p_sn_sp(env, df_tidy, reference, tool_order=tools)
def viz_stats_large_5p(env, df_per_gene, tools, list_ref, **kwargs):
    """Run all 5'-end (gene start) statistics and visualizations.

    Mirrors viz_stats_large_3p: tidy data is loaded from an optional
    checkpoint or computed (then checkpointed), and the "MGM2_AUTO" label is
    normalized to "MGM2" before reporting.
    """
    pf_checkpoint = get_value(kwargs, "pf_checkpoint", None)
    if pf_checkpoint and os.path.isfile(pf_checkpoint):
        # Reuse previously computed tidy data.
        reference, df_tidy = load_obj(pf_checkpoint)
    else:
        reference, df_tidy = prl_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref)
        if pf_checkpoint:
            save_obj([reference, df_tidy], pf_checkpoint)

    # Normalize the auto-selected MGM2 label everywhere it can appear.
    df_tidy.loc[df_tidy["Tool"] == "MGM2_AUTO", "Tool"] = "MGM2"
    reference = reference.replace("MGM2_AUTO", "MGM2")
    tools = ["MGM2" if t.upper() == "MGM2_AUTO" else t for t in tools]

    stats_large_5p_overall(env, df_tidy, reference, tool_order=tools)
    # Number of found vs number of 5' error
    viz_stats_large_5p_error_vs_sensitivity(env, df_tidy, reference, tool_order=tools)
    viz_stats_large_5p_error_vs_gc_by_clade(env, df_tidy, reference, tool_order=tools)
| 15,253 | 5,626 |
from flask import Flask, jsonify, abort, make_response
from flask_restful import Api, Resource, reqparse, fields, marshal, abort
from py2neo import *
import json
from flask_cors import CORS
from ipaddress import *
import werkzeug
import os
from netaddr import *
import re
import random
import time
import CSVSplit_generalised_v3
import logging
import RawRuleslist
import configparser
import sqlite3
# Config object to pull credentials and paths from the conf file.
config = configparser.ConfigParser()
config.read('conf/creds.ini')
# UPLOAD_FOLDER = 'uploads/'
UPLOAD_FOLDER = config.get('uploads', 'UPLOAD_FOLDER')
db_location=config.get('sqliteDB', 'database_folder')
# Gets or creates a logger
logger = logging.getLogger(__name__)
# set log level
logger.setLevel(logging.INFO)
dirLogFolder = config.get('logs', 'LOGS_FOLDER')
# Create target Directory if don't exist
if not os.path.exists(dirLogFolder):
    os.mkdir(dirLogFolder)
    print("[*] Directory \'"+dirLogFolder+"\' Created ")
else:
    print("[*] Directory \'"+dirLogFolder+"\' already exists")
# define file handler and set formatter
# NOTE(review): the '\\' separator assumes Windows paths; os.path.join would
# make this portable — confirm deployment target before changing.
LOG_FILE = config.get('logs', 'LOGS_FOLDER')+'\\sample.log'
file_handler = logging.FileHandler(LOG_FILE)
formatter = logging.Formatter(
    '%(asctime)s | %(levelname)s | %(name)s | %(funcName)s | :%(lineno)s | %(message)s', datefmt='%y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
# %(filename)s:%(lineno)s - %(funcName)20s()
# add file handler to logger
logger.addHandler(file_handler)
def updateriskconfig(onecolumn, twocolumns, threecolumns):
    """Persist the three "Any"-column risk weights into conf/creds.ini."""
    new_values = {
        "onecolumn": onecolumn,
        "twocolumns": twocolumns,
        "threecolumns": threecolumns,
    }
    for key, val in new_values.items():
        config.set("riskconfigAny", key, val)
    # Write the whole config file back so the change survives restarts.
    with open('conf/creds.ini', 'w') as configfile:
        config.write(configfile)
    return 'updated'
def retrieveriskconfig():
    """Read the six risk-weight settings from the [riskconfigAny] section.

    Returns (onecolumn, twocolumns, threecolumns, insecureriskvalue,
    itoeriskvalue, etoiriskvalue) as raw config strings.
    """
    keys = ('onecolumn', 'twocolumns', 'threecolumns',
            'insecureriskvalue', 'itoeriskvalue', 'etoiriskvalue')
    return tuple(config.get('riskconfigAny', key) for key in keys)
def segregateIandE(db_name):
    # type: (str) -> None
    """Flag every object in the `netobj` table as Internal or External.

    Each row's address — a plain IP, a CIDR built from the Mask column, or an
    "a - b" range — is parsed with netaddr and classified via is_private();
    the matching Internal/External column is then set to 'yes' through
    RawRuleslist.UpdateTable.
    """
    table_name = "netobj"
    allrows = RawRuleslist.ReadSqlitenetobj(db_name, table_name)
    for x in allrows:
        idvalue = x['Name']
        ipvalue = x['IPv4']
        mask = x['Mask']
        riskvalue = 'yes'
        if x['Mask'] != 'NA':
            print('NA is not there')
            # NOTE(review): the CIDR is built before the range check below, so
            # a row holding both a Mask and an "a - b" range would fail at
            # IPNetwork() — confirm such rows cannot occur upstream.
            cip = ipvalue+"/"+mask
            # ipnetwork=IPNetwork[cip]
            ipnetwork = IPNetwork(cip)
            ip = ipnetwork
            if "-" in ipvalue:
                print("- is there")
                ipranges = ipvalue.split('-')
                iprange = IPRange(ipranges[0].strip(), ipranges[1].strip())
                ip = iprange
        else:
            print('NA is there')
            # ip=IPAddress[ipvalue]
            ip = IPAddress(ipvalue)
        # print(ip_address(var_121).is_private)
        # netaddr objects expose is_private() as a method call.
        if ip.is_private():
            column_name = "Internal"
            riskvalue = "\'yes\'"
            id_column = "Name"
            idvalue = "\'"+idvalue+"\'"
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
        else:
            column_name = "External"
            riskvalue = "\'yes\'"
            id_column = "Name"
            idvalue = "\'"+idvalue+"\'"
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
def segregateIntExtConn(db_name, table_name):
    # type: (str, str) -> None
    """Classify each Accept rule's direction: Ext/Int source to Ext/Int dest.

    A rule side counts as "external" when one of its ';'-separated object
    names appears among netobj rows flagged External='yes'. The matching
    direction column (ExttoExt/ExttoInt/InttoExt/InttoInt) on the rule row is
    set to 'yes'. Exceptions are logged, not propagated.

    NOTE(review): the original source's indentation was lost; the nesting of
    the break/else below is the most plausible reconstruction — verify
    against the authoritative copy.
    """
    try:
        allrows = RawRuleslist.ReadSqlite(db_name, table_name)
        sqlite_file = db_location+"\\\\"+db_name+'.db'
        value = "\'yes\'"
        # Connecting to the database file
        conn = sqlite3.connect(sqlite_file)
        conn.row_factory = lambda cursor, row: row[0]  # bare values, not tuples
        c = conn.cursor()
        tablename2 = "netobj"
        colname2 = "External"
        # query to get rows which has External=yes
        c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename2, cn=colname2, val=value))
        queryresult2 = c.fetchall()
        logger.info("queryresults")
        for x in allrows:
            idvalue = x['No']
            if x['Action'] == "Accept":
                individualsource1 = str(x['Source']).split(';')
                for xy in individualsource1:
                    if xy in queryresult2:
                        # External source: decide Ext-to-Ext vs Ext-to-Int per destination.
                        individualdestination1 = str(x['Destination']).split(';')
                        for xz in individualdestination1:
                            if xz in queryresult2:
                                column_name = "ExttoExt"
                                riskvalue = "\'yes\'"
                                id_column = "No"
                                # idvalue = x['Name']
                                # idvalue = "\'"+idvalue+"\'"
                                table_name = table_name
                                RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
                                break  # one external destination is enough
                            else:
                                column_name = "ExttoInt"
                                riskvalue = "\'yes\'"
                                id_column = "No"
                                # idvalue = "\'"+idvalue+"\'"
                                table_name = table_name
                                RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
                    else:
                        # Internal source: decide Int-to-Ext vs Int-to-Int per destination.
                        individualdestination1 = str(x['Destination']).split(';')
                        for xz in individualdestination1:
                            if xz in queryresult2:
                                column_name = "InttoExt"
                                riskvalue = "\'yes\'"
                                id_column = "No"
                                # idvalue = x['Name']
                                # idvalue = "\'"+idvalue+"\'"
                                table_name = table_name
                                RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
                            else:
                                column_name = "InttoInt"
                                riskvalue = "\'yes\'"
                                id_column = "No"
                                # idvalue = "\'"+idvalue+"\'"
                                table_name = table_name
                                RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
    except Exception as e:
        logger.exception("%s", e)
def riskcalculator(db_name, table_name):
    """Run the full risk pipeline: classify endpoints, directions, then score.

    Returns a success status dict; on failure the exception is logged and
    None is returned (matching the historical behavior).
    """
    try:
        segregateIandE(db_name)
        segregateIntExtConn(db_name, table_name)
        riskcalculator_parked(db_name, table_name)
    except Exception as e:
        logger.exception("%s", e)
        return None
    return {
        'data': '',
        'message': 'Risk updated!',
        'status': 'success',
    }
# class HeavyLifting():
def riskcalculator_parked(db_name, table_name):
    # type: (str, str) -> dict
    """Score every firewall rule and persist Risk / RiskReason per row.

    Three passes over the rules table:
      1. "Any" in Source/Destination/Service and insecure services add
         configured weights.
      2. Rules flagged InttoExt add a weight per non-approved outbound service.
      3. Rules flagged ExttoInt add a weight per non-approved inbound service.
    RiskReason accumulates "<pass>,<service>,<text>,<weight>;" fragments.
    Returns a success dict, or an error dict after logging any exception.
    """
    try:
        allrows = RawRuleslist.ReadSqlite(db_name, table_name)
        # if source, destination or service has any fields
        onecolumn = config.get('riskconfigAny', 'onecolumn')
        twocolumns = config.get('riskconfigAny', 'twocolumns')
        threecolumns = config.get('riskconfigAny', 'threecolumns')
        insecureriskvalue = config.get('riskconfigAny', 'insecureriskvalue')
        itoeriskvalue = config.get('riskconfigAny', 'itoeriskvalue')
        etoiriskvalue = config.get('riskconfigAny', 'etoiriskvalue')
        id_column = "No"
        sqlite_file = db_location+"\\\\"+db_name+'.db'
        value = "\'yes\'"
        # Connecting to the database file
        conn = sqlite3.connect(sqlite_file)
        conn.row_factory = lambda cursor, row: row[0]  # bare values, not tuples
        c = conn.cursor()
        tablename = "services"
        colname = "Insecure"
        # query to get rows which has insecure=yes
        # NOTE(review): table/column names here are local constants, not user
        # input, but string-built SQL is fragile — prefer parameterized queries.
        c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname, val=value))
        queryresult1 = c.fetchall()
        colname = "ItoE"
        # query to get services approved for Internal-to-External use
        c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname, val=value))
        queryresult2 = c.fetchall()
        colname = "EtoI"
        # query to get services approved for External-to-Internal use
        c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname, val=value))
        queryresult3 = c.fetchall()
        # Pass 1: "Any" columns and insecure protocols.
        for x in allrows:
            riskvalue = 0
            riskreason = ""
            idvalue = x['No']
            id_column = "No"
            # Any in columns- Risk assignment
            if x['Action'] == "Accept":
                if (x['Source'] == "Any" and x['Destination'] == "Any" and x['Service'] == "Any"):
                    riskvalue = riskvalue+int(threecolumns)
                    riskreason = riskreason+"1,-,"+"All three columns have Any "+","+str(threecolumns)+";"
                elif ((x['Source'] == "Any" and x['Destination'] == "Any") or (x['Destination'] == "Any" and x['Service'] == "Any") or (x['Service'] == "Any" and x['Source'] == "Any")):
                    riskvalue = riskvalue+int(twocolumns)
                    riskreason = riskreason+"1,-,"+"Two columns have Any "+","+str(twocolumns)+";"
                elif (x['Source'] == "Any" or x['Destination'] == "Any" or x['Service'] == "Any"):
                    riskvalue = riskvalue+int(onecolumn)
                    riskreason = riskreason+"1,-,"+"One column has Any"+","+str(onecolumn)+";"
            if x['Action'] == "Accept":
                individualservice = str(x['Service']).split(';')
                '''Insecure protocols- Risk assignment'''
                for xy in individualservice:
                    if xy in queryresult1:
                        riskvalue = riskvalue+int(insecureriskvalue)
                        riskreason = riskreason+"2"+","+xy+","+"Insecure proto"+","+str(insecureriskvalue)+";"
            column_name = "Risk"
            logger.info(riskreason)
            logger.info(riskvalue)
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
            column_name = "RiskReason"
            riskreason = "\'"+riskreason+"\'"
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskreason, id_column, idvalue)
        # Pass 2: internal-to-external rules using non-approved services.
        colname = "InttoExt"
        queryresult11 = RawRuleslist.ReadSqlitewSelected(db_name, table_name, colname)
        for x in queryresult11:
            riskvalue = x['Risk']
            riskreason = x['RiskReason']
            idvalue = x['No']
            id_column = "No"
            # riskreason = riskreason.replace("'", "")
            if x['Action'] == "Accept":
                individualservice = str(x['Service']).split(';')
                '''Internal to External connections- Risk assignment'''
                for xy in individualservice:
                    if xy not in queryresult2:
                        riskvalue = riskvalue+int(itoeriskvalue)
                        riskreason = riskreason+"3"+","+xy+","+"Int to Ext conn - non approved"+","+str(itoeriskvalue)+";"
            column_name = "Risk"
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
            column_name = "RiskReason"
            riskreason = "\'"+riskreason+"\'"
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskreason, id_column, idvalue)
        # Pass 3: external-to-internal rules using non-approved services.
        colname = "ExttoInt"
        queryresult12 = RawRuleslist.ReadSqlitewSelected(db_name, table_name, colname)
        for x in queryresult12:
            id_column = "No"
            idvalue = x['No']
            riskvalue = x['Risk']
            riskreason = x['RiskReason']
            # riskreason = riskreason.replace("'", "")
            if x['Action'] == "Accept":
                individualservice = str(x['Service']).split(';')
                logger.info(individualservice)
                '''External to Internal connections- Risk assignment'''
                for xy in individualservice:
                    if xy not in queryresult3:
                        riskvalue = riskvalue+int(etoiriskvalue)
                        riskreason = riskreason+"4"+","+xy+","+"Ext to Int conn - non approved"+","+str(etoiriskvalue)+";"
            column_name = "Risk"
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue, id_column, idvalue)
            column_name = "RiskReason"
            riskreason = "\'"+riskreason+"\'"
            RawRuleslist.UpdateTable(db_name, table_name, column_name, riskreason, id_column, idvalue)
        return {
            'data': '',
            'message': 'Risk updated!',
            'status': 'success'
        }
    except Exception as e:
        logger.exception("%s", e)
        return {
            'data': '',
            'message': 'Some error occured',
            'status': 'error'
        }
def getselectrules(statement):
    # type: (str) -> tuple
    """Run a Cypher query and group its rows for the rules view.

    Returns (raw_rows, finalgrouping, GrpNodes, message, status); on an empty
    neo4j result the groupings are empty lists and status is 'error'.
    """
    user = config.get('neo4j', 'user')
    password = config.get('neo4j', 'passwd')
    graph2 = Graph(host=config.get('neo4j', 'host'), auth=(user, password))
    output = graph2.run(statement).data()
    print(output)
    output1 = []
    if (output == []):
        finalgrouping = []
        GrpNodes = []
        logger.error("Error: Neo4j didnt return any output for the query")
        message = "Error: Neo4j didnt return any output for the query"
        status = 'error'
        print(message)
    else:
        output1.append(output)
        finalgrouping, GrpNodes = FinalGroupingv2(output1)
        message = "Query completed successfully"
        status = 'success'
        print(message)
    return(output1, finalgrouping, GrpNodes, message, status)
def uploadwithcustquery(statement):
    """Execute a write Cypher statement and return its summary statistics."""
    neo_auth = (config.get('neo4j', 'user'), config.get('neo4j', 'passwd'))
    neo = Graph(host=config.get('neo4j', 'host'), auth=neo_auth)
    return neo.run(statement).stats()
def getfwrulesneo4j( statement):
    """Fetch firewall rules from neo4j and insert them into the SQLite store.

    Returns the raw neo4j result rows that were inserted.
    """
    user = config.get('neo4j', 'user')
    password = config.get('neo4j', 'passwd')
    graph2 = Graph(host=config.get('neo4j', 'host'), auth=(user, password))
    output = graph2.run(statement).data()
    RawRuleslist.InsertTable(output)
    # FIX: the previous `return (rules)` referenced an undefined name and
    # always raised NameError; return the fetched rows instead.
    return output
def defaultrules( statement):
    """Run a read Cypher query and group its rows for visualization.

    Returns (rows, finalgrouping, GrpNodes) where rows wraps the raw result.
    """
    neo_auth = (config.get('neo4j', 'user'), config.get('neo4j', 'passwd'))
    neo = Graph(host=config.get('neo4j', 'host'), auth=neo_auth)
    rows = [neo.run(statement).data()]
    finalgrouping, GrpNodes = FinalGroupingv2(rows)
    return (rows, finalgrouping, GrpNodes)
def custquery( statement):
    # type: (str) -> tuple
    """Run a custom Cypher path query and rebuild explicit s/d/r triples.

    py2neo path results are parsed with regexes against their string form:
    node ids ("(_123)"), the text between nodes, and the relationship type
    ("[:NAME {") are extracted, node properties are re-fetched by id, and one
    {'s': ..., 'd': ..., 'r': ...} record is built per relationship, with the
    direction decided by the ">" arrow in the fragment.
    Returns (rows, finalgrouping) where rows wraps the rebuilt records.

    NOTE(review): indentation was lost in this copy; the loop nesting below
    (xx/yy reset per result row, prints at the marked levels) is a
    reconstruction — verify against the authoritative source.
    """
    user = config.get('neo4j', 'user')
    password = config.get('neo4j', 'passwd')
    graph = Graph(host=config.get('neo4j', 'host'), auth=(user, password))
    output = graph.run(statement).data()
    id_Node = []
    NoDup_id_Node = []
    Created_rels = []
    for rel in output:
        # "(_123)" -> node id; ")...(" -> relationship fragment; "[:NAME {" -> type.
        mi = re.compile("\([_]*(?P<grouping>[0-9]*)\)")
        mi_rels = re.compile("\)(?P<grouping>.*?)\(")
        mi_rels_only_name = re.compile("\).*?\[\:(?P<grouping>\w+)\s\{")
        tomatch = rel['r']
        m = mi.findall(str(tomatch))
        mi_rels_data = mi_rels.findall(str(tomatch))
        mi_rels_data_only_name = mi_rels_only_name.findall(str(tomatch))
        print("================mi_nodes_data==============")
        print(m)
        print("================mi_rels_data==============")
        # print(mi_rels_data)
        # Re-fetch each node's properties by its numeric id.
        for one in m:
            id_rels = {}
            intone = int(one)
            ab = graph.nodes.get(intone)
            id_rels['id'] = one
            id_rels['id_prop'] = ab
            id_Node.append(id_rels)
            # print (ab)
        for x in id_Node:
            if x not in NoDup_id_Node:
                NoDup_id_Node.append(x)
        # xx/yy index the node pair flanking relationship fragment i.
        xx = 0
        yy = 1
        for i in mi_rels_data:
            if ">" in i:
                create_rel = {}
                # print("forward")
                print(i)
                for id in NoDup_id_Node:
                    if m[xx] == id['id']:
                        create_rel['s'] = id['id_prop']
                    # for id in NoDup_id_Node:
                    if m[yy] == id['id']:
                        create_rel['d'] = id['id_prop']
                create_rel['r'] = mi_rels_data_only_name[xx]
                print("source: "+m[xx]+" destination: "+m[yy])
                print(create_rel)
                Created_rels.append(create_rel)
                print(
                    "======================Created_rels===========================")
                print(Created_rels)
                print(
                    "======================Created_rels===========================")
                # break
            else:
                # print("backward")
                # Reversed arrow: swap which node is source vs destination.
                create_rel = {}
                print(i)
                for id in NoDup_id_Node:
                    if m[yy] == id['id']:
                        create_rel['s'] = id['id_prop']  # Source
                    # for id in NoDup_id_Node:
                    if m[xx] == id['id']:
                        create_rel['d'] = id['id_prop']  # Destination
                # create_rel['r']="(_"+m[xx]+")"+i+"(_"+m[yy]+")"
                # print(i['name'])
                create_rel['r'] = mi_rels_data_only_name[xx]
                # create_rel['r']="(_"+m[xx]+")"+i+"(_"+m[yy]+")"
                print("source: "+m[yy]+" destination: "+m[xx])
                print(create_rel)
                Created_rels.append(create_rel)
                print(
                    "======================Created_rels===========================")
                print(Created_rels)
                print(
                    "======================Created_rels===========================")
                # break
            xx += 1
            yy += 1
    print("=!@#====== Create rels ==========!@#=")
    print(Created_rels)
    output1 = []
    output1.append(Created_rels)
    finalgrouping = FinalGrouping(output1)
    # print("################################################################3")
    # print(finalgrouping)
    return(output1, finalgrouping)
def allRels(statement):
    """Run a Cypher query and return every relationship row as a list of dicts."""
    creds = (config.get('neo4j', 'user'), config.get('neo4j', 'passwd'))
    neo = Graph(host=config.get('neo4j', 'host'), auth=creds)
    return neo.run(statement).data()
def allGroups(statement):
    """Run a Cypher query and return every group row as a list of dicts."""
    creds = (config.get('neo4j', 'user'), config.get('neo4j', 'passwd'))
    neo = Graph(host=config.get('neo4j', 'host'), auth=creds)
    return neo.run(statement).data()
def CreateGroup( statement):
    """Execute a group-creating Cypher statement and return the result rows."""
    creds = (config.get('neo4j', 'user'), config.get('neo4j', 'passwd'))
    neo = Graph(host=config.get('neo4j', 'host'), auth=creds)
    result_rows = neo.run(statement).data()
    return result_rows
def check( statement, checkip):
    # type: (str, str) -> tuple
    """Find host objects whose network/range contains checkip, plus their rules.

    Runs the given Cypher query for host nodes, parses each node's
    IPAddress/Mask (CIDR, plain address, or "a - b" range), keeps the ones
    containing checkip, then pulls every inbound and outbound relationship of
    those matches via a second graph connection.
    Returns (MatchRel, finalgrouping).
    """
    user = config.get('neo4j', 'user')
    password = config.get('neo4j', 'passwd')
    graph = Graph(host=config.get('neo4j', 'host'), auth=(user, password))
    output = graph.run(statement).data()
    Node = []
    NameAndIP = []
    NoDupNode = []
    MatchNetwork = []
    MatchNodes = []
    print("Printing Output")
    print(output)
    for item in output:
        nodes_source = {}
        name_ip = {}
        nodes_source['Name'] = item['n']['Name']
        nodes_source['IPAddress'] = item['n']['IPAddress']
        nodes_source['Mask'] = item['n']['Mask']
        nodes_source['Comments'] = item['n']['Comments']
        Node.append(nodes_source)
        # & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0")
        if (item['n']['Mask'] != "NA"):
            # Mask present: record the CIDR form.
            cip = item['n']['IPAddress']+"/"+item['n']['Mask']
            name_ip['Name'] = item['n']['Name']
            name_ip['IPAddress'] = item['n']['IPAddress']
            name_ip['Comments'] = item['n']['Comments']
            name_ip['Network'] = cip
            NameAndIP.append(name_ip)
        # & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0")
        if (item['n']['Mask'] == "NA"):
            # No mask: Network is the bare address (may also be an "a - b" range).
            name_ip['Name'] = item['n']['Name']
            name_ip['Network'] = item['n']['IPAddress']
            name_ip['Comments'] = item['n']['Comments']
            NameAndIP.append(name_ip)
    # tocheck_ip="194.127.24.66"
    # tocheck_ip="10.197.167.96"
    tocheck_ip = checkip
    # print("Printing tocheck_ip")
    # print(ip_network(tocheck_ip,strict=False))
    # print("Printing NameAndIP")
    # print(NameAndIP)
    # print(checkip)
    MatchRel = []
    for y in NameAndIP:
        if ("-" in y['Network']):
            # if m.group('IP_start')=="0.0.0.0" and m.group('IP_end')=="255.255.255.255":
            if y['Network'] == "0.0.0.0 - 255.255.255.255":
                MatchNetwork.append(y)  # catch-all range matches any address
            else:
                ip_range_to_match = y['Network']
                # NOTE(review): the leading "." in each group matches any single
                # character (not a literal dot), so a first octet of 2-3 digits
                # is required for a match — confirm and tighten the pattern.
                m = re.search(
                    "^(?P<IP_start>.\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+\-\s+(?P<IP_end>.\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$", ip_range_to_match)
                iprange = IPRange(m.group('IP_start'), m.group('IP_end'))
                if tocheck_ip in iprange:
                    MatchNetwork.append(y)
        else:
            # if (tocheck_ip == y['Network']):
            #     MatchNetwork.append(y)
            # if not for strict=false, typeerror will be raised as "with hostbits set"!
            ab = ip_network(y['Network'], strict=False)
            if (IPv4Address(tocheck_ip) in IPv4Network(ab)):
                # print(y) #print(y['Name'])
                MatchNetwork.append(y)
    print("Printing MatchNetwork")
    print(MatchNetwork)
    for z in MatchNetwork:
        # NOTE(review): this second connection uses a hard-coded password
        # instead of the conf/creds.ini values used above — confirm intentional.
        graph_z = Graph(password="myneo2")
        # statement="MATCH (s:Hosts {Name:'"+z['Name']+"'})-[r]-(d:Hosts) RETURN s,d,r"
        statement1 = "MATCH (s:Hosts)-[r]->(d:Hosts) WHERE s.Name='" + \
            z['Name']+"' RETURN s,d,r"
        statement2 = "MATCH (s:Hosts)-[r]->(d:Hosts) WHERE d.Name='" + \
            z['Name']+"' RETURN s,d,r"
        # MATCH p=(s:Hosts)-[r:"+searchterm+"]->(d:Hosts) RETURN s as source,d as target,r as service LIMIT 5 #to search
        print(statement1)
        output1 = graph_z.run(statement1).data()
        print(len(output1))
        print(output1)
        print(statement2)
        output2 = graph_z.run(statement2).data()
        print(len(output2))
        print(output2)
        print("Printing matched nodes relationships")
        # MatchNodes.append
        if output1 != []:
            MatchRel.append(output1)
        if output2 != []:
            MatchRel.append(output2)
    # out={}
    # out={"MatchNetwork":MatchNetwork}
    print("=====================================================================================")
    print(MatchRel)
    finalgrouping = FinalGrouping(MatchRel)
    print("finalgroupingtest1")
    print(finalgrouping)
    return(MatchRel, finalgrouping)
def Convert_to_IP_Network( output):
    """Flatten nested Neo4j relationship records into de-duplicated node dicts.

    Each record holds 's' (source) and 'd' (target) host maps with
    Name/IPAddress/Mask/Comments keys.  A 'Network' entry is derived for every
    node: the bare IPAddress when Mask is "NA", otherwise "IPAddress/Mask".
    Target networks are additionally normalised to ipaddress network objects
    (strict=False tolerates host bits being set); source networks are left as
    plain strings, mirroring the original behaviour.
    """
    collected = []
    for record_group in output:
        for record in record_group:
            src = {
                'Name': record['s']['Name'],
                'IPAddress': record['s']['IPAddress'],
                'Mask': record['s']['Mask'],
                'Comments': record['s']['Comments'],
            }
            if src['Mask'] == "NA":
                src['Network'] = src['IPAddress']
            else:
                src['Network'] = src['IPAddress'] + "/" + src['Mask']
            dst = {
                'Name': record['d']['Name'],
                'IPAddress': record['d']['IPAddress'],
                'Mask': record['d']['Mask'],
                'Comments': record['d']['Comments'],
            }
            if dst['Mask'] == "NA":
                dst['Network'] = dst['IPAddress']
            else:
                dst['Network'] = dst['IPAddress'] + "/" + dst['Mask']
            # Only the target side is converted to an ipaddress object.
            dst['Network'] = ip_network(dst['Network'], strict=False)
            collected.append(src)
            collected.append(dst)
    unique = []
    for node in collected:
        if node not in unique:
            unique.append(node)
    return unique
def FinalGrouping( finalarray):
    """Pair every host network with the Groups node(s) whose network contains it.

    Flattens *finalarray* via Convert_to_IP_Network, loads all Groups nodes
    from Neo4j, and returns a de-duplicated list of
    {ChildName, ParentName, Parent_IP, color} dicts for each host network
    that is a subnet of a group's network.
    """
    print("Printing final array")
    NameAndIP = Convert_to_IP_Network(finalarray)
    print(NameAndIP)
    ParentChild = []
    NoDupParentChild = []
    # NOTE(review): hard-coded local password; FinalGroupingv2 reads the
    # connection settings from config instead — confirm this variant is
    # still in use.
    graph = Graph(password="myneo2")
    # NOTE(review): MERGE with no properties matches (or creates!) a single
    # bare Groups node — a MATCH on all Groups nodes looks intended; confirm.
    statement = "MERGE (d:Groups) RETURN d"
    # fetch the source, target and relationship details
    Grouping = graph.run(statement).data()
    number_of_colors = len(Grouping)
    Groups = []
    for rot in range(number_of_colors):
        grp = {}
        grp['Name'] = Grouping[rot]['d']['Name']
        grp['IPAddress'] = Grouping[rot]['d']['IPAddress']
        grp['color'] = Grouping[rot]['d']['color']
        Groups.append(grp)
    print(Groups)
    # Cross-check every host network against every group network.
    for y in NameAndIP:
        for Group in Groups:
            ab = ip_network(y['Network'], strict=False)
            print(IPv4Network(ab))
            print(IPv4Network(Group['IPAddress']))
            c = ip_network(IPv4Network(ab), strict=False)
            d = ip_network(IPv4Network(Group['IPAddress']), strict=False)
            # if IPv4Network(ab) in IPv4Network(Group['d']['IPAddress']):
            if c.subnet_of(d):
                par_child = {}
                print(IPv4Network(ab))
                print(IPv4Network(Group['IPAddress']))
                # par_child="sdsadsa"
                par_child['ChildName'] = y['Name']
                par_child['ParentName'] = Group['Name']
                par_child['Parent_IP'] = Group['IPAddress']
                par_child['color'] = Group['color']
                ParentChild.append(par_child)
    # De-duplicate while preserving insertion order.
    for x in ParentChild:
        if x not in NoDupParentChild:
            NoDupParentChild.append(x)
    print("==============Printing NoDupParentChild===============")
    return (NoDupParentChild)
def FinalGroupingv2( finalarray):
    """Compute host-to-group parentage and the cytoscape-style group nodes.

    Returns (NoDupParentChild, GrpNodes) on success.  NOTE(review): on any
    error the exception is only logged and the function implicitly returns
    None — callers that unpack two values will then raise; confirm intended.
    """
    try:
        print("================Printing final array==================")
        NameAndIP = Convert_to_IP_Network(finalarray)
        # print(NameAndIP)
        ParentChild = []
        NoDupParentChild = []
        user=config.get('neo4j', 'user')
        password=config.get('neo4j', 'passwd')
        graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
        # graph = Graph(password="myneo2")
        # NOTE(review): MERGE with no properties matches (or creates!) a
        # single bare Groups node — a MATCH looks intended; confirm.
        statement = "MERGE (d:Groups) RETURN d"
        # fetch the source, target and relationship details
        Grouping = graph.run(statement).data()
        number_of_colors = len(Grouping)
        Groups = []
        AllIPNetwork = []
        for rot in range(number_of_colors):
            grp = {}
            grp['Name'] = Grouping[rot]['d']['Name']
            grp['IPAddress'] = Grouping[rot]['d']['IPAddress']
            grp['color'] = Grouping[rot]['d']['color']
            Groups.append(grp)
            # if not for strict=false, typeerror will be raised as "with hostbits set"!
            ab = ip_network(Grouping[rot]['d']['IPAddress'], strict=False)
            AllIPNetwork.append(ab)
        arrangedones = ArrangeNodesv2(Grouping)
        #arrangedones = self.ArrangeNodes(AllIPNetwork)
        print("===================== Printing the arranged ones =============")
        print(arrangedones)
        print("===================== Printed the arranged ones =============")
        GrpParChd = []
        # Walk each depth bucket and record, for every network, its nearest
        # enclosing network within the same bucket (itself when alone).
        # this is to access the pair {depth0:[xx]}
        for evry in arrangedones:
            print(evry)  # key values
            # this is to access the array in values of key/value pairs
            for evry2 in arrangedones[evry]:
                print(evry2)
                x = len(evry2)-1
                print(x)  # no of elements in values array
                while (x >= 0):
                    grpparchild = {}
                    if (x == 0):
                        y = x
                        print(x, evry2[x], y, evry2[y])
                        print(evry2[x], " is subnet of ", evry2[y])
                        grpparchild[evry2[x]] = evry2[y]
                        GrpParChd.append(grpparchild)
                    else:
                        y = x-1
                        print(x, evry2[x], y, evry2[y])
                        while (y >= 0):
                            if (evry2[x].subnet_of(evry2[y])):
                                print(evry2[x], " is subnet of ", evry2[y])
                                grpparchild[evry2[x]] = evry2[y]
                                GrpParChd.append(grpparchild)
                                break
                            y -= 1
                    x -= 1
        print("========== Parent Child pair in Groups=======")
        print(GrpParChd)
        # Build one node dict per group, tagging its parent group when the
        # child->parent map above names one.
        GrpNodes = []
        for Group in Groups:
            d = ip_network(Group['IPAddress'], strict=False)
            grp_item = {}
            # print("========== Printing only keys in Groups=======")
            # print(k)
            for evrypair in GrpParChd:
                for k, v in evrypair.items():  # for k,v in list(a.items():
                    if(d == k):
                        print("&&&&&&&&&&&& Comparing &&&&&&&&&&&&")
                        print(d, k, v)
                        for grpk in Group.keys():
                            grp_item[grpk] = Group[grpk]
                        grp_item['id'] = Group['Name']
                        grp_item['isgrp'] = "true"
                        print(
                            "!!!!!!!!!!!!!!!! Key-value pairs so far !!!!!!!!!!!!!!!!11")
                        print(grp_item)
                        for Grouppar in Groups:
                            print(
                                "*****************All values from groups********************")
                            print(Grouppar)
                            d_par = ip_network(
                                Grouppar['IPAddress'], strict=False)
                            if(d_par == v):
                                print(
                                    "*****************Entered into matched parent group********************")
                                print(d_par, v)
                                grp_item['parent'] = Grouppar['Name']
            GrpNodes.append(grp_item)
        print(
            "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Nodes format for groups^^^^^^^^^^^^^^^^")
        logger.info("GrpNodes:")
        logger.info(GrpNodes)
        print(GrpNodes)
        print("========== Parent Child pair in Groups (reverse)=======")
        #GrpParChdreverse = GrpParChd.reverse()
        print(list(reversed(GrpParChd)))
        print("========== Print Groups=======")
        print(Groups)
        print("========== All IP Network=======")
        print(AllIPNetwork)
        ip_list_sorted = sorted(AllIPNetwork)
        SortedReversedAllIPNetwork = list(reversed(ip_list_sorted))
        print("================Printing SortedReversedAllIPNetwork===============")
        print(SortedReversedAllIPNetwork)
        # Attach every host network to each group network that contains it,
        # scanning groups from most- to least-specific.
        for y in NameAndIP:
            ab = ip_network(y['Network'], strict=False)
            c = ip_network(IPv4Network(ab), strict=False)
            for matchsortedIpnetwork in SortedReversedAllIPNetwork:
                e = ip_network(IPv4Network(
                    matchsortedIpnetwork), strict=False)
                if c.subnet_of(e):
                    # print(c,e)
                    i = 0
                    while(i < len(Groups)):
                        #d = ip_network(IPv4Network(Groups[i]['IPAddress']), strict=False)
                        d = ip_network(
                            Groups[i]['IPAddress'], strict=False)
                        #print(d, " ; ",e)
                        if (d == e):
                            print(d, e, i)
                            print(Groups[i]['Name'], Groups[i]['color'])
                            par_child = {}
                            par_child['ChildName'] = y['Name']
                            par_child['ParentName'] = Groups[i]['Name']
                            par_child['Parent_IP'] = Groups[i]['IPAddress']
                            par_child['color'] = Groups[i]['color']
                            ParentChild.append(par_child)
                        i += 1
        # node_any = {}
        # node_any['Name'] = "Any"
        # node_any['color'] = "#ffff80"
        # ParentChild.append(node_any)
        for x in ParentChild:
            if x not in NoDupParentChild:
                NoDupParentChild.append(x)
        print("==============Printing NoDupParentChild===============")
        print(NoDupParentChild)
        return (NoDupParentChild, GrpNodes)
        print("========== End of new attempt=======")
    except Exception as e:
        logger.exception("%s", e)
def groupheirarchy( statement):
    """Build cytoscape-style (NoDupNode, Link) lists describing the group tree.

    Runs *statement* against Neo4j, arranges the group networks into nesting
    buckets (ArrangeNodesv2), then wraps each group as a {'data': {...}} node
    and each child->parent subnet pair as a {'data': {source, target}} link.
    NOTE(review): on any error the exception is only logged and the function
    implicitly returns None — callers unpacking two values will fail; confirm.
    """
    try:
        user=config.get('neo4j', 'user')
        password=config.get('neo4j', 'passwd')
        graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
        Grouping = graph.run(statement).data()
        number_of_colors = len(Grouping)
        Groups = []
        AllIPNetwork = []
        for rot in range(number_of_colors):
            grp = {}
            grp['Name'] = Grouping[rot]['d']['Name']
            grp['IPAddress'] = Grouping[rot]['d']['IPAddress']
            grp['color'] = Grouping[rot]['d']['color']
            Groups.append(grp)
            # if not for strict=false, typeerror will be raised as "with hostbits set"!
            ab = ip_network(Grouping[rot]['d']['IPAddress'], strict=False)
            AllIPNetwork.append(ab)
        arrangedones = ArrangeNodesv2(Grouping)
        GrpParChd = []
        # Walk each depth bucket and record, for every network, its nearest
        # enclosing network within the same bucket (itself when alone).
        # this is to access the pair {depth0:[xx]}
        for evry in arrangedones:
            print(evry)  # key values
            # this is to access the array in values of key/value pairs
            for evry2 in arrangedones[evry]:
                print(evry2)
                x = len(evry2)-1
                print(x)  # no of elements in values array
                while (x >= 0):
                    grpparchild = {}
                    if (x == 0):
                        y = x
                        print(x, evry2[x], y, evry2[y])
                        print(evry2[x], " is subnet of ", evry2[y])
                        grpparchild[evry2[x]] = evry2[y]
                        GrpParChd.append(grpparchild)
                    else:
                        y = x-1
                        print(x, evry2[x], y, evry2[y])
                        while (y >= 0):
                            if (evry2[x].subnet_of(evry2[y])):
                                print(evry2[x], " is subnet of ", evry2[y])
                                grpparchild[evry2[x]] = evry2[y]
                                GrpParChd.append(grpparchild)
                                break
                            y -= 1
                    x -= 1
        print("========== Parent Child pair in Groups=======")
        print(GrpParChd)
        # Build one node dict per group; unlike FinalGroupingv2 the 'parent'
        # link is intentionally left out here (edges carry the hierarchy).
        GrpNodes = []
        for Group in Groups:
            d = ip_network(Group['IPAddress'], strict=False)
            grp_item = {}
            # print("========== Printing only keys in Groups=======")
            # print(k)
            for evrypair in GrpParChd:
                for k, v in evrypair.items():  # for k,v in list(a.items():
                    if(d == k):
                        print("&&&&&&&&&&&& Comparing &&&&&&&&&&&&")
                        print(d, k, v)
                        for grpk in Group.keys():
                            grp_item[grpk] = Group[grpk]
                        grp_item['id'] = Group['Name']
                        grp_item['label'] = Group['Name'] +" ("+ Group['IPAddress']+")"
                        grp_item['isgrp'] = "true"
                        print(
                            "!!!!!!!!!!!!!!!! Key-value pairs so far !!!!!!!!!!!!!!!!11")
                        print(grp_item)
                        for Grouppar in Groups:
                            print(
                                "*****************All values from groups********************")
                            print(Grouppar)
                            d_par = ip_network(
                                Grouppar['IPAddress'], strict=False)
                            if(d_par == v):
                                print(
                                    "*****************Entered into matched parent group********************")
                                print(d_par, v)
                                # grp_item['parent'] = Grouppar['Name']
            GrpNodes.append(grp_item)
        print(
            "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Nodes format for groups^^^^^^^^^^^^^^^^")
        print(GrpNodes)
        Node = []
        NoDupNode = []
        for eve in GrpNodes:
            print("+++++++++++++++++++ Printing every nodes in groups+++++++++++")
            print(eve)
            nodes_datawrapper_grp = {}
            nodes_datawrapper_grp['data'] = eve
            Node.append(nodes_datawrapper_grp)
        for x in Node:
            if x not in NoDupNode:
                NoDupNode.append(x)
        print("========== Parent Child pair in Groups (reverse)=======")
        # Self-parented entries (k == v) are roots and produce no edge.
        Link = []
        for evrypair in GrpParChd:
            for k, v in evrypair.items():  # for k,v in list(a.items():
                if (k!=v):
                    links_datawrapper = {}
                    grp_link = {}
                    for Group in Groups:
                        d = ip_network(Group['IPAddress'], strict=False)
                        if (d==k):
                            grp_link['target'] = Group['Name']
                        if (d==v):
                            grp_link['source'] = Group['Name']
                    # Group_Link.append(grp_link)
                    links_datawrapper['data'] = grp_link
                    Link.append(links_datawrapper)
        print("========================== Built the LInk ==================")
        print(Link)
        print("========== Print Groups=======")
        print(Groups)
        print("========== All IP Network=======")
        print(AllIPNetwork)
        ip_list_sorted = sorted(AllIPNetwork)
        SortedReversedAllIPNetwork = list(reversed(ip_list_sorted))
        print("================Printing SortedReversedAllIPNetwork===============")
        print(SortedReversedAllIPNetwork)
        print("==============Printing NoDupParentChild===============")
        print (NoDupNode, Link)
        return (NoDupNode, Link)
        print("========== End of new attempt=======")
    except Exception as e:
        logger.exception("%s", e)
def ArrangeNodesv2( newlist):
    """Group the group-networks into "depth" buckets of nested subnets.

    Parameters
    ----------
    newlist : list of Neo4j records shaped {'d': {'IPAddress': <cidr str>}}.

    Returns
    -------
    dict mapping "depth_<n>" -> [list of ip_network objects].  Networks are
    sorted ascending; a new bucket starts whenever a network is contained in
    neither the current nesting parent nor the bucket's first (anchor)
    network.  Behaviour matches the original implementation (the dead test
    fixtures, unused flags and debug prints were removed).
    """
    # strict=False tolerates host bits being set in stored addresses.
    networks = sorted(
        ip_network(record['d']['IPAddress'], strict=False) for record in newlist
    )
    buckets = dict()
    current = []      # networks accumulated for the bucket in progress
    anchor = 0        # index of the current bucket's first network
    probe = 0         # index of the most recent nesting parent
    depth = 0
    y = 0
    last = len(networks) - 1
    while y <= last:
        candidate = networks[y]
        if candidate.subnet_of(networks[probe]):
            current.append(candidate)
            if y == last:
                buckets["depth_" + str(depth)] = [current]
                depth += 1
        else:
            if candidate.subnet_of(networks[anchor]):
                # Still inside the bucket's anchor: restart nesting from here.
                # NOTE(review): as in the original, the candidate itself is
                # not added to the bucket in this branch — confirm intended.
                probe = y
            else:
                # New top-level network: close the bucket and open a new one.
                buckets["depth_" + str(depth)] = [current]
                depth += 1
                current = [candidate]
                anchor = y
                probe = y
                if y == last:
                    buckets["depth_" + str(depth)] = [current]
                    depth += 1
        y += 1
    return buckets
def GetAllNodes( statement):
    """Run *statement* against Neo4j and return the sorted host networks.

    Only hosts with a real Mask (!= "NA") are considered; each contributes an
    ip_network built from "IPAddress/Mask" (strict=False so host bits are
    tolerated).

    Returns
    -------
    {"NameAndIP": [ip_network objects, ascending]}

    The original also accumulated Node/NoDupNode/MatchNodes lists that never
    reached the result; that dead code was removed.
    """
    user = config.get('neo4j', 'user')
    password = config.get('neo4j', 'passwd')
    graph = Graph(host=config.get('neo4j', 'host'), auth=(user, password))
    output = graph.run(statement).data()
    networks = []
    for item in output:
        if item['n']['Mask'] != "NA":
            cidr = item['n']['IPAddress'] + "/" + item['n']['Mask']
            networks.append(ip_network(cidr, strict=False))
    networks.sort()
    return {"NameAndIP": networks}
def GetRelationshipFromNeo4jv3( statement):
    """Execute *statement* on Neo4j and derive the group hierarchy.

    Returns a tuple of (raw query result wrapped in a list, the parent/child
    grouping, the group node list) as produced by FinalGroupingv2.
    """
    user = config.get('neo4j', 'user')
    password = config.get('neo4j', 'passwd')
    graph = Graph(host=config.get('neo4j', 'host'), auth=(user, password))
    # fetch the source, target and relationship details
    print(statement)
    records = [graph.run(statement).data()]
    finalgrouping, GrpNodes = FinalGroupingv2(records)
    return (records, finalgrouping, GrpNodes)
| 44,784 | 13,595 |
import os
import unittest
from searcher.Vulnerability import Vulnerability
from searcher.Searcher import Searcher
import json
from static_analyzer import file_get_contents
class TestSlice6(unittest.TestCase):
    """Run the Searcher over the slice6 snippet with several rule sets and
    compare its report with the recorded expected output."""

    def _check_rules(self, rules_file, expected_file):
        # Shared driver for all test cases: build vulnerabilities from
        # *rules_file*, run the searcher over slice6, compare with the
        # expected-output file.
        base = os.getcwd() + "/tests/slice6/"
        parsed_snippet = json.loads(file_get_contents(base + "slice6.json"))
        parsed_rules = json.loads(file_get_contents(base + rules_file))
        vulnerabilities = Vulnerability.build_vulnerabilities(parsed_rules)
        s = Searcher(parsed_snippet['body'], vulnerabilities)
        # Context manager guarantees the handle is closed even when the
        # assertion fails (the original leaked it in that case).
        with open(base + expected_file, "r") as output:
            self.assertEqual(s.get_vulnerabilities_str(), output.read(), "Should be equal")

    def test_rules(self):
        self._check_rules("rules.json", "slice6_rules.out")

    def test_rules2(self):
        self._check_rules("rules2.json", "slice6_rules2.out")

    def test_rules3(self):
        self._check_rules("rules3.json", "slice6_rules3.out")

    def test_rulesNoVuln(self):
        self._check_rules("rulesNoVuln.json", "slice6_rulesNoVuln.out")

    def test_rulesSanit(self):
        self._check_rules("rulesSanit.json", "slice6_rulesSanit.out")


if __name__ == '__main__':
    unittest.main()
| 3,020 | 1,017 |
from histogram import display_histogram
def main():
    """Read three whitespace-separated data lines from survey.dat and show a
    histogram for each, separated by blank lines."""
    # Context manager closes the file even if a read fails (the original
    # leaked the handle on error).
    with open('survey.dat', 'r') as infile:
        lines = [infile.readline().split() for _ in range(3)]
    display_histogram(lines[0])
    print('')
    display_histogram(lines[1])
    print('')
    display_histogram(lines[2])


main()
| 367 | 137 |
from django.db import models
# Create your models here.
class home_page(models.Model):
    """Singleton-style content model for the site's landing page.

    All fields default to empty and are optional (blank=True) so the page
    can be filled in incrementally from the admin.
    NOTE(review): class name is not PascalCase; renaming would require a
    migration, so it is documented rather than changed.
    """
    # Hero section: banner image, headline and intro text.
    masthead_image = models.ImageField(upload_to='static/images', default='', blank=True)
    masthead = models.CharField(max_length=30, default='', blank=True)
    description = models.TextField(max_length=200, default='', blank=True)
    # Three feature "icon" blurbs shown under the hero.
    icon_1_header = models.CharField(max_length=30, default='', blank=True)
    icon_1_description = models.TextField(max_length=30, default='', blank=True)
    icon_2_header = models.CharField(max_length=30, default='', blank=True)
    icon_2_description = models.TextField(max_length=30, default='', blank=True)
    icon_3_header = models.CharField(max_length=30, default='', blank=True)
    icon_3_description = models.TextField(max_length=30, default='', blank=True)
    # Three image + text showcase sections.
    showcase_1_header = models.CharField(max_length=30, default='', blank=True)
    showcase_1_image = models.ImageField(upload_to='static/images', default='', blank=True)
    showcase_1_description = models.TextField(max_length=700, default='', blank=True)
    showcase_2_header = models.CharField(max_length=30, default='', blank=True)
    showcase_2_image = models.ImageField(upload_to='static/images', default='', blank=True)
    showcase_2_description = models.TextField(max_length=700, default='', blank=True)
    showcase_3_header = models.CharField(max_length=30, default='', blank=True)
    showcase_3_image = models.ImageField(upload_to='static/images', default='', blank=True)
    showcase_3_description = models.TextField(max_length=700, default='', blank=True)

    def __str__(self):
        # Admin display label; may be empty since masthead is optional.
        return self.masthead
| 1,632 | 530 |
import RPi.GPIO as GPIO
class Engine:
    """Three-pin motor driver controlled through RPi.GPIO software PWM.

    ``high``/``mid``/``low`` are physical (BOARD-numbered) pin numbers.
    A PWM handle is created when a pin is enabled and stopped when it is
    disabled; forward motion drives high+low, reverse drives high+mid,
    and braking drives all three channels at full duty cycle.
    """

    FREQUENCY = 100      # PWM frequency in Hz; also the duty cycle used to brake
    BOOT_FEQUENCY = 10   # initial duty cycle when a movement starts
                         # (misspelled name kept for backward compatibility)

    def __init__(self, high, mid, low):
        self.high = high
        self.mid = mid
        self.low = low
        # Lazily-created GPIO.PWM handles; None while the pin is idle.
        self.pwmHigh = None
        self.pwmMid = None
        self.pwmLow = None

    def getHigh(self):
        """Return the 'high' pin number."""
        return self.high

    def getMid(self):
        """Return the 'mid' pin number."""
        return self.mid

    def getLow(self):
        """Return the 'low' pin number."""
        return self.low

    def makeOut(self, pin):
        """Configure *pin* as an output using BOARD numbering."""
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(pin, GPIO.OUT)

    def makeHighOut(self):
        self.makeOut(self.high)

    def makeMidOut(self):
        self.makeOut(self.mid)

    def makeLowOut(self):
        # Bug fix: the original stored makeOut()'s return value (always None)
        # in self.pwmLow, clobbering any live PWM handle for the low pin.
        self.makeOut(self.low)

    def makeAllOut(self):
        self.makeHighOut()
        self.makeMidOut()
        self.makeLowOut()

    def enableOutPin(self, pin):
        """Drive *pin* HIGH and return a fresh PWM handle for it."""
        GPIO.setmode(GPIO.BOARD)
        self.makeOut(pin)
        GPIO.output(pin, GPIO.HIGH)
        return GPIO.PWM(pin, Engine.FREQUENCY)

    def disableOutPin(self, pin):
        """Drive *pin* LOW."""
        GPIO.setmode(GPIO.BOARD)
        self.makeOut(pin)
        GPIO.output(pin, GPIO.LOW)

    def stopMid(self):
        if self.pwmMid is not None:
            self.pwmMid.stop()
            self.pwmMid = None

    def stopHigh(self):
        if self.pwmHigh is not None:
            self.pwmHigh.stop()
            self.pwmHigh = None

    def stopLow(self):
        if self.pwmLow is not None:
            self.pwmLow.stop()
            self.pwmLow = None

    def enableOutHigh(self):
        self.stopHigh()
        self.pwmHigh = self.enableOutPin(self.high)

    def enableOutMid(self):
        self.stopMid()
        self.pwmMid = self.enableOutPin(self.mid)

    def enableOutLow(self):
        self.stopLow()
        self.pwmLow = self.enableOutPin(self.low)

    def disableOutHigh(self):
        self.stopHigh()
        self.disableOutPin(self.high)

    def disableOutMid(self):
        self.stopMid()
        self.disableOutPin(self.mid)

    def disableOutLow(self):
        self.stopLow()
        self.disableOutPin(self.low)

    def accelerate(self, frequency=None):
        """Drive forward; with *frequency* set, only adjust the running duty cycle."""
        if frequency is not None:
            self.pwmHigh.ChangeDutyCycle(frequency)
            self.pwmLow.ChangeDutyCycle(frequency)
        else:
            self.enableOutHigh()
            self.enableOutLow()
            self.disableOutMid()
            self.pwmHigh.start(Engine.BOOT_FEQUENCY)
            self.pwmLow.start(Engine.BOOT_FEQUENCY)

    def reverse(self, frequency=None):
        """Drive backward; with *frequency* set, only adjust the running duty cycle."""
        if frequency is not None:
            self.pwmHigh.ChangeDutyCycle(frequency)
            self.pwmMid.ChangeDutyCycle(frequency)
        else:
            self.enableOutHigh()
            self.enableOutMid()
            self.disableOutLow()
            self.pwmHigh.start(Engine.BOOT_FEQUENCY)
            self.pwmMid.start(Engine.BOOT_FEQUENCY)

    def brake(self):
        """Energize all three channels at full duty cycle."""
        self.enableOutHigh()
        self.enableOutMid()
        self.enableOutLow()
        self.pwmHigh.start(Engine.FREQUENCY)
        self.pwmMid.start(Engine.FREQUENCY)
        self.pwmLow.start(Engine.FREQUENCY)

    def disableEngine(self):
        """Stop all PWM and pull every pin LOW."""
        self.disableOutHigh()
        self.disableOutMid()
        self.disableOutLow()
| 2,613 | 1,267 |
class Pods:
    """Cursor-style wrapper around a pod-list response.

    Holds the pod items and metadata plus an index that marks the "current"
    pod; add_index/sub_index move the cursor but never leave the valid range.
    """

    def __init__(self, pods):
        self._index = 0
        self._pods = pods.items
        self._metadata = pods.metadata

    @property
    def index(self):
        """Position of the current pod."""
        return self._index

    @property
    def current_pod(self):
        """The pod the cursor currently points at."""
        return self._pods[self._index]

    @property
    def list(self):
        """All pod items."""
        return self._pods

    def set_index(self, index):
        """Jump the cursor directly to *index* (no bounds check)."""
        self._index = index

    def add_index(self):
        """Advance the cursor unless it is already on the last pod."""
        nxt = self._index + 1
        if nxt < len(self._pods):
            self._index = nxt

    def sub_index(self):
        """Move the cursor back unless it is already on the first pod."""
        prv = self._index - 1
        if prv >= 0:
            self._index = prv
| 587 | 189 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
import csv
from apps.members.models import Member
from .models import GenerateReportForm
from django.db.models import Q
from django.contrib.auth.decorators import login_required
def export_all(user_obj):
    """Render the given Member queryset as a downloadable users.csv file."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="users.csv"'
    writer = csv.writer(response)
    header = ['First name', 'Last name', 'DOB', 'Mobile',
              'Admission Date', 'Subscription Type', 'Batch']
    writer.writerow(header)
    rows = user_obj.values_list('first_name', 'last_name', 'dob',
                                'mobile_number', 'admitted_on',
                                'subscription_type', 'batch')
    # writerows emits one CSV line per member tuple.
    writer.writerows(rows)
    return response
@login_required
# Export Single User Data in CSV format, called on user profile view
def export_single(request, pk):
    """Export one member (looked up by primary key) via the shared CSV exporter."""
    # filter(pk=pk) yields a one-element queryset, which export_all accepts.
    member = Member.objects.filter(pk=pk)
    return export_all(member)
@login_required
def reports(request):
    """ Generate reports according to year, month and batch
    Year Range = 2020 - Current Year + 5 (Changed in Model)
    Month Range - 1 - 12
    Batch - Mor, Eve & Both
    export_all function is used to generate reports in .CSV (Excel Formats)
    """
    if request.method == 'POST':
        form = GenerateReportForm(request.POST)
        # Default to an empty result set so the render below never hits an
        # unbound name when the form fails validation (the original raised
        # UnboundLocalError in that case).
        users = Member.objects.none()
        if form.is_valid():
            month = request.POST.get('month')
            year = request.POST.get('year')
            batch = request.POST.get('batch')
            # Build the filter from whichever of month/year/batch were given.
            if month and year and batch:
                query = Q(registration_date__month=month,
                          registration_date__year=year,
                          batch=batch)
            elif month and year:
                query = Q(registration_date__month=month,
                          registration_date__year=year)
            elif month and batch:
                query = Q(registration_date__month=month, batch=batch)
            elif year and batch:
                query = Q(registration_date__year=year, batch=batch)
            else:
                # NOTE(review): reached when fewer than two fields were
                # supplied; filtering on a possibly-missing year gives
                # registration_date__year=None — confirm this is intended.
                query = Q(registration_date__year=year)
            users = Member.objects.filter(query)
            if 'export' in request.POST:
                return export_all(users)
        context = {
            'users': users,
            'form': form,
        }
        return render(request, 'reports/export.html', context)
    else:
        form = GenerateReportForm()
        return render(request, 'reports/export.html', {'form': form})
| 3,158 | 842 |
from setuptools import setup, find_packages
# Read the long description up front with a context manager so the file
# handle is closed deterministically (the original `open(...).read()` leaked
# it), and with an explicit encoding so the build does not depend on the
# current locale.
with open("Readme.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name='evolute',
    version='0.9.0',
    packages=find_packages(),
    url='https://github.com/csxeba/evolute.git',
    license='MIT',
    author='Csaba Gór',
    author_email='csxeba@gmail.com',
    description='Evolutionary algorithm toolbox',
    long_description=long_description,
    long_description_content_type='text/markdown'
)
| 401 | 137 |
# %%
import datetime
import time
import pandas as pd
from io import StringIO
from function_grab import grab_price
import numpy as np
import warnings
import random
data = {}
n_days = 120
date = datetime.datetime.now()
fail_count = 0
allow_continuous_fail_count = 15  # longest continuous market closure in the past five years was 12 days
# Walk backwards day by day until 120 trading days have been collected.
while len(data) < n_days:
    print('parsing', date)
    # fetch daily prices with grab_price
    try:
        # grab the data for this date
        data[date.date()] = grab_price(date)
        print('success!')
        fail_count = 0
    except:
        # NOTE(review): bare except treats every error (network, parse,
        # KeyboardInterrupt) as a market holiday — consider narrowing.
        print('fail! check the date is holiday')
        fail_count += 1
        if fail_count == allow_continuous_fail_count:
            raise
            # NOTE(review): this break is unreachable after raise.
            break
    # step back one day
    date -= datetime.timedelta(days=1)
    time.sleep(random.randint(5, 10))
# Up/down indicator column per day (column name is the source's own header).
updown = pd.DataFrame({k: d['漲跌(+/-)'] for k, d in data.items()})
# print(updown)
# %%
# Traded share counts: strip thousands separators, convert to lots (/1000).
trade_n = pd.DataFrame({k: d['成交股數'] for k, d in data.items()})
i = 0
while i <= trade_n.shape[0]-1:
    trade_n.iloc[i] = trade_n.iloc[i].str.replace(',', '')
    trade_n.iloc[i] = round(trade_n.iloc[i].astype(float)/1000)
    i += 1
# print(trade_n)
# Price/earnings ratios: only the thousands separators need stripping.
PEratio = pd.DataFrame({k: d['本益比'] for k, d in data.items()})
i = 0
while i <= PEratio.shape[0]-1:
    PEratio.iloc[i] = PEratio.iloc[i].str.replace(',', '')
    i += 1
updown.to_excel('grab120days_updown.xlsx')
trade_n.to_excel('grab120days_traden.xlsx')
PEratio.to_excel('grab120days_PE.xlsx')
# print(trade_n)
# print(PEratio)
close = pd.DataFrame({k: d['收盤價'] for k, d in data.items()})  # closing prices; values are still strings here
# print(close)
# print(close.shape)
# %%
# MA5 — 5-day moving average of the closing price per stock.
i = 0
MA5 = []
# '--' marks missing closes; replace with NaN so np.nanmean skips them.
close = close.replace('--', np.NaN)
while i <= close.shape[0]-1:
    close.iloc[i] = close.iloc[i].str.replace(',', '')
    mean = np.nanmean(close.iloc[i, 0:5].astype(float))
    # print(mean)
    MA5.append([close.index[i], mean])
    i = i+1
# Collect [code, value] pairs in a list first, build a DataFrame from it,
# set the code column as index, then merge on matching indexes; unmatched
# rows become NaN.
MA5 = pd.DataFrame(MA5, columns=['證券代號', 'MA5']).set_index('證券代號')
close = pd.merge(close, MA5, how='outer', left_index=True, right_index=True)
# dtype must be converted to float (same type as the computed means),
# otherwise the later merges store NaN because of the type mismatch.
close = close.astype(float)  # convert every column to float
# MA20 — 20-day moving average.
i = 0
MA20 = []
# print(close.shape) #df.shape=(n_rows,n_columns)
# Missing closes were already replaced with np.nan above, so np.nanmean can
# simply skip them here.
# close = close.replace('--', np.NaN)  (replace only matches whole strings)
while i <= close.shape[0]-1:
    # iloc addresses rows by position; loc addresses them by index label.
    # print(type(close.iloc[i, 1]))
    # Thousands separators were already stripped row by row in the MA5 pass.
    # close.iloc[i] = close.iloc[i].str.replace(',', '')
    # print(close.iloc[i, 0:3])
    # astype returns a converted copy; it does not mutate unless reassigned.
    mean = np.nanmean(close.iloc[i, 0:20])
    # print(type(close.iloc[i, 1]))
    """for j in range(3):
        close.iloc[i, j] = close.iloc[i, j].replace(',', '')
        sum = sum + float(close.iloc[i, j])
        print(sum)
        mean = sum/3"""
    # print(mean)
    MA20.append([close.index[i], mean])
    i = i+1
# Same pattern as MA5: list -> DataFrame -> index on the stock code ->
# outer merge (unmatched rows become NaN).
MA20 = pd.DataFrame(MA20, columns=['證券代號', 'MA20']).set_index('證券代號')
close = pd.merge(close, MA20, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)
# MA60 — 60-day moving average.
i = 0
MA60 = []
while i <= close.shape[0]-1:
    mean = np.nanmean(close.iloc[i, 0:60])
    MA60.append([close.index[i], mean])
    i = i+1
MA60 = pd.DataFrame(MA60, columns=['證券代號', 'MA60']).set_index('證券代號')
close = pd.merge(close, MA60, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)
# %%
# MA120 — 120-day moving average.
i = 0
MA120 = []
while i <= close.shape[0]-1:
    mean = np.nanmean(close.iloc[i, 0:120])
    MA120.append([close.index[i], mean])
    i = i+1
MA120 = pd.DataFrame(MA120, columns=['證券代號', 'MA120']).set_index('證券代號')
close = pd.merge(close, MA120, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)
# print(close)
close.to_excel('stock120.xlsx')
| 4,044 | 2,139 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import json
class Codec:
    """Serialize/deserialize a binary tree to/from a bracketed string.

    Format is "[val,left,right]" applied recursively, with the literal
    "null" for an absent child; e.g. a lone node 1 becomes "[1,null,null]".
    """

    def serialize(self, root):
        """Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str
        """
        if not root:
            return "null"
        return (
            "["
            + str(root.val)
            + ","
            + self.serialize(root.left)
            + ","
            + self.serialize(root.right)
            + "]"
        )

    def getSection(self, data):
        """Return (prefix, length) of the bracket-balanced section that
        starts at data[0] (which must be '[')."""
        brackets = 1
        idx = 1
        while brackets != 0:
            if data[idx] == "[":
                brackets += 1
            elif data[idx] == "]":
                brackets -= 1
            idx += 1
        return data[:idx], idx

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode
        """
        if data == "null":
            return None
        # Value sits between the opening '[' and the first comma.
        first_comma = data.index(",")
        val = int(data[first_comma - first_comma + 1:first_comma]) if False else int(data[1:first_comma])
        data = data[first_comma + 1 :]
        if data[0] == "[":
            leftsection, last = self.getSection(data)
            left = self.deserialize(leftsection)
        else:
            last = 4  # len("null"): skip the literal plus the comma below
            left = None
        data = data[last + 1 :]
        if data[0] == "[":
            rightsection, _ = self.getSection(data)
            right = self.deserialize(rightsection)
        else:
            right = None
        # TreeNode is expected to be defined by the judge's harness
        # (see the commented definition above this class).
        node = TreeNode(val)
        node.left = left
        node.right = right
        return node
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| 1,811 | 529 |
from collections import defaultdict
from lhc.interval import IntervalBinner
class IntervalMap(object):
    """Map from interval keys to values, binned for fast overlap queries.

    Intervals are placed into bins by an ``IntervalBinner``; ``bins`` and
    ``values`` are parallel per-bin lists, so the i-th interval in a bin
    corresponds to the i-th value in the same bin.  Keys must support ``==``
    and ``overlaps``.
    """

    def __init__(self, key_value_pairs=None):
        self.len = 0  # total number of stored (interval, value) pairs
        self.binner = IntervalBinner()
        self.bins = defaultdict(list)    # bin id -> [intervals]
        self.values = defaultdict(list)  # bin id -> [values], parallel to bins
        if key_value_pairs is not None:
            for key, value in key_value_pairs:
                self[key] = value

    def __len__(self):
        return self.len

    def __iter__(self):
        """Yield every stored interval."""
        for intervals in self.bins.values():
            for interval in intervals:
                yield interval

    def __contains__(self, item):
        """True if an interval equal to *item* is stored."""
        bins = self.binner.get_overlapping_bins(item)
        for fr, to in bins:
            for bin_id in range(fr, to + 1):
                for set_interval in self.bins[bin_id]:
                    if set_interval == item:
                        return True
        return False

    def __setitem__(self, key, value):
        """Store *value* under interval *key* (duplicates are kept)."""
        self.len += 1
        bin_id = self.binner.get_bin(key)
        self.bins[bin_id].append(key)
        self.values[bin_id].append(value)

    def __getitem__(self, item):
        """Yield every value whose interval overlaps *item*."""
        bins = self.binner.get_overlapping_bins(item)
        for fr, to in bins:
            for bin_id in range(fr, to + 1):
                for i, set_interval in enumerate(self.bins[bin_id]):
                    if set_interval.overlaps(item):
                        yield self.values[bin_id][i]

    def iterkeys(self):
        """Yield every stored interval (same order as __iter__)."""
        for intervals in self.bins.values():
            for interval in intervals:
                yield interval

    def itervalues(self):
        """Yield every stored value."""
        for bin_values in self.values.values():
            for value in bin_values:
                yield value

    def iteritems(self):
        """Yield (interval, value) pairs.

        Bug fix: the original zipped the (bin_id, list) item tuples of the
        two dicts and then zipped those tuples together, yielding
        (bin_id, bin_id) and (interval_list, value_list) pairs instead of
        the actual key/value pairs.
        """
        for bin_id, intervals in self.bins.items():
            for key, value in zip(intervals, self.values[bin_id]):
                yield key, value
| 1,832 | 542 |
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import sys
from gazebo_msgs.srv import ApplyBodyWrench
from geometry_msgs.msg import Point, Wrench, Vector3
if __name__ == '__main__':
    # BUG FIX: converted Python-2-only syntax (print statements and
    # "except X, e") so the node runs on Python 3, required since
    # ROS Noetic.  Behavior is otherwise unchanged.
    print('Apply programmed perturbation to vehicle', rospy.get_namespace())
    rospy.init_node('set_body_wrench')

    if rospy.is_shutdown():
        print('ROS master not running!')
        sys.exit(-1)

    # Simulation time at which the wrench starts being applied.
    starting_time = 0.0
    if rospy.has_param('~starting_time'):
        starting_time = rospy.get_param('~starting_time')

    print('Starting time= %fs' % starting_time)

    # duration == 0 means "not configured"; negative means "forever".
    duration = 0.0
    if rospy.has_param('~duration'):
        duration = rospy.get_param('~duration')

    if duration == 0.0:
        print('Duration not set, leaving node...')
        sys.exit(-1)

    print('Duration [s]=', ('Inf.' if duration < 0 else duration))

    force = [0, 0, 0]
    if rospy.has_param('~force'):
        force = rospy.get_param('~force')
        print(force)
        if len(force) != 3:
            raise rospy.ROSException('Invalid force vector')

    print('Force [N]=', force)

    torque = [0, 0, 0]
    if rospy.has_param('~torque'):
        torque = rospy.get_param('~torque')
        if len(torque) != 3:
            raise rospy.ROSException('Invalid torque vector')

    print('Torque [N]=', torque)

    try:
        rospy.wait_for_service('/gazebo/apply_body_wrench', timeout=10)
    except rospy.ROSException:
        print('Service not available! Closing node...')
        sys.exit(-1)

    try:
        apply_wrench = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)
    except rospy.ServiceException as e:
        print('Service call failed, error=', e)
        sys.exit(-1)

    ns = rospy.get_namespace().replace('/', '')
    body_name = '%s/base_link' % ns

    # Poll at 100 Hz until the requested start time is reached.
    if starting_time >= 0:
        rate = rospy.Rate(100)
        while rospy.get_time() < starting_time:
            rate.sleep()

    wrench = Wrench()
    wrench.force = Vector3(*force)
    wrench.torque = Vector3(*torque)
    success = apply_wrench(
        body_name,
        'world',
        Point(0, 0, 0),
        wrench,
        rospy.Time().now(),
        rospy.Duration(duration))
    if success:
        print('Body wrench perturbation applied!')
        print('\tFrame: ', body_name)
        print('\tDuration [s]: ', duration)
        print('\tForce [N]: ', force)
        print('\tTorque [Nm]: ', torque)
    else:
        print('Failed!')
| 3,031 | 1,026 |
#! /usr/bin/env python
import sys
import os
import re
import glob
import argparse
import project_util
# File-name patterns used to pull batch/sim/run numbers out of paths.
batch_number_pattern = re.compile(r'batch(?P<batch_number>\d+)')
sim_number_pattern = re.compile(r'-sim-(?P<sim_number>\d+)-')
# CONSISTENCY FIX: the group was copy-pasted as 'sim_number'; renamed to
# 'run_number'.  findall() output is unchanged (it returns the group's
# text regardless of its name).
run_number_pattern = re.compile(r'-run-(?P<run_number>\d+)\.log')
def line_count(path):
    """Return the number of lines in the text file at *path*."""
    with open(path) as stream:
        return sum(1 for _ in stream)
def get_run_number(log_path):
    """Extract the integer run number from a '-run-N.log' file name.

    Asserts that exactly one run number appears in *log_path*.
    """
    matches = run_number_pattern.findall(log_path)
    assert len(matches) == 1
    return int(matches[0])
def consolidate_preempted_logs(
        target_run_number = 1,
        number_of_samples = 1501,
        batch_dir_name = None):
    """Promote complete extra-run MCMC logs over incomplete target-run logs.

    For every simulation/batch directory under ``project_util.VAL_DIR``:
    if the state log for ``target_run_number`` is incomplete (fewer than
    ``number_of_samples`` + 1 lines) and a later run's log is complete,
    rename the later run's state/operator logs over the target's and
    delete any remaining extra-run logs.  Paths of analyses that still
    need re-running are written to stdout; warnings go to stderr.

    batch_dir_name, if given, restricts processing to that one batch
    directory.
    """
    # One header line plus one line per MCMC sample.
    number_of_lines = number_of_samples + 1
    val_sim_dirs = glob.glob(os.path.join(project_util.VAL_DIR, '0*'))
    for val_sim_dir in sorted(val_sim_dirs):
        sim_name = os.path.basename(val_sim_dir)
        batch_dirs = glob.glob(os.path.join(val_sim_dir, "batch*"))
        for batch_dir in sorted(batch_dirs):
            # Optionally restrict the pass to a single batch directory.
            if batch_dir_name and (os.path.basename(batch_dir) != batch_dir_name):
                sys.stderr.write("Skipping {0}\n".format(batch_dir))
                continue
            batch_number_matches = batch_number_pattern.findall(batch_dir)
            assert len(batch_number_matches) == 1
            batch_number_str = batch_number_matches[0]
            batch_number = int(batch_number_str)
            # One qsub script per analysis replicate in this batch.
            sh_paths = glob.glob(os.path.join(batch_dir,
                    "*simcoevolity-sim-*-config-run-{0}-qsub.sh".format(
                        target_run_number)))
            if not sh_paths:
                sys.stderr.write("WARNING: No qsub files found for\n"
                        "    Simulation: {0}\n"
                        "    Batch: {1}\n"
                        "    Target run: {2}\n    Skipping!!\n".format(
                            sim_name,
                            batch_number,
                            target_run_number))
                continue
            for sh_path in sorted(sh_paths):
                # The state log the qsub script should have produced.
                posterior_path = sh_path.replace(
                        "-run-{0}-qsub.sh".format(target_run_number),
                        "-state-run-{0}.log".format(target_run_number))
                if not os.path.exists(posterior_path):
                    sys.stderr.write("WARNING: Missing log: {0}\n".format(posterior_path))
                    sys.stdout.write("{0}\n".format(sh_path))
                    continue
                sim_number_matches = sim_number_pattern.findall(posterior_path)
                assert len(sim_number_matches) == 1
                sim_number_str = sim_number_matches[0]
                sim_number = int(sim_number_str)
                posterior_file = os.path.basename(posterior_path)
                prefix = posterior_file.split("-sim-")[0]
                # Locate the unique target-run state log (glob tolerates
                # compressed suffixes like '.log.gz').
                gp = os.path.join(batch_dir,
                        "{0}-sim-{1}-config-state-run-{2}.log*".format(
                            prefix,
                            sim_number_str,
                            target_run_number))
                target_state_log_paths = glob.glob(gp)
                assert (len(target_state_log_paths) == 1), (
                        "Multiple matches to {0!r}".format(gp))
                target_state_log_path = target_state_log_paths[0]
                gp = os.path.join(batch_dir,
                        "{0}-sim-{1}-config-operator-run-{2}.log*".format(
                            prefix,
                            sim_number_str,
                            target_run_number))
                target_op_log_paths = glob.glob(gp)
                assert (len(target_op_log_paths) == 1), (
                        "Multiple matches to {0!r}".format(gp))
                target_op_log_path = target_op_log_paths[0]
                # All runs' state/operator logs for this replicate; the
                # two sets must pair up one-to-one.
                state_log_path_pattern = os.path.join(batch_dir,
                        "{0}-sim-{1}-config-state-run-*.log*".format(
                            prefix,
                            sim_number_str))
                state_log_paths = glob.glob(state_log_path_pattern)
                op_log_path_pattern = os.path.join(batch_dir,
                        "{0}-sim-{1}-config-operator-run-*.log*".format(
                            prefix,
                            sim_number_str))
                op_log_paths = glob.glob(op_log_path_pattern)
                assert (len(state_log_paths) == len(op_log_paths)), (
                        "{0} matches for {1!r} and {2} for {3!r}".format(
                            len(state_log_paths),
                            state_log_path_pattern,
                            len(op_log_paths),
                            op_log_path_pattern))
                assert (target_state_log_path in state_log_paths), (
                        "Target {0!r} not in matches".format(
                            target_state_log_path))
                assert (target_op_log_path in op_log_paths), (
                        "Target {0!r} not in matches".format(
                            target_op_log_path))
                run_numbers = sorted(get_run_number(p) for p in state_log_paths)
                assert (run_numbers == sorted(get_run_number(p) for p in op_log_paths))
                # Runs launched after the target (restarts of a preempted job).
                extra_run_numbers = [rn for rn in run_numbers if rn > target_run_number]
                if len(extra_run_numbers) < 1:
                    if line_count(target_state_log_path) != number_of_lines:
                        sys.stderr.write(
                                "WARNING: Target log is incomplete, but there are no extra runs\n"
                                "    Simulation: {0}\n"
                                "    Batch: {1}\n"
                                "    Rep: {2}\n"
                                "    Target run: {3}\n    Skipping!!\n".format(
                                    sim_name,
                                    batch_number,
                                    sim_number,
                                    target_run_number))
                        sys.stdout.write("{0}\n".format(sh_path))
                    continue
                else:
                    if line_count(target_state_log_path) >= number_of_lines:
                        sys.stderr.write(
                                "WARNING: Target log is complete, but there are extra runs\n"
                                "    Simulation: {0}\n"
                                "    Batch: {1}\n"
                                "    Rep: {2}\n"
                                "    Target run: {3}\n    Skipping!!\n".format(
                                    sim_name,
                                    batch_number,
                                    sim_number,
                                    target_run_number))
                        sys.stdout.write("{0}\n".format(sh_path))
                        continue
                # The highest-numbered extra run is expected to be the
                # completed one; the rest are purged below.
                completed_run_number = extra_run_numbers.pop(-1)
                completed_state_log_pattern = os.path.join(batch_dir,
                        "{0}-sim-{1}-config-state-run-{2}.log*".format(
                            prefix,
                            sim_number_str,
                            completed_run_number))
                completed_state_log_paths = glob.glob(completed_state_log_pattern)
                assert (len(completed_state_log_paths) == 1), (
                        "Multiple matches to complete state log {0!r}".format(
                            completed_state_log_pattern))
                completed_state_log_path = completed_state_log_paths[0]
                completed_op_log_pattern = os.path.join(batch_dir,
                        "{0}-sim-{1}-config-operator-run-{2}.log*".format(
                            prefix,
                            sim_number_str,
                            completed_run_number))
                completed_op_log_paths = glob.glob(completed_op_log_pattern)
                assert (len(completed_op_log_paths) == 1), (
                        "Multiple matches to complete op log {0!r}".format(
                            completed_state_log_pattern))
                completed_op_log_path = completed_op_log_paths[0]
                if line_count(completed_state_log_path) != number_of_lines:
                    sys.stderr.write(
                            "WARNING: could not find completed log for\n"
                            "    Simulation: {0}\n"
                            "    Batch: {1}\n"
                            "    Rep: {2}\n"
                            "    Target run: {3}\n    Skipping!!\n".format(
                                sim_name,
                                batch_number,
                                sim_number,
                                target_run_number))
                    sys.stdout.write("{0}\n".format(sh_path))
                    continue
                # Promote the completed run's logs over the target's.
                os.rename(completed_state_log_path, target_state_log_path)
                os.rename(completed_op_log_path, target_op_log_path)
                # Purge remaining (incomplete) extra-run logs.
                for n in extra_run_numbers:
                    sp = os.path.join(batch_dir,
                            "{0}-sim-{1}-config-state-run-{2}.log*".format(
                                prefix,
                                sim_number_str,
                                n))
                    state_purge_paths = glob.glob(sp)
                    assert (len(state_purge_paths) == 1), (
                            "Multiple matches to incomplete state log {0!r}".format(
                                sp))
                    state_purge_path = state_purge_paths[0]
                    op = os.path.join(batch_dir,
                            "{0}-sim-{1}-config-operator-run-{2}.log*".format(
                                prefix,
                                sim_number_str,
                                n))
                    op_purge_paths = glob.glob(op)
                    assert (len(op_purge_paths) == 1), (
                            "Multiple matches to incomplete op log {0!r}".format(
                                op))
                    op_purge_path = op_purge_paths[0]
                    os.remove(state_purge_path)
                    os.remove(op_purge_path)
def main_cli(argv = sys.argv):
    """Command-line entry point: parse options and run consolidation."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
            '-r', '--run-number',
            action = 'store',
            type = int,
            default = 1,
            help = 'Target run number for consolidation.')
    parser.add_argument(
            '-n', '--number-of-samples',
            action = 'store',
            type = int,
            default = 1501,
            help = ('Number of MCMC samples that should be found in the '
                    'completed log file of each analysis.'))
    parser.add_argument(
            '-b', '--batch-dir',
            action = 'store',
            type = str,
            default = None,
            help = ('Batch directory name.'))
    # parse_args(None) falls back to sys.argv[1:]; an explicit argv list
    # is parsed verbatim (matching the original two-branch behavior).
    args = parser.parse_args(None if argv == sys.argv else argv)
    consolidate_preempted_logs(
            target_run_number = args.run_number,
            number_of_samples = args.number_of_samples,
            batch_dir_name = args.batch_dir)
# Script entry point.
if __name__ == "__main__":
    main_cli()
| 11,788 | 3,182 |
import datetime
class DateUtil(object):
    """Date formatting helpers."""

    @classmethod
    def convertDateToString(cls, date):
        """Return *date* formatted as 'DD/MM/YYYY'.

        Values without a usable ``strftime`` (e.g. plain strings) are
        returned unchanged, preserving the original pass-through
        fallback.
        """
        # BUG FIX: removed the stray debug prints (the argument's type
        # and the swallowed exception) that ran on every call, and
        # narrowed the over-broad `except Exception`.
        try:
            return date.strftime('%d/%m/%Y')
        except (AttributeError, TypeError, ValueError):
            # Not date-like; pass the value through untouched.
            return date
| 303 | 84 |
from django.db import models
class Traceback(models.Model):
    """Persisted record of an exception traceback."""

    # NOTE(review): 'type' shadows the builtin, but renaming it would
    # change the DB column; presumably holds the exception class name —
    # confirm against the code that writes these rows.
    type = models.TextField()
    value = models.TextField()
    traceback = models.TextField()
    # Optional; presumably the request path where the error occurred.
    path = models.TextField(blank=True,null=True)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = 'django_traceback'
        # Newest records first by default.
        ordering = ['-created_at']
| 356 | 107 |
# Generated by Django 1.11.14 on 2018-08-03 13:24
from django.db import migrations
class Migration(migrations.Migration):
    """Prefix legacy demo-user restore blob ids with the '_default/' bucket."""

    dependencies = [
        ('ota', '0006_one_reinstall_measure'),
    ]

    operations = [
        # Raw SQL with no reverse operation: irreversible by design.
        migrations.RunSQL(
            # This migration is not reversible because blobs created
            # since the migration will no longer be accessible after
            # reversing because the old blob db would use the wrong path.
            #
            # '_default' is the bucket name from the old blob db API.
            """
            UPDATE ota_demouserrestore
            SET restore_blob_id = '_default/' || restore_blob_id
            WHERE restore_blob_id NOT LIKE '_default/%'
            """
        ),
    ]
| 752 | 212 |
from gym.envs.atari.atari_env import AtariEnv
from gym import spaces
import numpy as np
class my_UpNDownEnv(AtariEnv):
    """UpNDown Atari env variant that stacks 4 consecutive frames
    vertically into one (840, 160, 3) observation and returns the
    reward averaged over those 4 emulator steps.
    """
    def __init__(self):
        # frameskip=1 so each base _step advances a single frame; the
        # 4-frame aggregation is done in _step below.  Sticky actions
        # with probability 0.25.
        super(my_UpNDownEnv, self).\
        __init__(game = 'up_n_down', obs_type = 'image', frameskip = 1, repeat_action_probability = 0.25)
        # 4 stacked 210x160 RGB frames -> 840 rows.
        self.observation_space = spaces.Box(low = 0, high = 255, shape = (840, 160, 3))
    def _step(self, action):
        ob_list = np.zeros((840, 160, 3))
        reward_sum = 0
        for i in range(4):
            observation, reward, done, info = \
                super(my_UpNDownEnv, self)._step(action)
            #print(reward)
            #print(info)
            # print(observation.shape)
            # print(ob_list.shape)
            # Slot frame i into rows [i*210, (i+1)*210).
            ob_list[i * 210:(i+1)*210, :, :] = observation
            # if i == 0:
            #     ob_list = observation
            # else:
            #     ob_list = np.append(ob_list, observation, axis = 0)
            reward_sum += reward
            if done:
                # NOTE(review): on early termination the last frame AND
                # its reward are replicated into the remaining slots, so
                # the /4 average stays comparable across steps.  The
                # extra `reward_sum += reward` looks intentional but
                # double-counts the final reward — confirm.
                if i != 3:
                    for j in range(i+1, 4):
                        ob_list[j * 210:(j+1)*210, :, :]= observation
                        # ob_list = np.append(ob_list, observation, axis = 0)
                        reward_sum += reward
                break
        return ob_list, reward_sum/4, done, info
        # return observation, reward_sum/4, done, info
# return observation, reward_sum/4, done, info
| 1,389 | 464 |
import logging
import re
import requests
from dateutil.parser import parse as parse_time
from json import JSONDecodeError
class site:
    """Base interface for torrent-site scrapers."""

    def __init__(self):
        """No shared state; concrete sites set up their own."""

    def parse_team(self, url: str) -> dict:
        """Extract fan-sub team info from *url*; overridden by subclasses."""
class bangumi_moe_site(site):
    """Scraper for bangumi.moe's public torrent API."""

    def __init__(self):
        self.logger = logging.getLogger("animaid.bangumi_moe_site")

    def parse_team(self, url: str) -> dict:
        """Resolve the fan-sub team that published the torrent at *url*.

        Returns a team record dict with a generated alias; raises
        Exception when the url is not a torrent page or the torrent
        carries no team info.
        """
        if url.startswith("https") and "torrent" not in url:
            raise Exception(
                f'This is not a torrent url, as "torrent" is not part of the url. Click the anima title and use new page\'s url (should have "torrent" in it).'
            )
        torrent_id = url.split("/")[-1]
        search_url = f"https://bangumi.moe/api/v2/torrent/{torrent_id}"
        response = requests.get(url=search_url).json()
        if "team" not in response.keys() or "_id" not in response["team"]:
            raise Exception(
                f"This record does not have a valid team info, "
                f"try another anima record from the same team."
            )
        team_info = response["team"]
        team_name = team_info["name"]
        team_id = team_info["_id"]
        print(f"The following team info is found:")
        print(f"    team name: {team_name}")
        print(f"    team id: {team_id}")
        filename = response["content"][0][0]
        print(f"    filename: (unknown)")
        # BUG FIX: the previous `re.findall(...)[0]` raised IndexError
        # whenever the filename carried no "[...]" tag, which made the
        # manual-input fallback below unreachable.
        alias_matches = re.findall(r"\[[\w\s-]+\]", filename)
        if alias_matches:
            team_alias = alias_matches[0].replace("[", "").replace("]", "").replace(" ", "_")
            print(f"    team alias:{team_alias}")
        else:
            print(f"Please give this team a unique alias in English,")
            team_alias = input(f"Input the team alias:")
            team_alias = team_alias.strip()
        team = {
            "_id": team_alias,
            "name": team_name,
            "alias": team_alias,
            "source": [
                {
                    "site": "bangumi_moe",
                    "team_id": team_id,
                    # Epoch-ish sentinel so any real update is newer.
                    "last_update": parse_time("2000").isoformat(),
                }
            ],
        }
        return team

    def _search(self, url, ignore_properties=["introduction"]):
        """Fetch a torrent listing from *url*, newest first.

        The properties named in *ignore_properties* are stripped from
        every torrent record to keep the payload small.
        """
        try:
            res = requests.get(url=url).json()
        except JSONDecodeError:
            self.logger.error(f"Anima site request is invalid, url: {url}")
            raise Exception(f"Anima site request is invalid, url: {url}")
        try:
            res["torrents"] = sorted(
                res["torrents"],
                key=lambda x: parse_time(x["publish_time"]),
                reverse=True,
            )
        except KeyError as e:
            self.logger.error(f"Invalid response {res}")
            raise e
        for t in res["torrents"]:
            for i in ignore_properties:
                del t[i]
        if len(res) == 0:
            # BUG FIX: Exception() rejects keyword arguments, so the old
            # `extra={"info": ...}` kwarg raised TypeError instead of
            # this intended error.
            raise Exception(
                f"No data responded, something is wrong with the request to bangumi.moe, url: {url}"
            )
        return res

    def search_by_team(self, team, page, ignore_properties=["introduction"]):
        """List a team's torrents (0-based *page*; API pages are 1-based)."""
        url = f'https://bangumi.moe/api/v2/torrent/team/{team["team_id"]}?p={page+1}&LIMIT=500'
        return self._search(url, ignore_properties)

    def searcy_by_tag(self, tag, page, ignore_properties=["introduction"]):
        """Search torrents by tag.  (Name typo kept: callers use it.)"""
        url = f"https://bangumi.moe/api/v2/torrent/search?query=`{tag}`&p={page+1}&LIMIT=500"
        return self._search(url, ignore_properties)

    def search_by_torrent(self, torrent_id):
        """Fetch a single torrent record, or None when nothing comes back."""
        url = f"https://bangumi.moe/api/v2/torrent/{torrent_id}"
        res = requests.get(url=url).json()
        if len(res) == 0:
            return None
        return res
| 3,813 | 1,164 |
from flaskeztest import EZTestCase
from flaskeztest.exceptions import FixtureDoesNotExistError, EztestidNotInFixture
class FailTC1(EZTestCase):
    """assert_full_fixture_exists must fail for the 'twousers' fixture."""

    FIXTURE = "twousers"

    def runTest(self):
        self.navigate_to_endpoint('index_two')
        try:
            self.assert_full_fixture_exists()
        except AssertionError:
            # Expected: the assertion should not hold for this fixture.
            pass
        else:
            # BUG FIX: self.fail() used to sit inside the try block, so
            # its own AssertionError was swallowed by the except clause
            # and this negative test could never fail.
            self.fail("Should have failed assert full fixture exists")
class AssertEleExistsThatWasntLoadedByFixture(EZTestCase):
    """assert_ele_exists must reject an eztestid absent from the fixture."""

    FIXTURE = "oneuser"

    def runTest(self):
        self.navigate_to_endpoint('index_one')
        try:
            self.assert_ele_exists('User.lastname')
        except EztestidNotInFixture:
            # Expected: 'User.lastname' is not part of the fixture.
            return
        self.fail("Should have raised User.lastname is not an eztestid in fixture")
class AttemptToLoadAFixtureThatDoesntExist(EZTestCase):
    """Loading an unknown fixture name must raise FixtureDoesNotExistError."""

    FIXTURE = "Invalid"

    def setUp(self):
        # Skip the base-class setUp (which loads the fixture) so the
        # expected error can be asserted inside runTest instead.
        pass

    def runTest(self):
        try:
            EZTestCase.setUp(self)
        except FixtureDoesNotExistError:
            # Expected failure for the bogus fixture name.
            return
        self.fail("Should not have gotten passed load_fixture")
| 1,099 | 349 |
import inferpy as inf
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from datareduction.variational_linear_regressor_DR import VariationalLinearRegressor_DR
from prml.rv import VariationalGaussianMixture
from prml.features import PolynomialFeatures
from prml.linear import (
VariationalLinearRegressor,
VariationalLogisticRegressor
)
# Experiment: compare a variational linear regressor fit on full data
# against one fit on k-means-compressed sufficient statistics, scoring
# both by the log-likelihood of held-out targets.

# Reproducible sampling for the training and test draws below.
np.random.seed(1234)
# N: sample count, K: k-means cluster count, D: input dimensionality.
N=10000
K=50
D=10
# def create_toy_data(func, sample_size, std, domain=[0, 1]):
#     x = np.linspace(domain[0], domain[1], sample_size)
#     np.random.shuffle(x)
#     t = func(x) + np.random.normal(scale=std, size=x.shape)
#     return x, t
#
# def cubic(x):
#     return x * (x - 5) * (x + 5)
#
# x_train, y_train = create_toy_data(cubic, N, 10., [-5, 5])
# x = np.linspace(-5, 5, 100)
# y = cubic(x)
# Training inputs: two Gaussian blobs (means 0 and 10) in D+1 dims.
X_train=np.ones((N,D+1))
X_train[0:int(N/2),:] = inf.models.Normal(0,1,dim = D+1).sample(int(N/2))
X_train[int(N/2):N,:] = inf.models.Normal(10,1,dim = D+1).sample(int(N/2))
# Noise-free targets from a random linear model.
w = np.random.rand(D+1)
y_train = X_train@w.T
# Independent test draw from the same two-blob distribution.
X=np.ones((N,D+1))
X[0:int(N/2),:] = inf.models.Normal(0,1,dim = D+1).sample(int(N/2))
X[int(N/2):N,:] = inf.models.Normal(10,1,dim = D+1).sample(int(N/2))
y = X@w.T
#feature = PolynomialFeatures(degree=D)
#X_train = feature.transform(x_train)
#X = feature.transform(x)
# Baseline: variational regressor on the full training data.
vlr = VariationalLinearRegressor(beta=0.01)
vlr.fit(X_train, y_train)
y_mean, y_std = vlr.predict(X, return_std=True)
# plt.scatter(x_train, y_train, s=100, facecolor="none", edgecolor="b")
# plt.plot(x, y, c="g", label="$\sin(2\pi x)$")
# plt.plot(x, y_mean, c="r", label="prediction")
# plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color="pink")
# plt.legend()
# plt.show()
# Test log-likelihood under the full-data model.
normal = inf.models.Normal(y_mean,y_std)
l = normal.log_prob(y)
print(np.sum(l))
# Per-sample sufficient statistics: x_i*y_i and the flattened outer
# product x_i x_i^T, concatenated into one feature vector.
y_repeated = np.repeat(np.expand_dims(y_train,axis=1),X_train.shape[1],axis=1)
XY_train = np.multiply(X_train,y_repeated)
# np.multiply(np.expand_dims(X_train,axis=2),np.expand_dims(X_train,axis=1))[1] == np.matmul(np.expand_dims(X_train[1],axis=1), np.expand_dims(X_train[1],axis=1).T)
XX_train = np.multiply(np.expand_dims(X_train,axis=2),np.expand_dims(X_train,axis=1))
XX_train = XX_train.reshape((XX_train.shape[0],-1))
XJoin_train = np.concatenate((XY_train,XX_train),axis=1)
# Data reduction: cluster the statistics, then aggregate each centroid
# weighted by its cluster size to approximate the full-data sums.
kmeans = KMeans(n_clusters=K, random_state=0).fit(XJoin_train)
weights = np.asarray([sum(kmeans.labels_==x) for x in range(0, K)])
clusters_centers = np.multiply(kmeans.cluster_centers_,np.repeat(weights.reshape(K,1),kmeans.cluster_centers_.shape[1],axis=1))
clusters_sum = np.sum(clusters_centers,axis=0)
# Split the aggregated vector back into sum(x*y) and sum(x x^T).
X_dr = {'XY': clusters_sum[0:(D+1)],'XX': clusters_sum[(D+1):(D+1)+(D+1)*(D+1)].reshape((D+1,D+1))}
# Regressor fit only on the reduced sufficient statistics.
vlr_dr = VariationalLinearRegressor_DR(beta=0.01)
vlr_dr.fit(X_dr)
y_mean_dr, y_std_dr = vlr_dr.predict(X, return_std=True)
# plt.scatter(x_train, y_train, s=100, facecolor="none", edgecolor="b")
# plt.plot(x, y, c="g", label="$\sin(2\pi x)$")
# plt.plot(x, y_mean, c="r", label="prediction")
# plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color="pink")
# plt.legend()
# plt.show()
# Test log-likelihood under the reduced-data model (compare to above).
normal_dr = inf.models.Normal(y_mean_dr,y_std_dr)
l_dr = normal_dr.log_prob(y)
print(np.sum(l_dr))
| 3,227 | 1,485 |
#!/usr/bin/python
"""
Unit tests for worldpop.
"""
from os.path import join
import pytest
from hdx.data.vocabulary import Vocabulary
from hdx.hdx_configuration import Configuration
from hdx.hdx_locations import Locations
from hdx.location.country import Country
from worldpop import (
generate_datasets_and_showcases,
get_countriesdata,
get_indicators_metadata,
)
class TestWorldPop:
indicators_metadata = [
{
"alias": "pop",
"name": "Population",
"title": "Population",
"desc": "WorldPop produces different types of gridded population count datasets...",
},
{
"alias": "births",
"name": "Births",
"title": "Births",
"desc": "The health and survival of women and their new-born babies in low income countries is a key public health priority...",
},
{
"alias": "pregnancies",
"name": "Pregnancies",
"title": "Pregnancies",
"desc": "The health and survival of women and their new-born babies in low income countries is a key public health priority...",
},
{
"alias": "age_structures",
"name": "Age and sex structures",
"title": "Age and sex structures",
"desc": "Age and sex structures: WorldPop produces different types of gridded population count datasets...",
},
]
countriesdata = {
"AUS": {
"pop": {
"wpgp": ["http://papa/getJSON/pop/wpgp?iso3=AUS"],
"wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=AUS"],
}
},
"BRA": {
"pop": {
"wpgp": ["http://papa/getJSON/pop/wpgp?iso3=BRA"],
"wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=BRA"],
}
},
"CAN": {
"pop": {
"wpgp": ["http://papa/getJSON/pop/wpgp?iso3=CAN"],
"wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=CAN"],
}
},
"RUS": {
"pop": {
"wpgp": ["http://papa/getJSON/pop/wpgp?iso3=RUS"],
"wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=RUS"],
}
},
"World": {
"pop": {
"wpgp1km": [
"http://papa/getJSON/pop/wpgp1km?id=24776",
"http://papa/getJSON/pop/wpgp1km?id=24777",
]
}
},
"ZWE": {
"pop": {
"wpgp": ["http://papa/getJSON/pop/wpgp?iso3=ZWE"],
"wpgpunadj": ["http://papa/getJSON/pop/wpgpunadj?iso3=ZWE"],
}
},
}
wpgpdata = [
{"id": "1325", "iso3": "AUS"},
{"id": "1326", "iso3": "RUS"},
{"id": "1327", "iso3": "BRA"},
{"id": "1328", "iso3": "CAN"},
{"id": "1482", "iso3": "ZWE"},
]
wpgpunadjdata = [
{"id": "13251", "iso3": "AUS"},
{"id": "13261", "iso3": "RUS"},
{"id": "13271", "iso3": "BRA"},
{"id": "13281", "iso3": "CAN"},
{"id": "14821", "iso3": "ZWE"},
]
metadata = [
{
"id": "1482",
"title": "The spatial distribution of population in 2000, Zimbabwe",
"desc": "Estimated total number of people per grid-cell.",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2000",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/1482/zwe_ppp_wpgp_2000_Image.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=1482",
},
{
"id": "1731",
"title": "The spatial distribution of population in 2001, Zimbabwe",
"desc": "Estimated total number of people per grid-cell.",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2001",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/1731/zwe_ppp_wpgp_2001_Image.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=1731",
},
{
"id": "3474",
"title": "The spatial distribution of population in 2008, Zimbabwe",
"desc": "Estimated total number of people per grid-cell.",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2008",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/3474/zwe_ppp_wpgp_2008_Image.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=3474",
},
{
"id": "4711",
"title": "The spatial distribution of population in 2013, Zimbabwe",
"desc": "Estimated total number of people per grid-cell.",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2013",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/4711/zwe_ppp_wpgp_2013_Image.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=4711",
},
{
"id": "6205",
"title": "The spatial distribution of population in 2019, Zimbabwe",
"desc": "Estimated total number of people per grid-cell.",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2019",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/6205/zwe_ppp_wpgp_2019_Image.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=6205",
},
{
"id": "6454",
"title": "The spatial distribution of population in 2020, Zimbabwe",
"desc": "Estimated total number of people per grid-cell.",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2020",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/6454/zwe_ppp_wpgp_2020_Image.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=6454",
},
]
metadataunadj = [
{
"id": "14821",
"title": "The spatial distribution of population in 2000 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
"desc": "Estimated total number of people per grid-cell. UNAdj",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2000",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2000/ZWE/zwe_ppp_2000_UNadj.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/1482/zwe_ppp_wpgp_2000_Image_UNadj.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=14821",
},
{
"id": "17311",
"title": "The spatial distribution of population in 2001 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
"desc": "Estimated total number of people per grid-cell. UNAdj",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2001",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001_UNadj.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/1731/zwe_ppp_wpgp_2001_Image_UNadj.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=17311",
},
{
"id": "34741",
"title": "The spatial distribution of population in 2008 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
"desc": "Estimated total number of people per grid-cell. UNAdj",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2008",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008_UNadj.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/3474/zwe_ppp_wpgp_2008_Image_UNadj.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=34741",
},
{
"id": "47111",
"title": "The spatial distribution of population in 2013 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
"desc": "Estimated total number of people per grid-cell. UNAdj",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2013",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013_UNadj.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/4711/zwe_ppp_wpgp_2013_Image_UNadj.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=47111",
},
{
"id": "62051",
"title": "The spatial distribution of population in 2019 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
"desc": "Estimated total number of people per grid-cell. UNAdj",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2019",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019_UNadj.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/6205/zwe_ppp_wpgp_2019_Image_UNadj.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=62051",
},
{
"id": "64541",
"title": "The spatial distribution of population in 2020 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
"desc": "Estimated total number of people per grid-cell. UNAdj",
"doi": "10.5258/SOTON/WP00645",
"date": "2018-11-01",
"popyear": "2020",
"citation": "WorldPop",
"data_file": "GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif",
"archive": "N",
"public": "Y",
"source": "WorldPop, University of Southampton, UK",
"data_format": "geotiff",
"author_email": "wp@worldpop.uk",
"author_name": "WorldPop",
"maintainer_name": "WorldPop",
"maintainer_email": "wp@worldpop.uk",
"project": "Population",
"category": "Individual countries 2000-2020 UN adjusted ( 100m resolution )",
"gtype": "Population",
"continent": "Africa",
"country": "Zimbabwe",
"iso3": "ZWE",
"files": [
"ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020_UNadj.tif"
],
"url_img": "https://www.worldpop.org/tabs/gdata/img/6454/zwe_ppp_wpgp_2020_Image_UNadj.png",
"organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
"license": "https://www.worldpop.org/data/licence.txt",
"url_summary": "https://www.worldpop.org/geodata/summary?id=64541",
},
]
# Canned API responses for the 1km-aggregated global mosaic endpoint:
# the listing returns only ids; per-id metadata follows below.
wpgp1kmdata = [{"id": "24776"}, {"id": "24777"}]
# Full metadata record for the 2020 global mosaic (note: empty url_img,
# so it cannot supply a showcase image).
metadata_24777 = {
    "id": "24777",
    "title": "The spatial distribution of population in 2020",
    "desc": "Estimated total number of people per grid-cell...\r\n",
    "doi": "10.5258/SOTON/WP00647",
    "date": "0018-02-01",
    "popyear": "2020",
    "citation": "WorldPop...\r\n",
    "data_file": "GIS/Population/Global_2000_2020/2020/0_Mosaicked/ppp_2020_1km_Aggregated.tif",
    "file_img": "world_ppp_wpgp_2020_Image.png",
    "archive": "N",
    "public": "Y",
    "source": "WorldPop, University of Southampton, UK",
    "data_format": "tiff",
    "author_email": "wp@worldpop.uk",
    "author_name": "WorldPop",
    "maintainer_name": "WorldPop",
    "maintainer_email": "wp@worldpop.uk",
    "project": "Population",
    "category": "Global mosaics 2000-2020",
    "gtype": "Population",
    "continent": "World",
    "country": None,
    "iso3": None,
    "files": [
        "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/0_Mosaicked/ppp_2020_1km_Aggregated.tif"
    ],
    "url_img": "",
    "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
    "license": "https://www.worldpop.org/data/licence.txt",
    "url_summary": "https://www.worldpop.org/geodata/summary?id=24777",
}
# Full metadata record for the 2019 global mosaic (this one has url_img).
metadata_24776 = {
    "id": "24776",
    "title": "The spatial distribution of population in 2019",
    "desc": "Estimated total number of people per grid-cell...\r\n",
    "doi": "10.5258/SOTON/WP00647",
    "date": "2018-11-01",
    "popyear": "2019",
    "citation": "WorldPop...\r\n",
    "data_file": "GIS/Population/Global_2000_2020/2019/0_Mosaicked/ppp_2019_1km_Aggregated.tif",
    "file_img": "world_ppp_wpgp_2019_Image.png",
    "archive": "N",
    "public": "Y",
    "source": "WorldPop, University of Southampton, UK",
    "data_format": "tiff",
    "author_email": "wp@worldpop.uk",
    "author_name": "WorldPop",
    "maintainer_name": "WorldPop",
    "maintainer_email": "wp@worldpop.uk",
    "project": "Population",
    "category": "Global mosaics 2000-2020",
    "gtype": "Population",
    "continent": "World",
    "country": None,
    "iso3": None,
    "files": [
        "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/0_Mosaicked/ppp_2019_1km_Aggregated.tif"
    ],
    "url_img": "https://www.worldpop.org/tabs/gdata/img/24776/world_ppp_wpgp_2019_Image.png",
    "organisation": "WorldPop, University of Southampton, UK, www.worldpop.org",
    "license": "https://www.worldpop.org/data/licence.txt",
    "url_summary": "https://www.worldpop.org/geodata/summary?id=24776",
}
@pytest.fixture(scope="function")
def configuration(self):
    """Create a read-only HDX Configuration with stubbed locations and vocabularies.

    Returns the active Configuration so tests can read project settings
    (e.g. the ``indicators`` section) without touching the network.
    """
    Configuration._create(
        hdx_read_only=True,
        user_agent="test",
        project_config_yaml=join("tests", "config", "project_configuration.yml"),
    )
    # Only the two locations the tests reference are registered as valid.
    Locations.set_validlocations(
        [{"name": "zwe", "title": "Zimbabwe"}, {"name": "world", "title": "World"}]
    )
    Country.countriesdata(use_live=False)
    # Pre-populate Vocabulary caches so tag lookups need no HDX calls.
    Vocabulary._tags_dict = True
    Vocabulary._approved_vocabulary = {
        "tags": [{"name": "population"}, {"name": "geodata"}],
        "id": "4e61d464-4943-4e97-973a-84673c1aaa87",
        "name": "approved",
    }
    return Configuration.read()
@pytest.fixture(scope="function")
def downloader(self):
    """A stub downloader replaying canned JSON fixtures for known URLs."""

    class Download:
        url = None

        @classmethod
        def download(cls, url):
            # Just remember the last requested URL; get_json() answers from it.
            cls.url = url

        @classmethod
        def get_json(cls):
            # Map each known endpoint URL to its fixture payload.
            canned = {
                "http://lala/getJSON/": TestWorldPop.indicators_metadata,
                "http://papa/getJSON/pop/wpgp": TestWorldPop.wpgpdata,
                "http://papa/getJSON/pop/wpgpunadj": TestWorldPop.wpgpunadjdata,
                "http://papa/getJSON/pop/wpgp1km": TestWorldPop.wpgp1kmdata,
                "http://papa/getJSON/pop/wpgp?iso3=ZWE": TestWorldPop.metadata,
                "http://papa/getJSON/pop/wpgpunadj?iso3=ZWE": TestWorldPop.metadataunadj,
                "http://papa/getJSON/pop/wpgp1km?id=24776": TestWorldPop.metadata_24776,
                "http://papa/getJSON/pop/wpgp1km?id=24777": TestWorldPop.metadata_24777,
            }
            if cls.url in canned:
                return {"data": canned[cls.url]}
            # Unknown URL: behave like the original elif chain and yield None.
            return None

        @staticmethod
        def get_text():
            return (
                "The WorldPop project aims to provide an open access archive of spatial "
                "demographic datasets ... at creativecommons.org."
            )

    return Download()
def test_get_indicators_metadata(self, configuration, downloader):
    """get_indicators_metadata returns the fixture metadata keyed by indicator alias."""
    result = get_indicators_metadata(
        "http://lala/getJSON/", downloader, configuration["indicators"]
    )
    assert "pop" in result
    # Compare order-insensitively by sorting both sides on the alias field.
    by_alias = lambda entry: entry["alias"]
    assert sorted(result.values(), key=by_alias) == sorted(
        TestWorldPop.indicators_metadata, key=by_alias
    )
def test_get_countriesdata(self, configuration, downloader):
    """get_countriesdata returns per-country data plus the iso3 country list."""
    indicators = configuration["indicators"]
    # Restrict to the "pop" indicator so only the pop endpoints are hit.
    cutdownindicators = {"pop": indicators["pop"]}
    countriesdata, countries = get_countriesdata(
        "http://papa/getJSON/", downloader, cutdownindicators
    )
    assert countriesdata == TestWorldPop.countriesdata
    # "World" is appended after the real iso3 codes for the global mosaics.
    assert countries == [
        {"iso3": "AUS"},
        {"iso3": "BRA"},
        {"iso3": "CAN"},
        {"iso3": "RUS"},
        {"iso3": "ZWE"},
        {"iso3": "World"},
    ]
def test_generate_datasets_and_showcases(self, configuration, downloader):
    """End-to-end check of dataset/showcase generation for "World" and "ZWE"."""
    indicators_metadata = {"pop": TestWorldPop.indicators_metadata[0]}
    # --- Global mosaic case ("World"): resources come from wpgp1km ids ---
    countryiso = "World"
    countrydata = TestWorldPop.countriesdata[countryiso]
    datasets, showcases = generate_datasets_and_showcases(
        downloader, countryiso, indicators_metadata, countrydata
    )
    dataset = datasets[0]
    assert dataset == {
        "name": "worldpop-population-for-world",
        "title": "World - Population",
        "notes": "WorldPop produces different types of gridded population count datasets... \nData for earlier dates is available directly from WorldPop. \n \nWorldPop...\r\n",
        "methodology": "Other",
        "methodology_other": "Estimated total number of people per grid-cell...\r\n",
        "dataset_source": "WorldPop, University of Southampton, UK",
        "license_id": "hdx-other",
        "license_other": "The WorldPop project aims to provide an open access archive of spatial demographic datasets ... at creativecommons.org.",
        "private": False,
        "maintainer": "37023db4-a571-4f28-8d1f-15f0353586af",
        "owner_org": "3f077dff-1d05-484d-a7c2-4cb620f22689",
        "data_update_frequency": "365",
        "subnational": "1",
        "groups": [{"name": "world"}],
        "tags": [
            {
                "name": "population",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
            {
                "name": "geodata",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
        ],
        "dataset_date": "[2019-01-01T00:00:00 TO 2020-12-31T00:00:00]",
    }
    # Resources are ordered newest year first.
    resources = dataset.get_resources()
    assert resources == [
        {
            "name": "ppp_2020_1km_Aggregated.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/0_Mosaicked/ppp_2020_1km_Aggregated.tif",
            "description": "The spatial distribution of population in 2020",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "ppp_2019_1km_Aggregated.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/0_Mosaicked/ppp_2019_1km_Aggregated.tif",
            "description": "The spatial distribution of population in 2019",
            "resource_type": "api",
            "url_type": "api",
        },
    ]
    # The showcase image falls back to the 2019 record (2020 has empty url_img).
    showcase = next(iter(showcases.values()))[0]
    assert showcase == {
        "name": "worldpop-population-for-world-showcase",
        "title": "WorldPop World Population Summary Page",
        "notes": "Summary for Global mosaics 2000-2020 - World",
        "url": "https://www.worldpop.org/geodata/summary?id=24777",
        "image_url": "https://www.worldpop.org/tabs/gdata/img/24776/world_ppp_wpgp_2019_Image.png",
        "tags": [
            {
                "name": "population",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
            {
                "name": "geodata",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
        ],
    }
    # --- Individual country case ("ZWE"): plain + UNadj resources per year ---
    countryiso = "ZWE"
    countrydata = TestWorldPop.countriesdata[countryiso]
    datasets, showcases = generate_datasets_and_showcases(
        downloader, countryiso, indicators_metadata, countrydata
    )
    dataset = datasets[0]
    assert dataset == {
        "name": "worldpop-population-for-zimbabwe",
        "title": "Zimbabwe - Population",
        "notes": "WorldPop produces different types of gridded population count datasets... \nData for earlier dates is available directly from WorldPop. \n \nWorldPop",
        "methodology": "Other",
        "methodology_other": "Estimated total number of people per grid-cell. UNAdj",
        "dataset_source": "WorldPop, University of Southampton, UK",
        "license_id": "hdx-other",
        "license_other": "The WorldPop project aims to provide an open access archive of spatial demographic datasets ... at creativecommons.org.",
        "private": False,
        "maintainer": "37023db4-a571-4f28-8d1f-15f0353586af",
        "owner_org": "3f077dff-1d05-484d-a7c2-4cb620f22689",
        "data_update_frequency": "365",
        "subnational": "1",
        "groups": [{"name": "zwe"}],
        "tags": [
            {
                "name": "population",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
            {
                "name": "geodata",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
        ],
        "dataset_date": "[2000-01-01T00:00:00 TO 2020-12-31T00:00:00]",
    }
    resources = dataset.get_resources()
    assert resources == [
        {
            "name": "zwe_ppp_2020.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020.tif",
            "description": "The spatial distribution of population in 2020, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2020_UNadj.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2020/ZWE/zwe_ppp_2020_UNadj.tif",
            "description": "The spatial distribution of population in 2020 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2019.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019.tif",
            "description": "The spatial distribution of population in 2019, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2019_UNadj.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2019/ZWE/zwe_ppp_2019_UNadj.tif",
            "description": "The spatial distribution of population in 2019 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2013.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013.tif",
            "description": "The spatial distribution of population in 2013, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2013_UNadj.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2013/ZWE/zwe_ppp_2013_UNadj.tif",
            "description": "The spatial distribution of population in 2013 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2008.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008.tif",
            "description": "The spatial distribution of population in 2008, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2008_UNadj.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2008/ZWE/zwe_ppp_2008_UNadj.tif",
            "description": "The spatial distribution of population in 2008 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2001.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001.tif",
            "description": "The spatial distribution of population in 2001, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        },
        {
            "name": "zwe_ppp_2001_UNadj.tif",
            "format": "geotiff",
            "url": "ftp://ftp.worldpop.org.uk/GIS/Population/Global_2000_2020/2001/ZWE/zwe_ppp_2001_UNadj.tif",
            "description": "The spatial distribution of population in 2001 with country total adjusted to match the corresponding UNPD estimate, Zimbabwe",
            "resource_type": "api",
            "url_type": "api",
        }
    ]
    showcase = next(iter(showcases.values()))[0]
    assert showcase == {
        "name": "worldpop-population-for-zimbabwe-showcase",
        "title": "WorldPop Zimbabwe Population Summary Page",
        "notes": "Summary for Individual countries 2000-2020 ( 100m resolution ) - Zimbabwe",
        "url": "https://www.worldpop.org/geodata/summary?id=6454",
        "image_url": "https://www.worldpop.org/tabs/gdata/img/6454/zwe_ppp_wpgp_2020_Image.png",
        "tags": [
            {
                "name": "population",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
            {
                "name": "geodata",
                "vocabulary_id": "4e61d464-4943-4e97-973a-84673c1aaa87",
            },
        ],
    }
| 38,348 | 13,518 |
from typing import Iterable, List, Tuple
"""
----------> (1,0)
|
|
|
|
v
(0,1)
"""
def parse_input() -> List[Tuple[int, int]]:
    """Read day02.txt and convert each "<direction> <size>" line to an (x, y) step.

    Per the module diagram, x grows with "forward" and y grows with "down".

    Returns:
        One (dx, dy) tuple per non-empty input line.

    Raises:
        ValueError: if a line contains an unrecognised direction.
    """
    # Unit vector for each recognised direction keyword.
    direction_vectors = {
        "forward": (1, 0),
        "backward": (-1, 0),
        "down": (0, 1),
        "up": (0, -1),
    }
    moves: List[Tuple[int, int]] = []
    # Text mode replaces the original binary read + manual utf-8 decode.
    with open('day02.txt') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate trailing blank lines
            direction, size_text = line.split()
            size = int(size_text)
            if direction not in direction_vectors:
                raise ValueError("Unknown direction: {!r}".format(direction))
            dx, dy = direction_vectors[direction]
            moves.append((dx * size, dy * size))
    return moves
def part_one(p_input: List[Tuple[int, int]]) -> int:
    """Product of final horizontal position and depth after applying all moves."""
    x = depth = 0
    for dx, dy in p_input:
        x += dx
        depth += dy
    return x * depth
def part_two(p_input: List[Tuple[int, int]]) -> int:
    """Like part_one, but the second move component steers an 'aim' rather than depth."""
    x = depth = aim = 0
    for dx, daim in p_input:
        x += dx
        depth += dx * aim  # depth changes only while moving forward, scaled by aim
        aim += daim
    return x * depth
if __name__ == "__main__":
    # Re-parse for each part so both work on a fresh move list.
    print(part_one(parse_input()))
    print(part_two(parse_input()))
| 1,277 | 450 |
import requests as req
from bs4 import BeautifulSoup
import os
import os.path
import shutil
# Scrape a gaana.com playlist, search each song title on YouTube and download
# the first hit's audio via youtube-dl.
try:
    r1 = req.get("https://gaana.com/playlist/gaana-dj-best-of-badshah")
    c1 = r1.content
    # BUG FIX: was print(r), but no name 'r' exists -> NameError that the
    # RequestException handler below did not catch.
    print(r1)
    soup1 = BeautifulSoup(c1, "html.parser")
    # Song links on the playlist page carry the "sng_c " class (trailing space intended).
    song_links = soup1.find_all("a", {"class": "sng_c "})
    namelist = []
    for song in song_links:
        print(song.text)
        words = song.text.split()
        # Build the YouTube search URL and a filesystem-safe file name in one pass.
        url = "https://www.youtube.com/results?search_query=" + "+".join(words)
        name = "_".join(words)
        namelist.append(name)
        r2 = req.get(url)
        soup2 = BeautifulSoup(r2.content, "html.parser")
        results = soup2.findAll(attrs={"class": "yt-uix-tile-link"})
        link = "https://www.youtube.com" + results[0]["href"]
        # SECURITY: link comes from scraped HTML and is interpolated into a
        # shell command; a hostile href could inject shell syntax. Consider
        # subprocess.run([...], shell=False) instead of os.system.
        command = (
            "youtube-dl --extract-audio --audio-format mp3 "
            + link + " -o " + name + ".mp3"
        )
        os.system(command)
except req.exceptions.RequestException as e:
    print(e)
'''
for n in namelist:
if os.path.isfile(n):
n=n+".mp3"
src = n
dst = 'data/romance/{}'.format(n)
shutil.move(src, dst)
filelist=os.listdir('.')
for f in filelist:
os.remove(f)
'''
| 1,300 | 638 |
# Generated by Django 3.1.7 on 2021-05-23 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust verbose names / description length on the interest models."""

    dependencies = [
        ('interest', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='interest',
            name='name',
            field=models.CharField(max_length=100, verbose_name='Interesse'),
        ),
        migrations.AlterField(
            model_name='interesttype',
            name='description',
            # Optional free-text description, capped at 400 characters.
            field=models.CharField(blank=True, max_length=400, null=True, verbose_name='Descrição do tipo de interesse'),
        ),
    ]
| 638 | 202 |
import numpy as np
import torchvision
import torch
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from config import device
import torch.nn as nn
import torch.optim as optim
import tqdm
from torch.nn import functional as F
#截取部分数据集
class PartialDataset(Dataset):
    """Expose at most ``n_items`` elements of a wrapped dataset.

    Only the reported length is truncated; item lookup delegates directly
    to the wrapped dataset.
    """

    def __init__(self, dataset, n_items):
        self.dataset = dataset
        self.n_items = n_items

    def __getitem__(self, index):
        return self.dataset[index]

    def __len__(self):
        return min(self.n_items, len(self.dataset))
#设置随机种子,使得代码可复现
def set_random_seeds(seed_value=0):
    """Seed torch RNGs and force deterministic cuDNN behaviour for reproducibility.

    Args:
        seed_value: seed applied to the CPU and all CUDA generators.
    """
    # BUG FIX: the CPU generator was never seeded, so CPU-side sampling
    # (and CUDA-free runs) were not reproducible at all.
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed(seed_value)      # no-op when CUDA is unavailable
    torch.cuda.manual_seed_all(seed_value)  # covers multi-GPU setups
    # Trade speed for determinism in cuDNN kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# Long description is assembled from the README plus the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Runtime dependencies.
requirements = [
    'Click>=6.0',
    'logzero',
    # TODO: put package requirements here
]

setup_requirements = [
    'pytest-runner',
    # TODO(xguse): put setup requirements (distutils extensions, etc.) here
]

test_requirements = [
    'pytest',
    # TODO: put package test requirements here
]

setup(
    name='holographer',
    version='0.0.2',
    description="Holographer copies a filesytem object to a storage location and creates in its place a symlinked decoy pointing to the stored target. Think of when you run out of HDD space and need to move things to free up space but do not want to break everything that may expect to find your target in its old location.",
    long_description=readme + '\n\n' + history,
    author="Gus Dunn",
    author_email='w.gus.dunn@gmail.com',
    url='https://github.com/xguse/holographer',
    # src/ layout: packages live under src/, hence the package_dir mapping.
    packages=find_packages('src'),
    package_dir={"": "src"},
    entry_points={
        'console_scripts': [
            'holo=holographer.cli.main:run'
        ]
    },
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='holographer',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
| 2,068 | 646 |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(s):
    """Count substrings of the digit string s whose value is divisible by 4.

    Divisibility by 4 depends only on the last two digits, so each single
    digit in {0, 4, 8} counts once, and each divisible two-digit window
    ending at index i + 1 accounts for i + 1 longer substrings as well.
    (The ord arithmetic is kept deliberately: non-digit characters must
    produce the same values as the original implementation.)
    """
    total = sum(1 for ch in s if ch in ('4', '8', '0'))
    for i, (hi, lo) in enumerate(zip(s, s[1:])):
        two_digit = (ord(hi) - ord('0')) * 10 + (ord(lo) - ord('0'))
        if two_digit % 4 == 0:
            total += i + 1
    return total
#TOFILL
if __name__ == '__main__':
    # Benchmark inputs; several contain non-digit characters on purpose.
    param = [
        ('Qaq',),
        ('9400761825850',),
        ('0011001111',),
        ('lasWqrLRq',),
        ('5662',),
        ('110',),
        (' tOYKf',),
        ('6536991235305',),
        ('11111',),
        ('uZftT iDHcYiCt',)
    ]
    n_success = 0
    # NOTE(review): f_filled is the candidate implementation expected to be
    # pasted at the #TOFILL marker above; this script fails with NameError
    # until it is provided.
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success += 1
    print("#Results: %i, %i" % (n_success, len(param)))
""" fabfile to prepare the notebook """
from fabric.api import local,lcd
from fabric.utils import abort
import os
static_dir = 'static'
components_dir = os.path.join(static_dir,'components')
def test_component(name):
    """Install bower components unless *name* is already present on disk."""
    component_path = os.path.join(components_dir, name)
    if not os.path.exists(component_path):
        components()
def components():
    """install components with bower"""
    # bower reads its manifest relative to the static directory.
    with lcd(static_dir):
        local('bower install')
def css(minify=True):
    """Compile less/style.less into css/style.min.css.

    ``minify`` may be a bool or the strings 'True'/'False' (fab passes
    CLI arguments as strings); anything else aborts.
    """
    for component in ('bootstrap', 'less.js'):
        test_component(component)
    if minify not in ('True', 'False', True, False):
        abort('minify must be Boolean')
    wants_minified = minify in ('True', True)
    flag = '-x' if wants_minified else ''
    with lcd(static_dir):
        local('lessc {min_flag} less/style.less css/style.min.css'.format(min_flag=flag))
| 846 | 268 |
import os
import pytest
from pennylane import qchem
from pennylane.vqe import Hamiltonian
import numpy as np
# Test molecule: 8 atoms (element symbols) with their coordinates flattened
# as consecutive (x, y, z) triples — 24 values for 8 atoms.
symbols = ["C", "C", "N", "H", "H", "H", "H", "H"]
coordinates = np.array(
    [
        0.68219113,
        -0.85415621,
        -1.04123909,
        -1.34926445,
        0.23621577,
        0.61794044,
        1.29068294,
        0.25133357,
        1.40784596,
        0.83525895,
        -2.88939124,
        -1.16974047,
        1.26989596,
        0.19275206,
        -2.69852891,
        -2.57758643,
        -1.05824663,
        1.61949529,
        -2.17129532,
        2.04090421,
        0.11338357,
        2.06547065,
        2.00877887,
        1.20186581,
    ]
)
# Mixed-case mapping strings ("jordan_WIGNER") exercise case-insensitive
# handling of the fermion-to-qubit mapping argument.
@pytest.mark.parametrize(
    (
        "charge",
        "mult",
        "package",
        "nact_els",
        "nact_orbs",
        "mapping",
    ),
    [
        (0, 1, "psi4", 2, 2, "jordan_WIGNER"),
        (1, 2, "pyscf", 3, 4, "BRAVYI_kitaev"),
        (-1, 2, "pyscf", 1, 2, "jordan_WIGNER"),
        (2, 1, "psi4", 2, 2, "BRAVYI_kitaev"),
    ],
)
def test_building_hamiltonian(
    charge,
    mult,
    package,
    nact_els,
    nact_orbs,
    mapping,
    psi4_support,
    requires_babel,
    tmpdir,
):
    r"""Test that the generated Hamiltonian `built_hamiltonian` is an instance of the PennyLane
    Hamiltonian class and the correctness of the total number of qubits required to run the
    quantum simulation. The latter is tested for different values of the molecule's charge and
    for active spaces with different size"""
    if package == "psi4" and not psi4_support:
        pytest.skip("Skipped, no Psi4 support")

    built_hamiltonian, qubits = qchem.molecular_hamiltonian(
        symbols,
        coordinates,
        charge=charge,
        mult=mult,
        package=package,
        active_electrons=nact_els,
        active_orbitals=nact_orbs,
        mapping=mapping,
        outpath=tmpdir.strpath,
    )

    assert isinstance(built_hamiltonian, Hamiltonian)
    # One spin-up and one spin-down qubit per active spatial orbital.
    assert qubits == 2 * nact_orbs
| 2,028 | 879 |
import os
import subprocess
import traceback
import logging
logger = logging.getLogger("lims_dashboard")
def run_script(app, name, options):
    """Run a configured script from the app's uploads directory.

    Args:
        app: app object exposing ``root_path`` and ``config`` with keys
            'my_scripts', 'SCRIPT_FOLDER' and (optionally) 'python_exec'.
        name: key of the script inside ``app.config['my_scripts']``.
        options: whitespace-separated extra command-line arguments.

    Returns:
        Tuple ``(returncode, out, err)``; ``returncode`` is -1 when the
        subprocess could not be spawned.
    """
    cwd = os.getcwd()
    os.chdir('{0}/uploads'.format(app.root_path))
    try:
        conf_obj = app.config['my_scripts'][name]
        # ':' is a shell no-op — safe fallback command for unknown script types.
        command = [':']
        if conf_obj['type'] == 'python':
            try:
                python_exec = conf_obj['python_exec']
            except KeyError:  # No python exec specified in script conf
                python_exec = app.config['python_exec']
            command = [python_exec,
                       os.path.join(app.config['SCRIPT_FOLDER'],
                                    app.config['my_scripts'][name]['script'])]
            command.extend(options.split())
        logger.info("About to run command: {}".format(" ".join(command)))
        try:
            handle = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = handle.communicate()
            returncode = handle.returncode
        except Exception:
            # Spawning failed (bad interpreter path etc.); report instead of raising.
            returncode = -1
            out = "Running the command: {}".format(" ".join(command))
            err = traceback.format_exc()
    finally:
        # BUG FIX: previously the working directory was not restored when the
        # config lookup or command construction raised before the final chdir.
        os.chdir(cwd)
    return returncode, out, err
| 1,106 | 349 |
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2019 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import UTCDateTime, db
from indico.util.date_time import now_utc
from indico.util.string import return_ascii
from indico_chat.xmpp import delete_room
class Chatroom(db.Model):
    """An XMPP multi-user chatroom, optionally hosted on a custom MUC server.

    The pair (jid_node, custom_server) is unique, so the same node may
    exist once per server.
    """

    __tablename__ = 'chatrooms'
    __table_args__ = (db.UniqueConstraint('jid_node', 'custom_server'),
                      {'schema': 'plugin_chat'})

    #: Chatroom ID
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: Node of the chatroom's JID (the part before `@domain`)
    jid_node = db.Column(
        db.String,
        nullable=False
    )
    #: Name of the chatroom
    name = db.Column(
        db.String,
        nullable=False
    )
    #: Description of the chatroom
    description = db.Column(
        db.Text,
        nullable=False,
        default=''
    )
    #: Password to join the room
    password = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    #: Custom Jabber MUC server hostname (empty string means "use the
    #: plugin-wide default server")
    custom_server = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    #: ID of the creator
    created_by_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        index=True,
        nullable=False
    )
    #: Creation timestamp of the chatroom
    created_dt = db.Column(
        UTCDateTime,
        nullable=False,
        default=now_utc
    )
    #: Modification timestamp of the chatroom
    modified_dt = db.Column(
        UTCDateTime
    )
    #: The user who created the chatroom
    created_by_user = db.relationship(
        'User',
        lazy=True,
        backref=db.backref(
            'chatrooms',
            lazy='dynamic'
        )
    )

    @property
    def locator(self):
        # URL-parameter mapping used by the web framework's URL builder.
        return {'chatroom_id': self.id}

    @property
    def server(self):
        """The server name of the chatroom.

        Usually the default one unless a custom one is set.
        """
        from indico_chat.plugin import ChatPlugin
        return self.custom_server or ChatPlugin.settings.get('muc_server')

    @property
    def jid(self):
        # Full Jabber ID: <node>@<server>.
        return '{}@{}'.format(self.jid_node, self.server)

    @return_ascii
    def __repr__(self):
        # A leading '!' marks a custom (non-default) server in the repr.
        server = self.server
        if self.custom_server:
            server = '!' + server
        return '<Chatroom({}, {}, {}, {})>'.format(self.id, self.name, self.jid_node, server)
class ChatroomEventAssociation(db.Model):
    """Many-to-many link between events and chatrooms with per-event display flags."""

    __tablename__ = 'chatroom_events'
    __table_args__ = {'schema': 'plugin_chat'}

    #: ID of the event
    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        primary_key=True,
        index=True,
        autoincrement=False
    )
    #: ID of the chatroom
    chatroom_id = db.Column(
        db.Integer,
        db.ForeignKey('plugin_chat.chatrooms.id'),
        primary_key=True,
        index=True
    )
    #: If the chatroom should be hidden on the event page
    hidden = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: If the password should be visible on the event page
    show_password = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: The associated :class:Chatroom
    chatroom = db.relationship(
        'Chatroom',
        lazy=False,
        backref=db.backref('events', cascade='all, delete-orphan')
    )
    #: The associated event
    event = db.relationship(
        'Event',
        lazy=True,
        backref=db.backref(
            'chatroom_associations',
            lazy='dynamic'
        )
    )

    @property
    def locator(self):
        # Combine the chatroom's locator with the event id for URL building.
        return dict(self.chatroom.locator, confId=self.event_id)

    @return_ascii
    def __repr__(self):
        return '<ChatroomEventAssociation({}, {})>'.format(self.event_id, self.chatroom)

    @classmethod
    def find_for_event(cls, event, include_hidden=False, **kwargs):
        """Returns a Query that retrieves the chatrooms for an event

        :param event: an indico event (with a numeric ID)
        :param include_hidden: if hidden chatrooms should be included, too
        :param kwargs: extra kwargs to pass to ``find()``
        """
        query = cls.find(event_id=event.id, **kwargs)
        if not include_hidden:
            query = query.filter(~cls.hidden)
        return query

    def delete(self, reason=''):
        """Deletes the event chatroom and if necessary the chatroom, too.

        :param reason: reason for the deletion
        :return: True if the associated chatroom was also
                 deleted, otherwise False
        """
        db.session.delete(self)
        db.session.flush()
        # If this was the chatroom's last event, remove the room itself
        # and tear it down on the XMPP server as well.
        if not self.chatroom.events:
            db.session.delete(self.chatroom)
            db.session.flush()
            delete_room(self.chatroom, reason)
            return True
        return False
| 5,152 | 1,610 |
# Minimal Django test settings: sqlite backend and only the app under test.
DATABASE_ENGINE = 'sqlite3'
INSTALLED_APPS = [
    'richcomments'
]
| 69 | 31 |
from django.db import models
from django.conf import settings
from core.mixins import Trackable
class ExpenseProto(models.Model):
    """Shared fields for one-off and ongoing expenses.

    NOTE(review): no ``Meta.abstract = True`` — subclasses therefore use
    multi-table inheritance; confirm that is intended.
    """

    tags = models.ManyToManyField('tags.Tag')
    # Optional category kit; SET_NULL keeps the expense if the kit is deleted.
    kit = models.ForeignKey('categories.Kit', on_delete=models.SET_NULL, blank=True, null=True)
    title = models.CharField(max_length=255)
    channel = models.ForeignKey('channels.Channel', on_delete=models.CASCADE)
    comment = models.TextField(blank=True, null=True)
    # Amount in whole units (integer, no sub-unit precision).
    amount = models.IntegerField(default=0)

    @property
    def cat(self):
        # NOTE(review): raises AttributeError when kit is None (kit is
        # nullable) — callers must guard or this needs a None check.
        return self.kit.cat
class Expense(Trackable, ExpenseProto):
    """A concrete, dated expense belonging to one user."""

    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    is_fulfilled = models.BooleanField(default=True)
    money_stored = models.BooleanField(default=False)
    # Back-reference to the recurring expense this was generated from, if any.
    ongoing_origin = models.ForeignKey('expenses.OngoingExpense', null=True, blank=True, default=None, on_delete=models.SET_NULL)
class OngoingExpenseScope(models.IntegerChoices):
    """Recurrence period of an ongoing expense."""

    MONTH = 0, 'Month'
    YEAR = 1, 'Year'
class OngoingExpense(Trackable, ExpenseProto):
    """A recurring expense template, repeated per month or per year."""

    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    scope = models.IntegerField(
        choices=OngoingExpenseScope.choices,
        default=OngoingExpenseScope.MONTH,
    )
| 1,272 | 427 |
# Generated by Django 3.1.5 on 2021-01-09 06:50
import cloudinary.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: Image model with a Cloudinary-hosted image field."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('imageDescription', models.CharField(max_length=450)),
                # Stores the Cloudinary public id/URL; may be empty.
                ('image_url', cloudinary.models.CloudinaryField(blank=True, max_length=255, verbose_name='image')),
                ('date_uploaded', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| 775 | 233 |
# -*- coding: utf-8 -*-
"""共有マスター関連APIの実装."""
# community module
from flask import abort, request
import sqlalchemy.exc
# project module
from circle_core.models import MetaDataSession, ReplicationMaster
from .api import api
from .utils import respond_failure, respond_success
from ..utils import (oauth_require_read_schema_scope, oauth_require_write_schema_scope)
@api.route('/replication_masters/', methods=['GET', 'POST'])
def api_repliction_masters():
    """Collection endpoint for ReplicationMaster: list (GET) or create (POST)."""
    handlers = {
        'GET': _get_replication_masters,
        'POST': _post_replication_masters,
    }
    handler = handlers.get(request.method)
    if handler is not None:
        return handler()
    abort(405)
@oauth_require_read_schema_scope
def _get_replication_masters():
    """Return every ReplicationMaster serialized as JSON.

    Returns:
        Response carrying all ReplicationMaster records.
    """
    # TODO: eager loading
    masters = [master.to_json() for master in ReplicationMaster.query]
    return respond_success(replicationMasters=masters)
@oauth_require_write_schema_scope
def _post_replication_masters():
    """Create a ReplicationMaster from the posted JSON body.

    Returns:
        Response with the created record, or a failure response when the
        endpoint URL is already registered.
    """
    payload = request.json
    try:
        with MetaDataSession.begin():
            new_master = ReplicationMaster(endpoint_url=payload['endpointUrl'])
            MetaDataSession.add(new_master)
    except sqlalchemy.exc.IntegrityError:
        # endpoint_url is unique; a duplicate violates the DB constraint.
        return respond_failure('このURLは既に登録されています')
    return respond_success(replicationMaster=new_master.to_json())
@api.route('/replication_masters/<int:replication_master_id>', methods=['GET', 'DELETE'])
def api_replication_master(replication_master_id):
    """Dispatch CRUD for a single ReplicationMaster."""
    master = ReplicationMaster.query.get(replication_master_id)
    if not master:
        return respond_failure('Replication Master not found.', _status=404)
    handlers = {
        'GET': _get_replication_master,
        'DELETE': _delete_replication_master,
    }
    handler = handlers.get(request.method)
    if handler is None:
        abort(405)
    return handler(master)
@oauth_require_read_schema_scope
def _get_replication_master(replication_master):
    """Serialize a single ReplicationMaster.

    :param ReplicationMaster replication_master: record to serialize
    :return: the record's JSON representation
    :rtype: Response
    """
    return respond_success(replicationMaster=replication_master.to_json())
@oauth_require_write_schema_scope
def _delete_replication_master(replication_master):
    """Delete a ReplicationMaster.

    :param ReplicationMaster replication_master: record to delete
    :return: the deleted record's JSON representation
    :rtype: Response
    """
    with MetaDataSession.begin():
        MetaDataSession.delete(replication_master)
    # to_json() is still usable here: the instance data survives in memory.
    return respond_success(replicationMaster=replication_master.to_json())
| 2,832 | 929 |
from django.conf import settings
from django_assets import Bundle, register
from webassets.filter import get_filter
# Compile SASS to compressed CSS via libsass.
libsass = get_filter("libsass", style="compressed")

# Third-party stylesheets, minified with cssutils.
css_libs = Bundle(
    settings.BASE_DIR + "/assets/styles/css/libs/normalize.css",
    filters="cssutils",
    output="css/libs.css"
)

# Project stylesheet compiled from SASS; rebuilt whenever any .sass file
# matching ``depends`` changes.
css_custom = Bundle(
    settings.BASE_DIR + "/assets/styles/sass/base.sass",
    filters=libsass,
    output="css/style.css",
    depends="/**/*.sass",
)

# Register the bundles under the names referenced from templates.
register("css_libs", css_libs)
register("css_custom", css_custom)
| 532 | 177 |
import os
import sys
import time
import multiprocessing
from subprocess import Popen
from multiprocessing.pool import ThreadPool
def work(command, logfile, job_id, tot):
    """Run one shell command, logging start, errors and completion to *logfile*.

    :param command: shell command line, executed via ``Popen(shell=True)``
    :param logfile: path of the shared log file (opened in append mode)
    :param job_id: 1-based job index used in the log lines
    :param tot: total number of jobs (for the "out of N" log line)
    """
    with open(logfile, "a+") as fh:
        fh.write(f'\n{time.asctime()} Starting Job {job_id} (out of {tot})')
        fh.write(f'\n{time.asctime()} Job {job_id} command: {command}\n')
    try:
        process = Popen(command, shell=True)
        process.wait()
        # If the output of the process needs further processing/parsing, it can be done here
        # Source: https://stackoverflow.com/questions/26774781/
        # python-multiple-subprocess-with-a-pool-queue-recover-output-as-soon-as-one-finis
    except Exception as e:
        line_error = f'\n{time.asctime()} Error while executing Job {job_id}:\n'
        print(line_error)
        with open(logfile, "a+") as fh:
            fh.write(line_error)
            fh.write(str(e) + "\n")
        # Bug fix: previously a failed job was still logged as "completed!".
        return
    line_end = f'\n{time.asctime()} Job {job_id} completed!\n'
    print(line_end)
    with open(logfile, "a+") as fh:
        fh.write(line_end)
def launch_jobs(commands_list, logfile=None, n_jobs=None, core_proportion=(1, 3), max_cores=8, log_dir=None):
    """Run shell commands concurrently, ``n_cores`` at a time, with progress logging.

    :param commands_list: shell command lines to execute
    :param logfile: log file path; autogenerated under *log_dir* when None
    :param n_jobs: explicit number of concurrent jobs; must not exceed CPU count
    :param core_proportion: (numerator, denominator) fraction of the CPUs to
        use when *n_jobs* is not given (default: one third)
    :param max_cores: hard cap on the number of concurrent jobs
    :param log_dir: directory for the autogenerated logfile (default: cwd)
    """
    if not log_dir:
        log_dir = os.getcwd()
    # Create a log file to track the completed jobs
    if not logfile:
        logfile = os.path.join(log_dir, f"{time.asctime().replace(' ', '_')}_jobs_logfile.txt")
    # Attach ID to the jobs to track their execution
    indexed_commands = [(i + 1, command) for i, command in enumerate(commands_list)]
    tot = len(indexed_commands)
    if n_jobs:
        if n_jobs > multiprocessing.cpu_count():
            sys.exit(f"The system does not possess that many cores. It must be {multiprocessing.cpu_count()} or less.")
        n_cores = n_jobs
    else:
        # Use a predetermined proportion of the available cores (ex: one third (1/3) of the available cores)
        n_cores = int((multiprocessing.cpu_count() / core_proportion[1]) * core_proportion[0])
    # Cap the core count, and never drop below one: the int() division above
    # can yield 0 on machines with fewer cores than the proportion
    # denominator, which would make ThreadPool() raise.
    n_cores = max(1, min(n_cores, max_cores))
    line_start = f'{time.asctime()} Launching {tot} jobs, using {n_cores} cores\n'
    print(line_start)
    with open(logfile, "a+") as fh:
        fh.write(line_start)
    # Keep n_cores jobs in flight; whenever a job finishes, launch a new one.
    tp = ThreadPool(n_cores)
    for (job_id, command) in indexed_commands:
        tp.apply_async(work, (command, logfile, job_id, tot, ))
    tp.close()
    tp.join()
    line_end = f'\n{time.asctime()} All Jobs completed! ({tot})\n'
    print(line_end)
    with open(logfile, "a+") as fh:
        fh.write(line_end)
| 2,864 | 1,007 |
# Assignment 3 for OMS6250
#
# Defines a Topology, which is a collection of Nodes. Students should not
# modify this file. This is NOT a topology like the ones defined in Mininet projects.
#
# Copyright 2015 Sean Donovan
from DistanceVector import *
class Topology(object):
    # Python 2 code: print statements and implicit relative-style conf import.
    def __init__(self, conf_file):
        ''' Initializes the topology. Called from outside of DistanceVector.py '''
        self.topodict = {}   # node name -> DistanceVector instance
        self.nodes = []      # all DistanceVector instances, in creation order
        self.topo_from_conf_file(conf_file)

    def topo_from_conf_file(self, conf_file):
        ''' This created all the nodes in the Topology from the configuration
            file passed into __init__(). Can throw an exception if there is a
            problem with the config file. '''
        try:
            # conf_file is a module name; its `topo` dict maps node name ->
            # neighbor description passed straight to DistanceVector.
            conf = __import__(conf_file)
            for key in conf.topo.keys():
                new_node = DistanceVector(key, self, conf.topo[key])
                self.nodes.append(new_node)
                self.topodict[key] = new_node
        except:
            print "error importing conf_file " + conf_file
            raise
        self.verify_topo()

    def verify_topo(self):
        ''' Once the topology is imported, we verify the topology to make sure
            it is actually valid. '''
        print self.topodict
        for node in self.nodes:
            try:
                # NOTE(review): relies on DistanceVector.verify_neighbors()
                # raising on an invalid neighbor list -- confirm.
                node.verify_neighbors()
            except:
                print "error with neighbors of " + node.name
                raise

    def run_topo(self):
        ''' This is where most of the action happens. First, we have to "prime
            the pump" and send to each neighbor that they are connected.
            Next, in a loop, we go through all of the nodes in the topology running
            their instances of Bellman-Ford, passing and receiving messages, until
            there are no further messages to service. Each loop, print out the
            distances after the loop instance. After the full loop, check to see if
            we're finished (all queues are empty).
        '''
        #Prime the pump
        for node in self.nodes:
            node.send_initial_messages()

        done = False
        while done == False:
            for node in self.nodes:
                node.process_BF()
                node.log_distances()
            # Done with a round. Now, we call finish_round() which writes out
            # each entry in log_distances(). By default, this will will print
            # out alphabetical order, which you can turn off so the logfile
            # matches what is printed during log_distances().
            # NOTE(review): finish_round is not defined in this file;
            # presumably provided by `from DistanceVector import *` -- confirm.
            finish_round()
            done = True
            for node in self.nodes:
                # NOTE(review): len(node) relies on DistanceVector defining
                # __len__ (pending-message count, apparently) -- confirm.
                if len(node) != 0:
                    done = False
                    break
| 2,803 | 729 |
# 如何在items.py里定义这些数据
import scrapy
# 导入scrapy
class DoubanItem(scrapy.Item):
    # A class DoubanItem inheriting from scrapy.Item: one scraped Douban book.
    """Container for one scraped Douban book record."""

    # Book title.
    title = scrapy.Field()
    # Publishing information.
    publish = scrapy.Field()
    # Rating score.
    score = scrapy.Field()
| 250 | 142 |
import copy
from alg.semi_supervised import SemiSupervised
from lib.ops import evaluate
from lib.utils import load_avast_weeks_pca, parse_arguments
args = parse_arguments()

# Base configuration for the PCA-reduced Avast dataset (5-class problem).
dataset_avast_pca = {
    'name': 'avast_pca',
    'input_size': 128,
    'num_classes': 5,
    'mlp_arch': [96, 64, 32],
    'visualise_funcs': ['losses']
}

# Binary variant: identical except for the number of classes.
dataset_avast_pca_binary = copy.deepcopy(dataset_avast_pca)
dataset_avast_pca_binary['num_classes'] = 2

# Pick the dataset configuration requested on the command line.
ds = dataset_avast_pca_binary if args.binary else dataset_avast_pca
if args.m:  # can't load matplotlib in metacentrum
    ds['visualise_funcs'] = []
def train(run_nmb, x, y, x_un=None, y_un=None, x_test=None, y_test=None):
    """Build a SemiSupervised trainer for the selected dataset and fit it.

    Returns the trained model produced by SemiSupervised.train().
    """
    trainer = SemiSupervised(
        dataset_params=ds,
        num_epoch=100,
        ssl_method=args.method,
        options=args.options,
        hyper_par=args.hyper_par,
        save_results=False,
        imbalanced=False,
        should_evaluate=True
    )
    trainer.set_train_data(x, y)
    if x_test is not None and y_test is not None:
        trainer.set_test_data(x_test, y_test)
    # Either use the provided unlabeled pool, or carve one out of the
    # labeled data according to the requested ratio.
    if x_un is None:
        trainer.split_sup_unsup(args.ratio)
    else:
        trainer.set_unsupervised_data(x_un)
    trainer.prepare_train_test_data()
    return trainer.train(run_nmb)
def train_eval(run_nmb):
    """Train one run on the configured weeks and return its test accuracy."""
    x, y, x_test, y_test = load_avast_weeks_pca(args.train_weeks, 10000 + 5000, 5000, args.binary)
    print('y_test: ', y_test[:10])
    trained_model = train(run_nmb, x, y, x_test=x_test, y_test=y_test)
    return evaluate(trained_model, ds['num_classes'], x_test, y_test, hot=False)
def main():
    """Run the configured number of training runs, logging accuracies to disk."""
    results_path = args.out_path + 'results.out'
    # Header: record the experiment configuration (truncates any old file).
    with open(results_path, 'w') as f:
        print('weeks:', args.train_weeks, file=f)
        print('ratio:', args.ratio, file=f)
        print('ssl:', args.method, file=f)
        print(args.hyper_par, file=f)
        print(args.options, file=f)
    runs = args.runs
    accuracies = []
    for run_idx in range(runs):
        run_acc = train_eval(run_idx)
        print('Final test acc:', run_acc)
        # Append each run's accuracy as soon as it is available.
        with open(results_path, 'a') as f:
            print(run_idx, run_acc, file=f)
        accuracies.append(run_acc)
    # Summary statistics over all runs.
    with open(results_path, 'a') as f:
        print('min', min(accuracies), file=f)
        print('max', max(accuracies), file=f)
        print('avg', sum(accuracies) / runs, file=f)


if __name__ == '__main__':
    main()
| 2,388 | 935 |
# Command-block snippet templates, keyed by role; values are written out
# verbatim, so the command text below must not be altered.
functions = {
    # Opening line: the snippet is triggered from an activator rail.
    'default-start':"""@minecraft:activator_rail\n""",
    # Closing lines: clear a 3-block column and remove the command-block minecart.
    'default-end':"""setblock ~ ~1 ~ minecraft:chain_command_block[facing=up]{auto:1,Command:"fill ~ ~ ~ ~ ~-2 ~ air"}
setblock ~ ~ ~ minecraft:command_block[facing=up]{auto:1,Command:"kill @e[type=minecraft:command_block_minecart,distance=..2]"}\n""",
    # Variant of 'default-end' clearing one extra block (4-block column).
    'default-fullend':"""setblock ~ ~1 ~ minecraft:chain_command_block[facing=up]{auto:1,Command:"fill ~ ~ ~ ~ ~-3 ~ air"}
setblock ~ ~ ~ minecraft:command_block[facing=up]{auto:1,Command:"kill @e[type=minecraft:command_block_minecart,distance=..2]"}\n"""
}
| 580 | 220 |
import logging

log = logging.getLogger(__name__)


class RemediationResult(object):
    """Outcome of one remediation action attempted against a mailbox."""

    def __init__(self, address, message_id, mailbox_type, action, success=True, message=None):
        self.address = address
        self.message_id = message_id
        self.mailbox_type = mailbox_type
        self.action = action
        self.success = success
        self.message = message
        # Populated later by callers inspecting the mailbox.
        self.owner = None
        self.members = []
        self.forwards = []

    def result(self, message, success=False):
        """Record the outcome message and success flag; also logs the message."""
        log.info(message)
        self.success = success
        self.message = message
| 593 | 165 |
import math as m
# All functions expect base SI units for any arguments given
# DBT - Dry bulb temperature - Degrees Rankine, R
# DPT - Dew point temperature - Degress Rankine, R
# H - Specific enthalpy - British thermal unit per pound mass,
# Btu/lbm
# P - Atmospheric pressure - Pounds force per square inch, psi
# Pw - Water vapor partial pressure - Pounds force per square inch, psi
# RH - Relative humidity - Decimal (i.e. not a percentage)
# V - Specific volume - Cubic feet per pound mass, ft^3/lbm
# W - Humidity ratio - pounds mass per pound mass, lbm/lbm
# WBT - Wet bulb temperature - Degrees Rankine, R
# Minimum dry bulb temperature (491.67 R = 32 F)
Min_DBT=491.67
# Maximum dry bulb temperature (851.67 R = 392 F)
Max_DBT=851.67
# Convergence tolerance for the bisection solvers below (degrees Rankine)
TOL=0.0000005
def __DBT_H_RH_P(H, RH, P):
    """Bisect for the DBT at which W(DBT, RH, P) equals W(DBT, H)."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_RH_P(lo, RH, P) - __W_DBT_H(lo, H)
        f_mid = __W_DBT_RH_P(mid, RH, P) - __W_DBT_H(mid, H)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_H_V_P(H, V, P):
    """Bisect for the DBT at which W(DBT, V, P) equals W(DBT, H)."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_V_P(lo, V, P) - __W_DBT_H(lo, H)
        f_mid = __W_DBT_V_P(mid, V, P) - __W_DBT_H(mid, H)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_H_W(H, W):
    """Bisect for the DBT at which W(DBT, H) equals the given humidity ratio."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = W - __W_DBT_H(lo, H)
        f_mid = W - __W_DBT_H(mid, H)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_H_WBT_P(H, WBT, P):
    """Bisect for the DBT at which W(DBT, WBT, P) equals W(DBT, H)."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_WBT_P(lo, WBT, P) - __W_DBT_H(lo, H)
        f_mid = __W_DBT_WBT_P(mid, WBT, P) - __W_DBT_H(mid, H)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_RH_V_P(RH, V, P):
    """Bisect for the DBT at which W(DBT, RH, P) equals W(DBT, V, P)."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_RH_P(lo, RH, P) - __W_DBT_V_P(lo, V, P)
        f_mid = __W_DBT_RH_P(mid, RH, P) - __W_DBT_V_P(mid, V, P)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_RH_W_P(RH, W, P):
    """Bisect for the DBT at which W(DBT, RH, P) equals the given humidity ratio."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_RH_P(lo, RH, P) - W
        f_mid = __W_DBT_RH_P(mid, RH, P) - W
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_RH_WBT_P(RH, WBT, P):
    """Bisect for the DBT at which W(DBT, WBT, P) equals W(DBT, RH, P)."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_WBT_P(lo, WBT, P) - __W_DBT_RH_P(lo, RH, P)
        f_mid = __W_DBT_WBT_P(mid, WBT, P) - __W_DBT_RH_P(mid, RH, P)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_V_W_P(V, W, P):
    """Bisect for the DBT at which W(DBT, V, P) equals the given humidity ratio."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = W - __W_DBT_V_P(lo, V, P)
        f_mid = W - __W_DBT_V_P(mid, V, P)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_V_WBT_P(V, WBT, P):
    """Bisect for the DBT at which W(DBT, WBT, P) equals W(DBT, V, P)."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_WBT_P(lo, WBT, P) - __W_DBT_V_P(lo, V, P)
        f_mid = __W_DBT_WBT_P(mid, WBT, P) - __W_DBT_V_P(mid, V, P)
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __DBT_W_WBT_P(W, WBT, P):
    """Bisect for the DBT at which W(DBT, WBT, P) equals the given humidity ratio."""
    lo, hi = Min_DBT, Max_DBT
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        f_lo = __W_DBT_WBT_P(lo, WBT, P) - W
        f_mid = __W_DBT_WBT_P(mid, WBT, P) - W
        # Keep the half-interval whose endpoints bracket the sign change.
        if (f_mid > 0) == (f_lo > 0):
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
# ASHRAE 2009 Chapter 1 Equation 39
def __DPT_Pw(Pw):
    """Dew point temperature (R) from water vapor partial pressure (psi).

    Constants C14-C18 are from ASHRAE Fundamentals 2009, Chapter 1,
    Equation 39 (IP units). The correlation yields Fahrenheit; +459.67
    converts the result to Rankine.
    """
    # Fix: removed the useless self-assignment ``Pw=Pw``.
    C14=100.45
    C15=33.193
    C16=2.319
    C17=0.17074
    C18=1.2063
    a=m.log(Pw)
    return (C14+C15*a+C16*a**2+C17*a**3+C18*Pw**0.1984)+459.67
# ASHRAE 2009 Chapter 1 Equation 32
def __H_DBT_W(DBT, W):
    """Specific enthalpy (Btu/lbm) from dry bulb temperature (R) and humidity ratio."""
    if not __valid_DBT(DBT):
        return None
    t_f = DBT - 459.67  # the correlation uses Fahrenheit
    return 0.240 * t_f + W * (1061 + 0.444 * t_f)
def __is_positive(x):
    """Return True when *x* is strictly positive."""
    # Fix: the comparison already yields a bool; no if/else needed.
    return x > 0
# ASHRAE 2009 Chapter 1 Equation 22
def __Pw_W_P(W, P):
    """Water vapor partial pressure (psi) from humidity ratio and total pressure (psi)."""
    return W*P/(W+0.621945)
# ASHRAE 2009 Chapter 1 Equation 6
def __Pws(DBT):
    """Saturation pressure of water vapor (psi) for DBT in Rankine."""
    if not __valid_DBT(DBT):
        return None
    # ASHRAE correlation constants (IP units), Equation 6.
    C8=-1.0440397*10**4
    C9=-1.1294650*10**1
    C10=-2.7022355*10**-2
    C11=1.2890360*10**-5
    C12=-2.4780681*10**-9
    C13=6.5459673
    return m.exp(C8/DBT+C9+C10*DBT+C11*DBT**2+C12*DBT**3+C13*m.log(DBT))
def state(prop1, prop1val, prop2, prop2val,P):
    """Solve the complete psychrometric state from two independent properties.

    :param prop1: name of the first property: "DBT", "WBT", "RH", "W", "V" or "H"
    :param prop1val: value of the first property (base IP units, see module header)
    :param prop2: name of the second property (must differ from prop1)
    :param prop2val: value of the second property
    :param P: atmospheric pressure, psi
    :return: [DBT, H, RH, V, W, WBT], or None (after printing a message)
        when the property names are invalid or not independent
    """
    if prop1==prop2:
        print("Properties must be independent.")
        return
    prop=["DBT","WBT","RH","W","V","H"]
    if prop1 not in prop or prop2 not in prop:
        print("Valid property must be given.")
        return
    # Canonically order the pair by its position in `prop` so only the upper
    # triangle of property combinations needs explicit handling below.
    prop1i=prop.index(prop1)
    prop2i=prop.index(prop2)
    if prop1i<prop2i:
        cd1=prop1
        cd1val=prop1val
        cd2=prop2
        cd2val=prop2val
    else:
        cd1=prop2
        cd1val=prop2val
        cd2=prop1
        cd2val=prop1val
    if cd1=="DBT":
        DBT=cd1val
        if cd2=="WBT":
            WBT=cd2val
            W=__W_DBT_WBT_P(DBT, WBT, P)
            H=__H_DBT_W(DBT, W)
            RH=__RH_DBT_W_P(DBT, W, P)
            V=__V_DBT_W_P(DBT, W, P)
        elif cd2=="RH":
            RH=cd2val
            W=__W_DBT_RH_P(DBT, RH, P)
            H=__H_DBT_W(DBT, W)
            V=__V_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
        elif cd2=="W":
            W=cd2val
            H=__H_DBT_W(DBT, W)
            RH=__RH_DBT_W_P(DBT, W, P)
            V=__V_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
        elif cd2=="V":
            V=cd2val
            W=__W_DBT_V_P(DBT, V, P)
            H=__H_DBT_W(DBT, W)
            RH=__RH_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
        elif cd2=="H":
            H=cd2val
            W=__W_DBT_H(DBT, H)
            RH=__RH_DBT_W_P(DBT, W, P)
            V=__V_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
    elif cd1=="WBT":
        WBT=cd1val
        # DBT is unknown here; recover it first with the matching bisection
        # solver, then derive the remaining properties from (DBT, W).
        if cd2=="RH":
            RH=cd2val
            DBT=__DBT_RH_WBT_P(RH, WBT, P)
            W=__W_DBT_RH_P(DBT, RH, P)
            H=__H_DBT_W(DBT, W)
            V=__V_DBT_W_P(DBT, W, P)
        elif cd2=="W":
            W=cd2val
            DBT=__DBT_W_WBT_P(W, WBT, P)
            H=__H_DBT_W(DBT, W)
            RH=__RH_DBT_W_P(DBT, W, P)
            V=__V_DBT_W_P(DBT, W, P)
        elif cd2=="V":
            V=cd2val
            DBT=__DBT_V_WBT_P(V, WBT, P)
            W=__W_DBT_V_P(DBT, V, P)
            H=__H_DBT_W(DBT, W)
            RH=__RH_DBT_W_P(DBT, W, P)
        elif cd2=="H":
            H=cd2val
            DBT=__DBT_H_WBT_P(H, WBT, P)
            W=__W_DBT_H(DBT, H)
            RH=__RH_DBT_W_P(DBT, W, P)
            V=__V_DBT_W_P(DBT, W, P)
    elif cd1=="RH":
        RH=cd1val
        if cd2=="W":
            W=cd2val
            DBT=__DBT_RH_W_P(RH, W, P)
            H=__H_DBT_W(DBT, W)
            V=__V_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
        elif cd2=="V":
            V=cd2val
            DBT=__DBT_RH_V_P(RH, V, P)
            W=__W_DBT_RH_P(DBT, RH, P)
            H=__H_DBT_W(DBT, W)
            WBT=__WBT_DBT_W_P(DBT, W, P)
        elif cd2=="H":
            H=cd2val
            DBT=__DBT_H_RH_P(H, RH, P)
            W=__W_DBT_RH_P(DBT, RH, P)
            V=__V_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
    elif cd1=="W":
        W=cd1val
        if cd2=="V":
            V=cd2val
            DBT=__DBT_V_W_P(V, W, P)
            H=__H_DBT_W(DBT, W)
            RH=__RH_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
        elif cd2=="H":
            H=cd2val
            DBT=__DBT_H_W(H, W)
            RH=__RH_DBT_W_P(DBT, W, P)
            V=__V_DBT_W_P(DBT, W, P)
            WBT=__WBT_DBT_W_P(DBT, W, P)
    elif cd1=="V":
        # By the canonical ordering, cd2 can only be "H" at this point.
        V=cd1val
        H=cd2val
        DBT=__DBT_H_V_P(H, V, P)
        W=__W_DBT_V_P(DBT, V, P)
        RH=__RH_DBT_W_P(DBT, W, P)
        WBT=__WBT_DBT_W_P(DBT, W, P)
    return [DBT, H, RH, V, W, WBT]
# ASHRAE 2009 Chapter 1 Equation 22 and Equation 24
def __RH_DBT_W_P(DBT, W, P):
    """Relative humidity (decimal) from dry bulb temperature, humidity ratio and pressure."""
    if not __valid_DBT(DBT):
        return None
    return W*P/((0.621945+W)*__Pws(DBT))
# ASHRAE 2009 Chapter 1 Equation 28
def __V_DBT_W_P(DBT, W, P):
    """Specific volume (ft^3/lbm) from dry bulb temperature, humidity ratio and pressure."""
    if not __valid_DBT(DBT):
        return None
    return 0.370486*DBT*(1+1.607858*W)/P
# ASHRAE 2009 Chapter 1 Equation 32
def __W_DBT_H(DBT, H):
    """Humidity ratio from dry bulb temperature (R) and specific enthalpy (Btu/lbm)."""
    if not __valid_DBT(DBT):
        return None
    t_f = DBT - 459.67  # the correlation uses Fahrenheit
    return (H - 0.240 * t_f) / (1061 + 0.444 * t_f)
# ASHRAE 2009 Chapter 1 Equation 22 and Equation 24
def __W_DBT_RH_P(DBT, RH, P):
    """Humidity ratio from dry bulb temperature, relative humidity and pressure."""
    if not __valid_DBT(DBT):
        return None
    vapor_pressure = RH * __Pws(DBT)
    return 0.621945 * vapor_pressure / (P - vapor_pressure)
# ASHRAE 2009 Chapter 1 Equation 28
def __W_DBT_V_P(DBT, V, P):
    """Humidity ratio from dry bulb temperature, specific volume and pressure."""
    if not __valid_DBT(DBT):
        return None
    return (P*V-0.370486*DBT)/(1.607858*0.370486*DBT)
# ASHRAE 2009 Chapter 1 Equation 35
def __W_DBT_WBT_P(DBT, WBT, P):
    """Humidity ratio from dry and wet bulb temperatures (R) and pressure."""
    if not __valid_DBT(DBT):
        return None
    t_db = DBT - 459.67  # Fahrenheit
    t_wb = WBT - 459.67  # Fahrenheit
    # Saturation humidity ratio at the wet bulb temperature (RH = 1).
    Ws = __W_DBT_RH_P(t_wb + 459.67, 1, P)
    return ((1093 - 0.556 * t_wb) * Ws - 0.240 * (t_db - t_wb)) / (1093 + 0.444 * t_db - t_wb)
# ASHRAE 2009 Chapter 1 Equation 35
def __WBT_DBT_W_P(DBT, W, P):
    """Wet bulb temperature (R) via bisection between the dew point and DBT."""
    if not __valid_DBT(DBT):
        return None
    lo = __DPT_Pw(__Pw_W_P(W, P))  # dew point: lower bound for WBT
    hi = DBT                       # WBT can never exceed the dry bulb temperature
    mid = (lo + hi) / 2
    while hi - lo > TOL:
        # Saturated humidity ratio implied by the trial wet bulb temperature.
        Ws = __W_DBT_WBT_P(DBT, mid, P)
        if W > Ws:
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
    return mid
def __valid_DBT(DBT):
    """Return True when DBT (R) lies within the supported correlation range."""
    # Fix: chained comparison replaces the verbose if/else.
    return Min_DBT <= DBT <= Max_DBT
| 10,452 | 5,470 |
import time
def total_time(fun):
    """Decorator: print the wall-clock seconds *fun* takes per call.

    Fixes over the previous version: the wrapper now forwards positional
    and keyword arguments, propagates the wrapped function's return value
    (it used to be silently discarded), and preserves the function's
    metadata via functools.wraps.
    """
    from functools import wraps

    @wraps(fun)
    def wrapper(*args, **kwargs):
        before_time = time.time()
        result = fun(*args, **kwargs)
        print(time.time() - before_time)
        return result
    return wrapper
@total_time
def add():
    """Sleep for one second, then return 3 (exercises the timing decorator)."""
    time.sleep(1)
    return 3
# Attach the timing decorator to the function to measure its run time.
@total_time
def sub():
    """Print two messages around a two-second sleep; the decorator prints the elapsed time."""
    print('睡不好')
    time.sleep(2)
    print('很烦')
    return 10
if __name__=='__main__':
    # Run the demo only when executed as a script.
    sub()
| 391 | 165 |
"""Run server as module"""
from argparse import ArgumentParser
from too_simple_server.configuration import DEFAULT_CFG_PATH
from too_simple_server.run import main
def _build_parser():
    """Construct the command-line parser for the server runner."""
    parser = ArgumentParser(description="Mock server with simple DB interactions")
    parser.add_argument("--debug", action="store_true", default=None)
    parser.add_argument("--config", help=f"Configuration file to be used, '{DEFAULT_CFG_PATH}' by default",
                        default=DEFAULT_CFG_PATH)
    parser.add_argument("--no-wsgi", action="store_true", default=False)
    parser.add_argument("action", default="start", choices=["start", "stop"])
    return parser


AGP = _build_parser()
ARGS = AGP.parse_args()
main(ARGS.action, ARGS.debug, ARGS.config, ARGS.no_wsgi)
| 667 | 209 |
## Program: VMTK
## Language: Python
## Date: February 12, 2018
## Version: 1.4
## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfacecelldatatopointdata as celltopoint
from vtk.numpy_interface import dataset_adapter as dsa
import numpy as np
@pytest.fixture(scope='module')
def centerline_pointdata(aorta_centerline_branches):
    """Run vmtkSurfaceCellDataToPointData over the branch centerlines fixture."""
    converter = celltopoint.vmtkSurfaceCellDataToPointData()
    converter.Surface = aorta_centerline_branches
    converter.Execute()
    return converter.Surface
@pytest.mark.parametrize("expectedKey", [
    'CenterlineIds',
    'TractIds',
    'Blanking',
    'GroupIds',
])
def test_expected_cell_data_keys(centerline_pointdata, expectedKey):
    """Every centerline attribute should be present as cell data."""
    wrapped = dsa.WrapDataObject(centerline_pointdata)
    assert expectedKey in wrapped.CellData.keys()
@pytest.mark.parametrize("expectedKey", [
    'MaximumInscribedSphereRadius',
    'EdgeArray',
    'EdgePCoordArray',
    'CenterlineIds',
    'TractIds',
    'Blanking',
    'GroupIds',
])
def test_expected_point_data_keys(centerline_pointdata, expectedKey):
    """Every expected array should have been copied/kept as point data."""
    wrapped = dsa.WrapDataObject(centerline_pointdata)
    assert expectedKey in wrapped.PointData.keys()
def test_number_of_cell_data_keys_is_4(centerline_pointdata):
    """Exactly four arrays remain as cell data."""
    cell_data = dsa.WrapDataObject(centerline_pointdata).CellData
    assert len(cell_data.keys()) == 4
def test_number_of_point_data_keys_is_7(centerline_pointdata):
    """Exactly seven arrays are exposed as point data."""
    point_data = dsa.WrapDataObject(centerline_pointdata).PointData
    assert len(point_data.keys()) == 7
def test_expected_number_of_points_in_output(centerline_pointdata):
    """The conversion keeps all 417 centerline points."""
    points = dsa.WrapDataObject(centerline_pointdata).Points
    assert points.shape == (417, 3)
def test_blanking_array_is_correct(centerline_pointdata):
    """Blanking cell values must be transferred onto the points unchanged."""
    wp = dsa.WrapDataObject(centerline_pointdata)
    expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    # Fix: assert the truthy value directly instead of comparing `== True`.
    assert np.allclose(wp.PointData.GetArray('Blanking'), expectedOutput)
def test_centerlineids_array_is_correct(centerline_pointdata):
    """CenterlineIds cell values must be transferred onto the points unchanged."""
    wp = dsa.WrapDataObject(centerline_pointdata)
    expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    # Fix: assert the truthy value directly instead of comparing `== True`.
    assert np.allclose(wp.PointData.GetArray('CenterlineIds'), expectedOutput)
def test_groupids_array_is_correct(centerline_pointdata):
    """GroupIds cell values must be transferred onto the points unchanged."""
    wp = dsa.WrapDataObject(centerline_pointdata)
    expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
                                3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
                                3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
                                3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
                                3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3])
    # Fix: assert the truthy value directly instead of comparing `== True`.
    assert np.allclose(wp.PointData.GetArray('GroupIds'), expectedOutput)
def test_tractids_array_is_correct(centerline_pointdata):
    """TractIds cell values must be transferred onto the points unchanged."""
    wp = dsa.WrapDataObject(centerline_pointdata)
    expectedOutput = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
    # Fix: assert the truthy value directly instead of comparing `== True`.
    assert np.allclose(wp.PointData.GetArray('TractIds'), expectedOutput)
| 10,239 | 6,148 |
#Copyright (c) Facebook, Inc. and its affiliates.
#This source code is licensed under the MIT license found in the
#LICENSE file in the root directory of this source tree.
from SIM_utils.components.perf_sim import *
from SIM_utils.components.pow_sim import *
#from OSSIM_utils.components.pow_knob_sim import *
from design_utils.design import *
from settings import config
# This module is our top level simulator containing all simulators (perf, and pow simulator)
class OSASimulator:
    """Top-level simulator coupling the performance and power simulators for one design point."""

    def __init__(self, dp, database, pk_dp=""):
        self.time_elapsed = 0  # time elapsed from the beginning of the simulation
        self.dp = dp  # design point to simulate
        self.perf_sim = PerformanceSimulator(self.dp)  # performance simulator instance
        self.pow_sim = PowerSimulator(self.dp)  # power simulator instance
        self.database = database
        if config.simulation_method == "power_knobs":
            self.pk_dp = pk_dp
            #self.knob_change_sim = PowerKnobSimulator(self.dp, self.pk_dp, self.database)
            # NOTE(review): knob_change_sim is never assigned (the line above
            # is commented out), yet simulate() calls
            # self.knob_change_sim.launch() for this simulation_method --
            # that path would raise AttributeError. Confirm intent.
        self.completion_time = -1  # time passed for the simulation to complete
        self.program_status = "idle"
        self.cur_tick_time = self.next_tick_time = 0  # current tick time

    # ------------------------------
    # Functionality:
    #       whether the simulation should terminate
    # ------------------------------
    def terminate(self, program_status):
        """Return True when the configured termination condition is met."""
        if config.termination_mode == "workload_completion":
            return program_status == "done"
        elif config.termination_mode == "time_budget_reahced":
            # NOTE(review): "reahced"/"time_budge" look like typos, but they
            # must match the names used in `config` -- confirm there.
            return self.time_elapsed >= config.time_budge
        else:
            return False

    # ------------------------------
    # Functionality:
    #       ticking the simulation. Note that the tick time varies depending on what is (dynamically) happening in the
    #       system
    # ------------------------------
    def tick(self):
        """Advance the current tick to the time computed by the last step()."""
        self.cur_tick_time = self.next_tick_time

    # ------------------------------
    # Functionality
    #       progress the simulation for clock_time forward
    # ------------------------------
    def step(self, clock_time):
        """Advance the performance simulation by *clock_time*; records the next tick and program status."""
        self.next_tick_time, self.program_status = self.perf_sim.simulate(clock_time)

    # ------------------------------
    # Functionality:
    #       simulation
    # ------------------------------
    def simulate(self):
        """Run the tick/step loop to completion and collect final statistics.

        Returns the simulated design point with its stats populated.
        """
        while not self.terminate(self.program_status):
            self.tick()
            self.step(self.cur_tick_time)

        if config.use_cacti:
            self.dp.correct_power_area_with_cacti(self.database)

        # collect all the stats upon completion of simulation
        self.dp.collect_dp_stats(self.database)
        if config.simulation_method == "power_knobs":
            self.knob_change_sim.launch()

        self.completion_time = self.next_tick_time
        self.dp.set_serial_design_time(self.perf_sim.serial_latency)
        self.dp.set_par_speedup(self.perf_sim.serial_latency/self.completion_time)
        return self.dp
from string import Template
from graphene.test import Client
from django.test import TestCase
from ipam.models import VLAN
from netbox_graphql.schema import schema
from netbox_graphql.tests.utils import obj_to_global_id
from netbox_graphql.tests.factories.ipam_factories import VLANFactory, RoleFactory
from netbox_graphql.tests.factories.tenant_factories import TenantFactory
class CreateTestCase(TestCase):
    """GraphQL mutation tests for creating a VLAN."""

    @classmethod
    def setUpTestData(cls):
        cls.tenant = TenantFactory()
        cls.role = RoleFactory()
        cls.query = Template('''
        mutation{
          newVlan(input: { tenant: "$tenantId", role: "$roleId", vid: 2, name: "New Vlan"}) {
            vlan{
                name
                vid
                tenant{
                  name
                }
                role{
                  name
                }
            }
          }
        }
        ''').substitute(tenantId=obj_to_global_id(cls.tenant),
                        roleId=obj_to_global_id(cls.role))

    def test_creating_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_creating_returns_data(self):
        expected = {'newVlan':
                    {'vlan': {'name': 'New Vlan',
                              'vid': 2,
                              'tenant': {'name': self.tenant.name},
                              'role': {'name': self.role.name}
                              }}}
        result = schema.execute(self.query)
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(result.data, expected)

    def test_creating_creates_it(self):
        oldCount = VLAN.objects.all().count()
        schema.execute(self.query)
        self.assertEqual(VLAN.objects.all().count(), oldCount + 1)
class QueryMultipleTestCase(TestCase):
    """GraphQL query tests over the full VLAN collection."""

    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.second = VLANFactory()
        cls.query = '''
        {
          vlans {
            edges {
              node {
                id
              }
            }
          }
        }
        '''

    def test_querying_all_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_querying_all_returns_two_results(self):
        result = schema.execute(self.query)
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(len(result.data['vlans']['edges']), 2)
class QuerySingleTestCase(TestCase):
    """GraphQL query tests for a single VLAN fetched by global id."""

    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.second = VLANFactory()
        cls.query = Template('''
        {
          vlans(id: "$id") {
            edges {
              node {
                name
                vid
                tenant {
                  name
                }
                role {
                  name
                }
              }
            }
          }
        }
        ''').substitute(id=obj_to_global_id(cls.second))

    def test_querying_single_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_querying_single_returns_result(self):
        result = schema.execute(self.query)
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(len(result.data['vlans']['edges']), 1)

    def test_querying_single_returns_expected_result(self):
        result = schema.execute(self.query)
        expected = {'vlans':
                    {'edges': [
                        {'node': {'name': self.second.name,
                                  'vid': self.second.vid,
                                  'tenant': {'name': self.second.tenant.name},
                                  'role': {'name': self.second.role.name}}}
                    ]}}
        self.assertEqual(result.data, expected)
class UpdateTestCase(TestCase):
    """Updating an existing VLAN through the updateVlan mutation."""

    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.tenant = TenantFactory()
        cls.query = Template('''
        mutation{
          updateVlan(input: { id: "$id", vid: 10, name: "New Name", tenant: "$tenantId"}) {
            vlan{
              name
              vid
              tenant {
                name
              }
            }
          }
        }
        ''').substitute(id=obj_to_global_id(cls.first),
                        tenantId=obj_to_global_id(cls.tenant))

    def test_updating_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_updating_doesnt_change_count(self):
        old_count = VLAN.objects.count()
        schema.execute(self.query)
        # assertEquals is a deprecated alias of assertEqual (removed in 3.12)
        self.assertEqual(VLAN.objects.count(), old_count)

    def test_updating_returns_updated_data(self):
        expected = {'updateVlan':
                    {'vlan': {'name': 'New Name',
                              'vid': 10,
                              'tenant': {'name': self.tenant.name}}}}
        result = schema.execute(self.query)
        self.assertEqual(result.data, expected)

    def test_updating_alters_data(self):
        schema.execute(self.query)
        vlan = VLAN.objects.get(id=self.first.id)
        self.assertEqual(vlan.name, 'New Name')
        self.assertEqual(vlan.vid, 10)
        self.assertEqual(vlan.tenant.name, self.tenant.name)
class DeleteTestCase(TestCase):
    """Deleting a VLAN through the deleteVlan mutation."""

    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.query = Template('''
        mutation{
          deleteVlan(input: { id:"$id"}) {
            vlan{
              id
            }
          }
        }
        ''').substitute(id=obj_to_global_id(cls.first))

    def test_deleting_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_deleting_removes_a_type(self):
        old_count = VLAN.objects.count()
        schema.execute(self.query)
        # assertEquals is a deprecated alias of assertEqual (removed in 3.12)
        self.assertEqual(VLAN.objects.count(), old_count - 1)
| 5,992 | 1,750 |
import re
import json
import logging
from copy import deepcopy
from collections import OrderedDict, defaultdict
from xml.etree import ElementTree

from lxml import etree
from xmljson import Parker
from ncclient import xml_

from .errors import ConfigError, ModelError
from .composer import Tag, Composer
from .calculator import BaseCalculator
from .proto.gnmi.gnmi_pb2 import PathElem, Path, SetRequest, TypedValue, Update
# create a logger for this module
logger = logging.getLogger(__name__)
# base NETCONF 1.0 namespace URL and the fully-qualified <config> tag
nc_url = xml_.BASE_NS_1_0
config_tag = '{' + nc_url + '}config'
# Per-origin tag-conversion spec: for each gNMI path origin, which Tag
# representation is used for path elements ('path'), JSON value names
# ('val_name') and JSON value contents ('val_val').
ns_spec = {
    'legacy': {
        'path': Tag.JSON_PREFIX,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_PREFIX,
    },
    'rfc7951': {
        'path': Tag.JSON_NAME,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_NAME,
    },
    'openconfig': {
        'path': Tag.JSON_NAME,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_NAME,
    },
    # default origin (empty string) behaves like rfc7951/openconfig
    '': {
        'path': Tag.JSON_NAME,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_NAME,
    },
}
def _tostring(value):
'''_tostring
Convert value to XML compatible string.
'''
if value is True:
return 'true'
elif value is False:
return 'false'
elif value is None:
return None
else:
return str(value)
def _fromstring(value):
'''_fromstring
Convert XML string value to None, boolean, int or float.
'''
if not value:
return None
std_value = value.strip().lower()
if std_value == 'true':
return 'true'
elif std_value == 'false':
return 'false'
# try:
# return int(std_value)
# except ValueError:
# pass
# try:
# return float(std_value)
# except ValueError:
# pass
return value
class gNMIParser(object):
    '''gNMIParser

    A parser to convert a gNMI GetResponse to an lxml Element object. gNMI
    specification can be found at
    https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md

    Attributes
    ----------

    ele : `Element`
        An lxml Element object which is the root of the config tree.

    config_nodes : `list`
        A list of config nodes. Each config node is an Element node in the
        config tree, which is corresponding to one 'update' in the gNMI
        GetResponse.

    xpaths : `list`
        A list of strings. Each string is an xpath of an Element node in the
        config tree, which is corresponding to one 'update' in the gNMI
        GetResponse.
    '''

    def __init__(self, device, gnmi_get_reply):
        self.device = device
        self.reply = gnmi_get_reply
        self._config_nodes = None
        self._ele = None
        # memoization cache for convert_tag(), keyed by (default_ns, tag)
        self._convert_tag = defaultdict(dict)
        # prefix -> module name / namespace URL tables built from the
        # device's (name, prefix, url) namespace tuples
        self._prefix_to_name = {i[1]: i[0] for i in self.device.namespaces
                                if i[1] is not None}
        self._prefix_to_url = {i[1]: i[2] for i in self.device.namespaces
                               if i[1] is not None}

    @property
    def ele(self):
        # lazily derived from config_nodes (a Config object)
        if self._ele is None:
            self._ele = self.config_nodes.ele
        return self._ele

    @property
    def config_nodes(self):
        if self._config_nodes is None:
            self._config_nodes = self.get_config_nodes()
        return self._config_nodes

    @property
    def xpaths(self):
        # NOTE(review): only the first config node is inspected; 'instance'
        # is False when that node has more than one child — confirm intent.
        xpaths = []
        if len(self.config_nodes) > 0 and len(self.config_nodes[0]) > 0:
            if len(self.config_nodes[0]) > 1:
                xpaths.append(self.device.get_xpath(self.config_nodes[0][0],
                                                    type=Tag.LXML_XPATH,
                                                    instance=False))
            else:
                xpaths.append(self.device.get_xpath(self.config_nodes[0][0],
                                                    type=Tag.LXML_XPATH,
                                                    instance=True))
        return xpaths

    def parse_value(self, origin, value, tag):
        '''Convert one gNMI TypedValue (JSON-IETF encoded) into a list of
        lxml Elements rooted at *tag*, translating names per *origin*.'''

        def convert_xml_to_lxml(xml_element, lxml_parent=None, default_ns=''):
            # recursively rebuild the xml.etree tree as lxml, converting tag
            # names (and leaf text) from the origin's JSON naming convention
            ns_name, tag = self.convert_tag(default_ns, xml_element.tag,
                                            src=ns_spec[origin]['val_name'],
                                            dst=Tag.LXML_ETREE)
            val_name_ns_tuple = self.convert_ns(ns_name,
                                                src=ns_spec[origin]['val_name'][0])
            nsmap = {None: val_name_ns_tuple[Tag.NAMESPACE]}
            val_name_ns = val_name_ns_tuple[ns_spec[origin]['val_val'][0]]
            if xml_element.text is not None:
                ns_val, text = self.convert_tag(val_name_ns, xml_element.text,
                                                src=ns_spec[origin]['val_val'],
                                                dst=Tag.JSON_PREFIX)
                if ns_val != val_name_ns:
                    # the text value is qualified by a foreign namespace;
                    # expose that prefix in this element's nsmap
                    v_v_ns = self.convert_ns(ns_val,
                                             src=ns_spec[origin]['val_val'][0])
                    v_v_prefix = v_v_ns[Tag.PREFIX]
                    v_v_url = v_v_ns[Tag.NAMESPACE]
                    nsmap[v_v_prefix] = v_v_url
            if lxml_parent is None:
                lxml_element = etree.Element(tag, nsmap=nsmap)
            else:
                lxml_element = etree.SubElement(lxml_parent, tag, nsmap=nsmap)
            if xml_element.text is not None:
                lxml_element.text = text
            for xml_child in xml_element:
                convert_xml_to_lxml(xml_child,
                                    lxml_parent=lxml_element,
                                    default_ns=ns_name)
            return lxml_element

        n, t = self.convert_tag('', tag,
                                src=Tag.LXML_ETREE,
                                dst=ns_spec[origin]['val_name'])
        # wrap the JSON payload under its tag name, then parse it into
        # xml.etree elements via xmljson's Parker convention
        json_val_str = '{{"{}": {}}}'.format(t, value.json_ietf_val.decode())
        json_data = json.loads(json_val_str, object_pairs_hook=OrderedDict)
        pk = Parker(xml_tostring=_tostring, element=ElementTree.Element)
        return [convert_xml_to_lxml(i) for i in pk.etree(json_data)]

    @staticmethod
    def parse_tag(tag):
        '''Split an lxml-style '{url}name' tag into (url, name); raise
        ModelError when the tag carries no namespace URL.'''
        ret = re.search('^{(.+)}(.+)$', tag)
        if ret:
            return ret.group(1), ret.group(2)
        else:
            raise ModelError("tag '{}' does not have URL info" \
                             .format(tag))

    def convert_tag(self, default_ns, tag, src=Tag.LXML_ETREE, dst=Tag.YTOOL):
        # memoize only the JSON_NAME -> LXML_ETREE direction (the hot path
        # when parsing GetResponse values); delegate everything else
        if src == Tag.JSON_NAME and dst == Tag.LXML_ETREE:
            if default_ns not in self._convert_tag or \
               tag not in self._convert_tag[default_ns]:
                self._convert_tag[default_ns][tag] = \
                    self.device.convert_tag(default_ns, tag, src=src, dst=dst)
            return self._convert_tag[default_ns][tag]
        else:
            return self.device.convert_tag(default_ns, tag, src=src, dst=dst)

    def convert_ns(self, ns, src=Tag.NAME):
        '''Return the single device namespace tuple whose *src* component
        equals *ns*; raise when there are zero or multiple matches.'''
        entries = [i for i in self.device.namespaces if i[src] == ns]
        c = len(entries)
        if c == 0:
            raise ConfigError("{} '{}' does not exist in device attribute " \
                              "'namespaces'" \
                              .format(Tag.STR[src], ns))
        if c > 1:
            raise ModelError("device has more than one {} '{}': {}" \
                             .format(Tag.STR[src], ns, entries))
        return entries[0]

    def get_config_nodes(self):
        '''get_config_nodes

        High-level api: build one Config object accumulating every 'update'
        in every notification of the gNMI GetResponse.

        Returns
        -------

        Config
            A Config object.
        '''
        # local import avoids a circular dependency with .config
        from .config import Config
        config = Config(self.device, config=None)
        for notification in self.reply.notification:
            updates = []  # NOTE(review): unused variable, kept as-is
            for update in notification.update:
                config += self.build_config_node(Config(self.device, config=None),
                                                 notification.prefix,
                                                 update.path, update.val)
        return config

    def get_schema_node(self, parent_schema_node, tag, origin=''):
        '''Locate the schema node for *tag* under *parent_schema_node* (or
        among model roots when the parent is None), looking through
        choice/case layers.  For 'openconfig'/'' origins, tags are matched
        by local name only.'''

        def is_parent(node1, node2):
            # True when node1 is an ancestor of node2 with only choice/case
            # schema layers in between
            ancestors = {id(a): a for a in node2.iterancestors()}
            ids_1 = set([id(a) for a in node1.iterancestors()])
            ids_2 = set([id(a) for a in node2.iterancestors()])
            if not ids_1 < ids_2:
                return False
            for i in ids_2 - ids_1:
                if ancestors[i] is not node1 and \
                   ancestors[i].attrib['type'] != 'choice' and \
                   ancestors[i].attrib['type'] != 'case':
                    return False
            return True

        def get_root(tag):
            # openconfig/default origins only search openconfig models
            if origin == 'openconfig' or origin == '':
                models = [m for m in self.device.models_loaded
                          if m[:10] == 'openconfig']
            else:
                models = self.device.models_loaded
            roots = {}
            for m in models:
                root = get_child(tag, parent=self.device.models[m].tree)
                if root is not None:
                    roots[m] = root
            if len(roots) == 1:
                return list(roots.values())[0]
            elif len(roots) > 1:
                if origin == 'openconfig' or origin == '':
                    tag = self.parse_tag(tag)[1]
                raise ModelError("more than one models have root with tag " \
                                 "'{}': {}" \
                                 .format(tag, ', '.join(roots.keys())))
            else:
                return None

        def get_child(tag, parent):
            # openconfig/default origins compare local names; other origins
            # compare fully-qualified lxml tags
            if origin == 'openconfig' or origin == '':
                children = [i for i in parent.iterdescendants() \
                            if self.parse_tag(i.tag)[1] == tag and \
                            i.get('type') != 'choice' and \
                            i.get('type') != 'case' and \
                            is_parent(parent, i)]
            else:
                children = [i for i in parent.iterdescendants() \
                            if i.tag == tag and \
                            i.get('type') != 'choice' and \
                            i.get('type') != 'case' and \
                            is_parent(parent, i)]
            if len(children) == 1:
                return children[0]
            elif len(children) > 1:
                if parent.getparent() is None:
                    raise ModelError("model {} has more than one root with " \
                                     "tag '{}'" \
                                     .format(parent.tag, tag))
                else:
                    raise ModelError("node {} has more than one child with " \
                                     "tag '{}'" \
                                     .format(self.device.get_xpath(parent),
                                             tag))
            else:
                return None

        # search roots
        if parent_schema_node is None:
            child = get_root(tag)
            if child is None:
                raise ConfigError("root '{}' cannot be found in loaded models" \
                                  .format(tag))
            else:
                return child
        # search from a parent
        child = get_child(tag, parent_schema_node)
        if child is None:
            raise ConfigError("node {} does not have child with tag '{}'" \
                              .format(self.device.get_xpath(parent_schema_node),
                                      tag))
        else:
            return child

    def build_config_node_per_elem(self, origin, parent_config_node, path_elem,
                                   value=None):
        '''Consume one gNMI PathElem: descend into (or create) the matching
        child under *parent_config_node*.  When *value* is given, the
        child's content is replaced with the parsed value and None is
        returned; otherwise the (possibly new) child element is returned.'''

        def cleanup_and_append(origin, parent_config_node, child_schema_node,
                               value):
            # drop existing instances of this tag, then append the freshly
            # parsed value elements
            for n in parent_config_node.findall(child_schema_node.tag):
                parent_config_node.remove(n)
            for n in self.parse_value(origin, value, child_schema_node.tag):
                parent_config_node.append(n)
            return None

        if parent_config_node.tag == config_tag:
            # at the synthetic <config> root there is no schema parent yet
            parent_schema_node = None
            parent_ns = ''
        else:
            parent_schema_node = self.device.get_schema_node(parent_config_node)
            parent_url, parent_tag_name = self.parse_tag(parent_config_node.tag)
            parent_ns_tuple = self.convert_ns(parent_url, src=Tag.LXML_ETREE[0])
            parent_ns = parent_ns_tuple[ns_spec[origin]['path'][0]]
        if origin == 'openconfig' or origin == '':
            child_schema_node = self.get_schema_node(parent_schema_node,
                                                     path_elem.name,
                                                     origin=origin)
        else:
            child_ns, child_tag = self.convert_tag(parent_ns, path_elem.name,
                                                   src=ns_spec[origin]['path'],
                                                   dst=Tag.LXML_ETREE)
            child_schema_node = self.get_schema_node(parent_schema_node,
                                                     child_tag,
                                                     origin=origin)
        # NOTE(review): 'type' shadows the builtin of the same name
        type = child_schema_node.get('type')
        if type == 'leaf' or type == 'leaf-list':
            if value is None:
                raise ConfigError("node {} does not have value" \
                                  .format(self.device.get_xpath(child_schema_node)))
            else:
                return cleanup_and_append(origin, parent_config_node,
                                          child_schema_node, value)
        elif type == 'container':
            if value is None:
                match = parent_config_node.find(child_schema_node.tag)
                if match is not None:
                    return match
                else:
                    return self.subelement(origin,
                                           parent_config_node,
                                           child_schema_node.tag)
            else:
                return cleanup_and_append(origin, parent_config_node,
                                          child_schema_node, value)
        elif type == 'list':
            if value is None:
                # find the keyed instance or create it with its key leaves
                instance = self.find_instance(origin,
                                              parent_config_node,
                                              child_schema_node,
                                              path_elem.key)
                if instance is not None:
                    return instance
                else:
                    return self.subelement(origin,
                                           parent_config_node,
                                           child_schema_node.tag,
                                           key=path_elem.key)
            else:
                return cleanup_and_append(origin, parent_config_node,
                                          child_schema_node, value)
        else:
            raise ModelError("type of node {} is unknown: '{}'" \
                             .format(self.device.get_xpath(parent_schema_node),
                                     type))

    def build_config_node(self, config, prefix, path, value):
        '''Walk the concatenated prefix+path elems from the config root and
        attach *value* at the final elem; returns the merged Config.'''
        from .config import Config
        config_node = config.ele
        absolute_path = list(prefix.elem) + list(path.elem)
        for index, elem in enumerate(absolute_path):
            if index == len(path.elem) - 1:
                # NOTE(review): compares index against len(path.elem), not
                # len(absolute_path); verify intent for non-empty prefixes
                config_saved = Config(self.device, config=deepcopy(config.ele))
                config_node = self.build_config_node_per_elem(path.origin,
                                                              config_node,
                                                              elem,
                                                              value=value)
                return config_saved + config
            else:
                config_node = self.build_config_node_per_elem(path.origin,
                                                              config_node,
                                                              elem)

    def find_instance(self, origin, parent_config_node, child_schema_node, key):
        '''Return the existing list instance under *parent_config_node*
        whose key leaves match the gNMI *key* dict, or None.'''

        def find_key(config_node, key_tag, key_text):
            match = config_node.find(key_tag)
            if match is None:
                return False
            if match.text != key_text:
                return False
            return True

        def find_keys(config_node, key_tuple):
            for key_tag, nsmap, key_text in key_tuple:
                if not find_key(config_node, key_tag, key_text):
                    return False
            return True

        keys = child_schema_node.get('key').split()
        if len(keys) != len(key):
            raise ConfigError("node {} has {} keys in Path object, but the " \
                              "schema node requires {} keys: {}" \
                              .format(self.device.get_xpath(child_schema_node),
                                      len(key), len(keys), ', '.join(keys)))
        key_tuple = self.parse_key(origin, child_schema_node.tag, key)
        # sanity-check that every supplied key is a schema-declared key
        for key_tag, nsmap, text in key_tuple:
            url, tag_name = self.parse_tag(key_tag)
            if tag_name not in keys:
                raise ConfigError("node {} does not have key {}" \
                                  .format(self.device.get_xpath(child_schema_node),
                                          key_tag))
        for child in parent_config_node.findall(child_schema_node.tag):
            if find_keys(child, key_tuple):
                return child
        return None

    def get_prefix(self, text):
        '''Split 'prefix:rest' when the prefix is a known namespace prefix;
        otherwise return ('', text).  None maps to ('', None).'''
        if text is None:
            return '', None
        m = re.search('^(.*):(.*)$', text)
        if m:
            if m.group(1) in self._prefix_to_name:
                return m.group(1), m.group(2)
            else:
                return '', text
        else:
            return '', text

    def parse_key(self, origin, tag, key):
        '''Convert a gNMI key dict into a list of (lxml key tag, nsmap,
        text) tuples for building or matching list instances.'''
        url, tag_name = self.parse_tag(tag)
        text_ns_tuple = self.convert_ns(url, src=Tag.NAMESPACE)
        default_ns = text_ns_tuple[ns_spec[origin]['path'][0]]
        ret = []
        for k, v in key.items():
            tag_ns, key_tag = self.convert_tag(default_ns, k,
                                               src=ns_spec[origin]['path'],
                                               dst=Tag.LXML_ETREE)
            text_ns, text = self.convert_tag(tag_ns, v,
                                             src=ns_spec[origin]['path'],
                                             dst=Tag.XPATH)
            text_ns_tuple = self.convert_ns(tag_ns,
                                            src=ns_spec[origin]['path'][0])
            nsmap = {None: text_ns_tuple[Tag.NAMESPACE]}
            if text_ns != tag_ns:
                # key value is qualified by a different module; add its
                # prefix to the element's nsmap
                text_ns_tuple = self.convert_ns(text_ns,
                                                src=ns_spec[origin]['path'][0])
                nsmap[text_ns_tuple[Tag.PREFIX]] = text_ns_tuple[Tag.NAMESPACE]
            ret.append((key_tag, nsmap, text))
        return ret

    def subelement(self, origin, parent, tag, key={}):
        '''Create a child element *tag* under *parent* (plus its key leaf
        children when *key* is given) and return it.

        NOTE(review): *key* uses a mutable default; appears safe because it
        is never mutated here, but worth confirming.
        '''
        url, tag_name = self.parse_tag(tag)
        e = etree.SubElement(parent, tag, nsmap={None: url})
        default_ns_tuple = self.convert_ns(url, src=Tag.NAMESPACE)
        default_ns = default_ns_tuple[ns_spec[origin]['path'][0]]
        if key:
            for key_tag, nsmap, text in self.parse_key(origin, tag, key):
                e_child = etree.SubElement(e, key_tag, nsmap=nsmap)
                e_child.text = text
        return e
class gNMIComposer(Composer):
    '''gNMIComposer

    A composer to convert an lxml Element object to gNMI JSON format. gNMI
    adopts RFC 7951 when encoding data. One gNMIComposer instance abstracts
    a config node in config tree.
    '''

    def __init__(self, *args, **kwargs):
        super(gNMIComposer, self).__init__(*args, **kwargs)
        # namespace URL -> prefix table from the device's namespace tuples
        self._url_to_prefix = {i[2]: i[1] for i in self.device.namespaces
                               if i[1] is not None}

    def get_json(self, instance=True, origin='openconfig'):
        '''get_json

        High-level api: get_json returns json_val of the config node.

        Parameters
        ----------

        instance : `bool`
            True if only one instance of list or leaf-list is required. False if
            all instances of list or leaf-list are needed.

        origin : `str`
            gNMI origin; selects the tag-conversion rules from ns_spec.

        Returns
        -------

        str
            A string in JSON format.
        '''

        def get_json_instance(node):
            pk = Parker(xml_fromstring=_fromstring, dict_type=OrderedDict)
            default_ns = {}
            # rewrite each element's tag (and leaf text) in place, from lxml
            # naming to the origin's JSON naming; default_ns remembers each
            # element's namespace so children inherit their parent's
            for item in node.iter():
                parents = [p for p in node.iter() if item in p]
                if parents and id(parents[0]) in default_ns:
                    ns, tag = self.device.convert_tag(default_ns[id(parents[0])],
                                                      item.tag,
                                                      dst=ns_spec[origin]['val_name'])
                else:
                    ns, tag = self.device.convert_tag('',
                                                      item.tag,
                                                      dst=ns_spec[origin]['val_name'])
                default_ns[id(item)] = ns
                item.tag = tag
                if item.text:
                    text = self.device.convert_tag(self._url_to_prefix[ns],
                                                   item.text,
                                                   src=Tag.JSON_PREFIX,
                                                   dst=ns_spec[origin]['val_val'])[1]
                    item.text = text
            return pk.data(node)

        def convert_node(node):
            # lxml.etree does not allow tag name like oc-if:enable
            # so it is converted to xml.etree.ElementTree
            string = etree.tostring(node, encoding='unicode',
                                    pretty_print=False)
            return ElementTree.fromstring(string)

        if instance:
            return json.dumps(get_json_instance(convert_node(self.node)))
        else:
            # serialize every same-tag sibling; a JSON array when > 1
            nodes = [n for n in
                     self.node.getparent().iterchildren(tag=self.node.tag)]
            if len(nodes) > 1:
                return json.dumps([get_json_instance(convert_node(n))
                                   for n in nodes])
            else:
                return json.dumps(get_json_instance(convert_node(nodes[0])))

    def get_path(self, instance=True, origin='openconfig'):
        '''get_path

        High-level api: get_path returns gNMI path object of the config node.
        Note that gNMI Path can specify list instance but cannot specify
        leaf-list instance.

        Parameters
        ----------

        instance : `bool`
            True if the gNMI Path object refers to only one instance of a list.
            False if the gNMI Path object refers to all instances of a list.

        Returns
        -------

        Path
            An object of gNMI Path class.
        '''

        def get_name(node, default_ns):
            if origin == 'openconfig' or origin == '':
                return gNMIParser.parse_tag(node.tag)
            else:
                return self.device.convert_tag(default_ns,
                                               node.tag,
                                               src=Tag.LXML_ETREE,
                                               dst=ns_spec[origin]['path'])

        def get_keys(node, default_ns):
            keys = Composer(self.device, node).keys
            ret = {}
            for key in keys:
                if origin=='openconfig' or origin == '':
                    key_ns, key_val = gNMIParser.parse_tag(key)
                else:
                    key_ns, key_val = self.device.convert_tag(default_ns,
                                                              key,
                                                              src=Tag.LXML_ETREE,
                                                              dst=ns_spec[origin]['path'])
                ns_tuple = self.convert_ns(key_ns, src=Tag.NAMESPACE)
                val_ns, val_val = self.device.convert_tag(ns_tuple[Tag.PREFIX],
                                                          node.find(key).text,
                                                          src=Tag.XPATH,
                                                          dst=ns_spec[origin]['path'])
                ret[key_val] = val_val
            return ret

        def get_pathelem(node, default_ns):
            ns, name = get_name(node, default_ns)
            schema_node = self.device.get_schema_node(node)
            # list nodes carry their keys, except the tip node itself when
            # instance=False (the Path then refers to all instances)
            if schema_node.get('type') == 'list' and \
               (node != self.node or instance):
                return ns, PathElem(name=name, key=get_keys(node, ns))
            else:
                return ns, PathElem(name=name)

        # ancestors from the root (skipping the outermost wrapper) + self
        nodes = list(reversed(list(self.node.iterancestors())))[1:] + \
                [self.node]
        path_elems = []
        default_ns = ''
        for node in nodes:
            default_ns, path_elem = get_pathelem(node, default_ns)
            path_elems.append(path_elem)
        return Path(elem=path_elems, origin=origin)

    def convert_ns(self, ns, src=Tag.NAME):
        '''Return the single device namespace tuple whose *src* component
        equals *ns*; raise when there are zero or multiple matches.'''
        entries = [i for i in self.device.namespaces if i[src] == ns]
        c = len(entries)
        if c == 0:
            raise ConfigError("{} '{}' does not exist in device attribute " \
                              "'namespaces'" \
                              .format(Tag.STR[src], ns))
        if c > 1:
            raise ModelError("device has more than one {} '{}': {}" \
                             .format(Tag.STR[src], ns, entries))
        return entries[0]
class gNMICalculator(BaseCalculator):
    '''gNMICalculator

    A gNMI calculator to do subtraction and addition. A subtraction is to
    compute the delta between two Config instances in a form of gNMI SetRequest.
    An addition is to apply one gNMI SetRequest to a Config instance (TBD).

    Attributes
    ----------

    sub : `SetRequest`
        A gNMI SetRequest which can achieve a transition from one config, i.e.,
        self.etree2, to another config, i.e., self.etree1.
    '''

    @property
    def sub(self):
        deletes, replaces, updates = self.node_sub(self.etree1, self.etree2)
        return SetRequest(prefix=None,
                          delete=deletes,
                          replace=replaces,
                          update=updates)

    def node_sub(self, node_self, node_other):
        '''node_sub

        High-level api: Compute the delta of two config nodes. This method is
        recursive, assuming two config nodes are different.

        Parameters
        ----------

        node_self : `Element`
            A config node in the destination config that is being processed.
            node_self cannot be a leaf node.

        node_other : `Element`
            A config node in the source config that is being processed.

        Returns
        -------

        tuple
            There are three elements in the tuple: a list of gNMI Path
            instances that need to be deleted, a list of gNMI Update instances
            for replacement purpose, and a list of gNMI Update instances for
            merging purpose.
        '''
        paths_delete = []
        updates_replace = []
        updates_update = []
        # tags whose whole leaf-list/list has already been emitted, so that
        # further siblings with the same tag are skipped
        done_list = []

        # if a leaf-list node, delete the leaf-list totally
        # if a list node, by default delete the list instance
        # if a list node and delete_whole=True, delete the list totally
        def generate_delete(node, instance=True):
            paths_delete.append(gNMIComposer(self.device, node) \
                                .get_path(instance=instance))

        # if a leaf-list node, replace the leaf-list totally
        # if a list node, replace the list totally
        def generate_replace(node, instance=True):
            n = gNMIComposer(self.device, node)
            json_value = n.get_json(instance=instance).encode()
            value = TypedValue(json_val=json_value)
            path = n.get_path(instance=instance)
            updates_replace.append(Update(path=path, val=value))

        # if a leaf-list node, update the leaf-list totally
        # if a list node, by default update the list instance
        # if a list node and update_whole=True, update the list totally
        def generate_update(node, instance=True):
            n = gNMIComposer(self.device, node)
            json_value = n.get_json(instance=instance).encode()
            value = TypedValue(json_val=json_value)
            path = n.get_path(instance=instance)
            updates_update.append(Update(path=path, val=value))

        # the leaf-list value sequence under node_self is different from the one
        # under node_other
        def leaf_list_seq_is_different(tag):
            if [i.text for i in node_self.iterchildren(tag=tag)] == \
               [i.text for i in node_other.iterchildren(tag=tag)]:
                return False
            else:
                return True

        # the leaf-list value set under node_self is different from the one
        # under node_other
        def leaf_list_set_is_different(tag):
            s_list = [i.text for i in node_self.iterchildren(tag=tag)]
            o_list = [i.text for i in node_other.iterchildren(tag=tag)]
            if set(s_list) == set(o_list):
                return False
            else:
                return True

        # the leaf-list or list under node_self is empty
        def list_is_empty(tag):
            if [i for i in node_self.iterchildren(tag=tag)]:
                return False
            else:
                return True

        # the sequence of list instances under node_self is different from the
        # one under node_other
        def list_seq_is_different(tag):
            s_list = [i for i in node_self.iterchildren(tag=tag)]
            o_list = [i for i in node_other.iterchildren(tag=tag)]
            if [self.device.get_xpath(n) for n in s_list] == \
               [self.device.get_xpath(n) for n in o_list]:
                return False
            else:
                return True

        # all list instances under node_self have peers under node_other, and
        # the sequence of list instances under node_self that have peers under
        # node_other is same as the sequence of list instances under node_other
        def list_seq_is_inclusive(tag):
            s_list = [i for i in node_self.iterchildren(tag=tag)]
            o_list = [i for i in node_other.iterchildren(tag=tag)]
            s_seq = [self.device.get_xpath(n) for n in s_list]
            o_seq = [self.device.get_xpath(n) for n in o_list]
            if set(s_seq) <= set(o_seq) and \
               [i for i in s_seq if i in o_seq] == o_seq:
                return True
            else:
                return False

        in_s_not_in_o, in_o_not_in_s, in_s_and_in_o = \
            self._group_kids(node_self, node_other)
        # children only in the destination config: additions
        for child_s in in_s_not_in_o:
            schema_node = self.device.get_schema_node(child_s)
            if schema_node.get('type') == 'leaf':
                generate_update(child_s)
            elif schema_node.get('type') == 'leaf-list':
                if child_s.tag not in done_list:
                    generate_replace(child_s, instance=False)
                    done_list.append(child_s.tag)
            elif schema_node.get('type') == 'container':
                generate_update(child_s)
            elif schema_node.get('type') == 'list':
                if schema_node.get('ordered-by') == 'user':
                    # user-ordered lists must be replaced whole to preserve
                    # instance order
                    if child_s.tag not in done_list:
                        generate_replace(child_s, instance=False)
                        done_list.append(child_s.tag)
                else:
                    generate_update(child_s, instance=True)
        # children only in the source config: deletions
        for child_o in in_o_not_in_s:
            schema_node = self.device.get_schema_node(child_o)
            if schema_node.get('type') == 'leaf':
                generate_delete(child_o)
            elif schema_node.get('type') == 'leaf-list':
                if child_o.tag not in done_list:
                    child_s = node_self.find(child_o.tag)
                    if child_s is None:
                        generate_delete(child_o, instance=False)
                    else:
                        generate_replace(child_s, instance=False)
                    done_list.append(child_o.tag)
            elif schema_node.get('type') == 'container':
                generate_delete(child_o)
            elif schema_node.get('type') == 'list':
                if schema_node.get('ordered-by') == 'user':
                    if list_seq_is_inclusive(child_o.tag):
                        generate_delete(child_o, instance=True)
                    else:
                        if child_o.tag not in done_list:
                            generate_replace(child_o, instance=False)
                            done_list.append(child_o.tag)
                else:
                    if list_is_empty(child_o.tag):
                        if child_o.tag not in done_list:
                            generate_delete(child_o, instance=False)
                            done_list.append(child_o.tag)
                    else:
                        generate_delete(child_o, instance=True)
        # children present in both configs: recurse on differences
        for child_s, child_o in in_s_and_in_o:
            schema_node = self.device.get_schema_node(child_s)
            if schema_node.get('type') == 'leaf':
                if child_s.text != child_o.text:
                    generate_update(child_s)
            elif schema_node.get('type') == 'leaf-list':
                if child_s.tag not in done_list:
                    if schema_node.get('ordered-by') == 'user':
                        if leaf_list_seq_is_different(child_s.tag):
                            generate_replace(child_s, instance=False)
                    else:
                        if leaf_list_set_is_different(child_s.tag):
                            generate_replace(child_s, instance=False)
                    done_list.append(child_s.tag)
            elif schema_node.get('type') == 'container':
                if BaseCalculator(self.device, child_s, child_o).ne:
                    d, r, u = self.node_sub(child_s, child_o)
                    paths_delete += d
                    updates_replace += r
                    updates_update += u
            elif schema_node.get('type') == 'list':
                if schema_node.get('ordered-by') == 'user':
                    if list_seq_is_different(child_s.tag):
                        if child_s.tag not in done_list:
                            generate_replace(child_s, instance=False)
                            done_list.append(child_s.tag)
                    else:
                        if BaseCalculator(self.device, child_s, child_o).ne:
                            d, r, u = self.node_sub(child_s, child_o)
                            paths_delete += d
                            updates_replace += r
                            updates_update += u
                else:
                    if BaseCalculator(self.device, child_s, child_o).ne:
                        d, r, u = self.node_sub(child_s, child_o)
                        paths_delete += d
                        updates_replace += r
                        updates_update += u
        return (paths_delete, updates_replace, updates_update)
| 36,264 | 9,807 |
"""exercism tournament module."""
class Team:
    """A team in a football tournament.

    Tracks the team's played matches, wins, draws, losses and points.
    Comparison operators order teams by points only, so two teams with
    equal points compare equal regardless of name.  (The original code
    defined a name-based __eq__ that was immediately shadowed by the
    points-based one; the dead definition has been removed.)
    """

    def __init__(self, name):
        """Create a team.

        Parameters
        ----------
        name : str
            Name of the team.
        """
        self.name = name
        self.matches = 0
        self.wins = 0
        self.draws = 0
        self.losses = 0
        self.points = 0

    def __repr__(self):
        return repr((self.name, self.points))

    def __eq__(self, other):
        return self.points == other.points

    def __ne__(self, other):
        return self.points != other.points

    def __lt__(self, other):
        return self.points < other.points

    def __le__(self, other):
        return self.points <= other.points

    def __gt__(self, other):
        return self.points > other.points

    def __ge__(self, other):
        return self.points >= other.points

    def win(self):
        """The team Won! Updates the teams matches, wins and points."""
        self.matches += 1
        self.wins += 1
        self.points += 3

    def loss(self):
        """The team Lost! Updates the teams matches and losses."""
        self.matches += 1
        self.losses += 1

    def draw(self):
        """The team drew! Updates the teams draws, matches and points."""
        self.matches += 1
        self.draws += 1
        self.points += 1
class Tourney:
    """A football Tournament that tracks teams that take part."""

    def __init__(self):
        self.teams = {}

    def _get_team(self, name):
        """Return the Team called *name*, creating it on first sight."""
        if name not in self.teams:
            self.teams[name] = Team(name)
        return self.teams[name]

    def get_teams(self):
        """A Dictionary of the teams in the Tournament"""
        return self.teams

    def parse_contest(self, rows):
        """Determine the teams and their results in a Tournament.

        Each row has the form "home;away;result" where result is one of
        'win', 'loss' or 'draw' (from the home team's point of view).
        Fields are stripped of surrounding whitespace so rows read straight
        from a file (with trailing newlines) are handled; malformed rows
        and unknown result strings are silently ignored.
        """
        for line in rows:
            matchresult = [field.strip() for field in line.split(';')]
            if len(matchresult) < 3:
                continue  # malformed row: not enough fields
            team1 = self._get_team(matchresult[0])
            team2 = self._get_team(matchresult[1])
            if matchresult[2] == 'win':
                team1.win()
                team2.loss()
            elif matchresult[2] == 'loss':
                team1.loss()
                team2.win()
            elif matchresult[2] == 'draw':
                team1.draw()
                team2.draw()
            # any other result string is deliberately ignored
def tally(rows):
    """Tally the results of a small football competition.

    Parameters
    ----------
    rows : list
        A list of the results of matches in the competition.
        Example: "Allegoric Alaskans;Blithering Badgers;win"

    Returns
    -------
    list
        The formatted results table, one (right-stripped) string per line.
        The table is also written to 'tournament_results.txt', as the
        exercise requires; the file keeps each row's trailing space, the
        returned lines do not (matching the original read-back behavior).
    """
    tournament = Tourney()
    tournament.parse_contest(rows)
    # Sort by points descending, then alphabetically by name.
    sorted_teams = sorted(tournament.get_teams().values(),
                          key=lambda team: (-team.points, team.name))
    lines = ['Team'.ljust(31) + '| MP | W | D | L | P']
    for team in sorted_teams:
        lines.append(team.name.ljust(31)
                     + f'| {team.matches} '
                     + f'| {team.wins} '
                     + f'| {team.draws} '
                     + f'| {team.losses} '
                     + f'| {team.points} ')
    # Write with a context manager so the handle is closed even on error,
    # and build the return value directly instead of re-reading the file.
    with open('tournament_results.txt', 'w') as outf:
        outf.writelines(line + '\n' for line in lines)
    return [line.rstrip() for line in lines]
| 3,927 | 1,211 |
import logging
from discord import Message
from discord.ext.commands import Cog, Context, command, has_permissions, Bot
from utils.utils import log_event, db, get_dict
from extensions.extension_templates import DatabaseHandler
DEFAULT_PREFIX = '?'  # prefix used until a server configures its own
PREFIXES_DB_KEY = 'prefixes_for_servers'  # DB key holding the per-server prefix map
class PrefixDBHandler(DatabaseHandler):
    """Cog that stores and updates a per-server command prefix in the DB."""

    # On First Joining Server
    @Cog.listener()
    async def on_guild_join(self, guild):
        """Register the default prefix when the bot joins a new server.

        The original annotated *guild* as ``Context.guild``, which is a
        property object rather than a type; the annotation was removed
        (the argument is the joined guild object supplied by discord.py).
        """
        self.set_value_for_server(guild_id=guild.id, value=DEFAULT_PREFIX)
        log_event(f'Joined the server: {guild.name} - {guild.id}')

    @command(brief="Change the bot's prefix for this server")
    @has_permissions(administrator=True)
    async def pf(self, ctx: Context, prefix):
        """Set *prefix* as this server's command prefix (admins only)."""
        self.set_value_for_server(guild_id=ctx.guild.id, value=prefix)
        message = f"set '{prefix}' as the prefix for the server '{ctx.guild}'"
        log_event(message)
        await ctx.send(f'{ctx.author.mention} {message}')
############################
# STATIC METHODS #
############################
def get_prefix_for_guild(guild_id: int):
    """Return the stored prefix for *guild_id*, or DEFAULT_PREFIX.

    Falls back to DEFAULT_PREFIX (logging a critical event) when the
    prefixes DB cannot be fetched or has no entry for this guild.
    """
    prefixes_raw_dict = db.get(PREFIXES_DB_KEY)
    if prefixes_raw_dict is None:
        # 'f' prefix removed from this placeholder-free string (was a no-op)
        log_event("Error Fetching prefixes DB", logging.CRITICAL)
        return DEFAULT_PREFIX
    try:
        return get_dict(prefixes_raw_dict)[str(guild_id)]
    except KeyError:
        log_event(f"Failed trying to fetch prefix for server id {guild_id}", logging.CRITICAL)
        return DEFAULT_PREFIX
# bot is passed by default by the API but not needed for this function.
def get_prefix(_: Bot, message: Message):
    """Resolve the command prefix for the server *message* came from."""
    guild_id = message.guild.id
    return get_prefix_for_guild(guild_id)
# expected function for outside calling function 'load_extension()'
def setup(_bot):
    """Entry point used by discord.py's load_extension()."""
    cog = PrefixDBHandler(_bot, PREFIXES_DB_KEY)
    _bot.add_cog(cog)
| 1,816 | 589 |
import gws
import importlib
def add(job):
    """Queue *job* on the uWSGI spooler, identified by its uid."""
    # imported lazily: the 'uwsgi' module only exists inside a uWSGI process
    uwsgi = importlib.import_module('uwsgi')
    gws.log.info("SPOOLING", job.uid)
    payload = {b'job_uid': gws.as_bytes(job.uid)}
    uwsgi.spool(payload)
| 190 | 85 |
"""
Blockchain configuration
"""
import os
import json
from dataclasses import dataclass, field, replace
from typing import List, Mapping, Optional
import yaml
import dacite
from .models import Peer, Channel, User, Orderer, ChaincodeSpec
from .models.gateway import Gateway
from .constants import ChaincodeLanguage
@dataclass()
class GatewayConfig:
    """ A gateway config object """
    # name of the channel this gateway operates on
    channel: str
    # key (in BlockchainConfig.users) of the identity making requests
    requestor: str
    # keys (in BlockchainConfig.peers / .orderers) used by this gateway
    endorsing_peers: List[str] = field(default_factory=list)
    orderers: List[str] = field(default_factory=list)
    # optional key (in BlockchainConfig.chaincodes) of the default chaincode
    chaincode: Optional[str] = None
@dataclass()
class BlockchainConfig:
    """ A gateway for accessing the blockchain.

    Maps config names to entity definitions and resolves a named gateway
    into a connected ``Gateway`` object via :meth:`get_gateway`.
    """

    # Named entity maps; keys are the config names used by the get_* helpers.
    peers: Mapping[str, Peer] = field(default_factory=dict)
    orderers: Mapping[str, Orderer] = field(default_factory=dict)
    users: Mapping[str, User] = field(default_factory=dict)
    chaincodes: Mapping[str, ChaincodeSpec] = field(default_factory=dict)
    gateways: Mapping[str, GatewayConfig] = field(default_factory=dict)

    @classmethod
    def from_file(cls, file_path: str):
        """ Loads gateway config from a static .json/.yaml/.yml file """
        ext = os.path.splitext(file_path)[1]
        with open(file_path) as inf:
            if ext == '.json':
                return cls.from_dict(json.load(inf))
            if ext in {'.yaml', '.yml'}:
                return cls.from_dict(yaml.load(inf, Loader=yaml.SafeLoader))
            raise ValueError(
                f'Unrecognized file extension for file {file_path}'
            )

    @classmethod
    def from_dict(cls, value: dict):
        """ Creates a gateway config from a dictionary """
        # dacite builds the nested dataclasses; the type hook converts plain
        # strings into ChaincodeLanguage members.
        return dacite.from_dict(cls, value, config=dacite.Config(
            type_hooks={
                ChaincodeLanguage: ChaincodeLanguage
            }
        ))

    def __post_init__(self):
        # Backfill each entity's ``name`` from its mapping key for entities
        # that weren't provided names explicitly.
        for attr in ('peers', 'orderers', 'users', 'chaincodes'):
            setattr(self, attr, {
                key: replace(entity, name=entity.name or key)
                for key, entity in getattr(self, attr).items()
            })

    def get_gateway(self, name: str):
        """ Gets a gateway using the config name, resolving all references """
        if name not in self.gateways:
            raise KeyError(f'No gateway defined with name "{name}"')
        config = self.gateways[name]
        return Gateway(
            endorsing_peers=[
                self.get_peer(peer) for peer in config.endorsing_peers
            ],
            chaincode=self.get_chaincode(config.chaincode) if config.chaincode else None,
            requestor=self.get_user(config.requestor),
            orderers=[
                self.get_orderer(orderer) for orderer in config.orderers
            ],
            channel=Channel(name=config.channel)
        )

    def _lookup(self, mapping, kind: str, name: str):
        # Shared guard for the get_* helpers below; error messages are kept
        # identical to the original per-method versions.
        if name not in mapping:
            raise KeyError(f'No {kind} defined with name "{name}"')
        return mapping[name]

    def get_peer(self, name: str):
        """ Gets a peer using the config name """
        return self._lookup(self.peers, 'peer', name)

    def get_orderer(self, name: str):
        """ Gets a orderer using the config name """
        return self._lookup(self.orderers, 'orderer', name)

    def get_user(self, name: str):
        """ Gets a user using the config name """
        return self._lookup(self.users, 'user', name)

    def get_chaincode(self, name: str):
        """ Gets a chaincode spec using the config name """
        return self._lookup(self.chaincodes, 'chaincode', name)
| 4,116 | 1,193 |
# -*- coding: utf-8 -*-
import ast
import re

import nltk

from papersmith.editor.issue import Issue
def check(content):
    """Report 'many'/'few' used with an uncountable noun.

    Scans *content* for the quantifiers 'many' and 'few'; when the rest of
    the clause (up to the next ``. ! ? , : ;``, capped at 1000 characters)
    contains a singular noun (NN tag) that appears in the uncountable-noun
    list, an Issue suggesting 'much' (for 'many') or 'little' (for 'few')
    is reported at the quantifier's position.

    Fixes over the original implementation:
    - the original rebound the index ``i`` inside ``for i in range(...)``,
      which does not advance the outer loop, so the same region was
      re-scanned and duplicate issues were emitted (it also shadowed ``j``);
    - ``eval`` on the noun-list file is replaced with ``ast.literal_eval``
      and the file handle is now closed.
    """
    # Quantifier lists kept from the original for future rules; only
    # 'many' and 'few' are currently checked.
    cno=['many','few','a few','a number of','the number of','numbers of','a quantity of','quantities of','a good many','a great many','a large number of','a great number of','scores of','dozens of']
    uno=['much','little','a little','huge amounts of','a great amount of','a large amount of','a great deal of','a large deal of','a plenty of','a good supply of','a piece of','a bit of','an item of', 'an article of','a bottle of','a cup of','a drop of','a glass of']
    with open('papersmith/editor/grammar/uncountable_nouns.txt') as noun_file:
        uncountable_nouns = ast.literal_eval(noun_file.read())
    issues = []
    # Words are maximal runs of letters/apostrophes, as in the original
    # character-by-character scanner.
    for match in re.finditer(r"[A-Za-z']+", content):
        word = match.group(0)
        if word not in ('many', 'few'):
            continue
        # Index of the quantifier's last character (the original's ``pos``).
        pos = match.end() - 1
        tail = content[pos:pos + 1000]
        clause = re.split(r"[.!?,:;]", tail, maxsplit=1)[0]
        tagged = nltk.pos_tag(nltk.word_tokenize(clause))
        for token, tag in tagged:
            if tag == 'NN' and token in uncountable_nouns:
                if word == 'many':
                    issues.append(Issue(2, 1, [pos-4], [pos], 'much', 0))
                else:
                    issues.append(Issue(2, 1, [pos-3], [pos], 'little', 0))
                break  # one issue per quantifier occurrence
    return issues
| 1,570 | 689 |
import docutils
import docutils.nodes
import docutils.parsers.rst
import docutils.parsers.rst.directives
import sphinx.addnodes
import sphinx.application
import sphinx.directives
import sphinx.domains
import sphinx.environment
import sphinx.locale
import sphinx.roles
import sphinx.util.compat
import sphinx.util.docfields
import sphinx.util.nodes
class AnsibleRoleRole(sphinx.roles.XRefRole):
    # Cross-reference role for :ansible:role:`...`; the link title and
    # target are passed through unchanged.
    def process_link(self, env, refnode, has_explicit_title, title, target):
        return title, target
class AnsibleRoleDirective(sphinx.directives.ObjectDescription):
    """Directive describing an Ansible role (``.. ansible:role:: ns/name``)."""
    required_arguments = 1
    doc_field_types = [
        sphinx.util.docfields.GroupedField(
            'default',
            label=sphinx.locale.l_('Defaults'),
            # BUG fix: the original listed 'default' twice in the tuple.
            names=('default',)
        ),
        sphinx.util.docfields.Field(
            'dependency',
            label=sphinx.locale.l_('Dependencies'),
            names=('dependency', 'depend'),
            rolename='role',
            bodyrolename='role'
        ),
        sphinx.util.docfields.TypedField(
            'parameter',
            label=sphinx.locale.l_('Parameters'),
            names=('param', 'parameter', 'arg', 'argument'),
            typerolename='role',
            typenames=('type',)
        ),
        sphinx.util.docfields.Field(
            'become',
            label=sphinx.locale.l_('Uses become'),
            # BUG fix: ('become') is just the string 'become', not a tuple;
            # docfields expects a tuple of field names.
            names=('become',)
        )
    ]
    option_spec = {
        'noindex': docutils.parsers.rst.directives.flag
    }
    has_content = True

    def handle_signature(self, sig: str, signode: sphinx.addnodes.desc_signature):
        """Render "Role <ns> <name>" and return the object name 'role-<sig>'."""
        (ns, _, rolename) = sig.rpartition('/')
        signode += sphinx.addnodes.desc_annotation('role', 'Role ')
        signode += sphinx.addnodes.desc_addname(ns, "{ns} ".format(ns=ns))
        signode += sphinx.addnodes.desc_name(rolename, rolename)
        return 'role-' + sig

    def add_target_and_index(self, name, sig, signode):
        """Register the role in the domain data and as a link target."""
        targetname = name
        signode['ids'].append(targetname)
        self.env.domaindata['ansible']['roles'][name] = (self.env.docname, name)
        self.state.document.note_explicit_target(signode)
class AnsibleDomain(sphinx.domains.Domain):
    """Ansible domain: registers the ``role`` object type, directive and xref."""
    name = "ansible"
    label = "Ansible"
    object_types = {
        'role': sphinx.domains.ObjType(sphinx.locale.l_('role'), 'role')
    }
    directives = {
        'role': AnsibleRoleDirective
    }
    roles = {
        'role': AnsibleRoleRole()
    }
    initial_data = {
        "roles": {}
    }

    def clear_doc(self, doc):
        """Drop all roles recorded for document *doc* (called on rebuild).

        BUG fixes: the original compared *doc* against the role name
        (index 1) instead of the recorded docname (index 0), and it deleted
        entries from the dict while iterating it (RuntimeError).
        """
        for name, (docname, _) in list(self.data['roles'].items()):
            if doc == docname:
                del self.data['roles'][name]

    def get_objects(self):
        # Yields (name, dispname, type, docname, anchor, priority).
        for docname, name in self.data['roles'].values():
            yield name, name, 'role', docname, 'role-' + name, 1

    def resolve_xref(self, env, fromdocname, builder,
                     type, target, node, contnode):
        """Resolve an ansible role cross-reference to its definition.

        BUG fix: the original's ``else: return`` was attached to the inner
        ``if`` inside the loop, so only the first recorded role was ever
        inspected.  Debug prints removed.
        """
        if type == "role":
            for docname, name in self.data['roles'].values():
                if name == target:
                    return sphinx.util.nodes.make_refnode(
                        builder,
                        fromdocname,
                        docname,
                        name,
                        contnode
                    )
        return None

    def resolve_any_xref(self, env, fromdocname, builder,
                         type, target, node, contnode):
        # Sphinx expects a list of ('domain:role', node) pairs here; this
        # domain does not participate in `any` resolution.  The original
        # returned None (and printed debug output), which Sphinx would
        # attempt to iterate.
        return []
def setup(app: sphinx.application.Sphinx):
    """Initialize the sphinx extension for ansible.

    Registers the ``ansible`` domain (role directive + cross-references).
    """
    app.add_domain(AnsibleDomain)
| 4,038 | 1,249 |
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ..core.dag import BaseDagNode, DagEndpointType
from ..nodes.exporters import get_input_table_name, get_input_partitions, get_output_table_name, \
get_output_table_partitions
class SplitNode(BaseDagNode):
    """DAG node that splits one input table into two outputs by *percentage*."""
    def __init__(self, percentage):
        super(SplitNode, self).__init__("split")
        # percentage is passed through as the "fraction" parameter of the
        # underlying split operator.
        self.marshal({
            "parameters": {
                "fraction": percentage,
            },
            "inputs": [(1, "input", DagEndpointType.DATA)],
            "outputs": [(1, "output1", DagEndpointType.DATA), (2, "output2", DagEndpointType.DATA)]
        })
        # Exporters expose the table name/partitions of each endpoint to the
        # execution context.
        self.add_exporter("inputTableName", lambda context: get_input_table_name(context, self, "input"))
        self.add_exporter("inputTablePartitions", lambda context: get_input_partitions(context, self, "input"))
        # NOTE(review): "TablePartition" (singular) below vs "TablePartitions"
        # above looks inconsistent, but downstream consumers may depend on the
        # exact exporter keys — confirm before renaming.
        self.add_exporter("output1TableName", lambda context: get_output_table_name(context, self, "output1"))
        self.add_exporter("output1TablePartition", lambda context: get_output_table_partitions(context, self, "output1"))
        self.add_exporter("output2TableName", lambda context: get_output_table_name(context, self, "output2"))
        self.add_exporter("output2TablePartition", lambda context: get_output_table_partitions(context, self, "output2"))
| 2,068 | 597 |
import re
import datetime
import time
import urllib
import json
from inspect import ismethod
from django.views import generic
from django.utils import timezone
from django.utils.decorators import classonlymethod
from django.db.models import (
Model, Manager, AutoField, CharField
)
from django.db.models.query import QuerySet
from django.db.models.fields.files import ImageFieldFile
from django.core.exceptions import FieldDoesNotExist
from django.http import (HttpResponse,
HttpResponseNotFound, HttpResponseForbidden)
from .. import utils
DEFAULT_PAGE_SIZE = 200
DEFAULT_PAGE_SIZE_MAX = 200
class Stream:
    """Per-request container: the authenticated user, raw query params,
    path params, and sanitized paging values in ``client_params``."""
    user = None
    query = None
    params = None  # from path
    count = None
    skip = None
    keys = [
        ('count', int, DEFAULT_PAGE_SIZE),
        ('skip', int, 0)
    ]
    def __init__(self, HttpRequest, params):
        self.user = HttpRequest.user
        self.query = HttpRequest.GET
        self.params = params
        # Cast each known paging key from the query string, falling back to
        # its declared default when absent.
        self.client_params = {
            name: caster(HttpRequest.GET.get(name, fallback))
            for name, caster, fallback in self.keys
        }
def convertToJSON(method):
    """Decorator: run *method* and serialize ``stream.data`` as a JSON response.

    Ready HttpResponse objects (e.g. a 404 built earlier) pass through
    untouched; ``None`` data becomes 404.  Pagination metadata present on the
    stream is exposed via the Total/Skip/Count response headers.
    """
    def wrapper(self, HttpRequest, **kwargs):
        stream = method(self, HttpRequest, **kwargs)
        if isinstance(stream.data, HttpResponse):
            return stream.data
        # BUG fix: the original used ``== None``; identity is the correct
        # (and safe) test for None.
        if stream.data is None:
            return HttpResponseNotFound()
        response = HttpResponse(
            json.dumps(
                stream.data,
                ensure_ascii = False,
                sort_keys = True
            ),
            content_type = 'application/json'
        )
        if hasattr(stream, 'total'):
            response['Total'] = stream.total
        if hasattr(stream, 'skip'):
            response['Skip'] = stream.skip
        if hasattr(stream, 'count'):
            response['Count'] = stream.count
        return response
    return wrapper
class Base(generic.View):
    """Generic read-only JSON API view over a Django model.

    Subclasses set ``model`` and ``scheme`` (the export schema); optional
    ``orders`` and per-URL ``dynamic_filters`` refine collection queries.
    The HTTP verbs are supplied by the Resource/Collection mixins.
    """
    model = None
    key = 'id'
    # data = None
    page_size = DEFAULT_PAGE_SIZE
    page_size_max = DEFAULT_PAGE_SIZE_MAX

    @classonlymethod
    def as_api(self, **kwargs):
        """Like ``as_view`` but accepts extra ``filters`` from urls.py.

        ``filters`` maps a queryset lookup to a ``(path_param, caster)``
        tuple; matching path params are applied in ``getFilters``.
        """
        self.dynamic_filters = {}
        if len(kwargs) > 0:
            if 'filters' in kwargs:
                # The view receives additional filters from urls.py (.as_api())
                filters = kwargs['filters']
                for key in filters:
                    value = filters[key]
                    if type(value) == tuple and len(value) == 2:
                        self.dynamic_filters.update({
                            key: value
                        })
        return self.as_view()

    available_filters = []
    dynamic_filters = {}

    def getFilters(self, stream):
        """Build the queryset filter kwargs for the current request."""
        filters = {}
        # Publication filtering is applied only when the model actually has
        # the corresponding field.
        try:
            field = self.model._meta.get_field('status')
            filters.update({
                'status': True
            })
        except FieldDoesNotExist:
            pass
        try:
            field = self.model._meta.get_field('pub_date')
            filters.update({
                'pub_date__lt': timezone.now()
            })
        except FieldDoesNotExist:
            pass
        if len(self.dynamic_filters) > 0:
            # Apply URL-configured filters whose path parameter is present.
            for key in self.dynamic_filters:
                param_key = self.dynamic_filters[key][0]
                param_class = self.dynamic_filters[key][1]
                if param_key in stream.params:
                    value = param_class(stream.params[param_key])
                    filters[key] = value
        return filters

    orders = []
    available_orders = []

    # Data access
    def getResource(self, HttpRequest, params):
        """Fetch a single object by ``self.key``; 404 response on miss."""
        stream = Stream(HttpRequest, params)
        input_key, field_key = self.getKeys(self.key)
        try:
            query = {
                field_key: stream.params[input_key]
            }
            stream.response = self.model.objects.get(**query)
        except self.model.DoesNotExist:
            stream.response = HttpResponseNotFound()
        return stream

    def getCollection(self, HttpRequest, params):
        """Fetch a filtered, ordered, paginated queryset."""
        stream = Stream(HttpRequest, params)
        stream.response = self.model.objects
        # Filtering
        filters = self.getFilters(stream)
        stream.response = stream.response.filter(**filters)
        # Ordering
        if (len(self.orders) > 0):
            stream.response = stream.response.order_by(*self.orders)
        # Pagination: clamp skip/count into sane bounds before slicing.
        stream.total = stream.response.count()
        stream.skip = stream.client_params['skip']
        stream.count = stream.client_params['count']
        if (stream.skip > stream.total):
            stream.skip = stream.total
        elif stream.skip < 0:
            stream.skip = 0
        if stream.count < 0:
            stream.count = 0
        elif stream.count > DEFAULT_PAGE_SIZE_MAX:
            stream.count = DEFAULT_PAGE_SIZE_MAX
        stream.response = stream.response[
            stream.skip : stream.skip + stream.count
        ]
        return stream

    def getKeys(self, string):
        """Split a ``"orig as view"`` key spec into ``(orig, view)``."""
        if ' as ' in string:
            orig, view = string.split(' as ')
        else:
            orig = view = string
        return orig, view

    # NOTE: is the prefix actually used anywhere?
    def export__resource(self, resource, schema = None, prefix = None):
        """Serialize one model instance according to ``schema``.

        Schema items are either dotted-path strings ("a.b as c") or
        ``(relation, subschema)`` tuples for related managers/models.
        """
        data = {}
        if type(schema) != tuple:
            schema = self.scheme

        def get_attr_by_path(resource, path):
            # Walk a dotted attribute path; dates/times become ISO strings
            # and bound methods are called without arguments.
            key = path.pop(0)
            if ismethod(resource):
                resource = resource()
            if (resource and hasattr(resource, key)):
                value = getattr(resource, key)
            else:
                value = None
            if len(path) == 0:
                if isinstance(value, datetime.date):
                    return(value.isoformat())
                if isinstance(value, datetime.time):
                    return(value.isoformat())
                if isinstance(value, datetime.datetime):
                    return(value.isoformat())
                if ismethod(value):
                    return value()
                return value
            else:
                return get_attr_by_path(value, path)

        for item in schema:
            if type(item) == str:
                orig, view = self.getKeys(item)
                if prefix:
                    orig = prefix + orig
                path = orig.split('.')
                data[view] = get_attr_by_path(resource, path)
            elif type(item) == tuple:
                orig, view = self.getKeys(item[0])
                keys = item[1]
                model = getattr(resource, orig)
                if isinstance(model, Manager):
                    resources = model.all()
                    if type(keys) == tuple:
                        data[view] = self.export(
                            resources,
                            schema = keys,
                        )
                    elif type(keys) == str:
                        data[view] = [ i[keys] for i in self.export(
                            resources,
                            schema = (keys, ),
                        )]
                    else:
                        print('*** Manager: Некорректный тип:', type(keys))
                elif isinstance(model, Model):
                    if type(keys) == str:
                        keys = (keys, )
                    if type(keys) == tuple:
                        data[view] = self.export(
                            model,
                            schema = keys,
                        )
                    else:
                        print('*** Model: Некорректный тип:', type(keys))
                else:
                    # BUG fix: the original printed the undefined name
                    # ``manager`` here, raising NameError on this branch.
                    print('*** Не Менеджер! ***', model)
            else:
                print('Что-то непонятное')
        return data

    # Convert data for output
    def export(self, response, schema = None):
        """Serialize a queryset (to a list) or a single instance (to a dict)."""
        if isinstance(response, QuerySet):
            if not self.scheme:
                return None
            data = []
            # FIXME: second-level querysets are NOT filtered here (the
            # status/pub_date filters only apply to the top-level query).
            for resource in response:
                item = self.export__resource(resource, schema)
                if item is not None:
                    data.append(item)
        elif isinstance(response, Model):
            if not self.scheme:
                return None
            data = self.export__resource(response, schema)
        else:
            return None
        return data

    def get_exported_resource(self, HttpRequest, params):
        """Fetch + serialize a single object; result stored on ``stream.data``."""
        stream = self.getResource(HttpRequest, params)
        stream.data = self.export(stream.response)
        return stream

    def get_exported_collection(self, HttpRequest, params):
        """Fetch + serialize a collection page; result stored on ``stream.data``."""
        stream = self.getCollection(HttpRequest, params)
        stream.data = self.export(stream.response)
        return stream
class ResourceMixin:
    # HTTP verb handlers for a single-object (resource) endpoint.
    # The bare triple-quoted strings below act as section labels
    # ("resource header" / "resource" / "modify" / "delete" in Russian).
    # def __init__(self):
    #     self.test = True
    '''Заголовок ресурса'''
    def head(self, HttpRequest, **kwargs):
        # Not implemented yet.
        pass
    '''Ресурс'''
    @convertToJSON
    def get(self, HttpRequest, **kwargs):
        # Fetch + serialize one object; convertToJSON builds the response.
        stream = self.get_exported_resource(HttpRequest, kwargs)
        return stream
    '''Изменение ресурса'''
    def post(self, HttpRequest, *args, **kwargs):
        # Not implemented yet.
        pass
    '''Удаление ресурса'''
    def delete(self, HttpRequest, **kwargs):
        # Not implemented yet.
        pass
class CollectionMixin:
    # HTTP verb handlers for a collection endpoint.  The bare triple-quoted
    # strings act as section labels ("collection header" / "collection" /
    # "new resource" in Russian).
    '''Заголовок коллекции'''
    def head(self, HttpRequest, **kwargs):
        # Not implemented yet.
        pass
    '''Коллекция'''
    @convertToJSON
    def get(self, HttpRequest, **kwargs):
        # Fetch + serialize a filtered, paginated page of objects.
        stream = self.get_exported_collection(HttpRequest, kwargs)
        return stream
    '''Новый ресурс в коллекции'''
    def post(self, HttpRequest, *args, **kwargs):
        # Not implemented yet.
        pass
from django.urls import path
class Scheme:
    # Placeholder: intended home for export-schema logic; unused so far.
    pass
class API():
    """Factory that wires a model's auto-generated Resource/Collection views."""
    model = None
    name = None
    scheme = []
    # filters = {}
    # order = []
    def __init__(self, Model):
        # The model is expected to expose a ``KK`` config object carrying
        # ``name_plural`` and ``scheme``.  (TODO confirm against the models.)
        self.model = Model
        self.name = Model.KK.name_plural
        self.scheme = Model.KK.scheme
        # Build concrete view classes bound to this model/scheme pair.
        class Mixin(Base):
            model = self.model
            scheme = self.scheme
        class ResourceView(Mixin, ResourceMixin): pass
        class CollectionView(Mixin, CollectionMixin):pass
        self.Mixin = Mixin;
        self.ResourceView = ResourceView;
        self.CollectionView = CollectionView;
    def getUrlPatterns(self):
        # URL patterns for "<plural>/<int:id>/" (resource) and
        # "<plural>/" (collection).
        patterns = []
        patterns.append(
            path(
                '{}/<int:id>/'.format(self.name),
                self.ResourceView.as_api()
            )
        )
        patterns.append(
            path(
                '{}/'.format(self.name),
                self.CollectionView.as_api()
            )
        )
        return patterns
| 12,302 | 3,388 |
import pexpect
import boardfarm.config as config
from boardfarm.lib.bft_pexpect_helper import bft_pexpect_helper
class AuthenticatedTelnetConnection:
    """Allow authenticated telnet sessions to be established with a \
    unit's serial ports by OpenGear server.
    If a board is connected serially to a OpenGear terminal server, this class can be used
    to connect to the board.
    """
    def __init__(self, device=None, conn_cmd=None, **kwargs):
        """Initialize the class instance to open a pexpect session.
        :param device: device to connect, defaults to None
        :type device: object
        :param conn_cmd: conn_cmd to connect to device, defaults to None
        :type conn_cmd: string
        :param ``**kwargs``: args to be used
        :type ``**kwargs``: dict
        """
        self.device = device
        self.conn_cmd = conn_cmd
        self.device.conn_cmd = conn_cmd
        if not config.ldap:
            raise Exception("Please, provide ldap credentials in env variables")
        # LDAP credentials are packed as "username;password".
        self.username, self.password = config.ldap.split(";")

    def connect(self):
        """Connect to the board/station using telnet.
        This method spawn a pexpect session with telnet command.
        The telnet port must be as per the ser2net configuration file in order to connect to
        serial ports of the board.
        :raises: Exception Board is in use (connection refused).
        """
        if "telnet" not in self.conn_cmd:
            raise Exception(
                "Telnet connection string is not found. Check inventory server or ams.json"
            )
        bft_pexpect_helper.spawn.__init__(
            self.device, command="/bin/bash", args=["-c", self.conn_cmd]
        )
        try:
            self.device.expect(["login:"])
            self.device.sendline(self.username)
            self.device.expect(["Password:"])
            # Temporarily disable echo so the password is not logged.
            self.device.setecho(False)
            self.device.sendline(self.password)
            self.device.setecho(True)
            self.device.expect(["OpenGear Serial Server"])
        except pexpect.EOF:
            # BUG fix: in the original this handler followed a blanket
            # ``except Exception: raise`` and was unreachable, since
            # pexpect.EOF subclasses Exception.
            raise Exception("Board is in use (connection refused).")

    def close(self):
        """Close the connection.

        BUG fix: the original called ``self.sendcontrol``/``super().close()``,
        which do not exist on this plain class; the telnet session lives on
        ``self.device`` (spawned in ``connect``), so act on the device.
        """
        try:
            # telnet escape character, then quit the client
            self.device.sendcontrol("]")
            self.device.sendline("q")
        finally:
            self.device.close()
| 2,393 | 644 |
import os
import re
def parse(data):
    """Yield one list of parsed cell values per <tr> row inside <tbody>.

    For each <td>: a non-footnote href target is emitted first (if any);
    then either the single-quoted fragment (with &nbsp; expanded) or the
    raw cell text.
    """
    body_start = data.find("<tbody>")
    body_end = data.find("</tbody>")
    table = data[body_start + 7:body_end]
    for row in re.finditer(r"<tr>.+?</tr>", table, re.DOTALL):
        cells = re.findall(r"<td>(.+?)</td>", row.group(0), re.DOTALL)
        values = []
        for cell in cells:
            link = re.search(r'href="(.+?)"', cell)
            if link is not None and link.group(1) != "#foot":
                values.append(link.group(1))
            pieces = cell.split("'")
            if len(pieces) >= 2:
                values.append(pieces[1].replace("&nbsp;", " "))
            else:
                values.append(cell)
        if values:
            yield values
def formatFeatures(data, baseURL):
    """Print the scraped feature table as a Python dict literal."""
    print("features = {")
    print(" # tag, friendly name, documentation URL")
    for link, tag, friendlyName in data:
        # 'cv01' in the spec stands for the whole cv01..cv99 range of
        # character-variant features.
        expanded = [f"cv{n:02d}" for n in range(1, 100)] if tag == 'cv01' else [tag]
        for entry in expanded:
            print(f" {entry!r}: ({friendlyName!r}, {baseURL+link!r}),")
    print("}")
def formatScripts(data):
    """Print the scraped script table as a Python dict literal.

    Rows sharing a tag are merged into one comma-joined friendly name;
    consumed duplicates are blanked out in *data* in place.
    """
    print("scripts = {")
    print(" # tag, friendly name")
    merged = {}
    for idx, (label, code) in enumerate(data):
        if code in merged:
            merged[code] = merged[code] + ", " + label
            data[idx] = (None, None)  # mark duplicate row as consumed
        else:
            merged[code] = label
    for _, code in data:
        if code is None:
            continue
        print(f" {code!r}: {merged[code]!r},")
    print("}")
def formatLanguages(data):
    """Print the scraped language-system table as a Python dict literal.

    Tags are space-padded to 4 characters; optional ISO 639 codes become
    extra tuple members after the friendly name.
    """
    print("languages = {")
    print(" # tag, friendly name, ISO 639 IDs (if applicable)")
    for friendlyName, *fields in data:
        tag = fields[0].ljust(4)
        assert len(tag) == 4, tag
        if len(fields) > 1:
            assert len(fields) == 2
            isoCodes = [code.strip() for code in fields[1].split(",")]
        else:
            isoCodes = []
        print(f" {tag!r}: {(friendlyName,) + tuple(isoCodes)},")
    print("}")
# https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist
# https://docs.microsoft.com/en-us/typography/opentype/spec/scripttags
# https://docs.microsoft.com/en-us/typography/opentype/spec/languagetags
if __name__ == "__main__":
    import sys
    # NOTE(review): ``time`` is imported but never used below.
    import time
    baseURL = "https://docs.microsoft.com/en-us/typography/opentype/spec/"
    if len(sys.argv) > 1:
        # Offline mode: a single pre-downloaded HTML page given on the CLI.
        with open(sys.argv[1]) as f:
            html = f.read()
        pages = [html]
    else:
        # Online mode: scrape all three spec pages and emit a module header.
        import urllib.request
        pages = []
        print(f"# Generated by {os.path.basename(__file__)}")
        print("# Scraped from:")
        for page in ["featurelist", "scripttags", "languagetags"]:
            url = baseURL + page
            print(f"# {url}")
            with urllib.request.urlopen(url) as fp:
                html = fp.read().decode("utf-8", errors="replace")
            pages.append(html)
        print()
        print()
        print("__all__ = ['features', 'scripts', 'languages']")
        print()
    # Dispatch each page to its formatter based on the <title> contents.
    for html in pages:
        print()
        parsed = list(parse(html))
        if "<title>Registered features" in html:
            formatFeatures(parsed, baseURL)
        elif "<title>Script tags" in html:
            formatScripts(parsed)
        elif "<title>Language system tags" in html:
            formatLanguages(parsed)
        else:
            assert 0, "huh."
| 3,616 | 1,159 |
from melody_feature import *
if __name__ == '__main__':
    # Python 2 script (print statement): extract per-key note vectors from
    # the MIDI file and write each to its own text file.
    file = 'alphaville-forever_young.mid'
    for k,note in note_from_midi_test(file):
        print k
        name = 'forever_youngy' + str(k)+'.txt'
        vector_to_file(note,name)
import pytest
from fastapi import Request
from pytest_mock import MockerFixture
from starlette.datastructures import Headers
from fastapi_cloud_logging.request_logging_middleware import (
_FASTAPI_REQUEST_CONTEXT,
RequestLoggingMiddleware,
)
@pytest.fixture
def middleware(mocker: MockerFixture) -> RequestLoggingMiddleware:
    # Middleware instance with mocked ASGI app/dispatch; no server needed.
    return RequestLoggingMiddleware(app=mocker.Mock(), dispatch=mocker.Mock())
def test__set_request_context(middleware: RequestLoggingMiddleware):
    # Minimal ASGI scope — only the keys Request needs for parsing here.
    request = Request(
        {
            "type": "http",
            "method": "GET",
            "root_path": "https://example.com/",
            "path": "",
            "headers": Headers({}).raw,
            "client": ("127.0.0.1", 80),
        }
    )
    middleware.set_request_context(request=request)
    # The middleware stores the parsed context in a module-level ContextVar.
    request_context = _FASTAPI_REQUEST_CONTEXT.get()
    assert request_context is not None
    assert request_context.protocol == "https"
    assert request_context.request_method == "GET"
    assert request_context.remote_ip == "127.0.0.1"
# Each case pairs a raw Request with the field values _parse_request should
# extract.  Note: X-Forwarded-For takes precedence over the direct client
# address, and X-Cloud-Trace-Context is surfaced as cloud_trace_content.
@pytest.mark.parametrize(
    (
        "example_request, http_method, protocol, ip_address, content_length,"
        "url, user_agent, referer, trace_content"
    ),
    [
        (
            Request(
                {
                    "type": "http",
                    "method": "GET",
                    "root_path": "https://example.com/",
                    "path": "",
                    "headers": Headers({"X-Forwarded-For": "192.168.0.1"}).raw,
                    "client": ("127.0.0.1", 80),
                }
            ),
            "GET",
            "https",
            "192.168.0.1",
            None,
            "https://example.com/",
            None,
            None,
            None,
        ),
        (
            Request(
                {
                    "type": "http",
                    "method": "POST",
                    "root_path": "https://example.com/",
                    "path": "",
                    "headers": Headers(
                        {"User-Agent": "curl 7.79.1", "X-Forwarded-For": "192.168.0.1"}
                    ).raw,
                    "client": ("127.0.0.1", 80),
                }
            ),
            "POST",
            "https",
            "192.168.0.1",
            None,
            "https://example.com/",
            "curl 7.79.1",
            None,
            None,
        ),
        (
            Request(
                {
                    "type": "http",
                    "method": "POST",
                    "root_path": "https://example.com/",
                    "path": "",
                    "headers": Headers(
                        {
                            "User-Agent": "curl 7.79.1",
                            "X-Forwarded-For": "192.168.0.1",
                            "X-Cloud-Trace-Context": "105445aa7843bc8bf206b12000100000/1;o=1",
                        }
                    ).raw,
                    "client": ("127.0.0.1", 80),
                }
            ),
            "POST",
            "https",
            "192.168.0.1",
            None,
            "https://example.com/",
            "curl 7.79.1",
            None,
            "105445aa7843bc8bf206b12000100000/1;o=1",
        ),
    ],
)
def test__parse_request(
    middleware: RequestLoggingMiddleware,
    example_request: Request,
    http_method,
    protocol,
    ip_address,
    content_length,
    url,
    user_agent,
    referer,
    trace_content,
):
    # One assertion per extracted field of the parsed request context.
    request_context = middleware._parse_request(example_request)
    assert request_context.request_method == http_method
    assert request_context.protocol == protocol
    assert request_context.content_length == content_length
    assert request_context.request_url == url
    assert request_context.remote_ip == ip_address
    assert request_context.user_agent == user_agent
    assert request_context.referer == referer
    assert request_context.cloud_trace_content == trace_content
| 4,000 | 1,231 |
import MailSpamFilter
class NaiveBayesClassifier:
    """Naive Bayes spam classifier built on the MailSpamFilter helpers."""
    def __init__(self, k=0.5):
        # k is the smoothing pseudocount used when estimating word probabilities.
        self.k = k
        self.words_probs = []
    def train(self, training_set):
        """Fit word probabilities from ``(message, is_spam)`` pairs."""
        # count spam and non-spam messages
        num_spams = len(
            [is_spam for message, is_spam in training_set if is_spam])
        num_non_spams = len(training_set) - num_spams
        # run training data through our "pipeline"
        word_counts = MailSpamFilter.count_words(training_set)
        self.words_probs = MailSpamFilter.word_probabilities(
            word_counts, num_spams, num_non_spams, self.k)
    def classify(self, message):
        """Return the estimated spam probability of *message*.

        BUG fix: the original called the bare name ``spam_probability``
        (NameError — only the MailSpamFilter module is imported); the
        helper lives in MailSpamFilter like count_words/word_probabilities.
        """
        return MailSpamFilter.spam_probability(self.words_probs, message)
| 690 | 231 |
#!/usr/bin/env python
import psutil
import sys
import time
# Return Virtual Memory Usage as a JSON
def get_memory_percentage():
    # NOTE(review): despite the name and the comment above, this loops forever
    # and prints the percentage once per second as plain text, not JSON —
    # presumably consumed line-by-line by a parent process via stdout.
    while True:
        r = psutil.virtual_memory().percent
        print(r)
        # Flush so a piping consumer sees each sample immediately.
        sys.stdout.flush()
        time.sleep(1)
if __name__ == '__main__':
    get_memory_percentage()
| 311 | 99 |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()

# Auto-generated snapshot data (snapshottest).  Regenerate with the test
# runner's snapshot-update mode rather than editing these literals by hand.
snapshots["test_send_sms 1"] = {
    "errors": [],
    "messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
    "warnings": [],
}
snapshots["test_webhook_delivery_log 1"] = {
    "errors": [],
    "messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
    "warnings": [],
}
snapshots["test_webhook_delivery_log 2"] = {
    "errors": [],
    "messages": {
        "+358461231231": {
            "billingref": "Palvelutarjotin",
            "destination": "+358461231231",
            "sender": "hel.fi",
            "smscount": "1",
            "status": "DELIVERED",
            "statustime": "2020-07-21T09:18:00Z",
        }
    },
    "warnings": [],
}
snapshots["test_get_delivery_log 1"] = {
    "errors": [],
    "messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
    "warnings": [],
}
| 1,055 | 470 |
# -*- coding: utf-8 -*-
#
# Developed by Alex Jercan <jercan_alex27@yahoo.com>
#
# References:
# - https://github.com/XinJCheng/CSPN/blob/b3e487bdcdcd8a63333656e69b3268698e543181/cspn_pytorch/utils.py#L19
# - https://web.eecs.umich.edu/~fouhey/2016/evalSN/evalSN.html
#
from math import radians
import torch
import torch.nn.functional as F
class MetricFunction():
    """Running accumulator for surface-normal angular-error metrics."""
    def __init__(self, batch_size) -> None:
        self.batch_size = batch_size
        self.total_size = 0   # samples folded in so far
        self.error_sum = {}   # per-metric running sums (mutated by avg_error)
        self.error_avg = {}   # per-metric running averages
    def evaluate(self, predictions, targets):
        """Fold one batch of (pred, gt) normals into the running averages."""
        normal_p = predictions
        normal_gt = targets
        error_val = evaluate_error_normal(normal_p, normal_gt)
        self.total_size += self.batch_size
        # avg_error also updates self.error_sum in place.
        self.error_avg = avg_error(self.error_sum, error_val, self.total_size, self.batch_size)
        return self.error_avg
    def show(self):
        """Format the current running averages as a printable summary."""
        error = self.error_avg
        format_str = ('======NORMALS=======\nMSE=%.4f\tRMSE=%.4f\tMAE=%.4f\tMME=%.4f\nTANGLE11.25=%.4f\tTANGLE22.5=%.4f\tTANGLE30.0=%.4f')
        return format_str % (error['N_MSE'], error['N_RMSE'], error['N_MAE'], error['N_MME'], \
            error['N_TANGLE11.25'], error['N_TANGLE22.5'], error['N_TANGLE30.0'])
def evaluate_error_normal(pred_normal, gt_normal):
    """Angular-error metrics between predicted and ground-truth normals.

    Both inputs are L2-normalized along dim 1; the per-sample angle comes
    from the clamped dot product.  Returns MSE/RMSE/MAE/median plus the
    fraction of angles within 11.25, 22.5 and 30 degrees.
    """
    eps = 1e-7
    unit_pred = F.normalize(pred_normal, p=2, dim=1)
    unit_gt = F.normalize(gt_normal, p=2, dim=1)
    # Clamp away from +/-1 so acos stays finite in the backward pass.
    cos_sim = torch.clamp((unit_pred * unit_gt).sum(dim=1), -1 + eps, 1 - eps)
    angles = torch.acos(cos_sim)
    metrics = {}
    metrics['N_MSE'] = torch.mean(angles * angles)
    metrics['N_RMSE'] = torch.sqrt(metrics['N_MSE'])
    metrics['N_MAE'] = torch.mean(angles)
    metrics['N_MME'] = torch.median(angles)
    for threshold in (11.25, 22.5, 30.0):
        metrics[f'N_TANGLE{threshold}'] = torch.mean((angles <= radians(threshold)).float())
    return metrics
def avg_error(error_sum, error_val, total_size, batch_size):
    """Fold one batch of metrics into *error_sum* and return running averages.

    *error_sum* is mutated in place (MetricFunction relies on this); each
    batch value is weighted by *batch_size* before averaging over
    *total_size* samples.
    """
    running = {}
    for metric, batch_value in error_val.items():
        error_sum[metric] = error_sum.get(metric, 0) + batch_value * batch_size
        running[metric] = error_sum[metric] / float(total_size)
    return running
def print_single_error(epoch, loss, error):
    """Print a one-epoch evaluation summary (header, epoch/loss, error text)."""
    format_str = ('%s\nEpoch: %d, loss=%s\n%s\n')
    message = format_str % ('eval_avg_error', epoch, loss, error)
    print(message)
import numpy
import ielearn
from ielearn import extract, predict, util
def test_123():
    # Smoke test: passes as long as the ielearn imports above succeed.
    assert True
| 102 | 32 |
# No shebang line, this module is meant to be imported
#
# Copyright 2015 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TaskCount Model
====================
Model describing the number of tasks in a given queue in a given state at a
point in time
"""
from datetime import datetime
from pyfarm.master.application import db
from pyfarm.master.config import config
from pyfarm.models.core.types import id_column
class TaskCount(db.Model):
    """Counts of tasks per queue and state at a single point in time."""
    # Rows live in the separate "statistics" database bind.
    __bind_key__ = 'statistics'
    __tablename__ = config.get("table_statistics_task_count")
    id = id_column(db.Integer)
    counted_time = db.Column(
        db.DateTime,
        nullable=False,
        default=datetime.utcnow,
        doc="The point in time at which these counts were done")
    # No foreign key reference, because this table is stored in a separate db
    # Code reading it will have to check for referential integrity manually.
    job_queue_id = db.Column(
        db.Integer,
        nullable=True,
        doc="ID of the jobqueue these stats refer to")
    total_queued = db.Column(
        db.Integer,
        nullable=False,
        doc="Number of queued tasks at `counted_time`")
    total_running = db.Column(
        db.Integer,
        nullable=False,
        doc="Number of running tasks at `counted_time`")
    total_done = db.Column(
        db.Integer,
        nullable=False,
        doc="Number of done tasks at `counted_time`")
    total_failed = db.Column(
        db.Integer,
        nullable=False,
        doc="Number of failed tasks at `counted_time`")
| 2,088 | 613 |
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import seaborn as sns
#from sklearn.svm import SVC
#from sklearn.model_selection import KFold
from sklearn import preprocessing
import matplotlib.pyplot as plt
data=pd.read_csv('android.csv')
print(data.shape)
# Shuffle rows so any class ordering in the CSV does not leak into the split.
data = data.sample(frac=1).reset_index(drop=True)
print(data.head())
import seaborn as sns
sns.countplot(x='malware',data=data)
#Over sampling
target_count = data.malware.value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
count_class_0, count_class_1 = data.malware.value_counts()
df_class_0 = data[data['malware'] == 0]
df_class_1 = data[data['malware'] == 1]
# Random over-sampling: duplicate minority-class rows up to the majority size.
df_class_1_over = df_class_1.sample(count_class_0, replace=True)
df_test_over = pd.concat([df_class_0, df_class_1_over], axis=0)
print(df_test_over.shape)
sns.countplot(x='malware',data=df_test_over)
X=df_test_over.iloc[:,df_test_over.columns !='malware']
Y=df_test_over.iloc[:,df_test_over.columns =="malware"]
print(X.head())
print(Y.head())
from sklearn.utils import shuffle
X, Y=shuffle(X, Y)
print(X.head())
# Drop the app name column — an identifier, not a feature.
X=X.drop(columns='name')
print(X.head())
print(Y.head())
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Univariate chi2 scores for the 10 strongest features.
bestfeatures = SelectKBest(score_func=chi2, k=10)
fit = bestfeatures.fit(X,Y)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score']
featureScores.nlargest(10,'Score')
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib.pyplot as plt
model = ExtraTreesClassifier()
model.fit(X,Y)
print(model.feature_importances_) #use inbuilt class feature_importances of tree based classifiers
#plot graph of feature importances for better visualization
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(10).plot(kind='barh')
plt.show()
from sklearn.model_selection import train_test_split
# NOTE(review): over-sampling happens BEFORE the train/test split, so
# duplicated malware rows can appear in both sets — accuracy is inflated.
X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size = 0.2, random_state=0)
print(X_train.shape)
print(X_train.head())
print(y_train.head())
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier()
tree.fit(X_train,y_train)
# NOTE(review): the bare DecisionTreeClassifier() below creates and discards
# an instance — it has no effect.
DecisionTreeClassifier()
y_pred = tree.predict(X_test)
print(y_pred)
model2=metrics.accuracy_score(y_test,y_pred)
print(model2)
cnf_matrix = confusion_matrix(y_test,y_pred)
labels = [0,1]
sns.heatmap(cnf_matrix, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.show()
"""
@brief test log(time=2s)
"""
import sys
import os
import unittest
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.ipythonhelper import AutoCompletion, AutoCompletionFile, MagicCommandParser, MagicClassWithHelpers, open_html_form
class TestAutoCompletion(ExtTestCase):
    """Unit tests for the IPython helper classes (completion tree, magic
    command parser, magic class helpers and HTML form rendering)."""

    def test_completion(self):
        tree = AutoCompletion()
        node = tree._add("name", "TestAutoCompletion")
        node._add("method", "test_completion")
        node._add("method2", "test_completion")
        node = tree._add("name2", "TestAutoCompletion2")
        node._add("method3", "test_completion")
        rendered = str(tree)
        self.assertIn(" | |- method2", rendered)
        self.assertEqual(len(tree), 6)

    def test_completion_file(self):
        folder = os.path.abspath(os.path.split(__file__)[0])
        folder = os.path.join(folder, "..", "..", "src")
        completion = AutoCompletionFile(folder)
        self.assertGreater(len(completion), 30)

    def test_html_form(self):
        params = {"parA": "valueA", "parB": "valueB"}
        raw = open_html_form(params, 'unit_test_title', 'jjj', raw=True)
        self.assertGreater(len(raw), 1)

    def test_eval(self):
        context = {"x": 3, "y": 4}
        parser = MagicCommandParser(prog="test_command")
        self.assertEqual(parser.eval("x+y", context), 7)

    def test_parse(self):
        parser = MagicCommandParser(
            prog="test_command",
            description='display the first lines of a text file')
        parser.add_argument('f', type=str, help='filename')
        parser.add_argument(
            '-n', '--n', type=str, default=10,
            help='number of lines to display')
        parser.add_argument(
            '-e', '--encoding', default="utf8",
            help='file encoding')
        # 'x+y' should be evaluated against the supplied context.
        res = parser.parse_cmd('this.py -n x+y', context={"x": 3, "y": 4})
        self.assertNotEmpty(res)
        self.assertIn("usage: test_command", parser.format_help())
        self.assertEqual(res.n, 7)

    def test_class_magic(self):
        magic = MagicClassWithHelpers()
        self.assertEmpty(magic.Context)

        def call_MagicCommandParser():
            return MagicCommandParser(prog="parser_unittest")

        pa = magic.get_parser(call_MagicCommandParser, name="parser_unittest")
        pa.add_argument('f', type=str, help='filename')
        pa.add_argument('-n', '--n', type=str, default=10,
                        help='number of lines to display')
        pa.add_argument('-e', '--encoding', default="utf8",
                        help='file encoding')
        self.assertNotEmpty(pa)
        magic.add_context({"x": 3, "y": 4})
        self.assertEqual(magic.Context, {"x": 3, "y": 4})
        res = magic.get_args('this.py -n x+y', pa)
        if res.n != 7:
            raise Exception("res.n == {0}\nres={1}".format(res.n, res))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 3,147 | 1,013 |
from .time_execution import *
| 30 | 9 |
import unittest
from codalab.lib.bundle_cli import BundleCLI
class BundleCliTest(unittest.TestCase):
    """Tests for BundleCLI.collapse_bare_command argument quoting."""

    def setUp(self) -> None:
        self.bundle_cli = BundleCLI

    def tearDown(self) -> None:
        del self.bundle_cli

    def test_collapse_bare_command_empty_args(self):
        # An empty trailing argument must come out shell-quoted.
        collapsed = self.bundle_cli.collapse_bare_command(
            ['cl', 'run', '---', 'echo', ''])
        self.assertEqual(collapsed, ['cl', 'run', "echo ''"])

    def test_collapse_bare_command_non_empty_str_args(self):
        # Plain words need no quoting.
        collapsed = self.bundle_cli.collapse_bare_command(
            ['cl', 'run', '---', 'echo', 'hello'])
        self.assertEqual(collapsed, ['cl', 'run', "echo hello"])

    def test_collapse_bare_command_non_empty_str_args_with_escaped_char(self):
        # Arguments containing shell-special characters get quoted.
        collapsed = self.bundle_cli.collapse_bare_command(
            ['cl', 'run', '---', 'echo', 'hello world!'])
        self.assertEqual(collapsed, ['cl', 'run', "echo 'hello world!'"])
| 1,129 | 363 |
# import to namespace
from gevent import select, socket # noqa
from gevent.queue import Empty, Full, Queue # noqa
| 116 | 36 |
#
# PySNMP MIB module ZYXEL-CLUSTER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-CLUSTER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:43:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# The names used below (``mibBuilder`` etc.) are injected by the pysnmp/pysmi
# MIB loader when this generated module is executed; they are not imported.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Unsigned32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, TimeTicks, Gauge32, NotificationType, Integer32, IpAddress, MibIdentifier, ModuleIdentity, iso, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Unsigned32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "TimeTicks", "Gauge32", "NotificationType", "Integer32", "IpAddress", "MibIdentifier", "ModuleIdentity", "iso", "Counter32")
DisplayString, TextualConvention, MacAddress, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "MacAddress", "RowStatus")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
# Module identity and the two top-level subtrees (setup and status).
zyxelCluster = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14))
if mibBuilder.loadTexts: zyxelCluster.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelCluster.setOrganization('Enterprise Solution ZyXEL')
zyxelClusterSetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1))
zyxelClusterStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2))
# Cluster manager configuration: scalar limit plus a table indexed by VLAN id.
zyxelClusterManager = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1))
zyClusterManagerMaxNumberOfManagers = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterManagerMaxNumberOfManagers.setStatus('current')
zyxelClusterManagerTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2), )
if mibBuilder.loadTexts: zyxelClusterManagerTable.setStatus('current')
zyxelClusterManagerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterManagerVid"))
if mibBuilder.loadTexts: zyxelClusterManagerEntry.setStatus('current')
zyClusterManagerVid = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: zyClusterManagerVid.setStatus('current')
zyClusterManagerName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyClusterManagerName.setStatus('current')
zyClusterManagerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: zyClusterManagerRowStatus.setStatus('current')
# Cluster member configuration: scalar limit plus a table indexed by MAC.
zyxelClusterMembers = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2))
zyClusterMemberMaxNumberOfMembers = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterMemberMaxNumberOfMembers.setStatus('current')
zyxelClusterMemberTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2), )
if mibBuilder.loadTexts: zyxelClusterMemberTable.setStatus('current')
zyxelClusterMemberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterMemberMacAddress"))
if mibBuilder.loadTexts: zyxelClusterMemberEntry.setStatus('current')
zyClusterMemberMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 1), MacAddress())
if mibBuilder.loadTexts: zyClusterMemberMacAddress.setStatus('current')
zyClusterMemberName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterMemberName.setStatus('current')
zyClusterMemberModel = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterMemberModel.setStatus('current')
zyClusterMemberPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyClusterMemberPassword.setStatus('current')
zyClusterMemberRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 1, 2, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: zyClusterMemberRowStatus.setStatus('current')
# Read-only status subtree: candidate devices, current role, manager info
# and per-member status.
zyxelClusterCandidate = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1))
zyxelClusterCandidateTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1), )
if mibBuilder.loadTexts: zyxelClusterCandidateTable.setStatus('current')
zyxelClusterCandidateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterCandidateMacAddress"))
if mibBuilder.loadTexts: zyxelClusterCandidateEntry.setStatus('current')
zyClusterCandidateMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: zyClusterCandidateMacAddress.setStatus('current')
zyClusterCandidateName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterCandidateName.setStatus('current')
zyClusterCandidateModel = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterCandidateModel.setStatus('current')
zyClusterRole = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("none", 0), ("manager", 1), ("member", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterRole.setStatus('current')
zyClusterInfoManager = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoManager.setStatus('current')
zyxelClusterInfoMemberTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4), )
if mibBuilder.loadTexts: zyxelClusterInfoMemberTable.setStatus('current')
zyxelClusterInfoMemberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1), ).setIndexNames((0, "ZYXEL-CLUSTER-MIB", "zyClusterInfoMemberMacAddress"))
if mibBuilder.loadTexts: zyxelClusterInfoMemberEntry.setStatus('current')
zyClusterInfoMemberMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 1), MacAddress())
if mibBuilder.loadTexts: zyClusterInfoMemberMacAddress.setStatus('current')
zyClusterInfoMemberName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoMemberName.setStatus('current')
zyClusterInfoMemberModel = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoMemberModel.setStatus('current')
zyClusterInfoMemberStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 14, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("error", 0), ("online", 1), ("offline", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyClusterInfoMemberStatus.setStatus('current')
# Export every defined object so other MIB modules can import them by name.
mibBuilder.exportSymbols("ZYXEL-CLUSTER-MIB", zyClusterManagerMaxNumberOfManagers=zyClusterManagerMaxNumberOfManagers, zyxelClusterCandidateTable=zyxelClusterCandidateTable, zyxelClusterInfoMemberEntry=zyxelClusterInfoMemberEntry, zyClusterMemberModel=zyClusterMemberModel, zyxelClusterMemberEntry=zyxelClusterMemberEntry, zyClusterManagerVid=zyClusterManagerVid, zyClusterCandidateModel=zyClusterCandidateModel, zyClusterMemberRowStatus=zyClusterMemberRowStatus, zyxelClusterStatus=zyxelClusterStatus, zyClusterMemberMaxNumberOfMembers=zyClusterMemberMaxNumberOfMembers, zyClusterManagerName=zyClusterManagerName, zyxelClusterSetup=zyxelClusterSetup, zyClusterMemberPassword=zyClusterMemberPassword, zyxelClusterMembers=zyxelClusterMembers, zyClusterMemberMacAddress=zyClusterMemberMacAddress, zyClusterInfoManager=zyClusterInfoManager, zyClusterInfoMemberName=zyClusterInfoMemberName, zyClusterInfoMemberStatus=zyClusterInfoMemberStatus, zyClusterCandidateMacAddress=zyClusterCandidateMacAddress, zyClusterRole=zyClusterRole, zyxelClusterManagerTable=zyxelClusterManagerTable, zyxelClusterManager=zyxelClusterManager, zyClusterManagerRowStatus=zyClusterManagerRowStatus, zyClusterInfoMemberMacAddress=zyClusterInfoMemberMacAddress, zyxelCluster=zyxelCluster, zyClusterCandidateName=zyClusterCandidateName, PYSNMP_MODULE_ID=zyxelCluster, zyxelClusterCandidateEntry=zyxelClusterCandidateEntry, zyxelClusterCandidate=zyxelClusterCandidate, zyxelClusterInfoMemberTable=zyxelClusterInfoMemberTable, zyClusterMemberName=zyClusterMemberName, zyxelClusterManagerEntry=zyxelClusterManagerEntry, zyxelClusterMemberTable=zyxelClusterMemberTable, zyClusterInfoMemberModel=zyClusterInfoMemberModel)
| 9,563 | 4,228 |
# Number of parallel workers; consumers of this constant are not visible in
# this chunk -- presumably used for multiprocessing.  TODO confirm.
NUM_CORES = 8
# Gzipped pickle listing the devices considered valid.
PATH_VALID_DEVICES = '/path/to/valid.pkl.gz'
# Directory holding the per-device data files.
PATH_DEVICES_DIR = '/path/to/devices/'
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Entry point: launch the "dmzj" spider, equivalent to running
# `scrapy crawl dmzj` on the command line.
from scrapy import cmdline
cmdline.execute("scrapy crawl dmzj".split())
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
"""
Вывод графиков.
"""
def plt2d(data, types):
    """Draw a single 2-D figure combining an arbitrary number of tables.

    Parameters:
        data - list of pandas.DataFrame tables; each table must have two
            columns (the X axis and the Y axis).
        types - list of plot kinds, one per table: "plot" draws points
            joined by straight lines, "scatter" draws unconnected points.
    """
    # All tables share one set of axes.
    axes = plt.figure().gca()
    for idx, kind in enumerate(types):
        table = data[idx]
        columns = table.keys()
        if kind == "plot":
            axes.plot(table[columns[0]], table[columns[1]])
        if kind == "scatter":
            axes.scatter(table[columns[0]], table[columns[1]])
    plt.show()
def plt3d(data, types):
    """Draw a single 3-D figure combining an arbitrary number of tables.

    Parameters:
        data - list of pandas.DataFrame tables; each table must have three
            columns (the X, Y and Z axes).
        types - list of plot kinds, one per table: "plot" draws points
            joined by straight lines, "scatter" draws unconnected points.
    """
    # BUG FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4
    # and removed in 3.6; add_subplot(projection='3d') is the supported way
    # to create 3-D axes and behaves identically here.
    ax = plt.figure().add_subplot(projection='3d')
    # Draw each table according to its requested plot kind.
    for i in range(len(types)):
        element = data[i]
        keys = element.keys()
        if types[i] == "plot":
            ax.plot(element[keys[0]], element[keys[1]], element[keys[2]])
        if types[i] == "scatter":
            ax.scatter(element[keys[0]], element[keys[1]], element[keys[2]])
    plt.show()
| 1,752 | 638 |
# -*- coding=utf-8 -*-
import pydicom
import os
import numpy
from os.path import splitext
import PIL.Image as Image
def getfile(file):
    # Read a DICOM file and return its pixel array rescaled with the
    # slope/intercept stored in the file (i.e. converted to real CT values).
    dcm = pydicom.dcmread(file)
    img2 = dcm.pixel_array * dcm.RescaleSlope + dcm.RescaleIntercept
    return img2
def get_window_size(window_type):
    """Return the (center, width) CT display window for *window_type*.

    Supported types: 'lung' (lung window) and 'Mediastinal' (mediastinal
    window).  Raises ValueError for any other value.
    """
    if window_type == 'lung':  # lung window
        center = -600
        width = 1200
    elif window_type == 'Mediastinal':  # mediastinal window
        center = 40
        width = 400
    else:
        # BUG FIX: an unknown type previously fell through and raised a
        # confusing UnboundLocalError on the return statement.
        raise ValueError('unknown window type: %r' % (window_type,))
    return center, width
#调整CT图像的窗宽窗位
def setDicomWinWidthWinCenter(img_data, window_type):
    """Apply a CT display window to *img_data* in place and return it.

    Pixels are mapped linearly so the window [center - width/2,
    center + width/2] covers 0..255; values outside are clamped.  As in the
    original loop version, the input array is modified in place and the
    result is stored in the array's own dtype.
    """
    center, width = get_window_size(window_type)
    min_value = (2 * center - width) / 2.0 + 0.5
    max_value = (2 * center + width) / 2.0 + 0.5
    scale = 255.0 / (max_value - min_value)
    img_temp = img_data
    img_temp.flags.writeable = True
    # Vectorized replacement for the old per-pixel double Python loop
    # (O(rows*cols) interpreter iterations).  numpy.trunc matches the old
    # int() cast, which rounds toward zero.
    img_temp[:] = numpy.trunc((img_temp - min_value) * scale)
    # Clamp to the displayable 0..255 range, exactly as before.
    img_temp[img_temp < 0] = 0
    img_temp[img_temp > 255] = 255
    return img_temp
# Convert every DICOM file under pathin to an 8-bit grayscale PNG in pathout,
# applying the lung display window.
pathin = 'dcmin/'
pathout = 'dcmout/'
# NOTE(review): files found in subdirectories are joined against pathin
# directly (root is ignored) -- verify the input tree is flat.
for root, dirs, files in os.walk(pathin):
    for i in range(len(files)):
        filename = files[i]
        im = getfile(pathin + filename)
        im1 = setDicomWinWidthWinCenter(im, 'lung')
        dcm_img = Image.fromarray(im1)
        # Force 8-bit grayscale before saving as PNG.
        dcm_img = dcm_img.convert('L')
        output = splitext(files[i])[0]+"." +"png"
        dcm_img.save(pathout + output)
| 1,479 | 606 |
from setuptools import setup
# Packaging configuration for "prepack", an Excel/CSV data-preparation
# library; metadata only, no runtime behavior.
setup(name='prepack',
      version='0.4.2',
      description='Python excel based data preparation library',
      long_description="Library for preparing data for analysis. "
                       "Allows you to load and easily filter many same structure csv or xls, xlsx files. "
                       "Allows matching tables by incomplete row matching over the shortest Levenshtein "
                       "distance, just like Pandas df.merge()",
      url='http://github.com/legale/prepack',
      author='rumi',
      author_email='legale.legale@gmail.com',
      license='MIT',
      packages=['prepack'],
      zip_safe=False,
      install_requires=['numpy','pandas','python-levenshtein','xlrd'],
      keywords = ['xls', 'excel', 'parser', 'pandas','data preparation'],
      classifiers=[
          'Operating System :: OS Independent',
          'Development Status :: 3 - Alpha',
          # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
          'Intended Audience :: Developers',  # Define that your audience are developers
          'Intended Audience :: End Users/Desktop',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'License :: OSI Approved :: MIT License',  # Again, pick a license
          'Programming Language :: Python :: 3',
      ],
      )
class AnsiText(object):
    """A piece of text that renders itself with ANSI color/style escapes.

    ``repr`` yields the text wrapped in an SGR escape sequence chosen via
    the ``color``, ``bold`` and ``underline`` keyword arguments.  Setting
    ``ENABLE_COLOR`` to False (on the class or an instance) disables the
    escapes and yields the bare text.
    """
    ENABLE_COLOR = True
    COLORS = {'black': 30,
              'red': 31,
              'green': 32,
              'yellow': 33,
              'blue': 34,
              'magenta': 35,
              'cyan': 36,
              'white': 37}
    BOLD = 1
    UNDERLINE = 4

    def __init__(self, text, **kwargs):
        """Store *text* and validate the optional styling keywords."""
        self.text = text
        self.color = None
        self.bold = False
        self.underline = False
        if 'color' in kwargs:
            requested = kwargs['color']
            if requested not in self.COLORS:
                raise IndexError('unknown color %s' % requested)
            self.color = requested
        # Both boolean flags share identical validation.
        for flag in ('bold', 'underline'):
            if flag in kwargs:
                flag_value = kwargs[flag]
                if not isinstance(flag_value, bool):
                    raise TypeError('%s must be a bool' % flag)
                setattr(self, flag, flag_value)

    def __repr__(self):
        """Render the text, wrapped in ANSI escapes unless disabled."""
        rendered = str(self.text)
        if not self.ENABLE_COLOR:
            return rendered
        # SGR codes are emitted in a fixed order: bold, color, underline.
        codes = []
        if self.bold:
            codes.append(self.BOLD)
        if self.color is not None:
            assert self.color in self.COLORS
            codes.append(self.COLORS[self.color])
        if self.underline:
            codes.append(self.UNDERLINE)
        escape = '\x1b['
        return ''.join([escape, ';'.join(str(code) for code in codes), 'm',
                        rendered, escape, '0m'])
| 1,600 | 479 |
class Error(Exception):
    """Base class for all errors raised by this package."""
    pass
class BackendError(Error):
    """Error raised while talking to a specific backend server.

    Attributes:
        message: human-readable description of the failure.
        server: the backend server object; must expose ``hostname`` and
            ``port``.
    """
    def __init__(self, message, server):
        self.message = message
        self.server = server

    def __str__(self):
        # BUG FIX: the original applied the '%' operator to a '{}'-style
        # template, which raises TypeError whenever the error is printed;
        # use str.format, which the braces were written for.
        return "({}:{}) - {}".format(
            self.server.hostname, self.server.port, self.message
        )
class BackendConnectionError(BackendError):
    """Raised when a connection to the backend cannot be established."""
    pass
class BackendIntegrityError(BackendError):
    """Raised when the backend reports a data-integrity problem."""
    pass
class TimeTravelNotAllowed(BackendError):
    """Raised when a backend rejects an out-of-order (past) timestamp."""
    pass
class BackendMaxDriftError(BackendError):
    """Raised when clock drift against the backend exceeds the allowed max."""
    pass
class ResourceError(Error):
    """Base class for resource-related errors."""
    pass
class ResourceNotFound(ResourceError):
    """Raised when a requested resource does not exist."""
    pass
| 600 | 182 |
# -*- coding: utf-8 -*-
# Demonstrates for-loops over lists of numbers, strings and mixed types.
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
for i in the_count:
    print(i)
for j in fruits:
    print(j)
for k in change:
    print(k)
# Idiom: build the 0..5 list directly instead of appending in a loop.
element = list(range(6))
for e in element:
    print(e)
| 330 | 159 |
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
# SECURITY: a live API key was hard-coded here; read it from the environment
# so the secret stays out of version control.  The historical value is kept
# as a fallback for backward compatibility -- rotate and remove it.
APIKEY = os.environ.get(
    'SENDGRID_API_KEY',
    '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')
message = Mail(
    from_email='muxa2k11@gmail.com',
    to_emails='mikushnerev@stud.etu.ru',
    subject='Sending with Twilio SendGrid is Fun',
    html_content='<strong>and easy to do anywhere, even with Python</strong>'
)
try:
    sg = SendGridAPIClient(APIKEY)
    response = sg.send(message)
    print(response.status_code)
    print(response.body)
    print(response.headers)
except Exception as e:
    # BUG FIX: Python 3 exceptions have no ``.message`` attribute, so the
    # old handler itself raised AttributeError instead of reporting the
    # original error.  Printing the exception uses str(e).
    print(e)
| 678 | 264 |
from setuptools import setup, find_packages, Extension
from os import path
import os
# fmt: off
# NOTE(review): installing numpy via pip.main() at setup time is fragile --
# pip.main was removed from pip's public API in pip 10; PEP 518 build
# requirements would be the robust replacement.  Left unchanged to avoid
# breaking the existing build.
import pip
pip.main(['install', 'numpy>=1.18.0'])
import numpy
# fmt: on
here = path.abspath(path.dirname(__file__))
# Runtime requirements of the package itself.
req = ["numpy>=1.18.0", "spatialgeometry>=0.2.0", "websockets"]
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
def package_files(directory):
    """Recursively collect every file under *directory*.

    Each entry is joined as ``../<dirpath>/<filename>`` — the relative form
    setuptools' ``package_data`` expects for files outside the package dir.
    """
    collected = []
    for dirpath, _, filenames in os.walk(directory):
        collected.extend(
            os.path.join("..", dirpath, name) for name in filenames)
    return collected
# Bundle the pre-built web assets and C sources into the wheel.
extra_folders = [
    "swift/out",
    "swift/core",
]
extra_files = []
for extra_folder in extra_folders:
    extra_files += package_files(extra_folder)
# C extension implementing the physics stepping code.
phys = Extension(
    "phys",
    sources=["./swift/core/phys.c"],
    include_dirs=["./swift/core/", numpy.get_include()],
)
setup(
    name="swift-sim",
    version="0.10.0",
    description="A Python/Javascript Visualiser",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jhavl/swift",
    author="Jesse Haviland",
    license="MIT",
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        "Development Status :: 3 - Alpha",
        # Indicate who your project is intended for
        "Intended Audience :: Developers",
        # Pick your license as you wish (should match "license" above)
        "License :: OSI Approved :: MIT License",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    python_requires=">=3.6",
    keywords="python robotics robotics-toolbox kinematics dynamics"
    " motion-planning trajectory-generation jacobian hessian"
    " control simulation robot-manipulator mobile-robot",
    packages=find_packages(exclude=["tests", "examples"]),
    package_data={"swift": extra_files},
    # include_package_data=True,
    ext_modules=[phys],
    install_requires=req,
)
| 2,324 | 738 |
import sys
from decorator import decorator
from exception_reports.reporter import append_to_exception_message, create_exception_report
from exception_reports.storages import LocalErrorStorage
def exception_report(storage_backend=LocalErrorStorage(), output_format="html", data_processor=None):
    """
    Decorator for creating detailed exception reports for thrown exceptions

    Parameters:
        storage_backend: where generated reports are written.
            NOTE(review): the default is a single LocalErrorStorage instance
            created once at import time and shared by every decorated
            function -- confirm that sharing is intentional (harmless if the
            storage is stateless).
        output_format: format of the generated report (default "html").
        data_processor: optional callable forwarded to
            create_exception_report.

    Usage:

        @exception_report()
        def foobar(text):
            raise Exception("bad things!!")

        foobar('hi')

    Output:

        Exception: bad things!! [report:/tmp/python-error-reports/2018-01-05_06:15:56.218190+00:00_0773698470164da3b2c427d8832dac13.html]

    S3 Usage:

        @exception_report()
        def foobar(text):
            raise Exception("bad things!!")

        foobar('hi')
    """
    def _exception_reports(func, *args, **kwargs):
        # Wrapper applied by the `decorator` library; runs func and, on
        # failure, generates a report and re-raises with its location.
        try:
            return func(*args, **kwargs)
        except Exception as e:
            exc_type, exc_value, tb = sys.exc_info()
            report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)
            e = append_to_exception_message(e, tb, f"[report:{report_location}]")
            setattr(e, "report", report_location)
            # We want to raise the original exception:
            # 1) with a modified message containing the report location
            # 2) with the original traceback
            # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)
            raise e from None
    return decorator(_exception_reports)
| 1,669 | 496 |
from django import forms
# class RenewBookForm(forms.Form):
# renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default 3).")
# class NameForm(forms.Form):
# your_name = forms.CharField(label='Your name', max_length=100)
# class UserForm(forms.Form):
# username = forms.CharField(label="用户名", max_length=128)
# password = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput)
class edit_person_form(forms.Form):
    """Form for editing an existing person's contact details."""
    first_name = forms.CharField(label="first_name", max_length=200, required=True)
    last_name = forms.CharField(label="last_name", max_length=200, required=True)
    email = forms.CharField(label="email", max_length=200, required=True)
    phone_number = forms.CharField(label="phone number", max_length=200, required=True)
    notes = forms.CharField(label="notes", max_length=200, required=True)
class add_person_form(forms.Form):
    """Form for creating a new person; mirrors edit_person_form
    field-for-field."""
    first_name = forms.CharField(label="first_name", max_length=200, required=True)
    last_name = forms.CharField(label="last_name", max_length=200, required=True)
    # BUG FIX: the label was " email" (stray leading space), inconsistent
    # with edit_person_form and rendered verbatim in templates.
    email = forms.CharField(label="email", max_length=200, required=True)
    phone_number = forms.CharField(label="phone number", max_length=200, required=True)
    notes = forms.CharField(label="notes", max_length=200, required=True)
| 1,408 | 478 |
from pathlib import PurePath
import importlib
from opera import stdlib
from opera.error import ParseError
from opera.parser import yaml
# Maps the TOSCA version string found in a template to the name of the
# parser submodule that implements it.
SUPPORTED_VERSIONS = dict(
    tosca_simple_yaml_1_3="v_1_3",
)
def load(base_path, template_name):
    """Parse the TOSCA template *template_name* located under *base_path*.

    Loads the standard library matching the template's TOSCA version,
    merges the user template on top of it, resolves paths and references,
    and returns the resulting service model.

    Raises ParseError when the document is not a map or the version is
    missing/unsupported.
    """
    with (base_path / template_name).open() as input_fd:
        input_yaml = yaml.load(input_fd, str(template_name))
    if not isinstance(input_yaml.value, dict):
        # BUG FIX: this branch referenced the undefined name ``yaml_node``,
        # turning the intended ParseError into a NameError.
        raise ParseError(
            "Top level structure should be a map.", input_yaml.loc,
        )
    tosca_version = _get_tosca_version(input_yaml)
    parser = _get_parser(tosca_version)
    # Parse the bundled standard library first, then merge the user template.
    stdlib_yaml = stdlib.load(tosca_version)
    service = parser.parse(stdlib_yaml, base_path, PurePath("STDLIB"))
    service.merge(parser.parse(input_yaml, base_path, PurePath()))
    service.visit("resolve_path", base_path)
    service.visit("resolve_reference", service)
    return service
def _get_parser(tosca_version):
    """Import and return the Parser class for *tosca_version*.

    *tosca_version* is a value from SUPPORTED_VERSIONS (e.g. "v_1_3") naming
    the sibling submodule that implements that TOSCA dialect.
    """
    # FIX: honor the requested version instead of hard-coding ".v_1_3".
    # With the single currently supported version this is behavior-identical,
    # but new versions now work without touching this function.
    return importlib.import_module("." + tosca_version, __name__).Parser
def _get_tosca_version(input_yaml):
    """Extract and validate the ``tosca_definitions_version`` key.

    Returns the parser submodule name from SUPPORTED_VERSIONS; raises
    ParseError when the key is absent or its value is unsupported.
    """
    for key_node, value_node in input_yaml.value.items():
        if key_node.value != "tosca_definitions_version":
            continue
        try:
            return SUPPORTED_VERSIONS[value_node.value]
        except (TypeError, KeyError):
            raise ParseError(
                "Invalid TOSCA version. Available: {}.".format(
                    ", ".join(SUPPORTED_VERSIONS.keys()),
                ), value_node.loc,
            )
    raise ParseError("Missing TOSCA version", input_yaml.loc)
| 1,510 | 490 |
import json
from datetime import timedelta
from django.conf import settings
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils.timezone import now
from accounts.utils import get_imported_user
from annotations.model_utils import AnnotationAreaUtils
from annotations.models import Annotation
from images.forms import MetadataForm
from images.model_utils import PointGen
from images.models import Source, Metadata, Image, Point
from images.utils import metadata_obj_to_dict, get_aux_labels, \
metadata_field_names_to_labels
from lib.decorators import source_permission_required, source_labelset_required
from lib.exceptions import FileProcessError
from lib.forms import get_one_form_error
from lib.utils import filesize_display
from visualization.forms import ImageSpecifyByIdForm
from .forms import (
CPCImportForm, CSVImportForm, ImageUploadForm, ImageUploadFrontendForm)
from .utils import (
annotations_cpcs_to_dict, annotations_csv_to_dict,
annotations_preview, find_dupe_image, metadata_csv_to_dict,
metadata_preview, upload_image_process)
import vision_backend.tasks as backend_tasks
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_portal(request, source_id):
    """
    Page which points to the pages for the three different upload types.
    """
    if request.method == 'POST':
        # Redirect to whichever upload flow's button was pressed.
        destinations = [
            ('images', 'upload_images'),
            ('metadata', 'upload_metadata'),
            ('annotations_cpc', 'upload_annotations_cpc'),
            ('annotations_csv', 'upload_annotations_csv'),
        ]
        for field, url_name in destinations:
            if request.POST.get(field):
                return HttpResponseRedirect(
                    reverse(url_name, args=[source_id]))
    source = get_object_or_404(Source, id=source_id)
    return render(request, 'upload/upload_portal.html', {
        'source': source,
    })
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_images(request, source_id):
    """
    Upload images to a source.
    This view is for the non-Ajax frontend.
    Renders the upload page with the image form, a form for proceeding to
    metadata management, and a summary of the source's point-generation
    and annotation-area settings.
    """
    source = get_object_or_404(Source, id=source_id)
    images_form = ImageUploadFrontendForm()
    proceed_to_manage_metadata_form = ImageSpecifyByIdForm(source=source)
    # Human-readable explanation of how points will be generated for uploads.
    auto_generate_points_message = (
        "We will generate points for the images you upload.\n"
        "Your Source's point generation settings: {pointgen}\n"
        "Your Source's annotation area settings: {annoarea}").format(
            pointgen=PointGen.db_to_readable_format(
                source.default_point_generation_method),
            annoarea=AnnotationAreaUtils.db_format_to_display(
                source.image_annotation_area),
        )
    return render(request, 'upload/upload_images.html', {
        'source': source,
        'images_form': images_form,
        'proceed_to_manage_metadata_form': proceed_to_manage_metadata_form,
        'auto_generate_points_message': auto_generate_points_message,
        # Displayed so over-size files can be explained up front.
        'image_upload_max_file_size': filesize_display(
            settings.IMAGE_UPLOAD_MAX_FILE_SIZE),
    })
@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_images_preview_ajax(request, source_id):
    """
    Preview the images that are about to be uploaded.
    Check to see if there's any problems with the filenames or file sizes.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))
    source = get_object_or_404(Source, id=source_id)
    file_info_list = json.loads(request.POST.get('file_info'))
    size_limit = settings.IMAGE_UPLOAD_MAX_FILE_SIZE
    statuses = []
    for file_info in file_info_list:
        # A name collision with an existing image is reported with a link
        # to the conflicting image; over-size files get a size error;
        # everything else is OK.
        dupe_image = find_dupe_image(source, file_info['filename'])
        if dupe_image:
            status = dict(
                error="Image with this name already exists",
                url=reverse('image_detail', args=[dupe_image.id]),
            )
        elif file_info['size'] > size_limit:
            status = dict(
                error="Exceeds size limit of {limit}".format(
                    limit=filesize_display(size_limit))
            )
        else:
            status = dict(
                ok=True,
            )
        statuses.append(status)
    return JsonResponse(dict(
        statuses=statuses,
    ))
@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_images_ajax(request, source_id):
    """
    After the "Start upload" button is clicked, this view is entered once
    for each image file. This view saves the image to the database
    and media storage.
    Returns JSON: {error: ...} on failure, otherwise
    {success, link, image_id} for the saved image.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))
    source = get_object_or_404(Source, id=source_id)
    # Retrieve image related fields
    image_form = ImageUploadForm(request.POST, request.FILES)
    # Check for validity of the file (filetype and non-corruptness) and
    # the options forms.
    if not image_form.is_valid():
        # Examples of errors: filetype is not an image,
        # file is corrupt, file is empty, etc.
        return JsonResponse(dict(
            error=get_one_form_error(image_form),
        ))
    img = upload_image_process(
        image_file=image_form.cleaned_data['file'],
        image_name=image_form.cleaned_data['name'],
        source=source,
        current_user=request.user,
    )
    # Queue feature extraction for the vision backend with a 1-minute delay
    # -- presumably to let the upload settle first; TODO confirm.
    backend_tasks.submit_features.apply_async(
        args=[img.id],
        eta=(now() + timedelta(minutes=1)),
    )
    return JsonResponse(dict(
        success=True,
        link=reverse('image_detail', args=[img.id]),
        image_id=img.id,
    ))
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_metadata(request, source_id):
    """
    Set image metadata by uploading a CSV file containing the metadata.
    This view is for the non-Ajax frontend.
    """
    source = get_object_or_404(Source, id=source_id)
    context = {
        'source': source,
        'csv_import_form': CSVImportForm(),
        'field_labels': metadata_field_names_to_labels(source).values(),
        'aux_field_labels': get_aux_labels(source),
    }
    return render(request, 'upload/upload_metadata.html', context)
@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_metadata_preview_ajax(request, source_id):
    """
    Set image metadata by uploading a CSV file containing the metadata.
    This view takes the CSV file, processes it, saves the processed metadata
    to the session, and returns a preview table of the metadata to be saved.
    """
    if request.method != 'POST':
        return JsonResponse({'error': "Not a POST request"})

    source = get_object_or_404(Source, id=source_id)

    csv_import_form = CSVImportForm(request.POST, request.FILES)
    if not csv_import_form.is_valid():
        return JsonResponse({'error': csv_import_form.errors['csv_file'][0]})

    try:
        # Dict of (metadata ids -> dicts of (column name -> value))
        csv_metadata = metadata_csv_to_dict(
            csv_import_form.get_csv_stream(), source)
    except FileProcessError as error:
        return JsonResponse({'error': str(error)})

    preview_table, preview_details = metadata_preview(csv_metadata, source)

    # Stash the parsed metadata so the save view can pick it up.
    request.session['csv_metadata'] = csv_metadata

    return JsonResponse({
        'success': True,
        'previewTable': preview_table,
        'previewDetails': preview_details,
    })
@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_metadata_ajax(request, source_id):
    """
    Set image metadata by uploading a CSV file containing the metadata.
    This view gets the metadata that was previously saved to the session
    by the upload-preview view. Then it saves the metadata to the database.
    """
    if request.method != 'POST':
        return JsonResponse({'error': "Not a POST request"})

    source = get_object_or_404(Source, id=source_id)

    csv_metadata = request.session.pop('csv_metadata', None)
    if not csv_metadata:
        return JsonResponse({
            'error': (
                "We couldn't find the expected data in your session."
                " Please try loading this page again. If the problem persists,"
                " let us know on the forum."
            ),
        })

    for metadata_id, csv_metadata_for_image in csv_metadata.items():
        metadata = Metadata.objects.get(pk=metadata_id, image__source=source)

        # Merge the CSV values over the existing metadata values.
        new_metadata_dict = metadata_obj_to_dict(metadata)
        new_metadata_dict.update(csv_metadata_for_image)

        metadata_form = MetadataForm(
            new_metadata_dict, instance=metadata, source=source)

        # We already validated previously, so this SHOULD be valid.
        if not metadata_form.is_valid():
            raise ValueError("Metadata became invalid for some reason.")

        metadata_form.save()

    return JsonResponse({'success': True})
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_csv(request, source_id):
    """Render the CSV annotation upload page (non-Ajax frontend)."""
    source = get_object_or_404(Source, id=source_id)
    context = {
        'source': source,
        'csv_import_form': CSVImportForm(),
    }
    return render(request, 'upload/upload_annotations_csv.html', context)
@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_csv_preview_ajax(request, source_id):
    """
    Add points/annotations to images by uploading a CSV file.
    This view takes the CSV file, processes it, saves the processed data
    to the session, and returns a preview table of the data to be saved.
    """
    if request.method != 'POST':
        return JsonResponse({'error': "Not a POST request"})

    source = get_object_or_404(Source, id=source_id)

    csv_import_form = CSVImportForm(request.POST, request.FILES)
    if not csv_import_form.is_valid():
        return JsonResponse({'error': csv_import_form.errors['csv_file'][0]})

    try:
        csv_annotations = annotations_csv_to_dict(
            csv_import_form.get_csv_stream(), source)
    except FileProcessError as error:
        return JsonResponse({'error': str(error)})

    preview_table, preview_details = annotations_preview(
        csv_annotations, source)

    # Stash the parsed annotations so the save view can pick them up.
    request.session['uploaded_annotations'] = csv_annotations

    return JsonResponse({
        'success': True,
        'previewTable': preview_table,
        'previewDetails': preview_details,
    })
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_cpc(request, source_id):
    """Render the Coral Point Count upload page (non-Ajax frontend)."""
    source = get_object_or_404(Source, id=source_id)
    context = {
        'source': source,
        'cpc_import_form': CPCImportForm(source),
    }
    return render(request, 'upload/upload_annotations_cpc.html', context)
@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_cpc_preview_ajax(request, source_id):
    """
    Add points/annotations to images by uploading Coral Point Count files.
    This view takes multiple .cpc files, processes them, saves the processed
    data to the session, and returns a preview table of the data to be saved.
    """
    if request.method != 'POST':
        return JsonResponse({'error': "Not a POST request"})

    source = get_object_or_404(Source, id=source_id)

    cpc_import_form = CPCImportForm(source, request.POST, request.FILES)
    if not cpc_import_form.is_valid():
        return JsonResponse({'error': cpc_import_form.errors['cpc_files'][0]})

    try:
        cpc_info = annotations_cpcs_to_dict(
            cpc_import_form.get_cpc_names_and_streams(), source,
            cpc_import_form.cleaned_data['plus_notes'])
    except FileProcessError as error:
        return JsonResponse({'error': str(error)})

    preview_table, preview_details = annotations_preview(
        cpc_info['annotations'], source)

    # Stash both the annotations and the CPC file info for the save view.
    request.session['uploaded_annotations'] = cpc_info['annotations']
    request.session['cpc_info'] = cpc_info

    return JsonResponse({
        'success': True,
        'previewTable': preview_table,
        'previewDetails': preview_details,
    })
@source_permission_required(
    'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
    "You must create a labelset before uploading annotations."))
def upload_annotations_ajax(request, source_id):
    """
    This view gets the annotation data that was previously saved to the
    session by upload-preview-csv or upload-preview-cpc.
    Then it saves the data to the database,
    while deleting all previous points/annotations for the images involved.
    """
    if request.method != 'POST':
        return JsonResponse(dict(
            error="Not a POST request",
        ))
    source = get_object_or_404(Source, id=source_id)
    uploaded_annotations = request.session.pop('uploaded_annotations', None)
    if not uploaded_annotations:
        return JsonResponse(dict(
            error=(
                "We couldn't find the expected data in your session."
                " Please try loading this page again. If the problem persists,"
                " let us know on the forum."
            ),
        ))
    # Only present when the preview came from the CPC upload flow.
    cpc_info = request.session.pop('cpc_info', None)
    for image_id, annotations_for_image in uploaded_annotations.items():
        img = Image.objects.get(pk=image_id, source=source)
        # Delete previous annotations and points for this image.
        # Calling delete() on these querysets is more efficient
        # than calling delete() on each of the individual objects.
        Annotation.objects.filter(image=img).delete()
        Point.objects.filter(image=img).delete()
        # Create new points and annotations.
        new_points = []
        new_annotations = []
        for num, point_dict in enumerate(annotations_for_image, 1):
            # Create a Point. Point numbers are 1-based and follow the
            # order of the uploaded annotation rows.
            point = Point(
                row=point_dict['row'], column=point_dict['column'],
                point_number=num, image=img)
            new_points.append(point)
        # Save to DB with an efficient bulk operation.
        Point.objects.bulk_create(new_points)
        for num, point_dict in enumerate(annotations_for_image, 1):
            # Create an Annotation if a label is specified.
            if 'label' in point_dict:
                label_obj = source.labelset.get_global_by_code(
                    point_dict['label'])
                # TODO: Django 1.10 can set database IDs on newly created
                # objects, so re-fetching the points may not be needed:
                # https://docs.djangoproject.com/en/dev/releases/1.10/#database-backends
                new_annotations.append(Annotation(
                    point=Point.objects.get(point_number=num, image=img),
                    image=img, source=source,
                    label=label_obj, user=get_imported_user()))
        # Do NOT bulk-create the annotations so that the versioning signals
        # (for annotation history) do not get bypassed. Create them one by one.
        for annotation in new_annotations:
            annotation.save()
        # Update relevant image/metadata fields.
        img.point_generation_method = PointGen.args_to_db_format(
            point_generation_type=PointGen.Types.IMPORTED,
            imported_number_of_points=len(new_points)
        )
        if cpc_info:
            # We uploaded annotations as CPC. Save contents for future CPC
            # exports.
            # Note: Since cpc_info went through session serialization,
            # dicts with integer keys have had their keys stringified.
            img.cpc_content = cpc_info['cpc_contents'][str(img.pk)]
            img.cpc_filename = cpc_info['cpc_filenames'][str(img.pk)]
        else:
            # We uploaded CSV. Any CPC we had saved previously no longer has
            # the correct point positions, so we'll just discard the CPC.
            img.cpc_content = ''
            img.cpc_filename = ''
        img.save()
        img.metadata.annotation_area = AnnotationAreaUtils.IMPORTED_STR
        img.metadata.save()
        # Submit job with 1 hour delay to allow the view and thus DB transaction
        # to conclude before jobs are submitted.
        # Details: https://github.com/beijbom/coralnet-system/issues/31.
        backend_tasks.reset_features.apply_async(
            args=[img.id], eta=now() + timedelta(hours=1))
    if cpc_info:
        # We uploaded annotations as CPC. Save some info for future CPC
        # exports.
        source.cpce_code_filepath = cpc_info['code_filepath']
        source.cpce_image_dir = cpc_info['image_dir']
        source.save()
    return JsonResponse(dict(
        success=True,
    ))
| 18,100 | 5,167 |
""" Variant-level annotation functions requiring ClinvarDB and Metapub (NCBI/eutils). """
import requests, json, urllib
from metapub.text_mining import is_pmcid, is_ncbi_bookID
from metapub.pubmedcentral import get_pmid_for_otherid
from ..db.clinvar import ClinVarDB
from ..log import log
##########################################################################################
#
# Functions
#
##########################################################################################
def _clinvar_variant_accession(hgvs_text):
    """
    Look up the "Reference ClinVar Accession" (RCVAccession) for a variant.

    See ClinVar FAQ http://www.ncbi.nlm.nih.gov/clinvar/docs/faq/#accs
    :param hgvs_text: c.DNA
    :return: RCVAccession "Reference ClinVar Accession", or None if not found
    """
    try:
        return ClinVarDB().accession_for_hgvs_text(str(hgvs_text))
    except Exception:
        # Broad catch is deliberate: any lookup failure means "no accession".
        # Lazy %-args avoid formatting unless debug logging is enabled.
        log.debug('no clinvar accession for variant hgvs_text %s', hgvs_text)
        return None
def _clinvar_variant_allele_id(hgvs_text):
    """
    Get the unique AlleleID
    :param hgvs_text: c.DNA
    :return: AlleleID, or None if not found
    """
    try:
        return ClinVarDB().allele_id_for_hgvs_text(hgvs_text)
    except Exception:
        # Broad catch is deliberate: any lookup failure means "no AlleleID".
        # Lazy %-args avoid formatting unless debug logging is enabled.
        log.debug('no clinvar AlleleID for variant hgvs_text %s', hgvs_text)
        return None
def _clinvar_variant_variation_id(hgvs_text):
    """
    Get the unique VariationID
    :param hgvs_text: c.DNA
    :return: VariationID, or None if not found
    """
    try:
        return ClinVarDB().variation_id_for_hgvs_text(hgvs_text)
    except Exception:
        # Broad catch is deliberate: any lookup failure means "no VariationID".
        # Lazy %-args avoid formatting unless debug logging is enabled.
        log.debug('no clinvar VariationID for variant hgvs_text %s', hgvs_text)
        return None
def _clinvar_variant2pubmed(hgvs_text):
    """
    Get PMID for clinvar variants using the AlleleID key.
    Keep GeneReviews book references (NKBxxxx) without argument.
    ONE EXPENSIVE LOOKUP HERE:
    If the citation_source is PubMedCentral, first convert responses to PMID.
    :param hgvs_text: c.DNA
    :return: set(PMIDs and possibly also NBK ids)
    """
    collected = []
    # An empty/None citation list simply yields an empty set.
    for cite in ClinVarDB().var_citations(hgvs_text) or []:
        some_id = cite['citation_id']
        if is_ncbi_bookID(some_id):
            # Todo: convert? drop??
            collected.append(some_id)
        elif is_pmcid(some_id):
            # Expensive remote conversion from PMCID to PMID.
            try:
                pmid = get_pmid_for_otherid(some_id)
                if pmid is not None:
                    log.debug('found PubMedCentral PMCID %s, converted to PMID %s ', some_id, str(pmid))
                    collected.append(pmid)
                else:
                    log.debug('PMID not found for PMCID %s; discarding.', some_id)
            except Exception as err:
                log.debug('error converting PMCID %s: %r', some_id, err)
        elif cite['citation_source'] == 'PubMed':
            collected.append(some_id)
    return set(collected)
def clinvar2pmid_with_accessions(hgvs_list):
    """
    Resolve ClinVar citations for the given variants into PMIDs, keeping
    the associated RCV accession for each hit.

    :param hgvs_list: variants to look up in ClinVar's citation table
    :return: list of dicts with keys hgvs_text, pmid, accession
    """
    results = []
    for cite in ClinVarDB().var_citations(hgvs_list) or []:
        article_id = cite['citation_id']
        if is_ncbi_bookID(article_id):
            # GeneReviews book IDs are passed through unchanged.
            pmid = article_id
        elif cite['citation_source'] == 'PubMed':
            pmid = article_id
        else:
            # Expensive remote conversion (e.g. PMCID -> PMID).
            pmid = get_pmid_for_otherid(article_id)
        if pmid:
            results.append({
                "hgvs_text": cite['HGVS'],
                "pmid": pmid,
                "accession": cite['RCVaccession'],
            })
    return results
##########################################################################################
#
# API
#
##########################################################################################
# Public aliases for the private lookup helpers above; importers use these names.
ClinvarAccession = _clinvar_variant_accession
ClinvarAlleleID = _clinvar_variant_allele_id
ClinvarPubmeds = _clinvar_variant2pubmed
ClinvarVariationID = _clinvar_variant_variation_id
| 3,846 | 1,248 |
"""
Functions and data for estimating taxes outside the income tax system.
Examples include value added tax, financial transaction tax, and carbon tax.
"""
import microdf as mdf
import numpy as np
import pandas as pd
# Incidence schedules below are indexed on the floor of an income percentile
# (-1 stands for the bottom of the distribution) and hold tax rates as
# percentages, which are converted to fractions by the /= 100 lines.

# Source:
# https://www.taxpolicycenter.org/briefing-book/who-would-bear-burden-vat
VAT_INCIDENCE = pd.Series(
    index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9],
    data=[3.9, 3.9, 3.6, 3.6, 3.6, 3.6, 3.6, 3.4, 3.4, 3.2, 2.8, 2.5, 2.5],
)
VAT_INCIDENCE /= 100
# Source: Table 5 in
# https://www.treasury.gov/resource-center/tax-policy/tax-analysis/Documents/WP-115.pdf
CARBON_TAX_INCIDENCE = pd.Series(
    index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9],
    data=[0.8, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.8, 1.8, 1.8, 1.6, 1.4, 0.7],
)
CARBON_TAX_INCIDENCE /= 100
# Source: Figure 1 in
# https://www.taxpolicycenter.org/sites/default/files/alfresco/publication-pdfs/2000587-financial-transaction-taxes.pdf
FTT_INCIDENCE = pd.Series(
    index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9],
    data=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.3, 0.4, 0.8, 1.0],
)
FTT_INCIDENCE /= 100
def add_custom_tax(
    df,
    segment_income,
    w,
    base_income,
    incidence,
    name,
    total=None,
    ratio=None,
    verbose=True,
):
    """Add a custom tax based on incidence analysis driven by percentiles.

    :param df: DataFrame.
    :param segment_income: Income measure used to segment tax units into
        quantiles.
    :param w: Weight used to segment into quantiles (either s006 or XTOT_m).
    :param base_income: Income measure by which incidence is multiplied to
        estimate liability.
    :param incidence: pandas Series indexed on the floor of an income
        percentile, with values for the tax rate.
    :param name: Name of the column to add.
    :param total: Total amount the tax should generate. If not provided,
        liabilities are calculated only based on the incidence schedule.
        (Default value = None)
    :param ratio: Ratio to adjust the tax by, compared to the original tax.
        This acts as a multiplier for the incidence argument.
        (Default value = None)
    :param verbose: Whether to print the tax adjustment factor if needed.
        Defaults to True.
    :raises ValueError: If both ratio and total are provided.
    :returns: Nothing. Adds the column name to df representing the tax
        liability. df is also sorted by segment_income.
    """
    if ratio is not None:
        # Explicit raise instead of `assert`, which is silently stripped
        # when running under `python -O`.
        if total is not None:
            raise ValueError("ratio and total cannot both be provided.")
        incidence = incidence * ratio
    df.sort_values(segment_income, inplace=True)
    income_percentile = 100 * df[w].cumsum() / df[w].sum()
    # Map each unit's percentile onto the incidence schedule's brackets.
    tu_incidence = incidence.iloc[
        pd.cut(
            income_percentile,
            # Add a right endpoint. Should be 100 but sometimes a decimal
            # gets added.
            bins=incidence.index.tolist() + [101],
            labels=False,
        )
    ].values
    # Liability is floored at zero even if base income is negative.
    df[name] = np.maximum(0, tu_incidence * df[base_income])
    if total is not None:
        # Scale liabilities so the weighted sum matches the target total.
        initial_total = mdf.weighted_sum(df, name, "s006")
        if verbose:
            print(
                "Multiplying tax by "
                + str(round(total / initial_total, 2))
                + "."
            )
        df[name] *= total / initial_total
def add_vat(
    df,
    segment_income="tpc_eci",
    w="XTOT_m",
    base_income="aftertax_income",
    incidence=VAT_INCIDENCE,
    name="vat",
    **kwargs
):
    """Add value added tax based on incidence estimate from Tax Policy Center.

    Thin wrapper around add_custom_tax() with VAT defaults; extra keyword
    arguments are forwarded unchanged.

    :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income.
    :returns: Nothing. Adds vat to df. df is also sorted by tpc_eci.
    """
    add_custom_tax(
        df=df,
        segment_income=segment_income,
        w=w,
        base_income=base_income,
        incidence=incidence,
        name=name,
        **kwargs
    )
def add_carbon_tax(
    df,
    segment_income="tpc_eci",
    w="XTOT_m",
    base_income="aftertax_income",
    incidence=CARBON_TAX_INCIDENCE,
    name="carbon_tax",
    **kwargs
):
    """Add carbon tax based on incidence estimate from the US Treasury
    Department.

    Thin wrapper around add_custom_tax() with carbon-tax defaults; extra
    keyword arguments are forwarded unchanged.

    :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income.
    :returns: Nothing. Adds carbon_tax to df. df is also sorted by tpc_eci.
    """
    add_custom_tax(
        df=df,
        segment_income=segment_income,
        w=w,
        base_income=base_income,
        incidence=incidence,
        name=name,
        **kwargs
    )
def add_ftt(
    df,
    segment_income="tpc_eci",
    w="XTOT_m",
    base_income="aftertax_income",
    incidence=FTT_INCIDENCE,
    name="ftt",
    **kwargs
):
    """Add financial transaction tax based on incidence estimate from Tax
    Policy Center.

    Thin wrapper around add_custom_tax() with FTT defaults; extra keyword
    arguments are forwarded unchanged.

    :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income.
    :returns: Nothing. Adds ftt to df. df is also sorted by tpc_eci.
    """
    add_custom_tax(
        df=df,
        segment_income=segment_income,
        w=w,
        base_income=base_income,
        incidence=incidence,
        name=name,
        **kwargs
    )
| 6,049 | 2,242 |
from utilities import Utilities
# from comment_level_evaluation import CommentLevelEvaluation
import operator
class CombineSystems:
    """
    Combine per-comment aspect predictions from two systems ("A" and "B")
    using per-aspect confidence thresholds, with optional lexicon-based
    post-processing for food/parking aspects.
    """

    def __init__(self):
        self.utilities = Utilities()
        self.storage_path = 'comment-level-datasets-2/'
        # Known aspect categories used to filter system B's predictions.
        self.categories = ['environment', 'waiting time', 'staff attitude professionalism', 'care quality', 'other']

    def is_valid_asp_from_from_system_a(self, aspect, confidence_value, thresholds):
        """
        Return True when system A's aspect is a known threshold key and its
        confidence meets the per-aspect minimum.

        :param aspect: aspect label predicted by system A
        :param confidence_value: confidence as a string or number
        :param thresholds: dict of aspect -> minimum confidence
        """
        is_valid = False
        aspects = thresholds.keys()
        if aspect in aspects and float(confidence_value) >= thresholds[aspect]:
            is_valid = True
        return is_valid

    def is_valid_asp_from_from_system_b(self, aspect, confidence_value, thresholds):
        """
        Return True when system B's aspect is a known threshold key and its
        confidence meets the per-aspect minimum.

        :param aspect: aspect label predicted by system B
        :param confidence_value: confidence as a string or number
        :param thresholds: dict of aspect -> minimum confidence
        """
        is_valid = False
        aspects = thresholds.keys()
        if aspect in aspects and float(confidence_value) >= thresholds[aspect]:
            is_valid = True
        return is_valid

    def apply_dictionaries(self, comment):
        """
        Detect 'food' and 'parking' aspects via keyword lexicons over the
        lemmatized comment words.

        :param comment: raw comment text
        :return: list containing 'food' and/or 'parking' (possibly empty)
        """
        food_lexicon = ['food', 'canteen', 'canten', 'coffee', 'cofee', 'coffe', 'coffee', 'tea', 'drink', 'drinks']
        parking_lexicon = ['car park', 'car-park', 'carpark', 'parking', 'bicycle']
        aspects = []
        all_words = self.utilities.get_lemma(comment)
        lemmatized_words = all_words.values()
        for word in food_lexicon:
            if word in lemmatized_words:
                aspects.append('food')
                break
        for word in parking_lexicon:
            if word in lemmatized_words:
                aspects.append('parking')
                break
        return aspects

    def combine_by_dynamic_threshold(self, file_a_path, file_b_path, output_file_path, thresholds_a, thresholds_b, evaluation=False):
        """
        Union the aspects of two systems row by row, keeping only aspects
        that pass each system's per-aspect thresholds; optionally apply the
        food/parking dictionaries, then save the combined rows as CSV.

        :param evaluation: when True, skip the dictionary post-processing
        """
        file_a = self.utilities.read_from_csv(file_a_path)
        file_b = self.utilities.read_from_csv(file_b_path)
        output = []
        for row_a, row_b in zip(file_a, file_b):
            comment = row_a[0]
            aspects = []
            # remove comment from the first column
            del row_a[0]
            del row_b[0]
            for a, b in zip(row_a, row_b):
                # NOTE(review): when `not a` holds, `a in self.categories`
                # cannot be True, so this break looks unreachable - confirm
                # the intended stop condition.
                if not a and not b and a in self.categories:
                    break
                # union with threshold
                if a is not None:
                    # System A cells look like "<aspect> <confidence> <sentiment>".
                    asp_threshold = a.rsplit(' ', 1)[0]
                    sentiment = a.rsplit(' ', 1)[1]
                    aspect_a = asp_threshold.rsplit(' ', 1)[0]
                    asp_snt = aspect_a + " " + sentiment
                    if not any(aspect_a in asp for asp in aspects):
                        confidence_value_a = asp_threshold.rsplit(' ', 1)[1]
                        is_valid = self.is_valid_asp_from_from_system_a(aspect_a, confidence_value_a, thresholds_a)
                        if is_valid:
                            aspects.append(asp_snt)
                if b is not None:
                    # System B cells look like "<aspect> <confidence>".
                    aspect_b = b.rsplit(' ', 1)[0]
                    if aspect_b in self.categories and not any(aspect_b in asp for asp in aspects):
                        confidence_value_b = b.rsplit(' ', 1)[1]
                        is_valid = self.is_valid_asp_from_from_system_b(aspect_b, confidence_value_b, thresholds_b)
                        if is_valid:
                            aspects.append(aspect_b)
            # Apply food and parking dictionaries
            # TURN OFF THIS SNIPPET BEFORE EVALUATION
            if evaluation is False:
                asps_from_dictionaries = self.apply_dictionaries(comment)
                if len(asps_from_dictionaries) > 0:
                    # if only environment, then replace with food/parking
                    if len(aspects) == 1 and aspects[0] == 'environment':
                        aspects = asps_from_dictionaries
                    else:
                        aspects = aspects + asps_from_dictionaries
            if len(aspects) < 1:
                # Fall back to a default aspect when nothing passed.
                aspects = ['other negative']
            output.append([comment] + aspects)
        self.utilities.save_list_as_csv(output, output_file_path)

    def combine_by_static_threshold(self, file_a_path, file_b_path, threshold_a, threshold_b, output_file_path):
        """
        Union the aspects of two systems using one global confidence
        threshold per system, then save the combined rows as CSV.
        """
        file_a = self.utilities.read_from_csv(file_a_path)
        file_b = self.utilities.read_from_csv(file_b_path)
        output = []
        for row_a, row_b in zip(file_a, file_b):
            comment = row_a[0]
            aspects = []
            # remove comment from the first column
            del row_a[0]
            del row_b[0]
            for a, b in zip(row_a, row_b):
                if not a and not b and a in self.categories:
                    break
                # union with threshold
                if a and a.rsplit(' ', 1)[0] not in aspects and float(a.rsplit(' ', 1)[1]) >= threshold_a:
                    aspects.append(a.rsplit(' ', 1)[0])
                if b and b.rsplit(' ', 1)[0] in self.categories and b.rsplit(' ', 1)[0] not in aspects and float(b.rsplit(' ', 1)[1]) >= threshold_b:
                    aspects.append(b.rsplit(' ', 1)[0])
            if len(aspects) < 1:
                aspects = ['other']
            output.append([comment] + aspects)
        self.utilities.save_list_as_csv(output, output_file_path)

    def extract_top_comments(self, data_file, output_file_path):
        """
        Rank comments by confidence within each category and save the top 5
        per category as columns of a CSV.
        """
        rows = self.utilities.read_from_csv(data_file)
        envs = {}
        wts = {}
        saaps = {}
        cqs = {}
        ots = {}
        for row in rows:
            comment = row[0]
            # Bug fix: was `del rows[0]`, which mutated the list being
            # iterated (skipping rows) and left the comment inside `row`
            # to be parsed as an aspect item.
            del row[0]
            for item in row:
                # if there is sentiment remove it
                if any(snt_cat in item for snt_cat in self.utilities.sentiment_classes):
                    item = item.rsplit(' ', 1)[0]
                if item and item.rsplit(' ', 1)[0] == 'environment':
                    envs[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'waiting time':
                    wts[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'staff attitude and professionalism':
                    saaps[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'care quality':
                    cqs[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'other':
                    ots[comment] = float(item.rsplit(' ', 1)[1])
        # sort comments by the descending order of confidence values
        sorted_envs = [comment_data[0] for comment_data in sorted(envs.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_wts = [comment_data[0] for comment_data in sorted(wts.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_saaps = [comment_data[0] for comment_data in sorted(saaps.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_cqs = [comment_data[0] for comment_data in sorted(cqs.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_ots = [comment_data[0] for comment_data in sorted(ots.items(), key=operator.itemgetter(1), reverse=True)]
        # prepare output to save; pad short categories with None.
        output = [['Environment', 'Waiting time', 'Staff attitude and professionalism', 'Care quality', 'Other']]
        top = 5
        for i in range(0, top):
            comments = []
            for ranked in (sorted_envs, sorted_wts, sorted_saaps, sorted_cqs, sorted_ots):
                try:
                    comments.append(ranked[i])
                except IndexError:
                    comments.append(None)
            output.append(comments)
        self.utilities.save_list_as_csv(output, output_file_path)
| 9,067 | 2,712 |
import json

# Sample nested record: a student with a list of exam results.
student = {
    "ime": "Milan",
    "prezime": "Tair",
    "indeks": 2008213514,
    "ispiti": [
        {
            "predmet": "Programiranje 1",
            "datum": "2008-01-05",
            "ocjena": 10
        },
        {
            "predmet": "Informatika",
            "datum": "2008-01-05",
            "ocjena": 10
        }
    ]
}

# str() produces a Python repr (single quotes) - not valid JSON.
studentString = str(student)
print(studentString)

# json.dumps() produces a proper JSON document.
studentString = json.dumps(student)
print(studentString)

# Use a context manager so the file is closed even if the write fails.
with open("student.json", "w") as datoteka:
    datoteka.write(studentString)
| 603 | 236 |
import pandas as pd
import xlrd
import xlsxwriter
from p01_kennenlernen import meinebibliothek

df = pd.read_excel("O:\___Python\personen.xlsx") # import from Excel into pandas; date column arrives as Timestamps
print(df)
print()
df1 = pd.to_datetime(df["Geburtsdatum"]) # convert the Timestamp column to datetime
print(df1)
print()
alter = []
for geburtstag in df1: # reuse the previously built age calculation
    alter.append(meinebibliothek.alter(geburtstag))
durchschnittsalter = sum(alter) / len(alter) # compute the average age
print ("Durchschnittsalter ", durchschnittsalter)
print()
df["Alter"] = alter # add the computed age column to the table read from Excel
print(df)
writer = pd.ExcelWriter("O:\___Python\personen_bearbeitet.xlsx", engine="xlsxwriter") # create an Excel writer backed by XlsxWriter
df.to_excel(writer, sheet_name='Sheet1') # convert the DataFrame into an XlsxWriter Excel object
writer.save() # close the writer and export the Excel file; NOTE(review): ExcelWriter.save() was removed in pandas 2.0 - confirm the pandas version in use
| 1,066 | 368 |
from spec import eq_
from invoke import ctask
@ctask
def mytask(c):
    """Check that the context picked up the 'hooray' value from YAML config."""
    expected = 'yaml'
    eq_(c.hooray, expected)
| 97 | 40 |
import os
import csv
import numpy as np
import pandas as pd
import pickle as pk
from glob import glob
from math import ceil
from vmz_interface.data.db_video_create import create_video_db
class VideoDBBuilder:
    def __init__(self, stimulus_id, lmdb_path, temporal_depth, fpv=75, video_strt_offset=15
                 , clips_overlap=0, batch_size=4, gpu_count=2, max_num_records=6e4, min_records_factor=1
                 , allow_mkdir=False, *args, **kwargs):
        """
        Configure paths and clip/batch parameters for building video LMDBs.

        :param stimulus_id: identifier used to name the clips directory
        :param lmdb_path: root directory for LMDB data (must exist unless allow_mkdir)
        :param temporal_depth: number of frames per clip
        :param fpv: frames per video (assumed constant across videos)
        :param video_strt_offset: frames to skip at the start of each video
        :param clips_overlap: frames shared between adjacent clips
        :param batch_size: per-GPU batch size used downstream
        :param gpu_count: number of GPUs used downstream
        :param max_num_records: max records per lmdb file (arbitrarily chosen)
        :param min_records_factor: factor controlling the minimum size of the last file
        :param allow_mkdir: create missing directories instead of raising
        :raises Exception: when a required directory is missing and allow_mkdir is False
        """
        if not os.path.isdir(lmdb_path):
            if allow_mkdir:
                os.mkdir(lmdb_path)
            else:
                raise Exception(f'please make sure {lmdb_path} is a valid directory')
        self._stim_id = stimulus_id
        self._lmdb_path = lmdb_path
        self.num_frames_per_clips = temporal_depth
        self.BATCH_SIZE = batch_size
        self.GPU_CNT = gpu_count
        self.gpus = []
        self.MAX_RECORDS = max_num_records # 60K max number of records per lmdb (arbitrarily chosen)
        self.MIN_RECORDS_MULT = min_records_factor # used to make sure last file is not too large (arbitrarily chosen)
        self.fpv = fpv
        self.video_start_offset = video_strt_offset
        self.clips_overlap = clips_overlap
        self.list_lmdb_meta = []
        self.units = 1
        self.video_lmdb_paths = None
        self.uneven_db = True
        self.gpu_batch_combo = None
        # All clip metadata/DBs for this stimulus live under a directory
        # keyed by stimulus id, clip depth, and clip overlap.
        self.clips_dir = f'{stimulus_id}_{self.num_frames_per_clips}_{self.clips_overlap}'
        self.clips_lmdb_data_path = f'{self._lmdb_path}/{self.clips_dir}'
        if not os.path.isdir(self.clips_lmdb_data_path):
            if allow_mkdir:
                os.mkdir(self.clips_lmdb_data_path)
            else:
                raise Exception(f'please make sure {self.clips_lmdb_data_path} is a valid directory')
    def make_from_paths(self, stimuli_paths):
        """
        Point the builder at a list of video paths and build (or reuse) LMDBs.

        If meta CSVs already exist under the clips directory, verify that they
        cover exactly the requested videos and reuse the existing DBs;
        otherwise write fresh meta files and create the video DBs.

        :param stimuli_paths: list of video file paths for this stimulus
        :raises Exception: when existing metas do not match stimuli_paths,
            or when writing new meta files fails
        """
        self.video_paths = stimuli_paths
        self.vid_cnt = len(self.video_paths)
        lmdb_metas = glob(f'{self.clips_lmdb_data_path}/lmdb_meta_*.csv')
        # make existence check:
        if len(lmdb_metas) > 0:
            # Collect every original video referenced by the existing metas.
            # NOTE(review): assumes meta files are numbered 0..N-1 with no
            # gaps - confirm this matches _write_lmdb_meta's numbering.
            vid_list = set(self.video_paths)
            created_metas = set()
            for i in range(len(lmdb_metas)):
                with open(f'{self.clips_lmdb_data_path}/lmdb_meta_{i}.csv') as f:
                    df = pd.read_csv(f)
                created_metas.update(set(df['org_video'].unique()))
            if created_metas == vid_list:
                self.video_lmdb_paths = glob(f'{self.clips_lmdb_data_path}/lmdb_*_db')
            else:
                raise Exception(f'Stimulus id {self._stim_id} does not match the videos in the LMDB')
        else:
            if not self.write_lmdb_meta():
                raise Exception('writing stimulus lmdb metas failed')
            else:
                self._create_video_dbs()
    def write_lmdb_meta(self):
        """
        Partition all (video, clip) records into groups and write one meta
        CSV per group.

        Each record row is [video_path, label, start_frame, global_video_id];
        the label is hard-coded to 0 and the global id enumerates clips
        across the whole dataset.

        :return: True when all meta files were written
        """
        num_clips, start_frms = self._start_frames()
        db_starts, db_strides = self._records_per_meta(num_clips)
        # Convert record-level offsets/strides into video-path offsets/strides.
        file_strides = [int(i/num_clips) for i in db_strides]
        file_starts = [int(i/num_clips) for i in db_starts]
        sub_paths = [self.video_paths[offset:offset+stride] for offset, stride in zip(file_starts, file_strides)]
        # One row per (video, clip) pair within each group.
        write_data = [[[ data[i]
                         , 0 # labels is None? hacs_action_dict[os.path.basename(os.path.dirname(data[i]))]
                         , start_frms[clip_idx]
                         , num_clips*i + clip_idx + db_starts[idx]]
                       for i in range(len(data)) for clip_idx in range(num_clips)]
                      for idx, data in enumerate(sub_paths)]
        self.uneven_db = False
        if len(file_strides) > 0:
            # NOTE(review): this sets uneven_db True when the last file's
            # stride EQUALS the first's, which reads as the opposite of
            # "uneven" - confirm the intended semantics.
            self.uneven_db = file_strides[-1] == file_strides[0]
        # self.units = num_clips
        return self._write_lmdb_meta(write_data)
def _write_lmdb_meta(self, write_data):
for group, group_paths in enumerate(write_data):
with open(f'{self.clips_lmdb_data_path}/lmdb_meta_{group}.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['org_video', 'label', 'start_frm', 'video_id'])
writer.writerows(group_paths)
self.list_lmdb_meta.append(f'{self.clips_lmdb_data_path}/lmdb_meta_{group}.csv')
return True
def _start_frames(self):
'''
calculate how many examples given CLIPs type:
FULL: number of clips per video == 1
CLIPs_ONE: each clip strides by 1, overlaping 15 frames between adjacent CLIPs
CLIPs_TEN: overlaping 10 frames between adjacent CLIPs
num_clips = ceil( (total_frames_per_video - temporal_depth - offset) / clips_stride ) + 1
given num_clips per video, calculate frame starts for videos:
start_frm[0] = (total_frames_per_video - temporal_depth) - stride * (num_clips - 1)
start_frm[i] = start_frm[i-1] + 6
'''
video_width = (self.fpv - self.video_start_offset) # 60
clips_stride = (self.num_frames_per_clips - self.clips_overlap)
num_CLIPS = ceil((video_width - self.num_frames_per_clips)/clips_stride) + 1
initial_frame = (self.fpv - self.num_frames_per_clips) - (num_CLIPS - 1) * clips_stride
start_frms = [initial_frame + i*clips_stride for i in range(num_CLIPS)]
assert all(start_frms[i] > 0 for i in range(len(start_frms)))
assert any(start_frms[i] <= self.video_start_offset for i in range(len(start_frms)))
return num_CLIPS, start_frms
    def _records_per_meta(self, num_clips):
        """
        Distribute the total record count across an even number of meta files.

        Caffe2's video model does not pad batched data, so each file's record
        count must be a multiple of ``num_clips * BATCH_SIZE * GPU_CNT``.  The
        remainder is added to a final meta file; if it exceeds
        ``div_criteria * MIN_RECORDS_MULT`` records, one extra evenly
        divisible file is split off first.

        :param num_clips: number of clips extracted per video.
        :return: tuple ``(file_starts, file_strides)`` — the record offsets at
            which each lmdb should begin creating its DB, and how many records
            each file consumes.
        """
        total_num_records = num_clips * self.vid_cnt
        div_criteria = num_clips * self.BATCH_SIZE * self.GPU_CNT # extract_features requires number of records to divide evenly
        # start with 1 files:
        num_files = 1
        # files_rem = int(total_num_records%num_files)
        records_per_file = int(total_num_records/num_files)
        if records_per_file > self.MAX_RECORDS:
            # files_rem = int(total_num_records % self.MAX_RECORDS)
            num_files = int(total_num_records / self.MAX_RECORDS)
            records_per_file = int(total_num_records/num_files)
        # round each file's record count down to a multiple of div_criteria
        rem_per_file = int(records_per_file % div_criteria)
        records_per_file = records_per_file - rem_per_file
        file_starts = [int(records_per_file*i) for i in range(0,num_files)]
        file_strides = [int(records_per_file) for i in range(num_files)]
        rem_total = total_num_records - num_files * records_per_file
        temp_rem = rem_total
        if rem_total > div_criteria * self.MIN_RECORDS_MULT:
            # remainder is large enough to carve off one more evenly
            # divisible file before the final short file
            temp_rem = int(rem_total % div_criteria)
            extra_file = rem_total - temp_rem
            # NOTE(review): the new start is prev_start + extra_file; the
            # expected offset would seem to be prev_start + records_per_file —
            # verify against the asserts below and the caller's unit handling.
            file_starts.append(int(extra_file + file_starts[-1]))
            file_strides.append(int(extra_file))
            num_files+=1
        if temp_rem > 0:
            # whatever is left becomes a short final file; pick a GPU/batch
            # combination that divides it evenly
            file_starts.append(int(file_starts[-1] + file_strides[-1]))
            file_strides.append(int(temp_rem))
            self._get_gpu_batch_combo(temp_rem)
        assert all(file_starts[i]%div_criteria == 0 for i in range(1,num_files))
        assert total_num_records - file_starts[-1] == temp_rem
        assert sum(file_strides) == total_num_records
        return file_starts, file_strides
def _get_gpu_batch_combo(self, file_remainder):
gpu_check = int(file_remainder % self.GPU_CNT)
batch_check = int(file_remainder % self.BATCH_SIZE)
if gpu_check==0 and (batch_check==0 or batch_check!=0):
self.gpu_batch_combo = [self.GPU_CNT, 1]
elif gpu_check!=0 and batch_check==0:
self.gpu_batch_combo = [1, self.BATCH_SIZE]
else:
self.gpu_batch_combo = [1, 1]
def _create_video_dbs(self):
"""
create_video_db(
args.list_file,
args.output_file,
args.use_list,
args.use_video_id,
args.use_start_frame,
args.num_epochs
)
"""
use_list = 1
use_video_id = 1
use_start_frame = 1
list_lmdb_output = [f'{self.clips_lmdb_data_path}/lmdb_{i}_db' for i in range(len(self.list_lmdb_meta))]
for i in range(len(self.list_lmdb_meta)):
create_video_db(list_file=self.list_lmdb_meta[i], output_file=list_lmdb_output[i], use_list=use_list, use_video_id=use_video_id, use_start_frame=use_start_frame)
self.video_lmdb_paths = list_lmdb_output
| 7,769 | 3,343 |
import time
import base64
import pytest
import responses
from oic.oic import AuthorizationResponse, AccessTokenResponse, TokenErrorResponse, OpenIDSchema, \
AuthorizationErrorResponse
from urllib.parse import parse_qsl, urlparse
from flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata, ProviderMetadata, \
ClientRegistrationInfo
from flask_pyoidc.pyoidc_facade import PyoidcFacade, _ClientAuthentication
from .util import signed_id_token
# Redirect URI registered for the test client; shared by all test cases below.
REDIRECT_URI = 'https://rp.example.com/redirect_uri'
class TestPyoidcFacade(object):
    """Tests for PyoidcFacade: client registration, authentication request
    construction, authentication/token response parsing, and userinfo
    requests.  All HTTP traffic to the provider is stubbed with the
    `responses` library."""
    PROVIDER_BASEURL = 'https://op.example.com'
    PROVIDER_METADATA = ProviderMetadata(PROVIDER_BASEURL,
                                         PROVIDER_BASEURL + '/auth',
                                         PROVIDER_BASEURL + '/jwks')
    CLIENT_METADATA = ClientMetadata('client1', 'secret1')
    # --- client registration -------------------------------------------
    def test_registered_client_metadata_is_forwarded_to_pyoidc(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA, client_metadata=self.CLIENT_METADATA)
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert facade._client.registration_response
    def test_no_registered_client_metadata_is_handled(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_registration_info=ClientRegistrationInfo())
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert not facade._client.registration_response
    def test_is_registered(self):
        unregistered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                             client_registration_info=ClientRegistrationInfo())
        registered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                           client_metadata=self.CLIENT_METADATA)
        assert PyoidcFacade(unregistered, REDIRECT_URI).is_registered() is False
        assert PyoidcFacade(registered, REDIRECT_URI).is_registered() is True
    # dynamic registration against a stubbed registration endpoint
    @responses.activate
    def test_register(self):
        registration_endpoint = self.PROVIDER_BASEURL + '/register'
        responses.add(responses.POST, registration_endpoint, json=self.CLIENT_METADATA.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(registration_endpoint=registration_endpoint)
        unregistered = ProviderConfiguration(provider_metadata=provider_metadata,
                                             client_registration_info=ClientRegistrationInfo())
        facade = PyoidcFacade(unregistered, REDIRECT_URI)
        facade.register()
        assert facade.is_registered() is True
    # --- authentication request ----------------------------------------
    # library-supplied params must override user-configured ones ('foo')
    def test_authentication_request(self):
        extra_user_auth_params = {'foo': 'bar', 'abc': 'xyz'}
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_metadata=self.CLIENT_METADATA,
                                       auth_request_params=extra_user_auth_params)
        state = 'test_state'
        nonce = 'test_nonce'
        facade = PyoidcFacade(config, REDIRECT_URI)
        extra_lib_auth_params = {'foo': 'baz', 'qwe': 'rty'}
        auth_request = facade.authentication_request(state, nonce, extra_lib_auth_params)
        expected_auth_params = {
            'scope': 'openid',
            'response_type': 'code',
            'client_id': self.CLIENT_METADATA['client_id'],
            'redirect_uri': REDIRECT_URI,
            'state': state,
            'nonce': nonce
        }
        expected_auth_params.update(extra_user_auth_params)
        expected_auth_params.update(extra_lib_auth_params)
        assert auth_request.to_dict() == expected_auth_params
    # --- authentication response parsing -------------------------------
    def test_parse_authentication_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        auth_code = 'auth_code-1234'
        state = 'state-1234'
        auth_response = AuthorizationResponse(**{'state': state, 'code': auth_code})
        parsed_auth_response = facade.parse_authentication_response(auth_response.to_dict())
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response.to_dict() == auth_response.to_dict()
    def test_parse_authentication_response_handles_error_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        error_response = AuthorizationErrorResponse(**{'error': 'invalid_request', 'state': 'state-1234'})
        parsed_auth_response = facade.parse_authentication_response(error_response)
        assert isinstance(parsed_auth_response, AuthorizationErrorResponse)
        assert parsed_auth_response.to_dict() == error_response.to_dict()
    # id_token verification needs the provider JWKS, hence the stubbed jwks_uri
    @responses.activate
    def test_parse_authentication_response_preserves_id_token_jwt(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        state = 'state-1234'
        now = int(time.time())
        id_token, id_token_signing_key = signed_id_token({
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_sub',
            'aud': 'client1',
            'exp': now + 1,
            'iat': now
        })
        responses.add(responses.GET,
                      self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})
        auth_response = AuthorizationResponse(**{'state': state, 'id_token': id_token})
        parsed_auth_response = facade.parse_authentication_response(auth_response)
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response['state'] == state
        assert parsed_auth_response['id_token_jwt'] == id_token
    # --- token endpoint -------------------------------------------------
    # both grant types must send the expected form-encoded token request
    @pytest.mark.parametrize('request_func,expected_token_request', [
        (
                lambda facade: facade.exchange_authorization_code('auth-code'),
                {
                    'grant_type': 'authorization_code',
                    'code': 'auth-code',
                    'redirect_uri': REDIRECT_URI
                }
        ),
        (
                lambda facade: facade.refresh_token('refresh-token'),
                {
                    'grant_type': 'refresh_token',
                    'refresh_token': 'refresh-token',
                    'redirect_uri': REDIRECT_URI
                }
        )
    ])
    @responses.activate
    def test_token_request(self, request_func, expected_token_request):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        now = int(time.time())
        id_token_claims = {
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_user',
            'aud': [self.CLIENT_METADATA['client_id']],
            'exp': now + 1,
            'iat': now,
            'nonce': 'test_nonce'
        }
        id_token_jwt, id_token_signing_key = signed_id_token(id_token_claims)
        token_response = AccessTokenResponse(access_token='test_access_token',
                                             token_type='Bearer',
                                             id_token=id_token_jwt)
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        responses.add(responses.GET,
                      self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})
        token_response = request_func(facade)
        assert isinstance(token_response, AccessTokenResponse)
        expected_token_response = token_response.to_dict()
        expected_token_response['id_token'] = id_token_claims
        expected_token_response['id_token_jwt'] = id_token_jwt
        assert token_response.to_dict() == expected_token_response
        token_request = dict(parse_qsl(responses.calls[0].request.body))
        assert token_request == expected_token_request
    @responses.activate
    def test_token_request_handles_error_response(self):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        token_response = TokenErrorResponse(error='invalid_request', error_description='test error description')
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict(), status=400)
        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') == token_response
    def test_token_request_handles_missing_provider_token_endpoint(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') is None
    # --- userinfo endpoint ----------------------------------------------
    @pytest.mark.parametrize('userinfo_http_method', [
        'GET',
        'POST'
    ])
    @responses.activate
    def test_configurable_userinfo_endpoint_method_is_used(self, userinfo_http_method):
        userinfo_endpoint = self.PROVIDER_BASEURL + '/userinfo'
        userinfo_response = OpenIDSchema(sub='user1')
        responses.add(userinfo_http_method, userinfo_endpoint, json=userinfo_response.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=userinfo_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=userinfo_http_method),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') == userinfo_response
    def test_no_userinfo_request_is_made_if_no_userinfo_http_method_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=None),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None
    def test_no_userinfo_request_is_made_if_no_userinfo_endpoint_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None
    def test_no_userinfo_request_is_made_if_no_access_token(self):
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=self.PROVIDER_BASEURL + '/userinfo')
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request(None) is None
class TestClientAuthentication(object):
    """Tests for _ClientAuthentication: header/body generation for the token
    endpoint client authentication methods."""
    CLIENT_ID = 'client1'
    CLIENT_SECRET = 'secret1'
    @property
    def basic_auth(self):
        """Expected HTTP Basic Authorization header value for the test client."""
        raw = f'{self.CLIENT_ID}:{self.CLIENT_SECRET}'
        encoded = base64.urlsafe_b64encode(raw.encode('utf-8')).decode('utf-8')
        return f'Basic {encoded}'
    @pytest.fixture(autouse=True)
    def setup(self):
        self.client_auth = _ClientAuthentication(self.CLIENT_ID, self.CLIENT_SECRET)
    def test_client_secret_basic(self):
        # credentials go in the Authorization header; the request body is untouched
        token_request = {}
        auth_headers = self.client_auth('client_secret_basic', token_request)
        assert auth_headers == {'Authorization': self.basic_auth}
        assert token_request == {}
    def test_client_secret_post(self):
        # credentials are injected into the request body; no header is produced
        token_request = {}
        auth_headers = self.client_auth('client_secret_post', token_request)
        assert auth_headers is None
        assert token_request == {'client_id': self.CLIENT_ID, 'client_secret': self.CLIENT_SECRET}
    def test_defaults_to_client_secret_basic(self):
        # unrecognized methods fall back to client_secret_basic
        assert self.client_auth('invalid_client_auth_method', {}) == self.client_auth('client_secret_basic', {})
| 13,038 | 3,762 |
# Thanks to zecoxao and flatz <3
import struct
from binascii import unhexlify as uhx
from binascii import hexlify as hx
from Crypto.Cipher import AES
from Crypto.Hash import SHA, HMAC, CMAC
import os
import sys
# AES keys (hex) used to CMAC-verify and CBC-decrypt the EID1 blob.
# NOTE(review): key provenance/semantics not derivable from this file alone.
EID1KEYS = [
    '88228B0F92C4C36AF097F1FE948D27CE',
    '5794BC8C2131B1E3E7EC61EF14C32EB5',
]
# Root key (hex) from which the per-section keys are derived by chained CBC
# encryption of the decrypted key seed.
INITKEYS = [
    '48FF6BFA9C172C6E14AE444419CAF676'
]
# All-zero 128-bit IV (hex) used for every CBC operation in this script.
ZEROS128 = ['00000000000000000000000000000000']
def aes_decrypt_cbc(key, iv, input):
    """Decrypt *input* with AES in CBC mode under *key* and *iv*."""
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return cipher.decrypt(input)
def aes_decrypt_ecb(key, input):
    """Decrypt *input* with AES in ECB mode under *key*."""
    cipher = AES.new(key, AES.MODE_ECB)
    return cipher.decrypt(input)
def aes_encrypt_cbc(key, iv, input):
    """Encrypt *input* with AES in CBC mode under *key* and *iv*."""
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return cipher.encrypt(input)
def main(argc, argv):
    """Verify and decrypt an EID1 blob.

    Reads the file named by ``argv[1]``, prints the stored hash next to the
    recomputed AES-CMAC over the encrypted EID1 region, derives the four
    per-section keys from the decrypted key seed, prints each 16-byte body's
    stored hash next to its recomputed CMAC, and writes two output files:

      ``<input>.eid1.dec.bin`` - the CBC-decrypted EID1 region
      ``<input>.init.dec.bin`` - the eight decrypted 16-byte data sections

    :param argc: argument count (unused; kept for signature compatibility).
    :param argv: argument vector; ``argv[1]`` is the input file path.
    """
    with open(argv[1], 'rb') as f:
        data = f.read()
    eid1 = data[0x10:0x290]
    stored_hash = data[0x290:0x2A0]
    # CMAC over the encrypted EID1 region, printed next to the stored hash
    # so the operator can compare them by eye.
    eid1_cmac = CMAC.new(uhx(EID1KEYS[0]), ciphermod=AES)
    eid1_cmac.update(eid1)
    print(hx(stored_hash))
    print(eid1_cmac.hexdigest())
    eid1_dec = aes_decrypt_cbc(uhx(EID1KEYS[0]), uhx(ZEROS128[0]), eid1)
    keyseed = eid1_dec[:0x10]
    # Derive the section keys pck1..pck4 by repeatedly CBC-encrypting the key
    # seed under the init key.  (The original code derived twelve keys this
    # way, but only the first four were ever used.)
    init_key = uhx(INITKEYS[0])
    zero_iv = uhx(ZEROS128[0])
    pcks = []
    prev = keyseed
    for _ in range(4):
        prev = aes_encrypt_cbc(init_key, zero_iv, prev)
        pcks.append(prev)
    # Sections 1-4: 16-byte blocks at 0x2A0..0x2E0, each decrypted with its
    # own derived key.
    first_stage = [aes_decrypt_ecb(pcks[i], data[0x2A0 + 0x10 * i:0x2B0 + 0x10 * i])
                   for i in range(4)]
    # Sections 5-8: 32-byte blocks at 0x2E0..0x360, laid out as
    # 16-byte body || 16-byte hash.  Each body's CMAC (under pck1, as in the
    # original tooling) is printed next to the stored hash, then the body is
    # decrypted with pck1.
    second_stage = []
    for i in range(4):
        blob = data[0x2E0 + 0x20 * i:0x300 + 0x20 * i]
        body, blob_hash = blob[:0x10], blob[0x10:]
        body_cmac = CMAC.new(pcks[0], ciphermod=AES)
        body_cmac.update(body)
        print(hx(blob_hash))
        print(body_cmac.hexdigest())
        second_stage.append(aes_decrypt_ecb(pcks[0], body))
    with open(argv[1] + '.eid1.dec.bin', 'wb') as g:
        g.write(eid1_dec)
    with open(argv[1] + '.init.dec.bin', 'wb') as g:
        g.write(b''.join(first_stage + second_stage))
if __name__ == '__main__':
    main(len(sys.argv), sys.argv)
class Stick:
    """A simple stick holding a length, a location, an object id and an
    RGB color (invalid colors fall back to white)."""
    def __init__(self, length=None, location=None):
        self._length = length
        self._loc = location
        self._id = id(self)
        self._color = None
    # FIXME: load the data through the setter methods instead!
    @property
    def length(self):
        """Current length of the stick."""
        return self._length
    @length.setter
    def length(self, value):
        self._length = value
    @property
    def location(self):
        """Current location of the stick."""
        return self._loc
    @location.setter
    def location(self, location):
        self._loc = location
    @property
    def o_id(self):
        """Identity-based object id assigned at construction."""
        return self._id
    @property
    def color(self):
        """Current RGB color tuple (None until first assigned)."""
        return self._color
    @color.setter
    def color(self, value: tuple):
        # Accept only RGB components within 0..255; anything out of range
        # falls back to white.
        if all(0 <= value[i] <= 255 for i in range(3)):
            self._color = value
        else:
            self._color = (255, 255, 255)
if __name__ == "__main__":
stick = Stick()
stick.length(120)
stick.location(0)
print(stick.length, stick.location)
| 1,078 | 370 |