hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35111898dcd4fe4049cb8c64c9d5c33193e6cb76 | 24,003 | py | Python | src/infra/Zoning.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | 19 | 2021-12-11T17:17:00.000Z | 2022-03-24T07:27:06.000Z | src/infra/Zoning.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | null | null | null | src/infra/Zoning.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | 1 | 2021-12-21T11:20:39.000Z | 2021-12-21T11:20:39.000Z | # -------------------------------------------------------------------------------------------------------------------- #
# standard distribution imports
# -----------------------------
import os
import logging
# additional module imports (> requirements)
# ------------------------------------------
import pandas as pd
import numpy as np
from scipy.sparse import load_npz
# src imports
# -----------
# -------------------------------------------------------------------------------------------------------------------- #
# global variables
# ----------------
from src.misc.globals import *
# from src.fleetctrl.FleetControlBase import PlanRequest # TODO # circular dependency!
# set log level to logging.DEBUG or logging.INFO for single simulations
LOG_LEVEL = logging.WARNING
LOG = logging.getLogger(__name__)
NOON = 12*3600
# -------------------------------------------------------------------------------------------------------------------- #
# main class
# ----------
class ZoneSystem:
# TODO # scenario par now input! add in fleetsimulations
def __init__(self, zone_network_dir, scenario_parameters, dir_names):
# general information
self.zone_general_dir = os.path.dirname(zone_network_dir)
self.zone_system_name = os.path.basename(self.zone_general_dir)
self.zone_network_dir = zone_network_dir
general_info_f = os.path.join(self.zone_general_dir, "general_information.csv")
try:
self.general_info_df = pd.read_csv(general_info_f, index_col=0)
except:
self.general_info_df = None
# network specific information
node_zone_f = os.path.join(zone_network_dir, "node_zone_info.csv")
node_zone_df = pd.read_csv(node_zone_f)
# pre-process some data
self.zones = sorted(node_zone_df[G_ZONE_ZID].unique().tolist()) # TODO
if self.general_info_df is not None:
self.all_zones = self.general_info_df.index.to_list()
else:
self.all_zones = list(self.zones)
if self.general_info_df is not None:
self.node_zone_df = pd.merge(node_zone_df, self.general_info_df, left_on=G_ZONE_ZID, right_on=G_ZONE_ZID)
self.node_zone_df.set_index(G_ZONE_NID, inplace=True)
self.zone_centroids = None # zone_id -> list node_indices (centroid not unique!)
if G_ZONE_CEN in self.node_zone_df.columns:
self.zone_centroids = {}
for node_id, zone_id in self.node_zone_df[self.node_zone_df[G_ZONE_CEN] == 1][G_ZONE_ZID].items():
try:
self.zone_centroids[zone_id].append(node_id)
except KeyError:
self.zone_centroids[zone_id] = [node_id]
# # edge specific information -> not necessary at the moment
# edge_zone_f = os.path.join(zone_network_dir, "edge_zone_info.csv")
# self.edge_zone_df = pd.read_csv(edge_zone_f)
self.current_toll_cost_scale = 0
self.current_toll_coefficients = {}
self.current_park_costs = {}
self.current_park_search_durations = {}
# reading zone-correlation matrix if available
if scenario_parameters.get(G_ZONE_CORR_M_F):
# load correlation matrix files; these are saved as sparse matrices by scipy module
# important: name of squared matrix depends on linear correlation matrix
tmp_k_f = os.path.join(self.zone_general_dir, scenario_parameters[G_ZONE_CORR_M_F])
tmp_k2_f = tmp_k_f.replace("zone_to_zone_correlations", "zone_to_zone_squared_correlations")
if not os.path.isfile(tmp_k_f) or not os.path.isfile(tmp_k2_f):
raise IOError(f"Could not find zone-to-zone correlation files {tmp_k_f} or {tmp_k2_f}!")
self.zone_corr_matrix = load_npz(tmp_k_f).todense()
self.zone_sq_corr_matrix = load_npz(tmp_k2_f).todense()
else:
self.zone_corr_matrix = np.eye(len(self.zones))
self.zone_sq_corr_matrix = np.eye(len(self.zones))
# read forecast files
if scenario_parameters.get(G_FC_FNAME) and scenario_parameters.get(G_FC_TYPE):
fc_dir = dir_names.get(G_DIR_FC)
self.fc_temp_resolution = int(os.path.basename(fc_dir))
forecast_f = os.path.join(fc_dir, scenario_parameters.get(G_FC_FNAME))
if os.path.isfile(forecast_f):
fc_type = scenario_parameters.get(G_FC_TYPE)
self.forecast_df = pd.read_csv(forecast_f)
self.fc_times = sorted(self.forecast_df[G_ZONE_FC_T].unique())
self.forecast_df.set_index([G_ZONE_FC_T, G_ZONE_ZID], inplace=True)
self.in_fc_type = f"in {fc_type}"
self.out_fc_type = f"out {fc_type}"
if self.in_fc_type not in self.forecast_df.columns or self.out_fc_type not in self.forecast_df.columns:
raise IOError(f"Could not find forecast data for {fc_type} in {forecast_f}")
drop_columns = []
for col in self.forecast_df.columns:
if col != self.in_fc_type and col != self.out_fc_type:
drop_columns.append(col)
self.forecast_df.drop(drop_columns, axis=1, inplace=True)
else:
raise IOError(f"Could not find forecast file {forecast_f}")
else:
self.forecast_df = None
self.in_fc_type = None
self.out_fc_type = None
self.fc_times = []
self.fc_temp_resolution = None
self.demand = None
    def register_demand_ref(self, demand_ref):
        """Register a reference to the simulation's demand object for later lookups.

        :param demand_ref: demand instance of the scenario
        """
        self.demand = demand_ref
    def get_zone_system_name(self):
        """Return the name of this zone system (basename of the general zone directory).

        :return: zone system name
        :rtype: str
        """
        return self.zone_system_name
    def get_zone_dirs(self):
        """Return the directories this zone system was loaded from.

        :return: tuple (general zone directory, network specific zone directory)
        :rtype: tuple
        """
        return self.zone_general_dir, self.zone_network_dir
    def get_all_zones(self):
        """This method returns a list of zone_ids that have a node in them.

        :return: zone_ids
        :rtype: list
        """
        return self.zones
    def get_complete_zone_list(self):
        """This method returns a list of all zone_ids (including zones without nodes).

        :return: zone_ids
        :rtype: list
        """
        return self.all_zones
def get_all_nodes_in_zone(self, zone_id):
"""This method returns all nodes within a zone. This can be used as a search criterion.
:param zone_id: id of the zone in question
:type zone_id: int
:return: list of node_ids
:rtype: list
"""
tmp_df = self.node_zone_df[self.node_zone_df[G_ZONE_ZID] == zone_id]
return tmp_df.index.values.tolist()
def get_random_node(self, zone_id):
"""This method returns a random node_id for a given zone_id.
:param zone_id: id of the zone in question
:type zone_id: int
:return: node_id of a node within the zone in question; return -1 if invalid zone_id is given
:rtype: int
"""
tmp_df = self.node_zone_df[self.node_zone_df[G_ZONE_ZID] == zone_id]
if len(tmp_df) > 0:
return np.random.choice(tmp_df.index.values.tolist())
else:
return -1
def get_random_centroid_node(self, zone_id):
if self.zone_centroids is not None:
nodes = self.zone_centroids.get(zone_id, [])
if len(nodes) > 0:
return np.random.choice(nodes)
else:
return -1
else:
raise EnvironmentError("No zone centroid nodes defined! ({} parameter not in"
" node_zone_info.csv!)".format(G_ZONE_CEN))
def get_zone_from_node(self, node_id):
"""This method returns the zone_id of a given node_id.
:param node_id: id of node in question
:type node_id: int
:return: zone_id of the node in question; return -1 if no zone is found
:rtype: int
"""
return self.node_zone_df[G_ZONE_ZID].get(node_id, -1)
def get_zone_from_pos(self, pos):
"""This method returns the zone_id of a given position by returning the zone of the origin node.
:param pos: position-tuple (edge origin node, edge destination node, edge relative position)
:type pos: list
:return: zone_id of the node in question; return -1 if no zone is found
:rtype: int
"""
return self.get_zone_from_node(pos[0])
    def check_first_last_mile_option(self, o_node, d_node):
        """This method checks whether first/last mile service should be offered for a trip.

        If the zone data contains no first/last-mile column, the service is offered
        everywhere. (Note: despite the old doc, this returns a tuple of two flags,
        not a single bool.)

        :param o_node: node_id of a trip's start location
        :type o_node: int
        :param d_node: node_id of a trip's end location
        :type d_node: int
        :return: tuple (mod_access, mod_egress) of flags for origin and destination
        :rtype: tuple of (bool, bool)
        """
        if G_ZONE_FLM in self.node_zone_df.columns:
            mod_access = self.node_zone_df[G_ZONE_FLM].get(o_node, True)
            mod_egress = self.node_zone_df[G_ZONE_FLM].get(d_node, True)
        else:
            mod_access = True
            mod_egress = True
        return mod_access, mod_egress
def set_current_park_costs(self, general_park_cost=0, park_cost_dict={}):
"""This method sets the current park costs in cent per region per hour.
:param general_park_cost: this is a scale factor that is multiplied by each zones park_cost_scale_factor.
:type general_park_cost: float
:param park_cost_dict: sets the park costs per zone directly. Code prioritizes input over general_park_cost.
:type park_cost_dict: dict
"""
if park_cost_dict:
for k,v in park_cost_dict.items():
if k in self.general_info_df.index:
self.current_park_costs[k] = v
else:
for k, zone_scale_factor in self.general_info_df[G_ZONE_PC].items():
self.current_park_costs[k] = general_park_cost * zone_scale_factor
    def set_current_toll_cost_scale_factor(self, general_toll_cost):
        """Set the global toll cost scale factor (applied in set_current_toll_costs()).

        :param general_toll_cost: toll cost scale factor
        :type general_toll_cost: float
        """
        self.current_toll_cost_scale = general_toll_cost
def set_current_toll_costs(self, use_pre_defined_zone_scales=False, rel_toll_cost_dict={}):
"""This method sets the current toll costs in cent per meter.
:param use_pre_defined_zone_scales: use each zones toll_cost_scale_factor of zone definition.
:type use_pre_defined_zone_scales: bool
:param rel_toll_cost_dict: sets the toll costs per zone directly. Code prioritizes input over general_toll_cost.
:type rel_toll_cost_dict: dict
"""
if rel_toll_cost_dict and self.current_toll_cost_scale > 0:
for k,v in rel_toll_cost_dict.items():
if k in self.general_info_df.index:
self.current_toll_coefficients[k] = self.current_toll_cost_scale * v
elif use_pre_defined_zone_scales and self.current_toll_cost_scale > 0:
for k, zone_scale_factor in self.general_info_df[G_ZONE_TC].items():
self.current_toll_coefficients[k] = self.current_toll_cost_scale * zone_scale_factor
    def get_external_route_costs(self, routing_engine, sim_time, route, park_origin=True, park_destination=True):
        """This method returns the external costs of a route, namely toll and park costs. Model simplifications:
        1) Model assumes a trip-based model, in which duration of activity is unknown. For this reason, park costs
        are assigned to a trip depending on their destination (trip start in the morning) or the origin (trip starts
        in the afternoon).
        2) Toll costs are computed for the current point in time. No extrapolation for the actual route time is
        performed.

        :param routing_engine: network and routing class
        :type routing_engine: Network
        :param sim_time: relevant for park costs - am: destination relevant; pm: origin relevant
        :type sim_time: float
        :param route: list of node ids that a vehicle drives along
        :type route: list
        :param park_origin: flag showing whether vehicle could generate parking costs at origin
        :type park_origin: bool
        :param park_destination: flag showing whether vehicle could generate parking costs at destination
        :type park_destination: bool
        :return: tuple of total external costs, toll costs, parking costs in cent
        :rtype: list
        """
        park_costs = 0
        toll_costs = 0
        # an empty route generates no external costs at all
        if route:
            # 1) park cost model
            if sim_time < NOON:
                if park_destination:
                    # assume 1 hour of parking in order to return the set park cost values (current value!)
                    d_zone = self.get_zone_from_node(route[-1])
                    park_costs += self.current_park_costs.get(d_zone, 0)
            else:
                if park_origin:
                    # assume 1 hour of parking in order to return the set park cost values (current value!)
                    o_zone = self.get_zone_from_node(route[0])
                    park_costs += self.current_park_costs.get(o_zone, 0)
            # 2) toll model: toll of each edge is determined by the zone of its origin node
            # and rounded per edge (np.rint) to whole cents
            for i in range(len(route)-1):
                o_node = route[i]
                d_node = route[i+1]
                zone = self.get_zone_from_node(o_node)
                length = routing_engine.get_section_infos(o_node, d_node)[1]
                toll_costs += np.rint(self.current_toll_coefficients.get(zone, 0) * length)
        external_pv_costs = park_costs + toll_costs
        return external_pv_costs, toll_costs, park_costs
def _get_trip_forecasts(self, trip_type, t0, t1, aggregation_level, scale = None):
"""This method returns the number of expected trip arrivals or departures inside a zone in the
time interval [t0, t1]. The return value is created by interpolation of the forecasts in the data frame
if necessary. The default if no values can be found for a zone should be 0.
:param t0: start of forecast time horizon
:type t0: float
:param t1: end of forecast time horizon
:type t1: float
:param aggregation_level: spatial aggregation level, by default zone_id is used
:type aggregation_level: int
:param scale: scales forecast distributen by this value if given
:type scale: float
:return: {}: zone -> forecast of arrivals
:rtype: dict
"""
if trip_type == "in":
col = self.in_fc_type
elif trip_type == "out":
col = self.out_fc_type
else:
raise AssertionError("Invalid forecast column chosen!")
#
if aggregation_level is not None:
tmp_forecast_df = self.forecast_df.reset_index().groubpy([G_ZONE_FC_T,
aggregation_level]).aggregate({col: sum})
else:
tmp_forecast_df = self.forecast_df
#
def _create_forecast_dict(tmp_col, row_index, tmp_return_dict, tmp_scale_factor=1.0):
# LOG.info(f"{self.forecast_df}")
# LOG.info(f"{tmp_forecast_df}")
# LOG.info(f"{row_index} | {G_ZONE_FC_T}")
try:
tmp_df = tmp_forecast_df.xs(row_index, level=G_ZONE_FC_T)
except:
LOG.info("couldnt find forecast for t {}".format(row_index))
return {}
tmp_dict = tmp_df[tmp_col].to_dict()
for k, v in tmp_dict.items():
try:
tmp_return_dict[k] += (v * tmp_scale_factor)
except KeyError:
tmp_return_dict[k] = (v * tmp_scale_factor)
return tmp_return_dict
#
return_dict = {}
# get forecast of initial time interval
last_t0 = t0
if t0 not in self.fc_times:
# check whether t0 and t1 are valid times
if t0 > self.fc_times[-1] or t1 < self.fc_times[0]:
# use first/last forecast and scale
if t1 > self.fc_times[0]:
last_t0 = self.fc_times[0]
else:
last_t0 = self.fc_times[-1]
scale_factor = (t1 - t0) / self.fc_temp_resolution
return_dict = _create_forecast_dict(col, last_t0, return_dict, scale_factor)
# if scale is not None:
# for key, val in return_dict.items():
# return_dict[key] = val * scale
return return_dict
else:
# get forecast from t0 to next value in self.fc_times
for i in range(len(self.fc_times)):
# last_t0 = self.fc_times[i]
# next_t0 = self.fc_times[i+1]
next_t0 = self.fc_times[i]
if next_t0 > t1:
if last_t0 == t0:
scale_factor = (t1 - t0) / self.fc_temp_resolution
return_dict = _create_forecast_dict(col, self.fc_times[i-1], return_dict, scale_factor)
return return_dict
break
if last_t0 <= t0 and t0 < next_t0:
scale_factor = (next_t0 - last_t0) / self.fc_temp_resolution
# scale down the values
return_dict = _create_forecast_dict(col, next_t0, return_dict, scale_factor)
last_t0 = next_t0
break
# add forecasts of next intervals as well
while t1 - last_t0 > self.fc_temp_resolution:
return_dict = _create_forecast_dict(col, last_t0, return_dict)
last_t0 += self.fc_temp_resolution
if last_t0 not in self.fc_times:
break
# append rest of last interval
if t1 != last_t0:
scale_factor = (t1 - last_t0) / self.fc_temp_resolution
return_dict = _create_forecast_dict(col, last_t0, return_dict, scale_factor)
if scale is not None:
for key, val in return_dict.items():
return_dict[key] = val * scale
return return_dict
    def get_trip_arrival_forecasts(self, t0, t1, aggregation_level=None, scale = None):
        """This method returns the number of expected trip arrivals inside a zone in the time interval [t0, t1].
        The return value is created by interpolation of the forecasts in the data frame if necessary.
        The default if no values can be found for a zone should be 0.

        :param t0: start of forecast time horizon
        :type t0: float
        :param t1: end of forecast time horizon
        :type t1: float
        :param aggregation_level: spatial aggregation level, by default zone_id is used
        :type aggregation_level: int
        :param scale: scales the forecast distribution by this value if given
        :type scale: float
        :return: {}: zone -> forecast of arrivals
        :rtype: dict
        """
        if self.in_fc_type is None:
            raise AssertionError("get_trip_arrival_forecasts() called even though no forecasts are available!")
        return self._get_trip_forecasts("in", t0, t1, aggregation_level, scale = scale)
    def get_trip_departure_forecasts(self, t0, t1, aggregation_level=None, scale = None):
        """This method returns the number of expected trip departures inside a zone in the time interval [t0, t1].

        :param t0: start of forecast time horizon
        :type t0: float
        :param t1: end of forecast time horizon
        :type t1: float
        :param aggregation_level: spatial aggregation level, by default zone_id is used
        :type aggregation_level: int
        :param scale: scales the forecast distribution by this value if given
        :type scale: float
        :return: {}: zone -> forecast of departures
        :rtype: dict
        """
        if self.out_fc_type is None:
            raise AssertionError("get_trip_departure_forecasts() called even though no forecasts are available!")
        return self._get_trip_forecasts("out", t0, t1, aggregation_level, scale = scale)
    def get_zone_correlation_matrix(self):
        """This method returns the zone correlation matrix for a given bandwidth (see PhD thesis of Flo) for further
        details. Identity matrix if no correlation files were loaded.

        :return: N_z x N_z numpy matrix, where N_z is the number of forecast zones
        """
        return self.zone_corr_matrix
    def get_squared_correlation_matrix(self):
        """This method returns the squared zone correlation matrix for a given bandwidth (see RFFR Frontiers paper of
        Arslan and Flo or PhD thesis of Flo) for further details. Identity matrix if no correlation files were loaded.

        :return: N_z x N_z numpy matrix, where N_z is the number of forecast zones
        """
        return self.zone_sq_corr_matrix
    def get_centroid_node(self, zone_id):
        """Return the (single) centroid node of a zone. Not implemented yet; currently returns None."""
        # TODO # after ISTTT: get_centroid_node()
        pass
def get_parking_average_access_egress_times(self, o_node, d_node):
# TODO # after ISTTT: get_parking_average_access_egress_times()
t_access = 0
t_egress = 0
return t_access, t_egress
    def get_cordon_sections(self):
        """Return the cordon sections of the zone system. Not implemented yet; currently returns None."""
        # TODO # after ISTTT: get_cordon_sections()
        pass
    def get_aggregation_levels(self):
        """This method returns a dictionary of spatial aggregation levels.
        Not implemented yet; currently returns None.

        :return:
        """
        # TODO # after ISTTT: get_aggregation_levels()
        # is this necessary?
        pass
def draw_future_request_sample(self, t0, t1, request_attribute = None, attribute_value = None, scale = None): #request_type=PlanRequest # TODO # cant import PlanRequest because of circular dependency of files!
""" this function returns future request attributes drawn probabilistically from the forecast method for the intervall [t0, t1]
currently origin is drawn from get_trip_departure_forecasts an destination is drawn form get_trip_arrival_forecast (independently! # TODO #)
:param t0: start of forecast time horizon
:type t0: float
:param t1: end of forecast time horizon
:type t1: float
:param request_attribute: (not for this class) name of the attribute of the request class. if given, only returns requests with this attribute
:type request_attribute: str
:param attribute_value: (not for this class) if and request_attribute given: only returns future requests with this attribute value
:type attribute_value: type(request_attribute)
:param scale: scales forecast distribution by this values
:type scale: float
:return: list of (time, origin_node, destination_node) of future requests
:rtype: list of 3-tuples
"""
dep_fc = self.get_trip_departure_forecasts(t0, t1, scale = scale)
arr_fc = self.get_trip_arrival_forecasts(t0, t1, scale = scale)
N_dep = sum(dep_fc.values())
N_arr = sum(arr_fc.values())
if N_dep == 0 or N_arr == 0:
return []
dep_zones = [dep_z for dep_z, dep_val in dep_fc.items() if dep_val > 0]
dep_prob = [dep_val/N_dep for dep_val in dep_fc.values() if dep_val > 0]
arr_zones = [arr_z for arr_z, arr_val in arr_fc.items() if arr_val > 0]
arr_prob = [arr_val/N_arr for arr_val in arr_fc.values() if arr_val > 0]
future_list = []
tc = t0
#LOG.warning(f"draw future: dep {N_dep} arr {N_arr} from {t0} - {t1} with scale {scale}")
while True:
tc += np.random.exponential(scale=float(t1-t0)/N_dep)
if tc > t1:
break
o_zone = np.random.choice(dep_zones, p=dep_prob)
d_zone = np.random.choice(arr_zones, p=arr_prob)
o_node = self.get_random_centroid_node(o_zone)
d_node = self.get_random_centroid_node(d_zone)
future_list.append( (int(tc), o_node, d_node) )
#LOG.warning(f"future set: {len(future_list)} | {future_list}")
return future_list | 47.25 | 213 | 0.619839 |
95fbca25fa0457198ed6712ec4385ebc9f0f3a35 | 3,105 | py | Python | tools/funlines.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 1,160 | 2015-05-02T15:13:20.000Z | 2022-03-31T20:04:28.000Z | tools/funlines.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 19 | 2015-04-20T13:47:00.000Z | 2021-07-07T13:00:42.000Z | tools/funlines.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 257 | 2015-04-01T21:42:33.000Z | 2022-03-10T11:57:51.000Z | from __future__ import print_function
import os, sys, pickle, subprocess
try:
    from argparse import ArgumentParser
except ImportError:
    # FIX: narrowed from a bare "except:"; argparse entered the stdlib in
    # Python 2.7 -> give actionable guidance, then re-raise the original error.
    print("Failed to import module 'argparse'. Upgrade to Python 2.7, copy argparse.py to this directory or try 'apt-get install python-argparse'")
    raise

# Command line: -l/--left and -r/--right point at two pickled dumps to compare.
parser = ArgumentParser()
parser.add_argument("-l", "--left", required=True)
parser.add_argument("-r", "--right", required=True)
args = parser.parse_args()
# Load both pickled {key -> list-of-source-lines} dumps.
with open(args.left, "rb") as forig:
    with open(args.right, "rb") as fthen:
        orig = pickle.load(forig)
        then = pickle.load(fthen)
sk_orig = sorted(orig.keys())
sk_then = sorted(then.keys())
# Report keys that only exist on one side.
for key in sk_orig:
    if not key in sk_then:
        print("Missing expected key: %s" % key)
for key in sk_then:
    if not key in sk_orig:
        print("Unexpected key found: %s" % key)
# Recreate the output directory for the generated patch files.
subprocess.check_call(["rm", "-r", "-f", "/tmp/diffs"])
subprocess.check_call(["mkdir", "/tmp/diffs"])
# Diff each key present on both sides and write non-trivial diffs to /tmp/diffs.
for key in sk_orig:
    if not key in sk_then:
        continue
    lines_orig = orig[key]
    # sanitize lines_orig
    def san(l):
        l = l.replace("raise_python_stl_bad_alloc", "__raise_ba")
        l = l.replace("raise_python_out_of_range_exception", "__raise_oor")
        l = l.replace("raise_python_stl_exception", "__raise_e")
        l = l.replace("raise_python_swig_director_exception", "__raise_de")
        l = l.replace("raise_python_unknown_exception", "__raise_u")
        return l
    # NOTE(review): under Python 3, map() returns an iterator, so the "!=" below
    # would always be True and writing str to a "wb" file would fail; this
    # script appears to target Python 2 -- confirm before porting.
    lines_orig = map(san, lines_orig)
    lines_then = then[key]
    if lines_orig != lines_then:
        left_path = "/tmp/left_%s" % key
        right_path = "/tmp/right_%s" % key
        with open(left_path, "wb") as fleft:
            fleft.write("\n".join(lines_orig))
        with open(right_path, "wb") as fright:
            fright.write("\n".join(lines_then))
        patch_path = "/tmp/diffs/%s.patch" % key
        with open(patch_path, "wb") as fout:
            # -w: ignore whitespace differences
            subprocess.call(["diff", "-u", "-w", left_path, right_path], stdout=fout)
        # Now, let's see if the patch isn't trivial/acceptable. If it is,
        # then we just delete that file
        trivial_crap = [
            """- {
- Py_INCREF(Py_None);
- resultobj = Py_None;
- }
+ resultobj = SWIG_Py_Void();""",
            """- result = (bool)__init_hexrays_plugin(arg1);
+ result = (bool)py_init_hexrays_plugin(arg1);""",
            """- result = (bool)__install_hexrays_callback(arg1);
+ result = (bool)py_install_hexrays_callback(arg1);""",
            """- __add_custom_viewer_popup_item(arg1,(char const *)arg2,(char const *)arg3,arg4);
+ py_add_custom_viewer_popup_item(arg1,(char const *)arg2,(char const *)arg3,arg4);""",
        ]
        with open(patch_path, "rb") as fin:
            lines = fin.readlines()
        # Keep only the changed lines of the unified diff (skip the ---/+++ header).
        diff_lines = filter(lambda l: (l.startswith("-") and not l.startswith("---")) or (l.startswith("+") and not l.startswith("+++")), lines)
        diff_clob = "".join(diff_lines)
        # print "\"%s\"" % diff_clob
        if diff_clob.strip() in trivial_crap:
            # print "Removing trivial patch: %s" % patch_path
            os.remove(patch_path)
| 35.689655 | 145 | 0.631884 |
ea227b603c873f5b64f00618d284137e86f09d6d | 987 | py | Python | libs/bugreport/test_bugreport.py | hpagseddy/ZPUI | b82819e523987639c2dfab417f9895d7cd7ce049 | [
"Apache-2.0",
"MIT"
] | null | null | null | libs/bugreport/test_bugreport.py | hpagseddy/ZPUI | b82819e523987639c2dfab417f9895d7cd7ce049 | [
"Apache-2.0",
"MIT"
] | 2 | 2020-01-17T00:44:53.000Z | 2020-01-19T21:10:48.000Z | libs/bugreport/test_bugreport.py | hpagseddy/ZPUI | b82819e523987639c2dfab417f9895d7cd7ce049 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-01-14T22:44:27.000Z | 2020-01-14T22:44:27.000Z | import os
import zipfile
import unittest
from mock import Mock
from bugreport import BugReport
class TestBugReport(unittest.TestCase):
def test_workflow_without_send(self):
br = BugReport("test.zip")
br.add_dir_or_file("__init__.py")
br.add_dir_or_file("resources/")
br.add_text("print('Hello')", "main.py")
# Let's test if the resulting file is a ZIP file
br.zip.close()
br.zip_contents.seek(0)
assert(zipfile.is_zipfile(br.zip_contents))
# The file *might* be massive if we've packed the entire ZPUI directory by some chance
del br.zip_contents
del br.zip
def test_save_in(self):
br = BugReport("test.zip")
br.add_dir_or_file("__init__.py")
br.store_in("/tmp")
assert(os.path.isfile("/tmp/test.zip"))
# Test if the resulting file is a ZIP file
assert(zipfile.is_zipfile("/tmp/test.zip"))
if __name__ == "__main__":
unittest.main()
| 29.029412 | 94 | 0.647416 |
08d9c9f87563dff46b853a4e0fcb3d1aeeabc28b | 24,354 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_capacity_reservation_groups_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_capacity_reservation_groups_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_capacity_reservation_groups_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CapacityReservationGroupsOperations:
"""CapacityReservationGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers for later requests."""
        # pipeline client used to issue HTTP requests
        self._client = client
        # msrest serializer / deserializer pair for request and response bodies
        self._serialize = serializer
        self._deserialize = deserializer
        # service client configuration (provides e.g. subscription_id)
        self._config = config
async def create_or_update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: "_models.CapacityReservationGroup",
**kwargs: Any
) -> "_models.CapacityReservationGroup":
"""The operation to create or update a capacity reservation group. When updating a capacity
reservation group, only tags may be modified. Please refer to
https://aka.ms/CapacityReservation for more details.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Create capacity reservation Group.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CapacityReservationGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'capacityReservationGroupName': self._serialize.url("capacity_reservation_group_name", capacity_reservation_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CapacityReservationGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CapacityReservationGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CapacityReservationGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'} # type: ignore
async def update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: "_models.CapacityReservationGroupUpdate",
**kwargs: Any
) -> "_models.CapacityReservationGroup":
"""The operation to update a capacity reservation group. When updating a capacity reservation
group, only tags may be modified.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Update capacity reservation Group operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroupUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CapacityReservationGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'capacityReservationGroupName': self._serialize.url("capacity_reservation_group_name", capacity_reservation_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CapacityReservationGroupUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CapacityReservationGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'} # type: ignore
    async def delete(
        self,
        resource_group_name: str,
        capacity_reservation_group_name: str,
        **kwargs: Any
    ) -> None:
        """The operation to delete a capacity reservation group. This operation is allowed only if all the
        associated resources are disassociated from the reservation group and all capacity reservations
        under the reservation group have also been deleted. Please refer to
        https://aka.ms/CapacityReservation for more details.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param capacity_reservation_group_name: The name of the capacity reservation group.
        :type capacity_reservation_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map well-known HTTP failures onto azure-core exception types; callers
        # may extend the mapping via kwargs['error_map'].
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01"
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'capacityReservationGroupName': self._serialize.url("capacity_reservation_group_name", capacity_reservation_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 and 204 are the documented success codes; anything else is an error.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
expand: Optional[Union[str, "_models.CapacityReservationGroupInstanceViewTypes"]] = None,
**kwargs: Any
) -> "_models.CapacityReservationGroup":
"""The operation that retrieves information about a capacity reservation group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group.
:type capacity_reservation_group_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' will retrieve
the list of instance views of the capacity reservations under the capacity reservation group
which is a snapshot of the runtime properties of a capacity reservation that is managed by the
platform and can change outside of control plane operations.
:type expand: str or ~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroupInstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CapacityReservationGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'capacityReservationGroupName': self._serialize.url("capacity_reservation_group_name", capacity_reservation_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CapacityReservationGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        expand: Optional[Union[str, "_models.ExpandTypesForGetCapacityReservationGroups"]] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.CapacityReservationGroupListResult"]:
        """Lists all of the capacity reservation groups in the specified resource group. Use the nextLink
        property in the response to get the next page of capacity reservation groups.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param expand: The expand expression to apply on the operation. Based on the expand param(s)
         specified we return Virtual Machine or ScaleSet VM Instance or both resource Ids which are
         associated to capacity reservation group in the response.
        :type expand: str or ~azure.mgmt.compute.v2021_04_01.models.ExpandTypesForGetCapacityReservationGroups
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CapacityReservationGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CapacityReservationGroupListResult"]
        # Map well-known HTTP failures onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request (from the URL template) or a
            # follow-up request to the service-provided nextLink URL.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # nextLink already carries its own query string; send it verbatim.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize('CapacityReservationGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only HTTP 200 is a valid paging response.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups'}  # type: ignore
    def list_by_subscription(
        self,
        expand: Optional[Union[str, "_models.ExpandTypesForGetCapacityReservationGroups"]] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.CapacityReservationGroupListResult"]:
        """Lists all of the capacity reservation groups in the subscription. Use the nextLink property in
        the response to get the next page of capacity reservation groups.

        :param expand: The expand expression to apply on the operation. Based on the expand param(s)
         specified we return Virtual Machine or ScaleSet VM Instance or both resource Ids which are
         associated to capacity reservation group in the response.
        :type expand: str or ~azure.mgmt.compute.v2021_04_01.models.ExpandTypesForGetCapacityReservationGroups
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CapacityReservationGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.CapacityReservationGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CapacityReservationGroupListResult"]
        # Map well-known HTTP failures onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: format the URL template; later pages: hit nextLink as-is.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_subscription.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # nextLink already carries its own query string; send it verbatim.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize('CapacityReservationGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only HTTP 200 is a valid paging response.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups'}  # type: ignore
| 53.174672 | 210 | 0.686499 |
5360ef209c77ca9b6906d3ea772c188fb46825df | 7,229 | py | Python | cs-config/cs_config/constants.py | grantseiter/Tax-Brain | 180063a193ff8cb0b56349878110066b012dde6d | [
"MIT"
] | 10 | 2019-05-07T12:04:58.000Z | 2021-05-18T13:14:45.000Z | cs-config/cs_config/constants.py | grantseiter/Tax-Brain | 180063a193ff8cb0b56349878110066b012dde6d | [
"MIT"
] | 113 | 2019-03-05T16:13:32.000Z | 2021-11-10T13:53:32.000Z | cs-config/cs_config/constants.py | grantseiter/Tax-Brain | 180063a193ff8cb0b56349878110066b012dde6d | [
"MIT"
] | 14 | 2019-01-24T18:39:11.000Z | 2021-03-17T17:00:51.000Z | # constants used by the new compconfig
import paramtools
from marshmallow import fields, Schema
from datetime import datetime
from taxbrain import TaxBrain
# Schema describing the dimension labels (and extra members) that policy
# parameters carry when rendered for Compute Studio.  Year choices span
# TaxBrain's budget window inclusively (note the + 1 on range()).
POLICY_SCHEMA = {
    "labels": {
        "year": {
            "type": "int",
            "validators": {
                "choice": {
                    "choices": [
                        yr for yr in range(TaxBrain.FIRST_BUDGET_YEAR,
                                           TaxBrain.LAST_BUDGET_YEAR + 1)
                    ]
                }
            }
        },
        # Filing status (single, married-joint, married-separate, ...).
        "MARS": {
            "type": "str",
            "validators": {"choice": {"choices": ["single", "mjoint",
                                                  "mseparate", "headhh",
                                                  "widow"]}}
        },
        # Itemized-deduction category.
        "idedtype": {
            "type": "str",
            "validators": {"choice": {"choices": ["med", "sltx", "retx", "cas",
                                                  "misc", "int", "char"]}}
        },
        # Number of EITC-qualifying children.
        "EIC": {
            "type": "str",
            "validators": {"choice": {"choices": ["0kids", "1kid",
                                                  "2kids", "3+kids"]}}
        },
        "data_source": {
            "type": "str",
            "validators": {"choice": {"choices": ["PUF", "CPS", "other"]}}
        }
    },
    # Extra per-parameter members beyond the paramtools defaults.
    "additional_members": {
        "section_1": {"type": "str"},
        "section_2": {"type": "str"},
        "start_year": {"type": "int"},
        "checkbox": {"type": "bool"}
    }
}
# Row keys used in the aggregate-liability tables.
AGG_ROW_NAMES = ['ind_tax', 'payroll_tax', 'combined_tax',
                 'benefit_cost_total']

# Human-readable titles for each result table, keyed by table ID.
# 'diff_*' tables show base-vs-user differences, 'dist*' tables show
# weighted totals, and 'aggr_*' tables show total liabilities by year.
RESULTS_TABLE_TITLES = {
    'diff_comb_xbin': ('Combined Payroll and Individual Income Tax: Difference'
                       ' between Base and User plans by expanded income bin'),
    'diff_comb_xdec': ('Combined Payroll and Individual Income Tax: Difference'
                       ' between Base and User plans by expanded income '
                       'decile'),
    'diff_itax_xbin': ('Individual Income Tax: Difference between Base and '
                       'User plans by expanded income bin'),
    'diff_itax_xdec': ('Individual Income Tax: Difference between Base and '
                       'User plans by expanded income decile'),
    'diff_ptax_xbin': ('Payroll Tax: Difference between Base and User plans '
                       'by expanded income bin'),
    'diff_ptax_xdec': ('Payroll Tax: Difference between Base and User plans '
                       'by expanded income decile'),
    'dist1_xbin': 'Base plan tax vars, weighted total by expanded income bin',
    'dist1_xdec': ('Base plan tax vars, weighted total by expanded income '
                   'decile'),
    'dist2_xbin': 'User plan tax vars, weighted total by expanded income bin',
    'dist2_xdec': ('User plan tax vars, weighted total by expanded income '
                   'decile'),
    'aggr_1': 'Total Liabilities Baseline by Calendar Year (Billions)',
    'aggr_d': 'Total Liabilities Change by Calendar Year (Billions)',
    'aggr_2': 'Total Liabilities Reform by Calendar Year (Billions)'}

# Machine-readable tags describing each table (used by the front end to
# classify tables by type, tax kind, law, and grouping).
RESULTS_TABLE_TAGS = {
    # diff tables
    'diff_comb_xbin': {'table_type': 'diff', 'tax_type': 'combined',
                       'grouping': 'bins'},
    'diff_comb_xdec': {'table_type': 'diff', 'tax_type': 'combined',
                       'grouping': 'deciles'},
    'diff_itax_xbin': {'table_type': 'diff', 'tax_type': 'ind_income',
                       'grouping': 'bins'},
    'diff_itax_xdec': {'table_type': 'diff', 'tax_type': 'ind_income',
                       'grouping': 'deciles'},
    'diff_ptax_xbin': {'table_type': 'diff', 'tax_type': 'payroll',
                       'grouping': 'bins'},
    'diff_ptax_xdec': {'table_type': 'diff', 'tax_type': 'payroll',
                       'grouping': 'deciles'},
    # dist tables
    'dist1_xbin': {'table_type': 'dist', 'law': 'current',
                   'grouping': 'bins'},
    'dist1_xdec': {'table_type': 'dist', 'law': 'current',
                   'grouping': 'deciles'},
    'dist2_xbin': {'table_type': 'dist', 'law': 'reform',
                   'grouping': 'bins'},
    'dist2_xdec': {'table_type': 'dist', 'law': 'reform',
                   'grouping': 'deciles'},
    # aggr tables
    'aggr_1': {'law': 'current'},
    'aggr_d': {'law': 'change'},
    'aggr_2': {'law': 'reform'},
    # gdp elaticity model table
    'gdp_effect': {'default': 'gdp_elast'}
}

# Labels for the totals rows of the aggregate tables (keys match AGG_ROW_NAMES).
RESULTS_TOTAL_ROW_KEY_LABELS = {
    'ind_tax': 'Individual Income Tax Liability',
    'payroll_tax': 'Payroll Tax Liability',
    'combined_tax': ('Combined Payroll and Individual Income Tax ' +
                     'Liability'),
    'benefit_cost_total': 'Total Benefits Spending',
}

# Output columns that should be formatted as dollar amounts.
MONEY_VARS = {
    "AGI", "Standard Deduction", "Itemized Deduction",
    "Personal Exemption", "Taxable Income", "Regular Tax", "AMTI",
    "Tax before Credits", "Non-refundable Credits", "Other Taxes",
    "Refundable Credits", "Individual Income Tax Liabilities",
    "Payroll Tax Liablities",
    "Combined Payroll and Individual Income Tax Liabilities",
    "Universal Basic Income", "Total Cost of Benefits",
    "Consumption Value of Benefits", "Expanded Income",
    "After-Tax Expanded Income", "Average Tax Change",
    "Total Tax Difference"
}
class MetaParameters(paramtools.Parameters):
    """Run-level meta parameters (start year, data source, sample size)."""
    array_first = True
    defaults = {
        "year": {
            "title": "Start Year",
            "description": "Year for parameters.",
            "type": "int",
            # Default to the current year, capped at the last budget year.
            "value": min(datetime.now().year, TaxBrain.LAST_BUDGET_YEAR),
            "validators": {
                # CPS-based data begins in 2014; PUF-based data begins in 2013.
                # Both ranges are inclusive of LAST_BUDGET_YEAR.
                "when": {
                    "param": "data_source",
                    "is": "CPS",
                    "then": {"range": {"min": 2014, "max": TaxBrain.LAST_BUDGET_YEAR}},
                    "otherwise": {"range": {"min": 2013, "max": TaxBrain.LAST_BUDGET_YEAR}},
                }
            },
        },
        "data_source": {
            "title": "Data Source",
            "description": "Data source can be PUF or CPS",
            "type": "str",
            "value": "PUF",
            "validators": {"choice": {"choices": ["PUF", "CPS"]}}
        },
        "use_full_sample": {
            "title": "Use Full Sample",
            "description": "Use entire data set or a 2% sample.",
            "type": "bool",
            "value": True,
            "validators": {"choice": {"choices": [True, False]}}
        }
    }

    def dump(self, *args, **kwargs):
        """
        This method extends the default ParamTools dump method by swapping the
        when validator for a choice validator. This is required because C/S does
        not yet implement the when validator.
        """
        data = super().dump(*args, **kwargs)
        # Mirror the inclusive [min, LAST_BUDGET_YEAR] ranges declared in the
        # "when" validator above.  Note the + 1: range() excludes its stop
        # value, and the previous code dropped LAST_BUDGET_YEAR entirely --
        # which would have rejected the default `year` value whenever the
        # current year reaches the last budget year.
        first_year = 2014 if self.data_source == "CPS" else 2013
        data["year"]["validators"] = {
            "choice": {
                "choices": list(range(first_year, TaxBrain.LAST_BUDGET_YEAR + 1))
            }
        }
        return data
| 39.71978 | 92 | 0.511551 |
f6ba300c1f4a115e3d2973f12bb1b89014e2a68e | 4,805 | py | Python | src/main_detection.py | go-toh/MixBerryPi | 62a148534077871bea105cf22579adc240770ef5 | [
"Apache-2.0"
] | 4 | 2019-07-10T12:52:42.000Z | 2019-08-08T14:34:40.000Z | src/main_detection.py | go-toh/MixBerryPi | 62a148534077871bea105cf22579adc240770ef5 | [
"Apache-2.0"
] | null | null | null | src/main_detection.py | go-toh/MixBerryPi | 62a148534077871bea105cf22579adc240770ef5 | [
"Apache-2.0"
] | 1 | 2019-08-08T14:35:08.000Z | 2019-08-08T14:35:08.000Z | from tflite_runtime.interpreter import Interpreter
from slacker import Slacker
import picamera
import numpy as np
import cv2
import io
import time
import datetime
import threading
def wait_input():
    """Block until the user presses Enter, then clear the global run flag.

    Runs on a background thread; the main capture loop polls ``key_flag``
    and exits once this sets it to False.
    """
    global key_flag
    input()
    key_flag = False
def set_interpreter(interpreter):
    """Prepare the TFLite interpreter: use 4 CPU threads and allocate tensors."""
    # NOTE(review): set_num_threads is specific to older tflite_runtime
    # releases -- confirm the installed runtime still exposes it (newer
    # versions take num_threads in the Interpreter constructor instead).
    interpreter.set_num_threads(4)
    interpreter.allocate_tensors()
def set_input_tensor(interpreter, image):
    """Copy *image* into the interpreter's first (single) input tensor."""
    tensor_index = interpreter.get_input_details()[0]['index']
    input_tensor = interpreter.tensor(tensor_index)()[0]
    # In-place write; assumes image already matches the tensor's HxW
    # (callers resize to 300x300) and dtype -- TODO confirm quantized input.
    input_tensor[:, :] = image
def get_output_tensor(interpreter, index):
    """Return the interpreter's output tensor *index* with size-1 dims squeezed."""
    output_details = interpreter.get_output_details()[index]
    tensor = np.squeeze(interpreter.get_tensor(output_details['index']))
    return tensor
# Run one object-detection inference and pick out the strongest person box.
def detect_objects(interpreter, image):
    """Run SSD inference on *image* and return the best 'person' detection.

    :param interpreter: prepared TFLite interpreter (see set_interpreter)
    :param image: frame already resized to the model's input size
    :return: (found, box) -- found is True iff some detection with class 0
             scored >= 0.5; box is that detection's bounding box row from
             the model's first output, or [] when nothing qualified.
    """
    set_input_tensor(interpreter, image)
    interpreter.invoke()

    # SSD postprocess outputs: boxes, classes, scores, detection count.
    boxes = get_output_tensor(interpreter, 0)
    classes = get_output_tensor(interpreter, 1)
    scores = get_output_tensor(interpreter, 2)
    count = int(get_output_tensor(interpreter, 3))

    # Indices of confident person detections (class 0, score >= 0.5).
    person_idx = [i for i in range(count)
                  if scores[i] >= 0.5 and classes[i] == 0]
    if not person_idx:
        return False, []

    # BUGFIX: the original took the argmax position within the *filtered*
    # score list and used it to index `boxes`, which points at the wrong
    # detection whenever any non-person precedes the person in the outputs.
    # Index with the original detection index instead.
    best = max(person_idx, key=lambda i: scores[i])
    return True, boxes[best]
def person_position(result, width):
    """Convert a normalized detection box to pixel x-extents, clamped at 0.

    :param result: detection box as (ymin, xmin, ymax, xmax) with
                   coordinates normalized to [0, 1] (y values are ignored)
    :param width: frame width in pixels
    :return: (left, right) pixel columns, each truncated to int and
             clamped so it is never negative
    """
    _, xmin, _, xmax = result
    # max(0, ...) replaces the old conditional and avoids computing the
    # scaled coordinate twice.
    left = max(0, int(xmin * width))
    right = max(0, int(xmax * width))
    return left, right
def get_center_line(left, right):
    """Return the midpoint column between *left* and *right* (floor division)."""
    span_total = left + right
    return span_total // 2
def image_cap(width, height, count):
    """Capture one still at (width, height) to image/<count>.jpg.

    Uses the module-global `camera`; afterwards restores the low-res
    streaming resolution used by the detection loop.
    """
    camera.resolution = (width, height)
    filepath = f"image/{count}.jpg"
    camera.capture(filepath)
    camera.resolution = (480, 270)
    return filepath
# Upload an image to Slack (originally driven together with Hubot).
def upload_image(file):
    """Upload *file* to the configured Slack channel via the Slacker API."""
    # API token for your own workspace.
    # NOTE(review): hard-coded credential placeholder -- load this from an
    # environment variable or config file; never commit a real token.
    token = 'api-token'
    # Target channel (any channel name works).
    channel = 'channel'
    upload_file = file
    slacker = Slacker(token)
    slacker.files.upload(file_=upload_file, channels=channel)
if __name__ == '__main__':
    # Load the quantized SSD-MobileNet detector and prepare the interpreter.
    interpreter = Interpreter("model/mobilenet_ssd_v2_coco_quant_postprocess.tflite")
    set_interpreter(interpreter)
    with picamera.PiCamera() as camera:
        # Low-resolution stream used for detection; stills are re-captured
        # at full resolution by image_cap().
        image_width, image_height = 480, 270
        camera.resolution = (image_width, image_height)
        camera.framerate = 15
        camera.shutter_speed = 30000
        camera.iso = 800
        stream = io.BytesIO()

        key_flag = True             # cleared by wait_input() on Enter
        person_detect_flag = False  # True while tracking one detected person
        push_count = 0              # number of stills captured so far
        filepath_array = []         # stills to upload to Slack at shutdown

        # Background thread that waits for Enter and stops the loop.
        th = threading.Thread(target=wait_input)
        th.start()

        while key_flag:
            # Grab one JPEG frame into the in-memory stream and decode it.
            camera.capture(stream, format='jpeg', use_video_port=True)
            frame = np.frombuffer(stream.getvalue(), dtype=np.uint8)
            getimage = cv2.imdecode(frame, 1)
            inputimage = cv2.resize(getimage, (300, 300))

            result_flag, result_box = detect_objects(interpreter, inputimage)
            if result_flag:
                left_line, right_line = person_position(result_box, image_width)
                center_line = get_center_line(left_line, right_line)
                print(left_line, right_line, center_line)
                if not person_detect_flag:
                    # First sighting: remember the person's initial extent.
                    save_left_line, save_right_line = left_line, right_line
                    person_detect_flag = True
                # Capture a still once the person's center leaves the band
                # where they were first seen, then re-arm the tracker.
                if not save_left_line < center_line < save_right_line:
                    push_count += 1
                    print(push_count)
                    file_path = image_cap(1920, 1080, push_count)
                    filepath_array.append(file_path)
                    person_detect_flag = False
            else:
                print('Not detection')

            # Reset the stream for the next frame.  BUGFIX: seek(0) must come
            # BEFORE truncate() -- truncating at the current (end-of-data)
            # position is a no-op and left stale bytes behind smaller frames.
            stream.seek(0)
            stream.truncate()

        th.join()
        # Upload every captured still once the loop has been stopped.
        for file in filepath_array:
            upload_image(file)
| 31.611842 | 108 | 0.627055 |
6a275942a4026d7e2c119f4344ebee829a2820cf | 2,878 | py | Python | setup.py | BOBO1997/qiskit-ignis | 824372e18a42a735bbef37cd8ee61bc5d5821f9f | [
"Apache-2.0"
] | null | null | null | setup.py | BOBO1997/qiskit-ignis | 824372e18a42a735bbef37cd8ee61bc5d5821f9f | [
"Apache-2.0"
] | null | null | null | setup.py | BOBO1997/qiskit-ignis | 824372e18a42a735bbef37cd8ee61bc5d5821f9f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import os
import inspect
import setuptools
import sys
# Runtime dependencies required to install qiskit-ignis.
requirements = [
    "numpy>=1.13",
    "qiskit-terra>=0.13.0",
    "networkx>=2.2",
    "scipy>=0.19,!=0.19.1",
    "setuptools>=40.1.0",
    "scikit-learn>=0.17",
]

# find_namespace_packages (PEP 420 support) only exists in setuptools >= 40.1;
# abort the install with a clear message on older versions.
if not hasattr(setuptools,
               'find_namespace_packages') or not inspect.ismethod(
                   setuptools.find_namespace_packages):
    print("Your setuptools version:'{}' does not support PEP 420 "
          "(find_namespace_packages). Upgrade it to version >='40.1.0' and "
          "repeat install.".format(setuptools.__version__))
    sys.exit(1)

# Read the package version from qiskit/ignis/VERSION.txt (the single source
# of truth for the version string).
version_path = os.path.abspath(
    os.path.join(
        os.path.join(
            os.path.join(os.path.dirname(__file__), 'qiskit'), 'ignis'),
        'VERSION.txt'))
with open(version_path, 'r') as fd:
    version = fd.read().rstrip()

# Use the repository README as the long description shown on PyPI.
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'README.md')
with open(README_PATH) as readme_file:
    README = readme_file.read()

setuptools.setup(
    name="qiskit-ignis",
    version=version,
    description="Qiskit tools for quantum information science",
    long_description=README,
    long_description_content_type='text/markdown',
    url="https://github.com/Qiskit/qiskit-ignis",
    author="Qiskit Development Team",
    author_email="hello@qiskit.org",
    license="Apache 2.0",
    classifiers=[
        "Environment :: Console",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Scientific/Engineering",
    ],
    keywords="qiskit sdk quantum",
    # PEP 420 namespace packages: everything under qiskit/ except tests.
    packages=setuptools.find_namespace_packages(exclude=['test*']),
    extras_require={
        'visualization': ['matplotlib>=2.1'],
        'cvx': ['cvxpy>=1.0.15'],
        'jit': ['numba'],
    },
    install_requires=requirements,
    include_package_data=True,
    python_requires=">=3.6",
    zip_safe=False
)
| 31.626374 | 77 | 0.63968 |
c8a46c2f76f3bafac3c9a46acbe610d313df6bab | 7,758 | py | Python | fboss/system_tests/system_tests.py | flarnie/fboss | a747c7b778d5259d336dd30e01067291926e267b | [
"BSD-3-Clause"
] | null | null | null | fboss/system_tests/system_tests.py | flarnie/fboss | a747c7b778d5259d336dd30e01067291926e267b | [
"BSD-3-Clause"
] | null | null | null | fboss/system_tests/system_tests.py | flarnie/fboss | a747c7b778d5259d336dd30e01067291926e267b | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import importlib
import logging
import os
import sys
import unittest
# Tags the user asked for on the command line (consulted by test_tags()).
user_requested_tags = []

# Fallback values for every command-line option; read by
# generate_default_test_argparse() as argparse defaults.
Defaults = {
    "test_dirs": None,
    "config": 'test_topologies/example_topology.py',
    "console_log_level": logging.INFO,  # very terse, for console log_level
    "file_log_level": logging.DEBUG,    # result/test-foo.log, more verbose
    "log_dir": "results",
    "log_file": "{dir}/result-{test}.log",
    "test_topology": None,
    "min_hosts": 2,
    "tags": user_requested_tags
}
def _test_has_user_requested_tag(test_tags):
    """Return True if any tag in *test_tags* was requested via --tags.

    Reads the module-level ``user_requested_tags`` list.
    """
    # any() replaces the manual loop-with-early-return.
    return any(tag in user_requested_tags for tag in test_tags)
def test_tags(*args):
    """Class decorator marking a test class as runnable for the given tags.

    Usage: ``@test_tags("qsfp", "port")``.  The class is always returned
    unchanged except that ``valid_tags = True`` is set when at least one of
    the given tags was requested on the command line.
    """
    def fn(cls):
        if _test_has_user_requested_tag(list(args)):
            cls.valid_tags = True
        return cls
    return fn
def generate_default_test_argparse(**kwargs):
    """Build the suite's ArgumentParser with every default option.

    Kept as its own function so other programs (e.g., internal automation)
    can start from these arguments and extend the parser before parsing.
    """
    parser = argparse.ArgumentParser(description='FBOSS System Tests', **kwargs)
    parser.add_argument('--test_dirs', default=Defaults['test_dirs'],
                        action='append')
    # Plain options that just carry a default value from Defaults.
    for option in ('config', 'log_dir', 'log_file', 'min_hosts',
                   'console_log_level', 'file_log_level'):
        parser.add_argument('--' + option, default=Defaults[option])
    parser.add_argument('--tags',
                        help="Provide list of test tags, default is all tests "
                             "Example tags qsfp, port etc",
                        default=Defaults['tags'])
    return parser
def generate_default_test_options(**kwargs):
    """Parse the default command-line arguments into the suite's global
    options object (conveniently formed from the argparse structure).
    """
    parser = generate_default_test_argparse(**kwargs)
    return parser.parse_args()
def dynamic_generate_test_topology(options):
    """ Read test topology from file, import it, and return
        the test_topology specified in generate_test_topology()

        This particular magic requires Python 3.5+
    """
    # Memoize: reuse a topology already attached to options.
    if hasattr(options, 'test_topology'):
        return options.test_topology
    # Import options.config as a throw-away module named "config" and call
    # its generate_test_topology() factory.
    spec = importlib.util.spec_from_file_location("config", options.config)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    options.test_topology = module.generate_test_topology()
    return options.test_topology
def setup_logging(options):
    """Ensure the log directory exists and a console logger is attached.

    Creates ``options.log_dir`` on disk if it is set but missing, and lazily
    attaches the "__main__" console logger as ``options.log``.
    """
    log_dir = options.log_dir
    if log_dir is not None and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if hasattr(options, "log"):
        return  # console logger already configured
    # The console log is distinct from the per-test file log.
    console = logging.getLogger("__main__")
    console.setLevel(options.console_log_level)
    options.log = console
class FbossBaseSystemTest(unittest.TestCase):
    """ This Class is the base class of all Fboss System Tests """
    _format = "%(asctime)s.%(msecs)03d %(name)-10s: %(levelname)-8s: %(message)s"
    _datefmt = "%H:%M:%S"

    def setUp(self):
        """Validate injected options and (re)open this test class's log file.

        Requires run_tests() to have injected ``self.options`` (including a
        non-None ``options.test_topology``) before the test runs.

        :raises Exception: if options / test_topology were never injected.
        """
        if self.options is None:
            raise Exception("options not set - did you call run_tests()?")
        if (not hasattr(self.options, 'test_topology') or
                self.options.test_topology is None):
            raise Exception("options.test_topology not set - " +
                            "did you call run_tests()?")
        self.test_topology = self.options.test_topology  # save typing
        my_name = str(self.__class__.__name__)
        self.log = logging.getLogger(my_name)
        self.log.setLevel(logging.DEBUG)  # logging controlled by handlers
        logfile_opts = {'test': my_name, 'dir': self.options.log_dir}
        logfile = self.options.log_file.format(**logfile_opts)
        # Close old log files. Iterate over a *copy* of the handler list:
        # removeHandler() mutates self.log.handlers, and mutating the list
        # while iterating it skips entries, so the original code leaked
        # handlers (and their open files) whenever more than one was
        # registered.
        for handler in list(self.log.handlers):
            self.log.removeHandler(handler)
            handler.close()
        # open one unique for each test class
        handler = logging.FileHandler(logfile, mode='w+')
        handler.setLevel(self.options.file_log_level)
        handler.setFormatter(logging.Formatter(self._format, self._datefmt))
        self.log.addHandler(handler)
class TestTopologyValidation(FbossBaseSystemTest):
    """Smoke test scheduled first: verifies the switch and all hosts in the
    test topology are reachable before any other test runs."""

    def test_topology_sanity(self):
        self.log.info("Testing connection to switch")
        switch_ok = self.test_topology.verify_switch()
        self.assertTrue(switch_ok)
        self.log.info("Testing connection to hosts")
        hosts_ok = self.test_topology.verify_hosts()
        self.assertTrue(hosts_ok)
def frob_options_into_tests(suite, options):
    """ Make sure 'options' is available as a class variable
    to all of the tests.
    This is a horrible hack, but saves a lot of typing.
    """
    for entry in suite._tests:
        if isinstance(entry, unittest.suite.TestSuite):
            # Nested suite: descend recursively.
            frob_options_into_tests(entry, options)
            continue
        entry.options = options
def add_interested_tests_to_test_suite(tests, suite):
    """Recursively copy tests into *suite*, honoring user-requested tags.

    A leaf test is added when it carries the ``valid_tags`` marker or when
    the user requested no tags at all; a leaf that failed to import is
    reported loudly rather than silently dropped.
    """
    if isinstance(tests, unittest.suite.TestSuite):
        for child in tests:
            add_interested_tests_to_test_suite(child, suite)
        return
    # Leaf test case: add when tagged appropriately or when no tag filter is active.
    if hasattr(tests, "valid_tags") or not user_requested_tags:
        suite.addTest(tests)
    # Edge case when user uses tag & there is an import error:
    # without this check the import error would be silently ignored.
    if isinstance(tests, unittest.loader._FailedTest):
        raise Exception("Failed to import tests: {}".format(tests._exception))
def run_tests(options):
    """ Run all of the tests as described in options
    :options : a dict of testing options, as described above
    :return: the unittest result object produced by TextTestRunner.run()
    """
    setup_logging(options)
    options.test_topology = dynamic_generate_test_topology(options)
    suite = unittest.TestSuite()
    # this test needs to run first
    suite.addTest(TestTopologyValidation('test_topology_sanity'))
    # Discover tests from every configured directory and filter them by tag.
    for directory in options.test_dirs:
        if not os.path.exists(directory):
            raise Exception("Specified test directory '%s' does not exist" %
                            directory)
        print("Loading tests from test_dir=%s" % directory)
        testsdir = unittest.TestLoader().discover(start_dir=directory,
                                                  pattern='*test*.py')
        add_interested_tests_to_test_suite(testsdir, suite)
    # Inject the options object into every collected test (see
    # frob_options_into_tests).
    frob_options_into_tests(suite, options)
    options.log.info("""
    ===================================================
    ================ STARTING TESTS ===================
    ===================================================
    """)
    ret = unittest.TextTestRunner(verbosity=2).run(suite)
    options.log.info("""
    ===================================================
    ================ ENDING TESTS   ===================
    ===================================================
    """)
    return ret
def main(args):
    """CLI entry point: parse arguments, run the suite, report exit status.

    :param args: argv-style list (currently unused; argparse reads sys.argv).
    :return: process exit status -- 0 when every test passed, 1 otherwise.
    """
    options_parser = generate_default_test_argparse()
    options = options_parser.parse_args()
    result = run_tests(options)
    # run_tests() returns a unittest result object; the original discarded it
    # and implicitly returned None, so ``sys.exit(main(...))`` always exited
    # with status 0 even when tests failed.
    return 0 if result.wasSuccessful() else 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 36.942857 | 85 | 0.644625 |
2865f0619154fc0a3b07a31ad519bde81890fc94 | 981 | py | Python | api/api/urls.py | HARSHIT-GUPTA-coder/OCDE | d1b4cc6ed0dde78149725be8e2809218f9c53bc5 | [
"MIT"
] | null | null | null | api/api/urls.py | HARSHIT-GUPTA-coder/OCDE | d1b4cc6ed0dde78149725be8e2809218f9c53bc5 | [
"MIT"
] | null | null | null | api/api/urls.py | HARSHIT-GUPTA-coder/OCDE | d1b4cc6ed0dde78149725be8e2809218f9c53bc5 | [
"MIT"
] | null | null | null | """api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from authentication import views
# URL routing table for the project.
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # REST endpoints from the individual apps, all mounted under /api/.
    path('api/', include('authentication.urls')),
    path('api/', include('fileHandling.urls')),
    path('api/', include('competing.urls')),
    # Code compilation endpoint (note: mounted at /compile, not under /api/).
    path('compile', include('compilation.urls')),
]
| 35.035714 | 77 | 0.698267 |
8e9f947553a42edc760b4071a19a43f2f0440f88 | 773 | py | Python | transkg/dataset/FB15K237/trainer.py | MobTgZhang/TransKG | 615d8addaa670c97c421bc00263edfcaad59d443 | [
"MIT"
] | 1 | 2022-03-12T13:56:24.000Z | 2022-03-12T13:56:24.000Z | transkg/dataset/FB15K237/trainer.py | MobTgZhang/TransKG | 615d8addaa670c97c421bc00263edfcaad59d443 | [
"MIT"
] | 1 | 2022-03-14T04:12:53.000Z | 2022-03-14T05:04:34.000Z | transkg/dataset/FB15K237/trainer.py | MobtgZhang/TransKG | 615d8addaa670c97c421bc00263edfcaad59d443 | [
"MIT"
] | null | null | null | import logging
import json
logger = logging.getLogger()
from ..trainer import Trainer
from .data import prepareDataloader
class FB15K237Trainer(Trainer):
    """Trainer specialization for the FB15K-237 knowledge-graph dataset."""

    def __init__(self, args):
        super(FB15K237Trainer, self).__init__(args)

    def prepareData(self):
        """Build train/valid dataloaders and load the entity/relation dicts.

        Populates ``self.train_loader``, ``self.valid_loader``,
        ``self.entityDict`` and ``self.relationDict``.
        """
        logger.info("INFO : Prepare dataloader.")
        self.train_loader = prepareDataloader(self.root_dir, "train", self.batch_size, self.shuffle,
                                              self.num_workers)
        self.valid_loader = prepareDataloader(self.root_dir, "valid", self.batch_size, self.shuffle,
                                              self.num_workers)
        # Use context managers so the dict files are closed promptly; the
        # original passed bare open() calls to json.load and leaked the
        # file handles.
        with open(self.ent_path, mode="r") as ent_file:
            self.entityDict = json.load(ent_file)
        with open(self.rel_path, mode="r") as rel_file:
            self.relationDict = json.load(rel_file)
| 42.944444 | 97 | 0.645537 |
5fbc59935fdbf61bb09c2d6f3302b5ac1f0f89ca | 11,435 | py | Python | src/python/pants/pantsd/service/scheduler_service.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | null | null | null | src/python/pants/pantsd/service/scheduler_service.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | null | null | null | src/python/pants/pantsd/service/scheduler_service.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import queue
import threading
from typing import List, Optional, Set
from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE
from pants.base.specs import Specs
from pants.engine.fs import PathGlobs, Snapshot
from pants.engine.rules import UnionMembership
from pants.goal.run_tracker import RunTracker
from pants.init.engine_initializer import LegacyGraphScheduler, LegacyGraphSession
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pants_service import PantsService
class SchedulerService(PantsService):
    """The pantsd scheduler service.

    This service holds an online Scheduler instance that is primed via watchman filesystem events.
    """

    # Maximum number of raw watchman events buffered between the watchman
    # callback thread and this service's run() loop.
    QUEUE_SIZE = 64

    def __init__(
        self,
        *,
        fs_event_service: FSEventService,
        legacy_graph_scheduler: LegacyGraphScheduler,
        build_root: str,
        invalidation_globs: List[str],
        pantsd_pidfile: Optional[str],
        union_membership: UnionMembership,
    ) -> None:
        """
        :param fs_event_service: An unstarted FSEventService instance for setting up filesystem event handlers.
        :param legacy_graph_scheduler: The LegacyGraphScheduler instance for graph construction.
        :param build_root: The current build root.
        :param invalidation_globs: A list of `globs` that when encountered in filesystem event
            subscriptions will tear down the daemon.
        :param pantsd_pidfile: The path to the pantsd pidfile for fs event monitoring.
        """
        super().__init__()
        self._fs_event_service = fs_event_service
        self._graph_helper = legacy_graph_scheduler
        self._invalidation_globs = invalidation_globs
        self._build_root = build_root
        self._pantsd_pidfile = pantsd_pidfile
        self._union_membership = union_membership
        self._scheduler = legacy_graph_scheduler.scheduler
        # This session is only used for checking whether any invalidation globs have been invalidated.
        # It is not involved with a build itself; just with deciding when we should restart pantsd.
        self._scheduler_session = self._scheduler.new_session(
            zipkin_trace_v2=False, build_id="scheduler_service_session",
        )
        self._logger = logging.getLogger(__name__)
        # Bounded queue of raw watchman events; filled by _enqueue_fs_event
        # (watchman thread), drained by _process_event_queue (run() loop).
        self._event_queue: queue.Queue = queue.Queue(maxsize=self.QUEUE_SIZE)
        # Set once the first watchman event has been observed.
        self._watchman_is_running = threading.Event()
        # Snapshot/files of the invalidation globs, captured eagerly in setup().
        self._invalidating_snapshot = None
        self._invalidating_files: Set[str] = set()
        # Notified whenever scheduler nodes were invalidated; used by --loop.
        self._loop_condition = LoopCondition()

    def _get_snapshot(self):
        """Returns a Snapshot of the input globs."""
        return self._scheduler_session.product_request(
            Snapshot, subjects=[PathGlobs(self._invalidation_globs)]
        )[0]

    def setup(self, services):
        """Service setup."""
        super().setup(services)
        # Register filesystem event handlers on an FSEventService instance.
        self._fs_event_service.register_all_files_handler(
            self._enqueue_fs_event, self._fs_event_service.PANTS_ALL_FILES_SUBSCRIPTION_NAME
        )
        # N.B. We compute the invalidating fileset eagerly at launch with an assumption that files
        # that exist at startup are the only ones that can affect the running daemon.
        if self._invalidation_globs:
            self._invalidating_snapshot = self._get_snapshot()
            self._invalidating_files = self._invalidating_snapshot.files
            self._logger.info("watching invalidating files: {}".format(self._invalidating_files))
        if self._pantsd_pidfile:
            self._fs_event_service.register_pidfile_handler(
                self._pantsd_pidfile, self._enqueue_fs_event
            )

    def _enqueue_fs_event(self, event):
        """Watchman filesystem event handler for BUILD/requirements.txt updates.
        Called via a thread.
        """
        self._logger.info(
            "enqueuing {} changes for subscription {}".format(
                len(event["files"]), event["subscription"]
            )
        )
        # May block if the queue is full; watchman delivery is throttled then.
        self._event_queue.put(event)

    def _maybe_invalidate_scheduler_batch(self):
        """Terminate the daemon if the invalidation-glob snapshot changed."""
        new_snapshot = self._get_snapshot()
        if (
            self._invalidating_snapshot
            and new_snapshot.directory_digest != self._invalidating_snapshot.directory_digest
        ):
            self._logger.critical(
                "saw file events covered by invalidation globs [{}], terminating the daemon.".format(
                    self._invalidating_files
                )
            )
            self.terminate()

    def _maybe_invalidate_scheduler_pidfile(self):
        """Terminate the daemon if the pidfile no longer names this process."""
        new_pid = self._check_pid_changed()
        if new_pid is not False:
            self._logger.critical(
                "{} says pantsd PID is {} but my PID is: {}: terminating".format(
                    self._pantsd_pidfile, new_pid, os.getpid(),
                )
            )
            self.terminate()

    def _check_pid_changed(self):
        """Reads pidfile and returns False if its PID is ours, else a printable (maybe falsey)
        value.

        Note the mixed return type: ``False`` means "still our pid"; any other
        value (a pid string or an error placeholder) means "changed".
        """
        try:
            with open(os.path.join(self._build_root, self._pantsd_pidfile), "r") as f:
                pid_from_file = f.read()
        except IOError:
            return "[no file could be read]"
        if int(pid_from_file) != os.getpid():
            return pid_from_file
        else:
            return False

    def _handle_batch_event(self, files):
        """Invalidate scheduler nodes for changed files; maybe restart pantsd."""
        self._logger.debug("handling change event for: %s", files)
        invalidated = self._scheduler.invalidate_files(files)
        if invalidated:
            # Wake any --loop consumers waiting for graph changes.
            self._loop_condition.notify_all()
        self._maybe_invalidate_scheduler_batch()

    def _process_event_queue(self):
        """File event notification queue processor."""
        try:
            event = self._event_queue.get(timeout=0.05)
        except queue.Empty:
            return
        try:
            subscription, is_initial_event, files = (
                event["subscription"],
                event["is_fresh_instance"],
                event["files"],
            )
        except (KeyError, UnicodeDecodeError) as e:
            self._logger.warning("%r raised by invalid watchman event: %s", e, event)
            return
        self._logger.debug(
            "processing {} files for subscription {} (first_event={})".format(
                len(files), subscription, is_initial_event
            )
        )
        # The first watchman event for all_files is a listing of all files - ignore it.
        if (
            not is_initial_event
            and subscription == self._fs_event_service.PANTS_ALL_FILES_SUBSCRIPTION_NAME
        ):
            self._handle_batch_event(files)
        # However, we do want to check for the initial event in the pid file creation.
        if subscription == self._fs_event_service.PANTS_PID_SUBSCRIPTION_NAME:
            self._maybe_invalidate_scheduler_pidfile()
        if not self._watchman_is_running.is_set():
            self._watchman_is_running.set()
        self._event_queue.task_done()

    def prepare_graph(self, options: Options) -> LegacyGraphSession:
        """Create a new graph session for a run, syncing with watchman first."""
        # If any nodes exist in the product graph, wait for the initial watchman event to avoid
        # racing watchman startup vs invalidation events.
        graph_len = self._scheduler.graph_len()
        if graph_len > 0:
            self._logger.debug(f"graph len was {graph_len}, waiting for initial watchman event")
            self._watchman_is_running.wait()
        global_options = options.for_global_scope()
        build_id = RunTracker.global_instance().run_id
        v2_ui = global_options.get("v2_ui", False)
        zipkin_trace_v2 = options.for_scope("reporting").zipkin_trace_v2
        return self._graph_helper.new_session(zipkin_trace_v2, build_id, v2_ui)

    def graph_run_v2(
        self,
        session: LegacyGraphSession,
        specs: Specs,
        options: Options,
        options_bootstrapper: OptionsBootstrapper,
    ) -> int:
        """Perform an entire v2 run.
        The exit_code in the return indicates whether any issue was encountered.
        """
        global_options = options.for_global_scope()
        perform_loop = global_options.get("loop", False)
        v2 = global_options.v2
        if not perform_loop:
            return self._body(session, options, options_bootstrapper, specs, v2)
        # TODO: See https://github.com/pantsbuild/pants/issues/6288 regarding Ctrl+C handling.
        iterations = global_options.loop_max
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
        while iterations and not self._state.is_terminating:
            try:
                exit_code = self._body(session, options, options_bootstrapper, specs, v2)
            except session.scheduler_session.execution_error_type as e:
                # Log and keep looping; a failed iteration must not kill --loop.
                self._logger.warning(e)
            iterations -= 1
            # Wait (in 1s slices, so termination is noticed) until the graph
            # was invalidated by a file change before re-running.
            while (
                iterations
                and not self._state.is_terminating
                and not self._loop_condition.wait(timeout=1)
            ):
                continue
        return exit_code

    def _body(
        self,
        session: LegacyGraphSession,
        options: Options,
        options_bootstrapper: OptionsBootstrapper,
        specs: Specs,
        v2: bool,
    ) -> int:
        """Run the v2 goals for a single --loop iteration (or a one-shot run)."""
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
        _, ambiguous_goals, v2_goals = options.goals_by_version
        if v2_goals or (ambiguous_goals and v2):
            goals = v2_goals + (ambiguous_goals if v2 else tuple())
            # N.B. @goal_rules run pre-fork in order to cache the products they request during execution.
            exit_code = session.run_goal_rules(
                options_bootstrapper=options_bootstrapper,
                union_membership=self._union_membership,
                options=options,
                goals=goals,
                specs=specs,
            )
        return exit_code

    def run(self):
        """Main service entrypoint."""
        while not self._state.is_terminating:
            self._process_event_queue()
            self._state.maybe_pause()
class LoopCondition:
    """A wrapped condition variable to handle deciding when loop consumers should re-run.

    Any number of threads may wait and/or notify the condition. Each
    notification advances a generation counter; a waiter reports success when
    the counter moved while it was waiting.
    """

    def __init__(self):
        super().__init__()
        self._condition = threading.Condition(threading.Lock())
        self._iteration = 0

    def notify_all(self):
        """Wake every waiting thread and advance the generation counter."""
        with self._condition:
            self._iteration += 1
            self._condition.notify_all()

    def wait(self, timeout):
        """Block up to *timeout* seconds; return True iff a notification landed.

        Generally called in a loop until the condition triggers.
        """
        with self._condition:
            generation_seen = self._iteration
            self._condition.wait(timeout)
            changed = generation_seen != self._iteration
        return changed
| 37.369281 | 111 | 0.648535 |
5633f8fa5411caa74b460ff19c104209b60ef032 | 2,174 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/background.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/background.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/background.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z | #!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""
Out of the original code for background, this seems the most likely to
remain intact.
"""
from Scheduler import scheduler
from Component import component
import threading
import CoordinatingAssistantTracker as cat
class background(threading.Thread):
    """A python thread which runs a scheduler. Takes the same arguments at creation that scheduler.run.runThreads accepts."""
    # Class-level lock: only a single scheduler may run at a time, across all
    # background instances.
    lock = threading.Lock()

    def __init__(self, slowmo=0, zap=False):
        if not background.lock.acquire(False):
            # The original did ``raise "only one scheduler..."``; raising a
            # plain string is itself a TypeError in modern Python, so the
            # intended error never surfaced. Raise a real exception instead.
            raise RuntimeError("only one scheduler for now can be run!")
        self.slowmo = slowmo
        threading.Thread.__init__(self)
        # ``setDaemon(True)`` is a deprecated alias; assign the property.
        self.daemon = True  # Die when the caller dies
        self.zap = zap

    def run(self):
        if self.zap:
            X = scheduler()
            scheduler.run = X
            cat.coordinatingassistanttracker.basecat.zap()
        scheduler.run.waitForOne()
        scheduler.run.runThreads(slowmo = self.slowmo)
        background.lock.release()
if __name__ == "__main__":
from Kamaelia.UI.Pygame.MagnaDoodle import MagnaDoodle
import time
background = background().start()
MagnaDoodle().activate()
while 1:
time.sleep(1)
print "."
| 33.446154 | 125 | 0.675253 |
bd26b5dfd2070459ec4d9bd9525be259224b003e | 12,207 | py | Python | nlp2/voice/views.py | tjas/postgrad-ai-nlp2-voice-ui | 489888bdb2613530aa105698f1f999f56c6d60b6 | [
"MIT"
] | null | null | null | nlp2/voice/views.py | tjas/postgrad-ai-nlp2-voice-ui | 489888bdb2613530aa105698f1f999f56c6d60b6 | [
"MIT"
] | null | null | null | nlp2/voice/views.py | tjas/postgrad-ai-nlp2-voice-ui | 489888bdb2613530aa105698f1f999f56c6d60b6 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from .models import Credentials
from nlp2.settings import BASE_DIR, PATH_TO_CREDENTIALS, PATH_TO_AUDIO, SPEECH_TO_TEXT_KEY, TEXT_TO_SPEECH_KEY
import json
def index(request):
    """Render the application landing page."""
    context = {}
    return render(request, 'index.html', context)
def tts(request):
    """Text-to-speech view: GET renders the form, POST synthesizes audio.

    NOTE(review): on POST failure the fallback context still carries
    'success': True, while the success context drops the 'success' key
    entirely -- presumably tts.html tolerates this, but it looks like a
    latent bug; confirm against the template before changing.
    """
    if request.method == 'GET':
        ttsText = request.GET.get('ttsText')
        context = {'success': True, 'ttsText': ttsText, 'audio': ""}
        return render(request, 'tts.html', context)
    if request.method == 'POST':
        ttsText = request.POST.get('ttsText')
        # Leftover commented-out credential-saving code (see settings view):
        #credential.speech_to_text_key = sttKey
        #credential.text_to_speech_key = ttsKey
        #credential.save(update_fields=['speech_to_text_key', 'text_to_speech_key'])
        # sintetizar() writes audio-sintetizado.wav and reports success.
        success = sintetizar(ttsText)
        context = {'success': True, 'ttsText': ttsText, 'audio': ""}
        if success:
            # Point the template at the freshly synthesized WAV file.
            context = {'ttsText': ttsText, 'audio': os.path.join(PATH_TO_AUDIO, 'audio-sintetizado.wav')}
        return render(request, 'tts.html', context)
    # NOTE(review): any other HTTP method falls through and returns None,
    # which Django reports as a server error.
def stt(request):
    """Speech-to-text view.

    GET echoes back an already-generated transcript; POST receives a
    recorded audio blob (request.FILES['blob']), writes it to disk as
    audio-gravado.webm and returns the transcription produced by
    reconhecer().
    """
    # def handle_uploaded_file(f):
    #     with open(os.path.join(PATH_TO_AUDIO, 'audio-gravado.webm'), 'wb+') as destination:
    #         for chunk in f.chunks():
    #             destination.write(chunk)
    if request.method == 'GET':
        sttGeneratedText = request.GET.get('sttGeneratedText')
        # audio = request.GET.get('blob')
        context = {'success': True, 'result': sttGeneratedText}
        return render(request, 'stt.html', context)
    if request.method == 'POST':
        #sttGeneratedText = request.POST.get('sttGeneratedText')
        audio = request.FILES['blob']
        #print("\nAudio: " + audio)
        if audio:
            # filename = 'audio-gravado.wav'
            print("\nReconheceu o áudio")
            try:
                # Persist the uploaded blob chunk-by-chunk to disk.
                with open(os.path.join(PATH_TO_AUDIO, 'audio-gravado.webm'), 'wb') as destination:
                    for chunk in audio.chunks():
                        destination.write(chunk)
                # with open(os.path.join(PATH_TO_AUDIO, 'audio-gravado.webm'), 'wb') as arquivo:
                #     arquivo.write(audio)
                #     result = reconhecer(arquivo)
                # Transcribe the file we just wrote.
                result = reconhecer(os.path.join(PATH_TO_AUDIO, 'audio-gravado.webm'))
                #result = reconhecer(audio)
                # audio.save(filename)
                print("\nResultado: "+ str(result) +"\n")
                context = {'success': True, 'result': str(result)}
                #return HttpResponse(json.dumps(context))
                # Returns the bare transcript string, not the JSON context.
                return HttpResponse(result)
            except PermissionError:
                print('Sem permissão de escrita')
                context = {'success': False, 'error': 'Não foi possível transcrever a fala.'}
                return HttpResponse(json.dumps(context))
            except Exception as exc:
                # Debug aid: report the exact file/line where the exception
                # was raised, then fall through to the generic error below.
                import linecache
                import sys
                exc_type, exc_obj, tb = sys.exc_info()
                f = tb.tb_frame
                lineno = tb.tb_lineno
                filename = f.f_code.co_filename
                linecache.checkcache(filename)
                line = linecache.getline(filename, lineno, f.f_globals)
                print('EXCEPTION thrown from {}, line {}: at "{}". {}: {}'.format(filename, lineno, line.strip(), type(exc), exc_obj))
        else:
            print("\nResultado: não entrou no tratamento do áudio\n")
            context = {'success': False, 'error': 'Não foi possível transcrever a fala.'}
            return HttpResponse(json.dumps(context))
    # Generic failure path (also reached after the broad except above).
    context = {'success': False, 'error': 'Não foi possível transcrever a fala.'}
    #return render(request, 'stt.html', context)
    return HttpResponse(json.dumps(context))
def settings(request):
    """Display and update the stored IBM Watson API credentials.

    GET renders the current keys; POST saves the submitted keys. The original
    version only bound ``credential`` inside the GET branch, so every POST
    crashed with a NameError before saving anything.
    """
    # Fetch the singleton credentials row, creating it from the configured
    # defaults on first use (any lookup failure falls back to creation, as
    # in the original GET branch).
    try:
        credential = Credentials.objects.all()[:1].get()
    except Exception:
        credential = Credentials(speech_to_text_key=SPEECH_TO_TEXT_KEY,
                                 text_to_speech_key=TEXT_TO_SPEECH_KEY)
        credential.save()
    if request.method == 'POST':
        credential.speech_to_text_key = request.POST.get('sttKey')
        credential.text_to_speech_key = request.POST.get('ttsKey')
        credential.save(update_fields=['speech_to_text_key', 'text_to_speech_key'])
    context = {'sttKey': credential.speech_to_text_key,
               'ttsKey': credential.text_to_speech_key}
    return render(request, 'settings.html', context)
""" ***********************************
IMPORTS
***********************************
"""
from ibm_watson import TextToSpeechV1, SpeechToTextV1
from ibm_watson.websocket import RecognizeCallback, AudioSource
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import os
import sys
import json
import pyaudio
import wave
import tempfile
from threading import Thread
try:
from Queue import Queue, Full
except ImportError:
from queue import Queue, Full
import logging
logging.disable(logging.CRITICAL)
""" ***********************************
TEXT TO SPEECH
***********************************
"""
class Synthetizer:
    """Thin wrapper around the IBM Watson Text-to-Speech client."""

    def __init__(self):
        authenticator = IAMAuthenticator(TEXT_TO_SPEECH_KEY)
        self.authenticator = authenticator
        self.text_to_speech = TextToSpeechV1(authenticator=authenticator)

    def call_api(self, orig_text):
        """Return the synthesized speech for *orig_text* as WAV bytes."""
        response = self.text_to_speech.synthesize(
            orig_text,
            voice='pt-BR_IsabelaV3Voice',
            accept='audio/wav',
        )
        return response.get_result().content
def sintetizar(texto: str) -> bool:
    """Synthesize *texto* to PATH_TO_AUDIO/audio-sintetizado.wav.

    :param texto: text to convert to speech.
    :return: True when the audio file was written, False otherwise. The
        original returned True even when the API produced no audio, which
        made the caller link to a stale (or missing) file.
    """
    transcriber = Synthetizer()
    audio = transcriber.call_api(texto)
    if not audio:
        return False
    try:
        with open(os.path.join(PATH_TO_AUDIO, 'audio-sintetizado.wav'), 'wb') as arquivo:
            arquivo.write(audio)
    except PermissionError:
        print('Sem permissão de escrita')
        return False
    return True
""" ***********************************
SPEECH TO TEXT
***********************************
"""
def treat_api_output(speech_recognition_results):
    """Collect the alternatives of every *final* recognition result.

    Assumes the API returns a dict whose items of interest all live under
    the ['results'] list, each entry carrying 'final' and 'alternatives'.
    """
    finals = []
    for result in speech_recognition_results['results']:
        if result['final']:
            finals.append(result['alternatives'])
    return finals
class S2tTranscriber:
    """Thin wrapper around the IBM Watson Speech-to-Text client.

    Authenticates with the module-level SPEECH_TO_TEXT_KEY.
    """

    def __init__(self):
        authenticator = IAMAuthenticator(SPEECH_TO_TEXT_KEY)
        self.authenticator = authenticator
        self.speech_to_text = SpeechToTextV1(authenticator=authenticator)

    def call_api(self, audio_file, extension):
        """Transcribe *audio_file*; content type is derived from *extension*."""
        response = self.speech_to_text.recognize(
            audio=audio_file,
            content_type='audio/' + extension,
            model='pt-BR_BroadbandModel',
        )
        return response.get_result()
class MyRecognizeCallback(RecognizeCallback):
    """Callback needed to handle the API response over the websocket.

    Accumulates final transcripts in ``self.transcriptions`` and the last raw
    payload in ``self.data`` while echoing progress to the console.
    """

    def __init__(self):
        RecognizeCallback.__init__(self)
        self.transcriptions = []
        self.data = {}

    def on_transcription(self, transcript):
        self.transcriptions.append(transcript)
        sys.stdout.flush()
        sys.stdout.write('\b\b\b\b')
        print(f'[Confiança {transcript[0]["confidence"]}]')
        # The original called sys.stdout.seek(0) here; stdout is not a
        # seekable stream, so that raised io.UnsupportedOperation and broke
        # the callback after the first transcription. Removed.

    def on_connected(self):
        print('Conexão estabelecida com API')
        print('Pressione ctrl + c para interromper a coleta de áudio')

    def on_error(self, error):
        print('Error received: {}'.format(error))

    def on_inactivity_timeout(self, error):
        print('Inactivity timeout: {}'.format(error))

    def on_listening(self):
        print('Ouvindo')

    def on_hypothesis(self, hypothesis):
        # Interim (non-final) hypothesis: overwrite the current console line.
        sys.stdout.write('\r')
        output = hypothesis + ' ...'
        sys.stdout.write(output)

    def on_data(self, data):
        self.data = data

    def on_close(self):
        sys.stdout.write('\n')
        print("Conexão finalizada")
        return self.transcriptions
class S2tLive(S2tTranscriber):
    """Live-microphone transcriber (websocket streaming).

    NOTE(review): all of the state below lives on the *class*, so the audio
    queue, the PyAudio handle and the AudioSource are shared by every
    instance -- and PyAudio() is instantiated at import time. Confirm this
    is intentional before reusing the class.
    """
    CHUNK = 1024
    BUF_MAX_SIZE = CHUNK * 10
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    # Bounded buffer between the PyAudio capture callback and the websocket.
    q = Queue(maxsize=int(round(BUF_MAX_SIZE / CHUNK)))
    audio = pyaudio.PyAudio()

    def pyaudio_callback(self, in_data, frame_count, time_info, status):
        # Enqueue raw frames; drop them when the buffer is full. The original
        # used a blocking put(), which never raises Full (the except branch
        # was dead code) and could stall the audio callback thread instead of
        # dropping the frame as intended.
        try:
            self.q.put(in_data, block=False)
        except Full:
            pass
        return None, pyaudio.paContinue
    audio_source = AudioSource(q, True, True)

    def recognize_using_websocket(self, *args):
        # Stream the shared audio_source to Watson; results arrive via the
        # MyRecognizeCallback instance.
        callback = MyRecognizeCallback()
        self.speech_to_text.recognize_using_websocket(audio=self.audio_source, content_type='audio/l16; rate=44100',
                                                      recognize_callback=callback, model='pt-BR_BroadbandModel',
                                                      interim_results=True)
    # def call_api_live(self):
    #     stream = self.audio.open(
    #         format=self.FORMAT,
    #         channels=self.CHANNELS,
    #         rate=self.RATE,
    #         input=True,
    #         frames_per_buffer=self.CHUNK,
    #         stream_callback=self.pyaudio_callback,
    #         start=False
    #     )
    #     stream.start_stream()
    #     try:
    #         recognize_thread = Thread(target=self.recognize_using_websocket, args=())
    #         recognize_thread.start()
    #         while True:
    #             pass
    #     except KeyboardInterrupt:
    #         # stop recording
    #         stream.stop_stream()
    #         stream.close()
    #         self.audio.terminate()
    #         self.audio_source.completed_recording()
    #         pass
def reconhecer(path: str):
    """Transcribe the audio file at *path* and return the last final transcript.

    Returns an empty string when *path* is falsy or no segment was recognized.
    Each recognized segment is also echoed to the console with its confidence.
    """
    if not path:
        return ''
    transcriber = S2tTranscriber()
    result = ''
    with open(path, 'rb') as audio:
        extension = path.split('.')[-1]
        if not extension:
            extension = 'wav'
        api_output = transcriber.call_api(audio, extension)
        alternativas = treat_api_output(api_output)
        numero = 1
        for alternativa in alternativas:
            for segmento in alternativa:
                print(f'Transcrição segmento {numero}: {segmento["transcript"]}. [Confiança: {segmento["confidence"]}]')
                result = segmento["transcript"]
                numero += 1
    return result
# def reconhecer(audio):
# with open(audio, 'rb') as audio_file:
# speech_recognition_results = self.stt.recognize(
# audio=audio_file,
# content_type='audio/webm',
# model='pt-BR_BroadbandModel',
# # max_alternatives=3
# ).get_result()
# # print(json.dumps(speech_recognition_results, indent=2, ensure_ascii=False))
# return speech_recognition_results | 34.097765 | 134 | 0.594659 |
e29fb641f9c84b0ca62e6310e5606f73de21df41 | 3,968 | py | Python | resolwe_bio/tests/unit/test_views.py | JenkoB/resolwe-bio | a958cf3fc82ebc37f527e1b156753f2324a33803 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tests/unit/test_views.py | JenkoB/resolwe-bio | a958cf3fc82ebc37f527e1b156753f2324a33803 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tests/unit/test_views.py | JenkoB/resolwe-bio | a958cf3fc82ebc37f527e1b156753f2324a33803 | [
"Apache-2.0"
] | 1 | 2021-09-03T08:50:54.000Z | 2021-09-03T08:50:54.000Z | # pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
from mock import MagicMock
from django.contrib.auth import get_user_model
from django.test import TestCase
from guardian.shortcuts import assign_perm, remove_perm
from rest_framework import exceptions
from rest_framework.test import APIRequestFactory
from resolwe.flow.models import Collection, Data, Process
from resolwe_bio.models import Sample
from resolwe_bio.views import SampleViewSet
factory = APIRequestFactory() # pylint: disable=invalid-name
class SampleViewSetTest(TestCase):
    """Tests for the collection/data management actions of ``SampleViewSet``."""

    def setUp(self):
        self.user = get_user_model().objects.create_user('test_user')

        self.collection = Collection.objects.create(name="Test Collection", contributor=self.user)
        self.sample = Sample.objects.create(name="Test sample", contributor=self.user)

        process = Process.objects.create(name="Test process", contributor=self.user)
        self.data = Data.objects.create(name="Test data", contributor=self.user, process=process)
        self.data_2 = Data.objects.create(name="Test data 2", contributor=self.user, process=process)
        # An extra Data object that must never be touched by the actions under test.
        Data.objects.create(name="Dummy data", contributor=self.user, process=process)

        self.sample.data.add(self.data)

        assign_perm('add_collection', self.user, self.collection)
        assign_perm('add_sample', self.user, self.sample)

        self.sampleviewset = SampleViewSet()

    def _request(self, ids):
        """Build a mocked DRF request carrying the given primary keys."""
        return MagicMock(data={'ids': ids}, user=self.user)

    def test_add_to_collection(self):
        """Adding a sample to a collection also attaches its data objects."""
        self.sampleviewset.get_object = lambda: self.sample

        self.sampleviewset.add_to_collection(self._request([self.collection.pk]))

        self.assertEqual(self.collection.data.count(), 1)
        self.assertEqual(self.sample.collections.count(), 1)

    def test_remove_from_collection(self):
        """Removing a sample detaches both the sample and its data objects."""
        # Manually attach the sample and its data object to the collection first.
        self.sample.collections.add(self.collection.pk)
        self.collection.data.add(self.data)
        self.sampleviewset.get_object = lambda: self.sample

        self.sampleviewset.remove_from_collection(self._request([self.collection.pk]))

        self.assertEqual(self.collection.data.count(), 0)
        self.assertEqual(self.sample.collections.count(), 0)

    def test_add_remove_permissions(self):
        """Without the add_collection permission both actions are denied."""
        request = self._request([self.collection.pk])
        self.sampleviewset.get_object = lambda: self.sample

        remove_perm('add_collection', self.user, self.collection)

        with self.assertRaises(exceptions.PermissionDenied):
            self.sampleviewset.remove_from_collection(request)

        with self.assertRaises(exceptions.PermissionDenied):
            self.sampleviewset.add_data(request) if False else None  # placeholder removed below

    def test_add_data(self):
        """Data added to a sample propagates to the sample's collections."""
        self.sample.collections.add(self.collection)
        self.sampleviewset.get_object = lambda: self.sample

        self.sampleviewset.add_data(self._request([self.data.pk]))

        self.assertEqual(self.sample.data.count(), 1)
        self.assertEqual(self.collection.data.count(), 1)

    def test_remove_data(self):
        """The sample itself is deleted only once its last data object goes."""
        self.sample.data.add(self.data_2)
        self.sampleviewset.get_object = lambda: self.sample

        self.sampleviewset.remove_data(self._request([self.data.pk]))
        self.assertEqual(Sample.objects.count(), 1)

        self.sampleviewset.remove_data(self._request([self.data_2.pk]))
        self.assertEqual(Sample.objects.count(), 0)
| 40.907216 | 101 | 0.721018 |
efa2e0cf3f445f137e2cc97c036607ea70664491 | 328 | py | Python | catalog/bindings/csw/surface_array_property.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/surface_array_property.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/surface_array_property.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.csw.surface_array_property_type import SurfaceArrayPropertyType
# XML namespace shared by all GML bindings generated in this module.
__NAMESPACE__ = "http://www.opengis.net/gml"


# Concrete binding for the ``gml:surfaceArrayProperty`` element; all payload
# fields are inherited from SurfaceArrayPropertyType.
@dataclass
class SurfaceArrayProperty(SurfaceArrayPropertyType):
    class Meta:
        # Element name and namespace used by the xsdata (de)serializer.
        name = "surfaceArrayProperty"
        namespace = "http://www.opengis.net/gml"
| 27.333333 | 77 | 0.777439 |
b704764bd97bd3fb92cb054d1b9932c6516beb4c | 1,515 | py | Python | api/users/urls.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | null | null | null | api/users/urls.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | null | null | null | api/users/urls.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.UserList.as_view(), name=views.UserList.view_name),
url(r'^(?P<user_id>\w+)/$', views.UserDetail.as_view(), name=views.UserDetail.view_name),
url(r'^(?P<user_id>\w+)/addons/$', views.UserAddonList.as_view(), name=views.UserAddonList.view_name),
url(r'^(?P<user_id>\w+)/addons/(?P<provider>\w+)/$', views.UserAddonDetail.as_view(), name=views.UserAddonDetail.view_name),
url(r'^(?P<user_id>\w+)/addons/(?P<provider>\w+)/accounts/$', views.UserAddonAccountList.as_view(), name=views.UserAddonAccountList.view_name),
url(r'^(?P<user_id>\w+)/addons/(?P<provider>\w+)/accounts/(?P<account_id>\w+)/$', views.UserAddonAccountDetail.as_view(), name=views.UserAddonAccountDetail.view_name),
url(r'^(?P<user_id>\w+)/institutions/$', views.UserInstitutions.as_view(), name=views.UserInstitutions.view_name),
url(r'^(?P<user_id>\w+)/nodes/$', views.UserNodes.as_view(), name=views.UserNodes.view_name),
url(r'^(?P<user_id>\w+)/preprints/$', views.UserPreprints.as_view(), name=views.UserPreprints.view_name),
url(r'^(?P<user_id>\w+)/registrations/$', views.UserRegistrations.as_view(), name=views.UserRegistrations.view_name),
url(r'^(?P<user_id>\w+)/quickfiles/$', views.UserQuickFiles.as_view(), name=views.UserQuickFiles.view_name),
url(r'^(?P<user_id>\w+)/relationships/institutions/$', views.UserInstitutionsRelationship.as_view(), name=views.UserInstitutionsRelationship.view_name),
]
| 79.736842 | 171 | 0.716832 |
6fcf73dd3590477bd87c5961cb8f8d3bfebd3094 | 3,542 | py | Python | src/pymap3d/eci.py | scienceopen/python-mapping | fb6147542fc0cab9e2f3b07713605f074a30e086 | [
"BSD-2-Clause"
] | null | null | null | src/pymap3d/eci.py | scienceopen/python-mapping | fb6147542fc0cab9e2f3b07713605f074a30e086 | [
"BSD-2-Clause"
] | null | null | null | src/pymap3d/eci.py | scienceopen/python-mapping | fb6147542fc0cab9e2f3b07713605f074a30e086 | [
"BSD-2-Clause"
] | null | null | null | """ transforms involving ECI earth-centered inertial """
from __future__ import annotations
from datetime import datetime
from numpy import array, sin, cos, column_stack, empty, atleast_1d
# AstroPy is an optional dependency: when it is not installed, ``Time`` is set
# to None and both transforms below fall back to the pure-numpy path.
try:
    from astropy.coordinates import GCRS, ITRS, EarthLocation, CartesianRepresentation
    from astropy.time import Time
    import astropy.units as u
except ImportError:
    Time = None

from .sidereal import greenwichsrt, juliandate

# Public API of this module.
__all__ = ["eci2ecef", "ecef2eci"]
def eci2ecef(x, y, z, time: datetime, *, use_astropy: bool = True) -> tuple:
    """
    Observer => Point   ECI  =>  ECEF

    J2000 frame

    Parameters
    ----------
    x : float
        ECI x-location [meters]
    y : float
        ECI y-location [meters]
    z : float
        ECI z-location [meters]
    time : datetime.datetime
        time of obsevation (UTC)
    use_astropy: bool, optional
        use AstroPy (much more accurate)

    Results
    -------
    x_ecef : float
        x ECEF coordinate
    y_ecef : float
        y ECEF coordinate
    z_ecef : float
        z ECEF coordinate
    """
    # Accurate path: let AstroPy perform the GCRS -> ITRS frame transform.
    if use_astropy and Time is not None:
        gcrs = GCRS(CartesianRepresentation(x * u.m, y * u.m, z * u.m), obstime=time)
        itrs = gcrs.transform_to(ITRS(obstime=time))
        return itrs.x.value, itrs.y.value, itrs.z.value

    # Fallback: rotate each point by the Greenwich sidereal angle.
    x = atleast_1d(x)
    y = atleast_1d(y)
    z = atleast_1d(z)
    gst = atleast_1d(greenwichsrt(juliandate(time)))
    assert x.shape == y.shape == z.shape
    assert x.size == gst.size

    eci_rows = column_stack((x.ravel(), y.ravel(), z.ravel()))
    ecef_rows = empty((x.size, 3))
    for idx, (theta, row) in enumerate(zip(gst, eci_rows)):
        ecef_rows[idx, :] = R3(theta) @ row

    return (
        ecef_rows[:, 0].reshape(x.shape),
        ecef_rows[:, 1].reshape(y.shape),
        ecef_rows[:, 2].reshape(z.shape),
    )
def ecef2eci(x, y, z, time: datetime, *, use_astropy: bool = True) -> tuple:
    """
    Point => Point   ECEF => ECI

    J2000 frame

    Parameters
    ----------
    x : float
        target x ECEF coordinate
    y : float
        target y ECEF coordinate
    z : float
        target z ECEF coordinate
    time : datetime.datetime
        time of observation
    use_astropy: bool, optional
        use AstroPy (much more accurate)

    Results
    -------
    x_eci : float
        x ECI coordinate
    y_eci : float
        y ECI coordinate
    z_eci : float
        z ECI coordinate
    """
    # Accurate path: let AstroPy perform the ITRS -> GCRS frame transform.
    if use_astropy and Time is not None:
        itrs = ITRS(CartesianRepresentation(x * u.m, y * u.m, z * u.m), obstime=time)
        gcrs = itrs.transform_to(GCRS(obstime=time))
        loc = EarthLocation(*gcrs.cartesian.xyz)
        return loc.x.value, loc.y.value, loc.z.value

    # Fallback: apply the inverse (transposed) sidereal rotation per point.
    x = atleast_1d(x)
    y = atleast_1d(y)
    z = atleast_1d(z)
    gst = atleast_1d(greenwichsrt(juliandate(time)))
    assert x.shape == y.shape == z.shape
    assert x.size == gst.size

    ecef_rows = column_stack((x.ravel(), y.ravel(), z.ravel()))
    eci_rows = empty((x.size, 3))
    for idx, (theta, row) in enumerate(zip(gst, ecef_rows)):
        eci_rows[idx, :] = R3(theta).T @ row

    return (
        eci_rows[:, 0].reshape(x.shape),
        eci_rows[:, 1].reshape(y.shape),
        eci_rows[:, 2].reshape(z.shape),
    )
def R3(x: float):
    """Rotation matrix about the z-axis by angle ``x`` (radians), ECI convention."""
    c, s = cos(x), sin(x)
    return array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
| 25.854015 | 86 | 0.574252 |
3490a1a8529cb1e29c0c4f3582eca2846fc62f46 | 250 | py | Python | demoapp/demon/doctype/district/district.py | poojamehta3/Demon-Project | e471355ffd2c391807a6c1c2ee578b3827db988a | [
"MIT"
] | null | null | null | demoapp/demon/doctype/district/district.py | poojamehta3/Demon-Project | e471355ffd2c391807a6c1c2ee578b3827db988a | [
"MIT"
] | null | null | null | demoapp/demon/doctype/district/district.py | poojamehta3/Demon-Project | e471355ffd2c391807a6c1c2ee578b3827db988a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Pooja and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
# Controller for the ``District`` DocType. All behaviour comes from the
# frappe.model.document.Document base class; no custom server-side logic yet.
class District(Document):
    pass
| 22.727273 | 49 | 0.776 |
74b5c5cf77c41c81e9971530901e8b72a1a649a4 | 10,434 | py | Python | src/dal_select2/views.py | robertispas/django-autocomplete-light | 3eb84a09c5828e4a9409f0aec14c2874bcbe17df | [
"MIT"
] | 1,368 | 2015-01-03T09:52:33.000Z | 2022-03-27T09:06:00.000Z | src/dal_select2/views.py | robertispas/django-autocomplete-light | 3eb84a09c5828e4a9409f0aec14c2874bcbe17df | [
"MIT"
] | 919 | 2015-01-01T05:17:48.000Z | 2022-03-25T22:41:14.000Z | src/dal_select2/views.py | robertispas/django-autocomplete-light | 3eb84a09c5828e4a9409f0aec14c2874bcbe17df | [
"MIT"
] | 469 | 2015-01-19T21:40:30.000Z | 2022-03-26T17:27:40.000Z | """Select2 view implementation."""
import collections
# import json
from dal.views import BaseQuerySetView, ViewMixin
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.db.models import F
from django.utils.translation import gettext as _
from django.views.generic.list import View
import six
class Select2ViewMixin(object):
    """View mixin that renders autocomplete results as Select2 JSON."""

    # When True, only an exact (case-sensitive) existing label suppresses the
    # "create" option; otherwise the comparison is case-insensitive.
    case_sensitive_create = False

    def get_results(self, context):
        """Return data for the 'results' key of the response."""
        rendered = []
        for obj in context['object_list']:
            rendered.append({
                'id': self.get_result_value(obj),
                'text': self.get_result_label(obj),
                'selected_text': self.get_selected_result_label(obj),
            })
        return rendered

    def get_create_option(self, context, q):
        """Return the "create" entry to append to results, or an empty list."""
        if not self.create_field or not q:
            return []

        # Only offer creation on the first page of results.
        page_obj = context.get('page_obj', None)
        if page_obj is not None and page_obj.number != 1:
            return []

        # Suppress the option when an identical label already exists.
        if self.case_sensitive_create:
            labels = (self.get_result_label(obj) for obj in context["object_list"])
            if q in labels:
                return []
        else:
            labels = (self.get_result_label(obj).lower() for obj in context['object_list'])
            if q.lower() in labels:
                return []

        if not self.has_add_permission(self.request):
            return []

        return [{
            'id': q,
            'text': _('Create "%(new_value)s"') % {'new_value': q},
            'create_id': True,
        }]

    def render_to_response(self, context):
        """Return a JSON response in Select2 format."""
        q = self.request.GET.get('q', None)
        payload = {
            'results': self.get_results(context) + self.get_create_option(context, q),
            'pagination': {
                'more': self.has_more(context)
            },
        }
        return http.JsonResponse(payload)
class Select2QuerySetView(Select2ViewMixin, BaseQuerySetView):
    """List options for a Select2 widget.

    Combines the queryset-backed autocomplete of BaseQuerySetView with the
    Select2 JSON rendering of Select2ViewMixin; no extra behaviour of its own.
    """
class Select2GroupQuerySetView(Select2QuerySetView):
    """List of grouped options for a Select2 widget.

    .. py:attribute:: group_by_related

        Name of the field for the related Model on a One to Many relation

    .. py:attribute:: related_field_name

        Name of the related Model field to run filter against.
    """

    group_by_related = None
    related_field_name = 'name'

    def get_results(self, context):
        """Return the options grouped by a common related model.

        Raises ImproperlyConfigured if self.group_by_related is not configured.
        """
        if not self.group_by_related:
            raise ImproperlyConfigured("Missing group_by_related.")

        annotated = context['object_list'].annotate(
            group_name=F(f'{self.group_by_related}__{self.related_field_name}'))

        # Preserve queryset order while bucketing by the related label.
        grouped = collections.OrderedDict()
        for obj in annotated:
            grouped.setdefault(obj.group_name, []).append(obj)

        return [
            {
                'id': None,
                'text': label,
                'children': [
                    {
                        'id': self.get_result_value(obj),
                        'text': self.get_result_label(obj),
                        'selected_text': self.get_selected_result_label(obj),
                    }
                    for obj in members
                ],
            }
            for label, members in grouped.items()
        ]
class Select2ListView(ViewMixin, View):
    """Autocomplete from a list of items rather than a QuerySet.

    Entries returned by :meth:`get_list` may be plain strings, or
    ``[value, label]`` pairs (as lists or tuples).
    """

    @staticmethod
    def _all_pairs(results, pair_type):
        """Return True when ``results`` is non-empty and every entry is ``pair_type``.

        Shared by :meth:`autocomplete_results` and :meth:`results`, which
        previously duplicated this type-sniffing logic inline.
        """
        return len(results) > 0 and all(isinstance(el, pair_type) for el in results)

    def get_list(self):
        """Return the list of values from which to autocomplete."""
        return []

    def get(self, request, *args, **kwargs):
        """Return option list json response, filtered by ``self.q`` if present."""
        results = self.get_list()
        create_option = []
        if self.q:
            results = self.autocomplete_results(results)
            # Offer a "create" entry only when the subclass defines create().
            if hasattr(self, 'create'):
                create_option = [{
                    'id': self.q,
                    'text': _('Create "%(new_value)s"') % {
                        'new_value': self.q
                    },
                    'create_id': True
                }]
        return http.JsonResponse({
            'results': self.results(results) + create_option
        }, content_type='application/json')

    def autocomplete_results(self, results):
        """Return entries whose label contains the query, case-insensitively."""
        needle = self.q.lower()  # hoisted: was recomputed for every candidate
        if self._all_pairs(results, list) or self._all_pairs(results, tuple):
            return [[value, label] for value, label in results
                    if needle in label.lower()]
        return [x for x in results if needle in x.lower()]

    def results(self, results):
        """Return the result dictionaries ({'id': ..., 'text': ...})."""
        if self._all_pairs(results, list) or self._all_pairs(results, tuple):
            return [dict(id=value, text=label) for value, label in results]
        return [dict(id=x, text=x) for x in results]

    def post(self, request, *args, **kwargs):
        """Add an option to the autocomplete list.

        If 'text' is not defined in POST or self.create(text) fails, raises
        bad request. Raises ImproperlyConfigured if self.create is not defined.
        """
        if not hasattr(self, 'create'):
            raise ImproperlyConfigured('Missing "create()"')

        text = request.POST.get('text', None)
        if text is None:
            return http.HttpResponseBadRequest()

        text = self.create(text)
        if text is None:
            return http.HttpResponseBadRequest()

        return http.JsonResponse({
            'id': text,
            'text': text,
        })
class Select2GroupListView(Select2ListView):
    """View mixin for grouped options."""

    def get_item_as_group(self, entry):
        """Return the item with its group.

        Normalizes ``entry`` into a one-element tuple ``((group, items),)``
        where ``items`` is always iterable.
        """
        group = None
        item = entry
        # NOTE(review): ``collections.Sequence`` moved to ``collections.abc``
        # in Python 3.3 and the alias was removed in Python 3.10, so this
        # attribute access fails on modern interpreters -- confirm target
        # Python versions before relying on this code path.
        if isinstance(entry, collections.Sequence) and \
                not isinstance(entry, six.string_types):
            entry_length = len(entry)
            if all(isinstance(el, list) for el in entry) and entry_length > 1:
                group, item = entry[0:2]
                return (group, item),
            # NOTE(review): this branch repeats the condition above verbatim
            # and is therefore unreachable; it was presumably meant to test
            # ``tuple`` elements.
            elif all(isinstance(el, list) for el in entry) and entry_length > 1:
                group, item = entry[0:2]
                return (group, item),
            else:
                if(entry_length > 1):
                    group, item = entry[0:2]
                elif(entry_length > 0):
                    item = entry[0]
        # Guarantee that ``item`` is iterable (wrap scalars and strings).
        if not isinstance(item, collections.Sequence) or \
                isinstance(item, six.string_types):
            item = (item,)
        return (group, item),

    def get(self, request, *args, **kwargs):
        """Return option list with children(s) json response."""
        results_dict = {}
        results = self.get_list()
        if results:
            # Pair entries carry explicit (group_id, group_label) data.
            if (
                all(isinstance(el, list) for el in results)
                or all(isinstance(el, tuple) for el in results)
            ):
                flat_results = [
                    (group[0], group[1], item[0], item[1]) for entry in results
                    for group, items in self.get_item_as_group(entry)
                    for item in items
                ]
                if self.q:
                    q = self.q.lower()
                    flat_results = [(g, h, x, y) for g, h, x, y in flat_results
                                    if q in y.lower()]
                # Bucket by (group_id, group_label); key (None, None) holds
                # the ungrouped options.
                for group_id, group, item_id, item in flat_results:
                    results_dict.setdefault((group_id, group), [])
                    results_dict[(group_id, group)].append([item_id, item])
                return http.JsonResponse({
                    "results": [
                        {
                            "id": x, "text": y
                        } for x, y in results_dict.pop((None, None), [])
                    ] + [
                        {
                            "id": g[0],
                            "text": g[1],
                            "children": [
                                {"id": x, "text": y} for x, y in l
                            ]
                        }
                        for g, l in six.iteritems(results_dict)
                    ]
                })
            else:
                # Plain entries: group label doubles as the group id.
                flat_results = [(group, item) for entry in results
                                for group, items in self.get_item_as_group(entry)
                                for item in items]
                if self.q:
                    q = self.q.lower()
                    flat_results = [(g, x) for g, x in flat_results
                                    if q in x.lower()]
                for group, item in flat_results:
                    results_dict.setdefault(group, [])
                    results_dict[group].append(item)
        # Fallthrough response: ungrouped options first (key None), then one
        # parent entry per group with its children.
        return http.JsonResponse({
            "results": [
                {"id": x, "text": x} for x in results_dict.pop(None, [])
            ] + [
                {
                    "id": g,
                    "text": g,
                    "children": [
                        {"id": x, "text": x} for x in l
                    ]
                }
                for g, l in six.iteritems(results_dict)
            ]
        })
| 35.732877 | 103 | 0.520222 |
b2b55109b28fb125b1d6c5ec71ed757afd26d1ee | 2,824 | py | Python | mealie/services/settings_services.py | zackbcom/mealie | f5ab2dcde8bd87d5a9a54f3ebe3615e3f52ba118 | [
"MIT"
] | null | null | null | mealie/services/settings_services.py | zackbcom/mealie | f5ab2dcde8bd87d5a9a54f3ebe3615e3f52ba118 | [
"MIT"
] | null | null | null | mealie/services/settings_services.py | zackbcom/mealie | f5ab2dcde8bd87d5a9a54f3ebe3615e3f52ba118 | [
"MIT"
] | null | null | null | import json
from typing import List, Optional
from db.settings_models import (
SiteSettingsDocument,
SiteThemeDocument,
ThemeColorsDocument,
WebhooksDocument,
)
from pydantic import BaseModel
class Webhooks(BaseModel):
    """Webhook configuration embedded in the site settings."""

    # Time of day at which scheduled webhooks fire -- presumably "HH:MM";
    # TODO confirm format against the scheduler.
    webhookTime: str
    # Destination URLs to notify; may be None/absent.
    webhookURLs: Optional[List[str]]
    # Master on/off switch for webhook delivery.
    enabled: bool

    @staticmethod
    def run():
        """Placeholder; webhook execution is not implemented here."""
        pass
class SiteSettings(BaseModel):
    """Pydantic view over the singleton ``main`` SiteSettingsDocument."""

    name: str = "main"
    webhooks: Webhooks

    @staticmethod
    def _unpack_doc(document: SiteSettingsDocument):
        """Convert a mongoengine settings document into a SiteSettings model."""
        document = json.loads(document.to_json())
        del document["_id"]
        # Fix: the key was misspelled "webhhooks", so the converted Webhooks
        # instance was silently discarded (pydantic ignored the extra key and
        # coerced the raw dict instead).
        document["webhooks"] = Webhooks(**document["webhooks"])
        return SiteSettings(**document)

    @staticmethod
    def get_site_settings():
        """Fetch the singleton settings document, creating it if missing."""
        try:
            document = SiteSettingsDocument.objects.get(name="main")
        except Exception:  # narrowed from a bare except: (kept broad --
            # mongoengine raises DoesNotExist, but that type is generated
            # per-document class and is not imported here)
            webhooks = WebhooksDocument()
            document = SiteSettingsDocument(name="main", webhooks=webhooks)
            document.save()

        return SiteSettings._unpack_doc(document)

    def update(self):
        """Persist this model's webhook settings back to the mongo document."""
        document = SiteSettingsDocument.objects.get(name="main")
        new_webhooks = WebhooksDocument(**self.webhooks.dict())
        document.update(set__webhooks=new_webhooks)
        document.save()
class Colors(BaseModel):
    """Named theme palette; values are color strings (presumably hex codes --
    TODO confirm against the frontend theme consumer)."""

    primary: str
    accent: str
    secondary: str
    success: str
    info: str
    warning: str
    error: str
class SiteTheme(BaseModel):
    """Pydantic representation of a stored color theme."""

    name: str
    colors: Colors

    @staticmethod
    def get_by_name(theme_name):
        """Load a single theme by its unique name."""
        doc = SiteThemeDocument.objects.get(name=theme_name)
        return SiteTheme._unpack_doc(doc)

    @staticmethod
    def _unpack_doc(document):
        """Convert a mongoengine theme document into a SiteTheme model."""
        raw = json.loads(document.to_json())
        del raw["_id"]
        return SiteTheme(**raw)

    @staticmethod
    def get_all():
        """Return every stored theme as a SiteTheme model."""
        return [SiteTheme._unpack_doc(doc) for doc in SiteThemeDocument.objects()]

    def save_to_db(self):
        """Insert this theme as a new document."""
        payload = self.dict()
        payload["colors"] = ThemeColorsDocument(**payload["colors"])
        SiteThemeDocument(**payload).save()

    def update_document(self):
        """Update the stored colors of the document matching this theme's name."""
        payload = self.dict()
        payload["colors"] = ThemeColorsDocument(**payload["colors"])
        doc = SiteThemeDocument.objects.get(name=self.name)
        if doc:
            doc.update(set__colors=payload["colors"])
            doc.save()

    @staticmethod
    def delete_theme(theme_name: str) -> str:
        """Remove the theme with the given name; returns a status string."""
        doc = SiteThemeDocument.objects.get(name=theme_name)
        if doc:
            doc.delete()
            return "Document Deleted"
| 24.556522 | 75 | 0.648017 |
f4fd38812e9584fdeee7c54d827361334efb2063 | 2,430 | py | Python | host-software/keyplus/keycodes/lang_map/Macedonian0.py | ai03-2725/keyplus | 6fb857dff7aa88284d6f6f08532f8da3aae981e1 | [
"MIT"
] | 226 | 2017-08-14T16:11:36.000Z | 2022-03-13T00:58:13.000Z | host-software/keyplus/keycodes/lang_map/Macedonian0.py | ai03-2725/keyplus | 6fb857dff7aa88284d6f6f08532f8da3aae981e1 | [
"MIT"
] | 90 | 2017-09-12T02:07:39.000Z | 2022-01-27T20:58:19.000Z | host-software/keyplus/keycodes/lang_map/Macedonian0.py | ai03-2725/keyplus | 6fb857dff7aa88284d6f6f08532f8da3aae981e1 | [
"MIT"
] | 44 | 2017-09-17T17:31:25.000Z | 2022-02-27T08:19:46.000Z | # Copyright 2018 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from hid_keycodes import *
lang = 'Macedonian'
country = 'Macedonia FYR'

# Each HID scancode maps to a 6-tuple of produced characters. The layout
# appears to be (base, shift, AltGr) for the Latin layer followed by
# (base, shift, AltGr) for the Cyrillic layer -- TODO confirm against the
# keyplus layout tooling. Empty strings mean the position produces nothing.
# Fix: the original closing-brace line had non-Python table residue fused
# onto it, which made the module unparsable; the residue is dropped here.
scancode_map = {
    KC_0: ('0', '=', '', '0', '=', ''),
    KC_1: ('1', '!', '~', '1', '!', '~'),
    KC_2: ('2', '"', '', '2', '"', ''),
    KC_3: ('3', '#', '^', '3', '#', '^'),
    KC_4: ('4', '$', '', '4', '$', ''),
    KC_5: ('5', '%', '', '5', '%', ''),
    KC_6: ('6', '&', '', '6', '&', ''),
    KC_7: ('7', '/', '`', '7', '/', ''),
    KC_8: ('8', '(', '', '8', '(', ''),
    KC_9: ('9', ')', '', '9', ')', ''),
    KC_A: ('a', 'A', '', 'а', 'А', ''),
    KC_B: ('b', 'B', '{', 'б', 'Б', '{'),
    KC_C: ('c', 'C', '', 'ц', 'Ц', ''),
    KC_D: ('d', 'D', '', 'д', 'Д', ''),
    KC_E: ('e', 'E', '€', 'е', 'Е', '€'),
    KC_F: ('f', 'F', '[', 'ф', 'Ф', '['),
    KC_G: ('g', 'G', ']', 'г', 'Г', ']'),
    KC_H: ('h', 'H', '', 'х', 'Х', ''),
    KC_I: ('i', 'I', '', 'и', 'И', ''),
    KC_J: ('j', 'J', '', 'ј', 'Ј', ''),
    KC_K: ('k', 'K', '', 'к', 'К', ''),
    KC_L: ('l', 'L', '', 'л', 'Л', ''),
    KC_M: ('m', 'M', '§', 'м', 'М', '§'),
    KC_N: ('n', 'N', '}', 'н', 'Н', '}'),
    KC_O: ('o', 'O', '', 'о', 'О', ''),
    KC_P: ('p', 'P', '', 'п', 'П', ''),
    KC_Q: ('q', 'Q', '\\', 'љ', 'Љ', '\\'),
    KC_R: ('r', 'R', '', 'р', 'Р', ''),
    KC_S: ('s', 'S', '', 'с', 'С', ''),
    KC_T: ('t', 'T', '', 'т', 'Т', ''),
    KC_U: ('u', 'U', '', 'у', 'У', ''),
    KC_V: ('v', 'V', '@', 'в', 'В', '@'),
    KC_W: ('w', 'W', '|', 'њ', 'Њ', '|'),
    KC_X: ('x', 'X', '', 'џ', 'Џ', ''),
    KC_Y: ('z', 'Z', '', 'з', 'З', ''),
    KC_Z: ('y', 'Y', '', 'ѕ', 'Ѕ', ''),
    KC_APOSTROPHE: ('§', '§', '', 'ќ', 'Ќ', 'ћ'),
    KC_BACKSPACE: ('\x08', '\x08', '', '\x08', '\x08', ''),
    KC_COMMA: (',', ';', '', ',', ';', ''),
    KC_ENTER: ('\r', '', '', '', '', ''),
    KC_EQUAL: ('+', '*', '', '+', '*', ''),
    KC_FORWARD_SLASH: ('-', '_', '', '-', '_', ''),
    KC_GRAVE: ('\\', '|', '', '\\', '|', ''),
    KC_ISO_BACK_SLASH: ('<', '>', '', '<', '>', ''),
    KC_ISO_HASH: ('@', '@', '', 'ж', 'Ж', ''),
    KC_LEFT_BRACKET: ('[', '{', '', 'ш', 'Ш', 'Ђ'),
    KC_MINUS: ("'", '?', '', "'", '?', ''),
    KC_PERIOD: ('.', ':', '', '.', ':', ''),
    KC_RIGHT_BRACKET: (']', '}', '', 'ѓ', 'Ѓ', 'ђ'),
    KC_SEMICOLON: ('^', '^', '', 'ч', 'Ч', 'Ћ'),
    KC_SPACEBAR: (' ', ' ', '', ' ', ' ', ''),
    KC_TAB: ('\t', '', '', '\t', '', ''),
}
b9bfc5da1e6d9ac592f1c8b2d7e5206845ca6ff7 | 44,717 | py | Python | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_recommendation_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_recommendation_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_recommendation_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v8.common.types import ad_asset
from google.ads.googleads.v8.common.types import ad_type_infos
from google.ads.googleads.v8.common.types import asset_policy
from google.ads.googleads.v8.common.types import custom_parameter
from google.ads.googleads.v8.common.types import extensions
from google.ads.googleads.v8.common.types import final_app_url
from google.ads.googleads.v8.common.types import policy
from google.ads.googleads.v8.common.types import url_collection
from google.ads.googleads.v8.enums.types import ad_type
from google.ads.googleads.v8.enums.types import app_url_operating_system_type
from google.ads.googleads.v8.enums.types import asset_performance_label
from google.ads.googleads.v8.enums.types import call_conversion_reporting_state
from google.ads.googleads.v8.enums.types import device
from google.ads.googleads.v8.enums.types import display_ad_format_setting
from google.ads.googleads.v8.enums.types import display_upload_product_type
from google.ads.googleads.v8.enums.types import keyword_match_type
from google.ads.googleads.v8.enums.types import legacy_app_install_ad_app_store
from google.ads.googleads.v8.enums.types import mime_type
from google.ads.googleads.v8.enums.types import policy_approval_status
from google.ads.googleads.v8.enums.types import policy_review_status
from google.ads.googleads.v8.enums.types import policy_topic_entry_type
from google.ads.googleads.v8.enums.types import policy_topic_evidence_destination_mismatch_url_type
from google.ads.googleads.v8.enums.types import policy_topic_evidence_destination_not_working_device
from google.ads.googleads.v8.enums.types import policy_topic_evidence_destination_not_working_dns_error_type
from google.ads.googleads.v8.enums.types import recommendation_type
from google.ads.googleads.v8.enums.types import served_asset_field_type
from google.ads.googleads.v8.enums.types import system_managed_entity_source
from google.ads.googleads.v8.resources.types import ad
from google.ads.googleads.v8.resources.types import recommendation
from google.ads.googleads.v8.services.services.recommendation_service import RecommendationServiceClient
from google.ads.googleads.v8.services.services.recommendation_service import transports
from google.ads.googleads.v8.services.types import recommendation_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.rpc import status_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy client-certificate provider used by the mTLS tests."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a sentinel endpoint for localhost clients.

    Makes the default endpoint distinct from the default mTLS endpoint so the
    autoswitch behaviour is observable in tests.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint rewrites googleapis hosts to mTLS variants."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    # None passes through unchanged.
    assert RecommendationServiceClient._get_default_mtls_endpoint(None) is None
    # Plain endpoints are rewritten; already-mTLS endpoints are left stable.
    assert RecommendationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert RecommendationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert RecommendationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert RecommendationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    # Non-Google hosts are returned verbatim.
    assert RecommendationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_recommendation_service_client_from_service_account_info():
    """from_service_account_info wires factory credentials into the transport."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = RecommendationServiceClient.from_service_account_info(info)
        # The mocked credentials must reach the transport unchanged.
        assert client.transport._credentials == creds
    # Default host is used when no endpoint override is given.
    assert client.transport._host == 'googleads.googleapis.com:443'
def test_recommendation_service_client_from_service_account_file():
    """Both file-based factory methods wire credentials into the transport."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = RecommendationServiceClient.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        # from_service_account_json is an alias of from_service_account_file.
        client = RecommendationServiceClient.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
    # Default host is used when no endpoint override is given.
    assert client.transport._host == 'googleads.googleapis.com:443'
def test_recommendation_service_client_get_transport_class():
    """get_transport_class yields the gRPC transport by default and by name."""
    default_transport = RecommendationServiceClient.get_transport_class()
    assert default_transport == transports.RecommendationServiceGrpcTransport

    named_transport = RecommendationServiceClient.get_transport_class("grpc")
    assert named_transport == transports.RecommendationServiceGrpcTransport
@mock.patch.object(RecommendationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecommendationServiceClient))
def test_recommendation_service_client_client_options():
    """Client honors explicit transports, api_endpoint, and mTLS env vars."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.RecommendationServiceClient.get_transport_class') as gtc:
        transport = transports.RecommendationServiceGrpcTransport(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = RecommendationServiceClient(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.RecommendationServiceClient.get_transport_class') as gtc:
        client = RecommendationServiceClient(transport="grpc")
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = RecommendationServiceClient(client_options=options)
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
    # is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = RecommendationServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = RecommendationServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = RecommendationServiceClient()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = RecommendationServiceClient()
@mock.patch.object(RecommendationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecommendationServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_recommendation_service_client_mtls_env_auto(use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto the client must switch to the
    mTLS endpoint only when client certificates are both enabled
    (GOOGLE_API_USE_CLIENT_CERTIFICATE="true") and actually available,
    whether supplied explicitly, via ADC, or not at all."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceGrpcTransport.__init__') as grpc_transport:
            ssl_channel_creds = mock.Mock()
            with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
                grpc_transport.return_value = None
                client = RecommendationServiceClient(client_options=options)

                if use_client_cert_env == "false":
                    expected_ssl_channel_creds = None
                    expected_host = client.DEFAULT_ENDPOINT
                else:
                    expected_ssl_channel_creds = ssl_channel_creds
                    expected_host = client.DEFAULT_MTLS_ENDPOINT

                grpc_transport.assert_called_once_with(
                    ssl_channel_credentials=expected_ssl_channel_creds,
                    credentials=None,
                    host=expected_host,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
                        if use_client_cert_env == "false":
                            is_mtls_mock.return_value = False
                            ssl_credentials_mock.return_value = None
                            # NOTE(review): `client` here is still the instance bound in the
                            # previous block; the expected values are class-level DEFAULT_*
                            # attributes, so this works, but reading them from the class
                            # would be clearer -- confirm before tidying.
                            expected_host = client.DEFAULT_ENDPOINT
                            expected_ssl_channel_creds = None
                        else:
                            is_mtls_mock.return_value = True
                            ssl_credentials_mock.return_value = mock.Mock()
                            expected_host = client.DEFAULT_MTLS_ENDPOINT
                            expected_ssl_channel_creds = ssl_credentials_mock.return_value

                        grpc_transport.return_value = None
                        client = RecommendationServiceClient()
                        grpc_transport.assert_called_once_with(
                            ssl_channel_credentials=expected_ssl_channel_creds,
                            credentials=None,
                            host=expected_host,
                            client_info=transports.base.DEFAULT_CLIENT_INFO,
                        )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    is_mtls_mock.return_value = False
                    grpc_transport.return_value = None
                    client = RecommendationServiceClient()
                    grpc_transport.assert_called_once_with(
                        ssl_channel_credentials=None,
                        credentials=None,
                        host=client.DEFAULT_ENDPOINT,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
def test_recommendation_service_client_client_options_from_dict():
    """Client options may be supplied as a plain dict instead of a
    ClientOptions instance; the api_endpoint key must reach the transport."""
    with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = RecommendationServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_get_recommendation(transport: str = 'grpc', request_type=recommendation_service.GetRecommendationRequest):
    """get_recommendation() forwards the request to the gRPC stub and returns
    the Recommendation response with every populated field preserved."""
    client = RecommendationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_recommendation),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = recommendation.Recommendation(
            resource_name='resource_name_value',
            type_=recommendation_type.RecommendationTypeEnum.RecommendationType.UNKNOWN,
            campaign_budget='campaign_budget_value',
            campaign='campaign_value',
            ad_group='ad_group_value',
            dismissed=True,
            campaign_budget_recommendation=recommendation.Recommendation.CampaignBudgetRecommendation(current_budget_amount_micros=3004),
        )
        response = client.get_recommendation(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == recommendation_service.GetRecommendationRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, recommendation.Recommendation)
    assert response.resource_name == 'resource_name_value'
    assert response.type_ == recommendation_type.RecommendationTypeEnum.RecommendationType.UNKNOWN
    assert response.campaign_budget == 'campaign_budget_value'
    assert response.campaign == 'campaign_value'
    assert response.ad_group == 'ad_group_value'
    assert response.dismissed is True
def test_get_recommendation_from_dict():
test_get_recommendation(request_type=dict)
def test_get_recommendation_field_headers():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommendation_service.GetRecommendationRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
call.return_value = recommendation.Recommendation()
client.get_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_recommendation_flattened():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_recommendation(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_recommendation_flattened_error():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_recommendation(
recommendation_service.GetRecommendationRequest(),
resource_name='resource_name_value',
)
def test_apply_recommendation(transport: str = 'grpc', request_type=recommendation_service.ApplyRecommendationRequest):
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.apply_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation_service.ApplyRecommendationResponse(
)
response = client.apply_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommendation_service.ApplyRecommendationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation_service.ApplyRecommendationResponse)
def test_apply_recommendation_from_dict():
test_apply_recommendation(request_type=dict)
def test_apply_recommendation_field_headers():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommendation_service.ApplyRecommendationRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.apply_recommendation),
'__call__') as call:
call.return_value = recommendation_service.ApplyRecommendationResponse()
client.apply_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_apply_recommendation_flattened():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.apply_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation_service.ApplyRecommendationResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.apply_recommendation(
customer_id='customer_id_value',
operations=[recommendation_service.ApplyRecommendationOperation(resource_name='resource_name_value')],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [recommendation_service.ApplyRecommendationOperation(resource_name='resource_name_value')]
def test_apply_recommendation_flattened_error():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.apply_recommendation(
recommendation_service.ApplyRecommendationRequest(),
customer_id='customer_id_value',
operations=[recommendation_service.ApplyRecommendationOperation(resource_name='resource_name_value')],
)
def test_dismiss_recommendation(transport: str = 'grpc', request_type=recommendation_service.DismissRecommendationRequest):
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.dismiss_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation_service.DismissRecommendationResponse(
)
response = client.dismiss_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommendation_service.DismissRecommendationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation_service.DismissRecommendationResponse)
def test_dismiss_recommendation_from_dict():
test_dismiss_recommendation(request_type=dict)
def test_dismiss_recommendation_field_headers():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommendation_service.DismissRecommendationRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.dismiss_recommendation),
'__call__') as call:
call.return_value = recommendation_service.DismissRecommendationResponse()
client.dismiss_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_dismiss_recommendation_flattened():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.dismiss_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation_service.DismissRecommendationResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.dismiss_recommendation(
customer_id='customer_id_value',
operations=[recommendation_service.DismissRecommendationRequest.DismissRecommendationOperation(resource_name='resource_name_value')],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [recommendation_service.DismissRecommendationRequest.DismissRecommendationOperation(resource_name='resource_name_value')]
def test_dismiss_recommendation_flattened_error():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.dismiss_recommendation(
recommendation_service.DismissRecommendationRequest(),
customer_id='customer_id_value',
operations=[recommendation_service.DismissRecommendationRequest.DismissRecommendationOperation(resource_name='resource_name_value')],
)
def test_credentials_transport_error():
    """Supplying both credentials and a ready-made transport must raise
    ValueError, since the transport already carries its own credentials."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.RecommendationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = RecommendationServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RecommendationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RecommendationServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.RecommendationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.RecommendationServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.RecommendationServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_recommendation_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.RecommendationServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_recommendation',
'apply_recommendation',
'dismiss_recommendation',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_recommendation_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v8.services.services.recommendation_service.transports.RecommendationServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RecommendationServiceTransport()
adc.assert_called_once()
def test_recommendation_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RecommendationServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_recommendation_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.RecommendationServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_recommendation_service_host_no_port():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_recommendation_service_host_with_port():
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_recommendation_service_grpc_transport_channel():
    """A pre-built channel passed via ``channel=`` must be used verbatim,
    and no SSL credentials are recorded for it."""
    channel = grpc.insecure_channel('http://localhost/')

    # Check that channel is used if provided.
    transport = transports.RecommendationServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with identity, not equality (`== None` can be
    # defeated by a custom __eq__ on the compared object).
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.RecommendationServiceGrpcTransport])
def test_recommendation_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.RecommendationServiceGrpcTransport,])
def test_recommendation_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_ad_path():
    """ad_path() renders the canonical ad resource name."""
    path = RecommendationServiceClient.ad_path("squid", "clam")
    assert path == "customers/squid/ads/clam"
def test_parse_ad_path():
    """parse_ad_path() is the inverse of ad_path()."""
    fields = {
        "customer_id": "whelk",
        "ad_id": "octopus",
    }
    # Round-trip: building a path from fields and parsing it back must
    # recover the original fields.
    built = RecommendationServiceClient.ad_path(**fields)
    assert RecommendationServiceClient.parse_ad_path(built) == fields
def test_ad_group_path():
customer_id = "oyster"
ad_group_id = "nudibranch"
expected = "customers/{customer_id}/adGroups/{ad_group_id}".format(customer_id=customer_id, ad_group_id=ad_group_id, )
actual = RecommendationServiceClient.ad_group_path(customer_id, ad_group_id)
assert expected == actual
def test_parse_ad_group_path():
expected = {
"customer_id": "cuttlefish",
"ad_group_id": "mussel",
}
path = RecommendationServiceClient.ad_group_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_ad_group_path(path)
assert expected == actual
def test_campaign_path():
customer_id = "winkle"
campaign_id = "nautilus"
expected = "customers/{customer_id}/campaigns/{campaign_id}".format(customer_id=customer_id, campaign_id=campaign_id, )
actual = RecommendationServiceClient.campaign_path(customer_id, campaign_id)
assert expected == actual
def test_parse_campaign_path():
expected = {
"customer_id": "scallop",
"campaign_id": "abalone",
}
path = RecommendationServiceClient.campaign_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_campaign_path(path)
assert expected == actual
def test_campaign_budget_path():
customer_id = "squid"
campaign_budget_id = "clam"
expected = "customers/{customer_id}/campaignBudgets/{campaign_budget_id}".format(customer_id=customer_id, campaign_budget_id=campaign_budget_id, )
actual = RecommendationServiceClient.campaign_budget_path(customer_id, campaign_budget_id)
assert expected == actual
def test_parse_campaign_budget_path():
expected = {
"customer_id": "whelk",
"campaign_budget_id": "octopus",
}
path = RecommendationServiceClient.campaign_budget_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_campaign_budget_path(path)
assert expected == actual
def test_recommendation_path():
customer_id = "oyster"
recommendation_id = "nudibranch"
expected = "customers/{customer_id}/recommendations/{recommendation_id}".format(customer_id=customer_id, recommendation_id=recommendation_id, )
actual = RecommendationServiceClient.recommendation_path(customer_id, recommendation_id)
assert expected == actual
def test_parse_recommendation_path():
expected = {
"customer_id": "cuttlefish",
"recommendation_id": "mussel",
}
path = RecommendationServiceClient.recommendation_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_recommendation_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = RecommendationServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = RecommendationServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder, )
actual = RecommendationServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = RecommendationServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization, )
actual = RecommendationServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = RecommendationServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project, )
actual = RecommendationServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = RecommendationServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = RecommendationServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = RecommendationServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RecommendationServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.RecommendationServiceTransport, '_prep_wrapped_messages') as prep:
client = RecommendationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.RecommendationServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = RecommendationServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_grpc_transport_close():
    """Using the client as a context manager closes the underlying gRPC
    channel exactly once, on exit."""
    client = RecommendationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    with mock.patch.object(type(client.transport._grpc_channel), 'close') as chan_close:
        with client as _:
            # The channel must remain open for the duration of the block.
            chan_close.assert_not_called()
        chan_close.assert_called_once()
def test_grpc_client_ctx():
    """Exiting the client context manager delegates to transport.close()."""
    client = RecommendationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Test client calls underlying transport.
    with mock.patch.object(type(client.transport), "close") as close:
        close.assert_not_called()
        with client as _:
            pass
        close.assert_called()
| 43.079961 | 216 | 0.720576 |
c60b9ef382c5f50d543ebc730c8852ecc34bd72d | 17 | py | Python | epytope/Data/pssms/calisimm/mat/A0301.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/calisimm/mat/A0301.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/calisimm/mat/A0301.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | A0301 = [1, 2, 9] | 17 | 17 | 0.470588 |
12bd4e3081119939fe53eb8b544f26b59c299f58 | 6,559 | py | Python | carim/configuration/server/types.py | schana/dayz-server-carim | 007c2f3d1861b736c4a59aeb4ef6275c56f76869 | [
"Apache-2.0"
] | 3 | 2020-04-06T17:57:24.000Z | 2020-06-02T04:21:41.000Z | carim/configuration/server/types.py | schana/dayz-server-carim | 007c2f3d1861b736c4a59aeb4ef6275c56f76869 | [
"Apache-2.0"
] | 16 | 2020-03-18T09:07:51.000Z | 2020-05-11T14:58:55.000Z | carim/configuration/server/types.py | schana/dayz-server-carim | 007c2f3d1861b736c4a59aeb4ef6275c56f76869 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import pathlib
from xml.etree import ElementTree
from carim.configuration import decorators
from carim.global_resources import types, item_type, matching_model, resourcesdir, mission, deploydir, locations
from carim.util import file_writing
log = logging.getLogger(__name__)
# 3,888,000 seconds == 45 days; used as the "restock" value below so the
# item effectively never restocks.
_MAX_TIME = 3888000
# Modification payload that zeroes out a type's spawn parameters (nominal,
# min, cost) instead of deleting its <type> element.  Currently unused by
# the active code path.
_REMOVE_MODIFICATION = {
    "nominal": 0,
    "lifetime": 1,
    "restock": _MAX_TIME,
    "min": 0,
    "cost": 0,
    "flags": [
        {
            "name": "count_in_cargo",
            "value": True
        },
        {
            "name": "count_in_hoarder",
            "value": True
        },
        {
            "name": "count_in_map",
            "value": True
        },
        {
            "name": "count_in_player",
            "value": True
        }
    ],
    "value": []
}
@decorators.server(directory='mpmissions/' + mission.get() + '/db')
def modify_types(directory):
    """Apply the configured type modifications to the mission's types DB.

    Reads ``types_config.json`` (a list of actions, each carrying one or
    more matching specs plus an optional modification) and dispatches every
    matching spec to remove()/ratio()/modify().  Afterwards,
    ``types_universal.json`` supplies a global spawn ratio applied to every
    type whose nominal value does not start with '0'.
    """
    with open(pathlib.Path(resourcesdir.get(), 'modifications/server/types_config.json')) as f:
        type_config = json.load(f)
    for action in type_config:
        log.info(action.get('description'))
        matching = action.get('matching')
        # Dispatch on the action name; anything other than 'remove'/'ratio'
        # falls back to a plain field modification.
        if action['action'] == 'remove':
            process_type = remove
        elif action['action'] == 'ratio':
            process_type = ratio
        else:
            process_type = modify
        for m in matching:
            process_type(matching=m, modification=action.get('modification', dict()))

    with open(pathlib.Path(resourcesdir.get(), 'modifications/server/types_universal.json')) as f:
        type_universal_config = json.load(f)
    ratio_modifier = type_universal_config.get('ratio', 1)
    # Regex "^[^0]" selects nominal values that do not start with '0'.
    matching = {
        "nominal": "^[^0]"
    }
    m = matching_model.Match(matching)
    count = 0
    for t in types.get().getroot():
        if m.match(t) and t.find('nominal') is not None:
            count += 1
            # Floor at 1 so scaling never eliminates an item entirely.
            t.find('nominal').text = str(max(1, int(ratio_modifier * int(t.find('nominal').text))))
            t.find('min').text = str(max(1, int(ratio_modifier * int(t.find('min').text))))
    log.info('modified {} items with ratio {}'.format(count, ratio_modifier))
def remove(matching=None, modification=None):
    """Delete every type entry matched by *matching* from the types tree.

    :param matching: rule dict understood by matching_model.Match
    :param modification: unused; kept for a uniform action signature
    """
    matcher = matching_model.Match(matching)
    # collect first, then remove -- mutating the tree while iterating it is unsafe
    doomed = [entry for entry in types.get().getroot() if matcher.match(entry)]
    for entry in doomed:
        log.debug('removing ' + entry.attrib.get('name'))
    for entry in doomed:
        types.get().getroot().remove(entry)
    log.info('removed {} items matching {}'.format(len(doomed), matching))
def modify(matching=None, modification=None):
    """Apply *modification* to every type entry matched by *matching*.

    :param matching: rule dict understood by matching_model.Match
    :param modification: field dict handed to apply_modification(); a None
        value makes this a no-op
    """
    if modification is None:
        return
    matcher = matching_model.Match(matching)
    modified = 0
    for entry in types.get().getroot():
        if not matcher.match(entry):
            continue
        modified += 1
        log.debug('modifying ' + entry.attrib.get('name'))
        apply_modification(entry, modification)
    log.info('modified {} items matching {} with {}'.format(modified, matching, json.dumps(modification)))
def ratio(matching=None, modification=None):
    """Scale nominal/min spawn counts of matching types by modification['ratio'].

    Results are clamped to at least 1 so a small ratio never zeroes out an
    active type.

    :param matching: rule dict understood by matching_model.Match
    :param modification: must contain a 'ratio' multiplier
    """
    match = matching_model.Match(matching)
    count = 0
    ratio_modifier = modification.get('ratio')
    for t in types.get().getroot():
        if match.match(t) and t.find('nominal') is not None:
            # bug fix: count was previously incremented twice per matched item,
            # doubling the figure reported in the log below
            count += 1
            log.debug('applying ratio to ' + t.attrib.get('name'))
            t.find('nominal').text = str(max(1, int(ratio_modifier * int(t.find('nominal').text))))
            t.find('min').text = str(max(1, int(ratio_modifier * int(t.find('min').text))))
    log.info('modified {} items with ratio {}'.format(count, ratio_modifier))
def apply_modification(item_element: ElementTree.Element, modification):
    """Write the values from *modification* into a type element, using
    item_type.modification_template to decide how each field is represented.

    Template conventions (inferred from the dispatch below -- see item_type):
      * value 1           -> field stored as element text
      * list value        -> field stored as repeated child elements with attributes
      * dict value        -> field stored as a single child element with attributes
      * 'flags' (special) -> boolean "0"/"1" attributes on the existing flags child
    """
    template = item_type.modification_template
    # simple text fields: overwrite the element's text when both sides have it
    text_fields = [k for k in template.keys() if template.get(k) == 1]
    for field in text_fields:
        if modification.get(field) is not None:
            if item_element.find(field) is not None:
                item_element.find(field).text = str(modification.get(field))
    # list fields: drop all existing children, then add one child per entry
    array_fields = [k for k in template.keys() if k != 'flags' and isinstance(template.get(k), list)]
    for field in array_fields:
        if modification.get(field) is not None:
            for child in item_element.findall(field):
                item_element.remove(child)
            values = modification.get(field)
            for value in values:
                ElementTree.SubElement(item_element, field, attrib=value)
    # dict fields: replace the single child; an empty dict just deletes it
    attribute_fields = [k for k in template.keys() if k != 'flags' and isinstance(template.get(k), dict)]
    for field in attribute_fields:
        if modification.get(field) is not None:
            for child in item_element.findall(field):
                item_element.remove(child)
            if modification.get(field):
                ElementTree.SubElement(item_element, field, attrib=modification.get(field))
    # flags: toggle boolean attributes in place on the existing flags element(s)
    field = 'flags'
    if modification.get(field) is not None:
        for child in item_element.findall(field):
            for flag in modification.get(field):
                child.set(flag.get('name'), "1" if flag.get('value') else "0")
@decorators.register
@decorators.mission
def remove_spawns_outside_radius(directory):
    """Prune loot-spawn groups from mapgrouppos.xml that lie outside the
    configured radius of all of the first four location marks, writing the
    result into *directory*.

    Controlled by 'limit_spawn_locations_radius' in types_universal.json;
    a value <= 0 disables the whole pass.

    :param directory: output directory injected by the decorators.mission wrapper
    """
    with open(pathlib.Path(resourcesdir.get(), 'modifications/server/types_universal.json')) as f:
        type_universal_config = json.load(f)
    radius = type_universal_config.get('limit_spawn_locations_radius', 0)
    if radius <= 0:
        return
    p = pathlib.Path(deploydir.get(), 'mpmissions', mission.get(), 'mapgrouppos.xml')
    count = 0
    # mark[1] is presumably the position payload of each mark -- see locations
    areas = list(mark[1] for mark in locations.marks[:4])
    mapgroups = ElementTree.parse(p).getroot()
    for group in mapgroups.findall('.//group'):
        raw = group.get('pos')
        # log.info('{} {}'.format(group.get('name'), raw))
        # pos is "x y z"; only the horizontal plane (x, z) is tested below
        x, y, z = (float(i) for i in raw.split(' '))
        is_good = False
        for position in areas:
            if locations.overlaps(position, radius, x, z, 1):
                is_good = True
        if not is_good:
            mapgroups.remove(group)
            count += 1
            log.debug('removed group {}, {}, {}'.format(group.get('name'), x, z))
    if count > 0:
        # only rewrite the output file when something actually changed
        log.info('removed {} groups from {}'.format(count, p.name))
        with file_writing.f_open(pathlib.Path(directory, p.name), mode='w') as f:
            f.write(file_writing.convert_to_string(mapgroups))
| 37.056497 | 112 | 0.610611 |
16208ca3f040be4a5534244d930ef13861189034 | 41,380 | py | Python | python/ccxt/async_support/hollaex.py | zhenjie/ccxt | 37808d0f23c9bd2a60da10a3ebd2abdcd3c3b313 | [
"MIT"
] | 3 | 2017-12-14T04:56:10.000Z | 2021-09-02T02:25:26.000Z | python/ccxt/async_support/hollaex.py | zhenjie/ccxt | 37808d0f23c9bd2a60da10a3ebd2abdcd3c3b313 | [
"MIT"
] | 1 | 2017-10-28T14:35:08.000Z | 2017-10-28T14:35:08.000Z | python/ccxt/async_support/hollaex.py | zhenjie/ccxt | 37808d0f23c9bd2a60da10a3ebd2abdcd3c3b313 | [
"MIT"
] | 3 | 2018-10-17T09:29:29.000Z | 2019-03-12T09:18:42.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NetworkError
from ccxt.base.decimal_to_precision import TICK_SIZE
class hollaex(Exchange):
def describe(self):
return self.deep_extend(super(hollaex, self).describe(), {
'id': 'hollaex',
'name': 'HollaEx',
'countries': ['KR'],
'rateLimit': 333,
'version': 'v1',
'has': {
'CORS': False,
'fetchMarkets': True,
'fetchCurrencies': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchOrderBook': True,
'fetchOrderBooks': True,
'fetchTrades': True,
'fetchOHLCV': True,
'fetchBalance': True,
'createOrder': True,
'createLimitBuyOrder': True,
'createLimitSellOrder': True,
'createMarketBuyOrder': True,
'createMarketSellOrder': True,
'cancelOrder': True,
'cancelAllOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': False,
'fetchOpenOrder': True,
'fetchOrder': False,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTransactions': False,
'fetchOrders': False,
'fetchMyTrades': True,
'withdraw': True,
'fetchDepositAddress': True,
},
'timeframes': {
'1h': '1h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/75841031-ca375180-5ddd-11ea-8417-b975674c23cb.jpg',
'api': 'https://api.hollaex.com',
'www': 'https://hollaex.com',
'doc': 'https://apidocs.hollaex.com',
'referral': 'https://pro.hollaex.com/signup?affiliation_code=QSWA6G',
},
'precisionMode': TICK_SIZE,
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'api': {
'public': {
'get': [
'health',
'constant',
'ticker',
'ticker/all',
'orderbooks',
'trades',
'chart',
# TradingView data
'udf/config',
'udf/history',
'udf/symbols',
],
},
'private': {
'get': [
'user',
'user/balance',
'user/trades',
'user/orders',
'user/orders/{order_id}',
'user/deposits',
'user/withdrawals',
'user/withdraw/{currency}/fee',
],
'post': [
'user/request-withdrawal',
'order',
],
'delete': [
'user/orders',
'user/orders/{order_id}',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
},
},
'exceptions': {
'broad': {
'Invalid token': AuthenticationError,
'Order not found': OrderNotFound,
'Insufficient balance': InsufficientFunds,
},
'exact': {
'400': BadRequest,
'403': AuthenticationError,
'404': BadRequest,
'405': BadRequest,
'410': BadRequest,
'429': BadRequest,
'500': NetworkError,
'503': NetworkError,
},
},
'options': {
# how many seconds before the authenticated request expires
'api-expires': int(self.timeout / 1000),
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetConstant(params)
#
# {
# coins: {
# xmr: {
# id: 7,
# fullname: "Monero",
# symbol: "xmr",
# active: True,
# allow_deposit: True,
# allow_withdrawal: True,
# withdrawal_fee: 0.02,
# min: 0.001,
# max: 100000,
# increment_unit: 0.001,
# deposit_limits: {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0},
# withdrawal_limits: {'1': 10, '2': 15, '3': 100, '4': 100, '5': 200, '6': 300, '7': 350, '8': 400, '9': 500, '10': -1},
# created_at: "2019-12-09T07:14:02.720Z",
# updated_at: "2020-01-16T12:12:53.162Z"
# },
# # ...
# },
# pairs: {
# 'btc-usdt': {
# id: 2,
# name: "btc-usdt",
# pair_base: "btc",
# pair_2: "usdt",
# taker_fees: {'1': 0.3, '2': 0.25, '3': 0.2, '4': 0.18, '5': 0.1, '6': 0.09, '7': 0.08, '8': 0.06, '9': 0.04, '10': 0},
# maker_fees: {'1': 0.1, '2': 0.08, '3': 0.05, '4': 0.03, '5': 0, '6': 0, '7': 0, '8': 0, '9': 0, '10': 0},
# min_size: 0.0001,
# max_size: 1000,
# min_price: 100,
# max_price: 100000,
# increment_size: 0.0001,
# increment_price: 0.05,
# active: True,
# created_at: "2019-12-09T07:15:54.537Z",
# updated_at: "2019-12-09T07:15:54.537Z"
# },
# },
# config: {tiers: 10},
# status: True
# }
#
pairs = self.safe_value(response, 'pairs', {})
keys = list(pairs.keys())
result = []
for i in range(0, len(keys)):
key = keys[i]
market = pairs[key]
id = self.safe_string(market, 'name')
baseId = self.safe_string(market, 'pair_base')
quoteId = self.safe_string(market, 'pair_2')
base = self.common_currency_code(baseId.upper())
quote = self.common_currency_code(quoteId.upper())
symbol = base + '/' + quote
active = self.safe_value(market, 'active')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': {
'price': self.safe_number(market, 'increment_price'),
'amount': self.safe_number(market, 'increment_size'),
},
'limits': {
'amount': {
'min': self.safe_number(market, 'min_size'),
'max': self.safe_number(market, 'max_size'),
},
'price': {
'min': self.safe_number(market, 'min_price'),
'max': self.safe_number(market, 'max_price'),
},
'cost': {'min': None, 'max': None},
},
'info': market,
})
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetConstant(params)
coins = self.safe_value(response, 'coins', {})
keys = list(coins.keys())
result = {}
for i in range(0, len(keys)):
key = keys[i]
currency = coins[key]
id = self.safe_string(currency, 'symbol')
numericId = self.safe_integer(currency, 'id')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'fullname')
active = self.safe_value(currency, 'active')
fee = self.safe_number(currency, 'withdrawal_fee')
precision = self.safe_number(currency, 'increment_unit')
withdrawalLimits = self.safe_value(currency, 'withdrawal_limits', [])
result[code] = {
'id': id,
'numericId': numericId,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(currency, 'min'),
'max': self.safe_number(currency, 'max'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': self.safe_value(withdrawalLimits, 0),
},
},
}
return result
async def fetch_order_books(self, symbols=None, limit=None, params={}):
await self.load_markets()
response = await self.publicGetOrderbooks(params)
result = {}
marketIds = list(response.keys())
for i in range(0, len(marketIds)):
marketId = marketIds[i]
orderbook = response[marketId]
symbol = self.safe_symbol(marketId, None, '-')
timestamp = self.parse8601(self.safe_string(orderbook, 'timestamp'))
result[symbol] = self.parse_order_book(response[marketId], timestamp)
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
marketId = self.market_id(symbol)
request = {
'symbol': marketId,
}
response = await self.publicGetOrderbooks(self.extend(request, params))
#
# {
# "btc-usdt": {
# "bids": [
# [8836.4, 1.022],
# [8800, 0.0668],
# [8797.75, 0.2398],
# ],
# "asks": [
# [8839.35, 1.5334],
# [8852.6, 0.0579],
# [8860.45, 0.1815],
# ],
# "timestamp": "2020-03-03T02:27:25.147Z"
# },
# "eth-usdt": {},
# # ...
# }
#
orderbook = self.safe_value(response, marketId)
timestamp = self.parse8601(self.safe_string(orderbook, 'timestamp'))
return self.parse_order_book(orderbook, timestamp)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTicker(self.extend(request, params))
#
# {
# open: 8615.55,
# close: 8841.05,
# high: 8921.1,
# low: 8607,
# last: 8841.05,
# volume: 20.2802,
# timestamp: '2020-03-03T03:11:18.964Z'
# }
#
return self.parse_ticker(response, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickerAll(self.extend(params))
#
# {
# "bch-usdt": {
# "time": "2020-03-02T04:29:45.011Z",
# "open": 341.65,
# "close":337.9,
# "high":341.65,
# "low":337.3,
# "last":337.9,
# "volume":0.054,
# "symbol":"bch-usdt"
# },
# # ...
# }
#
return self.parse_tickers(response, symbols)
def parse_tickers(self, response, symbols=None):
result = {}
keys = list(response.keys())
for i in range(0, len(keys)):
key = keys[i]
ticker = response[key]
marketId = self.safe_string(ticker, 'symbol', key)
market = self.safe_market(marketId, None, '-')
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# open: 8615.55,
# close: 8841.05,
# high: 8921.1,
# low: 8607,
# last: 8841.05,
# volume: 20.2802,
# timestamp: '2020-03-03T03:11:18.964Z',
# }
#
# fetchTickers
#
# {
# "time": "2020-03-02T04:29:45.011Z",
# "open": 341.65,
# "close": 337.9,
# "high": 341.65,
# "low": 337.3,
# "last": 337.9,
# "volume": 0.054,
# "symbol": "bch-usdt"
# }
#
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.parse8601(self.safe_string_2(ticker, 'time', 'timestamp'))
close = self.safe_number(ticker, 'close')
result = {
'symbol': symbol,
'info': ticker,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': self.safe_number(ticker, 'open'),
'close': close,
'last': self.safe_number(ticker, 'last', close),
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'volume'),
'quoteVolume': None,
}
return result
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTrades(self.extend(request, params))
#
# {
# "btc-usdt": [
# {
# "size": 0.5,
# "price": 8830,
# "side": "buy",
# "timestamp": "2020-03-03T04:44:33.034Z"
# },
# # ...
# ]
# }
#
trades = self.safe_value(response, market['id'], [])
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "size": 0.5,
# "price": 8830,
# "side": "buy",
# "timestamp": "2020-03-03T04:44:33.034Z"
# }
#
# fetchMyTrades(private)
#
# {
# "side": "buy",
# "symbol": "eth-usdt",
# "size": 0.086,
# "price": 226.19,
# "timestamp": "2020-03-03T08:03:55.459Z",
# "fee": 0.1
# }
#
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
datetime = self.safe_string(trade, 'timestamp')
timestamp = self.parse8601(datetime)
side = self.safe_string(trade, 'side')
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'size')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
feeCost = self.safe_number(trade, 'fee')
fee = None
if feeCost is not None:
quote = market['quote']
feeCurrencyCode = market['quote'] if (market is not None) else quote
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return {
'info': trade,
'id': None,
'timestamp': timestamp,
'datetime': datetime,
'symbol': symbol,
'order': None,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
    async def fetch_ohlcv(self, symbol, timeframe='1h', since=None, limit=None, params={}):
        """Fetch OHLCV candles for *symbol*.

        At least one of *since*/*limit* must be supplied; otherwise the time
        window cannot be bounded and ArgumentsRequired is raised.

        :param symbol: unified market symbol
        :param timeframe: one of self.timeframes ('1h' or '1d')
        :param since: window start in milliseconds
        :param limit: number of candles requested
        :param params: extra parameters merged into the request
        :returns: list of [timestamp, open, high, low, close, volume]
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            'resolution': self.timeframes[timeframe],
        }
        # candle duration in seconds; converts a candle count into a time span
        duration = self.parse_timeframe(timeframe)
        if since is None:
            if limit is None:
                raise ArgumentsRequired(self.id + " fetchOHLCV() requires a 'since' or a 'limit' argument")
            else:
                # no start given: take the most recent `limit` candles ending now
                end = self.seconds()
                start = end - duration * limit
                request['to'] = end
                request['from'] = start
        else:
            if limit is None:
                # start given but no count: fetch everything from `since` to now
                # (the API expects seconds, `since` is in milliseconds)
                request['from'] = int(since / 1000)
                request['to'] = self.seconds()
            else:
                start = int(since / 1000)
                request['from'] = start
                request['to'] = self.sum(start, duration * limit)
        response = await self.publicGetChart(self.extend(request, params))
        #
        # [
        #     {
        #         "time":"2020-03-02T20:00:00.000Z",
        #         "close":8872.1,
        #         "high":8872.1,
        #         "low":8858.6,
        #         "open":8858.6,
        #         "symbol":"btc-usdt",
        #         "volume":1.2922
        #     },
        # ]
        #
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_ohlcv(self, response, market=None, timeframe='1h', since=None, limit=None):
#
# {
# "time":"2020-03-02T20:00:00.000Z",
# "close":8872.1,
# "high":8872.1,
# "low":8858.6,
# "open":8858.6,
# "symbol":"btc-usdt",
# "volume":1.2922
# }
#
return [
self.parse8601(self.safe_string(response, 'time')),
self.safe_number(response, 'open'),
self.safe_number(response, 'high'),
self.safe_number(response, 'low'),
self.safe_number(response, 'close'),
self.safe_number(response, 'volume'),
]
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetUserBalance(params)
#
# {
# "updated_at": "2020-03-02T22:27:38.428Z",
# "btc_balance": 0,
# "btc_pending": 0,
# "btc_available": 0,
# "eth_balance": 0,
# "eth_pending": 0,
# "eth_available": 0,
# # ...
# }
#
result = {'info': response}
currencyIds = list(self.currencies_by_id.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_number(response, currencyId + '_available')
account['total'] = self.safe_number(response, currencyId + '_balance')
result[code] = account
return self.parse_balance(result)
async def fetch_open_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'order_id': id,
}
response = await self.privateGetUserOrdersOrderId(self.extend(request, params))
#
# {
# "created_at": "2018-03-23T04:14:08.663Z",
# "title": "string",
# "side": "sell",
# "type": "limit",
# "price": 0,
# "size": 0,
# "symbol": "xht-usdt",
# "id": "string",
# "created_by": 1,
# "filled": 0
# }
#
return self.parse_order(response)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privateGetUserOrders(self.extend(request, params))
#
# [
# {
# "created_at":"2020-03-03T08:02:18.639Z",
# "title":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "side":"buy",
# "type":"limit",
# "price":226.19,
# "size":0.086,
# "symbol":"eth-usdt",
# "id":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "created_by":620,
# "filled":0
# }
# ]
#
return self.parse_orders(response, market)
def parse_order(self, order, market=None):
#
# fetchOpenOrder, fetchOpenOrders
#
# {
# "created_at":"2020-03-03T08:02:18.639Z",
# "title":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "side":"buy",
# "type":"limit",
# "price":226.19,
# "size":0.086,
# "symbol":"eth-usdt",
# "id":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "created_by":620,
# "filled":0
# }
#
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market, '-')
id = self.safe_string(order, 'id')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
price = self.safe_number(order, 'price')
amount = self.safe_number(order, 'size')
filled = self.safe_number(order, 'filled')
status = 'closed' if (type == 'market') else 'open'
return self.safe_order({
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'filled': filled,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'info': order,
'average': None,
})
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
order = {
'symbol': market['id'],
'side': side,
'size': amount,
'type': type,
}
if type != 'market':
order['price'] = price
response = await self.privatePostOrder(self.extend(order, params))
#
# {
# "symbol": "xht-usdt",
# "side": "sell",
# "size": 1,
# "type": "limit",
# "price": 0.1,
# "id": "string",
# "created_by": 34,
# "filled": 0,
# "status": "pending"
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'order_id': id,
}
response = await self.privateDeleteUserOrdersOrderId(self.extend(request, params))
#
# {
# "title": "string",
# "symbol": "xht-usdt",
# "side": "sell",
# "size": 1,
# "type": "limit",
# "price": 0.1,
# "id": "string",
# "created_by": 34,
# "filled": 0
# }
#
return self.parse_order(response)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.markets(symbol)
request['symbol'] = market['id']
response = await self.privateDeleteUserOrders(self.extend(request, params))
#
# [
# {
# "title": "string",
# "symbol": "xht-usdt",
# "side": "sell",
# "size": 1,
# "type": "limit",
# "price": 0.1,
# "id": "string",
# "created_by": 34,
# "filled": 0
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'symbol': market['id'],
# 'limit': 50, # default 50, max 100
# 'page': 1, # page of data to retrieve
# 'order_by': 'timestamp', # field to order data
# 'order': 'asc', # asc or desc
# 'start_date': 123, # starting date of queried data
# 'end_date': 321, # ending date of queried data
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit # default 50, max 100
if since is not None:
request['start_date'] = self.iso8601(since)
response = await self.privateGetUserTrades(self.extend(request, params))
#
# {
# "count": 1,
# "data": [
# {
# "side": "buy",
# "symbol": "eth-usdt",
# "size": 0.086,
# "price": 226.19,
# "timestamp": "2020-03-03T08:03:55.459Z",
# "fee": 0.1
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
response = await self.privateGetUser(params)
#
# {
# "id": 620,
# "email": "email@gmail.com",
# "full_name": "",
# "name_verified": False,
# "gender": False,
# "nationality": "",
# "phone_number": "",
# "address": {"city": "", "address": "", "country": "", "postal_code": ""},
# "id_data": {"note": "", "type": "", "number": "", "status": 0},
# "bank_account":[],
# "crypto_wallet":{
# "xrp": "rJtoECs6rPkJoAfgtR8SDDshV6hRHe3X7y:391496555"
# "usdt":"0x1fb4248e167901dfa0d8cdda2243a2126d7ce48d"
# # ...
# },
# "verification_level": 1,
# "otp_enabled": True,
# "activated": True,
# "note": "",
# "username": "user",
# "affiliation_code": "QSWA6G",
# "settings": {
# "chat": {"set_username": False},
# "risk": {"order_portfolio_percentage": 20},
# "audio": {
# "public_trade": False,
# "order_completed": True,
# "order_partially_completed": True
# },
# "language": "en",
# "interface": {"theme": "white","order_book_levels": 10},
# "notification": {
# "popup_order_completed": True,
# "popup_order_confirmation": True,
# "popup_order_partially_filled": True
# }
# },
# "flagged": False,
# "is_hap": False,
# "pin": False,
# "discount": 0,
# "created_at": "2020-03-02T22:27:38.331Z",
# "updated_at": "2020-03-03T07:54:58.315Z",
# "balance": {
# "xht_balance": 0,
# "xht_pending": 0,
# "xht_available": 0,
# # ...
# "updated_at": "2020-03-03T10:21:05.430Z"
# },
# "images": [],
# "fees": {
# "btc-usdt": {"maker_fee": 0.1, "taker_fee": 0.3},
# "eth-usdt": {"maker_fee": 0.1, "taker_fee": 0.3},
# # ...
# }
# }
#
cryptoWallet = self.safe_value(response, 'crypto_wallet')
address = self.safe_string(cryptoWallet, currency['id'])
tag = None
if address is not None:
parts = address.split(':')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'currency': currency['id'],
# 'limit': 50, # default 50, max 100
# 'page': 1, # page of data to retrieve
# 'order_by': 'timestamp', # field to order data
# 'order': 'asc', # asc or desc
# 'start_date': 123, # starting date of queried data
# 'end_date': 321, # ending date of queried data
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit # default 50, max 100
if since is not None:
request['start_date'] = self.iso8601(since)
response = await self.privateGetUserDeposits(self.extend(request, params))
#
# {
# "count": 1,
# "data": [
# {
# "id": 539,
# "amount": 20,
# "fee": 0,
# "address": "0x5c0cc98270d7089408fcbcc8e2131287f5be2306",
# "transaction_id": "0xd4006327a5ec2c41adbdcf566eaaba6597c3d45906abe78ea1a4a022647c2e28",
# "status": True,
# "dismissed": False,
# "rejected": False,
# "description": "",
# "type": "deposit",
# "currency": "usdt",
# "created_at": "2020-03-03T07:56:36.198Z",
# "updated_at": "2020-03-03T08:00:05.674Z",
# "user_id": 620
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'currency': currency['id'],
# 'limit': 50, # default 50, max 100
# 'page': 1, # page of data to retrieve
# 'order_by': 'timestamp', # field to order data
# 'order': 'asc', # asc or desc
# 'start_date': 123, # starting date of queried data
# 'end_date': 321, # ending date of queried data
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit # default 50, max 100
if since is not None:
request['start_date'] = self.iso8601(since)
response = await self.privateGetUserWithdrawals(self.extend(request, params))
#
# {
# "count": 1,
# "data": [
# {
# "id": 539,
# "amount": 20,
# "fee": 0,
# "address": "0x5c0cc98270d7089408fcbcc8e2131287f5be2306",
# "transaction_id": "0xd4006327a5ec2c41adbdcf566eaaba6597c3d45906abe78ea1a4a022647c2e28",
# "status": True,
# "dismissed": False,
# "rejected": False,
# "description": "",
# "type": "withdrawal",
# "currency": "usdt",
# "created_at": "2020-03-03T07:56:36.198Z",
# "updated_at": "2020-03-03T08:00:05.674Z",
# "user_id": 620
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id": 539,
# "amount": 20,
# "fee": 0,
# "address": "0x5c0cc98270d7089408fcbcc8e2131287f5be2306",
# "transaction_id": "0xd4006327a5ec2c41adbdcf566eaaba6597c3d45906abe78ea1a4a022647c2e28",
# "status": True,
# "dismissed": False,
# "rejected": False,
# "description": "",
# "type": "withdrawal",
# "currency": "usdt",
# "created_at": "2020-03-03T07:56:36.198Z",
# "updated_at": "2020-03-03T08:00:05.674Z",
# "user_id": 620
# }
#
id = self.safe_string(transaction, 'id')
txid = self.safe_string(transaction, 'transaction_id')
timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
updated = self.parse8601(self.safe_string(transaction, 'updated_at'))
type = self.safe_string(transaction, 'type')
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
addressTo = None
addressFrom = None
tag = None
tagTo = None
tagFrom = None
if address is not None:
parts = address.split(':')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
addressTo = address
tagTo = tag
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
status = self.safe_value(transaction, 'status')
dismissed = self.safe_value(transaction, 'dismissed')
rejected = self.safe_value(transaction, 'rejected')
if status:
status = 'ok'
elif dismissed:
status = 'canceled'
elif rejected:
status = 'failed'
else:
status = 'pending'
fee = {
'currency': code,
'cost': self.safe_number(transaction, 'fee'),
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': tagFrom,
'tag': tag,
'tagTo': tagTo,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if tag is not None:
address += ':' + tag
request = {
'currency': currency['id'],
'amount': amount,
'address': address,
}
# one time password
otp = self.safe_string(params, 'otp_code')
if (otp is not None) or (self.twofa is not None):
if otp is None:
otp = self.oath()
request['otp_code'] = otp
response = await self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': None,
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final request (url/method/body/headers), adding HMAC
        authentication headers for private endpoints.

        Private requests sign `method + path + expires` (plus the JSON body for
        POSTs) with the API secret and send the digest as `api-signature`.
        """
        query = self.omit(params, self.extract_params(path))
        path = '/' + self.version + '/' + self.implode_params(path, params)
        if method == 'GET':
            if query:
                path += '?' + self.urlencode(query)
        url = self.urls['api'] + path
        if api == 'private':
            self.check_required_credentials()
            # request expiry (seconds from now); defaults to the client timeout
            defaultExpires = self.safe_integer_2(self.options, 'api-expires', 'expires', int(self.timeout / 1000))
            expires = self.sum(self.seconds(), defaultExpires)
            expiresString = str(expires)
            auth = method + path + expiresString
            headers = {
                'api-key': self.encode(self.apiKey),
                'api-expires': expiresString,
            }
            if method == 'POST':
                headers['Content-type'] = 'application/json'
                if query:
                    # POST payloads are part of the signed message
                    body = self.json(query)
                    auth += body
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            headers['api-signature'] = signature
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if (code >= 400) and (code <= 503):
#
# {"message": "Invalid token"}
#
feedback = self.id + ' ' + body
message = self.safe_string(response, 'message')
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
status = str(code)
self.throw_exactly_matched_exception(self.exceptions['exact'], status, feedback)
| 37.481884 | 144 | 0.435621 |
afc1a284c7d67feed6ce2c06bf670d38495445ff | 12,474 | py | Python | games/reexposition.py | Dynamic-Games-in-Recommender-Systems/siren | 57159e77b6bc5bbc23a50623da07e6494d08f060 | [
"MIT"
] | null | null | null | games/reexposition.py | Dynamic-Games-in-Recommender-Systems/siren | 57159e77b6bc5bbc23a50623da07e6494d08f060 | [
"MIT"
] | null | null | null | games/reexposition.py | Dynamic-Games-in-Recommender-Systems/siren | 57159e77b6bc5bbc23a50623da07e6494d08f060 | [
"MIT"
] | 1 | 2021-06-10T12:08:49.000Z | 2021-06-10T12:08:49.000Z | import numpy as np
import random
import metrics
import copy
import math
class Reexposition_game:
    """Re-ranking "game" that redistributes item exposure via particle swarm search.

    The recommender supplies raw per-user item scores; this class multiplies
    them by per-slot exposure factors (the ``pi`` vector) and uses particle
    swarm optimisation (PSO) to decide which recommendation slot of each user
    receives each exposure factor, maximising the EPC diversity metric
    reported by ``metrics.metrics``.
    """

    def __init__(self, number_of_recommendations):
        # Size of the final recommendation list shown to each user.
        self.number_of_recommendations = number_of_recommendations
        # Private RNG so repeated games do not disturb global numpy state.
        self.np_rng = np.random.default_rng()

    def play(self, recommendations, recommendation_strenghs, items, users, SalesHistory, controlId,
             a, b, c, pi, num_particles, num_generations):
        """Return the exposure-optimised top-k recommendation lists per user.

        :param recommendations: dict user -> ranked candidate item ids.
        :param recommendation_strenghs: dict user -> raw recommender scores,
            aligned with ``recommendations``.
        :param items, users, SalesHistory, controlId: simulation state passed
            through to :meth:`evaluate`.
        :param a, b, c: PSO inertia / cognitive / social coefficients.
        :param pi: exposure factors to distribute over recommendation slots.
        :param num_particles, num_generations: PSO search budget.
        :return: dict user -> top ``number_of_recommendations`` item ids after
            exposure-weighted re-ranking.
        """
        optimized_exposure = self.optimize_exposure(
            items, users, SalesHistory, controlId, pi, recommendations,
            num_particles, self.number_of_recommendations, num_generations,
            recommendation_strenghs, a, b, c)
        updated_probabilities = self.update_probabilities(
            users.activeUserIndeces, optimized_exposure, recommendation_strenghs)
        new_recommendations = self._top_k(recommendations, updated_probabilities)

        # Diagnostic: a successful optimisation should normally change the ranking.
        unmodified = {user: recommendations[user][0:self.number_of_recommendations]
                      for user in range(len(recommendations))}
        if new_recommendations == unmodified:
            print("wrong")
        return new_recommendations

    def _top_k(self, user_recommendations, updated_probabilities):
        # Re-rank each user's candidates by the updated scores and keep the
        # best ``number_of_recommendations`` items.
        result = {}
        for user in range(len(user_recommendations)):
            ranked = sorted(zip(user_recommendations[user], updated_probabilities[user]),
                            key=lambda pair: pair[1], reverse=True)
            result[user] = [item for item, _ in ranked][0:self.number_of_recommendations]
        return result

    def _exposure_from_position(self, position, user_recommendations, exposure_set):
        # Decode a particle position into per-user exposure weight vectors:
        # position[u * n + j] is the recommendation slot of user u that
        # receives exposure_set[j]; all other slots get weight 0.
        n_params = len(exposure_set)
        exposure_parameters = []
        for user_id in range(len(user_recommendations)):
            weights = np.zeros(len(user_recommendations[user_id]))
            for j in range(n_params):
                weights[round(position[user_id * n_params + j])] = exposure_set[j]
            exposure_parameters.append(weights)
        return exposure_parameters

    def optimize_exposure(self, items, users, sales_history, controlId, exposure_set, user_recommendations, n_particles,
                          number_of_recommendations, number_of_generations, recommendation_strengths, a, b, c):
        """Run PSO over exposure-slot assignments and return the best decoding.

        Each particle encodes, for every user, ``len(exposure_set)`` distinct
        recommendation-slot indices (one per exposure factor). Fitness of a
        particle is the EPC value of the resulting re-ranked lists.

        :return: per-user exposure weight vectors of the best particle found.
        """
        n_params = len(exposure_set)
        n_users = len(user_recommendations)
        dim = n_params * n_users
        max_values_per_user = [len(user_recommendations[u]) for u in range(n_users)]

        particles = []
        velocities = []
        best_for_particles = []
        best_score_per_particle = []
        best_neighbour = None
        best_score = 0
        # Linearly decay the inertia weight to 1/6 of its start value.
        a_decay = (a * 5 / 6) / number_of_generations

        for _ in range(n_particles):
            particle = self.np_rng.integers(min(max_values_per_user), size=dim)
            self.legalize_position(particle, n_params, max_values_per_user)
            best_neighbour = particle
            velocities.append(self.np_rng.integers(2, size=dim) - 1)
            particles.append(particle)
            best_for_particles.append(particle)
            best_score_per_particle.append(0)

        for g in range(number_of_generations):
            print(f"Generation {g}/{number_of_generations}")
            for p in range(len(particles)):
                # Standard PSO velocity update: inertia + pull towards the
                # particle's own best + pull towards the global best.
                v_inert = a * velocities[p]
                v_previous_best = b * (best_for_particles[p] - particles[p]) * self.np_rng.random()
                v_neighbouring_best = c * (best_neighbour - particles[p]) * self.np_rng.random()
                new_velocity = self.limit_velocity(
                    (v_inert + v_previous_best + v_neighbouring_best).flatten())
                velocities[p] = new_velocity
                new_position = (particles[p] + new_velocity).flatten()
                particles[p] = self.legalize_position(new_position, n_params, max_values_per_user).flatten()

                # Evaluate the candidate ranking implied by this position.
                exposure_parameters = self._exposure_from_position(
                    particles[p], user_recommendations, exposure_set)
                updated_probabilities = self.update_probabilities(
                    users.activeUserIndeces, exposure_parameters, recommendation_strengths)
                candidate = self._top_k(user_recommendations, updated_probabilities)
                value = self.evaluate(users, items, sales_history, candidate, controlId)

                if value > best_score_per_particle[p]:
                    best_score_per_particle[p] = value
                    best_for_particles[p] = particles[p]
                if value > best_score:
                    best_score = value
                    best_neighbour = particles[p]
            a = a - a_decay

        return self._exposure_from_position(best_neighbour, user_recommendations, exposure_set)

    def legalize_position(self, particle, parameters_per_user, max_values):
        """Clamp a particle position into valid, duplicate-free slot indices.

        Each component is wrapped into ``[-0.5, max - 0.5)`` so it rounds to a
        valid slot of its owning user, then nudged left/right until no two
        components of the same user round to the same slot.
        """
        for i in range(len(particle)):
            # Exclusive upper bound for this component (owning user's list size).
            max_value = max_values[i // parameters_per_user] - 0.5
            left = self.np_rng.random() > 0.5
            while particle[i] <= -0.5:
                left = False
                particle[i] += 2
            while particle[i] >= max_value:
                left = True
                particle[i] -= 2
            if i % parameters_per_user == 0:
                # The first component of each user has no earlier sibling to
                # collide with.
                continue
            while self.check_illegality(parameters_per_user, particle, i):
                particle[i] += -1 if left else 1
                while particle[i] <= -0.5:
                    left = False
                    particle[i] += 2
                while particle[i] >= max_value:
                    left = True
                    particle[i] -= 2
        return particle

    def check_illegality(self, parameters_per_user, particle, current_index):
        """Return True when particle[current_index] rounds to the same slot as
        any earlier component belonging to the same user.

        Bug fix: the original returned from inside the loop after the first
        comparison, so only the farthest-back sibling was ever checked and
        duplicate slot assignments could slip through.
        """
        for k in range(current_index % parameters_per_user, 0, -1):
            if round(particle[current_index]) == round(particle[current_index - k]):
                return True
        return False

    def limit_velocity(self, velocity):
        """Clamp every velocity component into [-2, 2] (in place)."""
        np.clip(velocity, -2, 2, out=velocity)
        return velocity

    def evaluate(self, users, items, sales_history, user_recommendations, controlId):
        """Simulate one choice round and return the EPC diversity metric.

        Works on copies: neither ``sales_history`` nor the users' awareness
        matrices are modified.
        """
        sales_history_old = sales_history.copy()
        sales_history_new = sales_history.copy()
        prior_recommendations = np.copy(items.hasBeenRecommended)
        awareness = copy.deepcopy(users.Awareness)

        # First pass: expose users to their recommendation lists.
        for user in users.activeUserIndeces:
            if user not in user_recommendations.keys():
                # Bug fix: was ``self.printj``, which does not exist on this class.
                print(" -- Nothing to recommend -- to user ", user)
                continue
            rec = user_recommendations[user]
            prior_recommendations[rec] = 1
            awareness[user, rec] = 1
            # If recommended but previously purchased, minimize the awareness.
            # NOTE(review): np.where yields positions *within* rec, not item
            # ids — confirm this index is intended.
            awareness[user, np.where(sales_history_old[user, rec] > 0)[0]] = 0

        # Second pass: let each user choose items and record the purchases.
        for user in users.activeUserIndeces:
            if user not in user_recommendations.keys():
                print(" -- Nothing to recommend -- to user ", user)
                continue
            rec = user_recommendations[user]
            indecesOfChosenItems, indecesOfChosenItemsW = users.choiceModule(
                rec, awareness[user, :], controlId[user, :], users.sessionSize())
            sales_history_new[user, indecesOfChosenItems] += 1

        metric = metrics.metrics(sales_history_old, user_recommendations,
                                 items.ItemsFeatures, items.ItemsDistances,
                                 sales_history_new)
        return metric["EPC"]

    def update_probabilities(self, activeUserIndeces, optimized_exposure, recommendation_strenghs):
        """Scale each user's raw recommender scores by their exposure weights."""
        return {u: optimized_exposure[u] * np.array(recommendation_strenghs[u])
                for u in range(len(activeUserIndeces))}
| 46.894737 | 195 | 0.579926 |
ceea6070d8746f724f5765c8bc23c6baf63e3bc9 | 3,375 | py | Python | cutlet/test/test_basic.py | kinow/cutlet | 7c50a8d2c29e743fb9f95aa2de72ba630860a1de | [
"MIT"
] | null | null | null | cutlet/test/test_basic.py | kinow/cutlet | 7c50a8d2c29e743fb9f95aa2de72ba630860a1de | [
"MIT"
] | null | null | null | cutlet/test/test_basic.py | kinow/cutlet | 7c50a8d2c29e743fb9f95aa2de72ba630860a1de | [
"MIT"
] | null | null | null | import pytest
from cutlet import Cutlet
import fugashi
# Note that if there are multiple words, only the first is used
# Word-level fixtures: (Japanese input, expected Hepburn romaji).
WORDS = [
    ('新橋', 'shinbashi'),
    ('学校', 'gakkou'),
    ('パンダ', 'panda'),
    # without curry, カツ is registered as 人名 (?)
    ('カツカレー', 'cutlet'),
    ('カレー', 'curry'),
    ('繊維', "sen'i"),
    ('専用', "sen'you"),
    ('抹茶', 'matcha'),
    ('重量', 'juuryou'),
    ('ポール', 'Paul'),
    ('ジル', 'jiru'), # test of ジル-外国 style lemmas
    ('1', '1'),
]
# Same words, expected Kunrei-shiki romanization.
WORDS_KUNREI = [
    ('新橋', 'sinbasi'),
    ('学校', 'gakkou'),
    ('パンダ', 'panda'),
    # without curry, カツ is registered as 人名
    ('カツカレー', 'cutlet'),
    ('カレー', 'curry'),
    ('繊維', "sen'i"),
    ('専用', "sen'you"),
    ('抹茶', 'mattya'),
    ('重量', 'zyuuryou'),
    ('ポール', 'Paul'),
    ('1', '1'),
]
# Sentence-level fixtures: (Japanese input, expected Hepburn output).
SENTENCES = [
    ("あっ", "A"),
    ("括弧は「こう」でなくちゃ", "Kakko wa \"kou\" de nakucha"),
    ("富士見坂", "Fujimi saka"),
    ("本を読みました。", "Hon wo yomimashita."),
    ("新橋行きの電車に乗った。", "Shinbashiiki no densha ni notta."),
    ("カツカレーは美味しい", "Cutlet curry wa oishii"),
    ("酵素とは、生体で起こる化学反応に対して触媒として機能する分子である。",
     "Kouso to wa, seitai de okoru kagaku hannou ni taishite shokubai to shite kinou suru bunshi de aru."),
    ("ホッピーは元祖ビアテイスト清涼飲料水です",
     "Hoppy wa ganso beer taste seiryou inryousui desu"),
    ("東京タワーの高さは333mです",
     "Tokyo tower no takasa wa 333 m desu"),
    ("国立国語研究所(NINJAL)は,日本語学・言語学・日本語教育研究を中心とした研究機関です。",
     "Kokuritsu kokugo kenkyuusho (NINJAL) wa, Nippon gogaku/gengogaku/Nippon go kyouiku kenkyuu wo chuushin to shita kenkyuu kikan desu."),
    ("やっちゃった!", "Yacchatta!"),
    ("暖かかった", "Atatakakatta"),
]
# Sentence fixtures for the Kunrei-shiki system.
SENTENCES_KUNREI = [
    ("富士見坂", "Huzimi saka"),
]
# Slug fixtures: (Japanese input, expected lower-case hyphenated slug).
SLUGS = [
    ("東京タワーの高さは?", "tokyo-tower-no-takasa-wa"),
    ("ゲームマーケットとは", "game-market-to-wa"),
    ("香川ゲーム条例、「(パブコメは)賛成多数だから採決しては」と発言したのは誰だったのか",
     "kagawa-game-jourei-pabukome-wa-sansei-tasuu-dakara-saiketsu-shite-wa-to-hatsugen-shita-no-wa-dare-datta-no-ka"),
    ("コトヤマ「よふかしのうた」3巻発売記念のPV公開、期間限定で1巻の無料配信も",
     "koto-yama-yo-fukashi-no-uta-3-kan-hatsubai-kinen-no-p-v-koukai-kikan-gentei-de-1-kan-no-muryou-haishin-mo"),
]
# Fixtures for use_foreign_spelling = False: loanwords stay phonetic
# (e.g. "katsu karee" instead of "cutlet curry").
NON_FOREIGN = [
    ("カツカレーは美味しい", "Katsu karee wa oishii")
]
@pytest.mark.parametrize('ja, roma', WORDS)
def test_words(ja, roma):
    """Each Japanese word romanizes to the expected Hepburn form."""
    katsu = Cutlet()
    first_token = katsu.tagger.parseToNodeList(ja)[0]
    assert katsu.romaji_word(first_token) == roma
@pytest.mark.parametrize('ja, roma', WORDS_KUNREI)
def test_words_kunrei(ja, roma):
    """Each Japanese word romanizes to the expected Kunrei-shiki form."""
    katsu = Cutlet('kunrei')
    first_token = katsu.tagger.parseToNodeList(ja)[0]
    assert katsu.romaji_word(first_token) == roma
@pytest.mark.parametrize('ja, roma', SENTENCES)
def test_romaji(ja, roma):
    """Whole sentences romanize correctly under the default (Hepburn) system."""
    assert Cutlet().romaji(ja) == roma
@pytest.mark.parametrize('ja, roma', SENTENCES_KUNREI)
def test_romaji_kunrei(ja, roma):
    """Whole sentences romanize correctly under the Kunrei-shiki system."""
    assert Cutlet('kunrei').romaji(ja) == roma
@pytest.mark.parametrize('ja, roma', SLUGS)
def test_romaji_slugs(ja, roma):
    """Slug output is lower-cased and hyphen-separated."""
    assert Cutlet().slug(ja) == roma
@pytest.mark.parametrize('ja, roma', NON_FOREIGN)
def test_romaji_non_foreign(ja, roma):
    """With foreign spelling disabled, katakana loanwords are romanized
    phonetically instead of being restored to their original spelling.

    Bug fix: this function was previously also named ``test_romaji_slugs``,
    which shadowed the slug test above, so pytest never collected it.
    """
    cut = Cutlet()
    cut.use_foreign_spelling = False
    assert cut.romaji(ja) == roma
| 31.839623 | 147 | 0.575407 |
c6be2154de49ac975a6431b486191d1db82124b7 | 2,137 | py | Python | gym_modular/rewards/part/current_part_marker_reward.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/rewards/part/current_part_marker_reward.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/rewards/part/current_part_marker_reward.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | from typing import Sequence, Optional
from .part_marker_reward import PartMarkerReward
from scene.part import Part
class CurrentPartMarkerReward(PartMarkerReward):
    """
    Marker-based reward restricted to the part the robot is currently placing.

    Delegates all reward computation to :class:`PartMarkerReward` (mode
    "current") and simply narrows the evaluated part set to the active part.
    """

    def __init__(self, max_marker_distance: Optional[float] = None,
                 intermediate_timestep_reward_scale: Optional[float] = 0.0,
                 final_timestep_reward_scale: Optional[float] = 0.25, logarithmic_penalty_weight: float = 0.01,
                 marker_pos_tolerance: float = 0.0):
        """
        :param max_marker_distance: maximum marker distance used to normalize
                                    the (unscaled) reward into [-1, 0]; the
                                    reward is clipped at -1 when the average
                                    marker distance exceeds this value
        :param intermediate_timestep_reward_scale: scaling factor applied at every
                                    step in which the gym environment does not
                                    terminate
        :param final_timestep_reward_scale: scaling factor applied at the step in
                                    which the gym environment terminates
        :param logarithmic_penalty_weight: weight of the logarithmic penalty
                                    (see distances.ssd_log_distance)
        :param marker_pos_tolerance: distance to the target position at which a
                                    marker counts as correctly placed (cost 0
                                    at this distance)
        """
        super().__init__(
            "current",
            max_marker_distance,
            intermediate_timestep_reward_scale,
            final_timestep_reward_scale,
            logarithmic_penalty_weight=logarithmic_penalty_weight,
            marker_pos_tolerance=marker_pos_tolerance)

    def _get_parts(self) -> Sequence[Part]:
        # Only the part currently handled by the robot contributes to the reward.
        return [self.task.current_part]
| 59.361111 | 120 | 0.608329 |
f25eeaab30c8f9327905a9829882a90c4c61c1cb | 68,141 | py | Python | src/olympia/lib/settings_base.py | aki21j/addons-server | fbabd4f2933de12507f1df9c9b3f5dd4183c3ae6 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/lib/settings_base.py | aki21j/addons-server | fbabd4f2933de12507f1df9c9b3f5dd4183c3ae6 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/lib/settings_base.py | aki21j/addons-server | fbabd4f2933de12507f1df9c9b3f5dd4183c3ae6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Django settings for addons-server project.
import environ
import json
import logging
import os
import socket
from datetime import datetime
import sentry_sdk
from corsheaders.defaults import default_headers
from kombu import Queue
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import ignore_logger
import olympia.core.logger
env = environ.Env()
ENVIRON_SETTINGS_FILE_PATH = '/etc/olympia/settings.env'
if os.path.exists(ENVIRON_SETTINGS_FILE_PATH):
env.read_env(env_file=ENVIRON_SETTINGS_FILE_PATH)
ALLOWED_HOSTS = [
'.allizom.org',
'.mozilla.org',
'.mozilla.com',
'.mozilla.net',
'.mozaws.net',
]
# This variable should only be set to `True` for local env and internal hosts.
INTERNAL_ROUTES_ALLOWED = env('INTERNAL_ROUTES_ALLOWED', default=False)
try:
# If we have a build id (it should be generated in Dockerfile.deploy),
# we'll grab it here and add it to our CACHE_KEY_PREFIX. This will let us
# not have to flush memcache during updates and it will let us preload
# data into it before a production push.
from build import BUILD_ID
except ImportError:
BUILD_ID = ''
# Make filepaths relative to the root of olympia.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT = os.path.join(BASE_DIR, '..', '..')
def path(*folders):
    """Build an absolute filesystem path from components relative to ROOT."""
    parts = (ROOT,) + folders
    return os.path.join(*parts)
DEBUG = False
DEBUG_TOOLBAR_CONFIG = {
# Deactivate django debug toolbar by default.
'SHOW_TOOLBAR_CALLBACK': lambda request: DEBUG,
}
# Ensure that exceptions aren't re-raised.
DEBUG_PROPAGATE_EXCEPTIONS = False
SILENCED_SYSTEM_CHECKS = (
# Recommendation to use OneToOneField instead of ForeignKey(unique=True)
# but our translations are the way they are...
'fields.W342',
# We have some non-unique constraint/index names.
# See https://github.com/mozilla/addons-server/issues/16497
'models.E030',
'models.E032',
)
# LESS CSS OPTIONS (Debug only).
LESS_PREPROCESS = True # Compile LESS with Node, rather than client-side JS?
LESS_LIVE_REFRESH = False # Refresh the CSS on save?
LESS_BIN = env('LESS_BIN', default='node_modules/less/bin/lessc')
# Path to cleancss (our CSS minifier).
CLEANCSS_BIN = env('CLEANCSS_BIN', default='node_modules/clean-css-cli/bin/cleancss')
# Path to our JS minifier.
JS_MINIFIER_BIN = env('JS_MINIFIER_BIN', default='node_modules/terser/bin/terser')
# rsvg-convert is used to save our svg static theme previews to png
RSVG_CONVERT_BIN = env('RSVG_CONVERT_BIN', default='rsvg-convert')
# Path to pngcrush (to optimize the PNGs uploaded by developers).
PNGCRUSH_BIN = env('PNGCRUSH_BIN', default='pngcrush')
# Path to our addons-linter binary
ADDONS_LINTER_BIN = env(
'ADDONS_LINTER_BIN', default='node_modules/addons-linter/bin/addons-linter'
)
DELETION_EMAIL = 'amo-notifications+deletion@mozilla.org'
THEMES_EMAIL = 'theme-reviews@mozilla.org'
VERIFIED_ADDONS_EMAIL = 'verified-addons@mozilla.com'
DRF_API_VERSIONS = ['auth', 'v3', 'v4', 'v5']
DRF_API_REGEX = r'^/?api/(?:auth|v3|v4|v5)/'
# Add Access-Control-Allow-Origin: * header for the new API with
# django-cors-headers.
CORS_ALLOW_ALL_ORIGINS = True
# Exclude the `accounts/session` endpoint, see:
# https://github.com/mozilla/addons-server/issues/11100
CORS_URLS_REGEX = r'{}(?!accounts/session/)'.format(DRF_API_REGEX)
# https://github.com/mozilla/addons-server/issues/17364
CORS_ALLOW_HEADERS = list(default_headers) + [
'x-country-code',
]
def get_db_config(environ_var, atomic_requests=True):
    """Build a Django database configuration dict from an environment variable.

    :param environ_var: name of the env var holding the database URL.
    :param atomic_requests: wrap every view in a transaction unless decorated
        not to; pass False for read-only replicas that never perform writes.
    """
    config = env.db(var=environ_var, default='mysql://root:@127.0.0.1/olympia')
    # Run all views in a transaction unless they are decorated not to.
    config['ATOMIC_REQUESTS'] = atomic_requests
    # Pool our database connections up for 300 seconds.
    config['CONN_MAX_AGE'] = 300
    config['ENGINE'] = 'olympia.core.db.mysql'
    config['OPTIONS'] = {
        'charset': 'utf8mb4',
        'sql_mode': 'STRICT_ALL_TABLES',
        'isolation_level': 'read committed',
    }
    config['TEST'] = {'CHARSET': 'utf8mb4', 'COLLATION': 'utf8mb4_general_ci'}
    return config
# Primary (writer) connection; all ORM traffic goes here unless routed.
DATABASES = {
    'default': get_db_config('DATABASES_DEFAULT_URL'),
}
# A database to be used by the services scripts, which does not use Django.
# Please note that this is not a full Django database connection
# so the amount of values supported are limited. By default we are using
# the same connection as 'default' but that changes in prod/dev/stage.
SERVICES_DATABASE = get_db_config('DATABASES_DEFAULT_URL')
# Use the multidb pinning router to direct reads to replicas.
DATABASE_ROUTERS = ('multidb.PinningReplicaRouter',)
# Put the aliases for your replica databases in this list.
REPLICA_DATABASES = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Accepted locales / languages.
from olympia.core.languages import AMO_LANGUAGES # noqa
# Bidirectional languages.
# Locales in here *must* be in `AMO_LANGUAGES` too.
LANGUAGES_BIDI = ('ar', 'fa', 'he', 'ur')
# Explicit conversion of a shorter language code into a more specific one.
SHORTER_LANGUAGES = {
'en': 'en-US',
'ga': 'ga-IE',
'pt': 'pt-PT',
'sv': 'sv-SE',
'zh': 'zh-CN',
}
# Override Django's built-in with our native names
LANGUAGES = [
(locale.lower(), value['native']) for locale, value in AMO_LANGUAGES.items()
]
LANGUAGE_URL_MAP = {locale.lower(): locale for locale in AMO_LANGUAGES}
LOCALE_PATHS = (path('locale'),)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# The host currently running the site. Only use this in code for good reason;
# the site is designed to run on a cluster and should continue to support that
HOSTNAME = socket.gethostname()
# The front end domain of the site. If you're not running on a cluster this
# might be the same as HOSTNAME but don't depend on that. Use this when you
# need the real domain.
DOMAIN = HOSTNAME
# Full base URL for your main site including protocol. No trailing slash.
# Example: https://addons.mozilla.org
SITE_URL = 'http://%s' % DOMAIN
# The base URL for the external user-facing frontend. Only really useful for
# the internal admin instances of addons-server that don't run addons-frontend.
EXTERNAL_SITE_URL = env('EXTERNAL_SITE_URL', default=SITE_URL)
# Domain of the services site. This is where your API, and in-product pages
# live.
SERVICES_DOMAIN = 'services.%s' % DOMAIN
# Full URL to your API service. No trailing slash.
# Example: https://services.addons.mozilla.org
SERVICES_URL = 'http://%s' % SERVICES_DOMAIN
# URL of the code-manager site, see:
# https://github.com/mozilla/addons-code-manager
CODE_MANAGER_URL = 'https://code.{}'.format(DOMAIN)
# Filter IP addresses of allowed clients that can post email through the API.
ALLOWED_CLIENTS_EMAIL_API = env.list('ALLOWED_CLIENTS_EMAIL_API', default=[])
# Auth token required to authorize inbound email.
INBOUND_EMAIL_SECRET_KEY = env('INBOUND_EMAIL_SECRET_KEY', default='')
# Validation key we need to send in POST response.
INBOUND_EMAIL_VALIDATION_KEY = env('INBOUND_EMAIL_VALIDATION_KEY', default='')
# Domain emails should be sent to.
INBOUND_EMAIL_DOMAIN = env('INBOUND_EMAIL_DOMAIN', default=DOMAIN)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/user-media/'
# Tarballs in DUMPED_APPS_PATH deleted 30 days after they have been written.
DUMPED_APPS_DAYS_DELETE = 3600 * 24 * 30
# Tarballs in DUMPED_USERS_PATH deleted 30 days after they have been written.
DUMPED_USERS_DAYS_DELETE = 3600 * 24 * 30
# path that isn't just one /, and doesn't require any locale or app.
SUPPORTED_NONAPPS_NONLOCALES_REGEX = DRF_API_REGEX
# paths that don't require an app prefix
# This needs to be kept in sync with addons-frontend's
# validClientAppUrlExceptions
# https://github.com/mozilla/addons-frontend/blob/master/config/default-amo.js
SUPPORTED_NONAPPS = (
'about',
'admin',
'apps',
'contribute.json',
'developer_agreement',
'developers',
'editors',
'review_guide',
'google1f3e37b7351799a5.html',
'google231a41e803e464e9.html',
'reviewers',
'robots.txt',
'statistics',
'services',
'sitemap.xml',
'static',
'user-media',
'__version__',
)
DEFAULT_APP = 'firefox'
# paths that don't require a locale prefix
# This needs to be kept in sync with addons-frontend's validLocaleUrlExceptions
# https://github.com/mozilla/addons-frontend/blob/master/config/default-amo.js
SUPPORTED_NONLOCALES = (
'contribute.json',
'google1f3e37b7351799a5.html',
'google231a41e803e464e9.html',
'robots.txt',
'services',
'sitemap.xml',
'downloads',
'static',
'user-media',
'__version__',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = env(
'SECRET_KEY', default='this-is-a-dummy-key-and-its-overridden-for-prod-servers'
)
# Templates configuration.
# List of path patterns for which we should be using Django Template Language.
# If you add things here, don't forget to also change PUENTE config below.
JINJA_EXCLUDE_TEMPLATE_PATHS = (
# All emails should be processed with Django for consistency.
r'^.*\/emails\/',
# ^admin\/ covers most django admin templates, since their path should
# follow /admin/<app>/<model>/*
r'^admin\/',
# This is a django form widget template.
r'^devhub/forms/widgets/compat_app_input_option.html',
# Third-party apps + django.
r'debug_toolbar',
r'^rangefilter\/',
r'^registration\/',
)
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'NAME': 'jinja2',
'APP_DIRS': True,
'DIRS': (
path('media', 'docs'),
path('src/olympia/templates'),
),
'OPTIONS': {
# http://jinja.pocoo.org/docs/dev/extensions/#newstyle-gettext
'newstyle_gettext': True,
# Match our regular .html and .txt file endings except
# for the admin and a handful of other paths
'match_extension': None,
'match_regex': r'^(?!({paths})).*'.format(
paths='|'.join(JINJA_EXCLUDE_TEMPLATE_PATHS)
),
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'olympia.amo.context_processors.i18n',
'olympia.amo.context_processors.global_settings',
'olympia.amo.context_processors.static_url',
),
'extensions': (
'jinja2.ext.autoescape',
'jinja2.ext.do',
'jinja2.ext.loopcontrols',
'jinja2.ext.with_',
'django_jinja.builtins.extensions.CsrfExtension',
'django_jinja.builtins.extensions.DjangoFiltersExtension',
'django_jinja.builtins.extensions.StaticFilesExtension',
'django_jinja.builtins.extensions.TimezoneExtension',
'django_jinja.builtins.extensions.UrlsExtension',
'olympia.amo.templatetags.jinja_helpers.Spaceless',
'puente.ext.i18n',
'waffle.jinja.WaffleExtension',
),
'finalize': lambda x: x if x is not None else '',
'translation_engine': 'django.utils.translation',
'autoescape': True,
'trim_blocks': True,
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
),
},
},
]
# Default datetime format in templates
# https://docs.djangoproject.com/en/3.2/ref/templates/builtins/#std:templatefilter-date
DATETIME_FORMAT = 'N j, Y, H:i'
X_FRAME_OPTIONS = 'DENY'
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_SECONDS = 31536000
# Prefer using `X-Forwarded-Port` header instead of `Port` header.
# We are behind both, the ELB and nginx which forwards requests to our
# uwsgi app.
# Our current request flow is this:
# Request -> ELB (terminates SSL) -> Nginx -> Uwsgi -> addons-server
#
# The ELB terminates SSL and properly sets `X-Forwarded-Port` header
# as well as `X-Forwarded-Proto` and others.
# Nginx on the other hand runs on port 81 and thus sets `Port` to be
# 81 but to make CSRF detection and other mechanisms work properly
# we need to know that we're running on either port 80 or 443, or do something
# with SECURE_PROXY_SSL_HEADER but we shouldn't if we can avoid that.
# So, let's simply grab the properly set `X-Forwarded-Port` header.
# https://github.com/mozilla/addons-server/issues/8835#issuecomment-405340432
#
# This is also backwards compatible for our local setup since Django falls back
# to using `Port` if `X-Forwarded-Port` isn't set.
USE_X_FORWARDED_PORT = True
MIDDLEWARE = (
# Our middleware to make safe requests non-atomic needs to be at the top.
'olympia.amo.middleware.NonAtomicRequestsForSafeHttpMethodsMiddleware',
# Test if it's an API request first so later middlewares don't need to.
# Also add relevant Vary header to API responses.
'olympia.api.middleware.APIRequestMiddleware',
'olympia.api.middleware.APICacheControlMiddleware',
# Gzip middleware needs to be executed after every modification to the
# response, so it's placed at the top of the list.
'django.middleware.gzip.GZipMiddleware',
# Statsd and logging come first to get timings etc. Munging REMOTE_ADDR
# must come before middlewares potentially using REMOTE_ADDR, so it's
# also up there.
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'olympia.amo.middleware.SetRemoteAddrFromForwardedFor',
# AMO URL middleware is as high as possible to get locale/app aware URLs.
'olympia.amo.middleware.LocaleAndAppURLMiddleware',
'olympia.amo.middleware.RemoveSlashMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'multidb.middleware.PinningRouterMiddleware',
'waffle.middleware.WaffleMiddleware',
# CSP and CORS need to come before CommonMiddleware because they might
# need to add headers to 304 responses returned by CommonMiddleware.
'csp.middleware.CSPMiddleware',
'corsheaders.middleware.CorsMiddleware',
# Enable conditional processing, e.g ETags.
'django.middleware.http.ConditionalGetMiddleware',
'olympia.amo.middleware.NoVarySessionMiddleware',
'olympia.amo.middleware.CommonMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'olympia.amo.middleware.AuthenticationMiddlewareWithoutAPI',
# Our middleware that adds additional information for the user
# and API about our read-only status.
'olympia.amo.middleware.ReadOnlyMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# This should come after AuthenticationMiddlewareWithoutAPI (to get the
# current user) and after SetRemoteAddrFromForwardedFor (to get the correct
# IP).
'olympia.access.middleware.UserAndAddrMiddleware',
'olympia.amo.middleware.RequestIdMiddleware',
)
# Auth
AUTH_USER_MODEL = 'users.UserProfile'
# Override this in the site settings.
ROOT_URLCONF = 'olympia.urls'
INSTALLED_APPS = (
# The translations app *must* be the very first. This isn't necessarily
# relevant for daily business but very important for running initial
# migrations during our tests and local setup.
# Foreign keys to the `translations` table point to `id` which isn't
# unique on it's own but has a (id, locale) unique_together index.
# If `translations` would come after `olympia.addons` for example
# Django tries to first, create the table translations, then create the
# addons table, then adds the foreign key and only after that adds the
# unique_together index to `translations`. MySQL needs that index to be
# created first though, otherwise you'll run into
# `ERROR 1215 (HY000): Cannot add foreign key constraint` errors.
'olympia.translations',
'olympia.core',
'olympia.amo', # amo comes first so it always takes precedence.
'olympia.abuse',
'olympia.access',
'olympia.accounts',
'olympia.activity',
'olympia.addons',
'olympia.api',
'olympia.applications',
'olympia.bandwagon',
'olympia.blocklist',
'olympia.browse',
'olympia.devhub',
'olympia.discovery',
'olympia.files',
'olympia.git',
'olympia.hero',
'olympia.lib.es',
'olympia.lib.akismet',
'olympia.pages',
'olympia.promoted',
'olympia.ratings',
'olympia.reviewers',
'olympia.scanners',
'olympia.search',
'olympia.shelves',
'olympia.stats',
'olympia.tags',
'olympia.users',
'olympia.versions',
'olympia.yara',
'olympia.zadmin',
# Third party apps
'csp',
'aesfield',
'django_extensions',
'rest_framework',
'waffle',
'django_jinja',
'puente',
'rangefilter',
'nobot',
# Django contrib apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
# Has to load after auth
'django_statsd',
)
# These need to point to prod, because that's where the database lives. You can
# change it locally to test the extraction process, but be careful not to
# accidentally nuke translations when doing that!
# Remote endpoints the l10n extraction script pulls editorial content from.
DISCOVERY_EDITORIAL_CONTENT_API = (
    'https://addons.mozilla.org/api/v4/discovery/editorial/'
)
PRIMARY_HERO_EDITORIAL_CONTENT_API = (
    'https://addons.mozilla.org/api/v4/hero/primary/?all=true&raw'
)
SECONDARY_HERO_EDITORIAL_CONTENT_API = (
    'https://addons.mozilla.org/api/v4/hero/secondary/?all=true'
)
HOMEPAGE_SHELVES_EDITORIAL_CONTENT_API = (
    'https://addons.mozilla.org/api/v5/shelves/editorial'
)
# Filename where the strings will be stored. Used in puente config below.
EDITORIAL_CONTENT_FILENAME = 'src/olympia/discovery/strings.jinja2'
# Tells the extract script what files to look for l10n in and what function
# handles the extraction. The puente library expects this.
PUENTE = {
    'BASE_DIR': ROOT,
    # Tells the extract script what files to look for l10n in and what function
    # handles the extraction.
    'DOMAIN_METHODS': {
        # Strings destined for the server-side 'django' gettext domain.
        'django': [
            ('src/olympia/**.py', 'python'),
            # Extract the generated file containing editorial content for all
            # disco pane recommendations using jinja2 parser. It's not a real
            # template, but it uses jinja2 syntax for convenience, hence why
            # it's not in templates/ with a .html extension.
            (EDITORIAL_CONTENT_FILENAME, 'jinja2'),
            # Make sure we're parsing django-admin & email templates with the
            # django template extractor. This should match the behavior of
            # JINJA_EXCLUDE_TEMPLATE_PATHS
            (
                'src/olympia/**/templates/**/emails/**.*',
                'enmerkar.extract.extract_django',
            ),
            ('**/templates/admin/**.html', 'enmerkar.extract.extract_django'),
            (
                '**/templates/devhub/forms/widgets/compat_app_input_option.html',
                'enmerkar.extract.extract_django',
            ),
            ('src/olympia/**/templates/**.html', 'jinja2'),
        ],
        # Strings destined for the client-side 'djangojs' gettext domain.
        'djangojs': [
            # We can't say **.js because that would dive into mochikit
            # and timeplot and all the other baggage we're carrying.
            # Timeplot, in particular, crashes the extractor with bad
            # unicode data.
            ('static/js/**-all.js', 'ignore'),
            ('static/js/**-min.js', 'ignore'),
            ('static/js/*.js', 'javascript'),
            ('static/js/amo2009/**.js', 'javascript'),
            ('static/js/common/**.js', 'javascript'),
            ('static/js/impala/**.js', 'javascript'),
            ('static/js/zamboni/**.js', 'javascript'),
        ],
    },
}
# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
# Paths are relative to the static directory; .less files are compiled first.
MINIFY_BUNDLES = {
    'css': {
        'restyle/css': ('css/restyle/restyle.less',),
        # CSS files our DevHub (currently only required for the
        # new landing page)
        'devhub/new-landing/css': ('css/devhub/new-landing/base.less',),
        # Responsive error page styling.
        'errors/css': ('css/errors/base.less',),
        # CSS files common to the entire site.
        'zamboni/css': (
            'css/legacy/main.css',
            'css/legacy/main-mozilla.css',
            'css/legacy/jquery-lightbox.css',
            'css/zamboni/zamboni.css',
            'css/zamboni/tags.css',
            'css/zamboni/tabs.css',
            'css/impala/buttons.less',
            'css/impala/formset.less',
            'css/impala/suggestions.less',
            'css/impala/header.less',
            'css/impala/moz-tab.css',
            'css/impala/footer.less',
            'css/impala/faux-zamboni.less',
        ),
        'zamboni/impala': (
            'css/impala/base.css',
            'css/legacy/jquery-lightbox.css',
            'css/impala/site.less',
            'css/impala/typography.less',
            'css/impala/forms.less',
            'css/common/invisible-upload.less',
            'css/impala/header.less',
            'css/impala/footer.less',
            'css/impala/moz-tab.css',
            'css/impala/hovercards.less',
            'css/impala/toplist.less',
            'css/impala/carousel.less',
            'css/impala/ratings.less',
            'css/impala/buttons.less',
            'css/impala/promos.less',
            'css/impala/addon_details.less',
            'css/impala/policy.less',
            'css/impala/expando.less',
            'css/impala/popups.less',
            'css/impala/l10n.less',
            'css/impala/lightbox.less',
            'css/impala/prose.less',
            'css/impala/abuse.less',
            'css/impala/paginator.less',
            'css/impala/listing.less',
            'css/impala/versions.less',
            'css/impala/users.less',
            'css/impala/tooltips.less',
            'css/impala/search.less',
            'css/impala/suggestions.less',
            'css/node_lib/jquery.minicolors.css',
            'css/impala/login.less',
            'css/impala/dictionaries.less',
            'css/impala/apps.less',
            'css/impala/formset.less',
            'css/impala/tables.less',
            'css/impala/compat.less',
        ),
        'zamboni/stats': ('css/impala/stats.less',),
        'zamboni/discovery-pane': (
            'css/impala/promos.less',
            'css/legacy/jquery-lightbox.css',
        ),
        'zamboni/devhub': (
            'css/impala/tooltips.less',
            'css/zamboni/developers.css',
            'css/zamboni/docs.less',
            'css/impala/developers.less',
            'css/devhub/listing.less',
            'css/devhub/popups.less',
            'css/devhub/compat.less',
            'css/impala/formset.less',
            'css/devhub/forms.less',
            'css/common/invisible-upload.less',
            'css/devhub/submission.less',
            'css/devhub/refunds.less',
            'css/devhub/buttons.less',
            'css/devhub/in-app-config.less',
            'css/devhub/static-theme.less',
            'css/node_lib/jquery.minicolors.css',
        ),
        'zamboni/devhub_impala': (
            'css/impala/developers.less',
            'css/devhub/listing.less',
            'css/devhub/popups.less',
            'css/devhub/compat.less',
            'css/devhub/dashboard.less',
            'css/devhub/forms.less',
            'css/common/invisible-upload.less',
            'css/devhub/submission.less',
            'css/devhub/search.less',
            'css/devhub/refunds.less',
            'css/impala/devhub-api.less',
        ),
        'zamboni/reviewers': (
            'css/zamboni/reviewers.less',
            'css/zamboni/unlisted.less',
        ),
        'zamboni/themes_review': (
            'css/zamboni/developers.css',
            'css/zamboni/reviewers.less',
            'css/zamboni/themes_review.less',
        ),
    },
    'js': {
        # JS files common to the entire site, apart from dev-landing.
        'common': (
            'js/node_lib/underscore.js',
            'js/zamboni/browser.js',
            'js/amo2009/addons.js',
            'js/zamboni/init.js',
            'js/impala/capabilities.js',
            'js/lib/format.js',
            'js/node_lib/jquery.cookie.js',
            'js/zamboni/storage.js',
            'js/zamboni/buttons.js',
            'js/zamboni/tabs.js',
            'js/common/keys.js',
            # jQuery UI
            'js/node_lib/ui/version.js',
            'js/node_lib/ui/data.js',
            'js/node_lib/ui/disable-selection.js',
            'js/node_lib/ui/ie.js',
            'js/node_lib/ui/keycode.js',
            'js/node_lib/ui/escape-selector.js',
            'js/node_lib/ui/labels.js',
            'js/node_lib/ui/jquery-1-7.js',
            'js/node_lib/ui/plugin.js',
            'js/node_lib/ui/safe-active-element.js',
            'js/node_lib/ui/safe-blur.js',
            'js/node_lib/ui/scroll-parent.js',
            'js/node_lib/ui/focusable.js',
            'js/node_lib/ui/tabbable.js',
            'js/node_lib/ui/unique-id.js',
            'js/node_lib/ui/position.js',
            'js/node_lib/ui/widget.js',
            'js/node_lib/ui/menu.js',
            'js/node_lib/ui/mouse.js',
            'js/node_lib/ui/autocomplete.js',
            'js/node_lib/ui/datepicker.js',
            'js/node_lib/ui/sortable.js',
            'js/zamboni/helpers.js',
            'js/common/banners.js',
            'js/zamboni/global.js',
            'js/amo2009/global.js',
            'js/common/ratingwidget.js',
            'js/node_lib/jqModal.js',
            'js/zamboni/l10n.js',
            'js/zamboni/debouncer.js',
            # Homepage
            'js/zamboni/homepage.js',
            # Add-ons details page
            'js/lib/ui.lightbox.js',
            'js/zamboni/addon_details.js',
            'js/impala/abuse.js',
            'js/zamboni/ratings.js',
            'js/lib/jquery.hoverIntent.js',
            # Unicode letters for our makeslug function
            'js/zamboni/unicode.js',
            # Users
            'js/zamboni/users.js',
            # Search suggestions
            'js/impala/forms.js',
            'js/impala/ajaxcache.js',
            'js/impala/suggestions.js',
            'js/impala/site_suggestions.js',
        ),
        # Impala and Legacy: Things to be loaded at the top of the page
        'preload': (
            'js/node_lib/jquery.js',
            'js/node_lib/jquery.browser.js',
            'js/impala/preloaded.js',
            'js/zamboni/analytics.js',
        ),
        # Impala: Things to be loaded at the bottom
        'impala': (
            'js/lib/ngettext-overload.js',
            'js/node_lib/underscore.js',
            'js/impala/carousel.js',
            'js/zamboni/browser.js',
            'js/amo2009/addons.js',
            'js/zamboni/init.js',
            'js/impala/capabilities.js',
            'js/lib/format.js',
            'js/node_lib/jquery.cookie.js',
            'js/zamboni/storage.js',
            'js/zamboni/buttons.js',
            'js/node_lib/jquery.pjax.js',
            # jquery.pjax.js is missing a semicolon at the end which breaks
            # our wonderful minification process... so add one.
            'js/lib/semicolon.js',  # It's just a semicolon!
            'js/impala/footer.js',
            'js/common/keys.js',
            # jQuery UI
            'js/node_lib/ui/version.js',
            'js/node_lib/ui/data.js',
            'js/node_lib/ui/disable-selection.js',
            'js/node_lib/ui/ie.js',
            'js/node_lib/ui/keycode.js',
            'js/node_lib/ui/escape-selector.js',
            'js/node_lib/ui/labels.js',
            'js/node_lib/ui/jquery-1-7.js',
            'js/node_lib/ui/plugin.js',
            'js/node_lib/ui/safe-active-element.js',
            'js/node_lib/ui/safe-blur.js',
            'js/node_lib/ui/scroll-parent.js',
            'js/node_lib/ui/focusable.js',
            'js/node_lib/ui/tabbable.js',
            'js/node_lib/ui/unique-id.js',
            'js/node_lib/ui/position.js',
            'js/node_lib/ui/widget.js',
            'js/node_lib/ui/mouse.js',
            'js/node_lib/ui/menu.js',
            'js/node_lib/ui/autocomplete.js',
            'js/node_lib/ui/datepicker.js',
            'js/node_lib/ui/sortable.js',
            'js/lib/truncate.js',
            'js/zamboni/truncation.js',
            'js/impala/ajaxcache.js',
            'js/zamboni/helpers.js',
            'js/common/banners.js',
            'js/zamboni/global.js',
            'js/impala/global.js',
            'js/common/ratingwidget.js',
            'js/node_lib/jqModal.js',
            'js/zamboni/l10n.js',
            'js/impala/forms.js',
            # Add-ons details page
            'js/lib/ui.lightbox.js',
            'js/impala/addon_details.js',
            'js/impala/abuse.js',
            'js/impala/ratings.js',
            # Browse listing pages
            'js/impala/listing.js',
            'js/lib/jquery.hoverIntent.js',
            'js/common/upload-image.js',
            'js/node_lib/jquery.minicolors.js',
            # Unicode letters for our makeslug function
            'js/zamboni/unicode.js',
            # Users
            'js/zamboni/users.js',
            'js/impala/users.js',
            # Search
            'js/impala/serializers.js',
            'js/impala/search.js',
            'js/impala/suggestions.js',
            'js/impala/site_suggestions.js',
            # Login
            'js/impala/login.js',
        ),
        'zamboni/discovery': (
            'js/node_lib/jquery.js',
            'js/node_lib/jquery.browser.js',
            'js/node_lib/underscore.js',
            'js/zamboni/browser.js',
            'js/zamboni/init.js',
            'js/impala/capabilities.js',
            'js/lib/format.js',
            'js/impala/carousel.js',
            'js/zamboni/analytics.js',
            # Add-ons details
            'js/node_lib/jquery.cookie.js',
            'js/zamboni/storage.js',
            'js/zamboni/buttons.js',
            'js/lib/ui.lightbox.js',
            'js/lib/jquery.hoverIntent.js',
            'js/zamboni/debouncer.js',
            'js/lib/truncate.js',
            'js/zamboni/truncation.js',
        ),
        'zamboni/devhub': (
            'js/lib/truncate.js',
            'js/zamboni/truncation.js',
            'js/common/upload-base.js',
            'js/common/upload-addon.js',
            'js/common/upload-image.js',
            'js/impala/formset.js',
            'js/zamboni/devhub.js',
            'js/zamboni/validator.js',
            'js/node_lib/jquery.timeago.js',
            'js/zamboni/static_theme.js',
            'js/node_lib/jquery.minicolors.js',
            'js/node_lib/jszip.js',
        ),
        'devhub/new-landing/js': (
            'js/common/lang_switcher.js',
            'js/lib/basket-client.js',
        ),
        'zamboni/reviewers': (
            'js/lib/highcharts.src.js',
            'js/lib/jquery.hoverIntent.js',  # Used by jquery.zoomBox.
            'js/lib/jquery.zoomBox.js',  # Used by themes_review.
            'js/zamboni/reviewers.js',
            'js/zamboni/themes_review_templates.js',
            'js/zamboni/themes_review.js',
        ),
        'zamboni/stats': (
            'js/lib/highcharts.src.js',
            'js/impala/stats/csv_keys.js',
            'js/impala/stats/helpers.js',
            'js/impala/stats/dateutils.js',
            'js/impala/stats/manager.js',
            'js/impala/stats/controls.js',
            'js/impala/stats/overview.js',
            'js/impala/stats/topchart.js',
            'js/impala/stats/chart.js',
            'js/impala/stats/table.js',
            'js/impala/stats/stats.js',
        ),
        # This is included when DEBUG is True. Bundle in <head>.
        'debug': (
            'js/debug/less_setup.js',
            'js/node_lib/less.js',
            'js/debug/less_live.js',
        ),
    },
}
# Prefix for cache keys (will prevent collisions when running parallel copies)
# This value is being used by `conf/settings/{dev,stage,prod}.py`
CACHE_KEY_PREFIX = 'amo:%s:' % BUILD_ID
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_KEY_PREFIX
FETCH_BY_ID = True
# Number of seconds a count() query should be cached. Keep it short because
# it's not possible to invalidate these queries.
CACHE_COUNT_TIMEOUT = 60
# To enable pylibmc compression (in bytes)
PYLIBMC_MIN_COMPRESS_LEN = 0  # disabled
# External tools.
JAVA_BIN = '/usr/bin/java'
# URL paths
# paths for images, e.g. mozcdn.com/amo or '/static'
VAMO_URL = 'https://versioncheck.addons.mozilla.org'
# Outgoing URL bouncer
REDIRECT_URL = 'https://outgoing.prod.mozaws.net/v1/'
REDIRECT_SECRET_KEY = env('REDIRECT_SECRET_KEY', default='')
# Allow URLs from these servers. Use full domain names.
REDIRECT_URL_ALLOW_LIST = ['addons.mozilla.org']
# Default to short expiration; check "remember me" to override
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# See: https://github.com/mozilla/addons-server/issues/1789
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# This value must be kept in sync with authTokenValidFor from addons-frontend:
# https://github.com/mozilla/addons-frontend/blob/2f480b474fe13a676237fe76a1b2a057e4a2aac7/config/default-amo.js#L111
SESSION_COOKIE_AGE = 2592000  # 30 days
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_DOMAIN = '.%s' % DOMAIN  # bug 608797
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
WAFFLE_SECURE = True
# These should have app+locale at the start to avoid redirects
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# When logging in with browser ID, a username is created automatically.
# In the case of duplicates, the process is recursive up to this number
# of times.
MAX_GEN_USERNAME_TRIES = 50
# Email settings
ADDONS_EMAIL = 'Mozilla Add-ons <nobody@mozilla.org>'
DEFAULT_FROM_EMAIL = ADDONS_EMAIL
# Email goes to the console by default. s/console/smtp/ for regular delivery
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Please use all lowercase for the QA allow list.
EMAIL_QA_ALLOW_LIST = env.list('EMAIL_QA_ALLOW_LIST', default=())
# Please use all lowercase for the deny_list.
EMAIL_DENY_LIST = env.list('EMAIL_DENY_LIST', default=('nobody@mozilla.org',))
# URL for Add-on Validation FAQ.
VALIDATION_FAQ_URL = (
    'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/'
    'AddonReviews#Step_2:_Automatic_validation'
)
SHIELD_STUDIES_SUPPORT_URL = 'https://support.mozilla.org/kb/shield'
# Celery
# The env var is checked twice (env() and os.environ.get()) so either an
# explicit setting or the process environment wins over the local default.
CELERY_BROKER_URL = env(
    'CELERY_BROKER_URL',
    default=os.environ.get(
        'CELERY_BROKER_URL', 'amqp://olympia:olympia@localhost:5672/olympia'
    ),
)
CELERY_BROKER_CONNECTION_TIMEOUT = 0.1
CELERY_BROKER_HEARTBEAT = 60 * 15
CELERY_TASK_DEFAULT_QUEUE = 'default'
CELERY_RESULT_BACKEND = env(
    'CELERY_RESULT_BACKEND',
    default=os.environ.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379/1'),
)
CELERY_TASK_IGNORE_RESULT = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_WORKER_HIJACK_ROOT_LOGGER = False
# Testing responsiveness without rate limits.
CELERY_WORKER_DISABLE_RATE_LIMITS = True
# Only serialize celery tasks using JSON.
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
# When testing, we always want tasks to raise exceptions. Good for sanity.
CELERY_TASK_EAGER_PROPAGATES = True
# Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised.
# The task can catch that and recover but should exit ASAP. Note that there is
# a separate, shorter timeout for validation tasks.
CELERY_TASK_SOFT_TIME_LIMIT = 60 * 30
# List of modules that contain tasks and that wouldn't be autodiscovered by
# celery. Typically, it's either `tasks` modules from something not in
# INSTALLED_APPS, or modules not called `tasks`.
CELERY_IMPORTS = (
    'olympia.lib.crypto.tasks',
    'olympia.lib.es.management.commands.reindex',
)
# Declares every queue workers may consume from; routing keys mirror the
# queue names (see CELERY_TASK_ROUTES below for which task goes where).
CELERY_TASK_QUEUES = (
    Queue('addons', routing_key='addons'),
    Queue('adhoc', routing_key='adhoc'),
    Queue('amo', routing_key='amo'),
    Queue('bandwagon', routing_key='bandwagon'),
    Queue('cron', routing_key='cron'),
    Queue('crypto', routing_key='crypto'),
    Queue('default', routing_key='default'),
    Queue('devhub', routing_key='devhub'),
    Queue('images', routing_key='images'),
    Queue('priority', routing_key='priority'),
    Queue('ratings', routing_key='ratings'),
    Queue('reviewers', routing_key='reviewers'),
    Queue('search', routing_key='search'),
    Queue('tags', routing_key='tags'),
    Queue('users', routing_key='users'),
    Queue('zadmin', routing_key='zadmin'),
)
# We have separate celeryds for processing devhub & images as fast as possible
# Some notes:
# - always add routes here instead of @task(queue=<name>)
# - when adding a queue, be sure to update deploy.py so that it gets restarted
CELERY_TASK_ROUTES = {
    # Priority.
    # If your tasks need to be run as soon as possible, add them here so they
    # are routed to the priority queue.
    'olympia.addons.tasks.index_addons': {'queue': 'priority'},
    'olympia.addons.tasks.unindex_addons': {'queue': 'priority'},
    'olympia.blocklist.tasks.process_blocklistsubmission': {'queue': 'priority'},
    'olympia.blocklist.tasks.import_block_from_blocklist': {'queue': 'priority'},
    'olympia.blocklist.tasks.delete_imported_block_from_blocklist': {
        'queue': 'priority'
    },
    'olympia.blocklist.tasks.upload_filter': {'queue': 'priority'},
    'olympia.versions.tasks.generate_static_theme_preview': {'queue': 'priority'},
    # Other queues we prioritize below.
    # 'Default' queue.
    'celery.accumulate': {'queue': 'default'},
    'celery.backend_cleanup': {'queue': 'default'},
    'celery.chain': {'queue': 'default'},
    'celery.chord': {'queue': 'default'},
    'celery.chunks': {'queue': 'default'},
    'celery.group': {'queue': 'default'},
    'celery.map': {'queue': 'default'},
    'celery.starmap': {'queue': 'default'},
    # AMO Devhub.
    'olympia.devhub.tasks.check_for_api_keys_in_file': {'queue': 'devhub'},
    'olympia.devhub.tasks.create_initial_validation_results': {'queue': 'devhub'},
    'olympia.devhub.tasks.forward_linter_results': {'queue': 'devhub'},
    'olympia.devhub.tasks.get_preview_sizes': {'queue': 'devhub'},
    'olympia.devhub.tasks.handle_file_validation_result': {'queue': 'devhub'},
    'olympia.devhub.tasks.handle_upload_validation_result': {'queue': 'devhub'},
    'olympia.devhub.tasks.revoke_api_key': {'queue': 'devhub'},
    'olympia.devhub.tasks.send_welcome_email': {'queue': 'devhub'},
    'olympia.devhub.tasks.submit_file': {'queue': 'devhub'},
    'olympia.devhub.tasks.validate_file': {'queue': 'devhub'},
    'olympia.devhub.tasks.validate_upload': {'queue': 'devhub'},
    'olympia.files.tasks.repack_fileupload': {'queue': 'devhub'},
    'olympia.scanners.tasks.run_customs': {'queue': 'devhub'},
    'olympia.scanners.tasks.run_wat': {'queue': 'devhub'},
    'olympia.scanners.tasks.run_yara': {'queue': 'devhub'},
    'olympia.scanners.tasks.call_mad_api': {'queue': 'devhub'},
    # Activity (goes to devhub queue).
    'olympia.activity.tasks.process_email': {'queue': 'devhub'},
    # This is currently used only by validation tasks.
    # This puts the chord_unlock task on the devhub queue. Which means anything
    # that uses chord() or group() must also be running in this queue or must
    # be on a worker that listens to the same queue.
    'celery.chord_unlock': {'queue': 'devhub'},
    # Images.
    'olympia.users.tasks.resize_photo': {'queue': 'images'},
    'olympia.devhub.tasks.recreate_previews': {'queue': 'images'},
    'olympia.devhub.tasks.resize_icon': {'queue': 'images'},
    'olympia.devhub.tasks.resize_preview': {'queue': 'images'},
    # AMO
    'olympia.amo.tasks.delete_logs': {'queue': 'amo'},
    'olympia.amo.tasks.send_email': {'queue': 'amo'},
    'olympia.amo.tasks.set_modified_on_object': {'queue': 'amo'},
    'olympia.amo.tasks.sync_objects_to_basket': {'queue': 'amo'},
    'olympia.blocklist.tasks.cleanup_old_files': {'queue': 'amo'},
    # Addons
    'olympia.addons.tasks.delete_addons': {'queue': 'addons'},
    'olympia.addons.tasks.delete_preview_files': {'queue': 'addons'},
    'olympia.addons.tasks.version_changed': {'queue': 'addons'},
    'olympia.files.tasks.hide_disabled_files': {'queue': 'addons'},
    'olympia.versions.tasks.delete_preview_files': {'queue': 'addons'},
    'olympia.git.tasks.continue_git_extraction': {'queue': 'addons'},
    'olympia.git.tasks.extract_versions_to_git': {'queue': 'addons'},
    'olympia.git.tasks.on_extraction_error': {'queue': 'addons'},
    'olympia.git.tasks.remove_git_extraction_entry': {'queue': 'addons'},
    # Additional image processing tasks that aren't as important go in the
    # addons queue to leave the 'devhub' queue free to process validations etc.
    'olympia.addons.tasks.extract_colors_from_static_themes': {'queue': 'addons'},
    'olympia.devhub.tasks.pngcrush_existing_preview': {'queue': 'addons'},
    'olympia.devhub.tasks.pngcrush_existing_icons': {'queue': 'addons'},
    'olympia.addons.tasks.recreate_theme_previews': {'queue': 'addons'},
    # Adhoc
    # A queue to be used for one-off tasks that could be resource intensive.
    'olympia.versions.tasks.delete_list_theme_previews': {'queue': 'adhoc'},
    'olympia.versions.tasks.hard_delete_versions': {'queue': 'adhoc'},
    'olympia.translations.tasks.reclean_collection_descriptions': {'queue': 'adhoc'},
    # Crons
    'olympia.addons.tasks.update_addon_average_daily_users': {'queue': 'cron'},
    'olympia.addons.tasks.update_addon_hotness': {'queue': 'cron'},
    'olympia.addons.tasks.update_addon_weekly_downloads': {'queue': 'cron'},
    # Bandwagon
    'olympia.bandwagon.tasks.collection_meta': {'queue': 'bandwagon'},
    # Reviewers
    'olympia.reviewers.tasks.recalculate_post_review_weight': {'queue': 'reviewers'},
    # Crypto
    'olympia.lib.crypto.tasks.sign_addons': {'queue': 'crypto'},
    # Search
    'olympia.lib.es.management.commands.reindex.create_new_index': {'queue': 'search'},
    'olympia.lib.es.management.commands.reindex.delete_indexes': {'queue': 'search'},
    'olympia.lib.es.management.commands.reindex.flag_database': {'queue': 'search'},
    'olympia.lib.es.management.commands.reindex.unflag_database': {'queue': 'search'},
    'olympia.lib.es.management.commands.reindex.update_aliases': {'queue': 'search'},
    'olympia.addons.tasks.find_inconsistencies_between_es_and_db': {'queue': 'search'},
    # Ratings
    'olympia.ratings.tasks.addon_bayesian_rating': {'queue': 'ratings'},
    'olympia.ratings.tasks.addon_rating_aggregates': {'queue': 'ratings'},
    'olympia.ratings.tasks.update_denorm': {'queue': 'ratings'},
    # Tags
    'olympia.tags.tasks.update_all_tag_stats': {'queue': 'tags'},
    'olympia.tags.tasks.update_tag_stat': {'queue': 'tags'},
    # Users
    'olympia.accounts.tasks.primary_email_change_event': {'queue': 'users'},
    'olympia.users.tasks.delete_photo': {'queue': 'users'},
    'olympia.users.tasks.update_user_ratings_task': {'queue': 'users'},
    'olympia.accounts.tasks.delete_user_event': {'queue': 'users'},
    # Zadmin
    'olympia.scanners.tasks.run_yara_query_rule': {'queue': 'zadmin'},
    'olympia.scanners.tasks.run_yara_query_rule_on_versions_chunk': {'queue': 'zadmin'},
    'olympia.scanners.tasks.mark_yara_query_rule_as_completed_or_aborted': {
        'queue': 'zadmin'
    },
    'olympia.zadmin.tasks.celery_error': {'queue': 'zadmin'},
}
# See PEP 391 for formatting help.
# Everything goes to stdout as mozlog-style JSON by default (the 'root'
# logger); noisy third-party loggers are silenced or raised to WARNING below.
LOGGING = {
    'version': 1,
    'filters': {},
    'disable_existing_loggers': False,
    'formatters': {
        'json': {
            '()': olympia.core.logger.JsonFormatter,
            'logger_name': 'http_app_addons',
        },
    },
    'handlers': {
        'mozlog': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'json',
        },
        'null': {
            'class': 'logging.NullHandler',
        },
        # Counts errors in statsd in addition to regular logging.
        'statsd': {
            'level': 'ERROR',
            'class': 'django_statsd.loggers.errors.StatsdHandler',
        },
    },
    'root': {'handlers': ['mozlog'], 'level': logging.INFO},
    'loggers': {
        'amqp': {'handlers': ['null'], 'level': logging.WARNING, 'propagate': False},
        'caching': {'handlers': ['mozlog'], 'level': logging.ERROR, 'propagate': False},
        'caching.invalidation': {
            'handlers': ['null'],
            'level': logging.INFO,
            'propagate': False,
        },
        'celery.worker.strategy': {
            'handlers': ['mozlog'],
            'level': logging.WARNING,
            'propagate': False,
        },
        'django': {
            'handlers': ['statsd'],
            'level': logging.ERROR,
            'propagate': True,
        },
        # Django CSRF related warnings
        'django.security.csrf': {
            'handlers': ['mozlog'],
            'level': logging.WARNING,
            'propagate': True,
        },
        'elasticsearch': {
            'handlers': ['null'],
            'level': logging.INFO,
            'propagate': False,
        },
        'filtercascade': {
            'handlers': ['mozlog'],
            # Ignore INFO or DEBUG from filtercascade, it logs too much.
            'level': logging.WARNING,
            'propagate': False,
        },
        'mohawk.util': {
            'handlers': ['mozlog'],
            # Ignore INFO or DEBUG from mohawk.util, it logs too much.
            'level': logging.WARNING,
            'propagate': False,
        },
        'newrelic': {
            'handlers': ['mozlog'],
            'level': logging.WARNING,
            'propagate': False,
        },
        'parso': {'handlers': ['null'], 'level': logging.INFO, 'propagate': False},
        'post_request_task': {
            'handlers': ['mozlog'],
            # Ignore INFO or DEBUG from post-request-task, it logs too much.
            'level': logging.WARNING,
            'propagate': False,
        },
        'sentry_sdk': {
            'handlers': ['mozlog'],
            'level': logging.WARNING,
            'propagate': False,
        },
        'request': {
            'handlers': ['mozlog'],
            'level': logging.WARNING,
            'propagate': False,
        },
        'z.celery': {
            'handlers': ['statsd'],
            'level': logging.ERROR,
            'propagate': True,
        },
        'z.pool': {'handlers': ['mozlog'], 'level': logging.ERROR, 'propagate': False},
    },
}
# CSP Settings
PROD_CDN_HOST = 'https://addons.cdn.mozilla.net'
ANALYTICS_HOST = 'https://www.google-analytics.com'
CSP_REPORT_URI = '/__cspreport__'
CSP_REPORT_ONLY = False
CSP_EXCLUDE_URL_PREFIXES = ()
# NOTE: CSP_DEFAULT_SRC MUST be set otherwise things not set
# will default to being open to anything.
CSP_DEFAULT_SRC = ("'self'",)
CSP_BASE_URI = (
    "'self'",
    # Required for the legacy discovery pane.
    'https://addons.mozilla.org',
)
CSP_CONNECT_SRC = (
    "'self'",
    'https://sentry.prod.mozaws.net',
    ANALYTICS_HOST,
    PROD_CDN_HOST,
)
CSP_FORM_ACTION = ("'self'",)
CSP_FONT_SRC = (
    "'self'",
    PROD_CDN_HOST,
)
# child-src covers frames and workers; the recaptcha hosts are needed for
# the embedded captcha widgets.
CSP_CHILD_SRC = (
    "'self'",
    'https://www.google.com/recaptcha/',
    'https://www.recaptcha.net/recaptcha/',
)
CSP_FRAME_SRC = CSP_CHILD_SRC
CSP_IMG_SRC = (
    "'self'",
    'data:',  # Used in inlined mobile css.
    'blob:',  # Needed for image uploads.
    PROD_CDN_HOST,
    'https://static.addons.mozilla.net',  # CDN origin server.
    'https://sentry.prod.mozaws.net',
)
CSP_MEDIA_SRC = ('https://videos.cdn.mozilla.net',)
CSP_OBJECT_SRC = ("'none'",)
CSP_SCRIPT_SRC = (
    'https://www.google-analytics.com/analytics.js',
    'https://www.google.com/recaptcha/',
    'https://www.recaptcha.net/recaptcha/',
    'https://www.gstatic.com/recaptcha/',
    'https://www.gstatic.cn/recaptcha/',
    PROD_CDN_HOST,
)
CSP_STYLE_SRC = (
    "'self'",
    "'unsafe-inline'",
    PROD_CDN_HOST,
)
# Extra-strict policy applied to responses serving user-uploaded content.
RESTRICTED_DOWNLOAD_CSP = {
    'DEFAULT_SRC': "'none'",
    'BASE_URI': "'none'",
    'FORM_ACTION': "'none'",
    'OBJECT_SRC': "'none'",
    'FRAME_ANCESTORS': "'none'",
    'REPORT_URI': CSP_REPORT_URI,
}
# Should robots.txt deny everything or disallow a calculated list of URLs we
# don't want to be crawled? Default is true, allow everything, toggled to
# False on -dev and stage.
# Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710
ENGAGE_ROBOTS = True
# Read-only mode setup.
READ_ONLY = env.bool('READ_ONLY', default=False)
# Turn on read-only mode in local_settings.py by putting this line
# at the VERY BOTTOM: read_only_mode(globals())
def read_only_mode(env):
    """Switch the given settings mapping into read-only operation.

    Marks the site read-only, points the default database at the first
    replica connection, and disables authentication (without a writable
    database there are no sessions). Intended to be called from
    local_settings.py as ``read_only_mode(globals())``.
    """
    env['READ_ONLY'] = True

    replicas = env.get('REPLICA_DATABASES')
    if not replicas:
        raise Exception('We need at least one slave database.')
    # Serve all queries from the first replica instead of the master.
    databases = env['DATABASES']
    databases['default'] = databases[replicas[0]]

    # No sessions without the database, so disable auth.
    env['AUTHENTICATION_BACKENDS'] = ('olympia.users.backends.NoAuthForYou',)
# Uploaded file limits (bytes).
MAX_ICON_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_IMAGE_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_VIDEO_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_PHOTO_UPLOAD_SIZE = MAX_ICON_UPLOAD_SIZE
MAX_STATICTHEME_SIZE = 7 * 1024 * 1024
MAX_ZIP_UNCOMPRESSED_SIZE = 200 * 1024 * 1024
# File uploads should have -rw-r--r-- permissions in order to be served by
# nginx later on. The 0o prefix is intentional, this is an octal value.
FILE_UPLOAD_PERMISSIONS = 0o644
# RECAPTCHA: overload the following key settings in local_settings.py
# with your keys.
# Old recaptcha V1
RECAPTCHA_PUBLIC_KEY = env('RECAPTCHA_PUBLIC_KEY', default='')
RECAPTCHA_PRIVATE_KEY = env('RECAPTCHA_PRIVATE_KEY', default='')
# New Recaptcha V2
NOBOT_RECAPTCHA_PUBLIC_KEY = env('NOBOT_RECAPTCHA_PUBLIC_KEY', default='')
NOBOT_RECAPTCHA_PRIVATE_KEY = env('NOBOT_RECAPTCHA_PRIVATE_KEY', default='')
# Send Django signals asynchronously on a background thread.
ASYNC_SIGNALS = True
# Number of seconds before celery tasks will abort addon validation:
VALIDATOR_TIMEOUT = 360
# Max number of warnings/errors to show from validator. Set to None for no
# limit.
VALIDATOR_MESSAGE_LIMIT = 500
# Feature flags
UNLINK_SITE_STATS = True
# See: https://www.nginx.com/resources/wiki/start/topics/examples/xsendfile/
XSENDFILE_HEADER = 'X-Accel-Redirect'
MOBILE_COOKIE = 'mamo'
# Path to `ps`.
PS_BIN = '/bin/ps'
# The maximum file size that you can have inside a zip file.
FILE_UNZIP_SIZE_LIMIT = 104857600
# How long to delay tasks relying on file system to cope with NFS lag.
NFS_LAG_DELAY = 3
# Elasticsearch
ES_HOSTS = [os.environ.get('ELASTICSEARCH_LOCATION', '127.0.0.1:9200')]
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = {
    'default': 'addons',
}
ES_TIMEOUT = 30
ES_DEFAULT_NUM_REPLICAS = 2
ES_DEFAULT_NUM_SHARDS = 5
# Maximum result position. ES defaults to 10000 but we'd like more to make sure
# all our extensions can be found if searching without a query and
# paginating through all results.
# NOTE: This setting is being set during reindex, if this needs changing
# we need to trigger a reindex. It's also hard-coded in amo/pagination.py
# and there's a test verifying its value is 25000 in amo/test_pagination.py
ES_MAX_RESULT_WINDOW = 25000
# Default AMO user id to use for tasks.
TASK_USER_ID = 4757633
# Special collection that some contributors can modify.
COLLECTION_FEATURED_THEMES_ID = 2143965
# If this is False, tasks and other jobs that send non-critical emails should
# use a fake email backend.
SEND_REAL_EMAIL = False
STATSD_HOST = env('STATSD_HOST', default='localhost')
STATSD_PREFIX = env('STATSD_PREFIX', default='amo')
STATSD_PORT = 8125
# The django statsd client to use, see django-statsd for more.
STATSD_CLIENT = 'django_statsd.clients.normal'
GRAPHITE_HOST = env('GRAPHITE_HOST', default='localhost')
GRAPHITE_PREFIX = env('GRAPHITE_PREFIX', default='amo')
GRAPHITE_PORT = 2003
GRAPHITE_TIMEOUT = 1
# IP addresses of servers we use as proxies.
KNOWN_PROXIES = []
# Blog URL
DEVELOPER_BLOG_URL = 'http://blog.mozilla.com/addons/feed/'
LOGIN_RATELIMIT_USER = 5
LOGIN_RATELIMIT_ALL_USERS = '15/m'
CSRF_FAILURE_VIEW = 'olympia.amo.views.csrf_failure'
CSRF_USE_SESSIONS = True
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'olympia.amo.utils.LocalFileStorage'
# And how long we'll give the server to respond for monitoring.
# We currently do not have any actual timeouts during the signing-process.
SIGNING_SERVER_MONITORING_TIMEOUT = 10
# Connection/credential settings for the Autograph signing service; the
# defaults match the local docker-compose setup, not production.
AUTOGRAPH_CONFIG = {
    'server_url': env('AUTOGRAPH_SERVER_URL', default='http://autograph:5500'),
    'user_id': env('AUTOGRAPH_HAWK_USER_ID', default='alice'),
    'key': env(
        'AUTOGRAPH_HAWK_KEY',
        default='fs5wgcer9qj819kfptdlp8gm227ewxnzvsuj9ztycsx08hfhzu',
    ),
    # This is configurable but we don't expect it to be set to anything else
    # but `webextensions-rsa` at this moment because AMO only accepts
    # regular add-ons, no system add-ons or extensions for example. These
    # are already signed when submitted to AMO.
    'signer': env('AUTOGRAPH_SIGNER_ID', default='webextensions-rsa'),
    # This signer is only used for add-ons that are recommended.
    # The signer uses its own HAWK auth credentials
    'recommendation_signer': env(
        'AUTOGRAPH_RECOMMENDATION_SIGNER_ID',
        default='webextensions-rsa-with-recommendation',
    ),
    'recommendation_signer_user_id': env(
        'AUTOGRAPH_RECOMMENDATION_SIGNER_HAWK_USER_ID', default='bob'
    ),
    'recommendation_signer_key': env(
        'AUTOGRAPH_RECOMMENDATION_SIGNER_HAWK_KEY',
        default='9vh6bhlc10y63ow2k4zke7k0c3l9hpr8mo96p92jmbfqngs9e7d',
    ),
}
# Enable addon signing. Autograph is configured to something reasonable
# when running locally so there aren't many reasons to deactivate that.
ENABLE_ADDON_SIGNING = True
# True when the Django app is running from the test suite.
IN_TEST_SUITE = False
# Temporary flag to work with navigator.mozPay() on devices that don't
# support it natively.
SIMULATE_NAV_PAY = False
# When the dev. agreement gets updated, you need users to re-accept it and the
# config 'last_dev_agreement_change_date' is not set, use this fallback.
# You won't want to do this for minor format changes.
# The tuple is passed through to datetime.date, so please use a valid date
# tuple.
DEV_AGREEMENT_CHANGE_FALLBACK = datetime(2019, 12, 2, 12, 00)
# If you want to allow self-reviews for add-ons/apps, then enable this.
# In production we do not want to allow this.
ALLOW_SELF_REVIEWS = False
# Allow URL style format override. eg. "?format=json"
URL_FORMAT_OVERRIDE = 'format'
# Connection to the hive server.
HIVE_CONNECTION = {
    'host': 'peach-gw.peach.metrics.scl3.mozilla.com',
    'port': 10000,
    'user': 'amo_prod',
    'password': '',
    'auth_mechanism': 'PLAIN',
}
# CDN Host is blank on local installs, overwritten in dev/stage/prod envs.
# Useful to force some dynamic content to be served from the CDN.
CDN_HOST = ''
# Static
STATIC_ROOT = path('site-static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (path('static'),)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# Path related settings. In dev/stage/prod `NETAPP_STORAGE_ROOT` environment
# variable will be set and point to our NFS/EFS storage
# Make sure to check overwrites in conftest.py if new settings are added
# or changed.
STORAGE_ROOT = env('NETAPP_STORAGE_ROOT', default=path('storage'))
ADDONS_PATH = os.path.join(STORAGE_ROOT, 'files')
GUARDED_ADDONS_PATH = os.path.join(STORAGE_ROOT, 'guarded-addons')
GIT_FILE_STORAGE_PATH = os.path.join(STORAGE_ROOT, 'git-storage')
MLBF_STORAGE_PATH = os.path.join(STORAGE_ROOT, 'mlbf')
SITEMAP_STORAGE_PATH = os.path.join(STORAGE_ROOT, 'sitemaps')
SHARED_STORAGE = os.path.join(STORAGE_ROOT, 'shared_storage')
MEDIA_ROOT = os.path.join(SHARED_STORAGE, 'uploads')
TMP_PATH = os.path.join(SHARED_STORAGE, 'tmp')
# These are key files that must be present on disk to encrypt/decrypt certain
# database fields.
# {'api_key:secret': os.path.join(ROOT, 'path', 'to', 'file.key'),}
AES_KEYS = env.dict('AES_KEYS', default={})
# Time in seconds for how long a JWT auth token created by developers with
# their API key can live. When developers are creating auth tokens they cannot
# set the expiration any longer than this.
MAX_APIKEY_JWT_AUTH_TOKEN_LIFETIME = 5 * 60
# Time in seconds before the email containing the link allowing developers to
# see their api keys the first time they request one is sent. A value of None
# means it's sent instantaneously.
API_KEY_CONFIRMATION_DELAY = None
# Default cache duration for the API, in seconds.
API_CACHE_DURATION = 3 * 60
# JWT authentication related settings:
JWT_AUTH = {
# Use HMAC using SHA-256 hash algorithm. It should be the default, but we
# want to make sure it does not change behind our backs.
# See https://github.com/jpadilla/pyjwt/blob/master/docs/algorithms.rst
'JWT_ALGORITHM': 'HS256',
# This adds some padding to timestamp validation in case client/server
# clocks are off.
'JWT_LEEWAY': 5,
}
DRF_API_GATES = {
'auth': (),
'v3': (
'ratings-rating-shim',
'ratings-title-shim',
'l10n_flat_input_output',
'collections-downloads-shim',
'addons-locale_disambiguation-shim',
'del-addons-created-field',
'del-accounts-fxa-edit-email-url',
'del-version-license-is-custom',
'del-ratings-flags',
'activity-user-shim',
'autocomplete-sort-param',
'is-source-public-shim',
'is-featured-addon-shim',
'disco-heading-and-description-shim',
'wrap-outgoing-parameter',
'platform-shim',
'keep-license-text-in-version-list',
),
'v4': (
'l10n_flat_input_output',
'addons-search-_score-field',
'ratings-can_reply',
'ratings-score-filter',
'wrap-outgoing-parameter',
'platform-shim',
'keep-license-text-in-version-list',
),
'v5': (
'addons-search-_score-field',
'ratings-can_reply',
'ratings-score-filter',
),
}
# Change this to deactivate API throttling for views using a throttling class
# depending on the one defined in olympia.api.throttling.
API_THROTTLING = True
REST_FRAMEWORK = {
# Set this because the default is to also include:
# 'rest_framework.renderers.BrowsableAPIRenderer'
# Which it will try to use if the client accepts text/html.
'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',),
'DEFAULT_AUTHENTICATION_CLASSES': (
'olympia.api.authentication.WebTokenAuthentication',
),
# Set parser classes to include the fix for
# https://github.com/tomchristie/django-rest-framework/issues/3951
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'olympia.api.parsers.MultiPartParser',
),
'ALLOWED_VERSIONS': DRF_API_VERSIONS,
'DEFAULT_VERSION': 'v5',
'DEFAULT_VERSIONING_CLASS': ('rest_framework.versioning.NamespaceVersioning'),
# Add our custom exception handler, that wraps all exceptions into
# Responses and not just the ones that are api-related.
'EXCEPTION_HANDLER': 'olympia.api.exceptions.custom_exception_handler',
# Enable pagination
'PAGE_SIZE': 25,
# Use our pagination class by default, which allows clients to request a
# different page size.
'DEFAULT_PAGINATION_CLASS': ('olympia.api.pagination.CustomPageNumberPagination'),
# Use json by default when using APIClient.
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
# Use http://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15
# We can't use the default because we don't use django timezone support.
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%SZ',
# Set our default ordering parameter
'ORDERING_PARAM': 'sort',
}
def get_sentry_release():
    """Return the release identifier to report to Sentry, or None.

    Prefers the deployed ``version.json`` (written by the deployment
    pipeline, keyed on ``version`` or ``commit``); when that is missing,
    unusable, or points at ``origin/master``, falls back to resolving the
    current commit sha from the local ``.git`` directory.
    """
    version_json = os.path.join(ROOT, 'version.json')
    version = None

    if os.path.exists(version_json):
        try:
            with open(version_json, 'r') as fobj:
                contents = fobj.read()
                data = json.loads(contents)
                version = data.get('version') or data.get('commit')
        # ValueError covers json.JSONDecodeError for a corrupt/truncated
        # version.json; previously only IOError/KeyError were caught, so a
        # malformed file crashed settings import.
        except (IOError, KeyError, ValueError):
            version = None

    if not version or version == 'origin/master':
        try:
            # Resolve HEAD: either a symbolic ref ("ref: refs/heads/x")
            # pointing at a file containing the sha, or a detached sha.
            head_path = os.path.join(ROOT, '.git', 'HEAD')
            with open(head_path, 'r') as fp:
                head = str(fp.read()).strip()

            if head.startswith('ref: '):
                head = head[5:]
                revision_file = os.path.join(ROOT, '.git', *head.split('/'))
            else:
                return head

            with open(revision_file) as fh:
                version = str(fh.read()).strip()
        except IOError:
            version = None
    return version
# This is the DSN to the Sentry service.
SENTRY_CONFIG = {
    'dsn': env('SENTRY_DSN', default=os.environ.get('SENTRY_DSN')),
    # Automatically configure the release based on git information.
    # This uses our `version.json` file if possible or tries to fetch
    # the current git-sha.
    'release': get_sentry_release(),
    'send_default_pii': True,
}
# We need to load this before sentry_sdk.init or our reverse replacement is too late.
from olympia.amo import reverse # noqa
# NOTE: initialised at settings-import time; integrations hook Django views
# and Celery tasks so their unhandled exceptions are reported automatically.
sentry_sdk.init(
    integrations=[DjangoIntegration(), CeleryIntegration()],
    **SENTRY_CONFIG,
)
# DisallowedHost is noisy scanner traffic, not an actionable error.
ignore_logger('django.security.DisallowedHost')
# Automatically do 'from olympia import amo' when running shell_plus.
SHELL_PLUS_POST_IMPORTS = (('olympia', 'amo'),)
# Firefox Accounts endpoints used for authentication.
FXA_CONTENT_HOST = 'https://accounts.firefox.com'
FXA_OAUTH_HOST = 'https://oauth.accounts.firefox.com/v1'
FXA_PROFILE_HOST = 'https://profile.accounts.firefox.com/v1'
DEFAULT_FXA_CONFIG_NAME = 'default'
ALLOWED_FXA_CONFIGS = ['default']
USE_FAKE_FXA_AUTH = False  # Should only be True for local development envs.
# List all jobs that should be callable with cron here.
# syntax is: job_and_method_name: full.package.path
CRON_JOBS = {
    'update_addon_average_daily_users': 'olympia.addons.cron',
    'update_addon_weekly_downloads': 'olympia.addons.cron',
    'addon_last_updated': 'olympia.addons.cron',
    'hide_disabled_files': 'olympia.addons.cron',
    'unhide_disabled_files': 'olympia.addons.cron',
    'update_addon_hotness': 'olympia.addons.cron',
    'gc': 'olympia.amo.cron',
    'write_sitemaps': 'olympia.amo.cron',
    'auto_import_blocklist': 'olympia.blocklist.cron',
    'upload_mlbf_to_remote_settings': 'olympia.blocklist.cron',
    'update_blog_posts': 'olympia.devhub.cron',
    'update_user_ratings': 'olympia.users.cron',
}
# TAAR add-on recommendation services.
RECOMMENDATION_ENGINE_URL = env(
    'RECOMMENDATION_ENGINE_URL',
    default='https://taar.dev.mozaws.net/v1/api/recommendations/',
)
TAAR_LITE_RECOMMENDATION_ENGINE_URL = env(
    'TAAR_LITE_RECOMMENDATION_ENGINE_URL',
    default=('https://taar.dev.mozaws.net/taarlite/api/v1/addon_recommendations/'),
)
RECOMMENDATION_ENGINE_TIMEOUT = env.float('RECOMMENDATION_ENGINE_TIMEOUT', default=1)
# Reputation service is disabled by default, enabled for dev/stage/prod via
# those 3 env variables.
REPUTATION_SERVICE_URL = env('REPUTATION_SERVICE_URL', default=None)
REPUTATION_SERVICE_TOKEN = env('REPUTATION_SERVICE_TOKEN', default=None)
REPUTATION_SERVICE_TIMEOUT = env.float('REPUTATION_SERVICE_TIMEOUT', default=1)
# This is the queue used for addons-dev, so it'll consume events (i.e. process
# then delete) before you can locally. If you really need to test get ops to
# stop the 'monitor_fxa_sqs' command.
FXA_SQS_AWS_QUEUE_URL = (
    'https://sqs.us-east-1.amazonaws.com/927034868273/amo-account-change-dev'
)
FXA_SQS_AWS_WAIT_TIME = 20  # Seconds.
# Basket newsletter service.
BASKET_URL = env('BASKET_URL', default='https://basket.allizom.org')
BASKET_API_KEY = env('BASKET_API_KEY', default=None)
# Default is 10, the API usually answers in 0.5 - 1.5 seconds.
BASKET_TIMEOUT = 5
# NOTE: the setting name has a typo ('NEWLETTER'); kept as-is because renaming
# would break existing references. The env variable spelling is correct.
MOZILLA_NEWLETTER_URL = env(
    'MOZILLA_NEWSLETTER_URL', default='https://www.mozilla.org/en-US/newsletter/'
)
GEOIP_PATH = '/usr/local/share/GeoIP/GeoLite2-Country.mmdb'
EXTENSION_WORKSHOP_URL = env(
    'EXTENSION_WORKSHOP_URL', default='https://extensionworkshop-dev.allizom.org'
)
# Sectools
SCANNER_TIMEOUT = 60  # seconds
CUSTOMS_API_URL = env('CUSTOMS_API_URL', default=None)
CUSTOMS_API_KEY = env('CUSTOMS_API_KEY', default=None)
WAT_API_URL = env('WAT_API_URL', default=None)
WAT_API_KEY = env('WAT_API_KEY', default=None)
MAD_API_URL = env('MAD_API_URL', default=None)
MAD_API_TIMEOUT = 5  # seconds
# Git(Hub) repository names, e.g., `owner/repo-name`
CUSTOMS_GIT_REPOSITORY = env('CUSTOMS_GIT_REPOSITORY', default=None)
# Addon.average_daily_user count that forces dual sign-off for Blocklist Blocks
DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD = 100_000
# Remote Settings (Kinto) endpoints used for blocklist publication.
REMOTE_SETTINGS_API_URL = 'https://kinto.dev.mozaws.net/v1/'
REMOTE_SETTINGS_WRITER_URL = 'https://kinto.dev.mozaws.net/v1/'
REMOTE_SETTINGS_WRITER_BUCKET = 'blocklists'
# The remote settings test server needs accounts and setting up before using.
REMOTE_SETTINGS_IS_TEST_SERVER = False
BLOCKLIST_REMOTE_SETTINGS_USERNAME = env('BLOCKLIST_KINTO_USERNAME', default='amo_dev')
BLOCKLIST_REMOTE_SETTINGS_PASSWORD = env(
    'BLOCKLIST_KINTO_PASSWORD', default='amo_dev_password'
)
# The path to the current google service account configuration. This is
# being used to query Google BigQuery as part of our stats processing.
# If this is `None` we're going to use service mocks for testing
GOOGLE_APPLICATION_CREDENTIALS = env('GOOGLE_APPLICATION_CREDENTIALS', default=None)
# See: https://bugzilla.mozilla.org/show_bug.cgi?id=1633746
BIGQUERY_PROJECT = 'moz-fx-data-shared-prod'
BIGQUERY_AMO_DATASET = 'amo_dev'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
SITEMAP_DEBUG_AVAILABLE = False
| 37.543251 | 117 | 0.661775 |
a6c34823deb95971761d99f84345bf30de4b4ab0 | 1,111 | py | Python | test/python/test_log1p.py | slyalin/openvino_tensorflow | 37a2e5b6ff1e60217d31340ad3975b41faa39da0 | [
"Apache-2.0"
] | null | null | null | test/python/test_log1p.py | slyalin/openvino_tensorflow | 37a2e5b6ff1e60217d31340ad3975b41faa39da0 | [
"Apache-2.0"
] | null | null | null | test/python/test_log1p.py | slyalin/openvino_tensorflow | 37a2e5b6ff1e60217d31340ad3975b41faa39da0 | [
"Apache-2.0"
] | 1 | 2021-05-12T07:35:34.000Z | 2021-05-12T07:35:34.000Z | # ==============================================================================
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
"""Openvino Tensorflow abs operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import os
from common import NgraphTest
class TestLog1pOperations(NgraphTest):
    """Checks that tf.math.log1p agrees between nGraph and vanilla TF."""

    def test_log1p(self):
        # Inputs below -1 produce NaN, which is compared specially.
        samples = (-3.0, -1.0, -0.5, 0.0, 0.25, 0.5, 1, 10)
        placeholder = tf.compat.v1.placeholder(tf.float32, shape=(8,))
        log1p_op = tf.math.log1p(placeholder)

        def evaluate(sess):
            return sess.run(log1p_op, feed_dict={placeholder: samples})

        with_backend = self.with_ngraph(evaluate)
        without_backend = self.without_ngraph(evaluate)
        assert len(with_backend) == len(without_backend)
        for got, expected in zip(with_backend, without_backend):
            # NaN != NaN, so treat matching NaNs as equal.
            assert (got == expected) or (np.isnan(got) and np.isnan(expected))
187edea80da2e545f5238e5e330d06543930f535 | 10,922 | py | Python | script.module.nanscrapers/lib/nanscrapers/scraperplugins/carthd.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:37:15.000Z | 2019-03-05T09:37:15.000Z | script.module.nanscrapers/lib/nanscrapers/scraperplugins/carthd.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | script.module.nanscrapers/lib/nanscrapers/scraperplugins/carthd.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:16:08.000Z | 2021-11-05T22:16:08.000Z | import re,time,base64
import requests
import xbmc
import urllib
from ..scraper import Scraper
from ..common import clean_title,clean_search
# The site is fetched with verify=False below; silence the TLS warnings.
requests.packages.urllib3.disable_warnings()
s = requests.session()
# Desktop browser user agent string sent with every request.
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
class carthd(Scraper):
    """Scraper plugin for cartoonhd.in (Python 2).

    Both scrape methods follow the same flow: fetch the title page,
    verify title/year against the request, then POST to the site's
    '/ajax/tnembedr.php' endpoint with a page token and an obfuscated
    timestamp to obtain embedded player links, which are classified by
    host into source dicts appended to self.sources.
    """
    domains = ['https://cartoonhd.in']
    name = "CartoonHD"
    sources = []

    def __init__(self):
        self.base_link = 'https://cartoonhd.in'
        self.sources = []

    def scrape_movie(self, title, year, imdb, debrid=False):
        # Returns a list of source dicts; any error is swallowed and the
        # sources collected so far are returned instead (best-effort).
        try:
            scrape_me = urllib.quote_plus(title.lower())
            start_url = '%s/full-movie/%s' %(self.base_link,scrape_me.replace('+','-'))
            #print 'search url> '+start_url
            headers = {'Accept':'application/json, text/javascript, */*; q=0.01',
                       'Accept-Encoding':'gzip, deflate',
                       'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
                       'Origin':self.base_link, 'Referer':self.base_link,
                       'User-Agent':User_Agent, 'X-Requested-With':'XMLHttpRequest'}
            OPEN = requests.get(start_url,headers=headers,verify=False,timeout=5).content
            # The site serves an error page when the slug needs the year;
            # retry once with '<slug>-<year>'.
            if 'OOPS, ' in OPEN:
                start_url = '%s-%s' %(start_url,year)
                OPEN = requests.get(start_url,headers=headers,verify=False,timeout=5).content
            # Two page layouts are in use; fall back to the second pattern.
            try:
                item_title = re.compile('<h2><b><a href=.+?>([^<>]*)</').findall(OPEN)[0]
            except:
                item_title = re.compile('<h2><a href=.*?title="([^"]*)"').findall(OPEN)[0]
            item_year = re.compile('class="dat">([^<>]*)</').findall(OPEN)[0]
            if clean_title(item_title).lower() == clean_title(title).lower():
                if item_year in year:
                    # The AJAX endpoint expects 'elid' to be a base64-encoded
                    # unix timestamp one hour in the past, with '==' padding
                    # percent-encoded.
                    TIME = time.time()- 3600
                    TIME = str(TIME).split('.')[0]
                    TIME = base64.b64encode(TIME,'strict')
                    TIME = TIME.replace('==','%3D%3D')
                    # Per-page token and element id scraped from the HTML.
                    token = re.compile("var tok.+?'([^']*)'").findall(OPEN)[0]
                    match = re.compile('elid.+?"([^"]*)"').findall(OPEN)
                    id = match[0]
                    headers = {'accept':'application/json, text/javascript, */*; q=0.01',
                               'accept-encoding':'gzip, deflate, br', 'accept-language':'en-US,en;q=0.8',
                               'content-type':'application/x-www-form-urlencoded; charset=UTF-8',
                               'origin':self.base_link, 'referer':start_url, 'user-agent':User_Agent,
                               'x-requested-with':'XMLHttpRequest'}
                    request_url = '%s/ajax/tnembedr.php' %self.base_link
                    postdata={'action':'getMovieEmb','idEl':id,'token':token,'elid':TIME}
                    links = requests.post(request_url, data=postdata,verify=False, headers=headers).content
                    #print 'post> '+links
                    # Extract (source label, iframe src) pairs from the JSON-ish reply.
                    match = re.compile('"type":"(.+?)".+?[iI][fF][rR][aA][mM][eE].+?[sS][rR][cC].+?"(.+?)"',re.DOTALL).findall(links)
                    for source_base,link in match:
                        link = link.replace('\\','')
                        # Classify the link by host and record a source dict.
                        if 'blogspot' in source_base:
                            source = source_base.split(' -')[0]
                            quality = source_base.split(' - ')[1]
                            self.sources.append({'source': source,'quality': quality,'scraper': self.name,'url': link,'direct': True})
                        elif 'googleuser' in source_base:
                            if '1080' in source_base:
                                qual = '1080p'
                            elif '720' in source_base:
                                qual='720p'
                            else:
                                qual='DVD'
                            self.sources.append({'source': 'GoogleLink','quality': qual,'scraper': self.name,'url': link,'direct': True})
                        elif 'googleapis' in source_base:
                            self.sources.append({'source': 'GoogleLink','quality': '720P','scraper': self.name,'url': link,'direct': True})
                        elif 'streamango.com' in link:
                            # Resolve actual quality from the player page.
                            get_res=requests.get(link,headers=headers,timeout=5).content
                            qual = re.compile('{type:"video/mp4".+?height:(.+?),',re.DOTALL).findall(get_res)[0]
                            self.sources.append({'source': source_base, 'quality': qual, 'scraper': self.name, 'url': link,'direct': False})
                        elif 'openload' in link:
                            self.sources.append({'source': source_base,'quality': 'DVD','scraper': self.name,'url': link,'direct': False})
                        elif 'vidnodessl' in link:
                            self.sources.append({'source': 'VidnodeSSL','quality': '720p','scraper': self.name,'url': link,'direct': True})
                        else:
                            self.sources.append({'source': source_base,'quality': 'Unknown','scraper': self.name,'url': link,'direct': False})
            return self.sources
        except Exception, argument:
            # Best-effort scraping: never raise out of the plugin.
            return self.sources

    def scrape_episode(self,title, show_year, year, season, episode, imdb, tvdb, debrid = False):
        # Same flow as scrape_movie, but navigates to the show page, then to
        # '/season/<s>/episode/<e>', and posts action 'getEpisodeEmb'.
        try:
            scrape_me = urllib.quote_plus(title.lower())
            start_url = '%s/show/%s' %(self.base_link,scrape_me.replace('+','-'))
            #print 'start_url> '+start_url
            headers = {'Accept':'application/json, text/javascript, */*; q=0.01',
                       'Accept-Encoding':'gzip, deflate',
                       'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
                       'Origin':self.base_link, 'Referer':self.base_link,
                       'User-Agent':User_Agent, 'X-Requested-With':'XMLHttpRequest'}
            OPEN = requests.get(start_url,headers=headers,verify=False,timeout=5).content
            if 'OOPS, ' in OPEN:
                start_url = '%s-%s' %(start_url,year)
                OPEN = requests.get(start_url,headers=headers,verify=False,timeout=5).content
            try:
                item_title = re.compile('<h2><b><a href=.+?>([^<>]*)</').findall(OPEN)[0]
            except:
                item_title = re.compile('<h2><a href=.*?title="([^"]*)"').findall(OPEN)[0]
            item_year = re.compile('class="dat">([^<>]*)</').findall(OPEN)[0]
            #print 'item_year >> '+item_year
            item_url = '%s/season/%s/episode/%s' %(start_url,season,episode)
            #print'itemurlnew>> ' +item_url
            content = requests.get(item_url,headers=headers,verify=False,timeout=5).content
            if clean_title(item_title).lower() == clean_title(title).lower():
                if item_year in show_year:
                    # Same obfuscated-timestamp scheme as scrape_movie.
                    TIME = time.time()- 3600
                    TIME = str(TIME).split('.')[0]
                    TIME = base64.b64encode(TIME,'strict')
                    TIME = TIME.replace('==','%3D%3D')
                    token = re.compile("var tok.+?'([^']*)'").findall(content)[0]
                    match = re.compile('elid.+?"([^"]*)"').findall(content)
                    id = match[0]
                    headers = {'accept':'application/json, text/javascript, */*; q=0.01',
                               'accept-encoding':'gzip, deflate, br', 'accept-language':'en-US,en;q=0.8',
                               'content-type':'application/x-www-form-urlencoded; charset=UTF-8',
                               'origin':self.base_link, 'referer':item_url, 'user-agent':User_Agent,
                               'x-requested-with':'XMLHttpRequest'}
                    request_url = '%s/ajax/tnembedr.php' %self.base_link
                    postdata={'action':'getEpisodeEmb','idEl':id,'token':token,'elid':TIME}
                    links = requests.post(request_url, data=postdata,verify=False, headers=headers).content
                    #print 'post> '+links
                    match = re.compile('"type":"(.+?)".+?[iI][fF][rR][aA][mM][eE].+?[sS][rR][cC].+?"(.+?)"',re.DOTALL).findall(links)
                    for source_base,link in match:
                        link = link.replace('\\','')
                        if 'blogspot' in source_base:
                            source = source_base.split(' -')[0]
                            quality = source_base.split(' - ')[1]
                            self.sources.append({'source': source,'quality': quality,'scraper': self.name,'url': link,'direct': True})
                        elif 'googleuser' in source_base:
                            if '1080' in source_base:
                                qual = '1080p'
                            elif '720' in source_base:
                                qual='720p'
                            else:
                                qual='DVD'
                            self.sources.append({'source': 'GoogleLink','quality': qual,'scraper': self.name,'url': link,'direct': True})
                        elif 'googleapis' in source_base:
                            self.sources.append({'source': 'GoogleLink','quality': '720P','scraper': self.name,'url': link,'direct': True})
                        elif 'streamango.com' in link:
                            get_res=requests.get(link,headers=headers,timeout=5).content
                            qual = re.compile('{type:"video/mp4".+?height:(.+?),',re.DOTALL).findall(get_res)[0]
                            self.sources.append({'source': source_base, 'quality': qual, 'scraper': self.name, 'url': link,'direct': False})
                        elif 'openload' in link:
                            self.sources.append({'source': source_base,'quality': 'DVD','scraper': self.name,'url': link,'direct': False})
                        elif 'vidnodessl' in link:
                            self.sources.append({'source': 'VidnodeSSL','quality': '720p','scraper': self.name,'url': link,'direct': True})
                        else:
                            self.sources.append({'source': source_base,'quality': 'Unknown','scraper': self.name,'url': link,'direct': False})
            return self.sources
        except Exception, argument:
            return self.sources
d0b4cb28839cfdc8c4a734546cbda35b877f0297 | 1,966 | py | Python | examples/http_proxy/py3_websocket.py | kuaidaili/python-sdk | b694d767e2516c6a43336e0f929a0f5a4abddccb | [
"BSD-2-Clause"
] | 50 | 2019-01-25T01:43:55.000Z | 2022-03-21T08:43:41.000Z | examples/http_proxy/py3_websocket.py | kuaidaili/python-sdk | b694d767e2516c6a43336e0f929a0f5a4abddccb | [
"BSD-2-Clause"
] | null | null | null | examples/http_proxy/py3_websocket.py | kuaidaili/python-sdk | b694d767e2516c6a43336e0f929a0f5a4abddccb | [
"BSD-2-Clause"
] | 24 | 2019-06-06T09:49:13.000Z | 2022-03-28T08:34:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Send websocket requests through an HTTP proxy.
"""
import gzip
import zlib
import websocket
# Opcodes that carry payload data (text or binary frames).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
url = "ws://echo.websocket.org/"
# Proxy host/port and the ("username", "password") credentials are
# placeholders; replace them with a working HTTP proxy.
proxies = {
    "http_proxy_host": "59.38.241.25",
    "http_proxy_port": 23916,
    "http_proxy_auth": ("username", "password"),
}
# NOTE: the websocket connection is opened at import time.
ws = websocket.create_connection(url, **proxies)
def recv():
    """Read a single frame from the global websocket.

    Returns an (opcode, data) pair. A receive error is reported as an
    OPCODE_CLOSE with data None; close and ping frames are acknowledged
    before returning.
    """
    try:
        frame = ws.recv_frame()
    except websocket.WebSocketException:
        # Treat any receive failure as a closed connection.
        return websocket.ABNF.OPCODE_CLOSE, None
    if not frame:
        raise websocket.WebSocketException("Not a valid frame %s" % frame)
    elif frame.opcode in OPCODE_DATA:
        return frame.opcode, frame.data
    elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
        # Complete the close handshake.
        ws.send_close()
        return frame.opcode, None
    elif frame.opcode == websocket.ABNF.OPCODE_PING:
        # Answer pings to keep the connection alive.
        ws.pong(frame.data)
        return frame.opcode, frame.data
    # Any other control frame: hand back opcode and payload unchanged.
    return frame.opcode, frame.data
def recv_ws():
    """Receive one message, decompress/decode it if possible, and print it."""
    opcode, data = recv()
    if opcode == websocket.ABNF.OPCODE_CLOSE:
        return
    if opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
        data = str(data, "utf-8")
    if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic
        try:
            data = "[gzip] " + str(gzip.decompress(data), "utf-8")
        except Exception:
            pass
    elif isinstance(data, bytes):
        try:
            # Negative wbits: raw deflate stream without a zlib header.
            data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
        except Exception:
            pass
    if isinstance(data, bytes):
        # Still binary after both attempts: show a printable repr.
        data = repr(data)
    print("< " + data)
def main():
    """Interactive loop: send each stdin line over the websocket, then
    print the (possibly compressed) reply."""
    print("Press Ctrl+C to quit")
    while True:
        outgoing = input("> ")
        ws.send(outgoing)
        recv_ws()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print('\nbye')
except Exception as e:
print(e) | 24.575 | 92 | 0.610885 |
24bf0c9e1e67ce91c7b8883f0c59a0f50ab2c6d7 | 1,338 | py | Python | examples/example_classification.py | kartik4949/TensorPipe | 9bf893016a735c68afb7be8a9fa420cfb6781cd5 | [
"Apache-2.0"
] | 89 | 2020-10-09T20:31:03.000Z | 2022-03-30T12:23:08.000Z | examples/example_classification.py | kartik4949/TensorPipe | 9bf893016a735c68afb7be8a9fa420cfb6781cd5 | [
"Apache-2.0"
] | 3 | 2020-12-15T06:18:07.000Z | 2020-12-15T06:33:14.000Z | examples/example_classification.py | kartik4949/TensorPipe | 9bf893016a735c68afb7be8a9fa420cfb6781cd5 | [
"Apache-2.0"
] | 21 | 2020-10-11T12:49:55.000Z | 2022-01-27T11:17:57.000Z | """
Copyright 2020 Kartik Sharma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
sys.path.append(os.getcwd())
from tensorpipe.pipe import Funnel
import numpy as np
from tensorpipe.pipe import Funnel
"""
Create a Funnel for the Pipeline!
"""
# Custom numpy code for injection.
def numpy_function(image, label):
    """Mirror ``image`` horizontally (left/right flip); pass ``label`` through."""
    mirrored = np.flip(image, axis=1)
    return mirrored, label
config = {
    "batch_size": 2,
    "image_size": [512, 512],
    # Augmentations applied by the pipeline; None means default parameters.
    "transformations": {
        "flip_left_right": None,
        "gridmask": None,
        "random_rotate": None,
    },
    "categorical_encoding": "labelencoder",
    # User-supplied numpy code injected into the pipeline (defined above).
    "numpy_function": numpy_function,
}
funnel = Funnel(data_path="testdata", config=config, datatype="categorical")
dataset = funnel.from_dataset(type="train")
# Iterate the batched dataset and show the image tensor shape per batch.
for data in dataset:
    print(data[0].shape)
| 25.730769 | 76 | 0.710015 |
dd91cfd03ecdc609dcb3f30daedfabd8fa324807 | 5,799 | py | Python | youtube_transcript_api/_api.py | danielcliu/youtube-transcript-api | dc9fc2ee9342649e09e311b3b87dfc70dace4833 | [
"MIT"
] | null | null | null | youtube_transcript_api/_api.py | danielcliu/youtube-transcript-api | dc9fc2ee9342649e09e311b3b87dfc70dace4833 | [
"MIT"
] | null | null | null | youtube_transcript_api/_api.py | danielcliu/youtube-transcript-api | dc9fc2ee9342649e09e311b3b87dfc70dace4833 | [
"MIT"
] | null | null | null | import requests
try:
import http.cookiejar as cookiejar
except ImportError:
import cookielib as cookiejar
from ._transcripts import TranscriptListFetcher
class YouTubeTranscriptApi():
    """Facade of classmethods for listing and fetching YouTube video transcripts."""

    @classmethod
    def list_transcripts(cls, video_id, proxies=None, cookies=None):
        """
        Retrieves the list of transcripts which are available for a given video. It returns a `TranscriptList` object
        which is iterable and provides methods to filter the list of transcripts for specific languages. While iterating
        over the `TranscriptList` the individual transcripts are represented by `Transcript` objects, which provide
        metadata and can either be fetched by calling `transcript.fetch()` or translated by calling
        `transcript.translate('en')`. Example::

            # retrieve the available transcripts
            transcript_list = YouTubeTranscriptApi.list_transcripts('video_id')

            # iterate over all available transcripts
            for transcript in transcript_list:

                # the Transcript object provides metadata properties
                print(
                    transcript.video_id,
                    transcript.language,
                    transcript.language_code,
                    # whether it has been manually created or generated by YouTube
                    transcript.is_generated,
                    # a list of languages the transcript can be translated to
                    transcript.translation_languages,
                )

                # fetch the actual transcript data
                print(transcript.fetch())

                # translating the transcript will return another transcript object
                print(transcript.translate('en').fetch())

            # you can also directly filter for the language you are looking for, using the transcript list
            transcript = transcript_list.find_transcript(['de', 'en'])

            # or just filter for manually created transcripts
            transcript = transcript_list.find_manually_created_transcript(['de', 'en'])

            # or automatically generated ones
            transcript = transcript_list.find_generated_transcript(['de', 'en'])

        :param video_id: the youtube video id
        :type video_id: str
        :param proxies: a dictionary mapping of http and https proxies to be used for the network requests
        :type proxies: {'http': str, 'https': str} - http://docs.python-requests.org/en/master/user/advanced/#proxies
        :param cookies: path of a Mozilla/Netscape-format cookie file to authenticate the requests with
        :type cookies: str
        :return: the list of available transcripts
        :rtype TranscriptList:
        """
        with requests.Session() as http_client:
            if cookies:
                # Load cookies from a Mozilla/Netscape-format cookie file.
                cj = cookiejar.MozillaCookieJar()
                cj.load(cookies)
                http_client.cookies = cj
            http_client.proxies = proxies if proxies else {}
            return TranscriptListFetcher(http_client).fetch(video_id)

    @classmethod
    def get_transcripts(cls, video_ids, languages=('en',), continue_after_error=False, proxies=None, cookies=None):
        """
        Retrieves the transcripts for a list of videos.

        :param video_ids: a list of youtube video ids
        :type video_ids: list[str]
        :param languages: A list of language codes in a descending priority. For example, if this is set to ['de', 'en']
        it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if it fails to
        do so.
        :type languages: list[str]
        :param continue_after_error: if this is set the execution won't be stopped, if an error occurs while retrieving
        one of the video transcripts
        :type continue_after_error: bool
        :param proxies: a dictionary mapping of http and https proxies to be used for the network requests
        :type proxies: {'http': str, 'https': str} - http://docs.python-requests.org/en/master/user/advanced/#proxies
        :param cookies: path of a Mozilla/Netscape-format cookie file to authenticate the requests with
        :type cookies: str
        :return: a tuple containing a dictionary mapping video ids onto their corresponding transcripts, and a list of
        video ids, which could not be retrieved
        :rtype ({str: [{'text': str, 'start': float, 'end': float}]}, [str]}):
        """
        data = {}
        unretrievable_videos = []

        for video_id in video_ids:
            try:
                data[video_id] = cls.get_transcript(video_id, languages, proxies, cookies)
            except Exception:
                if not continue_after_error:
                    # Bare raise keeps the original traceback intact.
                    raise
                # Best-effort mode: record the failure and keep going.
                unretrievable_videos.append(video_id)

        return data, unretrievable_videos

    @classmethod
    def get_transcript(cls, video_id, languages=('en',), proxies=None, cookies=None):
        """
        Retrieves the transcript for a single video. This is just a shortcut for calling::

            YouTubeTranscriptApi.list_transcripts(video_id, proxies).find_transcript(languages).fetch()

        :param video_id: the youtube video id
        :type video_id: str
        :param languages: A list of language codes in a descending priority. For example, if this is set to ['de', 'en']
        it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if it fails to
        do so.
        :type languages: list[str]
        :param proxies: a dictionary mapping of http and https proxies to be used for the network requests
        :type proxies: {'http': str, 'https': str} - http://docs.python-requests.org/en/master/user/advanced/#proxies
        :param cookies: path of a Mozilla/Netscape-format cookie file to authenticate the requests with
        :type cookies: str
        :return: a list of dictionaries containing the 'text', 'start' and 'duration' keys
        :rtype [{'text': str, 'start': float, 'end': float}]:
        """
        return cls.list_transcripts(video_id, proxies, cookies).find_transcript(languages).fetch()
fd89ea0bd71ccd73ed4a17a726b40b551831ef75 | 46 | py | Python | visualvocab/deps/__init__.py | schwettmann/visualvocab | d6ba80dc648576baf13efcb7c778e7e4384dcacb | [
"MIT"
] | 8 | 2021-10-15T19:45:41.000Z | 2022-02-20T20:52:11.000Z | visualvocab/deps/__init__.py | schwettmann/visualvocab | d6ba80dc648576baf13efcb7c778e7e4384dcacb | [
"MIT"
] | null | null | null | visualvocab/deps/__init__.py | schwettmann/visualvocab | d6ba80dc648576baf13efcb7c778e7e4384dcacb | [
"MIT"
] | 1 | 2021-12-31T01:46:54.000Z | 2021-12-31T01:46:54.000Z | """Code dependencies not available on pip."""
| 23 | 45 | 0.717391 |
83ced50aef9e29c3fc00a3a5a62420a9588ab320 | 2,283 | py | Python | Miscellaneous/TOPOLOGIE_FUNZIONANTI/esperimenti/e3/start.py | Balzu/network-inference-with-Blockchain | 0ce67d44662771b3e575fdb1b3cebd5a233e6c6e | [
"MIT"
] | 1 | 2019-04-01T13:21:52.000Z | 2019-04-01T13:21:52.000Z | Miscellaneous/TOPOLOGIE_FUNZIONANTI/esperimenti/e3/start.py | Balzu/network-inference-with-Blockchain | 0ce67d44662771b3e575fdb1b3cebd5a233e6c6e | [
"MIT"
] | null | null | null | Miscellaneous/TOPOLOGIE_FUNZIONANTI/esperimenti/e3/start.py | Balzu/network-inference-with-Blockchain | 0ce67d44662771b3e575fdb1b3cebd5a233e6c6e | [
"MIT"
] | 1 | 2020-06-17T23:29:53.000Z | 2020-06-17T23:29:53.000Z | #!/us5/bin/python
import time
import sys
from create_merge_topo import *
from threading import Thread, Lock
# Global lock serialising the phases that mutate the shared Mininet network.
lock = Lock()
def run(i, nh, hosts, lock):
    """Worker-thread body: collect traces and write a merged topology file.

    i: thread index, used to name the output file 'm_topo_<i>'.
    nh: number of random hosts to pick when 'hosts' is empty.
    hosts: explicit host names to use (may be empty).
    lock: serialises the phases that touch the shared module-global 'net'
          (created in the __main__ block).
    """
    if len(hosts) == 0:
        hosts = get_hosts(int(nh))
    alias = create_alias()
    # Each network-mutating phase runs under the lock so threads do not
    # interleave operations on the shared Mininet instance.
    lock.acquire()
    compute_distances(net, hosts)
    lock.release()
    lock.acquire()
    make_anonymous_and_blocking_routers(net)
    lock.release()
    lock.acquire()
    create_traces(net, hosts)
    lock.release()
    # Build the virtual topology from the traces, then merge it.
    (vtopo, traces) = create_virtual_topo_and_traces(alias, net, hosts)
    (M,C) = create_merge_options(vtopo, traces)
    (M, mtopo) = create_merge_topology(M, vtopo, C)
    print_topo(mtopo)
    # Dump the induced topology (report text is intentionally in Italian).
    out_i = 'm_topo_' + str(i)
    os.system('touch ' + out_i)
    with open(out_i, "w") as file:
        file.write('\nThread ' + str(i) + ' :\n')
        file.write('Hosts che hanno partecipato a raccolta tracce:\n')
        for h in hosts:
            file.write(h + '\n')
        file.write('Topologia indotta:\n')
        for src in mtopo:
            for d in mtopo[src][1]:
                file.write(src + ' -> ' + d + '\n')
def stop_net(net):
    """Shut the given Mininet network down."""
    net.stop()
def start_net():
    ''' Start Mininet Topology'''
    topo = NetworkTopo()
    net = Mininet( topo=topo )
    # Routes must be in place before the network is brought up.
    add_static_routes(net)
    net.start()
    return net
def parse_cmd_line():
nt = sys.argv[1]
nh = 0
hosts = []
if sys.argv[2].startswith('h'):
hosts = sys.argv[2:]
else:
nh = sys.argv[2]
return (nt, nh, hosts)
if __name__ == '__main__':
if len(sys.argv) < 3:
print '\nUsage: python start.py <nt> < nh | hosts >\n'
'<nt> = number of threads to be used to collect traces\n'
'<nh> = number of random hosts that each thread will use\n'
'[hosts] = optional sequence of hosts, separated by whitespace, that'
'each thread will use deterministically'
sys.exit()
# Delete previously generated files..
os.system('./clean.sh')
(nt, nh, hosts) = parse_cmd_line()
net = start_net()
threads = []
for i in range(int(nt)):
thread = Thread(target = run, args = (i, nh, hosts, lock))
threads.append(thread)
thread.start()
for t in threads:
t.join()
print 'Threads finished'
| 27.178571 | 78 | 0.583443 |
faa07209ad0a64fb08f38e2d441e4ce61f99eeb4 | 807 | py | Python | makeraster.py | phargogh/prototype-gdal-raster-attribute-table-cpp | dd14d2512fef27197a800d6e433a680f7fb24b3f | [
"MIT"
] | null | null | null | makeraster.py | phargogh/prototype-gdal-raster-attribute-table-cpp | dd14d2512fef27197a800d6e433a680f7fb24b3f | [
"MIT"
] | null | null | null | makeraster.py | phargogh/prototype-gdal-raster-attribute-table-cpp | dd14d2512fef27197a800d6e433a680f7fb24b3f | [
"MIT"
] | null | null | null | from osgeo import gdal
import numpy
import numpy.random
# Make a raster with a RAT on import.
driver = gdal.GetDriverByName('GTiff')
raster = driver.Create(
'raster.tif', 5, 5, 1, gdal.GDT_Byte)
band = raster.GetRasterBand(1)
array = numpy.repeat(
numpy.linspace(0, 4, 5), 5).reshape((5,5)).astype(numpy.int8)
band.WriteArray(array)
values = numpy.unique(array)
random_values = numpy.random.randint(0, 5)
# Create and populate the RAT
rat = gdal.RasterAttributeTable()
rat.CreateColumn('VALUE', gdal.GFT_Integer, gdal.GFU_Generic)
rat.CreateColumn('RANDOM', gdal.GFT_Integer, gdal.GFU_Generic)
for i, value in enumerate(values):
rat.SetValueAsInt(i, 0, int(value))
rat.SetValueAsInt(i, 1, numpy.random.randint(0, 5))
# Associate with the band
raster.FlushCache()
band.SetDefaultRAT(rat)
| 28.821429 | 65 | 0.741016 |
d3cc0271bb0d934fe7034974b1385e41735a694e | 447 | py | Python | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
] | null | null | null | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
] | null | null | null | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
] | null | null | null | class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
dict_1 = {}
for i in s:
if i not in dict_1:
dict_1[i] = 1
else:
dict_1[i] += 1
print(dict_1)
for idx, val in enumerate(s):
if dict_1[val] == 1:
return idx
return -1
| 20.318182 | 37 | 0.364653 |
c4e2846a7c2400ce7d9d8ad32d5ba254fcd092dd | 4,456 | py | Python | app/views.py | mrcl/govhack-nz | 9d49edcaa7b5fe000606c35d543b84258f602aa6 | [
"MIT"
] | null | null | null | app/views.py | mrcl/govhack-nz | 9d49edcaa7b5fe000606c35d543b84258f602aa6 | [
"MIT"
] | null | null | null | app/views.py | mrcl/govhack-nz | 9d49edcaa7b5fe000606c35d543b84258f602aa6 | [
"MIT"
] | null | null | null | import os
import re
from glob import glob
from app import app
from flask import render_template, request, flash, redirect, url_for
from .pages import pages
from .value_ink_vars import FACTS
from .forms import GrantForm, ValueInkForm
import app.grant_hunter_vars as ghv
@app.route('/')
def index():
return render_template('index.html',
active='/',
pages=pages)
@app.route('/nz-revenue')
@app.route('/nz-revenue/<year>')
def revenue(year=None):
path = url_for('static', filename='data/revenue')
report_files = glob('app%s/old/*.json'%path)
reports = {}
for rf in report_files:
y = re.findall('201\d', rf)[0]
reports[y] = rf.replace('app', '')
years = list(reports.keys())
years.sort()
route = 'nz-revenue'
return render_template(pages[route]['template'],
active=route,
page=pages[route],
pages=pages,
years=years,
report=reports[year] if year else None,
year=year
)
@app.route('/grant-hunter', methods=['GET', 'POST'])
def grant_hunter():
form = GrantForm()
if form.validate_on_submit() and request.method == 'POST':
#if request.method == 'POST':
pool = form.pool.data
area = form.area.data
age = form.age.data
group = form.group.data
amount = form.amount.data
percent = form.percent.data
result = ghv.calc_approve(pool,area,age,group,amount,percent)
return render_template('grant-hunter-results.html',
pages=pages,
method=request.method,
form=GrantForm(),
result=result,
list_pool=ghv.list_pool)
return render_template('grant-hunter-form.html',
pages=pages,
form=GrantForm(),
list_pool=ghv.list_pool,
list_area=ghv.list_area,
list_age=ghv.list_age,
list_group=ghv.list_group,
list_amount=ghv.list_amount,
list_percent=ghv.list_percent
)
@app.route('/value-ink', methods=['GET', 'POST'])
def value_ink():
form = ValueInkForm()
if form.validate_on_submit() or request.method == 'POST':
p_budget_values = [
1.084671616,
0.348805923,
0.081447637,
0.009242686,
0.013894476,
0,
0,
0.039521726,
0.309450566,
35.37391036,
]
check_box_list_results = [
form.checkbox_1.data,
form.checkbox_2.data,
form.checkbox_3.data,
form.checkbox_4.data,
form.checkbox_5.data,
form.checkbox_6.data,
form.checkbox_7.data,
form.checkbox_8.data,
form.checkbox_9.data,
form.checkbox_10.data,
]
selection_based_result = 0
for cb, v in zip(check_box_list_results, p_budget_values):
if cb:
selection_based_result += v
print('age_group', form.age_group.data)
return render_template('value-ink.html',
pages=pages,
method=request.method,
form=form,
selection_based_result='%3.2f' % selection_based_result,
facts=FACTS[form.age_group.data])
return render_template('value-ink.html',
pages=pages,
form=form,
selection_based_result=None)
@app.route('/mind-the-gap')
def generic_view():
route = 'mind-the-gap'
return render_template(pages[route]['template'],
active=route,
page=pages[route],
pages=pages)
@app.route('/partition')
def partition_chart():
return render_template('widgets/partition_chart.html',
active='tab-info',
pages=pages)
@app.route('/tree')
def collapsible_tree():
return render_template('widgets/collapsible_tree.html',
active='tab-info',
pages=pages)
| 29.706667 | 87 | 0.5193 |
73431d1139b7155df93786d7268a8ff3e2fdcff7 | 1,903 | py | Python | espnet/nets/chainer_backend/transformer/encoder_layer.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/chainer_backend/transformer/encoder_layer.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/chainer_backend/transformer/encoder_layer.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
"""Class Declaration of Transformer's Encoder Block."""
import chainer
import chainer.functions as F
from espnet.nets.chainer_backend.transformer.attention import MultiHeadAttention
from espnet.nets.chainer_backend.transformer.layer_norm import LayerNorm
from espnet.nets.chainer_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
class EncoderLayer(chainer.Chain):
"""Single encoder layer module.
Args:
n_units (int): Number of input/output dimension of a FeedForward layer.
d_units (int): Number of units of hidden layer in a FeedForward layer.
h (int): Number of attention heads.
dropout (float): Dropout rate
"""
def __init__(
self, n_units, d_units=0, h=8, dropout=0.1, initialW=None, initial_bias=None
):
"""Initialize EncoderLayer."""
super(EncoderLayer, self).__init__()
with self.init_scope():
self.self_attn = MultiHeadAttention(
n_units,
h,
dropout=dropout,
initialW=initialW,
initial_bias=initial_bias,
)
self.feed_forward = PositionwiseFeedForward(
n_units,
d_units=d_units,
dropout=dropout,
initialW=initialW,
initial_bias=initial_bias,
)
self.norm1 = LayerNorm(n_units)
self.norm2 = LayerNorm(n_units)
self.dropout = dropout
self.n_units = n_units
def forward(self, e, xx_mask, batch):
"""Forward Positional Encoding."""
n_e = self.norm1(e)
n_e = self.self_attn(n_e, mask=xx_mask, batch=batch)
e = e + F.dropout(n_e, self.dropout)
n_e = self.norm2(e)
n_e = self.feed_forward(n_e)
e = e + F.dropout(n_e, self.dropout)
return e
| 31.716667 | 84 | 0.613768 |
af92db4f41f04923990da6f378c41563c241873a | 913 | py | Python | flask_app/__init__.py | Parikshit-njit/Project4 | 5ff23b692827bc95074f59e29b04a87c6ad109e9 | [
"MIT"
] | null | null | null | flask_app/__init__.py | Parikshit-njit/Project4 | 5ff23b692827bc95074f59e29b04a87c6ad109e9 | [
"MIT"
] | null | null | null | flask_app/__init__.py | Parikshit-njit/Project4 | 5ff23b692827bc95074f59e29b04a87c6ad109e9 | [
"MIT"
] | 2 | 2021-08-03T02:07:40.000Z | 2021-12-03T11:43:12.000Z | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import redis
from flask_app.routes import routes_api
db = SQLAlchemy()
def create_app():
"""Construct the core application."""
app = Flask(__name__,
instance_relative_config=False,
template_folder="templates",
static_folder="static"
)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///addresses.db'
app.config['SECRET_KEY'] = "Hello World!"
app.config['SESSION_TYPE'] = 'redis'
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_USE_SIGNER'] = True
app.config['SESSION_REDIS'] = redis.from_url('redis://localhost:6379')
app.register_blueprint(routes_api)
db.init_app(app)
with app.app_context():
from . import routes
return app
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
| 26.085714 | 76 | 0.664841 |
59c4392574348331efd8326a61765c22483c004d | 812 | py | Python | pics/urls.py | BrilliantGrant/Personal-Gallery | d51f8521d45a001a37120360fb9ac65d4ef375a2 | [
"MIT",
"Unlicense"
] | 2 | 2018-01-28T14:35:32.000Z | 2018-02-27T04:36:16.000Z | galleria/urls.py | NewtonBii/galleria | 06429d8f03e7629e18a1ad995d6a9fb11440f2ab | [
"MIT"
] | null | null | null | galleria/urls.py | NewtonBii/galleria | 06429d8f03e7629e18a1ad995d6a9fb11440f2ab | [
"MIT"
] | null | null | null | """galleria URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('gallery.urls'))
]
| 35.304348 | 79 | 0.699507 |
adc381b17750a4c6e78f30f8d063fc687f69e11f | 170 | py | Python | BootCRUDApp/urls.py | cs-fullstack-2019-spring/django-bootstrapcrud-cw-PorcheWooten | ee7c94ac30b1bb4d7f64cc37aee813fd4fd3d112 | [
"Apache-2.0"
] | null | null | null | BootCRUDApp/urls.py | cs-fullstack-2019-spring/django-bootstrapcrud-cw-PorcheWooten | ee7c94ac30b1bb4d7f64cc37aee813fd4fd3d112 | [
"Apache-2.0"
] | null | null | null | BootCRUDApp/urls.py | cs-fullstack-2019-spring/django-bootstrapcrud-cw-PorcheWooten | ee7c94ac30b1bb4d7f64cc37aee813fd4fd3d112 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name=''),
path('edititem/<int:id>/', views.edititem, name='edititem'),
] | 18.888889 | 64 | 0.647059 |
925c118cae09367f07b60c03c5602c13585f94ee | 987 | py | Python | main.py | bwhitman/paper-display | 1a6405861bd66a8c86197a6afdef8712939e67a8 | [
"BSD-3-Clause"
] | 23 | 2017-02-10T17:51:57.000Z | 2019-10-14T18:16:28.000Z | main.py | bwhitman/paper-display | 1a6405861bd66a8c86197a6afdef8712939e67a8 | [
"BSD-3-Clause"
] | null | null | null | main.py | bwhitman/paper-display | 1a6405861bd66a8c86197a6afdef8712939e67a8 | [
"BSD-3-Clause"
] | null | null | null | from flask import Flask
from flask import request
app = Flask(__name__)
app.config['DEBUG'] = True
from random import shuffle
from unidecode import unidecode
import tweepy
import textwrap
import re
# All your twitter API stuff goes here
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Get a random tweet from a twitter account
@app.route('/message')
def message():
tweets = api.user_timeline(screen_name='cookbook', count=200, exclude_replies = True, include_rts = False)
shuffle(tweets)
text = tweets[0].text
text = unidecode(text)
text = text.replace('&','&')
text = text.replace('<','<')
text = text.replace('>','>')
text = text.replace('"','"')
text = text.replace(''',"'")
text = re.sub(r"http.*? ", "", text+" ")[:-1]
text = "\n".join(textwrap.wrap(text, 25))
return text
| 27.416667 | 107 | 0.708207 |
4a6fa04f6b5a345e1d3fcc085d453dc2febbc540 | 4,828 | py | Python | neutron_taas/tests/unit/taas_client/test_cli20_tapflow.py | openstack/tap-as-a-service | c9d046843565b3af514169c26e5893dbe86a9b98 | [
"Apache-2.0"
] | 68 | 2015-10-18T02:57:10.000Z | 2022-02-22T11:33:25.000Z | neutron_taas/tests/unit/taas_client/test_cli20_tapflow.py | openstack/tap-as-a-service | c9d046843565b3af514169c26e5893dbe86a9b98 | [
"Apache-2.0"
] | null | null | null | neutron_taas/tests/unit/taas_client/test_cli20_tapflow.py | openstack/tap-as-a-service | c9d046843565b3af514169c26e5893dbe86a9b98 | [
"Apache-2.0"
] | 27 | 2015-11-11T02:00:35.000Z | 2020-03-07T03:36:33.000Z | # Copyright 2015 NEC Corporation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from unittest import mock
from neutron_taas.taas_client import tapflow
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20TapFlowJSON(test_cli20.CLITestV20Base):
resource = 'tap_flow'
resource_plural = '%ss' % resource
def setUp(self):
self._mock_extension_loading()
super(CLITestV20TapFlowJSON, self).setUp()
self.resources = self.resource_plural
self.register_non_admin_status_resource(self.resource)
def _create_patch(self, name, func=None):
patcher = mock.patch(name)
thing = patcher.start()
return thing
def _mock_extension_loading(self):
ext_pkg = 'neutronclient.common.extension'
contrib = self._create_patch(ext_pkg + '._discover_via_entry_points')
contrib.return_value = [("_tap_flow", tapflow)]
return contrib
def test_ext_cmd_loaded(self):
neutron_shell = shell.NeutronShell('2.0')
extension_cmd = {'tap-flow-create': tapflow.CreateTapFlow,
'tap-flow-delete': tapflow.DeleteTapFlow,
'tap-flow-show': tapflow.ShowTapFlow,
'tap-flow-list': tapflow.ListTapFlow}
for cmd_name, cmd_class in extension_cmd.items():
found = neutron_shell.command_manager.find_command([cmd_name])
self.assertEqual(cmd_class, found[0])
def _test_create_tap_flow(self, port_id="random_port",
service_id="random_service",
direction="BOTH", arg_attr=None,
name_attr=None, val_attr=None,
name=''):
# Common definition for creating Tap flow
arg_attr = arg_attr or []
name_attr = name_attr or []
val_attr = val_attr or []
cmd = tapflow.CreateTapFlow(test_cli20.MyApp(sys.stdout), None)
tenant_id = 'my-tenant'
my_id = 'my-id'
args = ['--tenant-id', tenant_id,
'--port', port_id,
'--tap-service', service_id,
'--direction', direction] + arg_attr
pos_names = ['source_port', 'tap_service_id', 'direction'] + name_attr
pos_values = [port_id, service_id, direction] + val_attr
self._test_create_resource(self.resource, cmd, name, my_id, args,
pos_names, pos_values,
tenant_id=tenant_id)
def test_create_tap_flow_mandatory_params(self):
self._test_create_tap_flow()
def test_create_tap_flow_all_params(self):
name = 'dummyTapFlow'
description = 'Create a dummy tap flow'
self._test_create_tap_flow(name=name,
arg_attr=[
'--name', name,
'--description', description],
name_attr=['name', 'description'],
val_attr=[name, description])
def test_delete_tap_flow(self):
# Delete tap_flow: myid.
cmd = tapflow.DeleteTapFlow(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(self.resource, cmd, myid, args)
def test_update_tap_flow(self):
# Update tap_flow: myid --name myname.
cmd = tapflow.UpdateTapFlow(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(self.resource, cmd, 'myid',
['myid', '--name', 'myname'],
{'name': 'myname'})
def test_list_tap_flows(self):
# List tap_flows.
cmd = tapflow.ListTapFlow(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(self.resources, cmd, True)
def test_show_tap_flow(self):
# Show tap_flow: --fields id --fields name myid.
cmd = tapflow.ShowTapFlow(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(self.resource, cmd, self.test_id,
args, ['id', 'name'])
| 41.982609 | 78 | 0.60232 |
986485bba6501ef00e215d919b7081b50ca30850 | 14,803 | py | Python | src/gpcsd/gpcsd1d.py | natalieklein/gpcsd | 5f05a86a4f4ba9c1a40c3403d49c15d1f8626b75 | [
"MIT"
] | 1 | 2021-10-02T03:47:10.000Z | 2021-10-02T03:47:10.000Z | src/gpcsd/gpcsd1d.py | natalieklein/gpcsd | 5f05a86a4f4ba9c1a40c3403d49c15d1f8626b75 | [
"MIT"
] | 1 | 2021-10-12T23:16:46.000Z | 2021-10-12T23:16:46.000Z | src/gpcsd/gpcsd1d.py | natalieklein/gpcsd | 5f05a86a4f4ba9c1a40c3403d49c15d1f8626b75 | [
"MIT"
] | null | null | null | """
Class for 1D GPCSD model, fitting, and prediction.
"""
import autograd.numpy as np
np.seterr(all='ignore')
from autograd import grad
import scipy
from tqdm import tqdm
from gpcsd.priors import *
from gpcsd.covariances import *
from gpcsd.forward_models import *
from gpcsd.utility_functions import *
JITTER = 1e-8
class GPCSD1D:
def __init__(self, lfp, x, t, a=None, b=None, ngl=100, spatial_cov=None, temporal_cov_list=None, R_prior=None, sig2n_prior=None):
"""
:param lfp: LFP array, shape (n_spatial, n_time, n_trials); recommend rescaling to approximately std dev = 1
:param x: LFP observed spatial locations shape (n_spatial, 1), in microns
:param t: LFP observed time points, shape (n_time, 1), in milliseconds
:param a: Edge of range for integration (defaults to np.min(x))
:param b: Edge of range for integration (defaults to np.max(x))
:param ngl: order of Gauss-Legendre integration (defaults to 100)
:param spatial_cov: Instance of GPCSD1DSpatialCovSE
:param temporal_cov_list: list of instances of temporal covariance objects (GPSDTemporalCovSE or GPCSDTemporalCovMatern)
:param R_prior: Instance of a prior for R (defaults to GPCSDInvGammaPrior)
:param sig2n_prior: Instance of a prior for noise variance (defaults to GPCSDHalfNormalPrior)
"""
self.lfp = np.atleast_3d(lfp)
self.x = x
self.t = t
if a is None:
a = np.min(x)
if b is None:
b = np.max(x)
self.a = a
self.b = b
self.ngl = ngl
if spatial_cov is None:
spatial_cov = GPCSD1DSpatialCovSE(x, a=a, b=b, ngl=ngl)
self.spatial_cov = spatial_cov
if temporal_cov_list is None:
temporal_cov_list = [GPCSDTemporalCovSE(t), GPCSDTemporalCovMatern(t)]
self.temporal_cov_list = temporal_cov_list
if R_prior is None:
R_prior = GPCSDInvGammaPrior()
R_prior.set_params(np.min(np.diff(self.x.squeeze())), 0.5 * (np.max(self.x.squeeze()) - np.min(self.x.squeeze())))
self.R = {'value': R_prior.sample(), 'prior':R_prior,
'min':0.5 * np.min(np.diff(self.x.squeeze())), 'max':0.8 * (np.max(self.x) - np.min(self.x))}
if sig2n_prior is None:
sig2n_prior = GPCSDHalfNormalPrior(0.1)
self.sig2n = {'value': sig2n_prior.sample(), 'prior': sig2n_prior, 'min': 1e-8, 'max': 0.5}
elif isinstance(sig2n_prior, list):
self.sig2n = {'value': np.array([sp.sample() for sp in sig2n_prior]), 'prior': sig2n_prior,
'min': [1e-8] * len(sig2n_prior), 'max': [0.5] * len(sig2n_prior)}
else:
self.sig2n = {'value': sig2n_prior.sample(), 'prior': sig2n_prior, 'min': 1e-8, 'max': 0.5}
def __str__(self):
s = "GPCSD1D object\n"
s += "LFP shape: (%d, %d, %d)\n" % (self.lfp.shape[0], self.lfp.shape[1], self.lfp.shape[2])
s += "Integration bounds: (%d, %d)\n" % (self.a, self.b)
s += "Integration number points: %d\n" % self.ngl
s += "R parameter prior: %s\n" % str(self.R['prior'])
s += "R parameter value %0.4g\n" % self.R['value']
# TODO handle list if exists
#s += "sig2n parameter prior: %s\n" % str(self.sig2n['prior'])
#s += "sig2n parameter value %0.4g\n" % self.sig2n['value']
s += "Spatial covariance ell prior: %s\n" % str(self.spatial_cov.params['ell']['prior'])
s += "Spatial covariance ell value %0.4g\n" % self.spatial_cov.params['ell']['value']
for i in range(len(self.temporal_cov_list)):
s += "Temporal covariance %d class name: %s\n" % (i+1, type(self.temporal_cov_list[i]).__name__)
s += "Temporal covariance %d ell prior: %s\n" % (i+1, str(self.temporal_cov_list[i].params['ell']['prior']))
s += "Temporal covariance %d ell value %0.4g\n" % (i+1, self.temporal_cov_list[i].params['ell']['value'])
s += "Temporal covariance %d sigma2 prior: %s\n" % (i+1, str(self.temporal_cov_list[i].params['sigma2']['prior']))
s += "Temporal covariance %d sigma2 value %0.4g\n" % (i+1, self.temporal_cov_list[i].params['sigma2']['value'])
return s
def extract_model_params(self):
params = {}
params['R'] = self.R['value']
params['sig2n'] = self.sig2n['value'] # will be list possibly
params['spatial_ell'] = self.spatial_cov.params['ell']['value']
params['temporal_ell_list'] = [tc.params['ell']['value'] for tc in self.temporal_cov_list]
params['temporal_sigma2_list'] = [tc.params['sigma2']['value'] for tc in self.temporal_cov_list]
return params
def restore_model_params(self, params):
self.R['value'] = params['R']
self.sig2n['value'] = params['sig2n'] # will be list possibly
self.spatial_cov.params['ell']['value'] = params['spatial_ell']
if len(self.temporal_cov_list) != len(params['temporal_ell_list']):
print('different number of temporal covariance functions! stopping.')
return
for i, tc in enumerate(self.temporal_cov_list):
tc.params['ell']['value'] = params['temporal_ell_list'][i]
tc.params['sigma2']['value'] = params['temporal_sigma2_list'][i]
def update_lfp(self, new_lfp, t, x=None):
if x is not None:
self.x = x
self.spatial_cov.x = x
self.t = t
for tcov in self.temporal_cov_list:
tcov.t = t
self.lfp = new_lfp
def loglik(self):
nx = len(self.x)
nt = len(self.t)
ntrials = self.lfp.shape[2]
Ks = self.spatial_cov.compKphi_1d(self.R['value']) + JITTER * np.eye(nx)
Kt = np.zeros((nt, nt))
for i in range(len(self.temporal_cov_list)):
Kt = Kt + self.temporal_cov_list[i].compute_Kt()
Qs, Qt, Dvec = comp_eig_D(Ks, Kt, self.sig2n['value'])
logdet = -0.5*ntrials*np.sum(np.log(Dvec))
quad = 0
for trial in range(ntrials):
alpha = np.reshape(np.dot(np.dot(Qs.T,self.lfp[:, :, trial]),Qt),(nx*nt))
quad = quad + np.sum(np.square(alpha)/Dvec)
quad = -0.5 * quad
return np.squeeze(logdet + quad)
def fit(self, n_restarts=10, method='L-BFGS-B', fix_R=False, verbose=False,
options={'maxiter':1000, 'disp': False, 'gtol':1e-5, 'ftol':1e7 * np.finfo(float).eps}):
# Store nll values and params over restarts
nll_values = []
params = []
term_msg = []
# Get bounds from objects
bounds = []
bounds.append((np.log(self.R['min']/100), np.log(self.R['max']/100))) # Bounds on R
bounds.append((np.log(self.spatial_cov.params['ell']['min']/100), np.log(self.spatial_cov.params['ell']['max']/100)))
for i in range(len(self.temporal_cov_list)):
ell_min = self.temporal_cov_list[i].params['ell']['min']
ell_max = self.temporal_cov_list[i].params['ell']['max']
sig2_min = self.temporal_cov_list[i].params['sigma2']['min']
sig2_max = self.temporal_cov_list[i].params['sigma2']['max']
bounds.append((np.log(ell_min), np.log(ell_max)))
bounds.append((np.log(sig2_min), np.log(sig2_max)))
if np.isscalar(self.sig2n['value']):
bounds.append((np.log(self.sig2n['min']), np.log(self.sig2n['max']))) # bounds on log(sig2n)
else:
for i in range(len(self.sig2n['value'])):
bounds.append((np.log(self.sig2n['min'][i]), np.log(self.sig2n['max'][i])))
def obj_fun(tparams):
"""
Objective function (likelihood with priors)
:param tparams: list of log-transformed parameters
:return: value of negative log likelihood
"""
# Get parameters
if not fix_R:
self.R['value'] = np.exp(tparams[0]) * 100.0
self.spatial_cov.params['ell']['value'] = np.exp(tparams[1]) * 100.0
n_temp_cov = len(self.temporal_cov_list)
pind = 2
for i in range(n_temp_cov):
self.temporal_cov_list[i].params['ell']['value'] = np.exp(tparams[pind])
pind = pind + 1
self.temporal_cov_list[i].params['sigma2']['value'] = np.exp(tparams[pind])
pind = pind + 1
if np.isscalar(self.sig2n['value']):
self.sig2n['value'] = np.exp(tparams[pind])
else:
self.sig2n['value'] = np.exp(tparams[pind:])
# compute log priors
prior_lpdf = self.R['prior'].lpdf(self.R['value'])
prior_lpdf = prior_lpdf + self.spatial_cov.params['ell']['prior'].lpdf(self.spatial_cov.params['ell']['value'])
for i in range(n_temp_cov):
prior_lpdf = prior_lpdf + self.temporal_cov_list[i].params['ell']['prior'].lpdf(self.temporal_cov_list[i].params['ell']['value'])
prior_lpdf = prior_lpdf + self.temporal_cov_list[i].params['sigma2']['prior'].lpdf(self.temporal_cov_list[i].params['sigma2']['value'])
if np.isscalar(self.sig2n['value']):
prior_lpdf = prior_lpdf + self.sig2n['prior'].lpdf(self.sig2n['value'])
else:
for i in range(len(self.sig2n['prior'])):
prior_lpdf = prior_lpdf + self.sig2n['prior'][i].lpdf(self.sig2n['value'][i])
# Compute likelihood
llik = self.loglik()
nll = -1.0 * (llik + prior_lpdf)
return nll
for _ in tqdm(range(n_restarts), desc="Restarts"):
tparams0 = []
if fix_R:
tparams0.append(np.log(self.R['value']) - np.log(100))
else:
tparams0.append(np.log(self.R['prior'].sample()) - np.log(100)) # starting R
tparams0.append(np.log(self.spatial_cov.params['ell']['prior'].sample()) - np.log(100)) # starting spatial lengthscale
for i in range(len(self.temporal_cov_list)):
tparams0.append(np.log(self.temporal_cov_list[i].params['ell']['prior'].sample()))
tparams0.append(np.log(self.temporal_cov_list[i].params['sigma2']['prior'].sample()))
if np.isscalar(self.sig2n['value']):
tparams0.append(np.log(self.sig2n['prior'].sample())) # starting sig2n
else:
for i in range(len(self.sig2n['value'])):
tparams0.append(np.log(self.sig2n['prior'][i].sample())) # starting sig2n
tparams0 = np.array(tparams0)
try:
optrescov = scipy.optimize.minimize(obj_fun, tparams0, method=method, options=options, bounds=bounds, jac=grad(obj_fun))
tparams_fit = optrescov.x
nllcov = optrescov.fun
nll_values.append(nllcov)
params.append(tparams_fit)
term_msg.append(optrescov.message)
except (ValueError, np.linalg.LinAlgError) as e:
print(e)
nll_values = np.array(nll_values)
if len(nll_values) < 1:
print('problem with optimization!')
return
best_ind = np.argmin(nll_values[np.isfinite(nll_values)])
params = [params[i] for i in range(len(nll_values)) if np.isfinite(nll_values[i])]
if verbose:
print('\nNeg log lik values across different initializations:')
print(nll_values)
print('Best index termination message')
print(term_msg[best_ind])
if not fix_R:
self.R['value'] = np.exp(params[best_ind][0]) * 100
self.spatial_cov.params['ell']['value'] = np.exp(params[best_ind][1]) * 100
pind = 2
for i in range(len(self.temporal_cov_list)):
self.temporal_cov_list[i].params['ell']['value'] = np.exp(params[best_ind][pind])
pind += 1
self.temporal_cov_list[i].params['sigma2']['value'] = np.exp(params[best_ind][pind])
pind += 1
if np.isscalar(self.sig2n['value']):
self.sig2n['value'] = np.exp(params[best_ind][pind])
else:
self.sig2n['value'] = np.exp(params[best_ind][pind:])
def predict(self, z, t, type="csd"):
nx = self.x.shape[0]
nt = self.t.shape[0]
ntrials = self.lfp.shape[2]
nzstar = z.shape[0]
ntstar = t.shape[0]
yvec = np.reshape(self.lfp, (nx * nt, ntrials))
# Compute inverse matrix
Ks = self.spatial_cov.compKphi_1d(self.R['value'])
Kt = np.zeros((nt, nt))
for i in range(len(self.temporal_cov_list)):
Kt += self.temporal_cov_list[i].compute_Kt()
Qs, Qt, Dvec = comp_eig_D(Ks, Kt, self.sig2n['value'])
ktmp = mykron(Qs, Qt)
invmat = np.linalg.multi_dot([ktmp, np.diag(1. / Dvec), ktmp.T])
invy = np.dot(invmat,yvec)
# Compute cross cov
csd_list = []
csd = np.zeros((nzstar, ntstar, ntrials))
lfp_list = []
lfp = np.zeros((nzstar, ntstar, ntrials))
if type == "both" or type == "csd":
Kphig = self.spatial_cov.compKphig_1d(z=z, R=self.R['value'])
if type == "both" or type == "lfp":
Kphi = self.spatial_cov.compKphi_1d(R=self.R['value'], xp=z)
for i in range(len(self.temporal_cov_list)):
Ktstar = self.temporal_cov_list[i].compute_Kt(t)
if type == "both" or type == "csd":
csd_tmp = np.reshape(np.dot(mykron(Kphig,Ktstar).T, invy),(nzstar,ntstar,ntrials))
csd_list.append(csd_tmp)
csd += csd_tmp
if type == "both" or type == "lfp":
lfp_tmp = np.reshape(np.dot(mykron(Kphi,Ktstar).T, invy),(nzstar,ntstar,ntrials))
lfp_list.append(lfp_tmp)
lfp += lfp_tmp
if type == "both" or type == "csd":
self.csd_pred_list = csd_list
self.csd_pred = csd
if type == "both" or type == "lfp":
self.lfp_pred_list = lfp_list
self.lfp_pred = lfp
self.t_pred = t
self.x_pred = z
def sample_prior(self, ntrials):
nt = self.t.shape[0]
nx = self.x.shape[0]
Ks_csd = self.spatial_cov.compute_Ks()
Kt = np.zeros((nt, nt))
for i in range(len(self.temporal_cov_list)):
Kt += self.temporal_cov_list[i].compute_Kt()
Lt = np.linalg.cholesky(Kt)
Ls = np.linalg.cholesky(Ks_csd + JITTER * np.eye(nx))
csd = np.zeros((nx, nt, ntrials))
for trial in range(ntrials):
csd[:, :, trial] = np.dot(np.dot(Ls, np.random.normal(0, 1, (nx, nt))), Lt.T)
return csd
| 47.751613 | 151 | 0.575424 |
5240253b60ff967dae2da1531a3c23292c6ed236 | 2,688 | py | Python | tools/test-installed-numpy.py | IntelLabs/numpy | 1b4a877449313761d3de75c1ad725fdf396b170e | [
"BSD-3-Clause"
] | 7 | 2019-02-27T20:26:01.000Z | 2020-09-27T04:37:44.000Z | tools/test-installed-numpy.py | IntelLabs/numpy | 1b4a877449313761d3de75c1ad725fdf396b170e | [
"BSD-3-Clause"
] | 3 | 2019-12-23T09:26:27.000Z | 2021-10-31T13:34:17.000Z | tools/test-installed-numpy.py | IntelLabs/numpy | 1b4a877449313761d3de75c1ad725fdf396b170e | [
"BSD-3-Clause"
] | 3 | 2019-07-20T02:16:07.000Z | 2020-08-17T20:02:30.000Z | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
# A simple script to test the installed version of numpy by calling
# 'numpy.test()'. Key features:
# -- convenient command-line syntax
# -- sets exit status appropriately, useful for automated test environments
# It would be better to set this up as a module in the numpy namespace, so
# that it could be run as:
# python -m numpy.run_tests <args>
# But, python2.4's -m switch only works with top-level modules, not modules
# that are inside packages. So, once we drop 2.4 support, maybe...
import sys, os
# In case we are run from the source directory, we don't want to import numpy
# from there, we want to import the installed version:
sys.path.pop(0)
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] -- [nosetests options]")
parser.add_option("-v", "--verbose",
                  action="count", dest="verbose", default=1,
                  help="increase verbosity")
parser.add_option("--doctests",
                  action="store_true", dest="doctests", default=False,
                  help="Run doctests in module")
parser.add_option("--coverage",
                  action="store_true", dest="coverage", default=False,
                  help="report coverage of NumPy code (requires 'pytest-cov' module")
parser.add_option("-m", "--mode",
                  action="store", dest="mode", default="fast",
                  help="'fast', 'full', or something that could be "
                       "passed to pytest [default: %default]")
parser.add_option("-n", "--durations",
                  dest="durations", default=-1,
                  help="show time to run slowest N tests [default: -1]")
(options, args) = parser.parse_args()
# Import numpy only after sys.path has been fixed up above, so this picks up
# the installed package rather than an in-tree source checkout.
import numpy
# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
# The same flags check is also used in the tests to switch behavior.
if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
    if not numpy.ones((10, 1), order='C').flags.f_contiguous:
        print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
        sys.exit(1)
elif numpy.ones((10, 1), order='C').flags.f_contiguous:
    print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
    sys.exit(1)
if options.coverage:
    # Produce code coverage XML report for codecov.io
    args += ["--cov-report=xml"]
# numpy.test() returns a truthy value on success; map that onto the
# conventional process exit status (0 == success) for CI consumers.
result = numpy.test(options.mode,
                    verbose=options.verbose,
                    extra_argv=args,
                    doctests=options.doctests,
                    durations=int(options.durations),
                    coverage=options.coverage)
if result:
    sys.exit(0)
else:
    sys.exit(1)
| 40.119403 | 85 | 0.643601 |
280aae7ae067aef00d2d900a7a3f6acf68f1697f | 9,742 | py | Python | models/model.py | Koko-zep/Efficient_CapsuleNet | 1cdf8ffd7d95b27dbad1a320ccea8957995fffed | [
"Apache-2.0"
] | null | null | null | models/model.py | Koko-zep/Efficient_CapsuleNet | 1cdf8ffd7d95b27dbad1a320ccea8957995fffed | [
"Apache-2.0"
] | null | null | null | models/model.py | Koko-zep/Efficient_CapsuleNet | 1cdf8ffd7d95b27dbad1a320ccea8957995fffed | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Vittorio Mazzia & Francesco Salvetti. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from utils.layers import PrimaryCaps, FCCaps, Length
from utils.tools import get_callbacks, marginLoss, multiAccuracy
from utils.dataset import Dataset
from utils import pre_process_multimnist
from models import efficient_capsnet_graph_mnist, efficient_capsnet_graph_smallnorb, efficient_capsnet_graph_multimnist, original_capsnet_graph_mnist
import os
import json
from tqdm.notebook import tqdm
class Model(object):
    """
    Base class sharing common model functions and attributes.

    Subclasses build a concrete Keras graph into ``self.model`` (via their
    ``load_graph``); this class supplies configuration loading, weight
    (de)serialization, prediction and evaluation helpers.

    Attributes
    ----------
    model_name: str
        name of the model (Ex. 'MNIST')
    mode: str
        model modality (Ex. 'test')
    config_path: str
        path of the JSON configuration file
    verbose: bool
        whether graph construction prints a model summary

    Methods
    -------
    load_config():
        load configuration file
    load_graph_weights():
        load network weights
    predict(dataset_test):
        use the model to predict dataset_test
    evaluate(X_test, y_test):
        compute accuracy and test error with the given dataset (X_test, y_test)
    save_graph_weights():
        save model weights
    """
    def __init__(self, model_name, mode='test', config_path='config.json', verbose=True):
        self.model_name = model_name
        self.model = None  # built by subclasses in their load_graph()
        self.mode = mode
        self.config_path = config_path
        self.config = None
        self.verbose = verbose
        self.load_config()

    def load_config(self):
        """
        Load the JSON configuration file into ``self.config``.
        """
        with open(self.config_path) as json_data_file:
            self.config = json.load(json_data_file)

    def load_graph_weights(self):
        """
        Load network weights from ``self.model_path``.
        """
        try:
            self.model.load_weights(self.model_path)
        except Exception as e:
            # Fix: the original printed a misspelled "[ERRROR]" tag and
            # discarded the exception; surface the underlying reason instead.
            print("[ERROR] Graph Weights not found: {}".format(e))

    def predict(self, dataset_test):
        """
        Run the model on ``dataset_test`` and return its raw outputs.
        """
        return self.model.predict(dataset_test)

    def evaluate(self, X_test, y_test):
        """
        Compute and print accuracy and test error on (X_test, y_test).
        """
        print('-'*30 + f'{self.model_name} Evaluation' + '-'*30)
        if self.model_name == "MULTIMNIST":
            # MULTIMNIST overlays digit pairs; accuracy is averaged per batch
            # using the multi-label metric.
            dataset_test = pre_process_multimnist.generate_tf_data_test(X_test, y_test, self.config["shift_multimnist"], n_multi=self.config['n_overlay_multimnist'])
            acc = []
            for X, y in tqdm(dataset_test, total=len(X_test)):
                y_pred, X_gen1, X_gen2 = self.model.predict(X)
                acc.append(multiAccuracy(y, y_pred))
            acc = np.mean(acc)
        else:
            y_pred, X_gen = self.model.predict(X_test)
            acc = np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0]
        test_error = 1 - acc
        print('Test acc:', acc)
        print(f"Test error [%]: {(test_error):.4%}")
        if self.model_name == "MULTIMNIST":
            print(f"N° misclassified images: {int(test_error*len(y_test)*self.config['n_overlay_multimnist'])} out of {len(y_test)*self.config['n_overlay_multimnist']}")
        else:
            print(f"N° misclassified images: {int(test_error*len(y_test))} out of {len(y_test)}")

    def save_graph_weights(self):
        """
        Save model weights to ``self.model_path``.
        """
        self.model.save_weights(self.model_path)
class EfficientCapsNet(Model):
    """
    Manage an Efficient-CapsNet model. 'model_name' and 'mode' define the
    particular architecture and modality of the generated network.

    Attributes
    ----------
    model_name: str
        name of the model (Ex. 'MNIST')
    mode: str
        model modality (Ex. 'test')
    config_path: str
        path configuration file
    custom_path: str
        custom weights path
    verbose: bool

    Methods
    -------
    load_graph():
        load the network graph given the model_name
    train(dataset, initial_epoch):
        train the constructed network with a given dataset. All train
        hyperparameters are defined in the configuration file
    """
    def __init__(self, model_name, mode='test', config_path='config.json', custom_path=None, verbose=True):
        Model.__init__(self, model_name, mode, config_path, verbose)
        if custom_path is not None:
            self.model_path = custom_path
        else:
            self.model_path = os.path.join(self.config['saved_model_dir'], f"efficient_capsnet_{self.model_name}.h5")
        self.model_path_new_train = os.path.join(self.config['saved_model_dir'], f"efficient_capsnet{self.model_name}_new_train.h5")
        self.tb_path = os.path.join(self.config['tb_log_save_dir'], f"efficient_capsnet_{self.model_name}")
        self.load_graph()

    def load_graph(self):
        """
        Build the network graph matching ``self.model_name``.
        """
        # Bug fix: the original condition was
        #     if self.model_name == 'MNIST' or 'rotMNIST' or ...
        # which is always truthy (non-empty string literals), so the
        # SMALLNORB and MULTIMNIST branches were unreachable and every model
        # silently received the MNIST graph. Use a membership test instead.
        if self.model_name in ('MNIST', 'rotMNIST', 'shiftMNIST', 'squashMNIST', 'cutMNIST'):
            self.model = efficient_capsnet_graph_mnist.build_graph(self.config['MNIST_INPUT_SHAPE'], self.mode, self.verbose)
        elif self.model_name == 'SMALLNORB':
            self.model = efficient_capsnet_graph_smallnorb.build_graph(self.config['SMALLNORB_INPUT_SHAPE'], self.mode, self.verbose)
        elif self.model_name == 'MULTIMNIST':
            self.model = efficient_capsnet_graph_multimnist.build_graph(self.config['MULTIMNIST_INPUT_SHAPE'], self.mode, self.verbose)

    def train(self, dataset=None, initial_epoch=0):
        """
        Train the network; hyperparameters come from the configuration file.
        Returns the Keras ``History`` object.
        """
        callbacks = get_callbacks(self.tb_path, self.model_path_new_train, self.config['lr_dec'], self.config['lr'])
        if dataset is None:
            dataset = Dataset(self.model_name, self.config_path)
        dataset_train, dataset_val = dataset.get_tf_data()
        if self.model_name == 'MULTIMNIST':
            # Two reconstruction heads (one per overlaid digit) split the
            # generator loss weight between them.
            self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.config['lr']),
                               loss=[marginLoss, 'mse', 'mse'],
                               loss_weights=[1., self.config['lmd_gen']/2, self.config['lmd_gen']/2],
                               metrics={'Efficient_CapsNet': multiAccuracy})
            steps = 10*int(dataset.y_train.shape[0] / self.config['batch_size'])
        else:
            self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.config['lr']),
                               loss=[marginLoss, 'mse'],
                               loss_weights=[1., self.config['lmd_gen']],
                               metrics={'Efficient_CapsNet': 'accuracy'})
            steps = None
        print('-'*30 + f'{self.model_name} train' + '-'*30)
        history = self.model.fit(dataset_train,
                                 epochs=self.config['epochs'], steps_per_epoch=steps,
                                 validation_data=(dataset_val), batch_size=self.config['batch_size'], initial_epoch=initial_epoch,
                                 callbacks=callbacks)
        return history
class CapsNet(Model):
    """
    Manage the original CapsNet architecture (Sabour et al., dynamic routing).

    Attributes
    ----------
    model_name: str
        name of the model (only MNIST provided)
    mode: str
        model modality (Ex. 'test')
    config_path: str
        path configuration file
    verbose: bool
    n_routing: int
        number of routing iterations

    Methods
    -------
    load_graph():
        load the network graph given the model_name
    train():
        train the constructed network with a given dataset. All train
        hyperparameters are defined in the configuration file
    """
    def __init__(self, model_name, mode='test', config_path='config.json', custom_path=None, verbose=True, n_routing=3):
        Model.__init__(self, model_name, mode, config_path, verbose)
        self.n_routing = n_routing
        self.load_config()
        if custom_path is not None:
            self.model_path = custom_path
        else:
            # NOTE(review): the default weight file is prefixed
            # "efficient_capsnet_" while new training saves to
            # "original_capsnet_..._new_train.h5". This looks inconsistent but
            # is kept as-is so existing checkpoints still load — confirm
            # against the published weight files before changing.
            self.model_path = os.path.join(self.config['saved_model_dir'], f"efficient_capsnet_{self.model_name}.h5")
        self.model_path_new_train = os.path.join(self.config['saved_model_dir'], f"original_capsnet_{self.model_name}_new_train.h5")
        self.tb_path = os.path.join(self.config['tb_log_save_dir'], f"original_capsnet_{self.model_name}")
        self.load_graph()

    def load_graph(self):
        """
        Build the original CapsNet graph (MNIST only).
        """
        self.model = original_capsnet_graph_mnist.build_graph(self.config['MNIST_INPUT_SHAPE'], self.mode, self.n_routing, self.verbose)

    def train(self, dataset=None, initial_epoch=0):
        """
        Train the network; hyperparameters come from the configuration file.
        Returns the Keras ``History`` object.
        """
        callbacks = get_callbacks(self.tb_path, self.model_path_new_train, self.config['lr_dec'], self.config['lr'])
        if dataset is None:
            dataset = Dataset(self.model_name, self.config_path)
        dataset_train, dataset_val = dataset.get_tf_data()
        self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.config['lr']),
                           loss=[marginLoss, 'mse'],
                           loss_weights=[1., self.config['lmd_gen']],
                           metrics={'Original_CapsNet': 'accuracy'})
        print('-'*30 + f'{self.model_name} train' + '-'*30)
        history = self.model.fit(dataset_train,
                                 epochs=self.config['epochs'],
                                 validation_data=(dataset_val), batch_size=self.config['batch_size'], initial_epoch=initial_epoch,
                                 callbacks=callbacks)
        return history
| 38.65873 | 169 | 0.645247 |
1085bf19a3648e73e6712d83eec6a30fe8acbe5d | 1,431 | py | Python | parse/test.py | sw32-seo/GTA | 86b102a14b78f6c8b50d742a56445c748e59b51e | [
"MIT"
] | 5 | 2021-09-30T16:28:48.000Z | 2022-03-30T05:20:27.000Z | parse/test.py | sw32-seo/GTA | 86b102a14b78f6c8b50d742a56445c748e59b51e | [
"MIT"
] | null | null | null | parse/test.py | sw32-seo/GTA | 86b102a14b78f6c8b50d742a56445c748e59b51e | [
"MIT"
] | null | null | null | import torch
import pdb
def main():
    """Partition the SMILES vocabulary into syntax, bond and symbol tokens.

    Returns
    -------
    tuple of (token_set, syntax_set, bond_set, symbols_set)
        ``symbols_set`` is every vocabulary token that is neither SMILES
        ring/branch syntax nor a bond character. (Previously the function
        returned ``None`` and dropped into ``pdb``; returning the sets is
        backward compatible and makes the result usable.)
    """
    vocab_dict = {'<unk>': 0, '<blank>': 1, '<s>': 2, '</s>': 3, 'c': 4, 'C': 5, '(': 6, ')': 7, '1': 8, 'O': 9, '2': 10, '=': 11, 'N': 12, 'n': 13, '3': 14, 'F': 15, 'Cl': 16, '.': 17, '-': 18, '4': 19, 'S': 20, '[C@H]': 21, 'Br': 22, '[C@@H]': 23, '#': 24, '[nH]': 25, 's': 26, '[N+]': 27, '[O-]': 28, 'o': 29, '5': 30, '/': 31, 'B': 32, 'I': 33, '[Si]': 34, '[C@]': 35, '[C@@]': 36, '[N-]': 37, '\\': 38, '6': 39, 'P': 40, '[Mg+]': 41, '[Sn]': 42, '[n+]': 43, '[C-]': 44, '[SiH]': 45, '[P+]': 46, '7': 47, '[NH4+]': 48, '[S-]': 49, '[Cu]': 50, '[Zn+]': 51, '[SiH2]': 52, '[S@@]': 53, '[NH3+]': 54, '[S@]': 55, '[BH3-]': 56, '[S+]': 57, '[Li]': 58, '[se]': 59, '[BH-]': 60, '[NH2+]': 61, '[OH-]': 62, '[SH]': 63, '8': 64, '[PH]': 65, '[SnH]': 66, '[Se]': 67, '[Zn]': 68, '[Mg]': 69, '9': 70, '[K]': 71, '[NH-]': 72, '[PH2]': 73, '[PH4]': 74, '[Pt]': 75, '[s+]': 76, '<MASK>': 77, '[B-]': 78, '[Br-]': 79, '[Cl+3]': 80, '[Cl-]': 81, '[Fe]': 82, '[I+]': 83, '[N@+]': 84, '[NH+]': 85, '[Pd]': 86, '[n-]': 87}
    token_set = set(vocab_dict.keys())
    # Branch parentheses and ring-closure digits are SMILES syntax, not atoms.
    syntax_set = set(['(', ')', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
    bond_set = set(['-', '=', '#', '/', '\\'])
    symbols_set = token_set.difference(syntax_set).difference(bond_set)
    # Removed the leftover pdb.set_trace() debugging hook.
    return token_set, syntax_set, bond_set, symbols_set
# Run the vocabulary analysis only when executed as a script.
if __name__ == '__main__':
    main()
| 65.045455 | 1,007 | 0.387841 |
cc08cb3e9193e975201aecef3e939c0d4f3d2eb2 | 777 | py | Python | elvanto_subgroups/utils.py | monty5811/elvanto_subgroups | ef7a819787bbb5bf2a8bf6160e8476a613f67fa3 | [
"MIT"
] | null | null | null | elvanto_subgroups/utils.py | monty5811/elvanto_subgroups | ef7a819787bbb5bf2a8bf6160e8476a613f67fa3 | [
"MIT"
] | null | null | null | elvanto_subgroups/utils.py | monty5811/elvanto_subgroups | ef7a819787bbb5bf2a8bf6160e8476a613f67fa3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import namedtuple
import requests
def clean_emails(elvanto_emails=(), google_emails=()):
    """Pair Elvanto and Google email collections, dropping empty addresses.

    Elvanto people with no email (empty string) are excluded; the Google
    list is passed through unchanged. Returns an object with ``.elvanto``
    and ``.google`` attributes.

    Bug fix: the original assigned the lists as attributes *on the
    namedtuple class* (shadowing its field descriptors and sharing state
    via the class object) instead of instantiating it.
    """
    Emails = namedtuple('emails', ['elvanto', 'google'])
    filtered = [addr for addr in elvanto_emails if addr]
    return Emails(elvanto=filtered, google=google_emails)
def retry_request(url, http_method, *args, **kwargs):
    """Issue an HTTP request via ``requests``, retrying non-200 responses.

    Makes up to four attempts in total and returns the first response with
    status 200, or the last response if every attempt failed. Extra
    positional and keyword arguments are forwarded to the ``requests`` call.
    """
    assert http_method in ['get', 'post', 'delete', 'patch', 'put']
    max_retries = 3
    send = getattr(requests, http_method)
    attempt = 0
    resp = send(url, *args, **kwargs)
    while resp.status_code != 200 and attempt < max_retries:
        attempt += 1
        resp = send(url, *args, **kwargs)
    return resp
| 25.9 | 67 | 0.635779 |
01d2cf70768d27c64532900d19238906a85c79cd | 12,061 | py | Python | swift_metadata_sync/metadata_sync.py | fusionfoto/swift-metadata-sync | 5a88e262e6f3ee9f060ff643cc050b0b4ead2e30 | [
"Apache-2.0"
] | null | null | null | swift_metadata_sync/metadata_sync.py | fusionfoto/swift-metadata-sync | 5a88e262e6f3ee9f060ff643cc050b0b4ead2e30 | [
"Apache-2.0"
] | null | null | null | swift_metadata_sync/metadata_sync.py | fusionfoto/swift-metadata-sync | 5a88e262e6f3ee9f060ff643cc050b0b4ead2e30 | [
"Apache-2.0"
] | 1 | 2021-07-22T01:54:49.000Z | 2021-07-22T01:54:49.000Z | from distutils.version import StrictVersion
import elasticsearch
import elasticsearch.helpers
import email.utils
import hashlib
import json
import logging
import os
import os.path
from swift.common.utils import decode_timestamps
from container_crawler.base_sync import BaseSync
class MetadataSync(BaseSync):
DOC_TYPE = 'object'
DOC_MAPPING = {
"content-length": {"type": "long"},
"content-type": {"type": "string"},
"etag": {"type": "string", "index": "not_analyzed"},
"last-modified": {"type": "date"},
"x-object-manifest": {"type": "string"},
"x-static-large-object": {"type": "boolean"},
"x-swift-container": {"type": "string"},
"x-swift-account": {"type": "string"},
"x-swift-object": {"type": "string"},
"x-timestamp": {"type": "date"},
"x-trans-id": {"type": "string", "index": "not_analyzed"}
}
USER_META_PREFIX = 'x-object-meta-'
def __init__(self, status_dir, settings, per_account=False):
super(MetadataSync, self).__init__(status_dir, settings, per_account)
self.logger = logging.getLogger('swift-metadata-sync')
es_hosts = settings['es_hosts']
self._es_conn = elasticsearch.Elasticsearch(es_hosts)
self._server_version = StrictVersion(
self._es_conn.info()['version']['number'])
self._index = settings['index']
self._parse_json = settings.get('parse_json', False)
self._pipeline = settings.get('pipeline')
self._verify_mapping()
def get_last_row(self, db_id):
if not os.path.exists(self._status_file):
return 0
with open(self._status_file) as f:
try:
status = json.load(f)
entry = status.get(db_id, None)
if not entry:
return 0
if entry['index'] == self._index:
return entry['last_row']
else:
return 0
except ValueError:
return 0
return 0
def save_last_row(self, row_id, db_id):
if not os.path.exists(self._status_account_dir):
os.mkdir(self._status_account_dir)
if not os.path.exists(self._status_file):
with open(self._status_file, 'w') as f:
json.dump({db_id: dict(last_row=row_id,
index=self._index)}, f)
return
with open(self._status_file, 'r+') as f:
try:
status = json.load(f)
except ValueError:
status = {}
status[db_id] = dict(last_row=row_id, index=self._index)
f.seek(0)
json.dump(status, f)
f.truncate()
return
def handle(self, rows, internal_client):
self.logger.debug("Handling rows: %s" % repr(rows))
if not rows:
return []
errors = []
bulk_delete_ops = []
mget_map = {}
for row in rows:
if row['deleted']:
bulk_delete_ops.append({'_op_type': 'delete',
'_id': self._get_document_id(row),
'_index': self._index,
'_type': self.DOC_TYPE})
continue
mget_map[self._get_document_id(row)] = row
if bulk_delete_ops:
errors = self._bulk_delete(bulk_delete_ops)
if not mget_map:
self._check_errors(errors)
return
self.logger.debug("multiple get map: %s" % repr(mget_map))
stale_rows, mget_errors = self._get_stale_rows(mget_map)
errors += mget_errors
update_ops = [self._create_index_op(doc_id, row, internal_client)
for doc_id, row in stale_rows]
_, update_failures = elasticsearch.helpers.bulk(
self._es_conn,
update_ops,
raise_on_error=False,
raise_on_exception=False
)
self.logger.debug("Index operations: %s" % repr(update_ops))
for op in update_failures:
op_info = op['index']
if 'exception' in op_info:
errors.append(op_info['exception'])
else:
errors.append("%s: %s" % (
op_info['_id'], self._extract_error(op_info)))
self._check_errors(errors)
def _check_errors(self, errors):
if not errors:
return
for error in errors:
self.logger.error(str(error))
raise RuntimeError('Failed to process some entries')
def _bulk_delete(self, ops):
errors = []
success_count, delete_failures = elasticsearch.helpers.bulk(
self._es_conn, ops,
raise_on_error=False,
raise_on_exception=False
)
for op in delete_failures:
op_info = op['delete']
if op_info['status'] == 404:
if op_info.get('result') == 'not_found':
continue
# < 5.x Elasticsearch versions do not return "result"
if op_info.get('found') is False:
continue
if 'exception' in op_info:
errors.append(op_info['exception'])
else:
errors.append("%s: %s" % (op_info['_id'],
self._extract_error(op_info)))
return errors
def _get_stale_rows(self, mget_map):
errors = []
stale_rows = []
results = self._es_conn.mget(body={'ids': mget_map.keys()},
index=self._index,
refresh=True,
_source=['x-timestamp'])
docs = results['docs']
for doc in docs:
row = mget_map.get(doc['_id'])
if not row:
errors.append("Unknown row for ID %s" % doc['_id'])
continue
if 'error' in doc:
errors.append("Failed to query %s: %s" % (
doc['_id'], str(doc['error'])))
continue
object_date = self._get_last_modified_date(row)
# ElasticSearch only supports milliseconds
object_ts = int(float(object_date) * 1000)
if not doc['found'] or object_ts > doc['_source'].get(
'x-timestamp', 0):
stale_rows.append((doc['_id'], row))
continue
self.logger.debug("Stale rows: %s" % repr(stale_rows))
return stale_rows, errors
def _create_index_op(self, doc_id, row, internal_client):
swift_hdrs = {'X-Newest': True}
meta = internal_client.get_object_metadata(
self._account, self._container, row['name'], headers=swift_hdrs)
op = {'_op_type': 'index',
'_index': self._index,
'_type': self.DOC_TYPE,
'_source': self._create_es_doc(meta, self._account,
self._container,
row['name'].decode('utf-8'),
self._parse_json),
'_id': doc_id}
if self._pipeline:
op['pipeline'] = self._pipeline
return op
"""
Verify document mapping for the elastic search index. Does not include
any user-defined fields.
"""
def _verify_mapping(self):
index_client = elasticsearch.client.IndicesClient(self._es_conn)
try:
mapping = index_client.get_mapping(index=self._index,
doc_type=self.DOC_TYPE)
except elasticsearch.TransportError as e:
if e.status_code != 404:
raise
if e.error != 'type_missing_exception':
raise
mapping = {}
if not mapping.get(self._index, None) or \
self.DOC_TYPE not in mapping[self._index]['mappings']:
missing_fields = self.DOC_MAPPING.keys()
else:
current_mapping = mapping[self._index]['mappings'][
self.DOC_TYPE]['properties']
# We are not going to force re-indexing, so won't be checking the
# mapping format
missing_fields = [key for key in self.DOC_MAPPING.keys()
if key not in current_mapping]
if missing_fields:
new_mapping = dict([(k, v) for k, v in self.DOC_MAPPING.items()
if k in missing_fields])
# Elasticsearch 5.x deprecated the "string" type. We convert the
# string fields into the appropriate 5.x types.
# TODO: Once we remove support for the 2.x clusters, we should
# remove this code and create the new mappings for each field.
if self._server_version >= StrictVersion('5.0'):
new_mapping = dict([(k, self._update_string_mapping(v))
for k, v in new_mapping.items()])
index_client.put_mapping(index=self._index, doc_type=self.DOC_TYPE,
body={'properties': new_mapping})
@staticmethod
def _create_es_doc(meta, account, container, key, parse_json=False):
def _parse_document(value):
try:
return json.loads(value.decode('utf-8'))
except ValueError:
return value.decode('utf-8')
es_doc = {}
# ElasticSearch only supports millisecond resolution
es_doc['x-timestamp'] = int(float(meta['x-timestamp']) * 1000)
# Convert Last-Modified header into a millis since epoch date
ts = email.utils.mktime_tz(
email.utils.parsedate_tz(meta['last-modified'])) * 1000
es_doc['last-modified'] = ts
es_doc['x-swift-object'] = key
es_doc['x-swift-account'] = account
es_doc['x-swift-container'] = container
user_meta_keys = dict(
[(k.split(MetadataSync.USER_META_PREFIX, 1)[1].decode('utf-8'),
_parse_document(v) if parse_json else v.decode('utf-8'))
for k, v in meta.items()
if k.startswith(MetadataSync.USER_META_PREFIX)])
es_doc.update(user_meta_keys)
for field in MetadataSync.DOC_MAPPING.keys():
if field in es_doc:
continue
if field not in meta:
continue
es_doc[field] = meta[field]
return es_doc
@staticmethod
def _get_last_modified_date(row):
ts, content, meta = decode_timestamps(row['created_at'])
# NOTE: the meta timestamp will always be latest, as it will be updated
# when content type is updated
return meta
@staticmethod
def _extract_error(err_info):
if 'error' not in err_info or 'root_cause' not in err_info['error']:
return str(err_info['status'])
err = err_info['error']['root_cause']
try:
return '%s: %s' % (err, err_info['error']['caused_by']['reason'])
except KeyError:
return err
@staticmethod
def _update_string_mapping(mapping):
if mapping['type'] != 'string':
return mapping
if 'index' in mapping and mapping['index'] == 'not_analyzed':
return {'type': 'keyword'}
# This creates a mapping that is both searchable as a text and keyword
# (the default behavior in Elasticsearch for 2.x string types).
return {
'type': 'text',
'fields': {
'keyword': {
'type': 'keyword'}
}
}
def _get_document_id(self, row):
return hashlib.sha256(
'/'.join([self._account.encode('utf-8'),
self._container.encode('utf-8'),
row['name']])
).hexdigest()
| 38.657051 | 79 | 0.537103 |
168eaf8661ac83d56a18f697fa35a0e0d5d41e24 | 554 | py | Python | tests/system/test_base.py | breml/netsamplebeat | f9881edf49b771fc537048284a8417d3d2e7a196 | [
"Apache-2.0"
] | null | null | null | tests/system/test_base.py | breml/netsamplebeat | f9881edf49b771fc537048284a8417d3d2e7a196 | [
"Apache-2.0"
] | null | null | null | tests/system/test_base.py | breml/netsamplebeat | f9881edf49b771fc537048284a8417d3d2e7a196 | [
"Apache-2.0"
] | null | null | null | from netsamplebeat import BaseTest
import os
class Test(BaseTest):
    def test_base(self):
        """
        Basic test with exiting Netsamplebeat normally
        """
        # Render the beat configuration pointing at this run's log directory.
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/*"
        )
        # Replay a canned ICMP ping capture (-I) instead of sniffing a live
        # interface; -e logs to stderr so log_contains() can see the output.
        netsamplebeat_proc = self.start_beat(extra_args=["-I", "pcaps/icmp/icmp4_ping.pcap", "-e"])
        self.wait_until( lambda: self.log_contains("netsamplebeat is running"))
        # A clean shutdown must yield exit status 0.
        exit_code = netsamplebeat_proc.kill_and_wait()
        assert exit_code == 0
| 27.7 | 99 | 0.640794 |
27bfdef46cd2c54e25434a5c3024ee4e43c2d227 | 258 | py | Python | manage.py | andreif/heroku_django | 824b88fbde8628f221f2af4c47a17f45d055aff3 | [
"MIT"
] | null | null | null | manage.py | andreif/heroku_django | 824b88fbde8628f221f2af4c47a17f45d055aff3 | [
"MIT"
] | null | null | null | manage.py | andreif/heroku_django | 824b88fbde8628f221f2af4c47a17f45d055aff3 | [
"MIT"
] | null | null | null | import os
import sys
if __name__ == '__main__':
    # Fix: ``assert`` is stripped when Python runs with -O, silently skipping
    # this validation; raise an explicit, descriptive error instead.
    if not (os.environ.get('DJANGO_SETTINGS_MODULE')
            or '--settings=' in ''.join(sys.argv)):
        raise SystemExit(
            'Either DJANGO_SETTINGS_MODULE must be set or a --settings= '
            'option must be passed on the command line.')
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 25.8 | 64 | 0.717054 |
9eca82a02fb6c862e108fb9250223096c51ab274 | 3,967 | py | Python | venv/lib/python3.8/site-packages/pygments/lexers/devicetree.py | felipesch92/projeto_kivy | 382827b9a632c5c3989a3129a2d3ee29b0defcf3 | [
"MIT"
] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | venv/lib/python3.8/site-packages/pygments/lexers/devicetree.py | felipesch92/projeto_kivy | 382827b9a632c5c3989a3129a2d3ee29b0defcf3 | [
"MIT"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | venv/lib/python3.8/site-packages/pygments/lexers/devicetree.py | felipesch92/projeto_kivy | 382827b9a632c5c3989a3129a2d3ee29b0defcf3 | [
"MIT"
] | 63 | 2015-01-04T07:11:06.000Z | 2020-11-28T21:24:42.000Z | """
pygments.lexers.devicetree
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Devicetree language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, default, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text
__all__ = ['DevicetreeLexer']
class DevicetreeLexer(RegexLexer):
"""
Lexer for `Devicetree <https://www.devicetree.org/>`_ files.
.. versionadded:: 2.7
"""
name = 'Devicetree'
aliases = ['devicetree', 'dts']
filenames = ['*.dts', '*.dtsi']
mimetypes = ['text/x-c']
#: optional Whitespace or /*...*/ style comment
_ws = r'\s*(?:/[*][^*/]*?[*]/\s*)*'
tokens = {
'macro': [
# Include preprocessor directives (C style):
(r'(#include)(' + _ws + r')([^\n]+)',
bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
# Define preprocessor directives (C style):
(r'(#define)(' + _ws + r')([^\n]+)',
bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc)),
# devicetree style with file:
(r'(/[^*/{]+/)(' + _ws + r')("[^\n{]+")',
bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
# devicetree style with property:
(r'(/[^*/{]+/)(' + _ws + r')([^\n;{]*)([;]?)',
bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc, Punctuation)),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
# Open until EOF, so no ending delimeter
(r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
],
'statements': [
(r'(L?)(")', bygroups(String.Affix, String), 'string'),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
(r'([^\s{}/*]*)(\s*)(:)', bygroups(Name.Label, Text, Punctuation)),
(words(('compatible', 'model', 'phandle', 'status', '#address-cells',
'#size-cells', 'reg', 'virtual-reg', 'ranges', 'dma-ranges',
'device_type', 'name'), suffix=r'\b'), Keyword.Reserved),
(r'([~!%^&*+=|?:<>/#-])', Operator),
(r'[()\[\]{},.]', Punctuation),
(r'[a-zA-Z_][\w-]*(?=(?:\s*,\s*[a-zA-Z_][\w-]*|(?:' + _ws + r'))*\s*[=;])',
Name),
(r'[a-zA-Z_]\w*', Name.Attribute),
],
'root': [
include('whitespace'),
include('macro'),
# Nodes
(r'([^/*@\s&]+|/)(@?)([0-9a-fA-F,]*)(' + _ws + r')(\{)',
bygroups(Name.Function, Operator, Number.Integer,
Comment.Multiline, Punctuation), 'node'),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
(';', Punctuation, '#pop'),
],
'node': [
include('whitespace'),
include('macro'),
(r'([^/*@\s&]+|/)(@?)([0-9a-fA-F,]*)(' + _ws + r')(\{)',
bygroups(Name.Function, Operator, Number.Integer,
Comment.Multiline, Punctuation), '#push'),
include('statements'),
(r'\};', Punctuation, '#pop'),
(';', Punctuation),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
| 36.394495 | 89 | 0.453743 |
cb8b6cb3b12d132f5e886edc7812db298ceed75f | 18,284 | py | Python | swat/tests/cas/test_builtins.py | arharvey918/python-swat | 0db2db2d7c049b23391de419950954c8d505b325 | [
"Apache-2.0"
] | null | null | null | swat/tests/cas/test_builtins.py | arharvey918/python-swat | 0db2db2d7c049b23391de419950954c8d505b325 | [
"Apache-2.0"
] | null | null | null | swat/tests/cas/test_builtins.py | arharvey918/python-swat | 0db2db2d7c049b23391de419950954c8d505b325 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This test requires a running CAS server. You must use an ~/.authinfo
# file to specify your username and password. The CAS host and port must
# be specified using the CASHOST and CASPORT environment variables.
# A specific protocol ('cas', 'http', 'https', or 'auto') can be set using
# the CASPROTOCOL environment variable.
import os
import swat
import swat.utils.testing as tm
import pprint
import time
import unittest
USER, PASSWD = tm.get_user_pass()
HOST, PORT, PROTOCOL = tm.get_host_port_proto()
class TestBuiltins(tm.TestCase):
    def setUp(self):
        # Reset SWAT to library defaults and silence CAS server messages so
        # each test only sees its own output.
        swat.reset_option()
        swat.options.cas.print_messages = False
        swat.options.interactive_mode = False
        # One CAS connection per test; closed again in tearDown().
        self.s = swat.CAS(HOST, PORT, USER, PASSWD, protocol=PROTOCOL)
        self.pp = pprint.PrettyPrinter(indent=4)
    def tearDown(self):
        # End the server-side session before dropping the connection object,
        # then restore global SWAT options for the next test.
        self.s.endsession()
        del self.s
        swat.reset_option()
    def test_echo(self):
        # echo with no parameters returns an empty result set.
        r = self.s.builtins.echo()
        self.assertEqual(r, {})
        self.assertEqual(r.status, None)
        r = self.s.builtins.echo(a=[1,2,3])
        self.assertEqual( r, {'a': [1, 2, 3]} )
        self.assertEqual(r.status, None)
    def test_echo_null(self):
        r = self.s.builtins.echo()
        self.assertEqual( r, {} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_str(self):
        r = self.s.builtins.echo(x='a')
        self.assertEqual( r, {'x':'a'} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_3(self):
        r = self.s.builtins.echo(x=3)
        self.assertEqual( r, {'x':3} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_false(self):
        # NOTE(review): booleans appear to round-trip as 0/1 here, while
        # test_echo_list below compares against False directly (0 == False
        # in Python, so both pass).
        r = self.s.builtins.echo(x=False)
        self.assertEqual( r, {'x':0} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_list(self):
        r = self.s.builtins.echo(w='a', x='b', y=3, z=False)
        self.assertEqual( r, {'w':'a', 'x':'b', 'y':3, 'z':False} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_emptylist(self):
        r = self.s.builtins.echo(x=[])
        self.assertEqual( r, {'x':[]} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_emptydict(self):
        # Empty dicts and tuples come back as empty lists; the server does
        # not preserve the original container type.
        r = self.s.builtins.echo(x={})
        self.assertEqual( r, {'x':[]} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_emptytuple(self):
        r = self.s.builtins.echo( emptyTuple=() )
        self.assertEqual( r, {'emptyTuple':[]} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_singletuple(self):
        # A tuple with one item is constructed by following a value with a comma.
        # On output, tuples are always enclosed in parentheses.
        st = 7,
        r = self.s.builtins.echo( singleTuple=st )
        # Because of the way that results come back from the server, there is no way to construct a
        # list or tuple at the output.  There is always a possibility of mixed keys and non-keys,
        # so Python always has to use dictionaries for output objects.
        #
        self.assertEqual( r, {'singleTuple':[7]} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_nest(self):
        mytuple = 12345, 54321, 'hello!'
        r = self.s.builtins.echo( w=3, x=4, y={5}, z=6, a=[7], t=mytuple )
        self.assertEqual( r, {'w':3, 'x':4, 'y':[5], 'z':6, 'a':[7], 't': [12345, 54321, 'hello!']} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_echo_nest_parms(self):
        # Nested dicts flatten to lists of their values on output.
        r = self.s.builtins.echo( x=3, y={0:5, 'alpha':'beta'}, test=4, orange=True, fred=6 )
        self.assertEqual( r, {'x':3, 'y':[5, 'beta'], 'test':4, 'orange':True, 'fred':6} )
        self.assertEqual( r.status, None )
        #self.assertEqual( r.debug, None )
    def test_addnode(self):
        # Outcome depends on the server deployment: only MPP-mode servers
        # with sufficient privileges can add nodes.
        r = self.s.addnode()
        if r.severity == 0:
            # MPP mode
            self.assertEqual( r['Nodes'], [] )
        else:
            # Could fail for a number of reasons such as we're in SMP mode,
            # we don't have proper credentials for addnode, .etc
            self.assertIn( r.status, ['Error parsing action parameters.',
                                      "Authorization",
                                      "Nodes cannot be added when the server is running with in SMP mode."] )
        # Invalid parameter combinations must error out regardless of mode.
        r = self.s.addnode(salt='controller', node=['pepper'])
        self.assertContainsMessage(r, "ERROR: The action stopped due to errors.")
        r = self.s.addnode(node=['salt', 'pepper'])
        self.assertContainsMessage(r, "ERROR: The action stopped due to errors.")
        # Can't be done in unit test because it changes state:
        # self.assertEqual(self.s.addnode{'captain','snap046'}, None)
    def test_help(self):
        """The builtins action set appears in `help` output with the expected
        column metadata and the full roster of builtin actions."""
        r = self.s.help()
        b = r.get('builtins')
        self.assertTrue(b is not None)
        # prettyprint(b)
        # The builtin library should be loaded at least
        self.assertEqual(len(b.columns), 2)
        self.assertTrue(len(b) >= 23)
        self.assertEqual(b.columns[0], 'name')
        self.assertEqual(b.colinfo['name'].label, 'Name')
        self.assertEqual(b.colinfo['name'].name, 'name')
        self.assertIn(b.colinfo['name'].dtype, ['char','varchar'])
        self.assertEqual(b.colinfo['name'].width, 64)
        self.assertEqual(b.columns[1], 'description')
        self.assertEqual(b.colinfo['description'].label, 'Description')
        self.assertEqual(b.colinfo['description'].name, 'description')
        self.assertIn(b.colinfo['description'].dtype, ['char','varchar'])
        self.assertEqual(b.colinfo['description'].width, 256)
        # Flatten the result table rows so membership checks are order-independent.
        data = list(list(x) for x in b.itertuples(index=False))
        self.assertIn(['addNode','Adds a machine to the server'], data)
        self.assertIn(['removeNode','Remove one or more machines from the server'], data)
        self.assertIn(['help','Shows the parameters for an action or lists all available actions'], data)
        self.assertIn(['listNodes','Shows the host names used by the server'], data)
        self.assertIn(['loadActionSet','Loads an action set for use in this session'], data)
        self.assertIn(['installActionSet','Loads an action set in new sessions automatically'], data)
        self.assertIn(['log', 'Shows and modifies logging levels'], data)
        self.assertIn(['queryActionSet','Shows whether an action set is loaded'], data)
        self.assertIn(['queryName', 'Checks whether a name is an action or action set name'], data)
        self.assertIn(['reflect','Shows detailed parameter information for an action or all actions in an action set'], data)
        self.assertIn(['serverStatus','Shows the status of the server'], data)
        self.assertIn(['about', 'Shows the status of the server'], data)
        self.assertIn(['shutdown','Shuts down the server'], data)
        self.assertIn(['userInfo','Shows the user information for your connection'], data)
        self.assertIn(['actionSetInfo','Shows the build information from loaded action sets'], data)
        self.assertIn(['history','Shows the actions that were run in this session'], data)
        self.assertIn(['casCommon','Provides parameters that are common to many actions'], data)
        self.assertIn(['ping','Sends a single request to the server to confirm that the connection is working'], data)
        self.assertIn(['echo','Prints the supplied parameters to the client log'], data)
        self.assertIn(['modifyQueue','Modifies the action response queue settings'], data)
        self.assertIn(['getLicenseInfo','Shows the license information for a SAS product'], data)
        self.assertIn(['refreshLicense','Refresh SAS license information from a file'], data)
        self.assertIn(['httpAddress','Shows the HTTP address for the server monitor'], data)
def test_listactions(self):
# NOTE: 'listactions' is an alias to 'help'
self.assertNotEqual(self.s.builtins.help(), {})
# Try to list an actionset that is not loaded. Expect it to not be found.
out = self.s.builtins.help(actionSet='neuralNet')
self.assertEqual(self.s.builtins.help(actionSet='neuralNet'), {})
# Try to list an action that is not loaded. Expect it to not be found.
out = self.s.builtins.help(action='annTrain')
self.assertEqual(self.s.builtins.help(action='annTrain'), {})
# Load an actionSet and then list it.
r = self.s.builtins.loadactionset(actionset='neuralNet')
if r.severity != 0:
self.pp.pprint(r.messages)
self.assertEquals( r.status, None )
# List an actionSet that is loaded.
out = self.s.builtins.help(actionSet='neuralNet')
#aset = out.get('neuralNet')
aset = out['neuralNet']
self.assertTrue(aset is not None)
self.assertEqual(len(aset.columns), 2)
self.assertGreaterEqual(len(aset), 3)
self.assertEqual(aset.columns[0], 'name')
self.assertEqual(aset.columns[1], 'description')
self.assertTrue(aset.iloc[0].tolist() == ['annTrain','Train an artificial neural network'] or
aset.iloc[0].tolist() == ['annTrain','Trains an artificial neural network'])
self.assertTrue(aset.iloc[1].tolist() == ['annScore','Score a table using an artificial neural network model'] or
aset.iloc[1].tolist() == ['annScore','Scores a table using an artificial neural network model'])
self.assertTrue(aset.iloc[2].tolist() == ['annCode','Generate DATA step scoring code from an artificial neural network model'] or
aset.iloc[2].tolist() == ['annCode','Generates DATA step scoring code from an artificial neural network model'])
# List an action that is loaded.
act = self.s.builtins.help(action='annTrain')
self.assertTrue(act is not None)
self.assertTrue(act['annTrain'])
# We get back several hundred lines of output. Verify a few at the beginning of the response.
self.assertTrue("NOTE: Information for action 'neuralNet.annTrain':" in act.messages)
self.assertTrue("NOTE: The following parameters are accepted. Default values are shown." in act.messages)
# Verify a line of output near the end of the response.
self.assertTrue("NOTE: double dropOut=0 (0 <= value < 1)," in act.messages)
    def test_listnodes(self):
        """listnodes returns a non-empty result describing the server hosts."""
        self.assertNotEqual(self.s.listnodes(), {})
        # Can we do more here? How do we know what nodes are loaded?
    def test_loadactionset(self):
        """loadactionset returns {} for missing/invalid names and echoes valid names back."""
        self.assertEqual(self.s.loadactionset(), {})
        self.assertEqual(self.s.loadactionset('ohcrumbs'), {})
        self.assertEqual(self.s.loadactionset(actionset="You mean, we can't get out? We're trapped! Ahah, ahah! Entombed in the blackness of night, doomed to die in the darkness of a zipless tomb."), {})
        self.assertEqual(self.s.loadactionset('builtins'), {'actionset':'builtins'})
        # One could argue that the following should return 'builtins', but we do
        # not believe that it is worth fixing for that one special case.
        self.assertEqual(self.s.loadactionset(actionset='tkcasablt'), {'actionset':'tkcasablt'})
        self.assertEqual(self.s.loadactionset(actionset=None), {})
        # not here self.assertEqual(self.s.invoke('loadactionset','actionTest'), (0,'actionTest'))
def test_queryactionset(self):
r = self.s.queryactionset('builtins')
self.assertEqual(r['builtins'], True)
r = self.s.queryactionset('unknown')
self.assertEqual(r['unknown'], False)
    def test_actionsetinfo(self):
        """actionsetinfo lists loaded action sets; with all=True it lists every
        installed set, which must be a strict superset of the loaded ones."""
        r = self.s.actionsetinfo()
        self.assertEqual(list(r.keys())[0], 'setinfo')
        setinfo = r['setinfo']
        # Get the actionset column
        actionsets = setinfo['actionset'].tolist()
        self.assertIn('builtins', actionsets)
        r = self.s.actionsetinfo(all=True)
        self.assertEqual(list(r.keys())[0], 'setinfo')
        setinfo = r['setinfo']
        # Get the actionset column
        allactionsets = setinfo['actionset'].tolist()
        self.assertIn('builtins', allactionsets)
        self.assertNotIn('unknown', allactionsets)
        self.assertTrue(len(allactionsets) > len(actionsets))
    def test_log(self):
        """Set and read back logger levels; invalid parameters or levels error.

        Skips when the session lacks administrator rights, since setting
        levels then fails.
        """
        r = self.s.log(logger='App.cas.builtins.log', level="debug")
        if r.severity != 0:
            self.assertIn( r.status, ["You must be an administrator to set logging levels. The logger is immutable."] )
            self.skipTest("You must be an administrator to set logging levels")
        r = self.s.log(invalid='parameter')
        self.assertNotEqual(r.status, None)
        r = self.s.log(logger='App.cas.builtins.log', level="invalid")
        self.assertNotEqual(r.status, None)
        # Querying without a level reports the current level (set to debug above).
        r = self.s.log(logger='App.cas.builtins.log')
        self.assertEqual(r.status, None)
        self.assertEqual(r.messages[0], "NOTE: Logger: 'App.cas.builtins.log', level: 'debug'.")
        r = self.s.log(logger='App.cas.builtins.log', level="info")
        r = self.s.log(logger='App.cas.builtins.log')
        self.assertEqual(r.messages[0], "NOTE: Logger: 'App.cas.builtins.log', level: 'info'.")
        r = self.s.log(logger='App.cas.builtins.log', level="null")
        r = self.s.log(logger='App.cas.builtins.log')
        self.assertEqual(r.messages[0], "NOTE: Logger: 'App.cas.builtins.log', level: 'null'.")
        r = self.s.log(logger='App.cas.builtins.log')
        r = self.s.log()
        self.assertEqual(r.status, None)
        self.assertTrue(len(r.messages) >= 1)
    def test_reflect(self):
        """reflect with no arguments returns metadata for all loaded action sets."""
        r = self.s.reflect()
        self.assertTrue('label' in r[0])
        self.assertTrue('name' in r[0])
        self.assertTrue('actions' in r[0])
        self.assertTrue(len(r[0]['actions']) >= 9)
    def test_serverstatus(self):
        """serverstatus should stream back at least one response item."""
        self.assertNotEqual([x for x in self.s.invoke('serverstatus')], [])
        # Can we do more here? How do we know what nodes are loaded?
def test_userinfo(self):
r = self.s.userinfo()
self.assertNotEqual(r, None)
userInfo = r['userInfo']
self.assertTrue(len(userInfo) >= 7)
self.assertEqual(userInfo['anonymous'], 0)
# WX6 returns an empty string for groups
if isinstance(userInfo['groups'], list):
self.assertTrue('users' in userInfo['groups'] or
'Everyone' in userInfo['groups'])
self.assertIn(userInfo['hostAccount'], [1, False])
self.assertEqual(userInfo['providedName'].split('\\')[-1].split('@')[0],
self.s._username.split('\\')[-1].split('@')[0])
# WX6 returns 'Windows' for providerName
self.assertIn(userInfo['providerName'], ['Active Directory', 'Windows', 'OAuth/External PAM'])
# WX6 returns the domain for uniqueId
self.assertTrue(userInfo['uniqueId'], self.s._username.split('@')[0])
self.assertTrue(userInfo['userId'], self.s._username.split('@')[0])
# NOTE: These don't work in optimized builds
# def test_getusers(self):
# r = self.s.getusers()
# self.assertNotEqual(r, None)
# # Can we do more here? How do we know what users are on the system?
#
# def test_getgroups(self):
# r = self.s.getgroups()
# self.assertNotEqual(r, None)
# # Can we do more here? How do we know what groups are on the system?
# Can't be done in unit test because it changes state:
# def test_shutdown(self):
# self.assertEqual(self.s.shutdown(), {})
def test_loop(self):
for i in range(5):
mytuple = ["Iteration", i]
out = self.s.builtins.echo(t=mytuple)
d = out
self.assertEqual(d['t'], mytuple)
time.sleep( 0.25 )
    def test_http(self):
        """httpAddress reports the protocol and port of the server monitor."""
        r = self.s.builtins.httpAddress()
        self.assertNotEqual(r, None)
        self.assertTrue(r['protocol'] in ['http', 'https'])
        if self.s._protocol in ['http', 'https']:
            # REST connections should report the same port we connected on
            # (CASPORT is provided by the test environment).
            self.assertEqual(str(int(r['port'])), os.environ['CASPORT'])
        # 02/20/2016: bosout: Documentation indicates the action should return virtualHost.
        # However, that is not being returned. Developers notified. Comment out until we know more.
        #self.assertNotEqual(r['virtualHost'], None)
    def test_ping(self):
        """ping returns an empty, severity-0 result."""
        # The .prm file indicates that ping is a dummy action and does nothing.
        # It says ping is used by UIs to poll for relevant changes like new caslibs.
        #
        r = self.s.builtins.ping()
        self.assertEqual(r, {})
        self.assertEqual(r.severity, 0)
        # Ping with an invalid argument
        # NOTE(review): `invalidArgument` is an undefined name, so the NameError
        # is raised client-side before any request is sent -- this never
        # exercises the server's parameter validation.  Confirm the intent.
        try:
            r = self.s.builtins.ping(invalidArgument)
        except NameError:
            pass
if __name__ == '__main__':
    # Run this module's tests with SWAT's test runner when executed directly.
    from swat.utils.testing import runtests
    runtests()
| 46.882051 | 203 | 0.613597 |
f86668c4ee0c15fab11ae35bde6ce652909e64c6 | 888 | py | Python | google/appengine/_internal/django/core/management/commands/runfcgi.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | google/appengine/_internal/django/core/management/commands/runfcgi.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | google/appengine/_internal/django/core/management/commands/runfcgi.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null |
from google.appengine._internal.django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that serves this project as a FastCGI application.

    Thin wrapper around Django's fastcgi server; per ``help``, requires the
    ``flup`` package at runtime.
    """
    help = "Runs this project as a FastCGI application. Requires flup."
    args = '[various KEY=val options, use `runfcgi help` for help]'
    def handle(self, *args, **options):
        # Imports are deferred to call time so merely loading the command
        # does not trigger settings configuration.
        from google.appengine._internal.django.conf import settings
        from google.appengine._internal.django.utils import translation
        # Activate the current language, because it won't get activated later.
        try:
            translation.activate(settings.LANGUAGE_CODE)
        except AttributeError:
            # Settings without LANGUAGE_CODE: run without activating a language.
            pass
        from google.appengine._internal.django.core.servers.fastcgi import runfastcgi
        runfastcgi(args)
    def usage(self, subcommand):
        # Delegate usage text to the canonical FastCGI help string.
        from google.appengine._internal.django.core.servers.fastcgi import FASTCGI_HELP
        return FASTCGI_HELP
| 40.363636 | 87 | 0.716216 |
57fec792a226161ce9cebf3e4e65267bcc8cf1fa | 1,268 | py | Python | src/modules/deq.py | murata7oki/deq-jax | bfd88c694758769da1656b16c2e6ba8ff05e267c | [
"Apache-2.0"
] | 27 | 2020-10-07T07:36:29.000Z | 2022-01-01T20:57:15.000Z | src/modules/deq.py | murata7oki/deq-jax | bfd88c694758769da1656b16c2e6ba8ff05e267c | [
"Apache-2.0"
] | 1 | 2020-12-10T17:42:40.000Z | 2020-12-12T14:46:39.000Z | src/modules/deq.py | murata7oki/deq-jax | bfd88c694758769da1656b16c2e6ba8ff05e267c | [
"Apache-2.0"
] | 1 | 2021-06-11T07:08:04.000Z | 2021-06-11T07:08:04.000Z | from typing import Callable
import jax.numpy as jnp
from src.modules.rootfind import rootfind, rootfind_grad
def deq(params: dict, rng, z: jnp.ndarray, fun: Callable, max_iter: int, *args) -> jnp.ndarray:
    """
    Apply Deep Equilibrium Network to haiku function.
    :param params: params for haiku function
    :param rng: rng for init and apply of haiku function
    :param z: initial guess for the broyden root-finding method
    :param fun: func to apply in the deep equilibrium limit, f(params, rng, x, *args)
        and only a function of JAX primatives (e.g can not be passed bool)
    :param max_iter: maximum number of iterations for the broyden method
    :param args: all other JAX primatives which must be passed to the function
    :return: z_star: equilibrium hidden state s.t lim_{i->inf}fun(z_i) = z_star
    """
    # define equilibrium eq (f(z)-z); a root of g is a fixed point of fun
    def g(_params, _rng, _x, *args): return fun(_params, _rng, _x, *args) - _x
    # find equilibrium point
    z_star = rootfind(g, max_iter, params, rng, z, *args)
    # set up correct graph for chain rule (bk pass)
    # in original implementation this is run only if in_training
    z_star = fun(params, rng, z_star, *args)
    z_star = rootfind_grad(g, max_iter, params, rng, z_star, *args)
    return z_star
cdcc249cb26866ff2c3fd41a7a1f045fc7305eec | 5,445 | py | Python | Classes/MachineSCIM.py | magnetron/pyleecan | 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | [
"Apache-2.0"
] | 1 | 2021-02-26T12:28:45.000Z | 2021-02-26T12:28:45.000Z | Classes/MachineSCIM.py | magnetron/pyleecan | 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | [
"Apache-2.0"
] | null | null | null | Classes/MachineSCIM.py | magnetron/pyleecan | 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""File generated according to pyleecan/Generator/ClassesRef/Machine/MachineSCIM.csv
WARNING! All changes made in this file will be lost!
"""
from os import linesep
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from .MachineDFIM import MachineDFIM
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Machine.MachineSCIM.check import check
except ImportError as error:
check = error
try:
from ..Methods.Machine.MachineSCIM.get_machine_type import get_machine_type
except ImportError as error:
get_machine_type = error
from ._check import InitUnKnowClassError
from .LamSlotWind import LamSlotWind
from .Frame import Frame
from .Shaft import Shaft
class MachineSCIM(MachineDFIM):
    """Squirrel Cage Induction Machine"""
    # NOTE: this class lives in a generated file (see the header warning);
    # manual changes are overwritten by the pyleecan code generator.
    VERSION = 1
    # Check ImportError to remove unnecessary dependencies in unused method
    # cf Methods.Machine.MachineSCIM.check
    if isinstance(check, ImportError):
        check = property(
            fget=lambda x: raise_(
                ImportError("Can't use MachineSCIM method check: " + str(check))
            )
        )
    else:
        check = check
    # cf Methods.Machine.MachineSCIM.get_machine_type
    if isinstance(get_machine_type, ImportError):
        get_machine_type = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MachineSCIM method get_machine_type: "
                    + str(get_machine_type)
                )
            )
        )
    else:
        get_machine_type = get_machine_type
    # save method is available in all object
    save = save
    # get_logger method is available in all object
    get_logger = get_logger
    def __init__(
        self,
        rotor=-1,
        stator=-1,
        frame=-1,
        shaft=-1,
        name="default_machine",
        desc="",
        type_machine=1,
        logger_name="Pyleecan.Machine",
        init_dict=None,
    ):
        """Constructor of the class. Can be used in two ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value
            for Matrix, None will initialise the property with an empty Matrix
            for pyleecan type, None will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with every property as key
        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""
        # -1 is the "use the default sub-object" sentinel for pyleecan parts.
        if rotor == -1:
            rotor = LamSlotWind()
        if stator == -1:
            stator = LamSlotWind()
        if frame == -1:
            frame = Frame()
        if shaft == -1:
            shaft = Shaft()
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "rotor" in list(init_dict.keys()):
                rotor = init_dict["rotor"]
            if "stator" in list(init_dict.keys()):
                stator = init_dict["stator"]
            if "frame" in list(init_dict.keys()):
                frame = init_dict["frame"]
            if "shaft" in list(init_dict.keys()):
                shaft = init_dict["shaft"]
            if "name" in list(init_dict.keys()):
                name = init_dict["name"]
            if "desc" in list(init_dict.keys()):
                desc = init_dict["desc"]
            if "type_machine" in list(init_dict.keys()):
                type_machine = init_dict["type_machine"]
            if "logger_name" in list(init_dict.keys()):
                logger_name = init_dict["logger_name"]
        # Initialisation by argument
        # Call MachineDFIM init
        super(MachineSCIM, self).__init__(
            rotor=rotor,
            stator=stator,
            frame=frame,
            shaft=shaft,
            name=name,
            desc=desc,
            type_machine=type_machine,
            logger_name=logger_name,
        )
        # The class is frozen (in MachineDFIM init), for now it's impossible to
        # add new properties
    def __str__(self):
        """Convert this object in a readable string (for print)"""
        MachineSCIM_str = ""
        # Get the properties inherited from MachineDFIM
        MachineSCIM_str += super(MachineSCIM, self).__str__()
        return MachineSCIM_str
    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        if type(other) != type(self):
            return False
        # Check the properties inherited from MachineDFIM
        if not super(MachineSCIM, self).__eq__(other):
            return False
        return True
    def as_dict(self):
        """Convert this object in a json serializable dict (can be used in __init__)
        """
        # Get the properties inherited from MachineDFIM
        MachineSCIM_dict = super(MachineSCIM, self).as_dict()
        # The class name is added to the dict for deserialisation purpose
        # Overwrite the mother class name
        MachineSCIM_dict["__class__"] = "MachineSCIM"
        return MachineSCIM_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        # Set to None the properties inherited from MachineDFIM
        super(MachineSCIM, self)._set_None()
| 33.404908 | 88 | 0.613039 |
5d4dd3d640392cdded4aebc9a1979ff27277aba8 | 1,501 | py | Python | tests/interactive/image/depth_save.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | 15 | 2015-01-21T12:29:01.000Z | 2018-12-09T09:17:33.000Z | tests/interactive/image/depth_save.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | null | null | null | tests/interactive/image/depth_save.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | 9 | 2015-12-12T09:12:46.000Z | 2021-12-26T13:29:14.000Z | """Test depth buffer save.
A scene consisting of a single coloured triangle will be rendered. The
depth buffer will then be saved to a stream and loaded as a texture.
You will see the original scene first for up to several seconds before the
depth buffer image appears (because retrieving and saving the image is
a slow operation). Messages will be printed to stdout indicating
what stage is occuring.
"""
import unittest
from . import base_save
from pyglet.gl import *
from pyglet import image
from pyglet.compat import BytesIO
class TEST_DEPTH_SAVE(base_save.TestSave):
    """Render a depth-tested triangle, save the depth buffer, reload it as a texture."""
    # No alpha channel in the saved depth image.
    alpha = False
    def draw_original(self):
        """Draw a single colored triangle spanning the depth range [-1, 1]."""
        glClear(GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)
        glBegin(GL_TRIANGLES)
        glColor4f(1, 0, 0, 1)
        glVertex3f(0, 0, -1)
        glColor4f(0, 1, 0, 1)
        glVertex3f(200, 0, 0)
        glColor4f(0, 0, 1, 1)
        glVertex3f(0, 200, 1)
        glEnd()
        # Restore state so subsequent draws are unaffected.
        glDisable(GL_DEPTH_TEST)
        glColor4f(1, 1, 1, 1)
    def load_texture(self):
        """Round-trip the depth buffer through an in-memory PNG into a texture."""
        print('Drawing scene...')
        self.window.set_visible()
        self.window.dispatch_events()
        self.draw()
        print('Saving depth image...')
        img = image.get_buffer_manager().get_depth_buffer()
        file = BytesIO()
        # The filename only selects the encoder; the bytes go to the stream.
        img.save('buffer.png', file)
        print('Loading depth image as texture...')
        file.seek(0)
        self.saved_texture = image.load('buffer.png', file)
        print('Done.')
        self.window.set_visible(False)
f9aea89dd848d9354806e9240c8d37f46b9de498 | 4,917 | py | Python | backend/assertion_to_z3.py | uwplse/stng | ce12c2c079516df873382a5aa3c18c407833d130 | [
"MIT"
] | 14 | 2017-03-07T00:14:33.000Z | 2022-02-09T00:59:22.000Z | backend/assertion_to_z3.py | uwplse/stng | ce12c2c079516df873382a5aa3c18c407833d130 | [
"MIT"
] | 11 | 2016-11-22T13:14:55.000Z | 2021-12-14T00:56:51.000Z | backend/assertion_to_z3.py | uwplse/stng | ce12c2c079516df873382a5aa3c18c407833d130 | [
"MIT"
] | 6 | 2016-11-07T13:38:45.000Z | 2021-04-04T12:13:31.000Z | from stencil_ir import *
import asp.codegen.ast_tools as ast_tools
import os, binascii
import logging
from backend_halide import IndexResugar
class ToZ3(ast_tools.NodeVisitor):
    """
    Converts a set of assertions into Z3 syntax.
    """
    def __init__(self, tree, loopvars=None, additional_conds=None, convert_floats=False, funcdict=None, inputs=None):
        """Store the assertion tree and the translation context.

        :param tree: assertion IR tree to translate.
        :param loopvars: loop-variable names; defaults to ["i"].
        :param additional_conds: extra assertion trees appended to the output.
        :param convert_floats: if True, decode "__float__"-encoded variable names.
        :param funcdict: mapping passed to IndexResugar (defaults to {}).
        :param inputs: input names passed to IndexResugar (defaults to []).
        """
        # FIX: funcdict/inputs previously used mutable default arguments
        # ({} and []), which are shared across every instance; use None
        # sentinels instead.  Defaults behave identically to before.
        self.tree = tree
        self.convert_floats = convert_floats
        if loopvars:
            self.loopvars = loopvars
        else:
            self.loopvars = ["i"]
        if additional_conds:
            self.additional_conds = additional_conds
        else:
            self.additional_conds = []
        self.funcdict = funcdict if funcdict is not None else {}
        logging.debug("Func dict is %s", self.funcdict)
        self.inputs = inputs if inputs is not None else []
        # FIX: previously only set as a side effect of visit_ArrExp;
        # initialize here so it always exists.
        self.should_reverse = None
    def to_str(self):
        """Translate the main tree plus any additional conditions to one string."""
        return self.visit(self.tree) + '\n'.join([self.visit(x) for x in self.additional_conds])
    def visit_NumNode(self, node):
        return str(node.val)
    def visit_VarNode(self, node):
        # "__float__1_5" style names encode float literals; decode when asked.
        if self.convert_floats and "__float__" in node.name:
            import re
            num = re.sub("__float__", "", node.name)
            num = re.sub("_", ".", num)
            logging.debug("Converting %s to %s", node.name, float(num))
            return str(float(num))
        return node.name
    def visit_BinExp(self, node):
        # Map C-style logical operators to their Z3 (s-expression) names.
        translate_op = {"&&":"and", "||":"or"}
        if node.op in translate_op.keys():
            op = translate_op[node.op]
        else:
            op = node.op
        return "(%s %s %s)" % (op, self.visit(node.left), self.visit(node.right))
    def visit_CallExp(self, node):
        return "(%s %s)" % (self.visit(node.fname), ' '.join([self.visit(x) for x in node.params]))
    def visit_NotExp(self, node):
        return "(not %s)" % (self.visit(node.ex))
    def visit_ArrExp(self, node):
        def in2pre(m):
            """Only handles t +|-|*|/ n (infix to prefix for one binary op)."""
            import re
            splitted = re.split('(\+|\-|\*|\/)', m)
            if len(splitted) > 1:
                return "(%s %s %s)" % (splitted[1].strip(), splitted[0].strip(), splitted[2].strip())
            else:
                return m
        def construct_select(terms):
            # Nest selects for multi-dimensional array access.
            if len(terms) == 2:
                return "(select %s %s)" % (terms[0], terms[1])
            else:
                return "(select %s %s)" % (construct_select(terms[:-1]), terms[-1])
        logging.debug("inputs: %s", self.inputs)
        logging.debug("funcdict %s", self.funcdict)
        logging.debug("loopvars %s", self.loopvars)
        logging.debug("node: %s", tree_to_str(node))
        # Side effect: remember whether the resugared index order is reversed.
        idx_expression,self.should_reverse = IndexResugar(self.funcdict, self.loopvars, self.inputs).resugar(node.name.name, node.loc)
        logging.debug("idx expression is: %s", idx_expression)
        return construct_select([self.visit(node.name)] + map(in2pre, idx_expression.split(',')))
    def visit_AugArrayVarNode(self, node):
        # helper to turn into nested store/select statements
        def construct_select(terms):
            if len(terms) == 2:
                return "(select %s %s)" % (terms[0], terms[1])
            else:
                return "(select %s %s)" % (construct_select(terms[:-1]), terms[-1])
        def nester(m, aug):
            if len(m)==2:
                return "(store %s %s %s)" % (m[0], m[1], aug)
            else:
                logging.debug("Stuff to concat: %s and %s", m[:2], m[2:])
                return "(store %s %s %s)" % (m[0], m[1], nester([construct_select(m[:2])]+m[2:], aug))
        # helper to recursively add augmentations
        def helper(node, aug_keys, cur_idx):
            if cur_idx == len(aug_keys)-1:
                name = self.visit(node.name)
                idx_expression,should_reverse = IndexResugar(self.funcdict, self.loopvars, self.inputs).resugar(name, aug_keys[cur_idx])
                return nester([name]+idx_expression.split(','), self.visit(node.augmentation[aug_keys[cur_idx]]))
            else:
                name = helper(node, aug_keys, cur_idx+1)
                a = self.visit(aug_keys[cur_idx])
                b = self.visit(node.augmentation[aug_keys[cur_idx]])
                return "(store %s %s %s)" % (name, a, b)
        return helper(node, node.augmentation.keys(), 0)
    def visit_ImplicationExp(self, node):
        # we need to change loopvar to loopvar_valp for the postcondition clause,
        # due to the way the Z3 script is being structured
        # NOTE(review): PostconFinder is currently unused; kept for the
        # intended loopvar renaming described above -- confirm before removing.
        class PostconFinder(ast_tools.NodeVisitor):
            def __init__(self):
                self.found = False
            def find(self, n):
                self.visit(n)
                return self.found
            def visit_CallExp(self, n):
                if n.fname.name == "postcondition":
                    self.found = True
        ifx = node.ifx
        return "(=> %s %s)\n" % (self.visit(ifx), self.visit(node.then))
| 39.653226 | 136 | 0.560911 |
74b530d5fe5b7a669f58ed5d54839ac3735b2ea6 | 1,017 | py | Python | pipeline/scripts/concat_all_nb_of_records_removed_with_mapq_sam_records_filter_files.py | iqbal-lab-org/pandora_paper_roc | bb21c76faefa8021c86c3be9d77b8b5999fe2ef5 | [
"MIT"
] | null | null | null | pipeline/scripts/concat_all_nb_of_records_removed_with_mapq_sam_records_filter_files.py | iqbal-lab-org/pandora_paper_roc | bb21c76faefa8021c86c3be9d77b8b5999fe2ef5 | [
"MIT"
] | null | null | null | pipeline/scripts/concat_all_nb_of_records_removed_with_mapq_sam_records_filter_files.py | iqbal-lab-org/pandora_paper_roc | bb21c76faefa8021c86c3be9d77b8b5999fe2ef5 | [
"MIT"
] | 2 | 2020-11-04T18:15:43.000Z | 2020-11-06T01:38:08.000Z | from utils import get_concatenated_df
# setup
# Inputs/outputs are injected by the snakemake rule that runs this script
# via the magic `snakemake` object.
all_nb_of_records_removed_with_mapq_sam_records_filter_files_for_precision = snakemake.input.all_nb_of_records_removed_with_mapq_sam_records_filter_files_for_precision
nb_of_records_removed_with_mapq_sam_records_filter_for_precision_filepath = snakemake.output.nb_of_records_removed_with_mapq_sam_records_filter_for_precision_filepath
# read
# Concatenate the per-input CSVs, keeping only the MAPQ-filter statistics columns.
all_dfs = get_concatenated_df(all_nb_of_records_removed_with_mapq_sam_records_filter_files_for_precision,separator=",",
                              fields_to_keep=["tool", "nb_of_records_before_mapq_sam_records_filter",
                                              "nb_of_records_after_mapq_sam_records_filter",
                                              "nb_of_records_removed_with_mapq_sam_records_filter",
                                              "nb_of_records_removed_with_mapq_sam_records_filter_proportion"])
# output
all_dfs.to_csv(nb_of_records_removed_with_mapq_sam_records_filter_for_precision_filepath)
| 63.5625 | 167 | 0.769912 |
76a6e9de0283f4c0ed8c7fe1458c93cfcadbc28c | 10,760 | py | Python | src/dependenpy/structures.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
] | null | null | null | src/dependenpy/structures.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
] | null | null | null | src/dependenpy/structures.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
] | null | null | null | # -*- coding: utf-8 -*-
"""dependenpy structures module."""
import json
from .helpers import PrintMixin
class Matrix(PrintMixin):
    """
    Matrix class.
    A class to build a matrix given a list of nodes. After instantiation,
    it has two attributes: data, a 2-dimensions array, and keys, the names
    of the entities in the corresponding order.
    """
    def __init__(self, *nodes, depth=0):
        """
        Initialization method.
        Args:
            *nodes (list of DSM/Package/Module):
                the nodes on which to build the matrix.
            depth (int): the depth of the matrix. This depth is always
                absolute, meaning that building a matrix with a sub-package
                "A.B.C" and a depth of 1 will return a matrix of size 1,
                containing A only. To see the matrix for the sub-modules and
                sub-packages in C, you will have to give depth=4.
        """
        # Flatten the given nodes down to their modules.
        modules = []
        for node in nodes:
            if node.ismodule:
                modules.append(node)
            elif node.ispackage or node.isdsm:
                modules.extend(node.submodules)
        if depth < 1:
            keys = modules
        else:
            # Collapse modules deeper than `depth` into their ancestor package
            # at that depth (stopping early at one of the given nodes).
            keys = []
            for m in modules:
                if m.depth <= depth:
                    keys.append(m)
                    continue
                package = m.package
                while package.depth > depth and package.package and package not in nodes:
                    package = package.package
                if package not in keys:
                    keys.append(package)
        size = len(keys)
        data = [[0 for _ in range(size)] for __ in range(size)]
        keys = sorted(keys, key=lambda k: k.absolute_name())
        if depth < 1:
            # Module-level matrix: tag each module with its row/column index...
            for i, k in enumerate(keys):
                k.index = i
            # ...then count each internal dependency into the matching cell.
            for i, k in enumerate(keys):
                for d in k.dependencies:
                    if d.external:
                        continue
                    if d.target.ismodule and d.target in keys:
                        data[i][d.target.index] += 1
                    elif d.target.ispackage:
                        # A dependency on a package counts against its __init__.
                        m = d.target.get("__init__")
                        if m is not None and m in keys:
                            data[i][m.index] += 1
        else:
            # Collapsed matrix: let each node count its dependencies itself.
            for i, k in enumerate(keys):
                for j, l in enumerate(keys):
                    data[i][j] = k.cardinal(to=l)
        self.size = size
        self.keys = [k.absolute_name() for k in keys]
        self.data = data
    @staticmethod
    def cast(keys, data):
        """Cast a set of keys and an array to a Matrix object."""
        matrix = Matrix()
        matrix.keys = keys
        matrix.data = data
        return matrix
    @property
    def total(self):
        """Return the total number of dependencies within this matrix."""
        return sum(j for i in self.data for j in i)
    def _to_csv(self, **kwargs):
        # One header row, then one row of counts per key.
        text = ["module,", ",".join(self.keys), "\n"]
        for i, k in enumerate(self.keys):
            text.append("%s,%s\n" % (k, ",".join(map(str, self.data[i]))))
        return "".join(text)
    def _to_json(self, **kwargs):
        # kwargs are forwarded to json.dumps (e.g. indent).
        return json.dumps({"keys": self.keys, "data": self.data}, **kwargs)
    def _to_text(self, **kwargs):
        """Render the matrix as an aligned ASCII table."""
        if not self.keys or not self.data:
            return ""
        # Column widths derived from the longest key, the widest count,
        # and the number of digits in the largest index.
        max_key_length = max(len(k) for k in self.keys)
        max_dep_length = len(str(max(j for i in self.data for j in i)))
        key_col_length = len(str(len(self.keys)))
        key_line_length = max(key_col_length, 2)
        column_length = max(key_col_length, max_dep_length)
        # first line left headers
        text = [("\n {:>%s} | {:>%s} ||" % (max_key_length, key_line_length)).format("Module", "Id")]
        # first line column headers
        for i, _ in enumerate(self.keys):
            text.append(("{:^%s}|" % column_length).format(i))
        text.append("\n")
        # line of dashes
        text.append((" %s-+-%s-++" % ("-" * max_key_length, "-" * key_line_length)))
        for i, _ in enumerate(self.keys):
            text.append("%s+" % ("-" * column_length))
        text.append("\n")
        # lines
        for i, k in enumerate(self.keys):
            text.append((" {:>%s} | {:>%s} ||" % (max_key_length, key_line_length)).format(k, i))
            for v in self.data[i]:
                text.append(("{:>%s}|" % column_length).format(v))
            text.append("\n")
        text.append("\n")
        return "".join(text)
class TreeMap(PrintMixin):
    """TreeMap class."""
    # NOTE(review): work in progress -- the treemap construction below is
    # commented out, the `data`/`keys` parameters are currently unused, and
    # the _to_* renderers are stubs returning empty strings.
    def __init__(self, *nodes, value=-1, data=None, keys=None):
        """
        Initialization method.
        Arguments:
            *nodes (list of Node): the nodes from which to build the treemap.
        """
        # if nodes:
        #     matrix_lower_level = Matrix(*nodes, depth=2)
        #     matrix_current_level = Matrix(*nodes, depth=1)
        #     if value == -1:
        #         value = sum(c for row in matrix_current_level.data for c in row)
        #     splits = [0]
        #     key_comp = matrix_lower_level.keys[0].split('.')[0]
        #     i = 1
        #     for key in matrix_lower_level.keys[1:]:
        #         key = key.split('.')[0]
        #         if key != key_comp:
        #             splits.append(i)
        #             key_comp = key
        #         i += 1
        #     splits.append(i)
        #
        #     self.data = []
        #     for i in range(len(splits) - 1):
        #         self.data.append([])
        #         rows = matrix_lower_level.data[splits[i]:splits[i+1]]
        #         for j in range(len(splits) - 1):
        #             self.data[i].append([row[splits[j]:splits[j+1]] for row in rows])
        self.value = value
    def _to_csv(self, **kwargs):
        return ""
    def _to_json(self, **kwargs):
        return ""
    def _to_text(self, **kwargs):
        return ""
class Vertex(object):
    """A named graph node that tracks its incoming and outgoing edges."""
    def __init__(self, name):
        """
        Initialization method.
        Args:
            name (str): name of the vertex.
        """
        self.name = name
        self.edges_in = set()
        self.edges_out = set()
    def __str__(self):
        return self.name
    def connect_to(self, vertex, weight=1):
        """
        Connect this vertex to another one.
        Args:
            vertex (Vertex): vertex to connect to.
            weight (int): weight of the edge.
        Returns:
            Edge: the existing edge to ``vertex`` if one is already
                present, otherwise a newly created edge.
        """
        existing = next(
            (edge for edge in self.edges_out if edge.vertex_in == vertex), None)
        if existing is not None:
            return existing
        return Edge(self, vertex, weight)
    def connect_from(self, vertex, weight=1):
        """
        Connect another vertex to this one.
        Args:
            vertex (Vertex): vertex to connect from.
            weight (int): weight of the edge.
        Returns:
            Edge: the existing edge from ``vertex`` if one is already
                present, otherwise a newly created edge.
        """
        existing = next(
            (edge for edge in self.edges_in if edge.vertex_out == vertex), None)
        if existing is not None:
            return existing
        return Edge(vertex, self, weight)
class Edge(object):
    """A weighted, directed edge between two vertices. Used in Graph class."""

    def __init__(self, vertex_out, vertex_in, weight=1):
        """
        Initialization method.

        Args:
            vertex_out (Vertex): source vertex (edge going out).
            vertex_in (Vertex): target vertex (edge going in).
            weight (int): weight of the edge.
        """
        # Endpoints start unset so that go_from/go_in skip the
        # "detach from previous vertex" step on first attachment.
        self.vertex_out = None
        self.vertex_in = None
        self.weight = weight
        self.go_from(vertex_out)
        self.go_in(vertex_in)

    def __str__(self):
        return "%s --%d--> %s" % (self.vertex_out.name, self.weight, self.vertex_in.name)

    def go_from(self, vertex):
        """
        Re-anchor the edge so it now leaves from *vertex*.

        Args:
            vertex (Vertex): vertex to go from.
        """
        previous = self.vertex_out
        if previous:
            previous.edges_out.remove(self)
        self.vertex_out = vertex
        vertex.edges_out.add(self)

    def go_in(self, vertex):
        """
        Re-anchor the edge so it now arrives into *vertex*.

        Args:
            vertex (Vertex): vertex to go into.
        """
        previous = self.vertex_in
        if previous:
            previous.edges_in.remove(self)
        self.vertex_in = vertex
        vertex.edges_in.add(self)
class Graph(PrintMixin):
    """
    Graph class.

    Builds a directed, weighted graph from a list of nodes through an
    intermediary Matrix. After instantiation it has two attributes:
    ``vertices``, the set of nodes, and ``edges``, the set of edges.
    """

    def __init__(self, *nodes, depth=0):
        """
        Initialization method.

        An intermediary matrix is built to ease the creation of the graph.

        Args:
            *nodes (list of DSM/Package/Module):
                the nodes on which to build the graph.
            depth (int): the depth of the intermediary matrix. See
                the documentation for Matrix class.
        """
        self.edges = set()
        matrix = Matrix(*nodes, depth=depth)
        vertices = [Vertex(key) for key in matrix.keys]
        # Every strictly positive matrix cell becomes a weighted edge
        # from the row vertex to the column vertex.
        for row_index, row in enumerate(matrix.data):
            for col_index, weight in enumerate(row):
                if weight > 0:
                    self.edges.add(Edge(vertices[row_index], vertices[col_index], weight=weight))
        self.vertices = set(vertices)

    def _to_csv(self, **kwargs):
        header = kwargs.pop("header", True)
        rows = ["vertex_out,edge_weight,vertex_in\n" if header else ""]
        rows.extend(
            "%s,%s,%s\n" % (edge.vertex_out.name, edge.weight, edge.vertex_in.name)
            for edge in self.edges
        )
        # Isolated vertices still get a row so no node is lost in the export.
        rows.extend(
            "%s,,\n" % vertex.name
            for vertex in self.vertices
            if not (vertex.edges_out or vertex.edges_in)
        )
        return "".join(rows)

    def _to_json(self, **kwargs):
        payload = {
            "vertices": [vertex.name for vertex in self.vertices],
            "edges": [
                {"out": edge.vertex_out.name, "weight": edge.weight, "in": edge.vertex_in.name}
                for edge in self.edges
            ],
        }
        return json.dumps(payload, **kwargs)

    def _to_text(self, **kwargs):
        # Text serialization is not implemented for graphs.
        return ""
def split_array(mdata, splits):
    """Cut the 2-D array *mdata* into a grid of sub-matrices.

    Rows are grouped by consecutive *splits* boundaries. Each row group
    receives one column slice per boundary pair, plus a trailing slice
    holding everything after the last boundary.

    Args:
        mdata: a list of rows (each row an indexable sequence).
        splits: increasing list of boundary indices.

    Returns:
        list: one entry per row group, each a list of cell matrices.
    """
    boundaries = list(zip(splits, splits[1:]))
    grid = []
    for row_start, row_stop in boundaries:
        rows = mdata[row_start:row_stop]
        cells = [[row[col_start:col_stop] for row in rows]
                 for col_start, col_stop in boundaries]
        # Trailing slice: columns beyond the last declared boundary.
        cells.append([row[splits[-1]:] for row in rows])
        grid.append(cells)
    return grid
| 31.647059 | 101 | 0.522119 |
4e5e7f4732bbf28587d97ccccc0fe93ad000c197 | 1,860 | py | Python | bismite/config.py | Libera-Chat/bismite | 8d725cdbbf3c847c3a2fcc764f96e4db0e78801e | [
"MIT"
] | 9 | 2021-06-15T18:24:21.000Z | 2022-02-01T15:25:18.000Z | bismite/config.py | Libera-Chat/maskwatch-bot | 8d725cdbbf3c847c3a2fcc764f96e4db0e78801e | [
"MIT"
] | 28 | 2021-06-03T21:20:54.000Z | 2021-06-14T23:05:09.000Z | bismite/config.py | Libera-Chat/bismite | 8d725cdbbf3c847c3a2fcc764f96e4db0e78801e | [
"MIT"
] | 1 | 2021-10-29T20:39:10.000Z | 2021-10-29T20:39:10.000Z | from dataclasses import dataclass
from os.path import expanduser
from re import compile as re_compile
from typing import Dict, List, Optional, Pattern, Tuple
import yaml
@dataclass
class Config(object):
    """Runtime configuration for the bot, built from a YAML file by load()."""
    # (hostname, port, use_tls) — parsed from the "host:port" / "host:+port" form.
    server: Tuple[str, int, bool]
    nickname: str
    username: str
    realname: str
    password: str
    # Anti-idle behaviour toggle — consumed elsewhere; semantics not visible here.
    antiidle: bool
    channel: str
    history: int
    # Filesystem path to the database (already expanduser()'d by load()).
    database: str
    # (sasl_username, sasl_password)
    sasl: Tuple[str, str]
    # (oper_name, oper_password, optional path to a challenge/key file)
    oper: Tuple[str, str, Optional[str]]
    bancmd: str
    # Compiled regexes matching client connect / exit / nick-change notices.
    cliconnre: Pattern
    cliexitre: Pattern
    clinickre: Pattern
def load(filepath: str):
    """
    Read a YAML configuration file and build a :class:`Config` from it.

    Args:
        filepath (str): path to the YAML configuration file.

    Returns:
        Config: the populated configuration object.
    """
    with open(filepath) as config_file:
        raw = yaml.safe_load(config_file.read())

    nickname = raw["nickname"]

    # "host:port" connects in plaintext; "host:+port" enables TLS.
    hostname, port_text = raw["server"].split(":", 1)
    tls = port_text.startswith("+")
    if tls:
        port_text = port_text.lstrip("+")
    port = int(port_text)

    oper_config = raw["oper"]
    oper_name = oper_config["name"]
    oper_pass = oper_config["pass"]
    oper_file: Optional[str] = None
    if "file" in oper_config:
        oper_file = expanduser(oper_config["file"])

    return Config(
        (hostname, port, tls),
        nickname,
        # username/realname default to the nickname when omitted.
        raw.get("username", nickname),
        raw.get("realname", nickname),
        raw["password"],
        raw["antiidle"],
        raw["channel"],
        raw["history"],
        expanduser(raw["database"]),
        (raw["sasl"]["username"], raw["sasl"]["password"]),
        (oper_name, oper_pass, oper_file),
        raw["bancmd"],
        re_compile(raw["cliconnre"]),
        re_compile(raw["cliexitre"]),
        re_compile(raw["clinickre"]),
    )
| 26.571429 | 75 | 0.622581 |
1066d9870aa0d3c715848098ba489fee05e4a886 | 1,956 | py | Python | time_tracker.py | KJoke70/i3-tools | bb4bb817e8e1c1d58aa2ef43d67dc6c32eb5753d | [
"BSD-3-Clause"
] | 26 | 2018-04-28T18:15:11.000Z | 2022-02-19T12:23:07.000Z | time_tracker.py | KJoke70/i3-tools | bb4bb817e8e1c1d58aa2ef43d67dc6c32eb5753d | [
"BSD-3-Clause"
] | 2 | 2018-12-05T17:35:50.000Z | 2020-02-16T13:50:29.000Z | time_tracker.py | KJoke70/i3-tools | bb4bb817e8e1c1d58aa2ef43d67dc6c32eb5753d | [
"BSD-3-Clause"
] | 1 | 2019-11-06T06:35:45.000Z | 2019-11-06T06:35:45.000Z | #!/usr/bin/env python3
#import sys
import os
import time
#import argparse
import configparser
import i3ipc
#import csv
import json
import socket #ipc
import threading
import subprocess
total_time = 0
current_time = 0
## Needs to track
## window::focus
## window::new
## window::close
## window::title
# TODO check focus bug with floating
def main():
    """Connect to i3, subscribe to window events and run the event loop.

    Planned behaviour (roadmap kept from the original author):
    # 1. arguments:
    #    - path to config
    # 2. check backup_dir for file of same day
    #    - load backup, if from same day
    #    - otherwise, start anew
    # 3. create notification threads to notify upon time x (specified in config)
    #    - class, instance, name, role, mark have reached time x
    #    - take break, drink, etc (config)
    #    + every x min (when active for x, TODO DPMS ??), only start next
    #      round, after notification was clicked
    #    + specific times
    # 4. count time for currently focused
    # 5. stop counting once focus changes -> change count
    # 6. save count in dictionary
    # 7. backup save every 5 minutes (or set in config)
    # 8. send info to IPC calls # TODO how??
    """
    # Initialize to None so the finally-block can tell whether the
    # connection was ever established (avoids a NameError that previously
    # masked the real error when i3ipc.Connection() itself failed).
    i3 = None
    try:
        # Bug fix: was `i3ipc.Connction()` (typo) which raised
        # AttributeError at runtime.
        i3 = i3ipc.Connection()
        i3.on("window::focus", on_window_focus)
        i3.on("window::new", on_window_new)
        i3.on("window::title", on_window_title)
        i3.on("window::close", on_window_close)
        i3.main()
    except Exception as e:
        print(e)
    finally:
        if i3 is not None:
            i3.main_quit()
        # save to file (race condition ?)
def on_window_focus(a, b):
    # TODO stub: start/stop timing when focus changes. Parameters are
    # presumably (connection, event) as delivered by i3ipc — confirm
    # against the i3ipc-python docs before implementing.
    pass
def on_window_new(a, b):
    # TODO stub: react to a new window being created.
    pass
def on_window_close(a, b):
    # TODO stub: react to a window being closed (finalize its count).
    pass
def on_window_title(a, b):
    # TODO stub: react to a window title change (re-key the current count).
    pass
def backup_data(data, path):
    """Backup *data* to *path*. Meant to be called periodically."""
    pass # TODO
def load_backup(path):
    """Load data from the backup at *path*.

    Checks whether the file is from the same day; if not, returns empty data.
    """
    pass # TODO
if __name__ == "__main__":
main()
| 18.45283 | 80 | 0.625256 |
8ff1639ed0870530e39aaede4d5b8a19c5820406 | 523 | py | Python | agtool/utils/path_utils.py | swyo/agtool | 136d09b20785196ff90e97cfb3c7717df47e7ff5 | [
"MIT"
] | null | null | null | agtool/utils/path_utils.py | swyo/agtool | 136d09b20785196ff90e97cfb3c7717df47e7ff5 | [
"MIT"
] | 2 | 2021-12-18T09:26:01.000Z | 2021-12-21T13:56:24.000Z | agtool/utils/path_utils.py | swyo/agtool | 136d09b20785196ff90e97cfb3c7717df47e7ff5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Description
===========
Helper functions are defined in this module.
"""
import os.path as osp
def module_path(module_name=''):
    """Return the absolute path of *module_name* inside this package.

    With no argument (or an empty string) the package root directory
    itself is returned.

    Args:
        module_name: Name of a module (or sub-path) relative to the
            package root.

    Example:
        >>> module_path()  # doctest: +SKIP
        '/path/to/package/root'
    """
    # The package root is two levels above this file (utils/ -> package/).
    package_root = osp.dirname(osp.dirname(osp.abspath(__file__)))
    return osp.join(package_root, module_name) if module_name else package_root
| 20.92 | 61 | 0.617591 |
1876c5ad386c07738253f7c427df6565b618fb2e | 6,010 | py | Python | pyscf/solvent/ddpcm.py | fdmalone/pyscf | 021b17ac721e292b277d2b740e2ff8ab38bb6a4a | [
"Apache-2.0"
] | null | null | null | pyscf/solvent/ddpcm.py | fdmalone/pyscf | 021b17ac721e292b277d2b740e2ff8ab38bb6a4a | [
"Apache-2.0"
] | null | null | null | pyscf/solvent/ddpcm.py | fdmalone/pyscf | 021b17ac721e292b277d2b740e2ff8ab38bb6a4a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
domain decomposition PCM (In testing)
See also
JCP, 144, 054101
JCP, 144, 160901
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import gto
from pyscf import df
from pyscf.dft import gen_grid, numint
from pyscf.data import radii
from pyscf.solvent import ddcosmo
from pyscf.symm import sph
def ddpcm_for_scf(mf, pcmobj=None):
    """Attach a ddPCM solvent model to the mean-field object *mf*.

    Args:
        mf: a PySCF mean-field (SCF) object.
        pcmobj: optional DDPCM instance; a default one is built from
            ``mf.mol`` when not given.

    Returns:
        The SCF object decorated by :func:`ddcosmo.ddcosmo_for_scf`.
    """
    if pcmobj is None:
        pcmobj = DDPCM(mf.mol)
    return ddcosmo.ddcosmo_for_scf(mf, pcmobj)
def gen_ddpcm_solver(pcmobj, verbose=None):
    """Precompute the ddPCM matrices and return a closure ``gen_vind(dm)``.

    The returned function maps a density matrix to ``(epcm, vpcm)``: the
    solvation energy contribution and the matching potential matrix
    (JCP, 144, 054101). ``verbose`` is accepted for API symmetry but is
    not used in this function.
    """
    mol = pcmobj.mol
    if pcmobj.grids.coords is None:
        pcmobj.grids.build(with_non0tab=True)
    natm = mol.natm
    lmax = pcmobj.lmax
    r_vdw = ddcosmo.get_atomic_radii(pcmobj)
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, lmax, True))
    # ui = 1 - fi is the exposed-surface weight per atom/grid point,
    # clipped at 0 for fully buried points.
    fi = ddcosmo.make_fi(pcmobj, r_vdw)
    ui = 1 - fi
    ui[ui<0] = 0
    nexposed = numpy.count_nonzero(ui==1)
    nbury = numpy.count_nonzero(ui==0)
    on_shell = numpy.count_nonzero(ui>0) - nexposed
    logger.debug(pcmobj, 'Num points exposed %d', nexposed)
    logger.debug(pcmobj, 'Num points buried %d', nbury)
    logger.debug(pcmobj, 'Num points on shell %d', on_shell)
    nlm = (lmax+1)**2
    Lmat = ddcosmo.make_L(pcmobj, r_vdw, ylm_1sph, fi)
    Lmat = Lmat.reshape(natm*nlm,-1)
    Amat = make_A(pcmobj, r_vdw, ylm_1sph, ui).reshape(natm*nlm,-1)
    # Dielectric-dependent screening operators: A_diele for finite eps,
    # A_inf for the conductor (eps -> infinity) limit.
    fac = 2*numpy.pi * (pcmobj.eps+1) / (pcmobj.eps-1)
    A_diele = Amat + fac * numpy.eye(natm*nlm)
    A_inf = Amat + 2*numpy.pi * numpy.eye(natm*nlm)
    cached_pol = ddcosmo.cache_fake_multipoles(pcmobj.grids, r_vdw, lmax)
    def gen_vind(dm):
        # Solve the ddPCM screening equation for the surface potential,
        # then the ddCOSMO-like linear system L X = phi.
        phi = ddcosmo.make_phi(pcmobj, dm, r_vdw, ui)
        phi = numpy.linalg.solve(A_diele, A_inf.dot(phi.ravel()))
        L_X = numpy.linalg.solve(Lmat, phi.ravel()).reshape(natm,-1)
        psi, vmat = ddcosmo.make_psi_vmat(pcmobj, dm, r_vdw, ui, pcmobj.grids,
                                          ylm_1sph, cached_pol, L_X, Lmat)[:2]
        dielectric = pcmobj.eps
        f_epsilon = (dielectric-1.)/dielectric
        # Es = 1/2 f(eps) <psi, X>
        epcm = .5 * f_epsilon * numpy.einsum('jx,jx', psi, L_X)
        return epcm, .5 * f_epsilon * vmat
    return gen_vind
def energy(pcmobj, dm):
    '''
    ddPCM energy
    Es = 1/2 f(eps) \int rho(r) W(r) dr

    Args:
        pcmobj: DDPCM object.
        dm: density matrix.

    Returns:
        The ddPCM solvation energy contribution.
    '''
    # Builds the full solver for a single evaluation and keeps only epcm;
    # reuse gen_ddpcm_solver directly when evaluating many densities.
    epcm = gen_ddpcm_solver(pcmobj, pcmobj.verbose)(dm)[0]
    return epcm
def regularize_xt(t, eta, scale=1):
    """Smoothed characteristic function of the atomic-sphere boundary.

    Returns 1 for t <= 1-eta, 0 for t >= 1, and a polynomial switch on
    the intermediate shell (JCP, 144, 054101).

    Args:
        t (ndarray): scaled radial distances.
        eta (float): width of the switching shell (multiplied by *scale*).
        scale (float): extra scaling factor applied to eta.

    Returns:
        ndarray: the regularized values, same shape as *t*.
    """
    width = eta * scale
    xt = numpy.zeros_like(t)
    xt[t <= 1 - width] = 1
    shell_mask = (1 - width < t) & (t < 1)
    ts = t[shell_mask] - width * .5
    # JCP, 144, 054101
    xt[shell_mask] = 1./width**4 * (1 - ts)**2 * (ts - 1 + 2*width)**2
    return xt
def make_A(pcmobj, r_vdw, ylm_1sph, ui):
    """Assemble the ddPCM A matrix, shape (natm, nlm, natm, nlm).

    Args:
        pcmobj: DDPCM object (supplies mol, lmax, lebedev_order, ...).
        r_vdw: per-atom van der Waals radii.
        ylm_1sph: real spherical harmonics evaluated on the Lebedev sphere.
        ui: exposed-surface weights per atom and grid point.
    """
    # Part of A matrix defined in JCP, 144, 054101, Eq (43), (44)
    mol = pcmobj.mol
    natm = mol.natm
    lmax = pcmobj.lmax
    eta = pcmobj.eta  # NOTE(review): unused in this function
    nlm = (lmax+1)**2
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = weights_1sph.size
    atom_coords = mol.atom_coords()
    ylm_1sph = ylm_1sph.reshape(nlm,ngrid_1sph)
    Amat = numpy.zeros((natm,nlm,natm,nlm))
    for ja in range(natm):
        # w_u = precontract w_n U_j
        w_u = weights_1sph * ui[ja]
        p1 = 0
        # Diagonal (same-atom) contribution, one angular-momentum shell at
        # a time; p0:p1 selects the 2l+1 components of shell l.
        for l in range(lmax+1):
            fac = 2*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            a = numpy.einsum('xn,n,mn->xm', ylm_1sph, w_u, ylm_1sph[p0:p1])
            Amat[ja,:,ja,p0:p1] += -fac * a
        # Off-diagonal contribution from neighbouring spheres overlapping ja.
        for ka in ddcosmo.atoms_with_vdw_overlap(ja, atom_coords, r_vdw):
            vjk = r_vdw[ja] * coords_1sph + atom_coords[ja] - atom_coords[ka]
            rjk = lib.norm(vjk, axis=1)
            pol = sph.multipoles(vjk, lmax)
            p1 = 0
            # NOTE(review): `l` on the next line still holds its final value
            # (lmax) from the diagonal loop above, and `weights` is NOT
            # recomputed inside the per-l loop below even though `fac`
            # carries an l-dependent r_vdw[ka]**(l+1) factor. Confirm against
            # JCP, 144, 054101 Eq (44) whether the 1/rjk power should be
            # per-l; this looks like a stale-variable bug.
            weights = w_u / rjk**(l*2+1)
            for l in range(lmax+1):
                fac = 4*numpy.pi*l/(l*2+1) * r_vdw[ka]**(l+1)
                p0, p1 = p1, p1 + (l*2+1)
                a = numpy.einsum('xn,n,mn->xm', ylm_1sph, weights, pol[l])
                Amat[ja,:,ka,p0:p1] += -fac * a
    return Amat
class DDPCM(ddcosmo.DDCOSMO):
    """Domain-decomposition PCM solvent model (experimental).

    Reuses the ddCOSMO machinery from :mod:`pyscf.solvent.ddcosmo`; only
    the solver generator and the switching function are overridden.
    """
    def __init__(self, mol):
        ddcosmo.DDCOSMO.__init__(self, mol)
        logger.warn(self, 'ddPCM is an experimental feature. It is '
                    'still in testing.\nFeatures and APIs may be changed '
                    'in the future.')
    # Aliases presumably looked up by the solvent SCF driver to obtain the
    # vind generator — verify against ddcosmo.ddcosmo_for_scf.
    gen_solver = as_solver = gen_ddpcm_solver
    def regularize_xt(self, t, eta, scale=1):
        # Override ddCOSMO's switching function with the ddPCM polynomial one.
        return regularize_xt(t, eta, scale)
if __name__ == '__main__':
    from pyscf import scf
    # Smoke test: random symmetric "density matrix" on a tiny H4 system;
    # the printed values are offsets from stored regression references and
    # should come out close to zero.
    mol = gto.M(atom='H 0 0 0; H 0 1 1.2; H 1. .1 0; H .5 .5 1')
    numpy.random.seed(1)
    nao = mol.nao_nr()
    dm = numpy.random.random((nao,nao))
    dm = dm + dm.T
    #dm = scf.RHF(mol).run().make_rdm1()
    e, vmat = DDPCM(mol).kernel(dm)
    print(e + 1.2446306643473923)
    print(lib.finger(vmat) - 0.77873361914445294)
mol = gto.Mole()
mol.atom = ''' O 0.00000000 0.00000000 -0.11081188
H -0.00000000 -0.84695236 0.59109389
H -0.00000000 0.89830571 0.52404783 '''
mol.basis = '3-21g' #cc-pvdz'
mol.build()
cm = DDPCM(mol)
cm.verbose = 4
mf = ddpcm_for_scf(scf.RHF(mol), cm)#.newton()
mf.verbose = 4
mf.kernel()
| 33.021978 | 83 | 0.614642 |
39eaeb907f2c19f8fa5c2f62af662e1ba13290e6 | 657 | py | Python | setup.py | Dascienz/streamingbandit | 219a0e7b3cf73c14579529b9d94db34b5214ef40 | [
"MIT"
] | null | null | null | setup.py | Dascienz/streamingbandit | 219a0e7b3cf73c14579529b9d94db34b5214ef40 | [
"MIT"
] | null | null | null | setup.py | Dascienz/streamingbandit | 219a0e7b3cf73c14579529b9d94db34b5214ef40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Author: Maurits Kaptein, Jules Kruijswijk.
# contributor: Robin van Emden, Vincent Gijsen
from setuptools import setup
setup(name='StreamingBandit',
      version='1.0.4',
      description='Python application to setup and run streaming (contextual) bandit experiments.',
      author='Nth-iteration',
      author_email='maurits@mauritskaptein.com',
      url='https://github.com/nth-iteration-labs/streamingbandit',
      license='MIT',
      # NOTE(review): packages is empty, so this distribution installs no
      # Python packages — confirm the application is meant to be run from a
      # source checkout rather than imported.
      packages=[],
install_requires=[
'tornado',
'redis==2.10.6',
'pyyaml',
'pymongo',
'numpy',
'scipy',
'scikit-learn',
'apscheduler',
'bcrypt'
])
| 24.333333 | 97 | 0.649924 |
9436821420957305e99cc4d701b0aed0d7f86501 | 217 | py | Python | command_center/generate.py | NTUEELightDance/2019-LightDance | 2e2689f868364e16972465abc22801aaeaf3d8ba | [
"MIT"
] | 2 | 2019-07-16T10:40:52.000Z | 2022-03-14T00:26:42.000Z | command_center/generate.py | NTUEELightDance/2019-LightDance | 2e2689f868364e16972465abc22801aaeaf3d8ba | [
"MIT"
] | null | null | null | command_center/generate.py | NTUEELightDance/2019-LightDance | 2e2689f868364e16972465abc22801aaeaf3d8ba | [
"MIT"
] | 2 | 2019-12-01T07:40:04.000Z | 2020-02-15T09:58:50.000Z | import sys
sys.path.append('../editor')
import translate_new
import json
Data = translate_new.translate('../editor/tron.in')
with open('data.json', 'w') as outfile:
json.dump(Data, outfile)
print('Generate OK') | 19.727273 | 51 | 0.714286 |
d447cfc9e2e7dcd4fb90efd9b1db0c9a93ea4125 | 395 | py | Python | blog/sitemaps.py | mohsenbjp/mysite1 | 8a8b8b4009bf16fe391fc64c8bf9d41f7f2e32a4 | [
"MIT"
] | null | null | null | blog/sitemaps.py | mohsenbjp/mysite1 | 8a8b8b4009bf16fe391fc64c8bf9d41f7f2e32a4 | [
"MIT"
] | null | null | null | blog/sitemaps.py | mohsenbjp/mysite1 | 8a8b8b4009bf16fe391fc64c8bf9d41f7f2e32a4 | [
"MIT"
] | null | null | null | from django.contrib.sitemaps import Sitemap
from django.urls import reverse
from blog.models import Post
class BlogSitemap(Sitemap):
    """Sitemap entries for published blog posts."""
    # Crawler hints: how often entries change and their relative priority.
    changefreq='weekly'
    priority=0.5
    def items(self):
        # Only posts with their status flag set (published) are listed.
        return Post.objects.filter(status=True)
    def location(self,item):
        # URL of the single-post view for this post id.
        return reverse('blog:single',kwargs={'pid':item.id})
    def lastmod(self,obj):
        return obj.published_date
| 28.214286 | 60 | 0.708861 |
f45319a2c8a994290435596b6d557e6a21b8afc0 | 8,983 | py | Python | tests/integration/widgets/test_dateslider.py | ArchaeotheriumSapienter/bokeh | 08bfae97a91db5bdc989c6ab33ec6a5125ed0d01 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/widgets/test_dateslider.py | ArchaeotheriumSapienter/bokeh | 08bfae97a91db5bdc989c6ab33ec6a5125ed0d01 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/widgets/test_dateslider.py | ArchaeotheriumSapienter/bokeh | 08bfae97a91db5bdc989c6ab33ec6a5125ed0d01 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from datetime import date, datetime, timedelta
from time import sleep
# External imports
from flaky import flaky
# Bokeh imports
from bokeh._testing.plugins.project import BokehModelPage, BokehServerPage
from bokeh._testing.util.selenium import (
RECORD,
drag_slider,
find_elements_for,
get_slider_bar_color,
get_slider_title_text,
get_slider_title_value,
)
from bokeh.layouts import column
from bokeh.models import (
Circle,
ColumnDataSource,
CustomJS,
DateSlider,
Plot,
Range1d,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# Load the bokeh testing fixtures (bokeh_model_page, bokeh_server_page)
# used by the tests in this module.
pytest_plugins = (
    "bokeh._testing.plugins.project",
)
# Shared slider configuration: an 8-day range with the initial value one
# day after the start (i.e. 2017-08-04).
start = date(2017, 8, 3)
end = date(2017, 8, 10)
value = start + timedelta(days=1)
@pytest.mark.selenium
class Test_DateSlider:
    """Selenium integration tests for the DateSlider widget.

    The ``test_display*``/``test_title*``/``test_js*`` cases exercise a
    standalone (model-only) page; the ``test_server_*`` cases run against a
    live Bokeh server and are marked flaky because they depend on browser
    drag timing and noUiSlider's CSS transitions.
    """
    def test_display(self, bokeh_model_page: BokehModelPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300)
        page = bokeh_model_page(slider)
        children = find_elements_for(page.driver, slider, "div.bk-input-group > div")
        assert len(children) == 2
        assert page.has_no_console_errors()
    def test_displays_title(self, bokeh_model_page: BokehModelPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300)
        page = bokeh_model_page(slider)
        children = find_elements_for(page.driver, slider, "div.bk-input-group > div")
        assert len(children) == 2
        assert get_slider_title_text(page.driver, slider) == "04 Aug 2017"
        assert get_slider_title_value(page.driver, slider) == "04 Aug 2017"
        assert page.has_no_console_errors()
    def test_title_updates(self, bokeh_model_page: BokehModelPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300)
        page = bokeh_model_page(slider)
        assert get_slider_title_value(page.driver, slider) == "04 Aug 2017"
        drag_slider(page.driver, slider, 50)
        assert get_slider_title_value(page.driver, slider) > "04 Aug 2017"
        drag_slider(page.driver, slider, -70)
        assert get_slider_title_value(page.driver, slider) == "03 Aug 2017"
        assert page.has_no_console_errors()
    def test_displays_bar_color(self, bokeh_model_page: BokehModelPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300, bar_color="red")
        page = bokeh_model_page(slider)
        children = find_elements_for(page.driver, slider, "div.bk-input-group > div")
        assert len(children) == 2
        assert get_slider_bar_color(page.driver, slider) == "rgba(255, 0, 0, 1)"
        assert page.has_no_console_errors()
    def test_js_on_change_executes(self, bokeh_model_page: BokehModelPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300)
        slider.js_on_change('value', CustomJS(code=RECORD("value", "cb_obj.value")))
        page = bokeh_model_page(slider)
        drag_slider(page.driver, slider, 150)
        results = page.results
        # The recorded value is a JS epoch in milliseconds.
        assert datetime.fromtimestamp(results['value']/1000) > datetime(*date.fromisoformat("2017-08-04").timetuple()[:3])
        assert page.has_no_console_errors()
    @flaky(max_runs=10)
    def test_server_on_change_round_trip(self, bokeh_server_page: BokehServerPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300, step=24*3600*1000)
        def modify_doc(doc):
            source = ColumnDataSource(dict(x=[1, 2], y=[1, 1], val=["a", "b"]))
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            plot.add_glyph(source, Circle(x='x', y='y', size=20))
            plot.tags.append(CustomJS(name="custom-action", args=dict(s=source), code=RECORD("data", "s.data")))
            def cb(attr, old, new):
                source.data['val'] = [slider.value_as_date.isoformat()]
            slider.on_change('value', cb)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        drag_slider(page.driver, slider, 50)
        page.eval_custom_action()
        results = page.results
        new = results['data']['val']
        assert new[0] > '2017-08-04'
        drag_slider(page.driver, slider, -70)
        page.eval_custom_action()
        results = page.results
        new = results['data']['val']
        assert new[0] == '2017-08-03'
        # XXX (bev) skip keypress part of test until it can be fixed
        # handle = find_element_for(page.driver, slider, ".noUi-handle")
        # select_element_and_press_key(page.driver, handle, Keys.ARROW_RIGHT)
        # page.eval_custom_action()
        # results = page.results
        # old, new = results['data']['val']
        # assert float(new) == 1
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
    @flaky(max_runs=10)
    def test_server_callback_value_vs_value_throttled(self, bokeh_server_page: BokehServerPage) -> None:
        junk = dict(v=0, vt=0)
        slider = DateSlider(start=start, end=end, value=value, width=300)
        def modify_doc(doc):
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            def cbv(attr, old, new): junk['v'] += 1
            def cbvt(attr, old, new): junk['vt'] += 1
            slider.on_change('value', cbv)
            slider.on_change('value_throttled', cbvt)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        drag_slider(page.driver, slider, 30, release=False)
        sleep(1) # noUiSlider does a transition that takes some time
        drag_slider(page.driver, slider, 30, release=False)
        sleep(1) # noUiSlider does a transition that takes some time
        drag_slider(page.driver, slider, 30, release=False)
        sleep(1) # noUiSlider does a transition that takes some time
        drag_slider(page.driver, slider, 30, release=True)
        sleep(1) # noUiSlider does a transition that takes some time
        # 'value' fires on every drag step; 'value_throttled' only on release.
        assert junk['v'] == 4
        assert junk['vt'] == 1
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
    @flaky(max_runs=10)
    def test_server_bar_color_updates(self, bokeh_server_page: BokehServerPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300, bar_color="red")
        def modify_doc(doc):
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            def cb(attr, old, new):
                slider.bar_color = "rgba(255, 255, 0, 1)"
            slider.on_change('value', cb)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        drag_slider(page.driver, slider, 150)
        sleep(1) # noUiSlider does a transition that takes some time
        assert get_slider_bar_color(page.driver, slider) == "rgba(255, 255, 0, 1)"
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
    @flaky(max_runs=10)
    def test_server_title_updates(self, bokeh_server_page: BokehServerPage) -> None:
        slider = DateSlider(start=start, end=end, value=value, width=300)
        def modify_doc(doc):
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            def cb(attr, old, new):
                slider.title = "baz"
            slider.on_change('value', cb)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        drag_slider(page.driver, slider, 150)
        sleep(1) # noUiSlider does a transition that takes some time
        assert get_slider_title_text(page.driver, slider) > "04 Aug 2017"
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
| 35.788845 | 122 | 0.61082 |
a2a99c283044432f581dec2372c7662d06f55e1c | 1,189 | py | Python | _includes/code/02_parallel_jobs/count_pylibs_annotated.py | tkphd/hpc-parallel-novice | dd5870450c4f889694bf58ed2f53b5535684d399 | [
"CC-BY-4.0"
] | 32 | 2017-03-08T14:40:34.000Z | 2022-02-08T12:46:31.000Z | _includes/code/02_parallel_jobs/count_pylibs_annotated.py | tkphd/hpc-parallel-novice | dd5870450c4f889694bf58ed2f53b5535684d399 | [
"CC-BY-4.0"
] | 25 | 2017-03-08T20:32:00.000Z | 2021-11-25T16:07:10.000Z | _includes/code/02_parallel_jobs/count_pylibs_annotated.py | tkphd/hpc-parallel-novice | dd5870450c4f889694bf58ed2f53b5535684d399 | [
"CC-BY-4.0"
] | 21 | 2017-03-08T14:16:27.000Z | 2021-11-26T06:35:48.000Z | import os
import sys
import glob
import re
def load_text():
    """Locate the stdlib directory on sys.path (the one containing os.py)
    and return the concatenated text of every .py file in it.

    Returns:
        str: contents of all top-level stdlib .py files joined by newlines,
            or an empty string when no stdlib directory could be found.
    """
    path_of_ospy = ""
    for d in sys.path:
        if os.path.isdir(d) and os.path.exists(os.path.join(d, "os.py")):
            path_of_ospy = d
            break
    if not path_of_ospy:
        # Bug fix: was `print("no modules found in " + sys.path)`, which
        # raises TypeError (str + list). Also return "" instead of [] so the
        # return type is consistent with the success path (the isdir check
        # above already guarantees the found path exists, so the redundant
        # second os.path.exists check was dropped).
        print("no modules found in %s" % sys.path)
        return ""
    text = []
    for fn in glob.glob(os.path.join(path_of_ospy, "*.py")):
        # Context manager guarantees the handle is closed even on a read error.
        with open(fn, "r") as fnf:
            text.append(fnf.read())
    return "\n".join(text)
def word_count(text):
    """Count the alphabetic words in *text*.

    A word is a maximal run of letters (no digits or underscores), per the
    pattern ``\\b[^\\W\\d_]+\\b``.

    Bug fix: the original used ``re.split`` on the word pattern and returned
    the number of fragments *between* words, which over-counts by one on
    non-empty text ("hello world" -> 3). ``re.findall`` counts the actual
    word matches.

    Args:
        text (str): the text to scan.

    Returns:
        int: the number of words found.
    """
    word_pattern = r'\b[^\W\d_]+\b'
    return len(re.findall(word_pattern, text))
@profile
def main():
    """Load the stdlib sources, report character/word counts, exit 0 on success.

    NOTE: the bare ``@profile`` decorator is injected into builtins by
    line_profiler's ``kernprof`` runner; executing this script with plain
    ``python`` raises NameError. Run it via ``kernprof -l``.
    """
    text = load_text()
    nchars = len(text)
    nwords = word_count(text)
    print("%i characters and %i words found in standard python lib" % (nchars, nwords))
    # Exit status signals whether any stdlib text was found at all.
    if len(text):
        sys.exit(0)
    else:
        sys.exit(1)
if __name__ == '__main__':
main()
| 22.433962 | 145 | 0.603869 |
62e4e7e91056a53b37476db54f46c8e659ba6a6c | 435 | py | Python | setup.py | nickdelgrosso/genomics_workshop_demo | 9890017a4348d9a97eda8f5977a8a02ed24610c3 | [
"MIT"
] | 1 | 2019-04-12T02:40:54.000Z | 2019-04-12T02:40:54.000Z | setup.py | nickdelgrosso/genomics_workshop_demo | 9890017a4348d9a97eda8f5977a8a02ed24610c3 | [
"MIT"
] | 1 | 2018-10-01T13:11:51.000Z | 2018-10-01T13:14:17.000Z | setup.py | nickdelgrosso/genomics_workshop_demo | 9890017a4348d9a97eda8f5977a8a02ed24610c3 | [
"MIT"
] | 12 | 2018-10-01T09:35:35.000Z | 2018-10-01T09:49:27.000Z | from setuptools import setup
setup(
    name='genomics_demo',
    # NOTE(review): 'v0.0.1' includes a leading 'v' — most tooling expects
    # a bare PEP 440 version like '0.0.1'; confirm this is intentional.
    version='v0.0.1',
    packages=['genomics_demo'],
    url='',
    license='MIT',
    author='Nicholas A. Del Grosso',
    author_email='delgrosso.nick@gmail.com',
    description='The best genomics package ever. Just kidding--do NOT use this!',
entry_points='''
[console_scripts]
complement=genomics_demo.scripts:get_reverse_complement
''',
)
| 25.588235 | 82 | 0.662069 |
70102dcdbf221a8e9423cfe6e93ebbead95ef331 | 272 | py | Python | util/convert_to_dict.py | jkh911208/gluster_manager_api | ecc59c8d87d06eb0cb7981263e8b09851bbf0fea | [
"Apache-2.0"
] | null | null | null | util/convert_to_dict.py | jkh911208/gluster_manager_api | ecc59c8d87d06eb0cb7981263e8b09851bbf0fea | [
"Apache-2.0"
] | null | null | null | util/convert_to_dict.py | jkh911208/gluster_manager_api | ecc59c8d87d06eb0cb7981263e8b09851bbf0fea | [
"Apache-2.0"
] | null | null | null | def config_to_dict(data):
temp = {}
for line in data:
if "=" in line:
key_val = line.split("=")
if len(key_val) == 2:
temp[key_val[0].strip().replace("\"", "")] = key_val[1].strip().replace("\"", "")
return temp
| 30.222222 | 97 | 0.474265 |
c3dddc05a9cf970c2c6c7107f8329daa90aecc51 | 2,646 | py | Python | run-tests.py | sergev/pyjson5 | 589576553919dca0cff2fb4f7506b19370429a77 | [
"Apache-2.0"
] | 67 | 2018-05-03T16:50:10.000Z | 2022-03-16T14:43:47.000Z | run-tests.py | sergev/pyjson5 | 589576553919dca0cff2fb4f7506b19370429a77 | [
"Apache-2.0"
] | 21 | 2018-10-17T12:43:35.000Z | 2022-02-14T12:05:51.000Z | run-tests.py | sergev/pyjson5 | 589576553919dca0cff2fb4f7506b19370429a77 | [
"Apache-2.0"
] | 4 | 2020-09-16T08:48:33.000Z | 2021-08-07T08:45:10.000Z | #!/usr/bin/env python
from argparse import ArgumentParser
from logging import basicConfig, INFO, getLogger
from os import name
from pathlib import Path
from subprocess import Popen
from sys import executable
from colorama import init, Fore
from pyjson5 import decode_io
argparser = ArgumentParser(description='Run JSON5 parser tests')
# Default test-suite location: the json5-tests checkout/submodule.
argparser.add_argument('tests', nargs='?', type=Path, default=Path('third-party/json5-tests'))
# Maps a test file's suffix to whether transcoding it is expected to
# succeed: .json/.json5 inputs must parse, .txt inputs must be rejected.
# Files with any other suffix are skipped entirely.
suffix_implies_success = {
    '.json': True,
    '.json5': True,
    '.txt': False,
}
if __name__ == '__main__':
    basicConfig(level=INFO)
    logger = getLogger(__name__)
    init()
    # Emoji status markers do not render on Windows consoles (os.name == 'nt'),
    # so fall back to plain words there.
    if name != 'nt':
        code_severe = Fore.RED + '😱'
        code_good = Fore.CYAN + '😄'
        code_bad = Fore.YELLOW + '😠'
    else:
        code_severe = Fore.RED + 'SEVERE'
        code_good = Fore.CYAN + 'GOOD'
        code_bad = Fore.YELLOW + 'BAD'
    good = 0
    bad = 0
    severe = 0
    args = argparser.parse_args()
    index = 0
    for path in sorted(args.tests.glob('*/*.*')):
        # Display label only, e.g. '.json5' -> 'json5'; the expectation
        # lookup below uses the full suffix.
        kind = path.suffix.split('.')[-1]
        expect_success = suffix_implies_success.get(path.suffix)
        if expect_success is None:
            continue
        index += 1
        category = path.parent.name
        # NOTE: this rebinds the module-level `name` imported from os; the
        # os.name check above has already run, so it is harmless here.
        name = path.stem
        try:
            # Run the transcoder in a subprocess with a 5 second timeout;
            # exit code 0 = parsed, 1 = rejected, anything else = crash.
            p = Popen((executable, 'transcode-to-json.py', str(path)))
            outcome = p.wait(5)
        except Exception:
            logger.error('Error while testing: %s', path, exc_info=True)
            severe += 1
            continue
        is_success = outcome == 0
        is_failure = outcome == 1
        is_severe = outcome not in (0, 1)
        # A test is "good" when the observed outcome matches the expectation
        # implied by the file suffix.
        is_good = is_success if expect_success else is_failure
        code = code_severe if is_severe else code_good if is_good else code_bad
        print(
            '#', index, ' ', code, ' '
            'Category <', category, '> | '
            'Test <', name, '> | '
            'Data <', kind, '> | '
            'Expected <', 'pass' if expect_success else 'FAIL', '> | '
            'Actual <', 'pass' if is_success else 'FAIL', '>',
            Fore.RESET,
            sep='',
        )
        if is_severe:
            severe += 1
        elif is_good:
            good += 1
        else:
            bad += 1
    # Overall exit status: 2 on any crash/timeout, 1 on wrong outcomes,
    # 0 when every expectation was met.
    is_severe = severe > 0
    is_good = bad == 0
    code = code_severe if is_severe else code_good if is_good else code_bad
    print()
    print(
        code, ' ',
        good, ' × correct outcome | ',
        bad, ' × wrong outcome | ',
        severe, ' × severe errors',
        Fore.RESET,
        sep=''
    )
    raise SystemExit(2 if is_severe else 0 if is_good else 1)
7565c9a38af4e9b816034d65c188a5da94533b85 | 3,186 | py | Python | lib/galaxy/webapps/galaxy/api/extended_metadata.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/api/extended_metadata.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | 6 | 2021-11-11T20:57:49.000Z | 2021-12-10T15:30:33.000Z | lib/galaxy/webapps/galaxy/api/extended_metadata.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | """
API operations on annotations.
"""
import logging
from typing import (
Generic,
Optional,
TypeVar,
)
from galaxy import (
managers,
model,
web,
)
from galaxy.webapps.base.controller import (
UsesExtendedMetadataMixin,
UsesLibraryMixinItems,
UsesStoredWorkflowMixin,
)
from . import (
BaseGalaxyAPIController,
depends,
)
log = logging.getLogger(__name__)
T = TypeVar("T")
class BaseExtendedMetadataController(
    BaseGalaxyAPIController, UsesExtendedMetadataMixin, UsesLibraryMixinItems, UsesStoredWorkflowMixin, Generic[T]
):
    """Shared index/create behavior for extended-metadata endpoints.

    Subclasses set ``exmeta_item_id`` (the request keyword carrying the
    encoded item id) and implement ``_get_item_from_id`` to resolve it.
    """

    exmeta_item_id: str

    def _get_item_from_id(self, trans, idstr, check_writable=True) -> Optional[T]:
        ...

    @web.expose_api
    def index(self, trans, **kwd):
        """Return the extended-metadata payload attached to the referenced item."""
        item = self._get_item_from_id(trans, kwd[self.exmeta_item_id], check_writable=False)
        if item is None:
            return None
        ex_meta = self.get_item_extended_metadata_obj(trans, item)
        return None if ex_meta is None else ex_meta.data

    @web.expose_api
    def create(self, trans, payload, **kwd):
        """Attach ``payload`` as extended metadata, replacing any existing record."""
        item = self._get_item_from_id(trans, kwd[self.exmeta_item_id], check_writable=True)
        if item is None:
            return
        # Any pre-existing metadata object is detached and deleted first so
        # each item carries exactly one extended-metadata record.
        existing = self.get_item_extended_metadata_obj(trans, item)
        if existing is not None:
            self.unset_item_extended_metadata_obj(trans, item)
            self.delete_extended_metadata(trans, existing)
        new_meta = self.create_extended_metadata(trans, payload)
        self.set_item_extended_metadata_obj(trans, item, new_meta)
class LibraryDatasetExtendMetadataController(BaseExtendedMetadataController[model.LibraryDatasetDatasetAssociation]):
    """Extended metadata attached to library dataset-dataset associations."""

    controller_name = "library_dataset_extended_metadata"
    exmeta_item_id = "library_content_id"

    def _get_item_from_id(self, trans, idstr, check_writable=True) -> Optional[model.LibraryDatasetDatasetAssociation]:
        """Resolve *idstr* to an LDDA the user may modify (write) or access (read).

        Returns None when the permission check fails.
        """
        # Both branches previously fetched the same association separately;
        # fetch it (and the user's roles) once.
        item = self.get_library_dataset_dataset_association(trans, idstr)
        roles = trans.get_current_user_roles()
        if check_writable:
            if trans.app.security_agent.can_modify_library_item(roles, item):
                return item
        elif trans.app.security_agent.can_access_library_item(roles, item, trans.user):
            return item
        return None
class HistoryDatasetExtendMetadataController(BaseExtendedMetadataController[model.HistoryDatasetAssociation]):
    """Extended metadata attached to history dataset associations."""

    controller_name = "history_dataset_extended_metadata"
    exmeta_item_id = "history_content_id"
    hda_manager: managers.hdas.HDAManager = depends(managers.hdas.HDAManager)

    def _get_item_from_id(self, trans, idstr, check_writable=True) -> Optional[model.HistoryDatasetAssociation]:
        """Resolve *idstr* to an HDA owned by (write) or accessible to (read) the user."""
        decoded = self.decode_id(idstr)
        if not check_writable:
            hda = self.hda_manager.get_accessible(decoded, trans.user)
            return self.hda_manager.error_if_uploading(hda)
        return self.hda_manager.get_owned(decoded, trans.user, current_history=trans.history)
| 35.797753 | 119 | 0.713748 |
7492c471b5169af6a1ba580cda130c0e5fb67470 | 1,946 | py | Python | tests/testapp/utils.py | harvardinformatics/django-mysql | c560cd9ccbc44828efea997624a7fb2548bda0b9 | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/utils.py | harvardinformatics/django-mysql | c560cd9ccbc44828efea997624a7fb2548bda0b9 | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/utils.py | harvardinformatics/django-mysql | c560cd9ccbc44828efea997624a7fb2548bda0b9 | [
"BSD-3-Clause"
] | null | null | null | from django.db import DEFAULT_DB_ALIAS, connection, connections
from django.test.utils import CaptureQueriesContext
def column_type(table_name, column_name):
    """Return the information_schema DATA_TYPE of a column in the current database."""
    with connection.cursor() as cursor:
        cursor.execute(
            """SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS
               WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = %s AND
               COLUMN_NAME = %s""",
            (table_name, column_name),
        )
        row = cursor.fetchone()
    return row[0]
class CaptureLastQuery:
    """Context manager exposing the most recent SQL statement run on a connection."""

    def __init__(self, conn=None):
        # Fall back to the default Django connection when none is given.
        self.conn = connection if conn is None else conn

    def __enter__(self):
        self.capturer = CaptureQueriesContext(self.conn)
        self.capturer.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.capturer.__exit__(exc_type, exc_value, tb)

    @property
    def query(self):
        """SQL text of the last query captured inside the context."""
        return self.capturer.captured_queries[-1]["sql"]
class print_all_queries:
    """Context manager that prints every SQL statement executed within it."""

    def __init__(self, conn=None):
        # Fall back to the default Django connection when none is given.
        self.conn = connection if conn is None else conn

    def __enter__(self):
        self.capturer = CaptureQueriesContext(self.conn)
        self.capturer.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.capturer.__exit__(exc_type, exc_value, tb)
        # Dump everything that ran inside the block, in execution order.
        for captured in self.capturer.captured_queries:
            print(captured["sql"])
def used_indexes(query, using=DEFAULT_DB_ALIAS):
    """
    Given SQL 'query', run EXPLAIN and return the names of the indexes used
    """
    conn = connections[using]
    with conn.cursor() as cursor:
        cursor.execute("EXPLAIN " + query)
        plan_rows = fetchall_dicts(cursor)
    return {r["key"] for r in plan_rows if r["key"] is not None}
def fetchall_dicts(cursor):
    """Return all remaining rows from *cursor* as dicts keyed by column name.

    :param cursor: a DB-API cursor that has already executed a query
    :return: list of {column_name: value} dicts, one per fetched row
    """
    columns = [col[0] for col in cursor.description]
    # Comprehension replaces the original append loop; behavior is identical.
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
| 27.408451 | 87 | 0.620761 |
5cf5e10934cde7d5a434e2c0a07189cfcf2d868c | 1,967 | py | Python | whiteboard/migrations/0001_initial.py | vchrisb/emc_phoenix3 | 4ee59cc3ff3dfdc62a460308e157d702371df69c | [
"MIT"
] | null | null | null | whiteboard/migrations/0001_initial.py | vchrisb/emc_phoenix3 | 4ee59cc3ff3dfdc62a460308e157d702371df69c | [
"MIT"
] | null | null | null | whiteboard/migrations/0001_initial.py | vchrisb/emc_phoenix3 | 4ee59cc3ff3dfdc62a460308e157d702371df69c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-11 16:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_resized.forms
import phoenix.custom_storages
import uuid
import whiteboard.models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the whiteboard app (Django 1.9,
    # 2016-02-11). Do not edit retroactively; add new migrations instead.

    initial = True

    dependencies = [
        # Required because Whiteboard.user references the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Whiteboard',
            fields=[
                # UUID primary key instead of the default auto-increment id.
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('account', models.CharField(max_length=40)),
                ('text', models.TextField(max_length=320)),
                ('date', models.DateField()),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                # Original upload, stored on the private/secure storage backend;
                # its filename is computed by WhiteboardImageName.
                ('image', models.ImageField(storage=phoenix.custom_storages.SecureStorage(), upload_to=whiteboard.models.WhiteboardImageName)),
                # Four pre-resized variants of the image (presumably generated
                # from `image` by the model — confirm in whiteboard.models).
                ('img_large', django_resized.forms.ResizedImageField(blank=True, storage=phoenix.custom_storages.SecureStorage(), upload_to='WhiteboardImages/thumb')),
                ('img_medium', django_resized.forms.ResizedImageField(blank=True, storage=phoenix.custom_storages.SecureStorage(), upload_to='WhiteboardImages/thumb')),
                ('img_small', django_resized.forms.ResizedImageField(blank=True, storage=phoenix.custom_storages.SecureStorage(), upload_to='WhiteboardImages/thumb')),
                ('img_thumb', django_resized.forms.ResizedImageField(blank=True, storage=phoenix.custom_storages.SecureStorage(), upload_to='WhiteboardImages/thumb')),
                # SET_NULL keeps a board when its author account is deleted.
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 47.97561 | 168 | 0.697001 |
2825cfe1d0303aa4db76d49c4be6e572d623a365 | 3,680 | py | Python | mac/google-cloud-sdk/lib/third_party/kubernetes/client/apis/networking_api.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | mac/google-cloud-sdk/lib/third_party/kubernetes/client/apis/networking_api.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | mac/google-cloud-sdk/lib/third_party/kubernetes/client/apis/networking_api.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class NetworkingApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):
        """
        get information of a group

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: callers always get just the deserialized data,
        # never the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_api_group_with_http_info(**kwargs)
        else:
            (data) = self.get_api_group_with_http_info(**kwargs)
            return data

    def get_api_group_with_http_info(self, **kwargs):
        """
        get information of a group

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Generator-added control parameters accepted by every endpoint.
        all_params = []
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            # Reject anything that is not a declared or control parameter.
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                ' to method get_api_group' % key)
            params[key] = val
        del params['kwargs']

        # This endpoint takes no path/query/form parameters or body.
        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api(
            '/apis/networking.k8s.io/',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 29.677419 | 115 | 0.661957 |
220c5d2aebf166fdb2bd92469c41cfda06a6689e | 326 | py | Python | B站/【pandas高级应用】Python自动化办公/0801/代码/011/011.py | zhaofeng092/python_auto_office | 3ef8420d5695aa3ab1184e9a76dc5e3dd6ab244d | [
"MIT"
] | 46 | 2020-12-29T06:19:15.000Z | 2022-03-26T00:17:23.000Z | B站/【pandas高级应用】Python自动化办公/0801/代码/011/011.py | zhaofeng092/python_auto_office | 3ef8420d5695aa3ab1184e9a76dc5e3dd6ab244d | [
"MIT"
] | 3 | 2021-03-20T05:38:58.000Z | 2021-12-13T20:50:24.000Z | B站/【pandas高级应用】Python自动化办公/0801/代码/011/011.py | zhaofeng092/python_auto_office | 3ef8420d5695aa3ab1184e9a76dc5e3dd6ab244d | [
"MIT"
] | 44 | 2020-12-27T12:58:30.000Z | 2022-03-27T14:18:54.000Z | import pandas as pd
import matplotlib.pyplot as plt
users = pd.read_excel('C:/Temp/Users.xlsx')
users['Total'] = users['Oct'] + users['Nov'] + users['Dec']
users.sort_values(by='Total', inplace=True, ascending=False)
print(users)
users.plot.bar(x='Name', y=['Oct', 'Nov', 'Dec'], stacked=True)
plt.tight_layout()
plt.show()
| 27.166667 | 63 | 0.693252 |
995e778bf4bb883fbcab53b62e79b8cbacc3c957 | 856 | py | Python | web/beats/apps/tracks/migrations/0002_fileupload.py | WilliamQLiu/beats | d3a8236c2f8820cf9ac78e5758eb700a1728d72b | [
"Apache-2.0"
] | null | null | null | web/beats/apps/tracks/migrations/0002_fileupload.py | WilliamQLiu/beats | d3a8236c2f8820cf9ac78e5758eb700a1728d72b | [
"Apache-2.0"
] | 5 | 2020-02-11T23:00:27.000Z | 2021-06-10T17:57:16.000Z | web/beats/apps/tracks/migrations/0002_fileupload.py | WilliamQLiu/beats | d3a8236c2f8820cf9ac78e5758eb700a1728d72b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-19 06:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.11, 2017-06-19): adds the FileUpload
    # model used to track uploaded documents. Do not edit retroactively.

    dependencies = [
        ('tracks', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='FileUpload',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255)),
                # upload_to='' stores files directly under MEDIA_ROOT.
                ('doc', models.FileField(upload_to='')),
                # File size in bytes — presumably set by the upload handler;
                # confirm against the view that creates FileUpload rows.
                ('size', models.IntegerField(blank=True, null=True)),
            ],
        ),
    ]
| 31.703704 | 114 | 0.586449 |
1581e154ff6885660691730f51668539c0ac6bcd | 9,697 | py | Python | src/ui_manager.py | soyuka/botty | cc1670ea1db72c5e0ac91685897bf63c89b18896 | [
"MIT"
] | null | null | null | src/ui_manager.py | soyuka/botty | cc1670ea1db72c5e0ac91685897bf63c89b18896 | [
"MIT"
] | null | null | null | src/ui_manager.py | soyuka/botty | cc1670ea1db72c5e0ac91685897bf63c89b18896 | [
"MIT"
] | null | null | null | import keyboard
import os
import numpy as np
import time
from utils.custom_mouse import mouse
from utils.misc import wait
from logger import Logger
from config import Config
from screen import grab, convert_screen_to_monitor, convert_abs_to_monitor
from template_finder import TemplateFinder, TemplateMatch
from dataclasses import dataclass
from messages import Messenger
from game_stats import GameStats
messenger = Messenger()
game_stats = GameStats()
@dataclass
class ScreenObject:
    """Declarative description of a detectable UI element.

    Bundles template name(s) plus the search options that
    detect_screen_object()/wait_for_screen_object() forward to TemplateFinder.
    """

    # Back-reference populated when an instance is used as a class decorator
    # (see __call__); None otherwise.
    _screen_object = None
    # Template name or list of template names to search for.
    ref: list[str]
    # Optional pre-grabbed screenshot; when None, callers grab() a fresh one.
    inp_img: np.ndarray = None
    # Key into Config().ui_roi naming the region of interest to search
    # (annotation corrected: usages such as roi="gamebar_anchor" are strings).
    roi: str = None
    # Intended wait budget in seconds; note the wait_* helpers currently take
    # their own time_out argument instead of reading this field — TODO confirm.
    time_out: float = 30
    # Minimum template-match score to accept.
    threshold: float = 0.68
    # Convert match coordinates to monitor space when True.
    normalize_monitor: bool = False
    # Return only the best-scoring template instead of the first hit.
    best_match: bool = False
    use_grayscale: bool = False
    # Optional color(s) the match must agree with.
    color_match: list[np.ndarray] = None

    def __call__(self, cls):
        # Lets a ScreenObject instance act as a class decorator that attaches
        # itself to the decorated class.
        cls._screen_object = self
        return cls
class ScreenObjects:
    """Catalog of every UI element the bot can detect.

    Each attribute is a ScreenObject bundling template name(s), the ROI to
    search, and matching options; they are consumed by detect_screen_object(),
    wait_for_screen_object() and list_visible_objects().
    """

    # Game-bar anchor visible only while in-game (plus a darker variant).
    InGame = ScreenObject(
        ref=["GAMEBAR_ANCHOR", "GAMEBAR_ANCHOR_DARK"],
        roi="gamebar_anchor",
        threshold=0.8,
        best_match=True,
        use_grayscale=True
    )
    WaypointLabel=ScreenObject(
        ref="LABEL_WAYPOINT",
        roi="left_panel_header",
        threshold=0.8,
        use_grayscale=True
    )
    # One template per act; best_match picks the currently active act tab.
    WaypointTabs=ScreenObject(
        ref=["WP_A1_ACTIVE", "WP_A2_ACTIVE", "WP_A3_ACTIVE", "WP_A4_ACTIVE", "WP_A5_ACTIVE"],
        roi="wp_act_roi",
        threshold=0.8,
        best_match=True,
        use_grayscale=True
    )
    MercIcon=ScreenObject(
        ref=["MERC_A2", "MERC_A1", "MERC_A5", "MERC_A3"],
        roi="merc_icon",
        threshold=0.9,
        use_grayscale=True
    )
    PlayBtn=ScreenObject(
        ref=["PLAY_BTN", "PLAY_BTN_GRAY"],
        roi="play_btn",
        best_match=True,
        use_grayscale=True
    )
    MainMenu=ScreenObject(
        ref=["MAIN_MENU_TOP_LEFT", "MAIN_MENU_TOP_LEFT_DARK"],
        roi="main_menu_top_left",
        best_match=True,
        use_grayscale=True
    )
    Loading=ScreenObject(
        ref=["LOADING", "CREATING_GAME"],
        roi="difficulty_select",
        threshold=0.9,
        use_grayscale=True
    )
    # Difficulty buttons on the game-creation screen.
    Normal=ScreenObject(
        ref=["NORMAL_BTN"],
        roi="difficulty_select",
        threshold=0.9,
        use_grayscale=True
    )
    Nightmare=ScreenObject(
        ref=["NIGHTMARE_BTN"],
        roi="difficulty_select",
        threshold=0.9,
        use_grayscale=True
    )
    Hell=ScreenObject(
        ref=["HELL_BTN"],
        roi="difficulty_select",
        threshold=0.9,
        use_grayscale=True
    )
    CubeInventory=ScreenObject(
        ref=["HORADRIC_CUBE"],
        roi="left_inventory",
        threshold=0.8,
        use_grayscale=True
    )
    CubeOpened=ScreenObject(
        ref=["CUBE_TRANSMUTE_BTN"],
        roi="cube_btn_roi",
        threshold=0.8,
        use_grayscale=True
    )
    OnlineStatus=ScreenObject(
        ref=["CHARACTER_STATE_ONLINE", "CHARACTER_STATE_OFFLINE"],
        roi="character_online_status",
        best_match=True
    )
    SelectedCharacter=ScreenObject(
        ref=["CHARACTER_ACTIVE"],
        roi="character_select",
        threshold=0.8
    )
    # No ROI: server-error banner can appear anywhere on screen.
    ServerError=ScreenObject(
        ref=["SERVER_ISSUES"]
    )
    SaveAndExit=ScreenObject(
        ref=["SAVE_AND_EXIT_NO_HIGHLIGHT", "SAVE_AND_EXIT_HIGHLIGHT"],
        roi="save_and_exit",
        threshold=0.85
    )
    NeedRepair=ScreenObject(
        ref="REPAIR_NEEDED",
        roi="repair_needed"
    )
    ItemPickupText=ScreenObject(
        ref=["ITEM_PICKUP_ENABLED","ITEM_PICKUP_DISABLED"],
        roi="chat_line_1",
        best_match=True
    )
    ShrineArea=ScreenObject(
        ref=["SHRINE", "HIDDEN_STASH", "SKULL_PILE"],
        roi="shrine_check",
        threshold=0.8
    )
    TownPortal=ScreenObject(
        ref="BLUE_PORTAL",
        threshold=0.8,
        roi="tp_search",
        normalize_monitor=True
    )
    # Same portal template, narrower search area around screen center.
    TownPortalReduced=ScreenObject(
        ref="BLUE_PORTAL",
        threshold=0.8,
        roi="reduce_to_center",
        normalize_monitor=True
    )
    GoldBtnInventory=ScreenObject(
        ref="INVENTORY_GOLD_BTN",
        roi="gold_btn",
        normalize_monitor=True
    )
    GoldBtnStash=ScreenObject(
        ref="INVENTORY_GOLD_BTN",
        roi="gold_btn_stash",
        normalize_monitor=True
    )
    GoldBtnVendor=ScreenObject(
        ref="VENDOR_GOLD",
        roi="gold_btn_stash",
        normalize_monitor=True
    )
    GoldNone=ScreenObject(
        ref="INVENTORY_NO_GOLD",
        roi="inventory_gold",
        threshold=0.83,
        use_grayscale=True
    )
    TownPortalSkill=ScreenObject(
        ref=["TP_ACTIVE", "TP_INACTIVE"],
        roi="skill_right",
        best_match=True,
        threshold=0.79
    )
    RepairBtn=ScreenObject(
        ref="REPAIR_BTN",
        roi="repair_btn",
        normalize_monitor=True,
        use_grayscale=True
    )
    # Death message is red text; color_match guards against false positives.
    YouHaveDied=ScreenObject(
        ref="YOU_HAVE_DIED",
        roi="death",
        threshold=0.9,
        color_match=Config().colors["red"],
        use_grayscale=True
    )
    Overburdened=ScreenObject(
        ref=["INVENTORY_FULL_MSG_0", "INVENTORY_FULL_MSG_1"],
        roi="chat_line_1",
        threshold=0.9
    )
    # One corpse template per character class plus a generic one.
    Corpse=ScreenObject(
        ref=["CORPSE", "CORPSE_BARB", "CORPSE_DRU", "CORPSE_NEC", "CORPSE_PAL", "CORPSE_SIN", "CORPSE_SORC", "CORPSE_ZON"],
        roi="corpse",
        threshold=0.8
    )
    BeltExpandable=ScreenObject(
        ref="BELT_EXPANDABLE",
        roi="gamebar_belt_expandable",
        threshold=0.8
    )
    # No ROI: NPC context menu can pop up anywhere near the NPC.
    NPCMenu=ScreenObject(
        ref=["TALK", "CANCEL"],
        threshold=0.8,
        use_grayscale=True
    )
    ChatIcon=ScreenObject(
        ref="CHAT_ICON",
        roi="chat_icon",
        threshold=0.8,
        use_grayscale=True
    )
    LeftPanel=ScreenObject(
        ref="CLOSE_PANEL",
        roi="left_panel_header",
        threshold=0.8,
        use_grayscale=True
    )
    RightPanel=ScreenObject(
        ref=["CLOSE_PANEL", "CLOSE_PANEL_2"],
        roi="right_panel_header",
        threshold=0.8,
        use_grayscale=True
    )
    NPCDialogue=ScreenObject(
        ref="NPC_DIALOGUE",
        roi="npc_dialogue",
        threshold=0.8,
        use_grayscale=True
    )
    SkillsExpanded=ScreenObject(
        ref="BIND_SKILL",
        roi="bind_skill",
        threshold=0.8,
        use_grayscale=True
    )
    # Red "unidentified" item label; searched over the whole screen.
    Unidentified=ScreenObject(
        ref="UNIDENTIFIED",
        threshold=0.8,
        color_match=Config().colors["red"]
    )
    Key=ScreenObject(
        ref="INV_KEY",
        threshold=0.8,
        normalize_monitor=True
    )
    EmptyStashSlot=ScreenObject(
        ref="STASH_EMPTY_SLOT",
        roi="left_inventory",
        threshold=0.8,
    )
    NotEnoughGold=ScreenObject(
        ref="NOT_ENOUGH_GOLD",
        threshold=0.9,
        color_match=Config().colors["red"],
        use_grayscale=True
    )
    QuestSkillBtn=ScreenObject(
        ref="QUEST_SKILL_BTN",
        threshold=0.9,
        use_grayscale=True,
        roi="quest_skill_btn"
    )
def detect_screen_object(screen_object: ScreenObject, img: np.ndarray = None) -> TemplateMatch:
    """Search a screenshot (or *img*) for the given ScreenObject's templates.

    :param screen_object: descriptor holding template refs and search options
    :param img: optional image to search; a fresh grab() is used when None
    :return: the TemplateMatch (check ``.valid`` for success)
    """
    roi = Config().ui_roi[screen_object.roi] if screen_object.roi else None
    img = grab() if img is None else img
    # The original returned `match` on both the hit and miss paths
    # (`if match.valid: return match` followed by `return match`);
    # a single return is equivalent.
    return TemplateFinder().search(
        ref=screen_object.ref,
        inp_img=img,
        threshold=screen_object.threshold,
        roi=roi,
        best_match=screen_object.best_match,
        use_grayscale=screen_object.use_grayscale,
        normalize_monitor=screen_object.normalize_monitor)
def select_screen_object_match(match: TemplateMatch, delay_factor: tuple[float, float] = (0.9, 1.1)) -> None:
    """Move the cursor onto *match* and left-click it, with short human-like pauses."""
    target = convert_screen_to_monitor(match.center)
    mouse.move(*target, delay_factor=delay_factor)
    wait(0.05, 0.09)
    mouse.click("left")
    wait(0.05, 0.09)
def wait_for_screen_object(screen_object: ScreenObject, time_out: int = None) -> TemplateMatch:
    """Block until *screen_object* is found on screen or the timeout elapses.

    :param screen_object: descriptor holding template refs and search options
    :param time_out: seconds to wait; falsy values (None or 0) fall back to 30
    :return: the TemplateMatch; ``.valid`` is False when the wait timed out
    """
    roi = Config().ui_roi[screen_object.roi] if screen_object.roi else None
    # The original returned `match` on both the hit and miss paths; a single
    # return is equivalent. `time_out or 30` preserves the original
    # truthiness-based fallback (0 also maps to 30).
    return TemplateFinder().search_and_wait(
        ref=screen_object.ref,
        time_out=time_out or 30,
        threshold=screen_object.threshold,
        roi=roi,
        best_match=screen_object.best_match,
        use_grayscale=screen_object.use_grayscale,
        normalize_monitor=screen_object.normalize_monitor)
def hover_over_screen_object_match(match) -> None:
    """Move the mouse over *match* and pause briefly so hover effects trigger."""
    pos = convert_screen_to_monitor(match.center)
    mouse.move(*pos)
    wait(0.2, 0.4)
def list_visible_objects(img: np.ndarray = None) -> list:
    """Return the names of all ScreenObjects detectable in *img* (or a fresh grab).

    :param img: optional screenshot; when None a new grab() is taken once and
        reused for every detection.
    :return: list of ScreenObjects attribute names whose match is valid
    """
    img = grab() if img is None else img
    # Skip dunder attributes and the placeholder None entries on the class;
    # comprehension replaces the original append loop and removes the stale
    # commented-out `visible.append(match)` line.
    return [
        name
        for name, obj in vars(ScreenObjects).items()
        if not name.startswith("__")
        and obj is not None
        and detect_screen_object(obj, img).valid
    ]
def center_mouse(delay_factor: list = None):
    """Move the mouse to the window center (abs (0, 0)) with slight randomization."""
    center_m = convert_abs_to_monitor((0, 0))
    kwargs = {"randomize": 20}
    if delay_factor:
        kwargs["delay_factor"] = delay_factor
    mouse.move(*center_m, **kwargs)
# Testing: Move to whatever ui to test and run
if __name__ == "__main__":
    import keyboard
    from screen import start_detecting_window
    start_detecting_window()
    # f12 hard-kills the whole process at any time (os._exit skips cleanup).
    keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
    print("Go to D2R window and press f11 to start game")
    keyboard.wait("f11")
    from config import Config
    # Continuously dump which UI elements are currently detected on screen.
    while 1:
        print(list_visible_objects())
        time.sleep(1)
| 28.520588 | 123 | 0.630711 |
9391a1122d1f2fe0ef92081d3a68242ac49fd384 | 1,353 | py | Python | pyclesperanto_prototype/_tier1/_average_distance_of_n_nearest_distances.py | DrLachie/pyclesperanto_prototype | 56843fac2543265c40f108fd40eac3ecf85c8458 | [
"BSD-3-Clause"
] | null | null | null | pyclesperanto_prototype/_tier1/_average_distance_of_n_nearest_distances.py | DrLachie/pyclesperanto_prototype | 56843fac2543265c40f108fd40eac3ecf85c8458 | [
"BSD-3-Clause"
] | null | null | null | pyclesperanto_prototype/_tier1/_average_distance_of_n_nearest_distances.py | DrLachie/pyclesperanto_prototype | 56843fac2543265c40f108fd40eac3ecf85c8458 | [
"BSD-3-Clause"
] | null | null | null | from .._tier0 import execute
from .._tier0 import plugin_function
from .._tier0 import Image
from .._tier0 import create_none
@plugin_function(output_creator=create_none)
def average_distance_of_n_nearest_distances(distance_matrix : Image, distance_vector_destination: Image = None, n : int = 1) -> Image:
    """Determines the n shortest distances for each column in a distance matrix and puts the average of these in a
    vector.

    Note: This function takes the distance to the identical label into account.

    Parameters
    ----------
    distance_matrix: Image
    distance_vector_destination: Image, optional
    n: int

    Returns
    -------
    distance_vector_destination
    """
    from .._tier0 import create

    # Allocate a 1 x num_columns destination vector when none was supplied.
    if distance_vector_destination is None:
        distance_vector_destination = create([1, distance_matrix.shape[1]])

    # todo: rename parameters in cl-kernel to make sense
    kernel_parameters = {
        "src_distancematrix": distance_matrix,
        "dst_indexlist": distance_vector_destination,
        "nPoints": int(n),
    }

    # todo: rename kernel function to fulfill naming conventions
    execute(
        __file__,
        'average_distance_of_n_nearest_distances_x.cl',
        'average_distance_of_n_nearest_distances',
        distance_vector_destination.shape,
        kernel_parameters,
    )
    return distance_vector_destination
| 32.214286 | 134 | 0.726534 |
10b46e5ac5b1295c811c4283766b5badc1f39d11 | 9,672 | py | Python | tests/test_averager2d.py | PrincetonUniversity/ASPIRE-Python | 1bff8d3884183203bd77695a76bccb1efc909fd3 | [
"MIT"
] | 7 | 2018-11-07T16:45:35.000Z | 2020-01-10T16:54:26.000Z | tests/test_averager2d.py | PrincetonUniversity/ASPIRE-Python | 1bff8d3884183203bd77695a76bccb1efc909fd3 | [
"MIT"
] | 1 | 2019-04-05T18:41:39.000Z | 2019-04-05T18:41:39.000Z | tests/test_averager2d.py | PrincetonUniversity/ASPIRE-Python | 1bff8d3884183203bd77695a76bccb1efc909fd3 | [
"MIT"
] | 2 | 2019-06-04T17:01:53.000Z | 2019-07-08T19:01:40.000Z | import logging
import os
from unittest import TestCase
import numpy as np
import pytest
from aspire.basis import DiracBasis, FFBBasis2D
from aspire.classification import (
Averager2D,
BFRAverager2D,
BFSRAverager2D,
BFSReddyChatterjiAverager2D,
ReddyChatterjiAverager2D,
)
from aspire.source import Simulation
from aspire.utils import Rotation
from aspire.volume import Volume
logger = logging.getLogger(__name__)
DATA_DIR = os.path.join(os.path.dirname(__file__), "saved_test_data")
# Ignore Gimbal lock warning for our in plane rotations.
@pytest.mark.filterwarnings("ignore:Gimbal lock detected")
class Averager2DTestCase(TestCase):
    """Base test case for the abstract Averager2D interface.

    Subclasses override `averager` and `_getSrc()` to exercise concrete
    averager implementations against images with known rotations/shifts.
    """

    # Subclasses should override `averager` with a different class.
    averager = Averager2D

    def setUp(self):
        # Low-resolution (17px) ribosome volume keeps projection time small.
        self.vols = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy"))
        ).downsample(17)
        self.resolution = self.vols.resolution
        self.n_img = 3
        self.dtype = np.float64

        # Create a Basis to use in averager.
        self.basis = FFBBasis2D((self.resolution, self.resolution), dtype=self.dtype)

        # This sets up a trivial class, where there is one group having all images.
        self.classes = np.arange(self.n_img, dtype=int).reshape(1, self.n_img)
        self.reflections = np.zeros(self.classes.shape, dtype=bool)

    # This is a workaround to use a `pytest` fixture with `unittest` style cases.
    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog):
        # Expose pytest's log-capture fixture to unittest-style test methods.
        self._caplog = caplog

    def tearDown(self):
        pass

    def _getSrc(self):
        # Base Averager2D does not require anything from source.
        # Subclasses implement specific src
        return None

    def testTypeMismatch(self):
        """A dtype mismatch between basis and averager should log a warning."""
        # Work around ABC, which won't let us test the unimplemented base case.
        self.averager.__abstractmethods__ = set()

        # Intentionally mismatch Basis and Averager dtypes
        if self.dtype == np.float32:
            test_dtype = np.float64
        else:
            test_dtype = np.float32

        with self._caplog.at_level(logging.WARN):
            self.averager(self.basis, self._getSrc(), dtype=test_dtype)
            assert "does not match dtype" in self._caplog.text

    def _construct_rotations(self):
        """
        Constructs a `Rotation` object which can yield `angles` as used by `Source`s.
        """

        # Get a list of angles to test
        self.thetas, self.step = np.linspace(
            0, 2 * np.pi, num=self.n_img, endpoint=False, retstep=True, dtype=self.dtype
        )

        # Common 3D rotation matrix, about z.
        def r(theta):
            return np.array(
                [
                    [np.cos(theta), -np.sin(theta), 0],
                    [np.sin(theta), np.cos(theta), 0],
                    [0, 0, 1],
                ],
                dtype=self.dtype,
            )

        # Construct a sequence of rotation matrices using thetas
        _rots = np.empty((self.n_img, 3, 3), dtype=self.dtype)
        for n, theta in enumerate(self.thetas):
            # Note we negate theta to match Rotation convention.
            _rots[n] = r(-theta)

        # Use our Rotation class (maybe it should be able to do this one day?)
        self.rots = Rotation.from_matrix(_rots)
@pytest.mark.filterwarnings("ignore:Gimbal lock detected")
class BFRAverager2DTestCase(Averager2DTestCase):
    """Tests brute-force rotational (BFR) alignment against known rotations."""

    averager = BFRAverager2D

    def setUp(self):
        # Resolution of the brute-force angular search grid.
        self.n_search_angles = 360
        super().setUp()

        # We'll construct our Rotations now
        self._construct_rotations()

        # Create a `src` to feed our tests
        self.src = self._getSrc()

        # Get the image coef
        self.coefs = self.basis.evaluate_t(self.src.images(0, self.n_img))

    def _getSrc(self):
        # Simulation source producing one projection per known rotation.
        if not hasattr(self, "shifts"):
            # Default to no offsets; shift-aware subclasses set self.shifts
            # before this runs.
            self.shifts = np.zeros((self.n_img, 2))

        return Simulation(
            vols=self.vols,
            L=self.resolution,
            n=self.n_img,
            C=1,
            angles=self.rots.angles,
            offsets=self.shifts,
            amplitudes=np.ones(self.n_img),
            seed=12345,
            dtype=self.dtype,
        )

    def testNoRot(self):
        """
        Test we raise an error when our basis does not provide `rotate` method.
        """
        # DiracBasis does not provide `rotate`,
        basis = DiracBasis((self.resolution, self.resolution), dtype=self.dtype)

        # and that should raise an error during instantiation.
        with pytest.raises(RuntimeError, match=r".* must provide a `rotate` method."):
            _ = self.averager(basis, self._getSrc())

    def testAverager(self):
        """
        Construct a stack of images with known rotations.
        Rotationally averager the stack and compare output with known rotations.
        """
        # Construct the Averager and then call the `align` method
        avgr = self.averager(self.basis, self._getSrc(), n_angles=self.n_search_angles)
        _rotations, _shifts, _ = avgr.align(self.classes, self.reflections, self.coefs)
        # BFR estimates rotations only; no shift estimates are produced.
        self.assertIsNone(_shifts)

        # Crude check that we are closer to known angle than the next rotation
        self.assertTrue(np.all((_rotations - self.thetas) <= (self.step / 2)))
        # Fine check that we are within n_angles.
        self.assertTrue(
            np.all((_rotations - self.thetas) <= (2 * np.pi / self.n_search_angles))
        )
@pytest.mark.filterwarnings("ignore:Gimbal lock detected")
class BFSRAverager2DTestCase(BFRAverager2DTestCase):
    """Tests brute-force shift + rotation (BFSR) alignment on shifted images."""

    averager = BFSRAverager2D

    def setUp(self):
        # Inherit basic params from the base class
        # (deliberately skips BFRAverager2DTestCase.setUp so shifts can be
        # configured before the source/coefs are built below).
        super(BFRAverager2DTestCase, self).setUp()

        # Setup shifts, don't shift the base image
        self.shifts = np.zeros((self.n_img, 2))
        self.shifts[1:, 0] = 2
        self.shifts[1:, 1] = 4

        # Execute the remaining setup from BFRAverager2DTestCase
        super().setUp()

    def testNoShift(self):
        """
        Test we raise an error when our basis does not provide `shift` method.
        """
        # DiracBasis does not provide `rotate` or `shift`.
        basis = DiracBasis((self.resolution, self.resolution), dtype=self.dtype)
        # The missing `rotate` case was already covered by (inherited) NoRot.
        # Add a dummy rotate method; we will still be missing `shift`,
        basis.rotate = lambda x: x

        # and that should raise an error during instantiation.
        with pytest.raises(RuntimeError, match=r".* must provide a `shift` method."):
            _ = self.averager(basis, self._getSrc())

    def testAverager(self):
        """
        Construct a stack of images with known rotations.
        Rotationally averager the stack and compare output with known rotations.
        """
        # Construct the Averager and then call the main `align` method
        avgr = self.averager(
            self.basis,
            self._getSrc(),
            n_angles=self.n_search_angles,
            n_x_shifts=1,
            n_y_shifts=1,
        )
        _rotations, _shifts, _ = avgr.align(self.classes, self.reflections, self.coefs)

        # Crude check that we are closer to known angle than the next rotation
        self.assertTrue(np.all((_rotations - self.thetas) <= (self.step / 2)))
        # Fine check that we are within n_angles.
        self.assertTrue(
            np.all((_rotations - self.thetas) <= (2 * np.pi / self.n_search_angles))
        )

        # Check that we are _not_ shifting the base image
        self.assertTrue(np.all(_shifts[0][0] == 0))

        # Check that we produced estimated shifts away from origin
        # Note that Simulation's rot+shift is generally not equal to shift+rot.
        # Instead we check that some combination of
        # non zero shift+rot improved corr.
        # Perhaps in the future should check more details.
        self.assertTrue(np.all(np.hypot(*_shifts[0][1:].T) >= 1))
@pytest.mark.filterwarnings("ignore:Gimbal lock detected")
class ReddyChatterjiAverager2DTestCase(BFSRAverager2DTestCase):
    """Runs the shifted-rotation scenario through the ReddyChatterji averager."""

    averager = ReddyChatterjiAverager2D

    def testAverager(self):
        """
        Construct a stack of images with known rotations.
        Rotationally averager the stack and compare output with known rotations.
        """
        # Construct the Averager and then call the main `align` method
        # (this averager takes no brute-force search-grid parameters).
        avgr = self.averager(
            composite_basis=self.basis,
            src=self._getSrc(),
            dtype=self.dtype,
        )
        _rotations, _shifts, _ = avgr.align(self.classes, self.reflections, self.coefs)

        # Crude check that we are closer to known angle than the next rotation
        self.assertTrue(np.all((_rotations - self.thetas) <= (self.step / 2)))
        # Fine check that we are within one degree.
        self.assertTrue(np.all((_rotations - self.thetas) <= (2 * np.pi / 360.0)))

        # Check that we are _not_ shifting the base image
        self.assertTrue(np.all(_shifts[0][0] == 0))

        # Check that we produced estimated shifts away from origin
        # Note that Simulation's rot+shift is generally not equal to shift+rot.
        # Instead we check that some combination of
        # non zero shift+rot improved corr.
        # Perhaps in the future should check more details.
        self.assertTrue(np.all(np.hypot(*_shifts[0][1:].T) >= 1))
@pytest.mark.filterwarnings("ignore:Gimbal lock detected")
class BFSReddyChatterjiAverager2DTestCase(ReddyChatterjiAverager2DTestCase):
    # Re-runs the inherited ReddyChatterji tests against the
    # brute-force-shift variant of the averager.
    averager = BFSReddyChatterjiAverager2D
3df9095cfb2b9c2980daa523894ab7380975fb22 | 4,272 | py | Python | parlai/scripts/multiprocessing_train.py | josharnoldjosh/ParlAI | 262f8414444c5a0cc70895e519ffc63a7909faf2 | [
"MIT"
] | 1 | 2019-07-25T17:30:18.000Z | 2019-07-25T17:30:18.000Z | parlai/scripts/multiprocessing_train.py | abisee/ParlAI | 5507d4745ca23b23af311673a6b0d1b7e72eb5cd | [
"MIT"
] | null | null | null | parlai/scripts/multiprocessing_train.py | abisee/ParlAI | 5507d4745ca23b23af311673a6b0d1b7e72eb5cd | [
"MIT"
] | 1 | 2019-07-28T14:53:18.000Z | 2019-07-28T14:53:18.000Z | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Main launch script for single-host, multi-GPU training.
This is a drop-in replacement for train_model.py. This script will launch N
subprocess, each which runs the full training loop independently.
Uses torch.nn.parallel.DistributedDataParallel for its main uses. Agents must
specifically implement the wrapper of DistributedDatParallel, but all
TorchRankerAgents and TorchGeneratorAgents support this.
"""
import torch
try:
    # We need to run this *very first*, but subprocesses will throw an
    # exception when running it
    torch.multiprocessing.set_start_method("spawn")
except RuntimeError:
    # The start method was already set (e.g. inside a spawned worker
    # re-importing this module); that is expected and safe to ignore.
    pass
import random
import copy
import os
import signal
import torch.distributed as dist
import parlai.scripts.train_model as single_train
import parlai.core.distributed_utils as distributed_utils
def multiprocess_train(
    rank, opt, port=61337, rank_offset=0, gpu=None, hostname='localhost'
):
    """
    Subprocess which initializes distributed training, and begins training.

    This should be launched n times for n GPUs; this is handled either in main
    or via srun.

    :param int rank: This process's rank - 1. (Starts at -1 ... n - 2). See comments.
    :param opt: command line options
    :param int port: A TCP port to use. This will need to be changed to run
        multiple distributed training setups on the same machine.
    :param int gpu: Which GPU to use. Defaults to using rank and local devices,
        but must be manually specified when using many-hosts.
    :param str hostname: Hostname of the main server.
    """
    # Set per-host options. Deep-copy so each worker mutates its own opt dict.
    opt = copy.deepcopy(opt)
    # we need to manually adjust the rank differently in multiprocessing
    # and distributed train
    rank = rank + rank_offset
    opt['rank'] = rank
    if gpu is None:
        # default assumption is local GPUs
        gpu = rank % torch.cuda.device_count()
    opt['gpu'] = gpu
    # make sure we don't just use whatever GPU was saved in the model file
    if 'override' not in opt:
        opt['override'] = {}
    opt['override']['gpu'] = gpu

    # Suppress output of workers except the main host.
    if opt.get('verbose') or rank != 0:
        print_prefix = '[rank:{:3d}]'.format(rank)
    else:
        print_prefix = None
    # Only rank 0 prints (unless --verbose was given); other ranks are muted.
    suppress_output = not opt.get('verbose') and rank != 0

    with distributed_utils.override_print(suppress_output, print_prefix):
        # perform distributed setup, ensuring all hosts are ready
        torch.cuda.set_device(opt['gpu'])
        dist.init_process_group(
            backend="nccl",
            init_method="tcp://{}:{}".format(hostname, port),
            world_size=opt['distributed_world_size'],
            rank=rank,
        )
        print("Distributed group initialized")

        # make sure all parameters will be in sync across workers
        torch.manual_seed(42)

        # Run the actual training
        return single_train.TrainLoop(opt).train()
def launch_and_train(opt, port):
    """Perform a fork() to many processes.

    Spawns ``distributed_world_size - 1`` worker processes; the calling
    process itself runs the rank-0 training loop. Returns whatever the
    rank-0 loop returns.
    """
    # Launch multiple subprocesses
    spawncontext = torch.multiprocessing.spawn(
        multiprocess_train,
        # need to give rank offset as 1 to cover the fact that the main
        # process is rank 0, but that spawn() doesn't let you control rank
        (opt, port, 1),
        nprocs=opt['distributed_world_size'] - 1,  # main proc will also run loop
        join=False,
    )

    try:
        retval = multiprocess_train(0, opt, port)
        spawncontext.join()
        return retval
    except KeyboardInterrupt:
        # tell the subprocesses to stop too
        for p in spawncontext.processes:
            if p.is_alive():
                os.kill(p.pid, signal.SIGINT)
        raise
def setup_args():
    """Build the train_model argument parser, extended with distributed args."""
    parser = single_train.setup_args()
    parser.add_distributed_training_args()
    # Default to one worker per visible GPU on this host.
    parser.set_defaults(distributed_world_size=torch.cuda.device_count())
    return parser
def main():
    """Entry point: parse args, pick a random TCP port, and launch training."""
    opt = setup_args().parse_args()
    # A random high port reduces the chance of colliding with another
    # distributed run on the same machine.
    port = random.randint(32000, 48000)
    return launch_and_train(opt, port)


if __name__ == '__main__':
    main()
| 32.363636 | 85 | 0.681414 |
334dc54f4613c8c819902b34a067c7f7077bca78 | 4,872 | py | Python | openshift/installer/vendored/openshift-ansible-3.7.52-1/roles/lib_vendored_deps/filter_plugins/openshift_version.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | null | null | null | openshift/installer/vendored/openshift-ansible-3.7.52-1/roles/lib_vendored_deps/filter_plugins/openshift_version.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 3 | 2016-12-01T23:01:36.000Z | 2016-12-02T00:16:48.000Z | openshift/installer/vendored/openshift-ansible-3.7.52-1/roles/lib_vendored_deps/filter_plugins/openshift_version.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 2 | 2018-10-16T05:11:13.000Z | 2018-11-07T01:46:29.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom version comparison filters for use in openshift-ansible
"""
# pylint can't locate distutils.version within virtualenv
# https://github.com/PyCQA/pylint/issues/73
# pylint: disable=no-name-in-module, import-error
from distutils.version import LooseVersion
def legacy_gte_function_builder(name, versions):
    """
    Build and return a version comparison function.

    Ex: name = 'oo_version_gte_3_1_or_1_1'
        versions = {'enterprise': '3.1', 'origin': '1.1'}

    returns oo_version_gte_3_1_or_1_1, a function which based on the
    version and deployment type will return true if the provided
    version is greater than or equal to the function's version
    """
    enterprise_version = versions['enterprise']
    origin_version = versions['origin']

    def _gte_function(version, deployment_type):
        """
        Dynamic function created by legacy_gte_function_builder.

        Ex: version = '3.1'
            deployment_type = 'openshift-enterprise'
        returns True/False (False for unknown deployment types)
        """
        # Wrap both sides in LooseVersion explicitly. The previous code
        # compared `str(version) >= LooseVersion(...)`, which only worked
        # via Python's reflected comparison fallback; being explicit makes
        # the semantic-version comparison unambiguous.
        version_gte = False
        if 'enterprise' in deployment_type:
            if LooseVersion(str(version)) >= LooseVersion(enterprise_version):
                version_gte = True
        elif 'origin' in deployment_type:
            if LooseVersion(str(version)) >= LooseVersion(origin_version):
                version_gte = True
        return version_gte

    _gte_function.__name__ = name
    return _gte_function
def gte_function_builder(name, gte_version):
    """
    Create a named closure testing whether a version is >= ``gte_version``.

    Ex: name = 'oo_version_gte_3_6'
        gte_version = '3.6'

    The returned function accepts a single version argument and returns
    True when that version compares greater than or equal to the builder's
    ``gte_version``.
    """
    def _gte_function(version):
        """Return True when ``version`` >= the builder's ``gte_version``."""
        return str(version) >= LooseVersion(gte_version)

    _gte_function.__name__ = name
    return _gte_function
# pylint: disable=too-few-public-methods
class FilterModule(object):
    """
    Ansible filter plugin exposing version-comparison filters.
    """

    # Each element of versions is composed of (major, minor_start, minor_end).
    # Origin began versioning 3.x with 3.6, so begin 3.x with 3.6.
    versions = [(3, 6, 10)]

    def __init__(self):
        """
        Creates a new FilterModule for ose version checking.
        """
        self._filters = {}

        # Generic filters: one `oo_version_gte_<major>_<minor>` per minor
        # release listed in `versions`.
        for major, minor_start, minor_end in self.versions:
            for minor in range(minor_start, minor_end):
                name = 'oo_version_gte_{}_{}'.format(major, minor)
                self._filters[name] = gte_function_builder(
                    name, "{}.{}.0".format(major, minor))

        # Create filters with special versioning requirements.
        # Treat all Origin 1.x as special case.
        legacy_filters = [
            {'name': 'oo_version_gte_3_1_or_1_1',
             'versions': {'enterprise': '3.0.2.905', 'origin': '1.1.0'}},
            {'name': 'oo_version_gte_3_1_1_or_1_1_1',
             'versions': {'enterprise': '3.1.1', 'origin': '1.1.1'}},
            {'name': 'oo_version_gte_3_2_or_1_2',
             'versions': {'enterprise': '3.1.1.901', 'origin': '1.2.0'}},
            {'name': 'oo_version_gte_3_3_or_1_3',
             'versions': {'enterprise': '3.3.0', 'origin': '1.3.0'}},
            {'name': 'oo_version_gte_3_4_or_1_4',
             'versions': {'enterprise': '3.4.0', 'origin': '1.4.0'}},
            {'name': 'oo_version_gte_3_5_or_1_5',
             'versions': {'enterprise': '3.5.0', 'origin': '1.5.0'}},
        ]
        for legacy in legacy_filters:
            self._filters[legacy['name']] = legacy_gte_function_builder(
                legacy['name'], legacy['versions'])

    def filters(self):
        """
        Return the filters mapping.
        """
        return self._filters
| 37.476923 | 105 | 0.55624 |
23a18dc3ce6d43c5c205febfbe033d3c70a2738e | 5,102 | py | Python | contrib/linearize/linearize-hashes.py | Soptq/bitgesell | 4e31314180d8cadaee5868c4d797208ddac7d392 | [
"MIT"
] | 12 | 2020-05-14T20:22:20.000Z | 2021-06-07T19:21:34.000Z | contrib/linearize/linearize-hashes.py | slowriot/bitgesell | 9b7f9e207323e9863253ad2598068b0ad0b159d7 | [
"MIT"
] | 55 | 2021-03-24T15:00:42.000Z | 2022-02-22T10:07:14.000Z | contrib/linearize/linearize-hashes.py | slowriot/bitgesell | 9b7f9e207323e9863253ad2598068b0ad0b159d7 | [
"MIT"
] | 35 | 2021-02-03T03:02:04.000Z | 2021-11-22T07:27:55.000Z | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from http.client import HTTPConnection
import json
import re
import base64
import sys
import os
import os.path
settings = {}
def hex_switchEndian(s):
    """Switch the endianness of a hex string (in pairs of hex chars)."""
    # Split into 2-char chunks, reverse the chunk order, and rejoin.
    # (No bytes round-trip needed: input and output are both str.)
    pairs = [s[i:i + 2] for i in range(0, len(s), 2)]
    pairs.reverse()
    return ''.join(pairs)
class BGLRPC:
    """Minimal JSON-RPC client for a BGL node using HTTP basic auth."""

    def __init__(self, host, port, username, password):
        credentials = "%s:%s" % (username, password)
        credentials = credentials.encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(credentials)
        self.conn = HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST a JSON-RPC payload; return the decoded reply, or None on failure."""
        headers = {
            'Authorization': self.authhdr,
            'Content-type': 'application/json',
        }
        try:
            self.conn.request('POST', '/', json.dumps(obj), headers)
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        return json.loads(resp.read().decode('utf-8'))

    @staticmethod
    def build_request(idx, method, params):
        """Assemble a single JSON-RPC 1.1 request object."""
        request = {'version': '1.1',
                   'method': method,
                   'id': idx}
        request['params'] = [] if params is None else params
        return request

    @staticmethod
    def response_is_error(resp_obj):
        """True when the reply carries a non-null 'error' member."""
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the hash of every block in [min_height, max_height], one per line.

    Issues `getblockhash` requests in batches of up to `max_blocks_per_call`.
    Hashes are byte-swapped first when settings['rev_hash_bytes'] == 'true'.
    """
    rpc = BGLRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    while height < settings['max_height']+1:
        # Request at most max_blocks_per_call hashes in a single batch.
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None

        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                sys.exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])

        height += num_blocks
def get_rpc_cookie():
    """Read the node's .cookie file and store the RPC credentials in `settings`.

    The cookie file lives in settings['datadir'] and contains "user:password".
    """
    cookie_path = os.path.join(os.path.expanduser(settings['datadir']), '.cookie')
    with open(cookie_path, 'r', encoding="ascii") as f:
        combined = f.readline()
    parts = combined.split(":")
    settings['rpcuser'] = parts[0]
    settings['rpcpassword'] = parts[1]
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Read the key=value config file into the module-level `settings` dict.
    f = open(sys.argv[1], encoding="utf8")
    for line in f:
        # skip comment lines
        m = re.search(r'^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for any settings not given in the config file.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'

    # Credentials may come from rpcuser/rpcpassword, or from the node's
    # cookie file when only a datadir is configured.
    use_userpass = True
    use_datadir = False
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        use_userpass = False
    if 'datadir' in settings and not use_userpass:
        use_datadir = True
    if not use_userpass and not use_datadir:
        print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Get the rpc user and pass from the cookie if the datadir is set
    if use_datadir:
        get_rpc_cookie()

    get_block_hashes(settings)
| 33.346405 | 108 | 0.606429 |
57756e9dee00799aedf4faff7d697c0b84560f83 | 19,164 | py | Python | torchlib/save_segneuralnet.py | CarlosPena00/pytorch-unet | 8365bace23e4b04b9c5b75cd6720807ea8cac5ab | [
"MIT"
] | null | null | null | torchlib/save_segneuralnet.py | CarlosPena00/pytorch-unet | 8365bace23e4b04b9c5b75cd6720807ea8cac5ab | [
"MIT"
] | null | null | null | torchlib/save_segneuralnet.py | CarlosPena00/pytorch-unet | 8365bace23e4b04b9c5b75cd6720807ea8cac5ab | [
"MIT"
] | null | null | null |
import os
import math
import shutil
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import time
from tqdm import tqdm
from . import models as nnmodels
from . import losses as nloss
from . import metrics
from pytvision.neuralnet import NeuralNetAbstract
from pytvision.logger import Logger, AverageFilterMeter, AverageMeter
from pytvision import graphic as gph
from pytvision import netlearningrate
from pytvision import utils as pytutils
#----------------------------------------------------------------------------------------------
# Neural Net for Segmentation
class SegmentationNeuralNet(NeuralNetAbstract):
    """
    Neural network wrapper for semantic segmentation.

    Handles model/loss/optimizer construction (`create`), the training and
    validation loops (optionally with cascaded refinement forward passes),
    logging and visdom visualization.
    """

    def __init__(self,
        patchproject,
        nameproject,
        no_cuda=True,
        parallel=False,
        seed=1,
        print_freq=10,
        gpu=0,
        view_freq=1,
        half_precision=False
        ):
        """
        Initialization
            -patchproject (str): path project
            -nameproject (str): name project
            -no_cuda (bool): system cuda (default is True)
            -parallel (bool)
            -seed (int)
            -print_freq (int)
            -gpu (int)
            -view_freq (int): visualization frequency, in epochs
            -half_precision (bool): use torch.cuda.amp mixed precision
        """
        super(SegmentationNeuralNet, self).__init__( patchproject, nameproject, no_cuda, parallel, seed, print_freq, gpu )
        self.view_freq = view_freq
        self.half_precision = half_precision

    def create(self,
        arch,
        num_output_channels,
        num_input_channels,
        loss,
        lr,
        optimizer,
        lrsch,
        momentum=0.9,
        weight_decay=5e-4,
        pretrained=False,
        size_input=388,
        cascade_type='none'
        ):
        """
        Create

        Args:
            -arch (string): architecture
            -num_output_channels,
            -num_input_channels,
            -loss (string):
            -lr (float): learning rate
            -optimizer (string) :
            -lrsch (string): scheduler learning rate
            -pretrained (bool)
            -cascade_type (string): 'none' | 'ransac' | 'ransac2' | 'simple'
        """
        cfg_opt = { 'momentum': momentum, 'weight_decay': weight_decay }
        cfg_scheduler = { 'step_size': 100, 'gamma': 0.1 }

        super(SegmentationNeuralNet, self).create(
            arch,
            num_output_channels,
            num_input_channels,
            loss,
            lr,
            optimizer,
            lrsch,
            pretrained,
            cfg_opt=cfg_opt,
            cfg_scheduler=cfg_scheduler
        )
        self.size_input = size_input
        self.num_output_channels = num_output_channels
        self.cascade_type = cascade_type
        self.segs_per_forward = 7

        # Select the forward/loss strategy used by train/evaluate.
        if self.cascade_type == 'none':
            self.step = self.default_step
        elif self.cascade_type == 'ransac':
            self.step = self.ransac_step
        elif self.cascade_type == 'ransac2':
            self.step = self.ransac_step2
        elif self.cascade_type == 'simple':
            self.step = self.cascate_step
        else:
            # Fix: the original `raise "Cascada not found"` raised a plain
            # string, which is itself a TypeError in Python 3.
            raise ValueError('Cascade type not found: {}'.format(cascade_type))

        self.accuracy = nloss.Accuracy()
        # Dice is computed over the foreground channels only.
        # Generalized from the original hard-coded 2/4-channel cases
        # (which left dice_dim undefined for any other channel count):
        # 2 -> (1,), 4 -> (1, 2, 3).
        dice_dim = tuple(range(1, num_output_channels))
        self.dice = nloss.Dice(dice_dim)

        # Set the graphic visualization
        self.logger_train = Logger( 'Train', ['loss'], ['accs', 'dices'], self.plotter )
        self.logger_val = Logger( 'Val  ', ['loss'], ['accs', 'dices', 'PQ'], self.plotter )

        self.visheatmap = gph.HeatMapVisdom(env_name=self.nameproject, heatsize=(256,256) )
        self.visimshow = gph.ImageVisdom(env_name=self.nameproject, imsize=(256,256) )

        if self.half_precision:
            self.scaler = torch.cuda.amp.GradScaler()

    def ransac_step(self, inputs, targets, max_deep=4, segs_per_forward=20, src_c=3, verbose=False):
        """RANSAC-style cascade: repeatedly refine random subsets of the
        candidate segmentations, shrinking the pool by a factor of
        `self.segs_per_forward` per level.

        inputs: [B, src_c + n_segs, H, W] (image channels then candidate segs).
        Returns (accumulated loss, last network output).
        """
        srcs = inputs[:, :src_c]
        segs = inputs[:, src_c:]
        final_loss = 0.0
        for lv in range(max_deep):
            n_segs = segs.shape[1]
            new_segs = []
            actual_c = self.segs_per_forward ** (max_deep - lv)
            if verbose:
                print(segs.shape, actual_c)
            actual_seg_ids = np.random.choice(range(n_segs), size=actual_c)
            step_segs = segs[:, actual_seg_ids]
            for idx in range(0, actual_c, self.segs_per_forward):
                mini_inp = torch.cat((srcs, step_segs[:, idx:idx+self.segs_per_forward]), dim=1)
                mini_out = self.net(mini_inp)
                # accumulate the loss of this sub-batch
                # (fix: the verbose branch referenced an undefined
                # `actual_loss`, a NameError when verbose=True)
                step_loss = self.criterion(mini_out, targets)
                final_loss += step_loss
                new_segs.append(mini_out.argmax(1, keepdim=True))
                if verbose:
                    print(mini_inp.shape, idx, idx + self.segs_per_forward, step_loss.item())
            segs = torch.cat(new_segs, dim=1)
        return final_loss, mini_out

    def ransac_step2(self, inputs, targets, max_deep=4, n_times=10, segs_per_forward=20, src_c=3, verbose=False):
        """RANSAC-style cascade, variant 2: run `n_times` refinements over a
        growing pool (each refinement's argmax is appended as a new candidate).

        Returns (accumulated loss, last network output).
        """
        srcs = inputs[:, :src_c]
        segs = inputs[:, src_c:]
        final_loss = 0.0
        for lv in range(n_times):
            n_segs = segs.shape[1]
            actual_seg_ids = np.random.choice(range(n_segs), size=self.segs_per_forward)
            step_segs = segs[:, actual_seg_ids]
            mini_inp = torch.cat((srcs, step_segs), dim=1)
            mini_out = self.net(mini_inp)
            # accumulate loss and append the refined prediction to the pool
            final_loss += self.criterion(mini_out, targets)
            segs = torch.cat((segs, mini_out.argmax(1, keepdim=True)), dim=1)
        return final_loss, mini_out

    def cascate_step(self, inputs, targets, segs_per_forward=20, src_c=3, verbose=False):
        """Simple cascade: consume the (shuffled) candidate pool in chunks of
        `self.segs_per_forward`, replacing each chunk by the network's refined
        prediction until one candidate remains.

        Returns (accumulated loss, last network output).
        """
        srcs = inputs[:, :src_c]
        segs = inputs[:, src_c:]
        lv_segs = segs.clone()
        final_loss = 0.0
        n_segs = lv_segs.shape[1]
        # shuffle the candidate order
        actual_seg_ids = np.random.choice(range(n_segs), size=n_segs, replace=False)
        lv_segs = lv_segs[:, actual_seg_ids]
        while n_segs > 1:
            if verbose:
                print(n_segs)
            inputs_seg = lv_segs[:, :self.segs_per_forward]
            # re-sample (with replacement when fewer remain) to always feed
            # exactly segs_per_forward candidate channels
            inputs_seg_ids = np.random.choice(range(inputs_seg.shape[1]),
                                              size=self.segs_per_forward,
                                              replace=inputs_seg.shape[1] < self.segs_per_forward)
            inputs_seg = inputs_seg[:, inputs_seg_ids]
            mini_inp = torch.cat((srcs, inputs_seg), dim=1)
            mini_out = self.net(mini_inp)
            # accumulate loss (fix: the verbose branch referenced an
            # undefined `actual_loss`, a NameError when verbose=True)
            step_loss = self.criterion(mini_out, targets)
            final_loss += step_loss
            if verbose:
                print(mini_inp.shape, self.segs_per_forward, step_loss.item())
            lv_segs = torch.cat((lv_segs[:, self.segs_per_forward:], mini_out.argmax(1, keepdim=True)), dim=1)
            n_segs = lv_segs.shape[1]
        return final_loss, mini_out

    def default_step(self, inputs, targets):
        """Plain forward pass; returns (loss, outputs)."""
        outputs = self.net(inputs)
        loss = self.criterion(outputs, targets)
        return loss, outputs

    def training(self, data_loader, epoch=0):
        """Run one training epoch over `data_loader`."""
        # reset logger
        self.logger_train.reset()
        data_time = AverageMeter()
        batch_time = AverageMeter()

        # switch to train mode
        self.net.train()

        end = time.time()
        for i, sample in enumerate(data_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            # get data (image, label, weight)
            inputs, targets = sample['image'], sample['label']
            weights = None
            if 'weight' in sample.keys():
                weights = sample['weight']
            batch_size = inputs.shape[0]

            if self.cuda:
                inputs = inputs.cuda()
                targets = targets.cuda()
                if weights is not None:
                    weights = weights.cuda()

            # fit (forward + backward + optimize)
            if self.half_precision:
                with torch.cuda.amp.autocast():
                    loss, outputs = self.step(inputs, targets)
                self.optimizer.zero_grad()
                self.scaler.scale(loss * batch_size).backward()
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                loss, outputs = self.step(inputs, targets)
                self.optimizer.zero_grad()
                (batch_size * loss).backward()
                self.optimizer.step()

            accs = self.accuracy(outputs, targets)
            dices = self.dice(outputs, targets)

            # update logger
            self.logger_train.update(
                {'loss': loss.item()},
                {'accs': accs.item(),
                 'dices': dices.item()},
                batch_size,
            )

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % self.print_freq == 0:
                self.logger_train.logger( epoch, epoch + float(i+1)/len(data_loader), i, len(data_loader), batch_time, )

    def evaluate(self, data_loader, epoch=0):
        """Run one validation epoch; returns the cell-weighted PQ metric."""
        pq_sum = 0
        total_cells = 0
        self.logger_val.reset()
        batch_time = AverageMeter()

        # switch to evaluate mode
        self.net.eval()
        with torch.no_grad():
            end = time.time()
            for i, sample in enumerate(data_loader):
                # get data (image, label)
                inputs, targets = sample['image'], sample['label']
                weights = None
                if 'weight' in sample.keys():
                    weights = sample['weight']
                batch_size = inputs.shape[0]

                if self.cuda:
                    inputs = inputs.cuda()
                    targets = targets.cuda()
                    if weights is not None:
                        weights = weights.cuda()

                # fit (forward)
                if self.half_precision:
                    with torch.cuda.amp.autocast():
                        loss, outputs = self.step(inputs, targets)
                else:
                    loss, outputs = self.step(inputs, targets)

                # measure accuracy and record loss
                accs = self.accuracy(outputs, targets)
                dices = self.dice(outputs, targets)

                # PQ is skipped on epoch 0 (it is expensive and the model
                # is still untrained).
                if epoch == 0:
                    pq = 0
                    n_cells = 1
                else:
                    if False:  # self.skip_background: deliberately disabled toggle
                        out_shape = outputs.shape
                        zeros = torch.zeros((out_shape[0], 1, out_shape[2], out_shape[3])).cuda()
                        outputs = torch.cat([zeros, outputs], 1)
                    all_metrics, n_cells, _ = metrics.get_metrics(targets, outputs)
                    pq = all_metrics['pq']
                    pq_sum += pq * n_cells
                    total_cells += n_cells

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                # update logger
                self.logger_val.update(
                    {'loss': loss.item()},
                    {'accs': accs.item(),
                     'PQ': (pq_sum / total_cells) if total_cells > 0 else 0,
                     'dices': dices.item()},
                    batch_size,
                )

                if i % self.print_freq == 0:
                    self.logger_val.logger(
                        epoch, epoch, i, len(data_loader),
                        batch_time,
                        bplotter=False,
                        bavg=True,
                        bsummary=False,
                    )

        # PQ weighted by the number of cells in each batch
        if total_cells == 0:
            pq_weight = 0
        else:
            pq_weight = pq_sum / total_cells
        print(f"PQ: {pq_weight:0.4f}, {pq_sum:0.4f}, {total_cells}")

        # save validation loss
        self.vallosses = self.logger_val.info['loss']['loss'].avg
        acc = self.logger_val.info['metrics']['accs'].avg

        self.logger_val.logger(
            epoch, epoch, i, len(data_loader),
            batch_time,
            bplotter=True,
            bavg=True,
            bsummary=True,
        )

        # visualization of the last batch, every view_freq epochs
        if epoch % self.view_freq == 0:
            prob = F.softmax(outputs.cpu().float(), dim=1)
            prob = prob.data[0]
            maxprob = torch.argmax(prob, 0)
            self.visheatmap.show('Label', targets.data.cpu()[0].numpy()[1,:,:])
            self.visheatmap.show('Image', inputs.data.cpu()[0].numpy()[0,:,:])
            self.visheatmap.show('Max prob', maxprob.cpu().numpy().astype(np.float32))
            for k in range(prob.shape[0]):
                self.visheatmap.show('Heat map {}'.format(k), prob.cpu()[k].numpy())

        print(f"End Val: wPQ{pq_weight}")
        return pq_weight

    def test(self, data_loader):
        """Run inference over `data_loader`; returns (ids, softmax masks)."""
        masks = []
        ids = []

        # switch to evaluate mode
        self.net.eval()
        with torch.no_grad():
            for i, sample in enumerate(tqdm(data_loader)):
                # get data (image, metadata)
                inputs, meta = sample['image'], sample['metadata']
                idd = meta[:, 0]
                x = inputs.cuda() if self.cuda else inputs

                # fit (forward)
                yhat = self.net(x)
                yhat = F.softmax(yhat, dim=1)
                yhat = pytutils.to_np(yhat)

                masks.append(yhat)
                ids.append(idd)

        return ids, masks

    def __call__(self, image):
        """Inference on a single batch tensor; returns softmax probabilities."""
        # switch to evaluate mode
        self.net.eval()
        with torch.no_grad():
            x = image.cuda() if self.cuda else image
            yhat = self.net(x)
            yhat = F.softmax(yhat, dim=1)
        return yhat

    def _create_model(self, arch, num_output_channels, num_input_channels, pretrained):
        """
        Create model
            -arch (string): select architecture
            -num_output_channels (int)
            -num_input_channels (int)
            -pretrained (bool)
        """
        self.net = None

        #--------------------------------------------------------------------------------------------
        # select architecture
        #--------------------------------------------------------------------------------------------
        kw = {'num_classes': num_output_channels, 'in_channels': num_input_channels, 'pretrained': pretrained}
        self.net = nnmodels.__dict__[arch](**kw)

        self.s_arch = arch
        self.num_output_channels = num_output_channels
        self.num_input_channels = num_input_channels

        if self.cuda == True:
            self.net.cuda()
        if self.parallel == True and self.cuda == True:
            self.net = nn.DataParallel(self.net, device_ids=range(torch.cuda.device_count()))

    def _create_loss(self, loss):
        """Instantiate the loss selected by its short name.

        Comments carried over from the original author mark which losses
        were observed to work ('Pass') or not ('Fail') in this project.
        """
        if loss == 'wmce':  # Not tested
            self.criterion = nloss.WeightedMCEloss()
        elif loss == 'bdice':  # Fail
            self.criterion = nloss.BDiceLoss()
        elif loss == 'wbdice':  # Fail
            self.criterion = nloss.WeightedBDiceLoss()
        elif loss == 'wmcedice':  # Fail
            self.criterion = nloss.WeightedMCEDiceLoss()
        elif loss == 'mcedice':  # Fail
            self.criterion = nloss.MCEDiceLoss()
        elif loss == 'bce':  # Pass
            self.criterion = nloss.BCELoss()
        elif loss == 'bce2c':  # Pass
            self.criterion = nloss.BCELoss2c()
        elif loss == 'mce':  # Pass
            self.criterion = nloss.MCELoss()
        elif loss == 'wbce':
            self.criterion = nloss.WeightedBCELoss()
        elif loss == 'wce':  # Pass
            self.criterion = nloss.WeightedCrossEntropyLoss()
        elif loss == 'wfocalce':  # Pass
            self.criterion = nloss.WeightedCEFocalLoss()
        elif loss == 'focaldice':  # Pass
            self.criterion = nloss.FocalDiceLoss()
        elif loss == 'wfocaldice':  # Pass
            self.criterion = nloss.WeightedFocalDiceLoss()
        elif loss == 'dice':  # FAIL
            self.criterion = nloss.DiceLoss()
        elif loss == 'msedice':  # FAIL
            self.criterion = nloss.MSEDICELoss()
        elif loss == 'mcc':  # FAIL
            self.criterion = nloss.MCCLoss()
        elif loss == 'mdice':  # FAIL
            self.criterion = nloss.MDiceLoss()
        elif loss == 'wcefd':
            self.criterion = nloss.WeightedCEFocalDice()
        elif loss == 'jreg':
            # Pairwise class-coupling weights for the regularized loss;
            # only 2- and 4-class configurations are supported.
            if self.num_output_channels == 2:
                lambda_dict = {'0': {'0': '1', '1': '0.5'},
                               '1': {'0': '0.5', '1': '1'}}
            if self.num_output_channels == 4:
                lambda_dict = {'0': {'0': '1', '1': '0.5', '2': '0.5', '3': '0.5'},
                               '1': {'0': '0.5', '1': '1', '2': '0.5', '3': '0.5'},
                               '2': {'0': '0.5', '1': '0.5', '2': '1', '3': '0.5'},
                               '3': {'0': '0.5', '1': '0.5', '2': '0.5', '3': '1'}}
            self.criterion = nloss.WCE_J_SIMPL(lambda_dict=lambda_dict)
        else:
            # Fix: was `assert(False)`, which is stripped under `python -O`
            # and gives no diagnostic.
            raise ValueError('Unknown loss: {}'.format(loss))

        self.s_loss = loss
97e535165514712553cbcd0b5270a719c6cb82eb | 5,258 | py | Python | examples/embedded_boundary.py | dbstein/ipde | 834e16a617f47a3eabe3307ba151d5b7db527b30 | [
"Apache-2.0"
] | 2 | 2019-10-17T15:29:38.000Z | 2021-02-19T20:01:34.000Z | examples/embedded_boundary.py | dbstein/ipde | 834e16a617f47a3eabe3307ba151d5b7db527b30 | [
"Apache-2.0"
] | null | null | null | examples/embedded_boundary.py | dbstein/ipde | 834e16a617f47a3eabe3307ba151d5b7db527b30 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pybie2d
from ipde.embedded_boundary import EmbeddedBoundary
from ipde.heavisides import SlepianMollifier
from ipde.derivatives import fd_x_4, fd_y_4, fourier
from personal_utilities.arc_length_reparametrization import arc_length_parameterize
# Example script: build an embedded boundary over a star-shaped curve and
# verify interpolation and differentiation against an analytic test function.
star = pybie2d.misc.curve_descriptions.star
GSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary
Grid = pybie2d.grid.Grid

nb = 200             # number of boundary points
ng = int(nb/2)       # grid points per dimension
M = 16               # number of radial (annular) grid layers
pad_zone = 4
interior = False     # solve the exterior problem
slepian_r = 20
reparametrize = False

# get heaviside function
MOL = SlepianMollifier(slepian_r)
# construct boundary
bdy = GSB(c=star(nb, a=0.1, f=5))
# reparametrize if reparametrizing
if reparametrize:
    bdy = GSB(*arc_length_parameterize(bdy.x, bdy.y))
# construct a grid
grid = Grid([-1.5, 1.5], ng, [-1.5, 1.5], ng, x_endpoints=[True, False], y_endpoints=[True, False])
# construct embedded boundary
ebdy = EmbeddedBoundary(bdy, interior, M, grid.xh*0.75, pad_zone, MOL.step)
# register the grid
print('\nRegistering the grid')
ebdy.register_grid(grid, verbose=True)

################################################################################
# Make basic plots

fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.phys)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('Phys')

fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.grid_in_annulus)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('In Annulus')

fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.grid_step)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('Heaviside')

fig, ax = plt.subplots()
ax.scatter(ebdy.radial_x, ebdy.radial_y, color='blue', s=10, label='special coordinates')
ax.scatter(ebdy.bdy.x, ebdy.bdy.y, color='black', s=10, label='boundary')
ax.scatter(ebdy.interface.x, ebdy.interface.y, color='gray', s=10, label='interface')
ax.legend()
ax.set_title('Special Coordinates')

################################################################################
# Test interpolation operations
# (errors printed against a smooth analytic test function)

k = 2*np.pi/3
test_func = lambda x, y: np.exp(np.sin(k*x))*np.sin(k*y)
test_func_x = lambda x, y: k*np.exp(np.sin(k*x))*np.cos(k*x)*np.sin(k*y)
test_func_y = lambda x, y: k*np.exp(np.sin(k*x))*np.cos(k*y)

# Interpolation of a globally smooth function on grid to radial
f = test_func(grid.xg, grid.yg)
fr = test_func(ebdy.radial_x, ebdy.radial_y)
fe = ebdy.interpolate_grid_to_radial(f, order=5)
err = np.abs(fe-fr).max()
print('Error in grid --> radial interpolation: {:0.2e}'.format(err))

# Interpolation of a function to the interface
fr = test_func(ebdy.interface.x, ebdy.interface.y)
fe = ebdy.interpolate_grid_to_interface(f, order=5)
err = np.abs(fe-fr).max()
print('Error in grid --> interface interpolation: {:0.2e}'.format(err))

# Interpolation of a function from radial to grid
fr = test_func(ebdy.radial_x, ebdy.radial_y)
ft = ebdy.interpolate_radial_to_grid(fr)
fe = test_func(ebdy.grid_ia_x, ebdy.grid_ia_y)
err = np.abs(fe-ft).max()
print('Error in radial --> grid interpolation: {:0.2e}'.format(err))

################################################################################
# Test derivatives

# radial gradient
frxe, frye = ebdy.radial_grid_derivatives(fr)
frxt = test_func_x(ebdy.radial_x, ebdy.radial_y)
fryt = test_func_y(ebdy.radial_x, ebdy.radial_y)
err_x = np.abs(frxt-frxe).max()
err_y = np.abs(fryt-frye).max()
err = max(err_x, err_y)
print('Error in radial grid differentiation: {:0.2e}'.format(err))

# fourth order accurate gradient on whole domain
dx = lambda x: fd_x_4(x, grid.xh, periodic_fix=not interior)
dy = lambda x: fd_y_4(x, grid.yh, periodic_fix=not interior)
fxe, fye, fxre, fyre = ebdy.gradient(f, fr, dx, dy)
fxt = test_func_x(grid.xg, grid.yg)
fyt = test_func_y(grid.xg, grid.yg)
err_x = np.abs(fxt-fxe)[ebdy.phys].max()
err_y = np.abs(fyt-fye)[ebdy.phys].max()
err = max(err_x, err_y)
print('Error in gradient, 4th order FD: {:0.2e}'.format(err))

# spectrally accurate gradient on whole domain
# (Fourier differentiation via i*k multipliers in frequency space)
kxv = np.fft.fftfreq(grid.Nx, grid.xh/(2*np.pi))
kyv = np.fft.fftfreq(grid.Ny, grid.yh/(2*np.pi))
kx, ky = np.meshgrid(kxv, kyv, indexing='ij')
ikx, iky = 1j*kx, 1j*ky
dx = lambda x: fourier(x, ikx)
dy = lambda x: fourier(x, iky)
fxe, fye, fxre, fyre = ebdy.gradient(f, fr, dx, dy)
err_x = np.abs(fxt-fxe)[ebdy.phys].max()
err_y = np.abs(fyt-fye)[ebdy.phys].max()
err = max(err_x, err_y)
print('Error in gradient, Fourier: {:0.2e}'.format(err))

################################################################################
# Plot QFS Boundaries

fig, ax = plt.subplots()
ax.scatter(ebdy.bdy.x, ebdy.bdy.y, color='black', s=10, label='boundary')
ax.scatter(ebdy.interface.x, ebdy.interface.y, color='gray', s=10, label='interface')
bb = ebdy.bdy_qfs.interior_source_bdy if interior else ebdy.bdy_qfs.exterior_source_bdy
ax.scatter(bb.x, bb.y, color='blue', s=10, label='boundary effective')
bb = ebdy.interface_qfs.exterior_source_bdy
ax.scatter(bb.x, bb.y, color='red', s=10, label='interface effective 1')
bb = ebdy.interface_qfs.interior_source_bdy
ax.scatter(bb.x, bb.y, color='pink', s=10, label='interface effective 2')
ax.legend()
ax.set_title('QFS Boundaries')
16ac2cc5f2a91ba83196c9fadd252c2d0b1779da | 11,289 | py | Python | tests/api/endpoints/test_share_links.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | tests/api/endpoints/test_share_links.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | tests/api/endpoints/test_share_links.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from mock import patch
from seaserv import seafile_api
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
from seahub.share.models import FileShare
from seahub.api2.permissions import CanGenerateShareLink
# Optional flag from the seahub settings: several tests below return early
# when it is absent/False (those paths only run in a pro/dev deployment),
# so default to False if the setting does not exist.
try:
    from seahub.settings import LOCAL_PRO_DEV_ENV
except ImportError:
    LOCAL_PRO_DEV_ENV = False
class ShareLinksTest(BaseTestCase):
    """API tests for the /api/v2.1/share-links/ endpoints.

    Covers listing, creating and deleting share links for files and
    directories, including repo/folder permission checks and user-role
    checks (the latter mocked via CanGenerateShareLink.has_permission).
    """

    def setUp(self):
        # Fixture repo/file/folder are provided by BaseTestCase.
        self.repo_id = self.repo.id
        self.file_path = self.file
        self.folder_path = self.folder
        self.url = reverse('api-v2.1-share-links')

    def tearDown(self):
        self.remove_repo()

    def _add_file_share_link(self):
        """Create a share link for the fixture file; return its token."""
        fs = FileShare.objects.create_file_link(self.user.username,
            self.repo.id, self.file, None, None)
        return fs.token

    def _add_dir_share_link(self):
        """Create a share link for the fixture folder; return its token."""
        fs = FileShare.objects.create_dir_link(self.user.username,
            self.repo.id, self.folder, None, None)
        return fs.token

    def _remove_share_link(self, token):
        """Delete the FileShare row identified by ``token``."""
        link = FileShare.objects.get(token=token)
        link.delete()

    def test_get_file_share_link(self):
        self.login_as(self.user)
        token = self._add_file_share_link()

        resp = self.client.get(self.url + '?path=' + self.file_path + '&repo_id=' + self.repo_id)
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp[0]['link'] is not None
        assert json_resp[0]['token'] is not None
        assert json_resp[0]['is_expired'] is not None
        assert token in json_resp[0]['link']
        # file links carry 'f' in the URL (dir links carry 'd')
        assert 'f' in json_resp[0]['link']
        assert token == json_resp[0]['token']

        self._remove_share_link(token)

    def test_get_dir_share_link(self):
        self.login_as(self.user)
        token = self._add_dir_share_link()

        resp = self.client.get(self.url + '?path=' + self.folder_path + '&repo_id=' + self.repo_id)
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp[0]['link'] is not None
        assert json_resp[0]['token'] is not None
        assert json_resp[0]['is_expired'] is not None
        assert token in json_resp[0]['link']
        assert 'd' in json_resp[0]['link']
        assert token == json_resp[0]['token']

        self._remove_share_link(token)

    @patch.object(CanGenerateShareLink, 'has_permission')
    def test_get_link_with_invalid_user_role_permission(self, mock_has_permission):
        self.login_as(self.user)
        mock_has_permission.return_value = False

        resp = self.client.get(self.url)
        self.assertEqual(403, resp.status_code)

    def test_create_file_share_link(self):
        self.login_as(self.user)

        resp = self.client.post(self.url, {'path': self.file_path, 'repo_id': self.repo_id})
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['link'] is not None
        assert json_resp['token'] is not None
        assert json_resp['is_expired'] is not None
        assert json_resp['token'] in json_resp['link']
        assert 'f' in json_resp['link']

        self._remove_share_link(json_resp['token'])

    def test_create_file_share_link_in_enc_repo(self):
        # share links may not be created inside an encrypted repo
        self.login_as(self.user)
        resp = self.client.post(self.url, {'path': '/', 'repo_id': self.enc_repo.id})
        self.assertEqual(403, resp.status_code)

    def test_create_file_share_link_in_other_repo(self):
        # admin has no permission on user's repo
        self.login_as(self.admin)
        resp = self.client.post(self.url, {'path': self.file_path, 'repo_id': self.repo_id})
        self.assertEqual(403, resp.status_code)

    def test_create_file_share_link_with_permissions(self):
        self.login_as(self.user)

        json_str = json.dumps({'path': self.file_path, 'repo_id': self.repo_id,
                               'permissions': {
                                   'can_edit': False,
                                   'can_download': True
                               }})
        resp = self.client.post(self.url, json_str,
                                content_type="application/json")
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['link'] is not None
        assert json_resp['token'] is not None
        assert json_resp['is_expired'] is not None
        assert json_resp['token'] in json_resp['link']
        assert 'f' in json_resp['link']
        assert json_resp['permissions']['can_edit'] is False
        assert json_resp['permissions']['can_download'] is True

        self._remove_share_link(json_resp['token'])

    def test_create_file_share_link_with_invalid_permissions(self):
        # unknown permission keys are ignored and the defaults
        # (can_edit=False, can_download=True) are applied
        self.login_as(self.user)

        json_str = json.dumps({'path': self.file_path, 'repo_id': self.repo_id,
                               'permissions': {
                                   'can_editrrr': False,
                                   'can_downloadrrr': True
                               }})
        resp = self.client.post(self.url, json_str,
                                content_type="application/json")
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['link'] is not None
        assert json_resp['token'] is not None
        assert json_resp['is_expired'] is not None
        assert json_resp['token'] in json_resp['link']
        assert 'f' in json_resp['link']
        assert json_resp['permissions']['can_edit'] is False
        assert json_resp['permissions']['can_download'] is True

        self._remove_share_link(json_resp['token'])

    def test_create_file_share_link_with_view_only_permission(self):
        self.login_as(self.user)

        json_str = json.dumps({'path': self.file_path, 'repo_id': self.repo_id,
                               'permissions': {
                                   'can_edit': False,
                                   'can_download': False
                               }})
        resp = self.client.post(self.url, json_str,
                                content_type="application/json")
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['link'] is not None
        assert json_resp['token'] is not None
        assert json_resp['is_expired'] is not None
        assert json_resp['token'] in json_resp['link']
        assert 'f' in json_resp['link']
        assert json_resp['permissions']['can_edit'] is False
        assert json_resp['permissions']['can_download'] is False

        self._remove_share_link(json_resp['token'])

    def test_create_dir_share_link(self):
        self.login_as(self.user)

        resp = self.client.post(self.url, {'path': self.folder_path, 'repo_id': self.repo_id})
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['link'] is not None
        assert json_resp['token'] is not None
        assert json_resp['is_expired'] is not None
        assert json_resp['token'] in json_resp['link']
        assert 'd' in json_resp['link']

        self._remove_share_link(json_resp['token'])

    def test_create_link_with_invalid_repo_permission(self):
        # login with admin to create share link in user repo
        self.login_as(self.admin)
        data = {'path': self.file_path, 'repo_id': self.repo_id}
        resp = self.client.post(self.url, data)
        self.assertEqual(403, resp.status_code)

    def test_create_link_with_rw_permission_folder(self):
        if not LOCAL_PRO_DEV_ENV:
            return

        self.set_user_folder_rw_permission_to_admin()

        # login with admin to create share link for 'rw' permission folder
        self.login_as(self.admin)
        data = {'path': self.file_path, 'repo_id': self.repo_id}
        resp = self.client.post(self.url, data)
        self.assertEqual(200, resp.status_code)

    def test_create_link_with_rw_permission_folder_in_group(self):
        self.share_repo_to_group_with_rw_permission()
        self.add_admin_to_group()

        # login with admin to create share link for 'rw' permission folder
        self.login_as(self.admin)
        data = {'path': self.file_path, 'repo_id': self.repo_id}
        resp = self.client.post(self.url, data)
        self.assertEqual(200, resp.status_code)

    def test_create_link_with_r_permission_folder(self):
        if not LOCAL_PRO_DEV_ENV:
            return

        self.set_user_folder_r_permission_to_admin()

        # login with admin to create share link for 'r' permission folder
        self.login_as(self.admin)
        data = {'path': self.file_path, 'repo_id': self.repo_id}
        resp = self.client.post(self.url, data)
        self.assertEqual(200, resp.status_code)

    def test_create_link_with_r_permission_folder_in_group(self):
        self.share_repo_to_group_with_r_permission()
        self.add_admin_to_group()

        # login with admin to create share link for 'r' permission folder
        self.login_as(self.admin)
        data = {'path': self.file_path, 'repo_id': self.repo_id}
        resp = self.client.post(self.url, data)
        self.assertEqual(200, resp.status_code)

    @patch.object(CanGenerateShareLink, 'has_permission')
    def test_create_link_with_invalid_user_role_permission(self, mock_has_permission):
        self.login_as(self.user)
        mock_has_permission.return_value = False

        resp = self.client.post(self.url, {'path': self.folder_path, 'repo_id': self.repo_id})
        self.assertEqual(403, resp.status_code)

    def test_delete_file_share_link(self):
        self.login_as(self.user)

        token = self._add_file_share_link()
        url = reverse('api-v2.1-share-link', args=[token])
        resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['success'] is True

    def test_delete_dir_share_link(self):
        self.login_as(self.user)

        # BUGFIX: this test previously created a *file* link via
        # _add_file_share_link(), so the directory-link delete path was
        # never exercised. Use a dir link, matching the test's name.
        token = self._add_dir_share_link()
        url = reverse('api-v2.1-share-link', args=[token])
        resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['success'] is True

    def test_delete_link_if_not_owner(self):
        # only the link owner may delete it
        self.login_as(self.admin)

        token = self._add_file_share_link()
        url = reverse('api-v2.1-share-link', args=[token])
        resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
        self.assertEqual(403, resp.status_code)

    @patch.object(CanGenerateShareLink, 'has_permission')
    def test_delete_link_with_invalid_user_repo_permission(self, mock_has_permission):
        token = self._add_file_share_link()

        self.login_as(self.user)
        mock_has_permission.return_value = False

        url = reverse('api-v2.1-share-link', args=[token])
        resp = self.client.delete(url, {}, 'application/x-www-form-urlencoded')
        self.assertEqual(403, resp.status_code)
| 36.182692 | 99 | 0.641244 |
0751d93fe59e50649cc838fc13271ed068a3a16a | 1,076 | py | Python | fuse-version.py | monash-wsrn/psoc-camera-firmware | 0c58b41b0eb72a533de42dfc179a5bc8d478352d | [
"MIT"
] | null | null | null | fuse-version.py | monash-wsrn/psoc-camera-firmware | 0c58b41b0eb72a533de42dfc179a5bc8d478352d | [
"MIT"
] | null | null | null | fuse-version.py | monash-wsrn/psoc-camera-firmware | 0c58b41b0eb72a533de42dfc179a5bc8d478352d | [
"MIT"
] | null | null | null | #!/usr/bin/python
from fuse import FUSE,Operations,FuseOSError
from subprocess import check_output
from errno import ENOENT
from stat import S_IFDIR,S_IFREG
class git_version(Operations):
    """Read-only FUSE filesystem exposing a single synthesized file,
    /version.h, whose content is a C ``#define GIT_VERSION "..."`` line
    built from ``git describe`` at access time.

    NOTE(review): this is Python 2 code (old-style octal literals such
    as 0755) using the fusepy ``Operations`` API.
    """
    def getattr(self,path,fh=None):
        # Regenerate the version string on every stat so the mounted
        # header always reflects the current state of the working tree.
        self.version=check_output(['git','describe','--abbrev=8','--dirty','--always','--tags'])
        # [:-1] strips the trailing newline from git's output.
        self.version='#define GIT_VERSION "%s"\n'%self.version[:-1]
        if path=='/':
            # directory: drwxr-xr-x
            return {'st_mode':S_IFDIR|0755}
        elif path=='/version.h':
            # regular file, read-only, sized to the generated content
            return {'st_mode':S_IFREG|0444,'st_size':len(self.version)}
        else: raise FuseOSError(ENOENT)
    def read(self,path,size,offset,fh):
        # Serve the slice of the header text computed in getattr().
        if path=='/version.h': return self.version[offset:offset+size]
        else: raise RuntimeError('wrong path: '+path)
    def readdir(self,path,fh):
        # The filesystem contains exactly one entry besides '.' and '..'.
        return ['.','..','version.h']
    # Optional FUSE operations are stubbed out with None (presumably so
    # fusepy treats them as unimplemented -- confirm against the fusepy
    # Operations dispatch).
    access=None
    flush=None
    getxattr=None
    listxattr=None
    open=None
    opendir=None
    release=None
    releasedir=None
    statfs=None
# Mount the virtual filesystem on the './version' directory; foreground=True
# blocks here until unmounted, ro makes it read-only, and nonempty permits
# mounting over a non-empty directory.
fuse=FUSE(git_version(),'version',foreground=True,ro=True,nonempty=True)
| 30.742857 | 96 | 0.645911 |
864dce2a6678d15acd7e82f7dc140723550b55a9 | 182 | py | Python | lab/lab8/genmat.py | YZL24/SUSTech-CS205 | c15e4055b3e260e84e94c8db46b4180448c3619f | [
"MIT"
] | null | null | null | lab/lab8/genmat.py | YZL24/SUSTech-CS205 | c15e4055b3e260e84e94c8db46b4180448c3619f | [
"MIT"
] | null | null | null | lab/lab8/genmat.py | YZL24/SUSTech-CS205 | c15e4055b3e260e84e94c8db46b4180448c3619f | [
"MIT"
] | null | null | null | from random import random
# Write a 100x100 matrix of uniform random values in [0, 100) to mat2.txt:
# two decimals per value, values separated by single spaces, one row per
# line (each row keeps the original trailing space before the newline).
with open('mat2.txt', 'w+') as out:
    for _row in range(100):
        row_text = ' '.join(f'{random() * 100:.2f}' for _col in range(100))
        out.write(row_text + ' \n')
| 20.222222 | 43 | 0.516484 |
06857c5a05e536dade348771f844193c030997b4 | 44,206 | py | Python | manila/tests/share/drivers/glusterfs/test_layout_volume.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 159 | 2015-01-02T09:35:15.000Z | 2022-01-04T11:51:34.000Z | manila/tests/share/drivers/glusterfs/test_layout_volume.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 5 | 2015-07-24T09:28:21.000Z | 2020-11-20T04:33:51.000Z | manila/tests/share/drivers/glusterfs/test_layout_volume.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 128 | 2015-01-05T22:52:28.000Z | 2021-12-29T14:00:58.000Z | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" GlusterFS volume mapped share layout testcases.
"""
import re
import shutil
import tempfile
from unittest import mock
import ddt
from oslo_config import cfg
from manila.common import constants
from manila import context
from manila import exception
from manila.share import configuration as config
from manila.share.drivers.glusterfs import common
from manila.share.drivers.glusterfs import layout_volume
from manila import test
from manila.tests import fake_utils
# Shared oslo.config registry handle; the tests override options on it
# via CONF.set_default(...) in setUp.
CONF = cfg.CONF
def new_share(**kwargs):
    """Build a fake share dict for tests.

    Any keyword argument overrides (or extends) the default fields.
    """
    defaults = {
        'id': 'fakeid',
        'name': 'fakename',
        'size': 1,
        'share_proto': 'glusterfs',
    }
    # keyword overrides win over the defaults, matching dict.update order
    return {**defaults, **kwargs}
def glusterXMLOut(**kwargs):
    """Render a canned Gluster CLI XML reply.

    Expects ``ret`` and ``errno`` keyword arguments; returns an
    (stdout, stderr) pair with stderr always empty.
    """
    reply = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>%(ret)d</opRet>
  <opErrno>%(errno)d</opErrno>
  <opErrstr>fake error</opErrstr>
</cliOutput>""" % kwargs
    return reply, ''
# Canned manila share ids: tests return these from the 'user.manila-share'
# volume option to mark a Gluster volume as already claimed by a share.
FAKE_UUID1 = '11111111-1111-1111-1111-111111111111'
FAKE_UUID2 = '22222222-2222-2222-2222-222222222222'
@ddt.ddt
class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
"""Tests GlusterfsVolumeMappedLayout."""
def setUp(self):
super(GlusterfsVolumeMappedLayoutTestCase, self).setUp()
fake_utils.stub_out_utils_execute(self)
self._execute = fake_utils.fake_execute
self._context = context.get_admin_context()
self.glusterfs_target1 = 'root@host1:/gv1'
self.glusterfs_target2 = 'root@host2:/gv2'
self.glusterfs_server1 = 'root@host1'
self.glusterfs_server2 = 'root@host2'
self.glusterfs_server1_volumes = 'manila-share-1-1G\nshare1'
self.glusterfs_server2_volumes = 'manila-share-2-2G\nshare2'
self.share1 = new_share(
export_location=self.glusterfs_target1,
status=constants.STATUS_AVAILABLE)
self.share2 = new_share(
export_location=self.glusterfs_target2,
status=constants.STATUS_AVAILABLE)
gmgr = common.GlusterManager
self.gmgr1 = gmgr(self.glusterfs_server1, self._execute, None, None,
requires={'volume': False})
self.gmgr2 = gmgr(self.glusterfs_server2, self._execute, None, None,
requires={'volume': False})
self.glusterfs_volumes_dict = (
{'root@host1:/manila-share-1-1G': {'size': 1},
'root@host2:/manila-share-2-2G': {'size': 2}})
self.glusterfs_used_vols = set([
'root@host1:/manila-share-1-1G',
'root@host2:/manila-share-2-2G'])
CONF.set_default('glusterfs_servers',
[self.glusterfs_server1, self.glusterfs_server2])
CONF.set_default('glusterfs_server_password',
'fake_password')
CONF.set_default('glusterfs_path_to_private_key',
'/fakepath/to/privatekey')
CONF.set_default('glusterfs_volume_pattern',
r'manila-share-\d+-#{size}G$')
CONF.set_default('driver_handles_share_servers', False)
self.fake_driver = mock.Mock()
self.mock_object(self.fake_driver, '_execute',
self._execute)
self.fake_driver.GLUSTERFS_VERSION_MIN = (3, 6)
self.fake_conf = config.Configuration(None)
self.mock_object(tempfile, 'mkdtemp',
mock.Mock(return_value='/tmp/tmpKGHKJ'))
self.mock_object(common.GlusterManager, 'make_gluster_call')
self.fake_private_storage = mock.Mock()
with mock.patch.object(layout_volume.GlusterfsVolumeMappedLayout,
'_glustermanager',
side_effect=[self.gmgr1, self.gmgr2]):
self._layout = layout_volume.GlusterfsVolumeMappedLayout(
self.fake_driver, configuration=self.fake_conf,
private_storage=self.fake_private_storage)
self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6'),
self.glusterfs_server2: ('3', '7')}
self.addCleanup(fake_utils.fake_execute_set_repliers, [])
self.addCleanup(fake_utils.fake_execute_clear_log)
@ddt.data({"test_kwargs": {}, "requires": {"volume": True}},
{"test_kwargs": {'req_volume': False},
"requires": {"volume": False}})
@ddt.unpack
def test_glustermanager(self, test_kwargs, requires):
fake_obj = mock.Mock()
self.mock_object(common, 'GlusterManager',
mock.Mock(return_value=fake_obj))
ret = self._layout._glustermanager(self.glusterfs_target1,
**test_kwargs)
common.GlusterManager.assert_called_once_with(
self.glusterfs_target1, self._execute,
self._layout.configuration.glusterfs_path_to_private_key,
self._layout.configuration.glusterfs_server_password,
requires=requires)
self.assertEqual(fake_obj, ret)
def test_compile_volume_pattern(self):
volume_pattern = r'manila-share-\d+-(?P<size>\d+)G$'
ret = self._layout._compile_volume_pattern()
self.assertEqual(re.compile(volume_pattern), ret)
@ddt.data({'root@host1:/manila-share-1-1G': 'NONE',
'root@host2:/manila-share-2-2G': None},
{'root@host1:/manila-share-1-1G': FAKE_UUID1,
'root@host2:/manila-share-2-2G': None},
{'root@host1:/manila-share-1-1G': 'foobarbaz',
'root@host2:/manila-share-2-2G': FAKE_UUID2},
{'root@host1:/manila-share-1-1G': FAKE_UUID1,
'root@host2:/manila-share-2-2G': FAKE_UUID2})
def test_fetch_gluster_volumes(self, sharemark):
vol1_qualified = 'root@host1:/manila-share-1-1G'
gmgr_vol1 = common.GlusterManager(vol1_qualified)
gmgr_vol1.get_vol_option = mock.Mock(
return_value=sharemark[vol1_qualified])
vol2_qualified = 'root@host2:/manila-share-2-2G'
gmgr_vol2 = common.GlusterManager(vol2_qualified)
gmgr_vol2.get_vol_option = mock.Mock(
return_value=sharemark[vol2_qualified])
self.mock_object(
self.gmgr1, 'gluster_call',
mock.Mock(return_value=(self.glusterfs_server1_volumes, '')))
self.mock_object(
self.gmgr2, 'gluster_call',
mock.Mock(return_value=(self.glusterfs_server2_volumes, '')))
_glustermanager_calls = (self.gmgr1, gmgr_vol1, self.gmgr2, gmgr_vol2)
self.mock_object(self._layout, '_glustermanager',
mock.Mock(side_effect=_glustermanager_calls))
expected_output = {}
for q, d in self.glusterfs_volumes_dict.items():
if sharemark[q] not in (FAKE_UUID1, FAKE_UUID2):
expected_output[q] = d
ret = self._layout._fetch_gluster_volumes()
test_args = ('volume', 'list')
self.gmgr1.gluster_call.assert_called_once_with(*test_args,
log=mock.ANY)
self.gmgr2.gluster_call.assert_called_once_with(*test_args,
log=mock.ANY)
gmgr_vol1.get_vol_option.assert_called_once_with(
'user.manila-share')
gmgr_vol2.get_vol_option.assert_called_once_with(
'user.manila-share')
self.assertEqual(expected_output, ret)
def test_fetch_gluster_volumes_no_filter_used(self):
vol1_qualified = 'root@host1:/manila-share-1-1G'
gmgr_vol1 = common.GlusterManager(vol1_qualified)
gmgr_vol1.get_vol_option = mock.Mock()
vol2_qualified = 'root@host2:/manila-share-2-2G'
gmgr_vol2 = common.GlusterManager(vol2_qualified)
gmgr_vol2.get_vol_option = mock.Mock()
self.mock_object(
self.gmgr1, 'gluster_call',
mock.Mock(return_value=(self.glusterfs_server1_volumes, '')))
self.mock_object(
self.gmgr2, 'gluster_call',
mock.Mock(return_value=(self.glusterfs_server2_volumes, '')))
_glustermanager_calls = (self.gmgr1, gmgr_vol1, self.gmgr2, gmgr_vol2)
self.mock_object(self._layout, '_glustermanager',
mock.Mock(side_effect=_glustermanager_calls))
expected_output = self.glusterfs_volumes_dict
ret = self._layout._fetch_gluster_volumes(filter_used=False)
test_args = ('volume', 'list')
self.gmgr1.gluster_call.assert_called_once_with(*test_args,
log=mock.ANY)
self.gmgr2.gluster_call.assert_called_once_with(*test_args,
log=mock.ANY)
self.assertFalse(gmgr_vol1.get_vol_option.called)
self.assertFalse(gmgr_vol2.get_vol_option.called)
self.assertEqual(expected_output, ret)
def test_fetch_gluster_volumes_no_keymatch(self):
vol1_qualified = 'root@host1:/manila-share-1'
gmgr_vol1 = common.GlusterManager(vol1_qualified)
gmgr_vol1.get_vol_option = mock.Mock(return_value=None)
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
self.mock_object(
self.gmgr1, 'gluster_call',
mock.Mock(return_value=('manila-share-1', '')))
_glustermanager_calls = (self.gmgr1, gmgr_vol1)
self.mock_object(self._layout, '_glustermanager',
mock.Mock(side_effect=_glustermanager_calls))
self.mock_object(self._layout, 'volume_pattern',
re.compile(r'manila-share-\d+(-(?P<size>\d+)G)?$'))
expected_output = {'root@host1:/manila-share-1': {'size': None}}
ret = self._layout._fetch_gluster_volumes()
test_args = ('volume', 'list')
self.gmgr1.gluster_call.assert_called_once_with(*test_args,
log=mock.ANY)
self.assertEqual(expected_output, ret)
def test_fetch_gluster_volumes_error(self):
test_args = ('volume', 'list')
def raise_exception(*args, **kwargs):
if(args == test_args):
raise exception.GlusterfsException()
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
self.mock_object(self.gmgr1, 'gluster_call',
mock.Mock(side_effect=raise_exception))
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=self.gmgr1))
self.mock_object(layout_volume.LOG, 'error')
self.assertRaises(exception.GlusterfsException,
self._layout._fetch_gluster_volumes)
self.gmgr1.gluster_call.assert_called_once_with(*test_args,
log=mock.ANY)
def test_do_setup(self):
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
self.mock_object(self.gmgr1, 'get_gluster_version',
mock.Mock(return_value=('3', '6')))
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=self.gmgr1))
self.mock_object(self._layout, '_fetch_gluster_volumes',
mock.Mock(return_value=self.glusterfs_volumes_dict))
self.mock_object(self._layout, '_check_mount_glusterfs')
self._layout.gluster_used_vols = self.glusterfs_used_vols
self.mock_object(layout_volume.LOG, 'warning')
self._layout.do_setup(self._context)
self._layout._fetch_gluster_volumes.assert_called_once_with(
filter_used=False)
self._layout._check_mount_glusterfs.assert_called_once_with()
self.gmgr1.get_gluster_version.assert_called_once_with()
def test_do_setup_unsupported_glusterfs_version(self):
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
self.mock_object(self.gmgr1, 'get_gluster_version',
mock.Mock(return_value=('3', '5')))
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=self.gmgr1))
self.assertRaises(exception.GlusterfsException,
self._layout.do_setup, self._context)
self.gmgr1.get_gluster_version.assert_called_once_with()
@ddt.data(exception.GlusterfsException, RuntimeError)
def test_do_setup_get_gluster_version_fails(self, exc):
def raise_exception(*args, **kwargs):
raise exc
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
self.mock_object(self.gmgr1, 'get_gluster_version',
mock.Mock(side_effect=raise_exception))
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=self.gmgr1))
self.assertRaises(exc, self._layout.do_setup, self._context)
self.gmgr1.get_gluster_version.assert_called_once_with()
def test_do_setup_glusterfs_no_volumes_provided_by_backend(self):
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
self.mock_object(self.gmgr1, 'get_gluster_version',
mock.Mock(return_value=('3', '6')))
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=self.gmgr1))
self.mock_object(self._layout, '_fetch_gluster_volumes',
mock.Mock(return_value={}))
self.assertRaises(exception.GlusterfsException,
self._layout.do_setup, self._context)
self._layout._fetch_gluster_volumes.assert_called_once_with(
filter_used=False)
def test_share_manager(self):
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=self.gmgr1))
self.mock_object(self._layout.private_storage,
'get', mock.Mock(return_value='host1:/gv1'))
ret = self._layout._share_manager(self.share1)
self._layout.private_storage.get.assert_called_once_with(
self.share1['id'], 'volume')
self._layout._glustermanager.assert_called_once_with('host1:/gv1')
self.assertEqual(self.gmgr1, ret)
def test_share_manager_no_privdata(self):
self.mock_object(self._layout.private_storage,
'get', mock.Mock(return_value=None))
ret = self._layout._share_manager(self.share1)
self._layout.private_storage.get.assert_called_once_with(
self.share1['id'], 'volume')
self.assertIsNone(ret)
def test_ensure_share(self):
share = self.share1
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
gmgr1.set_vol_option = mock.Mock()
self.mock_object(self._layout, '_share_manager',
mock.Mock(return_value=gmgr1))
self._layout.ensure_share(self._context, share)
self._layout._share_manager.assert_called_once_with(share)
self.assertIn(self.glusterfs_target1, self._layout.gluster_used_vols)
gmgr1.set_vol_option.assert_called_once_with(
'user.manila-share', share['id'])
@ddt.data({"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(),
"size": 1, "expected": "host:/share2G"},
{"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(),
"size": 2, "expected": "host:/share2G"},
{"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(),
"size": None, "expected": "host:/share2G"},
{"voldict": {"host:/share2G": {"size": 2},
"host:/share": {"size": None}},
"used_vols": set(["host:/share2G"]), "size": 1,
"expected": "host:/share"},
{"voldict": {"host:/share2G": {"size": 2},
"host:/share": {"size": None}},
"used_vols": set(["host:/share2G"]), "size": 2,
"expected": "host:/share"},
{"voldict": {"host:/share2G": {"size": 2},
"host:/share": {"size": None}},
"used_vols": set(["host:/share2G"]), "size": 3,
"expected": "host:/share"},
{"voldict": {"host:/share2G": {"size": 2},
"host:/share": {"size": None}},
"used_vols": set(["host:/share2G"]), "size": None,
"expected": "host:/share"},
{"voldict": {"host:/share": {}}, "used_vols": set(), "size": 1,
"expected": "host:/share"},
{"voldict": {"host:/share": {}}, "used_vols": set(),
"size": None, "expected": "host:/share"})
@ddt.unpack
def test_pop_gluster_vol(self, voldict, used_vols, size, expected):
gmgr = common.GlusterManager
gmgr1 = gmgr(expected, self._execute, None, None)
self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict)
self._layout.gluster_used_vols = used_vols
self._layout._glustermanager = mock.Mock(return_value=gmgr1)
self._layout.volume_pattern_keys = list(voldict.values())[0].keys()
result = self._layout._pop_gluster_vol(size=size)
self.assertEqual(expected, result)
self.assertIn(result, used_vols)
self._layout._fetch_gluster_volumes.assert_called_once_with()
self._layout._glustermanager.assert_called_once_with(result)
@ddt.data({"voldict": {"share2G": {"size": 2}},
"used_vols": set(), "size": 3},
{"voldict": {"share2G": {"size": 2}},
"used_vols": set(["share2G"]), "size": None})
@ddt.unpack
def test_pop_gluster_vol_excp(self, voldict, used_vols, size):
self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict)
self._layout.gluster_used_vols = used_vols
self._layout.volume_pattern_keys = list(voldict.values())[0].keys()
self.assertRaises(exception.GlusterfsException,
self._layout._pop_gluster_vol, size=size)
self._layout._fetch_gluster_volumes.assert_called_once_with()
self.assertFalse(
self.fake_driver._setup_via_manager.called)
def test_push_gluster_vol(self):
self._layout.gluster_used_vols = set([
self.glusterfs_target1, self.glusterfs_target2])
self._layout._push_gluster_vol(self.glusterfs_target2)
self.assertEqual(1, len(self._layout.gluster_used_vols))
self.assertFalse(
self.glusterfs_target2 in self._layout.gluster_used_vols)
def test_push_gluster_vol_excp(self):
self._layout.gluster_used_vols = set([self.glusterfs_target1])
self._layout.gluster_unused_vols_dict = {}
self.assertRaises(exception.GlusterfsException,
self._layout._push_gluster_vol,
self.glusterfs_target2)
@ddt.data({'vers_minor': '6',
'cmd': ['find', '/tmp/tmpKGHKJ', '-mindepth', '1',
'-delete']},
{'vers_minor': '7',
'cmd': ['find', '/tmp/tmpKGHKJ', '-mindepth', '1', '!',
'-path', '/tmp/tmpKGHKJ/.trashcan', '!', '-path',
'/tmp/tmpKGHKJ/.trashcan/internal_op', '-delete']})
@ddt.unpack
def test_wipe_gluster_vol(self, vers_minor, cmd):
tmpdir = '/tmp/tmpKGHKJ'
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
self._layout.glusterfs_versions = {
self.glusterfs_server1: ('3', vers_minor)}
self.mock_object(tempfile, 'mkdtemp',
mock.Mock(return_value=tmpdir))
self.mock_object(self.fake_driver, '_execute', mock.Mock())
self.mock_object(common, '_mount_gluster_vol', mock.Mock())
self.mock_object(common, '_umount_gluster_vol', mock.Mock())
self.mock_object(shutil, 'rmtree', mock.Mock())
self._layout._wipe_gluster_vol(gmgr1)
tempfile.mkdtemp.assert_called_once_with()
common._mount_gluster_vol.assert_called_once_with(
self.fake_driver._execute, gmgr1.export,
tmpdir)
kwargs = {'run_as_root': True}
self.fake_driver._execute.assert_called_once_with(
*cmd, **kwargs)
common._umount_gluster_vol.assert_called_once_with(
self.fake_driver._execute, tmpdir)
kwargs = {'ignore_errors': True}
shutil.rmtree.assert_called_once_with(tmpdir,
**kwargs)
def test_wipe_gluster_vol_mount_fail(self):
tmpdir = '/tmp/tmpKGHKJ'
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
self._layout.glusterfs_versions = {
self.glusterfs_server1: ('3', '6')}
self.mock_object(tempfile, 'mkdtemp',
mock.Mock(return_value=tmpdir))
self.mock_object(self.fake_driver, '_execute', mock.Mock())
self.mock_object(common, '_mount_gluster_vol',
mock.Mock(side_effect=exception.GlusterfsException))
self.mock_object(common, '_umount_gluster_vol', mock.Mock())
self.mock_object(shutil, 'rmtree', mock.Mock())
self.assertRaises(exception.GlusterfsException,
self._layout._wipe_gluster_vol,
gmgr1)
tempfile.mkdtemp.assert_called_once_with()
common._mount_gluster_vol.assert_called_once_with(
self.fake_driver._execute, gmgr1.export,
tmpdir)
self.assertFalse(self.fake_driver._execute.called)
self.assertFalse(common._umount_gluster_vol.called)
kwargs = {'ignore_errors': True}
shutil.rmtree.assert_called_once_with(tmpdir,
**kwargs)
def test_wipe_gluster_vol_error_wiping_gluster_vol(self):
tmpdir = '/tmp/tmpKGHKJ'
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
self._layout.glusterfs_versions = {
self.glusterfs_server1: ('3', '6')}
cmd = ['find', '/tmp/tmpKGHKJ', '-mindepth', '1', '-delete']
self.mock_object(tempfile, 'mkdtemp',
mock.Mock(return_value=tmpdir))
self.mock_object(
self.fake_driver, '_execute',
mock.Mock(side_effect=exception.ProcessExecutionError))
self.mock_object(common, '_mount_gluster_vol', mock.Mock())
self.mock_object(common, '_umount_gluster_vol', mock.Mock())
self.mock_object(shutil, 'rmtree', mock.Mock())
self.assertRaises(exception.GlusterfsException,
self._layout._wipe_gluster_vol,
gmgr1)
tempfile.mkdtemp.assert_called_once_with()
common._mount_gluster_vol.assert_called_once_with(
self.fake_driver._execute, gmgr1.export,
tmpdir)
kwargs = {'run_as_root': True}
self.fake_driver._execute.assert_called_once_with(
*cmd, **kwargs)
common._umount_gluster_vol.assert_called_once_with(
self.fake_driver._execute, tmpdir)
kwargs = {'ignore_errors': True}
shutil.rmtree.assert_called_once_with(tmpdir,
**kwargs)
def test_create_share(self):
self._layout._pop_gluster_vol = mock.Mock(
return_value=self.glusterfs_target1)
gmgr1 = common.GlusterManager(self.glusterfs_target1)
gmgr1.set_vol_option = mock.Mock()
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=gmgr1))
self.mock_object(self.fake_driver, '_setup_via_manager',
mock.Mock(return_value='host1:/gv1'))
share = new_share()
exp_locn = self._layout.create_share(self._context, share)
self._layout._pop_gluster_vol.assert_called_once_with(share['size'])
self.fake_driver._setup_via_manager.assert_called_once_with(
{'manager': gmgr1, 'share': share})
self._layout.private_storage.update.assert_called_once_with(
share['id'], {'volume': self.glusterfs_target1})
gmgr1.set_vol_option.assert_called_once_with(
'user.manila-share', share['id'])
self.assertEqual('host1:/gv1', exp_locn)
def test_create_share_error(self):
self._layout._pop_gluster_vol = mock.Mock(
side_effect=exception.GlusterfsException)
share = new_share()
self.assertRaises(exception.GlusterfsException,
self._layout.create_share, self._context, share)
self._layout._pop_gluster_vol.assert_called_once_with(
share['size'])
@ddt.data(None, '', 'Eeyore')
def test_delete_share(self, clone_of):
self._layout._push_gluster_vol = mock.Mock()
self._layout._wipe_gluster_vol = mock.Mock()
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
gmgr1.set_vol_option = mock.Mock()
gmgr1.get_vol_option = mock.Mock(return_value=clone_of)
new_vol_addr = self.glusterfs_target1
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=gmgr1))
self._layout.gluster_used_vols = set([self.glusterfs_target1])
self._layout.delete_share(self._context, self.share1)
gmgr1.get_vol_option.assert_called_once_with(
'user.manila-cloned-from')
self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1)
self.assertIn(new_vol_addr, self._layout.gluster_used_vols)
self._layout._push_gluster_vol.assert_called_once_with(
self.glusterfs_target1)
self._layout.private_storage.delete.assert_called_once_with(
self.share1['id'])
gmgr1.set_vol_option.assert_has_calls([
mock.call('user.manila-share', 'NONE'),
mock.call('nfs.disable', 'on')
])
def test_delete_share_clone(self):
self._layout._push_gluster_vol = mock.Mock()
self._layout._wipe_gluster_vol = mock.Mock()
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
gmgr1.gluster_call = mock.Mock()
gmgr1.get_vol_option = mock.Mock(return_value=FAKE_UUID1)
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=gmgr1))
self._layout.gluster_used_vols = set([self.glusterfs_target1])
self._layout.delete_share(self._context, self.share1)
gmgr1.get_vol_option.assert_called_once_with(
'user.manila-cloned-from')
self.assertFalse(self._layout._wipe_gluster_vol.called)
self._layout._push_gluster_vol.assert_called_once_with(
self.glusterfs_target1)
self._layout.private_storage.delete.assert_called_once_with(
self.share1['id'])
gmgr1.gluster_call.assert_called_once_with(
'volume', 'delete', 'gv1')
def test_delete_share_error(self):
self._layout._wipe_gluster_vol = mock.Mock()
self._layout._wipe_gluster_vol.side_effect = (
exception.GlusterfsException)
self._layout._push_gluster_vol = mock.Mock()
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
gmgr1.get_vol_option = mock.Mock(return_value=None)
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=gmgr1))
self._layout.gluster_used_vols = set([self.glusterfs_target1])
self.assertRaises(exception.GlusterfsException,
self._layout.delete_share, self._context,
self.share1)
self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1)
self.assertFalse(self._layout._push_gluster_vol.called)
def test_delete_share_missing_record(self):
self.mock_object(self._layout, '_share_manager',
mock.Mock(return_value=None))
self._layout.delete_share(self._context, self.share1)
self._layout._share_manager.assert_called_once_with(self.share1)
def test_create_snapshot(self):
self._layout.gluster_nosnap_vols_dict = {}
self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')}
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
self._layout.gluster_used_vols = set([self.glusterfs_target1])
self.mock_object(gmgr1, 'gluster_call',
mock.Mock(
side_effect=(glusterXMLOut(ret=0, errno=0),)))
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=gmgr1))
snapshot = {
'id': 'fake_snap_id',
'share_id': self.share1['id'],
'share': self.share1
}
ret = self._layout.create_snapshot(self._context, snapshot)
self.assertIsNone(ret)
args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id',
gmgr1.volume)
gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY)
    @ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=2),),
               '_exception': exception.GlusterfsException},
              {'side_effect': (('', ''),),
               '_exception': exception.GlusterfsException})
    @ddt.unpack
    def test_create_snapshot_error(self, side_effect, _exception):
        """An error XML reply or unparseable output raises GlusterfsException.

        errno=2 is a genuine backend failure; the empty-tuple case simulates
        output that cannot be parsed as gluster XML at all.
        """
        self._layout.gluster_nosnap_vols_dict = {}
        self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')}
        gmgr = common.GlusterManager
        gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
        self._layout.gluster_used_vols = set([self.glusterfs_target1])
        self.mock_object(gmgr1, 'gluster_call',
                         mock.Mock(side_effect=side_effect))
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=gmgr1))
        snapshot = {
            'id': 'fake_snap_id',
            'share_id': self.share1['id'],
            'share': self.share1
        }
        self.assertRaises(_exception, self._layout.create_snapshot,
                          self._context, snapshot)
        # The snapshot create command must have been attempted exactly once.
        args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id',
                gmgr1.volume)
        gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY)
    @ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException},
              {"vers_minor": '7',
               "exctype": exception.ShareSnapshotNotSupported})
    @ddt.unpack
    def test_create_snapshot_no_snap(self, vers_minor, exctype):
        """A ret=-1/errno=0 reply means the volume cannot be snapshotted.

        On GlusterFS < 3.7 this surfaces as a generic GlusterfsException;
        from 3.7 on it maps to the specific ShareSnapshotNotSupported.
        """
        self._layout.gluster_nosnap_vols_dict = {}
        self._layout.glusterfs_versions = {
            self.glusterfs_server1: ('3', vers_minor)}
        gmgr = common.GlusterManager
        gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
        self._layout.gluster_used_vols = set([self.glusterfs_target1])
        self.mock_object(gmgr1, 'gluster_call',
                         mock.Mock(
                             side_effect=(glusterXMLOut(ret=-1, errno=0),)))
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=gmgr1))
        snapshot = {
            'id': 'fake_snap_id',
            'share_id': self.share1['id'],
            'share': self.share1
        }
        self.assertRaises(exctype, self._layout.create_snapshot, self._context,
                          snapshot)
        args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id',
                gmgr1.volume)
        gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY)
@ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException},
{"vers_minor": '7',
"exctype": exception.ShareSnapshotNotSupported})
@ddt.unpack
def test_create_snapshot_no_snap_cached(self, vers_minor, exctype):
self._layout.gluster_nosnap_vols_dict = {
self.glusterfs_target1: 'fake error'}
self._layout.glusterfs_versions = {
self.glusterfs_server1: ('3', vers_minor)}
self._layout.gluster_used_vols = set([self.glusterfs_target1])
gmgr = common.GlusterManager
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
self.mock_object(self._layout, '_share_manager',
mock.Mock(return_value=gmgr1))
snapshot = {
'id': 'fake_snap_id',
'share_id': self.share1['id'],
'share': self.share1
}
self.assertRaises(exctype, self._layout.create_snapshot, self._context,
snapshot)
def test_find_actual_backend_snapshot_name(self):
gmgr = common.GlusterManager
gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
self.mock_object(gmgr1, 'gluster_call',
mock.Mock(return_value=('fake_snap_id_xyz', '')))
snapshot = {
'id': 'fake_snap_id',
'share_id': self.share1['id'],
'share': self.share1
}
ret = self._layout._find_actual_backend_snapshot_name(gmgr1, snapshot)
args = ('snapshot', 'list', gmgr1.volume, '--mode=script')
gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY)
self.assertEqual('fake_snap_id_xyz', ret)
@ddt.data('this is too bad', 'fake_snap_id_xyx\nfake_snap_id_pqr')
def test_find_actual_backend_snapshot_name_bad_snap_list(self, snaplist):
gmgr = common.GlusterManager
gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
self.mock_object(gmgr1, 'gluster_call',
mock.Mock(return_value=(snaplist, '')))
snapshot = {
'id': 'fake_snap_id',
'share_id': self.share1['id'],
'share': self.share1
}
self.assertRaises(exception.GlusterfsException,
self._layout._find_actual_backend_snapshot_name,
gmgr1, snapshot)
args = ('snapshot', 'list', gmgr1.volume, '--mode=script')
gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY)
    @ddt.data({'glusterfs_target': 'root@host1:/gv1',
               'glusterfs_server': 'root@host1'},
              {'glusterfs_target': 'host1:/gv1',
               'glusterfs_server': 'host1'})
    @ddt.unpack
    def test_create_share_from_snapshot(self, glusterfs_target,
                                        glusterfs_server):
        """Happy path of cloning a snapshot into a new share volume.

        Runs once with an SSH-user-prefixed export and once without. On
        GlusterFS >= 3.7 the backend snapshot is resolved, activated and
        cloned via the source volume manager, the clone is started, tagged
        with the share id and parent share id via the new volume manager,
        set up through the driver, recorded in private storage and added to
        the used-volume pool.
        """
        share = new_share()
        snapshot = {
            'id': 'fake_snap_id',
            'share_instance': new_share(export_location=glusterfs_target),
            'share_id': 'fake_share_id',
        }
        # The clone volume is named after the new share.
        volume = ''.join(['manila-', share['id']])
        new_vol_addr = ':/'.join([glusterfs_server, volume])
        gmgr = common.GlusterManager
        old_gmgr = gmgr(glusterfs_target, self._execute, None, None)
        new_gmgr = gmgr(new_vol_addr, self._execute, None, None)
        self._layout.gluster_used_vols = set([glusterfs_target])
        # Snapshot cloning requires GlusterFS >= 3.7.
        self._layout.glusterfs_versions = {glusterfs_server: ('3', '7')}
        self.mock_object(old_gmgr, 'gluster_call',
                         mock.Mock(side_effect=[
                             ('', ''), ('', ''), ('', ''), ('', '')]))
        self.mock_object(new_gmgr, 'gluster_call',
                         mock.Mock(side_effect=[('', ''), ('', '')]))
        self.mock_object(new_gmgr, 'get_vol_option',
                         mock.Mock())
        new_gmgr.get_vol_option.return_value = (
            'glusterfs-server-1,client')
        self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                         mock.Mock(return_value='fake_snap_id_xyz'))
        self.mock_object(self._layout, '_share_manager',
                         mock.Mock(return_value=old_gmgr))
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=new_gmgr))
        self.mock_object(self.fake_driver, '_setup_via_manager',
                         mock.Mock(return_value='host1:/gv1'))
        ret = self._layout.create_share_from_snapshot(
            self._context, share, snapshot, None)
        (self._layout._find_actual_backend_snapshot_name.
         assert_called_once_with(old_gmgr, snapshot))
        # Source-side calls: activate snapshot, clone it, start the clone.
        args = (('snapshot', 'activate', 'fake_snap_id_xyz',
                 'force', '--mode=script'),
                ('snapshot', 'clone', volume, 'fake_snap_id_xyz'),
                ('volume', 'start', volume))
        old_gmgr.gluster_call.assert_has_calls(
            [mock.call(*a, log=mock.ANY) for a in args])
        # Clone-side calls: tag the new volume with share id and parent id.
        args = (('volume', 'set', volume, 'user.manila-share', share['id']),
                ('volume', 'set', volume, 'user.manila-cloned-from',
                 snapshot['share_id']))
        new_gmgr.gluster_call.assert_has_calls(
            [mock.call(*a, log=mock.ANY) for a in args], any_order=True)
        self._layout._share_manager.assert_called_once_with(
            snapshot['share_instance'])
        self._layout._glustermanager.assert_called_once_with(
            gmgr.parse(new_vol_addr))
        self._layout.driver._setup_via_manager.assert_called_once_with(
            {'manager': new_gmgr, 'share': share},
            {'manager': old_gmgr, 'share': snapshot['share_instance']})
        self._layout.private_storage.update.assert_called_once_with(
            share['id'], {'volume': new_vol_addr})
        self.assertIn(
            new_vol_addr,
            self._layout.gluster_used_vols)
        self.assertEqual(['host1:/gv1'], ret)
    def test_create_share_from_snapshot_error_unsupported_gluster_version(
            self):
        """Cloning is refused outright on GlusterFS < 3.7.

        The version check must fail before any backend interaction: no
        snapshot lookup, no gluster calls on either manager, and no change
        to the layout's bookkeeping.
        """
        glusterfs_target = 'root@host1:/gv1'
        glusterfs_server = 'root@host1'
        share = new_share()
        volume = ''.join(['manila-', share['id']])
        new_vol_addr = ':/'.join([glusterfs_server, volume])
        gmgr = common.GlusterManager
        old_gmgr = gmgr(glusterfs_target, self._execute, None, None)
        new_gmgr = gmgr(new_vol_addr, self._execute, None, None)
        self._layout.gluster_used_vols_dict = {glusterfs_target: old_gmgr}
        # 3.6 is below the minimum version (3.7) required for cloning.
        self._layout.glusterfs_versions = {glusterfs_server: ('3', '6')}
        self.mock_object(
            old_gmgr, 'gluster_call',
            mock.Mock(side_effect=[('', ''), ('', '')]))
        self.mock_object(new_gmgr, 'get_vol_option',
                         mock.Mock())
        new_gmgr.get_vol_option.return_value = (
            'glusterfs-server-1,client')
        self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                         mock.Mock(return_value='fake_snap_id_xyz'))
        self.mock_object(self._layout, '_share_manager',
                         mock.Mock(return_value=old_gmgr))
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=new_gmgr))
        snapshot = {
            'id': 'fake_snap_id',
            'share_instance': new_share(export_location=glusterfs_target)
        }
        self.assertRaises(exception.GlusterfsException,
                          self._layout.create_share_from_snapshot,
                          self._context, share, snapshot)
        self.assertFalse(
            self._layout._find_actual_backend_snapshot_name.called)
        self.assertFalse(old_gmgr.gluster_call.called)
        self._layout._share_manager.assert_called_once_with(
            snapshot['share_instance'])
        self.assertFalse(self._layout._glustermanager.called)
        self.assertFalse(new_gmgr.get_vol_option.called)
        self.assertFalse(new_gmgr.gluster_call.called)
        self.assertNotIn(new_vol_addr,
                         self._layout.glusterfs_versions.keys())
def test_delete_snapshot(self):
self._layout.gluster_nosnap_vols_dict = {}
gmgr = common.GlusterManager
gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
self._layout.gluster_used_vols = set([self.glusterfs_target1])
self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
mock.Mock(return_value='fake_snap_id_xyz'))
self.mock_object(
gmgr1, 'gluster_call',
mock.Mock(return_value=glusterXMLOut(ret=0, errno=0)))
self.mock_object(self._layout, '_glustermanager',
mock.Mock(return_value=gmgr1))
snapshot = {
'id': 'fake_snap_id',
'share_id': self.share1['id'],
'share': self.share1
}
ret = self._layout.delete_snapshot(self._context, snapshot)
self.assertIsNone(ret)
args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz',
'--mode=script')
gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY)
(self._layout._find_actual_backend_snapshot_name.
assert_called_once_with(gmgr1, snapshot))
    @ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=0),),
               '_exception': exception.GlusterfsException},
              {'side_effect': (('', ''),),
               '_exception': exception.GlusterfsException})
    @ddt.unpack
    def test_delete_snapshot_error(self, side_effect, _exception):
        """Error XML or unparseable output from snapshot delete raises.

        Both a ret=-1 reply and output that is not gluster XML at all must
        surface as GlusterfsException, after exactly one delete attempt.
        """
        self._layout.gluster_nosnap_vols_dict = {}
        gmgr = common.GlusterManager
        gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
        self._layout.gluster_used_vols = set([self.glusterfs_target1])
        self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                         mock.Mock(return_value='fake_snap_id_xyz'))
        args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz',
                '--mode=script')
        self.mock_object(
            gmgr1, 'gluster_call',
            mock.Mock(side_effect=side_effect))
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=gmgr1))
        snapshot = {
            'id': 'fake_snap_id',
            'share_id': self.share1['id'],
            'share': self.share1
        }
        self.assertRaises(_exception, self._layout.delete_snapshot,
                          self._context, snapshot)
        gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY)
        (self._layout._find_actual_backend_snapshot_name.
         assert_called_once_with(gmgr1, snapshot))
@ddt.data(
('manage_existing', ('share', 'driver_options'), {}),
('unmanage', ('share',), {}),
('extend_share', ('share', 'new_size'), {'share_server': None}),
('shrink_share', ('share', 'new_size'), {'share_server': None}))
def test_nonimplemented_methods(self, method_invocation):
method, args, kwargs = method_invocation
self.assertRaises(NotImplementedError, getattr(self._layout, method),
*args, **kwargs)
| 45.108163 | 79 | 0.618626 |
f24f591aaffdb5881087819a0461f1383878b689 | 581 | py | Python | fun.py | Shridat/Assignment3 | 8a19d3fb0553260a95d7320117de7bcd76cd002d | [
"MIT"
] | null | null | null | fun.py | Shridat/Assignment3 | 8a19d3fb0553260a95d7320117de7bcd76cd002d | [
"MIT"
] | null | null | null | fun.py | Shridat/Assignment3 | 8a19d3fb0553260a95d7320117de7bcd76cd002d | [
"MIT"
] | null | null | null | dict1={}
# Read the string to analyse from the user.
string=str(input("Enter String="))  # str() is redundant: input() already returns str
# NOTE(review): these module-level counters appear unused — the loops inside
# most_frequent() bind their own i/j — and could likely be removed.
i=0
j=0
def most_frequent(string):
    """Print each distinct character of *string* with its frequency.

    Output format is one ``char=count`` line per distinct character, ordered
    by descending count; characters with equal counts keep first-appearance
    order (matching the original stable-sort behavior).
    """
    # Local import keeps this single-function script layout intact.
    from collections import Counter
    # Counter counts in one O(n) pass, replacing the previous O(n^2) nested
    # scan, and no longer mutates the module-level dict on repeated calls.
    for char, count in Counter(string).most_common():
        print(str(char) + "=" + str(count))
most_frequent(string)  # print the character-frequency report for the user input
| 27.666667 | 90 | 0.567986 |
b3bf97cde7fa0e4b2270da06d782a96f5a5e50b8 | 4,748 | py | Python | skrebate/surf.py | bukson/scikit-rebate | b3236154e5e6951007829a2ab34a043e7bc84fee | [
"MIT"
] | null | null | null | skrebate/surf.py | bukson/scikit-rebate | b3236154e5e6951007829a2ab34a043e7bc84fee | [
"MIT"
] | null | null | null | skrebate/surf.py | bukson/scikit-rebate | b3236154e5e6951007829a2ab34a043e7bc84fee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
scikit-rebate was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Pete Schmitt (pschmitt@upenn.edu)
- Ryan J. Urbanowicz (ryanurb@upenn.edu)
- Weixuan Fu (weixuanf@upenn.edu)
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
from joblib import Parallel, delayed
from .relieff import ReliefF
from .scoring_utils import SURF_compute_scores
class SURF(ReliefF):
    """SURF feature scoring (Spatially Uniform ReliefF).

    Unlike ReliefF, which uses a fixed number of nearest neighbors, SURF
    scores features against every instance falling within a radius equal
    to the average pairwise distance of the data set.

    Reference: Moore, Jason et al., "Multiple Threshold Spatially Uniform
    ReliefF for the Genetic Analysis of Complex Human Diseases".
    """

    def __init__(self, n_features_to_select=10, discrete_threshold=10, verbose=False, n_jobs=1):
        """Configure the SURF feature selector.

        Parameters
        ----------
        n_features_to_select: int (default: 10)
            Number of top-scoring features retained after selection.
        discrete_threshold: int (default: 10)
            A feature with more than this many unique levels is treated as
            continuous, otherwise as discrete.
        verbose: bool (default: False)
            If True, report timing of the distance array and scoring steps.
        n_jobs: int (default: 1)
            Number of cores dedicated to scoring via joblib; -1 uses every
            available core (recommended for speed).
        """
        self.n_features_to_select = n_features_to_select
        self.discrete_threshold = discrete_threshold
        self.verbose = verbose
        self.n_jobs = n_jobs

    ############################# SURF ############################################
    def _find_neighbors(self, inst, avg_dist):
        """Return indices of all instances within ``avg_dist`` of ``inst``.

        The neighborhood is defined by a distance radius rather than a
        neighbor count, and works the same regardless of endpoint type.
        """
        neighbor_ids = []
        for other in range(self._datalen):
            if other == inst:
                continue
            # The distance array is triangular: index with the larger id first.
            row, col = (other, inst) if other > inst else (inst, other)
            if self._distance_array[row][col] < avg_dist:
                neighbor_ids.append(other)
        return np.array(neighbor_ids, dtype=np.int32)

    def _run_algorithm(self):
        """Identify radius-based neighbors and score features (SURF scores)."""
        # The radius is the mean of all stored pairwise distances.
        total = 0
        count = 0
        for idx in range(self._datalen):
            total += sum(self._distance_array[idx])
            count += len(self._distance_array[idx])
        avg_dist = total / float(count)

        nan_entries = np.isnan(self._X)
        neighbor_lists = [self._find_neighbors(inst, avg_dist)
                          for inst in range(self._datalen)]
        # Score every instance's neighborhood in parallel, then sum the
        # per-instance feature scores.
        scores = np.sum(Parallel(n_jobs=self.n_jobs)(delayed(
            SURF_compute_scores)(instance_num, self.attr, nan_entries, self._num_attributes, self.mcmap,
                                 NN, self._headers, self._class_type, self._X, self._y, self._labels_std, self.data_type)
            for instance_num, NN in zip(range(self._datalen), neighbor_lists)), axis=0)

        return np.array(scores)
| 44.792453 | 121 | 0.672072 |
bb2a79036e4fa6ba0d516e951b8bb49143fa94df | 382 | py | Python | tests/test_sdf_time.py | soundmaking/sdfspu | 164af2602d07b18c45a8182cd5e9638628c7e165 | [
"MIT"
] | null | null | null | tests/test_sdf_time.py | soundmaking/sdfspu | 164af2602d07b18c45a8182cd5e9638628c7e165 | [
"MIT"
] | null | null | null | tests/test_sdf_time.py | soundmaking/sdfspu | 164af2602d07b18c45a8182cd5e9638628c7e165 | [
"MIT"
] | null | null | null | from sdfspu import sdf_time
def test_stamp_utc_now_no_arg():
    """Default stamp has date/time separator, millisecond dot, full length."""
    ts = sdf_time.stamp_utc_now()
    assert len(ts) == len("YYYYMMDD_HHMMSS.mss")
    assert "_" in ts
    assert "." in ts
stamp = sdf_time.stamp_utc_now(ms=False)
assert "_" in stamp
assert "." not in stamp
assert len(stamp) == len("YYYYMMDD_HHMMSS")
| 23.875 | 51 | 0.693717 |
df68f091832b9d67747a370da4ae806e25a47e8e | 672 | py | Python | app/core/management/commands/wait_for_db.py | Adam-Palucki/recipe-app-api | 1b44fd74d04ffd8688ec92d0381c00070acd8e71 | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | Adam-Palucki/recipe-app-api | 1b44fd74d04ffd8688ec92d0381c00070acd8e71 | [
"MIT"
] | 1 | 2020-07-01T18:12:13.000Z | 2020-07-01T18:12:13.000Z | app/core/management/commands/wait_for_db.py | Adam-Palucki/recipe-app-api | 1b44fd74d04ffd8688ec92d0381c00070acd8e71 | [
"MIT"
] | 1 | 2019-09-19T06:40:43.000Z | 2019-09-19T06:40:43.000Z | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django command to pause execution until the database is available.

    Intended as a container entrypoint step: the app container may come up
    before the database is ready to accept connections.
    """

    def handle(self, *args, **options):
        """Poll the default connection once per second until it is usable."""
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                # Merely indexing ``connections`` returns a lazy wrapper and
                # does not connect; ``ensure_connection()`` forces a real
                # connection attempt so OperationalError is raised while the
                # database is still down.
                connections['default'].ensure_connection()
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available!'))
| 30.545455 | 78 | 0.651786 |
6284958bd8500f1c1eea8df6d11b3e7cb60a90a1 | 312 | py | Python | algo-week-1/WeWantMilk.py | ilkerkosaroglu/METU-Contests | a917548ed2755406c962c7ead7d8f615e5508f6e | [
"MIT"
] | null | null | null | algo-week-1/WeWantMilk.py | ilkerkosaroglu/METU-Contests | a917548ed2755406c962c7ead7d8f615e5508f6e | [
"MIT"
] | null | null | null | algo-week-1/WeWantMilk.py | ilkerkosaroglu/METU-Contests | a917548ed2755406c962c7ead7d8f615e5508f6e | [
"MIT"
] | 1 | 2019-03-15T12:53:05.000Z | 2019-03-15T12:53:05.000Z | inp,M=map(int,raw_input().split())
bucketList=[]
for i in xrange(inp):
cow=map(int,raw_input().split())
bucketList.append(cow)
bucketList.sort()
sum=0
for i in bucketList:
if i[1]<M:
sum+=i[0]*i[1]
M-=i[1]
if M==0:break
else:
sum+=i[0]*M
break
print sum
| 16.421053 | 36 | 0.551282 |
90c7dddae47d3c10b354b6c5bb5239382a1715f6 | 8,317 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_default_security_rules_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_default_security_rules_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_default_security_rules_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
# NOTE: this class is generated by AutoRest (see the file header); manual
# behavioral edits will be lost on regeneration.
class DefaultSecurityRulesOperations(object):
    """DefaultSecurityRulesOperations operations.
    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2018-04-01".
    """
    # Expose the generated models module on the operations class for convenience.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2018-04-01"
        self.config = config
    def list(
            self, resource_group_name, network_security_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all default security rules in a network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of SecurityRule
        :rtype:
         ~azure.mgmt.network.v2018_04_01.models.SecurityRulePaged[~azure.mgmt.network.v2018_04_01.models.SecurityRule]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL from the route template. Subsequent
            # pages: the service returns a complete nextLink, used verbatim.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                # Correlate client requests with service-side logs.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            # Called lazily by the paged iterator for each page fetch.
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.SecurityRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'}
    def get(
            self, resource_group_name, network_security_group_name, default_security_rule_name, custom_headers=None, raw=False, **operation_config):
        """Get the specified default network security rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param default_security_rule_name: The name of the default security
         rule.
        :type default_security_rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SecurityRule or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2018_04_01.models.SecurityRule or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SecurityRule', response)
        if raw:
            # Caller asked for the transport response alongside the model.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'}
| 46.205556 | 220 | 0.667909 |
7c7c9b354433945ac9ca01d1d51f45da61254b06 | 629 | py | Python | DQM/L1TMonitorClient/python/L1TStage2OMTFDEQualityTests_cfi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | DQM/L1TMonitorClient/python/L1TStage2OMTFDEQualityTests_cfi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | DQM/L1TMonitorClient/python/L1TStage2OMTFDEQualityTests_cfi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | # quality tests for L1T Stage2 OMTF Data vs Emulator
import FWCore.ParameterSet.Config as cms
# QualityTester module that evaluates the L1T Stage2 OMTF data-vs-emulator
# quality tests; the tests themselves are defined in the XML file below.
l1TStage2OMTFDEQualityTests = cms.EDAnalyzer("QualityTester",
    # XML file listing the quality test definitions and thresholds.
    qtList=cms.untracked.FileInPath('DQM/L1TMonitorClient/data/L1TStage2OMTFDEQualityTests.xml'),
    QualityTestPrescaler=cms.untracked.int32(1),
    getQualityTestsFromFile=cms.untracked.bool(True),
    # Run the tests at lumi-section and run boundaries, not per event or at job end.
    testInEventloop=cms.untracked.bool(False),
    qtestOnEndLumi=cms.untracked.bool(True),
    qtestOnEndRun=cms.untracked.bool(True),
    qtestOnEndJob=cms.untracked.bool(False),
    reportThreshold=cms.untracked.string(""),
    verboseQT=cms.untracked.bool(True)
)
| 39.3125 | 97 | 0.780604 |
583634cf76bdf6418c49f45d8cc54edba8d50e80 | 557 | py | Python | batch.py | UNIST-LIM-Lab/NeuBoots | 196adf9e1ece2abc145f69966504bac2676e5b5e | [
"MIT"
] | null | null | null | batch.py | UNIST-LIM-Lab/NeuBoots | 196adf9e1ece2abc145f69966504bac2676e5b5e | [
"MIT"
] | null | null | null | batch.py | UNIST-LIM-Lab/NeuBoots | 196adf9e1ece2abc145f69966504bac2676e5b5e | [
"MIT"
] | null | null | null | import os
import torch.multiprocessing as mp
mp.set_start_method('spawn', force=True)
def run(str):
os.system(str)
return 0
def main():
strs = [f"python -m torch.distributed.launch --nproc_per_node=1 main.py deep --gpus {i%8} --seed {i}" for i in range(40)]
processes = []
for i in range(5):
for rank in range(8):
p = mp.Process(target=run, args=(strs[i * 8 + rank],))
p.start()
processes.append(p)
for p in processes:
p.join()
# Guard so spawned worker processes can import this module without re-launching.
if __name__ == "__main__":
    main()
| 21.423077 | 125 | 0.574506 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.