text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Support for Modbus Register sensors."""
import logging
import struct
from typing import Any, Union
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_OFFSET,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_COUNT = "count"
CONF_DATA_TYPE = "data_type"
CONF_PRECISION = "precision"
CONF_REGISTER = "register"
CONF_REGISTER_TYPE = "register_type"
CONF_REGISTERS = "registers"
CONF_REVERSE_ORDER = "reverse_order"
CONF_SCALE = "scale"
DATA_TYPE_CUSTOM = "custom"
DATA_TYPE_FLOAT = "float"
DATA_TYPE_INT = "int"
DATA_TYPE_UINT = "uint"
REGISTER_TYPE_HOLDING = "holding"
REGISTER_TYPE_INPUT = "input"
def number(value: Any) -> Union[int, float]:
"""Coerce a value to number without losing precision."""
if isinstance(value, int):
return value
if isinstance(value, str):
try:
value = int(value)
return value
except (TypeError, ValueError):
pass
try:
value = float(value)
return value
except (TypeError, ValueError):
raise vol.Invalid(f"invalid number {value}")
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_REGISTERS): [
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Optional(CONF_COUNT, default=1): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_INT): vol.In(
[DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT, DATA_TYPE_CUSTOM]
),
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_OFFSET, default=0): number,
vol.Optional(CONF_PRECISION, default=0): cv.positive_int,
vol.Optional(CONF_REGISTER_TYPE, default=REGISTER_TYPE_HOLDING): vol.In(
[REGISTER_TYPE_HOLDING, REGISTER_TYPE_INPUT]
),
vol.Optional(CONF_REVERSE_ORDER, default=False): cv.boolean,
vol.Optional(CONF_SCALE, default=1): number,
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_STRUCTURE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
]
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus sensors."""
sensors = []
data_types = {DATA_TYPE_INT: {1: "h", 2: "i", 4: "q"}}
data_types[DATA_TYPE_UINT] = {1: "H", 2: "I", 4: "Q"}
data_types[DATA_TYPE_FLOAT] = {1: "e", 2: "f", 4: "d"}
for register in config.get(CONF_REGISTERS):
structure = ">i"
if register.get(CONF_DATA_TYPE) != DATA_TYPE_CUSTOM:
try:
structure = ">{}".format(
data_types[register.get(CONF_DATA_TYPE)][register.get(CONF_COUNT)]
)
except KeyError:
_LOGGER.error(
"Unable to detect data type for %s sensor, " "try a custom type",
register.get(CONF_NAME),
)
continue
else:
structure = register.get(CONF_STRUCTURE)
try:
size = struct.calcsize(structure)
except struct.error as err:
_LOGGER.error(
"Error in sensor %s structure: %s", register.get(CONF_NAME), err
)
continue
if register.get(CONF_COUNT) * 2 != size:
_LOGGER.error(
"Structure size (%d bytes) mismatch registers count " "(%d words)",
size,
register.get(CONF_COUNT),
)
continue
hub_name = register.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
sensors.append(
ModbusRegisterSensor(
hub,
register.get(CONF_NAME),
register.get(CONF_SLAVE),
register.get(CONF_REGISTER),
register.get(CONF_REGISTER_TYPE),
register.get(CONF_UNIT_OF_MEASUREMENT),
register.get(CONF_COUNT),
register.get(CONF_REVERSE_ORDER),
register.get(CONF_SCALE),
register.get(CONF_OFFSET),
structure,
register.get(CONF_PRECISION),
)
)
if not sensors:
return False
add_entities(sensors)
class ModbusRegisterSensor(RestoreEntity):
"""Modbus register sensor."""
def __init__(
self,
hub,
name,
slave,
register,
register_type,
unit_of_measurement,
count,
reverse_order,
scale,
offset,
structure,
precision,
):
"""Initialize the modbus register sensor."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._register = int(register)
self._register_type = register_type
self._unit_of_measurement = unit_of_measurement
self._count = int(count)
self._reverse_order = reverse_order
self._scale = scale
self._offset = offset
self._precision = precision
self._structure = structure
self._value = None
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._value = state.state
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
def update(self):
"""Update the state of the sensor."""
if self._register_type == REGISTER_TYPE_INPUT:
result = self._hub.read_input_registers(
self._slave, self._register, self._count
)
else:
result = self._hub.read_holding_registers(
self._slave, self._register, self._count
)
val = 0
try:
registers = result.registers
if self._reverse_order:
registers.reverse()
except AttributeError:
_LOGGER.error(
"No response from hub %s, slave %s, register %s",
self._hub.name,
self._slave,
self._register,
)
return
byte_string = b"".join([x.to_bytes(2, byteorder="big") for x in registers])
val = struct.unpack(self._structure, byte_string)[0]
val = self._scale * val + self._offset
if isinstance(val, int):
self._value = str(val)
if self._precision > 0:
self._value += "." + "0" * self._precision
else:
self._value = f"{val:.{self._precision}f}"
|
{
"content_hash": "6df54218151196690b5722fe5ac939f8",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 88,
"avg_line_length": 31.219409282700422,
"alnum_prop": 0.5508852547641573,
"repo_name": "qedi-r/home-assistant",
"id": "1a5c71812d610b63dcbd12a5941741a9104b7024",
"size": "7399",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/modbus/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from __future__ import division
import sys
import os
import shutil
import warnings
import inspect
import configparser
import threading as thread
import traceback
import copy
import logging
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('agg') # Prevents crash when GUI runs matplotlib in thread on Linux
import matplotlib.pyplot as plt
from .. import empirical as emp
from .. import models as mod
from .. import compare as comp
from .. import misc
import time
def _better_time(gmtime=None):
return
def main(param_path='parameters.txt'):
"""
Entry point function for analysis based on parameter files.
Parameters
----------
param_path : str
Path to user-generated parameter file
"""
# Confirm parameters file is present
if not os.path.isfile(param_path):
raise IOError, "Parameter file not found at %s" % param_path
# Get raw params and base options (non-run-dependent options)
params, base_options = _get_params_base_options(param_path)
# Configure and start logging
# Done here instead of in function so will affect all subsequent calls
log_path = os.path.join(base_options['results_dir'], '_log.txt')
if os.path.isfile(log_path):
os.remove(log_path)
logging.basicConfig(level=logging.INFO, format='%(message)s')
fileh = logging.FileHandler(log_path)
fileh.setLevel(logging.DEBUG)
filefmt = logging.Formatter(
time.strftime("%Y/%m/%d %H:%M:%S %p", time.localtime()) +
' - %(name)s - %(levelname)s - %(message)s')
fileh.setFormatter(filefmt)
logging.getLogger('').addHandler(fileh)
def log_uncaught(type1, value1, traceback1):
tb_list = traceback.format_exception(type1, value1, traceback1)
tb_str = ''.join(tb_list)
logging.critical('\n\n'+tb_str)
sys.excepthook = log_uncaught
logging.info('Running macroeco') # v%s' % __version__)
logging.info('Parameters file at %s' % os.path.abspath(param_path))
# Preliminary check for errors in parameters file
bad_params = misc.check_parameter_file(param_path)
if len(bad_params[0]) > 0:
logging.warning("Possible formatting error(s) in" +
" %s: parameters %s on lines %s"
% (param_path, bad_params[0], bad_params[1]))
logging.info('Starting analysis')
# Do analysis for each run
for run_name in base_options['run_names']:
logging.info('Starting run %s' % run_name)
options = dict(params[run_name]) # All parameters from this run
options.update(base_options) # Add base parameters
options['run_dir'] = os.path.join(base_options['results_dir'],run_name)
if 'format' in options['analysis']:
_do_format(options)
else:
_do_analysis(options)
logging.info('Finished run %s' % run_name)
logging.info('Finished analysis successfully')
logging.info('Results available at %s' % options['param_dir'])
# Close logging - releases log file lock in Windows GUI
logging.shutdown()
def _get_params_base_options(param_path):
# Read parameter file into params object
params = configparser.ConfigParser()
try:
params.read(param_path)
except:
raise ValueError, "Parameter file is invalid"
# Setup param_dir and results_dir, get run_names
param_dir = os.path.abspath(os.path.dirname(param_path))
results_dir = os.path.join(param_dir, 'results')
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
os.makedirs(results_dir)
run_names = params.sections()
# Check there's at least one run
if not run_names:
raise NameError, "Parameters file must contain at least one run"
# Create options dict
base_options = {}
base_options['param_dir'] = param_dir
base_options['results_dir'] = results_dir
base_options['run_names'] = run_names
return params, base_options
def _do_format(options):
datapath = os.path.normpath(os.path.join(options['param_dir'],
options['data']))
out_path = os.path.splitext(datapath)[0] + "_formatted.csv"
format_type = options['analysis'].split('_')[1]
misc.data_read_write(datapath, out_path, format_type, **options)
def _do_analysis(options):
"""
Do analysis for a single run, as specified by options.
Parameters
----------
options : dict
Option names and values for analysis
"""
module = _function_location(options)
core_results = _call_analysis_function(options, module)
if module == 'emp' and ('models' in options.keys()):
fit_results = _fit_models(options, core_results)
else:
fit_results = None
_save_results(options, module, core_results, fit_results)
def _function_location(options):
# TODO: Add spec and misc modules
# This relies on the assumption that there are no duplicate member names
# in the different modules.
func_name = options['analysis'].split('.')[0] # Ignore method if present
emp_members = [x[0] for x in inspect.getmembers(emp)]
mod_members = [x[0] for x in inspect.getmembers(mod)]
if func_name in emp_members:
module = 'emp'
elif func_name in mod_members:
module = 'mod'
else:
raise ValueError, ("No analysis of type '%s' is available" %
options['analysis'])
return module
def _call_analysis_function(options, module):
"""
Call function from module and get result, using inputs from options
Parameters
----------
options : dict
Option names and values for analysis
module : str
Short name of module within macroeco containing analysis function
Returns
-------
dataframe, array, value, list of tuples
Functions from emp module return a list of tuples in which first
element of the tuple gives a string describing the result and the
second element giving the result of the analysis as a dataframe.
Functions in other modules return dataframe, array, or value.
"""
args, kwargs = _get_args_kwargs(options, module)
return eval("%s.%s(*args, **kwargs)" % (module, options['analysis']))
def _get_args_kwargs(options, module):
"""
Given an options (including analysis), and module, extract args and kwargs
"""
if module == 'emp':
options = _emp_extra_options(options)
arg_names, kw_names = _arg_kwarg_lists(module, options['analysis'])
# Create list of values for arg_names
args = []
for arg_name in arg_names:
if arg_name == 'patch': # For patch arg, append actual patch obj
args.append(options['patch'])
continue
if arg_name == 'self': # Ignore self from class methods
continue
if arg_name == 'k': # scipy dists use k and x, we always use x
arg_name = 'x'
try:
exec 'args.append(eval("%s"))' % options[arg_name]
except SyntaxError: # eval failing because option is a string
args.append(options[arg_name])
except:
raise ValueError, ("Value for required argument %s not provided"
% arg_name)
# Create dict with vals for kw_names
kwargs = {}
for kw_name in kw_names:
if kw_name in options.keys(): # If a value is given for this kwarg
try:
exec 'kwargs[kw_name] = eval("%s")' % options[kw_name]
except SyntaxError: # eval failing because value is a string
kwargs[kw_name] = options[kw_name]
except:
raise ValueError, ("Value for optional argument %s is invalid"
% kw_name)
return args, kwargs
def _emp_extra_options(options):
"""
Get special options patch, cols, and splits if analysis in emp module
"""
# Check that metadata is valid
metadata_path = os.path.normpath(os.path.join(options['param_dir'],
options['metadata']))
if not os.path.isfile(metadata_path):
raise IOError, ("Path to metadata file %s is invalid." %
metadata_path)
options['metadata_path'] = metadata_path
# Using subset if given, create and store patch
subset = options.get('subset', '')
options['patch'] = emp.Patch(metadata_path, subset)
# If cols or splits not given in options, make empty strings
if 'cols' not in options.keys():
options['cols'] = ''
if 'splits' not in options.keys():
options['splits'] = ''
return options
def _arg_kwarg_lists(module, analysis):
# Get names of args and kwargs to method specified by analysis option
exec ("arg_and_kwd_names, _, _, kw_defaults = "
"inspect.getargspec(%s.%s)" % (module, analysis))
if kw_defaults: # If there are kwargs
arg_names = arg_and_kwd_names[:-len(kw_defaults)]
kw_names = arg_and_kwd_names[-len(kw_defaults):]
else: # If no kwargs
arg_names = arg_and_kwd_names
kw_names = []
# Inspection for rv classes doesn't work since it uses args internally
# Unless method is translate_args or fit_mle, appends shapes to args
try:
obj_meth = analysis.split('.')
if obj_meth[1] not in ['fit_mle', 'translate_args']:
arg_names += eval(module + '.' + obj_meth[0] + '.' +
"shapes.replace(' ','').split(',')")
if obj_meth[1] == 'rvs': # Inspection for size not working
kw_names.append('size')
except:
pass
return arg_names, kw_names
def _fit_models(options, core_results):
"""
Fit models to empirical result from a function in emp module
Parameters
----------
options : dict
Option names and values for analysis
core_results : list of tuples
Output of function in emp
Returns
-------
list of dicts
Each element in list corresponds to a subset. The dict has a key for
each model given in options, and the value is a list of fitted
parameters (tuple), values (array), comparison statistic names (list),
and comparison statistic values (list).
Notes
-----
To determine if the empirical result refers to a curve or a distribution,
the result dataframe is inspected for a column 'x', which indicates a
curve.
"""
logging.info("Fitting models")
models = options['models'].replace(' ', '').split(';')
# TODO: Make work for 2D results, i.e., curves, comm_sep, o_ring
# TODO: Make work for curves in general (check if 'x' present in core_res)
fit_results = []
for core_result in core_results: # Each subset
fit_result = {}
for model in models:
fits = _get_fits(core_result, model, options)
values = _get_values(core_result, model, fits)
stat_names, stats = _get_comparison_stat(core_result, values,
model, fits)
fit_result[model] = [fits, values, stat_names, stats]
fit_results.append(fit_result)
return fit_results
def _get_fits(core_result, model, options):
options_copy = {}
for key, val in options.iteritems():
if key not in ['patch']: # Ignore patch since won't deepcopy
options_copy[key] = copy.deepcopy(val)
model_obj = eval('mod.' + model)
if hasattr(model_obj, 'fit_mle'):
options_copy['analysis'] = model + '.' + 'fit_mle'
options_copy['data'] = core_result[1]['y'].values
else:
options_copy['analysis'] = model + '.' + 'fit_lsq'
options_copy['x'] = core_result[1]['x'].values
options_copy['y_obs'] = core_result[1]['y'].values
options_copy['df'] = core_result[1] # Entire result df, for mete_sar
return _call_analysis_function(options_copy, 'mod')
def _get_values(core_result, model, fits):
model_obj = eval('mod.' + model)
if hasattr(model_obj, 'vals'):
x = core_result[1]['x'].values # Calc model at x values
values = eval("mod.%s.vals(x, *fits)" % model)
else:
n = len(core_result[1]) # Calc model at data values
values = eval("mod.%s.rank(n, *fits)" % model)
return values
def _get_comparison_stat(core_result, values, model, fits):
# Uses AIC for distributions, R2 one-to-one for curves
try: # Only curves have vals
eval("mod.%s" % model + ".vals.__doc__")
obs = core_result[1]['y'].values
pred = values
name = ['R2']
stat = comp.r_squared(obs, pred, one_to_one=True)
except AttributeError:
obs = core_result[1]['y'].values
name = ['AIC']
stat = comp.AIC(obs, eval("mod.%s" % model + "(*fits)"))
return name, stat
def _save_results(options, module, core_results, fit_results):
"""
Save results of analysis as tables and figures
Parameters
----------
options : dict
Option names and values for analysis
module : str
Module that contained function used to generate core_results
core_results : dataframe, array, value, list of tuples
Results of main analysis
fit_results : list or None
Results of comparing emp analysis to models, None if not applicable
"""
logging.info("Saving all results")
# Use custom plot format
mpl.rcParams.update(misc.rcparams.ggplot_rc)
# Make run directory
os.makedirs(options['run_dir'])
# Write core results
_write_core_tables(options, module, core_results)
# Write additional results if analysis from emp
if module == 'emp':
_write_subset_index_file(options, core_results)
# Write model/data comparison if models were given
if fit_results:
models = options['models'].replace(' ','').split(';')
for i, core_result in enumerate(core_results):
_write_fitted_params(i, models, options, fit_results)
_write_test_statistics(i, models, options, fit_results)
_write_comparison_plot_table(i, models, options,
core_results, fit_results)
def _write_core_tables(options, module, core_results):
"""
Notes
-----
Depending on function that was called for analysis, core_results may be a
list of tuples (empirical), a dataframe, an array, or a single value.
For the list of tuples from empirical, the second element of each tuple is
the raw result, and we write them all with the appropriate prefix. For
dataframes, we write them. For arrays or single values, we convert to data
frames and write them.
"""
table_name = 'core_result.csv'
single_file_path = os.path.join(options['run_dir'], table_name)
if module == 'emp': # List of tuples
for i, core_result in enumerate(core_results):
file_path = _get_file_path(i, options, table_name)
core_result[1].to_csv(file_path, index=False, float_format='%.4f')
elif type(core_results) == type(pd.DataFrame()): # DataFrame
core_results.to_csv(single_file_path, index=False, float_format='%.4f')
else: # Array or single value (atleast_1d corrects for unsized array)
df = pd.DataFrame({'y': np.atleast_1d(core_results)})
df.to_csv(single_file_path, index=False, float_format='%.4f')
def _get_file_path(spid, options, file_name):
return os.path.join(options['run_dir'],
'%i_%s' % (spid+1, file_name))
def _write_subset_index_file(options, core_results):
"""
Write table giving index of subsets, giving number and subset string
"""
f_path = os.path.join(options['run_dir'], '_subset_index.csv')
subset_strs = zip(*core_results)[0]
index = np.arange(len(subset_strs)) + 1
df = pd.DataFrame({'subsets': subset_strs}, index=index)
df.to_csv(f_path)
def _write_fitted_params(spid, models, options, fit_results):
# TODO: Consider converting to pandas, need to deal with variable length
# TODO: Possibility - empty data frame max length, max width = nparams
f = open(_get_file_path(spid, options, 'fitted_params.csv'), 'w')
f.write("Model, Fit Parameters\n")
for model in models:
fit_result = fit_results[spid][model]
mod_fits = str(fit_result[0])[1:-1] # Drop parens around tuple
f.write("%s,%s\n" % (model, mod_fits))
f.close()
def _write_test_statistics(spid, models, options, fit_results):
# TODO: Add delta test statistics columns
# TODO: Make dataframe?
f = open(_get_file_path(spid, options, 'test_statistics.csv'), 'w')
# Gets stat name list from any element of result dict - same for all
stat_names_list = next(fit_results[spid].itervalues())[2]
stat_names_str = str(stat_names_list)[1:-1].strip("'")
f.write("Model, %s\n" % stat_names_str)
for model in models:
fit_result = fit_results[spid][model]
fit_stats = str(fit_result[3])[:]
f.write("%s,%s\n" % (model, fit_stats))
f.close()
def _write_comparison_plot_table(spid, models, options, core_results,
fit_results):
"""
Notes
-----
Only applies to analysis using functions from empirical in which models are
also given.
"""
# TODO: Clean up sorting, may not work if SAR x out of order, e.g.
is_curve = 'x' in core_results[0][1]
df = core_results[spid][1]
df.rename(columns={'y': 'empirical'}, inplace=True)
# If distribution, need to sort values so will match sorted rank in fits
if not is_curve:
x = np.arange(len(df)) + 1
df = df.sort(columns='empirical')
df.insert(0, 'x', x[::-1])
# Add residual column for each model
for model in models:
fit_result = fit_results[spid][model]
df[model] = fit_result[1]
df[model + "_residual"] = df[model] - df['empirical']
# If curve, sort now for plotting purposes
if is_curve:
df = df.sort(columns='x')
# Set up file paths
f_path = _get_file_path(spid, options, 'data_models.csv')
p_path = _get_file_path(spid, options, 'data_models.pdf')
# Save table
df.to_csv(f_path, index=False, float_format='%.4f') # Table
# Save plot
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(df['x'], df['empirical'], color='k')
ax1.plot(df['x'], df[models])
ax1.legend(models + ['empirical'], loc='best')
ax1.set_xlabel('x')
ax1.set_ylabel('value')
ax2.hlines(0, np.min(df['x']), np.max(df['x']))
ax2.plot(df['x'], df[[x + '_residual' for x in models]])
ax2.legend(models + ['empirical'], loc='best')
ax2.set_xlabel('x')
ax2.set_ylabel('residual')
ax2.set_xlim(ax1.get_xlim())
ax2.set_ylim(min(ax2.get_ylim()[0], -1), max(ax2.get_ylim()[1], 1))
if options.get('log_y', None):
ax1.set_yscale('log')
ax2.set_yscale('symlog', linthreshy=1)
if options.get('log_x', None):
ax1.set_xscale('log')
ax2.set_xscale('log')
if not options.get('log_x', None) and not options.get('log_y', None):
ax1.set_ylim(bottom=0)
ax1.set_xlim(left=0)
ax1 = _pad_plot_frame(ax1)
ax2 = _pad_plot_frame(ax2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.tight_layout()
fig.savefig(p_path)
plt.close('all')
def _pad_plot_frame(ax, pad=0.01):
"""
Provides padding on sides of frame equal to pad fraction of plot
"""
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
xr = xmax - xmin
yr = ymax - ymin
ax.set_xlim(xmin - xr*pad, xmax + xr*pad)
ax.set_ylim(ymin - yr*pad, ymax + yr*pad)
return ax
def _output_cdf_plot(core_result, spid, models, options, fit_results):
"""Function for plotting cdf"""
# CDF
x = core_result['y'].values
df = emp.empirical_cdf(x)
df.columns = ['x', 'empirical']
def calc_func(model, df, shapes):
return eval("mod.%s.cdf(df['x'], *shapes)" % model)
plot_exec_str = "ax.step(df['x'], emp, color='k', lw=3);ax.set_ylim(top=1)"
_save_table_and_plot(spid, models, options, fit_results, 'data_pred_cdf',
df, calc_func, plot_exec_str)
def output_pdf_plot(core_result, spid, models, options, fit_results):
""" Function for plotting pdf/pmf """
# PDF/PMF
hist_bins = 11
emp_hist, edges = np.histogram(core_result['y'].values, hist_bins,
normed=True)
x = (np.array(edges[:-1]) + np.array(edges[1:])) / 2
df = pd.DataFrame({'x': x, 'empirical': emp_hist})
def calc_func(model, df, shapes):
try:
return eval("mod.%s.pmf(np.floor(df['x']), *shapes)" % model)
except:
return eval("mod.%s.pdf(df['x'], *shapes)" % model)
plot_exec_str = "ax.bar(df['x']-width/2, emp, width=width, color='gray')"
_save_table_and_plot(spid, models, options, fit_results, 'data_pred_pdf',
df, calc_func, plot_exec_str)
|
{
"content_hash": "405ed554eac4fb51f59c8ddd457bc73b",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 79,
"avg_line_length": 32.92391304347826,
"alnum_prop": 0.6090647549875018,
"repo_name": "jkitzes/macroeco",
"id": "102a50005436d61042a63d896d8a6ee3f1739209",
"size": "21203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "macroeco/main/_main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "247340"
}
],
"symlink_target": ""
}
|
import json
import django_filters
from django.contrib.gis.geos import GEOSGeometry
from dateutil.parser import parse
from django.core.exceptions import ImproperlyConfigured
from django.contrib.gis.db import models as gis_models
from django.contrib.gis.gdal.error import GDALException
from django.db.models import Q
from rest_framework.exceptions import ParseError, NotFound
from rest_framework.filters import BaseFilterBackend
from rest_framework_gis.filterset import GeoFilterSet
from ashlar.models import Boundary, BoundaryPolygon, Record, RecordType
from ashlar.exceptions import QueryParameterException
from djsonb import fields as jsb
# Map custom fields to CharField so that django-filter knows how to handle them.
FILTER_OVERRIDES = {
jsb.JsonBField: {
'filter_class': django_filters.CharFilter
},
gis_models.PointField: {
'filter_class': django_filters.CharFilter
}
}
class RecordFilter(GeoFilterSet):
record_type = django_filters.Filter(field_name='record_type', method='filter_record_type')
polygon = django_filters.Filter(field_name='polygon', method='filter_polygon')
polygon_id = django_filters.Filter(field_name='polygon_id', method='filter_polygon_id')
def filter_polygon(self, queryset, field_name, geojson):
""" Method filter for arbitrary polygon, sent in as geojson.
"""
try:
poly = GEOSGeometry(geojson)
except GDALException as e:
raise ParseError('Failed to parse geometry: ' + str(e))
# In practically all cases, Django's GEOSGeometry object will throw a
# GDALException when it attempts to parse an invalid GeoJSON object.
# However, the docs reccommend using the `valid` and `valid_reason`
# attributes to check the validity of the input geometries. Support
# both validity checks here.
if poly.valid:
return queryset.filter(geom__intersects=poly)
else:
raise ParseError('Input polygon must be valid GeoJSON: ' + poly.valid_reason)
def filter_polygon_id(self, queryset, field_name, poly_uuid):
""" Method filter for containment within the polygon specified by poly_uuid"""
if not poly_uuid:
return queryset
try:
return queryset.filter(geom__intersects=BoundaryPolygon.objects.get(pk=poly_uuid).geom)
except ValueError as e:
raise ParseError(e)
except BoundaryPolygon.DoesNotExist as e:
raise NotFound(e)
# It would be preferable to do something like this to avoid loading the whole geometry into
# Python, but this currently raises 'Complex expressions not supported for GeometryField'
#return queryset.filter(geom__intersects=RawSQL(
# 'SELECT geom FROM ashlar_boundarypolygon WHERE uuid=%s', (poly_uuid,)
#))
def filter_record_type(self, queryset, field_name, value):
""" Method filter for records having a desired record type (uuid)
e.g. /api/records/?record_type=44a51b83-470f-4e3d-b71b-e3770ec79772
"""
return queryset.filter(schema__record_type=value)
class Meta:
model = Record
fields = ['data', 'record_type', 'geom', 'archived']
filter_overrides = FILTER_OVERRIDES
class RecordTypeFilter(django_filters.FilterSet):
record = django_filters.Filter(field_name='record', method='type_for_record')
def type_for_record(self, queryset, field_name, record_id):
""" Filter down to only the record type that corresponds to the given record. """
record_type_id = Record.objects.filter(pk=record_id).values_list(
'schema__record_type_id', flat=True).first()
return queryset.filter(pk=record_type_id)
class Meta:
model = RecordType
fields = ['active', 'label', 'record']
class BoundaryFilter(GeoFilterSet):
STATUS_SET = {status[0] for status in Boundary.StatusTypes.CHOICES}
status = django_filters.Filter(field_name='status', method='multi_filter_status')
def multi_filter_status(self, queryset, field_name, value):
""" Method filter for multiple choice query on status
e.g. /api/boundary/?status=ERROR,WARNING
"""
statuses = value.split(',')
statuses = set(statuses) & self.STATUS_SET
return queryset.filter(status__in=statuses)
class Meta:
model = Boundary
fields = ['status']
class BoundaryPolygonFilter(GeoFilterSet):
boundary = django_filters.Filter(field_name='boundary', method='filter_boundary')
def filter_boundary(self, queryset, field_name, value):
""" Method filter for boundary polygons having a desired boundary (uuid)
e.g. /api/boundarypolygons/?boundary=44a51b83-470f-4e3d-b71b-e3770ec79772
"""
return queryset.filter(boundary=value)
class Meta:
model = BoundaryPolygon
fields = ['data', 'boundary']
filter_overrides = FILTER_OVERRIDES
class DateRangeFilterBackend(BaseFilterBackend):
"""Used to filter querysets based on a given START_FIELD and END_FIELD
NOTE: This filter must be inherited from in order to be used because
1. its Meta class must be set with a model and list of fields
2. you likely want to override START_FIELD/END_FIELD
This is a simple filter which takes two (optional) limits and returns all records
whose 'occurred_from' field falls on or between the maximum and minimum provided.
If only a maximum or a minimum are provided, the MIN_DATETIME or MAX_DATETIME will
be used instead.
An example [truncated] query: /api/records/?occurred_min=1901-01-01T00:00:00+00:00Z
"""
MIN_DATETIME = '1901-01-01T00:00:00+00:00'
MAX_DATETIME = '9999-12-31T23:59:59.999999+00:00'
FIELD = 'occurred_from'
# This message will be formatted by ParseError, which creates a string like
# "<parameter> must be <ERR_MSG>". This accounts for the slightly strange
# wording of the error.
ERR_MSG = 'ISO 8601 formatted with timezone information. Please check that the URL is properly encoded.'
def filter_queryset(self, request, queryset, view):
"""Filter records by date
Arguments
:param request: django rest framework request instance
:param queryset: queryset to apply filter to
:param view: view that this filter is being used by
QUERY PARAMS
:param valid_from: ISO 8601 timestamp
:param valid_to: ISO 8601 timestamp
"""
occurred_min = 'occurred_min'
occurred_max = 'occurred_max'
if occurred_min not in request.query_params and occurred_max not in request.query_params:
return queryset
try:
min_date = parse(request.query_params.get(occurred_min, self.MIN_DATETIME))
except:
raise QueryParameterException(occurred_min, self.ERR_MSG)
try:
max_date = parse(request.query_params.get(occurred_max, self.MAX_DATETIME))
except:
raise QueryParameterException(occurred_max, self.ERR_MSG)
if not min_date.tzinfo or not max_date.tzinfo:
raise QueryParameterException('datetimes', self.ERR_MSG)
return queryset.filter(occurred_from__gte=min_date, occurred_from__lte=max_date)
class JsonBFilterBackend(BaseFilterBackend):
""" Generic configurable filter for JsonBField
Requires the following properties, configured on the view using this filter backend:
jsonb_filter_field: The name of the django model field to filter against
NOTE: Currently, there can be at most one jsonb field to filter over. parametrizing
the fieldnames will allow indefinitely many filtered columns.
EXAMPLE USAGE: /api/records/?jcontains={"Site": {"DPWH province name": "CAGAYAN"}}
"""
def filter_queryset(self, request, queryset, view):
""" Filter by configured jsonb_filters on jsonb_filter_field """
lookup_name = 'jsonb'
filter_field = getattr(view, 'jsonb_filter_field', None)
if not filter_field:
raise ImproperlyConfigured('JsonBFilterBackend requires property ' +
'`jsonb_filter_field` on view')
filter_value = request.query_params.get(lookup_name, None)
if not filter_value:
return queryset
filter_key = '{0}__{1}'.format(filter_field, lookup_name)
try:
json_data = json.loads(filter_value)
except ValueError as e:
raise ParseError(str(e))
if isinstance(json_data, dict):
queryset = queryset.filter(Q(**{filter_key: json_data}))
else:
raise ParseError('Lookup must be an object')
return queryset
|
{
"content_hash": "70dcfa5f7b00963e2b20a1a5522a6e77",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 108,
"avg_line_length": 37.866379310344826,
"alnum_prop": 0.6761525327262379,
"repo_name": "azavea/ashlar",
"id": "3a181ccb568b3251769730b7daed665140151546",
"size": "8785",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "ashlar/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104910"
},
{
"name": "Shell",
"bytes": "6036"
}
],
"symlink_target": ""
}
|
import islpy as isl
simple_send_receive = False
def print_set(name, s):
print name, " := ",
if (s.is_empty()):
print "Empty"
else:
print s, ";"
# The set S is assumed to be blocked. The size of each multidimensional
# block is indicated using the "block" list.
# Each block is partitioned into partitions.
# Each processors of the list "processors" will run a partition.
# "dims" indicates the dimensions of "S" that are divided into blocks.
# "dims", "processors" and "block" are all integer lists.
# This function returns a map that partitions the set S.
# The name of the processor in this map is cpu_name. It can be either "r" or "rp".
# orientation: "row" or "col". "row" means that processors are distributed
# in row major order, while "col" means that they are distributed
# in column major order.
def generate_partitionning_map(S, dims, processors, block, orientation, cpu_name):
name = S.get_tuple_name();
dim_number = S.dim(isl.dim_type.set)
# suffix is the name of the buffer that is being partitioned. If the suffix = A, then
# processors that partition this array will be called rA0, rA1, ...
# This is necessary because the partitionning of the array A may be different from
# the partitionning of the array B.
suffix = name
NP = 1;
for i in range(0,len(processors)):
NP = NP * processors[i];
dims_str = ""
for i in range(0,dim_number):
dims_str += "i" + str(i)
if (i < dim_number-1):
dims_str += ", "
params = ""
for i in range(0,len(dims)):
params += cpu_name + str(i) + ", "
for i in range(0,len(dims)):
params += cpu_name + suffix + str(i)
if (i < len(dims)-1):
params += ", "
processor_definitions = ""
for i in range(0,len(dims)):
processor_definitions += "0 <= " + cpu_name + str(i) + " < " + str(NP) + " and "
processor_definitions += cpu_name + suffix + str(i) + " = " + cpu_name + ((str(i)) if (orientation == "row") else (str(len(dims)-1-i)))
if (i < len(dims)):
processor_definitions += " and "
partitions_str = "[" + params + "]->{" + name + "[" + dims_str + "]->" + name + "[" + dims_str + "]: "
partitions_str += processor_definitions
for i in range(0, len(dims)):
partitions_str += str(block[i]/processors[i]) + cpu_name + suffix + str(i) + "<=i" + str(dims[i]) + "<" + str(block[i]/processors[i]) + "*(" + cpu_name + suffix + str(i) + "+1)"
if (i < len(dims) - 1):
partitions_str += " and "
partitions_str += "}"
print "Processor definitions = ", processor_definitions
print "Partitions: ", partitions_str
partitions = isl.Map(partitions_str)
# The parameters have the following form : [r0, r1, ..., rA0, rA1, ...].
# The first set of parameters are always (r0, r1, ...) or (rp1, rp1, ...). We need to project
# all the parameters except the rX or rpX parameters so that we get every thing in function of rX or rpX only
# instead of rA0, rA1, ... This works because we have already provided constraints
# that indicate the relation between rA0, rA1, ... and (rX or rpX). So projecting rA0, rA1, ...
# would leave the map in function of rX or rpX only which is what we want.
partitions = partitions.project_out(isl.dim_type.param, len(dims), partitions.dim(isl.dim_type.param)-len(dims)).coalesce()
return partitions
# The set S is assumed to be blocked. The size of each multidimensional
# block is indicated using the "block" list.
# Each block is partitioned into partitions.
# Each processors of the list "processors" will run a partition.
# "dims" indicates the dimensions of "S" that are divided into blocks.
# "dims", "processors" and "block" are all integer lists.
# This function returns two maps, each map partitions the set into
# the selected partitions but one of the map is in function of the
# processor pr while the other is in function of the processor ps.
# orientation: "row" or "col". "row" means that processors are distributed
# in row major order, while "col" means that they are distributed
# in column major order.
def partition(S, dims, processors, block, orientation):
rp_2D_partitions_map = generate_partitionning_map(S, dims, processors, block, orientation, "rp")
r_2D_partitions_map = generate_partitionning_map(S, dims, processors, block, orientation, "r")
return {'ps':rp_2D_partitions_map, 'pr':r_2D_partitions_map}
# The set r_S is defined in function of r and rp. in the send and receive sets
# r represents the rank of this processor while rp (r prime) is the rank of
# the processor that will send data to this processor or that will receive
# data from this processor. That is, it represents the processor ID from which
# this processor receives data or to which it sends its data.
# We want to represent rp using a loop so that we can send/receive from all
# the processors that are identified using the loop iterator.
# To do that, we create a variable "p" that is equal to "rp", then
# we project the parameter "rp".
def augment_set_with_processor(r_S, nb_processors):
name = r_S.get_tuple_name();
dim_number = r_S.dim(isl.dim_type.set)
dims_str = ""
for i in range(0, dim_number):
dims_str += "i" + str(i)
if (i < dim_number-1):
dims_str += ", "
params = ""
processor_dims = ""
processor_constraints = ""
for i in range(0, nb_processors):
params += "r" + str(i) + ", "
processor_dims += "p" + str(i)
processor_constraints += " p" + str(i) + " = rp" + str(i)
if (i < nb_processors-1):
processor_dims += ","
processor_constraints += " and "
for i in range(0, nb_processors):
params += "rp" + str(i)
if (i < nb_processors-1):
params += ","
map_str = "["+ params + "]->{" + name + "[" + dims_str + "]->" + name + "[" + processor_dims + "," + dims_str + "]: " + processor_constraints + "}"
print "Augmentation map (adds news dimensions) : ", map_str
# Add a new dimension p0 to the set.
augmentation_map = isl.Map(map_str)
augmented_r_S = r_S.apply(augmentation_map)
print "Projecting out 1 dimension of the parameters starting from the dimension 1. This dimension represents the parameter rp."
augmented_r_S = augmented_r_S.project_out(isl.dim_type.param, nb_processors, nb_processors).coalesce()
return augmented_r_S
# r is my_rank().
def compute_communication(S0_dom, Access_A, Access_B, Access_C, str_schedule, A, B, C, r_partitioner_A, rp_partitioner_A, r_partitioner_B, rp_partitioner_B, r_partitioner_C, rp_partitioner_C, context):
# 0 - Print all inputs
##########################################@
print "S0_dom = ", S0_dom
print "Access_A = ", Access_A
print "Access_B = ", Access_B
print "Access_C = ", Access_C
print "Schedule = ", str_schedule
print "A = ", A
print "B = ", B
print "C = ", C
print "\n\n"
print "r_partitioner_A = ", r_partitioner_A
print "rp_partitioner_A = ", rp_partitioner_A
print "r_partitioner_B = ", r_partitioner_B
print "rp_partitioner_B = ", rp_partitioner_B
print "r_partitioner_C = ", r_partitioner_C
print "rp_partitioner_C = ", rp_partitioner_C
print "\n\n"
# I - Compute the "Have" sets for r and rp.
############################################
r_have_A = A.apply(r_partitioner_A)
rp_have_A = A.apply(rp_partitioner_A)
r_have_B = B.apply(r_partitioner_B)
rp_have_B = B.apply(rp_partitioner_B)
r_have_C = C.apply(r_partitioner_C)
rp_have_C = C.apply(rp_partitioner_C)
print "r_have_A = ", r_have_A;
print "r_have_B = ", r_have_B
print "r_have_C = ", r_have_C
print "rp_have_A = ", rp_have_A
print "rp_have_B = ", rp_have_B
print "rp_have_C = ", rp_have_C
# II - Compute the "Need" parts of A, B, and C
# II.1- Reverse access to C
# II.2- Apply the have_C to the reverse of C to get the corresponding domain of S0
# II.2- Apply the domain of S0 to Access_A and Access_B to derive the needed parts
# of A and B
################################################
reverse_Access_C = Access_C.reverse()
S0_corresponding_to_p_owned_C = r_have_C.apply(reverse_Access_C)
S0_corresponding_to_pp_owned_C = rp_have_C.apply(reverse_Access_C)
r_need_A = S0_corresponding_to_p_owned_C.apply(Access_A)
r_need_B = S0_corresponding_to_p_owned_C.apply(Access_B)
rp_need_A = S0_corresponding_to_pp_owned_C.apply(Access_A)
rp_need_B = S0_corresponding_to_pp_owned_C.apply(Access_B)
print "r_need_A = ", r_need_A;
print "r_need_B = ", r_need_B
print "rp_need_A = ", rp_need_A;
print "rp_need_B = ", rp_need_B
print "\n"
# III- Compute the missing part for r. We interpret r as the receiver in this section
# (i.e., the part that r has to receive)
#################################################
# what needs to be received for A = r_missing_A
r_missing_A = r_need_A - r_have_A
r_missing_B = r_need_B - r_have_B
r_receive_A = r_missing_A.intersect(rp_have_A)
r_receive_B = r_missing_B.intersect(rp_have_B)
print "r_missing_A = r_need_A - r_have_A = ", r_missing_A
print "r_missing_B = r_need_B - r_have_B = ", r_missing_B
print "r_receive_A = r_missing_A.intersect(rp_have_A) = ", r_receive_A
print "r_receive_B = r_missing_B.intersect(rp_have_B) = ", r_receive_B
print "\n\n"
# IV- Compute the missing part for the senders.
#################################################
rp_missing_A = rp_need_A - rp_have_A
rp_missing_B = rp_need_B - rp_have_B
r_send_A = rp_missing_A.intersect(r_have_A)
r_send_B = rp_missing_B.intersect(r_have_B)
print "rp_missing_A = rp_need_A - rp_have_A = ", rp_missing_A
print "rp_missing_B = rp_need_B - rp_have_B = ", rp_missing_B
print "r_send_A = rp_missing_A.intersect(r_have_A) = ", r_send_A
print "r_send_B = rp_missing_B.intersect(r_have_B) = ", r_send_B
print "\n\n"
# V- Code generation
##################################################
if (r_send_A.is_empty() == False):
r_send_A = augment_set_with_processor(r_send_A, r_partitioner_A.dim(isl.dim_type.param))
if (r_send_B.is_empty() == False):
r_send_B = augment_set_with_processor(r_send_B, r_partitioner_B.dim(isl.dim_type.param))
if (r_receive_A.is_empty() == False):
r_receive_A = augment_set_with_processor(r_receive_A, r_partitioner_A.dim(isl.dim_type.param))
if (r_receive_B.is_empty() == False):
r_receive_B = augment_set_with_processor(r_receive_B, r_partitioner_B.dim(isl.dim_type.param))
print "\n\n"
r_send_A = r_send_A.set_tuple_name("Send_A")
r_send_B = r_send_B.set_tuple_name("Send_B")
r_receive_A = r_receive_A.set_tuple_name("Receive_A")
r_receive_B = r_receive_B.set_tuple_name("Receive_B")
print_set("Send_A ", r_send_A)
print "\n"
print_set("Send_B ", r_send_B)
print "\n"
print_set("Receive_A", r_receive_A)
print "\n"
print_set("Receive_B", r_receive_B)
print "\n"
uA_to_send_p = isl.UnionSet.from_set(r_send_A)
uB_to_send_p = isl.UnionSet.from_set(r_send_B)
ur_receive_A = isl.UnionSet.from_set(r_receive_A)
ur_receive_B = isl.UnionSet.from_set(r_receive_B)
uS0_dom = isl.UnionSet.from_set(S0_dom)
domain = uA_to_send_p.union(uB_to_send_p)
domain = domain.union(ur_receive_A)
domain = domain.union(ur_receive_B)
domain = domain.union(uS0_dom)
schedule = isl.UnionMap(str_schedule);
schedule = schedule.intersect_domain(domain)
print "Schedule intersect domain = ", schedule
build = isl.AstBuild.alloc(schedule.get_ctx())
build = build.restrict(context)
node = build.node_from_schedule_map(schedule)
print node.to_C_str()
|
{
"content_hash": "c0fd613cd5c270d887fdba68aac507d2",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 201,
"avg_line_length": 41.12820512820513,
"alnum_prop": 0.6526540790879943,
"repo_name": "rbaghdadi/ISIR",
"id": "45e3c32cf8d323bb365854f4728ea22db2b4aadb",
"size": "11228",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/distributed_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1056"
},
{
"name": "C++",
"bytes": "36385"
},
{
"name": "Makefile",
"bytes": "1181"
}
],
"symlink_target": ""
}
|
"""Tests for mongodb backend
Authors:
* Min RK
"""
#-------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------
import os
from unittest import TestCase
from nose import SkipTest
from pymongo import Connection
from IPython.parallel.controller.mongodb import MongoDB
from . import test_db
conn_kwargs = {}
if 'DB_IP' in os.environ:
conn_kwargs['host'] = os.environ['DB_IP']
if 'DBA_MONGODB_ADMIN_URI' in os.environ:
# On ShiningPanda, we need a username and password to connect. They are
# passed in a mongodb:// URI.
conn_kwargs['host'] = os.environ['DBA_MONGODB_ADMIN_URI']
if 'DB_PORT' in os.environ:
conn_kwargs['port'] = int(os.environ['DB_PORT'])
try:
c = Connection(**conn_kwargs)
except Exception:
c = None
class TestMongoBackend(test_db.TaskDBTest, TestCase):
"""MongoDB backend tests"""
def create_db(self):
try:
return MongoDB(database='iptestdb', _connection=c)
except Exception:
raise SkipTest("Couldn't connect to mongodb")
def teardown(self):
if c is not None:
c.drop_database('iptestdb')
|
{
"content_hash": "95d49b78f1338b882f527837ea2b0817",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 26.16949152542373,
"alnum_prop": 0.5485751295336787,
"repo_name": "mattvonrocketstein/smash",
"id": "873076ebbde859c5470464f1d4b1951e47bdd921",
"size": "1544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/parallel/tests/test_mongodb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
}
|
from RPi.GPIO import *
class Relay:
def __init__(self, pin):
self.pin = pin
setup(self.pin, OUT)
self.Enabled = False
def Enable(self):
output(self.pin, True)
self.Enabled = True
def Disable(self):
output(self.pin, False)
self.Enabled = False
|
{
"content_hash": "a0a3faeaa7fefb64b025fb816b9e4737",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 31,
"avg_line_length": 20.933333333333334,
"alnum_prop": 0.5573248407643312,
"repo_name": "curtisblack/R2D2",
"id": "4ae79f2e4e073d0e521240788660f965ce978186",
"size": "314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Relay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64121"
},
{
"name": "Shell",
"bytes": "163"
}
],
"symlink_target": ""
}
|
import numpy as np
import numpy.testing as npt
import pytest
from pytest import raises as assert_raises
from scipy.integrate import IntegrationWarning
from scipy import stats
from scipy.special import betainc
from .common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_entropy_vect_scale,
check_edge_support, check_named_args,
check_random_state_property,
check_meth_dtype, check_ppf_dtype, check_cmplx_deriv,
check_pickling, check_rvs_broadcast, check_freezing)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distributions so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
# Note that you need to add new distributions you want tested
# to _distr_params
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
distslow = ['kstwo', 'genexpon', 'ksone', 'recipinvgauss', 'vonmises',
'kappa4', 'vonmises_line', 'gausshyper', 'norminvgauss',
'geninvgauss', 'genhyperbolic']
# distslow are sorted by speed (very slow to slow)
distxslow = ['studentized_range']
# distxslow are sorted by speed (very slow to slow)
# skip check_fit_args (test is slow)
skip_fit_test_mle = ['exponpow', 'exponweib', 'gausshyper', 'genexpon',
'halfgennorm', 'gompertz', 'johnsonsb', 'johnsonsu',
'kappa4', 'ksone', 'kstwo', 'kstwobign', 'mielke', 'ncf',
'nct', 'powerlognorm', 'powernorm', 'recipinvgauss',
'trapezoid', 'vonmises', 'vonmises_line', 'levy_stable',
'rv_histogram_instance', 'studentized_range']
# these were really slow in `test_fit`.py.
# note that this list is used to skip both fit_test and fit_fix tests
slow_fit_test_mm = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
'kappa4', 'kstwobign', 'recipinvgauss', 'skewnorm',
'trapezoid', 'truncexpon', 'vonmises', 'vonmises_line',
'studentized_range']
# pearson3 fails due to something weird
# the first list fails due to non-finite distribution moments encountered
# most of the rest fail due to integration warnings
# pearson3 is overriden as not implemented due to gh-11746
fail_fit_test_mm = (['alpha', 'betaprime', 'bradford', 'burr', 'burr12',
'cauchy', 'crystalball', 'f', 'fisk', 'foldcauchy',
'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
'kappa3', 'levy', 'levy_l', 'loglaplace', 'lomax',
'mielke', 'nakagami', 'ncf', 'skewcauchy', 't',
'tukeylambda', 'invweibull']
+ ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
'nct', 'pareto', 'powernorm', 'powerlognorm']
+ ['pearson3'])
skip_fit_test = {"MLE": skip_fit_test_mle,
"MM": slow_fit_test_mm + fail_fit_test_mm}
# skip check_fit_args_fix (test is slow)
skip_fit_fix_test_mle = ['burr', 'exponpow', 'exponweib', 'gausshyper',
'genexpon', 'halfgennorm', 'gompertz', 'johnsonsb',
'johnsonsu', 'kappa4', 'ksone', 'kstwo', 'kstwobign',
'levy_stable', 'mielke', 'ncf', 'ncx2',
'powerlognorm', 'powernorm', 'rdist', 'recipinvgauss',
'trapezoid', 'vonmises', 'vonmises_line',
'studentized_range']
# the first list fails due to non-finite distribution moments encountered
# most of the rest fail due to integration warnings
# pearson3 is overriden as not implemented due to gh-11746
fail_fit_fix_test_mm = (['alpha', 'betaprime', 'burr', 'burr12', 'cauchy',
'crystalball', 'f', 'fisk', 'foldcauchy',
'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
'kappa3', 'levy', 'levy_l', 'loglaplace', 'lomax',
'mielke', 'nakagami', 'ncf', 'nct', 'skewcauchy', 't',
'invweibull']
+ ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
'pareto', 'powernorm', 'powerlognorm']
+ ['pearson3'])
skip_fit_fix_test = {"MLE": skip_fit_fix_test_mle,
"MM": slow_fit_test_mm + fail_fit_fix_test_mm}
# These distributions fail the complex derivative test below.
# Here 'fail' mean produce wrong results and/or raise exceptions, depending
# on the implementation details of corresponding special functions.
# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
fails_cmplx = set(['argus', 'beta', 'betaprime', 'chi', 'chi2', 'cosine',
'dgamma', 'dweibull', 'erlang', 'f', 'gamma',
'gausshyper', 'gengamma', 'genhyperbolic',
'geninvgauss', 'gennorm', 'genpareto',
'halfgennorm', 'invgamma',
'ksone', 'kstwo', 'kstwobign', 'levy_l', 'loggamma',
'logistic', 'loguniform', 'maxwell', 'nakagami',
'ncf', 'nct', 'ncx2', 'norminvgauss', 'pearson3', 'rdist',
'reciprocal', 'rice', 'skewnorm', 't', 'tukeylambda',
'vonmises', 'vonmises_line', 'rv_histogram_instance',
'truncnorm', 'studentized_range'])
_h = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6,
6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
histogram_test_instance = stats.rv_histogram(_h)
def cases_test_cont_basic():
for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
if distname == 'levy_stable':
continue
elif distname in distslow:
yield pytest.param(distname, arg, marks=pytest.mark.slow)
elif distname in distxslow:
yield pytest.param(distname, arg, marks=pytest.mark.xslow)
else:
yield distname, arg
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
@pytest.mark.parametrize('sn, n_fit_samples', [(500, 200)])
def test_cont_basic(distname, arg, sn, n_fit_samples):
# this test skips slow distributions
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'rv_histogram_instance'
rng = np.random.RandomState(765456)
rvs = distfn.rvs(size=sn, *arg, random_state=rng)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, distname + 'sample mean test')
check_cdf_ppf(distfn, arg, distname)
check_sf_isf(distfn, arg, distname)
check_pdf(distfn, arg, distname)
check_pdf_logpdf(distfn, arg, distname)
check_pdf_logpdf_at_endpoints(distfn, arg, distname)
check_cdf_logcdf(distfn, arg, distname)
check_sf_logsf(distfn, arg, distname)
check_ppf_broadcast(distfn, arg, distname)
alpha = 0.01
if distname == 'rv_histogram_instance':
check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
elif distname != 'geninvgauss':
# skip kstest for geninvgauss since cdf is too slow; see test for
# rv generation in TestGenInvGauss in test_distributions.py
check_distribution_rvs(distname, arg, alpha, rvs)
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
spec_x = {'weibull_max': -0.5, 'levy_l': -0.5,
'pareto': 1.5, 'tukeylambda': 0.3,
'rv_histogram_instance': 5.0}
x = spec_x.get(distname, 0.5)
if distname == 'invweibull':
arg = (1,)
elif distname == 'ksone':
arg = (3,)
check_named_args(distfn, x, arg, locscale_defaults, meths)
check_random_state_property(distfn, arg)
check_pickling(distfn, arg)
check_freezing(distfn, arg)
# Entropy
if distname not in ['kstwobign', 'kstwo', 'ncf']:
check_entropy(distfn, arg, distname)
if distfn.numargs == 0:
check_vecentropy(distfn, arg)
if (distfn.__class__._entropy != stats.rv_continuous._entropy
and distname != 'vonmises'):
check_private_entropy(distfn, arg, stats.rv_continuous)
with npt.suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
sup.filter(IntegrationWarning, "Extremely bad integrand")
sup.filter(RuntimeWarning, "invalid value")
check_entropy_vect_scale(distfn, arg)
check_retrieving_support(distfn, arg)
check_edge_support(distfn, arg)
check_meth_dtype(distfn, arg, meths)
check_ppf_dtype(distfn, arg)
if distname not in fails_cmplx:
check_cmplx_deriv(distfn, arg)
if distname != 'truncnorm':
check_ppf_private(distfn, arg, distname)
for method in ["MLE", "MM"]:
if distname not in skip_fit_test[method]:
check_fit_args(distfn, arg, rvs[:n_fit_samples], method)
if distname not in skip_fit_fix_test[method]:
check_fit_args_fix(distfn, arg, rvs[:n_fit_samples], method)
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
def test_rvs_scalar(distname, arg):
# rvs should return a scalar when given scalar arguments (gh-12428)
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'rv_histogram_instance'
assert np.isscalar(distfn.rvs(*arg))
assert np.isscalar(distfn.rvs(*arg, size=()))
assert np.isscalar(distfn.rvs(*arg, size=None))
def test_levy_stable_random_state_property():
# levy_stable only implements rvs(), so it is skipped in the
# main loop in test_cont_basic(). Here we apply just the test
# check_random_state_property to levy_stable.
check_random_state_property(stats.levy_stable, (0.5, 0.1))
def cases_test_moments():
fail_normalization = set(['vonmises'])
fail_higher = set(['vonmises', 'ncf'])
fail_loc_scale = set(['kappa3', 'kappa4']) # see gh-13582
for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
if distname == 'levy_stable':
continue
if distname == 'studentized_range':
msg = ("studentized_range is far too slow for this test and it is "
"redundant with test_distributions::TestStudentizedRange::"
"test_moment_against_mp")
yield pytest.param(distname, arg, True, True, True, True,
marks=pytest.mark.xslow(reason=msg))
continue
cond1 = distname not in fail_normalization
cond2 = distname not in fail_higher
cond3 = distname not in fail_loc_scale
yield distname, arg, cond1, cond2, cond3, False
if not cond1 or not cond2 or not cond3:
# Run the distributions that have issues twice, once skipping the
# not_ok parts, once with the not_ok parts but marked as knownfail
yield pytest.param(distname, arg, True, True, True, True,
marks=pytest.mark.xfail)
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,'
'loc_scale_ok,is_xfailing',
cases_test_moments())
def test_moments(distname, arg, normalization_ok, higher_ok, loc_scale_ok,
is_xfailing):
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'rv_histogram_instance'
with npt.suppress_warnings() as sup:
sup.filter(IntegrationWarning,
"The integral is probably divergent, or slowly convergent.")
if is_xfailing:
sup.filter(IntegrationWarning)
m, v, s, k = distfn.stats(*arg, moments='mvsk')
if normalization_ok:
check_normalization(distfn, arg, distname)
if higher_ok:
check_mean_expect(distfn, arg, m, distname)
check_skew_expect(distfn, arg, m, v, s, distname)
check_var_expect(distfn, arg, m, v, distname)
check_kurt_expect(distfn, arg, m, v, k, distname)
if loc_scale_ok:
check_loc_scale(distfn, arg, m, v, distname)
check_moment(distfn, arg, m, v, distname)
@pytest.mark.parametrize('dist,shape_args', distcont)
def test_rvs_broadcast(dist, shape_args):
if dist in ['gausshyper', 'genexpon', 'studentized_range']:
pytest.skip("too slow")
# If shape_only is True, it means the _rvs method of the
# distribution uses more than one random number to generate a random
# variate. That means the result of using rvs with broadcasting or
# with a nontrivial size will not necessarily be the same as using the
# numpy.vectorize'd version of rvs(), so we can only compare the shapes
# of the results, not the values.
# Whether or not a distribution is in the following list is an
# implementation detail of the distribution, not a requirement. If
# the implementation the rvs() method of a distribution changes, this
# test might also have to be changed.
shape_only = dist in ['argus', 'betaprime', 'dgamma', 'dweibull',
'exponnorm', 'genhyperbolic', 'geninvgauss',
'levy_stable', 'nct', 'norminvgauss', 'rice',
'skewnorm', 'semicircular']
distfunc = getattr(stats, dist)
loc = np.zeros(2)
scale = np.ones((3, 1))
nargs = distfunc.numargs
allargs = []
bshape = [3, 2]
# Generate shape parameter arguments...
for k in range(nargs):
shp = (k + 4,) + (1,)*(k + 2)
allargs.append(shape_args[k]*np.ones(shp))
bshape.insert(0, k + 4)
allargs.extend([loc, scale])
# bshape holds the expected shape when loc, scale, and the shape
# parameters are all broadcast together.
check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd')
def test_rvs_gh2069_regression():
# Regression tests for gh-2069. In scipy 0.17 and earlier,
# these tests would fail.
#
# A typical example of the broken behavior:
# >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
# array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
rng = np.random.RandomState(123)
vals = stats.norm.rvs(loc=np.zeros(5), scale=1, random_state=rng)
d = np.diff(vals)
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
vals = stats.norm.rvs(loc=0, scale=np.ones(5), random_state=rng)
d = np.diff(vals)
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
vals = stats.norm.rvs(loc=np.zeros(5), scale=np.ones(5), random_state=rng)
d = np.diff(vals)
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
vals = stats.norm.rvs(loc=np.array([[0], [0]]), scale=np.ones(5),
random_state=rng)
d = np.diff(vals.ravel())
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
[[1, 1], [1, 1]], 1)
assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
[[1], [2]], (4,))
def test_nomodify_gh9900_regression():
# Regression test for gh-9990
# Prior to gh-9990, calls to stats.truncnorm._cdf() use what ever was
# set inside the stats.truncnorm instance during stats.truncnorm.cdf().
# This could cause issues wth multi-threaded code.
# Since then, the calls to cdf() are not permitted to modify the global
# stats.truncnorm instance.
tn = stats.truncnorm
# Use the right-half truncated normal
# Check that the cdf and _cdf return the same result.
npt.assert_almost_equal(tn.cdf(1, 0, np.inf), 0.6826894921370859)
npt.assert_almost_equal(tn._cdf(1, 0, np.inf), 0.6826894921370859)
# Now use the left-half truncated normal
npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), 0.31731050786291415)
npt.assert_almost_equal(tn._cdf(-1, -np.inf, 0), 0.31731050786291415)
# Check that the right-half truncated normal _cdf hasn't changed
npt.assert_almost_equal(tn._cdf(1, 0, np.inf), 0.6826894921370859) # NOT 1.6826894921370859
npt.assert_almost_equal(tn.cdf(1, 0, np.inf), 0.6826894921370859)
# Check that the left-half truncated normal _cdf hasn't changed
npt.assert_almost_equal(tn._cdf(-1, -np.inf, 0), 0.31731050786291415) # Not -0.6826894921370859
npt.assert_almost_equal(tn.cdf(1, -np.inf, 0), 1) # Not 1.6826894921370859
npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), 0.31731050786291415) # Not -0.6826894921370859
def test_broadcast_gh9990_regression():
# Regression test for gh-9990
# The x-value 7 only lies within the support of 4 of the supplied
# distributions. Prior to 9990, one array passed to
# stats.reciprocal._cdf would have 4 elements, but an array
# previously stored by stats.reciprocal_argcheck() would have 6, leading
# to a broadcast error.
a = np.array([1, 2, 3, 4, 5, 6])
b = np.array([8, 16, 1, 32, 1, 48])
ans = [stats.reciprocal.cdf(7, _a, _b) for _a, _b in zip(a,b)]
npt.assert_array_almost_equal(stats.reciprocal.cdf(7, a, b), ans)
ans = [stats.reciprocal.cdf(1, _a, _b) for _a, _b in zip(a,b)]
npt.assert_array_almost_equal(stats.reciprocal.cdf(1, a, b), ans)
ans = [stats.reciprocal.cdf(_a, _a, _b) for _a, _b in zip(a,b)]
npt.assert_array_almost_equal(stats.reciprocal.cdf(a, a, b), ans)
ans = [stats.reciprocal.cdf(_b, _a, _b) for _a, _b in zip(a,b)]
npt.assert_array_almost_equal(stats.reciprocal.cdf(b, a, b), ans)
def test_broadcast_gh7933_regression():
# Check broadcast works
stats.truncnorm.logpdf(
np.array([3.0, 2.0, 1.0]),
a=(1.5 - np.array([6.0, 5.0, 4.0])) / 3.0,
b=np.inf,
loc=np.array([6.0, 5.0, 4.0]),
scale=3.0
)
def test_gh2002_regression():
# Add a check that broadcast works in situations where only some
# x-values are compatible with some of the shape arguments.
x = np.r_[-2:2:101j]
a = np.r_[-np.ones(50), np.ones(51)]
expected = [stats.truncnorm.pdf(_x, _a, np.inf) for _x, _a in zip(x, a)]
ans = stats.truncnorm.pdf(x, a, np.inf)
npt.assert_array_almost_equal(ans, expected)
def test_gh1320_regression():
# Check that the first example from gh-1320 now works.
c = 2.62
stats.genextreme.ppf(0.5, np.array([[c], [c + 0.5]]))
# The other examples in gh-1320 appear to have stopped working
# some time ago.
# ans = stats.genextreme.moment(2, np.array([c, c + 0.5]))
# expected = np.array([25.50105963, 115.11191437])
# stats.genextreme.moment(5, np.array([[c], [c + 0.5]]))
# stats.genextreme.moment(5, np.array([c, c + 0.5]))
def test_method_of_moments():
# example from https://en.wikipedia.org/wiki/Method_of_moments_(statistics)
np.random.seed(1234)
x = [0, 0, 0, 0, 1]
a = 1/5 - 2*np.sqrt(3)/5
b = 1/5 + 2*np.sqrt(3)/5
# force use of method of moments (uniform.fit is overriden)
loc, scale = super(type(stats.uniform), stats.uniform).fit(x, method="MM")
npt.assert_almost_equal(loc, a, decimal=4)
npt.assert_almost_equal(loc+scale, b, decimal=4)
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
# this did not work, skipped silently by nose
if np.isfinite(m):
check_sample_mean(sm, sv, sn, m)
if np.isfinite(v):
check_sample_var(sv, sn, v)
def check_sample_mean(sm, v, n, popmean):
# from stats._stats_py.ttest_1samp(a, popmean):
# Calculates the t-obtained for the independent samples T-test on ONE group
# of scores a, given a population mean.
#
# Returns: t-value, two-tailed prob
df = n-1
svar = ((n-1)*v) / float(df) # looks redundant
t = (sm-popmean) / np.sqrt(svar*(1.0/n))
prob = betainc(0.5*df, 0.5, df/(df + t*t))
# return t,prob
npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
(t, prob, popmean, sm))
def check_sample_var(sv, n, popvar):
# two-sided chisquare test for sample variance equal to
# hypothesized variance
df = n-1
chi2 = (n - 1)*sv/popvar
pval = stats.distributions.chi2.sf(chi2, df) * 2
npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' %
(chi2, pval, popvar, sv))
def check_cdf_ppf(distfn, arg, msg):
values = [0.001, 0.5, 0.999]
npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
values, decimal=DECIMAL, err_msg=msg +
' - cdf-ppf roundtrip')
def check_sf_isf(distfn, arg, msg):
npt.assert_almost_equal(distfn.sf(distfn.isf([0.1, 0.5, 0.9], *arg), *arg),
[0.1, 0.5, 0.9], decimal=DECIMAL, err_msg=msg +
' - sf-isf roundtrip')
npt.assert_almost_equal(distfn.cdf([0.1, 0.9], *arg),
1.0 - distfn.sf([0.1, 0.9], *arg),
decimal=DECIMAL, err_msg=msg +
' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
# compares pdf at median with numerical derivative of cdf
median = distfn.ppf(0.5, *arg)
eps = 1e-6
pdfv = distfn.pdf(median, *arg)
if (pdfv < 1e-4) or (pdfv > 1e4):
# avoid checking a case where pdf is close to zero or
# huge (singularity)
median = median + 0.1
pdfv = distfn.pdf(median, *arg)
cdfdiff = (distfn.cdf(median + eps, *arg) -
distfn.cdf(median - eps, *arg))/eps/2.0
# replace with better diff and better test (more points),
# actually, this works pretty well
msg += ' - cdf-pdf relationship'
npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg)
def check_pdf_logpdf(distfn, args, msg):
# compares pdf at several points with the log of the pdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
vals = vals[np.isfinite(vals)]
pdf = distfn.pdf(vals, *args)
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
logpdf = logpdf[np.isfinite(logpdf)]
msg += " - logpdf-log(pdf) relationship"
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
def check_pdf_logpdf_at_endpoints(distfn, args, msg):
# compares pdf with the log of the pdf at the (finite) end points
points = np.array([0, 1])
vals = distfn.ppf(points, *args)
vals = vals[np.isfinite(vals)]
with npt.suppress_warnings() as sup:
# Several distributions incur divide by zero or encounter invalid values when computing
# the pdf or logpdf at the endpoints.
suppress_messsages = [
"divide by zero encountered in true_divide", # multiple distributions
"divide by zero encountered in log", # multiple distributions
"divide by zero encountered in power", # gengamma
"invalid value encountered in add", # genextreme
"invalid value encountered in subtract", # gengamma
"invalid value encountered in multiply" # recipinvgauss
]
for msg in suppress_messsages:
sup.filter(category=RuntimeWarning, message=msg)
pdf = distfn.pdf(vals, *args)
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
logpdf = logpdf[np.isfinite(logpdf)]
msg += " - logpdf-log(pdf) relationship"
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
def check_sf_logsf(distfn, args, msg):
# compares sf at several points with the log of the sf
points = np.array([0.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
vals = distfn.ppf(points, *args)
vals = vals[np.isfinite(vals)]
sf = distfn.sf(vals, *args)
logsf = distfn.logsf(vals, *args)
sf = sf[sf != 0]
logsf = logsf[np.isfinite(logsf)]
msg += " - logsf-log(sf) relationship"
npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg)
def check_cdf_logcdf(distfn, args, msg):
# compares cdf at several points with the log of the cdf
points = np.array([0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
vals = distfn.ppf(points, *args)
vals = vals[np.isfinite(vals)]
cdf = distfn.cdf(vals, *args)
logcdf = distfn.logcdf(vals, *args)
cdf = cdf[cdf != 0]
logcdf = logcdf[np.isfinite(logcdf)]
msg += " - logcdf-log(cdf) relationship"
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg)
def check_ppf_broadcast(distfn, arg, msg):
# compares ppf for multiple argsets.
num_repeats = 5
args = [] * num_repeats
if arg:
args = [np.array([_] * num_repeats) for _ in arg]
median = distfn.ppf(0.5, *arg)
medians = distfn.ppf(0.5, *args)
msg += " - ppf multiple"
npt.assert_almost_equal(medians, [median] * num_repeats, decimal=7, err_msg=msg)
def check_distribution_rvs(dist, args, alpha, rvs):
# dist is either a cdf function or name of a distribution in scipy.stats.
# args are the args for scipy.stats.dist(*args)
# alpha is a significance level, ~0.01
# rvs is array_like of random variables
# test from scipy.stats.tests
# this version reuses existing random variables
D, pval = stats.kstest(rvs, dist, args=args, N=1000)
if (pval < alpha):
# The rvs passed in failed the K-S test, which _could_ happen
# but is unlikely if alpha is small enough.
# Repeat the the test with a new sample of rvs.
# Generate 1000 rvs, perform a K-S test that the new sample of rvs
# are distributed according to the distribution.
D, pval = stats.kstest(dist, dist, args=args, N=1000)
npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args))
def check_loc_scale(distfn, arg, m, v, msg):
# Make `loc` and `scale` arrays to catch bugs like gh-13580 where
# `loc` and `scale` arrays improperly broadcast with shapes.
loc, scale = np.array([10.0, 20.0]), np.array([10.0, 20.0])
mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
npt.assert_allclose(m*scale + loc, mt)
npt.assert_allclose(v*scale*scale, vt)
def check_ppf_private(distfn, arg, msg):
# fails by design for truncnorm self.nb not defined
ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg)
npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
def check_retrieving_support(distfn, args):
loc, scale = 1, 2
supp = distfn.support(*args)
supp_loc_scale = distfn.support(*args, loc=loc, scale=scale)
npt.assert_almost_equal(np.array(supp)*scale + loc,
np.array(supp_loc_scale))
def check_fit_args(distfn, arg, rvs, method):
with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
sup.filter(category=RuntimeWarning,
message="The shape parameter of the erlang")
sup.filter(category=RuntimeWarning,
message="floating point number truncated")
vals = distfn.fit(rvs, method=method)
vals2 = distfn.fit(rvs, optimizer='powell', method=method)
# Only check the length of the return; accuracy tested in test_fit.py
npt.assert_(len(vals) == 2+len(arg))
npt.assert_(len(vals2) == 2+len(arg))
def check_fit_args_fix(distfn, arg, rvs, method):
with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
sup.filter(category=RuntimeWarning,
message="The shape parameter of the erlang")
vals = distfn.fit(rvs, floc=0, method=method)
vals2 = distfn.fit(rvs, fscale=1, method=method)
npt.assert_(len(vals) == 2+len(arg))
npt.assert_(vals[-2] == 0)
npt.assert_(vals2[-1] == 1)
npt.assert_(len(vals2) == 2+len(arg))
if len(arg) > 0:
vals3 = distfn.fit(rvs, f0=arg[0], method=method)
npt.assert_(len(vals3) == 2+len(arg))
npt.assert_(vals3[0] == arg[0])
if len(arg) > 1:
vals4 = distfn.fit(rvs, f1=arg[1], method=method)
npt.assert_(len(vals4) == 2+len(arg))
npt.assert_(vals4[1] == arg[1])
if len(arg) > 2:
vals5 = distfn.fit(rvs, f2=arg[2], method=method)
npt.assert_(len(vals5) == 2+len(arg))
npt.assert_(vals5[2] == arg[2])
@pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf',
'sf', 'logsf', 'ppf', 'isf'])
@pytest.mark.parametrize('distname, args', distcont)
def test_methods_with_lists(method, distname, args):
# Test that the continuous distributions can accept Python lists
# as arguments.
dist = getattr(stats, distname)
f = getattr(dist, method)
if distname == 'invweibull' and method.startswith('log'):
x = [1.5, 2]
else:
x = [0.1, 0.2]
shape2 = [[a]*2 for a in args]
loc = [0, 0.1]
scale = [1, 1.01]
result = f(x, *shape2, loc=loc, scale=scale)
npt.assert_allclose(result,
[f(*v) for v in zip(x, *shape2, loc, scale)],
rtol=1e-14, atol=5e-14)
def test_burr_fisk_moment_gh13234_regression():
vals0 = stats.burr.moment(1, 5, 4)
assert isinstance(vals0, float)
vals1 = stats.fisk.moment(1, 8)
assert isinstance(vals1, float)
def test_moments_with_array_gh12192_regression():
# array loc and scalar scale
vals0 = stats.norm.moment(n=1, loc=np.array([1, 2, 3]), scale=1)
expected0 = np.array([1., 2., 3.])
npt.assert_equal(vals0, expected0)
# array loc and invalid scalar scale
vals1 = stats.norm.moment(n=1, loc=np.array([1, 2, 3]), scale=-1)
expected1 = np.array([np.nan, np.nan, np.nan])
npt.assert_equal(vals1, expected1)
# array loc and array scale with invalid entries
vals2 = stats.norm.moment(n=1, loc=np.array([1, 2, 3]), scale=[-3, 1, 0])
expected2 = np.array([np.nan, 2., np.nan])
npt.assert_equal(vals2, expected2)
# (loc == 0) & (scale < 0)
vals3 = stats.norm.moment(n=2, loc=0, scale=-4)
expected3 = np.nan
npt.assert_equal(vals3, expected3)
assert isinstance(vals3, expected3.__class__)
# array loc with 0 entries and scale with invalid entries
vals4 = stats.norm.moment(n=2, loc=[1, 0, 2], scale=[3, -4, -5])
expected4 = np.array([10., np.nan, np.nan])
npt.assert_equal(vals4, expected4)
# all(loc == 0) & (array scale with invalid entries)
vals5 = stats.norm.moment(n=2, loc=[0, 0, 0], scale=[5., -2, 100.])
expected5 = np.array([25., np.nan, 10000.])
npt.assert_equal(vals5, expected5)
# all( (loc == 0) & (scale < 0) )
vals6 = stats.norm.moment(n=2, loc=[0, 0, 0], scale=[-5., -2, -100.])
expected6 = np.array([np.nan, np.nan, np.nan])
npt.assert_equal(vals6, expected6)
# scalar args, loc, and scale
vals7 = stats.chi.moment(n=2, df=1, loc=0, scale=0)
expected7 = np.nan
npt.assert_equal(vals7, expected7)
assert isinstance(vals7, expected7.__class__)
# array args, scalar loc, and scalar scale
vals8 = stats.chi.moment(n=2, df=[1, 2, 3], loc=0, scale=0)
expected8 = np.array([np.nan, np.nan, np.nan])
npt.assert_equal(vals8, expected8)
# array args, array loc, and array scale
vals9 = stats.chi.moment(n=2, df=[1, 2, 3], loc=[1., 0., 2.],
scale=[1., -3., 0.])
expected9 = np.array([3.59576912, np.nan, np.nan])
npt.assert_allclose(vals9, expected9, rtol=1e-8)
# (n > 4), all(loc != 0), and all(scale != 0)
vals10 = stats.norm.moment(5, [1., 2.], [1., 2.])
expected10 = np.array([26., 832.])
npt.assert_allclose(vals10, expected10, rtol=1e-13)
# test broadcasting and more
a = [-1.1, 0, 1, 2.2, np.pi]
b = [-1.1, 0, 1, 2.2, np.pi]
loc = [-1.1, 0, np.sqrt(2)]
scale = [-2.1, 0, 1, 2.2, np.pi]
a = np.array(a).reshape((-1, 1, 1, 1))
b = np.array(b).reshape((-1, 1, 1))
loc = np.array(loc).reshape((-1, 1))
scale = np.array(scale)
vals11 = stats.beta.moment(n=2, a=a, b=b, loc=loc, scale=scale)
a, b, loc, scale = np.broadcast_arrays(a, b, loc, scale)
for i in np.ndenumerate(a):
with np.errstate(invalid='ignore', divide='ignore'):
i = i[0] # just get the index
# check against same function with scalar input
expected = stats.beta.moment(n=2, a=a[i], b=b[i],
loc=loc[i], scale=scale[i])
np.testing.assert_equal(vals11[i], expected)
def test_broadcasting_in_moments_gh12192_regression():
vals0 = stats.norm.moment(n=1, loc=np.array([1, 2, 3]), scale=[[1]])
expected0 = np.array([[1., 2., 3.]])
npt.assert_equal(vals0, expected0)
assert vals0.shape == expected0.shape
vals1 = stats.norm.moment(n=1, loc=np.array([[1], [2], [3]]),
scale=[1, 2, 3])
expected1 = np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]])
npt.assert_equal(vals1, expected1)
assert vals1.shape == expected1.shape
vals2 = stats.chi.moment(n=1, df=[1., 2., 3.], loc=0., scale=1.)
expected2 = np.array([0.79788456, 1.25331414, 1.59576912])
npt.assert_allclose(vals2, expected2, rtol=1e-8)
assert vals2.shape == expected2.shape
vals3 = stats.chi.moment(n=1, df=[[1.], [2.], [3.]], loc=[0., 1., 2.],
scale=[-1., 0., 3.])
expected3 = np.array([[np.nan, np.nan, 4.39365368],
[np.nan, np.nan, 5.75994241],
[np.nan, np.nan, 6.78730736]])
npt.assert_allclose(vals3, expected3, rtol=1e-8)
assert vals3.shape == expected3.shape
|
{
"content_hash": "0c5f9c3f07393c37939e4a6306abe8ab",
"timestamp": "",
"source": "github",
"line_count": 834,
"max_line_length": 100,
"avg_line_length": 41.66546762589928,
"alnum_prop": 0.601283490172379,
"repo_name": "matthew-brett/scipy",
"id": "797769f677300788a519692725c498025fb86ca2",
"size": "34750",
"binary": false,
"copies": "1",
"ref": "refs/heads/polished-meson-windows",
"path": "scipy/stats/tests/test_continuous_basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4818671"
},
{
"name": "C++",
"bytes": "3181034"
},
{
"name": "CMake",
"bytes": "29273"
},
{
"name": "Cython",
"bytes": "1035101"
},
{
"name": "Dockerfile",
"bytes": "9777"
},
{
"name": "Fortran",
"bytes": "5298461"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "133294"
},
{
"name": "PowerShell",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "14259543"
},
{
"name": "Shell",
"bytes": "4415"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""Support for (EMEA/EU-based) Honeywell evohome systems."""
# Glossary:
# TCS - temperature control system (a.k.a. Controller, Parent), which can
# have up to 13 Children:
# 0-12 Heating zones (a.k.a. Zone), and
# 0-1 DHW controller, (a.k.a. Boiler)
# The TCS & Zones are implemented as Climate devices, Boiler as a WaterHeater
from datetime import timedelta
import logging
import requests.exceptions
import voluptuous as vol
from homeassistant.const import (
CONF_SCAN_INTERVAL, CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_START)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'evohome'
DATA_EVOHOME = 'data_' + DOMAIN
DISPATCHER_EVOHOME = 'dispatcher_' + DOMAIN
CONF_LOCATION_IDX = 'location_idx'
SCAN_INTERVAL_DEFAULT = timedelta(seconds=300)
SCAN_INTERVAL_MINIMUM = timedelta(seconds=180)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_LOCATION_IDX, default=0): cv.positive_int,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL_DEFAULT):
vol.All(cv.time_period, vol.Range(min=SCAN_INTERVAL_MINIMUM)),
}),
}, extra=vol.ALLOW_EXTRA)
CONF_SECRETS = [
CONF_USERNAME, CONF_PASSWORD,
]
# These are used to help prevent E501 (line too long) violations.
GWS = 'gateways'
TCS = 'temperatureControlSystems'
# bit masks for dispatcher packets
EVO_PARENT = 0x01
EVO_CHILD = 0x02
def setup(hass, hass_config):
"""Create a (EMEA/EU-based) Honeywell evohome system.
Currently, only the Controller and the Zones are implemented here.
"""
evo_data = hass.data[DATA_EVOHOME] = {}
evo_data['timers'] = {}
# use a copy, since scan_interval is rounded up to nearest 60s
evo_data['params'] = dict(hass_config[DOMAIN])
scan_interval = evo_data['params'][CONF_SCAN_INTERVAL]
scan_interval = timedelta(
minutes=(scan_interval.total_seconds() + 59) // 60)
import evohomeclient2
try:
client = evo_data['client'] = evohomeclient2.EvohomeClient(
evo_data['params'][CONF_USERNAME],
evo_data['params'][CONF_PASSWORD],
debug=False
)
except evohomeclient2.AuthenticationError as err:
_LOGGER.error(
"setup(): Failed to authenticate with the vendor's server. "
"Check your username and password are correct. "
"Resolve any errors and restart HA. Message is: %s",
err
)
return False
except requests.exceptions.ConnectionError:
_LOGGER.error(
"setup(): Unable to connect with the vendor's server. "
"Check your network and the vendor's status page. "
"Resolve any errors and restart HA."
)
return False
finally: # Redact any config data that's no longer needed
for parameter in CONF_SECRETS:
evo_data['params'][parameter] = 'REDACTED' \
if evo_data['params'][parameter] else None
evo_data['status'] = {}
# Redact any installation data that's no longer needed
for loc in client.installation_info:
loc['locationInfo']['locationId'] = 'REDACTED'
loc['locationInfo']['locationOwner'] = 'REDACTED'
loc['locationInfo']['streetAddress'] = 'REDACTED'
loc['locationInfo']['city'] = 'REDACTED'
loc[GWS][0]['gatewayInfo'] = 'REDACTED'
# Pull down the installation configuration
loc_idx = evo_data['params'][CONF_LOCATION_IDX]
try:
evo_data['config'] = client.installation_info[loc_idx]
except IndexError:
_LOGGER.error(
"setup(): config error, '%s' = %s, but its valid range is 0-%s. "
"Unable to continue. Fix any configuration errors and restart HA.",
CONF_LOCATION_IDX, loc_idx, len(client.installation_info) - 1
)
return False
if _LOGGER.isEnabledFor(logging.DEBUG):
tmp_loc = dict(evo_data['config'])
tmp_loc['locationInfo']['postcode'] = 'REDACTED'
if 'dhw' in tmp_loc[GWS][0][TCS][0]: # if this location has DHW...
tmp_loc[GWS][0][TCS][0]['dhw'] = '...'
_LOGGER.debug("setup(): evo_data['config']=%s", tmp_loc)
load_platform(hass, 'climate', DOMAIN, {}, hass_config)
if 'dhw' in evo_data['config'][GWS][0][TCS][0]:
_LOGGER.warning(
"setup(): DHW found, but this component doesn't support DHW."
)
@callback
def _first_update(event):
"""When HA has started, the hub knows to retrieve it's first update."""
pkt = {'sender': 'setup()', 'signal': 'refresh', 'to': EVO_PARENT}
async_dispatcher_send(hass, DISPATCHER_EVOHOME, pkt)
hass.bus.listen(EVENT_HOMEASSISTANT_START, _first_update)
return True
|
{
"content_hash": "ea0631dc3810254247e8724ec1c891c4",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 34.29931972789116,
"alnum_prop": 0.6443871479571599,
"repo_name": "auduny/home-assistant",
"id": "459a3636a06d3f5f728f34360007c7829d0a3c19",
"size": "5042",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/evohome/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15129018"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""Handles configuration options for the tests.
The tests are capable of running in other contexts, such as in a VM or against
a real deployment. Using this configuration ensures we can run them in other
environments if we choose to.
"""
import json
import os
from collections import Mapping
#TODO(tim.simpson): I feel like this class already exists somewhere in core
# Python.
class FrozenDict(Mapping):
def __init__(self, original):
self.original = original
def __len__(self):
return self.original.__len__()
def __iter__(self, *args, **kwargs):
return self.original.__iter__(self, *args, **kwargs)
def __getitem__(self, *args, **kwargs):
return self.original.__getitem__(*args, **kwargs)
def __str__(self):
return self.original.__str__()
USAGE_ENDPOINT = os.environ.get("USAGE_ENDPOINT",
"trove.tests.util.usage.UsageVerifier")
class TestConfig(object):
"""
Holds test configuration values which can be accessed as attributes
or using the values dictionary.
"""
def __init__(self):
"""
Create TestConfig, and set default values. These will be overwritten by
the "load_from" methods below.
"""
self._loaded_files = []
self._values = {
'clean_slate': os.environ.get("CLEAN_SLATE", "False") == "True",
'fake_mode': os.environ.get("FAKE_MODE", "False") == "True",
'nova_auth_url': "http://localhost:5000/v2.0",
'trove_auth_url': "http://localhost:5000/v2.0/tokens",
'dbaas_url': "http://localhost:8775/v1.0/dbaas",
'version_url': "http://localhost:8775/",
'nova_url': "http://localhost:8774/v2",
'dbaas_datastore': "mysql",
'dbaas_datastore_id': "a00000a0-00a0-0a00-00a0-000a000000aa",
'dbaas_datastore_name_no_versions': "Test_Datastore_1",
'dbaas_datastore_version': "5.5",
'dbaas_datastore_version_id': "b00000b0-00b0-0b00-00b0-"
"000b000000bb",
'dbaas_inactive_datastore_version': "mysql_inactive_version",
'instance_create_time': 16 * 60,
'mysql_connection_method': {"type": "direct"},
'typical_nova_image_name': None,
'white_box': os.environ.get("WHITE_BOX", "False") == "True",
'test_mgmt': False,
'use_local_ovz': False,
"known_bugs": {},
"in_proc_server": True,
"report_directory": os.environ.get("REPORT_DIRECTORY", None),
"trove_volume_support": True,
"trove_max_volumes_per_user": 100,
"usage_endpoint": USAGE_ENDPOINT,
"root_on_create": False,
"mysql": {
"configurations": {
"valid_values": {
"connect_timeout": 120,
"local_infile": 0,
"collation_server": "latin1_swedish_ci"
},
"appending_values": {
"join_buffer_size": 1048576,
"connect_timeout": 15
},
"nondynamic_parameter": {
"join_buffer_size": 1048576,
"innodb_buffer_pool_size": 57671680
},
"out_of_bounds_under": {
"connect_timeout": -10
},
"out_of_bounds_over": {
"connect_timeout": 1000000
},
"parameters_list": [
"key_buffer_size",
"connect_timeout"
]
},
"volume_support": True,
},
"redis": {"volume_support": False},
}
self._frozen_values = FrozenDict(self._values)
self._users = None
def get(self, name, default_value):
return self.values.get(name, default_value)
def get_report(self):
return PrintReporter()
def load_from_line(self, line):
index = line.find("=")
if index >= 0:
key = line[:index]
value = line[index + 1:]
self._values[key] = value
def load_include_files(self, original_file, files):
directory = os.path.dirname(original_file)
for file_sub_path in files:
file_full_path = os.path.join(directory, file_sub_path)
self.load_from_file(file_full_path)
def load_from_file(self, file_path):
if file_path in self._loaded_files:
return
file_contents = open(file_path, "r").read()
try:
contents = json.loads(file_contents)
except Exception as exception:
raise RuntimeError("Error loading conf file \"%s\"." % file_path,
exception)
finally:
self._loaded_files.append(file_path)
if "include-files" in contents:
self.load_include_files(file_path, contents['include-files'])
del contents['include-files']
self._values.update(contents)
def __getattr__(self, name):
if name not in self._values:
raise AttributeError('Configuration value "%s" not found.' % name)
else:
return self._values[name]
def python_cmd_list(self):
"""The start of a command list to use when running Python scripts."""
commands = []
if self.use_venv:
commands.append("%s/tools/with_venv.sh" % self.nova_code_root)
return list
commands.append("python")
return commands
@property
def users(self):
if self._users is None:
from trove.tests.util.users import Users
self._users = Users(self.values['users'])
return self._users
@property
def values(self):
return self._frozen_values
class PrintReporter(object):
def log(self, msg):
print("[REPORT] %s" % msg)
def update(self):
pass # Ignore. This is used in other reporters.
CONFIG = TestConfig()
del TestConfig.__init__
|
{
"content_hash": "c577b4a80a21a82b2f3ffae6a71a408b",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 34.10326086956522,
"alnum_prop": 0.5266932270916335,
"repo_name": "CMSS-BCRDB/RDS",
"id": "b5722fd77fa4074094ac0c74f425ac6951753d5e",
"size": "6915",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "trove/tests/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2811396"
},
{
"name": "Shell",
"bytes": "4771"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
}
|
__all__ = [
"job_template",
"vars_template",
"parent_template"
]
job_template = 'JOB {job_id} {condor}\n'.format
vars_template = 'VARS {job_id} input="{input}" arguments="{arguments}"\n'.format
_parent_template = 'PARENT {parents} CHILD {children}\n'.format
def parent_template(*, parents, children):
return _parent_template(parents=" ".join(parents),
children=" ".join(children))
def make_job(job_id, condor, input, arguments):
return (
job_template(job_id=job_id, condor=condor) +
vars_template(job_id=job_id, input=input, arguments=arguments)
)
dot_template = 'DOT {dot}'.format
|
{
"content_hash": "56ce396b2e1d0cb37129b063615563f6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 80,
"avg_line_length": 29.772727272727273,
"alnum_prop": 0.6290076335877862,
"repo_name": "dwysocki/CondorScheduler",
"id": "85634c609a7265daa95546f758e37dad2203c95f",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/CondorScheduler/dag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15710"
}
],
"symlink_target": ""
}
|
"""
Example of using the InputImageButton class.
"""
from ggame.inputpoint import InputImageButton
from ggame.mathapp import MathApp
from ggame.asset import Frame
def pressbutton(_button):
"""
Callback function executed when button is pressed.
"""
print("Button Pressed!")
BUTTON = InputImageButton(
"images/button-round.png", pressbutton, (0, 0), frame=Frame(0, 0, 100, 100), qty=2
)
BUTTON.scale = 0.5
MathApp().run()
|
{
"content_hash": "121c621e66ab335bb4083a72cb74d6f9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 86,
"avg_line_length": 21.19047619047619,
"alnum_prop": 0.7056179775280899,
"repo_name": "tiggerntatie/ggame",
"id": "38e3aeb32beef7b0dc32a781d43d98486949a311",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/inputpointinputimagebutton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "203298"
},
{
"name": "Shell",
"bytes": "1010"
}
],
"symlink_target": ""
}
|
import os
import pytest
import fuzzinator
from common_formatter import mock_issue, mock_templates_dir
@pytest.mark.parametrize('issue, formatter_init_kwargs, exp_short, exp_long', [
(mock_issue,
{'short': '{{id}}', 'long': 'id: {{id}}, bar: {{bar}}, baz: {{baz}}, qux.xyz: {{qux.xyz}}'},
'foo',
'id: foo, bar: True, baz: False, qux.xyz: 42'),
(mock_issue,
{'short_file': os.path.join(mock_templates_dir, 'doublecurly_short.txt'), 'long_file': os.path.join(mock_templates_dir, 'doublecurly_long.md')},
'issue: foo\n',
'# Issue: foo\n\n- bar: True\n- baz: False\n- qux.xyz: 42\n'),
(mock_issue,
{},
'',
''),
])
def test_chevron_formatter(issue, formatter_init_kwargs, exp_short, exp_long):
formatter = fuzzinator.formatter.ChevronFormatter(**formatter_init_kwargs)
assert exp_long == formatter(issue=issue)
assert exp_short == formatter.summary(issue=issue)
|
{
"content_hash": "5fc25fea2e19992eb47b6aab44c85909",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 149,
"avg_line_length": 35.73076923076923,
"alnum_prop": 0.635091496232508,
"repo_name": "akosthekiss/fuzzinator",
"id": "cac95c1f3519d18642fda4705711313f678d5ccc",
"size": "1183",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/formatter/test_chevron_formatter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "339"
},
{
"name": "C++",
"bytes": "3678"
},
{
"name": "CSS",
"bytes": "10728"
},
{
"name": "HTML",
"bytes": "44477"
},
{
"name": "JavaScript",
"bytes": "25491"
},
{
"name": "Makefile",
"bytes": "1755"
},
{
"name": "Python",
"bytes": "412762"
}
],
"symlink_target": ""
}
|
"""
The LinearMap class keeps track of things that occur over
a 1-dimensial space, thus the "linear" in "LinearMap". The LinearMap
helps you visualize the layout of "chunks" of linear space. The
chunks may overlap, or they may not. Sample applications include
the visualization of headers, segments, and sections within an ELF file
(a file being a 1-dimensional series of bytes), or figuring out which
jobs ran at the same time during a 'make' with -jN > 1 (time is
1-dimensional).
"""
from __future__ import nested_scopes
B_START = "START"
B_END = "END"
class Boundary:
"""The boundaries in the linear-space. They indicate the start
or end of something. This class is used by LinearMap; the user
doesn't need to worry about it."""
def __init__(self, btype, name, id):
self.type = btype
self.name = name
self.id = id
def Type(self):
return self.type
def Name(self):
return self.name
def ID(self):
return self.id
def __repr__(self):
return "%s of %s" % (self.type, self.name)
class Chunk:
"""Represent a chunk of the linear-space, i.e., the space between
2 boundaries in the linear map. Each chunk contains 0 or more
item IDs, which identify the items that were Add()ed to the LinearMap.
The user doesn't create these, but LinearMap can return
a list of Chunk objects."""
def __init__(self, start):
self.start = start
self.ids = []
self.end = None
def SetIDs(self, ids):
self.ids = ids
def SetEnd(self, end):
self.end = end
def IDs(self):
return self.ids
def Start(self):
return self.start
def End(self):
return self.end
def Length(self):
return self.end - self.start
def __repr__(self):
return "<START:%s END:%s IDs:%s>" % (self.start, self.end, self.ids)
class LinearMap:
"Produces a map of linear space."
def __init__(self, start=None, length=None, name=None):
"""Initialize a single 1-dimensional space. Without
start/length/name parameters, the size of the space is unknown
and unnamed. With start/length/name parameters, the linear space
is given an initial definition. This initial definition does not
impose limits on the size of the linear space, but nevertheless
can be useful, especially if your linear space should be bounded,
and shown so on a printed representation.."""
self.boundaries = {}
self.num_chunks = 0
self.boundary_pairs = []
self.print_as_hex = 1
self.print_diff = 1
self.print_map_key = 1
self.ids_start_at = 0
self.comments = {}
if start != None:
assert length != None and name != None
elif length != None:
assert start != None and name != None
elif name != None:
assert start != None and length != None
# Start with a block?
if start != None and length != None and name != None:
self.Add(start, length, name)
def PrintRawOffsets(self):
"""During the printing of the map, print offsets as decimal
instead of hexadecimal."""
self.print_as_hex = 0
def NoPrintDiff(self):
"""Don't print the size of the linear chunks; i.e., the difference
between boundaries."""
self.print_diff = 0
def NoPrintMapKey(self):
"""Don't print the map key when printing the map."""
self.print_map_key = 0
def IDsStartAt(self, num):
self.ids_start_at = num
def Add(self, start, length, name, comment=None):
"""Add an item in the linear space. The item has a start
and a length, and a name. An optional comment can be attached
to the chunk. The comment appears in the Map Key to better
identify the named chunk. Returns an ID, which is a number
unique to the LinearMap that identifies this added item."""
# The ID is a unique number for each item. The 'name'
# may not be unique, so LinearMap forces uniqueness
# by keeping track of an ID.
id = self.num_chunks + self.ids_start_at
if comment:
self.comments[id] = comment
boundary1 = Boundary(B_START, name, id)
records = self.boundaries.setdefault(start, [])
records.append(boundary1)
boundary2 = Boundary(B_END, name, id)
end = start + length
records = self.boundaries.setdefault(end, [])
records.append(boundary2)
self.num_chunks += 1
self.boundary_pairs.append((boundary1, boundary2))
return id
def Dump(self):
"""Dump the data to stdout."""
boundary_ids = self.boundaries.keys()
boundary_ids.sort()
if self.print_as_hex:
for boundary_id in boundary_ids:
print "0x%x : %s" % (boundary_id, self.boundaries[boundary_id])
else:
for boundary_id in boundary_ids:
print "%s : %s" % (boundary_id, self.boundaries[boundary_id])
def BoundaryArrays(self):
"""Return an array of arrays of Boundary objects,
in time-sorted order."""
boundary_ids = self.boundaries.keys()
boundary_ids.sort()
return map(lambda x: self.boundaries[x], boundary_ids)
def Chunks(self):
"""Return an array of Chunk objects, in time-sorted order.
Each Chunk object represents the space between 2 boundaries
in the linear space, and can contain 0 or more item IDs."""
chunks = []
boundary_ids = self.boundaries.keys()
boundary_ids.sort()
current_ids = []
old_chunk = None
for boundary_id in boundary_ids:
if old_chunk:
old_chunk.SetEnd(boundary_id)
chunks.append(old_chunk)
chunk = Chunk(boundary_id)
boundaries = self.boundaries[boundary_id]
for b in boundaries:
if b.Type() == B_START:
current_ids.append(b.ID())
elif b.Type() == B_END:
current_ids.remove(b.ID())
else:
assert 0
chunk.SetIDs(current_ids[:])
old_chunk = chunk
return chunks
def ItemName(self, id):
"""Given the ID of an item, return the name."""
return self.boundary_pairs[id][0].Name()
def ItemComments(self, id):
"""Given the ID of an item, return the name. If the item
has no comment, None is returned."""
if self.comments.has_key(id):
return self.comments[id]
else:
return None
def PrintMap(self, fh):
"""Print the linear map."""
if self.print_map_key:
print "Map Key:"
print "========"
for (b1, b2) in self.boundary_pairs:
name = b1.Name()
id = b1.ID()
if self.comments.has_key(id):
print "%2d. %s -- %s" % (id, name, self.comments[id])
else:
print "%2d. %s" % (id, name)
print
boundary_ids = self.boundaries.keys()
boundary_ids.sort()
current_chunks = []
chunk_str = None
old_boundary_id = 0
for boundary_id in boundary_ids:
if self.print_diff:
# Print chunk size for previous chunk
if chunk_str != None:
if chunk_str:
spaces = max(20 - len(chunk_str), 1)
else:
spaces = 21
print " " * spaces,
diff = boundary_id - old_boundary_id
print "%d bytes (0x%x)" % (diff, diff)
else:
print
boundaries = self.boundaries[boundary_id]
if self.print_as_hex:
print "-------- 0x%08x : %s" % (boundary_id, boundaries)
else:
print "-------- %s : %s" % (boundary_id, boundaries)
for b in boundaries:
if b.Type() == B_START:
current_chunks.append(b.ID())
elif b.Type() == B_END:
current_chunks.remove(b.ID())
else:
assert 0
if current_chunks:
chunk_str = ', '.join(map(str, current_chunks))
print chunk_str,
else:
chunk_str = ""
old_boundary_id = boundary_id
|
{
"content_hash": "d43b38979fc0903372c7fcb692d3f804",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 79,
"avg_line_length": 31.92962962962963,
"alnum_prop": 0.5543440436144299,
"repo_name": "gilramir/instmake",
"id": "51575be46c0185d7fc42dea234f47b0c0637cc1d",
"size": "8665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instmakelib/linearmap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "528"
},
{
"name": "Python",
"bytes": "470386"
},
{
"name": "Shell",
"bytes": "887"
}
],
"symlink_target": ""
}
|
import os
from rackattack.virtual import sh
def create(image, sizeGB):
dirname = os.path.dirname(image)
if not os.path.isdir(dirname):
os.makedirs(dirname, 0777)
sh.run(['qemu-img', 'create', '-f', 'qcow2', image, '%dG' % sizeGB])
os.chmod(image, 0666)
def deriveCopyOnWrite(original, newImage, originalFormat='qcow2'):
sh.run(['qemu-img', 'create', '-F', originalFormat, '-f', 'qcow2', '-b', original, newImage])
os.chmod(newImage, 0666)
|
{
"content_hash": "90c2b2feabdc65c82e7ae5519d388ca8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 97,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6505263157894737,
"repo_name": "eliran-stratoscale/rackattack-virtual",
"id": "d608ac81fe1d71da33df96625864ffd23d0396c4",
"size": "475",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rackattack/virtual/kvm/imagecommands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2946"
},
{
"name": "Python",
"bytes": "171528"
},
{
"name": "Shell",
"bytes": "796"
}
],
"symlink_target": ""
}
|
import icalendar
from django.conf import settings
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404, render
from django.template import TemplateDoesNotExist
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django_date_extensions.fields import ApproximateDate
from patreonmanager.models import FundraisingStatus
from .models import Event, User
from story.models import Story
def index(request):
blogs = Story.objects.filter(is_story=False).order_by('-created')[:3]
city_count = Event.objects.values('city').distinct().count()
country_count = Event.objects.values('country').distinct().count()
future_events = Event.objects.future()
organizers = User.objects.all().count()
stories = Story.objects.filter(is_story=True).order_by('-created')[:2]
return render(request, 'core/index.html', {
'future_events': future_events,
'stories': stories,
'blogposts': blogs,
'patreon_stats': FundraisingStatus.objects.all().first(), # TODO: This isn't used
'organizers_count': organizers,
'cities_count': city_count,
'country_count': country_count,
})
def events(request):
return render(request, 'core/events.html', {
'future_events': Event.objects.future(),
'past_events': Event.objects.past(),
})
def events_map(request):
return render(request, 'core/events_map.html', {
'events': Event.objects.public().order_by('date'),
'mapbox_map_id': settings.MAPBOX_MAP_ID,
})
def resources(request):
return render(request, 'core/resources.html', {})
def event(request, page_url):
now = timezone.now()
now_approx = ApproximateDate(year=now.year, month=now.month, day=now.day)
event_obj = get_object_or_404(Event, page_url=page_url.lower())
user = request.user
user_is_organizer = user.is_authenticated and event_obj.has_organizer(user)
is_preview = 'preview' in request.GET
can_preview = user.is_superuser or user_is_organizer or is_preview
if event_obj.date:
is_past = event_obj.date <= now_approx
else:
is_past = False
if not (event_obj.is_page_live or can_preview) or event_obj.is_frozen:
return render(
request,
'applications/event_not_live.html',
{'city': event_obj.city, 'page_url': page_url, 'past': is_past}
)
return render(request, "core/event.html", {
'event': event_obj,
'menu': event_obj.menu.all(),
'content': event_obj.content.prefetch_related('coaches', 'sponsors').filter(is_public=True),
})
def events_ical(request):
events = Event.objects.public().order_by('-date')
calendar = icalendar.Calendar()
calendar['summary'] = _("List of Django Girls events around the world")
for event in events:
ical_event = event.as_ical()
if ical_event is None:
continue # Skip events with an approximate date
calendar.add_component(ical_event)
return HttpResponse(
calendar.to_ical(),
content_type='text/calendar; charset=UTF-8'
)
def newsletter(request):
return render(request, 'core/newsletter.html', {})
def faq(request):
return render(request, 'core/faq.html', {})
def foundation(request):
return render(request, 'core/foundation.html', {})
def governing_document(request):
return render(request, 'core/governing_document.html', {})
def contribute(request):
return render(request, 'core/contribute.html', {})
def year_2015(request):
return render(request, 'core/2015.html', {
'events': Event.objects.public().filter(date__lt='2016-01-01').order_by('date'),
'mapbox_map_id': settings.MAPBOX_MAP_ID,
})
def year_2016_2017(request):
return render(request, 'core/2016-2017.html', {
'events_2015': Event.objects.public().filter(date__lt='2016-01-01').order_by('date'),
'events_20162017': Event.objects.public().filter(
date__lt='2017-08-01', date__gte='2016-01-01'
).order_by('date'),
'mapbox_map_id': settings.MAPBOX_MAP_ID,
})
def terms_conditions(request):
return render(request, 'core/terms_conditions.html', {})
def privacy_cookies(request):
return render(request, 'core/privacy_cookies.html', {})
# This view's URL is commented out, so avoid coverage hit by commenting out the view also
# def workshop_box(request):
# return render(request, 'core/workshop_box.html', {})
def server_error(request):
return HttpResponse(status=500)
def coc(request):
template_name = "core/coc.html"
return render(request, template_name)
def coc_legacy(request, lang=None):
if lang is None:
lang = 'en'
template_name = f"core/coc/{lang}.html"
try:
return render(request, template_name)
except TemplateDoesNotExist:
raise Http404(_("No translation for language %(lang)s") % {'lang': lang})
# This view's URL is commented out, so avoid coverage hit by commenting out the view also
# def crowdfunding_donors(request):
# donor_list = Donor.objects.filter(visible=True).order_by('-amount')
# return render(request, 'core/crowdfunding_donors.html', {
# 'donor_list': donor_list,
# 'quotes': DONOR_QUOTES,
# })
|
{
"content_hash": "613890ce4700a88136d6532f32d73b1c",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 100,
"avg_line_length": 30.386363636363637,
"alnum_prop": 0.6602468212415856,
"repo_name": "DjangoGirls/djangogirls",
"id": "b6947cd9a2868d6313ae9c4e208b436fc94dc5f0",
"size": "5348",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "core/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "428291"
},
{
"name": "JavaScript",
"bytes": "13711"
},
{
"name": "Python",
"bytes": "422267"
},
{
"name": "Stylus",
"bytes": "32803"
}
],
"symlink_target": ""
}
|
number = 9
print(type(number)) # print type of variable "number"
# FLOAT variables are numbers with decimal points:
float_number = 9.0
print (type(float_number))
# Here's a STRING variable:
foo = "hi there"
# Here we're changing it to an INT:
foo = 1000000
print (type(foo))
# What kind of variable is shopping?
shopping = ["Ham", "Eggs", "bread", "dogfood"]
print (type(shopping))
# It's a LIST variable!
# Here's how we get the first element of the list called shopping:
print(shopping[0])
# things is a list that contains another list, shopping
things = [ shopping, 'chemistry', 1997, 2000];
####################################
# Assign a new value to the 4th value in shopping, using things:
things[0][3] = "CatFood"
# Print some other values:
print(things[0][3])
print (shopping[3])
print(type(shopping[3]))
####################################
foo = "This is the new foo"
print(things[0])
print(foo)
|
{
"content_hash": "4bd7242dab1f4306c1fdf99515ac0bd2",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 66,
"avg_line_length": 20.555555555555557,
"alnum_prop": 0.6410810810810811,
"repo_name": "jimoconnell/electrobrain",
"id": "ae102f734191ecc80136559a494002b4a7dca00f",
"size": "1011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "variable_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11485"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import re
import posixpath
from sentry.grouping.component import GroupingComponent
from sentry.grouping.strategies.base import strategy
from sentry.grouping.strategies.utils import remove_non_stacktrace_variants, has_url_origin
from sentry.grouping.strategies.similarity_encoders import text_shingle_encoder, ident_encoder
_ruby_anon_func = re.compile(r"_\d{2,}")
_filename_version_re = re.compile(
r"""(?:
v?(?:\d+\.)*\d+| # version numbers, v1, 1.0.0
[a-f0-9]{7,8}| # short sha
[a-f0-9]{32}| # md5
[a-f0-9]{40} # sha1
)/""",
re.X | re.I,
)
# OpenJDK auto-generated classes for reflection access:
# sun.reflect.GeneratedSerializationConstructorAccessor123
# sun.reflect.GeneratedConstructorAccessor456
# Note that this doesn't cover the following pattern for the sake of
# backward compatibility (to not to change the existing grouping):
# sun.reflect.GeneratedMethodAccessor789
_java_reflect_enhancer_re = re.compile(
r"""(sun\.reflect\.Generated(?:Serialization)?ConstructorAccessor)\d+""", re.X
)
# Java Spring specific anonymous classes.
# see: http://mydailyjava.blogspot.co.at/2013/11/cglib-missing-manual.html
_java_cglib_enhancer_re = re.compile(r"""(\$\$[\w_]+?CGLIB\$\$)[a-fA-F0-9]+(_[0-9]+)?""", re.X)
# Handle Javassist auto-generated classes and filenames:
# com.example.api.entry.EntriesResource_$$_javassist_74
# com.example.api.entry.EntriesResource_$$_javassist_seam_74
# EntriesResource_$$_javassist_seam_74.java
_java_assist_enhancer_re = re.compile(r"""(\$\$_javassist)(?:_seam)?(?:_[0-9]+)?""", re.X)
# Clojure anon functions are compiled down to myapp.mymodule$fn__12345
_clojure_enhancer_re = re.compile(r"""(\$fn__)\d+""", re.X)
# fields that need to be the same between frames for them to be considered
# recursive calls
RECURSION_COMPARISON_FIELDS = [
"abs_path",
"package",
"module",
"filename",
"function",
"lineno",
"colno",
]
def is_unhashable_module_legacy(frame, platform):
# Fix for the case where module is a partial copy of the URL
# and should not be hashed
if (
platform == "javascript"
and "/" in frame.module
and frame.abs_path
and frame.abs_path.endswith(frame.module)
):
return True
elif platform == "java" and "$$Lambda$" in frame.module:
return True
return False
def is_unhashable_function_legacy(func):
# TODO(dcramer): lambda$ is Java specific
# TODO(dcramer): [Anonymous is PHP specific (used for things like SQL
# queries and JSON data)
return func.startswith(("lambda$", "[Anonymous"))
def is_recursion_legacy(frame1, frame2):
"Returns a boolean indicating whether frames are recursive calls."
for field in RECURSION_COMPARISON_FIELDS:
if getattr(frame1, field, None) != getattr(frame2, field, None):
return False
return True
def remove_module_outliers_legacy(module, platform):
"""Remove things that augment the module but really should not."""
if platform == "java":
if module[:35] == "sun.reflect.GeneratedMethodAccessor":
return "sun.reflect.GeneratedMethodAccessor", "removed reflection marker"
if module[:44] == "jdk.internal.reflect.GeneratedMethodAccessor":
return "jdk.internal.reflect.GeneratedMethodAccessor", "removed reflection marker"
old_module = module
module = _java_reflect_enhancer_re.sub(r"\1<auto>", module)
module = _java_cglib_enhancer_re.sub(r"\1<auto>", module)
module = _java_assist_enhancer_re.sub(r"\1<auto>", module)
module = _clojure_enhancer_re.sub(r"\1<auto>", module)
if old_module != module:
return module, "removed codegen marker"
return module, None
def remove_filename_outliers_legacy(filename, platform):
"""
Attempt to normalize filenames by removing common platform outliers.
- Sometimes filename paths contain build numbers
"""
# On cocoa we generally only want to use the last path component as
# the filename. The reason for this is that the chances are very high
# that full filenames contain information we do want to strip but
# currently can't (for instance because the information we get from
# the dwarf files does not contain prefix information) and that might
# contain things like /Users/foo/Dropbox/...
if platform == "cocoa":
return posixpath.basename(filename), "stripped to basename"
removed = []
if platform == "java":
new_filename = _java_assist_enhancer_re.sub(r"\1<auto>", filename)
if new_filename != filename:
removed.append("javassist parts")
filename = new_filename
new_filename = _filename_version_re.sub("<version>/", filename)
if new_filename != filename:
removed.append("version")
filename = new_filename
if removed:
return filename, "removed %s" % " and ".join(removed)
return filename, None
def remove_function_outliers_legacy(function):
"""
Attempt to normalize functions by removing common platform outliers.
- Ruby generates (random?) integers for various anonymous style functions
such as in erb and the active_support library.
- Block functions have metadata that we don't care about.
"""
if function.startswith("block "):
return "block", "ruby block"
new_function = _ruby_anon_func.sub("_<anon>", function)
if new_function != function:
return new_function, "trimmed integer suffix"
return new_function, None
@strategy(id="single-exception:legacy", interfaces=["singleexception"], variants=["!system", "app"])
def single_exception_legacy(exception, config, **meta):
type_component = GroupingComponent(
id="type",
values=[exception.type] if exception.type else [],
similarity_encoder=ident_encoder,
contributes=False,
)
value_component = GroupingComponent(
id="value",
values=[exception.value] if exception.value else [],
similarity_encoder=text_shingle_encoder(5),
contributes=False,
)
stacktrace_component = GroupingComponent(id="stacktrace")
if exception.stacktrace is not None:
stacktrace_component = config.get_grouping_component(exception.stacktrace, **meta)
if stacktrace_component.contributes:
if exception.type:
type_component.update(contributes=True)
if exception.value:
value_component.update(hint="stacktrace and type take precedence")
elif exception.value:
value_component.update(hint="stacktrace takes precedence")
if not stacktrace_component.contributes:
if exception.type:
type_component.update(contributes=True)
if exception.value:
value_component.update(contributes=True)
return GroupingComponent(
id="exception", values=[stacktrace_component, type_component, value_component]
)
@strategy(
id="chained-exception:legacy", interfaces=["exception"], variants=["!system", "app"], score=2000
)
def chained_exception_legacy(chained_exception, config, **meta):
# Case 1: we have a single exception, use the single exception
# component directly
exceptions = chained_exception.exceptions()
if len(exceptions) == 1:
return config.get_grouping_component(exceptions[0], **meta)
# Case 2: try to build a new component out of the individual
# errors however with a trick. In case any exception has a
# stacktrace we want to ignore all other exceptions.
any_stacktraces = False
values = []
for exception in exceptions:
exception_component = config.get_grouping_component(exception, **meta)
stacktrace_component = exception_component.get_subcomponent("stacktrace")
if stacktrace_component is not None and stacktrace_component.contributes:
any_stacktraces = True
values.append(exception_component)
if any_stacktraces:
for value in values:
stacktrace_component = value.get_subcomponent("stacktrace")
if stacktrace_component is None or not stacktrace_component.contributes:
value.update(contributes=False, hint="exception has no stacktrace")
return GroupingComponent(id="chained-exception", values=values)
@chained_exception_legacy.variant_processor
def chained_exception_legacy_variant_processor(variants, config, **meta):
return remove_non_stacktrace_variants(variants)
@strategy(id="frame:legacy", interfaces=["frame"], variants=["!system", "app"])
def frame_legacy(frame, event, **meta):
platform = frame.platform or event.platform
# In certain situations we want to disregard the entire frame.
contributes = None
hint = None
# this requires some explanation: older sentry versions did not have
# raw_function but only function. For some platforms like native
# we now instead store a trimmed function name in frame.function so
# and the original value moved to raw_function. This requires us to
# prioritize raw_function over function in the legacy grouping code to
# avoid creating new groups.
func = frame.raw_function or frame.function
# Safari throws [native code] frames in for calls like ``forEach``
# whereas Chrome ignores these. Let's remove it from the hashing algo
# so that they're more likely to group together
filename_component = GroupingComponent(id="filename", similarity_encoder=ident_encoder)
if frame.filename == "<anonymous>":
filename_component.update(
contributes=False, values=[frame.filename], hint="anonymous filename discarded"
)
elif frame.filename == "[native code]":
contributes = False
hint = "native code indicated by filename"
elif frame.filename:
if has_url_origin(frame.abs_path):
filename_component.update(
contributes=False, values=[frame.filename], hint="ignored because filename is a URL"
)
# XXX(dcramer): dont compute hash using frames containing the 'Caused by'
# text as it contains an exception value which may may contain dynamic
# values (see raven-java#125)
elif frame.filename.startswith("Caused by: "):
filename_component.update(
values=[frame.filename], contributes=False, hint="ignored because invalid"
)
else:
hashable_filename, hashable_filename_hint = remove_filename_outliers_legacy(
frame.filename, platform
)
filename_component.update(values=[hashable_filename], hint=hashable_filename_hint)
# if we have a module we use that for grouping. This will always
# take precedence over the filename, even if the module is
# considered unhashable.
module_component = GroupingComponent(id="module", similarity_encoder=ident_encoder)
if frame.module:
if is_unhashable_module_legacy(frame, platform):
module_component.update(
values=[
GroupingComponent(
id="salt", values=["<module>"], hint="normalized generated module name"
)
],
hint="ignored module",
)
# <module> still contributes, though it should not contribute to
# similarity
module_component.similarity_encoder = None
else:
module_name, module_hint = remove_module_outliers_legacy(frame.module, platform)
module_component.update(values=[module_name], hint=module_hint)
if frame.filename:
filename_component.update(
values=[frame.filename], contributes=False, hint="module takes precedence"
)
# Context line when available is the primary contributor
context_line_component = GroupingComponent(id="context-line", similarity_encoder=ident_encoder)
if frame.context_line is not None:
if len(frame.context_line) > 120:
context_line_component.update(hint="discarded because line too long")
elif has_url_origin(frame.abs_path) and not func:
context_line_component.update(hint="discarded because from URL origin")
else:
context_line_component.update(values=[frame.context_line])
symbol_component = GroupingComponent(id="symbol", similarity_encoder=ident_encoder)
function_component = GroupingComponent(id="function", similarity_encoder=ident_encoder)
lineno_component = GroupingComponent(id="lineno", similarity_encoder=ident_encoder)
# The context line grouping information is the most reliable one.
# If we did not manage to find some information there, we want to
# see if we can come up with some extra information. We only want
# to do that if we managed to get a module of filename.
if not context_line_component.contributes and (
module_component.contributes or filename_component.contributes
):
if frame.symbol:
symbol_component.update(values=[frame.symbol])
if func:
function_component.update(
contributes=False, values=[func], hint="symbol takes precedence"
)
if frame.lineno:
lineno_component.update(
contributes=False, values=[frame.lineno], hint="symbol takes precedence"
)
elif func:
if is_unhashable_function_legacy(func):
function_component.update(
values=[
GroupingComponent(
id="salt", values=["<function>"], hint="normalized lambda function name"
)
]
)
# <module> still contributes, though it should not contribute to
# similarity
function_component.similarity_encoder = None
else:
function, function_hint = remove_function_outliers_legacy(func)
function_component.update(values=[function], hint=function_hint)
if frame.lineno:
lineno_component.update(
contributes=False, values=[frame.lineno], hint="function takes precedence"
)
elif frame.lineno:
lineno_component.update(values=[frame.lineno])
else:
if context_line_component.contributes:
fallback_hint = "is not used if context-line is available"
else:
fallback_hint = "is not used if module or filename are available"
if frame.symbol:
symbol_component.update(
contributes=False, values=[frame.symbol], hint="symbol " + fallback_hint
)
if func:
function_component.update(
contributes=False, values=[func], hint="function name " + fallback_hint
)
if frame.lineno:
lineno_component.update(
contributes=False, values=[frame.lineno], hint="line number " + fallback_hint
)
return GroupingComponent(
id="frame",
values=[
module_component,
filename_component,
context_line_component,
symbol_component,
function_component,
lineno_component,
],
contributes=contributes,
hint=hint,
)
@strategy(
id="stacktrace:legacy", interfaces=["stacktrace"], variants=["!system", "app"], score=1800
)
def stacktrace_legacy(stacktrace, config, variant, **meta):
frames = stacktrace.frames
contributes = None
hint = None
all_frames_considered_in_app = False
# TODO(dcramer): this should apply only to platform=javascript
# Browser JS will often throw errors (from inlined code in an HTML page)
# which contain only a single frame, no function name, and have the HTML
# document as the filename. In this case the hash is often not usable as
# the context cannot be trusted and the URL is dynamic (this also means
# the line number cannot be trusted).
if len(frames) == 1 and not frames[0].function and frames[0].is_url():
contributes = False
hint = "ignored single frame stack"
elif variant == "app":
total_frames = len(frames)
in_app_count = sum(1 if f.in_app else 0 for f in frames)
if in_app_count == 0:
in_app_count = total_frames
all_frames_considered_in_app = True
# if app frames make up less than 10% of the stacktrace discard
# the hash as invalid
if total_frames > 0 and in_app_count / float(total_frames) < 0.10:
contributes = False
hint = "less than 10% of frames are in-app"
values = []
prev_frame = None
frames_for_filtering = []
for frame in frames:
frame_component = config.get_grouping_component(frame, variant=variant, **meta)
if variant == "app" and not frame.in_app and not all_frames_considered_in_app:
frame_component.update(contributes=False, hint="non app frame")
elif prev_frame is not None and is_recursion_legacy(frame, prev_frame):
frame_component.update(contributes=False, hint="ignored due to recursion")
elif variant == "app" and not frame.in_app and all_frames_considered_in_app:
frame_component.update(hint="frame considered in-app because no frame is in-app")
values.append(frame_component)
frames_for_filtering.append(frame.get_raw_data())
prev_frame = frame
rv = config.enhancements.assemble_stacktrace_component(
values, frames_for_filtering, meta["event"].platform
)
rv.update(contributes=contributes, hint=hint)
return rv
@strategy(id="threads:legacy", interfaces=["threads"], variants=["!system", "app"], score=1900)
def threads_legacy(threads_interface, config, **meta):
thread_count = len(threads_interface.values)
if thread_count != 1:
return GroupingComponent(
id="threads",
contributes=False,
hint="ignored because contains %d threads" % thread_count,
)
stacktrace = threads_interface.values[0].get("stacktrace")
if not stacktrace:
return GroupingComponent(id="threads", contributes=False, hint="thread has no stacktrace")
return GroupingComponent(
id="threads", values=[config.get_grouping_component(stacktrace, **meta)]
)
|
{
"content_hash": "7a746da0dea064b3eebef162a3d9010a",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 100,
"avg_line_length": 41.1766004415011,
"alnum_prop": 0.6515841955717578,
"repo_name": "beeftornado/sentry",
"id": "d372e99a1f81b49f2911261a5f64d5fa42454033",
"size": "18653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/grouping/strategies/legacy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
__author__ = 'farooq.sheikh'
from setuptools import setup, find_packages
setup(
name = 'asposecellscloudexamples',
packages = find_packages(),
version = '1.2,
description = 'Aspose.Cells Cloud SDK for Python allows you to use Aspose.Cells APIs in your Python applications',
author='Farooq Sheikh',
author_email='farooq.sheikh@aspose.com',
url='https://github.com/asposecells/Aspose_Cells_Cloud/tree/master/SDKs/Aspose.Cells_Cloud_SDK_For_Python',
install_requires=[
'asposestoragecloud','asposecellscloud'
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
]
)
|
{
"content_hash": "c4153d0a348e9da18a598a9263bc657d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 118,
"avg_line_length": 34.77272727272727,
"alnum_prop": 0.6692810457516339,
"repo_name": "asposecells/Aspose_Cells_Cloud",
"id": "05d3b4e783280cebc160ad97ea01c22990204744",
"size": "765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Examples/Python/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "897367"
},
{
"name": "HTML",
"bytes": "110"
},
{
"name": "Java",
"bytes": "900042"
},
{
"name": "JavaScript",
"bytes": "664643"
},
{
"name": "Objective-C",
"bytes": "1142444"
},
{
"name": "PHP",
"bytes": "626745"
},
{
"name": "Python",
"bytes": "833397"
},
{
"name": "Ruby",
"bytes": "799033"
}
],
"symlink_target": ""
}
|
import Inline
info = {
"friendly_name": "Emphasised text",
"summary": "Applies the 'emphasis' typestyle to the contained markup.",
}
def SpanHandler(rest, acc):
(inner, rest) = Inline.parse(rest)
acc.append(Inline.TagFragment('pyle_em', inner))
return rest
|
{
"content_hash": "0ba9e30913cb4aa480364b0861016e24",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 75,
"avg_line_length": 25.363636363636363,
"alnum_prop": 0.6738351254480287,
"repo_name": "CymaticLabs/Unity3D.Amqp",
"id": "8bf66cba84658ad8bb49f75edde0ed14ac3af2b6",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/spanhandlers/em.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13572"
},
{
"name": "C#",
"bytes": "3448460"
},
{
"name": "CSS",
"bytes": "4291"
},
{
"name": "HTML",
"bytes": "511888"
},
{
"name": "JavaScript",
"bytes": "4843"
},
{
"name": "Makefile",
"bytes": "3989"
},
{
"name": "Perl",
"bytes": "3838"
},
{
"name": "Python",
"bytes": "987455"
},
{
"name": "Roff",
"bytes": "9846"
},
{
"name": "Shell",
"bytes": "25576"
},
{
"name": "XSLT",
"bytes": "62862"
}
],
"symlink_target": ""
}
|
from icalendar import iCalendar # NOQA
from mail import Mailer # NOQA
|
{
"content_hash": "61b0dd55d906998758df65127b8a73f6",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.7,
"repo_name": "angstwad/fortnight",
"id": "7095522682344313aad70bc2f789cde963500c02",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fortnight/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28483"
}
],
"symlink_target": ""
}
|
from pypy.rlib.parsing.ebnfparse import parse_ebnf, make_parse_function
from pypy.rlib.parsing.tree import RPythonVisitor
import py
from ripe import compiler, ripedir
from ripe.objects import W_Integer
grammar = py.path.local(ripedir).join("grammar.txt").read("rt")
regexs, rules, ToAST = parse_ebnf(grammar)
_parse = make_parse_function(regexs, rules, eof=True)
BASES = {"BINARY" : 2, "OCTAL" : 8, "DECIMAL" : 10, "HEX" : 16}
class Node(object):
"""
An AST node.
"""
def __eq__(self, other):
return (
self.__class__ == other.__class__ and
self.__dict__ == other.__dict__
)
def __ne__(self, other):
return not self == other
def __repr__(self):
contents = ("%s=%r" % (k, v) for k, v in self.__dict__.iteritems())
return "<%s %s>" % (self.__class__.__name__, ", ".join(contents))
class Compound(Node):
def __init__(self, statements=None):
if statements is None:
statements = []
self.statements = statements
def compile(self, context):
for statement in self.statements:
statement.compile(context)
class Expression(Node):
def __init__(self, expr):
self.expr = expr
def compile(self, context):
self.expr.compile(context)
context.emit(compiler.DISCARD_TOP)
class Assign(Node):
def __init__(self, name, expr):
self.name = name
self.expr = expr
def compile(self, context):
self.expr.compile(context)
context.emit(compiler.ASSIGN, context.register_variable(self.name))
class Variable(Node):
def __init__(self, name):
self.name = name
def compile(self, context):
context.emit(
compiler.LOAD_VARIABLE, context.register_variable(self.name),
)
class BinOp(Node):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def compile(self, context):
self.left.compile(context)
self.right.compile(context)
context.emit(compiler.BINOP[self.op])
class Int(Node):
def __init__(self, value):
self.value = value
def compile(self, context):
w_self = W_Integer(self.value)
context.emit(compiler.LOAD_CONSTANT, context.register_constant(w_self))
def neg(self):
return self.__class__(-self.value)
class SingleQString(Node):
def __init__(self, value):
self.value = value
class DoubleQString(Node):
def __init__(self, value):
self.value = value
class If(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
self.condition.compile(context)
context.emit(compiler.JUMP_IF_FALSE, 0)
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.data[jmp_pos] = chr(len(context.data))
class Unless(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
self.condition.compile(context)
context.emit(compiler.JUMP_IF_TRUE, 0)
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.data[jmp_pos] = chr(len(context.data))
class While(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
start_pos = len(context.data)
self.condition.compile(context)
context.emit(compiler.JUMP_IF_FALSE, 0)
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.emit(compiler.JUMP_BACKWARD, start_pos)
context.data[jmp_pos] = chr(len(context.data))
class Until(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
start_pos = len(context.data)
self.condition.compile(context)
context.emit(compiler.JUMP_IF_TRUE, 0)
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.emit(compiler.JUMP_BACKWARD, start_pos)
context.data[jmp_pos] = chr(len(context.data))
class Puts(Node):
# XXX
def __init__(self, expr):
self.expr = expr
def compile(self, context):
self.expr.compile(context)
context.emit(compiler.PUTS, 0)
class Method(Node):
def __init__(self, name, params, body):
self.name = name
self.params = params
self.body = body
class Transformer(RPythonVisitor):
def dispatch(self, node):
return getattr(self, "visit_%s" % node.symbol)(node)
def visit_program(self, node):
if not node.children:
return Compound()
statements, = node.children
return self.dispatch(statements)
def visit_statements(self, node):
return Compound([
self.dispatch(statement) for statement in node.children
])
def visit_expression_statement(self, node):
expression, = node.children
return Expression(self.dispatch(expression))
def visit_assignment_statement(self, node):
variable, obj = node.children[0].children[0].children
variable = "".join(v.additional_info for v in variable.children)
obj, = obj.children
return Assign(variable, self.dispatch(obj))
def visit_numeric_literal(self, node):
number = self.dispatch(node.children[-1])
if len(node.children) == 2:
sign, _ = node.children
if sign.additional_info.count("-") % 2:
return number.neg()
return number
def visit_integer_literal(self, node):
integer, = node.children
base = integer.symbol.split("_", 1)[0]
value = integer.additional_info
value = value[2:] if value.startswith("0") and value != "0" else value
return Int(int(value, BASES[base]))
def visit_string_literal(self, node):
string, = node.children
value = string.additional_info
end = len(value) - 1
assert end > 0
value = value[1:end]
if string.symbol == "SINGLE_QUOTED_STRING":
return SingleQString(value)
elif string.symbol == "DOUBLE_QUOTED_STRING":
return DoubleQString(value)
raise NotImplementedError
def visit_variable(self, node):
name = "".join(v.additional_info for v in node.children)
return Variable(name)
def visit_equality_expression(self, node):
left, op, right = node.children
return BinOp(
self.dispatch(left), op.additional_info, self.dispatch(right)
)
def visit_if_expression(self, node):
condition, then = node.children
body, = then.children
return If(self.dispatch(condition), self.dispatch(body))
def visit_unless_expression(self, node):
condition, then = node.children
body, = then.children
return Unless(self.dispatch(condition), self.dispatch(body))
def visit_while_expression(self, node):
condition, do = node.children
body, = do.children
return While(self.dispatch(condition), self.dispatch(body))
def visit_until_expression(self, node):
condition, do = node.children
body, = do.children
return Until(self.dispatch(condition), self.dispatch(body))
def visit_puts_statement(self, node):
# XXX
return Puts(self.dispatch(node.children[0]))
def visit_method_definition(self, node):
name, params_node, body = node.children
name, body = name.additional_info, self.dispatch(body)
if params_node.children:
params_list, = params_node.children
params = [param.additional_info for param in params_list.children]
else:
params = []
return Method(name, params, body)
transformer = Transformer()
def parse(source, transformer=transformer):
"""
Parse the source code and produce an AST.
"""
ast = ToAST().transform(_parse(source))
return transformer.visit_program(ast)
|
{
"content_hash": "aa703da6c1fa276883f72eb249e45dea",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 79,
"avg_line_length": 27.658783783783782,
"alnum_prop": 0.6092585806766826,
"repo_name": "Julian/Ripe",
"id": "85729559c3adabcbe14e7c5562e57019d46e41fe",
"size": "8187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ripe/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29322"
},
{
"name": "Ruby",
"bytes": "3049"
}
],
"symlink_target": ""
}
|
import os
import os.path
import logging
import string
import unicodedata
# Django
from django.utils.functional import LazyObject
from django.core.files.base import ContentFile
from django.utils.encoding import force_unicode
class ImproperlyConfigured(Exception):
pass
def ensure_path(filename):
"""Makes sure the path exists."""
d = os.path.dirname(filename)
if not os.path.isdir(d):
os.makedirs(d)
def to_unicode(an_object, date_format='%Y-%m-%dT%H:%M:%S.%f%z'):
"""Converts any objects into a unicode string.
A date format can be passed and will be used if the object is a date. (= any objects with a strftime function)
Args:
an_object: The object to convert.
date_format: A Date format string to use if the passed object is a date. Default: YYYY-MM-DDTHH:MM:SS.mmmmmm+HHMM (See http://docs.python.org/library/datetime.html)
Returns:
A unicode string.
"""
if hasattr(an_object, 'strftime'):
return force_unicode(an_object.strftime(date_format))
return force_unicode(an_object)
def windows_safe(s):
"""Replaces colon by semicolon on Windows because Windows cannot handle filename with colon."""
if os.name == 'nt':
return s.replace(':', ';')
return s
def make_safe(s):
"""Makes a string safe to be used as a filename."""
valid_chars = ":;+-_.() %s%s" % (string.ascii_letters, string.digits)
if isinstance(s, unicode):
s = unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore')
s = s.replace('/', '_')
return windows_safe(u''.join(c for c in s if c in valid_chars))
class DefaultLogger(LazyObject):
"""Logger with default file handler if no one exists."""
def __init__(self, logger_name=None, log_file=None, level=logging.INFO, log_size=5 * 1024 * 2 ** 10, logger_format='%(asctime)s %(levelname)s %(module)s %(message)s'):
# IMPORTANT: must be here to avoid recursion
super(DefaultLogger, self).__init__()
if not logger_name:
raise ImproperlyConfigured('A logger name must be provided.')
logger = logging.getLogger(logger_name)
if not logger.handlers and not log_file:
raise ImproperlyConfigured('A log_file must be provided to use the default RotatingFileHandler when the logger does not exist or does not contain any handler.')
self.__dict__['logger'] = logger
self.__dict__['logger_name'] = logger_name
self.__dict__['log_file'] = log_file
self.__dict__['log_size'] = log_size
self.__dict__['logger_format'] = logger_format
self.__dict__['level'] = level
self.__dict__['messages'] = []
def _setup(self):
if self.logger.level == 0:
self.logger.setLevel(self.level)
# Create default handler if no one exists.
if not self.logger.handlers:
# Make sure the directory exists.
ensure_path(self.log_file)
handler = logging.handlers.RotatingFileHandler(
filename=self.log_file,
maxBytes=self.log_size,
backupCount=50
)
handler.formatter = logging.Formatter(fmt=self.logger_format)
self.logger.handlers.append(handler)
self._wrapped = self.logger
def append_msg(self, msg):
"""Appends a message to the log buffer."""
self.messages.append(msg)
def flush_messages(self):
"""Flushes the log buffer and returns the messages as one merged message."""
msg = '\n'.join(self.messages)
self.__dict__['messages'] = []
return msg
def log_messages(self, lvl=logging.ERROR, start='', end=''):
"""Writes the log buffer in the log."""
msg = start + self.flush_messages() + end
self.log(lvl, msg)
return msg
class LoggerWithStorage(DefaultLogger):
"""Logger with a Storage."""
def __init__(self, storage=None, logger_name=None, log_file=None, level=logging.INFO, log_size=5 * 1024 * 2 ** 10, logger_format='%(asctime)s %(levelname)s %(module)s %(message)s'):
# IMPORTANT: must be here to avoid recursion
super(LoggerWithStorage, self).__init__(logger_name, log_file, level, log_size, logger_format)
if not (storage and logger_name):
raise ImproperlyConfigured('A storage AND a logger name must be provided.')
logger = logging.getLogger(logger_name)
if not logger.handlers and not log_file:
raise ImproperlyConfigured('A log file must be provided to use the default RotatingFileHandler when the logger does not exist or does not contain any handler.')
self.__dict__['storage'] = storage
@classmethod
def make_filename(cls, *args, **kwargs):
"""Makes a suitable filename.
Pass an argument named delimiter to set up which delimiter to use. Default: __
"""
delimiter = kwargs.get('delimiter', u'__')
s = [to_unicode(arg).lower() for arg in args]
filename = delimiter.join(s)
return make_safe(filename)
def store(self, data, *args, **kwargs):
"""Stores and returns the log message."""
log_msg = '[Storage attempt] =>'
filename = self.make_filename(*args, **kwargs)
if self.storage.exists(filename):
log_msg = '%s File already exists: <%s> ' % (log_msg, filename,)
else:
try:
output = self.storage.save(filename, ContentFile(data))
log_msg = '%s File saved: <%s> in <%s>.' % (log_msg, output, getattr(self.storage, 'location', 'No location'))
except StandardError as err:
log_msg = '%s Cannot save the file <%s> in <%s>.\n%s' % (log_msg, filename, getattr(self.storage, 'location', 'No location'), err)
return log_msg
def _log_and_store(self, msg, data, lvl, *args):
"""Logs a message and stores some data in the file."""
log_msg = '%s\n%s' % (msg, self.store(data, *args))
self.log(lvl, log_msg)
return log_msg
def debug_and_store(self, msg, data, *args):
return self._log_and_store(msg, data, logging.DEBUG, *args)
def info_and_store(self, msg, data, *args):
return self._log_and_store(msg, data, logging.INFO, *args)
def warning_and_store(self, msg, data, *args):
return self._log_and_store(msg, data, logging.WARNING, *args)
def error_and_store(self, msg, data, *args):
return self._log_and_store(msg, data, logging.ERROR, *args)
def critical_and_store(self, msg, data, *args):
return self._log_and_store(msg, data, logging.CRITICAL, *args)
|
{
"content_hash": "37775ea6f6ac332c2dfecb1ca1606dc6",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 185,
"avg_line_length": 38.42696629213483,
"alnum_prop": 0.6051169590643275,
"repo_name": "YAmikep/django-feedstorage",
"id": "0909de0bc5d6e58d79af9f6a8d9f8800134a33a9",
"size": "6857",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "feedstorage/utils/loggers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "47917"
}
],
"symlink_target": ""
}
|
"""kolibri URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
.. moduleauthor:: Learning Equality <info@learningequality.org>
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from morango import urls as morango_urls
from kolibri.plugins.utils.urls import get_root_urls
from kolibri.utils.conf import OPTIONS
path_prefix = OPTIONS["Deployment"]["URL_PATH_PREFIX"]
if path_prefix == "/":
path_prefix = ""
url_patterns_prefixed = [
url(r"^admin/", include(admin.site.urls)),
url(r"", include(morango_urls)),
url(r"", include("kolibri.core.urls")),
url(r"", include(get_root_urls())),
]
urlpatterns = [url(path_prefix, include(url_patterns_prefixed))]
|
{
"content_hash": "cd06403175bccc09641aa3b9f8dd86e1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 32.58139534883721,
"alnum_prop": 0.7102069950035689,
"repo_name": "mrpau/kolibri",
"id": "d9537df4c2953be5d385ef6243163438658cd646",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/deployment/default/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "1716299"
},
{
"name": "Dockerfile",
"bytes": "7303"
},
{
"name": "Gherkin",
"bytes": "278074"
},
{
"name": "HTML",
"bytes": "26440"
},
{
"name": "JavaScript",
"bytes": "1537923"
},
{
"name": "Makefile",
"bytes": "13308"
},
{
"name": "Python",
"bytes": "2298911"
},
{
"name": "Shell",
"bytes": "11777"
},
{
"name": "Vue",
"bytes": "1558714"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO
import os
gpio_pin_number=40
GPIO.setmode(GPIO.BOARD)
GPIO.setup(gpio_pin_number, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#It's very important the pin is an input to avoid short-circuits
#The pull-up resistor means the pin is high by default
try:
GPIO.wait_for_edge(gpio_pin_number, GPIO.FALLING)
#Use falling edge detection to see if pin is pulled
#low to avoid repeated polling
os.system("sudo shutdown -h now")
#Send command to system to shutdown
except:
pass
GPIO.cleanup()
#Revert all GPIO pins to their normal states (i.e. input = safe)
|
{
"content_hash": "1e5cd781447e021065a353c6d66312a7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 26.90909090909091,
"alnum_prop": 0.7297297297297297,
"repo_name": "merose/MakeyPiano",
"id": "d2bf95459a394d6a516c48d4a78ff3a22cb172d4",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shutdownSwitch.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14727"
},
{
"name": "Shell",
"bytes": "84"
}
],
"symlink_target": ""
}
|
from decimal import Decimal, DecimalException
from logging import getLogger
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.http import HttpResponse, Http404, HttpRequest, JsonResponse
from django.shortcuts import render
from django.utils.decorators import classonlymethod
from django.utils.timezone import now
from django.utils.translation import ugettext
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView
from .forms import SubscriptionForm, PartialPayForm, ManualTransactionForm
from .models import Event, Subscription, SubsState, Transaction
from .notify import Notifier
from .payment.base import get_payment, get_payment_names
from .queue import cron, QueueAgent
from .utils import named, prg_redirect
log = getLogger(__name__)
BLANK_PAGE = HttpResponse()
@named('esupa-splash')
@login_required
def redirect_to_view_or_edit(request: HttpRequest, slug: str) -> HttpResponse:
try:
event = Event.objects.get(slug=slug)
except Event.DoesNotExist:
look_to_the_future = Event.objects.filter(starts_at__gt=now()).order_by('starts_at')
look_to_the_past = Event.objects.filter(starts_at__lt=now()).order_by('-starts_at')
event = look_to_the_future.first() or look_to_the_past.first()
if event:
exists = Subscription.objects.filter(event=event, user=request.user).exists()
return prg_redirect(view.name if exists else edit.name, event.slug)
else:
raise Http404(ugettext('There is no event. Create one in /admin/'))
def _get_subscription(event_slug: str, user: User) -> Subscription:
"""Takes existing subscription if available, creates a new one otherwise."""
try:
event = Event.objects.get(slug=event_slug)
except Event.DoesNotExist:
raise Http404(ugettext('Unknown event %s.') % event_slug)
kwargs = dict(event=event, user=user)
try:
subscription = Subscription.objects.get(**kwargs)
except Subscription.DoesNotExist:
subscription = Subscription(**kwargs)
if subscription.state == SubsState.DENIED:
raise PermissionDenied
return subscription
@named('esupa-view')
@login_required
def view(request: HttpRequest, slug: str) -> HttpResponse:
subscription = _get_subscription(slug, request.user)
if subscription.id:
context = {
'sub': subscription,
'event': subscription.event,
'state': SubsState(subscription.state),
'pending_trans': subscription.transaction_set.filter(document__isnull=False, ended_at__isnull=True),
'confirmed_trans': subscription.transaction_set.filter(accepted=True),
'partial_pay_form': PartialPayForm(subscription.get_owing()),
'pay_buttons': get_payment_names(),
}
if 'pay_with' in request.POST:
queue = QueueAgent(subscription)
subscription.position = queue.add()
subscription.waiting = queue.within_capacity
subscription.raise_state(SubsState.EXPECTING_PAY if queue.within_capacity else SubsState.QUEUED_FOR_PAY)
subscription.save()
if queue.within_capacity:
payment = get_payment(int(request.POST['pay_with']))(subscription)
try:
amount = Decimal(request.POST.get('amount', ''))
except DecimalException:
amount = subscription.get_owing()
return payment.start_payment(request, amount)
return render(request, 'esupa/view.html', context)
else:
return prg_redirect(edit.name, slug)
@named('esupa-edit')
@login_required
def edit(request: HttpRequest, slug: str) -> HttpResponse:
subscription = _get_subscription(slug, request.user)
if not subscription.id and subscription.user.email:
subscription.email = subscription.user.email
form = SubscriptionForm(data=request.POST or None, instance=subscription)
if request.POST and form.is_valid():
old_state = subscription.state
form.save()
s = map(str.lower, (subscription.full_name, subscription.email, subscription.document, subscription.badge))
b = tuple(map(str.lower, filter(bool, subscription.event.data_to_be_checked.splitlines())))
acceptable = True not in (t in d for d in s for t in b)
if not acceptable:
subscription.state = SubsState.VERIFYING_DATA # Lowers the state.
elif subscription.paid_any:
if subscription.get_owing() <= 0:
subscription.raise_state(SubsState.CONFIRMED)
elif subscription.state == SubsState.CONFIRMED:
subscription.state = SubsState.PARTIALLY_PAID # Lowers the state.
else:
subscription.raise_state(SubsState.ACCEPTABLE)
subscription.save()
Notifier(subscription).saved(old_state, request.build_absolute_uri)
return prg_redirect(view.name, slug)
else:
return render(request, 'esupa/edit.html', {
'form': form,
'event': subscription.event,
'subscription': subscription,
})
@named('esupa-trans-doc')
@login_required
def transaction_document(request: HttpRequest, tid) -> HttpResponse:
trans = Transaction.objects.get(id=tid)
if trans is None or not trans.document:
raise Http404(ugettext("No such document."))
if not request.user.is_staff and trans.subscription.user != request.user:
return PermissionDenied
response = HttpResponse(trans.document, content_type=trans.mimetype)
return response
@named('esupa-cron')
def cron_view(request: HttpRequest, secret) -> HttpResponse:
if request.user and request.user.is_staff:
return cron() or BLANK_PAGE
elif secret != getattr(settings, 'ESUPA_CRON_SECRET', None):
cron()
return BLANK_PAGE
else:
raise SuspiciousOperation
@named('esupa-pay')
@csrf_exempt
def paying(request: HttpRequest, code) -> HttpResponse:
resolved_view = get_payment(int(code)).class_view
return resolved_view(request) or BLANK_PAGE
@named('esupa-json-state')
def json_state(_: HttpRequest, slug: str) -> JsonResponse:
result = JsonResponse(_json_state(slug))
result['Access-Control-Allow-Origin'] = '*'
return result
def _json_state(slug: str) -> dict:
try:
event = Event.objects.get(slug=slug)
except Event.DoesNotExist:
return {'exists': False, 'slug': slug}
threshold = event.reveal_openings_under
potentially = max(0, event.capacity - event.num_confirmed)
currently = max(0, potentially - event.num_pending)
if threshold > 0:
potentially = str(threshold) + '+' if potentially > threshold else str(potentially)
currently = str(threshold) + '+' if currently > threshold else str(currently)
return {'exists': True, 'slug': slug, 'id': event.id,
'registrationOpen': event.subs_open, 'salesOpen': event.sales_open,
'potentiallyAvailable': potentially, 'currentlyAvailable': currently}
class EsupaListView(ListView):
name = ''
@classonlymethod
def as_view(cls, **initkwargs):
view_ = login_required(super().as_view(**initkwargs))
view_.name = cls.name
return view_
def dispatch(self, request, *args, **kwargs):
user = request.user
assert isinstance(user, User)
if not user.is_staff:
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
return super().get_context_data(user=self.request.user, **kwargs)
class EventList(EsupaListView):
model = Event
name = 'esupa-check-all'
class SubscriptionList(EsupaListView):
model = Subscription
name = 'esupa-check-event'
_event = None
sort_dict = {
'state': '-state',
'sid': 'id',
'pos': 'position',
}
@property
def event(self) -> Event:
if not self._event:
try:
self._event = Event.objects.get(slug=self.args[0])
except Event.DoesNotExist:
raise Http404
return self._event
def get_queryset(self):
queryset = self.event.subscription_set
sort = self.request.GET.get('sort')
if sort == 'pos':
queryset = queryset.filter(position__isnull=False)
return queryset.order_by(self.sort_dict.get(sort, '-state'))
def get_context_data(self, **kwargs):
return super().get_context_data(event=self.event, **kwargs)
class TransactionList(EsupaListView):
model = Transaction
name = 'esupa-check-docs'
_event = None
_subscription = None
@property
def event(self) -> Event:
if not self._event:
self._event = self.subscription.event
return self._event
@property
def subscription(self) -> Subscription:
if not self._subscription:
try:
self._subscription = Subscription.objects.get(id=int(self.args[0]))
except Subscription.DoesNotExist:
raise Http404
self._event = self._subscription.event
return self._subscription
def get_queryset(self):
return self.subscription.transaction_set.order_by('-id')
def get_context_data(self, **kwargs):
return super().get_context_data(
event=self.event,
sub=self.subscription,
state=SubsState(),
manual_transaction_form=ManualTransactionForm(self.subscription),
**kwargs)
def post(self, request: HttpRequest, sid: str):
if 'action' in request.POST:
tid, decision = request.POST.get('action').split()
transaction = Transaction.objects.get(id=tid, subscription_id=int(sid))
transaction.end(decision == 'yes')
transaction.verifier = request.user
else:
form = ManualTransactionForm(request.POST)
if form.is_valid():
transaction = Transaction(subscription_id=int(sid))
transaction.amount = form.cleaned_data['amount']
transaction.created_at = form.cleaned_data['when']
transaction.method = 1
if request.FILES:
transaction.mimetype = request.FILES['attachment'].content_type or 'application/octet-stream'
transaction.document = request.FILES['attachment'].read()
transaction.filled_at = transaction.created_at
transaction.verifier = request.user
transaction.notes = form.cleaned_data['notes']
transaction.end(True)
else:
return self.get(request, sid)
return prg_redirect(TransactionList.name, sid)
|
{
"content_hash": "6398ec0b920a433bed62480ee27a69a4",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 116,
"avg_line_length": 38.12847222222222,
"alnum_prop": 0.6500318732355888,
"repo_name": "ekevoo/esupa",
"id": "3bfc4925d7d34c97a28a286003fc19db77ee2b67",
"size": "11582",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "esupa/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "961"
},
{
"name": "HTML",
"bytes": "13252"
},
{
"name": "Python",
"bytes": "81988"
}
],
"symlink_target": ""
}
|
from autopyfactory.interfaces import SchedInterface
import logging
class Fillactivated(SchedInterface):
id = 'fillactivated'
def __init__(self, apfqueue, config, section):
try:
self.apfqueue = apfqueue
self.log = logging.getLogger('autopyfactory.sched.%s' %apfqueue.apfqname)
# A default value is required.
self.default = self.apfqueue.qcl.generic_get(self.apfqueue.apfqname, 'sched.fillactivated.default', 'getint', default_value=0)
self.log.debug('SchedPlugin: default = %s' %self.default)
self.log.info("SchedPlugin: Object initialized.")
except Exception, ex:
self.log.error("SchedPlugin object initialization failed. Raising exception")
raise ex
def calcSubmitNum(self, n=0):
"""
returns nb of Activated Jobs - nb of Pending Pilots
"""
self.log.debug('Starting.')
self.wmsinfo = self.apfqueue.wmsstatus_plugin.getInfo()
self.batchinfo = self.apfqueue.batchstatus_plugin.getInfo()
if self.wmsinfo is None:
self.log.warning("wmsinfo is None!")
out = self.default
msg = "Invalid wmsinfo"
elif self.batchinfo is None:
self.log.warning("self.batchinfo is None!")
out = self.default
msg = "Invalid batchinfo"
elif not self.wmsinfo.valid() and self.batchinfo.valid():
out = self.default
msg = "Invalid wms/batchinfo"
self.log.warn('a status is not valid, returning default = %s' %out)
else:
# Carefully get wmsinfo, activated.
self.wmsqueue = self.apfqueue.wmsqueue
self.log.info("Siteid is %s" % self.wmsqueue)
(out, msg) = _calc_online(n)
return (out, msg)
def _calc_online(self, n):
"""
algorithm when wmssite is in online mode
"""
# initial default values.
activated_jobs = 0
pending_pilots = 0
running_pilots = 0
jobsinfo = self.wmsinfo.jobs
self.log.debug("jobsinfo class is %s" % jobsinfo.__class__ )
try:
sitedict = jobsinfo[self.wmsqueue]
self.log.debug("sitedict class is %s" % sitedict.__class__ )
#activated_jobs = sitedict['activated']
activated_jobs = sitedict.ready
except KeyError:
# This is OK--it just means no jobs in any state at the wmsqueue.
self.log.error("wmsqueue: %s not present in jobs info from WMS" % self.wmsqueue)
activated_jobs = 0
try:
pending_pilots = self.batchinfo[self.apfqueue.apfqname].pending # using the new info objects
except KeyError:
# This is OK--it just means no jobs.
pass
try:
running_pilots = self.batchinfo[self.apfqueue.apfqname].running # using the new info objects
except KeyError:
# This is OK--it just means no jobs.
pass
all_pilots = pending_pilots + running_pilots
out = max(0, activated_jobs - pending_pilots)
self.log.info('activated=%s; pending=%s; running=%s; Return=%s' %(activated_jobs,
pending_pilots,
running_pilots,
out))
msg = "Fillactivated:in=%s,activated=%s,pending=%s,running=%s,ret=%s" %(n, activated_jobs, pending_pilots, running_pilots, out)
return (out, msg)
|
{
"content_hash": "adc43e470032c8259bfbafd6ae2ff4ef",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 142,
"avg_line_length": 39.787234042553195,
"alnum_prop": 0.5483957219251336,
"repo_name": "btovar/autopyfactory",
"id": "f2d43f02eff1fb1ff0592b32120746ed7dd2fdc5",
"size": "3766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autopyfactory/plugins/queue/sched/Fillactivated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "827948"
},
{
"name": "Shell",
"bytes": "97872"
}
],
"symlink_target": ""
}
|
import pytest
import random
import os
MAX = int(1e7)
def generate_input_file(input_file):
""" 生成输入文件 """
# 只生成 1M 个
print('Generating')
with open(input_file, 'w') as fd:
for i in range(1024):
cache = []
for j in range(1024):
cache.append(random.randint(0, MAX))
s = '\n'.join(('%07d' % num for num in cache))
fd.write(s+'\n')
def check(file):
""" 检查文件是否已按小到大排序
:param file: 待检查文件
:return: 是否已排序
"""
with open(file, 'r') as fd:
# 第一个整数
line = fd.readline()
if not line:
return True
prev = int(line.strip())
# 后续整数
while True:
line = fd.readline()
if not line:
return True
else:
num = int(line.strip())
if num < prev:
return False
@pytest.fixture(scope='module')
def input_file():
file_name = 'input.txt'
if not os.path.isfile(file_name):
generate_input_file(file_name)
return file_name
def test_merge_sort_1(input_file):
from .a1_3_file_merge_sort_1 import split, merge
temp_files = []
try:
print('Testing ch01 -> merge sort 1')
print('Splitting')
temp_files = split(input_file, 1024*1024/4)
print('Merging')
merge(temp_files, 'output.txt', 1024*1024/4)
print('Checking')
result = check('output.txt')
assert result
finally:
for temp_file in temp_files:
os.remove(temp_file)
def test_merge_sort_2(input_file):
from .a1_3_file_merge_sort_2 import split, merge
temp_files = []
try:
print('Testing ch01 -> merge sort 2')
print('Splitting')
temp_files = split(input_file, 1024*1024/4)
print('Merging')
merge(temp_files, 'output.txt')
print('Checking')
result = check('output.txt')
assert result
finally:
for temp_file in temp_files:
os.remove(temp_file)
def test_multi_select_sort(input_file):
from .a1_3_file_multi_select_sort import sort
print('Testing ch01 -> multi select sort')
print('Sorting')
sort(input_file, 'output.txt', 1024*1024/4)
print('Checking')
result = check('output.txt')
assert result
def test_bit_vector(input_file):
from .a1_4_file_bit_vector import bit_sort
print('Testing ch01 -> bit vector')
print('Sorting')
bit_sort(input_file, 'output.txt')
print('Checking')
result = check('output.txt')
assert result
def test_simple_sort(input_file):
from .a1_6_1_sort import sort
print('Testing ch01 -> simple sort')
print('Sorting')
sort(input_file, 'output.txt')
print('Checking')
assert check('output.txt')
|
{
"content_hash": "c0b24537cfc034fedb5fbe438b88e84c",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 58,
"avg_line_length": 23.847457627118644,
"alnum_prop": 0.562544420753376,
"repo_name": "fengyc/programming-pearls-2nd-python",
"id": "477e1ffbebb87335d2898770af32e2234c937f93",
"size": "2933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/ch01/test_sort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81"
}
],
"symlink_target": ""
}
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/ApiKeys").resolve()
dest = Path().resolve()
# Added so that we can pass copy_excludes in the owlbot_main() call
_tracked_paths.add(src)
php.owlbot_main(
src=src,
dest=dest,
copy_excludes=[
src / "**/[A-Z]*_*.php",
src / "**/*GrpcClient.php"
]
)
# remove class_alias code
s.replace(
"src/V*/**/*.php",
r"^// Adding a class alias for backwards compatibility with the previous class name.$"
+ "\n"
+ r"^class_alias\(.*\);$"
+ "\n",
'')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
# format generated clients
subprocess.run([
'npm',
'exec',
'--yes',
'--package=@prettier/plugin-php@^0.16',
'--',
'prettier',
'**/Gapic/*',
'--write',
'--parser=php',
'--single-quote',
'--print-width=80'])
|
{
"content_hash": "49f6e1421f528642ff4a3a6ab753f1af",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 90,
"avg_line_length": 25.042105263157893,
"alnum_prop": 0.6376628835645229,
"repo_name": "googleapis/google-cloud-php",
"id": "cbd8b306a83ebf449fb70ce652b8521087cd2c81",
"size": "2379",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ApiKeys/owlbot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3333"
},
{
"name": "PHP",
"bytes": "47981731"
},
{
"name": "Python",
"bytes": "413107"
},
{
"name": "Shell",
"bytes": "8171"
}
],
"symlink_target": ""
}
|
import d1_test.d1_test_case
import d1_test.instance_generator.access_policy
# ===============================================================================
@d1_test.d1_test_case.reproducible_random_decorator("TestAccessPolicy")
class TestAccessPolicy(d1_test.d1_test_case.D1TestCase):
def test_1000(self):
"""select_random_set_of_permissions()"""
permissions = (
d1_test.instance_generator.access_policy.random_set_of_permissions()
)
self.sample.assert_equals(
permissions, "inst_gen_select_random_set_of_permissions"
)
def test_1010(self):
"""permissions_to_tag_string()"""
permissions = (
d1_test.instance_generator.access_policy.random_set_of_permissions()
)
s = d1_test.instance_generator.access_policy.permissions_to_tag_string(
permissions
)
self.sample.assert_equals(s, "inst_gen_permissions_to_tag_string")
def test_1020(self):
"""random_subject_with_permission_labels()"""
permissions = (
d1_test.instance_generator.access_policy.random_set_of_permissions()
)
s = d1_test.instance_generator.access_policy.random_subject_with_permission_labels(
permissions
)
self.sample.assert_equals(s, "inst_gen_random_subject_with_permission_labels")
def test_1030(self):
"""random_subjects_with_permission_labels()"""
permissions = (
d1_test.instance_generator.access_policy.random_set_of_permissions()
)
subjects = d1_test.instance_generator.access_policy.random_subject_list_with_permission_labels(
permissions
)
self.sample.assert_equals(
subjects, "inst_gen_random_subjects_with_permission_labels"
)
def test_1040(self):
"""generate()"""
access_policy_pyxb = d1_test.instance_generator.access_policy.generate()
self.sample.assert_equals(access_policy_pyxb, "inst_gen_generate")
def test_1050(self):
"""random_subject_list()"""
subject_list = d1_test.instance_generator.access_policy.random_subject_list()
self.sample.assert_equals(subject_list, "inst_gen_random_subject_list")
|
{
"content_hash": "a11485c27b42a6c55c7275713ef021ec",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 103,
"avg_line_length": 38.94827586206897,
"alnum_prop": 0.6193005754758742,
"repo_name": "DataONEorg/d1_python",
"id": "d9d0be85088ed9dccd074402aea8bd116a9ba8f0",
"size": "3071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_utilities/src/d1_test/instance_generator/tests/test_access_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4798"
},
{
"name": "HTML",
"bytes": "13358"
},
{
"name": "Inno Setup",
"bytes": "3430"
},
{
"name": "JavaScript",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "3547939"
},
{
"name": "Shell",
"bytes": "5670"
},
{
"name": "XSLT",
"bytes": "89205"
}
],
"symlink_target": ""
}
|
import abc
from pathlib import Path
from virtualenv.create.describe import PosixSupports, Python3Supports, WindowsSupports
from virtualenv.create.via_global_ref.builtin.ref import PathRefToDest
from .common import PyPy
class PyPy3(PyPy, Python3Supports, metaclass=abc.ABCMeta):
@classmethod
def exe_stem(cls):
return "pypy3"
@classmethod
def exe_names(cls, interpreter):
return super().exe_names(interpreter) | {"pypy"}
class PyPy3Posix(PyPy3, PosixSupports):
"""PyPy 3 on POSIX"""
@property
def stdlib(self):
"""PyPy3 respects sysconfig only for the host python, virtual envs is instead lib/pythonx.y/site-packages"""
return self.dest / "lib" / f"pypy{self.interpreter.version_release_str}" / "site-packages"
@classmethod
def _shared_libs(cls, python_dir):
# glob for libpypy3-c.so, libpypy3-c.dylib, libpypy3.9-c.so ...
return python_dir.glob("libpypy3*.*")
def to_lib(self, src):
return self.dest / "lib" / src.name
@classmethod
def sources(cls, interpreter):
yield from super().sources(interpreter)
# PyPy >= 3.8 supports a standard prefix installation, where older
# versions always used a portable/development style installation.
# If this is a standard prefix installation, skip the below:
if interpreter.system_prefix == "/usr":
return
# Also copy/symlink anything under prefix/lib, which, for "portable"
# PyPy builds, includes the tk,tcl runtime and a number of shared
# objects. In distro-specific builds or on conda this should be empty
# (on PyPy3.8+ it will, like on CPython, hold the stdlib).
host_lib = Path(interpreter.system_prefix) / "lib"
stdlib = Path(interpreter.system_stdlib)
if host_lib.exists() and host_lib.is_dir():
for path in host_lib.iterdir():
if stdlib == path:
# For PyPy3.8+ the stdlib lives in lib/pypy3.8
# We need to avoid creating a symlink to it since that
# will defeat the purpose of a virtualenv
continue
yield PathRefToDest(path, dest=cls.to_lib)
class Pypy3Windows(PyPy3, WindowsSupports):
"""PyPy 3 on Windows"""
@property
def less_v37(self):
return self.interpreter.version_info.minor < 7
@property
def stdlib(self):
"""PyPy3 respects sysconfig only for the host python, virtual envs is instead Lib/site-packages"""
if self.less_v37:
return self.dest / "site-packages"
return self.dest / "Lib" / "site-packages"
@property
def bin_dir(self):
"""PyPy3 needs to fallback to pypy definition"""
return self.dest / "Scripts"
@classmethod
def _shared_libs(cls, python_dir):
# glob for libpypy*.dll and libffi*.dll
for pattern in ["libpypy*.dll", "libffi*.dll"]:
srcs = python_dir.glob(pattern)
yield from srcs
__all__ = [
"PyPy3",
"PyPy3Posix",
"Pypy3Windows",
]
|
{
"content_hash": "ea37051a08eb2d2fac7b947257a901bb",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 116,
"avg_line_length": 34.13186813186813,
"alnum_prop": 0.6313586606567934,
"repo_name": "joshcai/utdcs",
"id": "ca5778c2c34810574411949b4de2ba2c8500bba5",
"size": "3106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python3.8/site-packages/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "42668"
},
{
"name": "C++",
"bytes": "48490"
},
{
"name": "CSS",
"bytes": "41109"
},
{
"name": "Cython",
"bytes": "133874"
},
{
"name": "Fortran",
"bytes": "298"
},
{
"name": "HTML",
"bytes": "8407"
},
{
"name": "JavaScript",
"bytes": "440875"
},
{
"name": "Nix",
"bytes": "382"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "PowerShell",
"bytes": "8834"
},
{
"name": "Python",
"bytes": "8302122"
},
{
"name": "Shell",
"bytes": "3828"
}
],
"symlink_target": ""
}
|
import pickle
import tensorflow as tf
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Input, Flatten
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
from tensorflow.python.ops import control_flow_ops
tf.python.control_flow_ops = control_flow_ops
#
# np.random.seed(1337) # for reproducibility
#
# n_classes = 10
# flat_img_size = 32*32*3
# pool_size = (2, 2)
#
flags = tf.app.flags
FLAGS = flags.FLAGS
#
# command line flags
flags.DEFINE_string('training_file', '', "Bottleneck features training file (.p)")
flags.DEFINE_string('validation_file', '', "Bottleneck features validation file (.p)")
flags.DEFINE_integer('epochs', 50, "The number of epochs.")
flags.DEFINE_integer('batch_size', 256, "The batch size.")
def load_bottleneck_data(training_file, validation_file):
"""
Utility function to load bottleneck features.
Arguments:
training_file - String
validation_file - String
"""
print("Training file", training_file)
print("Validation file", validation_file)
with open(training_file, 'rb') as f:
train_data = pickle.load(f)
with open(validation_file, 'rb') as f:
validation_data = pickle.load(f)
X_train = train_data['features']
y_train = train_data['labels']
X_val = validation_data['features']
y_val = validation_data['labels']
return X_train, y_train, X_val, y_val
def main(_):
# load bottleneck data
X_train, y_train, X_val, y_val = load_bottleneck_data(FLAGS.training_file, FLAGS.validation_file)
# Y_train = np_utils.to_categorical(y_train, n_classes)
# Y_val = np_utils.to_categorical(y_val, n_classes)
Y_train = y_train
Y_val = y_val
nb_classes = len(np.unique(y_train))
input_shape = X_train.shape[1:]
# define model
model = Sequential()
# model.add(Input(shape=input_shape))
model.add(Flatten(input_shape=input_shape))
model.add(Dense(nb_classes))
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, Y_train, batch_size=FLAGS.batch_size, nb_epoch=FLAGS.epochs,
validation_data=(X_val, Y_val), verbose=1)
# parses flags and calls the `main` function above
if __name__ == '__main__':
tf.app.run()
|
{
"content_hash": "020b6d74b99c28a425ed58d9212c439e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 101,
"avg_line_length": 31.15,
"alnum_prop": 0.6813804173354735,
"repo_name": "thomasantony/CarND-Projects",
"id": "267b2ae1a3a11cb40253fceb6cc8f5fcd3f6c5f8",
"size": "4540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exercises/Term1/transfer-learning-lab/feature_extraction.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "23311677"
},
{
"name": "Python",
"bytes": "122635"
},
{
"name": "Shell",
"bytes": "943"
}
],
"symlink_target": ""
}
|
import calendar
import urlparse
import re
import time_util
import struct
import base64
# Also defined in saml2.saml but can't import from there
XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
XSI_NIL = '{%s}nil' % XSI_NAMESPACE
# ---------------------------------------------------------
class NotValid(Exception):
pass
class OutsideCardinality(Exception):
pass
class MustValueError(ValueError):
pass
class ShouldValueError(ValueError):
pass
# --------------------- validators -------------------------------------
#
NCNAME = re.compile("(?P<NCName>[a-zA-Z_](\w|[_.-])*)")
def valid_ncname(name):
match = NCNAME.match(name)
if not match:
raise NotValid("NCName")
return True
def valid_id(oid):
valid_ncname(oid)
def valid_any_uri(item):
"""very simplistic, ..."""
try:
part = urlparse.urlparse(item)
except Exception:
raise NotValid("AnyURI")
if part[0] == "urn" and part[1] == "": # A urn
return True
# elif part[1] == "localhost" or part[1] == "127.0.0.1":
# raise NotValid("AnyURI")
return True
def valid_date_time(item):
try:
time_util.str_to_time(item)
except Exception:
raise NotValid("dateTime")
return True
def valid_url(url):
try:
_ = urlparse.urlparse(url)
except Exception:
raise NotValid("URL")
# if part[1] == "localhost" or part[1] == "127.0.0.1":
# raise NotValid("URL")
return True
def validate_on_or_after(not_on_or_after, slack):
if not_on_or_after:
now = time_util.utc_now()
nooa = calendar.timegm(time_util.str_to_time(not_on_or_after))
if now > nooa + slack:
raise Exception("Can't use it, it's too old %d > %d" %
(nooa, now))
return nooa
else:
return False
def validate_before(not_before, slack):
if not_before:
now = time_util.utc_now()
nbefore = calendar.timegm(time_util.str_to_time(not_before))
if nbefore > now + slack:
raise Exception("Can't use it yet %d <= %d" % (nbefore, now))
return True
def valid_address(address):
if not (valid_ipv4(address) or valid_ipv6(address)):
raise NotValid("address")
return True
def valid_ipv4(address):
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
try:
if not 0 <= int(item) <= 255:
raise NotValid("ipv4")
except ValueError:
return False
return True
#
IPV6_PATTERN = re.compile(r"""
^
\s* # Leading whitespace
(?!.*::.*::) # Only a single wildcard allowed
(?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard
(?: # Repeat 6 times:
[0-9a-f]{0,4} # A group of at most four hexadecimal digits
(?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
){6} #
(?: # Either
[0-9a-f]{0,4} # Another group
(?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
[0-9a-f]{0,4} # Last group
(?: (?<=::) # Colon iff preceeded by exacly one colon
| (?<!:) #
| (?<=:) (?<!::) : #
) # OR
| # A v4 address with NO leading zeros
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
(?: \.
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
){3}
)
\s* # Trailing whitespace
$
""", re.VERBOSE | re.IGNORECASE | re.DOTALL)
def valid_ipv6(address):
"""Validates IPv6 addresses. """
return IPV6_PATTERN.match(address) is not None
def valid_boolean(val):
vall = val.lower()
if vall in ["true", "false", "0", "1"]:
return True
else:
raise NotValid("boolean")
def valid_duration(val):
try:
time_util.parse_duration(val)
except Exception:
raise NotValid("duration")
return True
def valid_string(val):
""" Expects unicode
Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] |
[#x10000-#x10FFFF]
"""
for char in val:
try:
char = ord(char)
except TypeError:
raise NotValid("string")
if char == 0x09 or char == 0x0A or char == 0x0D:
continue
elif 0x20 <= char <= 0xD7FF:
continue
elif 0xE000 <= char <= 0xFFFD:
continue
elif 0x10000 <= char <= 0x10FFFF:
continue
else:
raise NotValid("string")
return True
def valid_unsigned_short(val):
try:
struct.pack("H", int(val))
except struct.error:
raise NotValid("unsigned short")
except ValueError:
raise NotValid("unsigned short")
return True
def valid_positive_integer(val):
try:
integer = int(val)
except ValueError:
raise NotValid("positive integer")
if integer > 0:
return True
else:
raise NotValid("positive integer")
def valid_non_negative_integer(val):
try:
integer = int(val)
except ValueError:
raise NotValid("non negative integer")
if integer < 0:
raise NotValid("non negative integer")
return True
def valid_integer(val):
try:
int(val)
except ValueError:
raise NotValid("integer")
return True
def valid_base64(val):
try:
base64.b64decode(val)
except Exception:
raise NotValid("base64")
return True
def valid_qname(val):
""" A qname is either
NCName or
NCName ':' NCName
"""
try:
(prefix, localpart) = val.split(":")
return valid_ncname(prefix) and valid_ncname(localpart)
except ValueError:
return valid_ncname(val)
def valid_anytype(val):
""" Goes through all known type validators
:param val: The value to validate
:return: True is value is valid otherwise an exception is raised
"""
for validator in VALIDATOR.values():
if validator == valid_anytype: # To hinder recursion
continue
try:
if validator(val):
return True
except NotValid:
pass
if isinstance(val, type):
return True
raise NotValid("AnyType")
# -----------------------------------------------------------------------------
VALIDATOR = {
"ID": valid_id,
"NCName": valid_ncname,
"dateTime": valid_date_time,
"anyURI": valid_any_uri,
"nonNegativeInteger": valid_non_negative_integer,
"PositiveInteger": valid_positive_integer,
"boolean": valid_boolean,
"unsignedShort": valid_unsigned_short,
"duration": valid_duration,
"base64Binary": valid_base64,
"integer": valid_integer,
"QName": valid_qname,
"anyType": valid_anytype,
"string": valid_string,
}
# -----------------------------------------------------------------------------
def validate_value_type(value, spec):
"""
c_value_type = {'base': 'string', 'enumeration': ['Permit', 'Deny',
'Indeterminate']}
{'member': 'anyURI', 'base': 'list'}
{'base': 'anyURI'}
{'base': 'NCName'}
{'base': 'string'}
"""
if "maxlen" in spec:
return len(value) <= spec["maxlen"]
if spec["base"] == "string":
if "enumeration" in spec:
if value not in spec["enumeration"]:
raise NotValid("value not in enumeration")
else:
return valid_string(value)
elif spec["base"] == "list": # comma separated list of values
for val in [v.strip() for v in value.split(",")]:
valid(spec["member"], val)
else:
return valid(spec["base"], value)
return True
def valid(typ, value):
try:
return VALIDATOR[typ](value)
except KeyError:
try:
(_namespace, typ) = typ.split(":")
except ValueError:
if typ == "":
typ = "string"
return VALIDATOR[typ](value)
def _valid_instance(instance, val):
try:
val.verify()
except NotValid, exc:
raise NotValid("Class '%s' instance: %s" % (
instance.__class__.__name__, exc.args[0]))
except OutsideCardinality, exc:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
instance.__class__.__name__, exc.args[0]))
ERROR_TEXT = "Wrong type of value '%s' on attribute '%s' expected it to be %s"
def valid_instance(instance):
instclass = instance.__class__
class_name = instclass.__name__
# if instance.text:
# _has_val = True
# else:
# _has_val = False
if instclass.c_value_type and instance.text:
try:
validate_value_type(instance.text.strip(),
instclass.c_value_type)
except NotValid, exc:
raise NotValid("Class '%s' instance: %s" % (class_name,
exc.args[0]))
for (name, typ, required) in instclass.c_attributes.values():
value = getattr(instance, name, '')
if required and not value:
txt = "Required value on property '%s' missing" % name
raise MustValueError("Class '%s' instance: %s" % (class_name, txt))
if value:
try:
if isinstance(typ, type):
if typ.c_value_type:
spec = typ.c_value_type
else:
spec = {"base": "string"} # do I need a default
validate_value_type(value, spec)
else:
valid(typ, value)
except (NotValid, ValueError), exc:
txt = ERROR_TEXT % (value, name, exc.args[0])
raise NotValid("Class '%s' instance: %s" % (class_name, txt))
for (name, _spec) in instclass.c_children.values():
value = getattr(instance, name, '')
try:
_card = instclass.c_cardinality[name]
try:
_cmin = _card["min"]
except KeyError:
_cmin = None
try:
_cmax = _card["max"]
except KeyError:
_cmax = None
except KeyError:
_cmin = _cmax = _card = None
if value:
#_has_val = True
if isinstance(value, list):
_list = True
vlen = len(value)
else:
_list = False
vlen = 1
if _card:
if _cmin is not None and _cmin > vlen:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
class_name, "less then min (%s<%s)" % (vlen,
_cmin)))
if _cmax is not None and vlen > _cmax:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
class_name, "more then max (%s>%s)" % (vlen,
_cmax)))
if _list:
for val in value:
# That it is the right class is handled elsewhere
_valid_instance(instance, val)
else:
_valid_instance(instance, value)
else:
if _cmin:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
class_name, "too few values on %s" % name))
# if not _has_val:
# if class_name != "RequestedAttribute":
# # Not allow unless xsi:nil="true"
# assert instance.extension_attributes
# assert instance.extension_attributes[XSI_NIL] == "true"
return True
def valid_domain_name(dns_name):
m = re.match(
"^[a-z0-9]+([-.]{ 1 }[a-z0-9]+).[a-z]{2,5}(:[0-9]{1,5})?(\/.)?$",
dns_name, "ix")
if not m:
raise ValueError("Not a proper domain name")
|
{
"content_hash": "076ea03572ebbf0ff587afdb02c7e600",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 79,
"avg_line_length": 27.712389380530972,
"alnum_prop": 0.489701421044228,
"repo_name": "Runscope/pysaml2",
"id": "376df9ed6115183fddf25f1ba4940f6e788025c3",
"size": "12526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/saml2/validate.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46"
},
{
"name": "Python",
"bytes": "2602966"
},
{
"name": "Shell",
"bytes": "4371"
}
],
"symlink_target": ""
}
|
"""
Helios Signals
Effectively callbacks that other apps can wait and be notified about
"""
import django.dispatch
# when an election is created
election_created = django.dispatch.Signal(providing_args=["election"])
# when a vote is cast
vote_cast = django.dispatch.Signal(providing_args=["user", "voter", "election", "cast_vote"])
# when an election is tallied
election_tallied = django.dispatch.Signal(providing_args=["election"])
|
{
"content_hash": "7c6ffdffef8bdf3bfbe249a933d28650",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 93,
"avg_line_length": 27.3125,
"alnum_prop": 0.7551487414187643,
"repo_name": "shirlei/helios-server",
"id": "4c82d2ed854c0dbc79587d126c09895adb93e2dd",
"size": "437",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "helios/signals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "82677"
},
{
"name": "HTML",
"bytes": "433803"
},
{
"name": "Java",
"bytes": "2271"
},
{
"name": "JavaScript",
"bytes": "473703"
},
{
"name": "Python",
"bytes": "829739"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
from functional.tests.identity.v3 import test_identity
from tempest.lib.common.utils import data_utils
class IdentityProviderTests(test_identity.IdentityTests):
# Introduce functional test case for command 'Identity Provider'
def test_idp_create(self):
self._create_dummy_idp()
def test_idp_delete(self):
identity_provider = self._create_dummy_idp(add_clean_up=False)
raw_output = self.openstack('identity provider delete %s'
% identity_provider)
self.assertEqual(0, len(raw_output))
def test_idp_show(self):
identity_provider = self._create_dummy_idp(add_clean_up=True)
raw_output = self.openstack('identity provider show %s'
% identity_provider)
items = self.parse_show(raw_output)
self.assert_show_fields(items, self.IDENTITY_PROVIDER_FIELDS)
def test_idp_list(self):
self._create_dummy_idp(add_clean_up=True)
raw_output = self.openstack('identity provider list')
items = self.parse_listing(raw_output)
self.assert_table_structure(items, self.IDENTITY_PROVIDER_LIST_HEADERS)
def test_idp_set(self):
identity_provider = self._create_dummy_idp(add_clean_up=True)
new_remoteid = data_utils.rand_name('newRemoteId')
raw_output = self.openstack('identity provider set '
'%(identity-provider)s '
'--remote-id %(remote-id)s '
% {'identity-provider': identity_provider,
'remote-id': new_remoteid})
self.assertEqual(0, len(raw_output))
raw_output = self.openstack('identity provider show %s'
% identity_provider)
updated_value = self.parse_show_as_object(raw_output)
self.assertIn(new_remoteid, updated_value['remote_ids'])
|
{
"content_hash": "afe48cf60ac5ffe47ab0d751e3f3f32f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 46.57142857142857,
"alnum_prop": 0.6007157464212679,
"repo_name": "redhat-openstack/python-openstackclient",
"id": "08f660f66da20c311069f412aee62208d1b302a8",
"size": "2529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-patches",
"path": "functional/tests/identity/v3/test_idp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2229284"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
}
|
import re
import uuid
from django import forms
from django.db import models
from .utils import prepare_uuid_string
try:
from psycopg2 import extras
extras.register_uuid()
except (ImportError, AttributeError):
pass
def uuid_validator(value):
if re.search("[^a-f0-9]+", value):
raise forms.ValidationError("Invalid UUID value")
class UUIDField(models.Field):
def __init__(self, **kwargs):
kwargs.update(max_length=32, editable=False, blank=True, unique=True)
super().__init__(**kwargs)
def db_type(self, connection=None):
if connection and "postgres" in connection.vendor:
return "uuid"
return "char(%s)" % self.max_length
def pre_save(self, model_instance, add):
if add:
value = uuid.uuid4().hex
setattr(model_instance, self.attname, value)
else:
value = getattr(model_instance, self.attname, None)
return value
def get_db_prep_value(self, value, connection, prepared=False):
return prepare_uuid_string(value)
def from_db_value(self, value, _, __):
return prepare_uuid_string(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return prepare_uuid_string(value, default="")
def to_python(self, value):
return prepare_uuid_string(value)
def formfield(self, **kwargs):
kwargs.update(
form_class=forms.CharField,
max_length=self.max_length,
min_length=self.max_length,
validators=[
uuid_validator,
forms.validators.MaxLengthValidator,
forms.validators.MinLengthValidator,
],
)
return super().formfield(**kwargs)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], [r"^formapi\.fields\.UUIDField"])
except ImportError:
pass
|
{
"content_hash": "d1a4fafe42c654c5cb50f9be878b6c83",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 26.98611111111111,
"alnum_prop": 0.6212043232115285,
"repo_name": "5monkeys/django-formapi",
"id": "c5594c5c66b2cefe649c05f4922740f2b45c1124",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "formapi/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3946"
},
{
"name": "Makefile",
"bytes": "189"
},
{
"name": "Python",
"bytes": "37357"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
from marvin.api.api import Interaction
from marvin import config
import pytest
pytestmark = pytest.mark.uses_web
auths = [None, 'token', 'netrc']
@pytest.fixture(params=auths)
def mint(request):
base = 'https://lore.sdss.utah.edu/'
url = '/marvin/api/general/getroutemap/'
if request.param is None:
pytest.skip("no auth should fail")
ii = Interaction(url, auth=request.param, send=False, base=base)
yield ii
ii = None
class TestInteraction(object):
def test_auth(self, mint):
assert mint.authtype in auths
if mint.authtype:
assert mint.authtype == mint.session.auth.authtype
def test_auth_fail(self, monkeypatch):
monkeypatch.setattr(config, 'access', 'collab')
base = 'https://lore.sdss.utah.edu/'
url = '/marvin/api/general/getroutemap/'
with pytest.raises(AssertionError, match='Must have an authorization type set for collab access to MPLs!'):
Interaction(url, auth=None, send=False, base=base, params={'release': 'MPL-11'})
|
{
"content_hash": "42acf9541349d032be26be721b73fd2a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 115,
"avg_line_length": 30.18918918918919,
"alnum_prop": 0.6732318710832588,
"repo_name": "sdss/marvin",
"id": "10ad1f88870883e5d22bf339afb515d764f7953d",
"size": "1348",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/api/test_interaction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "166739"
},
{
"name": "HTML",
"bytes": "91250"
},
{
"name": "JavaScript",
"bytes": "247561"
},
{
"name": "PLpgSQL",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "1706012"
},
{
"name": "SCSS",
"bytes": "266310"
},
{
"name": "Shell",
"bytes": "1150"
}
],
"symlink_target": ""
}
|
import datetime
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
A simple time service application component.
"""
def onConnect(self):
self.join("realm1")
def onJoin(self, details):
def utcnow():
now = datetime.datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
self.register(utcnow, 'com.timeservice.now')
|
{
"content_hash": "00b749730a2a29e82c4b6f31f34b3aa0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 52,
"avg_line_length": 20.4,
"alnum_prop": 0.6901960784313725,
"repo_name": "eugenejen/AutobahnPython",
"id": "e060c28427843a7505b319f113beceef62785f0b",
"size": "1280",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/twisted/wamp/basic/session/series/backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""AzureDocument classes and enums.
"""
class DatabaseAccount(object):
"""Database account. A DatabaseAccount is the container for databases.
:Attributes:
- `DatabaseLink`: str, the self-link for Databases in the
databaseAccount.
- `MediaLink`: str, the self-link for Media in the databaseAccount.
- `MaxMediaStorageUsageInMB`: int, attachment content (media) storage
quota in MBs ( Retrieved from gateway ).
- `CurrentMediaStorageUsageInMB`: int, current attachment content
(media) usage in MBs (Retrieved from gateway ).
Value is returned from cached information updated periodically and
is not guaranteed to be real time.
- `ConsistencyPolicy`: dict, UserConsistencyPolicy settings.
- `ConsistencyPolicy['defaultConsistencyLevel']`: dict, the default
consistency level.
- `ConsistencyPolicy['maxStalenessPrefix']`: int, in bounded staleness
consistency, the maximum allowed staleness in terms difference in
sequence numbers (aka version).
- `ConsistencyPolicy['maxStalenessIntervalInSeconds']`: int, In bounded
staleness consistency, the maximum allowed staleness in terms time
interval.
"""
def __init__(self):
self.DatabasesLink = ''
self.MediaLink = ''
self.MaxMediaStorageUsageInMB = 0
self.CurrentMediaStorageUsageInMB = 0
self.ConsumedDocumentStorageInMB = 0
self.ReservedDocumentStorageInMB = 0
self.ProvisionedDocumentStorageInMB = 0
self.ConsistencyPolicy = None
class ConsistencyLevel(object):
"""Represents the consistency levels supported for DocumentDB client
operations.
The requested ConsistencyLevel must match or be weaker than that provisioned
for the database account. Consistency levels.
Consistency levels by order of strength are Strong, BoundedStaleness,
Session and Eventual.
:Attributes:
- `Strong`: Strong Consistency guarantees that read operations always
return the value that was last written.
- `BoundedStaleness` Bounded Staleness guarantees that reads are not
too out-of-date. This can be configured based on number of operations
(MaxStalenessPrefix) or time (MaxStalenessIntervalInSeconds).
- `Session`: Session Consistency guarantees monotonic reads (you never
read old data, then new, then old again), monotonic writes (writes
are ordered) and read your writes (your writes are immediately
visible to your reads) within any single session.
- `Eventual`: Eventual Consistency guarantees that reads will return
a subset of writes. All writes will be eventually be available for
reads.
"""
Strong = 'Strong'
BoundedStaleness = 'BoundedStaleness'
Session = 'Session'
Eventual = 'Eventual'
class IndexingMode(object):
"""Specifies the supported indexing modes.
:Attributes:
- `Consistent`: Index is updated synchronously with a create or
update operation. With consistent indexing, query behavior is the
same as the default consistency level for the collection.
The index is
always kept up to date with the data.
- `Lazy`: Index is updated asynchronously with respect to a create
or update operation.
With lazy indexing, queries are eventually consistent. The index is
updated when the collection is idle.
- `NoIndex`: No index is provided.
Setting IndexingMode to "None" drops the index. Use this if you don't
want to maintain the index for a document collection, to save the
storage cost or improve the write throughput. Your queries will
degenerate to scans of the entire collection.
"""
Consistent = 'consistent'
Lazy = 'lazy'
NoIndex = 'none'
class IndexKind(object):
"""Specifies the index kind of index specs.
:Attributes:
- `Hash`: The index entries are hashed to serve point look up queries.
Can be used to serve queries like: SELECT * FROM docs d WHERE d.prop = 5
- `Range`: The index entries are ordered. Range indexes are optimized for
inequality predicate queries with efficient range scans.
Can be used to serve queries like: SELECT * FROM docs d WHERE d.prop > 5
"""
Hash = 'Hash'
Range = 'Range'
class DataType(object):
"""Specifies the data type of index specs.
:Attributes:
- `Number`: Represents a numeric data type
- `String`: Represents a string data type.
"""
Number = 'Number'
String = 'String'
class IndexingDirective(object):
"""Specifies whether or not the resource is to be indexed.
:Attributes:
- `Default`: Use any pre-defined/pre-configured defaults.
- `Include`: Index the resource.
- `Exclude`: Do not index the resource.
"""
Default = 0
Exclude = 1
Include = 2
class ConnectionMode(object):
"""Represents the connection mode to be used by the client.
:Attributes:
- `Gateway`: Use the DocumentDB gateway to route all requests. The
gateway proxies requests to the right data partition.
"""
Gateway = 0
class MediaReadMode(object):
"""Represents the mode for use with downloading attachment content
(aka media).
:Attributes:
- `Buffered`: Content is buffered at the client and not directly
streamed from the content store.
Use Buffered to reduce the time taken to read and write media files.
- `Streamed`: Content is directly streamed from the content store
without any buffering at the client.
Use Streamed to reduce the client memory overhead of reading and
writing media files.
"""
Buffered = 'Buffered'
Streamed = 'Streamed'
class PermissionMode(object):
"""Enumeration specifying applicability of permission.
:Attributes:
- `NoneMode`: None.
- `Read`: Permission applicable for read operations only.
- `All`: Permission applicable for all operations.
"""
NoneMode = 'none' # None is python's key word.
Read = 'read'
All = 'all'
class TriggerType(object):
"""Specifies the type of the trigger.
:Attributes:
- `Pre`: Trigger should be executed before the associated operation(s).
- `Post`: Trigger should be executed after the associated operation(s).
"""
Pre = 'pre'
Post = 'post'
class TriggerOperation(object):
"""Specifies the operations on which a trigger should be executed.
:Attributes:
- `All`: All operations.
- `Create`: Create operations only.
- `Update`: Update operations only.
- `Delete`: Delete operations only.
- `Replace`: Replace operations only.
"""
All = 'all'
Create = 'create'
Update = 'update'
Delete = 'delete'
Replace = 'replace'
class SSLConfiguration(object):
"""Configurations for SSL connections.
Please refer to https://docs.python.org/2/library/ssl.html#socket-creation for more detail.
:Attributes:
- `SSLKeyFile`: str, the path of the key file for ssl connection.
- `SSLCertFile`: str, the path of the cert file for ssl connection.
- `SSLCaCerts`: str, the path of the ca_certs file for ssl connection.
"""
def __init__(self):
self.SSLKeyFile = None
self.SSLCertFile = None
self.SSLCaCerts = None
class ProxyConfiguration(object):
"""Configurations for proxy.
:Attributes:
- `Host`: str, the host address of the proxy.
- `Port`: int, the port number of the proxy.
"""
def __init__(self):
self.Host = None
self.Port = None
class ConnectionPolicy(object):
"""Represents the Connection policy assocated with a DocumentClient.
:Attributes:
- `RequestTimeout`: int, gets or sets the request timeout (time to wait
for response from network peer)
- `MediaRequestTimeout`: int, gets or sets Time to wait for response
from network peer for attachment content (aka media) operations.
- `ConnectionMode`: int (documents.ConnectionMode), gets or sets the
connection mode used in the client. Currently only Gateway is supported.
- `MediaReadMode`: str (MediaReadMode.Buffered), gets or sets the
attachment content (aka media) download mode.
- `SSLConfiguration`: documents.SSLConfiguration, gets or sets the SSL configuration.
- `ProxyConfiguration`: documents.ProxyConfiguration, gets or sets the proxy configuration.
"""
__defaultRequestTimeout = 60000 # milliseconds
# defaultMediaRequestTimeout is based upon the blob client timeout and the
# retry policy.
__defaultMediaRequestTimeout = 300000 # milliseconds
def __init__(self):
self.RequestTimeout = self.__defaultRequestTimeout
self.MediaRequestTimeout = self.__defaultMediaRequestTimeout
self.ConnectionMode = ConnectionMode.Gateway
self.MediaReadMode = MediaReadMode.Buffered
self.SSLConfiguration = None
self.ProxyConfiguration = None
class RetryPolicy(object):
"""The retry policy.
:Attributes:
- `MaxRetryAttemptsOnRequest`: int, the max retry attempts on request.
- `MaxRetryAttemptsOnQuery`: int, the max retry attempts on query.
"""
def __init__(self):
self.MaxRetryAttemptsOnRequest = 0
self.MaxRetryAttemptsOnQuery = 3
|
{
"content_hash": "71177ee0c5e5bde1144d1ec29592a30c",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 99,
"avg_line_length": 35.28832116788321,
"alnum_prop": 0.6644947771227635,
"repo_name": "shipunyc/azure-documentdb-python",
"id": "fe8b968c7868f280711a8a895ffa39bbeb4e71c6",
"size": "9731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydocumentdb/documents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177455"
}
],
"symlink_target": ""
}
|
import pytest
from SendEmailToCampaignRecipients import *
CAMPAIGN_EMAIL_TO = 'campaignemailto'
CAMPAIGN_EMAIL_SUBJECT = 'campaignemailsubject'
CAMPAIGN_EMAIL_BODY = 'campaignemailbody'
NUM_OF_INCIDENTS = 5
INCIDENT_IDS = [str(i) for i in range(NUM_OF_INCIDENTS)]
CUSTOM_FIELDS = {
CAMPAIGN_EMAIL_TO: 'a@a.com',
CAMPAIGN_EMAIL_SUBJECT: 'Campaign Detected',
CAMPAIGN_EMAIL_BODY: 'PLease check the email'
}
MOCKED_INCIDENT = {
'id': 100,
'CustomFields': CUSTOM_FIELDS
}
def test_send_email_happy_path(mocker):
"""
Given -
Mocked custom field for an incident
When -
Run the main of the command
Then -
Validate the expected args sent to demisto.executeCommand
"""
# prepare
mocker.patch.object(demisto, 'incidents', return_value=[MOCKED_INCIDENT])
mocker.patch.object(demisto, 'executeCommand')
mocker.patch.object(demisto, 'results')
# run
main()
# validate
assert demisto.executeCommand.call_args[0][0] == 'send-mail'
command_arg_dict = demisto.executeCommand.call_args[0][1]
for custom_filed_key, command_key in zip(CUSTOM_FIELDS.keys(), ['to', 'subject', 'body']):
assert command_arg_dict[command_key] == CUSTOM_FIELDS[custom_filed_key]
def test_no_email_to(mocker):
"""
Given -
Empty emailto in the incident
When -
Try to send email
Then -
Validate return_error was called
"""
# prepare
mocker.patch.object(demisto, 'incidents', return_value=[MOCKED_INCIDENT])
mocker.patch.object(demisto, 'results')
CUSTOM_FIELDS[CAMPAIGN_EMAIL_TO] = ''
# run
try:
main()
pytest.fail('SystemExit should occurred as return_error was called')
except SystemExit:
args = demisto.results.call_args[0][0]
assert args['Contents'] == INVALID_EMAIL_TO_MSG
|
{
"content_hash": "44954223eee41a539a638e971338da45",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 94,
"avg_line_length": 26.929577464788732,
"alnum_prop": 0.6427824267782427,
"repo_name": "VirusTotal/content",
"id": "949123cde51eeb1d39b1808564d584c21126e470",
"size": "1912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Campaign/Scripts/SendEmailToCampaignRecipients/SendEmailToCampaignRecipients_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_particle_test_83.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "924a98934eb2335387d5401ab3eeb908",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.076923076923077,
"alnum_prop": 0.69,
"repo_name": "anhstudios/swganh",
"id": "9e735ee975eab74deb982480c87a75fa05a8bf55",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/particle/shared_particle_test_83.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from urllib import urlencode
from django import forms
from django.conf import settings
from django.contrib import admin
from django.core import validators
from django.core.urlresolvers import resolve
from django.utils.html import format_html
from django.utils.translation import ugettext, ugettext_lazy as _
from olympia import amo
from olympia.access import acl
from olympia.amo.urlresolvers import reverse
from . import models
class AddonAdmin(admin.ModelAdmin):
class Media:
css = {
'all': ('css/admin/l10n.css',)
}
js = ('js/admin/l10n.js',)
exclude = ('authors',)
list_display = ('__unicode__', 'type', 'guid',
'status_with_admin_manage_link', 'average_rating')
list_filter = ('type', 'status')
readonly_fields = ('status_with_admin_manage_link',)
fieldsets = (
(None, {
'fields': ('name', 'slug', 'guid', 'default_locale', 'type',
'status_with_admin_manage_link'),
}),
('Details', {
'fields': ('summary', 'description', 'homepage', 'eula',
'privacy_policy', 'developer_comments', 'icon_type',
),
}),
('Support', {
'fields': ('support_url', 'support_email'),
}),
('Stats', {
'fields': ('average_rating', 'bayesian_rating', 'total_ratings',
'text_ratings_count',
'weekly_downloads', 'total_downloads',
'average_daily_users'),
}),
('Flags', {
'fields': ('disabled_by_user', 'view_source', 'requires_payment',
'public_stats', 'is_experimental',
'external_software', 'reputation'),
}),
('Dictionaries', {
'fields': ('target_locale', 'locale_disambiguation'),
}))
def queryset(self, request):
return models.Addon.unfiltered
def status_with_admin_manage_link(self, obj):
# We don't want admins to be able to change the status without logging
# that it happened. So, for now, instead of letting them change the
# status in the django admin, display it as readonly and link to the
# zadmin manage page, which does implement the logging part (and more).
# https://github.com/mozilla/addons-server/issues/7268
link = reverse('zadmin.addon_manage', args=(obj.slug,))
return format_html(u'<a href="{}">{}</a>',
link, obj.get_status_display())
status_with_admin_manage_link.short_description = _(u'Status')
class FeatureAdmin(admin.ModelAdmin):
raw_id_fields = ('addon',)
list_filter = ('application', 'locale')
list_display = ('addon', 'application', 'locale')
class FrozenAddonAdmin(admin.ModelAdmin):
raw_id_fields = ('addon',)
class CompatOverrideRangeInline(admin.TabularInline):
model = models.CompatOverrideRange
# Exclude type since firefox only supports blocking right now.
exclude = ('type',)
class CompatOverrideAdminForm(forms.ModelForm):
def clean(self):
if '_confirm' in self.data:
raise forms.ValidationError('Click "Save" to confirm changes.')
return self.cleaned_data
class CompatOverrideAdmin(admin.ModelAdmin):
raw_id_fields = ('addon',)
inlines = [CompatOverrideRangeInline]
form = CompatOverrideAdminForm
class ReplacementAddonForm(forms.ModelForm):
def clean_path(self):
path = None
try:
path = self.data.get('path')
site = settings.SITE_URL
if models.ReplacementAddon.path_is_external(path):
if path.startswith(site):
raise forms.ValidationError(
'Paths for [%s] should be relative, not full URLs '
'including the domain name' % site)
validators.URLValidator()(path)
else:
path = ('/' if not path.startswith('/') else '') + path
resolve(path)
except forms.ValidationError as validation_error:
# Re-raise the ValidationError about full paths for SITE_URL.
raise validation_error
except Exception:
raise forms.ValidationError('Path [%s] is not valid' % path)
return path
class ReplacementAddonAdmin(admin.ModelAdmin):
list_display = ('guid', 'path', 'guid_slug', '_url')
form = ReplacementAddonForm
def _url(self, obj):
guid_param = urlencode({'guid': obj.guid})
return format_html(
'<a href="{}">Test</a>',
reverse('addons.find_replacement') + '?%s' % guid_param)
def guid_slug(self, obj):
try:
slug = models.Addon.objects.get(guid=obj.guid).slug
except models.Addon.DoesNotExist:
slug = ugettext(u'- Add-on not on AMO -')
return slug
def has_module_permission(self, request):
# If one can see the changelist, then they have access to the module.
return self.has_change_permission(request)
def has_change_permission(self, request, obj=None):
# If an obj is passed, then we're looking at the individual change page
# for a replacement addon, otherwise we're looking at the list. When
# looking at the list, we also allow users with Addons:Edit - they
# won't be able to make any changes but they can see the list.
if obj is not None:
return super(ReplacementAddonAdmin, self).has_change_permission(
request, obj=obj)
else:
return (
acl.action_allowed(request, amo.permissions.ADDONS_EDIT) or
super(ReplacementAddonAdmin, self).has_change_permission(
request, obj=obj))
admin.site.register(models.DeniedGuid)
admin.site.register(models.Addon, AddonAdmin)
admin.site.register(models.FrozenAddon, FrozenAddonAdmin)
admin.site.register(models.CompatOverride, CompatOverrideAdmin)
admin.site.register(models.ReplacementAddon, ReplacementAddonAdmin)
|
{
"content_hash": "afd9ceb838fbdbce172f75f5f2e1deee",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 36.44642857142857,
"alnum_prop": 0.6037889923240242,
"repo_name": "lavish205/olympia",
"id": "26cf0f359c64e4c92c08819f85943d3d955d4027",
"size": "6123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/addons/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "808053"
},
{
"name": "HTML",
"bytes": "614229"
},
{
"name": "JavaScript",
"bytes": "1075018"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5064850"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11467"
},
{
"name": "Smarty",
"bytes": "1758"
}
],
"symlink_target": ""
}
|
import itertools
import ggrc
from ggrc import models
from integration.ggrc import TestCase
from integration.ggrc import generator
counter = 0
def make_name(msg):
global counter
counter += 1
return msg + str(counter)
def relate(src, dst):
if src < dst:
return (src, dst)
else:
return (dst, src)
class automapping_count_limit(object):
def __init__(self, new_limit):
self.new_limit = new_limit
def __enter__(self):
self.original_limit = ggrc.automapper.rules.count_limit
ggrc.automapper.rules.count_limit = self.new_limit
def __exit__(self, type, value, traceback):
ggrc.automapper.rules.count_limit = self.original_limit
class TestAutomappings(TestCase):
def setUp(self):
super(TestAutomappings, self).setUp()
self.gen = generator.ObjectGenerator()
self.api = self.gen.api
def create_object(self, cls, data):
name = cls._inflector.table_singular
data['context'] = None
res, obj = self.gen.generate(cls, name, {name: data})
self.assertIsNotNone(obj, '%s, %s: %s' % (name, str(data), str(res)))
return obj
def create_mapping(self, src, dst):
return self.create_object(models.Relationship, {
'source': {'id': src.id, 'type': src.type},
'destination': {'id': dst.id, 'type': dst.type}
})
def assert_mapping(self, obj1, obj2, missing=False):
ggrc.db.session.flush()
rel = models.Relationship.find_related(obj1, obj2)
if not missing:
self.assertIsNotNone(rel,
msg='%s not mapped to %s' % (obj1.type, obj2.type))
revisions = models.Revision.query.filter_by(
resource_type='Relationship',
resource_id=rel.id,
).count()
self.assertEqual(revisions, 1)
else:
self.assertIsNone(rel,
msg='%s mapped to %s' % (obj1.type, obj2.type))
def assert_mapping_implication(self, to_create, implied, relevant=set()):
objects = set()
for obj in relevant:
objects.add(obj)
mappings = set()
if type(to_create) is not list:
to_create = [to_create]
for src, dst in to_create:
objects.add(src)
objects.add(dst)
self.create_mapping(src, dst)
mappings.add(relate(src, dst))
if type(implied) is not list:
implied = [implied]
for src, dst in implied:
objects.add(src)
objects.add(dst)
self.assert_mapping(src, dst)
mappings.add(relate(src, dst))
possible = set()
for src, dst in itertools.product(objects, objects):
possible.add(relate(src, dst))
for src, dst in possible - mappings:
self.assert_mapping(src, dst, missing=True)
def with_permutations(self, mk1, mk2, mk3):
obj1, obj2, obj3 = mk1(), mk2(), mk3()
self.assert_mapping_implication(
to_create=[(obj1, obj2), (obj2, obj3)],
implied=(obj1, obj3),
)
obj1, obj2, obj3 = mk1(), mk2(), mk3()
self.assert_mapping_implication(
to_create=[(obj2, obj3), (obj1, obj2)],
implied=(obj1, obj3),
)
def test_mapping_directive_to_a_program(self):
self.with_permutations(
lambda: self.create_object(models.Program, {
'title': make_name('Program')
}),
lambda: self.create_object(models.Regulation, {
'title': make_name('Test PD Regulation')
}),
lambda: self.create_object(models.Objective, {
'title': make_name('Objective')
}),
)
program = self.create_object(models.Program, {
'title': make_name('Program')
})
objective1 = self.create_object(models.Objective, {
'title': make_name('Objective')
})
objective2 = self.create_object(models.Objective, {
'title': make_name('Objective')
})
self.assert_mapping_implication(
to_create=[(program, objective1), (objective1, objective2)],
implied=[],
)
def test_mapping_to_sections(self):
regulation = self.create_object(models.Regulation, {
'title': make_name('Test Regulation')
})
section = self.create_object(models.Section, {
'title': make_name('Test section'),
})
objective = self.create_object(models.Objective, {
'title': make_name('Objective')
})
self.assert_mapping_implication(
to_create=[(regulation, section), (objective, section)],
implied=(objective, regulation),
)
program = self.create_object(models.Program, {
'title': make_name('Program')
})
self.assert_mapping_implication(
to_create=[(objective, program)],
implied=[(regulation, section),
(objective, section),
(objective, regulation)],
relevant=[regulation, section, objective]
)
def test_automapping_limit(self):
with automapping_count_limit(-1):
program = self.create_object(models.Program, {
'title': make_name('Program')
})
regulation = self.create_object(models.Regulation, {
'title': make_name('Test PD Regulation')
})
objective = self.create_object(models.Objective, {
'title': make_name('Objective')
})
self.assert_mapping_implication(
to_create=[(regulation, objective), (objective, program)],
implied=[],
)
def test_mapping_to_objective(self):
regulation = self.create_object(models.Regulation, {
'title': make_name('Test PD Regulation')
})
section = self.create_object(models.Section, {
'title': make_name('Test section'),
'directive': {'id': regulation.id},
})
control = self.create_object(models.Control, {
'title': make_name('Test control')
})
objective = self.create_object(models.Objective, {
'title': make_name('Test control')
})
self.assert_mapping_implication(
to_create=[(regulation, section),
(section, objective),
(objective, control)],
implied=[
(regulation, objective),
(section, control),
(regulation, control),
]
)
program = self.create_object(models.Program, {
'title': make_name('Program')
})
self.assert_mapping_implication(
to_create=[(control, program)],
implied=[
(regulation, section),
(section, objective),
(objective, control),
(regulation, objective),
(section, control),
(regulation, control),
],
relevant=[regulation, section, objective, control]
)
def test_mapping_between_objectives(self):
regulation = self.create_object(models.Regulation, {
'title': make_name('Test PD Regulation')
})
section = self.create_object(models.Section, {
'title': make_name('Test section'),
'directive': {'id': regulation.id},
})
objective1 = self.create_object(models.Objective, {
'title': make_name('Test Objective')
})
objective2 = self.create_object(models.Objective, {
'title': make_name('Test Objective')
})
self.assert_mapping_implication(
to_create=[(regulation, section),
(section, objective1),
(objective1, objective2)],
implied=[
(section, objective2),
(regulation, objective1),
(regulation, objective2),
]
)
def test_mapping_nested_controls(self):
objective = self.create_object(models.Objective, {
'title': make_name('Test Objective')
})
control_p = self.create_object(models.Control, {
'title': make_name('Test control')
})
control1 = self.create_object(models.Control, {
'title': make_name('Test control')
})
control2 = self.create_object(models.Control, {
'title': make_name('Test control')
})
self.assert_mapping_implication(
to_create=[(objective, control_p),
(control_p, control1),
(control_p, control2)],
implied=[(objective, control1), (objective, control2)]
)
def test_automapping_permissions_check(self):
_, creator = self.gen.generate_person(user_role="Creator")
_, admin = self.gen.generate_person(user_role="Administrator")
program = self.create_object(models.Program, {
'title': make_name('Program')
})
regulation = self.create_object(models.Regulation, {
'title': make_name('Regulation'),
'owners': [{"id": admin.id}],
})
owners = [{"id": creator.id}]
self.api.set_user(creator)
section = self.create_object(models.Section, {
'title': make_name('Section'),
'owners': owners,
})
objective = self.create_object(models.Objective, {
'title': make_name('Objective'),
'owners': owners,
})
control = self.create_object(models.Control, {
'title': make_name('Control'),
'owners': owners,
})
self.api.set_user(admin)
self.assert_mapping_implication(
to_create=[(program, regulation), (regulation, section)],
implied=[(program, section)]
)
self.api.set_user(creator)
self.assert_mapping_implication(
to_create=[(section, objective),
(control, objective)],
implied=[(program, regulation),
(program, section),
(section, regulation),
(control, section)],
)
def test_automapping_control_assesment(self):
program = self.create_object(models.Program, {
'title': make_name('Program')
})
regulation = self.create_object(models.Regulation, {
'title': make_name('Test Regulation')
})
audit = self.create_object(models.Audit, {
'title': make_name('Audit'),
'program': {'id': program.id},
'status': 'Planned',
})
control = self.create_object(models.Control, {
'title': make_name('Test control')
})
assessment = self.create_object(models.Assessment, {
'title': make_name('Test CA'),
'audit': {
'id': audit.id,
'type': audit.type
},
'object': {
'id': control.id,
'type': control.type
},
})
self.assert_mapping_implication(
to_create=[(program, regulation), (regulation, assessment)],
implied=[(program, assessment)]
)
|
{
"content_hash": "a2a216177dcb0a5a1fd268e52b6d3da4",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 78,
"avg_line_length": 31.09880239520958,
"alnum_prop": 0.5887166650620969,
"repo_name": "VinnieJohns/ggrc-core",
"id": "44d3268a80bd52c235d438f1f3d9e2ccd6ed3de4",
"size": "10500",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/integration/ggrc/automapper/test_automappings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "226950"
},
{
"name": "HTML",
"bytes": "1060386"
},
{
"name": "JavaScript",
"bytes": "1927277"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2762348"
},
{
"name": "Shell",
"bytes": "31100"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import six
from sentry.app import locks
from sentry.models import OrganizationOption
from sentry.plugins import providers
from sentry.models import Integration
from sentry.utils.http import absolute_uri
from sentry.integrations.exceptions import ApiError, IntegrationError
from sentry.models.apitoken import generate_token
from .webhook import parse_raw_user_email, parse_raw_user_name
class BitbucketRepositoryProvider(providers.IntegrationRepositoryProvider):
name = "Bitbucket"
def get_installation(self, integration_id, organization_id):
if integration_id is None:
raise IntegrationError("Bitbucket requires an integration id.")
integration_model = Integration.objects.get(
id=integration_id, organizations=organization_id, provider="bitbucket"
)
return integration_model.get_installation(organization_id)
def get_repository_data(self, organization, config):
installation = self.get_installation(config.get("installation"), organization.id)
client = installation.get_client()
try:
repo = client.get_repo(config["identifier"])
except Exception as e:
installation.raise_error(e)
else:
config["external_id"] = six.text_type(repo["uuid"])
config["name"] = repo["full_name"]
return config
def get_webhook_secret(self, organization):
# TODO(LB): Revisit whether Integrations V3 should be using OrganizationOption for storage
lock = locks.get(u"bitbucket:webhook-secret:{}".format(organization.id), duration=60)
with lock.acquire():
secret = OrganizationOption.objects.get_value(
organization=organization, key="bitbucket:webhook_secret"
)
if secret is None:
secret = generate_token()
OrganizationOption.objects.set_value(
organization=organization, key="bitbucket:webhook_secret", value=secret
)
return secret
def build_repository_config(self, organization, data):
installation = self.get_installation(data.get("installation"), organization.id)
client = installation.get_client()
try:
resp = client.create_hook(
data["identifier"],
{
"description": "sentry-bitbucket-repo-hook",
"url": absolute_uri(
u"/extensions/bitbucket/organizations/{}/webhook/".format(organization.id)
),
"active": True,
"events": ["repo:push", "pullrequest:fulfilled"],
},
)
except Exception as e:
installation.raise_error(e)
else:
return {
"name": data["identifier"],
"external_id": data["external_id"],
"url": u"https://bitbucket.org/{}".format(data["name"]),
"config": {"name": data["name"], "webhook_id": resp["uuid"]},
"integration_id": data["installation"],
}
def on_delete_repository(self, repo):
installation = self.get_installation(repo.integration_id, repo.organization_id)
client = installation.get_client()
try:
client.delete_hook(repo.config["name"], repo.config["webhook_id"])
except ApiError as exc:
if exc.code == 404:
return
raise
def _format_commits(self, repo, commit_list):
return [
{
"id": c["hash"],
"repository": repo.name,
"author_email": parse_raw_user_email(c["author"]["raw"]),
"author_name": parse_raw_user_name(c["author"]["raw"]),
"message": c["message"],
"timestamp": self.format_date(c["date"]),
"patch_set": c.get("patch_set"),
}
for c in commit_list
]
def compare_commits(self, repo, start_sha, end_sha):
installation = self.get_installation(repo.integration_id, repo.organization_id)
client = installation.get_client()
# use config name because that is kept in sync via webhooks
name = repo.config["name"]
if start_sha is None:
try:
res = client.get_last_commits(name, end_sha)
except Exception as e:
installation.raise_error(e)
else:
return self._format_commits(repo, res[:10])
else:
try:
res = client.compare_commits(name, start_sha, end_sha)
except Exception as e:
installation.raise_error(e)
else:
return self._format_commits(repo, res)
def repository_external_slug(self, repo):
return repo.name
|
{
"content_hash": "01f827521d8da55db2428ec070e7a84c",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 98,
"avg_line_length": 39.08730158730159,
"alnum_prop": 0.580507614213198,
"repo_name": "mvaled/sentry",
"id": "d051c8f58e56c2360e6d1da1accb1040f3232ee4",
"size": "4925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/integrations/bitbucket/repository.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
import rospy
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import State
from mavros_msgs.srv import CommandBool, CommandBoolRequest
from mavros_msgs.srv import SetMode, SetModeRequest
current_state = State()
def state_cb(msg):
global current_state
current_state = msg
def offboard_node():
rospy.init_node("offb_node")
r = rospy.Rate(20)
rospy.Subscriber("mavros/state", State, state_cb)
local_pos_pub = rospy.Publisher("mavros/setpoint_position/local",
PoseStamped,
queue_size=1000)
arming_client = rospy.ServiceProxy("mavros/cmd/arming", CommandBool)
set_mode_client = rospy.ServiceProxy("mavros/set_mode", SetMode)
while not rospy.is_shutdown() and not current_state.connected:
r.sleep()
pose = PoseStamped()
pose.pose.position.x = 0
pose.pose.position.y = 0
pose.pose.position.z = 2
for i in range(100):
local_pos_pub.publish(pose)
r.sleep()
if rospy.is_shutdown():
break
offb_set_mode = SetModeRequest()
offb_set_mode.custom_mode = "OFFBOARD"
arm_cmd = CommandBoolRequest()
arm_cmd.value = True
last_request = rospy.Time.now()
while not rospy.is_shutdown():
if current_state.mode != "OFFBOARD" \
and (rospy.Time.now() - last_request > rospy.Duration(5)):
try:
offb_set_mode_resp = set_mode_client(offb_set_mode)
if offb_set_mode_resp.mode_sent:
rospy.loginfo("Offboard enabled")
except rospy.ServiceException as e:
rospy.logwarn(e)
last_request = rospy.Time.now()
else:
if not current_state.armed \
and (rospy.Time.now() - last_request > rospy.Duration(5)):
try:
arm_cmd_resp = arming_client(arm_cmd)
if arm_cmd_resp.success:
rospy.loginfo("Vehicle armed")
except rospy.ServiceException as e:
rospy.logwarn(e)
last_request = rospy.Time.now()
local_pos_pub.publish(pose)
r.sleep()
if __name__ == "__main__":
try:
offboard_node()
except rospy.ROSInterruptException:
pass
|
{
"content_hash": "74190983861586f4dfc8689c8e61cd58",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 74,
"avg_line_length": 28.530864197530864,
"alnum_prop": 0.584162700129814,
"repo_name": "uenota/px4_simulation_stack",
"id": "ca96571ab15c2f149f6464532e2774344d7cc4cf",
"size": "2334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/offboard_sample.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "17062"
},
{
"name": "CMake",
"bytes": "2910"
},
{
"name": "Dockerfile",
"bytes": "2525"
},
{
"name": "Python",
"bytes": "2334"
},
{
"name": "Shell",
"bytes": "807"
}
],
"symlink_target": ""
}
|
"""
The module contains an implementation of uBoost algorithm.
The main goal of **uBoost** is to fight correlation between predictions and some variables (i.e. mass of particle).
* `uBoostBDT` is a modified version of AdaBoost, that targets to obtain efficiency uniformity at the specified level (global efficiency)
* `uBoostClassifier` is a combination of uBoostBDTs for different efficiencies
This implementation is more advanced than one described in the original paper,
contains smoothing and trains classifiers in threads, has `learning_rate` and `uniforming_rate` parameters,
does automatic weights renormalization and supports SAMME.R modification to use predicted probabilities.
Only binary classification is implemented.
See also: :class:`hep_ml.losses.BinFlatnessLossFunction`, :class:`hep_ml.losses.KnnFlatnessLossFunction`,
:class:`hep_ml.losses.KnnAdaLossFunction`
to fight correlation.
Examples
________
To get uniform prediction in mass for background:
>>> base_tree = DecisionTreeClassifier(max_depth=3)
>>> clf = uBoostClassifier(uniform_features=['mass'], uniform_label=0, base_estimator=base_tree,
>>> train_features=['pt', 'flight_time'])
>>> clf.fit(train_data, train_labels, sample_weight=train_weights)
>>> proba = clf.predict_proba(test_data)
To get uniform prediction in Dalitz variables for signal
>>> clf = uBoostClassifier(uniform_features=['mass_12', 'mass_23'], uniform_label=1, base_estimator=base_tree,
>>> train_features=['pt', 'flight_time'])
>>> clf.fit(train_data, train_labels, sample_weight=train_weights)
>>> proba = clf.predict_proba(test_data)
"""
# Authors:
# Alex Rogozhnikov <axelr@yandex-team.ru>
# Nikita Kazeev <kazeevn@yandex-team.ru>
from six.moves import zip
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.random import check_random_state
from .commonutils import sigmoid_function, map_on_cluster, \
compute_knn_indices_of_same_class, compute_cut_for_efficiency, check_xyw
from . import commonutils
from .metrics_utils import compute_group_efficiencies_by_indices
__author__ = "Alex Rogozhnikov, Nikita Kazeev"
__all__ = ["uBoostBDT", "uBoostClassifier"]
class uBoostBDT(BaseEstimator, ClassifierMixin):
def __init__(self,
uniform_features,
uniform_label,
target_efficiency=0.5,
n_neighbors=50,
subsample=1.0,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
uniforming_rate=1.,
train_features=None,
smoothing=0.0,
random_state=None,
algorithm="SAMME"):
"""
uBoostBDT is AdaBoostClassifier, which is modified to have flat
efficiency of signal (class=1) along some variables.
Efficiency is only guaranteed at the cut,
corresponding to global efficiency == target_efficiency.
Can be used alone, without uBoostClassifier.
:param uniform_features: list of strings, names of variables, along which
flatness is desired
:param uniform_label: int, label of class on which uniformity is desired
(typically 0 for background, 1 for signal).
:param target_efficiency: float, the flatness is obtained at global BDT cut,
corresponding to global efficiency
:param n_neighbors: int, (default=50) the number of neighbours,
which are used to compute local efficiency
:param subsample: float (default=1.0), part of training dataset used
to build each base estimator.
:param base_estimator: classifier, optional (default=DecisionTreeClassifier(max_depth=2))
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
`classes_` and `n_classes_` attributes.
:param n_estimators: integer, optional (default=50)
number of estimators used.
:param learning_rate: float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate``
and ``n_estimators``.
:param uniforming_rate: float, optional (default=1.)
how much do we take into account the uniformity of signal,
there is a trade-off between uniforming_rate and the speed of
uniforming, zero value corresponds to plain AdaBoost
:param train_features: list of strings, names of variables used in
fit/predict. If None, all the variables are used
(including uniform_variables)
:param smoothing: float, (default=0.), used to smooth computing of local
efficiencies, 0.0 corresponds to usual uBoost
:param random_state: int, RandomState instance or None (default None)
Reference
----------
.. [1] J. Stevens, M. Williams 'uBoost: A boosting method for
producing uniform selection efficiencies from multivariate classifiers'
"""
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.uniforming_rate = uniforming_rate
self.uniform_features = uniform_features
self.target_efficiency = target_efficiency
self.n_neighbors = n_neighbors
self.subsample = subsample
self.train_features = train_features
self.smoothing = smoothing
self.uniform_label = uniform_label
self.random_state = random_state
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None, neighbours_matrix=None):
"""Build a boosted classifier from the training set (X, y).
:param X: array-like of shape [n_samples, n_features]
:param y: labels, array of shape [n_samples] with 0 and 1.
:param sample_weight: array-like of shape [n_samples] or None
:param neighbours_matrix: array-like of shape [n_samples, n_neighbours],
each row contains indices of signal neighbours
(neighbours should be computed for background too),
if None, this matrix is computed.
:return: self
"""
if self.smoothing < 0:
raise ValueError("Smoothing must be non-negative")
if not isinstance(self.base_estimator, BaseEstimator):
raise TypeError("estimator must be a subclass of BaseEstimator")
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than zero.")
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if self.base_estimator is None:
self.base_estimator = DecisionTreeClassifier(max_depth=2)
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator, 'predict_proba'):
raise TypeError(
"uBoostBDT with algorithm='SAMME.R' requires "
"that the weak learner have a predict_proba method.\n"
"Please change the base estimator or set algorithm='SAMME' instead.")
assert np.in1d(y, [0, 1]).all(), \
"only two-class classification is implemented, with labels 0 and 1"
self.signed_uniform_label = 2 * self.uniform_label - 1
if neighbours_matrix is not None:
assert np.shape(neighbours_matrix) == (len(X), self.n_neighbors), \
"Wrong shape of neighbours_matrix"
self.knn_indices = neighbours_matrix
else:
assert self.uniform_features is not None, \
"uniform_variables should be set"
self.knn_indices = compute_knn_indices_of_same_class(
X.ix[:, self.uniform_features], y, self.n_neighbors)
sample_weight = commonutils.check_sample_weight(y, sample_weight=sample_weight, normalize=True)
assert np.all(sample_weight >= 0.), 'the weights should be non-negative'
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = []
# score cuts correspond to
# global efficiency == target_efficiency on each iteration.
self.score_cuts_ = []
x_train_features = self._get_train_features(X)
x_train_features, y, sample_weight = check_xyw(x_train_features, y, sample_weight)
self.random_state_ = check_random_state(self.random_state)
self._boost(x_train_features, y, sample_weight)
self.score_cut = self.signed_uniform_label * compute_cut_for_efficiency(
self.target_efficiency, y == self.uniform_label, self.decision_function(X) * self.signed_uniform_label)
assert np.allclose(self.score_cut, self.score_cuts_[-1], rtol=1e-10, atol=1e-10), \
"score cut doesn't appear to coincide with the staged one"
assert len(self.estimators_) == len(self.estimator_weights_) == len(self.score_cuts_)
return self
def _make_estimator(self):
estimator = clone(self.base_estimator)
# self.estimators_.append(estimator)
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
return estimator
def _estimator_score(self, estimator, X):
if self.algorithm == "SAMME":
return 2 * estimator.predict(X) - 1.
else:
p = estimator.predict_proba(X)
p[p <= 1e-5] = 1e-5
return np.log(p[:, 1] / p[:, 0])
@staticmethod
def _normalize_weight(y, weight):
# frequently algorithm assigns very big weight to signal events
# compared to background ones (or visa versa, if want to be uniform in bck)
return commonutils.check_sample_weight(y, sample_weight=weight, normalize=True, normalize_by_class=True)
def _compute_uboost_multipliers(self, sample_weight, score, y):
"""Returns uBoost multipliers to sample_weight and computed global cut"""
signed_score = score * self.signed_uniform_label
signed_score_cut = compute_cut_for_efficiency(self.target_efficiency, y == self.uniform_label, signed_score)
global_score_cut = signed_score_cut * self.signed_uniform_label
local_efficiencies = compute_group_efficiencies_by_indices(signed_score, self.knn_indices, cut=signed_score_cut,
smoothing=self.smoothing)
# pay attention - sample_weight should be used only here
e_prime = np.average(np.abs(local_efficiencies - self.target_efficiency),
weights=sample_weight)
is_uniform_class = (y == self.uniform_label)
# beta = np.log((1.0 - e_prime) / e_prime)
# changed to log(1. / e_prime), otherwise this can lead to the situation
# where beta is negative (which is a disaster).
# Mike (uboost author) said he didn't take that into account.
beta = np.log(1. / e_prime)
boost_weights = np.exp((self.target_efficiency - local_efficiencies) * is_uniform_class *
(beta * self.uniforming_rate))
return boost_weights, global_score_cut
def _boost(self, X, y, sample_weight):
"""Implement a single boost using the SAMME or SAMME.R algorithm,
which is modified in uBoost way"""
cumulative_score = np.zeros(len(X))
y_signed = 2 * y - 1
for iteration in range(self.n_estimators):
estimator = self._make_estimator()
mask = _generate_subsample_mask(len(X), self.subsample, self.random_state_)
estimator.fit(X[mask], y[mask], sample_weight=sample_weight[mask])
# computing estimator weight
if self.algorithm == 'SAMME':
y_pred = estimator.predict(X)
# Error fraction
estimator_error = np.average(y_pred != y, weights=sample_weight)
estimator_error = np.clip(estimator_error, 1e-6, 1. - 1e-6)
estimator_weight = self.learning_rate * 0.5 * (
np.log((1. - estimator_error) / estimator_error))
score = estimator_weight * (2 * y_pred - 1)
else:
estimator_weight = self.learning_rate * 0.5
score = estimator_weight * self._estimator_score(estimator, X)
# correcting the weights and score according to predictions
sample_weight *= np.exp(- y_signed * score)
sample_weight = self._normalize_weight(y, sample_weight)
cumulative_score += score
uboost_multipliers, global_score_cut = \
self._compute_uboost_multipliers(sample_weight, cumulative_score, y)
sample_weight *= uboost_multipliers
sample_weight = self._normalize_weight(y, sample_weight)
self.score_cuts_.append(global_score_cut)
self.estimators_.append(estimator)
self.estimator_weights_.append(estimator_weight)
# erasing from memory
self.knn_indices = None
def _get_train_features(self, X):
"""Gets the DataFrame and returns only columns
that should be used in fitting / predictions"""
if self.train_features is None:
return X
else:
return X[self.train_features]
def staged_decision_function(self, X):
"""Decision function after each stage of boosting.
Float for each sample, the greater --- the more signal like event is.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with floats.
"""
X = self._get_train_features(X)
score = np.zeros(len(X))
for classifier, weight in zip(self.estimators_, self.estimator_weights_):
score += self._estimator_score(classifier, X) * weight
yield score
def decision_function(self, X):
"""Decision function. Float for each sample, the greater --- the more signal like event is.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with floats
"""
return commonutils.take_last(self.staged_decision_function(X))
def predict(self, X):
"""Predict classes for each sample
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with predicted classes.
"""
return np.array(self.decision_function(X) > self.score_cut, dtype=int)
def predict_proba(self, X):
"""Predict probabilities
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples, n_classes] with probabilities.
"""
return commonutils.score_to_proba(self.decision_function(X))
def staged_predict_proba(self, X):
"""Predicted probabilities for each sample after each stage of boosting.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: sequence of numpy.arrays of shape [n_samples, n_classes]
"""
for score in self.staged_decision_function(X):
yield commonutils.score_to_proba(score)
def _uboost_predict_score(self, X):
"""Method added specially for uBoostClassifier"""
return sigmoid_function(self.decision_function(X) - self.score_cut,
self.smoothing)
def _uboost_staged_predict_score(self, X):
"""Method added specially for uBoostClassifier"""
for cut, score in zip(self.score_cuts_, self.staged_decision_function(X)):
yield sigmoid_function(score - cut, self.smoothing)
@property
def feature_importances_(self):
"""Return the feature importances for `train_features`.
:return: array of shape [n_features], the order is the same as in `train_features`
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted,"
" call `fit` before `feature_importances_`.")
return sum(tree.feature_importances_ * weight for tree, weight
in zip(self.estimators_, self.estimator_weights_))
def _train_classifier(classifier, X_train_vars, y, sample_weight, neighbours_matrix):
# supplementary function to train separate parts of uBoost on cluster
return classifier.fit(X_train_vars, y,
sample_weight=sample_weight,
neighbours_matrix=neighbours_matrix)
class uBoostClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, uniform_features,
uniform_label,
train_features=None,
n_neighbors=50,
efficiency_steps=20,
n_estimators=40,
base_estimator=None,
subsample=1.0,
algorithm="SAMME",
smoothing=None,
n_threads=1,
random_state=None):
"""uBoost classifier, an algorithm of boosting targeted to obtain
flat efficiency in signal along some variables (e.g. mass).
In principle, uBoost is ensemble of uBoostBDTs. See [1] for details.
Parameters
----------
:param uniform_features: list of strings, names of variables,
along which flatness is desired
:param uniform_label: int,
tha label of class for which uniformity is desired
:param train_features: list of strings,
names of variables used in fit/predict.
if None, all the variables are used (including uniform_variables)
:param n_neighbors: int, (default=50) the number of neighbours,
which are used to compute local efficiency
:param n_estimators: integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
:param efficiency_steps: integer, optional (default=20),
How many uBoostBDTs should be trained
(each with its own target_efficiency)
:param base_estimator: object, optional (default=DecisionTreeClassifier(max_depth=2))
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required,
as well as proper `classes_` and `n_classes_` attributes.
:param subsample: float (default =1.) part of training dataset used
to train each base classifier.
:param smoothing: float, default=None, used to smooth computing of
local efficiencies, 0.0 corresponds to usual uBoost,
:param random_state: int, RandomState instance or None, (default=None)
:param n_threads: int, number of threads used.
Reference
----------
.. [1] J. Stevens, M. Williams 'uBoost: A boosting method
for producing uniform selection efficiencies from multivariate classifiers'
"""
self.uniform_features = uniform_features
self.uniform_label = uniform_label
self.knn = n_neighbors
self.efficiency_steps = efficiency_steps
self.random_state = random_state
self.n_estimators = n_estimators
self.base_estimator = base_estimator
self.subsample = subsample
self.train_features = train_features
self.smoothing = smoothing
self.n_threads = n_threads
self.algorithm = algorithm
def _get_train_features(self, X):
if self.train_features is not None:
return X[self.train_features]
else:
return X
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set.
:param X: data, pandas.DatFrame of shape [n_samples, n_features]
:param y: labels, array of shape [n_samples] with 0 and 1.
The target values (integers that correspond to classes).
:param sample_weight: array-like of shape [n_samples] with weights or None
:return: self
"""
if self.uniform_features is None:
raise ValueError("Please set uniform variables")
if len(self.uniform_features) == 0:
raise ValueError("The set of uniform variables cannot be empty")
assert np.in1d(y, [0, 1]).all(), \
"only two-class classification is implemented"
if self.base_estimator is None:
self.base_estimator = DecisionTreeClassifier(max_depth=2)
X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight, classification=True)
data_train_features = self._get_train_features(X)
if self.smoothing is None:
self.smoothing = 10. / self.efficiency_steps
neighbours_matrix = compute_knn_indices_of_same_class(
X[self.uniform_features], y, n_neighbours=self.knn)
self.target_efficiencies = np.linspace(0, 1, self.efficiency_steps + 2)[1:-1]
self.classifiers = []
for efficiency in self.target_efficiencies:
classifier = uBoostBDT(
uniform_features=self.uniform_features,
uniform_label=self.uniform_label,
train_features=None,
target_efficiency=efficiency, n_neighbors=self.knn,
n_estimators=self.n_estimators,
base_estimator=self.base_estimator,
random_state=self.random_state, subsample=self.subsample,
smoothing=self.smoothing, algorithm=self.algorithm)
self.classifiers.append(classifier)
self.classifiers = map_on_cluster('threads-{}'.format(self.n_threads),
_train_classifier,
self.classifiers,
self.efficiency_steps * [data_train_features],
self.efficiency_steps * [y],
self.efficiency_steps * [sample_weight],
self.efficiency_steps * [neighbours_matrix])
return self
def predict(self, X):
"""Predict labels
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: numpy.array of shape [n_samples]
"""
return self.predict_proba(X).argmax(axis=1)
def predict_proba(self, X):
"""Predict probabilities
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples, n_classes] with probabilities.
"""
X = self._get_train_features(X)
score = sum(clf._uboost_predict_score(X) for clf in self.classifiers)
return commonutils.score_to_proba(score / self.efficiency_steps)
def staged_predict_proba(self, X):
"""Predicted probabilities for each sample after each stage of boosting.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: sequence of numpy.arrays of shape [n_samples, n_classes]
"""
X = self._get_train_features(X)
for scores in zip(*[clf._uboost_staged_predict_score(X) for clf in self.classifiers]):
yield commonutils.score_to_proba(sum(scores) / self.efficiency_steps)
def _generate_subsample_mask(n_samples, subsample, random_generator):
"""
:param float subsample: part of samples to be left
:param random_generator: numpy.random.RandomState instance
"""
assert 0 < subsample <= 1., 'subsample should be in range (0, 1]'
if subsample == 1.0:
mask = slice(None, None, None)
else:
mask = random_generator.uniform(size=n_samples) < subsample
return mask
|
{
"content_hash": "c9bb14ad8a4f8a44e283215ef5b9d609",
"timestamp": "",
"source": "github",
"line_count": 563,
"max_line_length": 136,
"avg_line_length": 43.202486678507995,
"alnum_prop": 0.6258685195082844,
"repo_name": "anaderi/hep_ml",
"id": "420748500412190b39b4584397a462297b5296b7",
"size": "24323",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "hep_ml/uboost.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "203512"
},
{
"name": "Shell",
"bytes": "598"
}
],
"symlink_target": ""
}
|
import collections.abc
import functools
BUFFER_MAX = 1 * 1024 * 1024 # We'll buffer up to 1MB
def add_vary_callback(*varies):
def inner(request, response):
vary = set(response.vary if response.vary is not None else [])
vary |= set(varies)
response.vary = vary
return inner
def add_vary(*varies):
def inner(view):
@functools.wraps(view)
def wrapped(context, request):
request.add_response_callback(add_vary_callback(*varies))
return view(context, request)
return wrapped
return inner
def cache_control(
seconds, *, public=True, stale_while_revalidate=None, stale_if_error=None
):
def inner(view):
@functools.wraps(view)
def wrapped(context, request):
response = view(context, request)
if not request.registry.settings.get("pyramid.prevent_http_cache", False):
if seconds:
if public:
response.cache_control.public = True
else:
response.cache_control.private = True
response.cache_control.stale_while_revalidate = (
stale_while_revalidate
)
response.cache_control.stale_if_error = stale_if_error
response.cache_control.max_age = seconds
else:
response.cache_control.no_cache = True
response.cache_control.no_store = True
response.cache_control.must_revalidate = True
return response
return wrapped
return inner
def conditional_http_tween_factory(handler, registry):
def conditional_http_tween(request):
response = handler(request)
# If the Last-Modified header has been set, we want to enable the
# conditional response processing.
if response.last_modified is not None:
response.conditional_response = True
streaming = not isinstance(response.app_iter, collections.abc.Sequence)
# We want to only enable the conditional machinery if either we
# were given an explicit ETag header by the view or we have a
# buffered response and can generate the ETag header ourself.
if response.etag is not None:
response.conditional_response = True
# We can only reasonably implement automatic ETags on 200 responses
# to GET or HEAD requests. The subtles of doing it in other cases
# are too hard to get right.
elif request.method in {"GET", "HEAD"} and response.status_code == 200:
# If we have a streaming response, but it's small enough, we'll
# just go ahead and buffer it in memory so that we can generate a
# ETag for it.
if (
streaming
and response.content_length is not None
and response.content_length <= BUFFER_MAX
):
response.body
streaming = False
# Anything that has survived as a streaming response at this point
# and doesn't have an ETag header already, we'll go ahead and give
# it one.
if not streaming:
response.conditional_response = True
response.md5_etag()
return response
return conditional_http_tween
def includeme(config):
config.add_tween("warehouse.cache.http.conditional_http_tween_factory")
|
{
"content_hash": "4f389b7f1314bdec1672e5cf96c48811",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 86,
"avg_line_length": 33.74285714285714,
"alnum_prop": 0.5955405023990968,
"repo_name": "dstufft/warehouse",
"id": "2482f4b05a3f289b4449d0bfd40278d71bfd63b8",
"size": "4084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "warehouse/cache/http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "182835"
},
{
"name": "Dockerfile",
"bytes": "7026"
},
{
"name": "HTML",
"bytes": "287733"
},
{
"name": "JavaScript",
"bytes": "59981"
},
{
"name": "Makefile",
"bytes": "6026"
},
{
"name": "Mako",
"bytes": "1505"
},
{
"name": "Python",
"bytes": "1545647"
},
{
"name": "Shell",
"bytes": "2326"
}
],
"symlink_target": ""
}
|
"""Mixin holding dropout fields for RNN cells."""
import tensorflow.compat.v2 as tf
from tensorflow.tools.docs import doc_controls
from keras import backend
@doc_controls.do_not_generate_docs
class DropoutRNNCellMixin:
"""Object that hold dropout related fields for RNN Cell.
This class is not a standalone RNN cell. It suppose to be used with a RNN
cell by multiple inheritance. Any cell that mix with class should have
following fields:
dropout: a float number within range [0, 1). The ratio that the input
tensor need to dropout.
recurrent_dropout: a float number within range [0, 1). The ratio that the
recurrent state weights need to dropout.
_random_generator: A backend.RandomGenerator instance, which will be used
to produce outputs based on the inputs and dropout rate.
This object will create and cache created dropout masks, and reuse them for
the incoming data, so that the same mask is used for every batch input.
"""
def __init__(self, *args, **kwargs):
self._create_non_trackable_mask_cache()
super().__init__(*args, **kwargs)
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _create_non_trackable_mask_cache(self):
"""Create the cache for dropout and recurrent dropout mask.
Note that the following two masks will be used in "graph function" mode,
e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`
tensors will be generated differently than in the "graph function" case,
and they will be cached.
Also note that in graph mode, we still cache those masks only because
the RNN could be created with `unroll=True`. In that case, the
`cell.call()` function will be invoked multiple times, and we want to
ensure same mask is used every time.
Also the caches are created without tracking. Since they are not
picklable by python when deepcopy, we don't want
`layer._obj_reference_counts_dict` to track it by default.
"""
self._dropout_mask_cache = backend.ContextValueCache(
self._create_dropout_mask
)
self._recurrent_dropout_mask_cache = backend.ContextValueCache(
self._create_recurrent_dropout_mask
)
def reset_dropout_mask(self):
"""Reset the cached dropout masks if any.
This is important for the RNN layer to invoke this in it `call()` method
so that the cached mask is cleared before calling the `cell.call()`. The
mask should be cached across the timestep within the same batch, but
shouldn't be cached between batches. Otherwise it will introduce
unreasonable bias against certain index of data within the batch.
"""
self._dropout_mask_cache.clear()
def reset_recurrent_dropout_mask(self):
"""Reset the cached recurrent dropout masks if any.
This is important for the RNN layer to invoke this in it call() method
so that the cached mask is cleared before calling the cell.call(). The
mask should be cached across the timestep within the same batch, but
shouldn't be cached between batches. Otherwise it will introduce
unreasonable bias against certain index of data within the batch.
"""
self._recurrent_dropout_mask_cache.clear()
def _create_dropout_mask(self, inputs, training, count=1):
return _generate_dropout_mask(
self._random_generator,
tf.ones_like(inputs),
self.dropout,
training=training,
count=count,
)
def _create_recurrent_dropout_mask(self, inputs, training, count=1):
return _generate_dropout_mask(
self._random_generator,
tf.ones_like(inputs),
self.recurrent_dropout,
training=training,
count=count,
)
def get_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the dropout mask for RNN cell's input.
It will create mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: The input tensor whose shape will be used to generate dropout
mask.
training: Boolean tensor, whether its in training mode, dropout will
be ignored in non-training mode.
count: Int, how many dropout mask will be generated. It is useful for
cell that has internal weights fused together.
Returns:
List of mask tensor, generated or cached mask based on context.
"""
if self.dropout == 0:
return None
init_kwargs = dict(inputs=inputs, training=training, count=count)
return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)
def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the recurrent dropout mask for RNN cell.
It will create mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: The input tensor whose shape will be used to generate dropout
mask.
training: Boolean tensor, whether its in training mode, dropout will
be ignored in non-training mode.
count: Int, how many dropout mask will be generated. It is useful for
cell that has internal weights fused together.
Returns:
List of mask tensor, generated or cached mask based on context.
"""
if self.recurrent_dropout == 0:
return None
init_kwargs = dict(inputs=inputs, training=training, count=count)
return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)
def __getstate__(self):
# Used for deepcopy. The caching can't be pickled by python, since it
# will contain tensor and graph.
state = super().__getstate__()
state.pop("_dropout_mask_cache", None)
state.pop("_recurrent_dropout_mask_cache", None)
return state
def __setstate__(self, state):
state["_dropout_mask_cache"] = backend.ContextValueCache(
self._create_dropout_mask
)
state["_recurrent_dropout_mask_cache"] = backend.ContextValueCache(
self._create_recurrent_dropout_mask
)
super().__setstate__(state)
def _generate_dropout_mask(generator, ones, rate, training=None, count=1):
def dropped_inputs():
return generator.dropout(ones, rate)
if count > 1:
return [
backend.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(count)
]
return backend.in_train_phase(dropped_inputs, ones, training=training)
|
{
"content_hash": "109410a13b06ff0de80e325c224f96da",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 80,
"avg_line_length": 41.89090909090909,
"alnum_prop": 0.6532118055555556,
"repo_name": "keras-team/keras",
"id": "df02f668ea3cebcb61cb0bb80e20e82b4300d340",
"size": "7601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/layers/rnn/dropout_rnn_cell_mixin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "900"
},
{
"name": "Python",
"bytes": "11342063"
},
{
"name": "Shell",
"bytes": "11489"
},
{
"name": "Starlark",
"bytes": "273139"
}
],
"symlink_target": ""
}
|
from django.db import models
from constants import APPLICATION_LABEL
MAX_PROJECT_TAG_LENGTH = 20
class ProjectTag(models.Model):
name = models.CharField(max_length=MAX_PROJECT_TAG_LENGTH, blank=True, null=False, unique=True)
def __unicode__(self):
return self.name
class Meta:
app_label= APPLICATION_LABEL
|
{
"content_hash": "c18189d77fc4eabf6ef2abe536074d0e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 99,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.6816901408450704,
"repo_name": "EarthSystemCoG/COG",
"id": "6d7382269bbf1b0093a0bb0e06bc1651ac0eb38a",
"size": "355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cog/models/project_tag.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "270"
},
{
"name": "CSS",
"bytes": "893678"
},
{
"name": "Classic ASP",
"bytes": "48011"
},
{
"name": "HTML",
"bytes": "96546078"
},
{
"name": "Java",
"bytes": "483882"
},
{
"name": "JavaScript",
"bytes": "13287152"
},
{
"name": "MATLAB",
"bytes": "30087"
},
{
"name": "PHP",
"bytes": "80287"
},
{
"name": "Python",
"bytes": "852780"
},
{
"name": "Rich Text Format",
"bytes": "6112"
},
{
"name": "Shell",
"bytes": "10602"
}
],
"symlink_target": ""
}
|
import arrow
import logging
import pickle
import redis
import random
from collections import MutableMapping
from datetime import timedelta
CONST_GC_MAX = 100
CONST_GC_PROB = 50
class ExpiringDictStore(MutableMapping):
""" Store for ExpiringDict.
Implement MutableMapping interface to act as a collection.
"""
def __init__(self, max_age):
self._max_age = max_age
def __getitem__(self, key):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError()
def __delitem__(self, key):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
def _gc(self, force=False):
pass
def set_max_age(self, max_age):
self._max_age = max_age
def refresh(self, key):
raise NotImplementedError()
class ExpiringDictStoreDict(ExpiringDictStore):
""" dict store for ExpiringDict.
Store datas in a python dict as a tuple, containing storage time.
"""
def __init__(self, max_age):
logging.debug('Creating Dict ExpiringDict')
self._max_age = max_age
self._real_storage = dict()
def __getitem__(self, key):
tuple_value = self._real_storage.__getitem__(key)
value_age = arrow.now() - tuple_value[1]
if value_age.seconds < self._max_age:
return tuple_value[0]
else:
self._real_storage.__delitem__(key)
raise KeyError(key)
def __setitem__(self, key, value):
self._gc()
self._real_storage.__setitem__(key, (value, arrow.now()))
def __delitem__(self, key):
self._real_storage.__delitem__(key)
def __iter__(self):
return self._real_storage.__iter__()
def __len__(self):
return self._real_storage.__len__()
def refresh(self, key):
try:
tuple_value = self.__getitem__(key)
self.__setitem__(key, tuple_value[0])
except KeyError:
pass
def _gc(self, force=False):
""" Remove expired key from the dict.
Use a probability for Garbage Collector to run or not.
"""
probability = random.randint(0, CONST_GC_MAX)
if probability > CONST_GC_PROB and not force:
return
logging.debug('ExpiringDict Garbage Collector')
max_date = (arrow.now() - timedelta(seconds=self._max_age))
keys_to_remove = [k for k, v in self._real_storage.items()
if v[1] < max_date]
logging.debug('Number of keys to remove: %d' % len(keys_to_remove))
for k in keys_to_remove:
self._real_storage.__delitem__(k)
class ExpiringDictStoreRedis(ExpiringDictStore):
""" redis store for ExpiringDict.
Store datas as key on a redis server.
"""
def __init__(self, max_age, redis_hostname=None, redis_port=6379,
key_prefix=None, redis_instance=None, redis_url=None):
self._max_age = max_age
self._key_prefix = key_prefix if key_prefix else id(self)
logging.debug('Creating Redis ExpiringDict with prefix: {}'.format(
self._key_prefix))
if redis_url:
self._redis = redis.StrictRedis.from_url(redis_url)
else:
self._redis = redis_instance if redis_instance \
else redis.StrictRedis(host=redis_hostname, port=redis_port)
def _get_real_key(self, key):
return str(self._key_prefix) + str(key)
def __setitem__(self, key, value):
logging.debug('Setting Redis Key: %s' % self._get_real_key(key))
self._redis.set(self._get_real_key(key), pickle.dumps(value),
ex=self._max_age)
def __getitem__(self, key):
logging.debug('Getting Redis Key: %s' % self._get_real_key(key))
try:
return pickle.loads(self._redis.get(self._get_real_key(key)))
except Exception:
raise KeyError(key)
def __delitem__(self, key):
self._redis.delete(self._get_real_key(key))
def __iter__(self):
prefix_len = len(self._key_prefix)
for i in self._redis.keys("^%s.*" % self._key_prefix):
real_key = i[prefix_len:]
yield real_key
def __len__(self):
return len(self._redis.keys("^%s.*" % self._key_prefix))
def refresh(self, key):
try:
value = self.__getitem__(key)
self.__setitem__(key, value)
except KeyError:
pass
class ExpiringDict(object):
""" ExpiringDict class.
Act as a python dict but with expiration for each data stored.
Can store data in a redis server if available. Otherwise, uses a python dict
"""
def __init__(self, max_age, redis_hostname=None, redis_port=6379,
redis_key_prefix=None, redis_url=None):
if redis_url or (redis_hostname and redis_port):
self._store = ExpiringDictStoreRedis(
max_age, redis_hostname=redis_hostname, redis_port=redis_port,
key_prefix=redis_key_prefix, redis_url=redis_url
)
else:
self._store = ExpiringDictStoreDict(max_age)
def __getitem__(self, key):
return self._store.__getitem__(key)
def __setitem__(self, key, value):
self._store.__setitem__(key, value)
def __delitem__(self, key):
self._store.__delitem__(key)
def __iter__(self):
self._store._gc(force=True)
return self._store.__iter__()
def __len__(self):
self._store._gc(force=True)
return self._store.__len__()
def set_max_age(self, max_age):
self._store.set_max_age(max_age)
def get(self, key, default=None):
""" Get item from the dict
Return default value if key does not exist.
"""
try:
return self._store.__getitem__(key)
except KeyError:
pass
return default
def refresh(self, key):
""" Reset TTL to max_age for given key
"""
self._store.refresh(key)
def pop(self, key, default=None):
""" Get item and remove it.
Return default if expired or does not exist.
"""
try:
item = self._store.__getitem__(key)
self._store.__delitem__(key)
return item
except KeyError:
return default
|
{
"content_hash": "28a730dd52e06e409285d3436b24386d",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 30.35377358490566,
"alnum_prop": 0.574980574980575,
"repo_name": "Pi3rral/expiringpdict",
"id": "d8e744b0c719713146ea86bc1fc40f798778e3bd",
"size": "6435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expiringpdict/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7995"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='infosystem',
version='0.1.87',
summary='Infosystem Framework',
url='https://github.com/samueldmq/infosystem',
author='Samuel de Medeiros Queiroz, Francois Oliveira',
author_email='samueldmq@gmail.com, oliveira.francois@gmail.com',
license='Apache-2',
packages=find_packages(exclude=["tests"])
)
|
{
"content_hash": "54cf711895b1f099c13c2a888a339618",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 29.692307692307693,
"alnum_prop": 0.7046632124352331,
"repo_name": "samueldmq/infosystem",
"id": "a38b9a08fb502771de2c8c591833f05dbfe1589d",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "90123"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
# The interface of a data iter that works for bucketing
#
# DataIter
# - default_bucket_key: the bucket key for the default symbol.
#
# DataBatch
# - provide_data: same as DataIter, but specific to this batch
# - provide_label: same as DataIter, but specific to this batch
# - bucket_key: the key for the bucket that should be used for this batch
def default_read_content(path):
with open(path) as ins:
content = ins.read()
#content = content.replace('\n', ' <eos> ').replace('. ', ' <eos> ')
content = content.replace('\n', '<eos>')
return content
def default_build_vocab(path):
content = default_read_content(path)
content = content.split(' ')
idx = 0 # 0 is left for zero-padding
the_vocab = {}
#the_vocab[' '] = 0 # put a dummy element here so that len(vocab) is correct
for word in content:
if len(word) == 0:
continue
if not word in the_vocab:
the_vocab[word] = idx
idx += 1
return the_vocab
def default_text2id(sentence, the_vocab):
words = sentence.split(' ')
words = [the_vocab[w] for w in words if len(w) > 0]
return words
def default_gen_buckets(sentences, batch_size, the_vocab):
len_dict = {}
max_len = -1
for sentence in sentences:
words = default_text2id(sentence, the_vocab)
if len(words) == 0:
continue
if len(words) > max_len:
max_len = len(words)
if len(words) in len_dict:
len_dict[len(words)] += 1
else:
len_dict[len(words)] = 1
print(len_dict)
tl = 0
buckets = []
for l, n in len_dict.items(): # TODO: There are better heuristic ways to do this
if n + tl >= batch_size:
buckets.append(l)
tl = 0
else:
tl += n
if tl > 0:
buckets.append(max_len)
print("=============Buckets: " + buckets)
return buckets
class SimpleBatch(object):
def __init__(self, data_names, data, label_names, label, bucket_key):
self.data = data
self.label = label
self.data_names = data_names
self.label_names = label_names
self.bucket_key = bucket_key
self.pad = 0
self.index = None # TODO: what is index?
@property
def provide_data(self):
return [(n, x.shape) for n, x in zip(self.data_names, self.data)]
@property
def provide_label(self):
return [(n, x.shape) for n, x in zip(self.label_names, self.label)]
class DummyIter(mx.io.DataIter):
"A dummy iterator that always return the same batch, used for speed testing"
def __init__(self, real_iter):
super(DummyIter, self).__init__()
self.real_iter = real_iter
self.provide_data = real_iter.provide_data
self.provide_label = real_iter.provide_label
self.batch_size = real_iter.batch_size
for batch in real_iter:
self.the_batch = batch
break
def __iter__(self):
return self
def next(self):
return self.the_batch
class BucketSentenceIter(mx.io.DataIter):
def __init__(self, path, vocab, buckets, batch_size,
init_states, data_name='data', label_name='label',
seperate_char=' <eos> ', text2id=None, read_content=None,
time_major=True):
super(BucketSentenceIter, self).__init__()
if text2id == None:
self.text2id = default_text2id
else:
self.text2id = text2id
if read_content == None:
self.read_content = default_read_content
else:
self.read_content = read_content
content = self.read_content(path)
sentences = content.split(seperate_char)
if len(buckets) == 0:
buckets = default_gen_buckets(sentences, batch_size, vocab)
self.vocab_size = len(vocab)
self.data_name = data_name
self.label_name = label_name
self.time_major = time_major
self.layout_mapper = mx.io.DefaultLayoutMapper(1 if time_major else 0)
buckets.sort()
self.buckets = buckets
self.data = [[] for _ in buckets]
# pre-allocate with the largest bucket for better memory sharing
self.default_bucket_key = max(buckets)
for sentence in sentences:
sentence = self.text2id(sentence, vocab)
if len(sentence) == 0:
continue
for i, bkt in enumerate(buckets):
if bkt >= len(sentence):
self.data[i].append(sentence)
#print 'sen_id: ', buckets, sentence
break
# we just ignore the sentence it is longer than the maximum
# bucket size here
# convert data into ndarrays for better speed during training
data = [np.zeros((len(x), buckets[i])) for i, x in enumerate(self.data)]
for i_bucket in range(len(self.buckets)):
for j in range(len(self.data[i_bucket])):
sentence = self.data[i_bucket][j]
data[i_bucket][j, :len(sentence)] = sentence
self.data = data
print '----final data: ', self.data[0]
# Get the size of each bucket, so that we could sample
# uniformly from the bucket
bucket_sizes = [len(x) for x in self.data]
print("Summary of dataset ==================")
for bkt, size in zip(buckets, bucket_sizes):
print("bucket of len %3d : %d samples" % (bkt, size))
self.batch_size = batch_size
self.make_data_iter_plan()
self.init_states = init_states
self.init_state_arrays = [mx.nd.zeros(x[1]) for x in init_states]
if self.time_major:
self.provide_data = [('data', (self.default_bucket_key, batch_size))] + init_states
self.provide_label = [('softmax_label', (self.default_bucket_key, batch_size))]
else:
self.provide_data = [('data', (batch_size, self.default_bucket_key))] + init_states
self.provide_label = [('softmax_label', (self.batch_size, self.default_bucket_key))]
def make_data_iter_plan(self):
"make a random data iteration plan"
# truncate each bucket into multiple of batch-size
bucket_n_batches = []
for i in range(len(self.data)):
bucket_n_batches.append(len(self.data[i]) / self.batch_size)
self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)]
bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)])
np.random.shuffle(bucket_plan)
bucket_idx_all = [np.random.permutation(len(x)) for x in self.data]
self.bucket_plan = bucket_plan
self.bucket_idx_all = bucket_idx_all
self.bucket_curr_idx = [0 for x in self.data]
self.data_buffer = []
self.label_buffer = []
for i_bucket in range(len(self.data)):
if self.time_major:
data = np.zeros((self.buckets[i_bucket], self.batch_size))
label = np.zeros((self.buckets[i_bucket], self.batch_size))
else:
data = np.zeros((self.batch_size, self.buckets[i_bucket]))
label = np.zeros((self.batch_size, self.buckets[i_bucket]))
self.data_buffer.append(data)
self.label_buffer.append(label)
def __iter__(self):
for i_bucket in self.bucket_plan:
data = self.data_buffer[i_bucket]
i_idx = self.bucket_curr_idx[i_bucket]
idx = self.bucket_idx_all[i_bucket][i_idx:i_idx+self.batch_size]
self.bucket_curr_idx[i_bucket] += self.batch_size
init_state_names = [x[0] for x in self.init_states]
if self.time_major:
data[:] = self.data[i_bucket][idx].T
else:
data[:] = self.data[i_bucket][idx]
label = self.label_buffer[i_bucket]
if self.time_major:
label[:-1, :] = data[1:, :]
label[-1, :] = 0
else:
label[:, :-1] = data[:, 1:]
label[:, -1] = 0
data_all = [mx.nd.array(data)] + self.init_state_arrays
label_all = [mx.nd.array(label)]
data_names = ['data'] + init_state_names
label_names = ['softmax_label']
data_batch = SimpleBatch(data_names, data_all, label_names, label_all,
self.buckets[i_bucket])
yield data_batch
def reset(self):
self.bucket_curr_idx = [0 for x in self.data]
class MyBucketSentenceIter(mx.io.DataIter):
def my_text2id(self, sentence, the_vocab):
words = sentence
words = [the_vocab[w] for w in words if len(w) > 0]
return words
def __init__(self, path, vocab, buckets, batch_size,
init_states, data_name='data', label_name='label',
seperate_char=' <eos> ', text2id=None, read_content=None,
time_major=True):
super(MyBucketSentenceIter, self).__init__()
self.text2id = self.my_text2id
if read_content == None:
self.read_content = default_read_content
else:
self.read_content = read_content
print 'path: ', path
content = self.read_content(path)
#sentences = content.split(seperate_char)
words = content.split(' ')
words = words[1:]
#detect word errors
for word in words:
word_id = vocab[word]
if word_id >= 10000:
print 'error word: ', word
if len(buckets) == 0:
buckets = default_gen_buckets(sentences, batch_size, vocab)
self.vocab_size = len(vocab)
self.data_name = data_name
self.label_name = label_name
self.time_major = time_major
self.layout_mapper = mx.io.DefaultLayoutMapper(1 if time_major else 0)
buckets.sort()
self.buckets = buckets
self.data = [[] for _ in buckets]
# pre-allocate with the largest bucket for better memory sharing
self.default_bucket_key = max(buckets)
print 'buckets: ', buckets
number_of_word_per_sentence = buckets[0]/2
number_of_sentence = (len(words)+number_of_word_per_sentence-1)/number_of_word_per_sentence
for sentence_idx in range(number_of_sentence):
end_of_current_sentence = (sentence_idx+1) * number_of_word_per_sentence
if end_of_current_sentence > len(words):
end_of_current_sentence = len(words)
sentence_ids = self.text2id(words[sentence_idx*number_of_word_per_sentence:end_of_current_sentence], vocab)
self.data[0].append(sentence_ids)
# convert data into ndarrays for better speed during training
data = [np.zeros((len(x), buckets[i])) for i, x in enumerate(self.data)]
for i_bucket in range(len(self.buckets)):
for j in range(len(self.data[i_bucket])):
sentence = self.data[i_bucket][j]
#print sentence
data[i_bucket][j, :len(sentence)] = sentence
self.data = data
print 'final data: ', self.data
# Get the size of each bucket, so that we could sample
# uniformly from the bucket
bucket_sizes = [len(x) for x in self.data]
print("Summary of dataset ==================")
for bkt, size in zip(buckets, bucket_sizes):
print("bucket of len %3d : %d samples" % (bkt, size))
self.batch_size = batch_size
self.make_data_iter_plan()
self.init_states = init_states
self.init_state_arrays = [mx.nd.zeros(x[1]) for x in init_states]
if self.time_major:
self.provide_data = [('data', (self.default_bucket_key, batch_size))] + init_states
self.provide_label = [('softmax_label', (self.default_bucket_key, batch_size))]
else:
self.provide_data = [('data', (batch_size, self.default_bucket_key))] + init_states
self.provide_label = [('softmax_label', (self.batch_size, self.default_bucket_key))]
def make_data_iter_plan(self):
"make a random data iteration plan"
# truncate each bucket into multiple of batch-size
bucket_n_batches = []
for i in range(len(self.data)):
bucket_n_batches.append(len(self.data[i]) / self.batch_size)
self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)]
bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)])
np.random.shuffle(bucket_plan)
bucket_idx_all = [np.random.permutation(len(x)) for x in self.data]
self.bucket_plan = bucket_plan
self.bucket_idx_all = bucket_idx_all
self.bucket_curr_idx = [0 for x in self.data]
self.data_buffer = []
self.label_buffer = []
for i_bucket in range(len(self.data)):
if self.time_major:
data = np.zeros((self.buckets[i_bucket], self.batch_size))
label = np.zeros((self.buckets[i_bucket], self.batch_size))
else:
data = np.zeros((self.batch_size, self.buckets[i_bucket]))
label = np.zeros((self.batch_size, self.buckets[i_bucket]))
self.data_buffer.append(data)
self.label_buffer.append(label)
def __iter__(self):
for i_bucket in self.bucket_plan:
data = self.data_buffer[i_bucket]
i_idx = self.bucket_curr_idx[i_bucket]
idx = self.bucket_idx_all[i_bucket][i_idx:i_idx+self.batch_size]
self.bucket_curr_idx[i_bucket] += self.batch_size
init_state_names = [x[0] for x in self.init_states]
if self.time_major:
data[:] = self.data[i_bucket][idx].T
else:
data[:] = self.data[i_bucket][idx]
label = self.label_buffer[i_bucket]
if self.time_major:
label[:-1, :] = data[1:, :]
label[-1, :] = 0
else:
label[:, :-1] = data[:, 1:]
label[:, -1] = 0
data_all = [mx.nd.array(data)] + self.init_state_arrays
label_all = [mx.nd.array(label)]
data_names = ['data'] + init_state_names
label_names = ['softmax_label']
data_batch = SimpleBatch(data_names, data_all, label_names, label_all,
self.buckets[i_bucket])
yield data_batch
def reset(self):
self.bucket_curr_idx = [0 for x in self.data]
|
{
"content_hash": "e843657f091ceebbee7e2e86a46584ec",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 119,
"avg_line_length": 36.909774436090224,
"alnum_prop": 0.5673253208392748,
"repo_name": "linmajia/dlbench",
"id": "b25515bb251ad62ede8b16538c8d49d640928c94",
"size": "14904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synthetic/experiments/mxnet/rnn/bucket_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "122057"
},
{
"name": "PowerShell",
"bytes": "1152"
},
{
"name": "Python",
"bytes": "252271"
},
{
"name": "Shell",
"bytes": "87021"
}
],
"symlink_target": ""
}
|
from ordered_model.admin import OrderedModelAdmin
from filebrowser.settings import ADMIN_THUMBNAIL
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin import StackedInline
from .model import Photo
class PhotoAdmin(OrderedModelAdmin):
list_display = ('as_rendered', 'is_enabled', 'name', 'slug',
'move_up_down_links', 'order', 'created', 'updated')
list_filter = ('is_enabled', 'categories')
list_editable = ('name', 'slug', 'is_enabled')
date_hierarchy = 'modified'
def as_rendered(self, obj):
if obj.image and obj.image.filetype == "Image":
image_style = "color: #fff; height: 240px; width: 320px; \
display: flex; align-items: center; align-content: center; \
background: left top url('%s') no-repeat; position: relative;" \
% obj.image.version_generate(ADMIN_THUMBNAIL).url
span_style = "background: rgba(0, 0, 0, .3); color: #fff; \
padding: 1em; border-radius: 0 .5em .5em 0;"
return "<div style=\"%s\"><span style=\"%s\">%s</span></div>" \
% (image_style, span_style, obj.name)
else:
return obj.name
as_rendered.allow_tags = True
as_rendered.short_description = _("as rendered")
class PhotoInline(StackedInline):
model = Photo.categories.through
|
{
"content_hash": "7d8a9dd71029421df8dfe2ce4c8ff21a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 80,
"avg_line_length": 42.875,
"alnum_prop": 0.6239067055393586,
"repo_name": "gbezyuk/django-react-redux-universal-hot-example",
"id": "4dea40665bb0826289ce1a20ca248f7f840b9c3e",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/photos/models/photo/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "256"
},
{
"name": "JavaScript",
"bytes": "39411"
},
{
"name": "Python",
"bytes": "21535"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_selfip
short_description: Manage Self-IPs on a BIG-IP system
description:
- Manage Self-IPs on a BIG-IP system
version_added: "2.2"
options:
address:
description:
- The IP addresses for the new self IP. This value is ignored upon update
as addresses themselves cannot be changed after they are created.
required: False
default: None
allow_service:
description:
- Configure port lockdown for the Self IP. By default, the Self IP has a
"default deny" policy. This can be changed to allow TCP and UDP ports
as well as specific protocols. This list should contain C(protocol):C(port)
values.
required: False
default: None
name:
description:
- The self IP to create.
required: True
default: Value of C(address)
netmask:
description:
- The netmasks for the self IP.
required: True
state:
description:
- The state of the variable on the system. When C(present), guarantees
that the Self-IP exists with the provided attributes. When C(absent),
removes the Self-IP from the system.
required: False
default: present
choices:
- absent
- present
traffic_group:
description:
- The traffic group for the self IP addresses in an active-active,
redundant load balancer configuration.
required: False
vlan:
description:
- The VLAN that the new self IPs will be on.
required: True
route_domain:
description:
- The route domain id of the system.
If none, id of the route domain will be "0" (default route domain)
required: False
default: none
version_added: 2.3
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host.
extends_documentation_fragment: f5
requirements:
- netaddr
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create Self IP
bigip_selfip:
address: "10.10.10.10"
name: "self1"
netmask: "255.255.255.0"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
validate_certs: "no"
vlan: "vlan1"
delegate_to: localhost
- name: Create Self IP with a Route Domain
bigip_selfip:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
validate_certs: "no"
name: "self1"
address: "10.10.10.10"
netmask: "255.255.255.0"
vlan: "vlan1"
route_domain: "10"
allow_service: "default"
delegate_to: localhost
- name: Delete Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: Allow management web UI to be accessed on this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- "tcp:443"
delegate_to: localhost
- name: Allow HTTPS and SSH access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- "tcp:443"
- "tpc:22"
delegate_to: localhost
- name: Allow all services access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- all
delegate_to: localhost
- name: Allow only GRE and IGMP protocols access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- gre:0
- igmp:0
delegate_to: localhost
- name: Allow all TCP, but no other protocols access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- tcp:0
delegate_to: localhost
'''
RETURN = '''
allow_service:
description: Services that allowed via this Self IP
returned: changed
type: list
sample: ['igmp:0','tcp:22','udp:53']
address:
description: The address for the Self IP
returned: created
type: string
sample: "192.0.2.10"
name:
description: The name of the Self IP
returned:
- created
- changed
- deleted
type: string
sample: "self1"
netmask:
description: The netmask of the Self IP
returned:
- changed
- created
type: string
sample: "255.255.255.0"
traffic_group:
description: The traffic group that the Self IP is a member of
return:
- changed
- created
type: string
sample: "traffic-group-local-only"
vlan:
description: The VLAN set on the Self IP
return:
- changed
- created
type: string
sample: "vlan1"
'''
try:
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
try:
from netaddr import IPNetwork, AddrFormatError
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
FLOAT = ['enabled', 'disabled']
DEFAULT_TG = 'traffic-group-local-only'
ALLOWED_PROTOCOLS = ['eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
'l2tp', 'ospf', 'pim', 'tcp', 'udp']
class BigIpSelfIp(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
# The params that change in the module
self.cparams = dict()
# Stores the params that are sent to the module
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def present(self):
changed = False
if self.exists():
changed = self.update()
else:
changed = self.create()
return changed
def absent(self):
changed = False
if self.exists():
changed = self.delete()
return changed
def read(self):
"""Read information and transform it
The values that are returned by BIG-IP in the f5-sdk can have encoding
attached to them as well as be completely missing in some cases.
Therefore, this method will transform the data from the BIG-IP into a
format that is more easily consumable by the rest of the class and the
parameters that are supported by the module.
:return: List of values currently stored in BIG-IP, formatted for use
in this class.
"""
p = dict()
name = self.params['name']
partition = self.params['partition']
r = self.api.tm.net.selfips.selfip.load(
name=name,
partition=partition
)
if hasattr(r, 'address'):
p['route_domain'] = str(None)
if '%' in r.address:
ipaddr = []
ipaddr = r.address.split('%', 1)
rdmask = ipaddr[1].split('/', 1)
r.address = "%s/%s" % (ipaddr[0], rdmask[1])
p['route_domain'] = str(rdmask[0])
ipnet = IPNetwork(r.address)
p['address'] = str(ipnet.ip)
p['netmask'] = str(ipnet.netmask)
if hasattr(r, 'trafficGroup'):
p['traffic_group'] = str(r.trafficGroup)
if hasattr(r, 'vlan'):
p['vlan'] = str(r.vlan)
if hasattr(r, 'allowService'):
if r.allowService == 'all':
p['allow_service'] = set(['all'])
else:
p['allow_service'] = set([str(x) for x in r.allowService])
else:
p['allow_service'] = set(['none'])
p['name'] = name
return p
def verify_services(self):
"""Verifies that a supplied service string has correct format
The string format for port lockdown is PROTOCOL:PORT. This method
will verify that the provided input matches the allowed protocols
and the port ranges before submitting to BIG-IP.
The only allowed exceptions to this rule are the following values
* all
* default
* none
These are special cases that are handled differently in the API.
"all" is set as a string, "default" is set as a one item list, and
"none" removes the key entirely from the REST API.
:raises F5ModuleError:
"""
result = []
for svc in self.params['allow_service']:
if svc in ['all', 'none', 'default']:
result = [svc]
break
tmp = svc.split(':')
if tmp[0] not in ALLOWED_PROTOCOLS:
raise F5ModuleError(
"The provided protocol '%s' is invalid" % (tmp[0])
)
try:
port = int(tmp[1])
except Exception:
raise F5ModuleError(
"The provided port '%s' is not a number" % (tmp[1])
)
if port < 0 or port > 65535:
raise F5ModuleError(
"The provided port '%s' must be between 0 and 65535"
% (port)
)
else:
result.append(svc)
return set(result)
def fmt_services(self, services):
"""Returns services formatted for consumption by f5-sdk update
The BIG-IP endpoint for services takes different values depending on
what you want the "allowed services" to be. It can be any of the
following
- a list containing "protocol:port" values
- the string "all"
- a null value, or None
This is a convenience function to massage the values the user has
supplied so that they are formatted in such a way that BIG-IP will
accept them and apply the specified policy.
:param services: The services to format. This is always a Python set
:return:
"""
result = list(services)
if result[0] == 'all':
return 'all'
elif result[0] == 'none':
return None
else:
return list(services)
def traffic_groups(self):
result = []
groups = self.api.tm.cm.traffic_groups.get_collection()
for group in groups:
# Just checking for the addition of the partition here for
# different versions of BIG-IP
if '/' + self.params['partition'] + '/' in group.name:
result.append(group.name)
else:
full_name = '/%s/%s' % (self.params['partition'], group.name)
result.append(str(full_name))
return result
def update(self):
changed = False
svcs = []
params = dict()
current = self.read()
check_mode = self.params['check_mode']
address = self.params['address']
allow_service = self.params['allow_service']
name = self.params['name']
netmask = self.params['netmask']
partition = self.params['partition']
traffic_group = self.params['traffic_group']
vlan = self.params['vlan']
route_domain = self.params['route_domain']
if address is not None and address != current['address']:
raise F5ModuleError(
'Self IP addresses cannot be updated'
)
if netmask is not None:
# I ignore the address value here even if they provide it because
# you are not allowed to change it.
try:
address = IPNetwork(current['address'])
new_addr = "%s/%s" % (address.ip, netmask)
nipnet = IPNetwork(new_addr)
if route_domain is not None:
nipnet = "%s%s%s" % (address.ip, route_domain, netmask)
cur_addr = "%s/%s" % (current['address'], current['netmask'])
cipnet = IPNetwork(cur_addr)
if route_domain is not None:
cipnet = "%s%s%s" % (current['address'], current['route_domain'], current['netmask'])
if nipnet != cipnet:
if route_domain is not None:
address = "%s%s%s/%s" % (address.ip, '%', route_domain, netmask)
else:
address = "%s/%s" % (nipnet.ip, nipnet.prefixlen)
params['address'] = address
except AddrFormatError:
raise F5ModuleError(
'The provided address/netmask value was invalid'
)
if traffic_group is not None:
traffic_group = "/%s/%s" % (partition, traffic_group)
if traffic_group not in self.traffic_groups():
raise F5ModuleError(
'The specified traffic group was not found'
)
if 'traffic_group' in current:
if traffic_group != current['traffic_group']:
params['trafficGroup'] = traffic_group
else:
params['trafficGroup'] = traffic_group
if vlan is not None:
vlans = self.get_vlans()
vlan = "/%s/%s" % (partition, vlan)
if 'vlan' in current:
if vlan != current['vlan']:
params['vlan'] = vlan
else:
params['vlan'] = vlan
if vlan not in vlans:
raise F5ModuleError(
'The specified VLAN was not found'
)
if allow_service is not None:
svcs = self.verify_services()
if 'allow_service' in current:
if svcs != current['allow_service']:
params['allowService'] = self.fmt_services(svcs)
else:
params['allowService'] = self.fmt_services(svcs)
if params:
changed = True
params['name'] = name
params['partition'] = partition
if check_mode:
return changed
self.cparams = camel_dict_to_snake_dict(params)
if svcs:
self.cparams['allow_service'] = list(svcs)
else:
return changed
r = self.api.tm.net.selfips.selfip.load(
name=name,
partition=partition
)
r.update(**params)
r.refresh()
return True
def get_vlans(self):
"""Returns formatted list of VLANs
The VLAN values stored in BIG-IP are done so using their fully
qualified name which includes the partition. Therefore, "correct"
values according to BIG-IP look like this
/Common/vlan1
This is in contrast to the formats that most users think of VLANs
as being stored as
vlan1
To provide for the consistent user experience while not turfing
BIG-IP, we need to massage the values that are provided by the
user so that they include the partition.
:return: List of vlans formatted with preceding partition
"""
partition = self.params['partition']
vlans = self.api.tm.net.vlans.get_collection()
return [str("/" + partition + "/" + x.name) for x in vlans]
def create(self):
params = dict()
svcs = []
check_mode = self.params['check_mode']
address = self.params['address']
allow_service = self.params['allow_service']
name = self.params['name']
netmask = self.params['netmask']
partition = self.params['partition']
traffic_group = self.params['traffic_group']
vlan = self.params['vlan']
route_domain = self.params['route_domain']
if address is None or netmask is None:
raise F5ModuleError(
'An address and a netmask must be specififed'
)
if vlan is None:
raise F5ModuleError(
'A VLAN name must be specified'
)
else:
vlan = "/%s/%s" % (partition, vlan)
try:
ipin = "%s/%s" % (address, netmask)
ipnet = IPNetwork(ipin)
if route_domain is not None:
params['address'] = "%s%s%s/%s" % (ipnet.ip, '%', route_domain, ipnet.prefixlen)
else:
params['address'] = "%s/%s" % (ipnet.ip, ipnet.prefixlen)
except AddrFormatError:
raise F5ModuleError(
'The provided address/netmask value was invalid'
)
if traffic_group is None:
params['trafficGroup'] = "/%s/%s" % (partition, DEFAULT_TG)
else:
traffic_group = "/%s/%s" % (partition, traffic_group)
if traffic_group in self.traffic_groups():
params['trafficGroup'] = traffic_group
else:
raise F5ModuleError(
'The specified traffic group was not found'
)
vlans = self.get_vlans()
if vlan in vlans:
params['vlan'] = vlan
else:
raise F5ModuleError(
'The specified VLAN was not found'
)
if allow_service is not None:
svcs = self.verify_services()
params['allowService'] = self.fmt_services(svcs)
params['name'] = name
params['partition'] = partition
self.cparams = camel_dict_to_snake_dict(params)
if svcs:
self.cparams['allow_service'] = list(svcs)
if check_mode:
return True
d = self.api.tm.net.selfips.selfip
d.create(**params)
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the self IP")
def delete(self):
params = dict()
check_mode = self.params['check_mode']
params['name'] = self.params['name']
params['partition'] = self.params['partition']
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return True
dc = self.api.tm.net.selfips.selfip.load(**params)
dc.delete()
if self.exists():
raise F5ModuleError("Failed to delete the self IP")
return True
def exists(self):
name = self.params['name']
partition = self.params['partition']
return self.api.tm.net.selfips.selfip.exists(
name=name,
partition=partition
)
def flush(self):
result = dict()
state = self.params['state']
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.cparams)
result.update(dict(changed=changed))
return result
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
address=dict(required=False, default=None),
allow_service=dict(type='list', default=None),
name=dict(required=True),
netmask=dict(required=False, default=None),
traffic_group=dict(required=False, default=None),
vlan=dict(required=False, default=None),
route_domain=dict(required=False, default=None)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
try:
if not HAS_NETADDR:
raise F5ModuleError(
"The netaddr python module is required."
)
obj = BigIpSelfIp(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
|
{
"content_hash": "624ad8b549d6c9cb39add66df4febe02",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 105,
"avg_line_length": 30.118840579710145,
"alnum_prop": 0.5565393128669041,
"repo_name": "mcgonagle/ansible_f5",
"id": "7c2f730d1cd68a412c23eb1af8a516062d397d9a",
"size": "21515",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "library_old/bigip_selfip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2293136"
},
{
"name": "Shell",
"bytes": "3752"
},
{
"name": "Tcl",
"bytes": "80"
}
],
"symlink_target": ""
}
|
import json
import sys
import pyperf
EMPTY = ({}, 2000)
SIMPLE_DATA = {'key1': 0, 'key2': True, 'key3': 'value', 'key4': 'foo',
'key5': 'string'}
SIMPLE = (SIMPLE_DATA, 1000)
NESTED_DATA = {'key1': 0, 'key2': SIMPLE[0], 'key3': 'value', 'key4': SIMPLE[0],
'key5': SIMPLE[0], 'key': '\u0105\u0107\u017c'}
NESTED = (NESTED_DATA, 1000)
HUGE = ([NESTED[0]] * 1000, 1)
CASES = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE']
def bench_json_dumps(data):
for obj, count_it in data:
for _ in count_it:
json.dumps(obj)
def add_cmdline_args(cmd, args):
if args.cases:
cmd.extend(("--cases", args.cases))
def main():
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
runner.argparser.add_argument("--cases",
help="Comma separated list of cases. Available cases: %s. By default, run all cases."
% ', '.join(CASES))
runner.metadata['description'] = "Benchmark json.dumps()"
args = runner.parse_args()
if args.cases:
cases = []
for case in args.cases.split(','):
case = case.strip()
if case:
cases.append(case)
if not cases:
print("ERROR: empty list of cases")
sys.exit(1)
else:
cases = CASES
data = []
for case in cases:
obj, count = globals()[case]
data.append((obj, range(count)))
runner.bench_func('json_dumps', bench_json_dumps, data)
if __name__ == '__main__':
main()
|
{
"content_hash": "a492dc7ea4de1aed2c538ff42e038ea7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 119,
"avg_line_length": 26.508474576271187,
"alnum_prop": 0.5332480818414322,
"repo_name": "python/performance",
"id": "ccdaaadac65ee74d1ecc1b171b314c2fde019923",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyperformance/benchmarks/bm_json_dumps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "133837"
},
{
"name": "Python",
"bytes": "463402"
},
{
"name": "Shell",
"bytes": "14726"
}
],
"symlink_target": ""
}
|
""" test_colr_tool.py
Unit tests for colr.py command line tool.
These tests should be ran with `green -q` to quiet stdout.
If you are using nose then stdout should be quiet already.
If you are using unittest to run these, then -b should work to quiet them.
-Christopher Welborn 12-09-2015
"""
import random
import sys
import unittest
from colr import (
Colr,
hex2rgb,
name_data,
InvalidArg,
InvalidColr,
InvalidStyle,
rgb2hex,
)
from colr.__main__ import (
__version__,
InvalidNumber,
InvalidRgb,
get_colr,
)
from .testing_tools import (
ColrToolTestCase,
StdOutCatcher,
)
r = random.SystemRandom()
# Save names in list format, for random.choice().
name_data_names = list(name_data)
class ColrToolTests(ColrToolTestCase):
def setUp(self):
# Default argd, when no flags are given.
self.argd = {
'--auto-disable': False,
'--back': None,
'--center': None,
'--debug': False,
'--err': False,
'--fore': None,
'--frequency': None,
'--gradient': None,
'--gradientrgb': [],
'--help': False,
'--listcodes': False,
'--ljust': None,
'--names': False,
'--newline': False,
'--offset': None,
'--rainbow': False,
'--rjust': None,
'--spread': None,
'--stripcodes': False,
'--style': None,
'--translate': False,
'--truecolor': False,
'--unique': False,
'--version': False,
'BACK': None,
'CODE': [],
'FORE': None,
'STYLE': None,
'TEXT': None,
}
# Number of random values to generate for tests to choose from.
max_vals = 50
# Valid basic color name args for the colr tool.
self.valid_basic_vals = (
'white',
'black',
'red',
'cyan',
'green',
'blue',
)
# Valid extended color names/values.
self.valid_ext_vals = [
'lightblue',
'lightcyan',
]
self.valid_ext_vals.extend(
set((
str(r.randint(0, 255))
for _ in range(max_vals)
))
)
# Valid rgb color values.
self.valid_rgb_vals = []
self.valid_rgb_vals.extend(
set((
','.join(str(r.randint(0, 255)) for rgb in range(3))
for _ in range(max_vals)
))
)
# Valid hex values.
self.valid_hex_vals = ['000', 'fff', 'f0f', 'aba']
for rgb in self.valid_rgb_vals:
rgbtup = tuple(int(x) for x in rgb.split(','))
self.valid_hex_vals.append(rgb2hex(*rgbtup))
# Valid style names/values.
self.valid_style_vals = (
'0',
'1',
'2',
'3',
'4',
'5',
'7',
'22',
'b',
'bold',
'bright',
'd',
'dim',
'f',
'flash',
'h',
'highlight',
'hilight',
'hilite',
'i',
'italic',
'n',
'none',
'normal',
'reset_all',
'reverse',
'u',
'underline',
'underlined',
)
def test_auto_disable(self):
""" colr tool should auto disable when asked. """
argd = {'TEXT': 'Hello', 'FORE': 'red', '--auto-disable': True}
self.assertMain(
argd,
stdout='\x1b[31mHello\x1b[0m\n',
msg='main() with --auto-disable failed.'
)
def test_basic_colors(self):
""" colr tool should recognize basic colors. """
argd = {'TEXT': 'Hello World', 'FORE': 'green', 'BACK': 'blue'}
for _ in range(10):
argd['FORE'] = r.choice(self.valid_basic_vals)
argd['BACK'] = r.choice(self.valid_basic_vals)
self.assertMain(argd, msg='main() failed with valid args.')
# Invalid color names should raise a InvalidColr.
badargsets = (
{'FORE': 'XXX', 'BACK': r.choice(self.valid_basic_vals)},
{'BACK': 'XXX', 'FORE': r.choice(self.valid_basic_vals)},
)
for argset in badargsets:
argd.update(argset)
with self.assertRaises(InvalidColr):
self.run_main_test(argd, should_fail=True)
def test_debug_deps(self):
""" colr tool should load debug dependencies. """
argd = {'TEXT': 'Hello', '--debug': True}
self.assertMain(argd, msg='main() with --debug failed.')
def test_entry_point(self):
""" entry_point() should run and handle the exit status code. """
argd = {'TEXT': 'test'}
self.assertEntry(argd, stdout='test')
argd = {'TEXT': 'test', 'FORE': 'blah'}
self.assertEntry(argd, should_fail=True)
def test_extended_colors(self):
""" colr tool should recognize extended colors. """
argd = {'TEXT': 'Hello World', 'FORE': '235'}
for _ in range(10):
argd['FORE'] = r.choice(self.valid_ext_vals)
argd['BACK'] = r.choice(self.valid_ext_vals)
self.assertMain(argd, msg='main() failed on extended colors.')
# Invalid color values should raise a InvalidColr.
badargsets = (
{'FORE': '1000', 'BACK': r.choice(self.valid_ext_vals)},
{'BACK': '-1', 'FORE': r.choice(self.valid_ext_vals)},
)
for argset in badargsets:
argd.update(argset)
with self.assertRaises(InvalidColr):
self.run_main_test(argd, should_fail=True)
def test_gradient(self):
""" colr tool should do basic gradients. """
for name in self.valid_basic_vals:
for spread in range(3):
argd = {
'TEXT': 'test',
'--gradient': name,
'--spread': str(spread),
}
self.assertMain(
argd,
msg='main() failed on valid --gradient name.',
)
for badspread in ('s', 'x', 'bad'):
argd['--spread'] = badspread
with self.assertRaises(InvalidNumber):
self.run_main_test(argd)
def test_gradient_rgb(self):
""" colr tool should do basic rgb gradients. """
for i in range(256):
i1 = str(i)
i2 = str((i + 255) % 255)
rgbs = [
','.join((i1, i1, i1)),
','.join((i2, i2, i2)),
]
argd = {'TEXT': 'test', '--gradientrgb': rgbs}
self.assertMain(
argd,
msg='main() failed on valid --gradientrgb args.',
)
argd = {'TEXT': 'test', '--gradientrgb': ['0,0,0', '1,1,1', '2,2,2']}
raiser = self.assertRaises(
InvalidArg,
msg='main() should have failed with more than 2 --gradientrgb args.'
)
with raiser:
self.run_main_test(argd, should_fail=True)
# Invalid rgb values should raise a InvalidColr.
badargsets = (
{'--gradientrgb': ['-1,25,25', r.choice(self.valid_rgb_vals)]},
{'--gradientrgb': ['257,25,25', r.choice(self.valid_rgb_vals)]},
{'--gradientrgb': ['a,255,255', r.choice(self.valid_rgb_vals)]},
{'--gradientrgb': ['xxx', r.choice(self.valid_rgb_vals)]},
)
for argset in badargsets:
argd.update(argset)
with self.assertRaises(InvalidRgb):
self.run_main_test(argd, should_fail=True)
def test_hex_colors(self):
""" colr tool should recognize hex colors. """
argd = {'TEXT': 'Hello World', 'FORE': 'd7d7d7'}
for _ in range(10):
argd['FORE'] = r.choice(self.valid_hex_vals)
argd['BACK'] = r.choice(self.valid_hex_vals)
self.assertMain(argd, msg='main() failed on hex colors.')
# Without -T, close matches should be used.
argd = {'TEXT': 'Hello World', '--truecolor': False}
hexvals = {
'010203': '000000',
'040506': '000000',
}
for hexval, closematch in hexvals.items():
argd['FORE'] = hexval
self.assertEqual(
get_colr(argd['TEXT'], self.make_argd(argd)),
Colr(argd['TEXT'], closematch),
msg='Hex value close match failed without --truecolor.',
)
# With -T, rgb mode should be used.
argd = {'TEXT': 'Hello World', '--truecolor': True}
hexvals = (
'010203',
'040506',
)
for hexval in hexvals:
argd['FORE'] = hexval
self.assertEqual(
get_colr(argd['TEXT'], self.make_argd(argd)),
Colr(argd['TEXT'], hex2rgb(argd['FORE'])),
msg='Hex value failed with --truecolor.',
)
# Invalid color values should raise a InvalidColr.
argd = {'TEXT': 'Hello World', '--truecolor': False}
badargsets = (
{'FORE': 'ffooll', 'BACK': r.choice(self.valid_hex_vals)},
{'BACK': 'oopsie', 'FORE': r.choice(self.valid_hex_vals)},
)
for argset in badargsets:
argd.update(argset)
with self.assertRaises(InvalidColr):
self.run_main_test(argd, should_fail=True)
def test_just(self):
""" colr tool should justify text with --center, --ljust, and --rjust.
"""
s = 'test'
cases = {
('--ljust', 10): '\x1b[31mtest\x1b[0m \n',
('--rjust', 10): ' \x1b[31mtest\x1b[0m\n',
('--center', 10): ' \x1b[31mtest\x1b[0m \n',
}
for argset, expected in cases.items():
argd = {'TEXT': s, 'FORE': 'red', argset[0]: str(argset[1])}
self.assertMain(
argd,
stdout=expected,
msg='Justification failed for {}={}.'.format(
argset[0],
argset[1],
),
)
def test_list_codes(self):
""" colr tool should list escape codes with --listcodes. """
cases = {
s: str(Colr('test', s))
for s in ('red', 'blue', 'green', 'white')
}
for name, s in cases.items():
argd = {'TEXT': s, '--listcodes': True}
self.assertMainIn(
argd,
stdout=name,
msg='main() with --listcodes did not recognize an escape code.',
)
def test_list_names(self):
""" colr tool should list names with --names. """
argd = {'--names': True}
self.assertMain(argd, msg='main() failed with --names')
def test_rainbow(self):
""" colr tool should work for basic rainbows. """
argsets = []
for offset in range(0, 256):
for freq in range(0, 11):
freq = freq * 0.1
for width in range(1, 4):
argd = {
'TEXT': 'test',
'--rainbow': True,
'--offset': str(offset),
'--frequency': str(freq),
'--width': str(width),
}
argsets.append(argd)
for argd in argsets:
self.assertMain(
argd,
msg='main() with --rainbow failed for valid args.',
)
for badfreq in ('s', 'bad', 'x2'):
argd['--frequency'] = badfreq
with self.assertRaises(InvalidNumber):
self.run_main_test(argd)
def test_rgb_colors(self):
""" colr tool should recognize rgb colors. """
argd = {'TEXT': 'Hello World', 'FORE': '25, 25, 25'}
with StdOutCatcher():
for _ in range(10):
argd['FORE'] = r.choice(self.valid_rgb_vals)
argd['BACK'] = r.choice(self.valid_rgb_vals)
self.assertEqual(
0,
self.run_main_test(argd)
)
# Invalid rgb values should raise a InvalidColr.
badargsets = (
{'FORE': '-1,25,25', 'BACK': r.choice(self.valid_rgb_vals)},
{'BACK': '257,25,25', 'FORE': r.choice(self.valid_rgb_vals)},
{'FORE': 'a,255,255', 'BACK': r.choice(self.valid_rgb_vals)},
{'BACK': 'xxx', 'FORE': r.choice(self.valid_rgb_vals)},
)
for argset in badargsets:
argd.update(argset)
with self.assertRaises(InvalidColr):
self.run_main_test(argd, should_fail=True)
def test_strip_codes(self):
""" colr tool should strip escape codes with --stripcodes. """
cases = [
str(Colr(' ').join(Colr('test', s), Colr('this', 'blue')))
for s in ('red', 'blue', 'green', 'white')
]
# Purposely starting at 1, and ending at 254 because of i + 1, i - 1.
cases.extend(
str(Colr(' ').join(
Colr('test', i, back=i + 1),
Colr('this', i - 1)
))
for i in range(1, 255)
)
for s in cases:
argd = {'TEXT': s, '--stripcodes': True}
self.assertMain(
argd,
stdout='test this',
msg='main() with --stripcodes did not strip codes properly.',
)
def test_styles(self):
""" colr tool should recognize styles. """
argd = {'TEXT': 'Hello World', 'FORE': '235', 'STYLE': 'normal'}
for _ in range(10):
argd['STYLE'] = r.choice(self.valid_style_vals)
self.assertMain(argd, msg='main() failed with valid style arg.')
# Invalid style values should raise a InvalidStyle.
badargsets = (
{'STYLE': 'dimmer'},
{'STYLE': 'x'},
)
for argset in badargsets:
argd.update(argset)
with self.assertRaises(InvalidStyle):
self.run_main_test(argd, should_fail=True)
def test_translate(self):
""" colr tool should translate color names/codes with --translate. """
for name in self.valid_basic_vals:
argd = {'CODE': [name], '--translate': True}
self.assertMainIn(
argd,
stdout=name,
msg='stdout was missing the color name.',
)
for name in range(9, 256):
s = str(name)
argd = {'CODE': [s], '--translate': True}
self.assertMainIn(
argd,
stdout='Name: {}'.format(s),
msg='stdout was missing the extended color name.',
)
rgb = ','.join((s, s, s))
argd = {'CODE': [rgb], '--translate': True}
self.assertMain(
argd,
msg='main() with --translate failed for valid rgb code.',
)
for badname in ('blah', '256', 'not-a-color', 'x,y,z'):
argd = {'CODE': [badname], '--translate': True}
self.assertMain(
argd,
should_fail=True,
)
if __name__ == '__main__':
print('Testing Colr Tool v. {}'.format(__version__))
# unittest.main() calls sys.exit(status_code).
unittest.main(argv=sys.argv, verbosity=2)
|
{
"content_hash": "e2f5ac172e1344b5c16489856dbcf192",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 80,
"avg_line_length": 34.112554112554115,
"alnum_prop": 0.4736040609137056,
"repo_name": "welbornprod/colr",
"id": "31e012c76fc3e587ac67b96470e912e41dc09c33",
"size": "15808",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "test/test_colr_tool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "550066"
},
{
"name": "Shell",
"bytes": "5797"
}
],
"symlink_target": ""
}
|
from omegaconf import DictConfig, OmegaConf
import hydra
@hydra.main(config_path='conf')
def my_app(cfg):
print(OmegaConf.to_yaml(cfg))
def _apply_argparse_cli():
import argparse, sys
p = argparse.ArgumentParser()
p.add_argument("--db", default="mysql")
p.add_argument("--db-config", nargs="*", default=[])
args = p.parse_args()
sys.argv[1:] = ["+db=%s" % args.db] + args.db_config
if __name__ == "__main__":
_apply_argparse_cli()
my_app()
|
{
"content_hash": "f2927f1a935dde28ba866cd8de75f936",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 26.61111111111111,
"alnum_prop": 0.6200417536534447,
"repo_name": "guildai/guild",
"id": "53ac7f59a4fb05456a48e3bc3f267f60098fdc45",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hydra/my_app_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "416"
},
{
"name": "JavaScript",
"bytes": "29682"
},
{
"name": "Makefile",
"bytes": "2621"
},
{
"name": "Python",
"bytes": "736181"
},
{
"name": "Shell",
"bytes": "1074"
},
{
"name": "Vue",
"bytes": "48469"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
from distutils.version import LooseVersion
from django.core.management.base import NoArgsCommand
from django.core import urlresolvers
from django.utils.importlib import import_module
import django
import imp
import logging
import re
import sys
import types
errors = []
logged_more = False
app_paths = {}
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('satchmo_check').setLevel(logging.DEBUG)
log = logging.getLogger('satchmo_check')
class Command(NoArgsCommand):
help = "Check the system to see if the Satchmo components are installed correctly."
# These settings help to not import some dependencies before they are
can_import_settings = False
requires_model_validation = False
def handle_noargs(self, **options):
"""Checks Satchmo installation and configuration.
Tests, catches and shortly summarizes many common installation errors, without tracebacks.
If was seen a traceback, it should be reported to a developer. (for now)
Tracebacks are saved to the 'satchmo.log'. It helps to find cyclic dependencies etc.
"""
from django.conf import settings
global logged_more
print_out("Checking your satchmo configuration.")
try:
import satchmo_store
except ImportError:
error_out("Satchmo is not installed correctly. Please verify satchmo is on your sys path.")
print "Using Django version %s" % django.get_version()
print "Using Satchmo version %s" % satchmo_store.get_version()
#Check the Django version
#Make sure we only get the X.Y.Z version info and not any other alpha or beta designations
version_check = LooseVersion(".".join(map(str, django.VERSION)[:3]))
if version_check < LooseVersion("1.2.3"):
error_out("Django version must be >= 1.2.3")
# Store these checked installation paths also to the paths overview
verbose_check_install('satchmo', 'satchmo_store', verbose_name='Satchmo')
verbose_check_install('django', 'django', '1.2.3')
# Try importing all our dependencies
verbose_check_install('', 'Crypto.Cipher', verbose_name='The Python Cryptography Toolkit')
try:
import Image
verbose_check_install('Image', 'Image')
except ImportError:
verbose_check_install('PIL', 'PIL', verbose_name='The Python Imaging Library')
verbose_check_install('reportlab', 'reportlab', '2.3')
verbose_check_install('TRML2PDF', 'trml2pdf', '1.0', verbose_name='Tiny RML2PDF')
verbose_check_install('django_registration', 'registration', '0.7')
verbose_check_install('', 'yaml', verbose_name='YAML')
verbose_check_install('sorl_thumbnail', 'sorl', '3.2.5', 'caf69b520632', 'Sorl imaging library')
verbose_check_install('django_caching_app_plugins', 'app_plugins', '0.1.2', '53a31761e344')
verbose_check_install('django_livesettings', 'livesettings', '1.4-8', 'e2769f9f60ec')
verbose_check_install('django_signals_ahoy', 'signals_ahoy', '0.1.0', '9ad8779d4c63')
verbose_check_install('django_threaded_multihost', 'threaded_multihost', '1.4.1', '7ca3743d8a70')
verbose_check_install('django-keyedcache', 'keyedcache', '1.4-4', '4be18235b372')
# Installers versions can be interesting for installation problems
check_install('pip', 'pip') # pip can not show the version number
verbose_check_install('setuptools', 'setuptools', required=False)
#verbose_check_install('mercurial', 'mercurial', required=False)
try:
cache_avail = settings.CACHE_BACKEND
except AttributeError:
error_out("A CACHE_BACKEND must be configured.")
# Try looking up a url to see if there's a misconfiguration there
try:
# The function urlresolvers.reverse has its own way of error reporting to screen and we have no access to it
url = urlresolvers.reverse('satchmo_search')
# Catch SystemExit, because if an error occurs, `urlresolvers` usually calls sys.exit() and other error messages would be lost.
except (Exception, SystemExit), e:
error_out("Unable to resolve urls. Received error - %s" % formaterror(e))
try:
from l10n.l10n_settings import get_l10n_default_currency_symbol
except:
pass
if not isinstance(get_l10n_default_currency_symbol(),types.UnicodeType):
error_out("Your currency symbol should be a unicode string.")
if 'satchmo_store.shop.SSLMiddleware.SSLRedirect' not in settings.MIDDLEWARE_CLASSES:
error_out("You must have satchmo_store.shop.SSLMiddleware.SSLRedirect in your MIDDLEWARE_CLASSES.")
if 'satchmo_store.shop.context_processors.settings' not in settings.TEMPLATE_CONTEXT_PROCESSORS:
error_out("You must have satchmo_store.shop.context_processors.settings in your TEMPLATE_CONTEXT_PROCESSORS.")
if 'threaded_multihost.middleware.ThreadLocalMiddleware' not in settings.MIDDLEWARE_CLASSES:
error_out("You must install django threaded multihost \n and place threaded_multihost.middleware.ThreadLocalMiddleware in your MIDDLEWARE_CLASSES.")
if 'satchmo_store.accounts.email-auth.EmailBackend' not in settings.AUTHENTICATION_BACKENDS:
error_out("You must have satchmo_store.accounts.email-auth.EmailBackend in your AUTHENTICATION_BACKENDS")
if len(settings.SECRET_KEY) == 0:
error_out("You must have SECRET_KEY set to a valid string in your settings.py file")
python_ver = Decimal("%s.%s" % (sys.version_info[0], sys.version_info[1]))
if python_ver < Decimal("2.4"):
error_out("Python version must be at least 2.4.")
if python_ver < Decimal("2.5"):
try:
from elementtree.ElementTree import Element
except ImportError:
error_out("Elementtree is not installed.")
# Check all installed apps
if not filter(lambda x: re.search('not .*(installed|imported)', x), errors):
for appname in settings.INSTALLED_APPS:
pkgtype, filename, root_filename = find_module_extend(appname)
try:
app = import_module(appname)
except (Exception, SystemExit):
if not pkgtype:
error_out('Can not find module "%s"' % appname)
else:
error_out('Can not import "%s"' % appname)
log.exception('Can not import "%s"' % appname)
logged_more = True
else:
log.debug('It does not test INSTALLED_APPS due to previous errors.')
log.debug('\n'.join(2 * ['Installation paths:'] +
[' %s : %s' % (k, sorted(list(v))) for k, v in sorted(app_paths.items())]
))
apps_in_root = sorted(reduce(set.union,
[v for k, v in app_paths.items() if k.startswith('/root')],
set()))
if apps_in_root:
error_out('No package should be installed in the "/root" home directory, but packages %s are.' % (apps_in_root,))
logged_more = True
if len(errors) == 0:
print_out("Your configuration has no errors.")
else:
print_out(""); print_out("The following errors were found:")
for error in errors:
print_out(error)
if logged_more:
print "Error details are in 'satchmo.log'"
def print_out(msg):
"Print immediately to screen and to the log."
log.info(msg)
print msg
def error_out(msg):
"Prints not to the log and at the end to the screen."
log.error(msg)
errors.append(msg)
def formaterror(e):
"Format an exception like this: 'ExceptionName: error message'."
exc_name = getattr(type(e), '__name__', None)
if not exc_name:
# some exceptions defined in C++ do not have an attribute __name__
# e.g. backend.DatabaseError for postgres_psycopg2
exc_name = re.sub(r"<.*'(.*)'>", '\\1', str(type(e)))
exc_name = exc_name.split('.')[-1]
return '%s: %s' % (exc_name, e)
def find_module_extend(appname):
"""Find module - support for "package.package.module".
Returns tuple (pkgtype, filename, root_filename) (result constants are defined in imp)
root_filename is for the firstlevel package
This does not find an yet imported module which is not on the python path now and was only at the time of import (typically store.localsite)
"""
lpos = 0
path = pkgtype = None
try:
while lpos < len(appname):
rpos = (appname.find('.', lpos) < 0) and len(appname) or appname.find('.', lpos)
dummyfile, filename, (suffix, mode, pkgtype) = imp.find_module(appname[lpos:rpos], path)
path = [filename]
if not lpos:
root_filename = filename
lpos = rpos + 1
root_appname = appname.split('.')[0]
if root_filename.endswith('/' + root_appname):
root_filename = root_filename[:-len(root_appname) - 1]
app_paths.setdefault(root_filename, set())
app_paths[root_filename].add(root_appname)
return (pkgtype, filename, root_filename)
except ImportError:
return (None, None, None)
APP_NOT_FOUND, APP_OLD_VERSION, APP_IMPORT_FAIL, APP_OK = range(4)
def check_install(project_name, appname, min_version=None, hg_hash=None):
"""Checks if package is installed, version is greater or equal to the required and can be imported.
This uses different methods of determining the version for diffenent types of installation
in this order: app.get_version() (custom), app.VERSION (tuple), app.__version__ (string), mercurial hash, setuptools version
project_name # verbose name for setuptools (can be with "-")
package_name # package name for python import
min_version # minimal required version (>=min_version)
hg_hash # hg hash of the commit, which should be in the repository. This should be consistent to min_version.
(A developper need not eventually to use specified important version (need not hg up tip) but should be informed to do pull.)
One met condition is sufficient.
Returns tuple: (result_code, # any of APP_* constants
version_string)
The import problem can be caused by a dependency on other packages and therefore it is differentiated from not installed.
"""
# Version number can be obtained even without any requirements for a version this way: (all versions meet this)
# check_install(project_name, appname, min_version='.', hg_hash='')
import os
import time
global logged_more
isimported = False
isversion = None
version = ''
# find it only
pkgtype, filename, root_filename = find_module_extend(appname)
isfound = (pkgtype != None)
if isfound:
try:
# import it
app = __import__(appname)
isimported = True
except ImportError:
log.exception('Can not import "%s"' % appname)
logged_more = True
if isimported:
try:
# get version from app
if hasattr(app, 'get_version'):
get_version = app.get_version
if callable(get_version):
version = get_version()
else:
version = get_version
elif hasattr(app, 'VERSION'):
version = app.VERSION
elif hasattr(app, '__version__'):
version = app.__version__
if isinstance(version, (list, tuple)):
version = '.'.join(map(str, version))
if version and version[0].isdigit() and min_version:
isversion = LooseVersion(version) >= LooseVersion(min_version)
except:
pass
if pkgtype == imp.PKG_DIRECTORY: # and hg_hash != None and (version == None or not version[0].isdigit()):
try:
# get version from mercurial
from mercurial import ui, hg
try:
linkpath = os.readlink(filename)
dirname = os.path.join(os.path.dirname(filename), linkpath)
except:
dirname = filename
repo = None
hg_dir = os.path.normpath(os.path.join(dirname, '..'))
repo = hg.repository(ui.ui(), hg_dir)
try:
node_id = repo.changelog.lookup(hg_hash)
isversion = True
#node = repo[node_id] # required node
node = repo['.'] # current node
datestr = time.strftime('%Y-%m-%d', time.gmtime(node.date()[0]))
version_hg = "hg-%s:%s %s" % (node.rev(), node.hex()[0:12], datestr)
version = (version + ' ' + version_hg).strip()
except:
isversion = isversion or False
except:
pass
if isfound and min_version and project_name and isversion == None:
try:
# get version from setuptools
from pkg_resources import require
version = require(project_name)[0].version
isversion = isversion or False
require('%s >=%s' % (project_name, min_version))
isversion = True
except:
pass
# If no required version specified, then it is also OK
isversion = isversion or (isversion == None and min_version == None)
result = isfound and (isversion and (isimported and APP_OK or APP_IMPORT_FAIL) or APP_OLD_VERSION) or APP_NOT_FOUND
return (result, version)
def verbose_check_install(project_name, appname, min_version=None, hg_hash=None, verbose_name=None, required=True):
"""Check a pagkage and writes the results.
Calls ``check_install`` (see for info about the similar parameters)
verbose_name is used for messageses. Default is same as appname."""
result, version = check_install(project_name, appname, min_version, hg_hash)
verbose_name = (verbose_name or re.sub('[_-]', ' ', appname.capitalize()))
if result != APP_NOT_FOUND and required or result == APP_OK:
log.debug('%s: version %s ' % (verbose_name, version or '(unknown)'))
#
if result == APP_NOT_FOUND:
msg = 'is not installed.'
elif result == APP_OLD_VERSION:
msg = 'should be upgraded to version %s or newer.' % min_version
elif result == APP_IMPORT_FAIL:
msg = 'can not be imported now, but the right version is probably installed. Maybe dependency problem.'
elif result == APP_OK:
msg = None
#
if msg and required:
error_out(' '.join((verbose_name, msg)))
|
{
"content_hash": "5cc81b9b615e9268f715a4624965e860",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 160,
"avg_line_length": 46.43478260869565,
"alnum_prop": 0.6194489031567684,
"repo_name": "grengojbo/satchmo",
"id": "39b78d4067cc6d354a56e4f66169c39bd75849dd",
"size": "14952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satchmo/apps/satchmo_store/shop/management/commands/satchmo_check.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "73898"
},
{
"name": "Python",
"bytes": "1752948"
}
],
"symlink_target": ""
}
|
class DbConnector:
def __init__(self):
pass
def add_comment_count(self, comments_url, count):
raise NotImplementedError
def add_story(self, nb_hash, added, comments_url, story_url):
raise NotImplementedError
def close_connection(self):
raise NotImplementedError
def ensure_domains_table_exists(self):
raise NotImplementedError
def ensure_stories_table_exists(self):
raise NotImplementedError
def insert_domain_entry(self, nb_hash, nb_url, domain, toplevel, toplevel_new):
raise NotImplementedError
def list_stories_with_comments_fewer_than(self, threshold):
raise NotImplementedError
def list_stories_without_comment_count(self):
raise NotImplementedError
def list_urls(self):
raise NotImplementedError
def unstar(self, nb_hash):
raise NotImplementedError
def record_error(self, url, code, headers, body):
raise NotImplementedError
def ensure_config_table_exists(self):
raise NotImplementedError
def read_config(self):
raise NotImplementedError
def add_hashes(self, hashes):
raise NotImplementedError
def read_hashes(self, count):
raise NotImplementedError
def mark_story_done(self, story_hash):
raise NotImplementedError
def list_comment_count_update_candidates(self):
raise NotImplementedError
|
{
"content_hash": "9712a3766cfa6a8159dfe0cbe5360b04",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 26.537037037037038,
"alnum_prop": 0.6838799720865317,
"repo_name": "bmordue/nb",
"id": "50322764c77b5ea61979ce1d9d043ee3cad7de19",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/connectors/DbConnector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "137"
},
{
"name": "HCL",
"bytes": "730"
},
{
"name": "Makefile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "39426"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class Frames(Model):
"""The response for a Get Frames request.
:param review_id: Id of the review.
:type review_id: str
:param video_frames:
:type video_frames:
list[~azure.cognitiveservices.vision.contentmoderator.models.Frame]
"""
_attribute_map = {
'review_id': {'key': 'ReviewId', 'type': 'str'},
'video_frames': {'key': 'VideoFrames', 'type': '[Frame]'},
}
def __init__(self, review_id=None, video_frames=None):
super(Frames, self).__init__()
self.review_id = review_id
self.video_frames = video_frames
|
{
"content_hash": "b5c0d34607f4686960b37b01eb30ddc0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 28.772727272727273,
"alnum_prop": 0.6113744075829384,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "724a54ce61cfa902debb47467a3368d0d6b237e5",
"size": "1107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/models/frames.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(name='kao_json',
version='0.3.1',
description='Python JSON Conversion Library',
author='Chris Loew',
author_email='cloew123@gmail.com',
#url='http://www.python.org/sigs/distutils-sig/',
packages=['kao_json',
'kao_json.builders',
'kao_json.config',
'kao_json.providers'],
install_requires = ['kao_decorators',
'kao_dict']
)
|
{
"content_hash": "81dc97f6e987bb9656beca1661c60844",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 32.2,
"alnum_prop": 0.5445134575569358,
"repo_name": "cloew/KaoJson",
"id": "cfb5de6510b611d6449c23e4e53c01b9dd51b5f7",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14832"
}
],
"symlink_target": ""
}
|
from __future__ import division
from __future__ import with_statement
# standard python libraries
try:
import json
except:
import simplejson as json
import re
import os
import time
# matplotlib.sf.net
import matplotlib
import numpy
# www.gtk.org
import gtk
# our own libraries
from elrond.static import *
from elrond.util import APINotImplemented, Object, Property
def parse(f):
x = []
y = []
fd = open(f, 'r')
lines = [l.strip() for l in fd.readlines()]
fd.close()
for i, line in enumerate(lines):
data = filter(lambda x: x != '', re.split('[, ]', line.strip()))
try:
y.append(float(data[1]))
x.append(float(data[0]))
except IndexError:
y.append(float(data[0]))
x.append(i)
return x, y
##
## Backends...
##
class IBackend(Object):
"""The IBackend class is the base implementation for any class that can produce plots.
e.g. ASCII art or fancy GUI backends like matplotlib.
"""
def stripchart(self, filename):
x_list, y_list = parse(filename)
self.clear()
self.prefs.ymin = 0
self.prefs.ymax = 100
step = 100
x_first = x_list[0: clamp(step, u=len(x_list))]
y_first = y_list[0: clamp(step, u=len(y_list))]
self.prefs.xmin = 0
self.prefs.xmax = len(x_first)
for i in range(0, len(x_first)):
self.plotl(x_first[0:i + 1], y_first[0:i + 1])
self.draw()
self.plotl(x_list, y_list)
for i in range(0, len(x_list)):
self.prefs.xmin = i + 1
self.prefs.xmax = i + 1 + step
self.draw()
def open(self, filename, i=None):
self.clear()
with open(filename, 'r') as f:
storage = json.load(f)
print 'File: %s' % (filename)
print 'Timestamp: %s' % (storage['timestamp'])
for data in storage['data']:
self.plotl(data['x'], data['y'], i=i, xlabel=data['xlabel'], ylabel=data['ylabel'],
style=data['style'], color=int(data['color'], 0))
self.draw()
if not self.prefs.overlay:
self.__storage['data'] = []
self.__storage['data'].extend(storage['data'])
def save(self, filename):
self.__storage['timestamp'] = time.ctime(time.time())
with open(filename, 'w') as f:
json.dump(self.__storage, f, indent=8)
# TODO:
def stats(self, x, y):
print ' len =', len(y)
print ' mean =', numpy.mean(y)
print ' sum =', sum(y)
print ' std =', numpy.std(y)
ymin = numpy.min(y)
print ' ymin =', ymin
print ' xmin =', x[y.index(ymin)]
ymax = numpy.max(y)
print ' ymax =', ymax
print ' xmax =', x[y.index(ymax)]
def __plot__(self, x, y, style=None, color=0xFF0000, xlabel=None, ylabel=None):
self.stats(x, y)
data = {
'xlabel': xlabel,
'x': x,
'ylabel': ylabel,
'y': y,
'style': style,
'color': '0x%06X' % (color)
}
if not self.prefs.overlay:
self.__storage['data'] = []
self.__storage['data'].append(data)
def plotr(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotl(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotlh(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotlv(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotrh(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotrv(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def draw(self, *args, **kwargs):
pass
def clear(self, *args, **kwargs):
pass
def show(self, *args, **kwargs):
pass
def hide(self, *args, **kwargs):
pass
def run(self, *args, **kwargs):
pass
def __init__(self):
Object.__init__(self)
self.__storage = {
'data': []
}
class ConsoleBackend(IBackend):
"""This is the simplest of backends. This simply prints to the console. This backend
must be used within a ConsoleContainer.
"""
def __plot__(self, x, y, style=None, color=0xFF0000, xlabel=None, ylabel=None):
IBackend.__plot__(self, x, y, style=style, color=color, xlabel=xlabel, ylabel=ylabel)
for i in range(0, len(x)):
print 'x,y[%d] = %.4f, %4f' % (i, x[i], y[i])
class IMatplotlibBackend(IBackend):
"""This backend uses matplotlib to prodce plots. An ImageContainer or WindowContainer in-turn
contains this backed to either render the plot to and image or to a GUI.
"""
def __plot__(self, x, y, i=None, axes=None, style='-', color=0xFF0000, xlabel=None, ylabel=None):
IBackend.__plot__(self, x, y, style=style, color=color, xlabel=xlabel, ylabel=ylabel)
if i is None or axes is None:
# TODO: raise an exception
return
if not xlabel is None:
# TODO: axes.set_xlabel(xlabel)
pass
if not ylabel is None:
# TODO: axes.set_ylabel(ylabel)
pass
if i > self.__nsubplots - 1:
self.subplot_new()
subplot = self.__subplots[i][axes]
subplot.plot(x, y, style, color='#%06X' % (color))
subplot.grid(True)
def plotl(self, *args, **kwargs):
if not 'i' in kwargs:
kwargs['i'] = self.__isubplot
kwargs['axes'] = 'axl'
self.__plot__(*args, **kwargs)
@APINotImplemented
def plotr(self, *args, **kwargs):
if not 'i' in kwargs:
kwargs['i'] = self.__isubplot
kwargs['axes'] = 'axr'
self.__plot__(*args, **kwargs)
def plotlh(self, y, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axl'].axhline(y, ls=style, color='#%06X' % (color))
self.__subplots[i]['axl'].grid(True)
def plotlv(self, x, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axl'].axvline(x, ls=style, color='#%06X' % (color))
self.__subplots[i]['axl'].grid(True)
@APINotImplemented
def plotrh(self, y, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axr'].axhline(y, ls=style, color='#%06X' % (color))
self.__subplots[i]['axr'].grid(True)
@APINotImplemented
def plotrv(self, x, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axr'].axvline(x, ls=style, color='#%06X' % (color))
self.__subplots[i]['axr'].grid(True)
def __draw(self, subplot, limits):
subplot.axis('auto')
if filter(lambda x: x != 0, limits):
subplot.axis(limits)
def draw(self):
limits = [self.prefs.xmin, self.prefs.xmax, self.prefs.yminl, self.prefs.ymaxl]
for subplot in self.__subplots:
self.__draw(subplot['axl'], limits)
# limits = [self.prefs.xmin, self.prefs.xmax, self.prefs.yminr, self.prefs.ymaxr]
# for subplot in self.__subplots:
# self.__draw(subplot['axr'], limits)
self.canvas.draw()
def __reset(self):
for i, subplot in enumerate(self.__subplots):
axl = subplot['axl']
axr = subplot['axr']
axl.grid(True)
axl.yaxis.set_label_position('left')
axl.yaxis.tick_left()
# axr.grid(True)
# axr.yaxis.set_label_position('right')
# axr.yaxis.tick_right()
axl.change_geometry(self.__nsubplots, 1, self.__nsubplots - i)
self.figure.subplots_adjust()
def clear(self):
if not self.prefs.overlay:
for subplot in self.__subplots:
try:
subplot['axl'].clear()
subplot['axr'].clear()
except:
pass
self.__reset()
def subplot_new(self):
self.__isubplot = len(self.__subplots)
self.__nsubplots = self.__isubplot + 1
axl = self.figure.add_subplot(self.__nsubplots, 1, self.__nsubplots)
axr = None # axl.twinx()
self.__subplots.append({'axl': axl, 'axr': axr})
self.__reset()
def __init__(self):
IBackend.__init__(self)
from matplotlib.figure import Figure
self.figure = Figure()
self.__subplots = []
self.subplot_new()
class MatplotlibImageBackend(IMatplotlibBackend):
def render(self, filename):
self.figure.savefig(filename)
def __init__(self):
IMatplotlibBackend.__init__(self)
from matplotlib.backends.backend_cairo \
import FigureCanvasCairo as FigureCanvas
self.canvas = FigureCanvas(self.figure)
class MatplotlibWindowBackend(IMatplotlibBackend):
@Property
def widget():
def fget(self):
self.__widget = gtk.VBox()
self.__widget.pack_start(self.canvas)
self.__widget.pack_start(self.toolbar, False, False)
return self.__widget
def fset(self, widget):
self.__widget = widget
return locals()
def show(self):
self.__widget.show()
self.canvas.show()
self.toolbar.show()
def hide(self):
self.toolbar.hide()
self.canvas.hide()
self.__widget.hide()
def __init__(self):
IMatplotlibBackend.__init__(self)
from matplotlib.backends.backend_gtk \
import FigureCanvasGTK as FigureCanvas
self.canvas = FigureCanvas(self.figure)
from matplotlib.backends.backend_gtk \
import NavigationToolbar2GTK as NavigationToolbar
self.toolbar = NavigationToolbar(self.canvas, None)
##
## Containers...
##
class IContainer(Object):
"""The IContainer class is the base implementation for any class that contains IBackends.
e.g. console wrappers, image only wrappers, or fancy GUI toolkits like GTK+.
"""
@Property
def prefs():
def fget(self):
return self.backend.prefs
def fset(self, prefs):
self.backend.prefs = prefs
return locals()
def plotr(self, *args, **kwargs):
self.backend.plotr(*args, **kwargs)
def plotl(self, *args, **kwargs):
self.backend.plotl(*args, **kwargs)
def plotlh(self, *args, **kwargs):
self.backend.plotlh(*args, **kwargs)
def plotlv(self, *args, **kwargs):
self.backend.plotlv(*args, **kwargs)
def plotrh(self, *args, **kwargs):
self.backend.plotrh(*args, **kwargs)
def plotrv(self, *args, **kwargs):
self.backend.plotrv(*args, **kwargs)
def draw(self, *args, **kwargs):
self.backend.draw(*args, **kwargs)
def clear(self, *args, **kwargs):
self.backend.clear(*args, **kwargs)
def show(self, *args, **kwargs):
self.backend.show(*args, **kwargs)
def hide(self, *args, **kwargs):
self.backend.hide(*args, **kwargs)
def run(self, *args, **kwargs):
self.backend.run(*args, **kwargs)
def stripchart(self, *args, **kwargs):
self.backend.stripchart(*args, **kwargs)
def open(self, *args, **kwargs):
self.backend.open(*args, **kwargs)
def save(self, *args, **kwargs):
self.backend.save(*args, **kwargs)
class ConsoleContainer(IContainer):
def __init__(self):
IContainer.__init__(self)
self.backend = ConsoleBackend()
class ImageContainer(IContainer):
def draw(self, *args, **kwargs):
IContainer.draw(self, *args, **kwargs)
self.backend.render('foobar.png')
def __init__(self):
IContainer.__init__(self)
self.backend = MatplotlibImageBackend()
class WindowContainer(IContainer):
@Property
def prefs():
def fget(self):
return self.backend.prefs
def fset(self, prefs):
self.backend.prefs = prefs
widget = self.__builder.get_object('preferences_xmin_entry')
widget.set_text(str(self.backend.prefs.xmin))
widget = self.__builder.get_object('preferences_xmax_entry')
widget.set_text(str(self.backend.prefs.xmax))
widget = self.__builder.get_object('preferences_yminl_entry')
widget.set_text(str(self.backend.prefs.yminl))
widget = self.__builder.get_object('preferences_ymaxl_entry')
widget.set_text(str(self.backend.prefs.ymaxl))
widget = self.__builder.get_object('preferences_yminr_entry')
widget.set_text(str(self.backend.prefs.yminr))
widget = self.__builder.get_object('preferences_ymaxr_entry')
widget.set_text(str(self.backend.prefs.ymaxr))
return locals()
@Property
def title():
def fget(self):
return self.__title
def fset(self, title):
self.__title = title
if not self.__title:
return
self.__container.set_title(self.__title)
return locals()
def clear(self, *args, **kwargs):
IContainer.clear(self, *args, **kwargs)
def show(self, *args, **kwargs):
IContainer.show(self, *args, **kwargs)
self.__container.show()
def hide(self, *args, **kwargs):
IContainer.hide(self, *args, **kwargs)
self.__container.hide()
def run(self):
gtk.main()
def on_open_ok_button_clicked(self, widget, data=None):
self.__open.hide()
filename = self.__open.get_filename()
if not filename:
return
self.__open.set_filename(filename)
self.open(filename)
def on_open_cancel_button_clicked(self, widget, data=None):
self.__open.hide()
def on_open_chooser_delete_event(self, widget, data=None):
self.__open.hide()
return True
def on_plot_open_button_clicked(self, widget, data=None):
self.__open = self.__builder.get_object('open_chooser')
self.__open.show()
def on_plot_save_button_clicked(self, widget, data=None):
if not self.filename:
self.on_plot_saveas_button_clicked(self, None)
if self.filename:
self.save(self.filename)
def on_saveas_ok_button_clicked(self, widget, data=None):
self.__saveas.hide()
filename = self.__saveas.get_filename()
if not filename:
return
self.__saveas.set_filename(filename)
self.filename = filename
self.on_plot_save_button_clicked(self, None)
def on_saveas_cancel_button_clicked(self, widget, data=None):
self.__saveas.hide()
def on_saveas_chooser_delete_event(self, widget, data=None):
self.__saveas.hide()
return True
def on_plot_saveas_button_clicked(self, widget, data=None):
self.__saveas = self.__builder.get_object('saveas_chooser')
self.__saveas.show()
def on_preferences_ok_button_clicked(self, widget, data=None):
self.__preferences.hide()
widget = self.__builder.get_object('preferences_xmin_entry')
self.prefs.xmin = float(widget.get_text())
widget = self.__builder.get_object('preferences_xmax_entry')
self.prefs.xmax = float(widget.get_text())
widget = self.__builder.get_object('preferences_yminl_entry')
self.prefs.yminl = float(widget.get_text())
widget = self.__builder.get_object('preferences_ymaxl_entry')
self.prefs.ymaxl = float(widget.get_text())
widget = self.__builder.get_object('preferences_yminr_entry')
self.prefs.yminr = float(widget.get_text())
widget = self.__builder.get_object('preferences_ymaxr_entry')
self.prefs.ymaxr = float(widget.get_text())
self.draw()
def on_preferences_cancel_button_clicked(self, widget, data=None):
self.__preferences.hide()
def on_plot_preferences_button_clicked(self, widget, data=None):
self.__preferences = self.__builder.get_object('preferences_dialog')
self.__preferences.show()
def on_preferences_dialog_delete_event(self, widget, data=None):
self.__preferences.hide()
return True
def on_plot_overlay_button_toggled(self, widget, data=None):
self.prefs.overlay = widget.get_active()
def on_plot_window_destroy(self, widget, data=None):
gtk.main_quit()
def __init__(self, container):
IContainer.__init__(self)
self.backend = MatplotlibWindowBackend()
buildername = os.environ['GRIMA_ETC'] + os.sep + 'grima-plot.ui'
self.__builder = gtk.Builder()
self.__builder.add_from_file(buildername)
self.__builder.connect_signals(self)
if container:
self.__container = container
widget = self.__builder.get_object('plot_embeddable')
container = self.__builder.get_object('plot_container')
container.remove(widget)
self.__container.add(widget)
else:
self.__container = self.__builder.get_object('plot_window')
# TODO: this should not be needed, but somehow the widget show'ing order
# is all screwed up and the window doesn't display correctly without this
self.__container.set_default_size(700, 500)
widget = self.__builder.get_object('plot_backend')
widget.add(self.backend.widget)
# TODO:
self.filename = None
##
## This is the public API...
##
class Plot(Object):
def __create_display(self):
if not self.__enabled:
return
self.__display = None
if self.type == 'console':
self.__display = ConsoleContainer()
if self.type == 'image':
self.__display = ImageContainer()
if self.type == 'window':
self.__display = WindowContainer(self.container)
try:
self.__display.prefs = self
self.__display.title = self.title
except:
self.__enabled = False
@Property
def enabled():
def fget(self):
return self.__enabled
def fset(self, enabled):
self.__enabled = enabled
self.__create_display()
return locals()
@Property
def type():
def fget(self):
return self.__type
def fset(self, tipe):
self.__type = tipe
self.__create_display()
return locals()
@Property
def title():
def fget(self):
return self.__title
def fset(self, title):
self.__title = title
return locals()
def plotr(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotr(*args, **kwargs)
def plotl(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotl(*args, **kwargs)
def plotlh(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotlh(*args, **kwargs)
def plotlv(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotlv(*args, **kwargs)
def plotrh(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotrh(*args, **kwargs)
def plotrv(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotrv(*args, **kwargs)
def draw(self, *args, **kwargs):
if not self.enabled:
return
self.__display.draw(*args, **kwargs)
def clear(self, *args, **kwargs):
if not self.enabled:
return
self.__display.clear(*args, **kwargs)
def show(self, *args, **kwargs):
if not self.enabled:
return
self.__display.show(*args, **kwargs)
def hide(self, *args, **kwargs):
if not self.enabled:
return
self.__display.hide(*args, **kwargs)
def run(self, *args, **kwargs):
if not self.enabled:
return
self.__display.run(*args, **kwargs)
def stripchart(self, *args, **kwargs):
if not self.enabled:
return
self.__display.stripchart(*args, **kwargs)
def open(self, *args, **kwargs):
if not self.enabled:
return
self.__display.open(*args, **kwargs)
def save(self, *args, **kwargs):
if not self.enabled:
return
self.__display.save(*args, **kwargs)
def __init__(self):
Object.__init__(self)
self.enabled = False
self.container = None
self.type = 'console'
self.title = None
# TODO: use preferences
self.xmin = 0
self.xmax = 0
self.yminl = 0
self.ymaxl = 0
self.yminr = 0
self.ymaxr = 0
self.overlay = False
# $Id:$
#
# Local Variables:
# indent-tabs-mode: nil
# python-continuation-offset: 2
# python-indent: 8
# End:
# vim: ai et si sw=8 ts=8
|
{
"content_hash": "15bf6d9a12f93b1b455fcedf861891e7",
"timestamp": "",
"source": "github",
"line_count": 804,
"max_line_length": 107,
"avg_line_length": 31.940298507462686,
"alnum_prop": 0.46736760124610593,
"repo_name": "cdsi/grima",
"id": "5321145ab6dd5a87aabdfbd2a8109f1375b9d8ab",
"size": "25680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grima/plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "656"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "PHP",
"bytes": "8038"
},
{
"name": "Python",
"bytes": "54848"
},
{
"name": "Shell",
"bytes": "11378"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['Seasonal_DayOfWeek'] , ['AR'] );
|
{
"content_hash": "17ec29caa8459f6a25f315d13f2835dc",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 86,
"avg_line_length": 39.75,
"alnum_prop": 0.710691823899371,
"repo_name": "antoinecarme/pyaf",
"id": "96128bf168ad97c2131ef5f2e968ae386c4665be",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_LinearTrend_Seasonal_DayOfWeek_AR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from ncclient.operations.rpc import *
import unittest
from mock import patch
from ncclient import manager
import ncclient.manager
import ncclient.transport
from ncclient.xml_ import *
from ncclient.operations import RaiseMode
from ncclient.capabilities import Capabilities
import sys
if sys.version >= '3':
patch_str = 'ncclient.operations.rpc.Event.isSet'
else:
patch_str = 'threading._Event.isSet'
xml1 = """<rpc-reply xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos">
<ok/>
</rpc-reply>"""
xml2 = """<rpc-reply message-id="urn:uuid:15ceca00-904e-11e4-94ad-5c514f91ab3f">
<load-configuration-results>
<rpc-error>
<error-severity>error</error-severity>
<error-info>
<bad-element>system1</bad-element>
</error-info>
<error-message>syntax error</error-message>
</rpc-error>
<rpc-error>
<error-severity>error</error-severity>
<error-info>
<bad-element>}</bad-element>
</error-info>
<error-message>error recovery ignores input until this point</error-message>
</rpc-error>
</load-configuration-results>
</rpc-reply>"""
xml3 = """<rpc-reply xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos" attrib1 = "test">
<software-information>
<host-name>R1</host-name>
<product-model>firefly-perimeter</product-model>
<product-name>firefly-perimeter</product-name>
<package-information>
<name>junos</name>
<comment>JUNOS Software Release [12.1X46-D10.2]</comment>
</package-information>
</software-information>
<rpc-error>
</rpc-error>
<cli>
<banner></banner>
</cli>
</rpc-reply>"""
xml4 = """<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/14.2I0/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:b19400d6-fa2a-11e4-8f7b-0800278ff596">
<data>
<configuration xmlns="http://xml.juniper.net/xnm/1.1/xnm" junos:changed-seconds="1431599824" junos:changed-localtime="2015-05-14 03:37:04 PDT">
<system>
<services>
<ftp>
<connection-limit>200</connection-limit>
<rate-limit>214</rate-limit>
</ftp>
<ssh>
<root-login>allow</root-login>
</ssh>
<telnet>
</telnet>
<netconf>
<ssh>
</ssh>
</netconf>
</services>
</system>
</configuration>
</data>
</rpc-reply>"""
class TestRPC(unittest.TestCase):
def test_rpc_reply(self):
obj = RPCReply(xml4)
obj.parse()
self.assertTrue(obj.ok)
self.assertFalse(obj.error)
self.assertEqual(xml4, obj.xml)
self.assertTrue(obj._parsed)
@patch('ncclient.transport.Session.send')
@patch(patch_str)
def test_rpc_send(self, mock_thread, mock_send):
device_handler = manager.make_device_handler({'name': 'junos'})
capabilities = Capabilities(device_handler.get_capabilities())
session = ncclient.transport.Session(capabilities)
obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
reply = RPCReply(xml1)
obj._reply = reply
node = new_ele("commit")
sub_ele(node, "confirmed")
sub_ele(node, "confirm-timeout").text = "50"
sub_ele(node, "log").text = "message"
result = obj._request(node)
ele = new_ele("rpc",
{"message-id": obj._id},
**device_handler.get_xml_extra_prefix_kwargs())
ele.append(node)
node = to_xml(ele)
mock_send.assert_called_once_with(node)
self.assertEqual(
result.data_xml,
(NCElement(
reply,
device_handler.transform_reply())).data_xml)
self.assertEqual(obj.session, session)
self.assertEqual(reply, obj.reply)
@patch('ncclient.transport.Session.send')
@patch(patch_str)
def test_rpc_async(self, mock_thread, mock_send):
device_handler = manager.make_device_handler({'name': 'junos'})
capabilities = Capabilities(device_handler.get_capabilities())
session = ncclient.transport.Session(capabilities)
obj = RPC(
session,
device_handler,
raise_mode=RaiseMode.ALL,
timeout=0,
async_mode=True)
reply = RPCReply(xml1)
obj._reply = reply
node = new_ele("commit")
result = obj._request(node)
self.assertEqual(result, obj)
@patch('ncclient.transport.Session.send')
@patch(patch_str)
def test_rpc_timeout_error(self, mock_thread, mock_send):
device_handler = manager.make_device_handler({'name': 'junos'})
capabilities = Capabilities(device_handler.get_capabilities())
session = ncclient.transport.Session(capabilities)
obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
reply = RPCReply(xml1)
obj.deliver_reply(reply)
node = new_ele("commit")
sub_ele(node, "confirmed")
mock_thread.return_value = False
self.assertRaises(TimeoutExpiredError, obj._request, node)
@patch('ncclient.transport.Session.send')
@patch(patch_str)
def test_rpc_rpcerror(self, mock_thread, mock_send):
device_handler = manager.make_device_handler({'name': 'junos'})
capabilities = Capabilities(device_handler.get_capabilities())
session = ncclient.transport.Session(capabilities)
obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
reply = RPCReply(xml1)
obj._reply = reply
node = new_ele("commit")
sub_ele(node, "confirmed")
err = RPCError(to_ele(xml2))
obj.deliver_error(err)
self.assertRaises(RPCError, obj._request, node)
@patch('ncclient.transport.Session.send')
@patch(patch_str)
def test_rpc_capability_error(self, mock_thread, mock_send):
device_handler = manager.make_device_handler({'name': 'junos'})
capabilities = Capabilities(device_handler.get_capabilities())
session = ncclient.transport.Session(capabilities)
session._server_capabilities = [':running']
obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
obj._assert(':running')
self.assertRaises(MissingCapabilityError,
obj._assert, ':candidate')
|
{
"content_hash": "23692fe56ce58a5a11c88d735399e800",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 235,
"avg_line_length": 36.353932584269664,
"alnum_prop": 0.6247875135218668,
"repo_name": "einarnn/ncclient",
"id": "131d2970fb306cf14857fa53a46ede2d122c4f83",
"size": "6471",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unit/operations/test_rpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "301712"
}
],
"symlink_target": ""
}
|
import numpy as np
from .parametric import f_oneway, ttest_1samp_no_p
from ..parallel import parallel_func, check_n_jobs
from ..fixes import jit, has_numba
from ..utils import (split_list, logger, verbose, ProgressBar, warn, _pl,
check_random_state, _check_option, _validate_type)
from ..source_estimate import (SourceEstimate, VolSourceEstimate,
MixedSourceEstimate)
from ..source_space import SourceSpaces
def _get_buddies_fallback(r, s, neighbors, indices=None):
if indices is None:
buddies = np.where(r)[0]
else:
buddies = indices[r[indices]]
buddies = buddies[np.in1d(s[buddies], neighbors, assume_unique=True)]
r[buddies] = False
return buddies.tolist()
def _get_selves_fallback(r, s, ind, inds, t, t_border, max_step):
start = t_border[max(t[ind] - max_step, 0)]
stop = t_border[min(t[ind] + max_step + 1, len(t_border) - 1)]
indices = inds[start:stop]
selves = indices[r[indices]]
selves = selves[s[ind] == s[selves]]
r[selves] = False
return selves.tolist()
def _where_first_fallback(x):
# this is equivalent to np.where(r)[0] for these purposes, but it's
# a little bit faster. Unfortunately there's no way to tell numpy
# just to find the first instance (to save checking every one):
next_ind = int(np.argmax(x))
if next_ind == 0:
next_ind = -1
return next_ind
if has_numba: # pragma: no cover
@jit()
def _get_buddies(r, s, neighbors, indices=None):
buddies = list()
# At some point we might be able to use the sorted-ness of s or
# neighbors to further speed this up
if indices is None:
n_check = len(r)
else:
n_check = len(indices)
for ii in range(n_check):
if indices is None:
this_idx = ii
else:
this_idx = indices[ii]
if r[this_idx]:
this_s = s[this_idx]
for ni in range(len(neighbors)):
if this_s == neighbors[ni]:
buddies.append(this_idx)
r[this_idx] = False
break
return buddies
@jit()
def _get_selves(r, s, ind, inds, t, t_border, max_step):
selves = list()
start = t_border[max(t[ind] - max_step, 0)]
stop = t_border[min(t[ind] + max_step + 1, len(t_border) - 1)]
for ii in range(start, stop):
this_idx = inds[ii]
if r[this_idx] and s[ind] == s[this_idx]:
selves.append(this_idx)
r[this_idx] = False
return selves
@jit()
def _where_first(x):
for ii in range(len(x)):
if x[ii]:
return ii
return -1
else: # pragma: no cover
# fastest ways we've found with NumPy
_get_buddies = _get_buddies_fallback
_get_selves = _get_selves_fallback
_where_first = _where_first_fallback
@jit()
def _masked_sum(x, c):
return np.sum(x[c])
@jit()
def _masked_sum_power(x, c, t_power):
return np.sum(np.sign(x[c]) * np.abs(x[c]) ** t_power)
@jit()
def _sum_cluster_data(data, tstep):
return np.sign(data) * np.logical_not(data == 0) * tstep
def _get_clusters_spatial(s, neighbors):
"""Form spatial clusters using neighbor lists.
This is equivalent to _get_components with n_times = 1, with a properly
reconfigured adjacency matrix (formed as "neighbors" list)
"""
# s is a vector of spatial indices that are significant, like:
# s = np.where(x_in)[0]
# for x_in representing a single time-instant
r = np.ones(s.shape, bool)
clusters = list()
next_ind = 0 if s.size > 0 else -1
while next_ind >= 0:
# put first point in a cluster, adjust remaining
t_inds = [next_ind]
r[next_ind] = 0
icount = 1 # count of nodes in the current cluster
while icount <= len(t_inds):
ind = t_inds[icount - 1]
# look across other vertices
buddies = _get_buddies(r, s, neighbors[s[ind]])
t_inds.extend(buddies)
icount += 1
next_ind = _where_first(r)
clusters.append(s[t_inds])
return clusters
def _reassign(check, clusters, base, num):
"""Reassign cluster numbers."""
# reconfigure check matrix
check[check == num] = base
# concatenate new values into clusters array
clusters[base - 1] = np.concatenate((clusters[base - 1],
clusters[num - 1]))
clusters[num - 1] = np.array([], dtype=int)
def _get_clusters_st_1step(keepers, neighbors):
"""Directly calculate clusters.
This uses knowledge that time points are
only adjacent to immediate neighbors for data organized as time x space.
This algorithm time increases linearly with the number of time points,
compared to with the square for the standard (graph) algorithm.
This algorithm creates clusters for each time point using a method more
efficient than the standard graph method (but otherwise equivalent), then
combines these clusters across time points in a reasonable way.
"""
n_src = len(neighbors)
n_times = len(keepers)
# start cluster numbering at 1 for diffing convenience
enum_offset = 1
check = np.zeros((n_times, n_src), dtype=int)
clusters = list()
for ii, k in enumerate(keepers):
c = _get_clusters_spatial(k, neighbors)
for ci, cl in enumerate(c):
check[ii, cl] = ci + enum_offset
enum_offset += len(c)
# give them the correct offsets
c = [cl + ii * n_src for cl in c]
clusters += c
# now that each cluster has been assigned a unique number, combine them
# by going through each time point
for check1, check2, k in zip(check[:-1], check[1:], keepers[:-1]):
# go through each one that needs reassignment
inds = k[check2[k] - check1[k] > 0]
check1_d = check1[inds]
n = check2[inds]
nexts = np.unique(n)
for num in nexts:
prevs = check1_d[n == num]
base = np.min(prevs)
for pr in np.unique(prevs[prevs != base]):
_reassign(check1, clusters, base, pr)
# reassign values
_reassign(check2, clusters, base, num)
# clean up clusters
clusters = [cl for cl in clusters if len(cl) > 0]
return clusters
def _get_clusters_st_multistep(keepers, neighbors, max_step=1):
"""Directly calculate clusters.
This uses knowledge that time points are
only adjacent to immediate neighbors for data organized as time x space.
This algorithm time increases linearly with the number of time points,
compared to with the square for the standard (graph) algorithm.
"""
n_src = len(neighbors)
n_times = len(keepers)
t_border = list()
t_border.append(0)
for ki, k in enumerate(keepers):
keepers[ki] = k + ki * n_src
t_border.append(t_border[ki] + len(k))
t_border = np.array(t_border)
keepers = np.concatenate(keepers)
v = keepers
t, s = divmod(v, n_src)
r = np.ones(t.shape, dtype=bool)
clusters = list()
inds = np.arange(t_border[0], t_border[n_times])
next_ind = 0 if s.size > 0 else -1
while next_ind >= 0:
# put first point in a cluster, adjust remaining
t_inds = [next_ind]
r[next_ind] = False
icount = 1 # count of nodes in the current cluster
# look for significant values at the next time point,
# same sensor, not placed yet, and add those
while icount <= len(t_inds):
ind = t_inds[icount - 1]
selves = _get_selves(r, s, ind, inds, t, t_border, max_step)
# look at current time point across other vertices
these_inds = inds[t_border[t[ind]]:t_border[t[ind] + 1]]
buddies = _get_buddies(r, s, neighbors[s[ind]], these_inds)
t_inds += buddies + selves
icount += 1
next_ind = _where_first(r)
clusters.append(v[t_inds])
return clusters
def _get_clusters_st(x_in, neighbors, max_step=1):
"""Choose the most efficient version."""
n_src = len(neighbors)
n_times = x_in.size // n_src
cl_goods = np.where(x_in)[0]
if len(cl_goods) > 0:
keepers = [np.array([], dtype=int)] * n_times
row, col = np.unravel_index(cl_goods, (n_times, n_src))
lims = [0]
if isinstance(row, int):
row = [row]
col = [col]
else:
order = np.argsort(row)
row = row[order]
col = col[order]
lims += (np.where(np.diff(row) > 0)[0] + 1).tolist()
lims.append(len(row))
for start, end in zip(lims[:-1], lims[1:]):
keepers[row[start]] = np.sort(col[start:end])
if max_step == 1:
return _get_clusters_st_1step(keepers, neighbors)
else:
return _get_clusters_st_multistep(keepers, neighbors,
max_step)
else:
return []
def _get_components(x_in, adjacency, return_list=True):
"""Get connected components from a mask and a adjacency matrix."""
from scipy import sparse
if adjacency is False:
components = np.arange(len(x_in))
else:
from scipy.sparse.csgraph import connected_components
mask = np.logical_and(x_in[adjacency.row], x_in[adjacency.col])
data = adjacency.data[mask]
row = adjacency.row[mask]
col = adjacency.col[mask]
shape = adjacency.shape
idx = np.where(x_in)[0]
row = np.concatenate((row, idx))
col = np.concatenate((col, idx))
data = np.concatenate((data, np.ones(len(idx), dtype=data.dtype)))
adjacency = sparse.coo_matrix((data, (row, col)), shape=shape)
_, components = connected_components(adjacency)
if return_list:
start = np.min(components)
stop = np.max(components)
comp_list = [list() for i in range(start, stop + 1, 1)]
mask = np.zeros(len(comp_list), dtype=bool)
for ii, comp in enumerate(components):
comp_list[comp].append(ii)
mask[comp] += x_in[ii]
clusters = [np.array(k) for k, m in zip(comp_list, mask) if m]
return clusters
else:
return components
def _find_clusters(x, threshold, tail=0, adjacency=None, max_step=1,
include=None, partitions=None, t_power=1, show_info=False):
"""Find all clusters which are above/below a certain threshold.
When doing a two-tailed test (tail == 0), only points with the same
sign will be clustered together.
Parameters
----------
x : 1D array
Data
threshold : float | dict
Where to threshold the statistic. Should be negative for tail == -1,
and positive for tail == 0 or 1. Can also be an dict for
threshold-free cluster enhancement.
tail : -1 | 0 | 1
Type of comparison
adjacency : scipy.sparse.coo_matrix, None, or list
Defines adjacency between features. The matrix is assumed to
be symmetric and only the upper triangular half is used.
If adjacency is a list, it is assumed that each entry stores the
indices of the spatial neighbors in a spatio-temporal dataset x.
Default is None, i.e, a regular lattice adjacency.
False means no adjacency.
max_step : int
If adjacency is a list, this defines the maximal number of steps
between vertices along the second dimension (typically time) to be
considered adjacent.
include : 1D bool array or None
Mask to apply to the data of points to cluster. If None, all points
are used.
partitions : array of int or None
An array (same size as X) of integers indicating which points belong
to each partition.
t_power : float
Power to raise the statistical values (usually t-values) by before
summing (sign will be retained). Note that t_power == 0 will give a
count of nodes in each cluster, t_power == 1 will weight each node by
its statistical score.
show_info : bool
If True, display information about thresholds used (for TFCE). Should
only be done for the standard permutation.
Returns
-------
clusters : list of slices or list of arrays (boolean masks)
We use slices for 1D signals and mask to multidimensional
arrays.
sums : array
Sum of x values in clusters.
"""
from scipy import ndimage
_check_option('tail', tail, [-1, 0, 1])
x = np.asanyarray(x)
if not np.isscalar(threshold):
if not isinstance(threshold, dict):
raise TypeError('threshold must be a number, or a dict for '
'threshold-free cluster enhancement')
if not all(key in threshold for key in ['start', 'step']):
raise KeyError('threshold, if dict, must have at least '
'"start" and "step"')
tfce = True
use_x = x[np.isfinite(x)]
if use_x.size == 0:
raise RuntimeError(
'No finite values found in the observed statistic values')
if tail == -1:
if threshold['start'] > 0:
raise ValueError('threshold["start"] must be <= 0 for '
'tail == -1')
if threshold['step'] >= 0:
raise ValueError('threshold["step"] must be < 0 for '
'tail == -1')
stop = np.min(use_x)
elif tail == 1:
stop = np.max(use_x)
else: # tail == 0
stop = max(np.max(use_x), -np.min(use_x))
del use_x
thresholds = np.arange(threshold['start'], stop,
threshold['step'], float)
h_power = threshold.get('h_power', 2)
e_power = threshold.get('e_power', 0.5)
if show_info is True:
if len(thresholds) == 0:
warn('threshold["start"] (%s) is more extreme than data '
'statistics with most extreme value %s'
% (threshold['start'], stop))
else:
logger.info('Using %d thresholds from %0.2f to %0.2f for TFCE '
'computation (h_power=%0.2f, e_power=%0.2f)'
% (len(thresholds), thresholds[0], thresholds[-1],
h_power, e_power))
scores = np.zeros(x.size)
else:
thresholds = [threshold]
tfce = False
# include all points by default
if include is None:
include = np.ones(x.shape, dtype=bool)
if tail in [0, 1] and not np.all(np.diff(thresholds) > 0):
raise ValueError('Thresholds must be monotonically increasing')
if tail == -1 and not np.all(np.diff(thresholds) < 0):
raise ValueError('Thresholds must be monotonically decreasing')
# set these here just in case thresholds == []
clusters = list()
sums = list()
for ti, thresh in enumerate(thresholds):
# these need to be reset on each run
clusters = list()
if tail == 0:
x_ins = [np.logical_and(x > thresh, include),
np.logical_and(x < -thresh, include)]
elif tail == -1:
x_ins = [np.logical_and(x < thresh, include)]
else: # tail == 1
x_ins = [np.logical_and(x > thresh, include)]
# loop over tails
for x_in in x_ins:
if np.any(x_in):
out = _find_clusters_1dir_parts(x, x_in, adjacency,
max_step, partitions, t_power,
ndimage)
clusters += out[0]
sums.append(out[1])
if tfce:
# the score of each point is the sum of the h^H * e^E for each
# supporting section "rectangle" h x e.
if ti == 0:
h = abs(thresh)
else:
h = abs(thresh - thresholds[ti - 1])
h = h ** h_power
for c in clusters:
# triage based on cluster storage type
if isinstance(c, slice):
len_c = c.stop - c.start
elif isinstance(c, tuple):
len_c = len(c)
elif c.dtype == bool:
len_c = np.sum(c)
else:
len_c = len(c)
scores[c] += h * (len_c ** e_power)
# turn sums into array
sums = np.concatenate(sums) if sums else np.array([])
if tfce:
# each point gets treated independently
clusters = np.arange(x.size)
if adjacency is None or adjacency is False:
if x.ndim == 1:
# slices
clusters = [slice(c, c + 1) for c in clusters]
else:
# boolean masks (raveled)
clusters = [(clusters == ii).ravel()
for ii in range(len(clusters))]
else:
clusters = [np.array([c]) for c in clusters]
sums = scores
return clusters, sums
def _find_clusters_1dir_parts(x, x_in, adjacency, max_step, partitions,
t_power, ndimage):
"""Deal with partitions, and pass the work to _find_clusters_1dir."""
if partitions is None:
clusters, sums = _find_clusters_1dir(x, x_in, adjacency, max_step,
t_power, ndimage)
else:
# cluster each partition separately
clusters = list()
sums = list()
for p in range(np.max(partitions) + 1):
x_i = np.logical_and(x_in, partitions == p)
out = _find_clusters_1dir(x, x_i, adjacency, max_step, t_power,
ndimage)
clusters += out[0]
sums.append(out[1])
sums = np.concatenate(sums)
return clusters, sums
def _find_clusters_1dir(x, x_in, adjacency, max_step, t_power, ndimage):
"""Actually call the clustering algorithm."""
from scipy import sparse
if adjacency is None:
labels, n_labels = ndimage.label(x_in)
if x.ndim == 1:
# slices
clusters = ndimage.find_objects(labels, n_labels)
# equivalent to if len(clusters) == 0 but faster
if not clusters:
sums = list()
else:
index = list(range(1, n_labels + 1))
if t_power == 1:
sums = ndimage.sum(x, labels, index=index)
else:
sums = ndimage.sum(np.sign(x) * np.abs(x) ** t_power,
labels, index=index)
else:
# boolean masks (raveled)
clusters = list()
sums = np.empty(n_labels)
for label in range(n_labels):
c = labels == label + 1
clusters.append(c.ravel())
if t_power == 1:
sums[label] = np.sum(x[c])
else:
sums[label] = np.sum(np.sign(x[c]) *
np.abs(x[c]) ** t_power)
else:
if x.ndim > 1:
raise Exception("Data should be 1D when using a adjacency "
"to define clusters.")
if isinstance(adjacency, sparse.spmatrix) or adjacency is False:
clusters = _get_components(x_in, adjacency)
elif isinstance(adjacency, list): # use temporal adjacency
clusters = _get_clusters_st(x_in, adjacency, max_step)
else:
raise ValueError('adjacency must be a sparse matrix or list')
if t_power == 1:
sums = [_masked_sum(x, c) for c in clusters]
else:
sums = [_masked_sum_power(x, c, t_power) for c in clusters]
return clusters, np.atleast_1d(sums)
def _cluster_indices_to_mask(components, n_tot):
"""Convert to the old format of clusters, which were bool arrays."""
for ci, c in enumerate(components):
components[ci] = np.zeros((n_tot), dtype=bool)
components[ci][c] = True
return components
def _cluster_mask_to_indices(components, shape):
"""Convert to the old format of clusters, which were bool arrays."""
for ci, c in enumerate(components):
if isinstance(c, np.ndarray): # mask
components[ci] = np.where(c.reshape(shape))
elif isinstance(c, slice):
components[ci] = np.arange(c.start, c.stop)
else:
assert isinstance(c, tuple), type(c)
c = list(c) # tuple->list
for ii, cc in enumerate(c):
if isinstance(cc, slice):
c[ii] = np.arange(cc.start, cc.stop)
else:
c[ii] = np.where(cc)[0]
components[ci] = tuple(c)
return components
def _pval_from_histogram(T, H0, tail):
"""Get p-values from stats values given an H0 distribution.
For each stat compute a p-value as percentile of its statistics
within all statistics in surrogate data
"""
# from pct to fraction
if tail == -1: # up tail
pval = np.array([np.mean(H0 <= t) for t in T])
elif tail == 1: # low tail
pval = np.array([np.mean(H0 >= t) for t in T])
else: # both tails
pval = np.array([np.mean(abs(H0) >= abs(t)) for t in T])
return pval
def _setup_adjacency(adjacency, n_tests, n_times):
from scipy import sparse
if not sparse.issparse(adjacency):
raise ValueError("If adjacency matrix is given, it must be a "
"SciPy sparse matrix.")
if adjacency.shape[0] == n_tests: # use global algorithm
adjacency = adjacency.tocoo()
else: # use temporal adjacency algorithm
got_times, mod = divmod(n_tests, adjacency.shape[0])
if got_times != n_times or mod != 0:
raise ValueError(
'adjacency (len %d) must be of the correct size, i.e. be '
'equal to or evenly divide the number of tests (%d).\n\n'
'If adjacency was computed for a source space, try using '
'the fwd["src"] or inv["src"] as some original source space '
'vertices can be excluded during forward computation'
% (adjacency.shape[0], n_tests))
# we claim to only use upper triangular part... not true here
adjacency = (adjacency + adjacency.transpose()).tocsr()
adjacency = [
adjacency.indices[adjacency.indptr[i]:adjacency.indptr[i + 1]]
for i in range(len(adjacency.indptr) - 1)]
return adjacency
def _do_permutations(X_full, slices, threshold, tail, adjacency, stat_fun,
max_step, include, partitions, t_power, orders,
sample_shape, buffer_size, progress_bar):
n_samp, n_vars = X_full.shape
if buffer_size is not None and n_vars <= buffer_size:
buffer_size = None # don't use buffer for few variables
# allocate space for output
max_cluster_sums = np.empty(len(orders), dtype=np.double)
if buffer_size is not None:
# allocate buffer, so we don't need to allocate memory during loop
X_buffer = [np.empty((len(X_full[s]), buffer_size), dtype=X_full.dtype)
for s in slices]
for seed_idx, order in enumerate(orders):
# shuffle sample indices
assert order is not None
idx_shuffle_list = [order[s] for s in slices]
if buffer_size is None:
# shuffle all data at once
X_shuffle_list = [X_full[idx, :] for idx in idx_shuffle_list]
t_obs_surr = stat_fun(*X_shuffle_list)
else:
# only shuffle a small data buffer, so we need less memory
t_obs_surr = np.empty(n_vars, dtype=X_full.dtype)
for pos in range(0, n_vars, buffer_size):
# number of variables for this loop
n_var_loop = min(pos + buffer_size, n_vars) - pos
# fill buffer
for i, idx in enumerate(idx_shuffle_list):
X_buffer[i][:, :n_var_loop] =\
X_full[idx, pos: pos + n_var_loop]
# apply stat_fun and store result
tmp = stat_fun(*X_buffer)
t_obs_surr[pos: pos + n_var_loop] = tmp[:n_var_loop]
# The stat should have the same shape as the samples for no adj.
if adjacency is None:
t_obs_surr.shape = sample_shape
# Find cluster on randomized stats
out = _find_clusters(t_obs_surr, threshold=threshold, tail=tail,
max_step=max_step, adjacency=adjacency,
partitions=partitions, include=include,
t_power=t_power)
perm_clusters_sums = out[1]
if len(perm_clusters_sums) > 0:
max_cluster_sums[seed_idx] = np.max(perm_clusters_sums)
else:
max_cluster_sums[seed_idx] = 0
progress_bar.update(seed_idx + 1)
return max_cluster_sums
def _do_1samp_permutations(X, slices, threshold, tail, adjacency, stat_fun,
max_step, include, partitions, t_power, orders,
sample_shape, buffer_size, progress_bar):
n_samp, n_vars = X.shape
assert slices is None # should be None for the 1 sample case
if buffer_size is not None and n_vars <= buffer_size:
buffer_size = None # don't use buffer for few variables
# allocate space for output
max_cluster_sums = np.empty(len(orders), dtype=np.double)
if buffer_size is not None:
# allocate a buffer so we don't need to allocate memory in loop
X_flip_buffer = np.empty((n_samp, buffer_size), dtype=X.dtype)
for seed_idx, order in enumerate(orders):
assert isinstance(order, np.ndarray)
# new surrogate data with specified sign flip
assert order.size == n_samp # should be guaranteed by parent
signs = 2 * order[:, None].astype(int) - 1
if not np.all(np.equal(np.abs(signs), 1)):
raise ValueError('signs from rng must be +/- 1')
if buffer_size is None:
# be careful about non-writable memmap (GH#1507)
if X.flags.writeable:
X *= signs
# Recompute statistic on randomized data
t_obs_surr = stat_fun(X)
# Set X back to previous state (trade memory eff. for CPU use)
X *= signs
else:
t_obs_surr = stat_fun(X * signs)
else:
# only sign-flip a small data buffer, so we need less memory
t_obs_surr = np.empty(n_vars, dtype=X.dtype)
for pos in range(0, n_vars, buffer_size):
# number of variables for this loop
n_var_loop = min(pos + buffer_size, n_vars) - pos
X_flip_buffer[:, :n_var_loop] =\
signs * X[:, pos: pos + n_var_loop]
# apply stat_fun and store result
tmp = stat_fun(X_flip_buffer)
t_obs_surr[pos: pos + n_var_loop] = tmp[:n_var_loop]
# The stat should have the same shape as the samples for no adj.
if adjacency is None:
t_obs_surr.shape = sample_shape
# Find cluster on randomized stats
out = _find_clusters(t_obs_surr, threshold=threshold, tail=tail,
max_step=max_step, adjacency=adjacency,
partitions=partitions, include=include,
t_power=t_power)
perm_clusters_sums = out[1]
if len(perm_clusters_sums) > 0:
# get max with sign info
idx_max = np.argmax(np.abs(perm_clusters_sums))
max_cluster_sums[seed_idx] = perm_clusters_sums[idx_max]
else:
max_cluster_sums[seed_idx] = 0
progress_bar.update(seed_idx + 1)
return max_cluster_sums
def bin_perm_rep(ndim, a=0, b=1):
"""Ndim permutations with repetitions of (a,b).
Returns an array with all the possible permutations with repetitions of
(0,1) in ndim dimensions. The array is shaped as (2**ndim,ndim), and is
ordered with the last index changing fastest. For examble, for ndim=3:
Examples
--------
>>> bin_perm_rep(3)
array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]])
"""
# Create the leftmost column as 0,0,...,1,1,...
nperms = 2 ** ndim
perms = np.empty((nperms, ndim), type(a))
perms.fill(a)
half_point = nperms // 2
perms[half_point:, 0] = b
# Fill the rest of the table by sampling the previous column every 2 items
for j in range(1, ndim):
half_col = perms[::2, j - 1]
perms[:half_point, j] = half_col
perms[half_point:, j] = half_col
# This is equivalent to something like:
# orders = [np.fromiter(np.binary_repr(s + 1, ndim), dtype=int)
# for s in np.arange(2 ** ndim)]
return perms
def _get_1samp_orders(n_samples, n_permutations, tail, rng):
"""Get the 1samp orders."""
max_perms = 2 ** (n_samples - (tail == 0)) - 1
extra = ''
if isinstance(n_permutations, str):
if n_permutations != 'all':
raise ValueError('n_permutations as a string must be "all"')
n_permutations = max_perms
n_permutations = int(n_permutations)
if max_perms < n_permutations:
# omit first perm b/c accounted for in H0.append() later;
# convert to binary array representation
extra = ' (exact test)'
orders = bin_perm_rep(n_samples)[1:max_perms + 1]
elif n_samples <= 20: # fast way to do it for small(ish) n_samples
orders = rng.choice(max_perms, n_permutations - 1, replace=False)
orders = [np.fromiter(np.binary_repr(s + 1, n_samples), dtype=int)
for s in orders]
else: # n_samples >= 64
# Here we can just use the hash-table (w/collision detection)
# functionality of a dict to ensure uniqueness
orders = np.zeros((n_permutations - 1, n_samples), int)
hashes = {}
ii = 0
# in the symmetric case, we should never flip one of the subjects
# to prevent positive/negative equivalent collisions
use_samples = n_samples - (tail == 0)
while ii < n_permutations - 1:
signs = tuple((rng.uniform(size=use_samples) < 0.5).astype(int))
if signs not in hashes:
orders[ii, :use_samples] = signs
if tail == 0 and rng.uniform() < 0.5:
# To undo the non-flipping of the last subject in the
# tail == 0 case, half the time we use the positive
# last subject, half the time negative last subject
orders[ii] = 1 - orders[ii]
hashes[signs] = None
ii += 1
return orders, n_permutations, extra
def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
adjacency, n_jobs, seed, max_step,
exclude, step_down_p, t_power, out_type,
check_disjoint, buffer_size):
n_jobs = check_n_jobs(n_jobs)
"""Aux Function.
Note. X is required to be a list. Depending on the length of X
either a 1 sample t-test or an F test / more sample permutation scheme
is elicited.
"""
_check_option('out_type', out_type, ['mask', 'indices'])
_check_option('tail', tail, [-1, 0, 1])
if not isinstance(threshold, dict):
threshold = float(threshold)
if (tail < 0 and threshold > 0 or tail > 0 and threshold < 0 or
tail == 0 and threshold < 0):
raise ValueError('incompatible tail and threshold signs, got '
'%s and %s' % (tail, threshold))
# check dimensions for each group in X (a list at this stage).
X = [x[:, np.newaxis] if x.ndim == 1 else x for x in X]
n_samples = X[0].shape[0]
n_times = X[0].shape[1]
sample_shape = X[0].shape[1:]
for x in X:
if x.shape[1:] != sample_shape:
raise ValueError('All samples mush have the same size')
# flatten the last dimensions in case the data is high dimensional
X = [np.reshape(x, (x.shape[0], -1)) for x in X]
n_tests = X[0].shape[1]
if adjacency is not None and adjacency is not False:
adjacency = _setup_adjacency(adjacency, n_tests, n_times)
if (exclude is not None) and not exclude.size == n_tests:
raise ValueError('exclude must be the same shape as X[0]')
# Step 1: Calculate t-stat for original data
# -------------------------------------------------------------
t_obs = stat_fun(*X)
_validate_type(t_obs, np.ndarray, 'return value of stat_fun')
logger.info('stat_fun(H1): min=%f max=%f' % (np.min(t_obs), np.max(t_obs)))
# test if stat_fun treats variables independently
if buffer_size is not None:
t_obs_buffer = np.zeros_like(t_obs)
for pos in range(0, n_tests, buffer_size):
t_obs_buffer[pos: pos + buffer_size] =\
stat_fun(*[x[:, pos: pos + buffer_size] for x in X])
if not np.alltrue(t_obs == t_obs_buffer):
warn('Provided stat_fun does not treat variables independently. '
'Setting buffer_size to None.')
buffer_size = None
# The stat should have the same shape as the samples for no adj.
if t_obs.size != np.prod(sample_shape):
raise ValueError('t_obs.shape %s provided by stat_fun %s is not '
'compatible with the sample shape %s'
% (t_obs.shape, stat_fun, sample_shape))
if adjacency is None or adjacency is False:
t_obs.shape = sample_shape
if exclude is not None:
include = np.logical_not(exclude)
else:
include = None
# determine if adjacency itself can be separated into disjoint sets
if check_disjoint is True and (adjacency is not None and
adjacency is not False):
partitions = _get_partitions_from_adjacency(adjacency, n_times)
else:
partitions = None
logger.info('Running initial clustering')
out = _find_clusters(t_obs, threshold, tail, adjacency,
max_step=max_step, include=include,
partitions=partitions, t_power=t_power,
show_info=True)
clusters, cluster_stats = out
# The stat should have the same shape as the samples
t_obs.shape = sample_shape
# For TFCE, return the "adjusted" statistic instead of raw scores
if isinstance(threshold, dict):
t_obs = cluster_stats.reshape(t_obs.shape) * np.sign(t_obs)
logger.info('Found %d clusters' % len(clusters))
# convert clusters to old format
if adjacency is not None and adjacency is not False:
# our algorithms output lists of indices by default
if out_type == 'mask':
clusters = _cluster_indices_to_mask(clusters, n_tests)
else:
# ndimage outputs slices or boolean masks by default
if out_type == 'indices':
clusters = _cluster_mask_to_indices(clusters, t_obs.shape)
# convert our seed to orders
# check to see if we can do an exact test
# (for a two-tailed test, we can exploit symmetry to just do half)
extra = ''
rng = check_random_state(seed)
del seed
if len(X) == 1: # 1-sample test
do_perm_func = _do_1samp_permutations
X_full = X[0]
slices = None
orders, n_permutations, extra = _get_1samp_orders(
n_samples, n_permutations, tail, rng)
else:
n_permutations = int(n_permutations)
do_perm_func = _do_permutations
X_full = np.concatenate(X, axis=0)
n_samples_per_condition = [x.shape[0] for x in X]
splits_idx = np.append([0], np.cumsum(n_samples_per_condition))
slices = [slice(splits_idx[k], splits_idx[k + 1])
for k in range(len(X))]
orders = [rng.permutation(len(X_full))
for _ in range(n_permutations - 1)]
del rng
parallel, my_do_perm_func, _ = parallel_func(
do_perm_func, n_jobs, verbose=False)
if len(clusters) == 0:
warn('No clusters found, returning empty H0, clusters, and cluster_pv')
return t_obs, np.array([]), np.array([]), np.array([])
# Step 2: If we have some clusters, repeat process on permuted data
# -------------------------------------------------------------------
# Step 3: repeat permutations for step-down-in-jumps procedure
n_removed = 1 # number of new clusters added
total_removed = 0
step_down_include = None # start out including all points
n_step_downs = 0
while n_removed > 0:
# actually do the clustering for each partition
if include is not None:
if step_down_include is not None:
this_include = np.logical_and(include, step_down_include)
else:
this_include = include
else:
this_include = step_down_include
logger.info('Permuting %d times%s...' % (len(orders), extra))
with ProgressBar(len(orders)) as progress_bar:
H0 = parallel(
my_do_perm_func(X_full, slices, threshold, tail, adjacency,
stat_fun, max_step, this_include, partitions,
t_power, order, sample_shape, buffer_size,
progress_bar.subset(idx))
for idx, order in split_list(orders, n_jobs, idx=True))
# include original (true) ordering
if tail == -1: # up tail
orig = cluster_stats.min()
elif tail == 1:
orig = cluster_stats.max()
else:
orig = abs(cluster_stats).max()
H0.insert(0, [orig])
H0 = np.concatenate(H0)
logger.info('Computing cluster p-values')
cluster_pv = _pval_from_histogram(cluster_stats, H0, tail)
# figure out how many new ones will be removed for step-down
to_remove = np.where(cluster_pv < step_down_p)[0]
n_removed = to_remove.size - total_removed
total_removed = to_remove.size
step_down_include = np.ones(n_tests, dtype=bool)
for ti in to_remove:
step_down_include[clusters[ti]] = False
if adjacency is None and adjacency is not False:
step_down_include.shape = sample_shape
n_step_downs += 1
if step_down_p > 0:
a_text = 'additional ' if n_step_downs > 1 else ''
logger.info('Step-down-in-jumps iteration #%i found %i %s'
'cluster%s to exclude from subsequent iterations'
% (n_step_downs, n_removed, a_text,
_pl(n_removed)))
logger.info('Done.')
# The clusters should have the same shape as the samples
clusters = _reshape_clusters(clusters, sample_shape)
return t_obs, clusters, cluster_pv, H0
def _check_fun(X, stat_fun, threshold, tail=0, kind='within'):
"""Check the stat_fun and threshold values."""
from scipy import stats
if kind == 'within':
ppf = stats.t.ppf
if threshold is None:
if stat_fun is not None and stat_fun is not ttest_1samp_no_p:
warn('Automatic threshold is only valid for stat_fun=None '
'(or ttest_1samp_no_p), got %s' % (stat_fun,))
p_thresh = 0.05 / (1 + (tail == 0))
n_samples = len(X)
threshold = -ppf(p_thresh, n_samples - 1)
if np.sign(tail) < 0:
threshold = -threshold
logger.info("Using a threshold of {:.6f}".format(threshold))
stat_fun = ttest_1samp_no_p if stat_fun is None else stat_fun
else:
assert kind == 'between'
ppf = stats.f.ppf
if threshold is None:
if stat_fun is not None and stat_fun is not f_oneway:
warn('Automatic threshold is only valid for stat_fun=None '
'(or f_oneway), got %s' % (stat_fun,))
elif tail != 1:
warn('Ignoring argument "tail", performing 1-tailed F-test')
p_thresh = 0.05
dfn = len(X) - 1
dfd = np.sum([len(x) for x in X]) - len(X)
threshold = ppf(1. - p_thresh, dfn, dfd)
logger.info("Using a threshold of {:.6f}".format(threshold))
stat_fun = f_oneway if stat_fun is None else stat_fun
return stat_fun, threshold
@verbose
def permutation_cluster_test(
X, threshold=None, n_permutations=1024, tail=0, stat_fun=None,
adjacency=None, n_jobs=1, seed=None, max_step=1, exclude=None,
step_down_p=0, t_power=1, out_type='indices', check_disjoint=False,
buffer_size=1000, verbose=None):
"""Cluster-level statistical permutation test.
For a list of :class:`NumPy arrays <numpy.ndarray>` of data,
calculate some statistics corrected for multiple comparisons using
permutations and cluster-level correction. Each element of the list ``X``
should contain the data for one group of observations (e.g., 2D arrays for
time series, 3D arrays for time-frequency power values). Permutations are
generated with random partitions of the data. See
:footcite:`MarisOostenveld2007` for details.
Parameters
----------
X : list of array, shape (n_observations, p[, q])
The data to be clustered. Each array in ``X`` should contain the
observations for one group. The first dimension of each array is the
number of observations from that group; remaining dimensions comprise
the size of a single observation. For example if ``X = [X1, X2]``
with ``X1.shape = (20, 50, 4)`` and ``X2.shape = (17, 50, 4)``, then
``X`` has 2 groups with respectively 20 and 17 observations in each,
and each data point is of shape ``(50, 4)``. Note: that the
*last dimension* of each element of ``X`` should correspond to the
dimension represented in the ``adjacency`` parameter
(e.g., spectral data should be provided as
``(observations, frequencies, channels/vertices)``).
%(threshold_clust_f)s
%(n_permutations_clust_int)s
%(tail_clust)s
%(stat_fun_clust_f)s
%(adjacency_clust_n)s
%(n_jobs)s
%(seed)s
%(max_step_clust)s
exclude : bool array or None
Mask to apply to the data to exclude certain points from clustering
(e.g., medial wall vertices). Should be the same shape as X. If None,
no points are excluded.
%(step_down_p_clust)s
%(f_power_clust)s
%(out_type_clust)s
%(check_disjoint_clust)s
%(buffer_size_clust)s
%(verbose)s
Returns
-------
F_obs : array, shape (p[, q])
Statistic (F by default) observed for all variables.
clusters : list
List type defined by out_type above.
cluster_pv : array
P-value for each cluster.
H0 : array, shape (n_permutations,)
Max cluster level stats observed under permutation.
References
----------
.. footbibliography::
"""
stat_fun, threshold = _check_fun(X, stat_fun, threshold, tail, 'between')
return _permutation_cluster_test(
X=X, threshold=threshold, n_permutations=n_permutations, tail=tail,
stat_fun=stat_fun, adjacency=adjacency, n_jobs=n_jobs, seed=seed,
max_step=max_step, exclude=exclude, step_down_p=step_down_p,
t_power=t_power, out_type=out_type, check_disjoint=check_disjoint,
buffer_size=buffer_size)
@verbose
def permutation_cluster_1samp_test(
X, threshold=None, n_permutations=1024, tail=0, stat_fun=None,
adjacency=None, n_jobs=1, seed=None, max_step=1,
exclude=None, step_down_p=0, t_power=1, out_type='indices',
check_disjoint=False, buffer_size=1000, verbose=None):
"""Non-parametric cluster-level paired t-test.
Parameters
----------
X : array, shape (n_observations, p[, q])
The data to be clustered. The first dimension should correspond to the
difference between paired samples (observations) in two conditions.
The subarrays ``X[k]`` can be 1D (e.g., time series) or 2D (e.g.,
time-frequency image) associated with the kth observation. For
spatiotemporal data, see also
:func:`mne.stats.spatio_temporal_cluster_1samp_test`.
%(threshold_clust_t)s
%(n_permutations_clust_all)s
%(tail_clust)s
%(stat_fun_clust_t)s
%(adjacency_clust_1)s
%(n_jobs)s
%(seed)s
%(max_step_clust)s
exclude : bool array or None
Mask to apply to the data to exclude certain points from clustering
(e.g., medial wall vertices). Should be the same shape as X. If None,
no points are excluded.
%(step_down_p_clust)s
%(t_power_clust)s
%(out_type_clust)s
%(check_disjoint_clust)s
%(buffer_size_clust)s
%(verbose)s
Returns
-------
t_obs : array, shape (p[, q])
T-statistic observed for all variables.
clusters : list
List type defined by out_type above.
cluster_pv : array
P-value for each cluster.
H0 : array, shape (n_permutations,)
Max cluster level stats observed under permutation.
Notes
-----
From an array of paired observations, e.g. a difference in signal
amplitudes or power spectra in two conditions, calculate if the data
distributions in the two conditions are significantly different.
The procedure uses a cluster analysis with permutation test
for calculating corrected p-values. Randomized data are generated with
random sign flips. See :footcite:`MarisOostenveld2007` for more
information.
Because a 1-sample t-test on the difference in observations is
mathematically equivalent to a paired t-test, internally this function
computes a 1-sample t-test (by default) and uses sign flipping (always)
to perform permutations. This might not be suitable for the case where
there is truly a single observation under test; see :ref:`disc-stats`.
If ``n_permutations >= 2 ** (n_samples - (tail == 0))``,
``n_permutations`` and ``seed`` will be ignored since an exact test
(full permutation test) will be performed.
If no initial clusters are found, i.e., all points in the true
distribution are below the threshold, then ``clusters``, ``cluster_pv``,
and ``H0`` will all be empty arrays.
References
----------
.. footbibliography::
"""
stat_fun, threshold = _check_fun(X, stat_fun, threshold, tail)
return _permutation_cluster_test(
X=[X], threshold=threshold, n_permutations=n_permutations, tail=tail,
stat_fun=stat_fun, adjacency=adjacency, n_jobs=n_jobs, seed=seed,
max_step=max_step, exclude=exclude, step_down_p=step_down_p,
t_power=t_power, out_type=out_type, check_disjoint=check_disjoint,
buffer_size=buffer_size)
@verbose
def spatio_temporal_cluster_1samp_test(
X, threshold=None, n_permutations=1024, tail=0,
stat_fun=None, adjacency=None, n_jobs=1, seed=None,
max_step=1, spatial_exclude=None, step_down_p=0, t_power=1,
out_type='indices', check_disjoint=False, buffer_size=1000,
verbose=None):
"""Non-parametric cluster-level paired t-test for spatio-temporal data.
This function provides a convenient wrapper for
:func:`mne.stats.permutation_cluster_1samp_test`, for use with data
organized in the form (observations × time × space). See
:footcite:`MarisOostenveld2007` for details.
Parameters
----------
X : array, shape (n_observations, p[, q], n_vertices)
The data to be clustered. The first dimension should correspond to the
difference between paired samples (observations) in two conditions.
The second, and optionally third, dimensions correspond to the
time or time-frequency data. And, the last dimension should be spatial;
it is the dimension the adjacency parameter will be applied to.
%(threshold_clust_t)s
%(n_permutations_clust_all)s
%(tail_clust)s
%(stat_fun_clust_t)s
%(adjacency_clust_st1)s
%(n_jobs)s
%(seed)s
%(max_step_clust)s
spatial_exclude : list of int or None
List of spatial indices to exclude from clustering.
%(step_down_p_clust)s
%(t_power_clust)s
%(out_type_clust)s
%(check_disjoint_clust)s
%(buffer_size_clust)s
%(verbose)s
Returns
-------
t_obs : array, shape (p[, q], n_vertices)
T-statistic observed for all variables.
clusters : list
List type defined by out_type above.
cluster_pv : array
P-value for each cluster.
H0 : array, shape (n_permutations,)
Max cluster level stats observed under permutation.
References
----------
.. footbibliography::
"""
# convert spatial_exclude before passing on if necessary
if spatial_exclude is not None:
exclude = _st_mask_from_s_inds(
np.prod(X.shape[1:-1]), X.shape[-1], spatial_exclude, True)
else:
exclude = None
return permutation_cluster_1samp_test(
X, threshold=threshold, stat_fun=stat_fun, tail=tail,
n_permutations=n_permutations, adjacency=adjacency,
n_jobs=n_jobs, seed=seed, max_step=max_step, exclude=exclude,
step_down_p=step_down_p, t_power=t_power, out_type=out_type,
check_disjoint=check_disjoint, buffer_size=buffer_size)
@verbose
def spatio_temporal_cluster_test(
X, threshold=None, n_permutations=1024, tail=0, stat_fun=None,
adjacency=None, n_jobs=1, seed=None, max_step=1,
spatial_exclude=None, step_down_p=0, t_power=1, out_type='indices',
check_disjoint=False, buffer_size=1000,
verbose=None):
"""Non-parametric cluster-level test for spatio-temporal data.
This function provides a convenient wrapper for
:func:`mne.stats.permutation_cluster_test`, for use with data
organized in the form (observations × time × space).
See :footcite:`MarisOostenveld2007` for more information.
Parameters
----------
X : list of array, shape (n_observations, p[, q], n_vertices)
The data to be clustered. Each array in ``X`` should contain the
observations for one group. The first dimension of each array is the
number of observations from that group (and may vary between groups).
The second, and optionally third, dimensions correspond to the
time or time-frequency data. And, the last dimension should be spatial;
it is the dimension the adjacency parameter will be applied to. All
dimensions except the first should match across all groups.
%(threshold_clust_f)s
%(n_permutations_clust_int)s
%(tail_clust)s
%(stat_fun_clust_f)s
%(adjacency_clust_stn)s
%(n_jobs)s
%(seed)s
%(max_step_clust)s
spatial_exclude : list of int or None
List of spatial indices to exclude from clustering.
%(step_down_p_clust)s
%(f_power_clust)s
%(out_type_clust)s
%(check_disjoint_clust)s
%(buffer_size_clust)s
%(verbose)s
Returns
-------
F_obs : array, shape (p[, q], n_vertices)
Statistic (F by default) observed for all variables.
clusters : list
List type defined by out_type above.
cluster_pv: array
P-value for each cluster.
H0 : array, shape (n_permutations,)
Max cluster level stats observed under permutation.
References
----------
.. footbibliography::
"""
# convert spatial_exclude before passing on if necessary
if spatial_exclude is not None:
exclude = _st_mask_from_s_inds(
np.prod(X[0].shape[1:-1]), X[0].shape[-1], spatial_exclude, True)
else:
exclude = None
return permutation_cluster_test(
X, threshold=threshold, stat_fun=stat_fun, tail=tail,
n_permutations=n_permutations, adjacency=adjacency,
n_jobs=n_jobs, seed=seed, max_step=max_step, exclude=exclude,
step_down_p=step_down_p, t_power=t_power, out_type=out_type,
check_disjoint=check_disjoint, buffer_size=buffer_size)
def _st_mask_from_s_inds(n_times, n_vertices, vertices, set_as=True):
"""Compute mask to apply to a spatio-temporal adjacency matrix.
This can be used to include (or exclude) certain spatial coordinates.
This is useful for excluding certain regions from analysis (e.g.,
medial wall vertices).
Parameters
----------
n_times : int
Number of time points.
n_vertices : int
Number of spatial points.
vertices : list or array of int
Vertex numbers to set.
set_as : bool
If True, all points except "vertices" are set to False (inclusion).
If False, all points except "vertices" are set to True (exclusion).
Returns
-------
mask : array of bool
A (n_times * n_vertices) array of boolean values for masking
"""
mask = np.zeros((n_times, n_vertices), dtype=bool)
mask[:, vertices] = True
mask = mask.ravel()
if set_as is False:
mask = np.logical_not(mask)
return mask
@verbose
def _get_partitions_from_adjacency(adjacency, n_times, verbose=None):
"""Specify disjoint subsets (e.g., hemispheres) based on adjacency."""
from scipy import sparse
if isinstance(adjacency, list):
test = np.ones(len(adjacency))
test_adj = np.zeros((len(adjacency), len(adjacency)), dtype='bool')
for vi in range(len(adjacency)):
test_adj[adjacency[vi], vi] = True
test_adj = sparse.coo_matrix(test_adj, dtype='float')
else:
test = np.ones(adjacency.shape[0])
test_adj = adjacency
part_clusts = _find_clusters(test, 0, 1, test_adj)[0]
if len(part_clusts) > 1:
logger.info('%i disjoint adjacency sets found'
% len(part_clusts))
partitions = np.zeros(len(test), dtype='int')
for ii, pc in enumerate(part_clusts):
partitions[pc] = ii
if isinstance(adjacency, list):
partitions = np.tile(partitions, n_times)
else:
logger.info('No disjoint adjacency sets found')
partitions = None
return partitions
def _reshape_clusters(clusters, sample_shape):
"""Reshape cluster masks or indices to be of the correct shape."""
# format of the bool mask and indices are ndarrays
if len(clusters) > 0 and isinstance(clusters[0], np.ndarray):
if clusters[0].dtype == bool: # format of mask
clusters = [c.reshape(sample_shape) for c in clusters]
else: # format of indices
clusters = [np.unravel_index(c, sample_shape) for c in clusters]
return clusters
def summarize_clusters_stc(clu, p_thresh=0.05, tstep=1.0, tmin=0,
subject='fsaverage', vertices=None):
"""Assemble summary SourceEstimate from spatiotemporal cluster results.
This helps visualizing results from spatio-temporal-clustering
permutation tests.
Parameters
----------
clu : tuple
The output from clustering permutation tests.
p_thresh : float
The significance threshold for inclusion of clusters.
tstep : float
The time step between samples of the original :class:`STC
<mne.SourceEstimate>`, in seconds (i.e., ``1 / stc.sfreq``). Defaults
to ``1``, which will yield a colormap indicating cluster duration
measured in *samples* rather than *seconds*.
tmin : float | int
The time of the first sample.
subject : str
The name of the subject.
vertices : list of array | instance of SourceSpaces | None
The vertex numbers associated with the source space locations. Defaults
to None. If None, equals ``[np.arange(10242), np.arange(10242)]``.
Can also be an instance of SourceSpaces to get vertex numbers from.
.. versionchanged:: 0.21
Added support for SourceSpaces.
Returns
-------
out : instance of SourceEstimate
A summary of the clusters. The first time point in this SourceEstimate
object is the summation of all the clusters. Subsequent time points
contain each individual cluster. The magnitude of the activity
corresponds to the duration spanned by the cluster (duration units are
determined by ``tstep``).
.. versionchanged:: 0.21
Added support for volume and mixed source estimates.
"""
_validate_type(vertices, (None, list, SourceSpaces), 'vertices')
if vertices is None:
vertices = [np.arange(10242), np.arange(10242)]
klass = SourceEstimate
elif isinstance(vertices, SourceSpaces):
klass = dict(surface=SourceEstimate,
volume=VolSourceEstimate,
mixed=MixedSourceEstimate)[vertices.kind]
vertices = [s['vertno'] for s in vertices]
else:
klass = {1: VolSourceEstimate,
2: SourceEstimate}.get(len(vertices), MixedSourceEstimate)
n_vertices_need = sum(len(v) for v in vertices)
t_obs, clusters, clu_pvals, _ = clu
n_times, n_vertices = t_obs.shape
if n_vertices != n_vertices_need:
raise ValueError(
f'Number of cluster vertices ({n_vertices}) did not match the '
f'provided vertices ({n_vertices_need})')
good_cluster_inds = np.where(clu_pvals < p_thresh)[0]
# Build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
if len(good_cluster_inds) == 0:
raise RuntimeError('No significant clusters available. Please adjust '
'your threshold or check your statistical '
'analysis.')
data = np.zeros((n_vertices, n_times))
data_summary = np.zeros((n_vertices, len(good_cluster_inds) + 1))
for ii, cluster_ind in enumerate(good_cluster_inds):
data.fill(0)
t_inds, v_inds = clusters[cluster_ind]
data[v_inds, t_inds] = t_obs[t_inds, v_inds]
# Store a nice visualization of the cluster by summing across time
data_summary[:, ii + 1] = np.sum(_sum_cluster_data(data, tstep),
axis=1)
# Make the first "time point" a sum across all clusters for easy
# visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
return klass(data_summary, vertices, tmin, tstep, subject)
|
{
"content_hash": "fbce863b9f91926d7e11993aea16511c",
"timestamp": "",
"source": "github",
"line_count": 1497,
"max_line_length": 79,
"avg_line_length": 39.5437541750167,
"alnum_prop": 0.5861614608848421,
"repo_name": "Eric89GXL/mne-python",
"id": "f2830bf486883fad603aeaa9d0261ea385a4b55d",
"size": "59596",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mne/stats/cluster_level.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6164915"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from gevent import monkey; monkey.patch_all()
import os
from collections import namedtuple
#from plumb_util import find_service, find_text
from syncstomp.managed import ManagedConnection
from apollo.queues import OneQueueApolloMonitor, ApolloMonitor
import logging; logger = logging.getLogger(__name__)
from uuid import uuid4
CALL = namedtuple('Call', 'name args kwargs')
ENV_USER = os.environ.get('USER', 'jenkins')
MONITOR = None
MONITOR_ARGS = None
STOMP = None
QUEUE = None
class OneMonitorWithEvents(OneQueueApolloMonitor):
"""
This is the same as a normal OneQueueApolloMonitor except that its
on_queue_* methods keep track of when they've been called.
"""
def __init__(self, *args, **kwargs):
self._history = []
super(OneMonitorWithEvents, self).__init__(*args, **kwargs)
def get_call_history(self):
return self._history
def _func(self, name, *args, **kwargs):
logger.debug('stomp.%s(*%r, **%r)', name, args, kwargs)
self._history.append(CALL(name, args, kwargs))
def on_queue_init(self, *args, **kwargs):
self._func('on_queue_init', *args, **kwargs)
def on_queue_update(self, *args, **kwargs):
self._func('on_queue_update', *args, **kwargs)
def on_queue_empty(self, *args, **kwargs):
self._func('on_queue_empty', *args, **kwargs)
def on_queue_missing(self, *args, **kwargs):
self._func('on_queue_missing', *args, **kwargs)
def setup():
global MONITOR_ARGS, STOMP, QUEUE
# TODO: this should presumably be tested on ferret, not njord,
# but the ManagedConnection doesn't want to connect.
# host_and_ports = find_service('_apollo_adm._tcp', zone=None)
# virtual_host = find_text('_apollo_vhost', zone=None)
# host, port = host_and_ports[0]
# host = 'ferret.lumi.'
# virtual_host = 'stomp-testing' # should I make a new virtual host? how?
host = 'njord.lumi.'
port = 61680
virtual_host = 'broker_lumi'
MONITOR_ARGS = dict(host=host, port=port, virtual_host=virtual_host)
QUEUE = 'test.apollo.onemonitor.%s.%s' % (ENV_USER, uuid4())
STOMP = ManagedConnection(host_and_ports = [(host, 61613)], version = 1.1)
STOMP.start()
STOMP.connect(wait = True)
def test_no_queue_data():
monitor = OneMonitorWithEvents(
QUEUE, update_interval_s=float('inf'), **MONITOR_ARGS)
queue_data = monitor.queue
assert queue_data is None, queue_data
# check that on_queue_missing was called
assert len(monitor._history) == 1, monitor._history
assert monitor._history[0].name == 'on_queue_missing', monitor._history[0]
monitor._history = []
def test_proper_init():
global MONITOR
# send a message to the stomp queue
STOMP.send({}, destination='/queue/'+QUEUE)
MONITOR = OneMonitorWithEvents(
QUEUE, update_interval_s=float('inf'), **MONITOR_ARGS)
queue_data = MONITOR.queue
assert queue_data is not None, queue_data
# check that on_queue_init was called
assert len(MONITOR._history) == 1, MONITOR._history
assert MONITOR._history[0].name == 'on_queue_init', MONITOR._history[0]
MONITOR._history = []
def test_queue_data():
# get some queue data, and make sure our queue is in there
queue_data = MONITOR._get_queue_data()
assert queue_data['id'] == QUEUE, queue_data
def test_queue_changes():
MONITOR.do_update()
# check that the queue was created and deleted
assert any(c.name == 'on_queue_update' for c in MONITOR._history)
def test_do_update():
# do_update (this has to do with gevent Events)
# TODO
pass
def teardown():
# I can't think of much we need to do here
monitor = ApolloMonitor(**MONITOR_ARGS)
monitor.delete_queue(QUEUE)
|
{
"content_hash": "0052e8189d52d1d61bcdee73a8cc453c",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 78,
"avg_line_length": 31.132231404958677,
"alnum_prop": 0.6586142819219538,
"repo_name": "pombredanne/apollo-1",
"id": "d689754ee702fa5a89fcf793d1133bf6d481e859",
"size": "3767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_onemonitor.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from sqlalchemy import Column, Integer, String, Text, DateTime, Index
from datetime import datetime
from .db import Base
class Question(Base):
__tablename__ = 'questions'
__table_args__ = (
Index('questions_query_id_index', "query_id"),
Index('questions_question_id_index', "question_id"),
)
ORDERED_ATTRS = ["query_id", "rank", "question_id", "title", "snippet",
"status", "updated_at", "answer_num", "view_num", "category",
"question_body", "best_answer_body"]
query_id = Column(String(8), primary_key=True)
rank = Column(Integer)
question_id = Column(String(12), primary_key=True)
title = Column(String(255))
snippet = Column(Text)
status = Column(String(10))
updated_at = Column(DateTime)
answer_num = Column(Integer)
view_num = Column(Integer)
category = Column(String(255))
question_body = Column(Text)
best_answer_body = Column(Text)
@classmethod
def readline(cls, line):
ls = [l.strip() for l in line.split("\t")]
if len(ls) != 12:
raise RuntimeError("Invalid format for %s: %s"
% (cls.__name__, line))
args = {attr: ls[i] for i, attr in enumerate(cls.ORDERED_ATTRS)}
result = Question(**args)
# convertion
result.rank = int(result.rank)
result.updated_at = datetime.strptime(result.updated_at,
"%Y/%m/%d %H:%M:%S")
result.answer_num = int(result.answer_num)
result.view_num = int(result.view_num)
return result
|
{
"content_hash": "732dcebea7d0e5664b3a02a2fb4d684c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 35.29545454545455,
"alnum_prop": 0.601416613007083,
"repo_name": "mpkato/openliveq",
"id": "56279ac039d052e7f6b2a5d24924389483d23efa",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openliveq/question.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57897"
}
],
"symlink_target": ""
}
|
import argparse
import sys
import json
import time
import requests
import tornado.ioloop
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import url_concat
testurls = ['https://t.co/gFVKpELNO0',
'https://docs.python.org/2/library/configparser.html',
'http://www.huffingtonpost.com/entry/clinton-sanders-debate_us_56bd67d2e4b0b40245c60e1f',
'http://edition.cnn.com/2016/02/11/entertainment/taylor-swift-kanye-west-new-song/index.html',
'http://www.nytimes.com/2016/02/11/technology/twitter-to-save-itself-must-scale-back-world-swallowing-ambitions.html?ref=technology',
'http://blogs.sciencemag.org/pipeline/archives/2012/07/31/synthetic_chemistry_the_rise_of_the_algorithms',
'http://bbc.in/1Xlrix7',
'https://t.co/MefZPI6SNw',
]
def handle_request(response):
if response.error:
print "Error:", response.error
else:
print type(response.body)
print(json.loads(response.body))
def async_client(address):
http_client = AsyncHTTPClient()
for testurl in testurls:
params = {"url": testurl}
url = url_concat(address, params)
print(url)
http_client.fetch(url, handle_request)
tornado.ioloop.IOLoop.instance().start()
def sync_client(address):
for testurl in testurls:
params = {"url": testurl}
r = requests.get(address, params=params)
print(r.json())
def main():
port = sys.argv[1]
address = 'http://localhost:' + port
sync_client(address)
#async_client(address)
if __name__ == '__main__':
main()
|
{
"content_hash": "4964f28945fb57558337e7ddee896d9c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 145,
"avg_line_length": 28.842105263157894,
"alnum_prop": 0.6551094890510949,
"repo_name": "lrei/canonical_urls",
"id": "679de24c7f6c04345b3d6fb03b9f15be42c93174",
"size": "1691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canonicalclient.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28086"
}
],
"symlink_target": ""
}
|
import unittest
import math
from function.generic_univariate_pitch_function import GenericUnivariatePitchFunction
from harmonicmodel.tertian_chord_template import TertianChordTemplate
from melody.constraints.contextual_note import ContextualNote
from melody.constraints.fit_pitch_to_function_constraint import FitPitchToFunctionConstraint
from melody.solver.p_map import PMap
from structure.line import Line
from structure.note import Note
from structure.tempo import Tempo
from structure.time_signature import TimeSignature
from timemodel.duration import Duration
from timemodel.event_sequence import EventSequence
from timemodel.offset import Offset
from timemodel.position import Position
from timemodel.tempo_event import TempoEvent
from timemodel.tempo_event_sequence import TempoEventSequence
from timemodel.time_signature_event import TimeSignatureEvent
from tonalmodel.diatonic_pitch import DiatonicPitch
from tonalmodel.diatonic_tone import DiatonicTone
from tonalmodel.modality import ModalityType
from tonalmodel.pitch_range import PitchRange
from tonalmodel.tonality import Tonality
from harmoniccontext.harmonic_context import HarmonicContext
from melody.constraints.policy_context import PolicyContext
import logging
import sys
class TestFitPitchToFunctionConstraint(unittest.TestCase):
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Note: add -s --nologcapture to 'additional arguments in configuration to see logging
def setUp(self):
pass
def tearDown(self):
pass
def test_compute_simple_function_tone(self):
print('--- test_compute_simple_function_tone ---')
line = Line()
f = GenericUnivariatePitchFunction(TestFitPitchToFunctionConstraint.sinasoidal, Position(0), Position(2))
v_note = Note(DiatonicPitch.parse('A:4'), Duration(1, 32))
line.pin(v_note, Offset(0))
constraint, lower_policy_context = TestFitPitchToFunctionConstraint.build_simple_constraint(v_note, f,
ModalityType.Major,
'G', 'tV')
p_map = PMap()
p_map[v_note] = ContextualNote(lower_policy_context)
results = constraint.values(p_map, v_note)
assert results is not None
assert len(results) == 1
print(next(iter(results)).diatonic_pitch)
assert 'C:4' == str(next(iter(results)).diatonic_pitch)
v_note = Note(DiatonicPitch.parse('A:4'), Duration(1, 32))
line.pin(v_note, Offset(1, 32))
constraint, lower_policy_context = TestFitPitchToFunctionConstraint.build_simple_constraint(v_note, f,
ModalityType.Major,
'G', 'tV')
p_map = PMap()
p_map[v_note] = ContextualNote(lower_policy_context)
results = constraint.values(p_map, v_note)
assert results is not None
assert len(results) == 1
print(next(iter(results)).diatonic_pitch)
assert 'E:4' == str(next(iter(results)).diatonic_pitch)
p_map[v_note].note = next(iter(results))
assert constraint.verify(p_map)
def test_compute_with_minor_key(self):
print('-- test_compute_with_minor_key ---')
line = Line()
f = GenericUnivariatePitchFunction(TestFitPitchToFunctionConstraint.sinasoidal, Position(0), Position(2))
v_notes = [Note(DiatonicPitch.parse('A:4'), Duration(1, 16)) for _ in range(0, 33)]
for i in range(0, 33):
line.pin(v_notes[i], Offset(i, 16))
constraint, lower_policy_context = \
TestFitPitchToFunctionConstraint.build_simple_constraint(v_notes[0], f, ModalityType.NaturalMinor,
'C', 'tV')
constraints = list()
constraints.append(constraint)
for i in range(1, 33):
c, _ = \
TestFitPitchToFunctionConstraint.build_simple_constraint(v_notes[i], f, ModalityType.NaturalMinor,
'C', 'tV')
constraints.append(c)
p_map = PMap()
p_map[v_notes[0]] = ContextualNote(lower_policy_context)
results = constraint.values(p_map, v_notes[0])
assert results is not None
assert len(results) == 1
print(next(iter(results)).diatonic_pitch)
assert 'C:4' == str(next(iter(results)).diatonic_pitch)
result_pitches = []
for i in range(0, 33):
p_map = PMap()
p_map[v_notes[i]] = ContextualNote(lower_policy_context)
results = constraints[i].values(p_map, v_notes[i])
result_pitches.append(next(iter(results)).diatonic_pitch)
assert len(result_pitches) == 33
for i in range(0, 33):
print('[{0}] {1}'.format(i, str(result_pitches[i])))
checks = ['C:4', 'G:4', 'D:5', 'F:5', 'G:5', 'F:5', 'D:5', 'G:4', 'C:4']
for i in range(0, len(checks)):
assert checks[i] == str(result_pitches[i])
BASE = DiatonicPitch.parse('C:4').chromatic_distance
@staticmethod
def sinasoidal(v):
return TestFitPitchToFunctionConstraint.BASE + 19 * math.sin(2 * math.pi * v)
@staticmethod
def policy_creator(modality_type, modality_tone, tertian_chord_txt, low_pitch_txt, hi_pitch_txt):
diatonic_tonality = Tonality.create(modality_type, modality_tone)
chord = TertianChordTemplate.parse(tertian_chord_txt).create_chord(diatonic_tonality)
hc = HarmonicContext(diatonic_tonality, chord, Duration(1, 2))
pitch_range = PitchRange(DiatonicPitch.parse(low_pitch_txt).chromatic_distance,
DiatonicPitch.parse(hi_pitch_txt).chromatic_distance)
return PolicyContext(hc, pitch_range)
@staticmethod
def build_simple_constraint(v_note, f, modality_type, key_str, chord_str):
lower_policy_context = TestFitPitchToFunctionConstraint.policy_creator(modality_type, DiatonicTone(key_str),
chord_str, 'C:2', 'C:8')
tempo_seq = TempoEventSequence()
ts_seq = EventSequence()
tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))
ts_seq.add(TimeSignatureEvent(TimeSignature(3, Duration(1, 4), 'sww'), Position(0)))
return FitPitchToFunctionConstraint(v_note, f, tempo_seq, ts_seq), lower_policy_context
|
{
"content_hash": "49c3552c2610f4c88d2272eb54585ee6",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 119,
"avg_line_length": 44.728476821192054,
"alnum_prop": 0.6209653538643767,
"repo_name": "dpazel/music_rep",
"id": "de3be5a7011404c1751c392f8ba057690cf425f4",
"size": "6754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/melody_tests/constraints_tests/test_fit_pitch_to_function_constraint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "6964"
},
{
"name": "Python",
"bytes": "1584408"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetArtistTracks(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetArtistTracks Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetArtistTracks, self).__init__(temboo_session, '/Library/LastFm/User/GetArtistTracks')
def new_input_set(self):
return GetArtistTracksInputSet()
def _make_result_set(self, result, path):
return GetArtistTracksResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetArtistTracksChoreographyExecution(session, exec_id, path)
class GetArtistTracksInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetArtistTracks
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) Your Last.fm API Key.)
"""
super(GetArtistTracksInputSet, self)._set_input('APIKey', value)
def set_Artist(self, value):
"""
Set the value of the Artist input for this Choreo. ((required, string) The artist name you are interested in.)
"""
super(GetArtistTracksInputSet, self)._set_input('Artist', value)
def set_EndTimestamp(self, value):
"""
Set the value of the EndTimestamp input for this Choreo. ((optional, date) A unix timestamp to end at.)
"""
super(GetArtistTracksInputSet, self)._set_input('EndTimestamp', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page number to fetch. Defaults to 1.)
"""
super(GetArtistTracksInputSet, self)._set_input('Page', value)
def set_StartTimestamp(self, value):
"""
Set the value of the StartTimestamp input for this Choreo. ((optional, date) A unix timestamp to start at.)
"""
super(GetArtistTracksInputSet, self)._set_input('StartTimestamp', value)
def set_User(self, value):
"""
Set the value of the User input for this Choreo. ((required, string) The last.fm username to fetch the recent tracks of.)
"""
super(GetArtistTracksInputSet, self)._set_input('User', value)
class GetArtistTracksResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetArtistTracks Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Last.fm.)
"""
return self._output.get('Response', None)
class GetArtistTracksChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetArtistTracksResultSet(response, path)
|
{
"content_hash": "16fb8e442f17379c4237dab6818b4062",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 129,
"avg_line_length": 40.97530864197531,
"alnum_prop": 0.6824344682133172,
"repo_name": "jordanemedlock/psychtruths",
"id": "f9fd666b956885bc663460dd2ee7d5a3977461eb",
"size": "4233",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/LastFm/User/GetArtistTracks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
from common_fixtures import * # NOQA
def _clean_hostlabelmaps_for_host(client, host):
for label in host.hostLabels():
host.removelabel(label=label.id)
wait_for_condition(
client, host,
lambda x: len(x.hostLabels()) == 0,
lambda x: 'Number of labels for host is: ' + len(x.hostLabels()))
def test_edit_host_label(super_client, context):
host = context.host
_clean_hostlabelmaps_for_host(super_client, host)
new_labels = {'role': 'web+db',
'nom': 'foobar'}
host = super_client.update(host, labels=new_labels)
wait_for_condition(
super_client, host,
lambda x: len(x.hostLabels()) == 2,
lambda x: 'Number of labels for host is: ' +
len(x.hostLabels()))
_assert_labels(host.hostLabels(), new_labels)
host = super_client.wait_success(host)
assert host.state == 'active'
new_labels = {'role': 'web+db',
'foo': 'bar',
'loc': 'sf'}
host = super_client.update(host, labels=new_labels)
wait_for_condition(
super_client, host,
lambda x: len(x.hostLabels()) == 3 and
_get_labels_map(x.hostLabels()).get('loc') == 'sf',
lambda x: 'Host labels are: ' + str(_get_labels_map(x.hostLabels())))
_assert_labels(host.hostLabels(), new_labels)
def _assert_labels(labels_list, checking_for_labels):
labels_map = _get_labels_map(labels_list)
for k, v in checking_for_labels.items():
assert labels_map.get(k) is not None and labels_map.get(k) == v
def _get_labels_map(labels_list):
labels_map = {}
for label in labels_list:
labels_map[label.key] = label.value
return labels_map
|
{
"content_hash": "b8d46b40c5d64aead60454582d5701fc",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 30.732142857142858,
"alnum_prop": 0.599651365485183,
"repo_name": "rancherio/cattle",
"id": "af660e993421d2ac4ece22f98f440fda3fba6e2d",
"size": "1721",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/integration/cattletest/core/test_label.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "13960"
},
{
"name": "Java",
"bytes": "5846427"
},
{
"name": "Python",
"bytes": "725435"
},
{
"name": "Shell",
"bytes": "45883"
}
],
"symlink_target": ""
}
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import string
import sys
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write("wrapper.py",
"""import os
import string
import sys
open('%s', 'wb').write("wrapper.py\\n")
args = filter(lambda s: s != 'fake_link_flag', sys.argv[1:])
os.system(string.join(args, " "))
""" % string.replace(test.workpath('wrapper.out'), '\\', '\\\\'))
test.write('SConstruct', """
foo = Environment()
bar = Environment(LINK = foo.subst(r'%(_python_)s wrapper.py $LINK'),
LINKFLAGS = foo.subst('$LINKFLAGS fake_link_flag'))
foo.Program(target = 'foo', source = 'foo.c')
bar.Program(target = 'bar', source = 'bar.c')
""" % locals())
test.write('foo.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.write('bar.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.run(arguments = 'foo' + _exe)
test.must_not_exist(test.workpath('wrapper.out'))
test.run(arguments = 'bar' + _exe)
test.fail_test(test.read('wrapper.out') != "wrapper.py\n")
test.pass_test()
|
{
"content_hash": "24867c5dd5988dc21eae432d7fd8e4f7",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 69,
"avg_line_length": 21.03174603174603,
"alnum_prop": 0.5954716981132075,
"repo_name": "datalogics/scons",
"id": "d80960d197b2a7941ac3792c64107afa314037ec",
"size": "2427",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/LINK/LINKFLAGS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1519"
},
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4756209"
},
{
"name": "Shell",
"bytes": "13866"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import mock
from src.main.Parser import Parser
@mock.patch('random.randint', return_value=3)
class TestParser(TestCase):
def test_double_dice_roll(self, mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("2d4+3d5"), 15)
self.assertEqual(parser.parse("2d4-3d5"), -3)
self.assertEqual(parser.parse("2d4*3d5"), 54)
self.assertEqual(parser.parse("4d4/2d4"), 2)
def test_mulitply(self, mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("2d4*4"), 24)
def test_divide(self, mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("2d4/6"), 1)
def test_subtract(self, mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("2d4-2"), 4)
def test_addition(self, mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("2d4+5"), 11)
def test_constants(self, mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("4"), 4)
self.assertEqual(parser.parse("4+3"), 7)
self.assertEqual(parser.parse("4-3"), 1)
self.assertEqual(parser.parse("6/3"), 2)
self.assertEqual(parser.parse("4*3"), 12)
def test_dice_roll(self, mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("3d4"), 9)
def test_negative_roll(self, mocked_roll):
parser = Parser()
self.assertRaises(SyntaxError,lambda:parser.parse("-3d4"))
self.assertRaises(SyntaxError,lambda:parser.parse("3d-4"))
self.assertRaises(SyntaxError,lambda:parser.parse("-3d4-4"))
def test_mod(self,mocked_roll):
parser = Parser()
self.assertEqual(parser.parse("3d4"),9)
self.assertEqual(parser.parse("3d4k2H"),6)
self.assertEqual(parser.parse("3d4r4"),9)
self.assertEqual(parser.parse("3d4r4k2H"),6)
def test_negative_mod(self, mocked_roll):
parser = Parser()
self.assertRaises(SyntaxError,lambda: parser.parse("3d3k4Hr2"))
self.assertRaises(ValueError,lambda:parser.parse("0d4k2H"))
self.assertRaises(ValueError,lambda:parser.parse("3d0k2H"))
self.assertRaises(ValueError, lambda: parser.parse("3d3k0L"))
self.assertRaises(ValueError,lambda: parser.parse("3d3k4H"))
self.assertRaises(ValueError, lambda: parser.parse("3d3r0k2L"))
self.assertRaises(SyntaxError, lambda: parser.parse("3d3k2Lr0"))
|
{
"content_hash": "de396fa5cc30d0910d4f9a744089e200",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 72,
"avg_line_length": 37.34848484848485,
"alnum_prop": 0.6478701825557809,
"repo_name": "charleshamel73/diceroller",
"id": "9d10aa21e6fe82757645793294ce6384ab55a511",
"size": "2465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18984"
}
],
"symlink_target": ""
}
|
import Axon
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Graphline import Graphline
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
from Kamaelia.Visualisation.PhysicsGraph.chunks_to_lines import chunks_to_lines
from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists
from Kamaelia.Visualisation.PhysicsGraph.TopologyViewerComponent import TopologyViewerComponent
from Kamaelia.UI.Pygame.Button import Button
class EditorLogic(Axon.Component.component):
Inboxes = {
"inbox" : "Not used yet",
"control" : "Not listened to yet - BUG",
"itemselect" : "Recieves messages saying which item is currently selected",
"editnode" : "Inbox where we recieve messages telling us to switch to edit mode",
"newnode" : "Inbox where we recieve messages telling us to add a node",
"delnode" : "Inbox where we recieve messages telling us to delete a node",
"linknode" : "Inbox where we recieve messages telling us to form links",
}
Outboxes = {
"outbox" : "Passthrough of events from the TVC",
"signal" : "We don't do anything here yet",
"nodeevents" : "Where messages to control a TVC go",
}
def main(self):
import time
node = None
linkstart = None
linkmode = False
n = 1
while 1:
yield 1
#
# This really looks like a bunch of composite components operating on shared state.
# Leads to an interesting question - how can we use this to our advantage - we
# "clearly" can, but the question is "how" ?
# One thing that's very clear here is this - even if these are all operating on shared
# state, there's one thing that *must* be true - these components must NOT operate
# concurrently.
#
# (Hmm... Sequential code on shared state defaulting to preclude concurrency? Makes
# sense thinking about it)
#
# Hmmm. Further thought - this is akin to the idea of multiple mains run sequentially
# inside a component. What if those "mains" are sequential sub-component mixins?
#
# This aspect is //similar// to the exogenous connectors idea.
#
if self.dataReady("itemselect"):
event,type,new_node = self.recv("itemselect")
if event is "SELECT":
if new_node is not None:
node = new_node
if linkmode and linkstart is not None:
self.send(("ADD", "LINK", linkstart, node), "nodeevents")
linkmode = False
linkstart = None
self.send((event,type,new_node), "outbox")
if self.dataReady("editnode"):
self.recv("editnode")
if node is not None:
self.send( ("UPDATE_NAME", "NODE", node, time.asctime()), "nodeevents")
if self.dataReady("newnode"):
self.recv("newnode")
self.send( ("ADD", "NODE", n, "Unconfigured Component "+str(n), "auto", "-"), "nodeevents")
node = n
n = n + 1
if self.dataReady("delnode"):
self.recv("delnode")
if node is not None:
self.send(("DEL", "NODE", node), "nodeevents")
if self.dataReady("linknode"):
self.recv("linknode")
if node is not None:
linkstart = node
linkmode = True
TVC = TopologyViewerComponent(position=(0,0)).activate()
Graphline(
NEW = Button(caption="New Component", msg="NEXT", position=(72,8)),
EDIT = Button(caption="Edit Component", msg="NEXT", position=(182,8)),
DEL = Button(caption="Delete Component", msg="NEXT", position=(292,8)),
LINK = Button(caption="Make Link", msg="NEXT", position=(402,8)),
CONSOLEINPUT = pipeline(
ConsoleReader(">>> "),
chunks_to_lines(),
lines_to_tokenlists(),
),
EDITOR_LOGIC = EditorLogic(),
DEBUG = ConsoleEchoer(),
TVC = TVC,
linkages = {
("CONSOLEINPUT", "outbox"): ("TVC", "inbox"),
("NEW", "outbox"): ("EDITOR_LOGIC", "newnode"),
("EDIT", "outbox"): ("EDITOR_LOGIC", "editnode"),
("DEL", "outbox") : ("EDITOR_LOGIC", "delnode"),
("LINK", "outbox") : ("EDITOR_LOGIC", "linknode"),
("TVC", "outbox") : ("EDITOR_LOGIC", "itemselect"),
("EDITOR_LOGIC", "outbox") : ("DEBUG", "inbox"),
("EDITOR_LOGIC", "nodeevents") : ("TVC", "inbox"),
}
).run()
|
{
"content_hash": "f6efc007d907896eb72b97f63effbcd3",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 107,
"avg_line_length": 43.36036036036036,
"alnum_prop": 0.5630583835445668,
"repo_name": "bbc/kamaelia",
"id": "80573ef713f4eade98a1e9295b1ba8c98fbfb894",
"size": "4832",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/MPS/Experiments/Editor2/grapheditor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
"""Tests for Qt Linguist storage class"""
from lxml import etree
from translate.misc.multistring import multistring
from translate.storage import ts2 as ts
from translate.storage import test_base
from translate.storage.placeables import parse
from translate.storage.placeables import xliff
from translate.storage.placeables.lisa import xml_to_strelem
xliffparsers = []
for attrname in dir(xliff):
attr = getattr(xliff, attrname)
if type(attr) is type and \
attrname not in ('XLIFFPlaceable') and \
hasattr(attr, 'parse') and \
attr.parse is not None:
xliffparsers.append(attr.parse)
def rich_parse(s):
return parse(s, xliffparsers)
class TestTSUnit(test_base.TestTranslationUnit):
UnitClass = ts.tsunit
class TestTSfile(test_base.TestTranslationStore):
StoreClass = ts.tsfile
def test_basic(self):
tsfile = ts.tsfile()
assert tsfile.units == []
tsfile.addsourceunit("Bla")
assert len(tsfile.units) == 1
newfile = ts.tsfile.parsestring(str(tsfile))
print str(tsfile)
assert len(newfile.units) == 1
assert newfile.units[0].source == "Bla"
assert newfile.findunit("Bla").source == "Bla"
assert newfile.findunit("dit") is None
def test_source(self):
tsfile = ts.tsfile()
tsunit = tsfile.addsourceunit("Concept")
tsunit.source = "Term"
newfile = ts.tsfile.parsestring(str(tsfile))
print str(tsfile)
assert newfile.findunit("Concept") is None
assert newfile.findunit("Term") is not None
def test_target(self):
tsfile = ts.tsfile()
tsunit = tsfile.addsourceunit("Concept")
tsunit.target = "Konsep"
newfile = ts.tsfile.parsestring(str(tsfile))
print str(tsfile)
assert newfile.findunit("Concept").target == "Konsep"
def test_plurals(self):
"""Test basic plurals"""
tsfile = ts.tsfile()
tsunit = tsfile.addsourceunit("File(s)")
tsunit.target = [u"Leêr", u"Leêrs"]
newfile = ts.tsfile.parsestring(str(tsfile))
print str(tsfile)
checkunit = newfile.findunit("File(s)")
assert checkunit.target == [u"Leêr", u"Leêrs"]
assert checkunit.hasplural()
def test_language(self):
"""Check that we can get and set language and sourcelanguage
in the header"""
tsstr = '''<!DOCTYPE TS>
<TS version="2.0" language="fr" sourcelanguage="de">
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr)
assert tsfile.gettargetlanguage() == 'fr'
assert tsfile.getsourcelanguage() == 'de'
tsfile.settargetlanguage('pt_BR')
assert 'pt_BR' in str(tsfile)
assert tsfile.gettargetlanguage() == 'pt-br'
# We convert en_US to en
tsstr = '''<!DOCTYPE TS>
<TS version="2.0" language="fr" sourcelanguage="en_US">
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr)
assert tsfile.getsourcelanguage() == 'en'
def test_locations(self):
"""test that locations work well"""
tsstr = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
<name>MainWindow</name>
<message>
<location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
<source>Desktop Settings (Default)</source>
<translation>Asztali beállítások (Alapértelmezett)</translation>
</message>
<message>
<location line="+5"/>
<source>Choose style and palette based on your desktop settings.</source>
<translation>Stílus és paletta alapú kiválasztása az asztali beállításokban.</translation>
</message>
</context>
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr)
assert len(tsfile.units) == 2
assert tsfile.units[0].getlocations() == ['../tools/qtconfig/mainwindow.cpp:+202']
assert tsfile.units[1].getlocations() == ['+5']
def test_merge_with_fuzzies(self):
"""test that merge with fuzzy works well"""
tsstr1 = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
<name>MainWindow</name>
<message>
<location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
<source>Desktop Settings (Default)</source>
<translation type="unfinished">Asztali beállítások (Alapértelmezett)</translation>
</message>
<message>
<location line="+5"/>
<source>Choose style and palette based on your desktop settings.</source>
<translation>Stílus és paletta alapú kiválasztása az asztali beállításokban.</translation>
</message>
</context>
</TS>
'''
tsstr2 = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
<name>MainWindow</name>
<message>
<location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
<source>Desktop Settings (Default)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Choose style and palette based on your desktop settings.</source>
<translation type="unfinished"/>
</message>
</context>
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr1)
tsfile2 = ts.tsfile.parsestring(tsstr2)
assert len(tsfile.units) == 2
assert len(tsfile2.units) == 2
tsfile2.units[0].merge(tsfile.units[0]) #fuzzy
tsfile2.units[1].merge(tsfile.units[1]) #not fuzzy
assert tsfile2.units[0].isfuzzy() == True
assert tsfile2.units[1].isfuzzy() == False
|
{
"content_hash": "656dff124ec117f7cfa776a9b1770a0f",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 98,
"avg_line_length": 33.622754491017965,
"alnum_prop": 0.6329474621549421,
"repo_name": "rlr/fjord",
"id": "ae2f0f4da760d9fc2a7a993d61c358bfc095219a",
"size": "6422",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/storage/test_ts2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "159723"
},
{
"name": "HTML",
"bytes": "133287"
},
{
"name": "JavaScript",
"bytes": "304461"
},
{
"name": "Python",
"bytes": "823931"
},
{
"name": "Shell",
"bytes": "11741"
},
{
"name": "Smarty",
"bytes": "780"
}
],
"symlink_target": ""
}
|
import sys
import os
import re
import tempfile
import shutil
import argparse
import glob
import stat
from LoggerManager import logger, initConsoleLogging, initFileLogging
""" constants """
EXTERNAL_DATA_SIZE_THRESHOLD = 1*1024*1024 # 1 MiB
EXTERNAL_DATA_PREFIX = ".ExternalData_SHA1_"
IGNORE_FILE_LIST = ("CMakeLists.txt")
VALID_KIDS_BUILD_SUFFIX_LIST = (".KIDs", "KIDS", ".KID", ".kids", ".kid")
VALID_PATCH_INFO_SUFFIX_LIST = (".TXTs",".TXT",".txt","txts")
VALID_CSV_FILE_SUFFIX_LIST = (".csv",".CSV")
VALID_GLOBAL_FILE_SUFFIX_LIST = (".GBLs", ".GBL")
VALID_ROUTINE_IMPORT_FILE_SUFFIX_LIST = (".RSA", "rsa", ".RO", ".ro")
VALID_HEADER_FILE_SUFFIX_LIST = (".json",".JSON")
VALID_SHA1_FILE_SUFFIX_LIST = (".SHA1",".sha1")
VALID_KIDS_BUILD_HEADER_SUFFIX_LIST = tuple(
[x+y for x in VALID_KIDS_BUILD_SUFFIX_LIST for y in VALID_HEADER_FILE_SUFFIX_LIST]
)
VALID_KIDS_BUILD_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_KIDS_BUILD_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
VALID_PATCH_INFO_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_PATCH_INFO_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
VALID_GLOBAL_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_GLOBAL_FILE_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
VALID_ROUTINE_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_ROUTINE_IMPORT_FILE_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
"""
Utilities functions to check if it is valid file type extension
"""
def isValidSha1Suffix(fileName):
return fileName.endswith(VALID_SHA1_FILE_SUFFIX_LIST)
def isValidPythonSuffix(fileName):
return fileName.endswith(".py")
def isValidKIDSBuildSuffix(fileName):
return fileName.endswith(VALID_KIDS_BUILD_SUFFIX_LIST)
def isValidPatchInfoSuffix(fileName):
return fileName.endswith(VALID_PATCH_INFO_SUFFIX_LIST)
def isValidGlobalFileSuffix(fileName):
return fileName.endswith(VALID_GLOBAL_FILE_SUFFIX_LIST)
def isValidRoutineFileSuffix(fileName):
return fileName.endswith(VALID_ROUTINE_IMPORT_FILE_SUFFIX_LIST)
def isValidKIDSBuildHeaderSuffix(fileName):
return fileName.endswith(VALID_KIDS_BUILD_HEADER_SUFFIX_LIST)
def isValidKIDSBuildSha1Suffix(fileName):
return fileName.endswith(VALID_KIDS_BUILD_SHA1_SUFFIX_LIST)
def isValidPatchInfoSha1Suffix(fileName):
return fileName.endswith(VALID_PATCH_INFO_SHA1_SUFFIX_LIST)
def isValidGlobalSha1Suffix(fileName):
return fileName.endswith(VALID_GLOBAL_SHA1_SUFFIX_LIST)
def isValidRoutineSha1Suffix(fileName):
return fileName.endswith(VALID_ROUTINE_SHA1_SUFFIX_LIST)
def isValidCSVSuffix(fileName):
return fileName.endswith(VALID_CSV_FILE_SUFFIX_LIST)
def isValidPatchDataSuffix(fileName, includeExternalExt=False):
isValid = ( isValidKIDSBuildSuffix(fileName) or
isValidPatchInfoSuffix(fileName) or
isValidGlobalFileSuffix(fileName) or
isValidRoutineFileSuffix(fileName) or
isValidCSVSuffix(fileName) or
isValidPythonSuffix(fileName)
)
if includeExternalExt:
isValid = isValid or (isValidKIDSBuildHeaderSuffix(fileName) or
isValidKIDSBuildSha1Suffix(fileName) or
isValidPatchInfoSha1Suffix(fileName) or
isValidGlobalSha1Suffix(fileName) or
isValidRoutineSha1Suffix(fileName)
)
return isValid
def isValidPatchRelatedFiles(absFileName, checkExternalExt=False):
fileName = os.path.basename(absFileName)
# ignore files that starts with .
if fileName.startswith('.'):
return False
# ignore symlink files as well
try:
st = os.stat(absFileName)
if stat.S_ISLNK(st.st_mode):
return False
except OSError:
return False
# ignore the external data file
if fileName.startswith(EXTERNAL_DATA_PREFIX):
return False
if fileName in IGNORE_FILE_LIST:
return False
# ignore invalid file extensions
if not isValidPatchDataSuffix(fileName, checkExternalExt):
return False
return True
""" utility function to check if externalData name is valid """
def isValidExternalDataFileName(fileName):
baseName = os.path.basename(fileName)
return baseName.startswith(EXTERNAL_DATA_PREFIX)
""" retrive sha1 hash from the filename directly """
def getSha1HashFromExternalDataFileName(fileName):
baseName = os.path.basename(fileName)
return baseName[len(EXTERNAL_DATA_PREFIX):]
""" generate External Data filename """
def generateExternalDataFileName(sha1Sum):
return "%s%s" % (EXTERNAL_DATA_PREFIX, sha1Sum)
""" read the sha1Sum from sha1 file """
def readSha1SumFromSha1File(sha1File):
with open(sha1File, "r") as input:
return input.readline().rstrip('\r\n ')
""" add file to git ignore list
@fileName: absolute path of the file
"""
def addToGitIgnoreList(fileName):
rootDir = os.path.dirname(fileName)
basename = os.path.basename(fileName)
gitIgnoreFile = os.path.join(rootDir, ".gitignore")
if os.path.exists(gitIgnoreFile):
with open(gitIgnoreFile, "r") as ignoreFile:
for line in ignoreFile:
if line.strip(' \r\n') == basename:
return
with open(gitIgnoreFile, "a") as ignoreFile:
ignoreFile.write("%s\n" % basename)
else:
with open(gitIgnoreFile, "w") as ignoreFile:
ignoreFile.write("%s\n" % basename)
""" utility method to generate sha1 hash key for input file """
def generateSha1Sum(inputFilename):
assert os.path.exists(inputFilename)
fileSize = os.path.getsize(inputFilename)
MAX_READ_SIZE = 20 * 1024 * 1024 # 20 MiB
buf = fileSize/50
if buf > MAX_READ_SIZE:
buf = MAX_READ_SIZE
with open(inputFilename, "r") as inputFile:
return generateSha1SumCommon(inputFile, buf)
""" utility method to generate sha1 hash key for file like object """
def generateSha1SumCommon(fileObject, buf=1024):
import hashlib
sha1sum = hashlib.sha1()
while True:
nByte = fileObject.read(buf)
if nByte:
sha1sum.update(nByte)
else:
break
return sha1sum.hexdigest()
""" Convert the KIDS Build, Global or TXT file to External Data format """
class ExternalDataConverter(object):
def __init__(self, externalDir, gitignore=False,
sizeLimit=EXTERNAL_DATA_SIZE_THRESHOLD):
self._externDir = None
self._gitIgnore = gitignore
if externalDir != None and os.path.exists(externalDir):
self._externDir = os.path.abspath(externalDir)
if sizeLimit <=0:
sizeLimit = EXTERNAL_DATA_SIZE_THRESHOLD
self._sizeLimit = sizeLimit
"""
Convert All the files with size > than threshold under the current
directory recursively
"""
def convertCurrentDir(self, curDir):
assert os.path.exists(curDir)
absCurDir = os.path.abspath(curDir)
for (root, dirs, files) in os.walk(absCurDir):
for fileName in files:
absFileName = os.path.join(root, fileName)
if not isValidPatchRelatedFiles(absFileName):
continue
# get the size of the file
fileSize = os.path.getsize(absFileName)
if fileSize < self._sizeLimit:
continue
if isValidKIDSBuildSuffix(fileName):
logger.info("converting KIDS file %s " % absFileName)
self.convertKIDSBuildFile(absFileName)
else:
self.convertToSha1File(absFileName)
""" """
def convertKIDSBuildFile(self, kidsFile):
from KIDSBuildParser import KIDSBuildParser, outputMetaDataInJSON
assert os.path.exists(kidsFile)
""" write KIDS metadata file """
kidsParser = KIDSBuildParser(None)
""" do not parse the routine part """
kidsParser.unregisterSectionHandler(KIDSBuildParser.ROUTINE_SECTION)
kidsParser.parseKIDSBuild(kidsFile)
logger.info("output meta data as %s" % (kidsFile+".json"))
outputMetaDataInJSON(kidsParser, kidsFile+".json")
self.convertToSha1File(kidsFile)
""" """
def convertToSha1File(self, inputFile):
assert os.path.exists(inputFile)
""" write the sha-1 hash to the .SHA1 file """
sha1Sum = generateSha1Sum(inputFile)
with open(inputFile + ".sha1", "w") as output:
output.write("%s\n" % sha1Sum)
""" add the file to ignore list """
if self._gitIgnore:
addToGitIgnoreList(inputFile)
self.__moveToExternalDir__(inputFile, sha1Sum)
def __moveToExternalDir__(self, fileName, sha1Sum):
assert os.path.exists(fileName)
destFile = generateExternalDataFileName(sha1Sum)
if self._externDir:
destFile = os.path.join(self._externDir, destFile)
else:
destFile = os.path.join(os.path.dirname(fileName), destFile)
if os.path.exists(destFile):
if generateSha1Sum(destFile) == sha1Sum:
os.remove(fileName)
logger.info("%s already exists and is valid" % destFile)
return
os.remove(destFile)
os.rename(fileName, destFile)
def main():
initConsoleLogging()
parser = argparse.ArgumentParser(
description='Convert Patch Data to external data format')
parser.add_argument('-i', '--inputDir', required=True,
help='path to top leve directory to convert all patch data')
parser.add_argument('-e', '--externalDataDir', required=False, default=None,
help='output dir to store the external data,'
' default is inplace')
parser.add_argument('-g', '--gitignore', required=False, default=False,
action="store_true",
help='Add original file to .gitignore, default is not')
parser.add_argument('-s', '--size', default=1, type=int,
help='file size threshold to be converted to external '
'data, unit is MiB, default is 1(MiB)')
parser.add_argument('-l', '--logFile', default=None,
help='output log file, default is no')
result = parser.parse_args();
logger.info (result)
if result.logFile:
initFileLogging(result.logFile)
converter = ExternalDataConverter(result.externalDataDir, result.gitignore,
result.size*EXTERNAL_DATA_SIZE_THRESHOLD)
converter.convertCurrentDir(result.inputDir)
""" -------- TEST CODE SECTION -------- """
TEST_INPUT_STRING = "LR*5.2*334"
def test_generateSha1SumCommon(inputString=TEST_INPUT_STRING):
import StringIO
stringIo = StringIO.StringIO(inputString) # convert string to stringIO
print generateSha1SumCommon(stringIo)
TEST_INPUT_FILE = "../Packages/MultiBuilds/CPRS28_RELATED.KID"
def test_generateSha1Sum(inputFile=TEST_INPUT_FILE):
print generateSha1Sum(inputFile)
if __name__ == '__main__':
main()
|
{
"content_hash": "05194cce77ddf460195f436242cbdd4f",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 91,
"avg_line_length": 35.863945578231295,
"alnum_prop": 0.6976479514415781,
"repo_name": "luisibanez/vista-debian-med-package",
"id": "14d19bd21145ef285a03eeefbab699f49c3d08e9",
"size": "11309",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Scripts/ConvertToExternalData.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9885"
},
{
"name": "JavaScript",
"bytes": "5277"
},
{
"name": "M",
"bytes": "6982"
},
{
"name": "Python",
"bytes": "1021549"
},
{
"name": "Ruby",
"bytes": "5667"
},
{
"name": "Shell",
"bytes": "45819"
}
],
"symlink_target": ""
}
|
"""
Periscope API for the masses
"""
import os
import time
from periapi.downloadmgr import DownloadManager
from periapi.listener import Listener
from periapi.broadcast import Broadcast
DEFAULT_NOTIFICATION_INTERVAL = 15
class AutoCap:
"""Class to check notifications stream and start capping new broadcasts"""
def __init__(self, api, listener_opts, quiet_mode=False):
self.quiet_mode = quiet_mode
self.keep_running = True
self.api = api
self.config = self.api.session.config
if not self.config.get('download_directory'):
self.config['download_directory'] = os.path.join(os.path.expanduser('~'), 'downloads')
self.config.write()
if not os.path.exists(self.config.get("download_directory")):
os.makedirs(self.config.get("download_directory"))
self.listener = Listener(api=self.api, **listener_opts)
self.downloadmgr = DownloadManager(api=self.api)
def start(self):
"""Starts autocapper loop"""
loops = 0
while self.keep_running:
new_broadcasts = self.listener.check_for_new()
if new_broadcasts:
for broadcast in new_broadcasts:
self.downloadmgr.start_dl(broadcast)
if not self.quiet_mode:
loops = self.print_current_status(loops)
time.sleep(self.interval)
self.downloadmgr.pool.close()
self.downloadmgr.pool.join()
def stop(self):
"""Stops autocapper loop"""
self.keep_running = False
def cap_one(self, broadcast_id):
"""Cap a single broadcast"""
broadcast_info = self.api.get_access(broadcast_id).get('broadcast')
broadcast = Broadcast(self.api, broadcast_info)
self.downloadmgr.start_dl(broadcast)
_ = len(self.downloadmgr.active_downloads)
while _ > 0:
if not self.quiet_mode:
print(self.downloadmgr.status)
time.sleep(self.interval)
self.downloadmgr.sema.acquire()
_ = len(self.downloadmgr.active_downloads)
self.downloadmgr.sema.release()
self.downloadmgr.pool.close()
self.downloadmgr.pool.join()
def cap_user(self, username):
"""Cap all broadcasts by a user"""
user_id = self.api.find_user_id(username)
broadcasts = self.api.get_user_broadcast_history(user_id)
if len(broadcasts) < 1:
print("No broadcast history found for {}".format(username))
return None
for i in broadcasts:
broadcast = Broadcast(self.api, i)
self.downloadmgr.start_dl(broadcast)
_ = len(self.downloadmgr.active_downloads)
while _ > 0:
if not self.quiet_mode:
print(self.downloadmgr.status)
time.sleep(self.interval)
self.downloadmgr.sema.acquire()
_ = len(self.downloadmgr.active_downloads)
self.downloadmgr.sema.release()
self.downloadmgr.pool.close()
self.downloadmgr.pool.join()
def print_current_status(self, loops):
"""Prints current status and lists active downloads every so often"""
print(self.downloadmgr.status)
if (loops * self.interval) > 150:
loops = 0
if len(self.downloadmgr.currently_downloading) > 0:
print("\tCurrently downloading:")
for bc_title in self.downloadmgr.currently_downloading:
print("\t{}".format(bc_title))
else:
loops += 1
return loops
@property
def interval(self):
"""Get the interval (in seconds) to check for notifications.
Set default if no value exists."""
if not self.config.get('notification_interval'):
self.config['notification_interval'] = DEFAULT_NOTIFICATION_INTERVAL
self.config.write()
return self.config.get('notification_interval')
@interval.setter
def interval(self, value):
"""Set the interval (in seconds) to check for notifications"""
self.config['notification_interval'] = value
self.config.write()
|
{
"content_hash": "b4065b5d060c3d4ff59fee8a524e48f3",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 98,
"avg_line_length": 34.66942148760331,
"alnum_prop": 0.6095351609058403,
"repo_name": "baliscope/periapi",
"id": "66e39dd6d9e5e7508903b12ec9efe54758a4b13e",
"size": "4218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "periapi/autocap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59858"
}
],
"symlink_target": ""
}
|
"""
This is a pure Python implementation of the greedy-merge-sort algorithm
reference: https://www.geeksforgeeks.org/optimal-file-merge-patterns/
For doctests run following command:
python3 -m doctest -v greedy_merge_sort.py
Objective
Merge a set of sorted files of different length into a single sorted file.
We need to find an optimal solution, where the resultant file
will be generated in minimum time.
Approach
If the number of sorted files are given, there are many ways
to merge them into a single sorted file.
This merge can be performed pair wise.
To merge a m-record file and a n-record file requires possibly m+n record moves
the optimal choice being,
merge the two smallest files together at each step (greedy approach).
"""
def optimal_merge_pattern(files: list) -> float:
"""Function to merge all the files with optimum cost
Args:
files [list]: A list of sizes of different files to be merged
Returns:
optimal_merge_cost [int]: Optimal cost to merge all those files
Examples:
>>> optimal_merge_pattern([2, 3, 4])
14
>>> optimal_merge_pattern([5, 10, 20, 30, 30])
205
>>> optimal_merge_pattern([8, 8, 8, 8, 8])
96
"""
optimal_merge_cost = 0
while len(files) > 1:
temp = 0
# Consider two files with minimum cost to be merged
for _ in range(2):
min_index = files.index(min(files))
temp += files[min_index]
files.pop(min_index)
files.append(temp)
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
|
{
"content_hash": "548c63a80b32bca17b449666c6e3bc7b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 29.232142857142858,
"alnum_prop": 0.6725717776420281,
"repo_name": "TheAlgorithms/Python",
"id": "a1c934f84498b0beaa9113ed59767bc64726ecda",
"size": "1637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "greedy_methods/optimal_merge_pattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2601694"
}
],
"symlink_target": ""
}
|
import os
import shutil
import numpy as np
import skimage as ski
import skimage.io
import skimage.util
import skimage.measure
from scipy import ndimage as ndi
indir = '/groups/flyem/data/temp/ordishc/exports/raveler_export_for davi'
outdir = '/groups/saalfeld/home/nuneziglesiasj/data/raveler_export_davi_v7'
os.makedirs(os.path.join(outdir, 'superpixel_maps'), exist_ok=True)
# Every section must have one zero pixel, so choose a uniform, sacrificial pixel.
zero_pixel = [0, 0]
sp_fns = sorted(os.listdir(os.path.join(indir, 'superpixel_maps')))
sp_to_seg_file = 'superpixel_to_segment_map.txt'
seg_to_body_file = 'segment_to_body_map.txt'
# read images using generators
sections = (ski.io.imread(os.path.join(indir, 'superpixel_maps', f))
for f in sp_fns)
# find max body id
seg2bod = np.loadtxt(os.path.join(indir, seg_to_body_file),
dtype=int, delimiter='\t')
max_body_id = np.max(seg2bod[:, 1])
start_body = max_body_id + 1
sp2seg = np.loadtxt(os.path.join(indir, sp_to_seg_file),
dtype=int, delimiter='\t')
max_segment_id = max(np.max(seg2bod[:, 0]), np.max(sp2seg[:, 2]))
start_segment = max_segment_id + 1
# section boundaries in superpixel-to-segment map table
sec_idxs = np.unique(sp2seg[:, 0], return_index=True)[1]
sec_idxs = np.concatenate((sec_idxs, [len(sp2seg)]))
# we will append to these, concatenate, sort, rewrite.
sp2seg_new = [sp2seg]
seg2bod_new = [seg2bod]
for i, (filename, superpixels) in enumerate(zip(sp_fns, sections)):
print('Processing section %i...' % i)
# find max superpixel id and create background map
superpixel_map = np.sum(superpixels * [1, 1<<8, 1<<16, 0], axis=-1)
max_superpixel_id = max(np.max(superpixel_map), np.max(sp2seg[sp2seg[:, 0] == i, 1]))
bg_superpixels = (superpixel_map == 0)
# find all connected components of background
# * replace 0 with 1, everything else with 0
# * find connected components -> label image
# * add max superpixel id to label image, except where 0
labels, num_components = ndi.label(bg_superpixels)
replace = labels > 0
labels = labels[replace]
labels += max_superpixel_id
superpixels[replace, 0] = labels % (1<<8)
superpixels[replace, 1] = (labels % (1<<16)) // (1<<8)
superpixels[replace, 2] = labels // (1<<16)
superpixels[zero_pixel[0], zero_pixel[1], 0:3] = [0, 0, 0]
ski.io.imsave(os.path.join(outdir, 'superpixel_maps', filename),
superpixels)
# make maps by stacking arrays -- these will be written out with savetxt
unique_sps = np.unique(labels)
unique_seg = np.arange(start_segment, start_segment + num_components,
dtype=int)
unique_bod = np.arange(start_body, start_body + num_components,
dtype=int)
section = np.empty_like(unique_sps)
section.fill(i)
# add to the sp2seg and seg2bod maps
sp2seg_new.append(np.array([section, unique_sps, unique_seg]).T)
seg2bod_new.append(np.array([unique_seg, unique_bod]).T)
# update the current starting segment and body
start_segment += num_components
start_body += num_components
sp2seg = np.concatenate(sp2seg_new, axis=0)
# sort by section
sp2seg = sp2seg[np.argsort(sp2seg[:, 0]), :]
seg2bod = np.concatenate(seg2bod_new, axis=0)
np.savetxt(os.path.join(outdir, sp_to_seg_file), sp2seg,
fmt='%i', delimiter='\t')
np.savetxt(os.path.join(outdir, seg_to_body_file), seg2bod,
fmt='%i', delimiter='\t')
|
{
"content_hash": "3cb4069881aef1388c54a14a365778c4",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 89,
"avg_line_length": 36.677083333333336,
"alnum_prop": 0.6628798636750923,
"repo_name": "jni/special-scripts",
"id": "71806598dc6adef02bb92f734ff9d9b87c0aec55",
"size": "3544",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "raveler-reassign-zeros.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3544"
}
],
"symlink_target": ""
}
|
'''
The MIT License (MIT)
Copyright (c) 2016 Wei-Hung Weng
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Title : DNN pipeline
Author : Wei-Hung Weng
Created : 11/26/2016
Comment :
Modified:
09/16/2016:
remove cnn
csail: rpdr sg15
09/17/2016:
class balance
'''
#!/usr/bin/python
import os, sys, time, re, codecs, logging
import numpy as np
import scipy as sp
import pandas as pd
import cPickle as pickle
from W2v import *
from nltk.tokenize import sent_tokenize
from gensim.models import word2vec
from gensim.corpora.dictionary import Dictionary
from sklearn.preprocessing import LabelEncoder, LabelBinarizer, label_binarize
from sklearn.cross_validation import train_test_split, StratifiedKFold
from keras.utils.np_utils import to_categorical
import multiprocessing
from keras.preprocessing import sequence
from keras.models import Sequential, model_from_json
from keras.layers import Embedding, Dense, Dropout, LSTM, Bidirectional, Flatten
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.callbacks import EarlyStopping
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, roc_auc_score, classification_report
from keras import backend as K
def multiclass_roc_auc_score(truth, pred, average="macro"):
lb = LabelBinarizer()
lb.fit(truth)
truth = lb.transform(truth)
pred = lb.transform(pred)
return roc_auc_score(truth, pred, average=average)
def create_dictionaries(data, model, feature):
gensim_dict = Dictionary()
gensim_dict.doc2bow(model.vocab.keys(), allow_update=True)
w2idx = {v: k+1 for k, v in gensim_dict.items()}
w2idxl = {v.lower(): k+1 for k, v in gensim_dict.items()}
#w2vec = {word: model[word.lower()] for word in w2idx.keys()}
w2vec = {}
for word in w2idx.keys():
if feature == 'bow':
try:
w2vec[word.lower()] = model[word]
except KeyError:
w2vec[word.lower()] = [0] * model.vector_size
else:
try:
w2vec[word] = model[word]
except KeyError:
w2vec[word] = [0] * model.vector_size
def parse_dataset(data, feature):
for key in data.keys():
if feature == 'bow':
txt = data[key].lower().replace('\n', '').split()
else:
txt = data[key].replace('\n', '').split()
new_txt = []
for word in txt:
try:
if feature == 'bow':
new_txt.append(w2idxl[word])
else:
new_txt.append(w2idx[word])
except:
new_txt.append(0)
data[key] = new_txt
return data
out = parse_dataset(data, feature)
return w2idx, w2vec, out
def Dnn(df, y, encoder, class_wt, trained_w2v, batch_size, n_epoch, \
best_score, feature, weighting, algorithm, repeat, kfold):
y = dict(zip(df.fname, y))
print("Class weight")
class_weight = {}
for i in xrange(len(encoder.classes_)):
if class_wt:
class_weight[i] = len(y) * 1. / sum(x == i for x in y.values())
else:
class_weight[i] = 1
print 'Read corpus for word2vec'
t = time.time()
raw = pd.DataFrame(df[feature]).values.tolist()
fnames = df['fname'].values.tolist()
print "Read %d corpus of documents" % len(raw)
t_e = time.time() - t
print 'Corpus reading time ' + str(t_e) + ' sec'
dict_content = zip(fnames, raw)
tok_dict = dict(dict_content)
del raw, fnames, dict_content
w2vTraining = [item for l in tok_dict.values() for item in l]
print 'Load word2vec model'
if trained_w2v == 'no':
t_w = time.time()
model, featureMatrix = W2v(data=w2vTraining, \
dimension=200, window=5, subsample=1e-5, \
stem=False, concept=True, removeStopwords=True)
t_w2v = time.time() - t_w
print 'Word2vec training time ' + str(t_w2v) + ' sec'
elif trained_w2v == 'umls':
model = word2vec.Word2Vec.load_word2vec_format('/Users/weng/_hms_phi/DeVine_etal_200.txt', binary=False)
elif trained_w2v == 'pubmed':
model = word2vec.Word2Vec.load_word2vec_format('/Users/weng/_hms_phi/wikipedia-pubmed-and-PMC-w2v.bin', binary=True)
elif trained_w2v == 'google':
model = word2vec.Word2Vec.load_word2vec_format('.bin', binary=True)
w2v_dict = {}
for k in tok_dict.keys():
w2v_dict[k] = tok_dict[k][0]
w2v_idx, w2v_vec, w2v_emb = create_dictionaries(data=w2v_dict, model=model, feature=feature)
vocab_dim = model.vector_size
n_symbols = len(w2v_idx)
embedding_weights = np.zeros((n_symbols+1, vocab_dim))
for word, index in w2v_idx.items():
try:
embedding_weights[index, :] = w2v_vec[word]
except KeyError:
embedding_weights[index, :] = [0] * model.vector_size
#embedding_weights = embedding_weights[1:]
max_length = max((len(v), k) for k,v in w2v_emb.iteritems())[0]
X = w2v_dict
#https://github.com/fchollet/keras/issues/853
#There are 3 approaches:
#
#Learn embedding from scratch - simply add an Embedding layer to your model
#Fine tune learned embeddings - this involves setting word2vec / GloVe vectors as your Embedding layer's weights.
#Use word word2vec / Glove word vectors as inputs to your model, instead of one-hot encoding.
#The third one is the best option(Assuming the word vectors were obtained from the same domain as the inputs to your models. For e.g, if you are doing sentiment analysis on tweets, you should use GloVe vectors trained on tweets).
#
#In the first option, everything has to be learned from scratch. You dont need it unless you have a rare scenario. The second one is good, but, your model will be unnecessarily big with all the word vectors for words that are not frequently used.
np.random.seed(777)
print("Constrcut the dataset")
tmp = [X, y]
d = {}
for k in X.iterkeys():
d[k] = tuple(d[k] for d in tmp)
X = []
y = []
for k, v in d.iteritems():
X.append(v[0])
y.append(v[1])
print("Split the dataset")
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.3, random_state=42)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=max_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_length)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print("Convert labels to Numpy Sets")
y_train = np.array(y_train)
y_test = np.array(y_test)
print("Convert to categories")
y_train_dummy = to_categorical(y_train)
y_test_dummy = to_categorical(y_test)
print("Modeling")
input_length = max_length
cpu_count = multiprocessing.cpu_count()
pred_table = pd.DataFrame()
score_table = pd.DataFrame()
best_score = 0
best_model = best_feature = best_algorithm = coef = ''
skf = StratifiedKFold(y=y_train, n_folds=kfold, shuffle=True, random_state=None)
for idx, (train, test) in enumerate(skf):
model = Sequential()
model.add(Embedding(input_dim = n_symbols+1,
output_dim = vocab_dim,
#mask_zero = False, # need to be zero otherwise cnn won't work
weights = [embedding_weights],
input_length = input_length,
trainable = True))
model.add(Convolution1D(nb_filter=64, filter_length=2, border_mode='valid', activation='relu'))
#model.add(Convolution1D(nb_filter=32, filter_length=2, border_mode='valid', activation='relu'))
model.add(MaxPooling1D(pool_length=2))
model.add(Dropout(0.5))
#model.add(Convolution1D(nb_filter=32, filter_length=3, border_mode='valid', activation='relu'))
#model.add(MaxPooling1D(pool_length=2))
#model.add(Dropout(0.5))
#model.add(Bidirectional(LSTM(32)))
#model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(y_train_dummy.shape[1] * 10, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(y_train_dummy.shape[1], activation = 'softmax'))
#model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
print("Training")
early_stopping = EarlyStopping(monitor='val_loss', patience=3)
t = time.time()
model.fit(X_train, y_train_dummy, batch_size=batch_size, nb_epoch=n_epoch, class_weight=class_weight, \
validation_data=(X_test, y_test_dummy), shuffle = True, callbacks=[early_stopping])
time.sleep(0.1)
t_e = time.time() - t
print "Model: " + feature + ' | ' + str(algorithm) + ' | rep' + str(repeat+1) + ' | cv' + str(kfold+1) + ' | time: ' + str(t_e) + ' sec'
y_pred_prob = model.predict_proba(X_test)
try:
y_pred = model.predict_classes(X_test) # different
except TypeError:
y_pred = model.predict(X_test.values)
t = pd.concat([pd.DataFrame(y_test), pd.DataFrame(y_pred)], axis=1)
t.columns = ['true', 'pred']
pred_tbl = pd.DataFrame(y_pred_prob)
pred_tbl = pd.concat([t, pred_tbl], axis=1)
pred_tbl['rep'] = repeat + 1
pred_tbl['k'] = kfold + 1
pred_tbl['algorithm'] = str(algorithm)
pred_tbl['feature'] = feature
pred_tbl['weighting'] = weighting
acc = accuracy_score(y_test, y_pred)
pr, re, f1, xx = precision_recall_fscore_support(y_test, y_pred, average='binary') # didn't use 'macro'
auc = multiclass_roc_auc_score(y_test, y_pred, average='weighted') # weighted AUC takes imbalanced label into account
print acc, pr, re, f1, auc
metrics = pd.DataFrame([0])
metrics['time'] = t_e
metrics['accuracy'] = acc
metrics['precision'] = pr
metrics['recall'] = re
metrics['f1'] = f1
metrics['auc'] = auc
metrics['rep'] = repeat + 1
metrics['k'] = idx + 1
metrics['algorithm'] = str(algorithm)
metrics['feature'] = feature
metrics['weighting'] = weighting
if auc > best_score:
best_model = model
best_score = auc
best_feature = feature
best_algorithm = str(algorithm)[0:9]
best_coef = coef
#encoder.inverse_transform(y_pred) #real label name
Y_test = np.argmax(y_test_dummy, axis=1)
Y_pred = np.argmax(y_pred_prob, axis=1)
print(classification_report(Y_test, Y_pred))
pred_table = pd.concat([pred_table, pred_tbl], axis=0)
score_table = pd.concat([score_table, metrics], axis=0)
return best_model, best_feature, best_algorithm, best_coef, score_table, pred_table, metrics
#j = open('best_dnn_model.json', 'r')
#json = j.read()
#j.close()
#model = model_from_json(json)
#model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
#score = model.evaluate(X_test, y_test_dummy, verbose=0)
#https://github.com/dandxy89/DeepLearning_MachineLearning/blob/master/EmbeddingKeras/imdb_embedding_w2v.py
#http://machinelearningmastery.com/sequence-classification-lstm-recurrent-neural-networks-python-keras/
#https://github.com/fchollet/keras/issues/1629 (consider biridectional lstm)
#http://datascience.stackexchange.com/questions/10048/what-is-the-best-keras-model-for-multi-label-classification (for binary)
#
# '''
# as feature
# '''
# print("Use layer output as features")
# #https://keras.io/getting-started/faq/#how-can-i-visualize-the-output-of-an-intermediate-layer
# print("Extract layer output")
# get_8th_layer_output = K.function([model.layers[0].input, K.learning_phase()],
# [model.layers[8].output])
# output_train = get_8th_layer_output([X_train, 1])[0]
# output_test = get_8th_layer_output([X_test, 0])[0]
#
#
# rf(output_train, y_train, output_test, y_test)
# svc(output_train, y_train, output_test, y_test)
|
{
"content_hash": "7031ee25b6c11cbd3f140663c959dd2c",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 250,
"avg_line_length": 40.240469208211145,
"alnum_prop": 0.6211922460282757,
"repo_name": "ckbjimmy/cdc",
"id": "bd1681170dc42ac8ab72554c024a0f1e436b4324",
"size": "13722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdc/src/Dnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103495"
},
{
"name": "Shell",
"bytes": "385"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
docdir = os.path.join(cwd,'..','documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.leftlanelab.firebase.js')
if not os.path.exists(js_file):
js_file = os.path.join(cwd,'..','assets','com.leftlanelab.firebase.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComLeftlanelabFirebaseModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
c = open(license_file).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
f = open(path)
if not os.path.exists(path): die("missing %s" % path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc': continue
if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
p = os.path.join(cwd, 'assets')
if not os.path.exists(p):
p = os.path.join(cwd, '..', 'assets')
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
for dn in ('example','platform'):
p = os.path.join(cwd, dn)
if not os.path.exists(p):
p = os.path.join(cwd, '..', dn)
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
zf.write(license_file,'%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
{
"content_hash": "889ae0871382acbac4a4748ca45e425b",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 125,
"avg_line_length": 30.821862348178136,
"alnum_prop": 0.6856692499671614,
"repo_name": "gimdongwoo/firebase-titanium",
"id": "e99af93dec5deb45e9f351ee1d12e5606f4fadd4",
"size": "7613",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iphone/build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "33589"
},
{
"name": "Objective-C",
"bytes": "250319"
},
{
"name": "Python",
"bytes": "7613"
}
],
"symlink_target": ""
}
|
import json
filename = 'username.json'
with open(filename) as f_obj:
username = json.load(f_obj)
print("Welcome back, " + username + "!")
|
{
"content_hash": "bc8f12a29f8bbff413d838572e344f4e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 44,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.6486486486486487,
"repo_name": "mccarrion/python-practice",
"id": "9b60e796aade908bca20f0cca43c3a9ab6689b8b",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crash_course/chapter10/greet_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10589"
},
{
"name": "Python",
"bytes": "159771"
}
],
"symlink_target": ""
}
|
from flask import Flask, url_for, jsonify, request, abort, has_request_context
import logging
from logging.handlers import RotatingFileHandler
import os
from utils import generate_id
import redis
from rest_data_structures import Car, Appliance, Pantry
app = Flask(__name__)
class CustomFormatter(logging.Formatter):
def format(self, record):
if has_request_context():
record.path = request.path
record.endpoint = request.endpoint
record.remote_addr = request.remote_addr
record.access_route = request.access_route
record.headers = request.headers
record.data = request.get_data(as_text=True)
return super(CustomFormatter, self).format(record)
def setup_logging():
try:
os.mkdir('logs/')
except FileExistsError:
pass
handler = RotatingFileHandler('logs/app.log', maxBytes=10*1024*1024, backupCount=20)
handler.setLevel(logging.INFO)
custom_format = '''%(levelname)s %(name)s %(path)s %(endpoint)s %(remote_addr)s %(access_route)s %(message)s\n%(headers)s\n%(data)s\n *******''' # noqa E105
handler.setFormatter(CustomFormatter(fmt=custom_format))
app.logger.addHandler(handler)
def validate_token(func):
def inner(*args, **kwargs):
if 'X-Auth-Token' not in request.headers:
return func(*args, valid_token=False, **kwargs)
rclient = RedisClient()
token = request.headers['X-Auth-Token']
if rclient.validate_token(token):
return func(*args, valid_token=token, **kwargs)
else:
return func(*args, valid_token=False, **kwargs)
# change the name so Flask doesn't complain
inner.__name__ = 'inner_{}'.format(func.__name__)
return inner
class RedisClient(object):
def __init__(self):
self.redis = redis.StrictRedis() # defaults to localhost:6379
def init_new_token(self, token):
self.redis.sadd('valid_tokens', token)
def validate_token(self, token):
return self.redis.sismember('valid_tokens', token)
def get_item_list(self, token, type):
key = '{}-list-{}'.format(type, token)
return [m.decode() for m in self.redis.smembers(key)]
def item_in_list(self, token, type, item_id):
key = '{}-list-{}'.format(type, token)
return self.redis.sismember(key, item_id)
def create_item(self, token, type, item):
# create the item in redis
item_id = generate_id()
item_key = '{}-{}'.format(type, item_id)
self.redis.hset(item_key, mapping=item.get_mapping())
# add the item to the list
list_key = '{}-list-{}'.format(type, token)
self.redis.sadd(list_key, item_id)
return item_id
def delete_item(self, token, type, item_id):
item_key = '{}-{}'.format(type, item_id)
self.redis.delete(item_key)
# add the item to the list
list_key = '{}-list-{}'.format(type, token)
self.redis.srem(list_key, item_id)
def get_item(self, type, item_id):
item_key = '{}-{}'.format(type, item_id)
return {k.decode(): v.decode() for k, v in self.redis.hgetall(item_key).items()}
def update_item(self, type, item_id, item):
item_key = '{}-{}'.format(type, item_id)
self.redis.hset(item_key, mapping=item.get_mapping())
def handle_collection(token, collection_pre, singular_pre, data_structure):
rclient = RedisClient()
if request.method == 'GET':
# return the list of this item
output = {collection_pre: rclient.get_item_list(token, collection_pre)}
elif request.method == 'POST':
# create a new item
req_item = request.get_json()
new_item = data_structure()
if new_item.create(req_item):
# has all the fields we need
item_id = rclient.create_item(token, collection_pre, new_item)
output = {singular_pre: item_id}
else:
# does not have the required fields
app.logger.warning('Tried to create a {} with bad params: {}'.format(singular_pre, req_item))
abort(400)
return output
def handle_item(token, collection_pre, singular_pre, item_id, data_structure):
rclient = RedisClient()
if not rclient.item_in_list(token, collection_pre, item_id):
# this id does not exist - return an error
app.logger.warning('Invalid {} id {}'.format(singular_pre, car_id))
abort(400)
if request.method == 'GET':
# return the info for this item
item = rclient.get_item(collection_pre, item_id)
output = dict(**item)
output[singular_pre] = item_id
elif request.method == 'DELETE':
# delete the given car
rclient.delete_item(token, collection_pre, item_id)
output = {'result': 'success'}
elif request.method == 'PUT':
# update the item
req_item = request.get_json()
existing_item = rclient.get_item(collection_pre, item_id)
item = data_structure(other=existing_item)
item.update(req_item)
rclient.update_item(collection_pre, item_id, item)
output = dict(**item.get_mapping())
output[singular_pre] = item_id
return output
@app.after_request
def add_header(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
return response
@app.route('/', methods=['GET'])
def root():
output = {'/': 'rules, routes, and description',
'/get_token': 'get an X-Auth-Token to start adding things',
'/cars': 'a collection of the cars - send your key to get the cars for that key',
'/formats/car': 'the expected keys for a car object',
'/appliances': 'a collection of the appliances - send your key to get the appliances for that key',
'/formats/appliance': 'the expected keys for an appliance object',
'/pantry': 'a collection of the items in the pantry - send your key to get the items for that key',
'/formats/pantry': 'the expected keys for a pantry object',
'/formats': 'all the expected keys for each of the items in the collections'}
return jsonify(**output)
@app.route('/formats', methods=['GET'])
@app.route('/formats/<format>', methods=['GET'])
def formats(format=None):
if format == 'car':
output = Car().help()
elif format == 'appliance':
output = {'error': 'not implemented yet'}
elif format == 'pantry':
output = {'error': 'not implemented yet'}
elif format == None:
output = {'car':Car().help(),
'appliance':Appliance().help(),
'pantry':Pantry().help()}
else:
output = {'error': 'invalid format requested: {}'.format(format)}
return jsonify(**output)
@app.route('/get_token', methods=['GET'])
def get_token():
rclient = RedisClient()
new_token = generate_id()
rclient.init_new_token(new_token)
return jsonify({'X-Auth-Token': new_token})
@app.route('/cars', methods=['GET', 'POST', 'PUT'])
@validate_token
def cars(valid_token=False):
if valid_token:
output = handle_collection(valid_token, 'cars', 'car', Car)
else:
# send the usage info
output = {'usage': 'send your token in as the value with the key "X-Auth-Token" in the request headers'}
return jsonify(**output)
@app.route('/cars/<car_id>', methods=['GET', 'PUT', 'DELETE'])
@validate_token
def car(car_id=None, valid_token=False):
if valid_token:
output = handle_item(valid_token, 'cars', 'car', car_id, Car)
else:
# invalid token - send the usage info
output = {'usage': 'send your token in as the value with the key "X-Auth-Token" in the request headers'}
return jsonify(**output)
@app.route('/appliances', methods=['GET', 'POST', 'PUT'])
@validate_token
def appliances(valid_token=False):
if valid_token:
output = handle_collection(valid_token, 'appliances', 'appliance', Appliance)
else:
# send the usage info
output = {'usage': 'send your token in as the value with the key "X-Auth-Token" in the request headers'}
return jsonify(**output)
@app.route('/appliances/<appliance_id>', methods=['GET', 'PUT', 'DELETE'])
@validate_token
def appliance(appliance_id=None, valid_token=False):
if valid_token:
output = handle_item(valid_token, 'appliances', 'appliance', appliance_id, Appliance)
else:
# invalid token - send the usage info
output = {'usage': 'send your token in as the value with the key "X-Auth-Token" in the request headers'}
return jsonify(**output)
@app.route('/pantry', methods=['GET', 'POST', 'PUT'])
@validate_token
def pantry(valid_token=False):
if valid_token:
output = handle_collection(valid_token, 'pantry', 'pantry_item', Pantry)
else:
# send the usage info
output = {'usage': 'send your token in as the value with the key "X-Auth-Token" in the request headers'}
return jsonify(**output)
@app.route('/pantry/<pantry_item_id>', methods=['GET', 'PUT', 'DELETE'])
@validate_token
def pantry_item(pantry_item_id=None, valid_token=False):
if valid_token:
output = handle_item(valid_token, 'pantry', 'pantry_item', pantry_item_id, Pantry)
else:
# invalid token - send the usage info
output = {'usage': 'send your token in as the value with the key "X-Auth-Token" in the request headers'}
return jsonify(**output)
setup_logging()
if __name__ == '__main__':
app.run()
|
{
"content_hash": "d1df9eca9fafd28735e7be129e9b9361",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 161,
"avg_line_length": 37.83399209486166,
"alnum_prop": 0.6225449226911827,
"repo_name": "jeremyprice/RU_Python_IV_API",
"id": "5e2f444590ac181df84d59270cce27d564a05e84",
"size": "9596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18918"
}
],
"symlink_target": ""
}
|
import unittest
# DO NOT IMPORT redi here!
# Each test needs it's own redi module so we can manipulate the
# module-scoped variables and functions
class TestResume(unittest.TestCase):
def test_no_resume_deletes_old_run_data(self):
class MockPersonFormEvents(object):
def delete(self):
raise FileDeleted()
class FileDeleted():
pass
import redi.redi
redi_ref = reload(redi.redi)
redi_ref._person_form_events_service = MockPersonFormEvents()
import redi.batch
batch = reload(redi.batch)
batch.check_input_file = lambda *args: None
with self.assertRaises(FileDeleted):
redi_ref._run(config_file=None, configuration_directory='',
do_keep_gen_files=None, dry_run=True, get_emr_data=False,
settings=MockSettings(), data_folder=None,
database_path=None, raw_txt_file = None, redcap_client=None,
report_courier=None, report_creator=None)
def test_no_resume_stores(self):
class MockPersonFormEvents(object):
def delete(self):
pass
def store(self, ignored):
raise FileStored()
class FileStored():
pass
import redi.redi
redi_ref = reload(redi.redi)
redi_ref._person_form_events_service = MockPersonFormEvents()
redi_ref._create_person_form_event_tree_with_data = lambda *args: (
None, None, None, None, None)
redi_ref._delete_last_runs_data = lambda *args: None
redi_ref._removedirs = lambda *args: None
redi_ref._mkdir = lambda *args: None
redi_ref.connect_to_redcap = lambda *args: None
import redi.batch
batch = reload(redi.batch)
batch.check_input_file = lambda *args: None
with self.assertRaises(FileStored):
redi_ref._run(config_file=None, configuration_directory='',
do_keep_gen_files=None, dry_run=True, get_emr_data=False,
settings=MockSettings(), data_folder=None, raw_txt_file = None,
database_path=None, redcap_client=None,
report_courier=None, report_creator=None)
def test_resume_fetches_data_from_last_run(self):
class MockPersonFormEvents(object):
def fetch(self):
raise DataFetched()
class DataFetched():
pass
import redi.redi
redi_ref = reload(redi.redi)
redi_ref._person_form_events_service = MockPersonFormEvents()
import redi.batch
batch = reload(redi.batch)
batch.check_input_file = lambda *args: None
with self.assertRaises(DataFetched):
redi_ref._run(config_file=None, configuration_directory='',
do_keep_gen_files=None, dry_run=True, get_emr_data=False,
settings=MockSettings(), data_folder=None, raw_txt_file = None,
database_path=None, resume=True, redcap_client=None,
report_courier=None, report_creator=None)
class MockSettings(object):
def __getattr__(self, item):
return '' if ('file' in item) else None
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e1c7aed9f9222e273a04957e0fa1008a",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 85,
"avg_line_length": 35.45744680851064,
"alnum_prop": 0.5901590159015901,
"repo_name": "nrejack/redi",
"id": "e80b579328a84ffc6bf39970a440a45a538fe89f",
"size": "3892",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/TestResume.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "387410"
},
{
"name": "Makefile",
"bytes": "15777"
},
{
"name": "PHP",
"bytes": "24097"
},
{
"name": "Perl",
"bytes": "848"
},
{
"name": "Python",
"bytes": "490227"
},
{
"name": "R",
"bytes": "7847"
},
{
"name": "Ruby",
"bytes": "363227"
},
{
"name": "Shell",
"bytes": "14170"
},
{
"name": "VimL",
"bytes": "1269"
},
{
"name": "XSLT",
"bytes": "15791"
}
],
"symlink_target": ""
}
|
from hashlib import sha1
import mimetypes
import traceback
import warnings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse
from django.core.validators import ipv4_re
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
import requests
import vidscraper
from djvidscraper.utils import get_api_keys, download_thumbnail
from djvidscraper.signals import (pre_video_import, post_video_import,
pre_feed_import_publish,
post_feed_import_publish)
class FeedImportIdentifier(models.Model):
"""
Represents a single identifier for a video, seen during an import of a
given feed.
"""
identifier_hash = models.CharField(max_length=40)
feed = models.ForeignKey('Feed')
def __unicode__(self):
return self.identifier_hash
class FeedImport(models.Model):
created_timestamp = models.DateTimeField(auto_now_add=True)
modified_timestamp = models.DateTimeField(auto_now=True)
is_complete = models.BooleanField(default=False)
#: Denormalized field displaying (eventually accurate) count of
#: errors during the import process.
error_count = models.PositiveIntegerField(default=0)
#: Denormalized field displaying (eventually accurate) count of
#: videos imported during the import process.
import_count = models.PositiveIntegerField(default=0)
feed = models.ForeignKey('Feed', related_name='imports')
class Meta:
get_latest_by = 'created_timestamp'
ordering = ['-created_timestamp']
def _get_identifier_hashes(self, vidscraper_video):
identifiers = (
vidscraper_video.guid,
vidscraper_video.link,
vidscraper_video.flash_enclosure_url,
vidscraper_video.embed_code
)
if vidscraper_video.files is not None:
identifiers += tuple(f.url for f in vidscraper_video.files
if not f.expires)
return [sha1(i).hexdigest() for i in identifiers if i]
def is_seen(self, vidscraper_video):
hashes = self._get_identifier_hashes(vidscraper_video)
if not hashes:
return False
kwargs = {
'feed': self.feed,
'identifier_hash__in': hashes,
}
return FeedImportIdentifier.objects.filter(**kwargs).exists()
def mark_seen(self, vidscraper_video):
hashes = self._get_identifier_hashes(vidscraper_video)
# TODO: Use bulk_create.
for identifier_hash in hashes:
kwargs = {
'feed': self.feed,
'identifier_hash': identifier_hash,
}
FeedImportIdentifier.objects.create(**kwargs)
def run(self):
feed = self.feed
try:
iterator = feed.get_iterator()
iterator.load()
feed.update_metadata(iterator)
except Exception:
self.record_step(FeedImportStep.IMPORT_ERRORED,
with_traceback=True)
return
try:
for vidscraper_video in iterator:
try:
vidscraper_video.load()
if self.is_seen(vidscraper_video):
self.record_step(FeedImportStep.VIDEO_SEEN)
if feed.stop_if_seen:
break
else:
continue
video = Video.from_vidscraper_video(
vidscraper_video,
status=Video.UNPUBLISHED,
commit=False,
feed=feed,
sites=feed.sites.all(),
owner=feed.owner,
owner_email=feed.owner_email,
owner_session=feed.owner_session,
)
try:
video.clean_fields()
video.validate_unique()
except ValidationError:
self.record_step(FeedImportStep.VIDEO_INVALID,
with_traceback=True)
video.save()
try:
video.save_m2m()
except Exception:
video.delete()
raise
self.mark_seen(vidscraper_video)
self.record_step(FeedImportStep.VIDEO_IMPORTED,
video=video)
except Exception:
self.record_step(FeedImportStep.VIDEO_ERRORED,
with_traceback=True)
# Update timestamp (and potentially counts) after each
# video.
self.save()
except Exception:
self.record_step(FeedImportStep.IMPORT_ERRORED,
with_traceback=True)
# Pt 2: Mark videos active all at once.
if not feed.moderate_imported_videos:
to_publish = Video.objects.filter(feedimportstep__feed_import=self,
status=Video.UNPUBLISHED)
for receiver, response in pre_feed_import_publish.send_robust(
sender=self, to_publish=to_publish):
if response:
# Basic sanity check: should be a video queryset.
if (isinstance(response, models.Queryset) and
response.model == Video):
to_publish = response
else:
if isinstance(response, Exception):
warnings.warn("pre_feed_import_publish listener "
"raised exception")
else:
warnings.warn("pre_feed_import_publish returned "
"incorrect response")
to_publish.update(status=Video.PUBLISHED)
published = Video.objects.filter(feedimportstep__feed_import=self,
status=Video.PUBLISHED,
published_datetime=now())
post_feed_import_publish.send_robust(sender=self,
published=published)
Video.objects.filter(feedimportstep__feed_import=self,
status=Video.UNPUBLISHED
).update(status=Video.NEEDS_MODERATION)
self.is_complete = True
self.save()
def record_step(self, step_type, video=None, with_traceback=False):
if step_type in (FeedImportStep.VIDEO_ERRORED,
FeedImportStep.IMPORT_ERRORED):
self.error_count += 1
if step_type == FeedImportStep.VIDEO_IMPORTED:
self.import_count += 1
tb = traceback.format_exc() if with_traceback else ''
self.steps.create(step_type=step_type,
video=video,
traceback=tb)
class FeedImportStep(models.Model):
#: Something errored on the import level.
IMPORT_ERRORED = 'import errored'
#: A video was found to already be in the database - i.e. previously
#: imported.
VIDEO_SEEN = 'video seen'
#: Something semi-expected is wrong with the video which prevents
#: it from being imported.
VIDEO_INVALID = 'video invalid'
#: Something unexpected happened during an import of a video.
VIDEO_ERRORED = 'video errored'
#: A video was successfully imported.
VIDEO_IMPORTED = 'video imported'
STEP_TYPE_CHOICES = (
(IMPORT_ERRORED, _(u'Import errored')),
(VIDEO_SEEN, _(u'Video seen')),
(VIDEO_INVALID, _(u'Video invalid')),
(VIDEO_ERRORED, _(u'Video errored')),
(VIDEO_IMPORTED, _(u'Video imported')),
)
step_type = models.CharField(max_length=14,
choices=STEP_TYPE_CHOICES)
video = models.OneToOneField('Video',
blank=True,
null=True,
on_delete=models.SET_NULL)
traceback = models.TextField(blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
feed_import = models.ForeignKey(FeedImport, related_name='steps')
def __unicode__(self):
return unicode(self.step_type)
class Feed(models.Model):
"""
Represents an automated feed import in the database.
"""
sites = models.ManyToManyField(Site)
thumbnail = models.ImageField(
upload_to='djvidscraper/feed/thumbnail/%Y/%m/%d/',
blank=True,
max_length=255)
modified_timestamp = models.DateTimeField(auto_now=True)
created_timestamp = models.DateTimeField(auto_now_add=True)
# Import settings
moderate_imported_videos = models.BooleanField(default=False)
enable_automatic_imports = models.BooleanField(default=True)
# Feeds are expected to stay in the same order.
stop_if_seen = models.BooleanField(default=True)
should_update_metadata = models.BooleanField(
default=True,
verbose_name="Update metadata on next import"
)
#: Original url entered by a user when adding this feed.
original_url = models.URLField(max_length=400)
# Feed metadata
name = models.CharField(max_length=250, blank=True)
description = models.TextField(blank=True)
#: Webpage where the contents of this feed could be browsed.
web_url = models.URLField(blank=True, max_length=400)
# Owner info. Owner is the person who created the video. Should always
# have editing access.
owner = models.ForeignKey('auth.User', null=True, blank=True)
owner_email = models.EmailField(max_length=250,
blank=True)
owner_session = models.ForeignKey('sessions.Session',
blank=True, null=True)
# Cached information from the import.
external_etag = models.CharField(max_length=250, blank=True)
external_last_modified = models.DateTimeField(blank=True, null=True)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('djvidscraper_feed_detail', kwargs={'pk': self.pk})
def start_import(self):
imp = FeedImport()
imp.feed = self
imp.save()
imp.run()
def get_iterator(self):
return vidscraper.auto_feed(
self.original_url,
max_results=None,
api_keys=get_api_keys(),
etag=self.external_etag or None,
last_modified=self.external_last_modified,
)
get_iterator.alters_data = True
def update_metadata(self, iterator):
save = False
# Always update etag and last_modified.
etag = getattr(iterator, 'etag', None) or ''
if (etag and etag != self.external_etag):
self.external_etag = etag
save = True
last_modified = getattr(iterator, 'last_modified', None)
if last_modified is not None:
self.external_last_modified = last_modified
save = True
# If the feed metadata is marked to be updated, do it.
if self.should_update_metadata:
self.name = iterator.title or self.original_url
self.external_url = iterator.webpage or ''
self.description = iterator.description or ''
# Only update metadata once.
self.should_update_metadata = False
save = True
if save:
self.save()
class Video(models.Model):
UNPUBLISHED = 'unpublished'
NEEDS_MODERATION = 'needs moderation'
PUBLISHED = 'published'
HIDDEN = 'hidden'
STATUS_CHOICES = (
(UNPUBLISHED, _(u'Unpublished')),
(NEEDS_MODERATION, _(u'Needs moderation')),
(PUBLISHED, _(u'Published')),
(HIDDEN, _(u'Hidden')),
)
# Video core data
#: This field contains a URL which a user gave as "the" URL
#: for this video. It may or may not be the same as ``external_url``
#: or a file url. It may not even exist, if they're using embedding.
original_url = models.URLField(max_length=400, blank=True)
# Video metadata
#: Canonical web home of the video as best as we can tell.
web_url = models.URLField(max_length=400, blank=True)
embed_code = models.TextField(blank=True)
flash_enclosure_url = models.URLField(max_length=400, blank=True)
name = models.CharField(max_length=250)
description = models.TextField(blank=True)
thumbnail = models.ImageField(
upload_to='djvidscraper/video/thumbnail/%Y/%m/%d/',
blank=True,
max_length=255)
guid = models.CharField(max_length=250, blank=True)
# Technically duplication, but the only other way to get this would
# be to check the import step's import's feed. Which would be silly.
feed = models.ForeignKey(Feed, blank=True, null=True,
related_name='videos')
# Owner info. Owner is the person who created the video. Should always
# have editing access.
owner = models.ForeignKey('auth.User', null=True, blank=True)
owner_email = models.EmailField(max_length=250,
blank=True)
owner_session = models.ForeignKey('sessions.Session',
blank=True, null=True)
# Cached information from vidscraper.
external_user_username = models.CharField(max_length=250, blank=True)
external_user_url = models.URLField(blank=True, max_length=400)
external_thumbnail_url = models.URLField(blank=True, max_length=400)
external_thumbnail_tries = models.PositiveSmallIntegerField(default=0)
external_published_datetime = models.DateTimeField(null=True, blank=True)
# Other internal use.
sites = models.ManyToManyField(Site)
status = models.CharField(max_length=16,
choices=STATUS_CHOICES,
default=UNPUBLISHED)
modified_timestamp = models.DateTimeField(auto_now=True)
created_timestamp = models.DateTimeField(auto_now_add=True)
published_datetime = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ['-published_datetime', '-modified_timestamp']
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('djvidscraper_video_detail', kwargs={'pk': self.pk})
@classmethod
def from_vidscraper_video(cls, video, status=None, commit=True,
feed=None, sites=None, owner=None,
owner_email=None, owner_session=None):
"""
Builds a :class:`Video` instance from a
:class:`vidscraper.videos.Video` instance. If `commit` is False,
the :class:`Video` will not be saved, and the created instance will
have a `save_m2m()` method that must be called after you call `save()`.
"""
pre_video_import.send_robust(sender=cls, vidscraper_video=video)
if status is None:
status = cls.NEEDS_MODERATION
instance = cls(
original_url=video.url,
web_url=video.link or '',
embed_code=video.embed_code or '',
flash_enclosure_url=video.flash_enclosure_url or '',
name=video.title or '',
description=video.description or '',
guid=video.guid or '',
feed=feed,
owner=owner,
owner_email=owner_email or '',
owner_session=owner_session,
external_user_username=video.user or '',
external_user_url=video.user_url or '',
external_thumbnail_url=video.thumbnail_url or '',
external_published_datetime=video.publish_datetime,
status=status,
published_datetime=now() if status == cls.PUBLISHED else None,
)
if not sites:
sites = [Site.objects.get_current()]
def save_m2m():
instance.sites = sites
if video.files:
for video_file in video.files:
if video_file.expires is None:
VideoFile.objects.create(video=instance,
url=video_file.url,
length=video_file.length,
mimetype=video_file.mime_type)
instance.download_external_thumbnail()
post_video_import.send_robust(sender=cls, instance=instance,
vidscraper_video=video)
if commit:
instance.save()
save_m2m()
else:
instance.save_m2m = save_m2m
return instance
def download_external_thumbnail(self, override_thumbnail=False):
"""Try to download and save an external thumbnail."""
if not self.external_thumbnail_url:
return
if self.thumbnail and not override_thumbnail:
return
from django.conf import settings
max_retries = getattr(settings,
'DJVIDSCRAPER_MAX_DOWNLOAD_RETRIES',
3)
if self.external_thumbnail_tries > max_retries:
return
try:
final_path = download_thumbnail(self.external_thumbnail_url,
self,
'thumbnail')
except Exception:
self.external_thumbnail_tries += 1
self.save()
else:
try:
self.thumbnail = final_path
self.save()
except Exception:
default_storage.delete(final_path)
download_external_thumbnail.alters_data = True
class VideoFile(models.Model):
video = models.ForeignKey(Video, related_name='files')
url = models.URLField(max_length=2048)
length = models.PositiveIntegerField(null=True, blank=True)
mimetype = models.CharField(max_length=60, blank=True)
def fetch_metadata(self):
"""
Do a HEAD request on self.url to try to get metadata
(self.length and self.mimetype).
Note that while this method fills in those attributes, it does *not*
call self.save() - so be sure to do so after calling this method!
"""
if not self.url:
return
try:
response = requests.head(self.url, timeout=5)
if response.status_code == 302:
response = requests.head(response.headers['location'],
timeout=5)
except Exception:
pass
else:
if response.status_code != 200:
return
self.length = response.headers.get('content-length')
self.mimetype = response.headers.get('content-type', '')
if self.mimetype in ('application/octet-stream', ''):
# We got a not-useful MIME type; guess!
guess = mimetypes.guess_type(self.url)
if guess[0] is not None:
self.mimetype = guess[0]
class FeaturedVideo(models.Model):
"""M2M connecting sites to videos."""
site = models.ForeignKey(Site)
video = models.ForeignKey(Video)
order = models.PositiveSmallIntegerField(default=1)
created_timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ('site', 'video')
ordering = ('order', 'created_timestamp')
class WatchManager(models.Manager):
def from_request(self, request, video):
"""
Creates a Watch based on an HTTP request. If the request came
from localhost, check to see if it was forwarded to (hopefully) get the
right IP address.
"""
user_agent = request.META.get('HTTP_USER_AGENT', '')
ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
if not ipv4_re.match(ip):
ip = '0.0.0.0'
if hasattr(request, 'user') and request.user.is_authenticated():
user = request.user
else:
user = None
self.create(video=video,
user=user,
ip_address=ip,
user_agent=user_agent)
class Watch(models.Model):
"""
Record of a video being watched.
"""
video = models.ForeignKey(Video)
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
user = models.ForeignKey('auth.User', blank=True, null=True)
ip_address = models.IPAddressField()
# Watch queries may want to exlude "bot", "spider", "crawler", etc.
# from counts.
user_agent = models.CharField(max_length=255, blank=True)
objects = WatchManager()
|
{
"content_hash": "a4fbbaeba5bc8e64359b4b3309cdd9cc",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 79,
"avg_line_length": 37.83600713012478,
"alnum_prop": 0.5730707622726845,
"repo_name": "pculture/django-vidscraper",
"id": "76181ebbc16a7dfb0e02e8f89f73eb6535ec1ac0",
"size": "21226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djvidscraper/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "120495"
},
{
"name": "Python",
"bytes": "36680"
},
{
"name": "Ruby",
"bytes": "1038"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.mwld
Tool-specific initialization for the Metrowerks CodeWarrior linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwld.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Tool
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['AR'] = 'mwld'
env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES'
env['LIBDIRPREFIX'] = '-L'
env['LIBDIRSUFFIX'] = ''
env['LIBLINKPREFIX'] = '-l'
env['LIBLINKSUFFIX'] = '.lib'
env['LINK'] = 'mwld'
env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = '$LINKFLAGS'
env['SHLINKCOM'] = shlib_action
env['SHLIBEMITTER']= shlib_emitter
def exists(env):
import SCons.Tool.mwcc
return SCons.Tool.mwcc.set_vars(env)
def shlib_generator(target, source, env, for_signature):
cmd = ['$SHLINK', '$SHLINKFLAGS', '-shared']
no_import_lib = env.get('no_import_lib', 0)
if no_import_lib: cmd.extend('-noimplib')
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
if dll: cmd.extend(['-o', dll])
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: cmd.extend(['-implib', implib.get_string(for_signature)])
cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])
return [cmd]
def shlib_emitter(target, source, env):
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$SHLIBSUFFIX"))
if not no_import_lib and \
not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'):
# Append an import library to the list of targets.
target.append(env.ReplaceIxes(dll,
'SHLIBPREFIX', 'SHLIBSUFFIX',
'LIBPREFIX', 'LIBSUFFIX'))
return target, source
shlib_action = SCons.Action.Action(shlib_generator, generator=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "d3919d2abc7d3531ea627de66cb4f17f",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 135,
"avg_line_length": 34,
"alnum_prop": 0.6880153930731171,
"repo_name": "bubichain/blockchain",
"id": "a8cf2a62ecbac993d05eaab9f2b424e7bb1d4ff6",
"size": "3638",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "src/3rd/src/jsoncpp/scons-2.1.0/engine/SCons/Tool/mwld.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1831"
},
{
"name": "C",
"bytes": "5533376"
},
{
"name": "C++",
"bytes": "15012019"
},
{
"name": "HTML",
"bytes": "1137"
},
{
"name": "Java",
"bytes": "1387256"
},
{
"name": "M4",
"bytes": "1430"
},
{
"name": "Makefile",
"bytes": "17557"
},
{
"name": "Objective-C",
"bytes": "382710"
},
{
"name": "PHP",
"bytes": "3641"
},
{
"name": "Pascal",
"bytes": "1986"
},
{
"name": "Protocol Buffer",
"bytes": "15604"
},
{
"name": "Python",
"bytes": "1856699"
},
{
"name": "Roff",
"bytes": "429468"
},
{
"name": "Shell",
"bytes": "6104"
}
],
"symlink_target": ""
}
|
"""Generates straight-line programs performing arithmetic on multiple variables.
"""
import dataclasses
import random
from typing import Optional, Text, Tuple
DEFAULT_OPS = ("+=", "-=", "*=")
@dataclasses.dataclass
class MultivarArithmeticConfig(object):
"""The config class for the multivar arithmetic generator."""
base: int
length: int
num_digits: int = 1
variables: int = 2
constant_probability: float = 0.75
max_value: Optional[int] = None
ops: Tuple[Text, Ellipsis] = DEFAULT_OPS
encoder_name: Text = "simple"
mod: Optional[int] = None
output_mod: Optional[int] = None
def generate_python_source(length, config):
"""Generates Python code according to the config."""
max_value = config.max_value or (config.base ** config.num_digits - 1)
# Initialize v1..vN to 0.
statements = [
"v{} = 0".format(i)
for i in range(1, config.variables)
]
used_variables = {0}
for _ in range(length):
# choose variable to modify
var = random.randint(0, config.variables - 1)
# choose operation
op = random.choice(config.ops)
# choose constant or existing variable
use_constant = random.random() < config.constant_probability
if use_constant:
value = random.randint(0, max_value)
else:
# Choose a random variable.
value = "v{}".format(random.choice(list(used_variables)))
used_variables.add(var)
statement = "v{var} {op} {value}".format(
var=var,
op=op,
value=value,
)
statements.append(statement)
return "\n".join(statements)
|
{
"content_hash": "e2dc12a23ada9fe51b5e3df1f72e5afa",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 80,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.6628205128205128,
"repo_name": "google-research/google-research",
"id": "b5ded93412ddcb18e64fe82b7687315c9183d692",
"size": "2168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipagnn/datasets/control_flow_programs/program_generators/multivar_arithmetic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
import sys
import os
import unittest
import pexpect
from Naked.toolshed.shell import execute
from Naked.toolshed.system import file_exists, make_path
class CryptoUnicodePassphraseTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def submit_same_uni_passphrase(self, system_command):
child = pexpect.spawn(system_command)
#child.logfile = sys.stdout
child.expect("Please enter your passphrase: ")
child.sendline("ƷȦϺѠ")
child.expect("Please enter your passphrase again: ")
child.sendline("ƷȦϺѠ")
child.interact()
return child
def test_unicode_passphrase_single_file_encrypt(self):
command = "crypto testdir7/uni_test.txt"
child = self.submit_same_uni_passphrase(command)
self.assertTrue(file_exists(make_path("testdir7", "uni_test.txt.crypt"))) #test that new encrypted file exists
child.close()
# cleanup
os.remove(make_path("testdir7","uni_test.txt.crypt"))
def test_unicode_passphrase_multi_file_encrypt(self):
command = "crypto testdir7/uni_test.txt testdir7/uni_test2.txt"
child = self.submit_same_uni_passphrase(command)
self.assertTrue(file_exists(make_path("testdir7", "uni_test.txt.crypt"))) #test that new encrypted file exists
self.assertTrue(file_exists(make_path("testdir7", "uni_test2.txt.crypt"))) #test that new encrypted file exists
child.close()
# cleanup
os.remove(make_path("testdir7","uni_test.txt.crypt"))
os.remove(make_path("testdir7","uni_test2.txt.crypt"))
def test_unicode_directory_encrypt(self):
command = "crypto testdir7"
child = self.submit_same_uni_passphrase(command)
self.assertTrue(file_exists(make_path("testdir7", "uni_test.txt.crypt"))) #test that new encrypted file exists
self.assertTrue(file_exists(make_path("testdir7", "uni_test2.txt.crypt"))) #test that new encrypted file exists
child.close()
# cleanup
os.remove(make_path("testdir7","uni_test.txt.crypt"))
os.remove(make_path("testdir7","uni_test2.txt.crypt"))
|
{
"content_hash": "8b26583dd86871eeb83670ae3a3e38a4",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 119,
"avg_line_length": 36.83050847457627,
"alnum_prop": 0.6695812241141279,
"repo_name": "chrissimpkins/crypto",
"id": "2358cd336343eb5f1761b477ba5b1f7580a6f52e",
"size": "2222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_unicode-passphrase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158483"
},
{
"name": "Shell",
"bytes": "1807"
}
],
"symlink_target": ""
}
|
from ..core import Mapping
class Compound(Mapping):
"""
Mapping based on passing one mapping through another
.. math::
f(\mathbf{x}) = f_2(f_1(\mathbf{x}))
:param mapping1: first mapping
:type mapping1: GPy.mappings.Mapping
:param mapping2: second mapping
:type mapping2: GPy.mappings.Mapping
"""
def __init__(self, mapping1, mapping2):
assert(mapping1.output_dim==mapping2.input_dim)
input_dim, output_dim = mapping1.input_dim, mapping2.output_dim
super(Compound, self).__init__(input_dim=input_dim, output_dim=output_dim)
self.mapping1 = mapping1
self.mapping2 = mapping2
self.link_parameters(self.mapping1, self.mapping2)
def f(self, X):
return self.mapping2.f(self.mapping1.f(X))
def update_gradients(self, dL_dF, X):
hidden = self.mapping1.f(X)
self.mapping2.update_gradients(dL_dF, hidden)
self.mapping1.update_gradients(self.mapping2.gradients_X(dL_dF, hidden), X)
def gradients_X(self, dL_dF, X):
hidden = self.mapping1.f(X)
return self.mapping1.gradients_X(self.mapping2.gradients_X(dL_dF, hidden), X)
|
{
"content_hash": "aab556cd15031fd2d2ad224787814bd8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 85,
"avg_line_length": 32.52777777777778,
"alnum_prop": 0.6507258753202391,
"repo_name": "SheffieldML/GPy",
"id": "9078910f29b1f9920c371c0a84c286243d4bcbec",
"size": "1282",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "GPy/mappings/compound.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C",
"bytes": "2030"
},
{
"name": "C++",
"bytes": "1605"
},
{
"name": "Cython",
"bytes": "49903"
},
{
"name": "Python",
"bytes": "2344657"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
}
|
from flask import g, redirect, url_for
from app import app, db
from .models import auth
@app.route('/delete-me/')
@auth.protected(csrf=True, role='user')
def delete_me():
db.session.delete(g.user)
db.commit()
auth.logout()
return redirect(url_for('index'))
|
{
"content_hash": "3bf1eb36ba73883b7c3f7cfd776b97f7",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 39,
"avg_line_length": 19.785714285714285,
"alnum_prop": 0.6750902527075813,
"repo_name": "jpscaletti/authcode",
"id": "965013cc944bf242d7d961dd7640aad582210ac2",
"size": "292",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/default/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5028"
},
{
"name": "HTML",
"bytes": "5654"
},
{
"name": "Makefile",
"bytes": "683"
},
{
"name": "Python",
"bytes": "128576"
}
],
"symlink_target": ""
}
|
"""
Demonstration of borders and background colors.
"""
from vispy.scene import SceneCanvas
canvas = SceneCanvas(keys='interactive', bgcolor='w', show=True)
grid = canvas.central_widget.add_grid(spacing=0, bgcolor='gray',
border_color='k')
view1 = grid.add_view(row=0, col=0, margin=10, bgcolor=(1, 0, 0, 0.5),
border_color=(1, 0, 0))
view2 = grid.add_view(row=0, col=1, margin=10, bgcolor=(0, 1, 0, 0.5),
border_color=(0, 1, 0))
if __name__ == '__main__':
canvas.app.run()
|
{
"content_hash": "348749087c5a47fd75f5e683df6c6978",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 35.1875,
"alnum_prop": 0.566607460035524,
"repo_name": "Eric89GXL/vispy",
"id": "c08ad119cebd4307048c5bfd876a1052dba283fe",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basics/scene/background_borders.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2461885"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
class Actor():
@abstractmethod
def __init__(self, batteryManagerObj):
pass
@abstractmethod
def run(self):
pass
|
{
"content_hash": "adca0ddccdf9aa543f38d654608bd23c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 39,
"avg_line_length": 19.875,
"alnum_prop": 0.7421383647798742,
"repo_name": "zjcers/ecohawks-battery",
"id": "5c55eeae23fd60d2b1c8bb5b6936e9831d1cacf2",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actors/absactor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Processing",
"bytes": "2507"
},
{
"name": "Python",
"bytes": "33217"
},
{
"name": "Shell",
"bytes": "641"
}
],
"symlink_target": ""
}
|
"""Stores application configuration taken from e.g. app.yaml, queues.yaml."""
# TODO: Support more than just app.yaml.
import errno
import logging
import os
import os.path
import random
import string
import threading
import types
from google.appengine.api import appinfo
from google.appengine.api import appinfo_includes
from google.appengine.api import backendinfo
from google.appengine.api import dispatchinfo
from google.appengine.tools.devappserver2 import errors
# Constants passed to functions registered with
# ServerConfiguration.add_change_callback.
NORMALIZED_LIBRARIES_CHANGED = 1
SKIP_FILES_CHANGED = 2
HANDLERS_CHANGED = 3
INBOUND_SERVICES_CHANGED = 4
ENV_VARIABLES_CHANGED = 5
ERROR_HANDLERS_CHANGED = 6
NOBUILD_FILES_CHANGED = 7
class ServerConfiguration(object):
"""Stores server configuration information.
Most configuration options are mutable and may change any time
check_for_updates is called. Client code must be able to cope with these
changes.
Other properties are immutable (see _IMMUTABLE_PROPERTIES) and are guaranteed
to be constant for the lifetime of the instance.
"""
_IMMUTABLE_PROPERTIES = [
('application', 'application'),
('version', 'major_version'),
('runtime', 'runtime'),
('threadsafe', 'threadsafe'),
('server', 'server_name'),
('basic_scaling', 'basic_scaling'),
('manual_scaling', 'manual_scaling'),
('automatic_scaling', 'automatic_scaling')]
def __init__(self, yaml_path):
"""Initializer for ServerConfiguration.
Args:
yaml_path: A string containing the full path of the yaml file containing
the configuration for this server.
"""
self._yaml_path = yaml_path
self._app_info_external = None
self._application_root = os.path.realpath(os.path.dirname(yaml_path))
self._last_failure_message = None
self._app_info_external, files_to_check = self._parse_configuration(
self._yaml_path)
self._mtimes = self._get_mtimes([self._yaml_path] + files_to_check)
self._application = 'dev~%s' % self._app_info_external.application
self._api_version = self._app_info_external.api_version
self._server_name = self._app_info_external.server
self._version = self._app_info_external.version
self._threadsafe = self._app_info_external.threadsafe
self._basic_scaling = self._app_info_external.basic_scaling
self._manual_scaling = self._app_info_external.manual_scaling
self._automatic_scaling = self._app_info_external.automatic_scaling
self._runtime = self._app_info_external.runtime
if self._runtime == 'python':
logging.warning(
'The "python" runtime specified in "%s" is not supported - the '
'"python27" runtime will be used instead. A description of the '
'differences between the two can be found here:\n'
'https://developers.google.com/appengine/docs/python/python25/diff27',
self._yaml_path)
self._minor_version_id = ''.join(random.choice(string.digits) for _ in
range(18))
@property
def application_root(self):
"""The directory containing the application e.g. "/home/user/myapp"."""
return self._application_root
@property
def application(self):
return self._application
@property
def api_version(self):
return self._api_version
@property
def server_name(self):
return self._server_name or 'default'
@property
def major_version(self):
return self._version
@property
def version_id(self):
if self.server_name == 'default':
return '%s.%s' % (
self.major_version,
self._minor_version_id)
else:
return '%s:%s.%s' % (
self.server_name,
self.major_version,
self._minor_version_id)
@property
def runtime(self):
return self._runtime
@property
def threadsafe(self):
return self._threadsafe
@property
def basic_scaling(self):
return self._basic_scaling
@property
def manual_scaling(self):
return self._manual_scaling
@property
def automatic_scaling(self):
return self._automatic_scaling
@property
def normalized_libraries(self):
return self._app_info_external.GetNormalizedLibraries()
@property
def skip_files(self):
return self._app_info_external.skip_files
@property
def nobuild_files(self):
return self._app_info_external.nobuild_files
@property
def error_handlers(self):
return self._app_info_external.error_handlers
@property
def handlers(self):
return self._app_info_external.handlers
@property
def inbound_services(self):
return self._app_info_external.inbound_services
@property
def env_variables(self):
return self._app_info_external.env_variables
@property
def is_backend(self):
return False
def check_for_updates(self):
"""Return any configuration changes since the last check_for_updates call.
Returns:
A set containing the changes that occured. See the *_CHANGED module
constants.
"""
new_mtimes = self._get_mtimes(self._mtimes.keys())
if new_mtimes == self._mtimes:
return set()
try:
app_info_external, files_to_check = self._parse_configuration(
self._yaml_path)
except Exception, e:
failure_message = str(e)
if failure_message != self._last_failure_message:
logging.error('Configuration is not valid: %s', failure_message)
self._last_failure_message = failure_message
return set()
self._last_failure_message = None
self._mtimes = self._get_mtimes([self._yaml_path] + files_to_check)
for app_info_attribute, self_attribute in self._IMMUTABLE_PROPERTIES:
app_info_value = getattr(app_info_external, app_info_attribute)
self_value = getattr(self, self_attribute)
if (app_info_value == self_value or
app_info_value == getattr(self._app_info_external,
app_info_attribute)):
# Only generate a warning if the value is both different from the
# immutable value *and* different from the last loaded value.
continue
if isinstance(app_info_value, types.StringTypes):
logging.warning('Restart the development server to see updates to "%s" '
'["%s" => "%s"]',
app_info_attribute,
self_value,
app_info_value)
else:
logging.warning('Restart the development server to see updates to "%s"',
app_info_attribute)
changes = set()
if (app_info_external.GetNormalizedLibraries() !=
self.normalized_libraries):
changes.add(NORMALIZED_LIBRARIES_CHANGED)
if app_info_external.skip_files != self.skip_files:
changes.add(SKIP_FILES_CHANGED)
if app_info_external.nobuild_files != self.nobuild_files:
changes.add(NOBUILD_FILES_CHANGED)
if app_info_external.handlers != self.handlers:
changes.add(HANDLERS_CHANGED)
if app_info_external.inbound_services != self.inbound_services:
changes.add(INBOUND_SERVICES_CHANGED)
if app_info_external.env_variables != self.env_variables:
changes.add(ENV_VARIABLES_CHANGED)
if app_info_external.error_handlers != self.error_handlers:
changes.add(ERROR_HANDLERS_CHANGED)
self._app_info_external = app_info_external
if changes:
self._minor_version_id = ''.join(random.choice(string.digits) for _ in
range(18))
return changes
@staticmethod
def _get_mtimes(filenames):
filename_to_mtime = {}
for filename in filenames:
try:
filename_to_mtime[filename] = os.path.getmtime(filename)
except OSError as e:
# Ignore deleted includes.
if e.errno != errno.ENOENT:
raise
return filename_to_mtime
@staticmethod
def _parse_configuration(configuration_path):
# TODO: It probably makes sense to catch the exception raised
# by Parse() and re-raise it using a module-specific exception.
with open(configuration_path) as f:
return appinfo_includes.ParseAndReturnIncludePaths(f)
class BackendsConfiguration(object):
"""Stores configuration information for a backends.yaml file."""
def __init__(self, app_yaml_path, backend_yaml_path):
"""Initializer for BackendsConfiguration.
Args:
app_yaml_path: A string containing the full path of the yaml file
containing the configuration for this server.
backend_yaml_path: A string containing the full path of the backends.yaml
file containing the configuration for backends.
"""
self._update_lock = threading.RLock()
self._base_server_configuration = ServerConfiguration(app_yaml_path)
backend_info_external = self._parse_configuration(
backend_yaml_path)
self._backends_name_to_backend_entry = {}
for backend in backend_info_external.backends or []:
self._backends_name_to_backend_entry[backend.name] = backend
self._changes = dict(
(backend_name, set())
for backend_name in self._backends_name_to_backend_entry)
@staticmethod
def _parse_configuration(configuration_path):
# TODO: It probably makes sense to catch the exception raised
# by Parse() and re-raise it using a module-specific exception.
with open(configuration_path) as f:
return backendinfo.LoadBackendInfo(f)
def get_backend_configurations(self):
return [BackendConfiguration(self._base_server_configuration, self, entry)
for entry in self._backends_name_to_backend_entry.values()]
def check_for_updates(self, backend_name):
"""Return any configuration changes since the last check_for_updates call.
Args:
backend_name: A str containing the name of the backend to be checked for
updates.
Returns:
A set containing the changes that occured. See the *_CHANGED module
constants.
"""
with self._update_lock:
server_changes = self._base_server_configuration.check_for_updates()
if server_changes:
for backend_changes in self._changes.values():
backend_changes.update(server_changes)
changes = self._changes[backend_name]
self._changes[backend_name] = set()
return changes
class BackendConfiguration(object):
"""Stores backend configuration information.
This interface is and must remain identical to ServerConfiguration.
"""
def __init__(self, server_configuration, backends_configuration,
backend_entry):
"""Initializer for BackendConfiguration.
Args:
server_configuration: A ServerConfiguration to use.
backends_configuration: The BackendsConfiguration that tracks updates for
this BackendConfiguration.
backend_entry: A backendinfo.BackendEntry containing the backend
configuration.
"""
self._server_configuration = server_configuration
self._backends_configuration = backends_configuration
self._backend_entry = backend_entry
if backend_entry.dynamic:
self._basic_scaling = appinfo.BasicScaling(
max_instances=backend_entry.instances or 1)
self._manual_scaling = None
else:
self._basic_scaling = None
self._manual_scaling = appinfo.ManualScaling(
instances=backend_entry.instances or 1)
self._minor_version_id = ''.join(random.choice(string.digits) for _ in
range(18))
@property
def application_root(self):
"""The directory containing the application e.g. "/home/user/myapp"."""
return self._server_configuration.application_root
@property
def application(self):
return self._server_configuration.application
@property
def api_version(self):
return self._server_configuration.api_version
@property
def server_name(self):
return self._backend_entry.name
@property
def major_version(self):
return self._server_configuration.major_version
@property
def version_id(self):
return '%s:%s.%s' % (
self.server_name,
self.major_version,
self._minor_version_id)
@property
def runtime(self):
return self._server_configuration.runtime
@property
def threadsafe(self):
return self._server_configuration.threadsafe
@property
def basic_scaling(self):
return self._basic_scaling
@property
def manual_scaling(self):
return self._manual_scaling
@property
def automatic_scaling(self):
return None
@property
def normalized_libraries(self):
return self._server_configuration.normalized_libraries
@property
def skip_files(self):
return self._server_configuration.skip_files
@property
def nobuild_files(self):
return self._server_configuration.nobuild_files
@property
def error_handlers(self):
return self._server_configuration.error_handlers
@property
def handlers(self):
if self._backend_entry.start:
return [appinfo.URLMap(
url='/_ah/start',
script=self._backend_entry.start,
login='admin')] + self._server_configuration.handlers
return self._server_configuration.handlers
@property
def inbound_services(self):
return self._server_configuration.inbound_services
@property
def env_variables(self):
return self._server_configuration.env_variables
@property
def is_backend(self):
return True
def check_for_updates(self):
"""Return any configuration changes since the last check_for_updates call.
Returns:
A set containing the changes that occured. See the *_CHANGED module
constants.
"""
changes = self._backends_configuration.check_for_updates(
self._backend_entry.name)
if changes:
self._minor_version_id = ''.join(random.choice(string.digits) for _ in
range(18))
return changes
class DispatchConfiguration(object):
"""Stores dispatcher configuration information."""
def __init__(self, yaml_path):
self._yaml_path = yaml_path
self._mtime = os.path.getmtime(self._yaml_path)
self._process_dispatch_entries(self._parse_configuration(self._yaml_path))
@staticmethod
def _parse_configuration(configuration_path):
# TODO: It probably makes sense to catch the exception raised
# by LoadSingleDispatch() and re-raise it using a module-specific exception.
with open(configuration_path) as f:
return dispatchinfo.LoadSingleDispatch(f)
def check_for_updates(self):
mtime = os.path.getmtime(self._yaml_path)
if mtime > self._mtime:
self._mtime = mtime
try:
dispatch_info_external = self._parse_configuration(self._yaml_path)
except Exception, e:
failure_message = str(e)
logging.error('Configuration is not valid: %s', failure_message)
return
self._process_dispatch_entries(dispatch_info_external)
def _process_dispatch_entries(self, dispatch_info_external):
path_only_entries = []
hostname_entries = []
for entry in dispatch_info_external.dispatch:
parsed_url = dispatchinfo.ParsedURL(entry.url)
if parsed_url.host:
hostname_entries.append(entry)
else:
path_only_entries.append((parsed_url, entry.server))
if hostname_entries:
logging.warning(
'Hostname routing is not supported by the development server. The '
'following dispatch entries will not match any requests:\n%s',
'\n\t'.join(str(entry) for entry in hostname_entries))
self._entries = path_only_entries
@property
def dispatch(self):
return self._entries
class ApplicationConfiguration(object):
"""Stores application configuration information."""
def __init__(self, yaml_paths):
"""Initializer for ApplicationConfiguration.
Args:
yaml_paths: A list of strings containing the paths to yaml files.
"""
self.servers = []
self.dispatch = None
if len(yaml_paths) == 1 and os.path.isdir(yaml_paths[0]):
directory_path = yaml_paths[0]
for app_yaml_path in [os.path.join(directory_path, 'app.yaml'),
os.path.join(directory_path, 'app.yml')]:
if os.path.exists(app_yaml_path):
yaml_paths = [app_yaml_path]
break
else:
raise errors.AppConfigNotFoundError(
'no app.yaml file at %r' % directory_path)
for backends_yaml_path in [os.path.join(directory_path, 'backends.yaml'),
os.path.join(directory_path, 'backends.yml')]:
if os.path.exists(backends_yaml_path):
yaml_paths.append(backends_yaml_path)
break
for yaml_path in yaml_paths:
if os.path.isdir(yaml_path):
raise errors.InvalidAppConfigError(
'"%s" is a directory and a yaml configuration file is required' %
yaml_path)
elif (yaml_path.endswith('backends.yaml') or
yaml_path.endswith('backends.yml')):
# TODO: Reuse the ServerConfiguration created for the app.yaml
# instead of creating another one for the same file.
self.servers.extend(
BackendsConfiguration(yaml_path.replace('backends.y', 'app.y'),
yaml_path).get_backend_configurations())
elif (yaml_path.endswith('dispatch.yaml') or
yaml_path.endswith('dispatch.yml')):
if self.dispatch:
raise errors.InvalidAppConfigError(
'Multiple dispatch.yaml files specified')
self.dispatch = DispatchConfiguration(yaml_path)
else:
server_configuration = ServerConfiguration(yaml_path)
self.servers.append(server_configuration)
application_ids = set(server.application
for server in self.servers)
if len(application_ids) > 1:
raise errors.InvalidAppConfigError(
'More than one application ID found: %s' %
', '.join(sorted(application_ids)))
self._app_id = application_ids.pop()
server_names = set()
for server in self.servers:
if server.server_name in server_names:
raise errors.InvalidAppConfigError('Duplicate server: %s' %
server.server_name)
server_names.add(server.server_name)
if self.dispatch:
if 'default' not in server_names:
raise errors.InvalidAppConfigError(
'A default server must be specified.')
missing_servers = (
set(server_name for _, server_name in self.dispatch.dispatch) -
server_names)
if missing_servers:
raise errors.InvalidAppConfigError(
'Servers %s specified in dispatch.yaml are not defined by a yaml '
'file.' % sorted(missing_servers))
@property
def app_id(self):
return self._app_id
|
{
"content_hash": "e0130eac187debf4591e795bd8ad6d05",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 80,
"avg_line_length": 33.12105263157895,
"alnum_prop": 0.6661369775941522,
"repo_name": "elsigh/browserscope",
"id": "84d9f1bc0651fe347667f375edc48b3a76ff8f2c",
"size": "19480",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/appengine_tools/devappserver2/application_configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42911"
},
{
"name": "Groff",
"bytes": "674"
},
{
"name": "HTML",
"bytes": "2895472"
},
{
"name": "JavaScript",
"bytes": "2274900"
},
{
"name": "Python",
"bytes": "4264474"
},
{
"name": "Shell",
"bytes": "642"
}
],
"symlink_target": ""
}
|
import mock
import sys
guestfs = mock.Mock()
sys.modules['guestfs'] = guestfs
from sahara.cli.image_pack import api
from sahara.tests.unit import base
class TestSaharaImagePackAPI(base.SaharaTestCase):
def setUp(self):
super(TestSaharaImagePackAPI, self).setUp()
def tearDown(self):
super(TestSaharaImagePackAPI, self).tearDown()
@mock.patch('sahara.cli.image_pack.api.guestfs')
@mock.patch('sahara.cli.image_pack.api.plugins_base')
@mock.patch('sahara.cli.image_pack.api.LOG')
def test_pack_image_call(self, mock_log, mock_plugins_base, mock_guestfs):
guest = mock.Mock()
mock_guestfs.GuestFS = mock.Mock(return_value=guest)
guest.inspect_os = mock.Mock(return_value=['/dev/something1'])
plugin = mock.Mock()
mock_plugins_base.PLUGINS = mock.Mock(
get_plugin=mock.Mock(return_value=plugin))
api.pack_image(
"image_path", "plugin_name", "plugin_version",
{"anarg": "avalue"}, root_drive=None, test_only=False)
guest.add_drive_opts.assert_called_with("image_path", format="qcow2")
guest.set_network.assert_called_with(True)
guest.launch.assert_called_once_with()
guest.mount.assert_called_with('/dev/something1', '/')
guest.sh.assert_called_with("echo Testing sudo without tty...")
guest.sync.assert_called_once_with()
guest.umount_all.assert_called_once_with()
guest.close.assert_called_once_with()
@mock.patch('sahara.cli.image_pack.api.plugins_base')
def test_get_plugin_arguments(self, mock_plugins_base):
api.setup_plugins()
mock_plugins_base.setup_plugins.assert_called_once_with()
mock_PLUGINS = mock.Mock()
mock_plugins_base.PLUGINS = mock_PLUGINS
mock_plugin = mock.Mock()
mock_plugin.get_versions = mock.Mock(return_value=['1'])
mock_plugin.get_image_arguments = mock.Mock(
return_value=["Argument!"])
mock_PLUGINS.get_plugin = mock.Mock(return_value=mock_plugin)
result = api.get_plugin_arguments('Plugin!')
mock_plugin.get_versions.assert_called_once_with()
mock_plugin.get_image_arguments.assert_called_once_with('1')
self.assertEqual(result, {'1': ['Argument!']})
|
{
"content_hash": "c8f69c61fe2a432bd24d361b566ab47a",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 40.19298245614035,
"alnum_prop": 0.6569183762549106,
"repo_name": "tellesnobrega/sahara",
"id": "68f6073b8139226e9446acad231f9d8d73d03665",
"size": "2874",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sahara/tests/unit/cli/image_pack/test_image_pack_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "952"
},
{
"name": "Python",
"bytes": "3354711"
},
{
"name": "Shell",
"bytes": "56856"
}
],
"symlink_target": ""
}
|
import tornado.ioloop
import tornado.web
from libthumbor.url import Url
from thumbor.handlers.imaging import ImagingHandler
class ThumborServiceApp(tornado.web.Application):
def __init__(self, context):
self.context = context
self.debug = getattr(self.context.server, "debug", False)
super(ThumborServiceApp, self).__init__(self.get_handlers(), debug=self.debug)
def get_handlers(self):
handlers = []
for handler_list in self.context.modules.importer.handler_lists:
get_handlers = getattr(handler_list, 'get_handlers', None)
if get_handlers is None:
continue
handlers.extend(get_handlers(self.context))
# Imaging handler (GET)
handlers.append((Url.regex(), ImagingHandler, {"context": self.context}))
return handlers
|
{
"content_hash": "e93c3a646e3734d3def40403ce835ed3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 86,
"avg_line_length": 33.88,
"alnum_prop": 0.6611570247933884,
"repo_name": "kkopachev/thumbor",
"id": "6679e50a3fc4a6edbb71ef05cd8c24b7f3b3ce34",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thumbor/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58654"
},
{
"name": "JavaScript",
"bytes": "2516"
},
{
"name": "Makefile",
"bytes": "9691"
},
{
"name": "Python",
"bytes": "591888"
},
{
"name": "Shell",
"bytes": "286"
}
],
"symlink_target": ""
}
|
"""This module is deprecated. Please use `airflow.providers.apache.hive.hooks.hive`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.hive.hooks.hive import ( # noqa
HIVE_QUEUE_PRIORITIES,
HiveCliHook,
HiveMetastoreHook,
HiveServer2Hook,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.hive.hooks.hive`.",
DeprecationWarning,
stacklevel=2,
)
|
{
"content_hash": "7400a624aee15e01f81fc76f717a6069",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 88,
"avg_line_length": 25.88235294117647,
"alnum_prop": 0.7340909090909091,
"repo_name": "DinoCow/airflow",
"id": "5009647593e49c8fb1f03ac1c062357da9b19ba0",
"size": "1227",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/hooks/hive_hooks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "140781"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1473771"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import MincoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
'''
This test is meant to exercise NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
'''
class NULLDUMMYTest(MincoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = True
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-walletprematurewitness']])
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
NetworkThread().start() # Start up network handling in another thread
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
print ("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.tx_submit(self.nodes[0], test1txs[0])
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.tx_submit(self.nodes[0], test1txs[1])
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
txid3 = self.tx_submit(self.nodes[0], test1txs[2])
self.block_submit(self.nodes[0], test1txs, False, True)
print ("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 48)
trueDummy(test2tx)
txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
print ("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
print ("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 47)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR)
self.block_submit(self.nodes[0], [test4tx])
print ("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
self.tx_submit(self.nodes[0], test5tx, NULLDUMMY_ERROR)
self.block_submit(self.nodes[0], [test5tx], True)
print ("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.tx_submit(self.nodes[0], i)
self.block_submit(self.nodes[0], test6txs, True, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def tx_submit(self, node, tx, msg = ""):
tx.rehash()
try:
node.sendrawtransaction(bytes_to_hex_str(tx.serialize_with_witness()), True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], msg)
else:
assert_equal('', msg)
return tx.hash
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
|
{
"content_hash": "153b7f50cf4a71b97603e748a51f5031",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 137,
"avg_line_length": 44.22377622377623,
"alnum_prop": 0.6570208728652751,
"repo_name": "mincoin-project/mincoin",
"id": "d907844ed4b319cee8fa5e69eb3b599e0b5a16a8",
"size": "6534",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/nulldummy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "650854"
},
{
"name": "C++",
"bytes": "4577982"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "174531"
},
{
"name": "Makefile",
"bytes": "102845"
},
{
"name": "Objective-C",
"bytes": "6702"
},
{
"name": "Objective-C++",
"bytes": "7229"
},
{
"name": "Python",
"bytes": "882269"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3788"
},
{
"name": "Shell",
"bytes": "34265"
}
],
"symlink_target": ""
}
|
class Generator(object):
def __init__(self, manager):
self.__init_attributes(manager)
self.__sigid1 = manager.connect("generate-dictionary", self.__generate_cb)
def __init_attributes(self, manager):
self.__manager = manager
from re import UNICODE, compile
self.__pattern = compile(r"[^-\w]", UNICODE)
from dbus import Dictionary, String, Int32
try:
self.__empty_dict = Dictionary({}, signature="ss")
except:
self.__empty_dict = Dictionary({}, key_type=String, value_type=Int32)
return
def __generate(self, data):
try:
text, editor_id = data
if not text: raise ValueError
words = self.__generate_words(text)
if not words: raise ValueError
dictionary = self.__generate_dictionary(words)
if not dictionary: raise ValueError
self.__manager.emit("finished", (editor_id, dictionary))
except ValueError:
self.__manager.emit("finished", (editor_id, self.__empty_dict))
return False
def __generate_words(self, text):
from re import split
words = split(self.__pattern, text)
words =[word for word in words if self.__filter(word)]
return words
def __filter(self, word):
if len(word) < 4: return False
if word.startswith("---"): return False
if word.startswith("___"): return False
return True
def __generate_dictionary(self, words):
dictionary = {}
for string in words:
if string in dictionary.keys():
dictionary[string] += 1
else:
dictionary[string] = 1
return dictionary
def __generate_cb(self, manager, data):
self.__generate(data)
return False
|
{
"content_hash": "2f9501a684efddea42b3d871af5a8cc4",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 76,
"avg_line_length": 28.61111111111111,
"alnum_prop": 0.6802588996763754,
"repo_name": "baverman/scribes-goodies",
"id": "9900ee31be8b7de1348da20efcb2cfec34ed9612",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/CompleteLikeEclipse/scribes/edit/complete_like_eclipse/IndexerProcess/DictionaryGenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104020"
}
],
"symlink_target": ""
}
|
import datetime
import factory
from django.utils.timezone import utc
from openbudget.apps.accounts.factories import AccountFactory
from openbudget.apps.sheets.factories import TemplateFactory, TemplateNodeFactory
from openbudget.apps.taxonomies.models import Taxonomy, Tag, TaggedNode
class TaxonomyFactory(factory.DjangoModelFactory):
FACTORY_FOR = Taxonomy
user = factory.SubFactory(AccountFactory)
template = factory.Subfactory(TemplateFactory)
name = factory.Sequence(lambda n: 'Taxonomy {0}'.format(n))
description = factory.Sequence(lambda n: 'Taxononmy {0} description text.'.format(n))
created_on = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
last_modified = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
class TagFactory(factory.DjangoModelFactory):
FACTORY_FOR = Tag
taxonomy = factory.SubFactory(TaxonomyFactory)
name = factory.Sequence(lambda n: 'Taxonomy {0}'.format(n))
created_on = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
last_modified = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
class TaggedNodeFactory(factory.DjangoModelFactory):
FACTORY_FOR = TaggedNode
tag = factory.SubFactory(TagFactory)
content_object = factory.SubFactory(TemplateNodeFactory)
created_on = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
last_modified = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
|
{
"content_hash": "85b6effe782b49f332faab141e906b28",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 89,
"avg_line_length": 32.76,
"alnum_prop": 0.7307692307692307,
"repo_name": "nborwankar/open-budgets",
"id": "08a0aace0caec70ae9534910ee9e0cdb8cd8ad23",
"size": "1638",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "openbudget/apps/taxonomies/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from airflow import DAG
from airflow.decorators import task
from airflow.providers.amazon.aws.hooks.sqs import SqsHook
from airflow.providers.amazon.aws.operators.sqs import SqsPublishOperator
from airflow.providers.amazon.aws.sensors.sqs import SqsSensor
QUEUE_NAME = 'Airflow-Example-Queue'
AWS_CONN_ID = 'aws_default'
@task(task_id="create_queue")
def create_queue_fn():
"""This is a Python function that creates an SQS queue"""
hook = SqsHook()
result = hook.create_queue(queue_name=QUEUE_NAME)
return result['QueueUrl']
@task(task_id="delete_queue")
def delete_queue_fn(queue_url):
"""This is a Python function that deletes an SQS queue"""
hook = SqsHook()
hook.get_conn().delete_queue(QueueUrl=queue_url)
with DAG(
dag_id='example_sqs',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
dagrun_timeout=timedelta(minutes=60),
tags=['example'],
catchup=False,
) as dag:
# [START howto_sqs_operator_and_sensor]
# Using a task-decorated function to create an SQS queue
create_queue = create_queue_fn()
publish_to_queue = SqsPublishOperator(
task_id='publish_to_queue',
sqs_queue=create_queue,
message_content="{{ task_instance }}-{{ execution_date }}",
message_attributes=None,
delay_seconds=0,
)
read_from_queue = SqsSensor(
task_id='read_from_queue',
sqs_queue=create_queue,
max_messages=5,
wait_time_seconds=1,
visibility_timeout=None,
message_filtering=None,
message_filtering_match_values=None,
message_filtering_config=None,
)
# Using a task-decorated function to delete the SQS queue we created earlier
delete_queue = delete_queue_fn(create_queue)
create_queue >> publish_to_queue >> read_from_queue >> delete_queue
# [END howto_sqs_operator_and_sensor]
|
{
"content_hash": "7f08c90aa246808eb8ee64aefa96559e",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 30.0625,
"alnum_prop": 0.682952182952183,
"repo_name": "mistercrunch/airflow",
"id": "acf8c12e3292dceb7a09dd004dbbea0ee58ab683",
"size": "2710",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/providers/amazon/aws/example_dags/example_sqs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
}
|
"""
A utils for Markdown
convert : render markdown to html
get_toc : Get the Table of Content
get_images: Return a list of images, can be used to extract the top image
"""
import os
import markdown
from jinja2.nodes import CallBlock
from jinja2.ext import Extension
# ------------------------------------------------------------------------------
class MarkdownTagExtension(Extension):
"""
A simple extension for adding a {% markdown %}{% endmarkdown %} tag to Jinja
<div>
{% markdown %}
## Hi
{% endmarkdown %}
</div>
"""
tags = set(['markdown'])
def __init__(self, environment):
super(MarkdownTagExtension, self).__init__(environment)
environment.extend(
markdowner=markdown.Markdown(extensions=['extra'])
)
def parse(self, parser):
lineno = next(parser.stream).lineno
body = parser.parse_statements(
['name:endmarkdown'],
drop_needle=True
)
return CallBlock(
self.call_method('_markdown_support'),
[],
[],
body
).set_lineno(lineno)
def _markdown_support(self, caller):
block = caller()
block = self._strip_whitespace(block)
return self._render_markdown(block)
def _strip_whitespace(self, block):
lines = block.split('\n')
whitespace = ''
output = ''
if (len(lines) > 1):
for char in lines[1]:
if (char == ' ' or char == '\t'):
whitespace += char
else:
break
for line in lines:
output += line.replace(whitespace, '', 1) + '\r\n'
return output.strip()
def _render_markdown(self, block):
block = self.environment.markdowner.convert(block)
return block
class MarkdownExtension(Extension):
options = {}
file_extensions = '.md'
def preprocess(self, source, name, filename=None):
if (not name or
(name and not os.path.splitext(name)[1] in self.file_extensions)):
return source
return convert(source)
# Markdown
mkd = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.toc'
])
def convert(text):
'''
Convert MD text to HTML
:param text:
:return:
'''
mkd.reset()
return mkd.convert(text)
|
{
"content_hash": "560c344eb04f89e08893bf8ca3c05fde",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 24.82,
"alnum_prop": 0.5535858178887993,
"repo_name": "mardix/mambo",
"id": "1eda0ebf49d0778c793721097fab1ece602be8d6",
"size": "2482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mambo/md_ext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "10148"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "47149"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.