prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
"""
Copyrigh | t (C) <2010> Autin L. TSRI
This file git_upy/blender/v271/__init__.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but W | ITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
|
"""
Django settings for paypal_demo project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gt^xy(p!5wcff5@zy#^cnvuz9ry#-#g$59du41x@a!l=#)3q6+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'paypal_demo',
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware. | SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'paypal_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': | {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paypal_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
#PayPal Settings
INSTALLED_APPS.append('paypal.standard.ipn')
PAYPAL_RECEIVER_EMAIL = "sandbox@neutrondrive.com"
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_data_labels23.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
workshe | et = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
| chart.axis_ids = [45705856, 45740416]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {
'value': 1,
'font': {'name': 'Consolas', 'baseline': 1 * -1, 'pitch_family': 49, 'charset': 0}
},
})
chart.add_series({
'values': '=Sheet1!$B$1:$B$5',
'data_labels': {'value': 1, 'position': 'inside_base'},
})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
f.data['labels'])
self.label_config = self._label_config_to_df(self.config)
# Combine the labels data with the labels configuration
self.data['labels'] = self._merge_labels_and_config(
labels=self.data['labels'],
config=self.label_config)
def bin_data(self):
"""Makes a dict of dicts of pd.Panels at self.output."""
label_bins = self.create_label_bins(self.data['labels'])
major_axis = label_bins.index.values
minor_axis = label_bins.drop(['Start_Time', 'End_Time'], axis=1).columns
minor_axis = minor_axis.append(pd.Index(['stat']))
raw = self.data['samples']
output = {channel: pd.Panel(items=statistics.keys(),
major_axis=major_axis,
minor_axis=minor_axis)
for channel, statistics in self.panels.iteritems()}
for channel, statistics in self.panels.iteritems():
for stat_name, stat_fun in statistics.iteritems():
new_panel = label_bins.copy(deep=True)
new_panel.drop(['Start_Time', 'End_Time'], axis=1, inplace=True)
new_panel['stat'] = np.nan
cond_lbls = pd.Series(data=zip(label_bins.loc[:, 'Condition'],
label_bins.loc[:, 'Label'])
).unique()
for cond_lbl in cond_lbls:
sel = (label_bins.loc[:, 'Condition'] == cond_lbl[0]) \
& (label_bins.loc[:, 'Label'] == cond_lbl[1])
sel_bins = label_bins.loc[sel, :]
samples = pd.Series(name=channel)
pos = pd.Series(name='pos')
for _, label_bin in sel_bins.iterrows():
selector = (raw.index.values >= label_bin['Start_Time']) \
& (raw.index.values < label_bin['End_Time'])
samples = samples.append(raw.loc[selector, channel])
pos = pos.append(raw.loc[selector, 'pos'])
stat = stat_fun(samples, pos)
new_panel.loc[sel, 'stat'] = stat
output[channel][stat_name] = new_panel.sort('Bin_Order')
self.output = output
@staticmethod
def _label_config_to_df(config):
"""Convert the label configuration dictionary to a data frame."""
labels_list = []
for event_type, label_config in config.iteritems():
pattern = label_config['pattern']
if isinstance(pattern, dict):
for event_group, flag in label_config['pattern'].iteritems():
labels_list.append({
'Label': event_type,
'Condition': event_group,
'Duration': label_config['duration'],
'N_Bins': label_config['bins'],
'Left_Trim': label_config.get('left_trim', 0),
'Right_Trim': label_config.get('right_trim', 0),
'flag': flag})
elif isinstance(pattern, int):
labels_list.append({
'Label': event_type,
'Condition': np.nan,
'Duration': label_config['duration'],
'N_Bins': label_config['bins'],
'Left_Trim': label_config.get('left_trim', 0),
'Right_Trim': label_config.get('right_trim', 0),
'flag': pattern})
else:
raise Exception('Bad Biopac config flag {}'.format(pattern))
return pd.DataFrame(labels_l | ist)
@staticmethod
def _clean_labels(labels):
"""
Turn the Biopac flag channel into a data frame of label flags and start
times.
"""
# TODO(janmtl): finish this docstring
flags = labels['flag'].values
low_offset = np.append(-255, flags)
high_offset = np.append(flags, flags[-1])
event_flags = flags[(low_offset-high_offset) != 0]
start_times = np.where((low_offset-high_offset) != 0)[0] |
labels = pd.DataFrame({'flag': event_flags,
'Start_Time': start_times})
labels = labels[(labels['flag'] != 255)]
return labels
@staticmethod
def _clean_samples(samples):
"""
.
"""
scale = 0.55
samples.index = samples.index*100
for col_name, col in samples.iteritems():
x = col.index
y = col.values
spl = UnivariateSpline(x, y, k=5, s=scale*len(x))
samples[col_name] = spl(x)
samples['pos'] = True
return samples
@staticmethod
def _merge_labels_and_config(labels, config):
"""
Merge together the contents of the labels file with the label
configuration dictionary.
"""
labels = pd.merge(labels, config, on='flag')
labels.sort('Start_Time', inplace=True)
return labels
def create_label_bins(self, labels):
"""Replace the N_Bins column with Bin_Index and the Duration column
with End_Time. This procedure grows the number of rows in the labels
data frame."""
total_bins = labels['N_Bins'].sum()
label_bins = pd.DataFrame(columns=['Order', 'ID', 'Label',
'Condition', 'Bin_Order',
'Start_Time', 'End_Time',
'Bin_Index'],
index=np.arange(0, total_bins))
idx = 0
for _, label in labels.iterrows():
n_bins = label['N_Bins']
cuts = np.linspace(start=label['Start_Time'] + label['Left_Trim'],
stop=(label['Start_Time']
+ label['Duration']
- label['Right_Trim']),
num=n_bins+1)
label_info = np.tile(label.as_matrix(columns=['Label',
'Condition']),
(n_bins, 1))
# Order and ID
label_bins.iloc[idx:idx+n_bins, 0:2] = np.nan
# Label, Condition
label_bins.iloc[idx:idx+n_bins, 2:4] = label_info
# Bin_Order
label_bins.iloc[idx:idx+n_bins, 4] = idx+np.arange(0, n_bins, 1)
# Start_Time
label_bins.iloc[idx:idx+n_bins, 5] = cuts[0:n_bins]
# End_Time
label_bins.iloc[idx:idx+n_bins, 6] = cuts[1:n_bins+1]
# Bin_Index
label_bins.iloc[idx:idx+n_bins, 7] = np.arange(0, n_bins, 1)
idx = idx + n_bins
# Add the Order by iterating over Labels and Bin indices
for lc, group in label_bins.groupby(['Label', 'Bin_Index']):
selector = (label_bins['Label'] == lc[0]) & \
(label_bins['Bin_Index'] == lc[1])
label_bins.loc[selector, 'Order'] = \
np.arange(0, np.sum(selector), 1)
return label_bins
@staticmethod
def _validate_config(raw):
"""
Validates the label configuration dict passed to the Data Source.
Args:
raw (dict): must match the following schema
{event_type (str):
{
duration: (float or int),
bins: (int),
pattern: dictionary of flags keyed by group
}
}
"""
# TODO(janmtl): improve this docstring
schema = Schema({str: {'duration': Or(float, int),
'bins': int,
'pattern': Or(int, {str: int}),
Optional('left_trim'): Or(float, int),
Optional('right_trim'): Or(float, int)}})
return schema.validate(raw)
@staticmethod
def _validate_schedule(raw):
"""
Validates the schedule configurat |
#!/usr/bin/env python
from setuptools import setup
setup(
name='nodenet',
version='0.1.0',
description='an asynchronous node-based UDP networking library',
author='Ajay MT',
author_email='ajaymt@icloud.com',
url='http://github.com/AjayMT/nodenet',
download_url='https://github.com/AjayMT/nodenet/tarball/v0.1.0',
keywords='node network UDP asynchronous',
py_modules=[ | 'nodenet'],
requires=[
'pyuv (>1.0.0, <2.0.0)',
'emitter (>=0.0.6)'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Languag | e :: Python :: 2',
]
)
|
#!/usr/b | in/env python
class RoundFloatManual(object):
def __init__(self, val):
assert isinstance(val, float), \
"Value must be a float!"
self.value = round(val, 2)
def __str__(self):
return '%.2f' % self.value
__repr__ = __str__ | |
# | !/usr/bin/env python
import turtle
import random
def bloom(radius):
turtle.colormode(255)
for rad in range(40, 10, -5):
for looper in range(360//rad):
turtle.up()
turtle.circle(radius+rad, rad)
turtle.begin_fill()
turtle.fillcolor((200+random.randint(0, rad),
200+random.randint(0 | , rad),
200+random.randint(0, rad)))
turtle.down()
turtle.circle(-rad)
turtle.end_fill()
def main():
"""Simple flower, using global turtle instance"""
turtle.speed(0)
turtle.colormode(1.0)
bloom(5)
turtle.exitonclick()
###
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# ======================================================================
# Copyright (C) 2007-2014 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
Logging support for pyftpdlib, inspired from Tornado's
(http://www.tornadoweb.org/).
This is not supposed to be imported/used directly.
Instead you should use logging.basicConfig before serve_forever().
"""
import logging
import sys
import time
try:
import curses
except ImportError:
curses = None
from pyftpdlib._compat import unicode
# default logger
logger = logging.getLogger('pyftpdlib')
def _stderr_supports_color():
color = False
if curses is not None and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
# configurable options
LEVEL = logging.INFO
PREFIX = '[%(levelname)1.1s %(asctime)s]'
COLOURED = _stderr_supports_color()
TIME_FORMAT = "%y-%m-%d %H:%M:%S"
# taken and adapted from Tornado
class LogFormatter(logging.Formatter):
"""Log formatter used in pyftpdlib.
Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
"""
def __init__(self, *args, **kwargs):
logging.Formatter.__init__(self, *args, **kwargs)
self._coloured = COLOURED and _stderr_supports_color()
if self._coloured:
curses.setupterm()
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or curses.tigetstr("setf")
or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = unicode(fg_color, "ascii")
self._colors = {
# blues
logging.DEBUG: unicode(curses.tparm(fg_color, 4), "ascii"),
# green
logging.INFO: unicode(curses.tparm(fg_color, 2), "ascii"),
# yellow
logging.WARNING: unicode(curses.tparm(fg_color, 3), "ascii"),
# red
logging.ERROR: unicode(curses.tparm(fg_color, 1), "ascii")
}
sel | f._norma | l = unicode(curses.tigetstr("sgr0"), "ascii")
def format(self, record):
try:
record.message = record.getMessage()
except Exception:
err = sys.exc_info()[1]
record.message = "Bad message (%r): %r" % (err, record.__dict__)
record.asctime = time.strftime(TIME_FORMAT,
self.converter(record.created))
prefix = PREFIX % record.__dict__
if self._coloured:
prefix = (self._colors.get(record.levelno, self._normal) +
prefix + self._normal)
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
# byte strings wherever possible).
try:
message = unicode(record.message)
except UnicodeDecodeError:
message = repr(record.message)
formatted = prefix + " " + message
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
formatted = formatted.rstrip() + "\n" + record.exc_text
return formatted.replace("\n", "\n ")
def _config_logging():
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger = logging.getLogger('pyftpdlib')
logger.setLevel(LEVEL)
logger.addHandler(channel)
|
on import GridSearchCV
from grass.pygrass import raster as r
from grass.pygrass.utils import getenv
import grass.script as gs
from cStringIO import StringIO
from subprocess import PIPE
from io import BytesIO
from itertools import combinations
def setParamDict():
params = {}
for p in ['learning_rate', 'max_depth', 'loss', 'subsample',
'min_samples_leaf', 'max_features', 'n_estimators']:
if p in ['max_depth', 'min_samples_leaf', 'n_estimators']:
params[p] = map(int, options[p].split(','))
elif p in ['learning_rate', 'max_features', 'subsample']:
params[p] = map(float, options[p].split(','))
else:
params[p] = options[p].split(',')
return params
def writeMap(name, x,y,z):
result = BytesIO()
np.savetxt(result,
np.column_stack((x,
y,
z)))
result.seek(0)
gs.write_command('r.in.xyz', stdin=result.getvalue(), input='-', output=name,
method='mean', separator=' ', overwrite=True)
# #############################################################################
# Define variables
# List of input maps has to start with Y
# Initaial settings for automatized model selection
options = {'cores': '20',
'learning_rate': '0.009,0.007,0.005',
'max_depth': '11,13,15',
'min_samples_leaf': '1,2,3',
'max_features': '0.9,0.8,0.7',
'subsample': '0.5',
'loss': 'huber',
'n_estimators': '3000',
'y': 'test_area_luroeykalven_water_grid_25833_10m@p_Sentinel4Nature_S2_Luroeykalven',
'x': 'unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_1,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_2,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_3,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_4,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_5,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_6,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_7,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_8,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_9,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_10',
'deviance': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_deviance.pdf',
'featureimportance': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_featureimportance.pdf',
'partialdependence': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_partial_dependence.pdf',
'crossval': '0.25',
'output': 'ForestCover_Luroeykalven_water_FCLS',
'spatial_term': None
}
cores = int(options['cores'])
spatial_term = options['spatial_term']
output = options['output']
deviance = options['deviance']
featureimportance = options['featureimportance']
partialdependence = options['partialdependence']
crossval = float(options['crossval'])
params = setParamDict()
# #############################################################################
# Load data
maps = [options['y']] + options['x'].rstrip('\n').split(',')
data = np.genfromtxt(BytesIO(gs.read_command('r.stats',
flags='1Ng',
input=maps)), delimiter=" ")
y = 2
if spatial_term:
x = [0,1] + range(3,len(data[0]))
else:
x = range(3,len(data[0]))
# Create a mas for NoData in either x or y
mask_y = np.isnan(data[:,y])
for i in range(3,len(data[0])):
if i == 3:
mask_x = np.isnan(data[:,i])
else:
mask_x = np.logical_or((np.isnan(data[:,i])), mask_x)
all_y_idx = np.where(np.logical_or(mask_x, mask_y)==False)
all_x_idx = np.where(mask_x==False)
# Random shuffle data points with training data, excluding all NoData
all_y = shuffle(data[all_y_idx])
# Training and test set
offset = int(all_y.shape[0] * (1 - crossval))
X_train, y_train, coor_train = all_y[:offset,x], all_y[:offset,y], all_y[:offset,[0,1]]
X_test, y_test, coor_test= all_y[offset:,x], all_y[offset:,y], all_y[offset:,[0,1]]
# Set for predicitions
predict, coor_predict = data[all_x_idx][:,x], data[all_x_idx][:,[0,1]]
# Run model selection process if requested
model_selection = False
for k in params.keys():
if len(params[k]) > 1:
model_selection = True
if model_selection:
gs.message('Running model selection ...')
clf = ensemble.GradientBoostingRegressor()
# this may take some minutes
gs_cv = GridSearchCV(clf, params, n_jobs=cores).fit(X_train, y_train)
# best hyperparameter setting
best_params = gs_cv.best_params_
print('Best hyper-parameter set is:')
print(best_params)
else:
best_params = {}
for k in params.keys():
best_params[k] = params[k][0]
# #############################################################################
# Fit regression model
gs.message('Fitting regression model ...')
clf = ensemble.GradientBoostingRegressor(**best_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
r2 = r2_score(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
print("R2: %.4f" % r2)
# #############################################################################
# Generate requested plots
# Plot training deviance
# compute test set deviance
if deviance:
test_score = np.zeros((best_params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.rcParams.update({'figure.autolayout': True})
plt.title('Deviance')
plt.plot(np.arange(best_params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(best_params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.savefig(deviance)
# #############################################################################
# Plot feature importance
if featureimportance:
if spatial_term:
cols = ['x', 'y'] + maps[1:]
else:
cols = maps[1:]
plt.figure(figsize=(12, 12))
plt.rcParams.update({'figure.autolayout': True})
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
#plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, np.array(cols)[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig(featureimportance)
if partialdependence:
if spatial_term:
cols = ['x', 'y'] + maps[1:]
else:
cols = maps[1:]
fig, axs = plot_partial_dependence(clf, X_train, cols, n_jobs=cores, n_cols=2,
feature_names=cols, figsize=(len(cols), len(cols)*2))
fig.savefig(partialdependence)
sorted_idx = np.argsort(clf.feature_importances_)
twoway = list(combinations(list(reversed(sorted_idx[-6:])), 2))
fig, axs = plot_partial_dependence(clf, X_train, twoway, n_jobs=cores, n_cols=2,
feature_names=cols, figsize=(len(twoway), int(len(twoway)*3)))
fig.savefig(partialdependence.rs | trip('.pdf') + '_twoway | .pdf')
# #############################################################################
# Predict data outside trainifrom subprocess import PIPEng areas
writeMap(output, coor_predict[:,0], coor_predict[:,1], clf.predict(predict))
# Write train error map
writeMap(output + '_train_error', coor_train[:,0], coor_train[:,1], clf.predict(X_train) - y_train |
from gittle import Gittle
repo = Gittle('.')
laste | st = [
info['sha']
for info in repo.commit_info()[1:3]
]
print(repo.diff(*lastest, diff_type='cl | assic'))
print("""
Last Diff
""")
print(list(repo.diff('HEAD')))
|
##########################################################################
#
# Copyright (c) 2012-2014, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferScene
from _GafferSceneTest import *
from SceneTestCase import SceneTestCase
from ScenePlugTest import ScenePlugTest
from GroupTest import GroupTest
from SceneTimeWarpTest import SceneTimeWarpTest
from SceneProceduralTest import SceneProceduralTest
from CubeTest import CubeTest
from PlaneTest import PlaneTest
from SphereTest import SphereTest
from InstancerTest import InstancerTest
from ObjectToSceneTest import ObjectToSceneTest
from CameraTest import CameraTest
from OutputsTest import OutputsTest
from CustomOptionsTest import CustomOptionsTest
from DeleteOptionsTest import Delete | OptionsTest
from CopyOptionsTest import CopyOptionsTest
from SceneNodeTest import SceneNodeTest
from PathMatcherTest import PathMatcherTest
from PathFilterTest import PathFilterTest
from ShaderAssignmentTest import ShaderAssignmentTest
from CustomAttri | butesTest import CustomAttributesTest
from AlembicSourceTest import AlembicSourceTest
from DeletePrimitiveVariablesTest import DeletePrimitiveVariablesTest
from SeedsTest import SeedsTest
from SceneContextVariablesTest import SceneContextVariablesTest
from SubTreeTest import SubTreeTest
from OpenGLAttributesTest import OpenGLAttributesTest
from StandardOptionsTest import StandardOptionsTest
from ScenePathTest import ScenePathTest
from PathMatcherDataTest import PathMatcherDataTest
from LightTest import LightTest
from TestRender import TestRender
from RenderTest import RenderTest
from OpenGLShaderTest import OpenGLShaderTest
from OpenGLRenderTest import OpenGLRenderTest
from TransformTest import TransformTest
from AimConstraintTest import AimConstraintTest
from PruneTest import PruneTest
from ShaderTest import ShaderTest
from TextTest import TextTest
from MapProjectionTest import MapProjectionTest
from MapOffsetTest import MapOffsetTest
from PointConstraintTest import PointConstraintTest
from SceneReaderTest import SceneReaderTest
from SceneWriterTest import SceneWriterTest
from IsolateTest import IsolateTest
from DeleteAttributesTest import DeleteAttributesTest
from UnionFilterTest import UnionFilterTest
from SceneSwitchTest import SceneSwitchTest
from ShaderSwitchTest import ShaderSwitchTest
from ParentConstraintTest import ParentConstraintTest
from ParentTest import ParentTest
from StandardAttributesTest import StandardAttributesTest
from PrimitiveVariablesTest import PrimitiveVariablesTest
from DuplicateTest import DuplicateTest
from ModuleTest import ModuleTest
from GridTest import GridTest
from SetTest import SetTest
from FreezeTransformTest import FreezeTransformTest
from SetFilterTest import SetFilterTest
from FilterTest import FilterTest
from SceneAlgoTest import SceneAlgoTest
from CoordinateSystemTest import CoordinateSystemTest
from DeleteOutputsTest import DeleteOutputsTest
from ExternalProceduralTest import ExternalProceduralTest
from ClippingPlaneTest import ClippingPlaneTest
from FilterSwitchTest import FilterSwitchTest
from PointsTypeTest import PointsTypeTest
from ParametersTest import ParametersTest
from SceneFilterPathFilterTest import SceneFilterPathFilterTest
from AttributeVisualiserTest import AttributeVisualiserTest
from SceneLoopTest import SceneLoopTest
from SceneProcessorTest import SceneProcessorTest
from MeshToPointsTest import MeshToPointsTest
from InteractiveRenderTest import InteractiveRenderTest
from FilteredSceneProcessorTest import FilteredSceneProcessorTest
from ShaderBallTest import ShaderBallTest
from LightTweaksTest import LightTweaksTest
from FilterResultsTest import FilterResultsTest
if __name__ == "__main__":
import unittest
unittest.main()
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import iteritems
import abc
import copy
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.reporters import utils
from buildbot.util import httpclientservice
from buildbot.util import service
class HttpStatusPushBase(service.BuildbotService):
    """Abstract reporter service that forwards build events over HTTP.

    Subclasses must implement :meth:`send`.  This base class subscribes to
    the ``builds`` message-queue topics and calls ``send`` for every build
    that passes :meth:`filterBuilds`.
    """

    # Class-level defaults of extra build details to fetch; a per-instance
    # copy is made in reconfigService so updates do not leak across instances.
    neededDetails = dict()

    def checkConfig(self, *args, **kwargs):
        service.BuildbotService.checkConfig(self)
        # Fail early during config checking if no HTTP client backend
        # is available for HTTPClientService.
        httpclientservice.HTTPClientService.checkAvailable(self.__class__.__name__)
        if not isinstance(kwargs.get('builders'), (type(None), list)):
            config.error("builders must be a list or None")

    @defer.inlineCallbacks
    def reconfigService(self, builders=None, debug=None, verify=None, **kwargs):
        yield service.BuildbotService.reconfigService(self)
        self.debug = debug
        self.verify = verify
        self.builders = builders
        # shallow-copy the class-level dict before mutating it below
        self.neededDetails = copy.copy(self.neededDetails)
        # any "want*" keyword argument toggles an extra detail to fetch
        # in getMoreInfoAndSend (passed to utils.getDetailsForBuild)
        for k, v in iteritems(kwargs):
            if k.startswith("want"):
                self.neededDetails[k] = v

    @defer.inlineCallbacks
    def startService(self):
        yield service.BuildbotService.startService(self)
        startConsuming = self.master.mq.startConsuming
        # subscribe to build-finished and build-started mq topics
        self._buildCompleteConsumer = yield startConsuming(
            self.buildFinished,
            ('builds', None, 'finished'))
        self._buildStartedConsumer = yield startConsuming(
            self.buildStarted,
            ('builds', None, 'new'))

    def stopService(self):
        # NOTE(review): the parent stopService is not called here —
        # confirm against BuildbotService whether that is intentional.
        self._buildCompleteConsumer.stopConsuming()
        self._buildStartedConsumer.stopConsuming()

    def buildStarted(self, key, build):
        # mq callback for ('builds', *, 'new')
        return self.getMoreInfoAndSend(build)

    def buildFinished(self, key, build):
        # mq callback for ('builds', *, 'finished')
        return self.getMoreInfoAndSend(build)

    def filterBuilds(self, build):
        # restrict to the configured builder names, if any were given
        if self.builders is not None:
            return build['builder']['name'] in self.builders
        return True

    @defer.inlineCallbacks
    def getMoreInfoAndSend(self, build):
        yield utils.getDetailsForBuild(self.master, build, **self.neededDetails)
        if self.filterBuilds(build):
            yield self.send(build)

    @abc.abstractmethod
    def send(self, build):
        """Deliver the build dict to the remote endpoint (subclass hook)."""
        pass
class HttpStatusPush(HttpStatusPushBase):
    """Reporter that POSTs each (optionally formatted) build dict to a server."""

    name = "HttpStatusPush"
    secrets = ['user', 'password', 'auth']

    def checkConfig(self, serverUrl, user=None, password=None, auth=None, format_fn=None, **kwargs):
        if user is not None and auth is not None:
            config.error("Only one of user/password or auth must be given")
        if user is not None:
            config.warnDeprecated("0.9.1", "user/password is deprecated, use 'auth=(user, password)'")
        if format_fn is not None and not callable(format_fn):
            config.error("format_fn must be a function")
        HttpStatusPushBase.checkConfig(self, **kwargs)

    @defer.inlineCallbacks
    def reconfigService(self, serverUrl, user=None, password=None, auth=None, format_fn=None, **kwargs):
        yield HttpStatusPushBase.reconfigService(self, **kwargs)
        # legacy user/password pair takes the place of an explicit auth tuple
        if user is not None:
            auth = (user, password)
        # default formatter is the identity transform
        self.format_fn = format_fn if format_fn is not None else (lambda build: build)
        self._http = yield httpclientservice.HTTPClientService.getService(
            self.master, serverUrl, auth=auth)

    @defer.inlineCallbacks
    def send(self, build):
        payload = self.format_fn(build)
        response = yield self._http.post("", json=payload)
        if response.code != 200:
            log.msg("%s: unable to upload status: %s" %
                    (response.code, response.content))
# -*- coding: utf-8 -*-
class Charset(object):
    """Code points covered by the NotoSansTamil-Regular font charset."""
    common_name = 'NotoSansTamil-Regular'
    native_name = ''

    def glyphs(self):
        """Return the supported Unicode code points, in charset order."""
        # Controls, punctuation, ASCII digits and common symbols.
        glyph_list = [
            0x0000,  # NULL
            0x200B,  # ZERO WIDTH SPACE
            0x200C,  # ZERO WIDTH NON-JOINER
            0x000D,  # CARRIAGE RETURN
            0x2212,  # MINUS SIGN
            0x2013,  # EN DASH
            0x2014,  # EM DASH
            0x2018,  # LEFT SINGLE QUOTATION MARK
            0x2019,  # RIGHT SINGLE QUOTATION MARK
            0x201C,  # LEFT DOUBLE QUOTATION MARK
            0x201D,  # RIGHT DOUBLE QUOTATION MARK
            0x0020,  # SPACE
            0x0021,  # EXCLAMATION MARK
            0x0022,  # QUOTATION MARK
            0x0023,  # NUMBER SIGN
            0x0025,  # PERCENT SIGN
            0x2026,  # HORIZONTAL ELLIPSIS
            0x0027,  # APOSTROPHE
            0x0028,  # LEFT PARENTHESIS
            0x0029,  # RIGHT PARENTHESIS
            0x002A,  # ASTERISK
            0x002B,  # PLUS SIGN
            0x002C,  # COMMA
            0x002D,  # HYPHEN-MINUS
            0x002E,  # FULL STOP
            0x002F,  # SOLIDUS
            0x0030, 0x0031, 0x0032, 0x0033, 0x0034,  # DIGIT ZERO..FOUR
            0x0035, 0x0036, 0x0037, 0x0038, 0x0039,  # DIGIT FIVE..NINE
            0x003A,  # COLON
            0x003B,  # SEMICOLON
            0x003C,  # LESS-THAN SIGN
            0x003D,  # EQUALS SIGN
            0x003E,  # GREATER-THAN SIGN
            0x003F,  # QUESTION MARK
            0x200D,  # ZERO WIDTH JOINER
            0x005B,  # LEFT SQUARE BRACKET
            0x005C,  # REVERSE SOLIDUS
            0x005D,  # RIGHT SQUARE BRACKET
            0x005E,  # CIRCUMFLEX ACCENT
            0x005F,  # LOW LINE
            0x007B,  # LEFT CURLY BRACKET
            0x007C,  # VERTICAL LINE
            0x007D,  # RIGHT CURLY BRACKET
            0x007E,  # TILDE
            0x00A0,  # NO-BREAK SPACE
            0x00AD,  # SOFT HYPHEN
            0x20B9,  # INDIAN RUPEE SIGN
            0x25CC,  # DOTTED CIRCLE
            0x00D7,  # MULTIPLICATION SIGN
            0x00F7,  # DIVISION SIGN
            0xFEFF,  # ZERO WIDTH NO-BREAK SPACE
            0x0964,  # DEVANAGARI DANDA
            0x0965,  # DEVANAGARI DOUBLE DANDA
            # Tamil block.
            0x0B82,  # TAMIL SIGN ANUSVARA
            0x0B83,  # TAMIL SIGN VISARGA
            0x0B85, 0x0B86, 0x0B87, 0x0B88, 0x0B89, 0x0B8A,  # vowels A AA I II U UU
            0x0B8E, 0x0B8F, 0x0B90,  # vowels E EE AI
            0x0B92, 0x0B93, 0x0B94,  # vowels O OO AU
            0x0B95,  # LETTER KA
            0x0B99, 0x0B9A, 0x0B9C, 0x0B9E, 0x0B9F,  # NGA CA JA NYA TTA
            0x0BA3, 0x0BA4, 0x0BA8, 0x0BA9, 0x0BAA,  # NNA TA NA NNNA PA
            0x0BAE, 0x0BAF, 0x0BB0, 0x0BB1, 0x0BB2, 0x0BB3, 0x0BB4,  # MA YA RA RRA LA LLA LLLA
            0x0BB5, 0x0BB6, 0x0BB7, 0x0BB8, 0x0BB9,  # VA SHA SSA SA HA
            0x0BBE, 0x0BBF, 0x0BC0, 0x0BC1, 0x0BC2,  # vowel signs AA I II U UU
            0x0BC6, 0x0BC7, 0x0BC8,  # vowel signs E EE AI
            0x0BCA, 0x0BCB, 0x0BCC,  # vowel signs O OO AU
            0x0BCD,  # SIGN VIRAMA
            0x0BD0,  # TAMIL OM
            0x0BD7,  # AU LENGTH MARK
            0x0BE6, 0x0BE7, 0x0BE8, 0x0BE9, 0x0BEA,  # digits 0..4
            0x0BEB, 0x0BEC, 0x0BED, 0x0BEE, 0x0BEF,  # digits 5..9
            0x0BF0, 0x0BF1, 0x0BF2,  # numbers TEN, ONE HUNDRED, ONE THOUSAND
            0x0BF3, 0x0BF4, 0x0BF5,  # DAY, MONTH, YEAR signs
            0x0BF6, 0x0BF7, 0x0BF8,  # DEBIT, CREDIT, AS ABOVE signs
            0x0BF9,  # TAMIL RUPEE SIGN
            0x0BFA,  # TAMIL NUMBER SIGN
        ]
        return glyph_list
|
from nbodykit.base.catalog import CatalogSource
from nbodykit.io.stack import FileStack
from nbodykit import CurrentMPIComm
from nbodykit import io
from nbodykit.extern import docrep
from six import string_types
import textwrap
import os
# Public API of this module: the factory helpers plus the concrete
# file-backed catalog classes constructed at the bottom of the file.
__all__ = ['FileCatalogFactory', 'FileCatalogBase',
           'CSVCatalog', 'BinaryCatalog', 'BigFileCatalog',
           'HDFCatalog', 'TPMBinaryCatalog', 'Gadget1Catalog', 'FITSCatalog']
class FileCatalogBase(CatalogSource):
    """
    Base class to create a source of particles from a
    single file, or multiple files, on disk.

    Files of a specific type should be subclasses of this class.

    Parameters
    ----------
    filetype : subclass of :class:`~nbodykit.io.base.FileType`
        the file-like class used to load the data from file; should be a
        subclass of :class:`nbodykit.io.base.FileType`
    args : tuple, optional
        the arguments to pass to the ``filetype`` class when constructing
        each file object
    kwargs : dict, optional
        the keyword arguments to pass to the ``filetype`` class when
        constructing each file object
    comm : MPI Communicator, optional
        the MPI communicator instance; default (``None``) sets to the
        current communicator
    """
    @CurrentMPIComm.enable
    def __init__(self, filetype, args=(), kwargs=None, comm=None):
        # avoid the shared mutable-default pitfall; None means "no kwargs"
        kwargs = {} if kwargs is None else kwargs
        self.comm = comm
        self.filetype = filetype
        # build the FileStack on the root rank only and broadcast it, so the
        # (possibly expensive) file scan happens once
        if self.comm.rank == 0:
            self._source = FileStack(filetype, *args, **kwargs)
        else:
            self._source = None
        self._source = self.comm.bcast(self._source)
        # compute this rank's size; start with an even split of the full file
        lstart = self.comm.rank * self._source.size // self.comm.size
        lend = (self.comm.rank + 1) * self._source.size // self.comm.size
        self._size = lend - lstart
        self.start = 0
        self.end = self._source.size
        self._lstart = lstart  # offset in the file for this rank
        self._lend = lend      # offset in the file for this rank
        # update the meta-data
        self.attrs.update(self._source.attrs)
        if self.comm.rank == 0:
            self.logger.info("Extra arguments to FileType: %s %s" % (str(args), str(kwargs)))
        CatalogSource.__init__(self, comm=comm)

    def query_range(self, start, end):
        """
        Seek to a range in the file catalog.

        Parameters
        ----------
        start : int
            start of the file relative to the physical file
        end : int
            end of the file relative to the physical file

        Returns
        -------
        A new catalog that only accesses the given region of the file.

        If the original catalog (self) contains any assigned columns not directly
        obtained from the file, then the function will raise ValueError, since
        the operation in that case is not well defined.
        """
        if len(CatalogSource.hardcolumns.fget(self)) > 0:
            raise ValueError("cannot seek if columns have been attached to the FileCatalog")
        other = self.copy()
        # split [start, end) evenly across the ranks
        other._lstart = self.start + start + self.comm.rank * (end - start) // self.comm.size
        other._lend = self.start + start + (self.comm.rank + 1) * (end - start) // self.comm.size
        other._size = other._lend - other._lstart
        other.start = start
        other.end = end
        # re-initialize so size-dependent state is rebuilt for the new range
        CatalogSource.__init__(other, comm=self.comm)
        return other

    def __repr__(self):
        # fix: the original computed self._source.path here but never used it
        name = self.__class__.__name__
        args = (name, self.size, repr(self._source))
        return "%s(size=%d, %s)" % args

    @property
    def hardcolumns(self):
        """
        The union of the columns in the file and any transformed columns.
        """
        defaults = CatalogSource.hardcolumns.fget(self)
        return list(self._source.dtype.names) + defaults

    def get_hardcolumn(self, col):
        """
        Return a column from the underlying file source.

        Columns are returned as dask arrays, sliced to this rank's range.
        """
        if col in self._source.dtype.names:
            return self._source.get_dask(col)[self._lstart:self._lend]
        else:
            return CatalogSource.get_hardcolumn(self, col)
def _make_docstring(filetype, examples):
    """
    Internal function to generate the doc strings for the built-in
    CatalogSource objects that rely on :mod:`nbodykit.io` classes
    to read data from disk.
    """
    # fully-qualified name of the IO class, spliced into the template below
    qualname = '%s.%s' %(filetype.__module__, filetype.__name__)
    # NOTE: local name __doc__ shadows the module docstring on purpose; it
    # is just the string being assembled here.  The %(test.parameters)s
    # placeholder is substituted later by docrep, not by .format().
    __doc__ = """
    A CatalogSource that uses :class:`~{qualname}` to read data from disk.
    Multiple files can be read at once by supplying a list of file
    names or a glob asterisk pattern as the ``path`` argument. See
    :ref:`reading-multiple-files` for examples.
    Parameters
    ----------
    %(test.parameters)s
    comm : MPI Communicator, optional
        the MPI communicator instance; default (``None``) sets to the
        current communicator
    attrs : dict, optional
        dictionary of meta-data to store in :attr:`attrs`
    """.format(qualname=qualname)
    # optionally append a cross-reference to the examples section
    if examples is not None:
        __doc__ += """
    Examples
    --------
    Please see :ref:`the documentation <%s>` for examples.
    """ %examples
    # get the Parameters from the IO library class
    d = docrep.DocstringProcessor()
    d.get_sections(d.dedents(filetype.__doc__), 'test', ['Parameters'])
    return d.dedents(__doc__)
def FileCatalogFactory(name, filetype, examples=None):
    """
    Factory method to create a :class:`~nbodykit.base.catalog.CatalogSource`
    that uses a subclass of :mod:`nbodykit.io.base.FileType` to read
    data from disk.

    Parameters
    ----------
    name : str
        the name of the catalog class to create
    filetype : subclass of :class:`nbodykit.io.base.FileType`
        the subclass of the FileType that reads a specific type of data
    examples : str, optional
        if given, a documentation cross-reference link where examples can be
        found

    Returns
    -------
    subclass of :class:`FileCatalogBase` :
        the ``CatalogSource`` object that reads data using ``filetype``
    """
    def __init__(self, *args, **kwargs):
        # peel off the keywords handled here; the rest go to the file class
        comm = kwargs.pop('comm', None)
        attrs = kwargs.pop('attrs', {})
        FileCatalogBase.__init__(self, filetype=filetype, args=args, kwargs=kwargs, comm=comm)
        self.attrs.update(attrs)

    # assemble the class namespace and create the new type dynamically
    namespace = {
        "__init__": __init__,
        "__doc__": _make_docstring(filetype, examples),
    }
    return type(name, (FileCatalogBase,), namespace)
# Concrete CatalogSource classes, one per supported on-disk file format.
CSVCatalog = FileCatalogFactory("CSVCatalog", io.CSVFile, examples='csv-data')
BinaryCatalog = FileCatalogFactory("BinaryCatalog", io.BinaryFile, examples='binary-data')
BigFileCatalog = FileCatalogFactory("BigFileCatalog", io.BigFile, examples='bigfile-data')
HDFCatalog = FileCatalogFactory("HDFCatalog", io.HDFFile, examples='hdf-data')
TPMBinaryCatalog = FileCatalogFactory("TPMBinaryCatalog", io.TPMBinaryFile)
FITSCatalog = FileCatalogFactory("FITSCatalog", io.FITSFile, examples='fits-data')
Gadget1Catalog = FileCatalogFactory("Gadget1Catalog", io.Gadget1File, examples=None)
|
import unittest
from algoliasearch.configs import InsightsConfig
from algoliasearch.exceptions import AlgoliaException
from algoliasearch.insights_client import InsightsClient
class TestInsightsClient(unittest.TestCase):
    """Unit tests for the InsightsClient construction helpers."""

    def test_create(self):
        # a valid app-id/api-key pair yields a client ...
        self.assertIsInstance(InsightsClient.create("foo", "bar"), InsightsClient)
        # ... while empty credentials are rejected
        with self.assertRaises(AssertionError) as _:
            InsightsClient.create("", "")

    def test_create_with_config(self):
        client = InsightsClient.create_with_config(InsightsConfig("foo", "bar"))
        self.assertIsInstance(client, InsightsClient)

    def test_region(self):
        # the region defaults to "us" and is overridable per client
        default_client = InsightsClient.create("foo", "bar")
        self.assertEqual(default_client._config._region, "us")
        fr_client = InsightsClient.create("foo", "bar", "fr")
        self.assertEqual(fr_client._config._region, "fr")
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the infor | mation
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
# tagLst is the element path (outermost to innermost) whose preservation
# is verified; attrName is the attribute checked on the innermost element.
tagLst = ['library_materials', 'material', 'extra', 'technique']
attrName = 'profile'
attrVal = ''        # unused by this particular judge (no value comparison)
dataToCheck = ''    # unused by this particular judge (no data comparison)
class SimpleJudgingObject:
    """Judging object for a COLLADA conformance test.

    Verifies the standard Import/Export/Validate steps and preservation of
    the configured element/attribute data via the shared JudgeAssistant.
    """
    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        # per-badge results, filled in by the Judge* methods below
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        # No step should not crash
        self.__assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline

    # To pass intermediate you need to pass basic, this object could also include additional
    # tests that were specific to the intermediate badge.
    def JudgeSuperior(self, context):
        # if baseline fails, no point in further checking
        if (self.status_baseline == False):
            self.status_superior = self.status_baseline
            return self.status_superior
        # only compare against the reference image when the renders match
        if ( self.__assistant.CompareRenderedImages(context) ):
            self.__assistant.CompareImagesAgainst(context, "_reference_material_extra_element_names")
        # Check for preservation of element data
        self.__assistant.FullPreservation(context, self.tagList, self.attrName)
        self.status_superior = self.__assistant.DeferJudgement(context)
        return self.status_superior

    # To pass advanced you need to pass intermediate, this object could also include additional
    # tests that were specific to the advanced badge
    def JudgeExemplary(self, context):
        # exemplary adds nothing beyond superior for this test
        self.status_exemplary = self.status_superior
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
# (fixed: dropped the stray trailing semicolon)
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck)
|
from django.conf.urls import include, | url
from article import views
# URL routes for the article app: the list view at the root, and the
# add view at "add" (the trailing slash is optional: "add" or "add/").
urlpatterns = [
    url(r'^$', views.articles, name='articles'),
    url(r'^add/?$', views.add_articles, name='add-articles'),
]
from fabric.api imp | ort *
from fabric.decorators import task
import os, sys
# Make the parent of the current directory importable so the project
# package can be found when fabric tasks run from this directory.
sys.path[0:0] = [os.path.join(os.path.realpath('.'), '..'), ]
try:
    from d51.django.virtualenv.test_runner import run_tests
except ImportError:
    # fixed: "except ImportError, e" is Python-2-only syntax; the bound
    # exception and the redundant "import sys" were never used.
    sys.stderr.write("This project requires d51.django.virtualenv.test_runner\n")
    sys.exit(-1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IRC bot...
"""
import threading
import src.utilities as util
import src.settings
import src.irc.botObject
class BotDispatcher(threading.Thread):
    """
    The BotDispatcher object handles the delegation of the various bots on
    the various specified servers.

    One Bot object for each server (call name), meaning several bots can be
    connected to the same address.

    All bots are stored in the botObjects class variable.
    """
    # NOTE(review): class-level dict is shared by every BotDispatcher
    # instance; fine while only one dispatcher exists.
    botObjects = {}

    def __init__(self):
        """Prepare the object and fire off the dispatch method."""
        super(BotDispatcher, self).__init__()
        self.settingsInstance = src.settings.Settings()
        self.dispatch()

    def dispatch(self):
        """Create one Bot object for each server and start it in threads."""
        servers = self.settingsInstance.settings['servers']
        for name, info in servers.items():
            self.botObjects[name] = src.irc.botObject.BotObject(
                self.settingsInstance.settings,
                info
            )
            thread = threading.Thread(
                target=self.botObjects[name].connectToServer
            )
            thread.start()

    def destroyBot(self, botObjName):
        """Gracefully shut down the bot and remove it from self.botObjects."""
        try:
            self.botObjects[botObjName].destroy()
            del self.botObjects[botObjName]
            # fixed typo in the log message: "detroyed" -> "destroyed"
            util.write("Bot %s has been destroyed." % botObjName)
        except KeyError:
            util.write(
                "Bot %s does not exist." % botObjName,
                outputType="Warning"
            )

    def reloadBot(self, botObjName):
        """First destroy the Bot object and then reinstantiate it."""
        try:
            info = self.botObjects[botObjName].info
        except KeyError:
            info = None
        if info is not None:
            self.destroyBot(botObjName)
            # bug fix: BotObject takes (settings, info) as in dispatch();
            # the original passed only `info` here.
            self.botObjects[botObjName] = src.irc.botObject.BotObject(
                self.settingsInstance.settings,
                info
            )
            util.write("Bot %s has been reloaded." % botObjName)
        else:
            util.write(
                "Bot %s does not exist." % botObjName,
                outputType="Warning"
            )
|
import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# Command-line configuration for the toy ODE-fitting demo.
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=1000)
parser.add_argument('--batch_time', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
args = parser.parse_args()
# optionally use torchdiffeq's adjoint-based solver under the same name,
# so the rest of the script is agnostic to the choice
if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
# ground-truth system state: initial condition, time grid, and the matrix A
# used by Lambda below (dy/dt = (y**3) @ A)
true_y0 = torch.tensor([[2., 0.]]).to(device)
t = torch.linspace(0., 25., args.data_size).to(device)
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]]).to(device)
class Lambda(nn.Module):
    """Ground-truth dynamics: dy/dt = (y ** 3) @ true_A (t is unused)."""

    def forward(self, t, y):
        cubed = y ** 3
        return torch.mm(cubed, true_A)
# Pre-compute the ground-truth trajectory once; no gradients needed.
# NOTE(review): the solver is hard-coded to 'dopri5' here rather than
# using args.method — confirm that is intentional.
with torch.no_grad():
    true_y = odeint(Lambda(), true_y0, t, method='dopri5')
def get_batch():
    """Sample a mini-batch of short trajectory segments from true_y.

    Returns (batch_y0, batch_t, batch_y): initial states (M, D), the shared
    time grid (T), and the target segments (T, M, D), all on `device`.
    """
    # pick batch_size distinct starting indices that leave room for a
    # full batch_time-long window
    starts = torch.from_numpy(np.random.choice(
        np.arange(args.data_size - args.batch_time, dtype=np.int64),
        args.batch_size, replace=False))
    batch_y0 = true_y[starts]  # (M, D)
    batch_t = t[:args.batch_time]  # (T)
    batch_y = torch.stack(
        [true_y[starts + i] for i in range(args.batch_time)], dim=0)  # (T, M, D)
    return batch_y0.to(device), batch_t.to(device), batch_y.to(device)
def makedirs(dirname):
    """Create *dirname* (and any missing parents) if it does not exist.

    Uses exist_ok=True instead of the original check-then-create pattern,
    which was racy and could raise if the directory appeared between the
    os.path.exists check and os.makedirs.
    """
    os.makedirs(dirname, exist_ok=True)
# When --viz is given, lazily import matplotlib and set up a 3-panel
# figure (trajectories / phase portrait / learned vector field) that
# visualize() redraws in place each test interval.
if args.viz:
    makedirs('png')
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(12, 4), facecolor='white')
    ax_traj = fig.add_subplot(131, frameon=False)
    ax_phase = fig.add_subplot(132, frameon=False)
    ax_vecfield = fig.add_subplot(133, frameon=False)
    plt.show(block=False)
def visualize(true_y, pred_y, odefunc, itr):
    """Redraw the three diagnostic panels and save a frame to png/NNN.

    Uses the module-level figure/axes created in the `if args.viz:` block;
    a no-op when --viz was not given.
    """
    if args.viz:
        # panel 1: per-dimension trajectories over time (true vs predicted)
        ax_traj.cla()
        ax_traj.set_title('Trajectories')
        ax_traj.set_xlabel('t')
        ax_traj.set_ylabel('x,y')
        ax_traj.plot(t.cpu().numpy(), true_y.cpu().numpy()[:, 0, 0], t.cpu().numpy(), true_y.cpu().numpy()[:, 0, 1], 'g-')
        ax_traj.plot(t.cpu().numpy(), pred_y.cpu().numpy()[:, 0, 0], '--', t.cpu().numpy(), pred_y.cpu().numpy()[:, 0, 1], 'b--')
        ax_traj.set_xlim(t.cpu().min(), t.cpu().max())
        ax_traj.set_ylim(-2, 2)
        ax_traj.legend()
        # panel 2: phase portrait in the (x, y) plane
        ax_phase.cla()
        ax_phase.set_title('Phase Portrait')
        ax_phase.set_xlabel('x')
        ax_phase.set_ylabel('y')
        ax_phase.plot(true_y.cpu().numpy()[:, 0, 0], true_y.cpu().numpy()[:, 0, 1], 'g-')
        ax_phase.plot(pred_y.cpu().numpy()[:, 0, 0], pred_y.cpu().numpy()[:, 0, 1], 'b--')
        ax_phase.set_xlim(-2, 2)
        ax_phase.set_ylim(-2, 2)
        # panel 3: the learned vector field, normalized to unit magnitude,
        # evaluated on a 21x21 grid over [-2, 2]^2
        ax_vecfield.cla()
        ax_vecfield.set_title('Learned Vector Field')
        ax_vecfield.set_xlabel('x')
        ax_vecfield.set_ylabel('y')
        y, x = np.mgrid[-2:2:21j, -2:2:21j]
        dydt = odefunc(0, torch.Tensor(np.stack([x, y], -1).reshape(21 * 21, 2)).to(device)).cpu().detach().numpy()
        mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
        dydt = (dydt / mag)
        dydt = dydt.reshape(21, 21, 2)
        ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
        ax_vecfield.set_xlim(-2, 2)
        ax_vecfield.set_ylim(-2, 2)
        # persist a frame and refresh the interactive window
        fig.tight_layout()
        plt.savefig('png/{:03d}'.format(itr))
        plt.draw()
        plt.pause(0.001)
class ODEFunc(nn.Module):
    """Trainable approximation of the dynamics, applied to y ** 3."""

    def __init__(self):
        super(ODEFunc, self).__init__()
        # small MLP: 2 -> 50 -> 2 with a tanh nonlinearity
        layers = [nn.Linear(2, 50), nn.Tanh(), nn.Linear(50, 2)]
        self.net = nn.Sequential(*layers)
        # initialize weights near zero and biases exactly at zero
        for module in self.net.modules():
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, mean=0, std=0.1)
                nn.init.constant_(module.bias, val=0)

    def forward(self, t, y):
        # t is unused: the learned vector field is autonomous
        return self.net(y ** 3)
class RunningAverageMeter(object):
    """Tracks the latest value and an exponential moving average of it."""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all history; val becomes None and avg restarts at 0."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold *val* into the moving average and remember it as latest."""
        first_update = self.val is None
        self.avg = val if first_update else (
            self.avg * self.momentum + val * (1 - self.momentum))
        self.val = val
if __name__ == '__main__':
    # frame counter for saved visualization images
    ii = 0
    func = ODEFunc().to(device)
    optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
    end = time.time()
    # running averages of per-iteration wall time and training loss
    time_meter = RunningAverageMeter(0.97)
    loss_meter = RunningAverageMeter(0.97)
    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()
        batch_y0, batch_t, batch_y = get_batch()
        # integrate the learned dynamics over the batch window and fit
        # the predicted segments to the true ones with an L1 loss
        pred_y = odeint(func, batch_y0, batch_t).to(device)
        loss = torch.mean(torch.abs(pred_y - batch_y))
        loss.backward()
        optimizer.step()
        time_meter.update(time.time() - end)
        loss_meter.update(loss.item())
        # periodically evaluate on the full trajectory (no gradients)
        if itr % args.test_freq == 0:
            with torch.no_grad():
                pred_y = odeint(func, true_y0, t)
                loss = torch.mean(torch.abs(pred_y - true_y))
                print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
                visualize(true_y, pred_y, func, ii)
                ii += 1
        end = time.time()
|
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, render
from .models impor | t Comment
from .forms import UpdateCommentForm
from django.http.response import JsonResponse
from django.views.decorators.http import require_http_methods
def update_comment(request, cid):
    """
    Render or process the edit form for one of the user's comments.

    TODO: This function has code duplication, clean it when you have time.
    """
    comment = get_object_or_404(Comment, id=cid, owner=request.user)
    if request.method != 'POST':
        # initial GET: show an unbound form pre-filled from the comment
        form = UpdateCommentForm(comment=comment)
        return render(request, 'comments/update_form.html', {
            'update_comment_form': form})
    form = UpdateCommentForm(request.POST, comment=comment)
    if form.is_valid():
        form.save()
        return render(request, 'comments/comment.html', {
            'comment': comment})
    # invalid submission: re-render the form with its errors
    return render(request, 'comments/update_form.html', {
        'update_comment_form': form})
def get_comment(request, cid):
    """Render a single comment owned by the requesting user (404 otherwise)."""
    comment = get_object_or_404(Comment, id=cid, owner=request.user)
    context = {'comment': comment}
    return render(request, 'comments/comment.html', context)
@csrf_exempt
@require_http_methods(['POST', ])
def delete_comment(request, cid):
    """Soft-delete one of the requesting user's comments (POST only).

    The row is kept but flagged DELETED_BY_OWNER, and the parent story's
    stored comment count is refreshed.  Returns a small JSON status body.
    """
    comment = get_object_or_404(Comment, id=cid, owner=request.user)
    comment.status = Comment.DELETED_BY_OWNER
    comment.save()
    comment.story.update_comment_count(save=True)
    return JsonResponse({'status': 'deleted'})
|
# -*- coding: UTF-8 -*-
# bp_v1
from .api_v1 import bp_v1
# b | p_v2
from .api_v2 | import bp_v2
__author__ = 'lpe234'
"""
Controller
"""
|
import logging
import re
from pathlib import Path
def find_by_name(path, name):
    """Depth-first search under *path* for the first entry named *name*.

    Parameters
    ----------
    path : str or Path
        Directory to search (a str is converted to a Path).
    name : str
        Exact entry name to match.

    Returns
    -------
    Path or None
        The first matching entry, or None if nothing matched.
    """
    # isinstance instead of the original `type(path) is str` check
    if isinstance(path, str):
        path = Path(path)
    for child in path.iterdir():
        if child.name == name:
            return child
        if child.is_dir():
            found = find_by_name(child, name)
            if found:
                return found
    return None
def find_by_regexp(path, regexp):
    """Depth-first search under *path* for the first entry whose full
    path string fully matches *regexp*.

    Parameters
    ----------
    path : str or Path
        Directory to search (a str is converted to a Path).
    regexp : str or compiled pattern
        Pattern matched with re.fullmatch against str(entry).

    Returns
    -------
    Path or None
        The first matching entry, or None if nothing matched.
    """
    # isinstance instead of the original `type(path) is str` check
    if isinstance(path, str):
        path = Path(path)
    for child in path.iterdir():
        # NOTE: matches against the whole path string, not just the name
        if re.fullmatch(regexp, str(child)):
            return child
        if child.is_dir():
            found = find_by_regexp(child, regexp)
            if found:
                return found
    return None
def gen_file_list(path: Path, filters=None, toplevel=True):
    """Recursively collect entries under *path* that pass all *filters*.

    Directories that pass the filters are descended into (and, below the
    top level, included themselves); files that pass are collected.  At the
    top level the results are made relative to *path*.

    Bug fix: the original removed entries from ``files`` while iterating
    over it, which silently skips the element after each removal.  The
    second filtering pass now builds a new list instead.
    """
    filters = filters or []
    files = [] if toplevel else [path]
    for child in path.iterdir():
        if all(filter_.check(child) for filter_ in filters):
            if child.is_dir():
                files += gen_file_list(child, filters, toplevel=False)
            else:
                files.append(child)
    files = [file.relative_to(path) for file in files] if toplevel else files
    # re-check the (possibly re-rooted) paths without mutating mid-iteration
    kept = []
    for file in files:
        if all(filter_.check(file) for filter_ in filters):
            kept.append(file)
        else:
            logging.info("Filtered:{}".format(file))
    return kept
def is_relative_to(this, that):
    """Return True when path *this* lies inside path *that*.

    Thin EAFP wrapper around Path.relative_to, which raises ValueError
    for paths outside *that*.
    """
    try:
        this.relative_to(that)
        return True
    except ValueError:
        return False
|
from Bio import SeqIO
from Bio.Alphabet import IUPAC, ProteinAlphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from optparse import OptionParser
def translate_to_six_frame(dnaRecord, translationTable):
    '''
    translate a Bio.SeqRecord of a DNA sequence via the given translation table into the six possible translations

    dnaRecord = Bio.SeqRecord of DNA sequence (or other)
    translationTable = the codon table for translating base triplets into amino acids (number between 1 and 25 based on http://www.ncbi.nlm.nih.gov/Taxonomy/taxonomyhome.html/index.cgi?chapter=cgencodes)
    '''
    records = []
    # three reading frames x two strands = six translations, in the same
    # order as the original (frame-major, forward before reverse)
    for frame in range(3):
        for direction in ('forward', 'reverse'):
            source = (dnaRecord.seq[frame:] if direction == 'forward'
                      else dnaRecord.seq.reverse_complement()[frame:])
            protein = Seq(str(source.translate(translationTable)), alphabet = ProteinAlphabet)
            record = SeqRecord(protein, dnaRecord.name)
            # tag id/description with strand initial ('f'/'r') and frame
            record.id = '%s_%s%i' % (record.id, direction[0], frame)
            record.description = '%s|translation %s frame %i' % (record.description, direction, frame)
            records.append(record)
    return records
if __name__ == '__main__':
    # CLI: -g/-o may be repeated and are paired positionally via zip below
    op = OptionParser()
    op.add_option('-g','--genomes', dest='genomeFilenames', action='append', default=None, help='the input genome in fasta format')
    op.add_option('-o','--outputs', dest='outputFilenames', action='append', default=None, help='the output fasta file with the six frame translation')
    op.add_option('-t','--translTable', dest='translationTableNumber', default=1, type='int', help='a translation table number according to http://www.ncbi.nlm.nih.gov/Taxonomy/taxonomyhome.html/index.cgi?chapter=cgencodes')
    opts, args = op.parse_args()
    # NOTE(review): file handles are opened inline and never closed;
    # consider `with open(...)` blocks here.
    for genomeFilename, outputFilename in zip(opts.genomeFilenames, opts.outputFilenames):
        translations = []
        for dnaRecord in SeqIO.parse(open(genomeFilename), 'fasta'):
            translations.extend(translate_to_six_frame(dnaRecord, opts.translationTableNumber))
        SeqIO.write(translations, open(outputFilename, 'w+'), 'fasta')
|
readsf', help='Fasta file of sequences')
parser.add_option('-k', dest='k', type='int', help='Number of clusters')
parser.add_option('-p', dest='proc', type='int', default=2, help='Number of processes to run [Default=%default]')
# help='Use a soft assignment of reads to clusters [Default=%default]'
parser.add_option('--em',dest='soft_assign', action='store_true', default=False, help=SUPPRESS_HELP)
# likelybin options
parser.add_option('--ls', dest='lb_starts', type='int', default=1, help='Number of random LikelyBin starts [Default=%default]')
parser.add_option('--ln', dest='lb_numreads', type='int', default=3000, help='Number of reads to sample for LikelyBin [Default=%default]')
parser.add_option('--lt', dest='lb_threads', type='int', default=2, help='Number of LikelyBin threads per start, and CPUs for imm_cluster [Default=%default]')
parser.add_option('--lo', dest='lb_order', type='int', default=3, help='Order of LikelyBin Markov model [Default=%default]')
# compostbin options
parser.add_option('--cs', dest='cb_starts', type='int', default=1, help='Number of random CompostBin starts [Default=%default]')
parser.add_option('--cn', dest='cb_numreads', type='int', default=3000, help='Number of reads to sample for CompostBin [Default=%default]')
parser.add_option('--ct', dest='cb_threads', type='int', default=1, help='Number of CPUs for imm_cluster [Default=%default]')
parser.add_option('--co','--cm', dest='cb_mers', type='int', default=5, help='mers to count in CompostBin [Default=%default]')
(options, args) = parser.parse_args()
options.readsf = os.path.abspath(options.readsf)
total_starts = options.lb_starts + options.cb_starts
if options.soft_assign:
em = '--em'
else:
em = ''
# run initial samples
i = 0
while i < total_starts:
p = []
j = 0
while j < options.proc and i < total_starts:
# LikelyBin
if i < options.lb_starts:
# double check processes
if j + options.lb_threads <= options.proc:
# make a temp dir to compute in and cd to it
temp_dir('tmp.start%d' % i)
p.append(subprocess.Popen('%s/lb_init.py -r %s -n %d -k %d -o %d -p %d %s' % (bin_dir, options.readsf, options.lb_numreads, options.k, options.lb_order, options.lb_threads, em), shell=True))
os.chdir('..')
i += 1
elif j == 0:
print 'Cannot use more lb threads than processes'
exit()
j += options.lb_threads # even if not true, just move things along
# CompostBin
else:
# double check processes
if j + options.cb_threads <= options.proc:
# make a temp dir to compute in and cd to it
temp_dir('tmp.start%d' % i)
p.append(subprocess.Popen('%s/cb_init.py -r %s -n %d -k %d -m %d -p %d %s' % (bin_dir, options.readsf, options.cb_numreads, options.k, options.cb_mers, options.cb_threads, em), shell=True))
os.chdir('..')
i += 1
elif j == 0:
print 'Cannot use more cb threads than processes'
exit()
j += options.lb_threads # even if not true, just move things along
# wait for processes to finish
for j in range(len(p)):
os.waitpid(p[j].pid, 0)
# choose best start
#maxlike_clusters(total_starts, options.readsf, options.k, options.soft_assign)
minentropy_clusters(total_starts, options.readsf, options.k, options.soft_assign)
# in case k changed
new_k = determine_k(options.soft_assign, options.k)
# run imm clustering completely
p = subprocess.Popen('%s/imm_cluster.py -k %d -r %s -p %d -i --trained %s &> immc.log' % (bin_dir, new_k, options.readsf, options.proc, em), shell=True)
os.waitpid(p.pid, 0)
############################################################
# temp_dir
#
# Create (or empty) a temporary working directory and chdir
# into it for an initial clustering run.
############################################################
def temp_dir(tmpdir):
    """Change into `tmpdir`, creating it if needed and clearing old files."""
    if not os.path.isdir(tmpdir):
        os.mkdir(tmpdir)
        os.chdir(tmpdir)
    else:
        os.chdir(tmpdir)
        # wipe leftovers from a previous run
        for leftover in glob.glob('*'):
            os.remove(leftover)
############################################################
# maxlike_clusters
#
# Copy the clustering with maximum likelihood to the main
# directory
############################################################
def maxlike_clusters(total_starts, readsf, k, soft_assign):
    """Score every tmp.start* directory with scimm_like and copy the
    cluster/score files of the best-scoring start into the main directory.

    A start whose directory holds no cluster-*.fa files is considered
    failed and is skipped ('' marker).
    """
    like = [0]*total_starts
    for i in range(total_starts):
        os.chdir('tmp.start%d' % i)
        if len(glob.glob('cluster-*.fa')) > 0:
            # determine likelihood
            like[i] = scimm_like(readsf, k, soft_assign)
        else:
            # something failed
            like[i] = ''
        os.chdir('..')

    # find max likelihood initial partitioning, ignoring failed ('') starts
    max_like = None
    max_clust = -1
    for i in range(len(like)):
        if like[i] != '' and (max_like is None or like[i] >= max_like):
            max_like = like[i]
            max_clust = i
    if max_clust == -1:
        # previously this fell through with max_clust unbound (NameError)
        raise Exception('All initial clustering starts failed')

    # get files from max
    # BUGFIX: glob inside the winning start directory (the main directory
    # has no cluster files yet), and use the dotted score-file name for
    # consistency with minentropy_clusters
    for c in range(len(glob.glob('tmp.start%d/cluster-*.fa' % max_clust))):
        shutil.copy('tmp.start%d/cluster-%d.fa' % (max_clust,c), 'cluster-%d.fa' % c)
        shutil.copy('tmp.start%d/icm-%d.scores.tmp' % (max_clust,c), 'icm-%d.scores.tmp' % c)
############################################################
# scimm_like
#
# Calculate the likelihood of the given clustering and IMM
############################################################
def scimm_like(readsf, k, soft_assign):
    """Return the likelihood of the clustering/IMMs in the current directory."""
    clusters = determine_k(soft_assign, k)
    uniform = [1.0 / clusters] * clusters
    priors = imm_cluster.update_priors(uniform, readsf, {}, {}, soft_assign)
    likelihood, _ = imm_cluster.get_read_probs(priors, {}, {}, soft_assign)
    return likelihood
############################################################
# minentropy_clusters
#
# Copy the clustering with minimum entropy to the main
# directory.
############################################################
def minentropy_clusters(total_starts, readsf, k, soft_assign):
    """Score every tmp.start* directory by entropy and copy the files of
    the minimum-entropy start into the main directory.

    Starts with no cluster-*.fa files are marked failed with ''.
    """
    entropy = []
    for start in range(total_starts):
        os.chdir('tmp.start%d' % start)
        if glob.glob('cluster-*.fa'):
            # determine entropy of this start
            entropy.append(get_entropy(readsf, k, soft_assign))
        else:
            # something failed; '' sorts above numbers for util.min_i
            entropy.append('')
        os.chdir('..')

    # find min entropy partitioning ('' is greater than numbers)
    (min_entropy, min_clust) = util.min_i(entropy)

    # copy cluster and score files from the winning start
    num_clusters = len(glob.glob('tmp.start%d/cluster-*.fa' % min_clust))
    for c in range(num_clusters):
        shutil.copy('tmp.start%d/cluster-%d.fa' % (min_clust, c), 'cluster-%d.fa' % c)
        shutil.copy('tmp.start%d/icm-%d.scores.tmp' % (min_clust, c), 'icm-%d.scores.tmp' % c)
############################################################
# get_entropy
#
# Return the entropy of the clusters in the current
# directory.
############################################################
def get_entropy(readsf, k, soft_assign):
    """Return the total Shannon entropy of the read-to-cluster
    assignment probabilities for the clustering in the current directory."""
    clusters = determine_k(soft_assign, k)
    priors = imm_cluster.update_priors([1.0/clusters]*clusters, readsf, {}, {}, soft_assign)
    _, read_probs = imm_cluster.get_read_probs(priors, {}, {}, soft_assign)
    total = 0.0
    for read in read_probs:
        for prob in read_probs[read]:
            if prob > 0:
                total -= prob * math.log(prob)
    return total
############################################################
# determine_k
#
# In case, I'm letting k change within LikelyBin
############################################################
def determine_k(soft_assign, k):
new_k = 0
for i in range(k):
if soft_assign:
f = 'cluster-%d.build.fa' % i
else:
f = 'cluster-%d.fa' % i
if os.path.isfi |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''CMU dict file parser
Copyright (C) 2010
Yosuke Matsusaka
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed und | er the Eclipse Public Li | cense -v 1.0 (EPL)
http://www.opensource.org/licenses/eclipse-1.0.txt
'''
class CMUDict:
    """Utility class to parse the CMU Pronunciation Dictionary
    (festival lisp-style format, e.g. `("hello" nil (hh ax l ow))`)."""

    def __init__(self, fname):
        """Load and parse the dictionary file at `fname`."""
        self._fname = fname
        self._dict = {}
        self.parse(self._fname)

    def parse(self, fname):
        """Parse `fname` into self._dict, mapping each word to a list of
        pronunciation strings. The first (header) line is skipped."""
        # BUGFIX: use a context manager so the file handle is always
        # closed (previously the handle was opened and never closed)
        with open(fname, 'r') as f:
            f.readline()  # skip header line
            for l in f:
                t = l.strip().split(' ', 2)
                # word is wrapped in parens/quotes, e.g. ("hello"
                w = t[0].strip('()"')
                # drop lisp parens and stress digits from the phoneme list
                v = t[2].replace('(', '').replace(')', '').replace(' 0', '').replace(' 1', '')
                try:
                    self._dict[w].append(v)
                except KeyError:
                    self._dict[w] = [v,]

    def lookup(self, w):
        """Return the list of pronunciations for `w`, or [] if unknown."""
        try:
            return self._dict[w]
        except KeyError:
            return []
# Smoke test: parse the festival-installed CMU dictionary and look up a word.
# NOTE(review): the path is hard-coded to a festival installation; this only
# works where that package is installed.
if __name__ == '__main__':
    doc = CMUDict('/usr/share/festival/dicts/cmu/cmudict-0.4.out')
    print doc.lookup('hello')
|
# -*- coding: utf-8 -*-
# | © 2015 Roberto Lizana (Trey)
# © 2016 Pedro M. Baeza
# © 2017 Rosen Vladimirov
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, SUPERUSER_ID
def post_init_hook(cr, registry):
    """Module-install hook: seed ``res_partner_id_number`` from every
    partner's existing ``company_registry`` value (category 1, status
    'open', active)."""
    cr.execute("""
INSERT INTO res_partner_id_number
(partner_id, name, category_id, status, active)
SELECT id, company_registry, 1, 'open', TRUE
FROM res_partner
WHERE company_registry IS NOT NULL""")
    # NOTE(review): `env` appears unused in this excerpt — presumably the
    # hook continues (or once continued) with ORM work; confirm before
    # removing this line.
    env = api.Environment(cr, SUPERUSER_ID, {})
| |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'débarquer'."""
from math import sqrt
from primaires.interpreteur.commande.commande import Commande
from secondaires.navigation.constantes import *
class CmdDebarquer(Commande):
    """Command 'debarquer' / 'debark': leave the ship you stand on."""

    def __init__(self):
        """Command constructor."""
        Commande.__init__(self, "debarquer", "debark")
        self.nom_categorie = "navire"
        self.aide_courte = "débarque du navire"
        self.aide_longue = \
            "Cette commande permet de débarquer du navire sur lequel " \
            "on se trouve. On doit se trouver assez prêt d'une côte " \
            "pour débarquer dessus."

    def interpreter(self, personnage, dic_masques):
        """Interpret the command: move the character from the ship room to
        the closest dockable shore room (distance < 2), if any."""
        salle = personnage.salle
        # reject when the current room is not aboard a ship
        if not hasattr(salle, "navire") or salle.navire is None:
            personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
            return

        navire = salle.navire
        # the ship must be on a body of water
        if navire.etendue is None:
            personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
            return

        personnage.agir("bouger")
        # Search for the closest shore room of the water body
        etendue = navire.etendue
        d_salle = None # destination room
        distance = 2   # only shores strictly closer than 2 qualify
        x, y, z = salle.coords.tuple()
        for t_salle in etendue.cotes.values():
            if t_salle.coords.z == z:
                t_x, t_y, t_z = t_salle.coords.tuple()
                t_distance = sqrt((x - t_x) ** 2 + (y - t_y) ** 2)
                # keep the nearest room whose terrain allows docking
                if t_distance < distance and t_salle.nom_terrain in \
                        TERRAINS_ACCOSTABLES:
                    d_salle = t_salle
                    distance = t_distance

        if d_salle is None:
            personnage << "|err|Aucun quai n'a pu être trouvé à " \
                    "proximité.|ff|"
            return

        # move the character and narrate the jump to both rooms
        personnage.salle = d_salle
        personnage << "Vous sautez sur {}.".format(
                d_salle.titre.lower())
        personnage << d_salle.regarder(personnage)
        d_salle.envoyer("{{}} arrive en sautant depuis {}.".format(
                navire.nom), personnage)
        salle.envoyer("{{}} saute sur {}.".format(
                d_salle.titre.lower()), personnage)
        importeur.hook["personnage:deplacer"].executer(
                personnage, d_salle, None, 0)
        # remind the player to moor the ship when debarking onto land
        if not hasattr(d_salle, "navire") or d_salle.navire is None:
            personnage.envoyer_tip("N'oubliez pas d'amarrer votre navire " \
                    "avec %amarre% %amarre:attacher%.")
|
),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
    """Convert a microsecond count (string) into a timedelta."""
    microseconds = int(s)
    return _datetime.timedelta(microseconds=microseconds)
def _convert_realtime(t):
    """Convert a realtime timestamp in microseconds to a datetime.

    Divide by a float so sub-second precision survives on Python 2,
    where int / int truncates (the module supports Python 2, see the
    version checks elsewhere in this file).
    """
    return _datetime.datetime.fromtimestamp(t / 1000000.0)
def _convert_timestamp(s):
    """Convert a microsecond timestamp string to a datetime.

    Float division keeps sub-second precision on Python 2, where
    int / int would truncate the microseconds.
    """
    return _datetime.datetime.fromtimestamp(int(s) / 1000000.0)
# On Python 3 journal field values arrive as bytes and must be decoded
# before constructing a UUID; on Python 2 uuid.UUID accepts them directly.
if _sys.version_info >= (3,):
    def _convert_uuid(s):
        return _uuid.UUID(s.decode())
else:
    _convert_uuid = _uuid.UUID
D | EFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_P | ID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
# Characters permitted in journal field names: uppercase letters, digits
# and underscore (see systemd.journal-fields(7)).
# BUGFIX: digits were previously rejected even though field names may
# contain them; the letter set also had 'S'/'T' transposed (harmless in a
# set, but confusing).
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')

def _valid_field_name(s):
    """Return True if `s` contains only characters valid in a field name."""
    return not (set(s) - _IDENT_LETTER)
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
    def __init__(self, flags=0, path=None, converters=None):
        """Create an instance of Reader, which allows filtering and
        return of journal entries.
        Argument `flags` sets open flags of the journal, which can be one
        of, or ORed combination of constants: LOCAL_ONLY (default) opens
        journal on local machine only; RUNTIME_ONLY opens only
        volatile journal files; and SYSTEM_ONLY opens only
        journal files of system services and the kernel.
        Argument `path` is the directory of journal files. Note that
        `flags` and `path` are exclusive.
        Argument `converters` is a dictionary which updates the
        DEFAULT_CONVERTERS to convert journal field values. Field
        names are used as keys into this dictionary. The values must
        be single argument functions, which take a `bytes` object and
        return a converted value. When there's no entry for a field
        name, then the default UTF-8 decoding will be attempted. If
        the conversion fails with a ValueError, unconverted bytes
        object will be returned. (Note that ValueEror is a superclass
        of UnicodeDecodeError).
        Reader implements the context manager protocol: the journal
        will be closed when exiting the block.
        """
        super(Reader, self).__init__(flags, path)
        if _sys.version_info >= (3,3):
            # ChainMap() starts with one fresh empty dict, which receives
            # any later writes; user-supplied converters are consulted
            # before DEFAULT_CONVERTERS because they appear earlier in
            # the lookup chain.
            self.converters = _ChainMap()
            if converters is not None:
                self.converters.maps.append(converters)
            self.converters.maps.append(DEFAULT_CONVERTERS)
        else:
            # pre-3.3 fallback: flat copy with user converters overriding
            # the defaults
            self.converters = DEFAULT_CONVERTERS.copy()
            if converters is not None:
                self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
    def __iter__(self):
        """Part of iterator protocol.
        Returns self.
        """
        # iteration advances via get_next() (see __next__/next below)
        return self
    # Define the iterator-advance method under the name the running Python
    # major version expects: __next__ on Python 3, next on Python 2.
    if _sys.version_info >= (3,):
        def __next__(self):
            """Part of iterator protocol.
            Returns self.get_next().
            """
            return self.get_next()
    else:
        def next(self):
            """Part of iterator protocol.
            Returns self.get_next().
            """
            return self.get_next()
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
Optional skip value will return the `skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
"""Return the previous log entry as a mapping type,
currently a standard dictionary of fields.
Optional skip value will return the -`skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
Equivalent to get_next(-skip).
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return unique values appearing in the journal for given `field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal. `timeout` is the maximum
time in seconds to wait, or None, to wait forever.
Returns one of NOP (no change), APPEND (new entries have been
added to the end of the journal), or INVALIDATE (journal files
have been added or removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `realtime` time.
Argument `realtime` must be either an integer unix timestamp
or datetime.datetime instance.
"""
if isinstance(realtime, _datetime.datetime):
realtime = float(realtime.strftime("%s.%f")) * 10000 |
# distutils packaging script for the `jerboa` package.
from distutils.core import setup
setup(
    name='jerboa',
    packages=['jerboa'],  # this must be the same as the name above
    version='0.2.1-alpha',
    description='',
    author='Matt Badger',
    author_email='foss@lighthouseuk.net',
    url='https://github.com/LighthouseUK/jerboa',  # use the URL to the github repo
    download_url='https://github.com/LighthouseUK/jerboa/tarball/0.2.1-alpha',  # must match a git tag of the same name
    keywords=['gae', 'lighthouse', 'jerboa', 'webapp2'],  # arbitrary keywords
    classifiers=[],
    requires=['webapp2', 'blinker', 'wtforms', 'jinja2', 'pytz', 'babel', 'pycrypto'],
    # tests_require=['WebTest']
)
|
##
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we | have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfun | d = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
raise AssertionError("Accepted invalid option foo")
except JSONRPCException as e:
assert("Unexpected key foo" in e.error['message'])
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': 'foobar'})
raise AssertionError("Accepted invalid elysium address")
except JSONRPCException as e:
assert("changeAddress must be a valid elysium address" in e.error['message'])
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 2})
except JSONRPCException as e:
assert('changePosition out of bounds' == e.error['message'])
else:
assert(False)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0];
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
|
import unittest
import click
from tests.test_app import TestApp
from tests.test_models import TestModels
from tests.test_utils import TestUtils
# Click group collecting the individual test commands below; a subcommand
# is required (invoke_without_command=False). No docstring on purpose —
# click would surface it as help text.
@click.group(name='tests', invoke_without_command=False)
def test():
    pass
@test.command(name='test_models')
def test_models():
    """Tests implemented models"""
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestModels))
@test.command(name='test_utils')
def test_utils():
    """Tests utility functions"""
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestUtils))
@test.command(name='test_app')
def test_app():
    """Tests the application"""
    # BUGFIX: the docstring (shown by click as the command's help text)
    # said "Tests utility functions" — copy-pasted from test_utils.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestApp)
    unittest.TextTestRunner(verbosity=2).run(suite)
# Allow invoking the test CLI directly: `python <this file> <command>`.
if __name__ == '__main__':
    test()
|
ntext else {}
conn = http_client.HTTPSConnection(server, port=port, **kwargs)
elif protocol == "http":
conn = http_client.HTTPConnection(server, port=port)
else:
raise Exception("Protocol " + protocol + " not supported.")
conn.request("GET", path)
response = conn.getresponse()
if response.status == 200:
try:
tree = ElementTree.fromstring(response.read())
return tree
except ExpatError:
pass
return None
## Private method that returns an ElementTree describing the API versions
## supported by the specified server. The result will be vimServiceVersions.xml
## if it exists, otherwise vimService.wsdl if it exists, otherwise None.
def __GetServiceVersionDescription(protocol, server, port, path, sslContext):
    """
    Private method that returns a root from an ElementTree describing the API
    versions supported by the specified server. The result is
    vimServiceVersions.xml if it exists, otherwise vimService.wsdl if it
    exists, otherwise None.
    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type protocol: string
    @param server: Which server to connect to.
    @type server: string
    @param port: Port
    @type port: int
    @param path: Path
    @type path: string
    @param sslContext: SSL Context describing the various SSL options. It is only
                       supported in Python 2.7.9 or higher.
    @type sslContext: SSL.Context
    """
    # try each candidate document in order of preference
    for document in ('/vimServiceVersions.xml', '/vimService.wsdl'):
        tree = __GetElementTree(protocol, server, port, path + document,
                                sslContext)
        if tree is not None:
            return tree
    return None
## Private method that returns true if the service version description document
## indicates that the desired version is supported
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
    """
    Private method that returns true if the service version description document
    indicates that the desired version is supported
    @param desiredVersion: The version we want to see if the server supports
                           (eg. vim.version.version2).
    @type desiredVersion: string
    @param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
                                      or vimService.wsdl.
    @type serviceVersionDescription: root ElementTree
    """
    root = serviceVersionDescription
    if root.tag == 'namespaces':
        # serviceVersionDescription appears to be a vimServiceVersions.xml document
        if root.get('version') != '1.0':
            raise RuntimeError('vimServiceVersions.xml has version %s,' \
                ' which is not understood' % (root.get('version')))
        desiredVersionId = versionIdMap[desiredVersion]
        # CLEANUP: removed dead `supportedVersion = None` — that local was
        # never read in this branch.
        # The desired version is supported if it matches a namespace's own
        # version, or appears among its prior versions.
        for namespace in root.findall('namespace'):
            versionId = namespace.findtext('version')
            if versionId == desiredVersionId:
                return True
            else:
                for versionId in namespace.findall('priorVersions/version'):
                    if versionId.text == desiredVersionId:
                        return True
    else:
        # serviceVersionDescription must be a vimService.wsdl document
        wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
        importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
        supportedVersion = versionMap[importElement.get('namespace')[4:]]
        if IsChildVersion(supportedVersion, desiredVersion):
            return True
    return False
## Private method that returns the most preferred API version supported by the
## specified server,
def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions, sslContext):
    """
    Private method that returns the most preferred API version supported by the
    specified server, or None if none of them is supported.
    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type protocol: string
    @param server: Which server to connect to.
    @type server: string
    @param port: Port
    @type port: int
    @param path: Path
    @type path: string
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred.
    @type preferredApiVersions: string or string list
    @param sslContext: SSL Context describing the various SSL options. It is only
                       supported in Python 2.7.9 or higher.
    @type sslContext: SSL.Context
    """
    description = __GetServiceVersionDescription(protocol, server, port,
                                                 path, sslContext)
    if description is None:
        return None

    # normalise a single version string to a one-element list
    if isinstance(preferredApiVersions, list):
        candidates = preferredApiVersions
    else:
        candidates = [preferredApiVersions]

    for candidate in candidates:
        if __VersionIsSupported(candidate, description):
            return candidate
    return None
def SmartStubAdapter(host='localhost', port=443, path='/sdk',
                     url=None, sock=None, poolSize=5,
                     certFile=None, certKeyFile=None,
                     httpProxyHost=None, httpProxyPort=80, sslProxyPath=None,
                     thumbprint=None, cacertsFile=None, preferredApiVersions=None,
                     acceptCompressedResponses=True,
                     connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
                     samlToken=None, sslContext=None):
    """
    Determine the most preferred API version supported by the specified server,
    then create a soap stub adapter using that version.
    The parameters are the same as for pyVmomi.SoapStubAdapter except for
    version, which is renamed to preferredApiVersions.
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred. If None is
                                 specified, the list of versions support by pyVmomi will
                                 be used.
    @type preferredApiVersions: string or string list
    """
    if preferredApiVersions is None:
        preferredApiVersions = GetServiceVersions('vim25')

    sslContext = localSslFixup(host, sslContext)

    # probe the server for the best mutually supported API version
    # (NOTE(review): protocol is chosen by the sign of `port` — positive
    # means https — which mirrors SoapStubAdapter's convention)
    supportedVersion = __FindSupportedVersion('https' if port > 0 else 'http',
                                              host,
                                              port,
                                              path,
                                              preferredApiVersions,
                                              sslContext)
    if supportedVersion is None:
        raise Exception("%s:%s is not a VIM server" % (host, port))

    return SoapStubAdapter(host=host, port=port, path=path,
                           url=url, sock=sock, poolSize=poolSize,
                           certFile=certFile, certKeyFile=certKeyFile,
                           httpProxyHost=httpProxyHost, httpProxyPort=httpProxyPort,
                           sslProxyPath=sslProxyPath, thumbprint=thumbprint,
                           cacertsFile=cacertsFile, version=supportedVersion,
                           acceptCompressedResponses=acceptCompressedResponses,
                           connectionPoolTimeout=connectionPoolTimeout,
                           samlToken=samlToken, sslContext=sslContext)
def SmartConnect(protocol='https', host='localhost', port=443, user='root', pwd='',
service="hostd", path="/sdk",
preferredApiVersions=None, keyFile=None, certFile=None,
thumbprint=None, sslContext=None, b64token=None, mechanism='userpass'):
"""
Determine the most preferred API version supporte |
p
# preserves the getitem optimization
out = ddf.to_frame().to_parquet(tmp_path_wt, engine=engine, compute=False)
dsk = optimize_dataframe_getitem(out.dask, keys=[out.key])
read = [key for key in dsk.layers if key.startswith("read-parquet")][0]
subgraph = dsk.layers[read]
assert isinstance(subgraph, DataFrameIOLayer)
assert subgraph.columns == ["B"]
assert next(iter(subgraph.dsk.values()))[0].columns == ["B"]
assert_eq(ddf.compute(optimize_graph=False), ddf.compute())
def test_getitem_optimization_empty(tmpdir, engine):
    """Selecting zero columns should still optimize to a DataFrameIOLayer."""
    frame = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
    ddf = dd.from_pandas(frame, 2)
    path = os.path.join(str(tmpdir))
    ddf.to_parquet(path, engine=engine)

    empty_selection = dd.read_parquet(path, columns=[], engine=engine)
    dsk = optimize_dataframe_getitem(empty_selection.dask, keys=[empty_selection._name])
    io_layer = next(iter(dsk.layers.values()))
    assert isinstance(io_layer, DataFrameIOLayer)
    assert io_layer.columns == []
def test_getitem_optimization_multi(tmpdir, engine):
    """Several distinct column selections must match their unoptimized results."""
    frame = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
    ddf = dd.from_pandas(frame, 2)
    path = os.path.join(str(tmpdir))
    ddf.to_parquet(path, engine=engine)

    series_b = dd.read_parquet(path, engine=engine)["B"]
    frame_c = dd.read_parquet(path, engine=engine)[["C"]]
    frame_ca = dd.read_parquet(path, engine=engine)[["C", "A"]]

    optimized = dask.compute(series_b, frame_c, frame_ca)
    unoptimized = dask.compute(series_b, frame_c, frame_ca, optimize_graph=False)
    for opt_result, raw_result in zip(optimized, unoptimized):
        assert_eq(opt_result, raw_result)
@ANY_ENGINE_MARK
def test_blockwise_parquet_annotations(tmpdir):
    """Annotations set around read_parquet end up on the IO layer."""
    pdf = pd.DataFrame({"a": np.arange(40, dtype=np.int32)})
    dd.from_pandas(pdf, npartitions=2).to_parquet(str(tmpdir))
    with dask.annotate(foo="bar"):
        ddf = dd.read_parquet(str(tmpdir))
    # The collection should consist of exactly one Blockwise IO layer.
    graph_layers = ddf.__dask_graph__().layers
    assert len(graph_layers) == 1
    (io_layer,) = graph_layers.values()
    assert isinstance(io_layer, DataFrameIOLayer)
    assert io_layer.annotations == {"foo": "bar"}
@ANY_ENGINE_MARK
def test_optimize_blockwise_parquet(tmpdir):
    """read_parquet yields a single Blockwise layer, and subsequent
    elementwise operations fuse with it under ``optimize_blockwise``."""
    size = 40
    npartitions = 2
    tmp = str(tmpdir)
    df = pd.DataFrame({"a": np.arange(size, dtype=np.int32)})
    expect = dd.from_pandas(df, npartitions=npartitions)
    expect.to_parquet(tmp)
    ddf = dd.read_parquet(tmp)
    # `ddf` should now have ONE Blockwise layer
    layers = ddf.__dask_graph__().layers
    assert len(layers) == 1
    assert isinstance(list(layers.values())[0], Blockwise)
    # Check single-layer result
    assert_eq(ddf, expect)
    # Increment by 1
    ddf += 1
    expect += 1
    # Increment by 10
    ddf += 10
    expect += 10
    # Each elementwise add produced one more layer:
    # `ddf` should now have THREE Blockwise layers
    layers = ddf.__dask_graph__().layers
    assert len(layers) == 3
    assert all(isinstance(layer, Blockwise) for layer in layers.values())
    # Check that `optimize_blockwise` fuses all three
    # `Blockwise` layers together into a single `Blockwise` layer
    keys = [(ddf._name, i) for i in range(npartitions)]
    graph = optimize_blockwise(ddf.__dask_graph__(), keys)
    layers = graph.layers
    name = list(layers.keys())[0]
    assert len(layers) == 1
    assert isinstance(layers[name], Blockwise)
    # Fusion must not change the computed result
    assert_eq(ddf, expect)
@PYARROW_MARK
def test_split_row_groups(tmpdir, engine):
    """Test split_row_groups read_parquet kwarg"""
    tmp = str(tmpdir)
    df = pd.DataFrame(
        {"i32": np.arange(800, dtype=np.int32), "f": np.arange(800, dtype=np.float64)}
    )
    df.index.name = "index"
    half = len(df) // 2
    # First half: 400 rows as 2 files of 200 rows with 100-row row-groups,
    # i.e. 2 row-groups per file, 4 row-groups total.
    dd.from_pandas(df.iloc[:half], npartitions=2).to_parquet(
        tmp, engine="pyarrow", row_group_size=100
    )
    # chunksize=1 forces one output partition per row-group: 4 partitions.
    ddf3 = dd.read_parquet(tmp, engine=engine, split_row_groups=True, chunksize=1)
    assert ddf3.npartitions == 4
    # Without splitting: one partition per file, i.e. 2 partitions.
    ddf3 = dd.read_parquet(
        tmp, engine=engine, gather_statistics=True, split_row_groups=False
    )
    assert ddf3.npartitions == 2
    # Second half appended as 2 more files with 50-row row-groups:
    # 4 row-groups per file, 8 more row-groups (12 total).
    dd.from_pandas(df.iloc[half:], npartitions=2).to_parquet(
        tmp, append=True, engine="pyarrow", row_group_size=50
    )
    ddf3 = dd.read_parquet(
        tmp,
        engine=engine,
        gather_statistics=True,
        split_row_groups=True,
        chunksize=1,
    )
    assert ddf3.npartitions == 12
    # One partition per file again: now 4 files.
    ddf3 = dd.read_parquet(
        tmp, engine=engine, gather_statistics=True, split_row_groups=False
    )
    assert ddf3.npartitions == 4
@PYARROW_MARK
@pytest.mark.parametrize("split_row_groups", [1, 12])
@pytest.mark.parametrize("gather_statistics", [True, False])
def test_split_row_groups_int(tmpdir, split_row_groups, gather_statistics, engine):
    """Integer split_row_groups aggregates that many row-groups per partition."""
    path = str(tmpdir)
    row_group_size = 10
    npartitions = 4
    half_size = 400
    pdf = pd.DataFrame(
        {
            "i32": np.arange(2 * half_size, dtype=np.int32),
            "f": np.arange(2 * half_size, dtype=np.float64),
        }
    )
    mid = len(pdf) // 2
    # Write the dataset in two appended halves, each with fixed-size row-groups.
    dd.from_pandas(pdf.iloc[:mid], npartitions=npartitions).to_parquet(
        path, engine="pyarrow", row_group_size=row_group_size
    )
    dd.from_pandas(pdf.iloc[mid:], npartitions=npartitions).to_parquet(
        path, append=True, engine="pyarrow", row_group_size=row_group_size
    )
    result = dd.read_parquet(
        path,
        engine=engine,
        split_row_groups=split_row_groups,
        gather_statistics=gather_statistics,
    )
    row_groups_per_half = int(half_size / row_group_size)
    assert result.npartitions == 2 * math.ceil(row_groups_per_half / split_row_groups)
@PYARROW_MARK
@pytest.mark.parametrize("split_row_groups", [8, 25])
def test_split_row_groups_int_aggregate_files(tmpdir, engine, split_row_groups):
    """aggregate_files=True may combine row-groups across file boundaries."""
    row_group_size = 10
    size = 800
    pdf = pd.DataFrame(
        {
            "i32": np.arange(size, dtype=np.int32),
            "f": np.arange(size, dtype=np.float64),
        }
    )
    # Write a multi-file dataset with several row-groups per file (pyarrow).
    dd.from_pandas(pdf, npartitions=4).to_parquet(
        str(tmpdir), engine="pyarrow", row_group_size=row_group_size, write_index=False
    )
    # Read back, packing `split_row_groups` row-groups into each partition
    # while allowing aggregation across files.
    result = dd.read_parquet(
        str(tmpdir),
        engine=engine,
        split_row_groups=split_row_groups,
        aggregate_files=True,
    )
    assert result.npartitions == math.ceil((size / row_group_size) / split_row_groups)
    assert len(result) == size
    assert_eq(pdf, result, check_index=False)
@PYARROW_MARK
def test_split_row_groups_filter(tmpdir, engine):
    """Filters keep working when row-groups are split into partitions."""
    path = str(tmpdir)
    pdf = pd.DataFrame(
        {"i32": np.arange(800, dtype=np.int32), "f": np.arange(800, dtype=np.float64)}
    )
    pdf.index.name = "index"
    search_val = 600
    filters = [("f", "==", search_val)]
    dd.from_pandas(pdf, npartitions=4).to_parquet(
        path, append=True, engine="pyarrow", row_group_size=50
    )
    unfiltered = dd.read_parquet(path, engine=engine)
    filtered = dd.read_parquet(
        path,
        engine=engine,
        gather_statistics=True,
        split_row_groups=True,
        filters=filters,
    )
    # The filtered read must still contain the searched value ...
    assert (filtered["i32"] == search_val).any().compute()
    # ... and selecting it must match the unfiltered read.
    assert_eq(
        unfiltered[unfiltered["i32"] == search_val].compute(),
        filtered[filtered["i32"] == search_val].compute(),
    )
@ANY_ENGINE_MARK
def test_optimize_getitem_and_nonblockwise(tmpdir):
    """Column selection optimizes cleanly even feeding a non-blockwise op."""
    path = os.path.join(tmpdir, "path.parquet")
    pdf = pd.DataFrame(
        {"a": [3, 4, 2], "b": [1, 2, 4], "c": [5, 4, 2], "d": [1, 2, 3]},
        index=["a", "b", "c"],
    )
    pdf.to_parquet(path)
    ddf = dd.read_parquet(path)
    # rolling() is not blockwise; the getitem optimization must not break it.
    ddf[["a", "b"]].rolling(3).max().compute()
@ANY_ENGINE_MARK
def test_optimize_and_not(tmpdir):
path = os.path.join(tmpdir, "path.parquet")
df = pd.DataFrame(
{"a": [3, 4, 2], "b": [1, 2, 4], "c": [5, 4, 2], "d": [1, 2, 3]},
index=["a", "b", "c"],
)
df.to_parquet(path)
df2 = dd.read_parquet(path)
df2a = df2["a"].groupby(df2["c"]).first().to_delayed()
|
if len(tier) > 1:
tier2children[tier[0:-1]].add(tier)
else:
tier2children[()].add(tier)
return tier2children
def validate_and_normalize_ip(ip):
    """
    Return a normalized form of ``ip`` if it is a valid IP address,
    otherwise raise ValueError. The address is converted to lower
    case; IPv6 addresses are additionally fully expanded.
    """
    lowered = ip.lower()
    if is_valid_ipv4(lowered):
        return lowered
    if is_valid_ipv6(lowered):
        return expand_ipv6(lowered)
    raise ValueError('Invalid ip %s' % ip)
def validate_and_normalize_address(address):
    """
    Return a normalized form of ``address`` if it is a valid IP address
    or hostname, otherwise raise ValueError. Hostnames are lower-cased;
    IPv6 addresses are lower-cased and fully expanded.

    Per RFC1123 2.1 (Host Names and Numbers), a dotted-decimal or IPv6
    literal may be disambiguated by enclosing it in "[ ]" brackets; a
    bracketed address is therefore validated strictly as an IP literal.
    A bare value is checked as IPv4, then IPv6, then hostname — this is
    safe because a valid host name can never have the purely numeric
    #.#.#.# form (the highest-level label must be alphabetic).
    """
    unbracketed = address.lstrip('[').rstrip(']')
    if address.startswith('[') and address.endswith(']'):
        # Bracketed form must be an IP literal, e.g. "[::1]".
        return validate_and_normalize_ip(unbracketed)
    unbracketed = unbracketed.lower()
    if is_valid_ipv4(unbracketed):
        return unbracketed
    if is_valid_ipv6(unbracketed):
        return expand_ipv6(unbracketed)
    if is_valid_hostname(unbracketed):
        return unbracketed
    raise ValueError('Invalid address %s' % address)
def is_valid_hostname(hostname):
    """
    Return True if the provided hostname is a valid hostname: 1-255
    characters overall, dot-separated labels of 1-63 alphanumeric or
    hyphen characters that neither start nor end with a hyphen. A
    single trailing dot (FQDN form) is accepted.
    """
    if len(hostname) < 1 or len(hostname) > 255:
        return False
    if hostname.endswith('.'):
        # strip exactly one dot from the right, if present
        hostname = hostname[:-1]
    # Raw string: "\d" in a plain literal is an invalid string escape
    # (SyntaxWarning on modern CPython) even though re accepts it.
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))
def is_local_device(my_ips, my_port, dev_ip, dev_port):
    """
    Return True if the provided dev_ip and dev_port are among the IP
    addresses specified in my_ips and my_port respectively.
    To support accurate locality determination in the server-per-port
    deployment, when my_port is None, only IP addresses are used for
    determining locality (dev_port is ignored).
    If dev_ip is a hostname then it is first translated to an IP
    address before checking it against my_ips.
    """
    # Collect every IP the device may be reachable at; a hostname can
    # resolve to several addresses (IPv4 and IPv6).
    candidate_ips = []
    if not is_valid_ip(dev_ip) and is_valid_hostname(dev_ip):
        try:
            # get the ip for this host; use getaddrinfo so that
            # it works for both ipv4 and ipv6 addresses
            addrinfo = socket.getaddrinfo(dev_ip, dev_port)
            for addr in addrinfo:
                family = addr[0]
                dev_ip = addr[4][0]  # get the ip-address
                if family == socket.AF_INET6:
                    # normalize so comparison against my_ips (assumed to be
                    # expanded/lowercase) is exact
                    dev_ip = expand_ipv6(dev_ip)
                candidate_ips.append(dev_ip)
        except socket.gaierror:
            # unresolvable hostname cannot be local
            return False
    else:
        if is_valid_ipv6(dev_ip):
            dev_ip = expand_ipv6(dev_ip)
        candidate_ips = [dev_ip]
    # Local when any candidate address matches one of ours and, unless
    # my_port is None (server-per-port mode), the port matches too.
    for dev_ip in candidate_ips:
        if dev_ip in my_ips and (my_port is None or dev_port == my_port):
            return True
    return False
def parse_search_value(search_value):
"""The <search-value> can be of the form::
d<device_id>r<region>z<zone>-<ip>:<port>R<r_ip>:<r_port>/
<device_name>_<meta>
Where <r_ip> and <r_port> are replication ip and port.
Any part is optional, but you must include at least one part.
Examples::
d74 Matches the device id 74
r4 Matches devices in region 4
z1 Matches devices in zone 1
z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4
1.2.3.4 Matches devices in any zone with the ip 1.2.3.4
z1:5678 Matches devices in zone 1 using port 5678
:5678 Matches devices that use port 5678
R5.6.7.8 Matches devices that use replication ip 5.6.7.8
R:5678 Matches devices that use replication port 5678
1.2.3.4R5.6.7.8 Matches devices that use ip 1.2.3.4 and replication ip
5.6.7.8
/sdb1 Matches devices with the device name sdb1
_shiny Matches devices with shiny in the meta data
_"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
[::1] Matches devices in any zone with the ip ::1
z1-[::1]:5678 Matches devices in zone 1 with ip ::1 and port 5678
Most specific example::
d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
Nerd explanation:
All items require their single character prefix except the ip, in which
case the - is optional unless the device id or zone is also included.
"""
orig_search_value = search_value
match = {}
if search_value.startswith('d'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['id'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('r'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['region'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('z'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['zone'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('-'):
search_value = search_value[1:]
if search_value and search_value[0].isdigit():
i = 1
while i < len(search_value) and search_value[i] in '0123456789.':
i += 1
match['ip'] = search_value[:i]
search_value = search_value[i:]
elif search_value and search_value.startswith('['):
i = 1
while i < len(search_value) and search_value[i] != ']':
i += 1
i += 1
match['ip'] = search_value[:i].lstrip('[').rstrip(']')
search_value = search_value[i:]
if 'ip' in match:
# ipv6 addresses are converted to all lowercase
# and use the fully expanded representation
match['ip'] = validate_and_normalize_ip(match['ip'])
if search_value.startswith(':'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['port'] = int(search_value[1:i])
search_value = search_value[i:]
# replication parameters
if search_value.startswith('R'):
search_value = search_value[1:]
if search_value and search_value[0].isdigit():
i = 1
while (i < len(search_value) and
search_value[i] in '0123456789.'):
i += 1
match['replication_ip'] = search_value[:i]
search_value = search_value[i:]
elif search_value and search_value.startswith('['):
i = 1
while i < len(search_value) and search_value[i] != ']':
i += 1
i += 1
|
"""
__author__ = 'Christopher Fagiani'
"""
import sys, argparse, json, string, collections
from datetime import datetime
SummaryData = collections.namedtuple('SummaryData', 'wordCounts articles commentCount authors anonCount')
def main(args):
    """Entry point: build word-cloud JSON data from the parsed CLI arguments."""
    process_data(args.threshold, args.inputFile, args.stopFile, args.outputFile)
def process_data(threshold, dataFile, stopwordsFile, outputFile, interval=None):
    """Load comment data, count words (minus stopwords) and emit JSON output."""
    with open(dataFile) as in_file:
        comments = json.load(in_file)
    stopwords = load_stopwords(stopwordsFile)
    summary = build_counts(comments, stopwords)
    write_json(summary, outputFile, int(threshold), True, interval)
def write_json(summary_data, outputFile, threshold, as_variable=True, interval=None):
    """Write word-count records with count >= threshold to ``outputFile``.

    When ``as_variable`` is true (the default) the data is emitted as
    javascript variable declarations (summary stats plus a ``words``
    array) rather than a bare JSON array.
    """
    ranked = sorted(
        summary_data.wordCounts.items(), key=lambda kv: kv[1]['count'], reverse=True
    )
    with open(outputFile, 'w') as out_file:
        if as_variable:
            out_file.write("var lastUpdated='" + datetime.now().strftime("%D at %H:%M:%S") + "';\n")
            out_file.write("var threshold='" + str(threshold) + "';\n")
            out_file.write("var commentCount='" + str(summary_data.commentCount) + "';\n")
            out_file.write("var articleCount='" + str(len(summary_data.articles)) + "';\n")
            out_file.write("var authorCount='" + str(len(summary_data.authors)) + "';\n")
            out_file.write("var anonCount='" + str(summary_data.anonCount) + "';\n")
            if interval is not None:
                out_file.write("var intervalHrs='" + interval + "';\n")
            else:
                out_file.write("var intervalHrs='unknown';\n")
            out_file.write("var words = [")
        else:
            out_file.write("[")
        written = 0
        for _, record in ranked:
            # Records are sorted by count, so stop at the first one below
            # the threshold.
            if record['count'] < threshold:
                break
            if written > 0:
                out_file.write(",")
            written += 1
            out_file.write(json.dumps(record))
        out_file.write("]")
def load_stopwords(filename):
    """Return the set of stopwords read from ``filename``, one per line."""
    with open(filename) as stop_file:
        return {line.strip() for line in stop_file}
def build_counts(data, stop_words, lemmatize=True):
    """Build word-frequency data from a list of comment records.

    Returns a SummaryData whose wordCounts maps the lowercase (optionally
    lemmatized) form of each sanitized word to a dict holding the raw
    word, its total count, and the articles (link -> title) it appeared
    in. Comments scoring >= .7 on the spam heuristic are skipped.

    :param data: iterable of comment dicts with 'msg', 'link', 'title',
        'author' and (optionally) 'authorId' keys
    :param stop_words: set of words to exclude from the counts
    :param lemmatize: when True, words are lemmatized via nltk/wordnet
    """
    words = {}
    comment_count = 0
    articles = set()
    authors = set()
    anon_count = 0
    spam_count = 0  # skipped-comment tally; not returned, kept for diagnostics
    lm = None
    if lemmatize:
        lm = initialize_lemmatizer()
    for item in data:
        if compute_spam_score(item['msg']) >= .7:
            spam_count += 1
            continue
        comment_count += 1
        articles.add(item['link'])
        text = item['msg']
        authors.add(item['author'])
        # Comments without an author id are counted as anonymous.
        if item.get('authorId') is None:
            anon_count += 1
        for word in text.split():
            raw_word = sanitize_word(word)
            word = raw_word.lower()
            if lemmatize:
                word = lemmatize_word(lm, word)
            # Skip stopwords and anything containing non-printable characters.
            if word not in stop_words and all(c in string.printable for c in word):
                record = words.get(word, None)
                if record is None:
                    # BUG FIX: removed stray debug statement
                    # (`if raw_word == 'us': print word`) which polluted
                    # stdout and is a syntax error under Python 3.
                    record = {'count': 1, 'word': raw_word,
                              'articles': {item['link']: item['title']}}
                    words[word] = record
                else:
                    record['count'] = record['count'] + 1
                    record['articles'][item['link']] = item['title']
    return SummaryData(words, articles, comment_count, authors, anon_count)
def initialize_lemmatizer():
    """Create and return an nltk WordNetLemmatizer.

    Requires nltk and the wordnet corpus (after installing nltk, run
    ``import nltk; nltk.download()`` once).
    """
    from nltk.stem.wordnet import WordNetLemmatizer
    lemmatizer = WordNetLemmatizer()
    return lemmatizer
def lemmatize_word(lm, word):
    """Lemmatize ``word`` with the nltk lemmatizer ``lm``.

    The part of speech is unknown, so the word is lemmatized twice (as a
    noun and as a verb) and whichever form differs from the input wins;
    the noun form takes precedence when both changed. Not guaranteed to
    be correct, but good enough for word counting.
    """
    as_noun = lm.lemmatize(word, 'n')
    as_verb = lm.lemmatize(word, 'v')
    return as_verb if as_noun == word else as_noun
def sanitize_word(word):
    """Return ``word`` with common punctuation characters removed."""
    for punct in ('.', ',', '?', ':', '(', ')', '*', ';', '"', '!', ']', '['):
        word = word.replace(punct, "")
    return word
def compute_spam_score(comment_text):
    """
    Compute a "spam score": the likelihood that a comment is spam,
    expressed as a value between 0 and 1.

    Heuristics: "earn money"-style wording combined with mentions of the
    internet or a link, hourly/monthly pay combined with "job" or a
    link, and the bare presence of a link all add indicator points.

    :param comment_text: comment to score
    :return: value between 0 (definitely not spam) and 1 (definitely spam)
    """
    text = comment_text.lower()
    indicators = 0
    contains_link = "http://" in text or "href=" in text
    if "getting paid" in text or "earn" in text or "earning" in text:
        if "internet" in text:
            indicators += 5
        if contains_link:
            indicators += 5
    if "an hour" in text or "hourly" in text or "/hr" in text or "monthly" in text:
        if "job" in text:
            indicators += 3
        if contains_link:
            indicators += 5
    if contains_link:
        indicators += 2
    return indicators / 20.0
if __name__ == "__main__":
    # Command-line entry point: parse arguments and hand off to main().
    parser = argparse.ArgumentParser(description="Build a tag cloud from json data")
    parser.add_argument("-i", "--input", metavar='inputfile', required=True,
                        help='file containing json data', dest='inputFile')
    parser.add_argument("-o", "--output", metavar='outputFile', required=True,
                        help='output file', dest='outputFile')
    parser.add_argument("-s", "--stopfile", metavar="stopwordFile", default="stopwords.txt",
                        help="stopwords file", dest="stopFile")
    parser.add_argument("-t", "--threshold", metavar="countThreshold", default=4,
                        help="count threshold", dest="threshold")
    main(parser.parse_args())
|
f not self.request:
self.request = Request()
self.request.RequestName = 'noname_request'
self.request.SourceComponent = 'FailoverTransfer'
self.defaultChecksumType = defaultChecksumType
sel | f.registrationProtocols = getRegistrationProtocols()
| #############################################################################
def transferAndRegisterFile( self,
                             fileName,
                             localPath,
                             lfn,
                             destinationSEList,
                             fileMetaDict,
                             fileCatalog = None,
                             masterCatalogOnly = False ):
    """Performs the transfer and register operation with failover.

    Each SE in destinationSEList is tried in order until one succeeds.
    If the upload to an SE succeeds but the catalog registration fails,
    a registration request is queued instead and the returned metadata
    carries 'registration': 'request'.

    :param fileName: name of the file (used for logging only)
    :param localPath: local path of the file to upload
    :param lfn: logical file name to register
    :param destinationSEList: ordered list of candidate SEs
    :param fileMetaDict: file metadata (GUID, Checksum, Size, ...)
    :param fileCatalog: catalog(s) to register in (None = defaults)
    :param masterCatalogOnly: register only in the master catalog
    :return: S_OK with upload info, or S_ERROR if every SE failed
    """
    errorList = []
    fileGUID = fileMetaDict.get( "GUID", None )
    for se in destinationSEList:
      self.log.info( "Attempting dm.putAndRegister('%s','%s','%s',guid='%s',catalog='%s')" % ( lfn,
                                                                                               localPath,
                                                                                               se,
                                                                                               fileGUID,
                                                                                               fileCatalog ) )
      result = DataManager( catalogs = fileCatalog, masterCatalogOnly = masterCatalogOnly ).putAndRegister( lfn, localPath, se, guid = fileGUID )
      self.log.verbose( result )
      if not result['OK']:
        # Upload itself failed: remember the error and try the next SE.
        self.log.error( 'dm.putAndRegister failed with message', result['Message'] )
        errorList.append( result['Message'] )
        continue
      if not result['Value']['Failed']:
        # Fully successful: uploaded and registered.
        self.log.info( 'dm.putAndRegister successfully uploaded and registered %s to %s' % ( fileName, se ) )
        return S_OK( {'uploadedSE':se, 'lfn':lfn} )
      # Now we know something went wrong
      self.log.warn( "Didn't manage to do everything, now adding requests for the missing operation" )
      errorDict = result['Value']['Failed'][lfn]
      if 'register' not in errorDict:
        # Failure was not in the registration step: treat as upload failure.
        self.log.error( 'dm.putAndRegister failed with unknown error', str( errorDict ) )
        errorList.append( 'Unknown error while attempting upload to %s' % se )
        continue
      # fileDict = errorDict['register']
      # Therefore the registration failed but the upload was successful
      if not fileCatalog:
        fileCatalog = ''
      if masterCatalogOnly:
        fileCatalog = FileCatalog().getMasterCatalogNames()['Value']
      result = self._setRegistrationRequest( lfn, se, fileMetaDict, fileCatalog )
      if not result['OK']:
        self.log.error( 'Failed to set registration request', 'SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
        errorList.append( 'Failed to set registration request for: SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
        continue
      else:
        # Upload succeeded, registration deferred to the queued request.
        self.log.info( 'Successfully set registration request for: SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
        metadata = {}
        metadata['filedict'] = fileMetaDict
        metadata['uploadedSE'] = se
        metadata['lfn'] = lfn
        metadata['registration'] = 'request'
        return S_OK( metadata )
    self.log.error( 'Failed to upload output data file', 'Encountered %s errors' % len( errorList ) )
    return S_ERROR( 'Failed to upload output data file' )
#############################################################################
def transferAndRegisterFileFailover( self,
                                     fileName,
                                     localPath,
                                     lfn,
                                     targetSE,
                                     failoverSEList,
                                     fileMetaDict,
                                     fileCatalog = None,
                                     masterCatalogOnly = False ):
    """Performs the transfer and register operation to failover storage and sets the
    necessary replication and removal requests to recover.

    The file is first uploaded to one of the failover SEs; a request to
    replicate it to ``targetSE`` and a request to remove the temporary
    failover replica are then queued.

    :param fileName: name of the file (used for logging only)
    :param localPath: local path of the file to upload
    :param lfn: logical file name to register
    :param targetSE: final destination SE for the replication request
    :param failoverSEList: ordered list of failover SEs to try
    :param fileMetaDict: file metadata (GUID, Checksum, Size, ...)
    :param fileCatalog: catalog(s) to register in (None = defaults)
    :param masterCatalogOnly: register only in the master catalog
    :return: S_OK with the failover SE and LFN, or S_ERROR
    """
    failover = self.transferAndRegisterFile( fileName, localPath, lfn, failoverSEList, fileMetaDict, fileCatalog, masterCatalogOnly = masterCatalogOnly )
    if not failover['OK']:
      self.log.error( 'Could not upload file to failover SEs', failover['Message'] )
      return failover
    # set removal requests and replication requests
    result = self._setFileReplicationRequest( lfn, targetSE, fileMetaDict, sourceSE = failover['Value']['uploadedSE'] )
    if not result['OK']:
      self.log.error( 'Could not set file replication request', result['Message'] )
      return result
    lfn = failover['Value']['lfn']
    failoverSE = failover['Value']['uploadedSE']
    self.log.info( 'Attempting to set replica removal request for LFN %s at failover SE %s' % ( lfn, failoverSE ) )
    result = self._setReplicaRemovalRequest( lfn, failoverSE )
    if not result['OK']:
      self.log.error( 'Could not set removal request', result['Message'] )
      return result
    return S_OK( {'uploadedSE':failoverSE, 'lfn':lfn} )
def getRequest( self ):
    """ Return the accumulated Request object holding all failover
        operations (replication, registration, removal) queued so far.
    """
    return self.request
def commitRequest( self ):
    """ Send the accumulated request to the Request Management Service.

        An empty request is accepted silently; an invalid one is
        refused without being submitted.
    """
    if self.request.isEmpty():
      return S_OK()
    validation = RequestValidator().validate( self.request )
    if not validation["OK"]:
      return S_ERROR( "Failover request is not valid: %s" % validation["Message"] )
    return ReqClient().putRequest( self.request )
#############################################################################
def _setFileReplicationRequest( self, lfn, targetSE, fileMetaDict, sourceSE = '' ):
    """ Add a ReplicateAndRegister operation for ``lfn`` to the
        accumulated request. Checksum, size and GUID are copied from
        fileMetaDict when present.
    """
    self.log.info( 'Setting ReplicateAndRegister request for %s to %s' % ( lfn, targetSE ) )
    operation = Operation()
    operation.Type = "ReplicateAndRegister"
    operation.TargetSE = targetSE
    if sourceSE:
      operation.SourceSE = sourceSE
    opFile = File()
    opFile.LFN = lfn
    checksum = fileMetaDict.get( "Checksum", None )
    checksumType = fileMetaDict.get( "ChecksumType", self.defaultChecksumType )
    if checksum and checksumType:
      opFile.Checksum = checksum
      opFile.ChecksumType = checksumType
    fileSize = fileMetaDict.get( "Size", 0 )
    if fileSize:
      opFile.Size = fileSize
    fileGUID = fileMetaDict.get( "GUID", "" )
    if fileGUID:
      opFile.GUID = fileGUID
    operation.addFile( opFile )
    self.request.addOperation( operation )
    return S_OK()
#############################################################################
def _setRegistrationRequest( self, lfn, targetSE, fileDict, catalog ):
""" Sets a registration request
:param str lfn: LFN
:param list se: list of SE (or just string)
:param list catalog: list (or string) of catalogs to use
:param dict fileDict: file metadata
"""
self.log.info( 'Setting registration request for %s at %s.' % ( lfn, targetSE ) )
if not isinstance( catalog, list ):
catalog = [catalog]
for cat in catalog:
register = Operation()
register.Type = "RegisterFile"
register.Catalog = cat
register.TargetSE = targetSE
regFile = File()
regFile.LFN = lfn
regFile.Checksum = fileDict.get( "Checksum", "" )
regFile.ChecksumType = fileDict.get( "ChecksumType", self.defaultChecksumType )
regFile.Size = fileDict.get( "Size", 0 )
regFile.GUID = fileDict.get( "GUID", "" )
se = StorageElement( targetSE )
pfn = se.getURL( lfn, self.registrationProtocols )
if not pfn["OK"] or lfn not in pfn["Value"]['Successful']:
self.log.error( "Unable to get PFN for LFN", "%s" % pfn.get( 'Message', pfn.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) ) )
return pfn
regFile.PFN = pfn["Value"]['Successful'][lfn]
register.addFile( regFile )
self.request.addOperation( register )
return S |
def __init__(self, api_url=None, app_name=None, app_pwd=None):
        """Constructor function
        Params:
            api_url: The URL to the Crowd API
            app_name: Application login name for Crowd server
            app_pwd: Application password for Crowd server
        """
        self.crowd_user = app_name
        self.crowd_password = app_pwd
        self.crowd_api = api_url
        # Not decided until config() is called with crowd_use_crowd.
        self.use_crowd = None
        # Default opener bypasses any system proxy; config() may replace it
        # with a proxied opener.
        self.external_opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.ProxyHandler({}))
def config(self, config):
        """
        Configure the crowd client from a configuration mapping.

        :param config: dict providing crowd_app_name, crowd_app_password,
            crowd_api_url, crowd_use_crowd and, optionally, the
            external_http_proxy / external_https_proxy keys
        :return: nothing
        """
        self.crowd_user = config['crowd_app_name']
        self.crowd_password = config['crowd_app_password']
        self.crowd_api = config['crowd_api_url']
        self.use_crowd = config['crowd_use_crowd'].lower() != 'false'
        # Use a proxied opener only when both proxy settings are present.
        try:
            proxies = {'http': config['external_http_proxy'],
                       'https': config['external_https_proxy']}
        except KeyError:
            self.external_opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.ProxyHandler({}))
            log.info("installed non-proxied external opener for crowd client")
        else:
            self.external_opener = urllib2.build_opener(urllib2.ProxyHandler(proxies))
            log.info("installed proxied external opener for crowd client")
def check_authenticated(self, user_name, password):
        """
        Checks if the user in question is in the crowd system
        :param user_name: Login name of the user to check
        :param password: That user's password
        :return: User information as JSON if session valid,
            raises exception if not
        """
        # BUG FIX: build the payload with a real JSON encoder — interpolating
        # the raw password into a format string produced invalid JSON (and
        # broke authentication) for passwords containing quotes/backslashes.
        return self._make_request('authentication?username=%s' % user_name,
                                  simplejson.dumps({'value': password}))
def create_user_session(self, user_name, password, remote_addr):
        """
        Ask the crowd provider for a user session token.

        :param user_name: Login name of the user
        :param password: Password of the user
        :param remote_addr: IP address the user is requesting from
        :return: User object in JSON containing a 'token' from Crowd;
            raises exception if the credentials are invalid
        """
        session_request = UserRequest()
        session_request.username = user_name
        session_request.password = password
        session_request.remote_address = remote_addr
        # The populated request is everything Crowd needs for a session.
        return self._make_request('session', session_request.to_json())
def verify_user_session(self, token):
        """
        Checks the supplied token against active Crowd sessions
        :param token: The Crowd session ID to verify
        :return: User information as JSON if session valid,
            raises exception if not
        """
        # Serve from the short-lived local cache when possible so Crowd is
        # not hit on every request.
        try:
            user, last_access_time = self._token_cache[token]
            age = datetime.datetime.now() - last_access_time
            # BUG FIX: timedelta.seconds wraps around every 24 hours, so a
            # day-old entry looked fresh; use total_seconds() instead.
            if age.total_seconds() <= 20:
                log.debug("Found user in cache - no need to call Crowd")
                return {
                    'user': user,
                    'token': token
                }
            # Stale entry: drop it and fall through to Crowd.
            del self._token_cache[token]
        except KeyError:
            pass
        # Cache miss or stale entry: ask Crowd and (re)populate the cache.
        # (Previously only the miss path refreshed the cache, so expired
        # tokens were re-fetched on every call.)
        server_credentials = self._make_request('session/' + token)
        self._token_cache[token] = (server_credentials['user'], datetime.datetime.now())
        return server_credentials
def delete_session(self, token):
        """
        Invalidate the specified Crowd session.

        :param token: Session identifier to invalidate
        :return: Nothing
        """
        # Drop any locally cached credentials before telling Crowd.
        self._token_cache.pop(token, None)
        self._make_request('session/' + token, method='DELETE')
def get_user_info(self, username):
        """
        Fetch the Crowd user record for the given login name.

        :param username: Name of user to get info for
        :return: User JSON
        """
        return self._make_request('user?username=%s' % username)
def create_user(self, username, first_name, last_name, email, password):
        """
        Create a new Crowd user with the given account details.

        :param username: login name for the user
        :param first_name: informal given name of the user
        :param last_name: family name of the user
        :param email: Email address
        :param password: User's desired password
        :return: nothing
        """
        new_user = UserRequest()
        new_user.username = username
        new_user.first_name = first_name
        new_user.last_name = last_name
        new_user.email = email
        new_user.password = password
        return self._make_request('user', data=new_user.new_user_json())
def update_user(self, username, first_name, last_name, email, password):
        """
        Replace the stored Crowd record for ``username``.

        :param username: login name for the user
        :param first_name: informal given name of the user
        :param last_name: family name of the user
        :param email: Email address
        :param password: User's desired password
        :return: nothing
        """
        updated = UserRequest()
        updated.username = username
        updated.first_name = first_name
        updated.last_name = last_name
        updated.email = email
        updated.password = password
        return self._make_request('user?username=%s' % username,
                                  data=updated.new_user_json(), method='PUT')
def update_users_password(self, username, password):
        """
        Set a new password for the given user.

        :param username: the username
        :param password: the new password
        :return: nothing
        """
        payload = simplejson.dumps({'value': password})
        return self._make_request('user/password?username=%s' % username,
                                  data=payload, method='PUT')
def delete_user(self, username):
        """
        Remove the given user from Crowd.

        :param username: The login name of the user to delete
        :return: nothing
        """
        self._make_request('user?username=%s' % username, method='DELETE')
def _make_request(self, resource, data=None, method=None):
"""
Helper function for making requests to the Crowd REST API
:param resource: The REST resource to access
:param data: Optional JSON payload
:param method: The HTTP verb used for the request
:return: The JSON response from the Crowd server, or
raises an exception if a problem was encountered
"""
# Make sure we specify that we're sending JSON, otherwise Crowd
# assumes XML
log.debug("Making a request for %s" % resource)
if data:
# Request implicitly becomes a POST if data is attached
request = urllib2.Request(self.crowd_api + resource, data)
request.add_header("Content-type", "application/json")
else:
# This will be a GET request
request = urllib2.Request(self.crowd_api + resource)
# Ask for JSON in return
request.add_header("Accept", "application/json")
base64string = base64.encodestring('%s:%s' % (self.crowd_user, self.crowd_password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
if method:
# Fancy verbs (like DELETE) can be dealt with here
request.get_method = lambda: method
# if not self.opener_installed:
# If we haven't connected to Crowd yet,
# make a dummy request in order to save our
# application credentials
# try:
# self.opener.open(self.crowd_api + 'group/membership')
# except urllib2.HTTPError as h_ex:
# log.error("CROWD CONNECTION ISSUE: %s" % h_ex)
|
import o | s
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import flask.ext.restless
app = Flask(__name__)
app.config.from_object('config')
#flask-sqlalchemy
db = SQLAlchemy(app)
from app import models, views
from app.models import Fact, Log
#API
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
manager.create_api(Fact, methods=['GET', 'POST', 'DELETE'])
manager.create_api(Log, methods=['GET', 'POST', 'PUT', 'DELETE | '])
|
self.assertEqual(len(init_state), 1)
self.assertEqual(init_state[0].shape.as_list(), [None, unit_a, unit_b])
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a, unit_b)))
self.assertEqual(model.output_shape, (None, unit_a, unit_b))
# Test stacking.
cells = [
Minimal2DRNNCell(unit_a, unit_b),
Minimal2DRNNCell(unit_a * 2, unit_b * 2),
Minimal2DRNNCell(unit_a * 4, unit_b * 4)
]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a * 4, unit_b * 4)))
self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4))
  def test_high_dimension_RNN_with_init_state(self):
    """An RNN over a cell with rank-2 state accepts and validates initial_state."""
    unit_a = 10
    unit_b = 20
    input_a = 5
    input_b = 10
    batch = 32
    time_step = 4
    # Basic test case: a correctly shaped initial state trains fine.
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((unit_a, unit_b))
    layer = keras.layers.RNN(cell)
    y = layer(x, initial_state=s)
    model = keras.models.Model([x, s], y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch([
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a, unit_b))
    ], np.zeros((batch, unit_a, unit_b)))
    self.assertEqual(model.output_shape, (None, unit_a, unit_b))
    # Bad init state shape: a state not matching cell.state_size must be
    # rejected with a ValueError at layer-call time.
    bad_shape_a = unit_a * 2
    bad_shape_b = unit_b * 2
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((bad_shape_a, bad_shape_b))
    layer = keras.layers.RNN(cell)
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             'however `cell.state_size` is'):
      layer(x, initial_state=s)
  def test_inconsistent_output_state_size(self):
    """Cell whose output size differs from its state size still trains."""
    batch = 32
    time_step = 4
    state_size = 5
    input_size = 6
    cell = PlusOneRNNCell(state_size)
    x = keras.Input((None, input_size))
    layer = keras.layers.RNN(cell)
    y = layer(x)
    self.assertEqual(cell.state_size, state_size)
    if not context.executing_eagerly():
      # In graph mode the initial state carries the declared state_size.
      init_state = layer.get_initial_state(x)
      self.assertEqual(len(init_state), 1)
      self.assertEqual(init_state[0].shape.as_list(), [None, state_size])
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, input_size)),
        np.zeros((batch, input_size)))
    # The model's output follows the input size, not state_size (hence
    # "inconsistent" -- this is exactly what the test pins down).
    self.assertEqual(model.output_shape, (None, input_size))
  def test_get_initial_state(self):
    """get_initial_state needs inputs, or an explicit (batch_size, dtype)."""
    cell = keras.layers.SimpleRNNCell(5)
    with self.assertRaisesRegexp(ValueError,
                                 'batch_size and dtype cannot be None'):
      cell.get_initial_state(None, None, None)
    if not context.executing_eagerly():
      # Graph mode: the batch dimension stays symbolic (None).
      inputs = keras.Input((None, 10))
      initial_state = cell.get_initial_state(inputs, None, None)
      self.assertEqual(initial_state.shape.as_list(), [None, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)
      batch = array_ops.shape(inputs)[0]
      dtype = inputs.dtype
      initial_state = cell.get_initial_state(None, batch, dtype)
      self.assertEqual(initial_state.shape.as_list(), [None, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)
    else:
      # Eager mode: concrete inputs yield a concrete batch dimension.
      batch = 8
      inputs = np.random.random((batch, 10))
      initial_state = cell.get_initial_state(inputs, None, None)
      self.assertEqual(initial_state.shape.as_list(), [8, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)
      dtype = inputs.dtype
      initial_state = cell.get_initial_state(None, batch, dtype)
      self.assertEqual(initial_state.shape.as_list(), [batch, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)
  def test_nested_input_output(self):
    """An RNN cell may consume and emit nested (structured) inputs/outputs."""
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4
    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell)
    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    outputs = rnn((input_1, input_2))
    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])
    model = keras.models.Model((input_1, input_2), outputs)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])
    # Same exercise, but with the cell packing its I/O into the
    # NestedInput structure instead of a plain tuple.
    cell = NestedCell(o1, o2, o3, use_tuple=True)
    rnn = keras.layers.RNN(cell)
    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    outputs = rnn(NestedInput(t1=input_1, t2=input_2))
    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])
    model = keras.models.Model([input_1, input_2], outputs)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)),
         np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])
  def test_nested_input_output_with_state(self):
    """Nested-structure cell with return_sequences and return_state on."""
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4
    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    output1, output2, s1, s2 = rnn((input_1, input_2))
    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])
    # The states are shape-checked above but deliberately excluded from
    # the model outputs below.
    model = keras.models.Model([input_1, input_2], [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)),
         np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, t, o1)),
         np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
    # Repeat with the cell using the NestedInput structure for its input.
    cell = NestedCell(o1, o2, o3, use_tuple=True)
    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2))
    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])
    model = keras.models.Model([input_1, input_2], [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)),
         np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, t, o1)),
         np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
def test_nest_input_output_with_init_state(self):
| batch = 10
t = 5
i1, i2, i3 = 3, 4, 5
o1, o2, o3 = 2, 3, 4
cell = NestedC |
ro General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import, division, print_function
from six import StringIO as NativeStringIO
from twisted.internet.selectreactor import SelectReactor
from crossbar.test import TestCase
from crossbar.controller import cli
from crossbar import _logging
from weakref import WeakKeyDictionary
import os
import sys
import platform
import twisted
class CLITestBase(TestCase):
    """Shared fixture: captures crossbar's logging output into StringIO
    buffers so tests can assert on what was printed."""
    # The tests here are mostly bogus, as they test for log message content,
    # not actual functionality.
    skip = True
    def setUp(self):
        # PyPy warms up slowly; give subprocesses more time there.
        self._subprocess_timeout = 15
        if platform.python_implementation() == 'PyPy':
            self._subprocess_timeout = 30
        # Redirect the logging framework's streams into in-memory buffers.
        self.stderr = NativeStringIO()
        self.stdout = NativeStringIO()
        self.patch(_logging, "_stderr", self.stderr)
        self.patch(_logging, "_stdout", self.stdout)
        # Reset the global logger registry and level so state doesn't leak
        # between tests.
        self.patch(_logging, "_loggers", WeakKeyDictionary())
        self.patch(_logging, "_loglevel", "info")
        return super(CLITestBase, self).setUp()
    def tearDown(self):
        # Restore the real process streams in case a test replaced them.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
class VersionTests(CLITestBase):
    """
    Tests for `crossbar version`.
    """
    def test_basic(self):
        """
        Just running `crossbar version` gets us the versions.
        """
        reactor = SelectReactor()
        cli.run("crossbar",
                ["version"],
                reactor=reactor)
        self.assertIn("Crossbar.io", self.stdout.getvalue())
        # The \x1b escapes are the colour codes the logger emits; the
        # reactor name is appended to the Twisted version string.
        self.assertIn(
            ("Twisted : \x1b[33m\x1b[1m" + twisted.version.short() + "-SelectReactor"),
            self.stdout.getvalue())
    def test_debug(self):
        """
        Running `crossbar version` will give us the versions, plus the
        locations of some of them.
        """
        reactor = SelectReactor()
        cli.run("crossbar",
                ["version", "--loglevel=debug"],
                reactor=reactor)
        self.assertIn("Crossbar.io", self.stdout.getvalue())
        self.assertIn(
            ("Twisted : \x1b[33m\x1b[1m" + twisted.version.short() + "-SelectReactor"),
            self.stdout.getvalue())
        # Debug level additionally prints the fully-qualified class path.
        self.assertIn(
            ("[twisted.internet.selectreactor.SelectReactor]"),
            self.stdout.getvalue())
class StartTests(CLITestBase):
    """
    Tests for `crossbar start`.
    """
    def setUp(self):
        CLITestBase.setUp(self)
        # Set up the configuration directories
        self.cbdir = os.path.abspath(self.mktemp())
        os.mkdir(self.cbdir)
        self.config = os.path.abspath(os.path.join(self.cbdir, "config.json"))
    def test_start(self):
        """
        A basic start, that doesn't actually enter the reactor.
        """
        with open(self.config, "w") as f:
            f.write("""{"controller": {}}""")
        reactor = SelectReactor()
        # Stub out run() so the test returns instead of blocking in the loop.
        reactor.run = lambda: False
        cli.run("crossbar",
                ["start", "--cbdir={}".format(self.cbdir),
                 "--logformat=syslogd"],
                reactor=reactor)
        self.assertIn("Entering reactor event loop", self.stdout.getvalue())
    def test_configValidationFailure(self):
        """
        Running `crossbar start` with an invalid config will print a warning.
        """
        # An empty file is not valid JSON.
        with open(self.config, "w") as f:
            f.write("")
        reactor = SelectReactor()
        with self.assertRaises(SystemExit) as e:
            cli.run("crossbar",
                    ["start", "--cbdir={}".format(self.cbdir),
                     "--logformat=syslogd"],
                    reactor=reactor)
        # Exit with code 1
        self.assertEqual(e.exception.args[0], 1)
        # The proper warning should be emitted
        self.assertIn("*** Configuration validation failed ***",
                      self.stderr.getvalue())
        self.assertIn(("configuration file does not seem to be proper JSON "),
                      self.stderr.getvalue())
    def test_fileLogging(self):
        """
        Running `crossbar start --logtofile` will log to cbdir/node.log.
        """
        with open(self.config, "w") as f:
            f.write("""{"controller": {}}""")
        reactor = SelectReactor()
        reactor.run = lambda: None
        cli.run("crossbar",
                ["start", "--cbdir={}".format(self.cbdir), "--logtofile"],
                reactor=reactor)
        with open(os.path.join(self.cbdir, "node.log"), "r") as f:
            logFile = f.read()
        self.assertIn("Entering reactor event loop", logFile)
        # With --logtofile, nothing should reach the console streams.
        self.assertEqual("", self.stderr.getvalue())
        self.assertEqual("", self.stdout.getvalue())
    def test_stalePID(self):
        # A node.pid pointing at a non-existing process must be detected,
        # reported, and removed on startup.
        with open(self.config, "w") as f:
            f.write("""{"controller": {}}""")
        with open(os.path.join(self.cbdir, "node.pid"), "w") as f:
            f.write("""{"pid": 9999999}""")
        reactor = SelectReactor()
        reactor.run = lambda: None
        cli.run("crossbar",
                ["start", "--cbdir={}".format(self.cbdir),
                 "--logformat=syslogd"],
                reactor=reactor)
        self.assertIn(
            ("Stale Crossbar.io PID file (pointing to non-existing process "
             "with PID {pid}) {fp} removed").format(
                fp=os.path.abspath(os.path.join(self.cbdir, "node.pid")),
                pid=9999999),
            self.stdout.getvalue())
class ConvertTests(CLITestBase):
"""
Tests for `crossbar convert`.
"""
    def test_unknown_format(self):
        """
        Running `crossbar convert` with an unknown config file produces an
        error.
        """
        cbdir = self.mktemp()
        os.makedirs(cbdir)
        config_file = os.path.join(cbdir, "config.blah")
        # Content is irrelevant -- only the unrecognised extension matters.
        open(config_file, 'wb').close()
        with self.assertRaises(SystemExit) as e:
            cli.run("crossbar",
                    ["convert", "--config={}".format(config_file)])
        self.assertEqual(e.exception.args[0], 1)
        self.assertIn(
            ("Error: configuration file needs to be '.json' or '.yaml'."),
            self.stdout.getvalue())
    def test_yaml_to_json(self):
        """
        Running `crossbar convert` with a YAML config file will convert it to
        JSON.
        """
        cbdir = self.mktemp()
        os.makedirs(cbdir)
        config_file = os.path.join(cbdir, "config.yaml")
        with open(config_file, 'w') as f:
            f.write("""
            foo:
            bar: spam
            baz:
            foo: cat
            """)
        cli.run("crossbar",
                ["convert", "--config={}".format(config_file)])
        self.assertIn(
            ("JSON formatted configuration written"),
            self.stdout.getvalue())
        # The converted file lands next to the source, as config.json.
        with open(os.path.join(cbdir, "config.json"), 'r') as f:
            self.assertEqual(f.read(), """{
   "foo": {
      "bar": "spam",
      "baz": {
         "foo": "cat"
      }
   }
}""")
    def test_invalid_yaml_to_json(self):
        """
        Running `crossbar convert` with an invalid YAML config file will error
        saying it is invalid.
        """
        cbdir = self.mktemp()
        os.makedirs(cbdir)
        config_file = os.path.join(cbdir, "config.yaml")
        # A run of open braces is not parseable YAML.
        with open(config_file, 'w') as f:
            f.write("""{{{{{{{{""")
        with self.assertRaises(SystemExit) as e:
            cli.run("crossbar",
                    ["convert", "--config={}".format(config_file)])
        self.assertEqual(e.exception.args[0], 1)
        self.assertIn(
            ("not seem to be proper YAML"),
            self.stdout.getvalue())
def test_json_to_yaml(self):
"""
Running `crossbar convert` with a YAML config file will convert it to
JSON.
"""
cbdir = self.mktemp()
os.makedirs(cbdir)
config_file = os.path.join(cbdir, "config.json")
with open(config_file, 'w') as f:
f.write("""{
"foo": {
|
import logging.handlers
import os
_pabotlog = logging.getLogger('PABot')
_pabotlog.setLevel(logging.DEBUG)
_logPath = os.path.abspath("./logging/pabot.log")
_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
_consoleStreamHandler = logging.StreamH | andler()
_consoleStreamHandler.setLevel(logging.DEBUG)
_consoleStreamHandler.setFormatter(_formatter)
_symLogRotFileHandler = logging.handlers.RotatingFileHandler(_logPath, maxBytes=2000000, backupCount=5)
_symLogRotFileHandler.setLevel(logging.DEBUG)
_symLogRotFileHandler.setFormatter(_formatter)
_pabotlog.addHandler(_consoleStreamHandler)
_pabotlog.addHan | dler(_symLogRotFileHandler)
def LogPABotMessage(message):
    """Log *message* at INFO level to the shared PABot logger."""
    _pabotlog.info(message)
def LogPABotError(message):
    """Log *message* at ERROR level to the shared PABot logger."""
    _pabotlog.error(message)
|
"""
This module defines serializers f | or the main API data objects:
.. autosummary::
:nosignatures:
DimensionSerializer
FilterSerializer
MessageSerializer
Q | uestionSerializer
"""
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from rest_framework import serializers, pagination

import emoticonvis.apps.corpus.models as corpus_models
# NOTE(review): coding_models is referenced by CodeAssignmentSerializer and
# DisagreementIndicatorSerializer below but was never imported, which makes
# the module fail with NameError at import time. Confirm the module path.
import emoticonvis.apps.coding.models as coding_models
import emoticonvis.apps.enhance.models as enhance_models
# A simple string field that looks up dimensions on deserialization
class MessageSerializer(serializers.ModelSerializer):
    """
    JSON representation of :class:`.Message`
    objects for the API.
    Messages are provided in a simple format that is useful for displaying
    examples:
    ::
        {
            "id": 52,
            "dataset": 2,
            "text": "Some sort of thing or other",
            "sender": {
                "id": 2,
                "dataset": 1
                "original_id": 2568434,
                "username": "my_name",
                "full_name": "My Name"
            },
            "time": "2010-02-25T00:23:53Z"
        }
    Additional fields may be added later.
    """
    class Meta:
        model = corpus_models.Message
        # NOTE(review): only these three fields are serialized today; the
        # docstring example also shows sender/time, which are not in `fields`.
        fields = ('id', 'dataset', 'text', )
class UserSerializer(serializers.ModelSerializer):
    """Serializes a User as just their username string."""
    def to_representation(self, instance):
        # Collapse the whole object to a bare string in API output.
        return instance.username
    class Meta:
        model = User
        fields = ('username', )
class FeatureVectorSerializer(serializers.Serializer):
    """A message together with its tokens and extracted feature vector."""
    message = MessageSerializer()
    tokens = serializers.ListField()
    feature_vector = serializers.ListField(child=serializers.DictField())
class FeatureCodeDistributionSerializer(serializers.Serializer):
    """Distribution of code assignments for a single feature."""
    feature_index = serializers.IntegerField()
    feature_text = serializers.CharField()
    distribution = serializers.ListField(child=serializers.DictField())
class SVMResultSerializer(serializers.Serializer):
    """SVM classification results plus the messages they were computed on."""
    results = serializers.DictField()
    messages = serializers.ListField(child=FeatureVectorSerializer(), required=True)
class FeatureSerializer(serializers.ModelSerializer):
    """JSON representation of :class:`.Feature`; token_list is write-in only."""
    token_list = serializers.ListField(child=serializers.CharField(), required=False)
    class Meta:
        model = enhance_models.Feature
        fields = ('id', 'dictionary', 'index', 'text', 'document_frequency', 'token_list', )
        read_only_fields = ('id', 'dictionary', 'index', 'text', 'document_frequency', )
class PaginatedMessageSerializer(pagination.PaginationSerializer):
    """Paginated wrapper around MessageSerializer results.
    NOTE(review): PaginationSerializer is pre-DRF-3.1 API; confirm the
    pinned djangorestframework version still provides it."""
    class Meta:
        object_serializer_class = MessageSerializer
class DatasetSerializer(serializers.ModelSerializer):
    """Read-only JSON representation of :class:`.Dataset`."""
    class Meta:
        model = corpus_models.Dataset
        fields = ('id', 'name', 'description', 'message_count', )
        read_only_fields = ('id', 'name', 'description', 'message_count', )
class DictionarySerializer(serializers.ModelSerializer):
    """Read-only JSON representation of :class:`.Dictionary`, with its
    owning dataset nested inline."""
    dataset = DatasetSerializer()
    class Meta:
        model = enhance_models.Dictionary
        fields = ('id', 'name', 'time', 'feature_count', 'dataset', )
        read_only_fields = ('id', 'name', 'time', 'feature_count', 'dataset', )
class CodeAssignmentSerializer(serializers.ModelSerializer):
    """JSON representation of a code assigned to a message by a user.
    NOTE(review): relies on coding_models, which is not imported at the top
    of this module -- confirm and add the missing import."""
    class Meta:
        model = coding_models.CodeAssignment
        fields = ('id', 'source', 'message', 'code', 'is_example', 'is_ambiguous', 'is_saved', )
        read_only_fields = ('id', 'source', )
class CodeDefinitionSerializer(serializers.Serializer):
    """A code's definition text with optional source user and examples."""
    code = serializers.CharField(required=False)
    source = UserSerializer(required=False)
    text = serializers.CharField()
    examples = MessageSerializer(many=True, required=False)
class CodeMessageSerializer(serializers.Serializer):
    """The messages a given user labelled with a given code."""
    code = serializers.CharField()
    source = UserSerializer()
    messages = MessageSerializer(many=True)
class DisagreementIndicatorSerializer(serializers.ModelSerializer):
    """JSON representation of a coding disagreement between two users.
    NOTE(review): relies on coding_models, which is not imported at the top
    of this module -- confirm and add the missing import."""
    user_assignment = CodeAssignmentSerializer(required=False)
    partner_assignment = CodeAssignmentSerializer(required=False)
    class Meta:
        model = coding_models.DisagreementIndicator
        fields = ('id', 'message', 'user_assignment', 'partner_assignment', 'type', )
        read_only_fields = ('id', 'message', 'user_assignment', 'partner_assignment', )
class PairwiseSerializer(serializers.Serializer):
    """One cell of a pairwise user/partner code-agreement matrix."""
    user_code = serializers.CharField()
    partner_code = serializers.CharField()
    count = serializers.IntegerField()
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build and run a minimal guest/host FATE pipeline: Reader -> DataTransform.

    :param config: path to a job-config YAML file, or an already-loaded
        config object.
    :param namespace: suffix appended to the table namespaces, so the same
        job can run against namespaced copies of the demo data.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    # Demo tables; assumed to have been uploaded beforehand.
    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    # Guest data carries the label; the host side is feature-only.
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    # -config is optional; main() falls back to its default path.
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
|
ncode('utf-8'))
root = tree.getroot()
self.assertEquals([_bytes('FooBar\\u0680\\u3120').decode("unicode_escape"),
_bytes('BarFoo\\u0680\\u3120').decode("unicode_escape")],
tree.xpath('/a/b/text()'))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
    def test_xpath_list_attribute(self):
        # Attribute-axis queries return the attribute's string value.
        tree = self.parse('<a b="B" c="C"/>')
        self.assertEquals(['B'],
                          tree.xpath('/a/@b'))
    def test_xpath_list_attribute_parent(self):
        # Attribute result strings remember their parent element.
        tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
        results = tree.xpath('/a/@c')
        self.assertEquals(1, len(results))
        self.assertEquals('CqWeRtZuI', results[0])
        self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
    def test_xpath_list_attribute_parent_no_smart_strings(self):
        # With smart_strings=True the result has getparent(); with
        # smart_strings=False it is a plain string without it.
        tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
        results = tree.xpath('/a/@c', smart_strings=True)
        self.assertEquals(1, len(results))
        self.assertEquals('CqWeRtZuI', results[0])
        self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
        results = tree.xpath('/a/@c', smart_strings=False)
        self.assertEquals(1, len(results))
        self.assertEquals('CqWeRtZuI', results[0])
        self.assertEquals(False, hasattr(results[0], 'getparent'))
    def test_xpath_list_comment(self):
        # node() also matches comment nodes; check via their repr.
        tree = self.parse('<a><!-- Foo --></a>')
        self.assertEquals(['<!-- Foo -->'],
                          list(map(repr, tree.xpath('/a/node()'))))
    def test_rel_xpath_boolean(self):
        # boolean() of a relative node-set: true iff a match exists.
        root = etree.XML('<a><b><c/></b></a>')
        el = root[0]
        self.assert_(el.xpath('boolean(c)'))
        self.assert_(not el.xpath('boolean(d)'))
    def test_rel_xpath_list_elements(self):
        # A relative path searches children only; '//' searches the whole
        # document even when evaluated from an element.
        tree = self.parse('<a><c><b>Foo</b><b>Bar</b></c><c><b>Hey</b></c></a>')
        root = tree.getroot()
        c = root[0]
        self.assertEquals([c[0], c[1]],
                          c.xpath('b'))
        self.assertEquals([c[0], c[1], root[1][0]],
                          c.xpath('//b'))
    def test_xpath_ns(self):
        # The prefix is arbitrary; only the mapped namespace URI must match.
        tree = self.parse('<a xmlns="uri:a"><b></b></a>')
        root = tree.getroot()
        self.assertEquals(
            [root[0]],
            tree.xpath('//foo:b', namespaces={'foo': 'uri:a'}))
        self.assertEquals(
            [],
            tree.xpath('//foo:b', namespaces={'foo': 'uri:c'}))
        self.assertEquals(
            [root[0]],
            root.xpath('//baz:b', namespaces={'baz': 'uri:a'}))
    def test_xpath_ns_none(self):
        # A None prefix is rejected -- XPath has no default-namespace prefix.
        tree = self.parse('<a xmlns="uri:a"><b></b></a>')
        root = tree.getroot()
        self.assertRaises(
            TypeError,
            root.xpath, '//b', namespaces={None: 'uri:a'})
    def test_xpath_ns_empty(self):
        # An empty-string prefix is rejected just like None.
        tree = self.parse('<a xmlns="uri:a"><b></b></a>')
        root = tree.getroot()
        self.assertRaises(
            TypeError,
            root.xpath, '//b', namespaces={'': 'uri:a'})
    def test_xpath_error(self):
        # An invalid expression raises XPathEvalError at evaluation time.
        tree = self.parse('<a/>')
        self.assertRaises(etree.XPathEvalError, tree.xpath, '\\fad')
    def test_xpath_class_error(self):
        # Compiling an invalid expression with etree.XPath raises an error
        # that both asserts here can catch.
        self.assertRaises(SyntaxError, etree.XPath, '\\fad')
        self.assertRaises(etree.XPathSyntaxError, etree.XPath, '\\fad')
    def test_xpath_prefix_error(self):
        # Using an undeclared namespace prefix fails at evaluation.
        tree = self.parse('<a/>')
        self.assertRaises(etree.XPathEvalError, tree.xpath, '/fa:d')
    def test_xpath_class_prefix_error(self):
        # An undeclared prefix compiles but fails when the XPath is called.
        tree = self.parse('<a/>')
        xpath = etree.XPath("/fa:d")
        self.assertRaises(etree.XPathEvalError, xpath, tree)
    def test_elementtree_getpath(self):
        # getpath() produces a path that xpath() resolves back to the node.
        a = etree.Element("a")
        b = etree.SubElement(a, "b")
        c = etree.SubElement(a, "c")
        d1 = etree.SubElement(c, "d")
        d2 = etree.SubElement(c, "d")
        tree = etree.ElementTree(a)
        self.assertEqual('/a/c/d',
                         tree.getpath(d2)[:6])
        self.assertEqual([d2],
                         tree.xpath(tree.getpath(d2)))
    def test_elementtree_getpath_partial(self):
        # Same round-trip, but with a tree rooted at a non-root element.
        a = etree.Element("a")
        b = etree.SubElement(a, "b")
        c = etree.SubElement(a, "c")
        d1 = etree.SubElement(c, "d")
        d2 = etree.SubElement(c, "d")
        tree = etree.ElementTree(c)
        self.assertEqual('/c/d',
                         tree.getpath(d2)[:4])
        self.assertEqual([d2],
                         tree.xpath(tree.getpath(d2)))
    def test_xpath_evaluator(self):
        # An XPathEvaluator bound to a tree evaluates from the document root.
        tree = self.parse('<a><b><c></c></b></a>')
        e = etree.XPathEvaluator(tree)
        root = tree.getroot()
        self.assertEquals(
            [root],
            e('//a'))
    def test_xpath_evaluator_tree(self):
        # A sub-tree evaluator sees only the sub-tree: 'a' matches nothing,
        # 'c' matches the child of the sub-tree root.
        tree = self.parse('<a><b><c></c></b></a>')
        child_tree = etree.ElementTree(tree.getroot()[0])
        e = etree.XPathEvaluator(child_tree)
        self.assertEquals(
            [],
            e('a'))
        root = child_tree.getroot()
        self.assertEquals(
            [root[0]],
            e('c'))
    def test_xpath_evaluator_tree_absolute(self):
        # Absolute paths on a sub-tree resolve against the sub-tree root.
        tree = self.parse('<a><b><c></c></b></a>')
        child_tree = etree.ElementTree(tree.getroot()[0])
        e = etree.XPathEvaluator(child_tree)
        self.assertEquals(
            [],
            e('/a'))
        root = child_tree.getroot()
        self.assertEquals(
            [root],
            e('/b'))
        self.assertEquals(
            [],
            e('/c'))
    def test_xpath_evaluator_element(self):
        # An evaluator bound to an element evaluates relative to it.
        tree = self.parse('<a><b><c></c></b></a>')
        root = tree.getroot()
        e = etree.XPathEvaluator(root[0])
        self.assertEquals(
            [root[0][0]],
            e('c'))
    def test_xpath_extensions(self):
        # A custom extension function is callable from the expression.
        def foo(evaluator, a):
            return 'hello %s' % a
        extension = {(None, 'foo'): foo}
        tree = self.parse('<a><b></b></a>')
        e = etree.XPathEvaluator(tree, extensions=[extension])
        self.assertEquals(
            "hello you", e("foo('you')"))
def test_xpath | _extensions_wrong_args(self):
def foo(evaluator, a, b):
retu | rn "hello %s and %s" % (a, b)
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(TypeError, e, "foo('you')")
    def test_xpath_extensions_error(self):
        # Exceptions raised inside an extension propagate to the caller.
        def foo(evaluator, a):
            return 1/0
        extension = {(None, 'foo'): foo}
        tree = self.parse('<a/>')
        e = etree.XPathEvaluator(tree, extensions=[extension])
        self.assertRaises(ZeroDivisionError, e, "foo('test')")
    def test_xpath_extensions_nodes(self):
        # An extension may return freshly built elements, which the rest of
        # the expression can then navigate.
        def f(evaluator, arg):
            r = etree.Element('results')
            b = etree.SubElement(r, 'result')
            b.text = 'Hoi'
            b = etree.SubElement(r, 'result')
            b.text = 'Dag'
            return r
        x = self.parse('<a/>')
        e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
        r = e("foo('World')/result")
        self.assertEquals(2, len(r))
        self.assertEquals('Hoi', r[0].text)
        self.assertEquals('Dag', r[1].text)
    def test_xpath_extensions_nodes_append(self):
        # An extension may also append new elements to nodes it received
        # from the expression's node-set argument.
        def f(evaluator, nodes):
            r = etree.SubElement(nodes[0], 'results')
            b = etree.SubElement(r, 'result')
            b.text = 'Hoi'
            b = etree.SubElement(r, 'result')
            b.text = 'Dag'
            return r
        x = self.parse('<a/>')
        e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
        r = e("foo(/*)/result")
        self.assertEquals(2, len(r))
        self.assertEquals('Hoi', r[0].text)
        self.assertEquals('Dag', r[1].text)
def test_xpath_extensions_nodes_append2(self):
def f(evaluator, nodes):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubEl |
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import unittest
import common
from binascii import unhexlify
class TestMsgVerifymessageSegwitNative(common.KeepKeyTest):
    """Message-verification tests against native segwit (bech32) addresses."""
    def test_message_long(self):
        # A long repeated message must still verify correctly.
        self.setup_mnemonic_nopin_nopassphrase()
        ret = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('285ff795c29aef7538f8b3bdb2e8add0d0722ad630a140b6aefd504a5a895cbd867cbb00981afc50edd0398211e8d7c304bb8efa461181bc0afa67ea4a720a89ed'),
            "VeryLongMessage!" * 64
        )
        assert ret is True
    def test_message_testnet(self):
        # Same check on a testnet bech32 address.
        self.setup_mnemonic_nopin_nopassphrase()
        ret = self.client.verify_message(
            'Testnet',
            'tb1qyjjkmdpu7metqt5r36jf872a34syws336p3n3p',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
            'This is an example of a signed message.'
        )
        assert ret is True
    def test_message_verify(self):
        self.setup_mnemonic_nopin_nopassphrase()
        # trezor pubkey - OK
        res = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
            'This is an example of a signed message.'
        )
        assert res is True
        # trezor pubkey - FAIL - wrong sig (last byte altered)
        res = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be00'),
            'This is an example of a signed message.'
        )
        assert res is False
        # trezor pubkey - FAIL - wrong msg (trailing punctuation differs)
        res = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
            'This is an example of a signed message!'
        )
        assert res is False
    def test_verify_utf(self):
        # NFKD and NFC encodings of the same text must both verify against
        # the same signature.
        self.setup_mnemonic_nopin_nopassphrase()
        words_nfkd = u'Pr\u030ci\u0301s\u030cerne\u030c z\u030clut\u030couc\u030cky\u0301 ku\u030an\u030c u\u0301pe\u030cl d\u030ca\u0301belske\u0301 o\u0301dy za\u0301ker\u030cny\u0301 uc\u030cen\u030c be\u030cz\u030ci\u0301 pode\u0301l zo\u0301ny u\u0301lu\u030a'
        words_nfc = u'P\u0159\xed\u0161ern\u011b \u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy z\xe1ke\u0159n\xfd u\u010de\u0148 b\u011b\u017e\xed pod\xe9l z\xf3ny \xfal\u016f'
        res_nfkd = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('28d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6'),
            words_nfkd
        )
        res_nfc = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('28d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6'),
            words_nfc
        )
        assert res_nfkd is True
        assert res_nfc is True
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
class JWTValidationError( | Exception):
pass
class JWTAudienceError(JWTValid | ationError):
pass
|
ager
import pytest
import six
from cryptography.exceptions import UnsupportedAlgorithm
import cryptography_vectors
HashVector = collections.namedtuple("HashVector", ["message", "digest"])
KeyedHashVector = collections.namedtuple(
"KeyedHashVector", ["message", "digest", "key"]
)
def select_backends(names, backend_list):
    """Return the subset of *backend_list* whose names appear in *names*.

    *names* is a comma-separated string of backend names, or None to mean
    "all backends". Raises ValueError when nothing matches, so a typoed
    selection fails loudly instead of silently running zero tests.
    """
    if names is None:
        return backend_list
    wanted = [piece.strip() for piece in names.split(',')]
    # Keep the original backend objects (not copies) so the metadata pytest
    # associates with each of them is preserved.
    chosen = [backend for backend in backend_list if backend.name in wanted]
    if not chosen:
        raise ValueError(
            "No backend selected. Tried to select: {0}".format(wanted)
        )
    return chosen
def check_for_iface(name, iface, item):
    """pytest helper: skip *item* when its backend does not provide *iface*.

    Only acts on items that carry the *name* keyword AND received a
    ``backend`` funcarg; everything else is left untouched.
    """
    if name not in item.keywords or "backend" not in item.funcargs:
        return
    backend = item.funcargs["backend"]
    if not isinstance(backend, iface):
        pytest.skip("{0} backend does not support {1}".format(
            backend, name
        ))
def check_backend_support(item):
    """pytest helper: honour the ``supported`` marker on a test item.

    Skips the test when the marker's ``only_if`` predicate rejects the
    item's backend. Applying the marker to a test that takes no ``backend``
    funcarg is a programming error and raises ValueError.
    """
    supported = item.keywords.get("supported")
    if not supported:
        return
    if "backend" not in item.funcargs:
        raise ValueError("This mark is only available on methods that take a "
                         "backend")
    if not supported.kwargs["only_if"](item.funcargs["backend"]):
        pytest.skip("{0} ({1})".format(
            supported.kwargs["skip_message"], item.funcargs["backend"]
        ))
@contextmanager
def raises_unsupported_algorithm(reason):
    """Context manager asserting the body raises UnsupportedAlgorithm
    whose internal ``_reason`` flag is exactly *reason* (identity check).
    Yields the pytest ExceptionInfo for further inspection."""
    with pytest.raises(UnsupportedAlgorithm) as exc_info:
        yield exc_info
    assert exc_info.value._reason is reason
def load_vectors_from_file(filename, loader):
    """Open *filename* from the cryptography_vectors package and run *loader*
    over the open file handle, returning whatever it produces."""
    with cryptography_vectors.open_vector_file(filename) as handle:
        return loader(handle)
def load_nist_vectors(vector_data):
    """Parse a NIST ``Key = Value`` response file into a list of dicts.

    Every COUNT line opens a new test-case dict; a bare FAIL line flags the
    current case as an expected failure.  All other values become ASCII
    bytes, with the literal two-character sequence ``\\0`` converted into a
    real NUL character first (needed by e.g. the PBKDF2 vectors).
    """
    cases = []
    current = None
    for raw in vector_data:
        stripped = raw.strip()
        # Blank lines, comments and [section] headers carry no data.
        if not stripped or stripped.startswith("#"):
            continue
        if stripped.startswith("[") and stripped.endswith("]"):
            continue
        if stripped == "FAIL":
            current["fail"] = True
            continue
        # Simple "Name = Value" format.
        name, value = (part.strip() for part in stripped.split("="))
        value = value.replace("\\0", "\0")
        if name.upper() == "COUNT":
            # COUNT is a special token starting a new block of data.
            current = {}
            cases.append(current)
        else:
            current[name.lower()] = value.encode("ascii")
    return cases
def load_cryptrec_vectors(vector_data):
    """Parse CRYPTREC ``K/P/C : hex-bytes`` vector files.

    A K line establishes the current key; each following P/C pair is emitted
    as a dict of key/plaintext/ciphertext ASCII-hex byte strings (many P/C
    pairs may share a single K).
    """
    triples = []
    for raw in vector_data:
        entry = raw.strip()
        # Blank lines and comments are ignored
        if not entry or entry.startswith("#"):
            continue
        marker = entry[0]
        payload = entry.split(" : ")[1].replace(" ", "").encode("ascii") \
            if marker in ("K", "P", "C") else None
        if marker == "K":
            key = payload
        elif marker == "P":
            pt = payload
        elif marker == "C":
            ct = payload
            # A C line completes the current K+P+C triple.
            triples.append({
                "key": key,
                "plaintext": pt,
                "ciphertext": ct
            })
        else:
            raise ValueError("Invalid line in file '{}'".format(entry))
    return triples
def load_hash_vectors(vector_data):
    """Parse NIST hash/HMAC response files into (Keyed)HashVector tuples.

    Vectors that carry a Key line become KeyedHashVector entries (HMAC);
    all others become plain HashVector entries.  NIST encodes an empty
    message as hex ``00``, so the preceding ``Len`` value decides whether
    the message is really empty.
    """
    parsed = []
    key = None
    msg = None
    md = None
    for raw in vector_data:
        entry = raw.strip()
        if not entry or entry.startswith("#") or entry.startswith("["):
            continue
        if entry.startswith("Len"):
            length = int(entry.split(" = ")[1])
        elif entry.startswith("Key"):
            # Only HMAC vectors contain a key attribute.
            key = entry.split(" = ")[1].encode("ascii")
        elif entry.startswith("Msg"):
            # Honour Len: a zero-length vector's "00" token means b"".
            msg = entry.split(" = ")[1].encode("ascii") if length > 0 else b""
        elif entry.startswith("MD"):
            md = entry.split(" = ")[1]
            # MD closes the vector: emit it and reset the accumulators.
            if key is None:
                parsed.append(HashVector(msg, md))
            else:
                parsed.append(KeyedHashVector(msg, md, key))
                key = None
            msg = None
            md = None
        else:
            raise ValueError("Unknown line in hash vector")
    return parsed
def load_pkcs1_vectors(vector_data):
"""
Loads data out of RSA PKCS #1 vector files.
"""
private_key_vector = None
public_key_vector = None
attr = None
key = None
example_vector = None
examples = []
vectors = []
for line in vector_data:
if (
line.startswith("# PSS Example") or
line.startswith("# PKCS#1 v1.5 Signature")
):
if example_vector:
for key, value in six.iteritems(example_vector):
hex_str = "".join(value).replace(" ", "").encode("ascii")
example_vector[key] = hex_str
examples.append(example_vector)
attr = None
example_vector = collections.defaultdict(list)
if line.startswith("# Message to be signed"):
attr = "message"
continue
elif line.startswith("# Salt"):
attr = "salt"
continue
elif line.startswith("# Signature"):
attr = "signature"
continue
elif | (
example_vector and
line.startswith("# =============================================")
):
for key, value in six.iteritems(example_vector):
hex_str = "".join(value).replace(" ", "").encode("asc | ii")
example_vector[key] = hex_str
examples.append(example_vector)
example_vector = None
attr = None
elif example_vector and line.startswith("#"):
continue
else:
if attr is not None and example_vector is not None:
example_vector[attr].append(line.strip())
continue
if (
line.startswith("# Example") or
line.startswith("# =============================================")
):
if key:
assert private_key_vector
assert public_key_vector
for key, value in six.iteritems(public_key_vector):
hex_str = "".join(value).replace(" ", "")
public_key_vector[key] = int(hex_str, 16)
for key, value in six.iteritems(private_key_vector):
hex_str = "".join(value).replace(" ", "")
private_key_vector[key] = int(hex_str, 16)
private_key_vector["examples"] = examples
examples = []
assert (
private_key_vector['public_exponent'] ==
public_key_vector['public_exponent']
)
assert (
private_key_vector['modulus'] ==
public_key_vector['modulus']
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_dzenlog. | models import GeneralPost
class TextPost(GeneralPost):
    """Blog post whose content is a plain text body."""
    body_detail_template = 'blog/text_post.html'
    feed_description_template = 'blog/text_feed_detail.html'
    body = models.TextField(_('Post\'s body'))
class LinkPost(GeneralPost):
    """Blog post that points at an external URL."""
    body_detail_template = 'blog/link_post.html'
    feed_description_template = 'blog/link_feed_detail.html'
    # NOTE(review): URLField's verify_exists argument was removed in Django
    # 1.4 — confirm the Django version targeted here still accepts it.
    url = models.URLField(_('URL'), default='http://example.com', verify_exists=False)
    description = models.TextField(_('URL\'s description'), blank=True)
|
def subtrees_equal(expected_schema_node, actual_node):
    """Recursively compare an expected schema tuple tree against a node tree.

    ``expected_schema_node`` is a ``(name, state, children)`` tuple whose
    children list is ordered by child name; ``actual_node`` must expose
    ``get_name()``, ``get_state()``, ``get_children()`` and ``get_child()``.

    Returns True when the two trees match exactly, False otherwise.
    """
    if expected_schema_node[0] != actual_node.get_name():
        return False
    if expected_schema_node[1] != actual_node.get_state():
        return False
    expected_children = expected_schema_node[2]
    # Sort the actual child names so they line up with the (name-ordered)
    # expected children.
    actual_children_names = sorted(
        child.get_name() for child in actual_node.get_children())
    if len(expected_children) != len(actual_children_names):
        return False
    for (expected_child, actual_child_name) in \
            zip(expected_children, actual_children_names):
        # BUG FIX: the recursive result was previously discarded, so any
        # mismatch below the root level was silently ignored.
        if not subtrees_equal(
                expected_child, actual_node.get_child(actual_child_name)):
            return False
    return True
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
    ['b', 'a', 'c'],
    ['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
    """factorize() codes appear in first-seen order with -1 for missing."""
    values = pd.Categorical(['b', 'b', 'a', 'c', None],
                            categories=categories,
                            ordered=ordered)
    codes, uniques = pd.factorize(values)
    tm.assert_numpy_array_equal(
        codes, np.array([0, 0, 1, 2, -1], dtype=np.intp))
    tm.assert_categorical_equal(
        uniques,
        pd.Categorical(['b', 'a', 'c'], categories=categories,
                       ordered=ordered))
def test_factorized_sort():
    """With sort=True the uniques come back lexically sorted."""
    values = pd.Categorical(['b', 'b', None, 'a'])
    codes, uniques = pd.factorize(values, sort=True)
    tm.assert_numpy_array_equal(codes, np.array([1, 1, -1, 0], dtype=np.intp))
    tm.assert_categorical_equal(uniques, pd.Categorical(['a', 'b']))
def test_factorized_sort_ordered():
    """sort=True follows the declared (ordered) categories, not lexical order."""
    values = pd.Categorical(['b', 'b', None, 'a'],
                            categories=['c', 'b', 'a'],
                            ordered=True)
    codes, uniques = pd.factorize(values, sort=True)
    tm.assert_numpy_array_equal(codes, np.array([0, 0, -1, 1], dtype=np.intp))
    tm.assert_categorical_equal(
        uniques,
        pd.Categorical(['b', 'a'], categories=['c', 'b', 'a'], ordered=True))
def test_isin_cats():
    """isin() matches NaN entries and ignores unknown values (GH2003)."""
    values = pd.Categorical(["a", "b", np.nan])
    tm.assert_numpy_array_equal(
        np.array([True, False, True], dtype=bool),
        values.isin(["a", np.nan]))
    tm.assert_numpy_array_equal(
        np.array([True, False, False], dtype=bool),
        values.isin(["a", "c"]))
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_isin_empty(empty):
    """isin() against any empty collection is all-False."""
    values = pd.Categorical(["a", "b"])
    tm.assert_numpy_array_equal(np.array([False, False], dtype=bool),
                                values.isin(empty))
class TestTake(object):
    # https://github.com/pandas-dev/pandas/issues/20664

    def test_take_warns(self):
        """A negative index without allow_fill warns about the change."""
        values = pd.Categorical(['a', 'b'])
        with tm.assert_produces_warning(FutureWarning):
            values.take([0, -1])

    def test_take_positive_no_warning(self):
        """Purely positional indices stay silent."""
        values = pd.Categorical(['a', 'b'])
        with tm.assert_produces_warning(None):
            values.take([0, 0])

    def test_take_bounds(self, allow_fill):
        """Out-of-bounds indices raise IndexError either way (GH20664)."""
        values = pd.Categorical(['a', 'b', 'a'])
        with pytest.raises(IndexError):
            values.take([4, 5], allow_fill=allow_fill)

    def test_take_empty(self, allow_fill):
        """Any take from an empty Categorical raises IndexError (GH20664)."""
        values = pd.Categorical([], categories=['a', 'b'])
        with pytest.raises(IndexError):
            values.take([0], allow_fill=allow_fill)

    def test_positional_take(self, ordered):
        """Positional take keeps the category set and orderedness."""
        values = pd.Categorical(['a', 'a', 'b', 'b'], categories=['b', 'a'],
                                ordered=ordered)
        taken = values.take([0, 1, 2], allow_fill=False)
        tm.assert_categorical_equal(
            taken,
            pd.Categorical(['a', 'a', 'b'], categories=values.categories,
                           ordered=ordered))

    def test_positional_take_unobserved(self, ordered):
        """Unobserved categories survive a positional take."""
        values = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'],
                                ordered=ordered)
        taken = values.take([1, 0], allow_fill=False)
        tm.assert_categorical_equal(
            taken,
            pd.Categorical(['b', 'a'], categories=values.categories,
                           ordered=ordered))

    def test_take_allow_fill(self):
        """allow_fill=True turns -1 indices into NaN (GH23296)."""
        values = pd.Categorical(['a', 'a', 'b'])
        taken = values.take([0, -1, -1], allow_fill=True)
        tm.assert_categorical_equal(
            taken,
            pd.Categorical(['a', np.nan, np.nan], categories=['a', 'b']))

    def test_take_fill_with_negative_one(self):
        """fill_value=-1 works even when -1 is itself a category."""
        values = pd.Categorical([-1, 0, 1])
        taken = values.take([0, -1, 1], allow_fill=True, fill_value=-1)
        tm.assert_categorical_equal(
            taken, pd.Categorical([-1, -1, 0], categories=[-1, 0, 1]))

    def test_take_fill_value(self):
        """An explicit, known fill_value fills the -1 slots (GH23296)."""
        values = pd.Categorical(['a', 'b', 'c'])
        taken = values.take([0, 1, -1], fill_value='a', allow_fill=True)
        tm.assert_categorical_equal(
            taken,
            pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']))

    def test_take_fill_value_new_raises(self):
        """A fill_value outside the categories raises TypeError (GH23296)."""
        values = pd.Categorical(['a', 'b', 'c'])
        xpr = r"'fill_value' \('d'\) is not in this Categorical's categories."
        with tm.assert_raises_regex(TypeError, xpr):
            values.take([0, 1, -1], fill_value='d', allow_fill=True)
|
his work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from cassandra.cluster import Cluster
from cassandra.policies import (
DCAwareRoundRobinPolicy,
RoundRobinPolicy,
TokenAwarePolicy,
WhiteListRoundRobinPolicy,
)
from airflow.models import Connection
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
from airflow.utils import db
@pytest.mark.integration("cassandra")
class TestCassandraHook(unittest.TestCase):
    def setUp(self):
        """Register the test connections and (re)create the ``s`` keyspace.

        ``cassandra_test`` exercises extras parsing (load-balancing policy +
        protocol version); ``cassandra_default_with_schema`` binds a default
        keyspace.  SCHEMA is the CQL alias for KEYSPACE here.
        """
        db.merge_conn(
            Connection(
                conn_id='cassandra_test',
                conn_type='cassandra',
                host='host-1,host-2',
                port='9042',
                schema='test_keyspace',
                extra='{"load_balancing_policy":"TokenAwarePolicy","protocol_version":4}',
            )
        )
        db.merge_conn(
            Connection(
                conn_id='cassandra_default_with_schema',
                conn_type='cassandra',
                host='cassandra',
                port='9042',
                schema='s',
            )
        )
        hook = CassandraHook("cassandra_default")
        session = hook.get_conn()
        cqls = [
            "DROP SCHEMA IF EXISTS s",
            """
                CREATE SCHEMA s WITH REPLICATION =
                { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
            """,
        ]
        for cql in cqls:
            session.execute(cql)
        session.shutdown()
        hook.shutdown_cluster()
def test_get_conn(self):
with mock.patch.object(Cluster, "__init__") as mock_cluster_ctor:
mock_cluster_ctor.return_value = None
CassandraHook(cassandra_conn_id='cassandra_test')
mock_cluster_ctor.assert_called_once_with(
contact_points=['host-1', 'host-2'],
port=9042,
protocol_version=4,
load_balancing_policy=mock.ANY,
)
assert isinstance(mock_cluster_ctor.call_args[1]['load_balancing_policy'], TokenAwarePolicy)
    def test_get_lb_policy_with_no_args(self):
        """Policies built without arguments default sensibly."""
        # test LB policies with no args
        self._assert_get_lb_policy('RoundRobinPolicy', {}, RoundRobinPolicy)
        self._assert_get_lb_policy('DCAwareRoundRobinPolicy', {}, DCAwareRoundRobinPolicy)
        self._assert_get_lb_policy(
            'TokenAwarePolicy', {}, TokenAwarePolicy, expected_child_policy_type=RoundRobinPolicy
        )
    def test_get_lb_policy_with_args(self):
        """Policies that accept arguments are constructed with them applied."""
        # test DCAwareRoundRobinPolicy with args
        self._assert_get_lb_policy(
            'DCAwareRoundRobinPolicy',
            {'local_dc': 'foo', 'used_hosts_per_remote_dc': '3'},
            DCAwareRoundRobinPolicy,
        )
        # test WhiteListRoundRobinPolicy with args
        # getaddrinfo is patched so the allow-list hosts resolve without DNS.
        fake_addr_info = [
            ['family', 'sockettype', 'proto', 'canonname', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0)]
        ]
        with mock.patch('socket.getaddrinfo', return_value=fake_addr_info):
            self._assert_get_lb_policy(
                'WhiteListRoundRobinPolicy', {'hosts': ['host1', 'host2']}, WhiteListRoundRobinPolicy
            )
        # test TokenAwarePolicy with args
        with mock.patch('socket.getaddrinfo', return_value=fake_addr_info):
            self._assert_get_lb_policy(
                'TokenAwarePolicy',
                {
                    'child_load_balancing_policy': 'WhiteListRoundRobinPolicy',
                    'child_load_balancing_policy_args': {'hosts': ['host-1', 'host-2']},
                },
                TokenAwarePolicy,
                expected_child_policy_type=WhiteListRoundRobinPolicy,
            )
    def test_get_lb_policy_invalid_policy(self):
        """Unknown policy names fall back to RoundRobinPolicy."""
        # test invalid policy name should default to RoundRobinPolicy
        self._assert_get_lb_policy('DoesNotExistPolicy', {}, RoundRobinPolicy)
        # test invalid child policy name should default child policy to RoundRobinPolicy
        self._assert_get_lb_policy(
            'TokenAwarePolicy', {}, TokenAwarePolicy, expected_child_policy_type=RoundRobinPolicy
        )
        self._assert_get_lb_policy(
            'TokenAwarePolicy',
            {'child_load_balancing_policy': 'DoesNotExistPolicy'},
            TokenAwarePolicy,
            expected_child_policy_type=RoundRobinPolicy,
        )
    def test_get_lb_policy_no_host_for_allow_list(self):
        """WhiteListRoundRobinPolicy without a hosts argument must raise."""
        # test host not specified for WhiteListRoundRobinPolicy should throw exception
        self._assert_get_lb_policy(
            'WhiteListRoundRobinPolicy', {}, WhiteListRoundRobinPolicy, should_throw=True
        )
        self._assert_get_lb_policy(
            'TokenAwarePolicy',
            {'child_load_balancing_policy': 'WhiteListRoundRobinPolicy'},
            TokenAwarePolicy,
            expected_child_policy_type=RoundRobinPolicy,
            should_throw=True,
        )
def _assert_get_lb_policy(
self,
policy_name,
policy_args,
expected_policy_type,
expected_child_policy_type=None,
should_throw=False,
):
thrown = False
try:
policy = CassandraHook.get_lb_policy(policy_name, policy_args)
assert isinstance(policy, expected_policy_type)
if expected_child_policy_type:
assert isinstance(policy._child_policy, expected_child_policy_type)
except Exception: # pylint: disable=broad-except
thrown = True
assert should_throw == thrown
def test_record_exists_with_keyspace_from_cql(self):
hook = CassandraHook("cassandra_default")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS s.t",
"CREATE TABLE s.t (pk1 text, pk2 text, c text, PRIMARY KEY (pk1, pk2))",
"INSERT INTO s.t (pk1, pk2, c) VALUES ('foo', 'bar', 'baz')",
]
for cql in cqls:
session.execute(cql)
assert hook.record_exists("s.t", {"pk1": "foo", "pk2": "bar"})
assert not hook.record_exists("s.t", {"pk1": "foo", "pk2": "baz"})
session.shutdown()
hook.shutdown_cluster()
    def test_record_exists_with_keyspace_from_session(self):
        """record_exists() resolves an unqualified table via the session's
        default keyspace (connection ``cassandra_default_with_schema``)."""
        hook = CassandraHook("cassandra_default_with_schema")
        session = hook.get_conn()
        cqls = [
            "DROP TABLE IF EXISTS t",
            "CREATE TABLE t (pk1 text, pk2 text, c text, PRIMARY KEY (pk1, pk2))",
            "INSERT INTO t (pk1, pk2, c) VALUES ('foo', 'bar', 'baz')",
        ]
        for cql in cqls:
            session.execute(cql)
        assert hook.record_exists("t", {"pk1": "foo", "pk2": "bar"})
        assert not hook.record_exists("t", {"pk1": "foo", "pk2": "baz"})
        session.shutdown()
        hook.shutdown_cluster()
    def test_table_exists_with_keyspace_from_cql(self):
        """table_exists() works with a keyspace-qualified table name."""
        hook = CassandraHook("cassandra_default")
        session = hook.get_conn()
        cqls = [
            "DROP TABLE IF EXISTS s.t",
            "CREATE TABLE s.t (pk1 text PRIMARY KEY)",
        ]
        for cql in cqls:
            session.execute(cql)
        assert hook.table_exists("s.t")
        assert not hook.table_exists("s.u")
        session.shutdown()
        hook.shutdown_cluster()
def test_table_exists_with_keyspace_from_session(self):
hook = CassandraHook("cassandra_default_with_schema")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS t",
"CREATE TABLE t (pk1 text PRIMARY KEY) |
from datapackage_pipelines.wrapper import ingest, spew
def get_votes(resource, data, stats):
    """Accumulate, per plenum session, the set of MK ids that voted.

    Fills ``data['session_voters']`` (session_id -> set of mk ids) and
    counts processed votes / per-vote MK entries into *stats*.
    """
    session_voters = data['session_voters'] = {}
    stats['num_votes'] = 0
    stats['num_vote_mks'] = 0
    for vote in resource:
        voters = session_voters.setdefault(vote['session_id'], set())
        # Pro, against and abstain lists all count as "voted in the session".
        for attr in ('mk_ids_pro', 'mk_ids_against', 'mk_ids_abstain'):
            for mk_id in (vote[attr] or ()):
                voters.add(mk_id)
                stats['num_vote_mks'] += 1
        stats['num_votes'] += 1
def get_plenum(resource, data, stats):
    """Annotate each plenum session with the list of MKs who voted in it.

    Sessions without any recorded voters get None (an empty voter set is
    normalised to None as well)."""
    stats.update(known_sessions=0, unknown_sessions=0)
    voters_by_session = data['session_voters']
    for session in resource:
        session_id = session['PlenumSessionID']
        if session_id in voters_by_session:
            stats['known_sessions'] += 1
            voter_ids = list(voters_by_session[session_id])
        else:
            stats['unknown_sessions'] += 1
            voter_ids = None
        session['voter_mk_ids'] = voter_ids if voter_ids else None
        yield session
def get_resources(resources, stats, data):
    """Route each resource: aggregate the votes resource, annotate the plenum
    resource, and pass everything else through untouched."""
    for position, resource in enumerate(resources):
        if position == data['votes_index']:
            # Consumed entirely here; get_datapackage() removes this
            # resource from the output datapackage.
            get_votes(resource, data, stats)
        elif position == data['plenum_index']:
            yield get_plenum(resource, data, stats)
        else:
            yield resource
def get_datapackage(datapackage, data):
    """Record the votes/plenum resource positions, extend the plenum schema
    with a voter_mk_ids array field, and drop the consumed votes resource."""
    for position, descriptor in enumerate(datapackage['resources']):
        resource_name = descriptor['name']
        if resource_name == 'view_vote_rslts_hdr_approved':
            data['votes_index'] = position
        elif resource_name == 'kns_plenumsession':
            data['plenum_index'] = position
            descriptor['schema']['fields'] += [
                {'name': 'voter_mk_ids', 'type': 'array'}]
    del datapackage['resources'][data['votes_index']]
    return datapackage
def main():
    """Pipeline entry point: wire the vote aggregation into the step."""
    # ingest() yields (parameters, datapackage, resources); the two extra
    # dicts become the shared stats and cross-resource scratch data.
    parameters, datapackage, resources, stats, data = ingest() + ({}, {})
    spew(get_datapackage(datapackage, data),
         get_resources(resources, stats, data),
         stats)
if __name__ == '__main__':
    main()
|
ize=int(self.test_percentage * len(non_zeros)))
train[user, test_ratings] = 0.
test[user, test_ratings] = self.ratings[user, test_ratings]
assert(numpy.all((train * test) == 0))
self.test_indices = test
return train, test
def naive_split_items(self):
"""
Split the ratings on test and train data by removing random documents.
:returns: a tuple of train and test data.
:rtype: tuple
"""
if self.random_seed is False:
numpy.random.seed(42)
indices = list(range(self.n_items))
test_ratings = numpy.random.choice(indices, size=int(self.test_percentage * len(indices)))
train = self.ratings.copy()
test = numpy.zeros(self.ratings.shape)
for index in test_ratings:
train[:, index] = 0
test[:, index] = self.ratings[:, index]
assert(numpy.all((train * test) == 0))
return train, test
def get_fold(self, fold_num, fold_test_indices):
"""
Returns train and test data for a given fold number
:param int fold_num: the fold index to be returned
:param int[] fold_test_indices: A list of the indicies of the testing fold.
:returns: tuple of training and test data
:rtype: 2-tuple of 2d numpy arrays
"""
current_test_fold_indices = []
index = fold_num
for ctr in range(self.ratings.shape[0]):
current_test_fold_indices.append(fold_test_indices[index])
index += self.k_folds
return self.generate_kfold_matrix(current_test_fold_indices)
def get_kfold_indices(self):
"""
Returns the indices for rating matrix for each kfold split. Where each test set
contains ~1/k of the total items a user has in their digital library.
:returns: a list of all indices of the training set and test set.
:rtype: list of lists
"""
if self.random_seed is False:
numpy.random.seed(42)
test_indices = []
for user in range(self.ratings.shape[0]):
# Indices for all items in the rating matrix.
item_indices = numpy.arange(self.ratings.shape[1])
# Indices of all items in user's digital library.
rated_items_indices = self.ratings[user].nonzero()[0]
mask = numpy.ones(len(self.ratings[user]), dtype=bool)
mask[[rated_items_indices]] = False
# Indices of all items not in user's digital library.
non_rated_indices = item_indices[mask]
# Shuffle all rated items indices
numpy.random.shuffle(rated_items_indices)
# Size of 1/k of the total user's ratings
size_of_test = round((1.0 / self.k_folds) * le | n(rated_items_indices))
# 2d List that stores all the indices of each test set for each fold.
test_ratings = [[] for x in range(self.k_folds)]
counter = 0
numpy.random.shuffle(non_rated_indices)
# List that stores the number of indices to be added to each test set.
num_to_add = []
# create k different folds for each user.
for index in range(self.k_folds):
if ind | ex == self.k_folds - 1:
test_ratings[index] = numpy.array(rated_items_indices[counter:len(rated_items_indices)])
else:
test_ratings[index] = numpy.array(rated_items_indices[counter:counter + size_of_test])
counter += size_of_test
# adding unique zero ratings to each test set
num_to_add.append(int((self.ratings.shape[1] / self.k_folds) - len(test_ratings[index])))
if index > 0 and num_to_add[index] != num_to_add[index - 1]:
addition = non_rated_indices[index * (num_to_add[index - 1]):
(num_to_add[index - 1] * index) + num_to_add[index]]
else:
addition = non_rated_indices[index * (num_to_add[index]):num_to_add[index] * (index + 1)]
test_ratings[index] = numpy.append(test_ratings[index], addition)
test_indices.append(test_ratings[index])
self.test_indices = test_indices
return test_indices
def generate_kfold_matrix(self, test_indices):
"""
Returns a training set and a training set matrix for one fold.
This method is to be used in conjunction with get_kfold_indices()
:param int[] test_indices: array of test set indices.
:returns: Training set matrix and Test set matrix.
:rtype: 2-tuple of 2d numpy arrays
"""
train_matrix = numpy.zeros(self.ratings.shape)
test_matrix = numpy.zeros(self.ratings.shape)
for user in range(train_matrix.shape[0]):
train_indices = list(set(range(self.n_items)) - set(test_indices[user]))
test_matrix[user, test_indices[user]] = self.ratings[user, test_indices[user]]
train_matrix[user, train_indices] = self.ratings[user, train_indices]
return train_matrix, test_matrix
    def load_top_recommendations(self, n_recommendations, predictions, test_data, fold):
        """
        This method loads the top n recommendations into a local variable.
        :param int n_recommendations: number of recommendations to be generated.
        :param int[][] predictions: predictions matrix (only 0s or 1s)
        :param test_data: test data (unused here; kept for interface parity).
        :param int fold: fold number used to offset into the stored test indices.
        :returns: A matrix of top recommendations for each user.
        :rtype: int[][]
        """
        for user in range(self.ratings.shape[0]):
            # NOTE(review): indexing with user * (1 + fold) looks suspicious
            # for multi-fold runs — confirm the intended layout of
            # self.test_indices.
            nonzeros = self.test_indices[(user * (1 + fold))]
            top_recommendations = TopRecommendations(n_recommendations)
            for index in nonzeros:
                index = int(index)
                top_recommendations.insert(index, predictions[user][index])
            # Reversed so the highest-scoring indices come first — presumably
            # get_indices() yields ascending order; verify against
            # TopRecommendations.
            self.recommendation_indices[user] = list(reversed(top_recommendations.get_indices()))
            top_recommendations = None
        self.recs_loaded = True
        return self.recommendation_indices
def get_rmse(self, predicted, actual=None):
"""
The method given a prediction matrix returns the root mean squared error (rmse).
:param float[][] predicted: numpy matrix of floats representing the predicted ratings
:returns: root mean square error
:rtype: float
"""
if actual is None:
actual = self.ratings
rss = 0
for i in range(predicted.shape[0]):
rss += numpy.sum((predicted[i] - actual[i]) ** 2)
rss = float(rss) / numpy.size(predicted)
return numpy.sqrt(rss)
def calculate_recall(self, ratings, predictions):
"""
The method given original ratings and predictions returns the recall of the recommender
:param int[][] ratings: ratings matrix
:param int[][] predictions: predictions matrix (only 0s or 1s)
:returns: recall, ranges from 0 to 1
:rtype: float
"""
denom = sum(sum(ratings))
nonzeros = ratings.nonzero()
nonzeros_predictions = predictions[nonzeros]
return sum(nonzeros_predictions) / denom # Division by zeros are handled.
def recall_at_x(self, x, predictions, ratings, rounded_predictions):
"""
The method calculates the average recall of all users by only looking at the top x
and the normalized Discounted Cumulative Gain.
:param int x: number of recommendations to look at, sorted by relevance.
:param int[][] ratings: ratings matrix
:param float[][] predictions: calculated predictions of the recommender.
:param int[][] test_data: test data.
:returns: Recall at x
:rtype: float
"""
recalls = []
for user in range(ratings.shape[0]):
recommendation_hits = 0
user_likes = ratings[user].sum()
recall = 0
if user_likes != 0:
recommendation_hits = (self.ratings[user][self.recommendation_indic |
# -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entit | y
class Concept(Entity):
    """Concept entity."""
    collection = 'concepts'
    resource = 'concept'
    # Related entities that the API can attach to a concept.
    _relations = {
        'advertiser',
    }
    # Converters applied to fields pulled from the API
    # (None means "keep the raw value as-is").
    _pull = {
        'advertiser_id': int,
        'created_on': t1types.strpt,
        'id': int,
        'name': None,
        'status': t1types.int_to_bool,
        'updated_on': t1types.strpt,
        'version': int,
    }
    # Converters used when pushing back; status is sent as an int, not bool.
    _push = _pull.copy()
    _push.update({
        'status': int,
    })

    def __init__(self, session, properties=None, **kwargs):
        """Delegate construction entirely to Entity."""
        super(Concept, self).__init__(session, properties, **kwargs)
|
"""
Hack to get scripts to run from source checkout without having t | o set
PYTHONPATH.
"""
import sys
from os.path import dirname, join, abspath
# Resolve the project root (one directory above this file) and put it at the
# front of the import path so in-tree packages win over installed ones.
db_path = dirname(__file__)
project_path = abspath(join(db_path, ".."))
sys.path.insert(0, project_path)
|
import calendar
from datetime import date
| from django.conf import settings
from django.core.mail import send_mail
from django.core.management import BaseCommand |
from django.template.loader import get_template
from compta.bank import get_bank_class
from compta.models import Compte
def generate_mail():
    """On the last day of the month, email each joint account's holders the
    amounts they have to deposit on it."""
    # Last day of the month: send a mail for joint accounts giving the sums
    # to deposit on them.
    if date.today().day == calendar.monthrange(date.today().year, date.today().month)[1]:
        comptes = Compte.objects.all()
        for compte in comptes:
            # Only joint accounts (more than one holder) are concerned.
            if compte.utilisateurs.count() > 1:
                compte.calculer_parts()
                if compte.total_salaire > 0:
                    # NOTE: the template is rendered with locals() as context,
                    # so the local variable names here double as template
                    # variables — do not rename them.
                    html_content = get_template('compta/details_calcule_a_verser.html').render(locals())
                    mails = []
                    for user in compte.utilisateurs_list:
                        if user.email is not None:
                            mails.append(user.email)
                    if len(mails) > 0:
                        send_mail(
                            '[Homelab] Sommes à verser sur {}'.format(str(compte)),
                            "",
                            settings.DEFAULT_FROM_EMAIL, mails, html_message=html_content)
def check_operations():
    """Fetch the latest online bank operations, record the new ones in the
    database, and mail the account holders when anything changed."""
    for compte in Compte.objects.all():
        known_operations = compte.operation_set.all()
        bank_class = get_bank_class(compte.identifiant.banque)
        has_changed = False
        with bank_class(compte.identifiant.login, compte.identifiant.mot_de_passe, compte.numero_compte) as bank:
            fetched_operations = bank.fetch_last_operations()
            new_solde = bank.fetch_balance()
            for fetched in fetched_operations:
                # An operation is "known" when its value date, label and
                # amount all match an existing one.
                already_known = any(
                    known.date_valeur == fetched.date_valeur and
                    known.libelle == fetched.libelle and
                    float(known.montant) == float(fetched.montant)
                    for known in known_operations
                )
                if not already_known:
                    fetched.compte = compte
                    fetched.save()
                    has_changed = True
            if compte.solde != new_solde:
                compte.solde = new_solde
                compte.save()
            if has_changed:
                recipients = [user.email for user in compte.utilisateurs.all()
                              if user.email is not None]
                if len(recipients) > 0:
                    send_mail(
                        '[Homelab] De nouvelles opérations sont à catégoriser sur {}'.format(str(compte)),
                        "",
                        settings.DEFAULT_FROM_EMAIL, recipients)
    generate_mail()
class Command(BaseCommand):
    # Help text shown by `manage.py help` (kept in French, like the mails).
    help = "Déclenche le script qui vérifie les nouvelles opérations bancaires et qui envoie des mails lorsqu'il y en a des nouvelles"

    def handle(self, *args, **options):
        """Management-command entry point: run the bank-sync check."""
        check_operations()
|
= pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = LeaveOneOutEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] Strategies to encode categorical variables with many categories, from
https://www.kaggle.com/c/caterpillar-tube-pricing/discussion/15748#143154.
"""
    def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
                 handle_unknown='value', handle_missing='value', random_state=None, sigma=None):
        """Store the encoder's configuration; no fitting happens here.

        See the class docstring for the meaning of each parameter.
        """
        self.return_df = return_df
        self.drop_invariant = drop_invariant
        # Columns found to be invariant after encoding (filled during fit).
        self.drop_cols = []
        self.verbose = verbose
        self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X
        self.cols = cols
        # Number of input features seen during fit (validates transform input).
        self._dim = None
        # Per-column category -> aggregate mapping (filled during fit).
        self.mapping = None
        self.handle_unknown = handle_unknown
        self.handle_missing = handle_missing
        # Global target mean (filled during fit).
        self._mean = None
        self.random_state = random_state
        self.sigma = sigma
        self.feature_names = None
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# unite the input into pandas types
X = util.convert_input(X)
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.use_default_cols:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
categories = self.fit_leave_one_out(
X, y,
cols=self.cols
)
self.mapping = categories
X_temp = self.transform(X, override_return_df=True)
| self.feature_names = X_temp.columns.tolist()
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols | ]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target information (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# unite the input into pandas types
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
# if we are encoding the training data, we have to check the target
if y is not None:
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
if not list(self.cols):
return X
X = self.transform_leave_one_out(
X, y,
mapping=self.mapping
)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def fit_leave_one_out(self, X_in, y, cols=None):
X = X_in.copy(deep=True)
if cols is None:
cols = X.columns.values
self._mean = y.mean()
return {col: self.fit_column_map(X[col], y) for col in cols}
def fit_column_map(self, series, y):
category = pd.Categorical(series)
categories = category.categories
codes = category.codes.copy()
codes[codes == -1] = len(categories)
categories = np.append(categories, np.nan)
return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))
result = y.groupby(codes).agg(['sum', 'count'])
return result.rename(return_map)
def transform_leave_one_out(self, X_in, y, mapping=None):
"""
Leave one out encoding uses a single column of floats to represent the means of the target variables.
"""
X = X_in.copy(deep=True)
random_state_ = check_random_state(self.random_state)
for col, colmap in mapping.items():
level_notunique = colmap['count'] > 1
unique_train = colmap.index
unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype)
is_nan = X[col].isnull()
is_unknown_value = X[col].isin(unseen_values.dropna().astype(object))
if X[col].dtype.name == 'category': # Pandas 0.24 tries hard to preserve categorical data type
X[col] = X[col].astype(str)
if self.handle_unknown == 'error' and is_unknown_value.any():
raise ValueError('Columns to be encoded can not contain new values')
if y is None: # Replace level with its mean target; if level occurs only once, use global mean
level_means = (colmap['sum'] / colmap['count']).where(level_notunique, self._mean)
X[col] = X[col].map(level_means)
else: # Replace level with its mean target, calculated excluding this row's target
# The y (target) mean for this level is normally just the sum/count;
# excluding this row's y, it's (sum - y) / (count - 1)
level_means = (X[col].map(colmap['sum']) - y) / (X[col].map(colmap['count']) - 1)
# The 'where' fills in singleton levels (count = 1 -> div by 0) with the global mean
X[col] = level_means.where(X[col].map(colmap['count'][level_notunique]).notnull(), self._mean)
if self.handle_unknown == 'value':
|
import zlib
from test_support import TestFailed
import sys
import imp
# Locate this test's own source to use as compressible sample data; fall back
# to reading __file__ when imp cannot find the module.
try:
    t = imp.find_module('test_zlib')
    file = t[0]
except ImportError:
    file = open(__file__)
buf = file.read() * 8
file.close()
# test the checksums (hex so the test doesn't break on 64-bit machines)
print hex(zlib.crc32('penguin')), hex(zlib.crc32('penguin', 1))
print hex(zlib.adler32('penguin')), hex(zlib.adler32('penguin', 1))
# make sure we generate some expected errors
try:
    zlib.compress('ERROR', zlib.MAX_WBITS + 1)
except zlib.error, msg:
    print "expecting", msg
try:
    zlib.compressobj(1, 8, 0)
except ValueError, msg:
    print "expecting", msg
try:
    zlib.decompressobj(0)
except ValueError, msg:
    print "expecting", msg
# one-shot compress/decompress round trip of the sample buffer
x = zlib.compress(buf)
y = zlib.decompress(x)
if buf != y:
    print "normal compression/decompression failed"
else:
    print "normal compression/decompression succeeded"
buf = buf * 16
# streaming round trip with raw deflate (negative wbits -> no zlib header)
co = zlib.compressobj(8, 8, -15)
x1 = co.compress(buf)
x2 = co.flush()
x = x1 + x2
dc = zlib.decompressobj(-15)
y1 = dc.decompress(x)
y2 = dc.flush()
y = y1 + y2
if buf != y:
    print "compress/decompression obj failed"
else:
    print "compress/decompression obj succeeded"
# compress in 256-byte chunks using non-default memLevel/strategy options
co = zlib.compressobj(2, 8, -12, 9, 1)
bufs = []
for i in range(0, len(buf), 256):
    bufs.append(co.compress(buf[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
decomp1 = zlib.decompress(combuf, -12, -5)
if decomp1 != buf:
    print "decompress with init options failed"
else:
    print "decompress with init options succeeded"
# decompress the same stream incrementally in 128-byte chunks
deco = zlib.decompressobj(-12)
bufs = []
for i in range(0, len(combuf), 128):
    bufs.append(deco.decompress(combuf[i:i+128]))
bufs.append(deco.flush())
decomp2 = ''.join(bufs)
if decomp2 != buf:
    print "decompressobj with init options failed"
else:
    print "decompressobj with init options succeeded"
print "should be '':", `deco.unconsumed_tail`
# Check a decompression object with max_length specified
deco = zlib.decompressobj(-12)
cb = combuf
bufs = []
while cb:
max_length = 1 + len(cb)/10
chunk = deco.decompress(cb, max_length)
if len(chunk) > max_length:
print 'chunk too big (%d>%d)' % (len(chunk),max_length)
bufs.append(chunk)
cb = deco.unconsumed_tail
bufs.append(deco.flush())
decomp2 = ''.join(buf)
if decomp2 != buf:
print "max_length decompressobj failed"
else:
print "max_length decompressobj succeeded"
# Misc tests of max_length
deco = zlib.decompressobj(-12)
# a negative max_length must be rejected
try:
    deco.decompress("", -1)
except ValueError:
    pass
else:
    print "failed to raise value error on bad max_length"
print "unconsumed_tail should be '':", `deco.unconsumed_tail`
# Test flush() with the various options, using all the different levels
# in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt if hasattr(zlib, opt)]
for sync in sync_opt:
    for level in range(10):
        obj = zlib.compressobj( level )
        d = obj.compress( buf[:3000] )
        d = d + obj.flush( sync )
        d = d + obj.compress( buf[3000:] )
        d = d + obj.flush()
        if zlib.decompress(d) != buf:
            print "Decompress failed: flush mode=%i, level=%i" % (sync,level)
        del obj
# Test for the odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
# seeded, so the "random" stream is deterministic across runs
random.seed(1)
print 'Testing on 17K of random data'
if hasattr(zlib, 'Z_SYNC_FLUSH'):
    # Create compressor and decompressor objects
    c=zlib.compressobj(9)
    d=zlib.decompressobj()
    # Try 17K of data
    # generate random data stream
    a=""
    for i in range(17*1024):
        a=a+chr(random.randint(0,255))
    # compress, sync-flush, and decompress
    t = d.decompress( c.compress(a)+c.flush(zlib.Z_SYNC_FLUSH) )
    # if decompressed data is different from the input data, choke.
    if len(t) != len(a):
        print len(a),len(t),len(d.unused_data)
        raise TestFailed, "output of 17K doesn't match"
def ignore():
    """An empty function with a big string.
    Make the compression algorithm work a little harder.
    """
    # The string below is an unassigned expression: it is never used at
    # runtime and exists only to enlarge this module's source, which the
    # tests above compress.
    """
    LAERTES
    O, fear me not.
    I stay too long: but here my father comes.
    Enter POLONIUS
    A double blessing is a double grace,
    Occasion smiles upon a second leave.
    LORD POLONIUS
    Yet here, Laertes! aboard, aboard, for shame!
    The wind sits in the shoulder of your sail,
    And you are stay'd for. There; my blessing with thee!
    And these few precepts in thy memory
    See thou character. Give thy thoughts no tongue,
    Nor any unproportioned thought his act.
    Be thou familiar, but by no means vulgar.
    Those friends thou hast, and their adoption tried,
    Grapple them to thy soul with hoops of steel;
    But do not dull thy palm with entertainment
    Of each new-hatch'd, unfledged comrade. Beware
    Of entrance to a quarrel, but being in,
    Bear't that the opposed may beware of thee.
    Give every man thy ear, but few thy voice;
    Take each man's censure, but reserve thy judgment.
    Costly thy habit as thy purse can buy,
    But not express'd in fancy; rich, not gaudy;
    For the apparel oft proclaims the man,
    And they in France of the best rank and station
    Are of a most select and generous chief in that.
    Neither a borrower nor a lender be;
    For loan oft loses both itself and friend,
    And borrowing dulls the edge of husbandry.
    This above all: to thine ownself be true,
    And it must follow, as the night the day,
    Thou canst not then be false to any man.
    Farewell: my blessing season this in thee!
    LAERTES
    Most humbly do I take my leave, my lord.
    LORD POLONIUS
    The time invites you; go; your servants tend.
    LAERTES
    Farewell, Ophelia; and remember well
    What I have said to you.
    OPHELIA
    'Tis in my memory lock'd,
    And you yourself shall keep the key of it.
    LAERTES
    Farewell.
    """
|
import csv
import collections
import json
import numpy
from smqtk.utils.bin_utils import logging, initialize_logging
from smqtk.representation.data_set.memory_set import DataMemorySet
from smqtk.algorithms.descriptor_generator.caffe_descriptor import CaffeDescriptorGenerator
from smqtk.algorithms.classifier.index_label import IndexLabelClassifier
from smqtk.representation import ClassificationElementFactory
from smqtk.representation.classification_element.memory import MemoryClassificationElement
from smqtk.representation.descriptor_index.memory import MemoryDescriptorIndex
# in-memory data-set file cache
EVAL_DATASET = "eval.dataset.pickle"
# Caffe network files -- must be filled in before running this script.
CAFFE_DEPLOY = "CHANGE_ME"
CAFFE_MODEL = "CHANGE_ME"
CAFFE_IMG_MEAN = "CHANGE_ME"
# new-line separated file of index labels.
# Line index should correspond to caffe train/test truth labels.
CAFFE_LABELS = "labels.txt"
# CSV file detailing [cluster_id, ad_id, image_sha1] relationships.
EVAL_CLUSTERS_ADS_IMAGES_CSV = "eval.CP1_clusters_ads_images.csv"
# json-lines file of clusters missing from the above file. Should be at least
# composed of: {"cluster_id": <str>, ... }
EVAL_MISSING_CLUSTERS = "eval.cluster_scores.missing_clusters.jl"
# Output artifacts: cached descriptor index and score files.
OUTPUT_DESCR_PROB_INDEX = "cp1_img_prob_descriptors.pickle"
OUTPUT_MAX_JL = "cp1_scores_max.jl"
OUTPUT_AVG_JL = "cp1_scores_avg.jl"
###############################################################################
# Compute classification scores
initialize_logging(logging.getLogger('smqtk'), logging.DEBUG)
eval_data_set = DataMemorySet(EVAL_DATASET)
img_prob_descr_index = MemoryDescriptorIndex(OUTPUT_DESCR_PROB_INDEX)
img_prob_gen = CaffeDescriptorGenerator(CAFFE_DEPLOY, CAFFE_MODEL, CAFFE_IMG_MEAN,
'prob', batch_size=1000, use_gpu=True,
load_truncated_images=True)
img_c_mem_factory = ClassificationElementFactory(
MemoryClassificationElement, {}
)
img_prob_classifier = IndexLabelClassifier(CAFFE_LABELS)
eval_data2descr = {}
d_to_proc = set()
for data in eval_data_set:
if not img_prob_descr_index.has_descriptor(data.uuid()):
d_to_proc.add(data)
else:
eval_data2descr[data] = img_prob_descr_index[data.uuid()]
if d_to_proc:
eval_data2descr.update(
img_prob_gen.compute_descriptor_async(d_to_proc)
)
d_to_proc.clear()
assert len(eval_data2descr) == eval_data_set.count()
index_additions = []
for data in d_to_proc:
index_additions.append( eval_data2descr[data] )
print "Adding %d new descriptors to prob index" % len(index_additions)
img_prob_descr_index.add_many_descriptors(index_additions)
eval_descr2class = img_prob_classifier.classify_async(eval_data2descr.values(), img_c_mem_factory)
###############################################################################
# The shas that were actually computed
computed_shas = {e.uuid() for e in eval_data2descr}
len(computed_shas)
# Build cluster <-> ad <-> image-sha relationship maps from the CSV, keeping
# only rows whose image descriptor was actually computed.
cluster2ads = collections.defaultdict(set)
cluster2shas = collections.defaultdict(set)
ad2shas = collections.defaultdict(set)
sha2ads = collections.defaultdict(set)
with open(EVAL_CLUSTERS_ADS_IMAGES_CSV) as f:
    reader = csv.reader(f)
    for i, r in enumerate(reader):
        if i == 0:
            # skip header line
            continue
        c, ad, sha = r
        if sha in computed_shas:
            cluster2ads[c].add(ad)
            cluster2shas[c].add(sha)
            ad2shas[ad].add(sha)
            sha2ads[sha].add(ad)
# every computed sha should appear in the relationship CSV
assert len(sha2ads) == len(computed_shas)
###############################################################################
print "Collecting scores for SHA1s"
sha2score = {}
for c in eval_descr2class.values():
sha2score[c.uuid] = c['positive']
print "Collecting scores for ads (MAX and AVG)"
ad2score_max = {}
ad2score_avg = {}
for ad, child_shas in ad2shas.iteritems():
scores = [sha2score[sha] for sha in child_shas]
ad2score_max[ad] = numpy.max(scores)
ad2score_avg[ad] = numpy.average(scores)
# select cluster score from max and average of child ad scores
print "Collecting scores for ads (MAX and AVG)"
cluster2score_max = {}
cluster2score_avg = {}
for c, child_ads in cluster2ads.iteritems():
cluster2score_max[c] = numpy.max( [ad2score_max[ad | ] for ad in child_ads])
cluster2score_avg[c] = numpy.average([ad2score_avg[ad] for ad in child_ads])
len(cluster2score_max)
###############################################################################
# Emit json-lines score files. Clusters with no computed imagery receive a
# neutral 0.5 score so that every cluster id appears in the output.
missing_clusters = {json.loads(l)['cluster_id'] for l in open(EVAL_MISSING_CLUSTERS)}
cluster_id_order = sorted(set(cluster2score_avg) | missing_clusters)
with open(OUTPUT_MAX_JL, 'w') as f:
    for c in cluster_id_order:
        if c in cluster2score_max:
            f.write( json.dumps({"cluster_id": c, "score": float(cluster2score_max[c])}) + '\n' )
        else:
            # Due to a cluster having no child ads with imagery
            print "No childred with images for cluster '%s'" % c
            f.write( json.dumps({"cluster_id": c, "score": 0.5}) + '\n' )
with open(OUTPUT_AVG_JL, 'w') as f:
    for c in cluster_id_order:
        if c in cluster2score_avg:
            f.write( json.dumps({"cluster_id": c, "score": float(cluster2score_avg[c])}) + '\n' )
        else:
            # Due to a cluster having no child ads with imagery
            print "No childred with images for cluster '%s'" % c
            f.write( json.dumps({"cluster_id": c, "score": 0.5}) + '\n' )
|
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https:// | www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org | /licenses/agpl.html)
import report
import wizard
|
# -*- coding: utf-8 -*-
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License | at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distribu | ted on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
import uuid
import asyncio
from appdaemon.appdaemon import AppDaemon
class Sequences:
    """Manages "sequence" entities: registration, removal and step execution."""
    def __init__(self, ad: AppDaemon):
        self.AD = ad
        self.logger = ad.logging.get_child("_sequences")
    async def run_sequence_service(self, namespace, domain, service, kwargs):
        """Service-call entry point; expects the sequence entity in kwargs["entity_id"]."""
        if "entity_id" not in kwargs:
            self.logger.warning("entity_id not given in service call, so will not be executing %s", service)
            return
        # await self.run_sequence("_services", namespace, kwargs["entity_id"])
        # Fire-and-forget: schedule the sequence without awaiting its completion.
        self.AD.thread_async.call_async_no_wait(self.run_sequence, "_services", namespace, kwargs["entity_id"])
    async def add_sequences(self, sequences):
        """Create or refresh a "sequence.<name>" entity for each configured sequence."""
        for sequence in sequences:
            entity = "sequence.{}".format(sequence)
            attributes = {
                "friendly_name": sequences[sequence].get("name", sequence),
                "loop": sequences[sequence].get("loop", False),
                "steps": sequences[sequence]["steps"],
            }
            if not await self.AD.state.entity_exists("rules", entity):
                # it doesn't exist so add it
                await self.AD.state.add_entity(
                    "rules", entity, "idle", attributes=attributes,
                )
            else:
                # existing entity: reset to idle and replace its attributes
                await self.AD.state.set_state(
                    "_sequences", "rules", entity, state="idle", attributes=attributes, replace=True
                )
    async def remove_sequences(self, sequences):
        """Delete the entities for one sequence name or a list of names."""
        if not isinstance(sequences, list):
            sequences = [sequences]
        for sequence in sequences:
            await self.AD.state.remove_entity("rules", "sequence.{}".format(sequence))
    async def run_sequence(self, _name, namespace, sequence):
        """Prepare *sequence* and schedule it; returns the created future."""
        coro = self.prep_sequence(_name, namespace, sequence)
        #
        # OK, lets run it
        #
        future = asyncio.ensure_future(coro)
        # track the future so it can be cancelled together with its owner
        self.AD.futures.add_future(_name, future)
        return future
    async def prep_sequence(self, _name, namespace, sequence):
        """Resolve *sequence* (entity name or literal step list) into a runnable coroutine."""
        ephemeral_entity = False
        loop = False
        if isinstance(sequence, str):
            entity_id = sequence
            if await self.AD.state.entity_exists("rules", entity_id) is False:
                self.logger.warning('Unknown sequence "%s" in run_sequence()', sequence)
                return None
            # NOTE(review): existence is checked against "rules" but the state
            # read passes "_services" as the first argument -- presumably a
            # caller-name parameter, not a namespace; verify against AD.state API.
            entity = await self.AD.state.get_state("_services", "rules", sequence, attribute="all")
            seq = entity["attributes"]["steps"]
            loop = entity["attributes"]["loop"]
        else:
            #
            # Assume it's a list with the actual commands in it
            #
            entity_id = "sequence.{}".format(uuid.uuid4().hex)
            # Create an ephemeral entity for it
            ephemeral_entity = True
            await self.AD.state.add_entity("rules", entity_id, "idle", attributes={"steps": sequence})
            seq = sequence
        coro = await self.do_steps(namespace, entity_id, seq, ephemeral_entity, loop)
        return coro
    @staticmethod
    async def cancel_sequence(_name, future):
        """Cancel a running sequence future previously returned by run_sequence()."""
        future.cancel()
    async def do_steps(self, namespace, entity_id, seq, ephemeral_entity, loop):
        """Execute each step of *seq* in order, repeating forever when loop is True."""
        await self.AD.state.set_state("_sequences", "rules", entity_id, state="active")
        try:
            while True:
                for step in seq:
                    for command, parameters in step.items():
                        if command == "sleep":
                            await asyncio.sleep(float(parameters))
                        elif command == "sequence":
                            # Running a sub-sequence so just recurse
                            await self.prep_sequence("_sequence", namespace, parameters)
                            pass
                        else:
                            # commands are "<domain>/<service>" service calls
                            domain, service = str.split(command, "/")
                            if "namespace" in parameters:
                                ns = parameters["namespace"]
                                del parameters["namespace"]
                            else:
                                ns = namespace
                            parameters["__name"] = entity_id
                            await self.AD.services.call_service(ns, domain, service, parameters)
                if loop is not True:
                    break
        finally:
            # always return to idle and clean up ephemeral entities, even on cancel
            await self.AD.state.set_state("_sequences", "rules", entity_id, state="idle")
            if ephemeral_entity is True:
                await self.AD.state.remove_entity("rules", entity_id)
|
# gcp xml backend
# Copyright (C) 2012 Jesse van den Kieboom <jessevdk@gnome.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import GObject, Gcp
from document import Document
class Backend(GObject.Object, Gcp.Backend):
    """Python implementation of a Gcp backend that tracks registered documents."""
    # number of registered documents, exposed as a read-only GObject property
    size = GObject.property(type=int, flags = GObject.PARAM_READABLE)
    def __init__(self):
        GObject.Object.__init__(self)
        self.documents = []
    def do_get_property(self, spec):
        if spec.name == 'size':
            return len(self.documents)
        # NOTE(review): base-class result is not returned here -- confirm
        # PyGObject's do_get_property chaining semantics.
        GObject.Object.do_get_property(self, spec)
    def do_register_document(self, doc):
        # wrap the raw document and watch it for changes
        d = Document(document=doc)
        self.documents.append(d)
        d.connect('changed', self.on_document_changed)
        return d
    def do_unregister_document(self, doc):
        # NOTE(review): assumes *doc* is the Document wrapper returned by
        # do_register_document (that is the object carrying the 'changed'
        # handler and stored in self.documents) -- verify caller contract.
        doc.disconnect_by_func(self.on_document_changed)
        self.documents.remove(doc)
    def do_get(self, idx):
        return self.documents[idx]
    def on_document_changed(self, doc):
        doc.update()
# ex:ts=4:et:
|
defa | ult_app_config = 'users.apps.UserConf | ig' |
from imap_tools import MailBox
# Example: fetch all INBOX messages in fixed-size pages using slice limits.
with MailBox('imap.mail.com').login('test@mail.com', 'pwd', 'INBOX') as mailbox:
    criteria = 'ALL'
    # resolve the matching message numbers once, then page through them
    found_nums = mailbox.numbers(criteria)
    page_len = 3
    # ceiling division: one extra page when there is a remainder
    pages = int(len(found_nums) // page_len) + 1 if len(found_nums) % page_len else int(len(found_nums) // page_len)
    for page in range(pages):
        print('page {}'.format(page))
        # slice object passed straight to fetch() as the limit window
        page_limit = slice(page * page_len, page * page_len + page_len)
        print(page_limit)
        for msg in mailbox.fetch(criteria, bulk=True, limit=page_limit):
            print(' ', msg.date, msg.uid, msg.subject)
|
from marshmallow_jsonapi import Schema, fields
from marshmallow import validate
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import SQLAlchemyError
# Shared SQLAlchemy handle; autoflush disabled so reads don't push pending writes.
db = SQLAlchemy(session_options={"autoflush": False})
class CRUD():
    """Mixin adding commit-per-call persistence helpers via the shared session."""
    def add(self, resource):
        """Stage *resource* for insertion and commit immediately."""
        session = db.session
        session.add(resource)
        return session.commit()
    def update(self):
        """Persist pending attribute changes by committing the session."""
        return db.session.commit()
    def delete(self, resource):
        """Mark *resource* for deletion and commit immediately."""
        session = db.session
        session.delete(resource)
        return session.commit()
class Materials(db.Model, CRUD):
    """ORM model for the "materials" table."""
    __tablename__ = 'materials'
    # legacy uppercase primary-key column name preserved from the schema
    MATERIAL_ID = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), unique=True, nullable=False)
    # integer reference ids -- target tables are not defined in this file
    cn_id = db.Column(db.Integer)
    pt_id = db.Column(db.Integer)
class MaterialsSchema(Schema):
    """JSON-API serialization schema for Materials rows, with self links."""
    not_blank = validate.Length(min=1, error='Field cannot be blank')
    id = fields.Integer()
    MATERIAL_ID = fields.Integer(primary_key=True)
    name = fields.String(validate=not_blank)
    #self links
    def get_top_level_links(self, data, many):
        """Build the top-level "self" link for a collection or a single item."""
        if many:
            return {'self': "/materials/"}
        link = ''
        if 'attributes' in data:
            link = "/materials/{}".format(data['attributes']['MATERIAL_ID'])
        return {'self': link}
    class Meta:
        type_ = 'materials'
class MaterialsSalvage(db.Model, CRUD):
    """ORM model for the "materials_salvage" table."""
    __tablename__ = 'materials_salvage'
    # legacy uppercase primary-key column name preserved from the schema
    MATERIAL_SALVAGE_ID = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), unique=True, nullable=False)
class MaterialsSalvageSchema(Schema):
    """JSON-API serialization schema for MaterialsSalvage rows, with self links."""
    not_blank = validate.Length(min=1, error='Field cannot be blank')
    id = fields.Integer()
    MATERIAL_SALVAGE_ID = fields.Integer(primary_key=True)
    name = fields.String(validate=not_blank)
    #self links
    def get_top_level_links(self, data, many):
        """Build the top-level "self" link for a collection or a single item."""
        if many:
            return {'self': "/materials/salvage/"}
        link = ''
        if 'attributes' in data:
            link = "/materials/salvage/{}".format(data['attributes']['MATERIAL_SALVAGE_ID'])
        return {'self': link}
    class Meta:
        type_ = 'materials_salvage'
|
ysis_count = 100
    def get_mongo_items(self, consistency_record):
        # get the records from mongo in chunks
        # NOTE(review): the datasource projection is disabled globally here and
        # restored at the end -- not safe if anything else queries this
        # resource concurrently; confirm single-threaded usage.
        projection = dict(superdesk.resources[self.resource_name].endpoint_schema['datasource']['projection'])
        superdesk.resources[self.resource_name].endpoint_schema['datasource']['projection'] = None
        service = superdesk.get_resource_service(self.resource_name)
        cursor = service.get_from_mongo(None, {})
        count = cursor.count()
        no_of_buckets = len(range(0, count, self.default_page_size))
        mongo_items = []
        updated_mongo_items = []
        request = ParsedRequest()
        # only _etag and _updated are needed for the consistency comparison
        request.projection = json.dumps({'_etag': 1, '_updated': 1})
        for x in range(0, no_of_buckets):
            skip = x * self.default_page_size
            print('Page : {}, skip: {}'.format(x + 1, skip))
            # don't get any new records since the elastic items are retrieved
            cursor = service.get_from_mongo(request, {'_created': {'$lte': consistency_record['started_at']}})
            cursor.skip(skip)
            cursor.limit(self.default_page_size)
            cursor = list(cursor)
            mongo_items.extend([(mongo_item['_id'], mongo_item['_etag']) for mongo_item in cursor])
            # items modified after the snapshot started are tracked separately
            # so later etag mismatches on them are not reported as errors
            updated_mongo_items.extend([mongo_item['_id'] for mongo_item in cursor
                                        if mongo_item['_updated'] > consistency_record['started_at']])
        superdesk.resources[self.resource_name].endpoint_schema['datasource']['projection'] = projection
        return mongo_items, updated_mongo_items
def get_mongo_item(self, id):
service = superdesk.get_resource_service(self.resource_name)
return list(service.get_from_mongo(None, {'_id': id}))[0]
def get_elastic_item(self, id):
resource = superdesk.get_resource_service(self.resource_name)
query = {'query': {'filtered': {'filter': {'term': {'_id': id}}}}}
request = ParsedRequest()
request.args = {'source': json.dumps(query)}
items = resource.get(req=request, lookup=None)
return items[0]
def get_elastic_items(self, elasticsearch_index, elasticsearch_url):
# get the all hits from elastic
post_data = {'fields': ['_etag']}
response = requests.get('{}/{}/{}'.format(elasticsearch_url,
elasticsearch_index, '_search?size=5000&q=*:*'), params=post_data)
elastic_results = response.json()["hits"]['hits']
elastic_items = [(elastic_item['_id'], elastic_item["fields"]['_etag'][0])
for elastic_item in elastic_results]
return elastic_items
    def process_results(self,
                        consistency_record,
                        elastic_items,
                        mongo_items,
                        updated_mongo_items,
                        analyse_differences=True):
        # Populate consistency_record with set comparisons of the (id, etag)
        # pairs collected from mongo and elastic.
        # form the sets
        mongo_item_ids = list(map(list, zip(*mongo_items)))[0]
        mongo_item_ids_set = set(mongo_item_ids)
        elastic_item_ids = list(map(list, zip(*elastic_items)))[0]
        elastic_item_ids_set = set(elastic_item_ids)
        mongo_items_set = set(mongo_items)
        elastic_items_set = set(elastic_items)
        updated_mongo_items_set = set(updated_mongo_items)
        differences = []
        # items that exist both in mongo and elastic with the same etags
        shared_items = mongo_items_set & elastic_items_set
        # items that exist only in mongo but not in elastic
        mongo_only = mongo_item_ids_set - elastic_item_ids_set
        # items that exist only in elastic but not in mongo
        elastic_only = elastic_item_ids_set - mongo_item_ids_set
        # items that exist both in mongo and elastic with different etags
        # filter out the ones that has been updated since elastic is queried
        # NOTE(review): this first subtraction mixes (id, etag) tuples with bare
        # ids, so it removes nothing here; the effective bare-id filtering only
        # happens in the block below -- verify intent.
        different_items = (elastic_items_set ^ mongo_items_set) - updated_mongo_items_set
        if len(different_items) > 0:
            different_items = set(list(map(list, zip(*list(different_items))))[0]) \
                - updated_mongo_items_set \
                - mongo_only \
                - elastic_only
            if analyse_differences:
                differences = self.analyse_differences(different_items)
        consistency_record['completed_at'] = utcnow()
        consistency_record['mongo'] = len(mongo_items)
        consistency_record['elastic'] = len(elastic_items)
        consistency_record['identical'] = len(shared_items)
        consistency_record['mongo_only'] = len(mongo_only)
        consistency_record['mongo_only_ids'] = list(mongo_only)
        consistency_record['elastic_only'] = len(elastic_only)
        consistency_record['elastic_only_ids'] = list(elastic_only)
        consistency_record['inconsistent'] = len(different_items)
        consistency_record['inconsistent_ids'] = list(different_items)
        consistency_record['differences'] = differences
def analyse_differences(self, different_items):
all_differences = []
counter = 1
for item in different_items:
differences = []
mongo_item = self.get_mongo_item(item)
elastic_item = self.get_elastic_item(item)
print('Analysing item# {}'.format(counter))
self.compare_dicts(mongo_item, elastic_item, differences)
all_differences.append({'_id': item, 'differences': differences})
counter += 1
if counter > self.analysis_count:
break
return all_differences
def are_lists_equal(self, list_1, list_2):
if len(list_1) > 0 and not isinstance(list_1[0], dict):
return len(list(set(list_1) ^ set(list_2))) > 0
else:
return True
def compare_dicts(self, dict_1, dict_2, differences=None):
if differences is None:
differences = list()
diff_keys = list(set(dict_1.keys()) ^ set(dict_2.keys()))
if len(diff_keys) > 0:
# there are differences in keys so report them
differences.extend(diff_keys)
self.compare_dict_values(dict_1, dict_2, differences)
return list(set(differences))
    def compare_dict_values(self, dict_1, dict_2, differences=None):
        # Compare values of dict_1's keys against dict_2, recursing into
        # nested dicts; differing keys are appended to *differences*.
        if differences is None:
            differences = list()
        for key in dict_1.keys():
            if key in differences:
                # already flagged -- skip re-checking
                continue
            if key not in dict_2:
                differences.append(key)
                continue
            if isinstance(dict_1[key], list):
                # list values delegate to are_lists_equal (see its definition
                # for the exact list-comparison semantics)
                if not self.are_lists_equal(dict_1[key], dict_2[key]):
                    differences.append(key)
            elif isinstance(dict_1[key], dict):
                differences.extend(self.compare_dicts(dict_1[key], dict_2[key]))
            else:
                if not dict_1[key] == dict_2[key]:
                    differences.append(key)
def run(self, resource_name,
elasticsearch_url=ELASTICSEARCH_URL,
elasticsearch_index=ELASTICSEARCH_INDEX,
analysis_count=100):
"""
Comp | ares the records in mongo and elastic for a given collection
Saves the results to "consistency" collection
:param resource_name: Name of the collection i.e. ingest, archive, published, text_archive
:param elasticsearch_url: url of the elasticsearch
:param elasticsearch_index: name of the index
:param analysis_count: number of inconsistencies to be analyzed
:return: dictionary of findings
"""
print('Comparing data in mongo:{} and elastic:{}'.format(resource_name, resource_name))
consistency_record = {}
consistency_record['started_at'] = utcnow()
consistency_record['resource_name'] = resource_name
self.resource_name = resource_name
self.analysis_count = analysis_count
elastic_items = self.get_elastic_items(elasticsearch_index, elasticsearch_url)
print('Retreiving {} items from mongo'.format(len(elastic_items)))
mongo_items, updated_mongo_items = self.get_mongo_items(c |
# -*- coding: utf-8 -*-
# Ge | nerated by Django 1.11.28 on 2021-11-10 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a composite (object_id, content_type)
    # index to SchemaResponse to speed up generic-foreign-key lookups.
    dependencies = [
        ('osf', '0238_abstractprovider_allow_updates'),
    ]
    operations = [
        migrations.AddIndex(
            model_name='schemaresponse',
            index=models.Index(fields=['object_id', 'content_type'], name='osf_schemar_object__8cc95e_idx'),
        ),
    ]
|
.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['karaage.Person']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'karaage.institute': {
'Meta': {'ordering': "['name']", 'object_name': 'Institute', 'db_table': "'institute'"},
'delegates': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'delegate_for'", 'to': "orm['karaage.Person']", 'through': "orm['karaage.InstituteDelegate']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['karaage.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'saml_entityid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'karaage.institutedelegate': {
'Meta': {'object_name': 'InstituteDelegate', 'db_table': "'institutedelegate'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['karaage.Institute']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['karaage.Person']"}),
'send_email': ('django.db.models.fields.BooleanField', [], {})
},
'karaage.machinecategory': {
'Meta': {'object_name': 'MachineCategory', 'db_table': "'machine_category'"},
'datastore': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'karaage.person': {
'Meta': {'ordering': "['full_name', 'short_name']", 'object_name': 'Person', 'db_table': "'person'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['karaage.Person']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['karaage.Person']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'db_index': 'True'}),
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['karaage.Institute']"}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_systemuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'legacy_ldap_password': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'login_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'saml_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'state': ('django.db.models.fields.CharField', [], {'m | ax_length': | '4', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'karaage.project': {
'Meta': {'ordering': "['pid']", 'object_name': 'Project', 'db_table': "'project'"},
'additional_req': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_approver'", 'null': 'True', 'to': "orm['karaage.Person']"}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_deletor'", 'null': 'True', 'to': "orm['karaage.Person']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['karaage.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['karaage.Institute']"}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'leaders': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'leads'", 'symmetrical': 'False', 'to': "orm['karaage.Person']"}),
'name' |
# -*- coding: utf-8 -*-
from werkzeug.contrib import wrappers
from werkzeug import routing
from werkzeug.wrappers import Request, Response
def test_reverse_slash_behavior():
    """Test ReverseSlashBehaviorRequestMixin"""
    class ReversedRequest(wrappers.ReverseSlashBehaviorRequestMixin, Request):
        pass
    request = ReversedRequest.from_values('/foo/bar', 'http://example.com/test')
    # With the mixin, path loses its leading slash and script_root gains a
    # trailing one.
    assert request.url == 'http://example.com/test/foo/bar'
    assert request.path == 'foo/bar'
    assert request.script_root == '/test/'
    # The routing system must cope with the reversed slash convention too.
    url_map = routing.Map([routing.Rule('/foo/bar', endpoint='foo')])
    environ_adapter = url_map.bind_to_environ(request.environ)
    assert environ_adapter.match() == ('foo', {})
    path_adapter = url_map.bind(request.host, request.script_root)
    assert path_adapter.match(request.path) == ('foo', {})
def test_dynamic_charset_request_mixin():
    """Test DynamicCharsetRequestMixin"""
    class CharsetRequest(wrappers.DynamicCharsetRequestMixin, Request):
        pass
    # No charset parameter on a text type: falls back to latin1.
    request = CharsetRequest({'CONTENT_TYPE': 'text/html'})
    assert request.charset == 'latin1'
    # An explicit charset parameter wins.
    request = CharsetRequest({'CONTENT_TYPE': 'text/html; charset=utf-8'})
    assert request.charset == 'utf-8'
    # Non-text content types use the default; url_charset follows charset
    # unless it is overridden.
    request = CharsetRequest({'CONTENT_TYPE': 'application/octet-stream'})
    assert request.charset == 'latin1'
    assert request.url_charset == 'latin1'
    CharsetRequest.url_charset = 'utf-8'
    request = CharsetRequest({'CONTENT_TYPE': 'application/octet-stream'})
    assert request.charset == 'latin1'
    assert request.url_charset == 'utf-8'
    # Unrecognized charsets go through the unknown_charset() hook.
    def resolve_unknown(charset):
        return "ascii"
    request = CharsetRequest({'CONTENT_TYPE': 'text/plain; charset=x-weird-charset'})
    request.unknown_charset = resolve_unknown
    assert request.charset == 'ascii'
    assert request.url_charset == 'utf-8'
def test_dynamic_charset_response_mixin():
    """Test DynamicCharsetResponseMixin"""
    class MyResponse(wrappers.DynamicCharsetResponseMixin, Response):
        default_charset = 'utf-7'
    resp = MyResponse(mimetype='text/html')
    assert resp.charset == 'utf-7'
    resp.charset = 'utf-8'
    assert resp.charset == 'utf-8'
    assert resp.mimetype == 'text/html'
    assert resp.mimetype_params == {'charset': 'utf-8'}
    # Mutating mimetype_params must be reflected by the charset property.
    resp.mimetype_params['charset'] = 'iso-8859-15'
    assert resp.charset == 'iso-8859-15'
    resp.data = u'Hällo Wörld'
    assert ''.join(resp.iter_encoded()) == \
           u'Hällo Wörld'.encode('iso-8859-15')
    # Without a Content-Type header there is nowhere to store a charset,
    # so assigning one must raise TypeError.
    del resp.headers['content-type']
    try:
        resp.charset = 'utf-8'
    except TypeError:
        # Bug fix: was the Python-2-only form ``except TypeError, e`` with
        # an unused binding; this form works on Python 2.6+ and Python 3.
        pass
    else:
        assert False, 'expected type error on charset setting without ct'
|
# -*- coding: utf-8 -*-
"""Sphinx configuration for building the TextBlob documentation."""
import datetime as dt
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import textblob
sys.path.append(os.path.abspath("_themes"))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.viewcode',
    'sphinx_issues',
]
primary_domain = 'py'
default_role = 'py:obj'
# Repository used by sphinx-issues to resolve :issue:/:pr: roles.
issues_github_path = 'sloria/TextBlob'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TextBlob'
# Copyright year is computed at build time from the current UTC date.
copyright = u'{0:%Y} <a href="http://stevenloria.com/">Steven Loria</a>'.format(
    dt.datetime.utcnow()
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = textblob.__version__
exclude_patterns = ['_build']
pygments_style = 'flask_theme_support.FlaskyStyle'
html_theme = 'kr'
html_theme_path = ['_themes']
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': ['side-primary.html', 'searchbox.html'],
    '**': ['side-secondary.html', 'localtoc.html',
           'relations.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'textblobdoc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'TextBlob.tex', u'textblob Documentation',
     u'Steven Loria', 'manual'),
]
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'textblob', u'textblob Documentation',
     [u'Steven Loria'], 1)
]
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'textblob', u'TextBlob Documentation',
     u'Steven Loria', 'textblob', 'Simplified Python text-processing.',
     'Natural Language Processing'),
]
|
#!/usr/bin/env python
""":mod:`Redirection <testbed.resources._redirect>` tests."""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import json as _json
import unittest as _unittest
from urllib import quote as _percent_encode
import napper as _napper
import spruce.logging as _logging
import testbed.testing as _testbedtest
class TestRedirections(_testbedtest.TestTestbed):
    """Base class for tests against the '/redirect' web service."""
    @property
    def webservice_path(self):
        # Root path of the web service under test.
        return '/redirect'
    @property
    def webservice_probe_path(self):
        # Probe the redirecting resource itself (path supplied by subclasses).
        return self._redirect_path
    def _create_requests_session(self):
        # Redirects must not be followed automatically: the tests assert on
        # the redirect responses themselves.
        return _napper.WebRequestSession(follow_redirects=False)
class TestResponseRedirection(TestRedirections):
    """Response-redirection tests for GET, POST, and POST-then-GET."""
    # Media ranges used by the *_as_html variants.
    _HTML_MEDIARANGES = ('text/html', '*/*; q=0.01')
    def test_get_response_redirect(self):
        response = self.request('get', self._redirect_path)
        self.assert_response_redirect_response(response,
                                               loc=self._redirect_loc)
    def test_get_response_redirect_as_html(self):
        response = self.request('get', self._redirect_path,
                                accept_mediaranges=self._HTML_MEDIARANGES)
        self.assert_response_redirect_response(response,
                                               loc=self._redirect_loc,
                                               contenttype='text/html')
    def test_post_response_redirect(self):
        response = self.request('post', self._redirect_path)
        self.assert_response_redirect_response(response,
                                               loc=self._redirect_loc)
    def test_post_response_redirect_as_html(self):
        response = self.request('post', self._redirect_path,
                                accept_mediaranges=self._HTML_MEDIARANGES)
        self.assert_response_redirect_response(response,
                                               loc=self._redirect_loc,
                                               contenttype='text/html')
    def test_postget_response_redirect(self):
        response = self.request('postget', self._redirect_path)
        self.assert_response_redirect_response(response,
                                               loc=self._redirect_loc)
    def test_postget_response_redirect_as_html(self):
        response = self.request('postget', self._redirect_path,
                                accept_mediaranges=self._HTML_MEDIARANGES)
        self.assert_response_redirect_response(response,
                                               loc=self._redirect_loc,
                                               contenttype='text/html')
    @property
    def _redirect_loc(self):
        # Arbitrary relative redirect target.
        return 'aoeu'
    @property
    def _redirect_path(self):
        # Path of the redirecting resource, with the JSON-encoded location
        # percent-encoded into a matrix parameter.
        encoded_loc = _percent_encode(_json.dumps(self._redirect_loc), safe='')
        return 'response;loc={}'.format(encoded_loc)
class _TestRedirectionsCorsWithUntrustedOriginMixin(object):
    """Mixin: with an untrusted origin, redirects are rejected by CORS."""
    def assert_response_redirect_response(self, response, **kwargs):
        passthrough_kwargs = {}
        if 'contenttype' in kwargs:
            passthrough_kwargs['contenttype'] = kwargs['contenttype']
        self.assert_cors_rejected_response(response,
                                           exc_name='CorsOriginForbidden',
                                           **passthrough_kwargs)
class TestRedirectionsCorsActualWithTrustedOrigin\
        (_testbedtest.TestCorsWithTrustedOrigin, _testbedtest.TestCorsActual,
         TestRedirections):
    """Actual (non-preflight) CORS redirect requests from a trusted origin."""
    pass
class TestRedirectionsCorsActualWithUntrustedOrigin\
        (_TestRedirectionsCorsWithUntrustedOriginMixin,
         _testbedtest.TestCorsWithUntrustedOrigin,
         _testbedtest.TestCorsActual, TestRedirections):
    """Actual CORS redirect requests from an untrusted origin (rejected)."""
    pass
class TestRedirectionsCorsPreflightWithTrustedOrigin\
        (_testbedtest.TestCorsWithTrustedOrigin,
         _testbedtest.TestCorsPreflight, TestRedirections):
    """CORS preflight requests from a trusted origin are accepted."""
    def assert_response_redirect_response(self, response, **kwargs):
        passthrough_kwargs = {}
        if 'contenttype' in kwargs:
            passthrough_kwargs['contenttype'] = kwargs['contenttype']
        self.assert_cors_preflight_accepted_response(response,
                                                     **passthrough_kwargs)
class TestRedirectionsCorsPreflightWithUntrustedOrigin\
        (_TestRedirectionsCorsWithUntrustedOriginMixin,
         _testbedtest.TestCorsWithUntrustedOrigin,
         _testbedtest.TestCorsPreflight, TestRedirections):
    """CORS preflight requests from an untrusted origin (rejected)."""
    pass
if __name__ == '__main__':
    # Configure logging, then run every test in this module.
    _logging.basicConfig()
    _unittest.main()
|
s None
    def test_iter(self, element_cls):
        # Iterating a mapping element yields its key elements.
        keys = list(element_cls({u"foo": 1}))
        assert len(keys) == 1
        assert keys[0].value == u"foo"
    @python2_only
    def test_iterkeys(self, element_cls):
        # Python 2 iterkeys() yields key elements.
        keys = list(element_cls({u"foo": 1}).iterkeys())
        assert len(keys) == 1
        assert keys[0].value == u"foo"
    @python2_only
    def test_viewkeys(self, element_cls):
        # Python 2 viewkeys() yields key elements.
        keys = list(element_cls({u"foo": 1}).viewkeys())
        assert len(keys) == 1
        assert keys[0].value == u"foo"
    def test_keys(self, element_cls):
        # keys() yields key elements.
        keys = list(element_cls({u"foo": 1}).keys())
        assert len(keys) == 1
        assert keys[0].value == u"foo"
    @python2_only
    def test_itervalues(self, element_cls):
        # Python 2 itervalues() yields value elements.
        values = list(element_cls({u"foo": 1}).itervalues())
        assert len(values) == 1
        assert values[0].value == 1
    @python2_only
    def test_viewvalues(self, element_cls):
        # Python 2 viewvalues() yields value elements.
        values = list(element_cls({u"foo": 1}).viewvalues())
        assert len(values) == 1
        assert values[0].value == 1
    def test_values(self, element_cls):
        # values() yields value elements.
        values = list(element_cls({u"foo": 1}).values())
        assert len(values) == 1
        assert values[0].value == 1
    @python2_only
    def test_iteritems(self, element_cls):
        # Python 2 iteritems() yields (key element, value element) pairs.
        items = list(element_cls({u"foo": 1}).iteritems())
        assert len(items) == 1
        assert items[0][0].value == u"foo"
        assert items[0][1].value == 1
    @python2_only
    def test_viewitems(self, element_cls):
        # Python 2 viewitems() yields (key element, value element) pairs.
        items = list(element_cls({u"foo": 1}).viewitems())
        assert len(items) == 1
        assert items[0][0].value == u"foo"
        assert items[0][1].value == 1
    def test_items(self, element_cls):
        # items() yields (key element, value element) pairs.
        items = list(element_cls({u"foo": 1}).items())
        assert len(items) == 1
        assert items[0][0].value == u"foo"
        assert items[0][1].value == 1
    def test_set_list_of_tuples(self, element_cls):
        # A list of (key, value) pairs unserializes like a mapping; the raw
        # input is preserved unchanged.
        element = element_cls([(u"foo", 1)])
        assert element.raw_value == [(u"foo", 1)]
        assert element.value == {u"foo": 1}
    def test_set_non_mapping(self, element_cls):
        # Non-mapping input is kept raw and the value is NotUnserializable.
        element = element_cls(1)
        assert element.raw_value == 1
        assert element.value is NotUnserializable
def test_validate_empty(self, element | _cls):
element = element_cls()
assert element.is_valid is None
assert not element.validate()
assert not element.is_valid
    def test_validate_is_recursive(self):
        # validate() must descend into key and value elements, invoking the
        # key validator before the value validator.
        validators = []
        def key_validator(element, state):
            validators.append("key")
            return True
        def value_validator(element, state):
            validators.append("value")
            return True
        element = Dict.of(
            Integer.validated_by([key_validator]),
            Integer.validated_by([value_validator])
        )({1: 1})
        assert element.validate()
        assert element.is_valid
        assert validators == ["key", "value"]
    def test_validate_value_empty(self, element_cls):
        # An empty mapping is a valid value.
        element = element_cls({})
        assert element.is_valid is None
        assert element.validate()
        assert element.is_valid
    def test_validate_value(self, element_cls):
        # Raw string contents are unserialized, then validated successfully.
        element = element_cls({"foo": "1"})
        assert element.raw_value == {"foo": "1"}
        assert element.value == {u"foo": 1}
        assert element.is_valid is None
        assert element.validate()
        assert element.is_valid
    def test_validate_invalid_value(self, element_cls):
        # Unserializable contents make validation fail.
        element = element_cls({"foo": "foo"})
        assert element.raw_value == {"foo": "foo"}
        assert element.value is NotUnserializable
        assert element.is_valid is None
        assert not element.validate()
        assert not element.is_valid
class MutableMappingTest(MappingTest):
    """Mapping elements are immutable: mutation APIs must be blocked/absent."""
    def test_setitem(self, element_cls):
        # Item assignment is rejected outright.
        element = element_cls()
        with pytest.raises(TypeError):
            element[u"foo"] = 1
    @pytest.mark.parametrize('method', [
        'setdefault', 'popitem', 'pop', 'update', 'clear'
    ])
    def test_mutating_method_missing(self, element_cls, method):
        # The dict mutation methods are not even defined on the element.
        element = element_cls()
        assert not hasattr(element, method)
        with pytest.raises(AttributeError):
            getattr(element, method)
class TestDict(MutableMappingTest):
    """Tests for the unordered Dict element."""
    @pytest.fixture
    def element_cls(self):
        return Dict.of(Unicode, Integer)
    @pytest.fixture
    def possible_value(self):
        return {u"foo": 1}
    @python2_only
    def test_has_key(self, element_cls):
        # Python 2 has_key() works on the key values.
        assert element_cls({u"foo": 1}).has_key(u"foo")
        assert not element_cls({u"foo": 1}).has_key(u"bar")
    def test_set_strict(self, element_cls):
        # In strict mode a real dict is accepted as-is.
        element = element_cls.using(strict=True)({u"foo": 1})
        assert element.raw_value == {u"foo": 1}
        assert element.value == {u"foo": 1}
    def test_set_strict_raw(self, element_cls):
        # In strict mode a list of pairs is *not* coerced to a dict.
        element = element_cls.using(strict=True)([(u"foo", 1)])
        assert element.raw_value == [(u"foo", 1)]
        assert element.value is NotUnserializable
    def test_retains_ordering(self, element_cls):
        # Even the plain Dict element preserves the input pair ordering.
        value = [
            (u"foo", 1),
            (u"bar", 2),
            (u"baz", 3)
        ]
        assert element_cls(value).value == _compat.OrderedDict(value)
class TestOrderedDict(MutableMappingTest):
    """Tests for the OrderedDict element."""
    @pytest.fixture
    def element_cls(self):
        return OrderedDict.of(Unicode, Integer)
    @pytest.fixture
    def possible_value(self):
        return _compat.OrderedDict([(u"foo", 1)])
    @python2_only
    def test_has_key(self, element_cls):
        # Python 2 has_key() works on the key values.
        assert element_cls({u"foo": 1}).has_key(u"foo")
        assert not element_cls({u"foo": 1}).has_key(u"bar")
    def test_set_strict(self, element_cls):
        # In strict mode an OrderedDict instance is accepted.
        value = _compat.OrderedDict({u"foo": 1})
        element = element_cls.using(strict=True)(value)
        assert element.raw_value == value
        assert element.value == value
    def test_set_strict_raw(self, element_cls):
        # In strict mode a plain dict is rejected as raw input.
        element = element_cls.using(strict=True)({u"foo": 1})
        assert element.raw_value == {u"foo": 1}
        assert element.value is NotUnserializable
class TestForm(object):
    def test_member_schema_ordering(self):
        # Form members keep their declaration order.
        class Foo(Form):
            spam = Element
            eggs = Element
        assert list(Foo.member_schema.keys()) == ["spam", "eggs"]
    def test_member_schema_inheritance(self):
        # Inherited members come before those declared on the subclass.
        class Foo(Form):
            spam = Element
        class Bar(Foo):
            eggs = Element
        assert list(Bar.member_schema.keys()) == ["spam", "eggs"]
    def test_getitem(self):
        # Indexing a form returns the member element.
        class Foo(Form):
            spam = Unicode
        foo = Foo({"spam": u"one"})
        assert foo["spam"].value == u"one"
    def test_contains(self):
        # Membership tests against member names.
        class Foo(Form):
            spam = Unicode
        foo = Foo()
        assert "spam" in foo
        assert "eggs" not in foo
    def test_len(self):
        # len() is the number of declared members.
        class Foo(Form):
            spam = Unicode
        assert len(Foo()) == 1
    def test_iter(self):
        # Iterating a form yields member names in declaration order.
        class Foo(Form):
            spam = Unicode
            eggs = Unicode
        assert list(Foo()) == ["spam", "eggs"]
    @python2_only
    def test_iterkeys(self):
        # Python 2 iterkeys() yields member names in declaration order.
        class Foo(Form):
            spam = Unicode
            eggs = Unicode
        assert list(Foo().iterkeys()) == ["spam", "eggs"]
    def test_keys(self):
        # keys() yields member names in declaration order.
        class Foo(Form):
            spam = Unicode
            eggs = Unicode
        assert list(Foo().keys()) == ["spam", "eggs"]
    @python2_only
    def test_itervalues(self):
        # Python 2 itervalues() yields member elements in declaration order.
        class Foo(Form):
            spam = Unicode
            eggs = Unicode
        foo = Foo({"spam": u"one", "eggs": u"two"})
        assert [element.value for element in foo.itervalues()] == [u"one", u"two"]
    def test_values(self):
        # values() yields member elements in declaration order.
        class Foo(Form):
            spam = Unicode
            eggs = Unicode
        foo = Foo({"spam": u"one", "eggs": u"two"})
        assert [element.value for element in foo.values()] == [u"one", u"two"]
@python2_only
def test_iteritems(self):
class Foo(Form):
spam = Unicode
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/CC/SHCC.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write("wrapper.py",
"""import os
import sys
open('%s', 'wb').write("wrapper.py\\n")
os.system(" ".join(sys.argv[1:]))
""" % test.workpath('wr | apper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
| foo = Environment()
shcc = foo.Dictionary('SHCC')
bar = Environment(SHCC = r'%(_python_)s wrapper.py ' + shcc)
foo.SharedObject(target = 'foo/foo', source = 'foo.c')
bar.SharedObject(target = 'bar/bar', source = 'bar.c')
""" % locals())
test.write('foo.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.write('bar.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.run(arguments = 'foo')
test.fail_test(os.path.exists(test.workpath('wrapper.out')))
test.run(arguments = 'bar')
test.fail_test(test.read('wrapper.out') != "wrapper.py\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
ugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid('plugin', args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
    """Convert the next argument to its mIRC color code."""
    if args[0] not in ircutils.mircColors:
        state.errorInvalid('irc color')
    else:
        state.args.append(ircutils.mircColors[args.pop(0)])
def getText(irc, msg, args, state):
    """Consume all remaining arguments as a single space-joined string."""
    if not args:
        raise IndexError
    state.args.append(' '.join(args))
    del args[:]
# Registry mapping converter names (as used in command wrap() specs) to
# their converter functions; IrcDict gives IRC-style key comparison.
wrappers = ircutils.IrcDict({
    'id': getId,
    'ip': getIp,
    'int': getInt,
    'index': getIndex,
    'color': getIrcColor,
    'now': getNow,
    'url': getUrl,
    'email': getEmail,
    'httpUrl': getHttpUrl,
    'long': getLong,
    'float': getFloat,
    'nonInt': getNonInt,
    'positiveInt': getPositiveInt,
    'nonNegativeInt': getNonNegativeInt,
    'letter': getLetter,
    'haveOp': getHaveOp,
    'expiry': getExpiry,
    'literal': getLiteral,
    'to': getTo,
    'nick': getNick,
    'seenNick': getSeenNick,
    'channel': getChannel,
    'inChannel': inChannel,
    'onlyInChannel': onlyInChannel,
    'nickInChannel': nickInChannel,
    'networkIrc': getNetworkIrc,
    'callerInGivenChannel': callerInGivenChannel,
    'plugin': getPlugin,
    'boolean': getBoolean,
    'lowered': getLowered,
    'anything': anything,
    'something': getSomething,
    'filename': getSomething, # XXX Check for validity.
    'commandName': getCommandName,
    'text': getText,
    'glob': getGlob,
    'somethingWithoutSpaces': getSomethingNoSpaces,
    'capability': getSomethingNoSpaces,
    'channelDb': getChannelDb,
    'hostmask': getHostmask,
    'banmask': getBanmask,
    'user': getUser,
    'matches': getMatch,
    'public': public,
    'private': private,
    'otherUser': getOtherUser,
    'regexpMatcher': getMatcher,
    'validChannel': validChannel,
    'regexpReplacer': getReplacer,
    'owner': owner,
    'admin': admin,
    'checkCapability': checkCapability,
    'checkChannelCapability': checkChannelCapability,
    'op': getOp,
    'halfop': getHalfop,
    'voice': getVoice,
    })
def addConverter(name, wrapper):
    """Register a converter function under ``name`` in the global registry."""
    wrappers[name] = wrapper
class UnknownConverter(KeyError):
    """Raised by getConverter for names missing from the registry."""
    pass
def getConverter(name):
    """Return the converter registered under ``name``.

    Raises UnknownConverter (a KeyError subclass, so existing
    ``except KeyError`` callers keep working) for unknown names.
    """
    try:
        return wrappers[name]
    except KeyError as e:
        # Modernized from the Python-2-only forms ``except KeyError, e`` /
        # ``raise UnknownConverter, str(e)``; works on Python 2.6+ and 3.
        raise UnknownConverter(str(e))
def callConverter(name, irc, msg, args, state, *L):
    """Look up the converter ``name`` and apply it to the argument list."""
    getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
def contextify(spec):
    """Wrap ``spec`` in a context, unless it already is one."""
    if isinstance(spec, context):
        return spec
    return context(spec)
def setDefault(state, default):
    """Append ``default`` to state.args, calling it first if it's callable."""
    value = default() if callable(default) else default
    state.args.append(value)
class context(object):
    """Wraps a converter spec into a callable applied to the argument list.

    ``spec`` may be a converter name, a ``(name, extra_args...)`` tuple,
    ``None`` (meaning the 'anything' converter), or another context.
    """
    def __init__(self, spec):
        self.args = ()
        self.spec = spec # for repr
        if isinstance(spec, tuple):
            assert spec, 'tuple spec must not be empty.'
            # Extra tuple items are passed through to the converter call.
            self.args = spec[1:]
            self.converter = getConverter(spec[0])
        elif spec is None:
            self.converter = getConverter('anything')
        elif isinstance(spec, basestring):
            self.args = ()
            self.converter = getConverter(spec)
        else:
            assert isinstance(spec, context)
            # Another context is itself callable with the converter signature.
            self.converter = spec
    def __call__(self, irc, msg, args, state):
        log.debug('args before %r: %r', self, args)
        self.converter(irc, msg, args, state, *self.args)
        log.debug('args after %r: %r', self, args)
    def __repr__(self):
        return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
    """Joins all remaining arguments into one string before converting."""
    def __call__(self, irc, msg, args, state):
        if args:
            original = args[:]
            args[:] = [' '.join(args)]
            try:
                super(rest, self).__call__(irc, msg, args, state)
            except Exception, e:
                # NOTE(review): unlike commalist, the exception is swallowed
                # here after restoring args -- confirm this best-effort
                # behavior is intended.
                args[:] = original
        else:
            raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
    """Converts the next argument if present; otherwise appends ``default``."""
    def __init__(self, spec, default=None):
        self.__parent = super(additional, self)
        self.__parent.__init__(spec)
        self.default = default
    def __call__(self, irc, msg, args, state):
        try:
            self.__parent.__call__(irc, msg, args, state)
        except IndexError:
            # No argument left to convert: fall back to the default.
            log.debug('Got IndexError, returning default.')
            setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
    """Like additional, but also falls back to the default when conversion
    fails, not just when no argument remains."""
    def __call__(self, irc, msg, args, state):
        try:
            super(optional, self).__call__(irc, msg, args, state)
        except (callbacks.ArgumentError, callbacks.Error), e:
            log.debug('Got %s, returning default.', utils.exnToString(e))
            # Conversion failed, but that's not an error for optional specs.
            state.errored = False
            setDefault(state, self.default)
class any(context):
    """Converts as many remaining arguments as possible, collecting the
    results into a single list appended to state.args."""
    def __init__(self, spec, continueOnError=False):
        self.__parent = super(any, self)
        self.__parent.__init__(spec)
        self.converter = None  # hedge: not set here; see NOTE below
    def __call__(self, irc, msg, args, state):
        st = state.essence()
        try:
            while args:
                self.__parent.__call__(irc, msg, args, st)
        except IndexError:
            pass
        except (callbacks.ArgumentError, callbacks.Error), e:
            if not self.continueOnError:
                raise
            else:
                log.debug('Got %s, returning default.', utils.exnToString(e))
                pass
        state.args.append(st.args)
class many(any):
    """Like any, but requires at least one successful conversion."""
    def __call__(self, irc, msg, args, state):
        super(many, self).__call__(irc, msg, args, state)
        if not state.args[-1]:
            # any() appended an empty list: treat "zero matches" as an error.
            state.args.pop()
            raise callbacks.ArgumentError
class first(context):
    """Tries each spec in order; the first one to convert successfully wins.

    Accepts an optional ``default`` keyword appended when every spec fails.
    Without a default, the last conversion failure is re-raised.
    """
    def __init__(self, *specs, **kw):
        if 'default' in kw:
            self.default = kw.pop('default')
        assert not kw, 'Bad kwargs for first.__init__'
        self.spec = specs # for __repr__
        # list() keeps this a real (re-iterable) list on Python 3 as well.
        self.specs = list(map(contextify, specs))
    def __call__(self, irc, msg, args, state):
        errored = False
        last_exc = None
        for spec in self.specs:
            try:
                spec(irc, msg, args, state)
                return
            except Exception as e:
                # Remember the failure but keep trying the remaining specs.
                last_exc = e
                errored = state.errored
                state.errored = False
        if hasattr(self, 'default'):
            state.args.append(self.default)
        else:
            state.errored = errored
            # Bug fix: the old code did ``raise e`` here, which was unbound
            # when ``specs`` was empty (and always unbound on Python 3,
            # where the except-binding is cleared after the handler).
            if last_exc is None:
                raise callbacks.ArgumentError
            raise last_exc
class reverse(context):
    """Applies the wrapped spec to the argument list in reversed order."""
    def __call__(self, irc, msg, args, state):
        # Reverse in place, convert, then restore the original order.
        args.reverse()
        super(reverse, self).__call__(irc, msg, args, state)
        args.reverse()
class commalist(context):
    """Splits comma-separated arguments and converts each part into a list.

    A trailing comma on an argument means the list continues with the next
    argument.  On any failure the original argument list is restored.
    """
    def __call__(self, irc, msg, args, state):
        original = args[:]
        st = state.essence()
        trailingComma = True
        try:
            while trailingComma:
                arg = args.pop(0)
                if not arg.endswith(','):
                    trailingComma = False
                for part in arg.split(','):
                    if part: # trailing commas
                        super(commalist, self).__call__(irc, msg, [part], st)
            state.args.append(st.args)
        except Exception, e:
            args[:] = original
            raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
for (name, spec) in getopts.iteritems():
if spec == '':
|
__author__ = 'Maximilian Bisani'
__version__ = '$LastChangedRevision: 1691 $'
__date__ = '$LastChangedDate: 2011-08-03 15:38:08 +0200 (Wed, 03 Aug 2011) $'
__copyright__ = 'Copyright (c) 2004-2005 RWTH Aachen University'
__license__ = """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 2 (June
1991) as published by the Free Software Foundation.
This program is distributed in the hope that it will be | useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public Li | cense
along with this program; if not, you will find it at
http://www.gnu.org/licenses/gpl.html, or write to the Free Software
Foundation, Inc., 51 Franlin Street, Fifth Floor, Boston, MA 02110,
USA.
Should a provision of no. 9 and 10 of the GNU General Public License
be invalid or become invalid, a valid provision is deemed to have been
agreed upon which comes closest to what the parties intended
commercially. In any case guarantee/warranty shall be limited to gross
negligent actions or intended actions or fraudulent concealment.
"""
class SymbolInventory:
    """
    Bidirectional mapping between symbols and small integer indices.

    0 (zero) is __void__ which is used internally as a terminator to
    indicate end of a multigram
    1 (one) is __term__, the end-of-string symbol (similar to the
    end-of-sentence word in language modeling).
    """
    # Index of the end-of-string symbol.
    term = 1
    def __init__(self):
        # list maps index -> symbol; dir maps symbol -> index.
        # '__void__' is deliberately absent from dir: it is internal only.
        self.list = ['__void__', '__term__']
        self.dir = { '__term__' : self.term }
    def size(self):
        "The number of symbols, including __term__, but not counting __void__."
        return len(self.list) - 1
    def index(self, sym):
        """Return the index of ``sym``, assigning a fresh one on first use."""
        try:
            return self.dir[sym]
        except KeyError:
            result = self.dir[sym] = len(self.list)
            self.list.append(sym)
            return result
    def parse(self, seq):
        """Convert a symbol sequence to a tuple of indices (interning new ones)."""
        # Fix: no need to materialize seq as a list; map() iterates it directly.
        return tuple(map(self.index, seq))
    def symbol(self, ind):
        """Return the symbol stored at index ``ind``."""
        return self.list[ind]
    def format(self, seq):
        """Convert a sequence of indices back to a tuple of symbols."""
        return tuple(map(self.symbol, seq))
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from oslo_utils import encodeutils
except ImportError:
from oslo.utils import encodeutils
import six
from .._i18n import _
from . import exceptions
from .. import uuidutils
def find_resource(manager, name_or_id, **find_args):
    """Look for resource in a given manager.
    Used as a helper for the _find_* methods.
    Example:
    .. code-block:: python
        def _find_hypervisor(cs, hypervisor):
            #Get a hypervisor by name or ID.
            return cliutils.find_resource(cs.hypervisors, hypervisor)
    """
    # first try to get entity as integer id
    try:
        return manager.get(int(name_or_id))
    except (TypeError, ValueError, exceptions.NotFound):
        pass
    # now try to get entity as uuid
    try:
        # UUID matching needs bytes on Python 2 and text on Python 3.
        if six.PY2:
            tmp_id = encodeutils.safe_encode(name_or_id)
        else:
            tmp_id = encodeutils.safe_decode(name_or_id)
        if uuidutils.is_uuid_like(tmp_id):
            return manager.get(tmp_id)
    except (TypeError, ValueError, exceptions.NotFound):
        pass
    # for str id which is not uuid
    if getattr(manager, 'is_alphanum_id_allowed', False):
        try:
            return manager.get(name_or_id)
        except exceptions.NotFound:
            pass
    try:
        # prefer a human-readable-id lookup when the manager supports it
        try:
            return manager.find(human_id=name_or_id, **find_args)
        except exceptions.NotFound:
            pass
        # finally try to find entity by name
        try:
            # the resource class may define its own name attribute
            resource = getattr(manager, 'resource_class', None)
            name_attr = resource.NAME_ATTR if resource else 'name'
            kwargs = {name_attr: name_or_id}
            kwargs.update(find_args)
            return manager.find(**kwargs)
        except exceptions.NotFound:
            msg = _("No %(name)s with a name or "
                    "ID of '%(name_or_id)s' exists.") % \
                {
                    "name": manager.resource_class.__name__.lower(),
                    "name_or_id": name_or_id
                }
            raise exceptions.CommandError(msg)
    except exceptions.NoUniqueMatch:
        msg = _("Multiple %(name)s matches found for "
                "'%(name_or_id)s', use an ID to be more specific.") % \
            {
                "name": manager.resource_class.__name__.lower(),
                "name_or_id": name_or_id
            }
        raise exceptions.CommandError(msg)
|
# 0 = open space, 1=boundary , 2= the robot, 3= finish
def maze_vision():
    """Locate the robot ('2') and the finish ('3') in a hard-coded maze,
    then print the length of the shortest path between them."""
    path = ''
    maze = []
    maze.append(list('000000002000000'))
    maze.append(list('000000003001100'))
    maze.append(list('000000000000000'))
    maze.append(list('000000000000000'))
    maze.append(list('000000000000000'))
    maze.append(list('000000000000000'))
    fx = 0
    fy = 0
    sx = 0
    sy = 0
    # Scan every cell.  The original loops used range(0, len(...) - 1),
    # which skipped the last row and last column and could miss a start
    # or finish cell sitting on the border.
    for x in range(len(maze[0])):
        for y in range(len(maze)):
            if maze[y][x] == '2':
                sx = x
                sy = y
            elif maze[y][x] == '3':
                fx = x
                fy = y
    ans = distance(maze, sx, sy, fx, fy, path)
    print("the shortest path is " + str(ans) + " spaces")
    print(path)
def distance(maze, sx, sy, fx, fy, path):
    """Shortest number of moves from (sx, sy) to the finish cell '3'.

    The maze is a list of rows of single-character strings indexed
    maze[y][x]: '0' open, '1' wall, '2' robot, '3' finish, '4' visited.
    Returns 999999999 when the finish is unreachable.  fx/fy and path
    are kept for signature compatibility with the original caller.

    Fixes over the original: the finish check printed an undefined name
    (``hit``); the recursive call passed the new row as ``sx``; only the
    "down" direction was ever explored; visited cells were never
    restored, so alternative routes were cut off.
    """
    UNREACHABLE = 999999999
    if maze[sy][sx] == '3':  # reached finish
        return 0
    best = UNREACHABLE
    saved = maze[sy][sx]
    maze[sy][sx] = '4'  # mark as traveled so recursion cannot loop
    # Explore all four neighbours: up, down, left, right.
    for dx, dy in ((0, -1), (0, 1), (-1, 0), (1, 0)):
        nx, ny = sx + dx, sy + dy
        if 0 <= ny < len(maze) and 0 <= nx < len(maze[ny]):
            # May step onto open space or directly onto the finish.
            if maze[ny][nx] in ('0', '3'):
                best = min(best, 1 + distance(maze, nx, ny, fx, fy, path))
    maze[sy][sx] = saved  # unmark so other branches may pass through
    return best
maze_vision()
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from nova.db.sqlalchemy import api as db_session
from nova import exception
from nova.openstack.common.gettextutils import _
INIT_VERSION = 215
_REPOSITORY = None
get_engine = db_session.get_engine
def db_sync(version=None):
    """Migrate the database schema to ``version`` (latest when None).

    Raises NovaException when ``version`` cannot be parsed as an integer.
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.NovaException(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    engine = get_engine()
    # Moving to a newer revision (or to the newest, when version is None)
    # is an upgrade; any explicit older target is a downgrade.
    moving_forward = version is None or version > current_version
    if moving_forward:
        return versioning_api.upgrade(engine, repository, version)
    return versioning_api.downgrade(engine, repository, version)
def db_version():
    """Return the current schema version of the database.

    If the database is not yet under migrate's version control, an empty
    database is placed under control at INIT_VERSION; a non-empty,
    uncontrolled database is rejected as a pre-Essex deployment.
    """
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        # Inspect the live schema to decide whether this is a fresh DB.
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            db_version_control(INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            # Some pre-Essex DB's may not be version controlled.
            # Require them to upgrade using Essex first.
            raise exception.NovaException(
                _("Upgrade DB using Essex release first."))
def db_initial_version():
    """Return the base schema version migrations start from."""
    return INIT_VERSION
def db_version_control(version=None):
    """Place the database under migrate's version control at ``version``."""
    repository = _find_migrate_repo()
    versioning_api.version_control(get_engine(), repository, version)
    return version
def _find_migrate_repo():
    """Get the path for the migrate repository.

    The Repository is created lazily and cached in the module-level
    _REPOSITORY global so repeated calls reuse the same object.
    """
    global _REPOSITORY
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'migrate_repo')
    # Explicit check instead of ``assert``: asserts are stripped when
    # Python runs with -O, which would silently skip this sanity check.
    # AssertionError is kept so any existing callers see the same type.
    if not os.path.exists(path):
        raise AssertionError('migrate repository not found at %s' % path)
    if _REPOSITORY is None:
        _REPOSITORY = Repository(path)
    return _REPOSITORY
|
c | lass PowerDNSRouter(object):
"""Route all operations on powerdns models to the powerdns database."""
db_name = 'powerdns'
app_name = 'powerdns'
def db_for_read(self, model, **hints):
if model._meta.app_label | == self.app_name:
return self.db_name
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == self.app_name:
return self.db_name
return None
def allow_relation(self, obj1, obj2, **hints):
if (obj1._meta.app_label == self.app_name and
obj2._meta.app_label == self.app_name):
return True
return None
def allow_syncdb(self, db, model):
if model._meta.app_label == self.app_name:
return db == self.db_name
elif db == self.db_name:
# workaround for http://south.aeracode.org/ticket/370
return model._meta.app_label == 'south'
return None
|
# -*- coding: utf-8
"""
Tests related to the Cuttle class.
"""
import os
import unittest
import warnings
import time
from cuttle.reef import Cuttle, Column
from cuttlepool import CuttlePool
from cuttlepool.cuttlepool import PoolConnection
DB = '_cuttle_test_db'
DB2 = '_cuttle_test_db2'
HOST = 'localhost'
class BaseDbTestCase(unittest.TestCase):
    """Shared fixture: builds a Cuttle db with one Heros model.

    The SQL flavor comes from the TEST_CUTTLE environment variable;
    MySQL credentials are loaded from a local mysql_credentials module.
    """
    def setUp(self):
        self.Pool = CuttlePool
        self.Connection = PoolConnection
        self.credentials = dict(host=HOST)
        self.sql_type = os.environ['TEST_CUTTLE'].lower()
        if self.sql_type == 'mysql':
            import pymysql
            from mysql_credentials import USER, PASSWD
            self.Cursor = pymysql.cursors.Cursor
            self.connect = pymysql.connect
            self.credentials.update(dict(user=USER, passwd=PASSWD))
        self.db = Cuttle(self.sql_type, db=DB, **self.credentials)
        # Declaring the subclass registers the table on self.db.
        class Heros(self.db.Model):
            columns = [
                Column('hero_id', 'INT', auto_increment=True, primary_key=True),
                Column('hero_name', 'VARCHAR', maximum=16)
            ]
        self.testtable1 = Heros
        # Expected DDL and DESCRIBE output used by schema assertions.
        self.create_heros_statement = (
            'CREATE TABLE IF NOT EXISTS {} (\n'
            'hero_id INT AUTO_INCREMENT PRIMARY KEY,\n'
            'hero_name VARCHAR(16)\n'
            ')').format(self.testtable1().name)
        self.heros_schema = (('hero_id', 'int(11)', 'NO', 'PRI', None, 'auto_increment'),
                             ('hero_name', 'varchar(16)', 'YES', '', None, ''))
    def tearDown(self):
        """Drop the test database, silencing server-side warnings."""
        warnings.filterwarnings('ignore')
        self.db.drop_db()
    def createPool(self, **kwargs):
        """Return a raw CuttlePool for direct SQL verification."""
        warnings.filterwarnings('ignore')
        return CuttlePool(self.connect, **kwargs)
class DbNestedModelTestCase(BaseDbTestCase):
    """Fixture adding an inheritance chain: a column-less base model and
    a Villains model derived from it."""
    def setUp(self):
        super(DbNestedModelTestCase, self).setUp()
        class UselessTable(self.db.Model):
            # Intentionally declares no columns.
            pass
        self.uselesstable = UselessTable
        class Villains(UselessTable):
            columns = [
                Column('villain_id', 'INT'),
                Column('villain_name', 'VARCHAR', maximum=16)
            ]
        self.testtable2 = Villains
c | lass TwoDbTestCase(BaseDbTestCase):
def setUp(self):
super(TwoDbTestCase, self).setUp()
self.db2 = Cuttle(self.sql_type, db=DB2, **self.credentials)
class ThrowAway(self.db2.Model):
columns = [
Column('throwaway', 'INT')
]
self.testtable2 = Thr | owAway
def tearDown(self):
super(TwoDbTestCase, self).tearDown()
self.db2.drop_db()
class CuttleInstanceTestCase(unittest.TestCase):
    """Constructor validation and simple properties of Cuttle."""

    def test_improper_sql_type(self):
        """An unsupported SQL flavor must be rejected."""
        with self.assertRaises(ValueError):
            Cuttle('wrongsql', db='db')

    def test_no_db(self):
        """Omitting the database name must be rejected."""
        with self.assertRaises(ValueError):
            Cuttle('mysql')

    def test_name_property(self):
        """The ``name`` property reflects the ``db`` argument."""
        expected = 'get_schwifty'
        instance = Cuttle('mysql', db=expected)
        self.assertEqual(instance.name, expected)
class CuttleCreateDbTestCase(BaseDbTestCase):
    """create_db() must create the database and the declared tables."""
    def test_create_db(self):
        self.db.create_db()
        pool = self.createPool(db=DB, **self.credentials)
        con = pool.get_connection()
        cur = con.cursor()
        # get databases
        cur.execute('SHOW DATABASES')
        dbs = cur.fetchall()
        self.assertIn((DB,), dbs)
    def test_table_schema(self):
        """The generated table matches the expected DESCRIBE output."""
        self.db.create_db()
        pool = self.createPool(db=DB, **self.credentials)
        con = pool.get_connection()
        cur = con.cursor()
        # get tables
        cur.execute('SHOW TABLES')
        tbls = cur.fetchall()
        self.assertEqual(((self.testtable1().name,),), tbls)
        # get table schema
        cur.execute('DESCRIBE {}'.format(self.testtable1().name))
        tblschma = cur.fetchall()
        self.assertEqual(self.heros_schema, tblschma)
class CuttleCreateMultiDbTestCase(TwoDbTestCase):
    """Two Cuttle instances must create disjoint databases/tables."""
    def test_create_two_dbs(self):
        self.db.create_db()
        self.db2.create_db()
        pool1 = self.createPool(db=DB, **self.credentials)
        pool2 = self.createPool(db=DB2, **self.credentials)
        con1 = pool1.get_connection()
        cur1 = con1.cursor()
        con2 = pool2.get_connection()
        cur2 = con2.cursor()
        # get databases
        cur1.execute('SHOW DATABASES')
        dbs = cur1.fetchall()
        self.assertIn((DB,), dbs)
        self.assertIn((DB2,), dbs)
        # get tables; each db sees only its own model's table
        cur1.execute('SHOW TABLES')
        tbls1 = cur1.fetchall()
        cur2.execute('SHOW TABLES')
        tbls2 = cur2.fetchall()
        self.assertIn((self.testtable1().name,), tbls1)
        self.assertNotIn((self.testtable2().name,), tbls1)
        self.assertIn((self.testtable2().name,), tbls2)
        self.assertNotIn((self.testtable1().name,), tbls2)
class CuttleCreateDbNestedModelsTestCase(DbNestedModelTestCase):
    """Only models that declare columns get tables; the column-less
    base class must not."""
    def test_correct_tables_made(self):
        self.db.create_db()
        pool = self.createPool(db=DB, **self.credentials)
        con = pool.get_connection()
        cur = con.cursor()
        # get tables
        cur.execute('SHOW TABLES')
        tbls = cur.fetchall()
        self.assertIn((self.testtable1().name,), tbls)
        self.assertIn((self.testtable2().name,), tbls)
        self.assertNotIn((self.uselesstable().name,), tbls)
class CuttleDropDbTestCase(BaseDbTestCase):
    """drop_db() must remove a previously created database."""
    def setUp(self):
        super(CuttleDropDbTestCase, self).setUp()
        self.db.create_db()
    def test_drop_db(self):
        pool = self.createPool(**self.credentials)
        con = pool.get_connection()
        cur = con.cursor()
        # get databases
        cur.execute('SHOW DATABASES')
        dbs = cur.fetchall()
        # make sure database actually exists
        self.assertIn((DB,), dbs)
        # drop the database
        self.db.drop_db()
        # get databases
        cur.execute('SHOW DATABASES')
        dbs = cur.fetchall()
        # make sure database no longer exists
        self.assertNotIn((DB,), dbs)
|
# coding: utf-8
from unittest import TestCase
from yased import EventsDispatcher, Event
class AnyEvent(Event):
    """Concrete Event subclass used as the event type in the tests below."""
class EventsDispatcherTestCase(TestCase):
    """connect/disconnect/send semantics of EventsDispatcher."""
    def setUp(self):
        self.ed = EventsDispatcher()
    def test_send(self):
        """A connected handler fires once; after disconnect it stops."""
        calls = []
        def handler(*args, **kwargs):
            calls.append(True)
        self.ed.connect(handler, AnyEvent)
        self.ed.send(AnyEvent())
        self.assertEqual(len(calls), 1)
        self.ed.disconnect(handler, AnyEvent)
        self.ed.send(AnyEvent())
        self.assertEqual(len(calls), 1)
    def test_send_with_sender(self):
        """A sender-specific connection fires only for that sender, and a
        sender-less disconnect does not remove it."""
        calls = []
        def handler(*args, **kwargs):
            calls.append(True)
        self.ed.connect(handler, AnyEvent, self)
        self.ed.send(AnyEvent(), sender=self)
        self.assertEqual(len(calls), 1)
        # send without a sender: the sender-specific handler must not fire
        self.ed.send(AnyEvent())
        self.assertEqual(len(calls), 1)
        # disconnect without a sender leaves the sender-specific binding
        self.ed.disconnect(handler, AnyEvent)
        self.ed.send(AnyEvent(), sender=self)
        self.assertEqual(len(calls), 2)
        self.ed.disconnect(handler, AnyEvent, sender=self)
        self.ed.send(AnyEvent(), sender=self)
        self.assertEqual(len(calls), 2)
    def test_send_args_kwargs(self):
        """Positional and keyword arguments reach the handler intact."""
        calls = []
        event_args = (1, 2, 3)
        event_kwargs = {'a': 4, 'b': 5, 'c': 6}
        def handler(*args, **kwargs):
            calls.append(True)
            self.assertEqual(args, event_args)
            self.assertEqual(kwargs, event_kwargs)
        self.ed.connect(handler, AnyEvent)
        self.ed.send(AnyEvent(*event_args, **event_kwargs))
        self.assertEqual(len(calls), 1)
# django-offline-messages Test Settings
# sqlite backend for the test run (no NAME configured here).
DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}}
# Minimal apps required by offline_messages plus the test package.
INSTALLED_APPS = (
    'django.contrib.sessions',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'offline_messages',
    'tests'
)
ROOT_URLCONF = ''
# Extra modules to include in coverage measurement.
COVERAGE_ADDITIONAL_MODULES = ('offline_messages',)
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warra | nty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# | You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration: overrides merged into the base
# hotgym description (disables the dayOfWeek/weekend encoders, tunes
# the consumption encoder and TM thresholds).
config = \
{ 'modelParams': { 'clParams': { 'verbosity': 0},
                   'inferenceType': 'NontemporalMultiStep',
                   'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
                                                                    'fieldname': u'consumption',
                                                                    'n': 28,
                                                                    'name': u'consumption',
                                                                    'type': 'AdaptiveScalarEncoder',
                                                                    'w': 21},
                                                   'timestamp_dayOfWeek': None,
                                                   'timestamp_timeOfDay': { 'fieldname': u'timestamp',
                                                                            'name': u'timestamp_timeOfDay',
                                                                            'timeOfDay': ( 21,
                                                                                           8),
                                                                            'type': 'DateEncoder'},
                                                   'timestamp_weekend': None},
                                    'verbosity': 0},
                   'spParams': { 'spVerbosity': 0},
                   'tmParams': { 'activationThreshold': 14,
                                 'minThreshold': 12,
                                 'verbosity': 1}}}
mod = importBaseDescription('../hotgym/description.py', config)
# Expose the merged description at module level, as the OPF loader expects.
locals().update(mod.__dict__)
|
""" | Subpackage for the Archiver server."""
from ..envars import SRVURL_ARCHIVER as SERVER_URL
from .client import ClientArchiver
from .pvarch import PVDetails, PVData, PVDataSet
from .devices import Orbit, Correctors, TrimQuads
from .time import Time
from . import exceptions
del client, pvarch, devic | es
|
# coding=utf-8
import unittest
"""754. Reach a Number
https://leetcode.com/problems/reach-a-number/description/
You are standing at position `0` on an infinite number line. There is a goal
at position `target`.
On each move, you can either go left or right. During the _n_ -th move
(starting from 1), you take _n_ steps.
Return the minimum numbe | r of steps required to reach the destination.
**Example 1:**
**Input:** target = 3
**Output:** 2
**Explanation:**
On the first move we step from 0 to 1.
On the second step we step from 1 to 3.
**Example 2:**
**In | put:** target = 2
**Output:** 3
**Explanation:**
On the first move we step from 0 to 1.
On the second move we step from 1 to -1.
On the third move we step from -1 to 2.
**Note:**
* `target` will be a non-zero integer in the range `[-10^9, 10^9]`.
Similar Questions:
"""
class Solution(object):
    def reachNumber(self, target):
        """Minimum number of moves to reach ``target`` from 0, where the
        n-th move covers n steps in either direction.

        By symmetry abs(target) suffices.  Take moves until the running
        sum reaches the target AND overshoots it by an even amount: an
        even surplus can be absorbed by flipping the sign of one earlier
        move (flipping move j changes the sum by 2*j).

        :type target: int
        :rtype: int
        """
        target = abs(target)
        step = 0
        total = 0
        while total < target or (total - target) % 2 != 0:
            step += 1
            total += step
        return step

    def test(self):
        pass


if __name__ == "__main__":
    unittest.main()
|
# -*- coding: utf-8 -*-
from kivy.uix.listview import ListView
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock
from kivy.adapters.listadapter import ListAdapter
from kivy.adapters.models import SelectableDataItem
from kivy.uix.listview import ListItemButton
from random import choice
from string import ascii_uppercase, digits
import random
class DataItem(SelectableDataItem):
    """Selectable list item carrying a random six-character name."""

    def __init__(self, **kwargs):
        super(DataItem, self).__init__(**kwargs)
        alphabet = ascii_uppercase + digits
        self.name = ''.join(choice(alphabet) for _ in range(6))
class MainView(FloatLayout):
    """
    Implementation of a ListView using the kv language.

    Once per second the list alternates between appending a new random
    item and replacing a random existing one (see update_list_data).
    """
    def __init__(self, **kwargs):
        super(MainView, self).__init__(**kwargs)
        data_items = []
        data_items.append(DataItem())
        data_items.append(DataItem())
        data_items.append(DataItem())
        # Build the per-row widget kwargs: fixed 25px row height.
        list_item_args_converter = lambda row_index, obj: {'text': obj.name,
                                                           'size_hint_y': None,
                                                           'height': 25}
        self.list_adapter = \
            ListAdapter(data=data_items,
                        args_converter=list_item_args_converter,
                        selection_mode='single',
                        propagate_selection_to_data=False,
                        allow_empty_selection=False,
                        cls=ListItemButton)
        self.list_view = ListView(adapter=self.list_adapter)
        self.add_widget(self.list_view)
        self.toggle = 'adding'
        # Fire update_list_data once every second.
        Clock.schedule_interval(self.update_list_data, 1)
    def update_list_data(self, dt):
        """Alternately append a new DataItem or replace a random one."""
        items = self.list_adapter.data
        if self.toggle == 'adding':
            item = DataItem(name='New ' * random.randint(1, 2))
            items.append(item)
            self.toggle = 'changing'
            print 'added ' + item.name
        else:
            random_index = random.randint(0, len(items) - 1)
            item = items[random_index]
            items[random_index] = DataItem()
            self.toggle = 'adding'
            print 'changed {0} to {1}'.format(item.name,
                                              items[random_index].name)
if __name__ == '__main__':
    from kivy.base import runTouchApp
    # Run the demo with an 800px-wide root widget.
    runTouchApp(MainView(width=800))
|
"""
Benchmarking and performance tests.
"""
import pytest
from pluggy import HookspecMarker, HookimplMarker, PluginManager
from pluggy._hooks import HookImpl
from pluggy._callers import _multicall
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
@hookimpl
def hook(arg1, arg2, arg3):
    """Plain hook implementation; echoes its arguments (benchmark body)."""
    return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def wrapper(arg1, arg2, arg3):
    """Hook wrapper that yields control without touching the result."""
    yield
@pytest.fixture(params=[10, 100], ids="hooks={}".format)
def hooks(request):
    """Parametrized fixture: a list of 10 or 100 plain hook impls."""
    return [hook] * request.param
@pytest.fixture(params=[10, 100], ids="wrappers={}".format)
def wrappers(request):
    """Parametrized fixture: a list of 10 or 100 hook wrappers."""
    return [wrapper] * request.param
def test_hook_and_wrappers_speed(benchmark, hooks, wrappers):
    """Benchmark _multicall over the parametrized hook/wrapper mix."""
    def setup():
        # Build fresh HookImpl objects for each benchmark round.
        hook_name = "foo"
        hook_impls = []
        for method in hooks + wrappers:
            f = HookImpl(None, "<temp>", method, method.example_impl)
            hook_impls.append(f)
        caller_kwargs = {"arg1": 1, "arg2": 2, "arg3": 3}
        firstresult = False
        return (hook_name, hook_impls, caller_kwargs, firstresult), {}
    benchmark.pedantic(_multicall, setup=setup)
@pytest.mark.parametrize(
    ("plugins, wrappers, nesting"),
    [
        (1, 1, 0),
        (1, 1, 1),
        (1, 1, 5),
        (1, 5, 1),
        (1, 5, 5),
        (5, 1, 1),
        (5, 1, 5),
        (5, 5, 1),
        (5, 5, 5),
        (20, 20, 0),
        (100, 100, 0),
    ],
)
def test_call_hook(benchmark, plugins, wrappers, nesting):
    """Benchmark a full ``pm.hook.fun`` dispatch for varying numbers of
    plugins, wrappers and recursive hook-in-hook nesting depth."""
    pm = PluginManager("example")
    class HookSpec:
        @hookspec
        def fun(self, hooks, nesting: int):
            yield
    class Plugin:
        def __init__(self, num):
            self.num = num
        def __repr__(self):
            return f"<Plugin {self.num}>"
        @hookimpl
        def fun(self, hooks, nesting: int):
            # Re-enter the hook to simulate nested hook calls.
            if nesting:
                hooks.fun(hooks=hooks, nesting=nesting - 1)
    class PluginWrap:
        def __init__(self, num):
            self.num = num
        def __repr__(self):
            return f"<PluginWrap {self.num}>"
        @hookimpl(hookwrapper=True)
        def fun(self):
            yield
    pm.add_hookspecs(HookSpec)
    for i in range(plugins):
        pm.register(Plugin(i), name=f"plug_{i}")
    for i in range(wrappers):
        pm.register(PluginWrap(i), name=f"wrap_plug_{i}")
    benchmark(pm.hook.fun, hooks=pm.hook, nesting=nesting)
|
__author__ = 'ariel'
"""
Python Population Simulator
Copyright (C) 2015 Ariel Young
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from Genetics import Expressions, Genotypes
from Crypto.Random import random
class TraitAlleles(object):
    """A pair of alleles for one genetic trait.

    Tracks the two allele letters, the resulting expression
    (homo-/heterozygous), the genotype (dominant/recessive) and the
    phenotype string looked up in ``traitPhenotypes``.
    """

    # Dominant/recessive phenotype per trait.  The original table had
    # the furColor/furLength value sets swapped and used a truncated
    # "isTa" key that could never match the "isTall" trait name used
    # everywhere else in the class.
    traitPhenotypes = {"furColor": {"dominant": "black", "recessive": "brown"},
                       "furLength": {"dominant": "long", "recessive": "short"},
                       "isTall": {"dominant": "tall", "recessive": "short"}}
    traitIsComplete = False
    expression = None
    trait = None
    genotype = None
    phenotype = None
    # Unset letters are None.  The original used the ``str`` type object
    # as a placeholder, which is always truthy and made the
    # "both letters present" guards call isupper() on a class.
    letterOne = None
    letterTwo = None
    choices = []

    def __init__(self, trait, alleles=False, letter_one=None, letter_two=None):
        """Create the allele pair for ``trait``; optionally seed letters."""
        self.trait = trait
        if alleles:
            if letter_one is not None:
                self.letterOne = letter_one
            if letter_two is not None:
                self.letterTwo = letter_two
            if self.letterOne and self.letterTwo:
                self._updateGenotype()
        # Store on the instance: the original assigned a local variable
        # named ``choices`` and compared against "isTall", leaving
        # self.choices permanently empty.
        if trait == "furColor":
            self.choices = list('Ff')
        elif trait == "furLength":
            self.choices = list('Ll')
        elif trait == "isTall":
            self.choices = list("Hh")

    def _updateGenotype(self):
        """Derive expression/genotype from the case of the two letters,
        then refresh the phenotype.  Replaces four copies of this logic."""
        one_up = self.letterOne.isupper()
        two_up = self.letterTwo.isupper()
        if one_up and two_up:
            self.expression = Expressions.HOMOZYGOUS_DOMINANT
            self.genotype = Genotypes.DOMINANT
        elif one_up or two_up:
            self.expression = Expressions.HETEROZYGOUS_DOMINANT
            self.genotype = Genotypes.DOMINANT
        else:
            self.expression = Expressions.HOMOZYGOUS_RECESSIVE
            self.genotype = Genotypes.RECESSIVE
        self.__determinePhenotype()

    def getGenotype(self):
        return self.genotype

    def getExpression(self):
        return self.expression

    def setLetterOne(self, letter):
        """Set the first allele letter and refresh derived state."""
        self.letterOne = letter
        if self.letterOne and self.letterTwo:
            self._updateGenotype()

    def setLetterTwo(self, letter):
        """Set the second allele letter and refresh derived state."""
        self.letterTwo = letter
        if self.letterOne and self.letterTwo:
            self._updateGenotype()

    def getRandomAllele(self):
        """Return one of the two letters with equal probability."""
        return self.letterOne if random.randint(0, 1) else self.letterTwo

    def __determinePhenotype(self):
        # The original assigned the recessive phenotype to self.genotype
        # (a typo), so recessive pairs never received a phenotype.
        key = "dominant" if self.genotype == Genotypes.DOMINANT else "recessive"
        self.phenotype = self.traitPhenotypes[self.trait][key]
        self.choices = [self.letterOne, self.letterTwo]

    def populateWithRandom(self):
        """Draw both letters at random from ``choices`` and update state."""
        self.letterOne = random.choice(self.choices)
        self.letterTwo = random.choice(self.choices)
        self._updateGenotype()

    def getAlleles(self):
        """Return both letters, or whichever single one is set."""
        if self.letterOne and self.letterTwo:
            return (self.letterOne, self.letterTwo)
        elif self.letterOne and not self.letterTwo:
            return self.letterOne
        elif self.letterTwo and not self.letterOne:
            return self.letterTwo

    def getAllelesAsList(self):
        return [self.letterOne, self.letterTwo]

    def getPhenotype(self):
        return self.phenotype
rt pyamg
from pyamg import smoothed_aggregation_solver
from scipy.sparse.linalg import lobpcg
from diffusions import _build_laplacian
def spectral_embedding(adjacency):
    """ A diffusion reordering, but that works for negative values.

    Dense eigendecomposition of the degree-normalized |adjacency|;
    returns the eigenvalues and the rescaled eigenvectors in descending
    eigenvalue order, with the top eigenvector dropped.
    """
    # Normalize the graph: the sum of each set of edges must be one
    abs_adjacency = np.abs(adjacency)
    diag_weights = abs_adjacency.sum(axis=1)
    diag_mask = (diag_weights == 0)
    # Isolated nodes get weight 1 to avoid division by zero below.
    diag_weights[diag_mask] = 1
    d = np.sign(diag_weights)/np.sqrt(np.abs(diag_weights))
    lap = abs_adjacency*d[:, np.newaxis]*d[np.newaxis, :]
    lambdas, diffusion_map = linalg.eigh(lap)
    # eigh yields ascending eigenvalues; [-2::-1] skips the last (top)
    # eigenvector and reverses into descending order.
    return lambdas, diffusion_map.T[-2::-1]*d
def spectral_embedding_sparse(adjacency, k_max=14, mode='amg', take_first=True):
    """ A diffusion reordering, but that works for negative values.

    Sparse variant: mode 'bf' uses eigen_symmetric directly; 'amg' uses
    LOBPCG preconditioned by smoothed aggregation on I - A.
    ``take_first`` keeps the leading eigenvector; otherwise it is
    dropped.  NOTE(review): any other ``mode`` leaves ``res`` unbound
    and the return raises NameError — confirm callers only pass
    'bf'/'amg'.
    """
    # Normalize the graph: the sum of each set of edges must be one
    diag_weights = np.array(adjacency.sum(axis=1))
    diag_mask = (diag_weights == 0)
    # Isolated nodes get weight 1 to avoid division by zero.
    diag_weights[diag_mask] = 1
    dd = np.sign(diag_weights)/np.sqrt(np.abs(diag_weights))
    if mode == 'bf':
        lambdas, diffusion_map = eigen_symmetric(adjacency, k=k_max, which='LA')
        print lambdas
        if take_first:
            res = diffusion_map.T[::-1]*dd.ravel()
        else:
            res = diffusion_map.T[-2::-1]*dd.ravel()
    elif mode == 'amg':
        print 'amg'
        sh = adjacency.shape[0]
        adjacency = adjacency.copy()
        #diag = sparse.coo_matrix((diag_weights.ravel(), (range(sh), range(sh))))
        diag = sparse.eye(sh, sh)
        # Work on I - A so the interesting modes become the smallest ones.
        adjacency = - adjacency + diag
        ml = smoothed_aggregation_solver(adjacency.tocsr())
        X = scipy.rand(adjacency.shape[0], k_max)
        #X[:, 0] = 1. / np.sqrt(adjacency.shape[0])
        X[:, 0] = 1. / dd.ravel()
        M = ml.aspreconditioner()
        lambdas, diffusion_map = lobpcg(adjacency, X, M=M, tol=1.e-12, largest=False)
        print lambdas
        if take_first:
            res = diffusion_map.T * dd.ravel()
        else:
            res = diffusion_map.T[1:] * dd.ravel()
        print res.shape, dd.shape
    return res
def modularity_embedding(adjacency, kmax=10):
    """ Proceedings of the fifth SIAM international conference on data
    mining, Smyth, A spectral clustering approach to finding
    communities in graphs.
    Return the eigenvalues of the Q matrice

    NOTE(review): ``kmax`` is accepted but unused here; all eigenmaps of
    the dense column-normalized matrix are returned, sorted by
    descending eigenvalue.
    """
    #n = len(adjacency)
    abs_adjacency = np.abs(adjacency)
    #degrees = adjacency.copy()
    #degrees.flat[::n+1] = 0
    #degrees = degrees.sum(axis=0)
    #weights = 1/degrees[:, np.newaxis] * abs_adjacency
    #weights.flat[::n+1] = 1
    # Column-stochastic normalization of the absolute adjacency.
    weights = abs_adjacency/abs_adjacency.sum(axis=0)
    lambdas, maps = linalg.eig(weights)
    indices = np.argsort(lambdas)[::-1]
    print lambdas[:10]
    return maps.T[indices]
def modularity_embedding_sparse(adjacency, kmax=10):
    """ Proceedings of the fifth SIAM international conference on data
    mining, Smyth, A spectral clustering approach to finding
    communities in graphs.
    Return the eigenvalues of the Q matrice

    Sparse variant: keeps only the ``kmax`` eigenvectors of largest real
    part.  NOTE(review): the input is densified for normalization and
    re-sparsified afterwards — memory-heavy for large graphs.
    """
    if isinstance(adjacency, sparse.csc.csc_matrix):
        adjacency = np.array(adjacency.todense())
    abs_adjacency = np.abs(adjacency)
    # Column-stochastic normalization of the absolute adjacency.
    weights = abs_adjacency/abs_adjacency.sum(axis=0)
    weights = sparse.csc_matrix(weights)
    lambdas, maps = eigen(weights, \
                        k=kmax, which='LR')
    print lambdas
    return maps.T#[1:]
def newman_clustering(adjacency, eps=1e-8):
    """ Newman's spectral algorithm to maximize modularity.

    Recursively bisects the graph along the sign of the leading
    eigenvector of the modularity matrix, stopping when the leading
    eigenvalue drops to ``eps`` or below.  Returns one integer label per
    node; each recursion level appends a binary digit to the label.
    """
    n = len(adjacency)
    abs_adjacency = np.abs(adjacency)
    abs_adjacency.flat[::n+1] = 0
    degrees = abs_adjacency.sum(axis=0)
    # Modularity matrix: observed weight minus the degree-based null model.
    weights = abs_adjacency - np.dot(degrees[:, np.newaxis],
                        degrees[np.newaxis, :])/degrees.sum()
    weights.flat[::n+1] = 0
    weights -= np.diag(weights.sum(axis=0))
    lambdas, maps = linalg.eigh(weights)
    if lambdas[-1] <= eps:
        # No split improves modularity: one community.
        # ``np.int`` was removed in NumPy 1.24; the builtin is equivalent.
        return np.ones(n, dtype=int)
    cluster1 = maps.T[-1] >= 0
    cluster2 = maps.T[-1] < 0
    labels = np.zeros(n, dtype=int)
    labels[cluster1] = 2*newman_clustering(adjacency[cluster1].T[cluster1])
    labels[cluster2] = (1+
                        2*newman_clustering(adjacency[cluster2].T[cluster2])
                        )
    return labels
def q_score(adjacency, labels):
    """Modularity-style Q score of a clustering: rewards within-cluster
    weight, penalizes weight crossing each cluster boundary."""
    half_total = 0.5 * adjacency.sum()
    score = 0
    for lab in np.unique(labels):
        members = np.nonzero(labels == lab)[0]
        # Weight fully inside the cluster (each edge counted once).
        internal = 0.5 * adjacency[members][:, members].sum()
        # Weight leaving the cluster.
        boundary = adjacency[members].sum() - internal
        score += internal / half_total
        score -= 0.5 * (boundary / half_total)
    return 2 * score
def n_cut(adjacency, labels):
    """Negated normalized-cut of a clustering (higher is better):
    for each cluster, the fraction of its volume that crosses the
    boundary, summed and negated."""
    cut = 0
    for lab in np.unique(labels):
        members = np.nonzero(labels == lab)[0]
        internal = adjacency[members][:, members].sum()
        volume = adjacency[members].sum()
        cut += (volume - internal) / volume
    return -cut
def best_k_means(k, maps, adjacency, n_bst=10):
    """Run k-means ``n_bst`` times on ``maps`` and keep the labeling with
    the best n_cut score; returns (labels, q_score of that labeling)."""
    from nipy.neurospin.clustering.clustering import _kmeans
    best_score = -np.inf
    for _ in range(n_bst):
        print "doing kmeans"
        _, labels, _ = _kmeans(maps, nbclusters=k)
        score2 = q_score(adjacency, labels)
        # Selection uses n_cut; q_score is only reported.
        score = n_cut(adjacency, labels)
        if score > best_score:
            best_score = score
            best_score2 = score2
            best_labels = labels
    # NOTE(review): with n_bst == 0 best_labels/best_score2 are never
    # bound and this raises NameError — confirm callers pass n_bst >= 1.
    return best_labels, best_score2 #best_score
def communities_clustering(adjacency, k_best=None, n_bst=2):
    """Cluster a graph into communities via modularity embedding + k-means.

    When ``k_best`` is None, k in [2, 5] is scanned and the k with the
    best score wins; the winner is then re-fit with more k-means restarts.
    """
    adjacency = np.abs(adjacency)
    n_features = adjacency.shape[0]
    # Zero the diagonal: no self-loops.
    adjacency.flat[::n_features+1] = 0
    maps = modularity_embedding(adjacency)
    scores = dict()
    if k_best is None:
        #for k in range(2, .3*n_features):
        for k in range(2, 6):
            # k-1 leading eigenmaps as features for k clusters.
            this_maps = maps[:k-1].T.copy()
            labels, score = best_k_means(k, this_maps, adjacency, n_bst=n_bst)
            scores[k] = score
            print scores[k]
        k_best = scores.keys()[np.argmax(scores.values())]
    this_maps = maps[:k_best-1].T.copy()
    labels, score = best_k_means(k_best, this_maps, adjacency,
                                 n_bst=5*n_bst)
    print 'Final : k=%i, score=%s' % (k_best, score)
    return labels
def communities_clustering_sparse(adjacency, k_best=None, k_min=2, k_max=8, n_bst=4, mode='bf', take_first=False):
    """Sparse variant of communities_clustering using the sparse spectral
    embedding.

    NOTE(review): the return shape differs by branch — with k_best None,
    ``res``/``scores`` are dicts keyed by k; otherwise they are a single
    labeling and score.  Callers must handle both.
    """
    maps = spectral_embedding_sparse(adjacency, k_max=k_max+2, mode=mode, \
                take_first=take_first)
    scores = dict()
    res = dict()
    if k_best is None:
        for k in range(k_min, k_max + 1):
            this_maps = maps[:k - 1].T.copy()
            labels, score = best_k_means(k, this_maps, adjacency, n_bst=n_bst)
            scores[k] = score
            print scores[k]
            res[k] = labels
        #k_best = scores.keys()[np.argmax(scores.values())]
    else:
        this_maps = maps[:k_best - 1].T.copy()
        res, scores = best_k_means(k_best, this_maps, adjacency,
                                   n_bst=4*n_bst)
        print 'Final : k=%i, score=%s' % (k_best, scores)
    return res, scores
def separate_in_regions(data, mask=None, k_best=None, k_min=2, k_max=8, \
center=None, only_connex=True, n_times=4,\
take_first=True, beta=10, mode='bf'):
"""
Separate an image in different regions, using spectral clustering.
Parameters
----------
data: array
Image to be segmented in regions. `data` can be two- or
three-dimensional.
mask: array, optional
Mask of the pixels to be clustered. I |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either | express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import SearchServiceClient
from .async_client import SearchServiceAsyncClient
# Public re-exports: the generated sync and async SearchService clients.
__all__ = (
    "SearchServiceClient",
    "SearchServiceAsyncClient",
)
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
import requests
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
# Workflow environment fixture: per-action defaults under '__actions' that
# the engine merges into std.http invocations unless a task overrides them.
ENV = {
    '__actions': {
        'std.http': {
            'auth': 'librarian:password123',
            'timeout': 30,
        }
    }
}
# The 'user:password' auth string above, as the tuple std.http passes to
# requests.request (see the assert_called_with checks below).
EXPECTED_ENV_AUTH = ('librarian', 'password123')
# Workflow whose task sets no timeout, so the ENV default (30) must apply.
WORKFLOW1 = """
---
version: "2.0"
wf1:
  type: direct
  tasks:
    task1:
      action: std.http url="https://api.library.org/books"
      publish:
        result: <% $ %>
"""
# Workflow whose task sets timeout=60 explicitly, which must win over ENV.
WORKFLOW2 = """
---
version: "2.0"
wf2:
  type: direct
  tasks:
    task1:
      action: std.http url="https://api.library.org/books" timeout=60
      publish:
        result: <% $ %>
"""
# with-items variant of WORKFLOW1: one std.http call per input link,
# each expected to pick up the ENV timeout default.
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
  type: direct
  input:
    - links
  tasks:
    task1:
      with-items: link in <% $.links %>
      action: std.http url=<% $.link %>
      publish:
        result: <% $ %>
"""
# with-items variant of WORKFLOW2: explicit timeout=60 on every item.
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
  type: direct
  input:
    - links
  tasks:
    task1:
      with-items: link in <% $.links %>
      action: std.http url=<% $.link %> timeout=60
      publish:
        result: <% $ %>
"""
class ActionDefaultTest(base.EngineTestCase):
    """Verify that per-action defaults from the workflow environment
    ('__actions' in ENV) are applied to std.http, and that explicit task
    arguments (timeout=60) take precedence over those defaults.

    requests.request is mocked out, so the assertions inspect the exact
    keyword arguments std.http forwarded to it.
    """
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env(self):
        """ENV auth and timeout are used when the task sets neither."""
        wf_service.create_workflows(WORKFLOW1)
        wf_ex = self.engine.start_workflow('wf1', env=ENV)
        self.await_workflow_success(wf_ex.id)
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')
        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=ENV['__actions']['std.http']['timeout'])
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env_not_applied(self):
        """A task-level timeout=60 overrides the ENV default of 30."""
        wf_service.create_workflows(WORKFLOW2)
        wf_ex = self.engine.start_workflow('wf2', env=ENV)
        self.await_workflow_success(wf_ex.id)
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')
        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=60
        )
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_with_items_action_defaults_from_env(self):
        """ENV defaults apply to every item of a with-items task."""
        wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)
        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }
        wf_ex = self.engine.start_workflow(
            'wf1_with_items',
            wf_input=wf_input,
            env=ENV
        )
        self.await_workflow_success(wf_ex.id)
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')
        # One expected requests.request call per input link; with-items does
        # not guarantee ordering, hence any_order=True below.
        calls = [mock.call('GET', url, params=None, data=None,
                           headers=None, cookies=None,
                           allow_redirects=None, proxies=None,
                           auth=EXPECTED_ENV_AUTH, verify=None,
                           timeout=ENV['__actions']['std.http']['timeout'])
                 for url in wf_input['links']]
        requests.request.assert_has_calls(calls, any_order=True)
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_with_items_action_defaults_from_env_not_applied(self):
        """Explicit timeout=60 overrides the ENV default on every item."""
        wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)
        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }
        wf_ex = self.engine.start_workflow(
            'wf2_with_items',
            wf_input=wf_input,
            env=ENV
        )
        self.await_workflow_success(wf_ex.id)
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')
        calls = [mock.call('GET', url, params=None, data=None,
                           headers=None, cookies=None,
                           allow_redirects=None, proxies=None,
                           auth=EXPECTED_ENV_AUTH, verify=None,
                           timeout=60)
                 for url in wf_input['links']]
        requests.request.assert_has_calls(calls, any_order=True)
|
from datetime import datetime, timedelta
from django.db import models
from django.db.models import Max, Min
from tinymce.models import HTMLField
class Company(models.Model | ):
name = models.CharField(max_length=75, blank=True, null=True)
symbol = models.CharField(max_length=10, blank=True, null=True)
description = HTMLField(blank=True, null=True, default='')
listing_date = models.DateField(blank=True, null=True)
renamed_to = models.ForeignKey('self', blank=True, null=True, default=None, related_name='renamed_from')
order = models.IntegerField(blank=True, default=0)
is_index = models.BooleanField(blank=True, default | =False)
is_currently_listed = models.BooleanField(blank=True, default=True)
is_suspended = models.BooleanField(blank=True, default=False)
created_datetime = models.DateTimeField(auto_now_add=True)
updated_datetime = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('symbol',)
verbose_name = 'Company'
verbose_name_plural = 'Companies'
def __unicode__(self):
return self.symbol if self.symbol is not None else self.name
def __str__(self):
return self.symbol if self.symbol is not None else self.name
@property
def readable_name(self):
if self.is_index:
return self.name[1:]
else:
return self.name
@property
def year_high(self):
today = datetime.now()
one_year = timedelta(days=52*7)
if today.isoweekday() == 6:
today = today - timedelta(days=1)
elif today.isoweekday() == 7:
today = today - timedelta(days=2)
last_year = today - one_year
quotes = self.quote_set.filter(quote_date__gt=last_year)
if quotes.count() == 0:
return 0.0
year_high = quotes.aggregate(Max('price_high'))
return ('%f' % year_high['price_high__max']).rstrip('0').rstrip('.')
@property
def year_low(self):
today = datetime.now()
one_year = timedelta(days=52*7)
if today.isoweekday() == 6:
today = today - timedelta(days=1)
elif today.isoweekday() == 7:
today = today - timedelta(days=2)
last_year = today - one_year
quotes = self.quote_set.filter(quote_date__gt=last_year)
if quotes.count() == 0:
return 0.0
year_low = quotes.aggregate(Min('price_low'))
return ('%f' % year_low['price_low__min']).rstrip('0').rstrip('.')
@property
def last_thirty_quotes(self):
quotes = self.quote_set.order_by('-quote_date')[:30]
return quotes
|
from draftjs_exporter.constants import BLOCK_TYPES, INLINE_STYLES
from draftjs_exporter.dom import DOM
from draftjs_exporter.types import Element, Props
def render_children(props: Props) -> Element:
    """
    Pass-through renderer: emits a component's children with no
    wrapping markup of its own.
    """
    children = props["children"]
    return children
def code_block(props: Props) -> Element:
    """Render a code block as <pre><code>...</code></pre>."""
    inner = DOM.create_element("code", {}, props["children"])
    return DOM.create_element("pre", {}, inner)
# Default block map to extend.
BLOCK_MAP = {
BLOCK_TYPES.UNSTYLED: "p",
BLOCK_TYPES.HEADER_ONE: "h1",
BLOCK_TYPES.HEADER_TWO: "h2",
BLOCK_TYPES.HEADER_THREE: "h3",
BLOCK_TYPES.HEADER_FOUR: " | h4",
BLOCK_TYPES.HEADER_FIVE: "h5",
BLOCK_TYPES.HEADER_SIX: "h6",
BLOCK_TYPES.UNORDERED_LIST_ITEM: {"element": "li", "wrapper": "ul"},
BLOCK_TYPES.ORDERED_LIST_ITEM: {"element": "li", "wrapper": "ol"},
BLOCK_TYPES.BLOCKQUOTE: "blockquote",
BLOCK_TYPES.PRE: "pre",
BLOCK_TYPES.CODE: code_block,
BLOCK_TYPES.ATOMIC: render_children,
}
# Default style map to extend.
# Tags come from https://developer.mozilla.org/en-US/docs/Web/HTML/Element.
# and are loosely aligned with https://github.com/jpuri/draftjs-to-html.
# Only styles that map to HTML elements are allowed as defaults.
# Each entry maps a Draft.js inline style to the tag that wraps the text.
STYLE_MAP = {
    INLINE_STYLES.BOLD: "strong",
    INLINE_STYLES.CODE: "code",
    INLINE_STYLES.ITALIC: "em",
    INLINE_STYLES.UNDERLINE: "u",
    INLINE_STYLES.STRIKETHROUGH: "s",
    INLINE_STYLES.SUPERSCRIPT: "sup",
    INLINE_STYLES.SUBSCRIPT: "sub",
    INLINE_STYLES.MARK: "mark",
    INLINE_STYLES.QUOTATION: "q",
    INLINE_STYLES.SMALL: "small",
    INLINE_STYLES.SAMPLE: "samp",
    INLINE_STYLES.INSERT: "ins",
    INLINE_STYLES.DELETE: "del",
    INLINE_STYLES.KEYBOARD: "kbd",
}
|
rl_two = provider.bucket.generate_url(
100,
'POST',
query_parameters=query_params,
headers=headers_two,
)
aiohttpretty.register_uri('POST', delete_url_two, status=204)
await provider.delete(path)
assert aiohttpretty.has_call(method='GET', uri=query_url, params=params_one)
assert aiohttpretty.has_call(method='GET', uri=query_url, params=params_two)
assert aiohttpretty.has_call(method='POST', uri=delete_url_one)
assert aiohttpretty.has_call(method='POST', uri=delete_url_two)
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_accepts_url(self, provider, mock_time):
        """download(accept_url=True) returns the signed GET URL (with an
        attachment disposition) instead of streaming the file contents."""
        path = WaterButlerPath('/my-image')
        response_headers = {'response-content-disposition': 'attachment'}
        url = provider.bucket.new_key(path.path).generate_url(100,
                                                              'GET',
                                                              response_headers=response_headers)
        ret_url = await provider.download(path, accept_url=True)
        assert ret_url == url
class TestMetadata:
    """Tests for provider.metadata() and provider.upload(): aiohttpretty
    intercepts the signed bucket URLs, so each test registers the exact
    request(s) the provider is expected to make."""
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder(self, provider, folder_metadata, mock_time):
        """A folder listing is parsed from XML into a list of items."""
        path = WaterButlerPath('/darp/')
        url = provider.bucket.generate_url(100)
        params = build_folder_params(path)
        aiohttpretty.register_uri('GET', url, params=params, body=folder_metadata,
                                  headers={'Content-Type': 'application/xml'})
        result = await provider.metadata(path)
        assert isinstance(result, list)
        assert len(result) == 3
        # NOTE(review): the leading space in ' photos' mirrors the fixture
        # data -- confirm it is intentional in folder_metadata.
        assert result[0].name == ' photos'
        assert result[1].name == 'my-image.jpg'
        assert result[2].extra['md5'] == '1b2cf535f27731c974343645a3985328'
        assert result[2].extra['hashes']['md5'] == '1b2cf535f27731c974343645a3985328'
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_self_listing(self, provider, folder_and_contents, mock_time):
        """A folder listing must not include the folder itself."""
        path = WaterButlerPath('/thisfolder/')
        url = provider.bucket.generate_url(100)
        params = build_folder_params(path)
        aiohttpretty.register_uri('GET', url, params=params, body=folder_and_contents)
        result = await provider.metadata(path)
        assert isinstance(result, list)
        assert len(result) == 2
        for fobj in result:
            assert fobj.name != path.path
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_folder_metadata_folder_item(self, provider, folder_item_metadata, mock_time):
        """A root listing containing one folder yields one 'folder' item."""
        path = WaterButlerPath('/')
        url = provider.bucket.generate_url(100)
        params = build_folder_params(path)
        aiohttpretty.register_uri('GET', url, params=params, body=folder_item_metadata,
                                  headers={'Content-Type': 'application/xml'})
        result = await provider.metadata(path)
        assert isinstance(result, list)
        assert len(result) == 1
        assert result[0].kind == 'folder'
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_empty_metadata_folder(self, provider, folder_empty_metadata, mock_time):
        """An empty (non-root) folder yields an empty listing."""
        path = WaterButlerPath('/this-is-not-the-root/')
        metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
        url = provider.bucket.generate_url(100)
        params = build_folder_params(path)
        aiohttpretty.register_uri('GET', url, params=params, body=folder_empty_metadata,
                                  headers={'Content-Type': 'application/xml'})
        # NOTE(review): 'header=' (singular) next to 'headers=' looks like a
        # typo'd, ignored kwarg -- confirm against aiohttpretty's API.
        aiohttpretty.register_uri('HEAD', metadata_url, header=folder_empty_metadata,
                                  headers={'Content-Type': 'application/xml'})
        result = await provider.metadata(path)
        assert isinstance(result, list)
        assert len(result) == 0
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file(self, provider, file_header_metadata, mock_time):
        """File metadata is built from the HEAD response headers."""
        path = WaterButlerPath('/Foo/Bar/my-image.jpg')
        url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
        aiohttpretty.register_uri('HEAD', url, headers=file_header_metadata)
        result = await provider.metadata(path)
        assert isinstance(result, metadata.BaseFileMetadata)
        assert result.path == str(path)
        assert result.name == 'my-image.jpg'
        assert result.extra['md5'] == 'fba9dede5f27731c9771645a39863328'
        assert result.extra['hashes']['md5'] == 'fba9dede5f27731c9771645a39863328'
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_lastest_revision(self, provider, file_header_metadata, mock_time):
        """revision='Latest' behaves the same as no revision for files."""
        path = WaterButlerPath('/Foo/Bar/my-image.jpg')
        url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
        aiohttpretty.register_uri('HEAD', url, headers=file_header_metadata)
        result = await provider.metadata(path, revision='Latest')
        assert isinstance(result, metadata.BaseFileMetadata)
        assert result.path == str(path)
        assert result.name == 'my-image.jpg'
        assert result.extra['md5'] == 'fba9dede5f27731c9771645a39863328'
        assert result.extra['hashes']['md5'] == 'fba9dede5f27731c9771645a39863328'
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_missing(self, provider, mock_time):
        """A 404 on HEAD surfaces as a MetadataError."""
        path = WaterButlerPath('/notfound.txt')
        url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
        aiohttpretty.register_uri('HEAD', url, status=404)
        with pytest.raises(exceptions.MetadataError):
            await provider.metadata(path)
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_upload(self,
                          provider,
                          file_content,
                          file_stream,
                          file_header_metadata,
                          mock_time):
        """Upload PUTs the file; created=True because the pre-upload HEAD
        404s (first response), then metadata comes from the second HEAD."""
        path = WaterButlerPath('/foobah')
        content_md5 = hashlib.md5(file_content).hexdigest()
        url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
        metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
        aiohttpretty.register_uri(
            'HEAD',
            metadata_url,
            responses=[
                {'status': 404},
                {'headers': file_header_metadata},
            ],
        )
        # ETag must match the content MD5 for the checksum check to pass.
        headers = {'ETag': '"{}"'.format(content_md5)}
        aiohttpretty.register_uri('PUT', url, status=200, headers=headers),
        # NOTE(review): 'metadata' here shadows the imported metadata module
        # for the rest of this test's scope.
        metadata, created = await provider.upload(file_stream, path)
        assert metadata.kind == 'file'
        assert created
        assert aiohttpretty.has_call(method='PUT', uri=url)
        assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_upload_checksum_mismatch(self,
                                            provider,
                                            file_stream,
                                            file_header_metadata,
                                            mock_time):
        """A non-matching ETag raises UploadChecksumMismatchError."""
        path = WaterButlerPath('/foobah')
        url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
        metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
        aiohttpretty.register_uri(
            'HEAD',
            metadata_url,
            responses=[
                {'status': 404},
                {'headers': file_header_metadata},
            ],
        )
        aiohttpretty.register_uri('PUT', url, status=200, headers={'ETag': '"bad hash"'})
        with pytest.raises(exceptions.UploadChecksumMismatchError):
            await provider.upload(file_stream, path)
        assert aiohttpretty.has_call(method='PUT', uri=url)
        assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
class TestCreateFolder:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def te |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the banktransactions app.

    Auto-generated migration: edit with care, since Django records applied
    migrations and diffs future ones against this content.
    """
    # Both FK targets must have their own initial migrations applied first.
    dependencies = [
        ('bankaccounts', '0001_initial'),
        ('banktransactiontags', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='BankTransaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=255, verbose_name='Label')),
                ('date', models.DateField(verbose_name='Date', default=datetime.date.today)),
                ('amount', models.DecimalField(max_digits=10, verbose_name='Amount', decimal_places=2)),
                ('currency', models.CharField(editable=False, max_length=3, verbose_name='Currency')),
                ('status', models.CharField(max_length=32, default='active', verbose_name='Status', help_text='Depending on its value, determine whether it could alter the bank account balance or being used by statistics.', choices=[('active', 'Active'), ('ignored', 'Ignored'), ('inactive', 'Inactive')])),
                ('reconciled', models.BooleanField(verbose_name='Reconciled', help_text='Whether the bank transaction has been applied on the real bank account.', default=False)),
                ('payment_method', models.CharField(max_length=32, default='credit_card', verbose_name='Payment method', choices=[('credit_card', 'Credit card'), ('cash', 'Cash'), ('transfer', 'Transfer'), ('transfer_internal', 'Transfer internal'), ('check', 'Check')])),
                ('memo', models.TextField(blank=True, verbose_name='Memo')),
                ('scheduled', models.BooleanField(editable=False, default=False)),
                ('bankaccount', models.ForeignKey(to='bankaccounts.BankAccount', related_name='banktransactions', on_delete=models.CASCADE)),
                ('tag', models.ForeignKey(related_name='banktransactions', on_delete=django.db.models.deletion.SET_NULL, verbose_name='Tag', to='banktransactiontags.BankTransactionTag', blank=True, null=True)),
            ],
            options={
                'get_latest_by': 'date',
                'db_table': 'banktransactions',
            },
        ),
        # Composite indexes for the common per-account query patterns.
        migrations.AlterIndexTogether(
            name='banktransaction',
            index_together=set([('bankaccount', 'reconciled'), ('bankaccount', 'date'), ('bankaccount', 'amount')]),
        ),
    ]
|
##########
def _doxygen_version_okay(s, want_major, want_minor, want_fix):
values = s.split('.')
maj =int(values[0])
minor = int(values[1])
fix = 0
if len(values) > 2:
# remove everything after the dash for things like: 'Doxygen
# 1.5.1-p1'
values[2] = re.sub(r'-.*$','',values[2])
try:
fix = int(values[2])
except ValueError as v:
pass
if (maj > 1) or \
(maj == want_major and minor > want_minor) or \
(maj == want_major and minor == want_minor and fix >= want_fix):
return True
return False
def _find_doxygen(env):
    """Find the right version of doxygen. Return a tuple of the
    command name and a boolean indicating whether or not the version
    checked out."""
    if env['doxygen_cmd'] == '':
        # No command configured: probe the per-platform well-known install
        # locations, falling back to plain 'doxygen' on PATH.
        doxygen_cmd_intel = "/usr/intel/bin/doxygen"
        doxygen_cmd_cygwin = "C:/cygwin/bin/doxygen"
        doxygen_cmd_mac = \
            "/Applications/Doxygen.app/Contents/Resources/doxygen"
        doxygen_cmd = "doxygen"
        if env['build_os'] == 'win':
            if os.path.exists(doxygen_cmd_cygwin):
                doxygen_cmd = doxygen_cmd_cygwin
            else:
                base.msgb('DOXYGEN',"Could not find cygwin's doxygen," +
                          "trying doxygen from PATH")
        elif env['build_os'] == 'lin':
            if base.verbose(2):
                base.msgb("CHECKING FOR", doxygen_cmd_intel)
            if os.path.exists(doxygen_cmd_intel):
                doxygen_cmd = doxygen_cmd_intel
        elif env['build_os'] == 'mac':
            if base.verbose(2):
                base.msgb("CHECKING FOR", doxygen_cmd_mac)
            if os.path.exists(doxygen_cmd_mac):
                doxygen_cmd = doxygen_cmd_mac
    else:
        doxygen_cmd = env['doxygen_cmd']
    doxygen_cmd = env.escape_string(doxygen_cmd)
    doxygen_okay = False
    if base.verbose(2):
        base.msgb('Checking doxygen version','...')
    if base.check_python_version(2,4):
        # Run 'doxygen --version' and check the first line of its output
        # against the minimum supported version (1.4.6).
        try:
            (retval, output, error_output) = \
                util.run_command(doxygen_cmd + " --version")
            if retval==0:
                if len(output) > 0:
                    first_line = output[0].strip()
                    if base.verbose(2):
                        base.msgb("Doxygen version", first_line)
                    doxygen_okay = _doxygen_version_okay(first_line, 1,4,6)
                else:
                    for o in output:
                        base.msgb("Doxygen-version-check STDOUT", o)
                    if error_output:
                        for line in error_output:
                            base.msgb("STDERR ",line.rstrip())
        except:
            base.die("Doxygen required by the command line options " +
                     "but no doxygen found")
    return (doxygen_cmd, doxygen_okay)
def _replace_match(istring, mtch, newstring, group_name):
"""This is a lame way of avoiding regular expression backslashing
issues"""
x1= mtch.start(group_name)
x2= mtch.end(group_name)
ostring = istring[0:x1] + newstring + istring[x2:]
return ostring
def _customize_doxygen_file(env, subs):
    """Change the $(*) strings to the proper value in the config file.

    Reads env['doxygen_config'], substitutes every $(key) with subs[key],
    and writes the result to env['doxygen_config_customized'].
    Returns True on success, False on I/O failure.
    """
    # doxygen wants quotes around paths with spaces
    for k,s in iter(subs.items()):
        if re.search(' ',s):
            if not re.search('^".*"$',s):
                base.die("Doxygen requires quotes around strings with spaces: [%s]->[%s]" %
                         ( k,s))
                return False  # unreachable if base.die() exits; kept as a safety net
    # input file -- 'with' closes the handle even on error (the previous
    # open(...).readlines() leaked the descriptor).
    try:
        with open(env['doxygen_config']) as ifile:
            lines = ifile.readlines()
    except (IOError, OSError):
        base.msgb("Could not open input file: " + env['doxygen_config'])
        return False
    env['doxygen_config_customized'] = \
        env.build_dir_join(os.path.basename(env['doxygen_config']) + '.customized')
    # compile the patterns; each matches a literal $(key) as group 'tag'
    rsubs = {}
    for k,v in iter(subs.items()):
        rsubs[k]=re.compile(r'(?P<tag>[$][(]' + k + '[)])')
    olines = []
    for line in lines:
        oline = line
        for k,p in iter(rsubs.items()):
            # Re-search after every splice so repeated occurrences of the
            # same tag on one line are all replaced.
            m = p.search(oline)
            while m:
                oline = _replace_match(oline, m, subs[k], 'tag')
                m = p.search(oline)
        olines.append(oline)
    # output file -- 'with' guarantees the handle is closed on error, which
    # the previous explicit close-in-except only did partially.
    try:
        with open(env['doxygen_config_customized'],'w') as ofile:
            for line in olines:
                ofile.write(line)
    except (IOError, OSError):
        base.msgb("Could not write output file: " + env['doxygen_config_customized'])
        return False
    return True
def _build_doxygen_main(args, env):
    """Customize the doxygen input file. Run the doxygen command, copy
    in any images, and put the output in the right place.

    args must be a two-element list: (substitutions dict, dummy file name).
    Returns a (retval, messages) tuple: (0, []) on success, (1, [msg]) on
    failure. base.die() aborts outright on unrecoverable errors.
    """
    if isinstance(args, list):
        if len(args) < 2:
            base.die("Need subs dictionary and dummy file arg for the doxygen command " +
                     "to indicate its processing")
    else:
        base.die("Need a list for _build_doxygen_main with the subs " +
                 "dictionary and the dummy file name")
    (subs,dummy_file) = args
    (doxygen_cmd, doxygen_okay) = _find_doxygen(env)
    if not doxygen_okay:
        msg = 'No good doxygen available on this system; ' + \
              'Your command line arguments\n\trequire it to be present. ' + \
              'Consider dropping the "doc" and "doc-build" options\n\t or ' + \
              'specify a path to doxygen with the --doxygen knob.\n\n\n'
        return (1, [msg]) # failure
    else:
        env['DOXYGEN'] = doxygen_cmd
    try:
        okay = _customize_doxygen_file(env, subs)
    except:
        base.die("CUSTOMIZE DOXYGEN INPUT FILE FAILED")
    if not okay:
        return (1, ['Doxygen customization failed'])
    cmd = env['DOXYGEN'] + ' ' + \
          env.escape_string(env['doxygen_config_customized'])
    if base.verbose(2):
        base.msgb("RUN DOXYGEN", cmd)
    (retval, output, error_output) = util.run_command(cmd)
    # Mirror doxygen's stdout/stderr into the build log.
    for line in output:
        base.msgb("DOX",line.rstrip())
    if error_output:
        for line in error_output:
            base.msgb("DOX-ERROR",line.rstrip())
    if retval != 0:
        base.msgb("DOXYGEN FAILED")
        base.die("Doxygen run failed. Retval=", str(retval))
    # Touch the dummy file so the dependence-driven builder records success.
    util.touch(dummy_file)
    base.msgb("DOXYGEN","succeeded")
    return (0, []) # success
###########################################################################
# Doxygen build
###########################################################################
def _empty_dir(d):
"""return True if the directory d does not exist or if it contains no
files/subdirectories."""
if not os.path.exists(d):
return True
for (root, subdirs, subfiles) in os.walk(d):
if len(subfiles) or len(subdirs):
return False
return True
def _make_doxygen_reference_manual(env, doxygen_inputs, subs, work_queue,
hash_file_name='dox'):
"""Install the doxygen reference manual the doyxgen_output_dir
directory. doxygen_inputs is a list of files """
dox_dag = dag.dag_t(hash_file_name,env=env)
# so that the scanner can find them
dirs = {}
for f in doxygen_inputs:
dirs[os.path.dirname(f)]=True
for d in dirs.keys():
env.add_include_dir(d)
# make sure the config and top file are in the inptus list
doxygen_inputs.append(env['doxygen_config'])
doxygen_inputs.append(env['doxygen_top_src'])
dummy = env.build_dir_join('dummy-doxygen-' + hash_file_name)
# Run it via the builder to make it dependence driven
run_always = False
if _empty_dir(env['doxygen_install']):
run_always = True
if run_always:
_build_doxygen_main([subs,d |
# systemOverloadHardening
#
# Used by:
# Celestials named like: Red Giant Beacon Class (6 of 6)
# Effect metadata read by the surrounding framework: presumably "early"
# schedules this pass early, and the tuple marks a passive, projected
# effect -- confirm against the effect-loader's conventions.
runTime = "early"
type = ("projected", "passive")
def handler(fit, module, context):
    """Multiply the overloadHardeningBonus attribute of every module that
    has it by this module's overloadBonusMultiplier."""
    multiplier = module.getModifiedItemAttr("overloadBonusMultiplier")
    has_bonus = lambda mod: "overloadHardeningBonus" in mod.itemModifiedAttributes
    fit.modules.filteredItemMultiply(has_bonus, "overloadHardeningBonus", multiplier)
|
# Django settings for celery_http_gateway project.
# NOTE(review): the flat DATABASE_* / BROKER_* / CARROT_BACKEND settings
# below are the legacy (pre-Django-1.2, pre-Celery-3) style -- confirm the
# targeted Django/Celery versions before reusing this file.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
CARROT_BACKEND = "amqp"
# Store task results in the Django database backend.
CELERY_RESULT_BACKEND = "database"
BROKER_HOST = "localhost"
BROKER_VHOST = "/"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_ENGINE = 'sqlite3'
# path to database file if using sqlite3.
DATABASE_NAME = 'development.db'
# Not used with sqlite3.
DATABASE_USER = ''
# Not used with sqlite3.
DATABASE_PASSWORD = ''
# Set to empty string for localhost. Not used with sqlite3.
DATABASE_HOST = ''
# Set to empty string for default. Not used with sqlite3.
DATABASE_PORT = ''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#1i=edpk55k3781$z-p%b#dbn&n+-rtt83pgz2o9o)v8g7(owq'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'celery_http_gateway.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or
    # "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'djcelery',
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.